diff --git a/doc/ReleaseNotes.md b/doc/ReleaseNotes.md index 3581142..6e489bc 100644 --- a/doc/ReleaseNotes.md +++ b/doc/ReleaseNotes.md @@ -1,910 +1,918 @@ # Laura++ release notes +16th Nov 2023 Thomas Latham +* Add functions to fit models that return: + - signal DP amplitudes + - signal and background DP likelihood(s) +* Add example to plot histograms of likelihood values +* Caveat: SCF signal is not currently handled - an appropriate warning is printed +* See https://phab.hepforge.org/T227 for more details + 16th Oct 2023 Thomas Latham * Remove all uses of deprecated ROOT ClassImp macro * Generate ROOT dictionary only for classes: LauAbsRValue, LauBlind and LauParameter - Remove ClassDef macro from all other classes - Remove all other classes from LinkDef file and dictionary generation command * Take advantage of related improvements in the ROOT CMake - Consequently bump the minimum ROOT version to 6.18 * See https://phab.hepforge.org/T213 for more details 22nd June 2023 Thomas Latham * Added functionality to properly manage Gaussian constraints in toy generation - see https://phab.hepforge.org/T45 9th June 2023 Mark Whitehead * Added functionality to include n-dimensional Gaussian constraints - see https://phab.hepforge.org/T214 10th February 2023 Thomas Latham * Store the sqDP jacobian separately in weightEvents routines - Previously the weight was multiplied by the jacobian - Now the jacobian is stored in a separate branch - New info messages give instructions on what to do depending on how the input sample was generated 21st November 2022 Andy Morris * Use Minuit to automatically determine ASq max for signal in LauIsobarDynamics - see https://phab.hepforge.org/D85 28th June 2022 Thomas Latham * Improvements to Doxygen config 31st May 2022 Thomas Latham * Add examples for easily generating toy samples uniformly in any DP or SqDP * Very minor updates to related QuasiFlatSqDalitz examples 30th November 2021 Thomas Latham * Update CI config to: - Simplify changing compiler version - Disallow dev build failure in scheduled pipelines 7th September 2021 Thomas Latham * Update CI to test future ROOT versions in dev3/dev4 LCG builds 1st July 2021 Thomas Latham * Add version of QuasiFlatSqDalitz where particle masses can be supplied on command line 28th June 2021 Thomas Latham * Fix for compilation with ROOT 6.24 18th May 2021 Mark Whitehead * Add new MIPW example - see https://phab.hepforge.org/T147 26th March 2021 Thomas Latham * Fix bug in setting for linear interpolation in Lau1DHistPdf - see https://phab.hepforge.org/T142 9th March 2021 Mark Whitehead * Update particle properties to PDG 2020 - see https://phab.hepforge.org/T140 5th March 2021 Dan Johnson * Alterations to allow Blatt-Weisskopf factor for parent to be created in a model with only a K-matrix - see https://phab.hepforge.org/D52 4th February 2021 Dan Johnson * Extend the K-matrix implementation to handle non-zero spin - see https://phab.hepforge.org/T135 1st February 2021 Dan Johnson * Allow floating of parameters in the K-matrix - see https://phab.hepforge.org/T59 2nd December 2020 Thomas Latham * Fix LauFormulaPar to follow change in behaviour of TFormula - see https://phab.hepforge.org/T129 27th November 2020 Dan Johnson * Allow slope of NR exponential model to vary negative - improves fit stability in low-statistics fits - see https://phab.hepforge.org/T128 17th September 2020 Mark Whitehead * Begin updates to use inclusive language.
Simultaneous fits now handled by Coordinator and Tasks - see https://phab.hepforge.org/T112 19th August 2020 Thomas Latham * Remove explicit normalisation of numerator Blatt-Weisskopf factors - See https://phab.hepforge.org/T93 22nd May 2020 Thomas Latham * Fix uninitialised variable (related to rho-omega mixing) in LauIsobarDynamics 12th December 2019 Thomas Latham & Daniel Johnson * Fix issue with generation of events for categories with small expected yield - See https://phab.hepforge.org/T76 21st November 2019 Thomas Latham * Add QuasiFlatSqDalitz example, which generates toy according to EvtGen's FLATSQDALITZ model 6th - 20th November 2019 Thomas Latham * Adopt CMake as the build system - See https://phab.hepforge.org/T33 === ## Laura++ v3r5 6th August 2019 Thomas Latham * Add some extra charmonium states to list of known resonances * Minor modifications to code for importing parameter values from file 17th May 2019 Thomas Latham * Fix licences and Doxygen in pole, nonresonant form factors, and rescattering line shapes * Add journal reference for pole line shape * Fix class names and add Doxygen to LauCalcChiSq, LauResultsExtractor, LauMergeDataFiles * Update Doxyfile from more recent Doxygen version * Make consistent the use of ClassDef and ClassImp everywhere * Add functions to return the "sanitised" names of parent and daughters 8th January 2019 Thomas Latham * Workaround for issue with splitting toy MC files when generating large number of experiments 5th December 2018 Thomas Latham * Move sources and headers for the utilities library into the main library - makes examples dir executables only (in preparation for CMake build) 22nd, 23rd July and 4th August 2018 Juan Otalora * Add pole, nonresonant form factors, and rescattering line shapes 17th April 2018 Daniel O'Hanlon * Fix bug in rho-omega mixing fit-fraction calculation: rho fit-fraction was not normalised to the total DP rate 23rd March 2018 Thomas Latham * Some fixes in LauRhoOmegaMix that propagate the following settings to the subcomponents: - spinType_ - flipHelicity_ - ignoreMomenta_ - ignoreSpin_ - ignoreBarrierScaling_ 23rd February 2018 Thomas Latham * Add section on structure of package to README file (requested by CPC technical editor) * Add copyright notice to files in test directory 21st February 2018 Thomas Latham * Improve comments in functions calculating helicity angles in LauKinematics 19th February 2018 Daniel O'Hanlon * Fix bug in LauCPFitModel introduced in code for importing parameters (25/01/18) - When parameters are asked to be randomised via useRandomInitFitPars, those parameters that were already fixed in the model, in addition to those that are fixed on import, were being freed. 19th February 2018 Daniel O'Hanlon * Bug fixes in rho/omega fit fractions calculation 25th January 2018 Daniel O'Hanlon * Add feature for importing parameters into LauCPFitModels from previous fit output files (a brief usage sketch is given below). 23rd January 2018 Daniel O'Hanlon * Calculate separate rho and omega fit-fractions for LauRhoOmegaMix, when turned on with calculateRhoOmegaFitFractions in LauIsobarDynamics.
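A minimal sketch of the parameter-import feature noted in the 25th January 2018 entry above, using the LauAbsFitModel::setParametersFromFile and useRandomInitFitPars interfaces declared later in this patch. The helper function, file name and tree name are purely illustrative; the fit model is assumed to have been configured in the usual way.

```cpp
#include "TString.h"

#include "LauCPFitModel.hh"

// Hypothetical helper: import parameter values from a previous fit's output
// ntuple into an already-configured LauCPFitModel before running a new fit
void importPreviousResults( LauCPFitModel* fitModel )
{
    const TString parFileName { "previous-fit-results.root" }; // illustrative file name
    const TString parTreeName { "fitResults" };                // illustrative tree name
    const Bool_t fixImported { kTRUE };                        // fix the imported parameters in this fit

    fitModel->setParametersFromFile( parFileName, parTreeName, fixImported );

    // Any parameters left floating can still have their initial values randomised
    fitModel->useRandomInitFitPars( kTRUE );
}
```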
=== ## Laura++ v3r4 16th January 2018 Thomas Latham * Update licence for all files to the Apache Software License Version 2.0 14th December 2017 Thomas Latham * Correct LauFlatteRes to use the exact formulae from the papers that report the default parameter values for each supported resonance - Deals correctly now with the cases where the m0 is absorbed into the couplings - Now uses the correct value of g1 for the a_0(980) (needed to be squared) - Also sets the mass values to those reported in those papers - Added printout to specify what is being done in each case * Improve the consistency between the weightEvents functions in LauSimpleFitModel and LauCPFitModel - Also remove the unnecessary division by ASqMax in LauIsobarDynamics::getEventWeight 4th December 2017 Thomas Latham * Allow background yields to be specified as any LauAbsRValue (i.e. LauParameter or now also LauFormulaPar) 30th November 2017 Thomas Latham * In LauFitDataTree::readExperimentData add a check that the tree contains the branch "iExpt" and if not: - If the requested experiment is 0 then read all data in the tree (and print a warning) - Otherwise print an error message and return 29th November 2017 Thomas Latham * Improve error messages in LauCPFitModel::weightEvents === ## Laura++ v3r3 23rd November 2017 Thomas Latham * Add an example written as a Python script: GenFit3pi.py * Various improvements to the other example code 23rd November 2017 Thomas Latham * Fix bug in the LauAbsResonance constructor used by the K-matrix - "resonance" charge is now set to the sum of the daughter charges 1st November 2017 Thomas Latham * Fix bug in LauFlatteRes - m_0 factor multiplying the width was, if useAdlerTerm_ is true, replaced by f_Adler rather than being multiplied by it - only affected case where using LauFlatteRes to describe K_0*(1430) === ## Laura++ v3r2 18th October 2017 Thomas Latham * Modify LauDaughters::testDPSymmetry to: - produce an ERROR message and exit if the symmetric DP does not have its daughters arranged correctly - detect also flavour-conjugate DPs and set a corresponding flag that can be retrieved via LauDaughters::gotFlavourConjugateDP - in this case if the daughters are sub-optimally arranged print a WARNING * Modify LauIsobarDynamics::calcDPNormalisationScheme so that any narrow resonance regions are symmetrised appropriately if the DP is fully-symmetric, symmetric or is flavour-conjugate (and in this last case, symmetrisation of the integration has been forced via a new boolean function) 11th October 2017 Thomas Latham * Allow the user to specify the randomiser to be used to randomise the initial values of the isobar parameters - simply use the new static function LauAbsCoeffSet::setRandomiser - if not specified it defaults to the current use of LauRandom::zeroSeedRandom 10th October 2017 Thomas Latham * Make symmetrisation of vetoes automatic in LauVetoes for symmetric DPs - but add new allowed indices (4 and 5) to allow vetoes to be applied to mMin or mMax if desired * Also apply to fully symmetric DPs * In addition implement the more efficient calculation of the amplitude in the fully symmetric case 23rd September 2017 Thomas Latham * Fix expressions for Covariant option for Blatt-Weisskopf momentum for spin > 1 * Make use of setSpinFormalism to set the formalism to Legendre for NR types (instead of kludging with ignoreMomentum) 15th September 2017 Thomas Latham * Various improvements to the examples 6th September 2017 Thomas Latham * Improve doxygen comments for event-embedding functions in fit
models 5th September 2017 Thomas Latham * Improve efficiency of Covariant spin factor calculations 31st August 2017 Thomas Latham * Add further option for parent Blatt-Weisskopf momentum - Covariant (corresponding to Covariant spin factor) 8th August 2017 Thomas Latham * Implement expressions for the spin factor based on covariant tensor formalism * Make it much simpler to switch between formalism options via new functions in LauResonanceMaker: - setSpinFormalism, setBWType, setBWBachelorRestFrame - default settings are unchanged (and are equivalent to setting LauAbsResonance::Zemach_P, LauBlattWeisskopfFactor::BWPrimeBarrier, LauBlattWeisskopfFactor::ResonanceFrame, respectively) 21st July 2017 Thomas Latham * Add note to README file about compilation issue on Ubuntu 16.04 LTS 21st July 2017 Thomas Latham * Create public functions to update the kinematics based on one invariant mass and the corresponding helicity angle 20th June 2017 Daniel O'Hanlon * Terminate when asked to read a non-existent K-matrix parameter file 8th June 2017 Thomas Latham * Fix compilation error on gcc 4.9 30th May 2017 Thomas Latham * Permit different efficiency histograms to be defined in classic/square DP and force enable calculation of square DP coords (here and for background PDFs) if required 29th May 2017 Thomas Latham * Propagate information on EDM from the minimiser to the fit models and into the fit results ntuple 29th May 2017 Thomas Latham * Ensure that the kinematics will calculate the square DP co-ordinates if the integration requires them 29th May 2017 Thomas Latham * Reintegrate the RooFit-slave branch into the trunk (and delete the branch) 28th March 2017 Thomas Latham (in branch for developing RooFit-based slave) * Rename cacheFitData to verifyFitData in all fit models and RF-slave 28th March 2017 Daniel O'Hanlon * Fix bug in LauCPFitModel::weightEvents 24th March 2017 Thomas Latham (in branch for developing RooFit-based slave) * Refactor code between fit models, master, slave and fit-object classes 22nd March 2017 Thomas Latham (in branch for developing RooFit-based slave) * Make the compilation of the RF-based slave class, and its associated example binary, optional 22nd March 2017 Thomas Latham (in branch for developing RooFit-based slave) * Complete working example of RooFit-based slave - Have identified some scope for refactoring of code - Also want to make the compilation of this class, and its associated example binary, optional 15th March 2017 Mark Whitehead (in branch for developing time-dependent model) * Handle event-by-event mistag probabilities 7th March 2017 Thomas Latham * Rename the command for weighting events from "reweight" to "weight" 3rd March 2017 Daniel O'Hanlon * Implement LauCPFitModel::weightEvents based on the same function in LauSimpleFitModel 1st March 2017 Thomas Latham (in branch for developing RooFit-based slave) * Fix root-cling related errors 28th February 2017 Thomas Latham (in branch for developing RooFit-based slave) * Start work on creation of RooFit-based slave class - Will allow RooFit-based fitters to plug in straightforwardly to the simultaneous fitting (JFit) framework 20th February 2017 Thomas Latham * Add warning messages to LauBkgndDPModel::setBkgndHisto when supplied background histogram pointers are null 31st January 2017 Thomas Latham * Audit of the code to automate the creation of the integration binning scheme.
- Fix one bug, some tidy-ups, and reintroduce a couple of lost features: - the use of a single square-DP grid if a narrow resonance is found in m12 - extension of region down to threshold if the narrow resonance is close to threshold 22nd January 2017 Daniel O'Hanlon * Fix bug in automated integration scheme where resonances were assumed to have been added in order of ascending mass 14th December 2016 Daniel O'Hanlon * Add several light resonances to the list in LauResonanceMaker 13th December 2016 Daniel O'Hanlon * Automate the determination of the integration scheme for an arbitrary number of narrow resonances in m13 and m23 12th December 2016 Daniel O'Hanlon * Efficiency saving from modifying the behaviour of LauIsobarDynamics::calculateAmplitudes, LauIsobarDynamics::resAmp and LauIsobarDynamics::incohResAmp in the case of symmetric DPs - Previously, there were 2N calls to LauKinematics::flipAndUpdateKinematics for N resonances, now reduced to 2 21st November 2016 John Back * Added two K-matrix examples for creating pedagogical plots (with associated data files): - B3piKMatrixPlots for K-matrix elements, propagator, and pole and SVP production terms - B3piKMatrixMassProj for pole and SVP mass projections generated over the DP 17th November 2016 John Back * Modifications of the K matrix implementation: - Changed the format of the parameter file to use keywords, with updated examples - The scattering coefficients are now symmetrised, unless "ScattSymmetry 0" is used - Allow the option to use the Adler zero suppression term for the production poles and SVPs (default = off). These are specified with the useProdAdler boolean in the LauKMatrixProdPole/SVP constructors - Added a few helper functions for obtaining coefficients and (internal) matrices - Added a method to return the Lorentz-invariant transition amplitude for plots/tests 1st November 2016 Wenbin Qian * Add calculation of so-called covariant factors for spin amplitude === ## Laura++ v3r1 9th September 2016 Thomas Latham * Modification of LauFitNtuple to check the size of the covariance matrix wrt known number of parameters and act accordingly: - If it is empty, just fill a diagonal correlation matrix and issue a warning. - If it results from a failed first stage of a two-stage fit then the correlation matrix is padded with 1s and 0s for the parameters that were fixed in the first stage. * Remove the feature that allows parameters to float in first stage of a two-stage fit but to be fixed in second. * Minor fix to LauAbsCoeffSet: the names of parameters would be mangled if they were the same as the basename, e.g. A1_A would become A1_ while A1_B would be correct. 8th September 2016 Thomas Latham * Modifications to LauResonanceInfo to allow customisation of which extra parameters are shared between charge conjugate or shared-parameter records. - Where the parameters are not shared they are independently created instead of being cloned. * Modification of LauRhoOmegaMix to take advantage of the above to have the magB and phiB parameters independent. * Addition to LauResonanceMaker of rho(770)_COPY record that is a shared record with rho(770) - this allows the above new feature to be used. * Minor unrelated improvement to information messages in LauIsobarDynamics. 25th August 2016 John Back * Modified LauRhoOmegaMix to allow either a RelBW or GS lineshape for the rho, as well as allowing the option to set the second-order denominator term to be equal to unity. 
Also fixed the bug where the spinTerm was not included in the rho-omega amplitude. Changed the LauAbsResonance enum from RhoOmegaMix to RhoOmegaMix_GS, RhoOmegaMix_RBW, RhoOmegaMix_GS_1 and RhoOmegaMix_RBW_1, where _1 sets the denominator term to unity and _GS or _RBW sets the appropriate rho lineshape. The omega is always a RelBW. * Added to LauAbsResonance: ignoreSpin and ignoreBarrierScaling boolean flags, which ignore either the spin terms or the barrier scaling factors for the amplitude. The latter does not turn off the resonance barrier factor for mass-dependent widths * Added to LauResonanceMaker: getResInfo(resonanceName), which retrieves the resonance information. This is used to obtain the PDG omega values for initialising LauRhoOmegaMix * Made LauRelBreitWignerRes ignore momentum-dependent terms for the resonance width if ignoreMomenta is set. This is used in the LauRhoOmegaMix for the omega lineshape where its width does not depend on momentum 23rd May 2016 John Back * Added new lineshape model for rho-omega mass mixing, LauRhoOmegaMix. 12th April 2016 Thomas Latham * Switch to integrating in square DP when narrow resonances are found in m12. - The integration grid size can be specified by the user 19th January 2016 John Back * Correct the f(m^2) factor in the denominator of the LauGounarisSakuraiRes lineshape to use Gamma_0 instead of Gamma(m) 14th January 2016 Thomas Latham * Documentation improvements 7th December 2015 Thomas Latham * Resolve bug that meant the order of resonances in LauIsobarDynamics was assumed to match with the order in which the complex coefficients are supplied to the fit model - The ordering of resonances is defined by LauIsobarDynamics: - Firstly all coherent resonances in order of addition - Followed by all incoherent resonances in order of addition - The complex coefficients are now rearranged to match that order - Printout of the model summary at the end of initialisation has been enhanced to indicate the ordering - Doxygen updated to reflect these changes 12th November 2015 Daniel Craik * Added support for Akima splines and linear interpolation to Lau1DCubicSpline * LauAbsModIndPartWave, LauModIndPartWaveRealImag and LauModIndPartWaveMagPhase updated to allow choice of spline interpolation method * LauEFKLLMRes updated to use Akima splines 10th November 2015 Thomas Latham & Daniel Craik * Add the EFKLLM form-factor model for the Kpi S-wave and an example using this lineshape * Modify LauResonanceMaker::getResonance to use a switch for greater clarity and easier checking on missing cases 4th November 2015 Daniel Craik * Add checks to LauIsobarDynamics::addResonance and LauIsobarDynamics::addIncohResonance to stop the wrong type of LauResonanceModel being used - LauAbsResonance::isIncoherentModel method added to identify incoherent models 8th September 2015 Mark Whitehead * Add the ability to modify the error of parameters via the CoeffSet - setParameterError added to LauAbsCoeffSet * Tweak the handling of initial error values passed to MINUIT (to determine initial step size) in LauMinuit 7th September 2015 Mark Whitehead * Add the ability to Gaussian constrain parameters via the CoeffSet - addGaussianConstraint added to LauAbsCoeffSet 12th June 2015 Thomas Latham * Modifications to Belle-style nonresonant models - LauBelleNR modified to use pure Legendre polynomials of cos(theta) in the spin term (i.e. 
to remove the q*p factors) - New form added to LauBelleSymNR (LauAbsResonance::BelleSymNRNoInter) that removes the interference term between the two DP halves - The new form also works with non-zero spin (warning added if non-zero spin specified for BelleSymNR and TaylorNR) 8th June 2015 Thomas Latham * Further work on the blinding mechanism: - New method added LauParameter::blindParameter that activates the blinding. - The rest of the framework updated to use another new method LauParameter::unblindedValue in all likelihood calculations etc. - Example GenFitNoDP updated to include lines to optionally blind the yield parameters. 29th May 2015 Daniel Craik * Added LauBlind class for blinding and unblinding a value with an offset based on a blinding string 26th May 2015 Daniel Craik * Stopped LauCPFitModel passing fixed signal/background yields or asymmetries to Minuit to avoid hitting limit of 101 fixed parameters 22nd April 2015 Daniel Craik * Updated MIPW classes to use Lau1DCubicSpline 19th April 2015 Daniel Craik * Added Lau1DCubicSpline class for 1D spline interpolation 26th March 2015 Thomas Latham * Reworked MIPW code into abstract base class and derived classes to allow different representations of the amplitude at each knot 31st December 2015 Daniel Craik * Added unbinned goodness of fit tests to examples 12th January 2015 Daniel Craik * Calculate effective masses for virtual resonances above the upper kinematic limit 10th December 2014 Daniel Craik * Added coefficient sets to extract gamma from a simultaneous fit to CP and nonCP final states, such as the B0->D_CP K pi and B0->D0bar K pi Dalitz plots, as proposed in Phys. Rev. D79, 051301 (2009) - LauPolarGammaCPCoeffSet uses the CP parameters r, delta and gamma directly - LauRealImagGammaCPCoeffSet parameterises CPV as X_CP+/- and Y_CP+/- - LauCartesianGammaCPCoeffSet parameterises CPV as X_CP, Y_CP DeltaX_CP DeltaY_CP - Fixed CP parameters are not passed to the fitter so the same coefficient sets can be used for both the CP and nonCP Dalitz plots - LauPolarGammaCPCoeffSet allows for a single gamma parameter to be shared between multiple resonances - LauAbsCoeffSet::adjustName made virtual to allow global variables such as gamma to not receive a prefix === ## Laura++ v3r0p1 19th June 2015 Thomas Latham * Factor out the JFit slave code from LauAbsFitModel into a new base class LauSimFitSlave 19th June 2015 Thomas Latham * Fix check in LauIsobarDynamics::calcDPNormalisationScheme to avoid using hardcoded number === ## Laura++ v3r0 24th October 2014 Thomas Latham * Fixed bug in floating of Blatt-Weisskopf barrier radii - The values at the pole mass were not being updated when the radii changed 21st October 2014 Daniel Craik * Fixed bug in LauIsobarDynamics where multiple incoherent amplitudes led to nonsensical fit fractions 17th October 2014 John Back * Added the ability to calculate the transition amplitude matrix T in LauKMatrixPropagator, as well as a few other minor speed-up changes and code checks. 
Example/PlotKMatrixTAmp.cc can be used to check the T amplitude variation, phase shift and inelasticity, for a given K matrix channel, as a function of the invariant mass squared variable s 15th October 2014 Thomas Latham * Add methods to LauIsobarDynamics to make the integration binning more tunable by the user: - setNarrowResonanceThreshold - modify the value below which a resonance is considered to be narrow (defaults to 0.02 GeV/c2) - setIntegralBinningFactor - modify the factor by which the narrow resonance width is divided to obtain the bin size (defaults to 100) * Print warning messages if the memory usage is likely to be very large 13th October 2014 Thomas Latham * Modify Makefile to allow compilation with ROOT 6 (in addition to maintaining support for ROOT 5) * Fix a few compilation errors on MacOSX 10.9 13th October 2014 Daniel Craik * Update LauModIndPartWave to allow knots at kinematic limits to be modified - Add new method setKnotAmp to modify existing knots (and the knot at the upper kinematic limit which is automatically added at initialisation) * Update doxygen for LauIsobarDynamics::addIncoherentResonance to mention that incoherent resonances must be added last 10th October 2014 Thomas Latham * Add new method to LauResonanceMaker to set whether the radius of a given Blatt-Weisskopf category should be fixed or floated * Modify the methods of LauResonanceMaker to set the radius value and whether it should be floated so that they work before and after the resonances have been created 9th October 2014 John Back * Corrected the eta-eta' and 4pi phase space factors in LauKMatrixPropagator, which is used for the K-matrix amplitude: - calcEtaEtaPRho() does not include the mass difference term m_eta - m_eta' following the recommendation in hep-ph/0204328 and from advice from M Pennington - calcFourPiRho() incorporates a better parameterisation of the double integral of Eq 4 in hep-ph/0204328 which avoids the exponential increase for small values of s (~< 0.1) - More detailed comments are provided in the above two functions to explain what is going on and the reason for the choices made 6th October 2014 Thomas Latham * Implement the mechanism for floating Blatt-Weisskopf barrier factor radius parameters 30th September 2014 Thomas Latham * Fix issue in the checks on toy MC generation validity - in the case of exceeding max iterations it was possible to enter an infinite loop - the checks now detect all three possible states: - aSqMaxSet_ is too low (generation is biased) => increase aSqMaxSet_ value - aSqMaxSet_ is too high => reduce aSqMaxSet_ value to improve efficiency - aSqMaxSet_ is high (causing low efficiency) but cannot be lowered without biasing => increase iterationsMax_ limit * Update resonance parameter in LauResonanceMaker to match PDG 2014 * Modify behaviour when TTree objects are saved into files to avoid having multiple cycle numbers present 29th September 2014 Daniel Craik * Add support for incoherent resonances in the signal model - LauIsobarDynamics updated to include incoherent terms - ABC for incoherent resonances, LauAbsIncohRes, added deriving from LauAbsResonance - LauGaussIncohRes added to implement a Gaussian incoherent resonance, deriving from LauAbsIncohRes - Small changes to various other classes to enable incoherent terms * Fixed small bug in LauMagPhaseCoeffSet which could hang if phase is exactly pi or -pi * Added charged version of the BelleNR resonances to LauResonanceMaker * Updated parameters in LauConstants to match PDG 2014 14th July 2014 Thomas
Latham * Add initial support for fully-symmetric final states such as B0 -> KS KS KS - Performs the symmetrisation of the signal model - Background (and efficiency) histogram classes need some work if the user wants to provide folded histograms 8th July 2014 Daniel Craik * Add class for model-independent partial wave - Uses splines to produce a smooth amplitude from a set of magnitude and phase values at given invariant masses - The individual magnitudes and phases can be floated in the fit 16th June 2014 Thomas Latham * Allow floating of resonance parameters in simultaneous fits 13th June 2014 Thomas Latham * Fix bug in LauResonanceInfo cloning method, where the width parameter was given a clone of the mass 10th June 2014 Thomas Latham * Add new function to allow sharing of resonance parameters between components that are not charge conjugates, e.g. LASS_BW and LASS_NR 9th June 2014 Thomas Latham and Daniel Craik * Fix bug in the new integration scheme - Was not accounting for cases where several resonances share a floating parameter - Meant that the integrals and caches for that resonance were not being updated * Introduce a change in the implementation of the helicity flip for neutral parent decays - Prior to this change the helicity was flipped for any neutral resonance in the decay of a neutral particle. - Now the flip no longer occurs in flavour-specific decays (such as Bs0 -> D0bar K- pi+ or B0 -> K+ pi- pi0) since it is only required in flavour-conjugate modes (such as B0 -> KS pi+ pi-). - This does not affect any physical results but it does contribute a pi phase flip to some odd-spin resonances (for example K*(892)0 in Bs0->D0barKpi). - Therefore results are not necessarily comparable between fits run before and after this changeset. - This change will first be released in v3r0. - The new flip condition is illustrated in the sketch below.
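The helicity-flip change described in the 9th June 2014 entry amounts to the following decision, shown here as a schematic sketch only (the function and flag names are invented; this is not the Laura++ implementation):

```cpp
// Decide whether the helicity angle should be flipped for a resonance in the
// decay of a neutral parent.
// Old behaviour: flip for any neutral resonance in a neutral-parent decay.
// New behaviour: flip only in flavour-conjugate modes (e.g. B0 -> KS pi+ pi-),
// not in flavour-specific decays (e.g. Bs0 -> D0bar K- pi+ or B0 -> K+ pi- pi0).
bool flipHelicity( const bool parentIsNeutral, const bool flavourConjugateMode )
{
    return parentIsNeutral && flavourConjugateMode;
}
```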
=== ## Laura++ v2r2 5th June 2014 Thomas Latham * Fix issue in asymmetric efficiency histogram errors - Fluctuation of bins was incorrectly sampling - assumed area each side of peak was the same 5th June 2014 Thomas Latham (in branch for release in v3r0) * Introduce intelligent calculation of amplitudes during recalculation of integrals and recaching of data points - Floating resonance parameters is now much more efficient * Make resonance parameters second-stage, also improves fit timing when floating them 3rd June 2014 Rafael Coutinho * Implement generation of toy MC from fit results in fitSlave 27th May 2014 Thomas Latham (in branch for release in v3r0) * Complete audit of special functions * Remove unnecessary LauAbsDPDynamics base class and move all functionality into LauIsobarDynamics 20th May 2014 Daniel Craik (in branch for release in v3r0) * Add support for K*_0(1430) and a_0(980) to LauFlatteRes 16th-19th May 2014 Thomas Latham and Daniel Craik (in branch for release in v3r0) * Update all other lineshapes so that their parameters can float - The only resonance parameters that now cannot float are the Blatt-Weisskopf barrier factor radii 15th May 2014 Thomas Latham (in branch for release in v3r0) * Change the mechanism for getting floating resonance parameters into the fit - Moved from LauResonanceMaker to the resonances themselves - Lays some groundwork for improving the efficiency of recalculating the integrals - LauBelleNR and LauBelleSymNR lineshape parameters can now be floated 13th May 2014 Daniel Craik (in branch for release in v3r0) * Fix bug where illegal characters were being propagated from resonance names into TBranch names 6th May 2014 Thomas Latham (in branch for release in v3r0) * Provide accessors for mass and width parameters 5th May 2014 Louis Henry * Fix compilation problem by making LauDatabasePDG destructor virtual 4th May 2014 Thomas Latham * Provide a new argument to LauSimpleFitModel::splitSignalComponent and LauCPFitModel::splitSignalComponent to allow fluctuation of the bins on the SCF fraction histogram 29th April 2014 Thomas Latham * Fix bug in the determination of the integration scheme - Nearby narrow resonances caused problems if their "zones" overlap - These zones are now merged together 29th April 2014 Thomas Latham (in branch for release in v3r0) * Some improvements to integration scheme storage 26th April 2014 Juan Otalora (in branch for release in v3r0) * Make integration scheme fixed after first determination - is stored in a new class LauDPPartialIntegralInfo - used on subsequent re-evaluations of the integrals 23rd April 2014 Thomas Latham * Attempt to improve clarity of LauIsobarDynamics::addResonance function - the 3rd argument is now an enumeration of the various resonance models - removed the optional arguments regarding the change of mass, width & spin - the same functionality can be obtained by using the returned pointer and calling changeResonance - the resonance name now only affects the lineshape in the LauPolNR case - the BelleSymNR / TaylorNR choice is now made in the 3rd argument - similarly for the BelleNR / (newly introduced) PowerLawNR choice * Add new PowerLawNR nonresonant model (part of LauBelleNR) * All examples updated to use new interface 23rd April 2014 Thomas Latham * Address issue of setting values of resonance parameters for all models - decided to do away with need to have LauIsobarDynamics know everything - LauIsobarDynamics::addResonance now returns a pointer to LauAbsResonance - parameters can be changed through
LauAbsResonance::setResonanceParameter - LauIsobarDynamics still knows about Blatt-Weisskopf factors - better to only need to set this once - Update GenFit3pi example to demonstrate mechanism 22nd April 2014 Thomas Latham * Allow Gaussian constraints to be added in simultaneous fitting - constraints will be ignored by the slaves - those added directly to fit parameters will be propagated to the master and handled there - those on combinations of parameters should be added in the master process 22nd April 2014 Mark Whitehead * Update Laura to cope with resonances of spin 4 and spin 5 - Zemach spin terms added to src/LauAbsResonance.cc - BW barrier factors added to src/LauRelBreitWignerRes.cc 19th April 2014 Daniel Craik * Add LauWeightedSumEffModel which gives an efficiency model from the weighted sum of several LauEffModel objects. * Added pABC, LauAbsEffModel, for LauEffModel and LauWeightedSumEffModel. * Various classes updated to use pointers to LauAbsEffModel instead of LauEffModel. 15th April 2014 Daniel Craik * Enable LauEfficiencyModel to contain several Lau2DAbsDP objects with the total efficiency calculated as the product. 10th April 2014 Mark Whitehead * Fix an issue with the likelihood penalty term for Gaussian constraints - Factor two missing in the denominator - New penalty term is: ( x-mean )^2 / ( 2*width^2 ) 4th April 2014 Thomas Latham * Add storage of fit fractions that have not been efficiency corrected === ## Laura++ v2r1 1st April 2014 Thomas Latham * Fix issue in LauFitter that prevents compilation with g++ 4.8 - Missing virtual destructor - Take opportunity to audit other special functions 31st March 2014 Mark Whitehead (in branch for release in v2r1) * Added an efficiency branch to the ntuple produced for toy data samples - Both LauSimpleFitModel and LauCPFitModel updated 28th March 2014 Daniel Craik (in branch for release in v2r2) * Added support for asymmetric errors to Lau2DHistDP, Lau2DSplineDP and LauEffModel. 27th March 2014 Daniel Craik * Changed histogram classes to use seeded random number generator for fluctuation and raising or lowering of bins and updated doxygen. 20th March 2014 Mark Whitehead (in branch for release in v2r1) * Added the ability to add Gaussian constraints to LauFormulaPars of fit parameters - User supplies the information but the LauFormulaPar is constructed behind the scenes 18th March 2014 Thomas Latham * Improve behaviour of toy generation from fit results 13th March 2014 Juan Otalora (in branch for release in v3r0) * Extended ability to float mass and width to other resonance lineshapes (Flatte, LASS and G-S) 11th March 2014 Mark Whitehead (in branch for release in v2r1) * Added the functionality to make LauFormulaPars usable in fits - Added a new class LauAbsRValue which LauParameter and LauFormulaPar inherit from - Many files updated to accept LauAbsRValues instead of LauParameters * Fairly major clean up of obsolete functions in LauAbsPdf - Only LauLinearPdf used any of them, this has now been updated 10th March 2014 Thomas Latham (in branch for release in v3r0) * First attempt at floating resonance parameters (work mostly from Juan) - Only works for RelBW lineshape - Can only float mass and width - Works nicely!
- Still needs much work to generalise and make more efficient === ## Laura++ v2r0 8th March 2014 Thomas Latham * Some additional functionality for the CoeffSet classes: - allow the parameter values to be set (optionally setting the initial and generated values as well) - allow the parameters to be set to float or to be fixed in the fit. These are needed when cloning but wanting some of the parameters to have different values and/or floating behaviour from the cloned set. * Improve the printout of the setting of the coefficient values in the fit models and the creation of resonances * Add LauFlatNR for the uniform NR model - ends its rather strange special treatment * Fix bug setting resAmpInt to 0 for LauPolNR * Many other improvements to the info/warning/error printouts * Modify GenFitBelleCPKpipi example to demonstrate cloning in action * Add -Werror to compiler flags (treats warnings as errors) 5th March 2014 Thomas Latham * Some improvements to LauPolNR to speed up amplitude calculation 2nd March 2014 Thomas Latham * A number of updates to the CoeffSet classes: - allow specification of the basename just after construction (before being given to the fit model) - allow configuration of the parameter fit ranges (through static methods of base class) - more adaptable cloning of the parameters (e.g. can only clone phase but allow magnitude to float independently) - allow CP-violating parameters to be second-stage in Belle and CLEO formats * Some improvements to the Doxygen and runtime information printouts 20th February 2014 Louis Henry * Add LauPolNR - class for modelling the nonresonant contribution based on BaBar 3K model (arXiv:1201.5897) 6th February 2014 Thomas Latham * Correct helicity convention information in Doxygen for LauKinematics === ## Laura++ v1r2 5th February 2014 Thomas Latham * Add rule to the Makefile that creates a rootmap file for the library 4th February 2014 Daniel Craik * Fixed bug in Lau2DSplineDPPdf - normalisation was not being calculated * Added out-of-range warning in LauBkgndDPModel and suppressed excessive warnings 3rd February 2014 Mark Whitehead (in branch for release in v2r0) * Added a new class to allow parameters to be a function of other parameters - inc/LauFormulaPar.hh - src/LauFormulaPar.cc 28th January 2014 Daniel Craik * Improved out-of-range efficiency warnings in LauEffModel and suppressed excessive errors * Modified LauIsobarDynamics to allow LASS parameters to be configured for LauLASSBWRes and LauLASSNRRes 27th January 2014 Daniel Craik * Added spline interpolation to DP backgrounds - Added Lau2DSplineDPPdf which uses a spline to model a normalised PDF across a DP - Added pABC, Lau2DAbsDPPdf, for Lau2DHistDPPdf and Lau2DSplineDPPdf and moved common code in Lau2DHistDPPdf and Lau2DSplineDPPdf into ABC Lau2DAbsHistDPPdf - LauBkgndDPModel modified to use Lau2DAbsDPPdf instead of Lau2DHistDPPdf - setBkgndSpline method added to LauBkgndDPModel to allow use of splines 22nd January 2014 Thomas Latham * Improve some error checks and corresponding warning messages in LauCPFitModel::setSignalDPParameters 16th January 2014 Thomas Latham * Add LauRealImagCPCoeffSet, which provides an (x,y), (xbar,ybar) way of parametrising the complex coefficients (the parametrisation is sketched below). * Try to improve timing in the *CoeffSet classes by making the complex coeffs into member variables.
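To illustrate the (x,y), (xbar,ybar) parametrisation provided by LauRealImagCPCoeffSet, here is a short sketch of the underlying relations only (not the class implementation); the helper functions are invented for illustration:

```cpp
#include "Rtypes.h"

#include "LauComplex.hh"

// The coefficients for the two conjugate modes are built directly from the
// Cartesian parameters: c = x + i*y and cbar = xbar + i*ybar
LauComplex coeffFromRealImag( const Double_t x, const Double_t y )
{
    return LauComplex( x, y );
}

// Example derived quantity (standard definition): the rate asymmetry between
// the conjugate modes, expressed in terms of the Cartesian parameters
Double_t rateAsymmetry( const Double_t x, const Double_t y,
                        const Double_t xbar, const Double_t ybar )
{
    const Double_t rate    { x*x + y*y };
    const Double_t rateBar { xbar*xbar + ybar*ybar };
    return ( rateBar - rate ) / ( rateBar + rate );
}
```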
20th December 2013 Daniel Craik * Added Lau2DCubicSpline which provides cubic spline interpolation of a histogram - Added Lau2DSplineDP which uses a spline to model variation across a DP (eg efficiency) - Added pABC, Lau2DAbsDP, for Lau2DHistDP and Lau2DSplineDP and moved common code in Lau2DHistDP and Lau2DSplineDP into ABC Lau2DAbsHistDP - LauEffModel, LauDPDepSumPdf and LauDPDepMapPdf modified to use Lau2DAbsDP instead of Lau2DHistDP - setEffSpline method added to LauEffModel and useSpline flag added to constructor for LauDPDepSumPdf to allow use of splines 18th December 2013 Mark Whitehead (in branch for release in v2r0) * Added functionality to include Gaussian constraints on floated parameters in the fit. The files updated are: - inc/LauAbsFitModel.hh - inc/LauParameter.hh - src/LauAbsFitModel.cc - src/LauParameter.cc 5th December 2013 Thomas Latham * Fix small bug in GenFitKpipi example where background asymmetry parameter had its limits the wrong way around 4th December 2013 Daniel Craik * Updated 2D chi-squared code to use adaptive binning. 3rd December 2013 Thomas Latham * Generalise the Makefile in the examples directory - results in minor changes to the names of 3 of the binaries 3rd December 2013 Thomas Latham (in branch for release in v2r0) * Have the master save an ntuple with all fitter parameters and the full correlation matrix information. 29th November 2013 Thomas Latham * Fixed bug in ResultsExtractor where the output file was not written 29th November 2013 Thomas Latham (in branch for release in v2r0) * Allow the slave ntuples to store the partial covariance matrices in the simultaneous fitting 26th November 2013 Thomas Latham (in branch for release in v2r0) * Added first version of the simultaneous fitting framework === ## Laura++ v1r1p1 22nd November 2013 Thomas Latham * Fixed bug in LauCPFitModel where values of q = -1 extra PDFs were used regardless of the event charge. === ## Laura++ v1r1 20th November 2013 Mark Whitehead * Changed convention for the barrier factors, swapping from p* to p. This seems to give more physically reasonable results. The files updated are: - src/LauGounarisSakuraiRes.cc - src/LauRelBreitWignerRes.cc 18th October 2013 Thomas Latham * Fix dependency problem in Makefile 8th October 2013 Thomas Latham * Some fixes to yield implementation * Minor bug fix in DP background histogram class 7th October 2013 Mark Whitehead * Update to accept the yields and yield asymmetries as LauParameters. All examples have been updated to reflect this change. 
This updated the following files: - inc/LauCPFitModel.hh - inc/LauSimpleFitModel.hh - inc/LauAbsFitModel.hh - src/LauCPFitModel.cc - src/LauSimpleFitModel.cc * Addition of the following particles to src/LauResonanceMaker.cc Ds*+-, Ds0*(2317)+-, Ds2*(2573)+-, Ds1*(2700)+- and Bs*0 === ## Laura++ v1r0 13th September 2013 Thomas Latham * Initial import of the package into HEPforge diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 2456f73..242a8cd 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -1,48 +1,49 @@ list(APPEND EXAMPLE_SOURCES B3piKMatrixMassProj B3piKMatrixPlots CalcChiSq FlatPhaseSpace FlatPhaseSpace-CustomMasses FlatSqDalitz FlatSqDalitz-CustomMasses GenFit3K GenFit3KS GenFit3pi GenFitBelleCPKpipi GenFitDpipi GenFitDs2KKpi GenFitEFKLLM GenFitKpipi GenFitNoDP GenFitNoDPMultiDim KMatrixDto3pi KMatrixExample MergeDataFiles mixedSampleTest PlotKMatrixTAmp + PlotLikelihood PlotResults point2PointTestSample QuasiFlatSqDalitz QuasiFlatSqDalitz-CustomMasses ResultsExtractor SimFitCoordinator SimFitTask SimFitTaskRooFit ) if(NOT LAURA_BUILD_ROOFIT_TASK) list(REMOVE_ITEM EXAMPLE_SOURCES SimFitTaskRooFit) endif() foreach( _example ${EXAMPLE_SOURCES}) add_executable(${_example} ${_example}.cc) target_link_libraries(${_example} PRIVATE Laura++) install(TARGETS ${_example} RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) endforeach() # Also install the python script version of GenFit3pi configure_file(GenFit3pi.py.in GenFit3pi.py @ONLY) install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/GenFit3pi.py DESTINATION ${CMAKE_INSTALL_BINDIR}) diff --git a/examples/PlotLikelihood.cc b/examples/PlotLikelihood.cc new file mode 100644 index 0000000..fcc163a --- /dev/null +++ b/examples/PlotLikelihood.cc @@ -0,0 +1,194 @@ + +/* +Copyright 2023 University of Warwick + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +/* +Laura++ package authors: +John Back +Paul Harrison +Thomas Latham +*/ + +#include <cstdlib> +#include <iostream> +#include <memory> +#include <vector> + +#include "TFile.h" +#include "TH2.h" +#include "TString.h" +#include "TTree.h" + +#include "LauSimpleFitModel.hh" +#include "LauBkgndDPModel.hh" +#include "LauDaughters.hh" +#include "LauEffModel.hh" +#include "LauIsobarDynamics.hh" +#include "LauMagPhaseCoeffSet.hh" +#include "LauResonanceMaker.hh" +#include "LauVetoes.hh" + +int main( /*int argc, char** argv*/ ) +{ + // If you want to use square DP histograms for efficiency, + // backgrounds or you just want the square DP co-ordinates + // stored in the toy MC ntuple then set this to kTRUE + Bool_t squareDP = kFALSE; + + // This defines the DP => decay is B+ -> pi+ pi+ pi- + // Particle 1 = pi+ + // Particle 2 = pi+ + // Particle 3 = pi- + // The DP is defined in terms of m13Sq and m23Sq + LauDaughters* daughters = new LauDaughters("B+", "pi+", "pi+", "pi-", squareDP); + + // Optionally apply some vetoes to the DP + // (example syntax given but commented-out) + LauVetoes* vetoes = new LauVetoes(); + //Double_t DMin = 1.70; + //Double_t DMax = 1.925; + //Double_t JpsiMin = 3.051; + //Double_t JpsiMax = 3.222; + //Double_t psi2SMin = 3.676; + //Double_t psi2SMax = 3.866; + //vetoes->addMassVeto(1, DMin, DMax); // D0 veto, m23 (and automatically m13) + //vetoes->addMassVeto(1, JpsiMin, JpsiMax); // J/psi veto, m23 (and automatically m13) + //vetoes->addMassVeto(1, psi2SMin, psi2SMax); // psi(2S) veto, m23 (and automatically m13) + + // Define the efficiency model (defaults to unity everywhere) + LauEffModel* effModel = new LauEffModel(daughters, vetoes); + + // Can optionally provide a histogram to model variation over DP + // (example syntax given but commented-out) + //TFile *effHistFile = TFile::Open("histoFiles/B3piNRDPEff.root", "read"); + //TH2* effHist = dynamic_cast<TH2*>(effHistFile->Get("effHist")); + //Bool_t useInterpolation = kTRUE; + //Bool_t fluctuateBins = kFALSE; + //Bool_t useUpperHalf = kTRUE; + //effModel->setEffHisto(effHist, useInterpolation, fluctuateBins, 0.0, 0.0, useUpperHalf, squareDP); + + // Create the isobar model + + // Set the values of the Blatt-Weisskopf barrier radii and whether they are fixed or floating + LauResonanceMaker& resMaker = LauResonanceMaker::get(); + resMaker.setDefaultBWRadius( LauBlattWeisskopfFactor::Parent, 5.0 ); + resMaker.setDefaultBWRadius( LauBlattWeisskopfFactor::Light, 4.0 ); + resMaker.fixBWRadius( LauBlattWeisskopfFactor::Parent, kTRUE ); + resMaker.fixBWRadius( LauBlattWeisskopfFactor::Light, kTRUE ); + + LauIsobarDynamics* sigModel = new LauIsobarDynamics(daughters, effModel); + // Add various components to the isobar model, + // modifying some resonance parameters + LauAbsResonance* reson(0); + // addResonance arguments: resName, resPairAmpInt, resType + reson = sigModel->addResonance("rho0(770)", 1, LauAbsResonance::GS); // resPairAmpInt = 1 => resonance mass is m23.
+ reson = sigModel->addResonance("rho0(1450)", 1, LauAbsResonance::RelBW); + reson = sigModel->addResonance("f_0(980)", 1, LauAbsResonance::Flatte); + reson->setResonanceParameter("g1",0.2); + reson->setResonanceParameter("g2",1.0); + reson = sigModel->addResonance("f_2(1270)", 1, LauAbsResonance::RelBW); + const TString nrName = "BelleNR_Swave"; + reson = sigModel->addResonance(nrName, 0, LauAbsResonance::BelleSymNRNoInter); + reson->setResonanceParameter("alpha", 0.2); + + // Set the maximum signal DP ASq value + // If you do not provide a value, one will be determined automatically, + // which should be close to the true maximum but is not guaranteed to + // be optimal. + // Any value, whether manually provided or automatically determined, + // will be automatically adjusted to avoid bias or extreme inefficiency + // but it is best to set this by hand once you've found the right value + // through some trial and error. + sigModel->setASqMaxValue(0.35); + + // Create the fit model + LauSimpleFitModel* fitModel = new LauSimpleFitModel(sigModel); + + // Create the complex coefficients for the isobar model + // Here we're using the magnitude and phase form: + // c_j = a_j exp(i*delta_j) + std::vector<LauAbsCoeffSet*> coeffset; + coeffset.push_back( new LauMagPhaseCoeffSet("rho0(770)", 1.00, 0.00, kTRUE, kTRUE) ); + coeffset.push_back( new LauMagPhaseCoeffSet("rho0(1450)", 0.37, 1.99, kFALSE, kFALSE) ); + coeffset.push_back( new LauMagPhaseCoeffSet("f_0(980)", 0.27, -1.59, kFALSE, kFALSE) ); + coeffset.push_back( new LauMagPhaseCoeffSet("f_2(1270)", 0.53, 1.39, kFALSE, kFALSE) ); + coeffset.push_back( new LauMagPhaseCoeffSet(nrName, 0.54, -0.84, kFALSE, kFALSE) ); + for (std::vector<LauAbsCoeffSet*>::iterator iter=coeffset.begin(); iter!=coeffset.end(); ++iter) { + fitModel->setAmpCoeffSet(*iter); + } + + // Set the signal yield and define whether it is fixed or floated + const Double_t nSigEvents = 1500.0; + LauParameter * signalEvents = new LauParameter("signalEvents",nSigEvents,-1.0*nSigEvents,2.0*nSigEvents,kFALSE); + fitModel->setNSigEvents(signalEvents); + + // Set up a background model + + // First declare the names of the background class(es) + std::vector<TString> bkgndNames(1); + bkgndNames[0] = "qqbar"; + fitModel->setBkgndClassNames( bkgndNames ); + + // Define and set the yield parameter for the background + const Double_t nBkg = 1250.0; + LauParameter* nBkgndEvents = new LauParameter("qqbar",nBkg,-2.0*nBkg,2.0*nBkg,kFALSE); + fitModel->setNBkgndEvents( nBkgndEvents ); + + // Create the background DP model + LauBkgndDPModel* qqbarModel = new LauBkgndDPModel(daughters, vetoes); + + // Load in background DP model histogram + // (example syntax given but commented-out - the background will be treated as being uniform in the DP in the absence of a histogram) + //TString qqFileName("histoFiles/offResDP.root"); + //TFile* qqFile = TFile::Open(qqFileName.Data(), "read"); + //TH2* qqDP = dynamic_cast<TH2*>(qqFile->Get("AllmTheta")); // m', theta' + //qqbarModel->setBkgndHisto(qqDP, useInterpolation, fluctuateBins, useUpperHalf, squareDP); + + // Add the background DP model into the fit model + fitModel->setBkgndDPModel( "qqbar", qqbarModel ); + + + // Set the names of the files to read/write + const TString histFileName("plot-3pi.root"); + std::unique_ptr<TFile> histFile { TFile::Open( histFileName, "recreate" ) }; + + const Int_t nBins{100}; + const Double_t xMin{0.0}; + const Double_t xMax{28.0}; + const Double_t yMin{0.0}; + const Double_t yMax{28.0}; + TH2* signalLike = new TH2D( "signalLike", "Signal Likelihood", nBins, xMin, xMax,
nBins, yMin, yMax ); + TH2* qqbarLike = new TH2D( "qqbarLike", "qqbar Likelihood", nBins, xMin, xMax, nBins, yMin, yMax ); + + for ( Int_t iX{1}; iX < nBins; ++iX ) { + + const Double_t m13Sq { signalLike->GetXaxis()->GetBinCenter( iX ) }; + + for ( Int_t iY{1}; iY < nBins; ++iY ) { + const Double_t m23Sq { signalLike->GetYaxis()->GetBinCenter( iY ) }; + + const auto likelihoods { fitModel->getDPLikelihoods( m13Sq, m23Sq ) }; + + signalLike->SetBinContent( iX, iY, likelihoods.at("signal") ); + qqbarLike ->SetBinContent( iX, iY, likelihoods.at("qqbar") ); + } + } + + histFile->Write(); + histFile->Close(); + + return EXIT_SUCCESS; +} diff --git a/inc/LauAbsBkgndDPModel.hh b/inc/LauAbsBkgndDPModel.hh index 8066f9e..b563e2d 100644 --- a/inc/LauAbsBkgndDPModel.hh +++ b/inc/LauAbsBkgndDPModel.hh @@ -1,154 +1,172 @@ /* Copyright 2004 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauAbsBkgndDPModel.hh \brief File containing declaration of LauAbsBkgndDPModel class. */ /*! \class LauAbsBkgndDPModel \brief The abstract interface for a background Dalitz plot model Class which defines the abstract interface for a background Dalitz plot model */ #ifndef LAU_ABS_BKGND_DP_MODEL #define LAU_ABS_BKGND_DP_MODEL #include "Rtypes.h" class LauDaughters; class LauKinematics; class LauFitDataTree; class LauVetoes; class LauAbsBkgndDPModel { public: //! Destructor virtual ~LauAbsBkgndDPModel() {} //! Initialise the model virtual void initialise() = 0; //! Generate a toy MC event from the model /*! \return success/failure flag */ virtual Bool_t generate() = 0; + //! Get likelihood for a given DP position + /*! + \param [in] m13Sq the invariant mass squared of children 1 and 3 + \param [in] m23Sq the invariant mass squared of children 2 and 3 + \return the likelihood value + */ + virtual Double_t getLikelihood( const Double_t m13Sq, + const Double_t m23Sq ) = 0; + + //! Get unnormalised likelihood for a given DP position + /*! + \param [in] m13Sq the invariant mass squared of children 1 and 3 + \param [in] m23Sq the invariant mass squared of children 2 and 3 + \return the unnormalised likelihood value + */ + virtual Double_t getUnNormValue( const Double_t m13Sq, + const Double_t m23Sq ) = 0; + //! Get likelihood for a given event /*! \param [in] iEvt the event number \return the likelihood value */ virtual Double_t getLikelihood(UInt_t iEvt) = 0; //! Get unnormalised likelihood for a given event /*! \param [in] iEvt the event number \return the unnormalised likelihood value */ virtual Double_t getUnNormValue(UInt_t iEvt) = 0; //! Get PDF normalisation constant /*! \return the PDF normalisation constant */ virtual Double_t getPdfNorm() const = 0; //! Cache the input data and (if appropriate) the per-event likelihood values /*! \param [in] fitDataTree the input data */ virtual void fillDataTree(const LauFitDataTree& fitDataTree) = 0; //! Get the daughter particles /*! 
\return the daughters */ const LauDaughters* getDaughters() const {return daughters_;} //! Get the daughter particles /*! \return the daughters */ LauDaughters* getDaughters() {return daughters_;} //! Get the Dalitz plot kinematics /*! \return the kinematics */ const LauKinematics* getKinematics() const {return kinematics_;} //! Get the Dalitz plot kinematics /*! \return the kinematics */ LauKinematics* getKinematics() {return kinematics_;} //! Get vetoes in the Dalitz plot /*! \return the vetoes */ const LauVetoes* getVetoes() const {return vetoes_;} //! Get vetoes in the Dalitz plot /*! \return the vetoes */ LauVetoes* getVetoes() {return vetoes_;} protected: //! Constructor /*! \param [in] daughters the daughter particles \param [in] vetoes the vetoes within the Dalitz plot */ LauAbsBkgndDPModel(LauDaughters* daughters, LauVetoes* vetoes); //! Set data event number /*! \param [in] iEvt the event number */ virtual void setDataEventNo(UInt_t iEvt) = 0; private: //! Copy constructor (not implemented) LauAbsBkgndDPModel(const LauAbsBkgndDPModel& rhs); //! Copy assignment operator (not implemented) LauAbsBkgndDPModel& operator=(const LauAbsBkgndDPModel& rhs); //! The daughter particles LauDaughters* daughters_; //! Dalitz plot kinematics LauKinematics* kinematics_; //! Vetoes within the Dalitz plot LauVetoes* vetoes_; }; #endif diff --git a/inc/LauAbsFitModel.hh b/inc/LauAbsFitModel.hh index 5a33feb..3d2b012 100644 --- a/inc/LauAbsFitModel.hh +++ b/inc/LauAbsFitModel.hh @@ -1,833 +1,852 @@ /* Copyright 2004 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauAbsFitModel.hh \brief File containing declaration of LauAbsFitModel class. */ /*! \class LauAbsFitModel \brief Abstract interface to the fitting and toy MC model Abstract interface to the fitting and toy MC model Any class inheriting from this must implement the following functions: - cacheInputFitVars() - checkInitFitParams() - finaliseFitResults() - fixdSpeciesNames() - freeSpeciesNames() - genExpt() - getEventSum() - getTotEvtLikelihood() - initialise() - initialiseDPModels() - propagateParUpdates() - recalculateNormalisation() - scfDPSmear() - setAmpCoeffSet() - setNBkgndEvents() - setNSigEvents() - setupBkgndVectors() - setupGenNtupleBranches() - setupSPlotNtupleBranches() - splitSignal() - storePerEvtLlhds() - twodimPDFs() - updateCoeffs() - variableNames() - weightEvents() - writeOutTable() */ #ifndef LAU_ABS_FIT_MODEL #define LAU_ABS_FIT_MODEL #include "TMatrixDfwd.h" #include "TString.h" #include "TStopwatch.h" #include #include #include +#include "LauComplex.hh" #include "LauFitObject.hh" #include "LauFormulaPar.hh" #include "LauSimFitTask.hh" // LauSPlot included to get LauSPlot::NameSet typedef #include "LauSPlot.hh" class LauAbsCoeffSet; class LauAbsPdf; class LauFitDataTree; class LauGenNtuple; class LauAbsRValue; class LauParameter; class LauAbsFitModel : public LauSimFitTask { public: //! Constructor LauAbsFitModel(); //! 
Destructor virtual ~LauAbsFitModel(); //! Is the Dalitz plot term in the likelihood Bool_t useDP() const { return usingDP_; } //! Switch on/off the Dalitz plot term in the Likelihood (allows fits to other quantities, e.g. B mass) /*! \param [in] usingDP the boolean flag */ void useDP(Bool_t usingDP) { usingDP_ = usingDP; } //! Return the flag to store the status of using an sFit or not Bool_t doSFit() const { return doSFit_; } //! Do an sFit (use sWeights to isolate signal decays rather than using background histograms) /*! \param [in] sWeightBranchName name of the branch of the tree containing the sWeights \param [in] scaleFactor scaling factor to get the uncertainties correct */ void doSFit( const TString& sWeightBranchName, Double_t scaleFactor = 1.0 ); //! Determine whether an extended maximum likelihood fit it being performed Bool_t doEMLFit() const {return emlFit_;} //! Choice to perform an extended maximum likelihood fit /*! \param [in] emlFit boolean specifying whether or not to perform the EML */ void doEMLFit(Bool_t emlFit) {emlFit_ = emlFit;} //! Determine whether Poisson smearing is enabled for the toy MC generation Bool_t doPoissonSmearing() const {return poissonSmear_;} //! Turn Poisson smearing (for the toy MC generation) on or off /*! \param [in] poissonSmear boolean specifying whether or not to do Poisson smearing */ void doPoissonSmearing(Bool_t poissonSmear) {poissonSmear_ = poissonSmear;} //! Determine whether embedding of events is enabled in the generation Bool_t enableEmbedding() const {return enableEmbedding_;} //! Turn on or off embedding of events in the generation /*! \param [in] enable boolean specifying whether to embed events */ void enableEmbedding(Bool_t enable) {enableEmbedding_ = enable;} //! Determine whether writing out of the latex table is enabled Bool_t writeLatexTable() const {return writeLatexTable_;} //! Turn on or off the writing out of the latex table /*! \param [in] writeTable boolean specifying whether or not the latex table should be written out */ void writeLatexTable(Bool_t writeTable) {writeLatexTable_ = writeTable;} //! save files containing graphs of the resonance's PDFs Bool_t saveFilePDF() const {return savePDF_;} //! Turn on or off the save of files containing graphs of the resonance's PDFs /*! \param [in] savePDF boolean specifying whether or not the save of files containing graphs of the resonance's PDFs */ void saveFilePDF(Bool_t savePDF) {savePDF_ = savePDF;} //! Set up the sPlot ntuple /*! \param [in] fileName the sPlot file name \param [in] treeName the sPlot tree name \param [in] storeDPEfficiency whether or not to store the efficiency information too \param [in] verbosity define the level of output */ void writeSPlotData(const TString& fileName, const TString& treeName, Bool_t storeDPEfficiency, const TString& verbosity = "q"); //! Determine whether the sPlot data is to be written out Bool_t writeSPlotData() const {return writeSPlotData_;} //! Determine whether the efficiency information should be stored in the sPlot ntuple Bool_t storeDPEff() const {return storeDPEff_;} //! Determine whether the initial values of the fit parameters, in particular the isobar coefficient parameters, are to be randomised Bool_t useRandomInitFitPars() const {return randomFit_;} //! Randomise the initial values of the fit parameters, in particular the isobar coefficient parameters void useRandomInitFitPars(Bool_t boolean) {randomFit_ = boolean;} //! Setup the background class names /*! 
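A minimal usage sketch, for illustration only (the class name "qqbar" is invented here, and the vector element type is assumed to be TString):
\code
std::vector<TString> bkgndNames { "qqbar" };   // illustrative background category name
fitModel->setBkgndClassNames( bkgndNames );    // fitModel is assumed to point to a concrete fit model
\endcode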
\param [in] names a vector of all the background names */ virtual void setBkgndClassNames( const std::vector& names ); //! Returns the number of background classes inline UInt_t nBkgndClasses() const {return bkgndClassNames_.size();} //! Set the number of signal events /*! \param [in] nSigEvents contains the signal yield and option to fix it */ virtual void setNSigEvents(LauParameter* nSigEvents) = 0; //! Set the number of background events /*! The name of the parameter must be that of the corresponding background category (so that it can be correctly assigned) \param [in] nBkgndEvents contains the name, yield and option to fix the yield of the background */ virtual void setNBkgndEvents(LauAbsRValue* nBkgndEvents) = 0; //! Set the DP amplitude coefficients /*! The name of the coeffSet must match the name of one of the resonances in the DP model. The supplied order of coefficients will be rearranged to match the order in which the resonances are stored in the dynamics, see LauIsobarDynamics::addResonance. \param [in] coeffSet the set of coefficients */ virtual void setAmpCoeffSet(LauAbsCoeffSet* coeffSet) = 0; //! Specify that a toy MC sample should be created for a successful fit to an experiment /*! Generation uses the fitted parameters so that the user can compare the fit to the data \param [in] toyMCScale the scale factor to get the number of events to generate \param [in] mcFileName the file name where the toy sample will be stored \param [in] tableFileName name of the output tex file \param [in] poissonSmearing turn smearing on or off */ void compareFitData(UInt_t toyMCScale = 10, const TString& mcFileName = "fitToyMC.root", const TString& tableFileName = "fitToyMCTable.tex", Bool_t poissonSmearing = kTRUE); //! Start the toy generation / fitting /*! \param [in] applicationCode specifies what to do, perform a fit ("fit") or generate toy MC ("gen") \param [in] dataFileName the name of the input data file \param [in] dataTreeName the name of the tree containing the data \param [in] histFileName the file name for the output histograms \param [in] tableFileName the file name for the latex output file */ void run(const TString& applicationCode, const TString& dataFileName, const TString& dataTreeName, const TString& histFileName, const TString& tableFileName = ""); //! This function sets the parameter values from Minuit /*! This function has to be public since it is called from the global FCN. It should not be called otherwise! \param [in] par an array storing the various parameter values \param [in] npar the number of free parameters */ virtual void setParsFromMinuit(Double_t* par, Int_t npar); //! Calculates the total negative log-likelihood /*! This function has to be public since it is called from the global FCN. It should not be called otherwise! */ virtual Double_t getTotNegLogLikelihood(); //! Set model parameters from a file /*! \param [in] fileName the name of the file with parameters to set \param [in] treeName the name of the tree in the file corresponding to the parameters to set \param [in] fix whether to fix (set constant) the loaded parameters, or leave them floating */ void setParametersFromFile(const TString& fileName, const TString& treeName, const Bool_t fix); //! Set model parameters from a given std::map /*! Only parameters named in the map are imported, all others are set to their values specified in the model configuration. 
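A hedged sketch of how this might be called (the parameter names and values are invented, and the map is assumed to be keyed by parameter name with Double_t values):
\code
std::map<TString, Double_t> importedPars {
    { "exampleMag", 1.0 },    // illustrative parameter name and value
    { "examplePhase", 0.0 }   // illustrative parameter name and value
};
fitModel->setParametersFromMap( importedPars, kTRUE );   // import these values and fix the corresponding parameters
\endcode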
\param [in] parameters map from parameter name to imported value \param [in] fix whether to fix (set constant) the loaded parameters, or leave them floating */ void setParametersFromMap(const std::map& parameters, const Bool_t fix); //! Set named model parameters from a file /*! Identical to setParametersFromFile, but only import parameters named from parameters set. All others are set to their values specified in the model configuration. \param [in] fileName the name of the file with parameters to set \param [in] treeName the name of the tree in the file corresponding to the parameters to set \param [in] parameters the set of parameters to import from the file \param [in] fix whether to fix (set constant) the loaded parameters, or leave them floating */ void setNamedParameters(const TString& fileName, const TString& treeName, const std::set& parameters, const Bool_t fix); //! Set named model parameters from a given std::map, with fallback to those from a file /*! Parameters named in the map are imported with their specified values. All other parameters are set to the values corresponding to the value in the given file. \param [in] fileName the name of the file with parameters to set \param [in] treeName the name of the tree in the file corresponding to the parameters to set \param [in] parameters map from parameter name to imported value (override parameters form the file) \param [in] fix whether to fix (set constant) the loaded parameters, or leave them floating */ void setParametersFileFallback(const TString& fileName, const TString& treeName, const std::map& parameters, const Bool_t fix); + //! Calculate the DP amplitude(s) for a given DP position + /*! + If not already done, this function will initialise the DP models + \param [in] m13Sq the invariant mass squared of children 1 and 3 + \param [in] m23Sq the invariant mass squared of children 2 and 3 + \return a container of complex amplitudes, labelled to indicate to which component they belong + */ + virtual std::map getDPAmps( const Double_t m13Sq, const Double_t m23Sq ) = 0; + + //! Calculate the DP likelihood(s) for a given DP position + /*! + If not already done, this function will initialise the DP models + \param [in] m13Sq the invariant mass squared of children 1 and 3 + \param [in] m23Sq the invariant mass squared of children 2 and 3 + \return a container of likelihood values, labelled to indicate to which component they belong + */ + virtual std::map getDPLikelihoods( const Double_t m13Sq, const Double_t m23Sq ) = 0; + protected: // Some typedefs //! List of Pdfs typedef std::vector LauPdfList; //! List of parameter pointers typedef std::vector LauParameterPList; //! List of parameter pointers typedef std::vector LauAbsRValuePList; //! Set of parameter pointers typedef std::set LauParameterPSet; //! List of parameters typedef std::vector LauParameterList; //! A type to store background classes typedef std::map LauBkgndClassMap; //! Clear the vectors containing fit parameters void clearFitParVectors(); //! Clear the vectors containing extra ntuple variables void clearExtraVarVectors(); //! Weighting - allows e.g. MC events to be weighted by the DP model /*! \param [in] dataFileName the name of the data file \param [in] dataTreeName the name of the tree containing the data */ virtual void weightEvents( const TString& dataFileName, const TString& dataTreeName ) = 0; //! Generate toy MC /*! 
\param [in] dataFileName the name of the file where the generated events are stored \param [in] dataTreeName the name of the tree used to store the variables \param [in] histFileName the name of the histogram output file (currently not used) \param [in] tableFileNameBase the name the latex output file */ virtual void generate(const TString& dataFileName, const TString& dataTreeName, const TString& histFileName, const TString& tableFileNameBase); //! The method that actually generates the toy MC events for the given experiment /*! \return the success/failure flag of the generation procedure */ virtual Bool_t genExpt() = 0; //! Perform the total fit /*! \param [in] dataFileName the name of the data file \param [in] dataTreeName the name of the tree containing the data \param [in] histFileName the name of the histogram output file \param [in] tableFileNameBase the name the of latex output file */ void fit(const TString& dataFileName, const TString& dataTreeName, const TString& histFileName, const TString& tableFileNameBase); //! Routine to perform the actual fit for a given experiment void fitExpt(); //! Routine to perform the minimisation /*! \return the success/failure flag of the fit */ Bool_t runMinimisation(); //! Create a toy MC sample from the fitted parameters /*! \param [in] mcFileName the file name where the toy sample will be stored \param [in] tableFileName name of the output tex file */ void createFitToyMC(const TString& mcFileName, const TString& tableFileName); //! Read in the data for the current experiment /*! \return the number of events read in */ virtual UInt_t readExperimentData(); //! Open the input file and verify that all required variables are present /*! \param [in] dataFileName the name of the input file \param [in] dataTreeName the name of the input tree */ virtual Bool_t verifyFitData(const TString& dataFileName, const TString& dataTreeName); //! Cache the input data values to calculate the likelihood during the fit virtual void cacheInputFitVars() = 0; //! Cache the value of the sWeights to be used in the sFit virtual void cacheInputSWeights(); //! Initialise the fit par vectors /*! Each class that inherits from this one must implement this sensibly for all vectors specified in clearFitParVectors, i.e. specify parameter names, initial, min, max and fixed values */ virtual void initialise() = 0; //! Recalculate normalisation the signal DP model(s) virtual void recalculateNormalisation() = 0; //! Initialise the DP models virtual void initialiseDPModels() = 0; /*! For each amp in the fit this function takes its particular parameters and from them calculates the single complex number that is its coefficient. The vector of these coeffs can then be passed to the signal dynamics. */ virtual void updateCoeffs() = 0; //! This function (specific to each model) calculates anything that depends on the fit parameter values virtual void propagateParUpdates() = 0; //! Calculate the sum of the log-likelihood over the specified events /*! \param [in] iStart the event number of the first event to be considered \param [in] iEnd the event number of the final event to be considered */ Double_t getLogLikelihood( UInt_t iStart, UInt_t iEnd ); //! Calculate the penalty terms to the log likelihood from Gaussian constraints Double_t getLogLikelihoodPenalty(); //! Calculates the likelihood for a given event /*! \param [in] iEvt the event number */ virtual Double_t getTotEvtLikelihood(UInt_t iEvt) = 0; //! 
Returns the sum of the expected events over all hypotheses; used in the EML fit scenario virtual Double_t getEventSum() const = 0; //! Prints the values of all the fit variables for the specified event - useful for diagnostics /*! \param [in] iEvt the event number */ virtual void printEventInfo(UInt_t iEvt) const; //! Same as printEventInfo, but printing out the values of the variables in the fit virtual void printVarsInfo() const; //! Update initial fit parameters if required virtual void checkInitFitParams() = 0; //! Setup saving of fit results to ntuple/LaTeX table etc. /*! \param [in] histFileName the file name for the output histograms \param [in] tableFileName the file name for the latex output file */ virtual void setupResultsOutputs( const TString& histFileName, const TString& tableFileName ); //! Package the initial fit parameters for transmission to the coordinator /*! \param [out] array the array to be filled with the LauParameter objects */ virtual void prepareInitialParArray( TObjArray& array ); //! Perform all finalisation actions /*! - Receive the results of the fit from the coordinator - Perform any finalisation routines - Package the finalised fit parameters for transmission back to the coordinator \param [in] fitStat the status of the fit, e.g. status code, EDM, NLL \param [in] parsFromCoordinator the parameters at the fit minimum \param [in] covMat the fit covariance matrix \param [out] parsToCoordinator the array to be filled with the finalised LauParameter objects */ virtual void finaliseExperiment( const LauAbsFitter::FitStatus& fitStat, const TObjArray* parsFromCoordinator, const TMatrixD* covMat, TObjArray& parsToCoordinator ); //! Write the results of the fit into the ntuple /*! \param [in] tableFileName the structure containing the results of the fit */ virtual void finaliseFitResults(const TString& tableFileName) = 0; //! Save the pdf Plots for all the resonances of experiment number fitExp /*! \param [in] label prefix for the file name to be saved */ virtual void savePDFPlots(const TString& label) = 0; //! Save the pdf Plots for the sum of ressonances correspondint to "sin" of experiment number fitExp /*! \param [in] label prefix for the file name to be saved \param [in] spin spin of the wave to be saved */ virtual void savePDFPlotsWave(const TString& label, const Int_t& spin) = 0; //! Write the latex table /*! \param [in] outputFile the name of the output file */ virtual void writeOutTable(const TString& outputFile) = 0; //! Store the per-event likelihood values virtual void storePerEvtLlhds() = 0; //! Calculate the sPlot data virtual void calculateSPlotData(); //! Make sure all parameters hold their genValue as the current value void setGenValues(); //! Method to set up the storage for background-related quantities called by setBkgndClassNames virtual void setupBkgndVectors() = 0; //! Check if the given background class is in the list /*! \param [in] className the name of the class to check \return true or false */ Bool_t validBkgndClass( const TString& className ) const; //! The number assigned to a background class /*! \param [in] className the name of the class to check \return the background class ID number */ UInt_t bkgndClassID( const TString& className ) const; //! Get the name of a background class from the number /*! \param [in] classID the ID number of the background class \return the class name */ const TString& bkgndClassName( UInt_t classID ) const; //! Setup the generation ntuple branches virtual void setupGenNtupleBranches() = 0; //! 
Add a branch to the gen tree for storing an integer /*! \param [in] name the name of the branch */ virtual void addGenNtupleIntegerBranch(const TString& name); //! Add a branch to the gen tree for storing a double /*! \param [in] name the name of the branch */ virtual void addGenNtupleDoubleBranch(const TString& name); //! Set the value of an integer branch in the gen tree /*! \param [in] name the name of the branch \param [in] value the value to be stored */ virtual void setGenNtupleIntegerBranchValue(const TString& name, Int_t value); //! Set the value of a double branch in the gen tree /*! \param [in] name the name of the branch \param [in] value the value to be stored */ virtual void setGenNtupleDoubleBranchValue(const TString& name, Double_t value); //! Get the value of an integer branch in the gen tree /*! \param [in] name the name of the branch \return the value of the parameter */ virtual Int_t getGenNtupleIntegerBranchValue(const TString& name) const; //! Get the value of a double branch in the gen tree /*! \param [in] name the name of the branch \return the value of the parameter */ virtual Double_t getGenNtupleDoubleBranchValue(const TString& name) const; //! Fill the gen tuple branches virtual void fillGenNtupleBranches(); //! Setup the branches of the sPlot tuple virtual void setupSPlotNtupleBranches() = 0; //! Add a branch to the sPlot tree for storing an integer /*! \param [in] name the name of the branch */ virtual void addSPlotNtupleIntegerBranch(const TString& name); //! Add a branch to the sPlot tree for storing a double /*! \param [in] name the name of the branch */ virtual void addSPlotNtupleDoubleBranch(const TString& name); //! Set the value of an integer branch in the sPlot tree /*! \param [in] name the name of the branch \param [in] value the value to be stored */ virtual void setSPlotNtupleIntegerBranchValue(const TString& name, Int_t value); //! Set the value of a double branch in the sPlot tree /*! \param [in] name the name of the branch \param [in] value the value to be stored */ virtual void setSPlotNtupleDoubleBranchValue(const TString& name, Double_t value); //! Fill the sPlot tuple virtual void fillSPlotNtupleBranches(); //! Returns the names of all variables in the fit virtual LauSPlot::NameSet variableNames() const = 0; //! Returns the names and yields of species that are free in the fit virtual LauSPlot::NumbMap freeSpeciesNames() const = 0; //! Returns the names and yields of species that are fixed in the fit virtual LauSPlot::NumbMap fixdSpeciesNames() const = 0; //! Returns the species and variables for all 2D PDFs in the fit virtual LauSPlot::TwoDMap twodimPDFs() const = 0; //! Check if the signal is split into well-reconstructed and mis-reconstructed types virtual Bool_t splitSignal() const = 0; //! Check if the mis-reconstructed signal is to be smeared in the DP virtual Bool_t scfDPSmear() const = 0; //! Add parameters of the PDFs in the list to the list of all fit parameters /*! \param [in] pdfList a list of Pdfs \return the number of parameters added */ UInt_t addFitParameters(LauPdfList& pdfList); //! Add parameters to the list of Gaussian constrained parameters void addConParameters(); //! Print the fit parameters for all PDFs in the list /*! \param [in] pdfList a list of Pdfs \param [in] fout the output stream to write to */ void printFitParameters(const LauPdfList& pdfList, std::ostream& fout) const; //! Update the fit parameters for all PDFs in the list /*! 
\param [in] pdfList a list of Pdfs */ void updateFitParameters(LauPdfList& pdfList); //! Have all PDFs in the list cache the data /*! \param [in] pdfList the list of pdfs \param [in] theData the data from the fit */ void cacheInfo(LauPdfList& pdfList, const LauFitDataTree& theData); //! Calculate the product of the per-event likelihoods of the PDFs in the list /*! \param [in] pdfList the list of pdfs \param [in] iEvt the event number */ Double_t prodPdfValue(LauPdfList& pdfList, UInt_t iEvt); //! Do any of the PDFs have a dependence on the DP? /*! \return the flag to indicated if there is a DP dependence */ Bool_t pdfsDependOnDP() const {return pdfsDependOnDP_;} //! Do any of the PDFs have a dependence on the DP? /*! \param [in] dependOnDP the flag to indicated if there is a DP dependence */ void pdfsDependOnDP(Bool_t dependOnDP) { pdfsDependOnDP_ = dependOnDP; } //! Const access the fit variables const LauParameterPList& fitPars() const {return fitVars_;} //! Access the fit variables LauParameterPList& fitPars() {return fitVars_;} //! Const access the fit variables which affect the DP normalisation const LauParameterPSet& resPars() const {return resVars_;} //! Access the fit variables which affect the DP normalisation LauParameterPSet& resPars() {return resVars_;} //! Const access the extra variables const LauParameterList& extraPars() const {return extraVars_;} //! Access the extra variables LauParameterList& extraPars() {return extraVars_;} //! Const access the Gaussian constrained variables const LauAbsRValuePList& conPars() const {return conVars_;} //! Access the Gaussian constrained variables LauAbsRValuePList& conPars() {return conVars_;} //! Const access the gen ntuple const LauGenNtuple* genNtuple() const {return genNtuple_;} //! Access the gen ntuple LauGenNtuple* genNtuple() {return genNtuple_;} //! Const access the sPlot ntuple const LauGenNtuple* sPlotNtuple() const {return sPlotNtuple_;} //! Access the sPlot ntuple LauGenNtuple* sPlotNtuple() {return sPlotNtuple_;} //! Const access the data store const LauFitDataTree* fitData() const {return inputFitData_;} //! Access the data store LauFitDataTree* fitData() {return inputFitData_;} //! Imported parameters file name TString fixParamFileName_; //! Imported parameters tree name TString fixParamTreeName_; //! Map from imported parameter name to value std::map fixParamMap_; //! Imported parameter names std::set fixParamNames_; //! Whether to fix the loaded parameters (kTRUE) or leave them floating (kFALSE) Bool_t fixParams_; //! The set of parameters that are imported (either from a file or by value) and not // set to be fixed in the fit. In addition to those from fixParamNames_, these // include those imported from a file. std::set allImportedFreeParams_; private: //! Copy constructor (not implemented) LauAbsFitModel(const LauAbsFitModel& rhs); //! Copy assignment operator (not implemented) LauAbsFitModel& operator=(const LauAbsFitModel& rhs); // Various control booleans //! Option to make toy from 1st successful experiment Bool_t compareFitData_; //! Option to output a .C file of PDF's Bool_t savePDF_; //! Option to output a Latex format table Bool_t writeLatexTable_; //! Option to write sPlot data Bool_t writeSPlotData_; //! Option to store DP efficiencies in the sPlot ntuple Bool_t storeDPEff_; //! Option to randomise the initial values of the fit parameters Bool_t randomFit_; //! Option to perform an extended ML fit Bool_t emlFit_; //! Option to perform Poisson smearing Bool_t poissonSmear_; //! 
Option to enable embedding Bool_t enableEmbedding_; //! Option to include the DP as part of the fit Bool_t usingDP_; //! Option to state if pdfs depend on DP position Bool_t pdfsDependOnDP_; // Info on number of experiments and number of events //! Internal vector of fit parameters LauParameterPList fitVars_; //! Internal set of fit parameters upon which the DP normalisation depends LauParameterPSet resVars_; //! Extra variables that aren't in the fit but are stored in the ntuple LauParameterList extraVars_; //! Internal vectors of Gaussian parameters LauAbsRValuePList conVars_; // Input data and output ntuple //! The input data LauFitDataTree* inputFitData_; //! The generated ntuple LauGenNtuple* genNtuple_; //! The sPlot ntuple LauGenNtuple* sPlotNtuple_; // Background class names //! The background class names LauBkgndClassMap bkgndClassNames_; //! An empty string const TString nullString_; // sFit related variables //! Option to perfom the sFit Bool_t doSFit_; //! The name of the sWeight branch TString sWeightBranchName_; //! The vector of sWeights std::vector sWeights_; //! The sWeight scaling factor Double_t sWeightScaleFactor_; // Fit timers //! The fit timer TStopwatch timer_; //! The total fit timer TStopwatch cumulTimer_; //! The output table name TString outputTableName_; // Comparison toy MC related variables //! The output file name for Toy MC TString fitToyMCFileName_; //! The output table name for Toy MC TString fitToyMCTableName_; //! The scaling factor (toy vs data statistics) UInt_t fitToyMCScale_; //! Option to perform Poisson smearing Bool_t fitToyMCPoissonSmear_; // sPlot related variables //! The name of the sPlot file TString sPlotFileName_; //! The name of the sPlot tree TString sPlotTreeName_; //! Control the verbosity of the sFit TString sPlotVerbosity_; }; #endif diff --git a/inc/LauBkgndDPModel.hh b/inc/LauBkgndDPModel.hh index 54d8a16..a1c6f40 100644 --- a/inc/LauBkgndDPModel.hh +++ b/inc/LauBkgndDPModel.hh @@ -1,171 +1,189 @@ /* Copyright 2004 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauBkgndDPModel.hh \brief File containing declaration of LauBkgndDPModel class. */ /*! \class LauBkgndDPModel \brief Class for defining a histogram-based background Dalitz plot model Class for defining a histogram-based background Dalitz plot model */ #ifndef LAU_BKGND_DP_MODEL #define LAU_BKGND_DP_MODEL #include #include "LauAbsBkgndDPModel.hh" class TH2; class Lau2DAbsDPPdf; class LauDaughters; class LauFitDataTree; class LauVetoes; class LauBkgndDPModel : public LauAbsBkgndDPModel { public: //! Constructor /*! \param [in] daughters the daughters in the decay \param [in] vetoes the vetoes in the Datliz plot */ LauBkgndDPModel(LauDaughters* daughters, LauVetoes* vetoes); //! Destructor virtual ~LauBkgndDPModel(); //! Set background histogram /*! 
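A possible configuration sketch (it is assumed that daughters, vetoes and bkgndHist, a TH2*, have already been created elsewhere):
\code
LauBkgndDPModel* bkgndModel = new LauBkgndDPModel( daughters, vetoes );
// use linear interpolation, no bin fluctuation, full histogram in conventional DP coordinates
bkgndModel->setBkgndHisto( bkgndHist, kTRUE, kFALSE, kFALSE, kFALSE );
\endcode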
\param [in] histo the 2D histogram describing the DP distribution \param [in] useInterpolation boolean flag to determine whether linear interpolation between bins should be used or simply the raw bin values \param [in] fluctuateBins boolean flag to determine whether the bin contents should be fluctuated in accordance with their errors. The seed for the random number generator used to fluctuate the bins should first be set using LauRandom::setSeed. \param [in] useUpperHalfOnly boolean flag to specify that the supplied histogram contains only the upper half of a symmetric DP (or lower half if using square DP coordinates) \param [in] squareDP boolean flag to determine whether the supplied histogram is given in square DP coordinates */ void setBkgndHisto(const TH2* histo, Bool_t useInterpolation, Bool_t fluctuateBins, Bool_t useUpperHalfOnly, Bool_t squareDP = kFALSE); //! Set the background histogram and generate a spline /*! \param [in] histo the 2D histogram describing the DP distribution \param [in] fluctuateBins boolean flag to determine whether the bin contents should be fluctuated in accordance with their errors. The seed for the random number generator used to fluctuate the bins should first be set using LauRandom::setSeed. \param [in] useUpperHalfOnly boolean flag to specify that the supplied histogram contains only the upper half of a symmetric DP (or lower half if using square DP coordinates) \param [in] squareDP boolean flag to determine whether the supplied histogram is given in square DP coordinates */ void setBkgndSpline(const TH2* histo, Bool_t fluctuateBins, Bool_t useUpperHalfOnly, Bool_t squareDP); //! Initialise the model virtual void initialise(); //! Generate a toy MC event from the model /*! \return success/failure flag */ virtual Bool_t generate(); + //! Get likelihood for a given DP position + /*! + \param [in] m13Sq the invariant mass squared of children 1 and 3 + \param [in] m23Sq the invariant mass squared of children 2 and 3 + \return the likelihood value + */ + virtual Double_t getLikelihood( const Double_t m13Sq, + const Double_t m23Sq ); + + //! Get unnormalised likelihood for a given DP position + /*! + \param [in] m13Sq the invariant mass squared of children 1 and 3 + \param [in] m23Sq the invariant mass squared of children 2 and 3 + \return the unnormalised likelihood value + */ + virtual Double_t getUnNormValue( const Double_t m13Sq, + const Double_t m23Sq ); + + //! Get likelihood for a given event + /*! + \param [in] iEvt the event number + \return the likelihood value + */ + virtual Double_t getLikelihood(UInt_t iEvt); + //! Get unnormalised likelihood for a given event /*! \param [in] iEvt the event number \return the unnormalised likelihood value */ virtual Double_t getUnNormValue(UInt_t iEvt); //! Get PDF normalisation constant /*! \return the PDF normalisation constant */ virtual Double_t getPdfNorm() const {return pdfNorm_;} - //! Get likelihood for a given event - /*! - \param [in] iEvt the event number - \return the likelihood value - */ - virtual Double_t getLikelihood(UInt_t iEvt); - //! Cache the input data and (if appropriate) the per-event likelihood values /*! \param [in] fitDataTree the input data */ virtual void fillDataTree(const LauFitDataTree& fitDataTree); protected: //! Calulate histogram value at a given point /*! \param [in] xVal the x-value \param [in] yVal the y-value \return the histogram value */ Double_t calcHistValue(Double_t xVal, Double_t yVal); //! Set data event number /*! 
\param [in] iEvt the event number */ virtual void setDataEventNo(UInt_t iEvt); private: //! Copy constructor (not implemented) LauBkgndDPModel(const LauBkgndDPModel& rhs); //! Copy assignment operator (not implemented) LauBkgndDPModel& operator=(const LauBkgndDPModel& rhs); //! Flags whether the DP is symmetrical or not Bool_t symmetricalDP_; //! Flags whether or not to work in square DP coordinates Bool_t squareDP_; //! PDF of Dalitz plot background, from a 2D histogram Lau2DAbsDPPdf* bgHistDPPdf_; //! Cached histogram values for each event std::vector bgData_; //! Histogram value for the current event Double_t curEvtHistVal_; //! Maximum height of PDF Double_t maxPdfHeight_; //! Normalisation of PDF Double_t pdfNorm_; //! Boolean to indicate if the warning that there is no histogram has already been issued Bool_t doneGenWarning_; //! Flag to track whether a warning has been issued for bin values less than zero mutable Bool_t lowBinWarningIssued_; }; #endif diff --git a/inc/LauCPFitModel.hh b/inc/LauCPFitModel.hh index 38765c7..f13367f 100644 --- a/inc/LauCPFitModel.hh +++ b/inc/LauCPFitModel.hh @@ -1,711 +1,729 @@ /* Copyright 2004 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauCPFitModel.hh \brief File containing declaration of LauCPFitModel class. */ /*! \class LauCPFitModel \brief Class for defining a CP fit model. LauCPFitModel is a class that allows the user to define a three-body Dalitz plot according to the isobar model, i.e. defining a set of resonances that have complex amplitudes that can interfere with each other. It extends the LauSimpleFitModel in that it allows simultaneous fitting of two parent flavours simultaneously. By default, it assumes perfect tagging of those flavours but it can also be used in an untagged scenario. */ #ifndef LAU_CP_FIT_MODEL #define LAU_CP_FIT_MODEL #include #include "TString.h" #include "LauAbsFitModel.hh" #include "LauComplex.hh" #include "LauParameter.hh" class TH2; class LauAbsBkgndDPModel; class LauAbsCoeffSet; class LauIsobarDynamics; class LauAbsPdf; class LauEffModel; class LauEmbeddedData; class LauKinematics; class LauScfMap; class LauCPFitModel : public LauAbsFitModel { public: //! Constructor /*! \param [in] negModel DP model for the antiparticle \param [in] posModel DP model for the particle \param [in] tagged is the analysis tagged or untagged? \param [in] tagVarName the variable name in the data tree that specifies the event tag */ LauCPFitModel(LauIsobarDynamics* negModel, LauIsobarDynamics* posModel, Bool_t tagged = kTRUE, const TString& tagVarName = "charge"); //! Destructor virtual ~LauCPFitModel(); //! Set the signal event yield /*! \param [in] nSigEvents contains the signal yield and option to fix it */ virtual void setNSigEvents(LauParameter* nSigEvents); //! Set the signal event yield if there is an asymmetry /*! 
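A sketch of a possible call (the names, yields and ranges are illustrative, and the LauParameter constructor is assumed to take name, value, minimum, maximum and fixed flag):
\code
LauParameter* nSig    = new LauParameter( "signalEvents", 5000.0, 0.0, 20000.0, kFALSE );   // floating signal yield (illustrative values)
LauParameter* sigAsym = new LauParameter( "signalAsym", 0.0, -1.0, 1.0, kFALSE );           // floating signal asymmetry (illustrative values)
cpFitModel->setNSigEvents( nSig, sigAsym );   // cpFitModel is assumed to be a LauCPFitModel*
\endcode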
\param [in] nSigEvents contains the signal yield and option to fix it \param [in] sigAsym contains the signal asymmetry and option to fix it \param [in] forceAsym the option to force there to be an asymmetry */ virtual void setNSigEvents(LauParameter* nSigEvents, LauParameter* sigAsym, Bool_t forceAsym = kFALSE); //! Set the background event yield(s) /*! The name of the parameter must be that of the corresponding background category (so that it can be correctly assigned) \param [in] nBkgndEvents contains the name, yield and option to fix the yield of the background */ virtual void setNBkgndEvents(LauAbsRValue* nBkgndEvents); //! Set the background event yield(s) /*! The names of the parameters must be that of the corresponding background category (so that they can be correctly assigned) \param [in] nBkgndEvents contains the name, yield and option to fix the yield of the background \param [in] bkgndAsym contains the background asymmetry and option to fix it */ virtual void setNBkgndEvents(LauAbsRValue* nBkgndEvents, LauAbsRValue* bkgndAsym); //! Set the background DP models /*! \param [in] bkgndClass the name of the background class \param [in] negModel the DP model of the B- background \param [in] posModel the DP model of the B+ background */ void setBkgndDPModels(const TString& bkgndClass, LauAbsBkgndDPModel* negModel, LauAbsBkgndDPModel* posModel); //! Split the signal component into well-reconstructed and mis-reconstructed parts /*! The nomenclature used here is TM (truth-matched) and SCF (self cross feed) In this option, the SCF fraction is DP-dependent Can also optionally provide a smearing matrix to smear the SCF DP PDF \param [in] dpHisto the DP histogram of the SCF fraction value \param [in] upperHalf boolean flag to specify that the supplied histogram contains only the upper half of a symmetric DP (or lower half if using square DP coordinates) \param [in] fluctuateBins whether the bins on the histogram should be varied in accordance with their uncertainties (for evaluation of systematic uncertainties) \param [in] scfMap the (optional) smearing matrix */ void splitSignalComponent( const TH2* dpHisto, const Bool_t upperHalf = kFALSE, const Bool_t fluctuateBins = kFALSE, LauScfMap* scfMap = 0 ); //! Split the signal component into well reconstructed and mis-reconstructed parts /*! The nomenclature used here is TM (truth-matched) and SCF (self cross feed) In this option, the SCF fraction is a single global number \param [in] scfFrac the SCF fraction value \param [in] fixed whether the SCF fraction is fixed or floated in the fit */ void splitSignalComponent( const Double_t scfFrac, const Bool_t fixed ); //! Determine whether we are splitting the signal into TM and SCF parts Bool_t useSCF() const { return useSCF_; } //! Determine whether the SCF fraction is DP-dependent Bool_t useSCFHist() const { return useSCFHist_; } //! Determine if we are smearing the SCF DP PDF Bool_t smearSCFDP() const { return (scfMap_ != 0); } // Set the DeltaE and mES models, i.e. give us the PDFs //! Set the signal PDFs /*! \param [in] negPdf the PDF to be added to the B- signal model \param [in] posPdf the PDF to be added to the B+ signal model */ void setSignalPdfs(LauAbsPdf* negPdf, LauAbsPdf* posPdf); //! Set the SCF PDF for a given variable /*! \param [in] negPdf the PDF to be added to the B- signal model \param [in] posPdf the PDF to be added to the B+ signal model */ void setSCFPdfs(LauAbsPdf* negPdf, LauAbsPdf* posPdf); //! Set the background PDFs /*! 
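For example (assuming that "qqbar" has been registered as a background class and that negPdf and posPdf point to previously constructed PDFs for the B- and B+ samples respectively):
\code
cpFitModel->setBkgndPdfs( "qqbar", negPdf, posPdf );
\endcode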
\param [in] bkgndClass the name of the background class \param [in] negPdf the PDF to be added to the B- background model \param [in] posPdf the PDF to be added to the B+ background model */ void setBkgndPdfs(const TString& bkgndClass, LauAbsPdf* negPdf, LauAbsPdf* posPdf); //! Embed full simulation events for the B- signal, rather than generating toy from the PDFs /*! \param [in] fileName the name of the file containing the events \param [in] treeName the name of the tree \param [in] reuseEventsWithinEnsemble sample with replacement but only replace events once each experiment has been generated \param [in] reuseEventsWithinExperiment sample with immediate replacement \param [in] useReweighting perform an accept/reject routine using the configured signal amplitude model based on the MC-truth DP coordinate */ void embedNegSignal(const TString& fileName, const TString& treeName, Bool_t reuseEventsWithinEnsemble, Bool_t reuseEventsWithinExperiment = kFALSE, Bool_t useReweighting = kFALSE); //! Embed full simulation events for the given background class, rather than generating toy from the PDFs /*! \param [in] bgClass the name of the background class \param [in] fileName the name of the file containing the events \param [in] treeName the name of the tree \param [in] reuseEventsWithinEnsemble sample with replacement but only replace events once each experiment has been generated \param [in] reuseEventsWithinExperiment sample with immediate replacement */ void embedNegBkgnd(const TString& bgClass, const TString& fileName, const TString& treeName, Bool_t reuseEventsWithinEnsemble, Bool_t reuseEventsWithinExperiment = kFALSE); //! Embed full simulation events for the B+ signal, rather than generating toy from the PDFs /*! \param [in] fileName the name of the file containing the events \param [in] treeName the name of the tree \param [in] reuseEventsWithinEnsemble sample with replacement but only replace events once each experiment has been generated \param [in] reuseEventsWithinExperiment sample with immediate replacement \param [in] useReweighting perform an accept/reject routine using the configured signal amplitude model based on the MC-truth DP coordinate */ void embedPosSignal(const TString& fileName, const TString& treeName, Bool_t reuseEventsWithinEnsemble, Bool_t reuseEventsWithinExperiment = kFALSE, Bool_t useReweighting = kFALSE); //! Embed full simulation events for the given background class, rather than generating toy from the PDFs /*! \param [in] bgClass the name of the background class \param [in] fileName the name of the file containing the events \param [in] treeName the name of the tree \param [in] reuseEventsWithinEnsemble sample with replacement but only replace events once each experiment has been generated \param [in] reuseEventsWithinExperiment sample with immediate replacement */ void embedPosBkgnd(const TString& bgClass, const TString& fileName, const TString& treeName, Bool_t reuseEventsWithinEnsemble, Bool_t reuseEventsWithinExperiment = kFALSE); //! Set the DP amplitude coefficients /*! The name of the coeffSet must match the name of one of the resonances in the DP model for the antiparticle (the name of the conjugate state in the model for the particle will be automatically determined). The supplied order of coefficients will be rearranged to match the order in which the resonances are stored in the dynamics, see LauIsobarDynamics::addResonance. \param [in] coeffSet the set of coefficients */ virtual void setAmpCoeffSet(LauAbsCoeffSet* coeffSet); + //! 
Calculate the DP amplitude(s) for a given DP position + /*! + If not already done, this function will initialise the DP models + \param [in] m13Sq the invariant mass squared of children 1 and 3 + \param [in] m23Sq the invariant mass squared of children 2 and 3 + \return a container of complex amplitudes, labelled to indicate to which component they belong + */ + virtual std::map getDPAmps( const Double_t m13Sq, const Double_t m23Sq ); + + //! Calculate the DP likelihood(s) for a given DP position + /*! + If not already done, this function will initialise the DP models + \param [in] m13Sq the invariant mass squared of children 1 and 3 + \param [in] m23Sq the invariant mass squared of children 2 and 3 + \return a container of likelihood values, labelled to indicate to which component they belong + */ + virtual std::map getDPLikelihoods( const Double_t m13Sq, const Double_t m23Sq ); + protected: //! Define a map to be used to store a category name and numbers typedef std::map< std::pair, std::pair > LauGenInfo; //! Typedef for a vector of background DP models typedef std::vector LauBkgndDPModelList; //! Typedef for a vector of background PDFs typedef std::vector LauBkgndPdfsList; //! Typedef for a vector of background yields typedef std::vector LauBkgndYieldList; //! Typedef for a vector of embedded data objects typedef std::vector LauBkgndEmbDataList; //! Typedef for a vector of booleans to flag if events are reused typedef std::vector LauBkgndReuseEventsList; //! Weight events based on the DP model /*! \param [in] dataFileName the name of the data file \param [in] dataTreeName the name of the data tree */ virtual void weightEvents( const TString& dataFileName, const TString& dataTreeName ); //! Initialise the fit virtual void initialise(); //! Initialise the signal DP models virtual void initialiseDPModels(); //! Recalculate Normalization the signal DP models virtual void recalculateNormalisation(); //! Update the coefficients virtual void updateCoeffs(); //! Toy MC generation and fitting overloaded functions virtual Bool_t genExpt(); //! Calculate things that depend on the fit parameters after they have been updated by Minuit virtual void propagateParUpdates(); //! Read in the input fit data variables, e.g. m13Sq and m23Sq virtual void cacheInputFitVars(); //! Check the initial fit parameters virtual void checkInitFitParams(); //! Get the fit results and store them /*! \param [in] tablePrefixName prefix for the name of the output file */ virtual void finaliseFitResults(const TString& tablePrefixName); //! Print the fit fractions, total DP rate and mean efficiency /*! \param [out] output the stream to which to print */ virtual void printFitFractions(std::ostream& output); //! Print the asymmetries /*! \param [out] output the stream to which to print */ virtual void printAsymmetries(std::ostream& output); //! Write the fit results in latex table format /*! \param [in] outputFile the name of the output file */ virtual void writeOutTable(const TString& outputFile); //! Save the pdf Plots for all the resonances /*! \param [in] label prefix for the file name to be saved */ virtual void savePDFPlots(const TString& label); //! Save the pdf Plots for the sum of resonances of a given spin /*! \param [in] label prefix for the file name to be saved \param [in] spin spin of the wave to be saved */ virtual void savePDFPlotsWave(const TString& label, const Int_t& spin); //! 
Store the per event likelihood values virtual void storePerEvtLlhds(); // Methods to do with calculating the likelihood functions // and manipulating the fitting parameters. //! Get the total likelihood for each event /*! \param [in] iEvt the event number */ virtual Double_t getTotEvtLikelihood(UInt_t iEvt); //! Calculate the signal and background likelihoods for the DP for a given event /*! \param [in] iEvt the event number */ virtual void getEvtDPLikelihood(UInt_t iEvt); //! Calculate the SCF likelihood for the DP for a given event /*! \param [in] iEvt the event number */ virtual Double_t getEvtSCFDPLikelihood(UInt_t iEvt); //! Determine the signal and background likelihood for the extra variables for a given event /*! \param [in] iEvt the event number */ virtual void getEvtExtraLikelihoods(UInt_t iEvt); //! Get the total number of events /*! \return the total number of events */ virtual Double_t getEventSum() const; //! Set the fit parameters for the DP model void setSignalDPParameters(); //! Set the fit parameters for the extra PDFs void setExtraPdfParameters(); //! Set the initial yields void setFitNEvents(); //! Set-up other parameters that are derived from the fit results, e.g. fit fractions void setExtraNtupleVars(); //! Randomise the initial fit parameters void randomiseInitFitPars(); //! Calculate the CP-conserving and CP-violating fit fractions /*! \param [in] initValues is this before or after the fit */ void calcExtraFractions(Bool_t initValues = kFALSE); //! Calculate the CP asymmetries /*! \param [in] initValues is this before or after the fit */ void calcAsymmetries(Bool_t initValues = kFALSE); //! Define the length of the background vectors virtual void setupBkgndVectors(); //! Determine the number of events to generate for each hypothesis std::pair eventsToGenerate(); //! Generate signal event Bool_t generateSignalEvent(); //! Generate background event /*! \param [in] bgID ID number of the background class */ Bool_t generateBkgndEvent(UInt_t bgID); //! Setup the required ntuple branches void setupGenNtupleBranches(); //! Store all of the DP information void setDPBranchValues(); //! Generate from the extra PDFs /*! \param [in] extraPdfs the list of extra PDFs \param [in] embeddedData the embedded data sample */ void generateExtraPdfValues(LauPdfList* extraPdfs, LauEmbeddedData* embeddedData); //! Store the MC truth info on the TM/SCF nature of the embedded signal event /*! \param [in] embeddedData the full simulation information */ Bool_t storeSignalMCMatch(LauEmbeddedData* embeddedData); //! Add sPlot branches for the extra PDFs /*! \param [in] extraPdfs the list of extra PDFs \param [in] prefix the list of prefixes for the branch names */ void addSPlotNtupleBranches(const LauPdfList* extraPdfs, const TString& prefix); //! Set the branches for the sPlot ntuple with extra PDFs /*! \param [in] extraPdfs the list of extra PDFs \param [in] prefix the list of prefixes for the branch names \param [in] iEvt the event number */ Double_t setSPlotNtupleBranchValues(LauPdfList* extraPdfs, const TString& prefix, UInt_t iEvt); //! Update the signal events after Minuit sets background parameters void updateSigEvents(); //! Add branches to store experiment number and the event number within the experiment virtual void setupSPlotNtupleBranches(); //! Returns the names of all variables in the fit virtual LauSPlot::NameSet variableNames() const; //! Returns the names and yields of species that are free in the fit virtual LauSPlot::NumbMap freeSpeciesNames() const; //! 
Returns the names and yields of species that are fixed in the fit virtual LauSPlot::NumbMap fixdSpeciesNames() const; //! Returns the species and variables for all 2D PDFs in the fit virtual LauSPlot::TwoDMap twodimPDFs() const; //! Check if the signal is split into well-reconstructed and mis-reconstructed types virtual Bool_t splitSignal() const {return this->useSCF();} //! Check if the mis-reconstructed signal is to be smeared in the DP virtual Bool_t scfDPSmear() const {return (scfMap_ != 0);} //! Append fake data points to the inputData for each bin in the SCF smearing matrix /*! We'll be caching the DP amplitudes and efficiencies of the centres of the true bins. To do so, we attach some fake points at the end of inputData, the number of the entry minus the total number of events corresponding to the number of the histogram for that given true bin in the LauScfMap object. (What this means is that when Laura is provided with the LauScfMap object by the user, it's the latter who has to make sure that it contains the right number of histograms and in exactly the right order!) \param [in] inputData the fit data */ void appendBinCentres( LauFitDataTree* inputData ); LauIsobarDynamics* getNegSigModel() {return negSigModel_;} LauIsobarDynamics* getPosSigModel() {return posSigModel_;} //! Retrieve a named parameter from a TTree /*! \param [in] tree a reference to the tree from which to obtain the parameters \param [in] name the name of the parameter to retrive */ Double_t getParamFromTree( TTree& tree, const TString& name ); //! Set a LauParameter to a given value /*! \param [in] param a pointer to the LauParameter to set \param [in] val the value to set \param [in] fix whether to fix the LauParameter or leave it floating */ void fixParam( LauParameter* param, const Double_t val, const Bool_t fix ); //! Set a vector of LauParameters according to the specified method /*! \param [in] params the vector of pointers to LauParameter to set values of */ void fixParams( std::vector& params ); private: //! Copy constructor (not implemented) LauCPFitModel(const LauCPFitModel& rhs); //! Copy assignment operator (not implemented) LauCPFitModel& operator=(const LauCPFitModel& rhs); //! The B- signal Dalitz plot model LauIsobarDynamics *negSigModel_; //! The B+ signal Dalitz plot model LauIsobarDynamics *posSigModel_; //! The B- background Dalitz plot models LauBkgndDPModelList negBkgndDPModels_; //! The B+ background Dalitz plot models LauBkgndDPModelList posBkgndDPModels_; //! The B- Dalitz plot kinematics object LauKinematics *negKinematics_; //! The B+ Dalitz plot kinematics object LauKinematics *posKinematics_; //! The B- signal PDFs LauPdfList negSignalPdfs_; //! The B+ signal PDFs LauPdfList posSignalPdfs_; //! The B- SCF PDFs LauPdfList negScfPdfs_; //! The B+ SCF PDFs LauPdfList posScfPdfs_; //! The B- background PDFs LauBkgndPdfsList negBkgndPdfs_; //! The B+ background PDFs LauBkgndPdfsList posBkgndPdfs_; //! Background boolean Bool_t usingBkgnd_; //! Number of signal components UInt_t nSigComp_; //! Number of signal DP parameters UInt_t nSigDPPar_; //! Number of extra PDF parameters UInt_t nExtraPdfPar_; //! Number of normalisation parameters (yields, asymmetries) UInt_t nNormPar_; //! Magnitudes and Phases std::vector coeffPars_; //! The B- fit fractions LauParArray negFitFrac_; //! The B+ fit fractions LauParArray posFitFrac_; //! Fit B- fractions (uncorrected for the efficiency) LauParArray negFitFracEffUnCorr_; //! 
Fit B+ fractions (uncorrected for the efficiency) LauParArray posFitFracEffUnCorr_; //! The CP violating fit fraction LauParArray CPVFitFrac_; //! The CP conserving fit fraction LauParArray CPCFitFrac_; //! The fit fraction asymmetries std::vector fitFracAsymm_; //! A_CP parameter std::vector acp_; //! The mean efficiency for B- model LauParameter negMeanEff_; //! The mean efficiency for B+ model LauParameter posMeanEff_; //! The average DP rate for B- LauParameter negDPRate_; //! The average DP rate for B+ LauParameter posDPRate_; //! Signal yield LauParameter* signalEvents_; //! Signal asymmetry LauParameter* signalAsym_; //! Option to force an asymmetry Bool_t forceAsym_; //! Background yield(s) LauBkgndYieldList bkgndEvents_; //! Background asymmetries(s) LauBkgndYieldList bkgndAsym_; //! IS the analysis tagged? const Bool_t tagged_; //! Event charge const TString tagVarName_; //! Current event charge Int_t curEvtCharge_; //! Vector to store event charges std::vector evtCharges_; //! Is the signal split into TM and SCF Bool_t useSCF_; //! Is the SCF fraction DP-dependent Bool_t useSCFHist_; //! The (global) SCF fraction parameter LauParameter scfFrac_; //! The histogram giving the DP-dependence of the SCF fraction LauEffModel* scfFracHist_; //! The smearing matrix for the SCF DP PDF LauScfMap* scfMap_; //! The cached values of the SCF fraction for each event std::vector recoSCFFracs_; //! The cached values of the SCF fraction for each bin centre std::vector fakeSCFFracs_; //! The cached values of the sqDP jacobians for each event std::vector recoJacobians_; //! The cached values of the sqDP jacobians for each true bin std::vector fakeJacobians_; //! Run choice variables Bool_t compareFitData_; //! Name of the parent particle TString negParent_; //! Name of the parent particle TString posParent_; //! The complex coefficients for B- std::vector negCoeffs_; //! The complex coefficients for B+ std::vector posCoeffs_; // Embedding full simulation events //! The B- signal event tree LauEmbeddedData *negSignalTree_; //! The B+ signal event tree LauEmbeddedData *posSignalTree_; //! The B- background event tree LauBkgndEmbDataList negBkgndTree_; //! The B+ background event tree LauBkgndEmbDataList posBkgndTree_; //! Boolean to reuse signal events Bool_t reuseSignal_; //! Boolean to use reweighting for B- Bool_t useNegReweighting_; //! Boolean to use reweighting for B+ Bool_t usePosReweighting_; //! Vector of booleans to reuse background events LauBkgndReuseEventsList reuseBkgnd_; // Likelihood values //! Signal DP likelihood value Double_t sigDPLike_; //! SCF DP likelihood value Double_t scfDPLike_; //! Background DP likelihood value(s) std::vector bkgndDPLike_; //! Signal likelihood from extra PDFs Double_t sigExtraLike_; //! SCF likelihood from extra PDFs Double_t scfExtraLike_; //! Background likelihood value(s) from extra PDFs std::vector bkgndExtraLike_; //! Total signal likelihood Double_t sigTotalLike_; //! Total SCF likelihood Double_t scfTotalLike_; //! Total background likelihood(s) std::vector bkgndTotalLike_; }; #endif diff --git a/inc/LauSimpleFitModel.hh b/inc/LauSimpleFitModel.hh index f8526ef..bea14da 100644 --- a/inc/LauSimpleFitModel.hh +++ b/inc/LauSimpleFitModel.hh @@ -1,529 +1,547 @@ /* Copyright 2004 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauSimpleFitModel.hh \brief File containing declaration of LauSimpleFitModel class. */ /*! \class LauSimpleFitModel \brief Define a Dalitz plot according to the isobar model. A class that allows the user to define a three-body B meson Dalitz plot according to the isobar model, i.e. defining a set of resonances that have complex amplitudes that can interfere with each other. */ #ifndef LAU_SIMPLE_FIT_MODEL #define LAU_SIMPLE_FIT_MODEL #include #include "TString.h" #include "LauAbsFitModel.hh" #include "LauComplex.hh" #include "LauParameter.hh" class TH2; class LauAbsBkgndDPModel; class LauAbsCoeffSet; class LauIsobarDynamics; class LauAbsPdf; class LauEffModel; class LauEmbeddedData; class LauKinematics; class LauScfMap; class LauSimpleFitModel : public LauAbsFitModel { public: //! Constructor /*! \param [in] sigDPModel the signal DP model */ explicit LauSimpleFitModel(LauIsobarDynamics* sigDPModel); //! Destructor virtual ~LauSimpleFitModel(); //! Set the signal event yield /*! \param [in] nSigEvents contains the signal yield and boolean to fix it or not */ virtual void setNSigEvents(LauParameter* nSigEvents); //! Set the background event yield(s) /*! The name of the parameter must be that of the corresponding background category (so that it can be correctly assigned) \param [in] nBkgndEvents contains the name, yield and option to fix the background yield */ virtual void setNBkgndEvents(LauAbsRValue* nBkgndEvents); //! Set the background DP models /*! \param [in] bkgndClass the name of the background class \param [in] bkgndModel the DP model of the background */ void setBkgndDPModel(const TString& bkgndClass, LauAbsBkgndDPModel* bkgndModel); //! Split the signal component into well-reconstructed and mis-reconstructed parts /*! The nomenclature used here is TM (truth-matched) and SCF (self cross feed) In this option, the SCF fraction is DP-dependent Can also optionally provide a smearing matrix to smear the SCF DP PDF \param [in] dpHisto the DP histogram of the SCF fraction value \param [in] upperHalf boolean flag to specify that the supplied histogram contains only the upper half of a symmetric DP (or lower half if using square DP coordinates) \param [in] fluctuateBins whether the bins on the histogram should be varied in accordance with their uncertainties (for evaluation of systematic uncertainties) \param [in] scfMap the (optional) smearing matrix */ void splitSignalComponent( const TH2* dpHisto, const Bool_t upperHalf = kFALSE, const Bool_t fluctuateBins = kFALSE, LauScfMap* scfMap = 0 ); //! Split the signal component into well reconstructed and mis-reconstructed parts /*! The nomenclature used here is TM (truth-matched) and SCF (self cross feed) In this option, the SCF fraction is a single global number \param [in] scfFrac the SCF fraction value \param [in] fixed whether the SCF fraction is fixed or floated in the fit */ void splitSignalComponent( const Double_t scfFrac, const Bool_t fixed ); //! Determine whether we are splitting the signal into TM and SCF parts Bool_t useSCF() const { return useSCF_; } //! 
Determine whether the SCF fraction is DP-dependent Bool_t useSCFHist() const { return useSCFHist_; } //! Determine if we are smearing the SCF DP PDF Bool_t smearSCFDP() const { return (scfMap_ != 0); } //! Set the signal PDF for a given variable /*! \param [in] pdf the PDF to be added to the signal model */ void setSignalPdf(LauAbsPdf* pdf); //! Set the SCF PDF for a given variable /*! \param [in] pdf the PDF to be added to the SCF model */ void setSCFPdf(LauAbsPdf* pdf); //! Set the background PDF /*! \param [in] bkgndClass the name of the background class \param [in] pdf the PDF to be added to the background model */ void setBkgndPdf(const TString& bkgndClass, LauAbsPdf* pdf); //! Embed full simulation events for the signal, rather than generating toy from the PDFs /*! \param [in] fileName the name of the file containing the events \param [in] treeName the name of the tree \param [in] reuseEventsWithinEnsemble sample with replacement but only replace events once each experiment has been generated \param [in] reuseEventsWithinExperiment sample with immediate replacement \param [in] useReweighting perform an accept/reject routine using the configured signal amplitude model based on the MC-truth DP coordinate */ void embedSignal(const TString& fileName, const TString& treeName, Bool_t reuseEventsWithinEnsemble, Bool_t reuseEventsWithinExperiment = kFALSE, Bool_t useReweighting = kFALSE); //! Embed full simulation events for the given background class, rather than generating toy from the PDFs /*! \param [in] bkgndClass the name of the background class \param [in] fileName the name of the file containing the events \param [in] treeName the name of the tree \param [in] reuseEventsWithinEnsemble sample with replacement but only replace events once each experiment has been generated \param [in] reuseEventsWithinExperiment sample with immediate replacement */ void embedBkgnd(const TString& bkgndClass, const TString& fileName, const TString& treeName, Bool_t reuseEventsWithinEnsemble, Bool_t reuseEventsWithinExperiment = kFALSE); //! Set the DP amplitude coefficients /*! The name of the coeffSet must match the name of one of the resonances in the DP model. The supplied order of coefficients will be rearranged to match the order in which the resonances are stored in the dynamics, see LauIsobarDynamics::addResonance. \param [in] coeffSet the set of coefficients */ virtual void setAmpCoeffSet(LauAbsCoeffSet* coeffSet); + //! Calculate the DP amplitude(s) for a given DP position + /*! + If not already done, this function will initialise the DP models + \param [in] m13Sq the invariant mass squared of children 1 and 3 + \param [in] m23Sq the invariant mass squared of children 2 and 3 + \return a container of complex amplitudes, labelled to indicate to which component they belong + */ + virtual std::map getDPAmps( const Double_t m13Sq, const Double_t m23Sq ); + + //! Calculate the DP likelihood(s) for a given DP position + /*! + If not already done, this function will initialise the DP models + \param [in] m13Sq the invariant mass squared of children 1 and 3 + \param [in] m23Sq the invariant mass squared of children 2 and 3 + \return a container of likelihood values, labelled to indicate to which component they belong + */ + virtual std::map getDPLikelihoods( const Double_t m13Sq, const Double_t m23Sq ); + protected: //! Define a map to be used to store a category name and numbers typedef std::map< TString, std::pair > LauGenInfo; //! 
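	// Illustrative usage sketch for the getDPAmps / getDPLikelihoods accessors
	// declared above, e.g. to histogram the signal DP likelihood over a grid of
	// DP positions. Assumptions (not taken from this header): the returned maps
	// are keyed by a component label (TString) with LauComplex / Double_t values,
	// the signal entry is labelled "signal", and fitModel, hSigLike and the loop
	// bounds are defined by the user:
	//
	//   for ( Double_t m13Sq = m13SqMin; m13Sq < m13SqMax; m13Sq += step ) {
	//       for ( Double_t m23Sq = m23SqMin; m23Sq < m23SqMax; m23Sq += step ) {
	//           auto likes = fitModel->getDPLikelihoods( m13Sq, m23Sq );
	//           auto iter = likes.find( "signal" );
	//           if ( iter != likes.end() ) {
	//               hSigLike->Fill( m13Sq, m23Sq, iter->second );
	//           }
	//       }
	//   }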
Typedef for a vector of background DP models typedef std::vector LauBkgndDPModelList; //! Typedef for a vector of background PDFs typedef std::vector LauBkgndPdfsList; //! Typedef for a vector of background yields typedef std::vector LauBkgndYieldList; //! Typedef for a vector of embedded data objects typedef std::vector LauBkgndEmbDataList; //! Typedef for a vector of booleans to flag if events are reused typedef std::vector LauBkgndReuseEventsList; //! Weight events based on the DP model /*! \param [in] dataFileName the name of the data file \param [in] dataTreeName the name of the data tree */ virtual void weightEvents( const TString& dataFileName, const TString& dataTreeName ); //! Initialise the fit virtual void initialise(); //! Initialise the signal DP model virtual void initialiseDPModels(); //! Recalculate Normalization the signal DP models virtual void recalculateNormalisation(); //! Update the coefficients virtual void updateCoeffs(); //! Toy MC generation and fitting overloaded functions virtual Bool_t genExpt(); //! Calculate things that depend on the fit parameters after they have been updated by Minuit virtual void propagateParUpdates(); //! Read in the input fit data variables, e.g. m13Sq and m23Sq virtual void cacheInputFitVars(); //! Check the initial fit parameters virtual void checkInitFitParams(); //! Get the fit results and store them /*! \param [in] tablePrefixName prefix for the name of the output file */ virtual void finaliseFitResults(const TString& tablePrefixName); //! Print the fit fractions, total DP rate and mean efficiency /*! \param [out] output the stream to which to print */ virtual void printFitFractions(std::ostream& output); //! Write the fit results in latex table format /*! \param [in] outputFile the name of the output file */ virtual void writeOutTable(const TString& outputFile); //! Save the pdf Plots for all the resonances of experiment number fitExp /*! \param [in] label prefix for the file name to be saved */ virtual void savePDFPlots(const TString& label); //! Save the pdf Plots for the sum of ressonances correspondint to "sin" of experiment number fitExp /*! \param [in] label prefix for the file name to be saved \param [in] spin spin of the wave to be saved */ virtual void savePDFPlotsWave(const TString& label, const Int_t& spin); //! Store the per event likelihood values virtual void storePerEvtLlhds(); // Methods to do with calculating the likelihood functions // and manipulating the fitting parameters. //! Get the total likelihood for each event /*! \param [in] iEvt the event number */ virtual Double_t getTotEvtLikelihood(UInt_t iEvt); //! Calculate the signal and background likelihoods for the DP for a given event /*! \param [in] iEvt the event number */ virtual void getEvtDPLikelihood(UInt_t iEvt); //! Calculate the SCF likelihood for the DP for a given event /*! \param [in] iEvt the event number */ virtual Double_t getEvtSCFDPLikelihood(UInt_t iEvt); //! Determine the signal and background likelihood for the extra variables for a given event /*! \param [in] iEvt the event number */ virtual void getEvtExtraLikelihoods(UInt_t iEvt); //! Get the total number of events /*! \return the total number of events */ virtual Double_t getEventSum() const; //! Set the fit parameters for the DP model void setSignalDPParameters(); //! Set the fit parameters for the extra PDFs void setExtraPdfParameters(); //! Set the initial yields void setFitNEvents(); //! Set-up other parameters that are derived from the fit results, e.g. 
fit fractions void setExtraNtupleVars(); //! Randomise the initial fit parameters void randomiseInitFitPars(); //! Define the length of the background vectors virtual void setupBkgndVectors(); //! Determine the number of events to generate for each hypothesis std::pair eventsToGenerate(); //! Generate signal event Bool_t generateSignalEvent(); //! Generate background event /*! \param [in] bkgndID ID number of the background class */ Bool_t generateBkgndEvent(UInt_t bkgndID); //! Setup the required ntuple branches void setupGenNtupleBranches(); //! Store all of the DP information void setDPBranchValues(); //! Generate from the extra PDFs /*! \param [in] extraPdfs the list of extra PDFs \param [in] embeddedData the embedded data sample */ void generateExtraPdfValues(LauPdfList* extraPdfs, LauEmbeddedData* embeddedData); //! Add sPlot branches for the extra PDFs /*! \param [in] extraPdfs the list of extra PDFs \param [in] prefix the list of prefixes for the branch names */ void addSPlotNtupleBranches(const LauPdfList* extraPdfs, const TString& prefix); //! Set the branches for the sPlot ntuple with extra PDFs /*! \param [in] extraPdfs the list of extra PDFs \param [in] prefix the list of prefixes for the branch names \param [in] iEvt the event number */ Double_t setSPlotNtupleBranchValues(LauPdfList* extraPdfs, const TString& prefix, UInt_t iEvt); //! Update the signal events after Minuit sets background parameters void updateSigEvents(); //! Add branches to store experiment number and the event number within the experiment virtual void setupSPlotNtupleBranches(); //! Returns the names of all variables in the fit virtual LauSPlot::NameSet variableNames() const; //! Returns the names and yields of species that are free in the fit virtual LauSPlot::NumbMap freeSpeciesNames() const; //! Returns the names and yields of species that are fixed in the fit virtual LauSPlot::NumbMap fixdSpeciesNames() const; //! Returns the species and variables for all 2D PDFs in the fit virtual LauSPlot::TwoDMap twodimPDFs() const; //! Check if the signal is split into well-reconstructed and mis-reconstructed types virtual Bool_t splitSignal() const {return this->useSCF();} //! Check if the mis-reconstructed signal is to be smeared in the DP virtual Bool_t scfDPSmear() const {return (scfMap_ != 0);} //! Append fake data points to the inputData for each bin in the SCF smearing matrix /*! We'll be caching the DP amplitudes and efficiencies of the centres of the true bins. To do so, we attach some fake points at the end of inputData, the number of the entry minus the total number of events corresponding to the number of the histogram for that given true bin in the LauScfMap object. (What this means is that when Laura is provided with the LauScfMap object by the user, it's the latter who has to make sure that it contains the right number of histograms and in exactly the right order!) \param [in] inputData the fit data */ void appendBinCentres( LauFitDataTree* inputData ); private: //! Copy constructor (not implemented) LauSimpleFitModel(const LauSimpleFitModel& rhs); //! Copy assignment operator (not implemented) LauSimpleFitModel& operator=(const LauSimpleFitModel& rhs); //! The signal Dalitz plot model LauIsobarDynamics* sigDPModel_; //! The background Dalitz Plot model LauBkgndDPModelList bkgndDPModels_; //! The Dalitz plot kinematics object LauKinematics *kinematics_; //! The signal PDFs LauPdfList signalPdfs_; //! The SCF PDFs LauPdfList scfPdfs_; //! The background PDFs LauBkgndPdfsList bkgndPdfs_; //! 
Background boolean Bool_t usingBkgnd_; //! Number of signal components UInt_t nSigComp_; //! Number of signal DP parameters UInt_t nSigDPPar_; //! Number of extra PDF parameters UInt_t nExtraPdfPar_; //! Number of normalisation parameters (i.e. yields) UInt_t nNormPar_; //! Magnitudes and Phases std::vector coeffPars_; //! Fit fractions LauParArray fitFrac_; //! Fit fractions (uncorrected for the efficiency) LauParArray fitFracEffUnCorr_; //! The mean efficiency LauParameter meanEff_; //! The average DP rate LauParameter dpRate_; //! Signal yield LauParameter* signalEvents_; //! Background yield(s) LauBkgndYieldList bkgndEvents_; //! Is the signal split into TM and SCF Bool_t useSCF_; //! Is the SCF fraction DP-dependent Bool_t useSCFHist_; //! The (global) SCF fraction parameter LauParameter scfFrac_; //! The histogram giving the DP-dependence of the SCF fraction LauEffModel* scfFracHist_; //! The smearing matrix for the SCF DP PDF LauScfMap* scfMap_; //! The cached values of the SCF fraction for each event std::vector recoSCFFracs_; //! The cached values of the SCF fraction for each bin centre std::vector fakeSCFFracs_; //! The cached values of the sqDP jacobians for each event std::vector recoJacobians_; //! The cached values of the sqDP jacobians for each true bin std::vector fakeJacobians_; //! Run choice variables Bool_t compareFitData_; //! The complex coefficients std::vector coeffs_; // Embedding full simulation events //! The signal event tree LauEmbeddedData* signalTree_; //! The background event tree LauBkgndEmbDataList bkgndTree_; //! Boolean to reuse signal events Bool_t reuseSignal_; //! Boolean to use reweighting Bool_t useReweighting_; //! Vector of booleans to reuse background events LauBkgndReuseEventsList reuseBkgnd_; //! Signal likelihood value Double_t sigDPLike_; //! SCF likelihood value Double_t scfDPLike_; //! Background likelihood value(s) std::vector bkgndDPLike_; //! Signal likelihood from extra PDFs Double_t sigExtraLike_; //! SCF likelihood from extra PDFs Double_t scfExtraLike_; //! Background likelihood value(s) from extra PDFs std::vector bkgndExtraLike_; //! Total signal likelihood Double_t sigTotalLike_; //! Total SCF likelihood Double_t scfTotalLike_; //! Total background likelihood(s) std::vector bkgndTotalLike_; }; #endif diff --git a/src/LauBkgndDPModel.cc b/src/LauBkgndDPModel.cc index 6cf2ffa..f7a4f48 100644 --- a/src/LauBkgndDPModel.cc +++ b/src/LauBkgndDPModel.cc @@ -1,305 +1,314 @@ /* Copyright 2004 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauBkgndDPModel.cc \brief File containing implementation of LauBkgndDPModel class. 
*/ #include #include #include "TRandom.h" #include "TSystem.h" #include "Lau2DHistDPPdf.hh" #include "Lau2DSplineDPPdf.hh" #include "LauBkgndDPModel.hh" #include "LauDaughters.hh" #include "LauFitDataTree.hh" #include "LauKinematics.hh" #include "LauRandom.hh" #include "LauVetoes.hh" LauBkgndDPModel::LauBkgndDPModel(LauDaughters* daughters, LauVetoes* vetoes) : LauAbsBkgndDPModel(daughters, vetoes), symmetricalDP_(kFALSE), squareDP_(kFALSE), bgHistDPPdf_(0), curEvtHistVal_(0.0), maxPdfHeight_(1.0), pdfNorm_(1.0), doneGenWarning_(kFALSE), lowBinWarningIssued_(kFALSE) { if (daughters != 0) { symmetricalDP_ = daughters->gotSymmetricalDP(); } } LauBkgndDPModel::~LauBkgndDPModel() { if (bgHistDPPdf_ != 0) { delete bgHistDPPdf_; bgHistDPPdf_ = 0; } } void LauBkgndDPModel::setBkgndHisto(const TH2* histo, Bool_t useInterpolation, Bool_t fluctuateBins, Bool_t useUpperHalfOnly, Bool_t squareDP) { if ( ! histo ) { std::cerr << "WARNING in LauBkgndDPModel::setBkgndHisto : Supplied background histogram pointer is null, likelihood for this component will be flat in the Dalitz plot" << std::endl; } Bool_t upperHalf = kFALSE; if (symmetricalDP_ == kTRUE && useUpperHalfOnly == kTRUE) {upperHalf = kTRUE;} std::cout<<"INFO in LauBkgndDPModel::setBkgndHisto : Background histogram has upperHalf = "<(upperHalf)<getKinematics(); const LauVetoes* vetoes = this->getVetoes(); bgHistDPPdf_ = new Lau2DHistDPPdf(histo, kinematics, vetoes, useInterpolation, fluctuateBins, upperHalf, squareDP); maxPdfHeight_ = bgHistDPPdf_->getMaxHeight(); pdfNorm_ = bgHistDPPdf_->getHistNorm(); } void LauBkgndDPModel::setBkgndSpline(const TH2* histo, Bool_t fluctuateBins, Bool_t useUpperHalfOnly, Bool_t squareDP) { if ( ! histo ) { std::cerr << "WARNING in LauBkgndDPModel::setBkgndSpline : Supplied background histogram pointer is null, construction of spline will fail" << std::endl; } Bool_t upperHalf = kFALSE; if (symmetricalDP_ == kTRUE && useUpperHalfOnly == kTRUE) {upperHalf = kTRUE;} std::cout<<"INFO in LauBkgndDPModel::setBkgndSpline : Background histogram has upperHalf = "<(upperHalf)<getKinematics(); const LauVetoes* vetoes = this->getVetoes(); bgHistDPPdf_ = new Lau2DSplineDPPdf(histo, kinematics, vetoes, fluctuateBins, upperHalf, squareDP); maxPdfHeight_ = bgHistDPPdf_->getMaxHeight(); pdfNorm_ = bgHistDPPdf_->getHistNorm(); } Double_t LauBkgndDPModel::calcHistValue(Double_t xVal, Double_t yVal) { // Get the likelihood value of the background in the Dalitz plot. // Check that we have a valid histogram PDF if (bgHistDPPdf_ == 0) { std::cerr << "WARNING in LauBkgndDPModel::calcHistValue : We don't have a histogram so assuming the likelihood is flat in the Dalitz plot." << std::endl; this->setBkgndHisto( 0, kFALSE, kFALSE, kFALSE, kFALSE ); } // Find out the un-normalised PDF value Double_t value = bgHistDPPdf_->interpolateXY(xVal, yVal); // Check that the value is greater than zero // If we're using a spline then negative values can be caused by adjacent bins that all contain a value of zero. // The spline requires the value, its first derivatives and the mixed second derivative to be continuous and to match the input histogram // at the bin centres. Derivatives are calculated using a finite difference approximation taking the difference between the neighbouring bins. // If two bins are zero but the third is not then the second bin will have a positive first derivative causing the spline to dip below zero // between the two zero bins to remain smooth. Such dips are unavoidable but are correctly removed here. 
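	// For example, for three adjacent bins with contents 0, 0 and 4, the spline
	// leaves the middle bin centre with a positive first derivative in order to
	// reach the third bin smoothly, and therefore undershoots below zero somewhere
	// between the two empty bins; the clamp below replaces any such negative
	// interpolated value with zero.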
if ( value < 0.0 ) { if(!lowBinWarningIssued_) { std::cerr << "WARNING in LauBkgndDPModel::calcHistValue : Value " << value << " is less than 0 - setting to 0. You may want to check your histogram!" << std::endl << " : If you are using a spline then this could be caused by adjacent empty bins. Further warnings will be suppressed." << std::endl; lowBinWarningIssued_=kTRUE; } return 0.0; } LauKinematics* kinematics = this->getKinematics(); // For square DP co-ordinates, need to divide by Jacobian if (squareDP_ == kTRUE) { // Make sure that square DP kinematics are correct, then get Jacobian kinematics->updateSqDPKinematics(xVal, yVal); Double_t jacobian = kinematics->calcSqDPJacobian(); value /= jacobian; } return value; } void LauBkgndDPModel::initialise() { } Bool_t LauBkgndDPModel::generate() { // Routine to generate the background, using data provided by an // already defined histogram. LauKinematics* kinematics = this->getKinematics(); Bool_t gotBG(kFALSE); while (gotBG == kFALSE) { if (squareDP_ == kTRUE) { // Generate a point in m', theta' space. By construction, this point // is already within the DP region. Double_t mPrime(0.0), thetaPrime(0.0); kinematics->genFlatSqDP(mPrime, thetaPrime); // If we're in a symmetrical DP then we should only generate events in one half if ( symmetricalDP_ && thetaPrime > 0.5 ) { thetaPrime = 1.0 - thetaPrime; } // Calculate histogram height for DP point and // compare with the maximum height if ( bgHistDPPdf_ != 0 ) { Double_t bgContDP = bgHistDPPdf_->interpolateXY(mPrime, thetaPrime)/maxPdfHeight_; if (LauRandom::randomFun()->Rndm() < bgContDP) { kinematics->updateSqDPKinematics(mPrime, thetaPrime); gotBG = kTRUE; } } else { if ( !doneGenWarning_ ) { std::cerr << "WARNING in LauBkgndDPModel::generate : We don't have a histogram so generating flat in the square DP, which won't be flat in the conventional DP!" << std::endl; std::cerr << "WARNING in LauBkgndDPModel::generate : This should never happen!! What have you done?!" << std::endl; doneGenWarning_ = kTRUE; } kinematics->updateSqDPKinematics(mPrime, thetaPrime); gotBG = kTRUE; } } else { // Generate a point in the Dalitz plot (phase-space). Double_t m13Sq(0.0), m23Sq(0.0); kinematics->genFlatPhaseSpace(m13Sq, m23Sq); // If we're in a symmetrical DP then we should only generate events in one half if ( symmetricalDP_ && m13Sq > m23Sq ) { Double_t tmpSq = m13Sq; m13Sq = m23Sq; m23Sq = tmpSq; } // Calculate histogram height for DP point and // compare with the maximum height if ( bgHistDPPdf_ != 0 ) { Double_t bgContDP = bgHistDPPdf_->interpolateXY(m13Sq, m23Sq)/maxPdfHeight_; if (LauRandom::randomFun()->Rndm() < bgContDP) { kinematics->updateKinematics(m13Sq, m23Sq); gotBG = kTRUE; } } else { if ( !doneGenWarning_ ) { std::cerr << "WARNING in LauBkgndDPModel::generate : We don't have a histogram so generating flat in the DP." << std::endl; doneGenWarning_ = kTRUE; } kinematics->updateKinematics(m13Sq, m23Sq); gotBG = kTRUE; } } } // Implement veto Bool_t vetoOK(kTRUE); const LauVetoes* vetoes = this->getVetoes(); if (vetoes) { vetoOK = vetoes->passVeto(kinematics); } // Call this function recusively until we pass the veto. if (vetoOK == kFALSE) { this->generate(); } return kTRUE; } void LauBkgndDPModel::fillDataTree(const LauFitDataTree& inputFitTree) { // In LauFitDataTree, the first two variables should always be // m13^2 and m23^2. Other variables follow thus: charge. 
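	// In practice the input ROOT tree therefore needs (at least) branches named
	// "m13Sq" and "m23Sq", which are the keys looked up from the LauFitData map
	// in the loop below. Illustrative preparation of such a tree (the file, tree
	// and variable names are hypothetical):
	//
	//   TFile* file = TFile::Open( "data.root", "recreate" );
	//   TTree* tree = new TTree( "fitTree", "fitTree" );
	//   Double_t m13Sq{0.0}, m23Sq{0.0};
	//   tree->Branch( "m13Sq", &m13Sq );
	//   tree->Branch( "m23Sq", &m23Sq );
	//   // ... set m13Sq and m23Sq and call tree->Fill() once per event ...
	//   file->Write();
	//   file->Close();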
Int_t nBranches = inputFitTree.nBranches(); if (nBranches < 2) { std::cerr<<"ERROR in LauBkgndDPModel::fillDataTree : Expecting at least 2 variables "<<"in input data tree, but there are "<getKinematics(); + const UInt_t nEvents { inputFitTree.nEvents() }; // clear and resize the data vector bgData_.clear(); bgData_.resize(nEvents); + Double_t m13Sq { 0.0 }, m23Sq { 0.0 }; + for (UInt_t iEvt = 0; iEvt < nEvents; iEvt++) { const LauFitData& dataValues = inputFitTree.getData(iEvt); - LauFitData::const_iterator iter = dataValues.find("m13Sq"); - m13Sq = iter->second; - iter = dataValues.find("m23Sq"); - m23Sq = iter->second; + m13Sq = dataValues.at("m13Sq"); + m23Sq = dataValues.at("m23Sq"); - // Update the kinematics. This will also update m' and theta' if squareDP = true - kinematics->updateKinematics(m13Sq, m23Sq); + bgData_[iEvt] = this->getUnNormValue( m13Sq, m23Sq ); + } +} - if (squareDP_ == kTRUE) { - mPrime = kinematics->getmPrime(); - thetaPrime = kinematics->getThetaPrime(); - curEvtHistVal_ = this->calcHistValue(mPrime, thetaPrime); - } else { - curEvtHistVal_ = this->calcHistValue(m13Sq, m23Sq); - } +Double_t LauBkgndDPModel::getUnNormValue(const Double_t m13Sq, const Double_t m23Sq) +{ + // Update the kinematics. This will also update m' and theta' if squareDP = true + LauKinematics* kinematics = this->getKinematics(); + kinematics->updateKinematics(m13Sq, m23Sq); + + if (squareDP_) { + const Double_t mPrime { kinematics->getmPrime() }; + const Double_t thetaPrime { kinematics->getThetaPrime() }; + curEvtHistVal_ = this->calcHistValue(mPrime, thetaPrime); + } else { + curEvtHistVal_ = this->calcHistValue(m13Sq, m23Sq); + } + + return curEvtHistVal_; +} - bgData_[iEvt] = curEvtHistVal_; - } +Double_t LauBkgndDPModel::getLikelihood(const Double_t m13Sq, const Double_t m23Sq) +{ + this->getUnNormValue( m13Sq, m23Sq ); + return curEvtHistVal_ / this->getPdfNorm(); } Double_t LauBkgndDPModel::getUnNormValue(UInt_t iEvt) { // Retrieve the likelihood for the given event this->setDataEventNo(iEvt); return curEvtHistVal_; } Double_t LauBkgndDPModel::getLikelihood(UInt_t iEvt) { // Retrieve the likelihood for the given event this->setDataEventNo(iEvt); Double_t llhd = curEvtHistVal_ / this->getPdfNorm(); return llhd; } void LauBkgndDPModel::setDataEventNo(UInt_t iEvt) { // Retrieve the data for event iEvt if (bgData_.size() > iEvt) { curEvtHistVal_ = bgData_[iEvt]; } else { std::cerr<<"ERROR in LauBkgndDPModel::setDataEventNo : Event index too large: "<= "< #include #include #include #include "TVirtualFitter.h" #include "TSystem.h" #include "TMinuit.h" #include "TRandom.h" #include "TFile.h" #include "TTree.h" #include "TBranch.h" #include "TLeaf.h" #include "TMath.h" #include "TH2.h" #include "TGraph2D.h" #include "TGraph.h" #include "TStyle.h" #include "TCanvas.h" #include "LauAbsBkgndDPModel.hh" #include "LauAbsCoeffSet.hh" #include "LauIsobarDynamics.hh" #include "LauAbsPdf.hh" #include "LauAsymmCalc.hh" #include "LauComplex.hh" #include "LauConstants.hh" #include "LauCPFitModel.hh" #include "LauDaughters.hh" #include "LauEffModel.hh" #include "LauEmbeddedData.hh" #include "LauFitNtuple.hh" #include "LauGenNtuple.hh" #include "LauKinematics.hh" #include "LauPrint.hh" #include "LauRandom.hh" #include "LauScfMap.hh" LauCPFitModel::LauCPFitModel(LauIsobarDynamics* negModel, LauIsobarDynamics* posModel, Bool_t tagged, const TString& tagVarName) : LauAbsFitModel(), negSigModel_(negModel), posSigModel_(posModel), negKinematics_(negModel ? 
negModel->getKinematics() : 0), posKinematics_(posModel ? posModel->getKinematics() : 0), usingBkgnd_(kFALSE), nSigComp_(0), nSigDPPar_(0), nExtraPdfPar_(0), nNormPar_(0), negMeanEff_("negMeanEff",0.0,0.0,1.0), posMeanEff_("posMeanEff",0.0,0.0,1.0), negDPRate_("negDPRate",0.0,0.0,100.0), posDPRate_("posDPRate",0.0,0.0,100.0), signalEvents_(0), signalAsym_(0), forceAsym_(kFALSE), tagged_(tagged), tagVarName_(tagVarName), curEvtCharge_(0), useSCF_(kFALSE), useSCFHist_(kFALSE), scfFrac_("scfFrac",0.0,0.0,1.0), scfFracHist_(0), scfMap_(0), compareFitData_(kFALSE), negParent_("B-"), posParent_("B+"), negSignalTree_(0), posSignalTree_(0), reuseSignal_(kFALSE), useNegReweighting_(kFALSE), usePosReweighting_(kFALSE), sigDPLike_(0.0), scfDPLike_(0.0), sigExtraLike_(0.0), scfExtraLike_(0.0), sigTotalLike_(0.0), scfTotalLike_(0.0) { const LauDaughters* negDaug = negSigModel_->getDaughters(); if (negDaug != 0) {negParent_ = negDaug->getNameParent();} const LauDaughters* posDaug = posSigModel_->getDaughters(); if (posDaug != 0) {posParent_ = posDaug->getNameParent();} } LauCPFitModel::~LauCPFitModel() { delete negSignalTree_; delete posSignalTree_; for (LauBkgndEmbDataList::iterator iter = negBkgndTree_.begin(); iter != negBkgndTree_.end(); ++iter) { delete (*iter); } for (LauBkgndEmbDataList::iterator iter = posBkgndTree_.begin(); iter != posBkgndTree_.end(); ++iter) { delete (*iter); } delete scfFracHist_; } void LauCPFitModel::setupBkgndVectors() { UInt_t nBkgnds = this->nBkgndClasses(); negBkgndDPModels_.resize( nBkgnds ); posBkgndDPModels_.resize( nBkgnds ); negBkgndPdfs_.resize( nBkgnds ); posBkgndPdfs_.resize( nBkgnds ); bkgndEvents_.resize( nBkgnds ); bkgndAsym_.resize( nBkgnds ); negBkgndTree_.resize( nBkgnds ); posBkgndTree_.resize( nBkgnds ); reuseBkgnd_.resize( nBkgnds ); bkgndDPLike_.resize( nBkgnds ); bkgndExtraLike_.resize( nBkgnds ); bkgndTotalLike_.resize( nBkgnds ); } void LauCPFitModel::setNSigEvents(LauParameter* nSigEvents) { if ( nSigEvents == 0 ) { std::cerr << "ERROR in LauCPFitModel::setNSigEvents : The LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( signalEvents_ != 0 ) { std::cerr << "ERROR in LauCPFitModel::setNSigEvents : You are trying to overwrite the signal yield." << std::endl; return; } if ( signalAsym_ != 0 ) { std::cerr << "ERROR in LauCPFitModel::setNSigEvents : You are trying to overwrite the signal asymmetry." << std::endl; return; } signalEvents_ = nSigEvents; TString name = signalEvents_->name(); if ( ! name.Contains("signalEvents") && !( name.BeginsWith("signal") && name.EndsWith("Events") ) ) { signalEvents_->name("signalEvents"); } Double_t value = nSigEvents->value(); signalEvents_->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0)); signalAsym_ = new LauParameter("signalAsym",0.0,-1.0,1.0,kTRUE); } void LauCPFitModel::setNSigEvents( LauParameter* nSigEvents, LauParameter* sigAsym, Bool_t forceAsym ) { if ( nSigEvents == 0 ) { std::cerr << "ERROR in LauCPFitModel::setNSigEvents : The event LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( sigAsym == 0 ) { std::cerr << "ERROR in LauCPFitModel::setNSigEvents : The asym LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( signalEvents_ != 0 ) { std::cerr << "ERROR in LauCPFitModel::setNSigEvents : You are trying to overwrite the signal yield." << std::endl; return; } if ( signalAsym_ != 0 ) { std::cerr << "ERROR in LauCPFitModel::setNSigEvents : You are trying to overwrite the signal asymmetry." 
<< std::endl; return; } signalEvents_ = nSigEvents; signalEvents_->name("signalEvents"); Double_t value = nSigEvents->value(); signalEvents_->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0)); signalAsym_ = sigAsym; signalAsym_->name("signalAsym"); signalAsym_->range(-1.0,1.0); forceAsym_ = forceAsym; } void LauCPFitModel::setNBkgndEvents( LauAbsRValue* nBkgndEvents ) { if ( nBkgndEvents == 0 ) { std::cerr << "ERROR in LauCPFitModel::setNBgkndEvents : The background yield LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( ! this->validBkgndClass( nBkgndEvents->name() ) ) { std::cerr << "ERROR in LauCPFitModel::setNBkgndEvents : Invalid background class \"" << nBkgndEvents->name() << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; gSystem->Exit(EXIT_FAILURE); } UInt_t bkgndID = this->bkgndClassID( nBkgndEvents->name() ); if ( bkgndEvents_[bkgndID] != 0 ) { std::cerr << "ERROR in LauCPFitModel::setNBkgndEvents : You are trying to overwrite the background yield." << std::endl; return; } if ( bkgndAsym_[bkgndID] != 0 ) { std::cerr << "ERROR in LauCPFitModel::setNBkgndEvents : You are trying to overwrite the background asymmetry." << std::endl; return; } nBkgndEvents->name( nBkgndEvents->name()+"Events" ); if ( nBkgndEvents->isLValue() ) { Double_t value = nBkgndEvents->value(); LauParameter* yield = dynamic_cast( nBkgndEvents ); yield->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0)); } bkgndEvents_[bkgndID] = nBkgndEvents; bkgndAsym_[bkgndID] = new LauParameter(nBkgndEvents->name()+"Asym",0.0,-1.0,1.0,kTRUE); } void LauCPFitModel::setNBkgndEvents(LauAbsRValue* nBkgndEvents, LauAbsRValue* bkgndAsym) { if ( nBkgndEvents == 0 ) { std::cerr << "ERROR in LauCPFitModel::setNBkgndEvents : The background yield LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( bkgndAsym == 0 ) { std::cerr << "ERROR in LauCPFitModel::setNBkgndEvents : The background asym LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( ! this->validBkgndClass( nBkgndEvents->name() ) ) { std::cerr << "ERROR in LauCPFitModel::setNBkgndEvents : Invalid background class \"" << nBkgndEvents->name() << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; gSystem->Exit(EXIT_FAILURE); } UInt_t bkgndID = this->bkgndClassID( nBkgndEvents->name() ); if ( bkgndEvents_[bkgndID] != 0 ) { std::cerr << "ERROR in LauCPFitModel::setNBkgndEvents : You are trying to overwrite the background yield." << std::endl; return; } if ( bkgndAsym_[bkgndID] != 0 ) { std::cerr << "ERROR in LauCPFitModel::setNBkgndEvents : You are trying to overwrite the background asymmetry." 
<< std::endl; return; } nBkgndEvents->name( nBkgndEvents->name()+"Events" ); if ( nBkgndEvents->isLValue() ) { Double_t value = nBkgndEvents->value(); LauParameter* yield = dynamic_cast( nBkgndEvents ); yield->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0)); } bkgndEvents_[bkgndID] = nBkgndEvents; bkgndAsym->name( nBkgndEvents->name()+"Asym" ); if ( bkgndAsym->isLValue() ) { LauParameter* asym = dynamic_cast( bkgndAsym ); asym->range(-1.0, 1.0); } bkgndAsym_[bkgndID] = bkgndAsym; } void LauCPFitModel::splitSignalComponent( const TH2* dpHisto, const Bool_t upperHalf, const Bool_t fluctuateBins, LauScfMap* scfMap ) { if ( useSCF_ == kTRUE ) { std::cerr << "ERROR in LauCPFitModel::splitSignalComponent : Have already setup SCF." << std::endl; return; } if ( dpHisto == 0 ) { std::cerr << "ERROR in LauCPFitModel::splitSignalComponent : The histogram pointer is null." << std::endl; return; } const LauDaughters* daughters = negSigModel_->getDaughters(); scfFracHist_ = new LauEffModel( daughters, 0 ); scfFracHist_->setEffHisto( dpHisto, kTRUE, fluctuateBins, 0.0, 0.0, upperHalf, daughters->squareDP() ); scfMap_ = scfMap; useSCF_ = kTRUE; useSCFHist_ = kTRUE; } void LauCPFitModel::splitSignalComponent( const Double_t scfFrac, const Bool_t fixed ) { if ( useSCF_ == kTRUE ) { std::cerr << "ERROR in LauCPFitModel::splitSignalComponent : Have already setup SCF." << std::endl; return; } scfFrac_.range( 0.0, 1.0 ); scfFrac_.value( scfFrac ); scfFrac_.initValue( scfFrac ); scfFrac_.genValue( scfFrac ); scfFrac_.fixed( fixed ); useSCF_ = kTRUE; useSCFHist_ = kFALSE; } void LauCPFitModel::setBkgndDPModels(const TString& bkgndClass, LauAbsBkgndDPModel* negModel, LauAbsBkgndDPModel* posModel) { if ((negModel==0) || (posModel==0)) { std::cerr << "ERROR in LauCPFitModel::setBkgndDPModels : One or both of the model pointers is null." << std::endl; return; } // check that this background name is valid if ( ! this->validBkgndClass( bkgndClass) ) { std::cerr << "ERROR in LauCPFitModel::setBkgndDPModel : Invalid background class \"" << bkgndClass << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; return; } UInt_t bkgndID = this->bkgndClassID( bkgndClass ); negBkgndDPModels_[bkgndID] = negModel; posBkgndDPModels_[bkgndID] = posModel; usingBkgnd_ = kTRUE; } void LauCPFitModel::setSignalPdfs(LauAbsPdf* negPdf, LauAbsPdf* posPdf) { if ( tagged_ ) { if (negPdf==0 || posPdf==0) { std::cerr << "ERROR in LauCPFitModel::setSignalPdfs : One or both of the PDF pointers is null." << std::endl; return; } } else { // if we're doing an untagged analysis we will only use the negative PDFs if ( negPdf==0 ) { std::cerr << "ERROR in LauCPFitModel::setSignalPdfs : The negative PDF pointer is null." << std::endl; return; } if ( posPdf!=0 ) { std::cerr << "WARNING in LauCPFitModel::setSignalPdfs : Doing an untagged fit so will not use the positive PDF." << std::endl; } } negSignalPdfs_.push_back(negPdf); posSignalPdfs_.push_back(posPdf); } void LauCPFitModel::setSCFPdfs(LauAbsPdf* negPdf, LauAbsPdf* posPdf) { if ( tagged_ ) { if (negPdf==0 || posPdf==0) { std::cerr << "ERROR in LauCPFitModel::setSCFPdfs : One or both of the PDF pointers is null." << std::endl; return; } } else { // if we're doing an untagged analysis we will only use the negative PDFs if ( negPdf==0 ) { std::cerr << "ERROR in LauCPFitModel::setSCFPdfs : The negative PDF pointer is null." 
<< std::endl; return; } if ( posPdf!=0 ) { std::cerr << "WARNING in LauCPFitModel::setSCFPdfs : Doing an untagged fit so will not use the positive PDF." << std::endl; } } negScfPdfs_.push_back(negPdf); posScfPdfs_.push_back(posPdf); } void LauCPFitModel::setBkgndPdfs(const TString& bkgndClass, LauAbsPdf* negPdf, LauAbsPdf* posPdf) { if ( tagged_ ) { if (negPdf==0 || posPdf==0) { std::cerr << "ERROR in LauCPFitModel::setBkgndPdfs : One or both of the PDF pointers is null." << std::endl; return; } } else { // if we're doing an untagged analysis we will only use the negative PDFs if ( negPdf==0 ) { std::cerr << "ERROR in LauCPFitModel::setBkgndPdfs : The negative PDF pointer is null." << std::endl; return; } if ( posPdf!=0 ) { std::cerr << "WARNING in LauCPFitModel::setBkgndPdfs : Doing an untagged fit so will not use the positive PDF." << std::endl; } } // check that this background name is valid if ( ! this->validBkgndClass( bkgndClass ) ) { std::cerr << "ERROR in LauCPFitModel::setBkgndPdfs : Invalid background class \"" << bkgndClass << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; return; } UInt_t bkgndID = this->bkgndClassID( bkgndClass ); negBkgndPdfs_[bkgndID].push_back(negPdf); posBkgndPdfs_[bkgndID].push_back(posPdf); usingBkgnd_ = kTRUE; } void LauCPFitModel::setAmpCoeffSet(LauAbsCoeffSet* coeffSet) { // Resize the coeffPars vector if not already done if ( coeffPars_.empty() ) { const UInt_t nNegAmp = negSigModel_->getnTotAmp(); const UInt_t nPosAmp = posSigModel_->getnTotAmp(); if ( nNegAmp != nPosAmp ) { std::cerr << "ERROR in LauCPFitModel::setAmpCoeffSet : Unequal number of signal DP components in the negative and positive models: " << nNegAmp << " != " << nPosAmp << std::endl; gSystem->Exit(EXIT_FAILURE); } coeffPars_.resize( nNegAmp ); for (std::vector::iterator iter = coeffPars_.begin(); iter != coeffPars_.end(); ++iter) { (*iter) = 0; } fitFracAsymm_.resize( nNegAmp ); acp_.resize( nNegAmp ); } // Is there a component called compName in the signal model? TString compName(coeffSet->name()); TString conjName = negSigModel_->getConjResName(compName); const Int_t negIndex = negSigModel_->resonanceIndex(compName); const Int_t posIndex = posSigModel_->resonanceIndex(conjName); if ( negIndex < 0 ) { std::cerr << "ERROR in LauCPFitModel::setAmpCoeffSet : " << negParent_ << " signal DP model doesn't contain component \"" << compName << "\"." << std::endl; return; } if ( posIndex < 0 ) { std::cerr << "ERROR in LauCPFitModel::setAmpCoeffSet : " << posParent_ << " signal DP model doesn't contain component \"" << conjName << "\"." << std::endl; return; } if ( posIndex != negIndex ) { std::cerr << "ERROR in LauCPFitModel::setAmpCoeffSet : " << negParent_ << " signal DP model and " << posParent_ << " signal DP model have different indices for components \"" << compName << "\" and \"" << conjName << "\"." << std::endl; return; } // Do we already have it in our list of names? if ( coeffPars_[negIndex] != 0 && coeffPars_[negIndex]->name() == compName) { std::cerr << "ERROR in LauCPFitModel::setAmpCoeffSet : Have already set coefficients for \"" << compName << "\"." 
<< std::endl; return; } coeffSet->index(negIndex); coeffPars_[negIndex] = coeffSet; TString parName = coeffSet->baseName(); parName += "FitFracAsym"; fitFracAsymm_[negIndex] = LauParameter(parName, 0.0, -1.0, 1.0); acp_[negIndex] = coeffSet->acp(); ++nSigComp_; std::cout << "INFO in LauCPFitModel::setAmpCoeffSet : Added coefficients for component \"" << compName << "\" to the fit model." << std::endl; coeffSet->printParValues(); } void LauCPFitModel::initialise() { // Initialisation if (!this->useDP() && negSignalPdfs_.empty()) { std::cerr << "ERROR in LauCPFitModel::initialise : Signal model doesn't exist for any variable." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( this->useDP() ) { // Check that we have all the Dalitz-plot models if ((negSigModel_ == 0) || (posSigModel_ == 0)) { std::cerr << "ERROR in LauCPFitModel::initialise : the pointer to one (neg or pos) of the signal DP models is null.\n"; std::cerr << " : Removing the Dalitz Plot from the model." << std::endl; this->useDP(kFALSE); } if ( usingBkgnd_ ) { if ( negBkgndDPModels_.empty() || posBkgndDPModels_.empty() ) { std::cerr << "ERROR in LauCPFitModel::initialise : No background DP models found.\n"; std::cerr << " : Removing the Dalitz plot from the model." << std::endl; this->useDP(kFALSE); } for (LauBkgndDPModelList::const_iterator dpmodel_iter = negBkgndDPModels_.begin(); dpmodel_iter != negBkgndDPModels_.end(); ++dpmodel_iter ) { if ( (*dpmodel_iter) == 0 ) { std::cerr << "ERROR in LauCPFitModel::initialise : The pointer to one of the background DP models is null.\n"; std::cerr << " : Removing the Dalitz Plot from the model." << std::endl; this->useDP(kFALSE); break; } } for (LauBkgndDPModelList::const_iterator dpmodel_iter = posBkgndDPModels_.begin(); dpmodel_iter != posBkgndDPModels_.end(); ++dpmodel_iter ) { if ( (*dpmodel_iter) == 0 ) { std::cerr << "ERROR in LauCPFitModel::initialise : The pointer to one of the background DP models is null.\n"; std::cerr << " : Removing the Dalitz Plot from the model." << std::endl; this->useDP(kFALSE); break; } } } // Need to check that the number of components we have and that the dynamics has matches up const UInt_t nNegAmp = negSigModel_->getnTotAmp(); const UInt_t nPosAmp = posSigModel_->getnTotAmp(); if ( nNegAmp != nPosAmp ) { std::cerr << "ERROR in LauCPFitModel::initialise : Unequal number of signal DP components in the negative and positive models: " << nNegAmp << " != " << nPosAmp << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( nNegAmp != nSigComp_ ) { std::cerr << "ERROR in LauCPFitModel::initialise : Number of signal DP components in the model (" << nNegAmp << ") not equal to number of coefficients supplied (" << nSigComp_ << ")." 
<< std::endl; gSystem->Exit(EXIT_FAILURE); } if ( !fixParamFileName_.IsNull() || !fixParamMap_.empty() ) { // Set coefficients std::vector params; for ( auto itr = coeffPars_.begin(); itr != coeffPars_.end(); ++itr ) { std::vector p = (*itr)->getParameters(); params.insert(params.end(), p.begin(), p.end()); } this->fixParams(params); // Set resonance parameters (if they exist) negSigModel_->collateResonanceParameters(); posSigModel_->collateResonanceParameters(); this->fixParams(negSigModel_->getFloatingParameters()); this->fixParams(posSigModel_->getFloatingParameters()); } // From the initial parameter values calculate the coefficients // so they can be passed to the signal model this->updateCoeffs(); // If all is well, go ahead and initialise them this->initialiseDPModels(); } // Next check that, if a given component is being used we've got the // right number of PDFs for all the variables involved // TODO - should probably check variable names and so on as well UInt_t nsigpdfvars(0); for ( LauPdfList::const_iterator pdf_iter = negSignalPdfs_.begin(); pdf_iter != negSignalPdfs_.end(); ++pdf_iter ) { std::vector varNames = (*pdf_iter)->varNames(); for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { ++nsigpdfvars; } } } if (useSCF_) { UInt_t nscfpdfvars(0); for ( LauPdfList::const_iterator pdf_iter = negScfPdfs_.begin(); pdf_iter != negScfPdfs_.end(); ++pdf_iter ) { std::vector varNames = (*pdf_iter)->varNames(); for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { ++nscfpdfvars; } } } if (nscfpdfvars != nsigpdfvars) { std::cerr << "ERROR in LauCPFitModel::initialise : There are " << nsigpdfvars << " TM signal PDF variables but " << nscfpdfvars << " SCF signal PDF variables." << std::endl; gSystem->Exit(EXIT_FAILURE); } } if (usingBkgnd_) { for (LauBkgndPdfsList::const_iterator bgclass_iter = negBkgndPdfs_.begin(); bgclass_iter != negBkgndPdfs_.end(); ++bgclass_iter) { UInt_t nbkgndpdfvars(0); const LauPdfList& pdfList = (*bgclass_iter); for ( LauPdfList::const_iterator pdf_iter = pdfList.begin(); pdf_iter != pdfList.end(); ++pdf_iter ) { std::vector varNames = (*pdf_iter)->varNames(); for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { ++nbkgndpdfvars; } } } if (nbkgndpdfvars != nsigpdfvars) { std::cerr << "ERROR in LauCPFitModel::initialise : There are " << nsigpdfvars << " signal PDF variables but " << nbkgndpdfvars << " bkgnd PDF variables." << std::endl; gSystem->Exit(EXIT_FAILURE); } } } // Clear the vectors of parameter information so we can start from scratch this->clearFitParVectors(); // Set the fit parameters for signal and background models this->setSignalDPParameters(); // Set the fit parameters for the various extra PDFs this->setExtraPdfParameters(); // Set the initial bg and signal events this->setFitNEvents(); // Check that we have the expected number of fit variables const LauParameterPList& fitVars = this->fitPars(); if (fitVars.size() != (nSigDPPar_ + nExtraPdfPar_ + nNormPar_)) { std::cerr << "ERROR in LauCPFitModel::initialise : Number of fit parameters not of expected size. 
Exiting" << std::endl; gSystem->Exit(EXIT_FAILURE); } this->setExtraNtupleVars(); } void LauCPFitModel::recalculateNormalisation() { //std::cout << "INFO in LauCPFitModel::recalculateNormalizationInDPModels : Recalc Norm in DP model" << std::endl; negSigModel_->recalculateNormalisation(); posSigModel_->recalculateNormalisation(); negSigModel_->modifyDataTree(); posSigModel_->modifyDataTree(); } void LauCPFitModel::initialiseDPModels() { std::cout << "INFO in LauCPFitModel::initialiseDPModels : Initialising signal DP model" << std::endl; negSigModel_->initialise(negCoeffs_); posSigModel_->initialise(posCoeffs_); if (usingBkgnd_ == kTRUE) { for (LauBkgndDPModelList::iterator iter = negBkgndDPModels_.begin(); iter != negBkgndDPModels_.end(); ++iter) { (*iter)->initialise(); } for (LauBkgndDPModelList::iterator iter = posBkgndDPModels_.begin(); iter != posBkgndDPModels_.end(); ++iter) { (*iter)->initialise(); } } } void LauCPFitModel::setSignalDPParameters() { // Set the fit parameters for the signal model. nSigDPPar_ = 0; if ( ! this->useDP() ) { return; } std::cout << "INFO in LauCPFitModel::setSignalDPParameters : Setting the initial fit parameters for the signal DP model." << std::endl; // Place isobar coefficient parameters in vector of fit variables LauParameterPList& fitVars = this->fitPars(); for (UInt_t i = 0; i < nSigComp_; i++) { LauParameterPList pars = coeffPars_[i]->getParameters(); for (LauParameterPList::iterator iter = pars.begin(); iter != pars.end(); ++iter) { if ( !(*iter)->clone() ) { fitVars.push_back(*iter); ++nSigDPPar_; } } } // Obtain the resonance parameters and place them in the vector of fit variables and in a separate vector // Need to make sure that they are unique because some might appear in both DP models LauParameterPSet& resVars = this->resPars(); resVars.clear(); LauParameterPList& negSigDPPars = negSigModel_->getFloatingParameters(); LauParameterPList& posSigDPPars = posSigModel_->getFloatingParameters(); for ( LauParameterPList::iterator iter = negSigDPPars.begin(); iter != negSigDPPars.end(); ++iter ) { if ( resVars.insert(*iter).second ) { fitVars.push_back(*iter); ++nSigDPPar_; } } for ( LauParameterPList::iterator iter = posSigDPPars.begin(); iter != posSigDPPars.end(); ++iter ) { if ( resVars.insert(*iter).second ) { fitVars.push_back(*iter); ++nSigDPPar_; } } } void LauCPFitModel::setExtraPdfParameters() { // Include all the parameters of the PDF in the fit // NB all of them are passed to the fit, even though some have been fixed through parameter.fixed(kTRUE) // With the new "cloned parameter" scheme only "original" parameters are passed to the fit. // Their clones are updated automatically when the originals are updated. nExtraPdfPar_ = 0; nExtraPdfPar_ += this->addFitParameters(negSignalPdfs_); if ( tagged_ ) { nExtraPdfPar_ += this->addFitParameters(posSignalPdfs_); } if (useSCF_ == kTRUE) { nExtraPdfPar_ += this->addFitParameters(negScfPdfs_); if ( tagged_ ) { nExtraPdfPar_ += this->addFitParameters(posScfPdfs_); } } if (usingBkgnd_ == kTRUE) { for (LauBkgndPdfsList::iterator iter = negBkgndPdfs_.begin(); iter != negBkgndPdfs_.end(); ++iter) { nExtraPdfPar_ += this->addFitParameters(*iter); } if ( tagged_ ) { for (LauBkgndPdfsList::iterator iter = posBkgndPdfs_.begin(); iter != posBkgndPdfs_.end(); ++iter) { nExtraPdfPar_ += this->addFitParameters(*iter); } } } } void LauCPFitModel::setFitNEvents() { if ( signalEvents_ == 0 ) { std::cerr << "ERROR in LauCPFitModel::setFitNEvents : Signal yield not defined." 
<< std::endl; return; } nNormPar_ = 0; // initialise the total number of events to be the sum of all the hypotheses Double_t nTotEvts = signalEvents_->value(); for (LauBkgndYieldList::const_iterator iter = bkgndEvents_.begin(); iter != bkgndEvents_.end(); ++iter) { nTotEvts += (*iter)->value(); if ( (*iter) == 0 ) { std::cerr << "ERROR in LauCPFitModel::setFitNEvents : Background yield not defined." << std::endl; return; } } this->eventsPerExpt(TMath::FloorNint(nTotEvts)); LauParameterPList& fitVars = this->fitPars(); // if doing an extended ML fit add the number of signal events into the fit parameters if (this->doEMLFit()) { std::cout << "INFO in LauCPFitModel::setFitNEvents : Initialising number of events for signal and background components..." << std::endl; // add the signal fraction to the list of fit parameters if(!signalEvents_->fixed()) { fitVars.push_back(signalEvents_); ++nNormPar_; } } else { std::cout << "INFO in LauCPFitModel::setFitNEvents : Initialising number of events for background components (and hence signal)..." << std::endl; } // if not using the DP in the model we need an explicit signal asymmetry parameter if (this->useDP() == kFALSE) { if(!signalAsym_->fixed()) { fitVars.push_back(signalAsym_); ++nNormPar_; } } if (useSCF_ && !useSCFHist_) { if(!scfFrac_.fixed()) { fitVars.push_back(&scfFrac_); ++nNormPar_; } } if (usingBkgnd_ == kTRUE) { for (LauBkgndYieldList::iterator iter = bkgndEvents_.begin(); iter != bkgndEvents_.end(); ++iter) { std::vector parameters = (*iter)->getPars(); for ( LauParameter* parameter : parameters ) { if(!parameter->clone()) { fitVars.push_back(parameter); ++nNormPar_; } } } for (LauBkgndYieldList::iterator iter = bkgndAsym_.begin(); iter != bkgndAsym_.end(); ++iter) { std::vector parameters = (*iter)->getPars(); for ( LauParameter* parameter : parameters ) { if(!parameter->clone()) { fitVars.push_back(parameter); ++nNormPar_; } } } } } void LauCPFitModel::setExtraNtupleVars() { // Set-up other parameters derived from the fit results, e.g. fit fractions. 
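	// The extra variables booked here (and stored in the output ntuple) are, per
	// signal component: the B- and B+ fit fractions, both corrected and
	// uncorrected for the efficiency, the CP-conserving and CP-violating fit
	// fractions, the fit-fraction asymmetry and the ACP, together with any extra
	// parameters exported by the two dynamics objects and the per-model mean
	// efficiencies and DP rates.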
if (this->useDP() != kTRUE) { return; } // First clear the vectors so we start from scratch this->clearExtraVarVectors(); LauParameterList& extraVars = this->extraPars(); // Add the positive and negative fit fractions for each signal component negFitFrac_ = negSigModel_->getFitFractions(); if (negFitFrac_.size() != nSigComp_) { std::cerr << "ERROR in LauCPFitModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: " << negFitFrac_.size() << std::endl; gSystem->Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } posFitFrac_ = posSigModel_->getFitFractions(); if (posFitFrac_.size() != nSigComp_) { std::cerr << "ERROR in LauCPFitModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: " << posFitFrac_.size() << std::endl; gSystem->Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } // Add the positive and negative fit fractions that have not been corrected for the efficiency for each signal component negFitFracEffUnCorr_ = negSigModel_->getFitFractionsEfficiencyUncorrected(); if (negFitFracEffUnCorr_.size() != nSigComp_) { std::cerr << "ERROR in LauCPFitModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: " << negFitFracEffUnCorr_.size() << std::endl; gSystem->Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } posFitFracEffUnCorr_ = posSigModel_->getFitFractionsEfficiencyUncorrected(); if (posFitFracEffUnCorr_.size() != nSigComp_) { std::cerr << "ERROR in LauCPFitModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: " << posFitFracEffUnCorr_.size() << std::endl; gSystem->Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); i negExtraPars = negSigModel_->getExtraParameters(); std::vector::iterator negExtraIter; for (negExtraIter = negExtraPars.begin(); negExtraIter != negExtraPars.end(); ++negExtraIter) { LauParameter negExtraParameter = (*negExtraIter); extraVars.push_back(negExtraParameter); } std::vector posExtraPars = posSigModel_->getExtraParameters(); std::vector::iterator posExtraIter; for (posExtraIter = posExtraPars.begin(); posExtraIter != posExtraPars.end(); ++posExtraIter) { LauParameter posExtraParameter = (*posExtraIter); extraVars.push_back(posExtraParameter); } // Now add in the DP efficiency value Double_t initMeanEff = negSigModel_->getMeanEff().initValue(); negMeanEff_.value(initMeanEff); negMeanEff_.genValue(initMeanEff); negMeanEff_.initValue(initMeanEff); extraVars.push_back(negMeanEff_); initMeanEff = posSigModel_->getMeanEff().initValue(); posMeanEff_.value(initMeanEff); posMeanEff_.genValue(initMeanEff); posMeanEff_.initValue(initMeanEff); extraVars.push_back(posMeanEff_); // Also add in the DP rates Double_t initDPRate = negSigModel_->getDPRate().initValue(); negDPRate_.value(initDPRate); negDPRate_.genValue(initDPRate); negDPRate_.initValue(initDPRate); extraVars.push_back(negDPRate_); initDPRate = posSigModel_->getDPRate().initValue(); posDPRate_.value(initDPRate); posDPRate_.genValue(initDPRate); posDPRate_.initValue(initDPRate); extraVars.push_back(posDPRate_); // Calculate the CPC and CPV Fit Fractions, ACPs and FitFrac asymmetries this->calcExtraFractions(kTRUE); this->calcAsymmetries(kTRUE); // Add the CP violating and CP conserving fit fractions for each signal component for (UInt_t i = 0; i < nSigComp_; i++) { for (UInt_t j = i; j < nSigComp_; j++) { extraVars.push_back(CPVFitFrac_[i][j]); } } for (UInt_t i = 0; i < nSigComp_; i++) { for (UInt_t j = i; j < nSigComp_; j++) { 
extraVars.push_back(CPCFitFrac_[i][j]); } } // Add the Fit Fraction asymmetry for each signal component for (UInt_t i = 0; i < nSigComp_; i++) { extraVars.push_back(fitFracAsymm_[i]); } // Add the calculated CP asymmetry for each signal component for (UInt_t i = 0; i < nSigComp_; i++) { extraVars.push_back(acp_[i]); } } void LauCPFitModel::calcExtraFractions(Bool_t initValues) { // Calculate the CP-conserving and CP-violating fit fractions if (initValues) { // create the structure CPCFitFrac_.clear(); CPVFitFrac_.clear(); CPCFitFrac_.resize(nSigComp_); CPVFitFrac_.resize(nSigComp_); for (UInt_t i(0); iacp(); LauAsymmCalc asymmCalc(negFitFrac_[i][i].value(), posFitFrac_[i][i].value()); Double_t asym = asymmCalc.getAsymmetry(); fitFracAsymm_[i].value(asym); if (initValues) { fitFracAsymm_[i].genValue(asym); fitFracAsymm_[i].initValue(asym); } } } void LauCPFitModel::finaliseFitResults(const TString& tablePrefixName) { // Retrieve parameters from the fit results for calculations and toy generation // and eventually store these in output root ntuples/text files // Now take the fit parameters and update them as necessary // i.e. to make mag > 0.0, phase in the right range. // This function will also calculate any other values, such as the // fit fractions, using any errors provided by fitParErrors as appropriate. // Also obtain the pull values: (measured - generated)/(average error) if (this->useDP() == kTRUE) { for (UInt_t i = 0; i < nSigComp_; ++i) { // Check whether we have "a/b > 0.0", and phases in the right range coeffPars_[i]->finaliseValues(); } } // update the pulls on the event fractions and asymmetries if (this->doEMLFit()) { signalEvents_->updatePull(); } if (this->useDP() == kFALSE) { signalAsym_->updatePull(); } if (useSCF_ && !useSCFHist_) { scfFrac_.updatePull(); } if (usingBkgnd_ == kTRUE) { for (LauBkgndYieldList::iterator iter = bkgndEvents_.begin(); iter != bkgndEvents_.end(); ++iter) { std::vector parameters = (*iter)->getPars(); for ( LauParameter* parameter : parameters ) { parameter->updatePull(); } } for (LauBkgndYieldList::iterator iter = bkgndAsym_.begin(); iter != bkgndAsym_.end(); ++iter) { std::vector parameters = (*iter)->getPars(); for ( LauParameter* parameter : parameters ) { parameter->updatePull(); } } } // Update the pulls on all the extra PDFs' parameters this->updateFitParameters(negSignalPdfs_); this->updateFitParameters(posSignalPdfs_); if (useSCF_ == kTRUE) { this->updateFitParameters(negScfPdfs_); this->updateFitParameters(posScfPdfs_); } if (usingBkgnd_ == kTRUE) { for (LauBkgndPdfsList::iterator iter = negBkgndPdfs_.begin(); iter != negBkgndPdfs_.end(); ++iter) { this->updateFitParameters(*iter); } for (LauBkgndPdfsList::iterator iter = posBkgndPdfs_.begin(); iter != posBkgndPdfs_.end(); ++iter) { this->updateFitParameters(*iter); } } // Fill the fit results to the ntuple // update the coefficients and then calculate the fit fractions and ACP's if (this->useDP() == kTRUE) { this->updateCoeffs(); negSigModel_->updateCoeffs(negCoeffs_); negSigModel_->calcExtraInfo(); posSigModel_->updateCoeffs(posCoeffs_); posSigModel_->calcExtraInfo(); LauParArray negFitFrac = negSigModel_->getFitFractions(); if (negFitFrac.size() != nSigComp_) { std::cerr << "ERROR in LauCPFitModel::finaliseFitResults : Fit Fraction array of unexpected dimension: " << negFitFrac.size() << std::endl; gSystem->Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray posFitFrac = posSigModel_->getFitFractions(); if (posFitFrac.size() != nSigComp_) { std::cerr << 
"ERROR in LauCPFitModel::finaliseFitResults : Fit Fraction array of unexpected dimension: " << posFitFrac.size() << std::endl; gSystem->Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray negFitFracEffUnCorr = negSigModel_->getFitFractionsEfficiencyUncorrected(); if (negFitFracEffUnCorr.size() != nSigComp_) { std::cerr << "ERROR in LauCPFitModel::finaliseFitResults : Fit Fraction array of unexpected dimension: " << negFitFracEffUnCorr.size() << std::endl; gSystem->Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray posFitFracEffUnCorr = posSigModel_->getFitFractionsEfficiencyUncorrected(); if (posFitFracEffUnCorr.size() != nSigComp_) { std::cerr << "ERROR in LauCPFitModel::finaliseFitResults : Fit Fraction array of unexpected dimension: " << posFitFracEffUnCorr.size() << std::endl; gSystem->Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetMeanEff().value()); posMeanEff_.value(posSigModel_->getMeanEff().value()); negDPRate_.value(negSigModel_->getDPRate().value()); posDPRate_.value(posSigModel_->getDPRate().value()); this->calcExtraFractions(); this->calcAsymmetries(); // Then store the final fit parameters, and any extra parameters for // the signal model (e.g. fit fractions, FF asymmetries, ACPs, mean efficiency and DP rate) this->clearExtraVarVectors(); LauParameterList& extraVars = this->extraPars(); // Add the positive and negative fit fractions for each signal component for (UInt_t i(0); i negExtraPars = negSigModel_->getExtraParameters(); std::vector::iterator negExtraIter; for (negExtraIter = negExtraPars.begin(); negExtraIter != negExtraPars.end(); ++negExtraIter) { LauParameter negExtraParameter = (*negExtraIter); extraVars.push_back(negExtraParameter); } std::vector posExtraPars = posSigModel_->getExtraParameters(); std::vector::iterator posExtraIter; for (posExtraIter = posExtraPars.begin(); posExtraIter != posExtraPars.end(); ++posExtraIter) { LauParameter posExtraParameter = (*posExtraIter); extraVars.push_back(posExtraParameter); } extraVars.push_back(negMeanEff_); extraVars.push_back(posMeanEff_); extraVars.push_back(negDPRate_); extraVars.push_back(posDPRate_); for (UInt_t i = 0; i < nSigComp_; i++) { for (UInt_t j(i); jprintFitFractions(std::cout); this->printAsymmetries(std::cout); } const LauParameterPList& fitVars = this->fitPars(); const LauParameterList& extraVars = this->extraPars(); LauFitNtuple* ntuple = this->fitNtuple(); ntuple->storeParsAndErrors(fitVars, extraVars); // find out the correlation matrix for the parameters ntuple->storeCorrMatrix(this->iExpt(), this->fitStatus(), this->covarianceMatrix()); // Fill the data into ntuple ntuple->updateFitNtuple(); // Print out the partial fit fractions, phases and the // averaged efficiency, reweighted by the dynamics (and anything else) if (this->writeLatexTable()) { TString sigOutFileName(tablePrefixName); sigOutFileName += "_"; sigOutFileName += this->iExpt(); sigOutFileName += "Expt.tex"; this->writeOutTable(sigOutFileName); } } void LauCPFitModel::printFitFractions(std::ostream& output) { // Print out Fit Fractions, total DP rate and mean efficiency // First for the B- events for (UInt_t i = 0; i < nSigComp_; i++) { const TString compName(coeffPars_[i]->name()); output << negParent_ << " FitFraction for component " << i << " (" << compName << ") = " << negFitFrac_[i][i] << std::endl; } output << negParent_ << " overall DP rate (integral of matrix element squared) = " << negDPRate_ << std::endl; output << negParent_ << " 
average efficiency weighted by whole DP dynamics = " << negMeanEff_ << std::endl; // Then for the positive sample for (UInt_t i = 0; i < nSigComp_; i++) { const TString compName(coeffPars_[i]->name()); const TString conjName(negSigModel_->getConjResName(compName)); output << posParent_ << " FitFraction for component " << i << " (" << conjName << ") = " << posFitFrac_[i][i] << std::endl; } output << posParent_ << " overall DP rate (integral of matrix element squared) = " << posDPRate_ << std::endl; output << posParent_ << " average efficiency weighted by whole DP dynamics = " << posMeanEff_ << std::endl; } void LauCPFitModel::printAsymmetries(std::ostream& output) { for (UInt_t i = 0; i < nSigComp_; i++) { const TString compName(coeffPars_[i]->name()); output << "Fit Fraction asymmetry for component " << i << " (" << compName << ") = " << fitFracAsymm_[i] << std::endl; } for (UInt_t i = 0; i < nSigComp_; i++) { const TString compName(coeffPars_[i]->name()); output << "ACP for component " << i << " (" << compName << ") = " << acp_[i] << std::endl; } } void LauCPFitModel::writeOutTable(const TString& outputFile) { // Write out the results of the fit to a tex-readable table // TODO - need to include the yields in this table std::ofstream fout(outputFile); LauPrint print; std::cout << "INFO in LauCPFitModel::writeOutTable : Writing out results of the fit to the tex file " << outputFile << std::endl; if (this->useDP() == kTRUE) { // print the fit coefficients in one table coeffPars_.front()->printTableHeading(fout); for (UInt_t i = 0; i < nSigComp_; i++) { coeffPars_[i]->printTableRow(fout); } fout << "\\hline" << std::endl; fout << "\\end{tabular}" << std::endl << std::endl; // print the fit fractions and asymmetries in another fout << "\\begin{tabular}{|l|c|c|c|c|}" << std::endl; fout << "\\hline" << std::endl; fout << "Component & " << negParent_ << " Fit Fraction & " << posParent_ << " Fit Fraction & Fit Fraction Asymmetry & ACP \\\\" << std::endl; fout << "\\hline" << std::endl; Double_t negFitFracSum(0.0); Double_t posFitFracSum(0.0); for (UInt_t i = 0; i < nSigComp_; i++) { TString resName = coeffPars_[i]->name(); resName = resName.ReplaceAll("_", "\\_"); Double_t negFitFrac = negFitFrac_[i][i].value(); Double_t posFitFrac = posFitFrac_[i][i].value(); negFitFracSum += negFitFrac; posFitFracSum += posFitFrac; Double_t fitFracAsymm = fitFracAsymm_[i].value(); Double_t acp = acp_[i].value(); Double_t acpErr = acp_[i].error(); fout << resName << " & $"; print.printFormat(fout, negFitFrac); fout << "$ & $"; print.printFormat(fout, posFitFrac); fout << "$ & $"; print.printFormat(fout, fitFracAsymm); fout << "$ & $"; print.printFormat(fout, acp); fout << " \\pm "; print.printFormat(fout, acpErr); fout << "$ \\\\" << std::endl; } fout << "\\hline" << std::endl; // Also print out sum of fit fractions fout << "Fit Fraction Sum & $"; print.printFormat(fout, negFitFracSum); fout << "$ & $"; print.printFormat(fout, posFitFracSum); fout << "$ & & \\\\" << std::endl; fout << "\\hline" << std::endl; fout << "DP rate & $"; print.printFormat(fout, negDPRate_.value()); fout << "$ & $"; print.printFormat(fout, posDPRate_.value()); fout << "$ & & \\\\" << std::endl; fout << "$< \\varepsilon > $ & $"; print.printFormat(fout, negMeanEff_.value()); fout << "$ & $"; print.printFormat(fout, posMeanEff_.value()); fout << "$ & & \\\\" << std::endl; fout << "\\hline" << std::endl; fout << "\\end{tabular}" << std::endl << std::endl; } if (!negSignalPdfs_.empty()) { fout << "\\begin{tabular}{|l|c|}" << std::endl; fout 
<< "\\hline" << std::endl; if (useSCF_ == kTRUE) { fout << "\\Extra TM Signal PDFs' Parameters: & \\\\" << std::endl; } else { fout << "\\Extra Signal PDFs' Parameters: & \\\\" << std::endl; } this->printFitParameters(negSignalPdfs_, fout); if ( tagged_ ) { this->printFitParameters(posSignalPdfs_, fout); } if (useSCF_ == kTRUE && !negScfPdfs_.empty()) { fout << "\\hline" << std::endl; fout << "\\Extra SCF Signal PDFs' Parameters: & \\\\" << std::endl; this->printFitParameters(negScfPdfs_, fout); if ( tagged_ ) { this->printFitParameters(posScfPdfs_, fout); } } if (usingBkgnd_ == kTRUE && !negBkgndPdfs_.empty()) { fout << "\\hline" << std::endl; fout << "\\Extra Background PDFs' Parameters: & \\\\" << std::endl; for (LauBkgndPdfsList::const_iterator iter = negBkgndPdfs_.begin(); iter != negBkgndPdfs_.end(); ++iter) { this->printFitParameters(*iter, fout); } if ( tagged_ ) { for (LauBkgndPdfsList::const_iterator iter = posBkgndPdfs_.begin(); iter != posBkgndPdfs_.end(); ++iter) { this->printFitParameters(*iter, fout); } } } fout << "\\hline \n\\end{tabular}" << std::endl << std::endl; } } void LauCPFitModel::checkInitFitParams() { // Update the number of signal events to be total-sum(background events) this->updateSigEvents(); // Check whether we want to have randomised initial fit parameters for the signal model if (this->useRandomInitFitPars() == kTRUE) { std::cout << "INFO in LauCPFitModel::checkInitFitParams : Setting random parameters for the signal model" << std::endl; this->randomiseInitFitPars(); } } void LauCPFitModel::randomiseInitFitPars() { // Only randomise those parameters that are not fixed! std::cout << "INFO in LauCPFitModel::randomiseInitFitPars : Randomising the initial fit magnitudes and phases of the components..." << std::endl; if ( fixParamFileName_.IsNull() && fixParamMap_.empty() ) { // No params are imported - randomise as normal for (UInt_t i = 0; i < nSigComp_; i++) { coeffPars_[i]->randomiseInitValues(); } } else { // Only randomise those that are not imported (i.e., not found in allImportedFreeParams_) // by temporarily fixing all imported parameters, and then freeing those not set to be fixed when imported, // except those that are previously set to be fixed anyhow. // Convoluted, but beats changing the behaviour of functions that call checkInitFitParams or the coeffSet // itself. 
for (auto p : allImportedFreeParams_) { p->fixed(kTRUE); } for (UInt_t i = 0; i < nSigComp_; i++) { coeffPars_[i]->randomiseInitValues(); } for (auto p : allImportedFreeParams_) { p->fixed(kFALSE); } } } std::pair LauCPFitModel::eventsToGenerate() { // Determine the number of events to generate for each hypothesis // If we're smearing then smear each one individually LauGenInfo nEvtsGen; // Keep track of whether any yield or asymmetry parameters are blinded Bool_t blind = kFALSE; // Signal if ( signalEvents_->blind() ) { blind = kTRUE; } Double_t evtWeight(1.0); Double_t nEvts = signalEvents_->genValue(); if ( nEvts < 0.0 ) { evtWeight = -1.0; nEvts = TMath::Abs( nEvts ); } Double_t sigAsym(0.0); // need to include this as an alternative in case the DP isn't in the model if ( !this->useDP() || forceAsym_ ) { sigAsym = signalAsym_->genValue(); if ( signalAsym_->blind() ) { blind = kTRUE; } } else { Double_t negRate = negSigModel_->getDPNorm(); Double_t posRate = posSigModel_->getDPNorm(); if (negRate+posRate>1e-30) { sigAsym = (negRate-posRate)/(negRate+posRate); } } Double_t nPosEvts = (nEvts/2.0 * (1.0 - sigAsym)); Double_t nNegEvts = (nEvts/2.0 * (1.0 + sigAsym)); Int_t nPosEvtsToGen { static_cast(nPosEvts) }; Int_t nNegEvtsToGen { static_cast(nNegEvts) }; if (this->doPoissonSmearing()) { nPosEvtsToGen = LauRandom::randomFun()->Poisson(nPosEvts); nNegEvtsToGen = LauRandom::randomFun()->Poisson(nNegEvts); } nEvtsGen[std::make_pair("signal",+1)] = std::make_pair(nPosEvtsToGen,evtWeight); nEvtsGen[std::make_pair("signal",-1)] = std::make_pair(nNegEvtsToGen,evtWeight); // backgrounds const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { const LauAbsRValue* evtsPar = bkgndEvents_[bkgndID]; const LauAbsRValue* asymPar = bkgndAsym_[bkgndID]; if ( evtsPar->blind() || asymPar->blind() ) { blind = kTRUE; } evtWeight = 1.0; nEvts = evtsPar->genValue(); if ( nEvts < 0 ) { evtWeight = -1.0; nEvts = TMath::Abs( nEvts ); } const Double_t asym = asymPar->genValue(); nPosEvts = (nEvts/2.0 * (1.0 - asym)); nNegEvts = (nEvts/2.0 * (1.0 + asym)); nPosEvtsToGen = static_cast(nPosEvts); nNegEvtsToGen = static_cast(nNegEvts); if (this->doPoissonSmearing()) { nPosEvtsToGen = LauRandom::randomFun()->Poisson(nPosEvts); nNegEvtsToGen = LauRandom::randomFun()->Poisson(nNegEvts); } const TString& bkgndClass = this->bkgndClassName(bkgndID); nEvtsGen[std::make_pair(bkgndClass,+1)] = std::make_pair(nPosEvtsToGen,evtWeight); nEvtsGen[std::make_pair(bkgndClass,-1)] = std::make_pair(nNegEvtsToGen,evtWeight); } // Print out the information on what we're generating, but only if none of the parameters are blind (otherwise we risk unblinding them!) if ( !blind ) { std::cout << "INFO in LauCPFitModel::eventsToGenerate : Generating toy MC with:" << std::endl; std::cout << " : Signal asymmetry = " << sigAsym << " and number of signal events = " << signalEvents_->genValue() << std::endl; for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { const TString& bkgndClass = this->bkgndClassName(bkgndID); const LauAbsRValue* evtsPar = bkgndEvents_[bkgndID]; const LauAbsRValue* asymPar = bkgndAsym_[bkgndID]; std::cout << " : " << bkgndClass << " asymmetry = " << asymPar->genValue() << " and number of " << bkgndClass << " events = " << evtsPar->genValue() << std::endl; } } return std::make_pair( nEvtsGen, blind ); } Bool_t LauCPFitModel::genExpt() { // Routine to generate toy Monte Carlo events according to the various models we have defined. 
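	// The per-species, per-charge yields (including any Poisson smearing and asymmetries)
	// come from eventsToGenerate(); each requested event is then generated from the
	// appropriate signal or background model and written to the generation ntuple,
	// breaking out early if any single generation fails.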
// Determine the number of events to generate for each hypothesis std::pair info = this->eventsToGenerate(); LauGenInfo nEvts = info.first; const Bool_t blind = info.second; Bool_t genOK(kTRUE); Int_t evtNum(0); const UInt_t nBkgnds = this->nBkgndClasses(); std::vector bkgndClassNames(nBkgnds); std::vector bkgndClassNamesGen(nBkgnds); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name( this->bkgndClassName(iBkgnd) ); bkgndClassNames[iBkgnd] = name; bkgndClassNamesGen[iBkgnd] = "gen"+name; } const Bool_t storeSCFTruthInfo = ( useSCF_ || ( this->enableEmbedding() && negSignalTree_ != 0 && negSignalTree_->haveBranch("mcMatch") && posSignalTree_ != 0 && posSignalTree_->haveBranch("mcMatch") ) ); // Loop over the hypotheses and generate the requested number of events for each one for (LauGenInfo::const_iterator iter = nEvts.begin(); iter != nEvts.end(); ++iter) { const TString& type(iter->first.first); curEvtCharge_ = iter->first.second; Double_t evtWeight( iter->second.second ); Int_t nEvtsGen( iter->second.first ); for (Int_t iEvt(0); iEvtsetGenNtupleDoubleBranchValue( "evtWeight", evtWeight ); this->setGenNtupleDoubleBranchValue( "efficiency", 1.0 ); if (type == "signal") { this->setGenNtupleIntegerBranchValue("genSig",1); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { this->setGenNtupleIntegerBranchValue( bkgndClassNamesGen[iBkgnd], 0 ); } genOK = this->generateSignalEvent(); if ( curEvtCharge_ > 0 ){ this->setGenNtupleDoubleBranchValue( "efficiency", posSigModel_->getEvtEff() ); } else { this->setGenNtupleDoubleBranchValue( "efficiency", negSigModel_->getEvtEff() ); } } else { this->setGenNtupleIntegerBranchValue("genSig",0); if ( storeSCFTruthInfo ) { this->setGenNtupleIntegerBranchValue("genTMSig",0); this->setGenNtupleIntegerBranchValue("genSCFSig",0); } UInt_t bkgndID(0); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { Int_t gen(0); if ( bkgndClassNames[iBkgnd] == type ) { gen = 1; bkgndID = iBkgnd; } this->setGenNtupleIntegerBranchValue( bkgndClassNamesGen[iBkgnd], gen ); } genOK = this->generateBkgndEvent(bkgndID); } if (!genOK) { // If there was a problem with the generation then break out and return. // The problem model will have adjusted itself so that all should be OK next time. break; } if (this->useDP() == kTRUE) { this->setDPBranchValues(); } // Store the event charge this->setGenNtupleIntegerBranchValue(tagVarName_,curEvtCharge_); // Store the event number (within this experiment) // and then increment it this->setGenNtupleIntegerBranchValue("iEvtWithinExpt",evtNum); ++evtNum; this->fillGenNtupleBranches(); if ( !blind && (iEvt%500 == 0) ) { std::cout << "INFO in LauCPFitModel::genExpt : Generated event number " << iEvt << " out of " << nEvtsGen << " " << type << " events." 
<< std::endl; } } if (!genOK) { break; } } if (this->useDP() && genOK) { negSigModel_->checkToyMC(kTRUE,kTRUE); posSigModel_->checkToyMC(kTRUE,kTRUE); // Get the fit fractions if they're to be written into the latex table if (this->writeLatexTable()) { LauParArray negFitFrac = negSigModel_->getFitFractions(); if (negFitFrac.size() != nSigComp_) { std::cerr << "ERROR in LauCPFitModel::genExpt : Fit Fraction array of unexpected dimension: " << negFitFrac.size() << std::endl; gSystem->Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray posFitFrac = posSigModel_->getFitFractions(); if (posFitFrac.size() != nSigComp_) { std::cerr << "ERROR in LauCPFitModel::genExpt : Fit Fraction array of unexpected dimension: " << posFitFrac.size() << std::endl; gSystem->Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetMeanEff().value()); posMeanEff_.value(posSigModel_->getMeanEff().value()); negDPRate_.value(negSigModel_->getDPRate().value()); posDPRate_.value(posSigModel_->getDPRate().value()); } } // If we're reusing embedded events or if the generation is being // reset then clear the lists of used events if (reuseSignal_ || !genOK) { if (negSignalTree_) { negSignalTree_->clearUsedList(); } if (posSignalTree_) { posSignalTree_->clearUsedList(); } } for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { LauEmbeddedData* data = negBkgndTree_[bkgndID]; if (reuseBkgnd_[bkgndID] || !genOK) { if (data) { data->clearUsedList(); } } } for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { LauEmbeddedData* data = posBkgndTree_[bkgndID]; if (reuseBkgnd_[bkgndID] || !genOK) { if (data) { data->clearUsedList(); } } } return genOK; } Bool_t LauCPFitModel::generateSignalEvent() { // Generate signal event Bool_t genOK(kTRUE); Bool_t genSCF(kFALSE); LauIsobarDynamics* model(0); LauKinematics* kinematics(0); LauEmbeddedData* embeddedData(0); LauPdfList* sigPdfs(0); LauPdfList* scfPdfs(0); Bool_t doReweighting(kFALSE); if (curEvtCharge_<0) { model = negSigModel_; kinematics = negKinematics_; sigPdfs = &negSignalPdfs_; scfPdfs = &negScfPdfs_; if (this->enableEmbedding()) { embeddedData = negSignalTree_; doReweighting = useNegReweighting_; } } else { model = posSigModel_; kinematics = posKinematics_; if ( tagged_ ) { sigPdfs = &posSignalPdfs_; scfPdfs = &posScfPdfs_; } else { sigPdfs = &negSignalPdfs_; scfPdfs = &negScfPdfs_; } if (this->enableEmbedding()) { embeddedData = posSignalTree_; doReweighting = usePosReweighting_; } } if (this->useDP()) { if (embeddedData) { if (doReweighting) { // Select a (random) event from the generated data. Then store the // reconstructed DP co-ords, together with other pdf information, // as the embedded data. 
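				// If reweighting is enabled the current signal dynamics model is passed to the
				// embedded data so that the returned event is reweighted according to that model;
				// otherwise (else branch below) the stored kinematics are used unchanged.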
genOK = embeddedData->getReweightedEvent(model); } else { // Just get the information of a (randomly) selected event in the // embedded data embeddedData->getEmbeddedEvent(kinematics); } genSCF = this->storeSignalMCMatch( embeddedData ); } else { genOK = model->generate(); if ( genOK && useSCF_ ) { Double_t frac(0.0); if ( useSCFHist_ ) { frac = scfFracHist_->calcEfficiency( kinematics ); } else { frac = scfFrac_.genValue(); } if ( frac < LauRandom::randomFun()->Rndm() ) { this->setGenNtupleIntegerBranchValue("genTMSig",1); this->setGenNtupleIntegerBranchValue("genSCFSig",0); genSCF = kFALSE; } else { this->setGenNtupleIntegerBranchValue("genTMSig",0); this->setGenNtupleIntegerBranchValue("genSCFSig",1); genSCF = kTRUE; // Optionally smear the DP position // of the SCF event if ( scfMap_ != 0 ) { Double_t xCoord(0.0), yCoord(0.0); if ( kinematics->squareDP() ) { xCoord = kinematics->getmPrime(); yCoord = kinematics->getThetaPrime(); } else { xCoord = kinematics->getm13Sq(); yCoord = kinematics->getm23Sq(); } // Find the bin number where this event is generated Int_t binNo = scfMap_->binNumber( xCoord, yCoord ); // Retrieve the migration histogram TH2* histo = scfMap_->trueHist( binNo ); const LauAbsEffModel * effModel = model->getEffModel(); do { // Get a random point from the histogram histo->GetRandom2( xCoord, yCoord ); // Update the kinematics if ( kinematics->squareDP() ) { kinematics->updateSqDPKinematics( xCoord, yCoord ); } else { kinematics->updateKinematics( xCoord, yCoord ); } } while ( ! effModel->passVeto( kinematics ) ); } } } } } else { if (embeddedData) { embeddedData->getEmbeddedEvent(0); genSCF = this->storeSignalMCMatch( embeddedData ); } else if ( useSCF_ ) { Double_t frac = scfFrac_.genValue(); if ( frac < LauRandom::randomFun()->Rndm() ) { this->setGenNtupleIntegerBranchValue("genTMSig",1); this->setGenNtupleIntegerBranchValue("genSCFSig",0); genSCF = kFALSE; } else { this->setGenNtupleIntegerBranchValue("genTMSig",0); this->setGenNtupleIntegerBranchValue("genSCFSig",1); genSCF = kTRUE; } } } if (genOK) { if ( useSCF_ ) { if ( genSCF ) { this->generateExtraPdfValues(scfPdfs, embeddedData); } else { this->generateExtraPdfValues(sigPdfs, embeddedData); } } else { this->generateExtraPdfValues(sigPdfs, embeddedData); } } // Check for problems with the embedding if (embeddedData && (embeddedData->nEvents() == embeddedData->nUsedEvents())) { std::cerr << "WARNING in LauCPFitModel::generateSignalEvent : Source of embedded signal events used up, clearing the list of used events." 
<< std::endl; embeddedData->clearUsedList(); } return genOK; } Bool_t LauCPFitModel::generateBkgndEvent(UInt_t bkgndID) { // Generate Bkgnd event Bool_t genOK(kTRUE); LauAbsBkgndDPModel* model(0); LauEmbeddedData* embeddedData(0); LauPdfList* extraPdfs(0); LauKinematics* kinematics(0); if (curEvtCharge_<0) { model = negBkgndDPModels_[bkgndID]; if (this->enableEmbedding()) { embeddedData = negBkgndTree_[bkgndID]; } extraPdfs = &negBkgndPdfs_[bkgndID]; kinematics = negKinematics_; } else { model = posBkgndDPModels_[bkgndID]; if (this->enableEmbedding()) { embeddedData = posBkgndTree_[bkgndID]; } if ( tagged_ ) { extraPdfs = &posBkgndPdfs_[bkgndID]; } else { extraPdfs = &negBkgndPdfs_[bkgndID]; } kinematics = posKinematics_; } if (this->useDP()) { if (embeddedData) { embeddedData->getEmbeddedEvent(kinematics); } else { if (model == 0) { const TString& bkgndClass = this->bkgndClassName(bkgndID); std::cerr << "ERROR in LauCPFitModel::generateBkgndEvent : Can't find the DP model for background class \"" << bkgndClass << "\"." << std::endl; gSystem->Exit(EXIT_FAILURE); } genOK = model->generate(); } } else { if (embeddedData) { embeddedData->getEmbeddedEvent(0); } } if (genOK) { this->generateExtraPdfValues(extraPdfs, embeddedData); } // Check for problems with the embedding if (embeddedData && (embeddedData->nEvents() == embeddedData->nUsedEvents())) { const TString& bkgndClass = this->bkgndClassName(bkgndID); std::cerr << "WARNING in LauCPFitModel::generateBkgndEvent : Source of embedded " << bkgndClass << " events used up, clearing the list of used events." << std::endl; embeddedData->clearUsedList(); } return genOK; } void LauCPFitModel::setupGenNtupleBranches() { // Setup the required ntuple branches this->addGenNtupleDoubleBranch("evtWeight"); this->addGenNtupleIntegerBranch("genSig"); this->addGenNtupleDoubleBranch("efficiency"); if ( useSCF_ || ( this->enableEmbedding() && negSignalTree_ != 0 && negSignalTree_->haveBranch("mcMatch") && posSignalTree_ != 0 && posSignalTree_->haveBranch("mcMatch") ) ) { this->addGenNtupleIntegerBranch("genTMSig"); this->addGenNtupleIntegerBranch("genSCFSig"); } const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name( this->bkgndClassName(iBkgnd) ); name.Prepend("gen"); this->addGenNtupleIntegerBranch(name); } this->addGenNtupleIntegerBranch("charge"); if (this->useDP() == kTRUE) { this->addGenNtupleDoubleBranch("m12"); this->addGenNtupleDoubleBranch("m23"); this->addGenNtupleDoubleBranch("m13"); this->addGenNtupleDoubleBranch("m12Sq"); this->addGenNtupleDoubleBranch("m23Sq"); this->addGenNtupleDoubleBranch("m13Sq"); this->addGenNtupleDoubleBranch("cosHel12"); this->addGenNtupleDoubleBranch("cosHel23"); this->addGenNtupleDoubleBranch("cosHel13"); if (negKinematics_->squareDP() && posKinematics_->squareDP()) { this->addGenNtupleDoubleBranch("mPrime"); this->addGenNtupleDoubleBranch("thPrime"); } } for (LauPdfList::const_iterator pdf_iter = negSignalPdfs_.begin(); pdf_iter != negSignalPdfs_.end(); ++pdf_iter) { std::vector varNames = (*pdf_iter)->varNames(); for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { this->addGenNtupleDoubleBranch( (*var_iter) ); } } } } void LauCPFitModel::setDPBranchValues() { LauKinematics* kinematics(0); if (curEvtCharge_<0) { kinematics = negKinematics_; } else { kinematics = posKinematics_; } // Store all the DP information 
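	// The branches filled here mirror those booked in setupGenNtupleBranches, with the
	// square Dalitz plot coordinates (mPrime, thPrime) only filled when the square DP
	// is enabled for the kinematics corresponding to the current event's charge.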
this->setGenNtupleDoubleBranchValue("m12", kinematics->getm12()); this->setGenNtupleDoubleBranchValue("m23", kinematics->getm23()); this->setGenNtupleDoubleBranchValue("m13", kinematics->getm13()); this->setGenNtupleDoubleBranchValue("m12Sq", kinematics->getm12Sq()); this->setGenNtupleDoubleBranchValue("m23Sq", kinematics->getm23Sq()); this->setGenNtupleDoubleBranchValue("m13Sq", kinematics->getm13Sq()); this->setGenNtupleDoubleBranchValue("cosHel12", kinematics->getc12()); this->setGenNtupleDoubleBranchValue("cosHel23", kinematics->getc23()); this->setGenNtupleDoubleBranchValue("cosHel13", kinematics->getc13()); if (kinematics->squareDP()) { this->setGenNtupleDoubleBranchValue("mPrime", kinematics->getmPrime()); this->setGenNtupleDoubleBranchValue("thPrime", kinematics->getThetaPrime()); } } void LauCPFitModel::generateExtraPdfValues(LauPdfList* extraPdfs, LauEmbeddedData* embeddedData) { LauKinematics* kinematics(0); if (curEvtCharge_<0) { kinematics = negKinematics_; } else { kinematics = posKinematics_; } if (!extraPdfs) { std::cerr << "ERROR in LauCPFitModel::generateExtraPdfValues : Null pointer to PDF list." << std::endl; gSystem->Exit(EXIT_FAILURE); } if (extraPdfs->empty()) { //std::cerr << "WARNING in LauCPFitModel::generateExtraPdfValues : PDF list is empty." << std::endl; return; } // Generate from the extra PDFs for (LauPdfList::iterator pdf_iter = extraPdfs->begin(); pdf_iter != extraPdfs->end(); ++pdf_iter) { LauFitData genValues; if (embeddedData) { genValues = embeddedData->getValues( (*pdf_iter)->varNames() ); } else { genValues = (*pdf_iter)->generate(kinematics); } for ( LauFitData::const_iterator var_iter = genValues.begin(); var_iter != genValues.end(); ++var_iter ) { TString varName = var_iter->first; if ( varName != "m13Sq" && varName != "m23Sq" ) { Double_t value = var_iter->second; this->setGenNtupleDoubleBranchValue(varName,value); } } } } Bool_t LauCPFitModel::storeSignalMCMatch(LauEmbeddedData* embeddedData) { // Default to TM Bool_t genSCF(kFALSE); Int_t match(1); // Check that we have a valid pointer and that embedded data has // the mcMatch branch. If so then get the match value. if ( embeddedData && embeddedData->haveBranch("mcMatch") ) { match = TMath::Nint( embeddedData->getValue("mcMatch") ); } // Set the variables accordingly. if (match) { this->setGenNtupleIntegerBranchValue("genTMSig",1); this->setGenNtupleIntegerBranchValue("genSCFSig",0); genSCF = kFALSE; } else { this->setGenNtupleIntegerBranchValue("genTMSig",0); this->setGenNtupleIntegerBranchValue("genSCFSig",1); genSCF = kTRUE; } return genSCF; } void LauCPFitModel::propagateParUpdates() { // Update the signal parameters and then the total normalisation for the signal likelihood if (this->useDP() == kTRUE) { this->updateCoeffs(); negSigModel_->updateCoeffs(negCoeffs_); posSigModel_->updateCoeffs(posCoeffs_); } // Update the signal fraction from the background fractions if not doing an extended fit if ( !this->doEMLFit() && !signalEvents_->fixed() ) { this->updateSigEvents(); } } void LauCPFitModel::updateSigEvents() { // The background parameters will have been set from Minuit. // We need to update the signal events using these. 
Double_t nTotEvts = this->eventsPerExpt(); signalEvents_->range(-2.0*nTotEvts,2.0*nTotEvts); for (LauBkgndYieldList::iterator iter = bkgndEvents_.begin(); iter != bkgndEvents_.end(); ++iter) { LauAbsRValue* nBkgndEvents = (*iter); if ( nBkgndEvents->isLValue() ) { LauParameter* yield = dynamic_cast( nBkgndEvents ); yield->range(-2.0*nTotEvts,2.0*nTotEvts); } } if (signalEvents_->fixed()) { return; } // Subtract background events (if any) from signal. Double_t signalEvents = nTotEvts; if (usingBkgnd_ == kTRUE) { for (LauBkgndYieldList::const_iterator iter = bkgndEvents_.begin(); iter != bkgndEvents_.end(); ++iter) { signalEvents -= (*iter)->value(); } } signalEvents_->value(signalEvents); } void LauCPFitModel::cacheInputFitVars() { // Fill the internal data trees of the signal and background models. // Note that we store the events of both charges in both the // negative and the positive models. It's only later, at the stage // when the likelihood is being calculated, that we separate them. LauFitDataTree* inputFitData = this->fitData(); // First the Dalitz plot variables (m_ij^2) if (this->useDP() == kTRUE) { // need to append SCF smearing bins before caching DP amplitudes if ( scfMap_ != 0 ) { this->appendBinCentres( inputFitData ); } negSigModel_->fillDataTree(*inputFitData); posSigModel_->fillDataTree(*inputFitData); if (usingBkgnd_ == kTRUE) { for (LauBkgndDPModelList::iterator iter = negBkgndDPModels_.begin(); iter != negBkgndDPModels_.end(); ++iter) { (*iter)->fillDataTree(*inputFitData); } for (LauBkgndDPModelList::iterator iter = posBkgndDPModels_.begin(); iter != posBkgndDPModels_.end(); ++iter) { (*iter)->fillDataTree(*inputFitData); } } } // ...and then the extra PDFs this->cacheInfo(negSignalPdfs_, *inputFitData); this->cacheInfo(negScfPdfs_, *inputFitData); for (LauBkgndPdfsList::iterator iter = negBkgndPdfs_.begin(); iter != negBkgndPdfs_.end(); ++iter) { this->cacheInfo((*iter), *inputFitData); } if ( tagged_ ) { this->cacheInfo(posSignalPdfs_, *inputFitData); this->cacheInfo(posScfPdfs_, *inputFitData); for (LauBkgndPdfsList::iterator iter = posBkgndPdfs_.begin(); iter != posBkgndPdfs_.end(); ++iter) { this->cacheInfo((*iter), *inputFitData); } } // the SCF fractions and jacobians if ( useSCF_ && useSCFHist_ ) { if ( !inputFitData->haveBranch( "m13Sq" ) || !inputFitData->haveBranch( "m23Sq" ) ) { std::cerr << "ERROR in LauCPFitModel::cacheInputFitVars : Input data does not contain DP branches and so can't cache the SCF fraction." << std::endl; gSystem->Exit(EXIT_FAILURE); } UInt_t nEvents = inputFitData->nEvents(); recoSCFFracs_.clear(); recoSCFFracs_.reserve( nEvents ); if ( negKinematics_->squareDP() ) { recoJacobians_.clear(); recoJacobians_.reserve( nEvents ); } for (UInt_t iEvt = 0; iEvt < nEvents; iEvt++) { const LauFitData& dataValues = inputFitData->getData(iEvt); LauFitData::const_iterator m13_iter = dataValues.find("m13Sq"); LauFitData::const_iterator m23_iter = dataValues.find("m23Sq"); negKinematics_->updateKinematics( m13_iter->second, m23_iter->second ); Double_t scfFrac = scfFracHist_->calcEfficiency( negKinematics_ ); recoSCFFracs_.push_back( scfFrac ); if ( negKinematics_->squareDP() ) { recoJacobians_.push_back( negKinematics_->calcSqDPJacobian() ); } } } // finally cache the event charge evtCharges_.clear(); if ( tagged_ ) { if ( !inputFitData->haveBranch( tagVarName_ ) ) { std::cerr << "ERROR in LauCPFitModel::cacheInputFitVars : Input data does not contain branch \"" << tagVarName_ << "\"." 
<< std::endl; gSystem->Exit(EXIT_FAILURE); } UInt_t nEvents = inputFitData->nEvents(); evtCharges_.reserve( nEvents ); for (UInt_t iEvt = 0; iEvt < nEvents; iEvt++) { const LauFitData& dataValues = inputFitData->getData(iEvt); LauFitData::const_iterator iter = dataValues.find( tagVarName_ ); curEvtCharge_ = static_cast( iter->second ); evtCharges_.push_back( curEvtCharge_ ); } } } void LauCPFitModel::appendBinCentres( LauFitDataTree* inputData ) { // We'll be caching the DP amplitudes and efficiencies of the centres of the true bins. // To do so, we attach some fake points at the end of inputData, the number of the entry // minus the total number of events corresponding to the number of the histogram for that // given true bin in the LauScfMap object. (What this means is that when Laura is provided with // the LauScfMap object by the user, it's the latter who has to make sure that it contains the // right number of histograms and in exactly the right order!) // Get the x and y co-ordinates of the bin centres std::vector binCentresXCoords; std::vector binCentresYCoords; scfMap_->listBinCentres(binCentresXCoords, binCentresYCoords); // The SCF histograms could be in square Dalitz plot histograms. // The dynamics takes normal Dalitz plot coords, so we might have to convert them back. Bool_t sqDP = negKinematics_->squareDP(); UInt_t nBins = binCentresXCoords.size(); fakeSCFFracs_.clear(); fakeSCFFracs_.reserve( nBins ); if ( sqDP ) { fakeJacobians_.clear(); fakeJacobians_.reserve( nBins ); } for (UInt_t iBin = 0; iBin < nBins; ++iBin) { if ( sqDP ) { negKinematics_->updateSqDPKinematics(binCentresXCoords[iBin],binCentresYCoords[iBin]); binCentresXCoords[iBin] = negKinematics_->getm13Sq(); binCentresYCoords[iBin] = negKinematics_->getm23Sq(); fakeJacobians_.push_back( negKinematics_->calcSqDPJacobian() ); } else { negKinematics_->updateKinematics(binCentresXCoords[iBin],binCentresYCoords[iBin]); } fakeSCFFracs_.push_back( scfFracHist_->calcEfficiency( negKinematics_ ) ); } // Set up inputFitVars_ object to hold the fake events inputData->appendFakePoints(binCentresXCoords,binCentresYCoords); } Double_t LauCPFitModel::getTotEvtLikelihood(UInt_t iEvt) { // Find out whether we have B- or B+ if ( tagged_ ) { curEvtCharge_ = evtCharges_[iEvt]; // check that the charge is either +1 or -1 if (TMath::Abs(curEvtCharge_)!=1) { std::cerr << "ERROR in LauCPFitModel::getTotEvtLikelihood : Charge/tag not accepted value: " << curEvtCharge_ << std::endl; if (curEvtCharge_>0) { curEvtCharge_ = +1; } else { curEvtCharge_ = -1; } std::cerr << " : Making it: " << curEvtCharge_ << "." << std::endl; } } // Get the DP likelihood for signal and backgrounds this->getEvtDPLikelihood(iEvt); // Get the combined extra PDFs likelihood for signal and backgrounds this->getEvtExtraLikelihoods(iEvt); // If appropriate, combine the TM and SCF likelihoods Double_t sigLike = sigDPLike_ * sigExtraLike_; if ( useSCF_ ) { Double_t scfFrac(0.0); if (useSCFHist_) { scfFrac = recoSCFFracs_[iEvt]; } else { scfFrac = scfFrac_.unblindValue(); } sigLike *= (1.0 - scfFrac); if ( (scfMap_ != 0) && (this->useDP() == kTRUE) ) { // if we're smearing the SCF DP PDF then the SCF frac // is already included in the SCF DP likelihood sigLike += (scfDPLike_ * scfExtraLike_); } else { sigLike += (scfFrac * scfDPLike_ * scfExtraLike_); } } // Get the correct event fractions depending on the charge // Signal asymmetry is built into the DP model... 
but when the DP // isn't in the fit we need an explicit parameter Double_t signalEvents = signalEvents_->unblindValue() * 0.5; if (this->useDP() == kFALSE) { signalEvents *= (1.0 - curEvtCharge_ * signalAsym_->unblindValue()); } // Construct the total event likelihood Double_t likelihood(0.0); if (usingBkgnd_) { likelihood = sigLike*signalEvents; const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { Double_t bkgndEvents = bkgndEvents_[bkgndID]->unblindValue() * 0.5 * (1.0 - curEvtCharge_ * bkgndAsym_[bkgndID]->unblindValue()); likelihood += bkgndEvents*bkgndDPLike_[bkgndID]*bkgndExtraLike_[bkgndID]; } } else { likelihood = sigLike*0.5; } return likelihood; } Double_t LauCPFitModel::getEventSum() const { Double_t eventSum(0.0); eventSum += signalEvents_->unblindValue(); if (usingBkgnd_) { for (LauBkgndYieldList::const_iterator iter = bkgndEvents_.begin(); iter != bkgndEvents_.end(); ++iter) { eventSum += (*iter)->unblindValue(); } } return eventSum; } void LauCPFitModel::getEvtDPLikelihood(UInt_t iEvt) { // Function to return the signal and background likelihoods for the // Dalitz plot for the given event evtNo. if ( ! this->useDP() ) { // There's always going to be a term in the likelihood for the // signal, so we'd better not zero it. sigDPLike_ = 1.0; scfDPLike_ = 1.0; const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { if (usingBkgnd_ == kTRUE) { bkgndDPLike_[bkgndID] = 1.0; } else { bkgndDPLike_[bkgndID] = 0.0; } } return; } const UInt_t nBkgnds = this->nBkgndClasses(); if ( tagged_ ) { if (curEvtCharge_==+1) { posSigModel_->calcLikelihoodInfo(iEvt); sigDPLike_ = posSigModel_->getEvtIntensity(); for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { if (usingBkgnd_ == kTRUE) { bkgndDPLike_[bkgndID] = posBkgndDPModels_[bkgndID]->getLikelihood(iEvt); } else { bkgndDPLike_[bkgndID] = 0.0; } } } else { negSigModel_->calcLikelihoodInfo(iEvt); sigDPLike_ = negSigModel_->getEvtIntensity(); for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { if (usingBkgnd_ == kTRUE) { bkgndDPLike_[bkgndID] = negBkgndDPModels_[bkgndID]->getLikelihood(iEvt); } else { bkgndDPLike_[bkgndID] = 0.0; } } } } else { posSigModel_->calcLikelihoodInfo(iEvt); negSigModel_->calcLikelihoodInfo(iEvt); sigDPLike_ = 0.5 * ( posSigModel_->getEvtIntensity() + negSigModel_->getEvtIntensity() ); for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { if (usingBkgnd_ == kTRUE) { bkgndDPLike_[bkgndID] = 0.5 * ( posBkgndDPModels_[bkgndID]->getLikelihood(iEvt) + negBkgndDPModels_[bkgndID]->getLikelihood(iEvt) ); } else { bkgndDPLike_[bkgndID] = 0.0; } } } if ( useSCF_ == kTRUE ) { if ( scfMap_ == 0 ) { // we're not smearing the SCF DP position // so the likelihood is the same as the TM scfDPLike_ = sigDPLike_; } else { // calculate the smeared SCF DP likelihood scfDPLike_ = this->getEvtSCFDPLikelihood(iEvt); } } // Calculate the signal normalisation // NB the 2.0 is there so that the 0.5 factor is applied to // signal and background in the same place otherwise you get // normalisation problems when you switch off the DP in the fit Double_t norm = negSigModel_->getDPNorm() + posSigModel_->getDPNorm(); sigDPLike_ *= 2.0/norm; scfDPLike_ *= 2.0/norm; } Double_t LauCPFitModel::getEvtSCFDPLikelihood(UInt_t iEvt) { Double_t scfDPLike(0.0); Double_t recoJacobian(1.0); Double_t xCoord(0.0); Double_t yCoord(0.0); Bool_t squareDP = negKinematics_->squareDP(); if ( squareDP ) { xCoord = negSigModel_->getEvtmPrime(); yCoord = 
negSigModel_->getEvtthPrime(); recoJacobian = recoJacobians_[iEvt]; } else { xCoord = negSigModel_->getEvtm13Sq(); yCoord = negSigModel_->getEvtm23Sq(); } // Find the bin that our reco event falls in Int_t recoBin = scfMap_->binNumber( xCoord, yCoord ); // Find out which true Bins contribute to the given reco bin const std::vector* trueBins = scfMap_->trueBins(recoBin); const Int_t nDataEvents = this->eventsPerExpt(); // Loop over the true bins for (std::vector::const_iterator iter = trueBins->begin(); iter != trueBins->end(); ++iter) { Int_t trueBin = (*iter); // prob of a true event in the given true bin migrating to the reco bin Double_t pRecoGivenTrue = scfMap_->prob( recoBin, trueBin ); Double_t pTrue(0.0); // We've cached the DP amplitudes and the efficiency for the // true bin centres, just after the data points if ( tagged_ ) { LauIsobarDynamics* sigModel(0); if (curEvtCharge_<0) { sigModel = negSigModel_; } else { sigModel = posSigModel_; } sigModel->calcLikelihoodInfo( nDataEvents + trueBin ); pTrue = sigModel->getEvtIntensity(); } else { posSigModel_->calcLikelihoodInfo( nDataEvents + trueBin ); negSigModel_->calcLikelihoodInfo( nDataEvents + trueBin ); pTrue = 0.5 * ( posSigModel_->getEvtIntensity() + negSigModel_->getEvtIntensity() ); } // Get the cached SCF fraction (and jacobian if we're using the square DP) Double_t scfFraction = fakeSCFFracs_[ trueBin ]; Double_t jacobian(1.0); if ( squareDP ) { jacobian = fakeJacobians_[ trueBin ]; } scfDPLike += pTrue * jacobian * scfFraction * pRecoGivenTrue; } // Divide by the reco jacobian scfDPLike /= recoJacobian; return scfDPLike; } void LauCPFitModel::getEvtExtraLikelihoods(UInt_t iEvt) { // Function to return the signal and background likelihoods for the // extra variables for the given event evtNo. sigExtraLike_ = 1.0; const UInt_t nBkgnds = this->nBkgndClasses(); if ( ! tagged_ || curEvtCharge_ < 0 ) { sigExtraLike_ = this->prodPdfValue( negSignalPdfs_, iEvt ); if (useSCF_) { scfExtraLike_ = this->prodPdfValue( negScfPdfs_, iEvt ); } for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { if (usingBkgnd_) { bkgndExtraLike_[bkgndID] = this->prodPdfValue( negBkgndPdfs_[bkgndID], iEvt ); } else { bkgndExtraLike_[bkgndID] = 0.0; } } } else { sigExtraLike_ = this->prodPdfValue( posSignalPdfs_, iEvt ); if (useSCF_) { scfExtraLike_ = this->prodPdfValue( posScfPdfs_, iEvt ); } for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { if (usingBkgnd_) { bkgndExtraLike_[bkgndID] = this->prodPdfValue( posBkgndPdfs_[bkgndID], iEvt ); } else { bkgndExtraLike_[bkgndID] = 0.0; } } } } void LauCPFitModel::updateCoeffs() { negCoeffs_.clear(); posCoeffs_.clear(); negCoeffs_.reserve(nSigComp_); posCoeffs_.reserve(nSigComp_); for (UInt_t i = 0; i < nSigComp_; i++) { negCoeffs_.push_back(coeffPars_[i]->antiparticleCoeff()); posCoeffs_.push_back(coeffPars_[i]->particleCoeff()); } } void LauCPFitModel::setupSPlotNtupleBranches() { // add branches for storing the experiment number and the number of // the event within the current experiment this->addSPlotNtupleIntegerBranch("iExpt"); this->addSPlotNtupleIntegerBranch("iEvtWithinExpt"); // Store the efficiency of the event (for inclusive BF calculations). if (this->storeDPEff()) { this->addSPlotNtupleDoubleBranch("efficiency"); if ( negSigModel_->usingScfModel() && posSigModel_->usingScfModel() ) { this->addSPlotNtupleDoubleBranch("scffraction"); } } // Store the total event likelihood for each species. 
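	// Branch naming convention: "sigTotalLike" (or "sigTMTotalLike"/"sigSCFTotalLike" when
	// self cross feed is modelled) plus one "<bkgndClass>TotalLike" branch per background
	// category; the corresponding "...DPLike" branches are added below when the DP is in use.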
if (useSCF_) { this->addSPlotNtupleDoubleBranch("sigTMTotalLike"); this->addSPlotNtupleDoubleBranch("sigSCFTotalLike"); this->addSPlotNtupleDoubleBranch("sigSCFFrac"); } else { this->addSPlotNtupleDoubleBranch("sigTotalLike"); } if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name( this->bkgndClassName(iBkgnd) ); name += "TotalLike"; this->addSPlotNtupleDoubleBranch(name); } } // Store the DP likelihoods if (this->useDP()) { if (useSCF_) { this->addSPlotNtupleDoubleBranch("sigTMDPLike"); this->addSPlotNtupleDoubleBranch("sigSCFDPLike"); } else { this->addSPlotNtupleDoubleBranch("sigDPLike"); } if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name( this->bkgndClassName(iBkgnd) ); name += "DPLike"; this->addSPlotNtupleDoubleBranch(name); } } } // Store the likelihoods for each extra PDF if (useSCF_) { this->addSPlotNtupleBranches(&negSignalPdfs_, "sigTM"); this->addSPlotNtupleBranches(&negScfPdfs_, "sigSCF"); } else { this->addSPlotNtupleBranches(&negSignalPdfs_, "sig"); } if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { const TString& bkgndClass = this->bkgndClassName(iBkgnd); const LauPdfList* pdfList = &(negBkgndPdfs_[iBkgnd]); this->addSPlotNtupleBranches(pdfList, bkgndClass); } } } void LauCPFitModel::addSPlotNtupleBranches(const LauPdfList* extraPdfs, const TString& prefix) { if (extraPdfs) { // Loop through each of the PDFs for (LauPdfList::const_iterator pdf_iter = extraPdfs->begin(); pdf_iter != extraPdfs->end(); ++pdf_iter) { // Count the number of input variables that are not // DP variables (used in the case where there is DP // dependence for e.g. MVA) UInt_t nVars(0); std::vector varNames = (*pdf_iter)->varNames(); for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { ++nVars; } } if ( nVars == 1 ) { // If the PDF only has one variable then // simply add one branch for that variable TString varName = (*pdf_iter)->varName(); TString name(prefix); name += varName; name += "Like"; this->addSPlotNtupleDoubleBranch(name); } else if ( nVars == 2 ) { // If the PDF has two variables then we // need a branch for them both together and // branches for each TString allVars(""); for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { allVars += (*var_iter); TString name(prefix); name += (*var_iter); name += "Like"; this->addSPlotNtupleDoubleBranch(name); } TString name(prefix); name += allVars; name += "Like"; this->addSPlotNtupleDoubleBranch(name); } else { std::cerr << "WARNING in LauCPFitModel::addSPlotNtupleBranches : Can't yet deal with 3D PDFs." << std::endl; } } } } Double_t LauCPFitModel::setSPlotNtupleBranchValues(LauPdfList* extraPdfs, const TString& prefix, UInt_t iEvt) { // Store the PDF value for each variable in the list Double_t totalLike(1.0); Double_t extraLike(0.0); if (extraPdfs) { for (LauPdfList::iterator pdf_iter = extraPdfs->begin(); pdf_iter != extraPdfs->end(); ++pdf_iter) { // calculate the likelihood for this event (*pdf_iter)->calcLikelihoodInfo(iEvt); extraLike = (*pdf_iter)->getLikelihood(); totalLike *= extraLike; // Count the number of input variables that are not // DP variables (used in the case where there is DP // dependence for e.g. 
MVA) UInt_t nVars(0); std::vector varNames = (*pdf_iter)->varNames(); for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { ++nVars; } } if ( nVars == 1 ) { // If the PDF only has one variable then // simply store the value for that variable TString varName = (*pdf_iter)->varName(); TString name(prefix); name += varName; name += "Like"; this->setSPlotNtupleDoubleBranchValue(name, extraLike); } else if ( nVars == 2 ) { // If the PDF has two variables then we // store the value for them both together // and for each on their own TString allVars(""); for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { allVars += (*var_iter); TString name(prefix); name += (*var_iter); name += "Like"; Double_t indivLike = (*pdf_iter)->getLikelihood( (*var_iter) ); this->setSPlotNtupleDoubleBranchValue(name, indivLike); } TString name(prefix); name += allVars; name += "Like"; this->setSPlotNtupleDoubleBranchValue(name, extraLike); } else { std::cerr << "WARNING in LauCPFitModel::setSPlotNtupleBranchValues : Can't yet deal with 3D PDFs." << std::endl; } } } return totalLike; } LauSPlot::NameSet LauCPFitModel::variableNames() const { LauSPlot::NameSet nameSet; if (this->useDP()) { nameSet.insert("DP"); } // Loop through all the signal PDFs for (LauPdfList::const_iterator pdf_iter = negSignalPdfs_.begin(); pdf_iter != negSignalPdfs_.end(); ++pdf_iter) { // Loop over the variables involved in each PDF std::vector varNames = (*pdf_iter)->varNames(); for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { // If they are not DP coordinates then add them if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { nameSet.insert( (*var_iter) ); } } } return nameSet; } LauSPlot::NumbMap LauCPFitModel::freeSpeciesNames() const { LauSPlot::NumbMap numbMap; if (!signalEvents_->fixed() && this->doEMLFit()) { numbMap["sig"] = signalEvents_->genValue(); } if ( usingBkgnd_ ) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { const TString& bkgndClass = this->bkgndClassName(iBkgnd); const LauAbsRValue* par = bkgndEvents_[iBkgnd]; if (!par->fixed()) { numbMap[bkgndClass] = par->genValue(); if ( ! 
par->isLValue() ) { std::cerr << "WARNING in LauCPFitModel::freeSpeciesNames : \"" << par->name() << "\" is a LauFormulaPar, which implies it is perhaps not entirely free to float in the fit, so the sWeight calculation may not be reliable" << std::endl; } } } } return numbMap; } LauSPlot::NumbMap LauCPFitModel::fixdSpeciesNames() const { LauSPlot::NumbMap numbMap; if (signalEvents_->fixed() && this->doEMLFit()) { numbMap["sig"] = signalEvents_->genValue(); } if ( usingBkgnd_ ) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { const TString& bkgndClass = this->bkgndClassName(iBkgnd); const LauAbsRValue* par = bkgndEvents_[iBkgnd]; if (par->fixed()) { numbMap[bkgndClass] = par->genValue(); } } } return numbMap; } LauSPlot::TwoDMap LauCPFitModel::twodimPDFs() const { // This makes the assumption that the form of the positive and // negative PDFs are the same, which seems reasonable to me LauSPlot::TwoDMap twodimMap; for (LauPdfList::const_iterator pdf_iter = negSignalPdfs_.begin(); pdf_iter != negSignalPdfs_.end(); ++pdf_iter) { // Count the number of input variables that are not DP variables UInt_t nVars(0); std::vector varNames = (*pdf_iter)->varNames(); for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { ++nVars; } } if ( nVars == 2 ) { if (useSCF_) { twodimMap.insert( std::make_pair( "sigTM", std::make_pair( varNames[0], varNames[1] ) ) ); } else { twodimMap.insert( std::make_pair( "sig", std::make_pair( varNames[0], varNames[1] ) ) ); } } } if ( useSCF_ ) { for (LauPdfList::const_iterator pdf_iter = negScfPdfs_.begin(); pdf_iter != negScfPdfs_.end(); ++pdf_iter) { // Count the number of input variables that are not DP variables UInt_t nVars(0); std::vector varNames = (*pdf_iter)->varNames(); for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { ++nVars; } } if ( nVars == 2 ) { twodimMap.insert( std::make_pair( "sigSCF", std::make_pair( varNames[0], varNames[1] ) ) ); } } } if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { const TString& bkgndClass = this->bkgndClassName(iBkgnd); const LauPdfList& pdfList = negBkgndPdfs_[iBkgnd]; for (LauPdfList::const_iterator pdf_iter = pdfList.begin(); pdf_iter != pdfList.end(); ++pdf_iter) { // Count the number of input variables that are not DP variables UInt_t nVars(0); std::vector varNames = (*pdf_iter)->varNames(); for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { ++nVars; } } if ( nVars == 2 ) { twodimMap.insert( std::make_pair( bkgndClass, std::make_pair( varNames[0], varNames[1] ) ) ); } } } } return twodimMap; } void LauCPFitModel::storePerEvtLlhds() { std::cout << "INFO in LauCPFitModel::storePerEvtLlhds : Storing per-event likelihood values..." 
<< std::endl; // if we've not been using the DP model then we need to cache all // the info here so that we can get the efficiency from it LauFitDataTree* inputFitData = this->fitData(); if (!this->useDP() && this->storeDPEff()) { negSigModel_->initialise(negCoeffs_); posSigModel_->initialise(posCoeffs_); negSigModel_->fillDataTree(*inputFitData); posSigModel_->fillDataTree(*inputFitData); } UInt_t evtsPerExpt(this->eventsPerExpt()); LauIsobarDynamics* sigModel(0); LauPdfList* sigPdfs(0); LauPdfList* scfPdfs(0); LauBkgndPdfsList* bkgndPdfs(0); for (UInt_t iEvt = 0; iEvt < evtsPerExpt; ++iEvt) { this->setSPlotNtupleIntegerBranchValue("iExpt",this->iExpt()); this->setSPlotNtupleIntegerBranchValue("iEvtWithinExpt",iEvt); // Find out whether we have B- or B+ if ( tagged_ ) { const LauFitData& dataValues = inputFitData->getData(iEvt); LauFitData::const_iterator iter = dataValues.find("charge"); curEvtCharge_ = static_cast(iter->second); if (curEvtCharge_==+1) { sigModel = posSigModel_; sigPdfs = &posSignalPdfs_; scfPdfs = &posScfPdfs_; bkgndPdfs = &posBkgndPdfs_; } else { sigModel = negSigModel_; sigPdfs = &negSignalPdfs_; scfPdfs = &negScfPdfs_; bkgndPdfs = &negBkgndPdfs_; } } else { sigPdfs = &negSignalPdfs_; scfPdfs = &negScfPdfs_; bkgndPdfs = &negBkgndPdfs_; } // the DP information this->getEvtDPLikelihood(iEvt); if (this->storeDPEff()) { if (!this->useDP()) { posSigModel_->calcLikelihoodInfo(iEvt); negSigModel_->calcLikelihoodInfo(iEvt); } if ( tagged_ ) { this->setSPlotNtupleDoubleBranchValue("efficiency",sigModel->getEvtEff()); if ( negSigModel_->usingScfModel() && posSigModel_->usingScfModel() ) { this->setSPlotNtupleDoubleBranchValue("scffraction",sigModel->getEvtScfFraction()); } } else { this->setSPlotNtupleDoubleBranchValue("efficiency",0.5*(posSigModel_->getEvtEff() + negSigModel_->getEvtEff()) ); if ( negSigModel_->usingScfModel() && posSigModel_->usingScfModel() ) { this->setSPlotNtupleDoubleBranchValue("scffraction",0.5*(posSigModel_->getEvtScfFraction() + negSigModel_->getEvtScfFraction())); } } } if (this->useDP()) { sigTotalLike_ = sigDPLike_; if (useSCF_) { scfTotalLike_ = scfDPLike_; this->setSPlotNtupleDoubleBranchValue("sigTMDPLike",sigDPLike_); this->setSPlotNtupleDoubleBranchValue("sigSCFDPLike",scfDPLike_); } else { this->setSPlotNtupleDoubleBranchValue("sigDPLike",sigDPLike_); } if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name = this->bkgndClassName(iBkgnd); name += "DPLike"; this->setSPlotNtupleDoubleBranchValue(name,bkgndDPLike_[iBkgnd]); } } } else { sigTotalLike_ = 1.0; if (useSCF_) { scfTotalLike_ = 1.0; } if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { bkgndTotalLike_[iBkgnd] = 1.0; } } } // the signal PDF values if ( useSCF_ ) { sigTotalLike_ *= this->setSPlotNtupleBranchValues(sigPdfs, "sigTM", iEvt); scfTotalLike_ *= this->setSPlotNtupleBranchValues(scfPdfs, "sigSCF", iEvt); } else { sigTotalLike_ *= this->setSPlotNtupleBranchValues(sigPdfs, "sig", iEvt); } // the background PDF values if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { const TString& bkgndClass = this->bkgndClassName(iBkgnd); LauPdfList& pdfs = (*bkgndPdfs)[iBkgnd]; bkgndTotalLike_[iBkgnd] *= this->setSPlotNtupleBranchValues(&(pdfs), bkgndClass, iEvt); } } // the total likelihoods if (useSCF_) { Double_t scfFrac(0.0); if ( useSCFHist_ ) { scfFrac = recoSCFFracs_[iEvt]; } 
else { scfFrac = scfFrac_.unblindValue(); } this->setSPlotNtupleDoubleBranchValue("sigSCFFrac",scfFrac); sigTotalLike_ *= ( 1.0 - scfFrac ); if ( scfMap_ == 0 ) { scfTotalLike_ *= scfFrac; } this->setSPlotNtupleDoubleBranchValue("sigTMTotalLike",sigTotalLike_); this->setSPlotNtupleDoubleBranchValue("sigSCFTotalLike",scfTotalLike_); } else { this->setSPlotNtupleDoubleBranchValue("sigTotalLike",sigTotalLike_); } if (usingBkgnd_) { const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name = this->bkgndClassName(iBkgnd); name += "TotalLike"; this->setSPlotNtupleDoubleBranchValue(name,bkgndTotalLike_[iBkgnd]); } } // fill the tree this->fillSPlotNtupleBranches(); } std::cout << "INFO in LauCPFitModel::storePerEvtLlhds : Finished storing per-event likelihood values." << std::endl; }
+std::map<TString,LauComplex> LauCPFitModel::getDPAmps(const Double_t m13Sq, const Double_t m23Sq)
+{
+	// Initialise the DP model, if not already done
+	if ( negCoeffs_.empty() || posCoeffs_.empty() ) {
+		this->updateCoeffs();
+		this->initialiseDPModels();
+	}
+
+	if ( ! posKinematics_->withinDPLimits( m13Sq, m23Sq ) ) {
+		return { { "signal_" + negParent_, {} },
+			 { "signal_" + posParent_, {} } };
+	}
+
+	negSigModel_->calcLikelihoodInfo( m13Sq, m23Sq );
+	posSigModel_->calcLikelihoodInfo( m13Sq, m23Sq );
+
+	return { { "signal_" + negParent_, negSigModel_->getEvtDPAmp() },
+		 { "signal_" + posParent_, posSigModel_->getEvtDPAmp() } };
+}
+
+std::map<TString,Double_t> LauCPFitModel::getDPLikelihoods(const Double_t m13Sq, const Double_t m23Sq)
+{
+	// Initialise the DP model, if not already done
+	if ( negCoeffs_.empty() || posCoeffs_.empty() ) {
+		this->updateCoeffs();
+		this->initialiseDPModels();
+	}
+
+	std::map<TString,Double_t> likelihoods;
+
+	if ( ! posKinematics_->withinDPLimits( m13Sq, m23Sq ) ) {
+		likelihoods.emplace( "signal_" + negParent_, 0.0 );
+		likelihoods.emplace( "signal_" + posParent_, 0.0 );
+		if ( usingBkgnd_ ) {
+			const UInt_t nBkgnds { this->nBkgndClasses() };
+			for ( UInt_t bkgndID( 0 ); bkgndID < nBkgnds; ++bkgndID ) {
+				likelihoods.emplace( this->bkgndClassName( bkgndID ) + "_" + negParent_, 0.0 );
+				likelihoods.emplace( this->bkgndClassName( bkgndID ) + "_" + posParent_, 0.0 );
+			}
+		}
+		return likelihoods;
+	}
+
+	negSigModel_->calcLikelihoodInfo( m13Sq, m23Sq );
+	posSigModel_->calcLikelihoodInfo( m13Sq, m23Sq );
+
+	// See comment in getEvtDPLikelihood for explanation of the 2.0 factor
+	const Double_t norm { 2.0 / ( negSigModel_->getDPNorm() + posSigModel_->getDPNorm() ) };
+
+	likelihoods.emplace( "signal_" + negParent_, negSigModel_->getEvtIntensity() * norm );
+	likelihoods.emplace( "signal_" + posParent_, posSigModel_->getEvtIntensity() * norm );
+
+	// TODO - SCF signal
+	static bool warningIssued { false };
+	if ( useSCF_ && !
warningIssued ) { + warningIssued = true; + std::cerr << "WARNING in LauCPFitModel::getDPLikelihoods : calculation of SCF likelihoods not currently implemented in this function\n"; + std::cerr << " : signal likelihood will just be the truth-matched value"; + std::cerr << std::endl; + } + + if ( usingBkgnd_ ) { + const UInt_t nBkgnds { this->nBkgndClasses() }; + for ( UInt_t bkgndID( 0 ); bkgndID < nBkgnds; ++bkgndID ) { + likelihoods.emplace( this->bkgndClassName( bkgndID ) + "_" + negParent_, + negBkgndDPModels_[bkgndID]->getLikelihood( m13Sq, m23Sq ) ); + likelihoods.emplace( this->bkgndClassName( bkgndID ) + "_" + posParent_, + posBkgndDPModels_[bkgndID]->getLikelihood( m13Sq, m23Sq ) ); + } + } + + return likelihoods; +} + void LauCPFitModel::embedNegSignal(const TString& fileName, const TString& treeName, Bool_t reuseEventsWithinEnsemble, Bool_t reuseEventsWithinExperiment, Bool_t useReweighting) { if (negSignalTree_) { std::cerr << "ERROR in LauCPFitModel::embedNegSignal : Already embedding signal from a file." << std::endl; return; } negSignalTree_ = new LauEmbeddedData(fileName,treeName,reuseEventsWithinExperiment); Bool_t dataOK = negSignalTree_->findBranches(); if (!dataOK) { delete negSignalTree_; negSignalTree_ = 0; std::cerr << "ERROR in LauCPFitModel::embedNegSignal : Problem creating data tree for embedding." << std::endl; return; } reuseSignal_ = reuseEventsWithinEnsemble; useNegReweighting_ = useReweighting; if (this->enableEmbedding() == kFALSE) {this->enableEmbedding(kTRUE);} } void LauCPFitModel::embedNegBkgnd(const TString& bkgndClass, const TString& fileName, const TString& treeName, Bool_t reuseEventsWithinEnsemble, Bool_t reuseEventsWithinExperiment) { if ( ! this->validBkgndClass( bkgndClass ) ) { std::cerr << "ERROR in LauCPFitModel::embedBkgnd : Invalid background class \"" << bkgndClass << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; return; } UInt_t bkgndID = this->bkgndClassID( bkgndClass ); if (negBkgndTree_[bkgndID]) { std::cerr << "ERROR in LauCPFitModel::embedNegBkgnd : Already embedding background from a file." << std::endl; return; } negBkgndTree_[bkgndID] = new LauEmbeddedData(fileName,treeName,reuseEventsWithinExperiment); Bool_t dataOK = negBkgndTree_[bkgndID]->findBranches(); if (!dataOK) { delete negBkgndTree_[bkgndID]; negBkgndTree_[bkgndID] = 0; std::cerr << "ERROR in LauCPFitModel::embedNegBkgnd : Problem creating data tree for embedding." << std::endl; return; } reuseBkgnd_[bkgndID] = reuseEventsWithinEnsemble; if (this->enableEmbedding() == kFALSE) {this->enableEmbedding(kTRUE);} } void LauCPFitModel::embedPosSignal(const TString& fileName, const TString& treeName, Bool_t reuseEventsWithinEnsemble, Bool_t reuseEventsWithinExperiment, Bool_t useReweighting) { if (posSignalTree_) { std::cerr << "ERROR in LauCPFitModel::embedPosSignal : Already embedding signal from a file." << std::endl; return; } posSignalTree_ = new LauEmbeddedData(fileName,treeName,reuseEventsWithinExperiment); Bool_t dataOK = posSignalTree_->findBranches(); if (!dataOK) { delete posSignalTree_; posSignalTree_ = 0; std::cerr << "ERROR in LauCPFitModel::embedPosSignal : Problem creating data tree for embedding." 
<< std::endl; return; } reuseSignal_ = reuseEventsWithinEnsemble; usePosReweighting_ = useReweighting; if (this->enableEmbedding() == kFALSE) {this->enableEmbedding(kTRUE);} } void LauCPFitModel::embedPosBkgnd(const TString& bkgndClass, const TString& fileName, const TString& treeName, Bool_t reuseEventsWithinEnsemble, Bool_t reuseEventsWithinExperiment) { if ( ! this->validBkgndClass( bkgndClass ) ) { std::cerr << "ERROR in LauCPFitModel::embedBkgnd : Invalid background class \"" << bkgndClass << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; return; } UInt_t bkgndID = this->bkgndClassID( bkgndClass ); if (posBkgndTree_[bkgndID]) { std::cerr << "ERROR in LauCPFitModel::embedPosBkgnd : Already embedding background from a file." << std::endl; return; } posBkgndTree_[bkgndID] = new LauEmbeddedData(fileName,treeName,reuseEventsWithinExperiment); Bool_t dataOK = posBkgndTree_[bkgndID]->findBranches(); if (!dataOK) { delete posBkgndTree_[bkgndID]; posBkgndTree_[bkgndID] = 0; std::cerr << "ERROR in LauCPFitModel::embedPosBkgnd : Problem creating data tree for embedding." << std::endl; return; } reuseBkgnd_[bkgndID] = reuseEventsWithinEnsemble; if (this->enableEmbedding() == kFALSE) {this->enableEmbedding(kTRUE);} } void LauCPFitModel::weightEvents( const TString& dataFileName, const TString& dataTreeName ) { // Routine to provide weights for events that are uniformly distributed // in the DP (or square DP) so as to reproduce the given DP model const Bool_t squareDP { posKinematics_->squareDP() || negKinematics_->squareDP() }; if ( squareDP ) { std::cout << "INFO in LauCPFitModel::weightEvents : will store DP model weights and the square DP jacobian\n"; std::cout << " : the DP model weights can be used on their own to weight events that were generated flat in phase space\n"; std::cout << " : or they can be multiplied by the jacobian to weight events that were generated flat in the square DP\n"; std::cout << " : or they can be multiplied by max(1.0, jacobian) to weight events that were generated quasi-flat in the square DP" << std::endl; } else { std::cout << "INFO in LauCPFitModel::weightEvents : will store DP model weights suitable for weighting events that were generated flat in phase space" << std::endl; } // This reads in the given dataFile and creates an input // fit data tree that stores them for all events and experiments. const Bool_t dataOK { this->verifyFitData( dataFileName, dataTreeName ) }; if ( ! dataOK ) { std::cerr << "ERROR in LauCPFitModel::weightEvents : Problem caching the data." << std::endl; return; } LauFitDataTree* inputFitData { this->fitData() }; if ( ! inputFitData->haveBranch( "m13Sq_MC" ) || ! inputFitData->haveBranch( "m23Sq_MC" ) ) { std::cerr << "WARNING in LauCPFitModel::weightEvents : Cannot find MC truth DP coordinate branches in supplied data, aborting." << std::endl; return; } if ( ! inputFitData->haveBranch( "charge" ) ) { std::cerr << "WARNING in LauCPFitModel::weightEvents : Cannot find branch specifying event charge in supplied data, aborting." 
<< std::endl; return; } // Create the ntuple to hold the DP weights TString weightsFileName{ dataFileName }; const Ssiz_t index { weightsFileName.Last('.') }; weightsFileName.Insert( index, "_DPweights" ); LauGenNtuple weightsTuple { weightsFileName, dataTreeName }; weightsTuple.addIntegerBranch( "iExpt" ); weightsTuple.addIntegerBranch( "iEvtWithinExpt" ); weightsTuple.addDoubleBranch( "dpModelWeight" ); if ( squareDP ) { weightsTuple.addDoubleBranch( "sqDPJacobian" ); } const UInt_t nExpmt { this->nExpt() }; const UInt_t firstExpmt { this->firstExpt() }; for (UInt_t iExpmt {firstExpmt}; iExpmt < (firstExpmt+nExpmt); ++iExpmt) { inputFitData->readExperimentData(iExpmt); const UInt_t nEvents { inputFitData->nEvents() }; if ( nEvents < 1 ) { std::cerr << "WARNING in LauCPFitModel::weightEvents : Zero events in experiment " << iExpmt << ", skipping..." << std::endl; continue; } weightsTuple.setIntegerBranchValue( "iExpt", iExpmt ); // Calculate and store the weights for the events in this experiment for ( UInt_t iEvent{0}; iEvent < nEvents; ++iEvent ) { weightsTuple.setIntegerBranchValue( "iEvtWithinExpt", iEvent ); const LauFitData& evtData = inputFitData->getData( iEvent ); const auto m13Sq_MC { evtData.at( "m13Sq_MC" ) }; const auto m23Sq_MC { evtData.at( "m23Sq_MC" ) }; const auto charge { static_cast( evtData.at( "charge" ) ) }; Double_t dpModelWeight{0.0}; Double_t jacobian{1.0}; LauKinematics * kinematics{nullptr}; LauIsobarDynamics * dpModel{nullptr}; if (charge > 0) { kinematics = posKinematics_; dpModel = posSigModel_; } else { kinematics = negKinematics_; dpModel = negSigModel_; } if ( kinematics->withinDPLimits( m13Sq_MC, m23Sq_MC ) ) { kinematics->updateKinematics( m13Sq_MC, m23Sq_MC ); dpModelWeight = dpModel->getEventWeight(); if ( squareDP ) { jacobian = kinematics->calcSqDPJacobian(); } const Double_t norm { 0.5 * ( negSigModel_->getDPNorm() + posSigModel_->getDPNorm() ) }; dpModelWeight /= norm; } weightsTuple.setDoubleBranchValue( "dpModelWeight", dpModelWeight ); if ( squareDP ) { weightsTuple.setDoubleBranchValue( "sqDPJacobian", jacobian ); } weightsTuple.fillBranches(); } } weightsTuple.buildIndex( "iExpt", "iEvtWithinExpt" ); weightsTuple.addFriendTree( dataFileName, dataTreeName ); weightsTuple.writeOutGenResults(); } void LauCPFitModel::savePDFPlots(const TString& label) { savePDFPlotsWave(label, 0); savePDFPlotsWave(label, 1); savePDFPlotsWave(label, 2); std::cout << "LauCPFitModel::plot" << std::endl; // ((LauIsobarDynamics*)negSigModel_)->plot(); //Double_t minm13 = negSigModel_->getKinematics()->getm13Min(); Double_t minm13 = 0.0; Double_t maxm13 = negSigModel_->getKinematics()->getm13Max(); //Double_t minm23 = negSigModel_->getKinematics()->getm23Min(); Double_t minm23 = 0.0; Double_t maxm23 = negSigModel_->getKinematics()->getm23Max(); Double_t mins13 = minm13*minm13; Double_t maxs13 = maxm13*maxm13; Double_t mins23 = minm23*minm23; Double_t maxs23 = maxm23*maxm23; Double_t s13, s23, posChPdf, negChPdf; TString xLabel = "s13"; TString yLabel = "s23"; if (negSigModel_->getDaughters()->gotSymmetricalDP()) { xLabel = "sHigh"; yLabel = "sLow";} Int_t n13=200.00, n23=200.00; Double_t delta13, delta23; delta13 = (maxs13 - mins13)/n13; delta23 = (maxs23 - mins23)/n23; UInt_t nAmp = negSigModel_->getnCohAmp(); for (UInt_t resID = 0; resID <= nAmp; ++resID) { TGraph2D *posDt = new TGraph2D(); TGraph2D *negDt = new TGraph2D(); TGraph2D *acpDt = new TGraph2D(); TString resName = "TotalAmp"; if (resID != nAmp){ TString tStrResID = Form("%d", resID); const 
LauIsobarDynamics* model = negSigModel_; const LauAbsResonance* resonance = model->getResonance(resID); resName = resonance->getResonanceName(); std::cout << "resName = " << resName << std::endl; } resName.ReplaceAll("(", ""); resName.ReplaceAll(")", ""); resName.ReplaceAll("*", "Star"); posDt->SetName(resName+label); posDt->SetTitle(resName+" ("+label+") Positive"); negDt->SetName(resName+label); negDt->SetTitle(resName+" ("+label+") Negative"); acpDt->SetName(resName+label); acpDt->SetTitle(resName+" ("+label+") Asymmetry"); Int_t count=0; for (Int_t i=0; igetKinematics()->withinDPLimits2(s23, s13)) { if (negSigModel_->getDaughters()->gotSymmetricalDP() && (s13>s23) ) continue; negSigModel_->calcLikelihoodInfo(s13, s23); posSigModel_->calcLikelihoodInfo(s13, s23); LauComplex negChAmp = negSigModel_->getEvtDPAmp(); LauComplex posChAmp = posSigModel_->getEvtDPAmp(); if (resID != nAmp){ negChAmp = negSigModel_->getFullAmplitude(resID); posChAmp = posSigModel_->getFullAmplitude(resID); } negChPdf = negChAmp.abs2(); posChPdf = posChAmp.abs2(); negDt->SetPoint(count,s23,s13,negChPdf); // s23=sHigh, s13 = sLow posDt->SetPoint(count,s23,s13,posChPdf); // s23=sHigh, s13 = sLow acpDt->SetPoint(count,s23,s13, negChPdf - posChPdf); // s23=sHigh, s13 = sLow count++; } } } gStyle->SetPalette(1); TCanvas *posC = new TCanvas("c"+resName+label + "Positive",resName+" ("+label+") Positive",0,0,600,400); posDt->GetXaxis()->SetTitle(xLabel); posDt->GetYaxis()->SetTitle(yLabel); posDt->Draw("SURF1"); posDt->GetXaxis()->SetTitle(xLabel); posDt->GetYaxis()->SetTitle(yLabel); posC->SaveAs("plot_2D_"+resName + "_"+label+"Positive.C"); TCanvas *negC = new TCanvas("c"+resName+label + "Negative",resName+" ("+label+") Negative",0,0,600,400); negDt->GetXaxis()->SetTitle(xLabel); negDt->GetYaxis()->SetTitle(yLabel); negDt->Draw("SURF1"); negDt->GetXaxis()->SetTitle(xLabel); negDt->GetYaxis()->SetTitle(yLabel); negC->SaveAs("plot_2D_"+resName + "_"+label+"Negative.C"); TCanvas *acpC = new TCanvas("c"+resName+label + "Asymmetry",resName+" ("+label+") Asymmetry",0,0,600,400); acpDt->GetXaxis()->SetTitle(xLabel); acpDt->GetYaxis()->SetTitle(yLabel); acpDt->Draw("SURF1"); acpDt->GetXaxis()->SetTitle(xLabel); acpDt->GetYaxis()->SetTitle(yLabel); acpC->SaveAs("plot_2D_"+resName + "_"+label+"Asymmetry.C"); } } void LauCPFitModel::savePDFPlotsWave(const TString& label, const Int_t& spin) { std::cout << "label = "<< label << ", spin = "<< spin << std::endl; TString tStrResID = "S_Wave"; if (spin == 1) tStrResID = "P_Wave"; if (spin == 2) tStrResID = "D_Wave"; TString xLabel = "s13"; TString yLabel = "s23"; std::cout << "LauCPFitModel::savePDFPlotsWave: "<< tStrResID << std::endl; Double_t minm13 = 0.0; Double_t maxm13 = negSigModel_->getKinematics()->getm13Max(); Double_t minm23 = 0.0; Double_t maxm23 = negSigModel_->getKinematics()->getm23Max(); Double_t mins13 = minm13*minm13; Double_t maxs13 = maxm13*maxm13; Double_t mins23 = minm23*minm23; Double_t maxs23 = maxm23*maxm23; Double_t s13, s23, posChPdf, negChPdf; TGraph2D *posDt = new TGraph2D(); TGraph2D *negDt = new TGraph2D(); TGraph2D *acpDt = new TGraph2D(); posDt->SetName(tStrResID+label); posDt->SetTitle(tStrResID+" ("+label+") Positive"); negDt->SetName(tStrResID+label); negDt->SetTitle(tStrResID+" ("+label+") Negative"); acpDt->SetName(tStrResID+label); acpDt->SetTitle(tStrResID+" ("+label+") Asymmetry"); Int_t n13=200.00, n23=200.00; Double_t delta13, delta23; delta13 = (maxs13 - mins13)/n13; delta23 = (maxs23 - mins23)/n23; UInt_t nAmp = negSigModel_->getnCohAmp(); 
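	// The grid scan below coherently sums the amplitudes of all resonances with the
	// requested spin, separately for the negative and positive models, at each
	// (s13, s23) point, then fills |A|^2 surfaces for each flavour plus their
	// difference. Points outside the kinematic boundary, and the redundant half of
	// a symmetrical DP, are skipped.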
Int_t count=0; for (Int_t i=0; igetKinematics()->withinDPLimits2(s23, s13)) { if (negSigModel_->getDaughters()->gotSymmetricalDP() && (s13>s23) ) continue; LauComplex negChAmp(0,0); LauComplex posChAmp(0,0); Bool_t noWaveRes = kTRUE; negSigModel_->calcLikelihoodInfo(s13, s23); for (UInt_t resID = 0; resID < nAmp; ++resID) { const LauIsobarDynamics* model = negSigModel_; const LauAbsResonance* resonance = model->getResonance(resID); Int_t spin_res = resonance->getSpin(); if (spin != spin_res) continue; noWaveRes = kFALSE; negChAmp += negSigModel_->getFullAmplitude(resID); posChAmp += posSigModel_->getFullAmplitude(resID); } if (noWaveRes) return; negChPdf = negChAmp.abs2(); posChPdf = posChAmp.abs2(); negDt->SetPoint(count,s23,s13,negChPdf); // s23=sHigh, s13 = sLow posDt->SetPoint(count,s23,s13,posChPdf); // s23=sHigh, s13 = sLow acpDt->SetPoint(count,s23,s13, negChPdf - posChPdf); // s23=sHigh, s13 = sLow count++; } } } gStyle->SetPalette(1); TCanvas *posC = new TCanvas("c"+tStrResID+label + "Positive",tStrResID+" ("+label+") Positive",0,0,600,400); posDt->GetXaxis()->SetTitle(xLabel); posDt->GetYaxis()->SetTitle(yLabel); posDt->Draw("SURF1"); posDt->GetXaxis()->SetTitle(xLabel); posDt->GetYaxis()->SetTitle(yLabel); posC->SaveAs("plot_2D_"+tStrResID + "_"+label+"Positive.C"); TCanvas *negC = new TCanvas("c"+tStrResID+label + "Negative",tStrResID+" ("+label+") Negative",0,0,600,400); negDt->GetXaxis()->SetTitle(xLabel); negDt->GetYaxis()->SetTitle(yLabel); negDt->Draw("SURF1"); negDt->GetXaxis()->SetTitle(xLabel); negDt->GetYaxis()->SetTitle(yLabel); negC->SaveAs("plot_2D_"+tStrResID + "_"+label+"Negative.C"); TCanvas *acpC = new TCanvas("c"+tStrResID+label + "Asymmetry",tStrResID+" ("+label+") Asymmetry",0,0,600,400); acpDt->GetXaxis()->SetTitle(xLabel); acpDt->GetYaxis()->SetTitle(yLabel); acpDt->Draw("SURF1"); acpDt->GetXaxis()->SetTitle(xLabel); acpDt->GetYaxis()->SetTitle(yLabel); acpC->SaveAs("plot_2D_"+tStrResID + "_"+label+"Asymmetry.C"); } Double_t LauCPFitModel::getParamFromTree( TTree& tree, const TString& name ) { TBranch* branch{tree.FindBranch( name )}; if ( branch ) { TLeaf* leaf{branch->GetLeaf( name )}; if ( leaf ) { tree.GetEntry(0); return leaf->GetValue(); } else { std::cout << "ERROR in LauCPFitModel::getParamFromTree : Leaf name " + name + " not found in parameter file!" << std::endl; } } else { std::cout << "ERROR in LauCPFitModel::getParamFromTree : Branch name " + name + " not found in parameter file!" << std::endl; } return -1.1; } void LauCPFitModel::fixParam( LauParameter* param, const Double_t val, const Bool_t fix) { std::cout << "INFO in LauCPFitModel::fixParam : Setting " << param->name() << " to " << val << std::endl; param->value(val); param->genValue(val); param->initValue(val); if (fix) { param->fixed(kTRUE); } else if (!param->fixed()){ // Add parameter name to list to indicate that this should not be randomised by randomiseInitFitPars // (otherwise only those that are fixed are not randomised). // This is only done to those that are not already fixed (see randomiseInitFitPars). allImportedFreeParams_.insert(param); } } void LauCPFitModel::fixParams( std::vector& params ) { const Bool_t fix{fixParams_}; // TODO: Allow some parameters to be fixed and some to remain floating (but initialised) if ( !fixParamFileName_.IsNull() ) { // Take param values from a file TFile * paramFile = TFile::Open(fixParamFileName_, "READ"); if (!paramFile) { std::cerr << "ERROR in LauCPFitModel::fixParams : File '" + fixParamFileName_ + "' could not be opened for reading!" 
<< std::endl; return; } TTree * paramTree = dynamic_cast(paramFile->Get(fixParamTreeName_)); if (!paramTree) { std::cerr << "ERROR in LauCPFitModel::fixParams : Tree '" + fixParamTreeName_ + "' not found in parameter file!" << std::endl; return; } if ( !fixParamNames_.empty() ) { // Fix params from file, according to vector of names for( auto itr = params.begin(); itr != params.end(); ++itr ) { auto itrName = fixParamNames_.find( (*itr)->name() ); if ( itrName != fixParamNames_.end() ) { this->fixParam(*itr, this->getParamFromTree(*paramTree, *itrName), fix); } } } else { // Fix some (unspecified) parameters from file, prioritising the map (if it exists) for( auto itr = params.begin(); itr != params.end(); ++itr) { const TString& name = (*itr)->name(); if ( ! fixParamMap_.empty() ) { auto nameValItr = fixParamMap_.find(name); if ( nameValItr != fixParamMap_.end() ) { this->fixParam(*itr, nameValItr->second, fix); } } else { this->fixParam(*itr, this->getParamFromTree(*paramTree, name), fix); } } } // Vector of names? } else { // Fix param names fom map, leave others floating for( auto itr = params.begin(); itr != params.end(); ++itr ) { auto nameValItr = this->fixParamMap_.find( (*itr)->name() ); if ( nameValItr != this->fixParamMap_.end() ) { this->fixParam(*itr, nameValItr->second, fix); } } } } diff --git a/src/LauSimpleFitModel.cc b/src/LauSimpleFitModel.cc index ebf8d51..4a11c96 100644 --- a/src/LauSimpleFitModel.cc +++ b/src/LauSimpleFitModel.cc @@ -1,2364 +1,2425 @@ /* Copyright 2004 University of Warwick Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Laura++ package authors: John Back Paul Harrison Thomas Latham */ /*! \file LauSimpleFitModel.cc \brief File containing implementation of LauSimpleFitModel class. */ #include #include #include #include #include "TFile.h" #include "TH2.h" #include "TMinuit.h" #include "TRandom.h" #include "TSystem.h" #include "TVirtualFitter.h" #include "LauAbsBkgndDPModel.hh" #include "LauAbsCoeffSet.hh" #include "LauIsobarDynamics.hh" #include "LauAbsPdf.hh" #include "LauComplex.hh" #include "LauConstants.hh" #include "LauDaughters.hh" #include "LauEmbeddedData.hh" #include "LauEffModel.hh" #include "LauFitNtuple.hh" #include "LauGenNtuple.hh" #include "LauKinematics.hh" #include "LauPrint.hh" #include "LauRandom.hh" #include "LauScfMap.hh" #include "LauSimpleFitModel.hh" #include "TGraph2D.h" #include "TGraph.h" #include "TStyle.h" #include "TCanvas.h" LauSimpleFitModel::LauSimpleFitModel(LauIsobarDynamics* sigModel) : LauAbsFitModel(), sigDPModel_(sigModel), kinematics_(sigModel ? 
sigModel->getKinematics() : 0), usingBkgnd_(kFALSE), nSigComp_(0), nSigDPPar_(0), nExtraPdfPar_(0), nNormPar_(0), meanEff_("meanEff",0.0,0.0,1.0), dpRate_("dpRate",0.0,0.0,100.0), //signalEvents_("signalEvents",1.0,0.0,1.0), signalEvents_(0), useSCF_(kFALSE), useSCFHist_(kFALSE), scfFrac_("scfFrac",0.0,0.0,1.0), scfFracHist_(0), scfMap_(0), compareFitData_(kFALSE), signalTree_(0), reuseSignal_(kFALSE), useReweighting_(kFALSE), sigDPLike_(0.0), scfDPLike_(0.0), sigExtraLike_(0.0), scfExtraLike_(0.0), sigTotalLike_(0.0), scfTotalLike_(0.0) { } LauSimpleFitModel::~LauSimpleFitModel() { delete signalTree_; for (LauBkgndEmbDataList::iterator iter = bkgndTree_.begin(); iter != bkgndTree_.end(); ++iter) { delete (*iter); } delete scfFracHist_; } void LauSimpleFitModel::setupBkgndVectors() { UInt_t nBkgnds = this->nBkgndClasses(); bkgndDPModels_.resize( nBkgnds ); bkgndPdfs_.resize( nBkgnds ); bkgndEvents_.resize( nBkgnds ); bkgndTree_.resize( nBkgnds ); reuseBkgnd_.resize( nBkgnds ); bkgndDPLike_.resize( nBkgnds ); bkgndExtraLike_.resize( nBkgnds ); bkgndTotalLike_.resize( nBkgnds ); } void LauSimpleFitModel::setNSigEvents(LauParameter* nSigEvents) { if ( nSigEvents == 0 ) { std::cerr << "ERROR in LauSimpleFitModel::setNSigEvents : The signal yield LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( signalEvents_ != 0 ) { std::cerr << "ERROR in LauSimpleFitModel::setNSigEvents : You are trying to overwrite the signal yield." << std::endl; return; } signalEvents_ = nSigEvents; TString name = signalEvents_->name(); if ( ! name.Contains("signalEvents") && !( name.BeginsWith("signal") && name.EndsWith("Events") ) ) { signalEvents_->name("signalEvents"); } Double_t value = nSigEvents->value(); signalEvents_->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0)); } void LauSimpleFitModel::setNBkgndEvents(LauAbsRValue* nBkgndEvents) { if ( nBkgndEvents == 0 ) { std::cerr << "ERROR in LauSimpleFitModel::setNBkgndEvents : The background yield LauParameter pointer is null." << std::endl; gSystem->Exit(EXIT_FAILURE); } if ( ! this->validBkgndClass( nBkgndEvents->name() ) ) { std::cerr << "ERROR in LauSimpleFitModel::setNBkgndEvents : Invalid background class \"" << nBkgndEvents->name() << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; gSystem->Exit(EXIT_FAILURE); } UInt_t bkgndID = this->bkgndClassID( nBkgndEvents->name() ); if ( bkgndEvents_[bkgndID] != 0 ) { std::cerr << "ERROR in LauSimpleFitModel::setNBkgndEvents : You are trying to overwrite the background yield." << std::endl; return; } nBkgndEvents->name( nBkgndEvents->name()+"Events" ); if ( nBkgndEvents->isLValue() ) { Double_t value = nBkgndEvents->value(); LauParameter* yield = dynamic_cast( nBkgndEvents ); yield->range(-2.0*(TMath::Abs(value)+1.0), 2.0*(TMath::Abs(value)+1.0)); } bkgndEvents_[bkgndID] = nBkgndEvents; } void LauSimpleFitModel::splitSignalComponent( const TH2* dpHisto, const Bool_t upperHalf, const Bool_t fluctuateBins, LauScfMap* scfMap ) { if ( useSCF_ == kTRUE ) { std::cerr << "ERROR in LauSimpleFitModel::splitSignalComponent : Have already setup SCF." << std::endl; return; } if ( dpHisto == 0 ) { std::cerr << "ERROR in LauSimpleFitModel::splitSignalComponent : The histogram pointer is null." 
<< std::endl; return; } const LauDaughters* daughters = sigDPModel_->getDaughters(); scfFracHist_ = new LauEffModel( daughters, 0 ); scfFracHist_->setEffHisto( dpHisto, kTRUE, fluctuateBins, 0.0, 0.0, upperHalf, daughters->squareDP() ); scfMap_ = scfMap; useSCF_ = kTRUE; useSCFHist_ = kTRUE; } void LauSimpleFitModel::splitSignalComponent( const Double_t scfFrac, const Bool_t fixed ) { if ( useSCF_ == kTRUE ) { std::cerr << "ERROR in LauSimpleFitModel::splitSignalComponent : Have already setup SCF." << std::endl; return; } scfFrac_.range( 0.0, 1.0 ); scfFrac_.value( scfFrac ); scfFrac_.initValue( scfFrac ); scfFrac_.genValue( scfFrac ); scfFrac_.fixed( fixed ); useSCF_ = kTRUE; useSCFHist_ = kFALSE; } void LauSimpleFitModel::setBkgndDPModel(const TString& bkgndClass, LauAbsBkgndDPModel* bkgndDPModel) { if (bkgndDPModel == 0) { std::cerr << "ERROR in LauSimpleFitModel::setBkgndDPModel : The model pointer is null." << std::endl; return; } // check that this background name is valid if ( ! this->validBkgndClass( bkgndClass ) ) { std::cerr << "ERROR in LauSimpleFitModel::setBkgndDPModel : Invalid background class \"" << bkgndClass << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; return; } UInt_t bkgndID = this->bkgndClassID( bkgndClass ); bkgndDPModels_[bkgndID] = bkgndDPModel; usingBkgnd_ = kTRUE; } void LauSimpleFitModel::setSignalPdf(LauAbsPdf* pdf) { if (pdf==0) { std::cerr << "ERROR in LauSimpleFitModel::setSignalPdf : The PDF pointer is null." << std::endl; return; } signalPdfs_.push_back(pdf); } void LauSimpleFitModel::setSCFPdf(LauAbsPdf* pdf) { if (pdf==0) { std::cerr << "ERROR in LauSimpleFitModel::setSCFPdf : The PDF pointer is null." << std::endl; return; } scfPdfs_.push_back(pdf); } void LauSimpleFitModel::setBkgndPdf(const TString& bkgndClass, LauAbsPdf* pdf) { if (pdf == 0) { std::cerr << "ERROR in LauSimpleFitModel::setBkgndPdf : The PDF pointer is null." << std::endl; return; } // check that this background name is valid if ( ! this->validBkgndClass( bkgndClass ) ) { std::cerr << "ERROR in LauSimpleFitModel::setBkgndPdf : Invalid background class \"" << bkgndClass << "\"." << std::endl; std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl; return; } UInt_t bkgndID = this->bkgndClassID( bkgndClass ); bkgndPdfs_[bkgndID].push_back(pdf); usingBkgnd_ = kTRUE; } void LauSimpleFitModel::setAmpCoeffSet(LauAbsCoeffSet* coeffSet) { // Resize the coeffPars vector if not already done if ( coeffPars_.empty() ) { coeffPars_.resize( sigDPModel_->getnTotAmp() ); for (std::vector::iterator iter = coeffPars_.begin(); iter != coeffPars_.end(); ++iter) { (*iter) = 0; } } // Is there a component called compName in the signal model? TString compName(coeffSet->name()); const Int_t index = sigDPModel_->resonanceIndex(compName); if ( index < 0 ) { std::cerr << "ERROR in LauSimpleFitModel::setAmpCoeffSet : Signal DP model doesn't contain component \"" << compName << "\"." << std::endl; return; } // Do we already have it in our list of names? if ( coeffPars_[index] != 0 && coeffPars_[index]->name() == compName) { std::cerr << "ERROR in LauSimpleFitModel::setAmpCoeffSet : Have already set coefficients for \"" << compName << "\"." 
<< std::endl; return; } coeffSet->index(index); coeffPars_[index] = coeffSet; ++nSigComp_; std::cout << "INFO in LauSimpleFitModel::setAmpCoeffSet : Added coefficients for component A"<< index << ": \"" << compName << "\" to the fit model." << std::endl; coeffSet->printParValues(); } void LauSimpleFitModel::initialise() { // Initialisation if (!this->useDP() && signalPdfs_.empty()) { std::cerr << "ERROR in LauSimpleFitModel::initialise : Signal model doesn't exist for any variable." << std::endl; gSystem->Exit(EXIT_FAILURE); } if (this->useDP()) { // Check that we have all the Dalitz-plot models if ( sigDPModel_ == 0 ) { std::cerr << "ERROR in LauSimpleFitModel::initialise : The pointer to the signal DP model is null.\n"; std::cerr << " : Removing the Dalitz Plot from the model." << std::endl; this->useDP(kFALSE); } if ( usingBkgnd_ ) { for (LauBkgndDPModelList::const_iterator dpmodel_iter = bkgndDPModels_.begin(); dpmodel_iter != bkgndDPModels_.end(); ++dpmodel_iter ) { if ( (*dpmodel_iter) == 0 ) { std::cerr << "ERROR in LauSimpleFitModel::initialise : The pointer to one of the background DP models is null.\n"; std::cerr << " : Removing the Dalitz Plot from the model." << std::endl; this->useDP(kFALSE); break; } } } // Need to check that the number of components we have and that the dynamics has matches up UInt_t nAmp = sigDPModel_->getnTotAmp(); if (nAmp != nSigComp_) { std::cerr << "ERROR in LauSimpleFitModel::initialise : Number of signal DP components with magnitude and phase set not right." << std::endl; gSystem->Exit(EXIT_FAILURE); } // From the initial parameter values calculate the coefficients // so they can be passed to the signal model this->updateCoeffs(); // If all is well, go ahead and initialise them this->initialiseDPModels(); } // Next check that, if a given component is being used we've got the // right number of PDFs for all the variables involved // TODO - should probably check variable names and so on as well UInt_t nsigpdfvars(0); for ( LauPdfList::const_iterator pdf_iter = signalPdfs_.begin(); pdf_iter != signalPdfs_.end(); ++pdf_iter ) { std::vector varNames = (*pdf_iter)->varNames(); for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { ++nsigpdfvars; } } } if (useSCF_) { UInt_t nscfpdfvars(0); for ( LauPdfList::const_iterator pdf_iter = scfPdfs_.begin(); pdf_iter != scfPdfs_.end(); ++pdf_iter ) { std::vector varNames = (*pdf_iter)->varNames(); for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { ++nscfpdfvars; } } } if (nscfpdfvars != nsigpdfvars) { std::cerr << "ERROR in LauSimpleFitModel::initialise : There are " << nsigpdfvars << " TM signal PDF variables but " << nscfpdfvars << " SCF signal PDF variables." 
<< std::endl; gSystem->Exit(EXIT_FAILURE); } } if (usingBkgnd_) { for (LauBkgndPdfsList::const_iterator bgclass_iter = bkgndPdfs_.begin(); bgclass_iter != bkgndPdfs_.end(); ++bgclass_iter) { UInt_t nbkgndpdfvars(0); const LauPdfList& pdfList = (*bgclass_iter); for ( LauPdfList::const_iterator pdf_iter = pdfList.begin(); pdf_iter != pdfList.end(); ++pdf_iter ) { std::vector varNames = (*pdf_iter)->varNames(); for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { ++nbkgndpdfvars; } } } if (nbkgndpdfvars != nsigpdfvars) { std::cerr << "ERROR in LauSimpleFitModel::initialise : There are " << nsigpdfvars << " signal PDF variables but " << nbkgndpdfvars << " bkgnd PDF variables." << std::endl; gSystem->Exit(EXIT_FAILURE); } } } // Clear the vectors of parameter information so we can start from scratch this->clearFitParVectors(); // Set the fit parameters for signal and background models this->setSignalDPParameters(); // Set the fit parameters for the various extra PDFs this->setExtraPdfParameters(); // Set the initial bg and signal events this->setFitNEvents(); // Check that we have the expected number of fit variables const LauParameterPList& fitVars = this->fitPars(); if (fitVars.size() != (nSigDPPar_ + nExtraPdfPar_ + nNormPar_)) { std::cerr << "ERROR in LauSimpleFitModel::initialise : Number of fit parameters not of expected size." << std::endl; gSystem->Exit(EXIT_FAILURE); } this->setExtraNtupleVars(); } void LauSimpleFitModel::initialiseDPModels() { std::cout << "INFO in LauSimpleFitModel::initialiseDPModels : Initialising DP models" << std::endl; sigDPModel_->initialise(coeffs_); if (usingBkgnd_ == kTRUE) { for (LauBkgndDPModelList::iterator iter = bkgndDPModels_.begin(); iter != bkgndDPModels_.end(); ++iter) { (*iter)->initialise(); } } } void LauSimpleFitModel::recalculateNormalisation() { //std::cout << "INFO in LauSimpleFitModel::recalculateNormalizationInDPModels : Recalc Norm in DP model" << std::endl; sigDPModel_->recalculateNormalisation(); sigDPModel_->modifyDataTree(); } void LauSimpleFitModel::setSignalDPParameters() { // Set the fit parameters for the signal model. nSigDPPar_ = 0; if ( ! this->useDP() ) { return; } std::cout << "INFO in LauSimpleFitModel::setSignalDPParameters : Setting the initial fit parameters for the signal DP model." << std::endl; // Place isobar coefficient parameters in vector of fit variables LauParameterPList& fitVars = this->fitPars(); for (UInt_t i = 0; i < nSigComp_; i++) { LauParameterPList pars = coeffPars_[i]->getParameters(); for (LauParameterPList::iterator iter = pars.begin(); iter != pars.end(); ++iter) { if ( !(*iter)->clone() ) { fitVars.push_back(*iter); ++nSigDPPar_; } } } // Obtain the resonance parameters and place them in the vector of fit variables and in a separate vector LauParameterPSet& resVars = this->resPars(); resVars.clear(); LauParameterPList& sigDPPars = sigDPModel_->getFloatingParameters(); for ( LauParameterPList::iterator iter = sigDPPars.begin(); iter != sigDPPars.end(); ++iter ) { if ( resVars.insert(*iter).second ) { fitVars.push_back(*iter); ++nSigDPPar_; } } } void LauSimpleFitModel::setExtraPdfParameters() { // Include all the parameters of the various PDFs in the fit // NB all of them are passed to the fit, even though some have been fixed through parameter.fixed(kTRUE) // With the new "cloned parameter" scheme only "original" parameters are passed to the fit. 
// Their clones are updated automatically when the originals are updated. nExtraPdfPar_ = 0; nExtraPdfPar_ += this->addFitParameters(signalPdfs_); if (useSCF_ == kTRUE) { nExtraPdfPar_ += this->addFitParameters(scfPdfs_); } if (usingBkgnd_ == kTRUE) { for (LauBkgndPdfsList::iterator iter = bkgndPdfs_.begin(); iter != bkgndPdfs_.end(); ++iter) { nExtraPdfPar_ += this->addFitParameters(*iter); } } } void LauSimpleFitModel::setFitNEvents() { if ( signalEvents_ == 0 ) { std::cerr << "ERROR in LauSimpleFitModel::setFitNEvents : Signal yield not defined." << std::endl; return; } nNormPar_ = 0; // initialise the total number of events to be the sum of all the hypotheses Double_t nTotEvts = signalEvents_->value(); for (LauBkgndYieldList::const_iterator iter = bkgndEvents_.begin(); iter != bkgndEvents_.end(); ++iter) { nTotEvts += (*iter)->value(); if ( (*iter) == 0 ) { std::cerr << "ERROR in LauSimpleFitModel::setFitNEvents : Background yield not defined." << std::endl; return; } } this->eventsPerExpt(TMath::FloorNint(nTotEvts)); LauParameterPList& fitVars = this->fitPars(); // if doing an extended ML fit add the number of signal events into the fit parameters if (this->doEMLFit()) { std::cout << "INFO in LauSimpleFitModel::setFitNEvents : Initialising number of events for signal and background components..." << std::endl; // add the signal events to the list of fit parameters fitVars.push_back(signalEvents_); ++nNormPar_; } else { std::cout << "INFO in LauSimpleFitModel::setFitNEvents : Initialising number of events for background components (and hence signal)..." << std::endl; } if (useSCF_ && !useSCFHist_) { fitVars.push_back(&scfFrac_); ++nNormPar_; } if (usingBkgnd_ == kTRUE) { for (LauBkgndYieldList::iterator iter = bkgndEvents_.begin(); iter != bkgndEvents_.end(); ++iter) { std::vector parameters = (*iter)->getPars(); for ( LauParameter* parameter : parameters ) { if ( ! parameter->clone() ) { fitVars.push_back(parameter); ++nNormPar_; } } } } } void LauSimpleFitModel::setExtraNtupleVars() { // Set-up other parameters derived from the fit results, e.g. fit fractions. if (this->useDP() != kTRUE) { return; } // First clear the vectors so we start from scratch this->clearExtraVarVectors(); LauParameterList& extraVars = this->extraPars(); // Add a fit fraction for each signal component fitFrac_ = sigDPModel_->getFitFractions(); if (fitFrac_.size() != nSigComp_) { std::cerr << "ERROR in LauSimpleFitModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: " << fitFrac_.size() << std::endl; gSystem->Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } // Add the fit fraction that has not been corrected for the efficiency for each signal component fitFracEffUnCorr_ = sigDPModel_->getFitFractionsEfficiencyUncorrected(); if (fitFracEffUnCorr_.size() != nSigComp_) { std::cerr << "ERROR in LauSimpleFitModel::setExtraNtupleVars : Initial Fit Fraction array of unexpected dimension: " << fitFracEffUnCorr_.size() << std::endl; gSystem->Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i = 0; i < nSigComp_; i++) { for (UInt_t j = i; j < nSigComp_; j++) { extraVars.push_back(fitFrac_[i][j]); extraVars.push_back(fitFracEffUnCorr_[i][j]); } } // Store any extra parameters/quantities from the DP model (e.g. 
K-matrix total fit fractions) std::vector extraParams = sigDPModel_->getExtraParameters(); std::vector::iterator extraIter; for (extraIter = extraParams.begin(); extraIter != extraParams.end(); ++extraIter) { LauParameter extraParameter = (*extraIter); extraVars.push_back(extraParameter); } // Now add in the DP efficiency value Double_t initMeanEff = sigDPModel_->getMeanEff().initValue(); meanEff_.value(initMeanEff); meanEff_.initValue(initMeanEff); meanEff_.genValue(initMeanEff); extraVars.push_back(meanEff_); // Also add in the DP rate Double_t initDPRate = sigDPModel_->getDPRate().initValue(); dpRate_.value(initDPRate); dpRate_.initValue(initDPRate); dpRate_.genValue(initDPRate); extraVars.push_back(dpRate_); } void LauSimpleFitModel::finaliseFitResults(const TString& tablePrefixName) { // Retrieve parameters from the fit results for calculations and toy generation // and eventually store these in output root ntuples/text files // Now take the fit parameters and update them as necessary // e.g. to make mag > 0.0 and phase in the right range. // This function will also calculate any other values, such as the // fit fractions, using any errors provided by fitParErrors as appropriate. // Also obtain the pull values: (measured - generated)/(average error) if (this->useDP() == kTRUE) { for (UInt_t i = 0; i < nSigComp_; i++) { // Check whether we have mag > 0.0, and phase in the right range coeffPars_[i]->finaliseValues(); } } // update the pulls on the events if (this->doEMLFit()) { signalEvents_->updatePull(); } if (useSCF_ && !useSCFHist_) { scfFrac_.updatePull(); } if (usingBkgnd_ == kTRUE) { for (LauBkgndYieldList::iterator iter = bkgndEvents_.begin(); iter != bkgndEvents_.end(); ++iter) { std::vector parameters = (*iter)->getPars(); for ( LauParameter* parameter : parameters ) { parameter->updatePull(); } } } // Update the pulls on all the extra PDFs' parameters this->updateFitParameters(signalPdfs_); if (useSCF_ == kTRUE) { this->updateFitParameters(scfPdfs_); } if (usingBkgnd_ == kTRUE) { for (LauBkgndPdfsList::iterator iter = bkgndPdfs_.begin(); iter != bkgndPdfs_.end(); ++iter) { this->updateFitParameters(*iter); } } // Fill the fit results to the ntuple for current experiment // update the coefficients and then calculate the fit fractions if (this->useDP() == kTRUE) { this->updateCoeffs(); sigDPModel_->updateCoeffs(coeffs_); sigDPModel_->calcExtraInfo(); LauParArray fitFrac = sigDPModel_->getFitFractions(); if (fitFrac.size() != nSigComp_) { std::cerr << "ERROR in LauSimpleFitModel::finaliseFitResults : Fit Fraction array of unexpected dimension: " << fitFrac.size() << std::endl; gSystem->Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } LauParArray fitFracEffUnCorr = sigDPModel_->getFitFractionsEfficiencyUncorrected(); if (fitFracEffUnCorr.size() != nSigComp_) { std::cerr << "ERROR in LauSimpleFitModel::finaliseFitResults : Fit Fraction array of unexpected dimension: " << fitFracEffUnCorr.size() << std::endl; gSystem->Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetMeanEff().value()); dpRate_.value(sigDPModel_->getDPRate().value()); this->clearExtraVarVectors(); LauParameterList& extraVars = this->extraPars(); // Then store the final fit parameters, and any extra parameters for // the signal model (e.g. 
fit fractions) for (UInt_t i(0); i extraParams = sigDPModel_->getExtraParameters(); std::vector::iterator extraIter; for (extraIter = extraParams.begin(); extraIter != extraParams.end(); ++extraIter) { LauParameter extraParameter = (*extraIter); extraVars.push_back(extraParameter); } // Now add in the DP efficiency value extraVars.push_back(meanEff_); // Also add in the DP rate extraVars.push_back(dpRate_); this->printFitFractions(std::cout); } const LauParameterPList& fitVars = this->fitPars(); const LauParameterList& extraVars = this->extraPars(); LauFitNtuple* ntuple = this->fitNtuple(); ntuple->storeParsAndErrors(fitVars, extraVars); // find out the correlation matrix for the parameters ntuple->storeCorrMatrix(this->iExpt(), this->fitStatus(), this->covarianceMatrix()); // Fill the data into ntuple ntuple->updateFitNtuple(); // Print out the partial fit fractions, phases and the // averaged efficiency, reweighted by the dynamics (and anything else) if (this->writeLatexTable()) { TString sigOutFileName(tablePrefixName); sigOutFileName += "_"; sigOutFileName += this->iExpt(); sigOutFileName += "Expt.tex"; this->writeOutTable(sigOutFileName); } } void LauSimpleFitModel::printFitFractions(std::ostream& output) { // Print out Fit Fractions, total DP rate and mean efficiency for (UInt_t i = 0; i < nSigComp_; i++) { output << "FitFraction for component " << i << " (" << coeffPars_[i]->name() << ") = " << fitFrac_[i][i] << std::endl; } output << "Overall DP rate (integral of matrix element squared) = " << dpRate_ << std::endl; output << "Average efficiency weighted by whole DP dynamics = " << meanEff_ << std::endl; } void LauSimpleFitModel::writeOutTable(const TString& outputFile) { // Write out the results of the fit to a tex-readable table // TODO - need to include the yields in this table std::ofstream fout(outputFile); LauPrint print; std::cout << "INFO in LauSimpleFitModel::writeOutTable : Writing out results of the fit to the tex file " << outputFile << std::endl; if (this->useDP() == kTRUE) { // print the fit coefficients in one table coeffPars_.front()->printTableHeading(fout); for (UInt_t i = 0; i < nSigComp_; i++) { coeffPars_[i]->printTableRow(fout); } fout << "\\hline" << std::endl; fout << "\\end{tabular}" << std::endl << std::endl; // print the fit fractions in another fout << "\\begin{tabular}{|l|c|}" << std::endl; fout << "\\hline" << std::endl; fout << "Component & FitFraction \\\\" << std::endl; fout << "\\hline" << std::endl; Double_t fitFracSum(0.0); for (UInt_t i = 0; i < nSigComp_; i++) { TString resName = coeffPars_[i]->name(); resName = resName.ReplaceAll("_", "\\_"); fout << resName << " & $"; Double_t fitFrac = fitFrac_[i][i].value(); fitFracSum += fitFrac; print.printFormat(fout, fitFrac); fout << "$ \\\\" << std::endl; } fout << "\\hline" << std::endl; // Also print out sum of fit fractions fout << "Fit Fraction Sum & $"; print.printFormat(fout, fitFracSum); fout << "$ \\\\" << std::endl; fout << "\\hline" << std::endl; fout << "DP rate & $"; print.printFormat(fout, dpRate_.value()); fout << "$ \\\\" << std::endl; fout << "\\hline" << std::endl; fout << "$< \\varepsilon >$ & $"; print.printFormat(fout, meanEff_.value()); fout << "$ \\\\" << std::endl; fout << "\\hline" << std::endl; fout << "\\end{tabular}" << std::endl << std::endl; } if (!signalPdfs_.empty()) { fout << "\\begin{tabular}{|l|c|}" << std::endl; fout << "\\hline" << std::endl; if (useSCF_ == kTRUE) { fout << "\\Extra TM Signal PDFs' Parameters: & \\\\" << std::endl; } else { fout << "\\Extra Signal 
PDFs' Parameters: & \\\\" << std::endl; } this->printFitParameters(signalPdfs_, fout); if (useSCF_ == kTRUE && !scfPdfs_.empty()) { fout << "\\hline" << std::endl; fout << "\\Extra SCF Signal PDFs' Parameters: & \\\\" << std::endl; this->printFitParameters(scfPdfs_, fout); } if (usingBkgnd_ == kTRUE && !bkgndPdfs_.empty()) { fout << "\\hline" << std::endl; fout << "\\Extra Background PDFs' Parameters: & \\\\" << std::endl; for (LauBkgndPdfsList::const_iterator iter = bkgndPdfs_.begin(); iter != bkgndPdfs_.end(); ++iter) { this->printFitParameters(*iter, fout); } } fout << "\\hline \n\\end{tabular}" << std::endl << std::endl; } } void LauSimpleFitModel::checkInitFitParams() { // Update the number of signal events to be total-sum(background events) this->updateSigEvents(); // Check whether we want to have randomised initial fit parameters for the signal model if (this->useRandomInitFitPars() == kTRUE) { std::cout << "INFO in LauSimpleFitModel::checkInitFitParams : Setting random parameters for the signal DP model" << std::endl; this->randomiseInitFitPars(); } } void LauSimpleFitModel::randomiseInitFitPars() { // Only randomise those parameters that are not fixed! std::cout << "INFO in LauSimpleFitModel::randomiseInitFitPars : Randomising the initial fit magnitudes and phases of the resonances..." << std::endl; for (UInt_t i = 0; i < nSigComp_; i++) { coeffPars_[i]->randomiseInitValues(); } } std::pair LauSimpleFitModel::eventsToGenerate() { // Determine the number of events to generate for each hypothesis // If we're smearing then smear each one individually LauGenInfo nEvtsGen; // Keep track of whether any yield or asymmetry parameters are blinded Bool_t blind = kFALSE; // Signal if ( signalEvents_->blind() ) { blind = kTRUE; } Double_t evtWeight(1.0); Double_t nEvts = signalEvents_->genValue(); if ( nEvts < 0 ) { evtWeight = -1.0; nEvts = TMath::Abs( nEvts ); } Int_t nEvtsToGen { static_cast(nEvts) }; if (this->doPoissonSmearing()) { nEvtsToGen = LauRandom::randomFun()->Poisson(nEvts); } nEvtsGen["signal"] = std::make_pair( nEvtsToGen, evtWeight ); // Backgrounds const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { const LauAbsRValue* evtsPar = bkgndEvents_[bkgndID]; if ( evtsPar->blind() ) { blind = kTRUE; } evtWeight = 1.0; nEvts = evtsPar->genValue(); if ( nEvts < 0 ) { evtWeight = -1.0; nEvts = TMath::Abs( nEvts ); } nEvtsToGen = static_cast(nEvts); if (this->doPoissonSmearing()) { nEvtsToGen = LauRandom::randomFun()->Poisson(nEvts); } const TString& bkgndClass = this->bkgndClassName(bkgndID); nEvtsGen[bkgndClass] = std::make_pair( nEvtsToGen, evtWeight ); } return std::make_pair( nEvtsGen, blind ); } Bool_t LauSimpleFitModel::genExpt() { // Routine to generate toy Monte Carlo events according to the various models we have defined. 
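	// In outline: decide how many events of each species to generate (optionally
	// Poisson-smeared), generate each event either from the DP model or from an
	// embedded sample, fill the generation ntuple, and finally check the toy MC
	// and, if requested, record the fit fractions for the LaTeX table.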
// Determine the number of events to generate for each hypothesis std::pair info = this->eventsToGenerate(); LauGenInfo nEvts = info.first; const Bool_t blind = info.second; Bool_t genOK(kTRUE); Int_t evtNum(0); const UInt_t nBkgnds = this->nBkgndClasses(); std::vector bkgndClassNames(nBkgnds); std::vector bkgndClassNamesGen(nBkgnds); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name( this->bkgndClassName(iBkgnd) ); bkgndClassNames[iBkgnd] = name; bkgndClassNamesGen[iBkgnd] = "gen"+name; } const Bool_t storeSCFTruthInfo = ( useSCF_ || ( this->enableEmbedding() && signalTree_ != 0 && signalTree_->haveBranch("mcMatch") ) ); // Loop over the hypotheses and generate the requested number of events for each one for (LauGenInfo::const_iterator iter = nEvts.begin(); iter != nEvts.end(); ++iter) { const TString& type(iter->first); Int_t nEvtsGen( iter->second.first ); Double_t evtWeight( iter->second.second ); for (Int_t iEvt(0); iEvtsetGenNtupleDoubleBranchValue( "evtWeight", evtWeight ); // Add efficiency information this->setGenNtupleDoubleBranchValue( "efficiency", 1 ); if (type == "signal") { this->setGenNtupleIntegerBranchValue("genSig",1); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { this->setGenNtupleIntegerBranchValue( bkgndClassNamesGen[iBkgnd], 0 ); } genOK = this->generateSignalEvent(); this->setGenNtupleDoubleBranchValue( "efficiency", sigDPModel_->getEvtEff() ); } else { this->setGenNtupleIntegerBranchValue("genSig",0); if ( storeSCFTruthInfo ) { this->setGenNtupleIntegerBranchValue("genTMSig",0); this->setGenNtupleIntegerBranchValue("genSCFSig",0); } UInt_t bkgndID(0); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { Int_t gen(0); if ( bkgndClassNames[iBkgnd] == type ) { gen = 1; bkgndID = iBkgnd; } this->setGenNtupleIntegerBranchValue( bkgndClassNamesGen[iBkgnd], gen ); } genOK = this->generateBkgndEvent(bkgndID); } if (!genOK) { // If there was a problem with the generation then break out and return. // The problem model will have adjusted itself so that all should be OK next time. break; } if (this->useDP() == kTRUE) { this->setDPBranchValues(); } // Store the event number (within this experiment) this->setGenNtupleIntegerBranchValue("iEvtWithinExpt",evtNum); // and then increment it ++evtNum; this->fillGenNtupleBranches(); if ( !blind && (iEvt%500 == 0) ) { std::cout << "INFO in LauSimpleFitModel::genExpt : Generated event number " << iEvt << " out of " << nEvtsGen << " " << type << " events." 
<< std::endl; } } if (!genOK) { break; } } if (this->useDP() && genOK) { sigDPModel_->checkToyMC(kTRUE,kTRUE); // Get the fit fractions if they're to be written into the latex table if (this->writeLatexTable()) { LauParArray fitFrac = sigDPModel_->getFitFractions(); if (fitFrac.size() != nSigComp_) { std::cerr << "ERROR in LauSimpleFitModel::genExpt : Fit Fraction array of unexpected dimension: " << fitFrac.size() << std::endl; gSystem->Exit(EXIT_FAILURE); } for (UInt_t i(0); iExit(EXIT_FAILURE); } } for (UInt_t i(0); igetMeanEff().value()); dpRate_.value(sigDPModel_->getDPRate().value()); } } // If we're reusing embedded events or if the generation is being // reset then clear the lists of used events if (reuseSignal_ || !genOK) { if (signalTree_) { signalTree_->clearUsedList(); } } for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { LauEmbeddedData* data = bkgndTree_[bkgndID]; if (reuseBkgnd_[bkgndID] || !genOK) { if (data) { data->clearUsedList(); } } } return genOK; } Bool_t LauSimpleFitModel::generateSignalEvent() { // Generate signal event Bool_t genOK(kTRUE); Bool_t genSCF(kFALSE); if (this->useDP()) { if (this->enableEmbedding() && signalTree_) { if (useReweighting_) { // Select a (random) event from the generated data. Then store the // reconstructed DP co-ords, together with other pdf information, // as the embedded data. genOK = signalTree_->getReweightedEvent(sigDPModel_); } else { signalTree_->getEmbeddedEvent(kinematics_); } if (signalTree_->haveBranch("mcMatch")) { Int_t match = static_cast(signalTree_->getValue("mcMatch")); if (match) { this->setGenNtupleIntegerBranchValue("genTMSig",1); this->setGenNtupleIntegerBranchValue("genSCFSig",0); genSCF = kFALSE; } else { this->setGenNtupleIntegerBranchValue("genTMSig",0); this->setGenNtupleIntegerBranchValue("genSCFSig",1); genSCF = kTRUE; } } } else { genOK = sigDPModel_->generate(); if ( genOK && useSCF_ ) { Double_t frac(0.0); if ( useSCFHist_ ) { frac = scfFracHist_->calcEfficiency( kinematics_ ); } else { frac = scfFrac_.genValue(); } if ( frac < LauRandom::randomFun()->Rndm() ) { this->setGenNtupleIntegerBranchValue("genTMSig",1); this->setGenNtupleIntegerBranchValue("genSCFSig",0); genSCF = kFALSE; } else { this->setGenNtupleIntegerBranchValue("genTMSig",0); this->setGenNtupleIntegerBranchValue("genSCFSig",1); genSCF = kTRUE; // Optionally smear the DP position // of the SCF event if ( scfMap_ != 0 ) { Double_t xCoord(0.0), yCoord(0.0); if ( kinematics_->squareDP() ) { xCoord = kinematics_->getmPrime(); yCoord = kinematics_->getThetaPrime(); } else { xCoord = kinematics_->getm13Sq(); yCoord = kinematics_->getm23Sq(); } // Find the bin number where this event is generated Int_t binNo = scfMap_->binNumber( xCoord, yCoord ); // Retrieve the migration histogram TH2* histo = scfMap_->trueHist( binNo ); const LauAbsEffModel * effModel = sigDPModel_->getEffModel(); do { // Get a random point from the histogram histo->GetRandom2( xCoord, yCoord ); // Update the kinematics if ( kinematics_->squareDP() ) { kinematics_->updateSqDPKinematics( xCoord, yCoord ); } else { kinematics_->updateKinematics( xCoord, yCoord ); } } while ( ! 
effModel->passVeto( kinematics_ ) ); } } } } } else { if (this->enableEmbedding() && signalTree_) { signalTree_->getEmbeddedEvent(0); if (signalTree_->haveBranch("mcMatch")) { Int_t match = static_cast(signalTree_->getValue("mcMatch")); if (match) { this->setGenNtupleIntegerBranchValue("genTMSig",1); this->setGenNtupleIntegerBranchValue("genSCFSig",0); genSCF = kFALSE; } else { this->setGenNtupleIntegerBranchValue("genTMSig",0); this->setGenNtupleIntegerBranchValue("genSCFSig",1); genSCF = kTRUE; } } } else if ( useSCF_ ) { Double_t frac = scfFrac_.genValue(); if ( frac < LauRandom::randomFun()->Rndm() ) { this->setGenNtupleIntegerBranchValue("genTMSig",1); this->setGenNtupleIntegerBranchValue("genSCFSig",0); genSCF = kFALSE; } else { this->setGenNtupleIntegerBranchValue("genTMSig",0); this->setGenNtupleIntegerBranchValue("genSCFSig",1); genSCF = kTRUE; } } } if (genOK) { if ( useSCF_ ) { if ( genSCF ) { this->generateExtraPdfValues(&scfPdfs_, signalTree_); } else { this->generateExtraPdfValues(&signalPdfs_, signalTree_); } } else { this->generateExtraPdfValues(&signalPdfs_, signalTree_); } } // Check for problems with the embedding if (this->enableEmbedding() && signalTree_ && (signalTree_->nEvents() == signalTree_->nUsedEvents())) { std::cerr << "WARNING in LauSimpleFitModel::generateSignalEvent : Source of embedded signal events used up, clearing the list of used events." << std::endl; signalTree_->clearUsedList(); } return genOK; } Bool_t LauSimpleFitModel::generateBkgndEvent(UInt_t bkgndID) { // Generate background event Bool_t genOK(kTRUE); LauAbsBkgndDPModel* model = bkgndDPModels_[bkgndID]; LauEmbeddedData* embeddedData(0); if (this->enableEmbedding()) { embeddedData = bkgndTree_[bkgndID]; } LauPdfList* extraPdfs = &bkgndPdfs_[bkgndID]; if (this->useDP()) { if (embeddedData) { embeddedData->getEmbeddedEvent(kinematics_); } else { if (model == 0) { std::cerr << "ERROR in LauSimpleFitModel::generateBkgndEvent : Can't find the DP model for background class \"" << this->bkgndClassName(bkgndID) << "\"." << std::endl; gSystem->Exit(EXIT_FAILURE); } genOK = model->generate(); } } else { if (embeddedData) { embeddedData->getEmbeddedEvent(0); } } if (genOK) { this->generateExtraPdfValues(extraPdfs, embeddedData); } // Check for problems with the embedding if (embeddedData && (embeddedData->nEvents() == embeddedData->nUsedEvents())) { std::cerr << "WARNING in LauSimpleFitModel::generateBkgndEvent : Source of embedded " << this->bkgndClassName(bkgndID) << " events used up, clearing the list of used events." 
<< std::endl; embeddedData->clearUsedList(); } return genOK; } void LauSimpleFitModel::setupGenNtupleBranches() { // Setup the required ntuple branches this->addGenNtupleDoubleBranch("evtWeight"); this->addGenNtupleIntegerBranch("genSig"); this->addGenNtupleDoubleBranch("efficiency"); if ( useSCF_ || ( this->enableEmbedding() && signalTree_ != 0 && signalTree_->haveBranch("mcMatch") ) ) { this->addGenNtupleIntegerBranch("genTMSig"); this->addGenNtupleIntegerBranch("genSCFSig"); } const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) { TString name( this->bkgndClassName(iBkgnd) ); name.Prepend("gen"); this->addGenNtupleIntegerBranch(name); } if (this->useDP() == kTRUE) { this->addGenNtupleDoubleBranch("m12"); this->addGenNtupleDoubleBranch("m23"); this->addGenNtupleDoubleBranch("m13"); this->addGenNtupleDoubleBranch("m12Sq"); this->addGenNtupleDoubleBranch("m23Sq"); this->addGenNtupleDoubleBranch("m13Sq"); this->addGenNtupleDoubleBranch("cosHel12"); this->addGenNtupleDoubleBranch("cosHel23"); this->addGenNtupleDoubleBranch("cosHel13"); if (kinematics_->squareDP()) { this->addGenNtupleDoubleBranch("mPrime"); this->addGenNtupleDoubleBranch("thPrime"); } } for (LauPdfList::const_iterator pdf_iter = signalPdfs_.begin(); pdf_iter != signalPdfs_.end(); ++pdf_iter) { std::vector varNames = (*pdf_iter)->varNames(); for ( std::vector::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) { if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) { this->addGenNtupleDoubleBranch( (*var_iter) ); } } } } void LauSimpleFitModel::setDPBranchValues() { // Store all the DP information this->setGenNtupleDoubleBranchValue("m12", kinematics_->getm12()); this->setGenNtupleDoubleBranchValue("m23", kinematics_->getm23()); this->setGenNtupleDoubleBranchValue("m13", kinematics_->getm13()); this->setGenNtupleDoubleBranchValue("m12Sq", kinematics_->getm12Sq()); this->setGenNtupleDoubleBranchValue("m23Sq", kinematics_->getm23Sq()); this->setGenNtupleDoubleBranchValue("m13Sq", kinematics_->getm13Sq()); this->setGenNtupleDoubleBranchValue("cosHel12", kinematics_->getc12()); this->setGenNtupleDoubleBranchValue("cosHel23", kinematics_->getc23()); this->setGenNtupleDoubleBranchValue("cosHel13", kinematics_->getc13()); if (kinematics_->squareDP()) { this->setGenNtupleDoubleBranchValue("mPrime", kinematics_->getmPrime()); this->setGenNtupleDoubleBranchValue("thPrime", kinematics_->getThetaPrime()); } } void LauSimpleFitModel::generateExtraPdfValues(LauPdfList* extraPdfs, LauEmbeddedData* embeddedData) { // Generate from the extra PDFs if (extraPdfs) { for (LauPdfList::iterator pdf_iter = extraPdfs->begin(); pdf_iter != extraPdfs->end(); ++pdf_iter) { LauFitData genValues; if (embeddedData) { genValues = embeddedData->getValues( (*pdf_iter)->varNames() ); } else { genValues = (*pdf_iter)->generate(kinematics_); } for ( LauFitData::const_iterator var_iter = genValues.begin(); var_iter != genValues.end(); ++var_iter ) { TString varName = var_iter->first; if ( varName != "m13Sq" && varName != "m23Sq" ) { Double_t value = var_iter->second; this->setGenNtupleDoubleBranchValue(varName,value); } } } } } void LauSimpleFitModel::propagateParUpdates() { // Update the total normalisation for the signal likelihood if (this->useDP() == kTRUE) { this->updateCoeffs(); sigDPModel_->updateCoeffs(coeffs_); } // Update the signal events from the background events if not doing an extended fit if ( !this->doEMLFit() && !signalEvents_->fixed() ) { this->updateSigEvents(); 
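		// In a non-extended fit the signal yield is not floated directly: it is
		// recomputed as N_total minus the sum of the background yields each time
		// Minuit updates those yields (see updateSigEvents below).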
} } void LauSimpleFitModel::updateSigEvents() { // The background parameters will have been set from Minuit. // We need to update the signal events using these. Double_t nTotEvts = this->eventsPerExpt(); signalEvents_->range(-2.0*nTotEvts,2.0*nTotEvts); for (LauBkgndYieldList::iterator iter = bkgndEvents_.begin(); iter != bkgndEvents_.end(); ++iter) { LauAbsRValue* nBkgndEvents = (*iter); if ( nBkgndEvents->isLValue() ) { LauParameter* yield = dynamic_cast( nBkgndEvents ); yield->range(-2.0*nTotEvts,2.0*nTotEvts); } } if ( signalEvents_->fixed() ) { return; } // Subtract background events (if any) from signal. Double_t signalEvents = nTotEvts; if ( usingBkgnd_ == kTRUE ) { for (LauBkgndYieldList::const_iterator iter = bkgndEvents_.begin(); iter != bkgndEvents_.end(); ++iter) { signalEvents -= (*iter)->value(); } } signalEvents_->value(signalEvents); } void LauSimpleFitModel::cacheInputFitVars() { // Fill the internal data trees of the signal and backgrond models. LauFitDataTree* inputFitData = this->fitData(); // First the Dalitz plot variables (m_ij^2) if (this->useDP() == kTRUE) { // need to append SCF smearing bins before caching DP amplitudes if ( scfMap_ != 0 ) { this->appendBinCentres( inputFitData ); } sigDPModel_->fillDataTree(*inputFitData); if (usingBkgnd_ == kTRUE) { for (LauBkgndDPModelList::iterator iter = bkgndDPModels_.begin(); iter != bkgndDPModels_.end(); ++iter) { (*iter)->fillDataTree(*inputFitData); } } } // ...and then the extra PDFs this->cacheInfo(signalPdfs_, *inputFitData); this->cacheInfo(scfPdfs_, *inputFitData); for (LauBkgndPdfsList::iterator iter = bkgndPdfs_.begin(); iter != bkgndPdfs_.end(); ++iter) { this->cacheInfo((*iter), *inputFitData); } // and finally the SCF fractions and jacobians if ( useSCF_ && useSCFHist_ ) { if ( !inputFitData->haveBranch( "m13Sq" ) || !inputFitData->haveBranch( "m23Sq" ) ) { std::cerr << "ERROR in LauSimpleFitModel::cacheInputFitVars : Input data does not contain DP branches and so can't cache the SCF fraction." << std::endl; gSystem->Exit(EXIT_FAILURE); } UInt_t nEvents = inputFitData->nEvents(); recoSCFFracs_.clear(); recoSCFFracs_.reserve( nEvents ); if ( kinematics_->squareDP() ) { recoJacobians_.clear(); recoJacobians_.reserve( nEvents ); } for (UInt_t iEvt = 0; iEvt < nEvents; iEvt++) { const LauFitData& dataValues = inputFitData->getData(iEvt); LauFitData::const_iterator m13_iter = dataValues.find("m13Sq"); LauFitData::const_iterator m23_iter = dataValues.find("m23Sq"); kinematics_->updateKinematics( m13_iter->second, m23_iter->second ); Double_t scfFrac = scfFracHist_->calcEfficiency( kinematics_ ); recoSCFFracs_.push_back( scfFrac ); if ( kinematics_->squareDP() ) { recoJacobians_.push_back( kinematics_->calcSqDPJacobian() ); } } } } void LauSimpleFitModel::appendBinCentres( LauFitDataTree* inputData ) { // We'll be caching the DP amplitudes and efficiencies of the centres of the true bins. // To do so, we attach some fake points at the end of inputData, the number of the entry // minus the total number of events corresponding to the number of the histogram for that // given true bin in the LauScfMap object. (What this means is that when Laura is provided with // the LauScfMap object by the user, it's the latter who has to make sure that it contains the // right number of histograms and in exactly the right order!) 
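	// Illustrative example (numbers are hypothetical): with 1000 data events and a
	// map containing 25 true bins, the fake point for true bin k is appended as
	// entry 1000 + k, which is why getEvtSCFDPLikelihood() retrieves the cached
	// amplitude via calcLikelihoodInfo( nDataEvents + trueBin ).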
// Get the x and y co-ordinates of the bin centres std::vector binCentresXCoords; std::vector binCentresYCoords; scfMap_->listBinCentres(binCentresXCoords, binCentresYCoords); // The SCF histograms could be in square Dalitz plot histograms. // The dynamics takes normal Dalitz plot coords, so we might have to convert them back. Bool_t sqDP = kinematics_->squareDP(); UInt_t nBins = binCentresXCoords.size(); fakeSCFFracs_.clear(); fakeSCFFracs_.reserve( nBins ); if ( sqDP ) { fakeJacobians_.clear(); fakeJacobians_.reserve( nBins ); } for (UInt_t iBin = 0; iBin < nBins; ++iBin) { if ( sqDP ) { kinematics_->updateSqDPKinematics(binCentresXCoords[iBin],binCentresYCoords[iBin]); binCentresXCoords[iBin] = kinematics_->getm13Sq(); binCentresYCoords[iBin] = kinematics_->getm23Sq(); fakeJacobians_.push_back( kinematics_->calcSqDPJacobian() ); } else { kinematics_->updateKinematics(binCentresXCoords[iBin],binCentresYCoords[iBin]); } fakeSCFFracs_.push_back( scfFracHist_->calcEfficiency( kinematics_ ) ); } // Set up inputFitVars_ object to hold the fake events inputData->appendFakePoints(binCentresXCoords,binCentresYCoords); } Double_t LauSimpleFitModel::getTotEvtLikelihood(UInt_t iEvt) { // Get the DP likelihood for signal and backgrounds this->getEvtDPLikelihood(iEvt); // Get the combined extra PDFs likelihood for signal and backgrounds this->getEvtExtraLikelihoods(iEvt); // If appropriate, combine the TM and SCF likelihoods Double_t sigLike = sigDPLike_ * sigExtraLike_; if ( useSCF_ ) { Double_t scfFrac(0.0); if (useSCFHist_) { scfFrac = recoSCFFracs_[iEvt]; } else { scfFrac = scfFrac_.unblindValue(); } sigLike *= (1.0 - scfFrac); if ( (scfMap_ != 0) && (this->useDP() == kTRUE) ) { // if we're smearing the SCF DP PDF then the SCF frac // is already included in the SCF DP likelihood sigLike += (scfDPLike_ * scfExtraLike_); } else { sigLike += (scfFrac * scfDPLike_ * scfExtraLike_); } } // Construct the total event likelihood Double_t likelihood = signalEvents_->unblindValue() * sigLike; const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { likelihood += (bkgndEvents_[bkgndID]->unblindValue() * bkgndDPLike_[bkgndID] * bkgndExtraLike_[bkgndID]); } return likelihood; } Double_t LauSimpleFitModel::getEventSum() const { Double_t eventSum(0.0); eventSum += signalEvents_->unblindValue(); for (LauBkgndYieldList::const_iterator iter = bkgndEvents_.begin(); iter != bkgndEvents_.end(); ++iter) { eventSum += (*iter)->unblindValue(); } return eventSum; } void LauSimpleFitModel::getEvtDPLikelihood(UInt_t iEvt) { // Function to return the signal and background likelihoods for the // Dalitz plot for the given event evtNo. if (this->useDP() == kTRUE) { sigDPModel_->calcLikelihoodInfo(iEvt); sigDPLike_ = sigDPModel_->getEvtLikelihood(); if ( useSCF_ == kTRUE ) { if ( scfMap_ == 0 ) { // we're not smearing the SCF DP position // so the likelihood is the same as the TM scfDPLike_ = sigDPLike_; } else { // calculate the smeared SCF DP likelihood scfDPLike_ = this->getEvtSCFDPLikelihood(iEvt); } } const UInt_t nBkgnds = this->nBkgndClasses(); for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) { if (usingBkgnd_ == kTRUE) { bkgndDPLike_[bkgndID] = bkgndDPModels_[bkgndID]->getLikelihood(iEvt); } else { bkgndDPLike_[bkgndID] = 0.0; } } } else { // There's always going to be a term in the likelihood for the // signal, so we'd better not zero it. 
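		// A value of 1.0 leaves the extra-PDF likelihoods unchanged in
		// getTotEvtLikelihood(), whereas 0.0 removes the corresponding background
		// term entirely when no background classes are in use.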
Double_t LauSimpleFitModel::getEventSum() const
{
    Double_t eventSum(0.0);
    eventSum += signalEvents_->unblindValue();
    for (LauBkgndYieldList::const_iterator iter = bkgndEvents_.begin(); iter != bkgndEvents_.end(); ++iter) {
        eventSum += (*iter)->unblindValue();
    }
    return eventSum;
}

void LauSimpleFitModel::getEvtDPLikelihood(UInt_t iEvt)
{
    // Function to return the signal and background likelihoods for the
    // Dalitz plot for the given event.

    if (this->useDP() == kTRUE) {
        sigDPModel_->calcLikelihoodInfo(iEvt);
        sigDPLike_ = sigDPModel_->getEvtLikelihood();

        if ( useSCF_ == kTRUE ) {
            if ( scfMap_ == 0 ) {
                // we're not smearing the SCF DP position
                // so the likelihood is the same as the TM
                scfDPLike_ = sigDPLike_;
            } else {
                // calculate the smeared SCF DP likelihood
                scfDPLike_ = this->getEvtSCFDPLikelihood(iEvt);
            }
        }

        const UInt_t nBkgnds = this->nBkgndClasses();
        for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) {
            if (usingBkgnd_ == kTRUE) {
                bkgndDPLike_[bkgndID] = bkgndDPModels_[bkgndID]->getLikelihood(iEvt);
            } else {
                bkgndDPLike_[bkgndID] = 0.0;
            }
        }
    } else {
        // There's always going to be a term in the likelihood for the
        // signal, so we'd better not zero it.
        sigDPLike_ = 1.0;
        scfDPLike_ = 1.0;

        const UInt_t nBkgnds = this->nBkgndClasses();
        for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) {
            if (usingBkgnd_ == kTRUE) {
                bkgndDPLike_[bkgndID] = 1.0;
            } else {
                bkgndDPLike_[bkgndID] = 0.0;
            }
        }
    }
}

Double_t LauSimpleFitModel::getEvtSCFDPLikelihood(UInt_t iEvt)
{
    Double_t scfDPLike(0.0);

    Double_t recoJacobian(1.0);
    Double_t xCoord(0.0);
    Double_t yCoord(0.0);

    Bool_t squareDP = kinematics_->squareDP();
    if ( squareDP ) {
        xCoord = sigDPModel_->getEvtmPrime();
        yCoord = sigDPModel_->getEvtthPrime();
        recoJacobian = recoJacobians_[iEvt];
    } else {
        xCoord = sigDPModel_->getEvtm13Sq();
        yCoord = sigDPModel_->getEvtm23Sq();
    }

    // Find the bin that our reco event falls in
    Int_t recoBin = scfMap_->binNumber( xCoord, yCoord );

    // Find out which true bins contribute to the given reco bin
    const std::vector<Int_t>* trueBins = scfMap_->trueBins(recoBin);

    Int_t nDataEvents = this->eventsPerExpt();

    // Loop over the true bins
    for (std::vector<Int_t>::const_iterator iter = trueBins->begin(); iter != trueBins->end(); ++iter) {
        Int_t trueBin = (*iter);

        // prob of a true event in the given true bin migrating to the reco bin
        Double_t pRecoGivenTrue = scfMap_->prob( recoBin, trueBin );

        // We've cached the DP amplitudes and the efficiency for the
        // true bin centres, just after the data points
        sigDPModel_->calcLikelihoodInfo( nDataEvents + trueBin );
        Double_t pTrue = sigDPModel_->getEvtLikelihood();

        // Get the cached SCF fraction (and jacobian if we're using the square DP)
        Double_t scfFraction = fakeSCFFracs_[ trueBin ];
        Double_t jacobian(1.0);
        if ( squareDP ) {
            jacobian = fakeJacobians_[ trueBin ];
        }

        scfDPLike += pTrue * jacobian * scfFraction * pRecoGivenTrue;
    }

    // Divide by the reco jacobian
    scfDPLike /= recoJacobian;

    return scfDPLike;
}
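// In other words, getEvtSCFDPLikelihood() evaluates, schematically,
//
//     L_SCF^DP(reco) = [ sum_true P(reco|true) * f_SCF(true) * L_DP(true) * J(true) ] / J(reco)
//
// where the sum runs over the true bins that can migrate into the reconstructed
// bin, P(reco|true) is taken from the LauScfMap, and the jacobian factors J are
// only relevant when the smearing is performed in the square Dalitz plot.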
void LauSimpleFitModel::getEvtExtraLikelihoods(UInt_t iEvt)
{
    // Function to return the signal and background likelihoods for the
    // extra variables for the given event.

    sigExtraLike_ = 1.0;

    for (LauPdfList::iterator iter = signalPdfs_.begin(); iter != signalPdfs_.end(); ++iter) {
        (*iter)->calcLikelihoodInfo(iEvt);
        sigExtraLike_ *= (*iter)->getLikelihood();
    }
    if (useSCF_) {
        scfExtraLike_ = 1.0;
        for (LauPdfList::iterator iter = scfPdfs_.begin(); iter != scfPdfs_.end(); ++iter) {
            (*iter)->calcLikelihoodInfo(iEvt);
            scfExtraLike_ *= (*iter)->getLikelihood();
        }
    }

    const UInt_t nBkgnds = this->nBkgndClasses();
    for ( UInt_t bkgndID(0); bkgndID < nBkgnds; ++bkgndID ) {
        if (usingBkgnd_) {
            bkgndExtraLike_[bkgndID] = 1.0;
            LauPdfList& pdfList = bkgndPdfs_[bkgndID];
            for (LauPdfList::iterator pdf_iter = pdfList.begin(); pdf_iter != pdfList.end(); ++pdf_iter) {
                (*pdf_iter)->calcLikelihoodInfo(iEvt);
                bkgndExtraLike_[bkgndID] *= (*pdf_iter)->getLikelihood();
            }
        } else {
            bkgndExtraLike_[bkgndID] = 0.0;
        }
    }
}

void LauSimpleFitModel::updateCoeffs()
{
    coeffs_.clear();
    coeffs_.reserve(nSigComp_);
    for (UInt_t i = 0; i < nSigComp_; i++) {
        coeffs_.push_back(coeffPars_[i]->particleCoeff());
    }
}

void LauSimpleFitModel::setupSPlotNtupleBranches()
{
    // add branches for storing the experiment number and the number of
    // the event within the current experiment
    this->addSPlotNtupleIntegerBranch("iExpt");
    this->addSPlotNtupleIntegerBranch("iEvtWithinExpt");

    // Store the efficiency of the event (for inclusive BF calculations).
    if (this->storeDPEff()) {
        this->addSPlotNtupleDoubleBranch("efficiency");
        if ( sigDPModel_->usingScfModel() ) {
            this->addSPlotNtupleDoubleBranch("scffraction");
        }
    }

    // Store the total event likelihood for each species.
    if (useSCF_) {
        this->addSPlotNtupleDoubleBranch("sigTMTotalLike");
        this->addSPlotNtupleDoubleBranch("sigSCFTotalLike");
        this->addSPlotNtupleDoubleBranch("sigSCFFrac");
    } else {
        this->addSPlotNtupleDoubleBranch("sigTotalLike");
    }
    if (usingBkgnd_) {
        const UInt_t nBkgnds = this->nBkgndClasses();
        for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
            TString name( this->bkgndClassName(iBkgnd) );
            name += "TotalLike";
            this->addSPlotNtupleDoubleBranch(name);
        }
    }

    // Store the DP likelihoods
    if (this->useDP()) {
        if (useSCF_) {
            this->addSPlotNtupleDoubleBranch("sigTMDPLike");
            this->addSPlotNtupleDoubleBranch("sigSCFDPLike");
        } else {
            this->addSPlotNtupleDoubleBranch("sigDPLike");
        }
        if (usingBkgnd_) {
            const UInt_t nBkgnds = this->nBkgndClasses();
            for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
                TString name( this->bkgndClassName(iBkgnd) );
                name += "DPLike";
                this->addSPlotNtupleDoubleBranch(name);
            }
        }
    }

    // Store the likelihoods for each extra PDF
    if (useSCF_) {
        this->addSPlotNtupleBranches(&signalPdfs_, "sigTM");
        this->addSPlotNtupleBranches(&scfPdfs_, "sigSCF");
    } else {
        this->addSPlotNtupleBranches(&signalPdfs_, "sig");
    }
    if (usingBkgnd_) {
        const UInt_t nBkgnds = this->nBkgndClasses();
        for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
            const TString& bkgndClass = this->bkgndClassName(iBkgnd);
            const LauPdfList* pdfList = &(bkgndPdfs_[iBkgnd]);
            this->addSPlotNtupleBranches(pdfList, bkgndClass);
        }
    }
}
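// The branch naming convention established above (and filled in storePerEvtLlhds())
// is, for example: total likelihoods in "sigTotalLike" (or "sigTMTotalLike" and
// "sigSCFTotalLike" when SCF is modelled) and "<bkgndClass>TotalLike"; DP likelihoods
// in "sigDPLike" (or "sigTMDPLike"/"sigSCFDPLike") and "<bkgndClass>DPLike"; and, for
// each extra PDF variable "v", a branch "<prefix>vLike", plus a combined
// "<prefix>v1v2Like" branch for 2D PDFs.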
void LauSimpleFitModel::addSPlotNtupleBranches(const LauPdfList* extraPdfs, const TString& prefix)
{
    if (extraPdfs) {
        // Loop through each of the PDFs
        for (LauPdfList::const_iterator pdf_iter = extraPdfs->begin(); pdf_iter != extraPdfs->end(); ++pdf_iter) {

            // Count the number of input variables that are not
            // DP variables (used in the case where there is DP
            // dependence for e.g. MVA)
            UInt_t nVars(0);
            std::vector<TString> varNames = (*pdf_iter)->varNames();
            for ( std::vector<TString>::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) {
                if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) {
                    ++nVars;
                }
            }

            if ( nVars == 1 ) {
                // If the PDF only has one variable then
                // simply add one branch for that variable
                TString varName = (*pdf_iter)->varName();
                TString name(prefix);
                name += varName;
                name += "Like";
                this->addSPlotNtupleDoubleBranch(name);
            } else if ( nVars == 2 ) {
                // If the PDF has two variables then we
                // need a branch for them both together and
                // branches for each
                TString allVars("");
                for ( std::vector<TString>::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) {
                    allVars += (*var_iter);
                    TString name(prefix);
                    name += (*var_iter);
                    name += "Like";
                    this->addSPlotNtupleDoubleBranch(name);
                }
                TString name(prefix);
                name += allVars;
                name += "Like";
                this->addSPlotNtupleDoubleBranch(name);
            } else {
                std::cerr << "WARNING in LauSimpleFitModel::addSPlotNtupleBranches : Can't yet deal with 3D PDFs." << std::endl;
            }
        }
    }
}

Double_t LauSimpleFitModel::setSPlotNtupleBranchValues(LauPdfList* extraPdfs, const TString& prefix, UInt_t iEvt)
{
    // Store the PDF value for each variable in the list
    Double_t totalLike(1.0);
    Double_t extraLike(0.0);
    if (extraPdfs) {
        for (LauPdfList::iterator pdf_iter = extraPdfs->begin(); pdf_iter != extraPdfs->end(); ++pdf_iter) {

            // calculate the likelihood for this event
            (*pdf_iter)->calcLikelihoodInfo(iEvt);
            extraLike = (*pdf_iter)->getLikelihood();
            totalLike *= extraLike;

            // Count the number of input variables that are not
            // DP variables (used in the case where there is DP
            // dependence for e.g. MVA)
            UInt_t nVars(0);
            std::vector<TString> varNames = (*pdf_iter)->varNames();
            for ( std::vector<TString>::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) {
                if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) {
                    ++nVars;
                }
            }

            if ( nVars == 1 ) {
                // If the PDF only has one variable then
                // simply store the value for that variable
                TString varName = (*pdf_iter)->varName();
                TString name(prefix);
                name += varName;
                name += "Like";
                this->setSPlotNtupleDoubleBranchValue(name, extraLike);
            } else if ( nVars == 2 ) {
                // If the PDF has two variables then we
                // store the value for them both together
                // and for each on their own
                TString allVars("");
                for ( std::vector<TString>::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) {
                    allVars += (*var_iter);
                    TString name(prefix);
                    name += (*var_iter);
                    name += "Like";
                    Double_t indivLike = (*pdf_iter)->getLikelihood( (*var_iter) );
                    this->setSPlotNtupleDoubleBranchValue(name, indivLike);
                }
                TString name(prefix);
                name += allVars;
                name += "Like";
                this->setSPlotNtupleDoubleBranchValue(name, extraLike);
            } else {
                std::cerr << "WARNING in LauSimpleFitModel::setSPlotNtupleBranchValues : Can't yet deal with 3D PDFs." << std::endl;
            }
        }
    }
    return totalLike;
}

LauSPlot::NameSet LauSimpleFitModel::variableNames() const
{
    LauSPlot::NameSet nameSet;
    if (this->useDP()) {
        nameSet.insert("DP");
    }
    // Loop through all the signal PDFs
    for (LauPdfList::const_iterator pdf_iter = signalPdfs_.begin(); pdf_iter != signalPdfs_.end(); ++pdf_iter) {
        // Loop over the variables involved in each PDF
        std::vector<TString> varNames = (*pdf_iter)->varNames();
        for ( std::vector<TString>::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) {
            // If they are not DP coordinates then add them
            if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) {
                nameSet.insert( (*var_iter) );
            }
        }
    }
    return nameSet;
}
LauSPlot::NumbMap LauSimpleFitModel::freeSpeciesNames() const
{
    LauSPlot::NumbMap numbMap;
    if (!signalEvents_->fixed() && this->doEMLFit()) {
        numbMap["sig"] = signalEvents_->genValue();
    }
    if ( usingBkgnd_ ) {
        const UInt_t nBkgnds = this->nBkgndClasses();
        for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
            const TString& bkgndClass = this->bkgndClassName(iBkgnd);
            const LauAbsRValue* par = bkgndEvents_[iBkgnd];
            if (!par->fixed()) {
                numbMap[bkgndClass] = par->genValue();
                if ( ! par->isLValue() ) {
                    std::cerr << "WARNING in LauSimpleFitModel::freeSpeciesNames : \"" << par->name() << "\" is a LauFormulaPar, which implies it is perhaps not entirely free to float in the fit, so the sWeight calculation may not be reliable" << std::endl;
                }
            }
        }
    }
    return numbMap;
}

LauSPlot::NumbMap LauSimpleFitModel::fixdSpeciesNames() const
{
    LauSPlot::NumbMap numbMap;
    if (signalEvents_->fixed() && this->doEMLFit()) {
        numbMap["sig"] = signalEvents_->genValue();
    }
    if ( usingBkgnd_ ) {
        const UInt_t nBkgnds = this->nBkgndClasses();
        for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
            const TString& bkgndClass = this->bkgndClassName(iBkgnd);
            const LauAbsRValue* par = bkgndEvents_[iBkgnd];
            if (par->fixed()) {
                numbMap[bkgndClass] = par->genValue();
            }
        }
    }
    return numbMap;
}

LauSPlot::TwoDMap LauSimpleFitModel::twodimPDFs() const
{
    LauSPlot::TwoDMap twodimMap;

    for (LauPdfList::const_iterator pdf_iter = signalPdfs_.begin(); pdf_iter != signalPdfs_.end(); ++pdf_iter) {
        // Count the number of input variables that are not DP variables
        UInt_t nVars(0);
        std::vector<TString> varNames = (*pdf_iter)->varNames();
        for ( std::vector<TString>::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) {
            if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) {
                ++nVars;
            }
        }
        if ( nVars == 2 ) {
            if (useSCF_) {
                twodimMap.insert( std::make_pair( "sigTM", std::make_pair( varNames[0], varNames[1] ) ) );
            } else {
                twodimMap.insert( std::make_pair( "sig", std::make_pair( varNames[0], varNames[1] ) ) );
            }
        }
    }
    if ( useSCF_ ) {
        for (LauPdfList::const_iterator pdf_iter = scfPdfs_.begin(); pdf_iter != scfPdfs_.end(); ++pdf_iter) {
            // Count the number of input variables that are not DP variables
            UInt_t nVars(0);
            std::vector<TString> varNames = (*pdf_iter)->varNames();
            for ( std::vector<TString>::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) {
                if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) {
                    ++nVars;
                }
            }
            if ( nVars == 2 ) {
                twodimMap.insert( std::make_pair( "sigSCF", std::make_pair( varNames[0], varNames[1] ) ) );
            }
        }
    }

    if (usingBkgnd_) {
        const UInt_t nBkgnds = this->nBkgndClasses();
        for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
            const TString& bkgndClass = this->bkgndClassName(iBkgnd);
            const LauPdfList& pdfList = bkgndPdfs_[iBkgnd];
            for (LauPdfList::const_iterator pdf_iter = pdfList.begin(); pdf_iter != pdfList.end(); ++pdf_iter) {
                // Count the number of input variables that are not DP variables
                UInt_t nVars(0);
                std::vector<TString> varNames = (*pdf_iter)->varNames();
                for ( std::vector<TString>::const_iterator var_iter = varNames.begin(); var_iter != varNames.end(); ++var_iter ) {
                    if ( (*var_iter) != "m13Sq" && (*var_iter) != "m23Sq" ) {
                        ++nVars;
                    }
                }
                if ( nVars == 2 ) {
                    twodimMap.insert( std::make_pair( bkgndClass, std::make_pair( varNames[0], varNames[1] ) ) );
                }
            }
        }
    }

    return twodimMap;
}
void LauSimpleFitModel::storePerEvtLlhds()
{
    std::cout << "INFO in LauSimpleFitModel::storePerEvtLlhds : Storing per-event likelihood values..." << std::endl;

    // if we've not been using the DP model then we need to cache all
    // the info here so that we can get the efficiency from it
    LauFitDataTree* inputFitData = this->fitData();
    if (!this->useDP() && this->storeDPEff()) {
        sigDPModel_->initialise(coeffs_);
        sigDPModel_->fillDataTree(*inputFitData);
    }

    UInt_t evtsPerExpt(this->eventsPerExpt());

    for (UInt_t iEvt = 0; iEvt < evtsPerExpt; ++iEvt) {

        this->setSPlotNtupleIntegerBranchValue("iExpt",this->iExpt());
        this->setSPlotNtupleIntegerBranchValue("iEvtWithinExpt",iEvt);

        // the DP information
        this->getEvtDPLikelihood(iEvt);
        if (this->storeDPEff()) {
            if (!this->useDP()) {
                sigDPModel_->calcLikelihoodInfo(iEvt);
            }
            this->setSPlotNtupleDoubleBranchValue("efficiency",sigDPModel_->getEvtEff());
            if ( sigDPModel_->usingScfModel() ) {
                this->setSPlotNtupleDoubleBranchValue("scffraction",sigDPModel_->getEvtScfFraction());
            }
        }
        if (this->useDP()) {
            sigTotalLike_ = sigDPLike_;
            if (useSCF_) {
                scfTotalLike_ = scfDPLike_;
                this->setSPlotNtupleDoubleBranchValue("sigTMDPLike",sigDPLike_);
                this->setSPlotNtupleDoubleBranchValue("sigSCFDPLike",scfDPLike_);
            } else {
                this->setSPlotNtupleDoubleBranchValue("sigDPLike",sigDPLike_);
            }
            if (usingBkgnd_) {
                const UInt_t nBkgnds = this->nBkgndClasses();
                for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
                    TString name = this->bkgndClassName(iBkgnd);
                    name += "DPLike";
                    this->setSPlotNtupleDoubleBranchValue(name,bkgndDPLike_[iBkgnd]);
                }
            }
        } else {
            sigTotalLike_ = 1.0;
            if (useSCF_) {
                scfTotalLike_ = 1.0;
            }
            if (usingBkgnd_) {
                const UInt_t nBkgnds = this->nBkgndClasses();
                for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
                    bkgndTotalLike_[iBkgnd] = 1.0;
                }
            }
        }

        // the signal PDF values
        if ( useSCF_ ) {
            sigTotalLike_ *= this->setSPlotNtupleBranchValues(&signalPdfs_, "sigTM", iEvt);
            scfTotalLike_ *= this->setSPlotNtupleBranchValues(&scfPdfs_, "sigSCF", iEvt);
        } else {
            sigTotalLike_ *= this->setSPlotNtupleBranchValues(&signalPdfs_, "sig", iEvt);
        }

        // the background PDF values
        if (usingBkgnd_) {
            const UInt_t nBkgnds = this->nBkgndClasses();
            for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
                const TString& bkgndClass = this->bkgndClassName(iBkgnd);
                LauPdfList& pdfs = bkgndPdfs_[iBkgnd];
                bkgndTotalLike_[iBkgnd] *= this->setSPlotNtupleBranchValues(&(pdfs), bkgndClass, iEvt);
            }
        }

        // the total likelihoods
        if (useSCF_) {
            Double_t scfFrac(0.0);
            if ( useSCFHist_ ) {
                scfFrac = recoSCFFracs_[iEvt];
            } else {
                scfFrac = scfFrac_.unblindValue();
            }
            this->setSPlotNtupleDoubleBranchValue("sigSCFFrac",scfFrac);
            sigTotalLike_ *= ( 1.0 - scfFrac );
            if ( scfMap_ == 0 ) {
                scfTotalLike_ *= scfFrac;
            }
            this->setSPlotNtupleDoubleBranchValue("sigTMTotalLike",sigTotalLike_);
            this->setSPlotNtupleDoubleBranchValue("sigSCFTotalLike",scfTotalLike_);
        } else {
            this->setSPlotNtupleDoubleBranchValue("sigTotalLike",sigTotalLike_);
        }
        if (usingBkgnd_) {
            const UInt_t nBkgnds = this->nBkgndClasses();
            for ( UInt_t iBkgnd(0); iBkgnd < nBkgnds; ++iBkgnd ) {
                TString name = this->bkgndClassName(iBkgnd);
                name += "TotalLike";
                this->setSPlotNtupleDoubleBranchValue(name,bkgndTotalLike_[iBkgnd]);
            }
        }

        // fill the tree
        this->fillSPlotNtupleBranches();
    }
    std::cout << "INFO in LauSimpleFitModel::storePerEvtLlhds : Finished storing per-event likelihood values." << std::endl;
}
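// The two accessors added below expose the signal DP amplitude and the signal and
// background DP likelihoods at an arbitrary (m13Sq, m23Sq) point. As one possible
// use (an illustrative sketch only, with a hypothetical, already-configured
// "fitModel" object and an arbitrary binning), the likelihood values can be
// histogrammed across the DP:
//
//     TH2D hSigLike( "hSigLike", "Signal DP likelihood;m^{2}_{13};m^{2}_{23}",
//                    100, 0.0, 25.0, 100, 0.0, 25.0 );
//     for ( Int_t ix{1}; ix <= hSigLike.GetNbinsX(); ++ix ) {
//         for ( Int_t iy{1}; iy <= hSigLike.GetNbinsY(); ++iy ) {
//             const Double_t m13Sq { hSigLike.GetXaxis()->GetBinCenter( ix ) };
//             const Double_t m23Sq { hSigLike.GetYaxis()->GetBinCenter( iy ) };
//             const std::map<TString,Double_t> likelihoods = fitModel.getDPLikelihoods( m13Sq, m23Sq );
//             hSigLike.SetBinContent( ix, iy, likelihoods.at( "signal" ) );
//         }
//     }
//
// Points outside the DP boundary simply yield zero likelihood. Note that, as warned
// in the function itself, SCF signal is not yet handled by getDPLikelihoods.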
+std::map<TString,LauComplex> LauSimpleFitModel::getDPAmps(const Double_t m13Sq, const Double_t m23Sq)
+{
+    // Initialise the DP model, if not already done
+    if ( coeffs_.empty() ) {
+        this->updateCoeffs();
+        this->initialiseDPModels();
+    }
+
+    if ( ! kinematics_->withinDPLimits( m13Sq, m23Sq ) ) {
+        return { { "signal", {} } };
+    }
+
+    sigDPModel_->calcLikelihoodInfo( m13Sq, m23Sq );
+
+    return { { "signal", sigDPModel_->getEvtDPAmp() } };
+}
+
+std::map<TString,Double_t> LauSimpleFitModel::getDPLikelihoods(const Double_t m13Sq, const Double_t m23Sq)
+{
+    // Initialise the DP model, if not already done
+    if ( coeffs_.empty() ) {
+        this->updateCoeffs();
+        this->initialiseDPModels();
+    }
+
+    std::map<TString,Double_t> likelihoods;
+
+    if ( ! kinematics_->withinDPLimits( m13Sq, m23Sq ) ) {
+        likelihoods.emplace( "signal", 0.0 );
+        if ( usingBkgnd_ ) {
+            const UInt_t nBkgnds { this->nBkgndClasses() };
+            for ( UInt_t bkgndID( 0 ); bkgndID < nBkgnds; ++bkgndID ) {
+                likelihoods.emplace( this->bkgndClassName( bkgndID ), 0.0 );
+            }
+        }
+        return likelihoods;
+    }
+
+    sigDPModel_->calcLikelihoodInfo( m13Sq, m23Sq );
+    likelihoods.emplace( "signal", sigDPModel_->getEvtLikelihood() );
+
+    // TODO - SCF signal
+    static bool warningIssued { false };
+    if ( useSCF_ && ! warningIssued ) {
+        warningIssued = true;
+        std::cerr << "WARNING in LauSimpleFitModel::getDPLikelihoods : calculation of SCF likelihoods not currently implemented in this function\n";
+        std::cerr << " : signal likelihood will just be the truth-matched value";
+        std::cerr << std::endl;
+    }
+
+    if ( usingBkgnd_ ) {
+        const UInt_t nBkgnds { this->nBkgndClasses() };
+        for ( UInt_t bkgndID( 0 ); bkgndID < nBkgnds; ++bkgndID ) {
+            likelihoods.emplace( this->bkgndClassName( bkgndID ),
+                                 bkgndDPModels_[bkgndID]->getLikelihood( m13Sq, m23Sq ) );
+        }
+    }
+
+    return likelihoods;
+}
+
void LauSimpleFitModel::embedSignal(const TString& fileName, const TString& treeName, Bool_t reuseEventsWithinEnsemble, Bool_t reuseEventsWithinExperiment, Bool_t useReweighting)
{
    if (signalTree_) {
        std::cerr << "ERROR in LauSimpleFitModel::embedSignal : Already embedding signal from a file." << std::endl;
        return;
    }

    signalTree_ = new LauEmbeddedData(fileName,treeName,reuseEventsWithinExperiment);
    Bool_t dataOK = signalTree_->findBranches();
    if (!dataOK) {
        delete signalTree_; signalTree_ = 0;
        std::cerr << "ERROR in LauSimpleFitModel::embedSignal : Problem creating data tree for embedding." << std::endl;
        return;
    }
    reuseSignal_ = reuseEventsWithinEnsemble;
    useReweighting_ = useReweighting;
    if (this->enableEmbedding() == kFALSE) {
        this->enableEmbedding(kTRUE);
    }
}

void LauSimpleFitModel::embedBkgnd(const TString& bkgndClass, const TString& fileName, const TString& treeName, Bool_t reuseEventsWithinEnsemble, Bool_t reuseEventsWithinExperiment)
{
    if ( ! this->validBkgndClass( bkgndClass ) ) {
        std::cerr << "ERROR in LauSimpleFitModel::embedBkgnd : Invalid background class \"" << bkgndClass << "\"." << std::endl;
        std::cerr << " : Background class names must be provided in \"setBkgndClassNames\" before any other background-related actions can be performed." << std::endl;
        return;
    }

    UInt_t bkgndID = this->bkgndClassID( bkgndClass );

    if (bkgndTree_[bkgndID]) {
        std::cerr << "ERROR in LauSimpleFitModel::embedBkgnd : Already embedding background from a file." << std::endl;
        return;
    }

    bkgndTree_[bkgndID] = new LauEmbeddedData(fileName,treeName,reuseEventsWithinExperiment);
    Bool_t dataOK = bkgndTree_[bkgndID]->findBranches();
    if (!dataOK) {
        delete bkgndTree_[bkgndID]; bkgndTree_[bkgndID] = 0;
        std::cerr << "ERROR in LauSimpleFitModel::embedBkgnd : Problem creating data tree for embedding." << std::endl;
        return;
    }
    reuseBkgnd_[bkgndID] = reuseEventsWithinEnsemble;
    if (this->enableEmbedding() == kFALSE) {
        this->enableEmbedding(kTRUE);
    }
}

void LauSimpleFitModel::weightEvents( const TString& dataFileName, const TString& dataTreeName )
{
    // Routine to provide weights for events that are uniformly distributed
    // in the DP (or square DP) so as to reproduce the given DP model

    const Bool_t squareDP { kinematics_->squareDP() };
    if ( squareDP ) {
        std::cout << "INFO in LauSimpleFitModel::weightEvents : will store DP model weights and the square DP jacobian\n";
        std::cout << " : the DP model weights can be used on their own to weight events that were generated flat in phase space\n";
        std::cout << " : or they can be multiplied by the jacobian to weight events that were generated flat in the square DP\n";
        std::cout << " : or they can be multiplied by max(1.0, jacobian) to weight events that were generated quasi-flat in the square DP" << std::endl;
    } else {
        std::cout << "INFO in LauSimpleFitModel::weightEvents : will store DP model weights suitable for weighting events that were generated flat in phase space" << std::endl;
    }

    // This reads in the given dataFile and creates an input
    // fit data tree that stores them for all events and experiments.
    const Bool_t dataOK { this->verifyFitData( dataFileName, dataTreeName ) };
    if ( ! dataOK ) {
        std::cerr << "ERROR in LauSimpleFitModel::weightEvents : Problem caching the data." << std::endl;
        return;
    }

    LauFitDataTree* inputFitData { this->fitData() };

    if ( ! inputFitData->haveBranch( "m13Sq_MC" ) || ! inputFitData->haveBranch( "m23Sq_MC" ) ) {
        std::cerr << "ERROR in LauSimpleFitModel::weightEvents : Cannot find MC truth DP coordinate branches in supplied data, aborting." << std::endl;
        return;
    }

    // Create the ntuple to hold the DP weights
    TString weightsFileName{ dataFileName };
    const Ssiz_t index { weightsFileName.Last( '.' ) };
    weightsFileName.Insert( index, "_DPweights" );

    LauGenNtuple weightsTuple{ weightsFileName, dataTreeName };
    weightsTuple.addIntegerBranch( "iExpt" );
    weightsTuple.addIntegerBranch( "iEvtWithinExpt" );
    weightsTuple.addDoubleBranch( "dpModelWeight" );
    if ( squareDP ) {
        weightsTuple.addDoubleBranch( "sqDPJacobian" );
    }

    const UInt_t nExpmt { this->nExpt() };
    const UInt_t firstExpmt { this->firstExpt() };
    for (UInt_t iExpmt {firstExpmt}; iExpmt < (firstExpmt+nExpmt); ++iExpmt) {

        inputFitData->readExperimentData(iExpmt);

        const UInt_t nEvents { inputFitData->nEvents() };
        if ( nEvents < 1 ) {
            std::cerr << "WARNING in LauSimpleFitModel::weightEvents : Zero events in experiment " << iExpmt << ", skipping..." << std::endl;
            continue;
        }
        weightsTuple.setIntegerBranchValue( "iExpt", iExpmt );

        // Calculate and store the weights for the events in this experiment
        for ( UInt_t iEvent{0}; iEvent < nEvents; ++iEvent ) {

            weightsTuple.setIntegerBranchValue( "iEvtWithinExpt", iEvent );

            const LauFitData& evtData { inputFitData->getData( iEvent ) };

            const Double_t m13Sq_MC { evtData.at( "m13Sq_MC" ) };
            const Double_t m23Sq_MC { evtData.at( "m23Sq_MC" ) };

            Double_t dpModelWeight{0.0};
            Double_t jacobian{1.0};

            if ( kinematics_->withinDPLimits( m13Sq_MC, m23Sq_MC ) ) {

                kinematics_->updateKinematics( m13Sq_MC, m23Sq_MC );
                dpModelWeight = sigDPModel_->getEventWeight();

                if ( squareDP ) {
                    jacobian = kinematics_->calcSqDPJacobian();
                }

                dpModelWeight /= sigDPModel_->getDPNorm();
            }

            weightsTuple.setDoubleBranchValue( "dpModelWeight", dpModelWeight );
            if ( squareDP ) {
                weightsTuple.setDoubleBranchValue( "sqDPJacobian", jacobian );
            }
            weightsTuple.fillBranches();
        }
    }

    weightsTuple.buildIndex( "iExpt", "iEvtWithinExpt" );
    weightsTuple.addFriendTree( dataFileName, dataTreeName );
    weightsTuple.writeOutGenResults();
}
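// The weights file written above contains a tree (named as the input data tree)
// holding "iExpt", "iEvtWithinExpt" and "dpModelWeight" (plus "sqDPJacobian" for
// square-DP models), with the original data tree attached as a friend. One way to
// use it, assuming for illustration a file "data_DPweights.root" and a tree
// "fitTree", would be:
//
//     TFile* weightsFile = TFile::Open( "data_DPweights.root" );
//     TTree* weightsTree = dynamic_cast<TTree*>( weightsFile->Get( "fitTree" ) );
//     // draw the MC-truth DP position, weighting each event by the DP model value
//     weightsTree->Draw( "m23Sq_MC:m13Sq_MC", "dpModelWeight", "colz" );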
"_"+label+".C"); } } void LauSimpleFitModel::savePDFPlotsWave(const TString& label, const Int_t& spin) { std::cout << "label = "<< label << ", spin = "<< spin << std::endl; TString tStrResID = "S_Wave"; if (spin == 1) tStrResID = "P_Wave"; if (spin == 2) tStrResID = "D_Wave"; std::cout << "LauSimpleFitModel::savePDFPlotsWave: "<< tStrResID << std::endl; TCanvas *c = new TCanvas("c"+tStrResID+label,tStrResID+" ("+label+")",0,0,600,400); Double_t minm13 = 0.0; Double_t maxm13 = sigDPModel_->getKinematics()->getm13Max(); Double_t minm23 = 0.0; Double_t maxm23 = sigDPModel_->getKinematics()->getm23Max(); Double_t mins13 = minm13*minm13; Double_t maxs13 = maxm13*maxm13; Double_t mins23 = minm23*minm23; Double_t maxs23 = maxm23*maxm23; Double_t s13, s23, chPdf; TGraph2D *dt = new TGraph2D(); dt->SetName(tStrResID+label); dt->SetTitle(tStrResID+" ("+label+")"); Int_t n13=200.00, n23=200.00; Double_t delta13, delta23; delta13 = (maxs13 - mins13)/n13; delta23 = (maxs23 - mins23)/n23; UInt_t nAmp = sigDPModel_->getnCohAmp(); Int_t count=0; for (Int_t i=0; igetKinematics()->withinDPLimits2(s23, s13)) { //if (s13 > 4) continue; LauComplex chAmp(0,0); Bool_t noWaveRes = kTRUE; sigDPModel_->calcLikelihoodInfo(s13, s23); for (UInt_t resID = 0; resID < nAmp; ++resID) { const LauIsobarDynamics* model = sigDPModel_; const LauAbsResonance* resonance = model->getResonance(resID); Int_t spin_res = resonance->getSpin(); if (spin != spin_res) continue; noWaveRes = kFALSE; chAmp += sigDPModel_->getFullAmplitude(resID); } if (noWaveRes) return; chPdf = chAmp.abs2(); if (sigDPModel_->getDaughters()->gotSymmetricalDP()){ Double_t sLow = s13; Double_t sHigh = s23; if (sLow>sHigh) { continue; //sLow = s23; //sHigh = s13; } dt->SetPoint(count,sHigh,sLow,chPdf); count++; } else { dt->SetPoint(count,s13,s23,chPdf); count++; } } } } gStyle->SetPalette(1); dt->GetXaxis()->SetTitle("pipi"); dt->GetYaxis()->SetTitle("pipi"); dt->Draw("SURF1"); dt->GetXaxis()->SetTitle("pipi"); dt->GetYaxis()->SetTitle("pipi"); c->SaveAs("plot_2D_"+tStrResID+"_"+label+".C"); }