diff --git a/analyses/pluginATLAS/ATLAS_PBPB_CENTRALITY.cc b/analyses/pluginATLAS/ATLAS_PBPB_CENTRALITY.cc
new file mode 100644
--- /dev/null
+++ b/analyses/pluginATLAS/ATLAS_PBPB_CENTRALITY.cc
@@ -0,0 +1,80 @@
+// -*- C++ -*-
+#include "Rivet/Tools/AtlasCommon.hh"
+#include "Rivet/Projections/ImpactParameterProjection.hh"
+#include "Rivet/Analysis.hh"
+
+namespace Rivet {
+
+
+class ATLAS_PBPB_CENTRALITY : public Analysis {
+
+public:
+
+  DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_PBPB_CENTRALITY);
+
+  /// Book histograms and initialise projections before the run
+  void init() {
+
+    // One projection for the actual observable, and one for the
+    // generated impact parameter.
+    declare(ATLAS::SumET_PBPB_Centrality(), "Centrality");
+    declare(ImpactParameterProjection(), "IMP");
+    declare(ATLAS::MinBiasTrigger(), "Trigger");
+
+    // The calibration histogram:
+    _calib = bookHisto1D("sumETFwd");
+
+    // If the histogram was pre-loaded, the calibration is done.
+    _done = ( _calib->numEntries() > 0 );
+
+    // The alternative histogram based on impact parameter. Note that
+    // it MUST be named the same as the histogram for the experimental
+    // observable, with an added _IMP suffix, for the Percentile<>
+    // binning to work properly.
+    _impcalib = bookHisto1D("sumETFwd_IMP", 400, 0.0, 20.0);
+
+  }
+
+  /// Perform the per-event analysis
+  void analyze(const Event& event) {
+
+    if ( _done ) return;
+
+    const double weight = event.weight();
+
+    // The alternative centrality, based on the generated impact
+    // parameter, is meant for generators that do not describe the
+    // full final state, and should therefore be filled even if the
+    // event is not triggered.
+    _impcalib->fill(apply<SingleValueProjection>(event, "IMP")(), weight);
+
+    if ( !apply<TriggerProjection>(event, "Trigger")() ) vetoEvent;
+
+    _calib->fill(apply<SingleValueProjection>(event, "Centrality")(), weight);
+
+  }
+
+  /// Finalize
+  void finalize() {
+
+    _calib->normalize();
+    _impcalib->normalize();
+
+  }
+
+private:
+
+  /// The calibration histograms.
+  Histo1DPtr _calib;
+  Histo1DPtr _impcalib;
+
+  /// Safeguard against adding to a pre-loaded histogram.
+  bool _done;
+
+};
+
+
+// The hook for the plugin system
+DECLARE_RIVET_PLUGIN(ATLAS_PBPB_CENTRALITY);
+
+}
diff --git a/analyses/pluginATLAS/ATLAS_PBPB_CENTRALITY.info b/analyses/pluginATLAS/ATLAS_PBPB_CENTRALITY.info
new file mode 100644
--- /dev/null
+++ b/analyses/pluginATLAS/ATLAS_PBPB_CENTRALITY.info
@@ -0,0 +1,37 @@
+Name: ATLAS_PBPB_CENTRALITY
+Summary: Calibration analysis for the ATLAS PbPb centrality measure (forward $\sum E_\perp$).
+Status: UNVALIDATED
+Authors:
+ - Leif Lönnblad
+ - Christian Bierlich
+NumEvents: 50000
+References:
+ - arXiv:1508.00848 [hep-ex], Eur.Phys.J. C76 (2016) no.4, 199
+RunInfo: Any!
+Description:
+  'Calibration analysis for ATLAS PbPb centrality. The centrality measure
+  is $\sum E_\perp$ in the forward direction ($3.2 < |\eta| < 4.9$). The
+  reference YODA file contains the experimental centrality calibration,
+  extracted from the plot in the paper. Note that this has not been
+  unfolded for detector effects.'
+
+BibKey: Aad:2015zza
+BibTeX: '@article{Aad:2015zza,
+      author         = "Aad, Georges and others",
+      title          = "{Measurement of the centrality dependence of the
+                        charged-particle pseudorapidity distribution in
+                        proton–lead collisions at $\sqrt{s_{_\text {NN}}} =
+                        5.02$ TeV with the ATLAS detector}",
+      collaboration  = "ATLAS",
+      journal        = "Eur. Phys.
J.", + volume = "C76", + year = "2016", + number = "4", + pages = "199", + doi = "10.1140/epjc/s10052-016-4002-3", + eprint = "1508.00848", + archivePrefix = "arXiv", + primaryClass = "hep-ex", + reportNumber = "CERN-PH-EP-2015-160", + SLACcitation = "%%CITATION = ARXIV:1508.00848;%%" +}' diff --git a/analyses/pluginATLAS/ATLAS_PBPB_CENTRALITY.yoda b/analyses/pluginATLAS/ATLAS_PBPB_CENTRALITY.yoda new file mode 100644 --- /dev/null +++ b/analyses/pluginATLAS/ATLAS_PBPB_CENTRALITY.yoda @@ -0,0 +1,109 @@ +BEGIN YODA_SCATTER2D_V2 /REF/ATLAS_PBPB_CENTRALITY/sumETFwd +Variations: [""] +IsRef: 1 +Path: /REF/ATLAS_PBPB_CENTRALITY/sumETFwd +Title: ~ +Type: Scatter2D +--- +# xval xerr- xerr+ yval yerr- yerr+ +1.788131e+00 1.788131e+00 3.074918e+00 1.396034e-01 0.000000e+00 0.000000e+00 +7.937967e+00 3.074918e+00 3.058206e+00 1.132275e-01 0.000000e+00 0.000000e+00 +1.405438e+01 3.058206e+00 3.066562e+00 9.426961e-02 0.000000e+00 0.000000e+00 +2.018750e+01 3.066562e+00 6.007788e+00 7.746573e-02 0.000000e+00 0.000000e+00 +3.220308e+01 6.007788e+00 7.503468e+00 6.365229e-02 0.000000e+00 0.000000e+00 +4.721001e+01 7.503468e+00 8.957369e+00 5.028704e-02 0.000000e+00 0.000000e+00 +6.512475e+01 8.957369e+00 8.965725e+00 4.077981e-02 0.000000e+00 0.000000e+00 +8.305620e+01 8.965725e+00 1.187353e+01 3.264017e-02 0.000000e+00 0.000000e+00 +1.068033e+02 1.187353e+01 1.334414e+01 2.752668e-02 0.000000e+00 0.000000e+00 +1.334915e+02 1.334414e+01 1.629372e+01 2.321339e-02 0.000000e+00 0.000000e+00 +1.660790e+02 1.629372e+01 1.626865e+01 1.932004e-02 0.000000e+00 0.000000e+00 +1.986163e+02 1.626865e+01 2.067214e+01 1.672334e-02 0.000000e+00 0.000000e+00 +2.399606e+02 2.067214e+01 1.771420e+01 1.466459e-02 0.000000e+00 0.000000e+00 +2.753890e+02 1.771420e+01 2.065543e+01 1.320122e-02 0.000000e+00 0.000000e+00 +3.166998e+02 2.065543e+01 1.916810e+01 1.188297e-02 0.000000e+00 0.000000e+00 +3.550360e+02 1.916810e+01 2.357994e+01 1.098035e-02 0.000000e+00 0.000000e+00 +4.021959e+02 2.357994e+01 2.063036e+01 1.014512e-02 0.000000e+00 0.000000e+00 +4.434566e+02 2.063036e+01 2.505891e+01 9.497597e-03 0.000000e+00 0.000000e+00 +4.935744e+02 2.505891e+01 2.063036e+01 8.660765e-03 0.000000e+00 0.000000e+00 +5.348351e+02 2.063036e+01 2.063036e+01 8.107981e-03 0.000000e+00 0.000000e+00 +5.760959e+02 2.063036e+01 2.060529e+01 7.590480e-03 0.000000e+00 0.000000e+00 +6.173064e+02 2.060529e+01 2.650445e+01 7.390459e-03 0.000000e+00 0.000000e+00 +6.703153e+02 2.650445e+01 2.650445e+01 7.008789e-03 0.000000e+00 0.000000e+00 +7.233243e+02 2.650445e+01 2.502549e+01 6.646829e-03 0.000000e+00 0.000000e+00 +7.733752e+02 2.502549e+01 2.503384e+01 6.386820e-03 0.000000e+00 0.000000e+00 +8.234429e+02 2.503384e+01 2.208426e+01 6.057214e-03 0.000000e+00 0.000000e+00 +8.676114e+02 2.208426e+01 2.355487e+01 5.820717e-03 0.000000e+00 0.000000e+00 +9.147212e+02 2.355487e+01 2.354652e+01 5.593239e-03 0.000000e+00 0.000000e+00 +9.618142e+02 2.354652e+01 2.502549e+01 5.445430e-03 0.000000e+00 0.000000e+00 +1.011865e+03 2.502549e+01 2.355487e+01 5.232417e-03 0.000000e+00 0.000000e+00 +1.058975e+03 2.355487e+01 2.353816e+01 5.027930e-03 0.000000e+00 0.000000e+00 +1.106051e+03 2.353816e+01 2.207590e+01 4.959523e-03 0.000000e+00 0.000000e+00 +1.150203e+03 2.207590e+01 2.207590e+01 4.828647e-03 0.000000e+00 0.000000e+00 +1.194355e+03 2.207590e+01 2.208426e+01 4.701224e-03 0.000000e+00 0.000000e+00 +1.238523e+03 2.208426e+01 2.354652e+01 4.517670e-03 0.000000e+00 0.000000e+00 +1.285616e+03 2.354652e+01 2.502549e+01 4.398284e-03 0.000000e+00 0.000000e+00 
+1.335667e+03 2.502549e+01 2.058858e+01 4.226233e-03 0.000000e+00 0.000000e+00 +1.376845e+03 2.058858e+01 2.356323e+01 4.223957e-03 0.000000e+00 0.000000e+00 +1.423971e+03 2.356323e+01 2.207590e+01 4.006125e-03 0.000000e+00 0.000000e+00 +1.468123e+03 2.207590e+01 2.206755e+01 3.900407e-03 0.000000e+00 0.000000e+00 +1.512258e+03 2.206755e+01 2.501713e+01 3.847489e-03 0.000000e+00 0.000000e+00 +1.562292e+03 2.501713e+01 2.500877e+01 3.745670e-03 0.000000e+00 0.000000e+00 +1.612310e+03 2.500877e+01 2.354652e+01 3.694566e-03 0.000000e+00 0.000000e+00 +1.659403e+03 2.354652e+01 2.060529e+01 3.596932e-03 0.000000e+00 0.000000e+00 +1.700613e+03 2.060529e+01 2.207590e+01 3.502148e-03 0.000000e+00 0.000000e+00 +1.744765e+03 2.207590e+01 2.207590e+01 3.409730e-03 0.000000e+00 0.000000e+00 +1.788917e+03 2.207590e+01 2.060529e+01 3.319751e-03 0.000000e+00 0.000000e+00 +1.830128e+03 2.060529e+01 2.353816e+01 3.232270e-03 0.000000e+00 0.000000e+00 +1.877204e+03 2.353816e+01 2.060529e+01 3.188294e-03 0.000000e+00 0.000000e+00 +1.918414e+03 2.060529e+01 2.354652e+01 3.104278e-03 0.000000e+00 0.000000e+00 +1.965507e+03 2.354652e+01 2.352980e+01 3.022243e-03 0.000000e+00 0.000000e+00 +2.012567e+03 2.352980e+01 2.353816e+01 3.020383e-03 0.000000e+00 0.000000e+00 +2.059643e+03 2.353816e+01 2.501713e+01 2.979289e-03 0.000000e+00 0.000000e+00 +2.109678e+03 2.501713e+01 2.207590e+01 2.900446e-03 0.000000e+00 0.000000e+00 +2.153829e+03 2.207590e+01 2.060529e+01 2.823906e-03 0.000000e+00 0.000000e+00 +2.195040e+03 2.060529e+01 2.060529e+01 2.749492e-03 0.000000e+00 0.000000e+00 +2.236251e+03 2.060529e+01 2.353816e+01 2.677039e-03 0.000000e+00 0.000000e+00 +2.283327e+03 2.353816e+01 2.354652e+01 2.640617e-03 0.000000e+00 0.000000e+00 +2.330420e+03 2.354652e+01 2.501713e+01 2.570835e-03 0.000000e+00 0.000000e+00 +2.380454e+03 2.501713e+01 2.353816e+01 2.502801e-03 0.000000e+00 0.000000e+00 +2.427531e+03 2.353816e+01 2.206755e+01 2.468749e-03 0.000000e+00 0.000000e+00 +2.471666e+03 2.206755e+01 2.061365e+01 2.435255e-03 0.000000e+00 0.000000e+00 +2.512893e+03 2.061365e+01 2.354652e+01 2.340263e-03 0.000000e+00 0.000000e+00 +2.559986e+03 2.354652e+01 2.501713e+01 2.278418e-03 0.000000e+00 0.000000e+00 +2.610020e+03 2.501713e+01 2.501713e+01 2.218123e-03 0.000000e+00 0.000000e+00 +2.660054e+03 2.501713e+01 2.647103e+01 2.159423e-03 0.000000e+00 0.000000e+00 +2.712997e+03 2.647103e+01 2.647103e+01 2.157927e-03 0.000000e+00 0.000000e+00 +2.765939e+03 2.647103e+01 2.354652e+01 2.156433e-03 0.000000e+00 0.000000e+00 +2.813032e+03 2.354652e+01 2.942897e+01 2.099446e-03 0.000000e+00 0.000000e+00 +2.871890e+03 2.942897e+01 2.942897e+01 2.043651e-03 0.000000e+00 0.000000e+00 +2.930748e+03 2.942897e+01 2.941226e+01 1.989339e-03 0.000000e+00 0.000000e+00 +2.989572e+03 2.941226e+01 3.237019e+01 1.987808e-03 0.000000e+00 0.000000e+00 +3.054312e+03 3.237019e+01 3.238690e+01 1.934831e-03 0.000000e+00 0.000000e+00 +3.119086e+03 3.238690e+01 3.384916e+01 1.834627e-03 0.000000e+00 0.000000e+00 +3.186785e+03 3.384916e+01 2.796671e+01 1.762453e-03 0.000000e+00 0.000000e+00 +3.242718e+03 2.796671e+01 3.237019e+01 1.693380e-03 0.000000e+00 0.000000e+00 +3.307458e+03 3.237019e+01 3.387423e+01 1.648249e-03 0.000000e+00 0.000000e+00 +3.375207e+03 3.387423e+01 3.389094e+01 1.522464e-03 0.000000e+00 0.000000e+00 +3.442989e+03 3.389094e+01 2.805027e+01 1.369958e-03 0.000000e+00 0.000000e+00 +3.499089e+03 2.805027e+01 2.511740e+01 1.154848e-03 0.000000e+00 0.000000e+00 +3.549324e+03 2.511740e+01 1.483146e+01 9.609351e-04 0.000000e+00 0.000000e+00 
+3.578987e+03 1.483146e+01 1.485653e+01 7.894020e-04 0.000000e+00 0.000000e+00 +3.608700e+03 1.485653e+01 1.191531e+01 6.235290e-04 0.000000e+00 0.000000e+00 +3.632531e+03 1.191531e+01 1.191531e+01 4.925480e-04 0.000000e+00 0.000000e+00 +3.656361e+03 1.191531e+01 1.045305e+01 3.890814e-04 0.000000e+00 0.000000e+00 +3.677267e+03 1.045305e+01 7.520179e+00 3.033661e-04 0.000000e+00 0.000000e+00 +3.692308e+03 7.520179e+00 6.041211e+00 2.334776e-04 0.000000e+00 0.000000e+00 +3.704390e+03 6.041211e+00 1.047812e+01 1.820632e-04 0.000000e+00 0.000000e+00 +3.725346e+03 1.047812e+01 1.051154e+01 1.364907e-04 0.000000e+00 0.000000e+00 +3.746369e+03 1.051154e+01 9.040926e+00 9.710829e-05 0.000000e+00 0.000000e+00 +3.764451e+03 9.040926e+00 9.107773e+00 6.909176e-05 0.000000e+00 0.000000e+00 +3.782667e+03 9.107773e+00 1.199886e+01 4.427321e-05 0.000000e+00 0.000000e+00 +3.806665e+03 1.199886e+01 9.074350e+00 3.068412e-05 0.000000e+00 0.000000e+00 +3.824813e+03 9.074350e+00 9.107773e+00 2.071839e-05 0.000000e+00 0.000000e+00 +3.843029e+03 9.107773e+00 1.356139e+01 1.327611e-05 0.000000e+00 0.000000e+00 +3.870152e+03 1.356139e+01 1.055332e+01 7.967567e-06 0.000000e+00 0.000000e+00 +3.891258e+03 1.055332e+01 9.241465e+00 5.309693e-06 0.000000e+00 0.000000e+00 +3.909741e+03 9.241465e+00 9.208042e+00 2.759774e-06 0.000000e+00 0.000000e+00 +3.928157e+03 9.208042e+00 1.234981e+01 1.511490e-06 0.000000e+00 0.000000e+00 +3.952857e+03 1.234981e+01 1.234981e+01 6.046967e-07 0.000000e+00 0.000000e+00 +END YODA_SCATTER2D_V2 diff --git a/analyses/pluginCMS/CMS_2017_I1471287.cc b/analyses/pluginCMS/CMS_2017_I1471287.cc new file mode 100644 --- /dev/null +++ b/analyses/pluginCMS/CMS_2017_I1471287.cc @@ -0,0 +1,261 @@ +// -*- C++ -*- +#include "Rivet/Analysis.hh" +#include "Rivet/Projections/ChargedFinalState.hh" +#include "Rivet/Projections/PrimaryParticles.hh" +#include "Rivet/Tools/Correlators.hh" + +namespace Rivet { + + + /// @brief Add a short analysis description here + class CMS_2017_I1471287 : public CumulantAnalysis { + public: + + /// Constructor + CMS_2017_I1471287() : CumulantAnalysis("CMS_2017_I1471287") { + + }; + + + /// @name Analysis methods + //@{ + + /// Book histograms and initialise projections before the run + void init() { + // A projection for charged tracks to manage centrality, corresponding + // to CMS offline tracks. + ChargedFinalState cfsMult(Cuts::abseta < 2.4 && Cuts::pT > 0.4*GeV); + addProjection(cfsMult, "CFSMult"); + + // The positive eta side used for rapidity gap, integrated. + const ChargedFinalState& cfsp = ChargedFinalState(Cuts::eta > 1.0 && + Cuts::eta < 2.0 && Cuts::pT > 0.3*GeV && Cuts::pT < 3.0*GeV); + declare(cfsp, "CFSP"); + // ..negative ditto. + const ChargedFinalState& cfsn = ChargedFinalState(Cuts::eta < -1.0 && + Cuts::eta > -2.0 && Cuts::pT > 0.3*GeV && Cuts::pT < 3.0*GeV); + declare(cfsn, "CFSN"); + + + // The positive eta side used for rapidity gap, differential, charged particles. + const ChargedFinalState& cfsppT = ChargedFinalState(Cuts::eta > 1.0 && + Cuts::eta < 2.0 && Cuts::pT > 0.3*GeV && Cuts::pT < 6.0*GeV); + declare(cfsppT, "CFSPPT"); + // ..negative ditto. + const ChargedFinalState& cfsnpT = ChargedFinalState(Cuts::eta < -1.0 && + Cuts::eta > -2.0 && Cuts::pT > 0.3*GeV && Cuts::pT < 6.0*GeV); + declare(cfsnpT, "CFSNPT"); + + // The positive eta side used for rapidity gap, differential, Kaons. 
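+      // (The identified-particle projections below mirror these windows:
+      // PDG code 310 selects the K0S and 3122 the Lambda. Correlating the
+      // positive-eta window [1,2] with the negative-eta window [-2,-1] is
+      // what enforces the |Delta eta| > 2 separation used in the paper.)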
+ const PrimaryParticles& kfsppT = PrimaryParticles({310},Cuts::eta > 1.0 && + Cuts::eta < 2.0 && Cuts::pT > 0.3*GeV && Cuts::pT < 6.0*GeV); + declare(kfsppT, "KFSP"); + // ..negative ditto. + const PrimaryParticles& kfsnpT = PrimaryParticles({310},Cuts::eta < -1.0 && + Cuts::eta > -2.0 && Cuts::pT > 0.3*GeV && Cuts::pT < 6.0*GeV); + declare(kfsnpT, "KFSN"); + // The positive eta side used for rapidity gap, differential, Lambda. + const PrimaryParticles& lfsppT = PrimaryParticles({3122},Cuts::eta > 1.0 && + Cuts::eta < 2.0 && Cuts::pT > 0.3*GeV && Cuts::pT < 6.0*GeV); + declare(lfsppT, "LFSP"); + // ..negative ditto. + const PrimaryParticles& lfsnpT = PrimaryParticles({3122},Cuts::eta < -1.0 && + Cuts::eta > -2.0 && Cuts::pT > 0.3*GeV && Cuts::pT < 6.0*GeV); + declare(lfsnpT, "LFSN"); + + // v22 |delta eta| > 2 (fig 4a) + h_v22 = bookScatter2D(1, 1, 1, true); + // v32 |delta eta| > 2 (fig 4b) + h_v32 = bookScatter2D(3, 1, 1, true); + // v22(pT) high mult., high pT (fig 6a) + h_v22pT = bookScatter2D(11, 1, 1, true); + // v22(pT) charged low mult. (fig. 7a) + h_v22pTh = bookScatter2D(17, 1, 1, true); + // v22(pT) K0S low mult. (fig. 7a) + h_v22pTK = bookScatter2D(18, 1, 1, true); + // v22(pT) Lambda low mult. (fig. 7a) + h_v22pTL = bookScatter2D(19, 1, 1, true); + // v22(pT) K0S high mult. (fig. 7b) + h_v22pTKc = bookScatter2D(21, 1, 1, true); + // v22(pT) Lambda high mult. (fig. 7b) + h_v22pTLc = bookScatter2D(22, 1, 1, true); + // c24 (fig. 9a) + h_c24 = bookScatter2D(28, 1, 1, true); + // c26 (fig. 9b) + h_c26 = bookScatter2D(31, 1, 1, true); + + // Corresponding event averaged correlators. + ec22 = bookECorrelatorGap<2,2>("ec22",h_v22); + ec32 = bookECorrelatorGap<3,2>("ec32",h_v32); + + // ... pT binned + ec22pT = bookECorrelatorGap<2,2>("ec22pT",h_v22pT); + ec22pTh = bookECorrelatorGap<2,2>("ec22pTh",h_v22pTh); + ec22pTK = bookECorrelatorGap<2,2>("ec22pTK",h_v22pTK); + ec22pTL = bookECorrelatorGap<2,2>("ec22pTL",h_v22pTL); + ec22pTKc = bookECorrelatorGap<2,2>("ec22pTKc",h_v22pTKc); + ec22pTLc = bookECorrelatorGap<2,2>("ec22pTLc",h_v22pTLc); + + // Maximal N and P for the gapped. + pair max = getMaxValues(); + + // For the four particle cumulant. + ec22_4 = bookECorrelator<2,2>("ec22_4",h_c24); + ec24_4 = bookECorrelator<2,4>("ec24_4",h_c24); + + // For the six particle cumulant. + ec22_6 = bookECorrelator<2,2>("ec22_6",h_c26); + ec24_6 = bookECorrelator<2,4>("ec24_6",h_c26); + ec26_6 = bookECorrelator<2,6>("ec26_6",h_c26); + + // Maximal N and P for the higher orders. + pair maxH = getMaxValues(); + + // Declare correlator projections. + // For integrated. + declare(Correlators(cfsMult, maxH.first, maxH.second),"CH"); + + // ... gapped + declare(Correlators(cfsp, max.first, max.second),"CPos"); + declare(Correlators(cfsn, max.first, max.second),"CNeg"); + + // For pT differential, charged particles, low multiplicity. + declare(Correlators(cfsppT, max.first, max.second, h_v22pTh),"CPosLowPT"); + declare(Correlators(cfsnpT, max.first, max.second, h_v22pTh),"CNegLowPT"); + + // For pT differential, charged particles, high multiplicity. + declare(Correlators(cfsppT, max.first, max.second, h_v22pT),"CPosHighPT"); + declare(Correlators(cfsnpT, max.first, max.second, h_v22pT),"CNegHighPT"); + + // For pT differential, kaons. low multiplicity. + declare(Correlators(kfsppT, max.first, max.second, h_v22pTK),"CPosLowPTK"); + declare(Correlators(kfsnpT, max.first, max.second, h_v22pTK),"CNegLowPTK"); + + // For pT differential, kaons. high multiplicity. 
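+      // (Each differential Correlators projection below is handed the
+      // corresponding booked Scatter2D, so that the projection adopts the
+      // same pT bin edges as the reference data points.)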
+      declare(Correlators(kfsppT, max.first, max.second, h_v22pTKc),"CPosHighPTK");
+      declare(Correlators(kfsnpT, max.first, max.second, h_v22pTKc),"CNegHighPTK");
+
+      // For pT differential, lambda, low multiplicity.
+      declare(Correlators(lfsppT, max.first, max.second, h_v22pTL),"CPosLowPTL");
+      declare(Correlators(lfsnpT, max.first, max.second, h_v22pTL),"CNegLowPTL");
+
+      // For pT differential, lambda, high multiplicity.
+      declare(Correlators(lfsppT, max.first, max.second, h_v22pTLc),"CPosHighPTL");
+      declare(Correlators(lfsnpT, max.first, max.second, h_v22pTLc),"CNegHighPTL");
+
+
+    }
+
+
+    /// Perform the per-event analysis
+    void analyze(const Event& event) {
+      const double w = event.weight();
+      const double nTrk = apply<ChargedFinalState>(event, "CFSMult").particles().size();
+      if (nTrk < 10) vetoEvent;
+
+      // The correlators.
+      const Correlators& ch = apply<Correlators>(event, "CH");
+
+      const Correlators& cp = apply<Correlators>(event, "CPos");
+      const Correlators& cn = apply<Correlators>(event, "CNeg");
+
+      const Correlators& cpLow = apply<Correlators>(event, "CPosLowPT");
+      const Correlators& cnLow = apply<Correlators>(event, "CNegLowPT");
+
+      const Correlators& cpHigh = apply<Correlators>(event, "CPosHighPT");
+      const Correlators& cnHigh = apply<Correlators>(event, "CNegHighPT");
+
+      const Correlators& cpLowK = apply<Correlators>(event, "CPosLowPTK");
+      const Correlators& cnLowK = apply<Correlators>(event, "CNegLowPTK");
+
+      const Correlators& cpHighK = apply<Correlators>(event, "CPosHighPTK");
+      const Correlators& cnHighK = apply<Correlators>(event, "CNegHighPTK");
+
+      const Correlators& cpLowL = apply<Correlators>(event, "CPosLowPTL");
+      const Correlators& cnLowL = apply<Correlators>(event, "CNegLowPTL");
+
+      const Correlators& cpHighL = apply<Correlators>(event, "CPosHighPTL");
+      const Correlators& cnHighL = apply<Correlators>(event, "CNegHighPTL");
+
+      ec22->fill(nTrk, cp, cn, w);
+      ec32->fill(nTrk, cp, cn, w);
+
+      ec22_4->fill(nTrk, ch, w);
+      ec24_4->fill(nTrk, ch, w);
+      ec22_6->fill(nTrk, ch, w);
+      ec24_6->fill(nTrk, ch, w);
+      ec26_6->fill(nTrk, ch, w);
+
+      if (nTrk < 20) {
+        ec22pTh->fill(cpLow, cnLow, w);
+        ec22pTK->fill(cpLowK, cnLowK, w);
+        ec22pTL->fill(cpLowL, cnLowL, w);
+      }
+      else if (nTrk >= 105 && nTrk < 150) {
+        ec22pT->fill(cpHigh, cnHigh, w);
+        ec22pTKc->fill(cpHighK, cnHighK, w);
+        ec22pTLc->fill(cpHighL, cnHighL, w);
+      }
+    }
+
+
+    /// Normalise histograms etc., after the run
+    void finalize() {
+      // Correlators must be streamed
+      // in order to run reentrant finalize.
+      stream();
+      cnTwoInt(h_v22, ec22);
+      cnTwoInt(h_v32, ec32);
+      vnTwoDiff(h_v22pT, ec22pT);
+      vnTwoDiff(h_v22pTh, ec22pTh);
+      cnFourInt(h_c24, ec22_4, ec24_4);
+      cnSixInt(h_c26, ec22_6, ec24_6, ec26_6);
+
+      // Set correct reference flow for pid flow.
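+      // (The differential K0S and Lambda correlators are converted to
+      // v2(pT) using the reference flow of the charged-particle sample in
+      // the same multiplicity class; that is what the setReference calls
+      // below arrange.)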
+      ec22pTK->setReference(ec22pTh->getReference());
+      vnTwoDiff(h_v22pTK, ec22pTK);
+      ec22pTL->setReference(ec22pTh->getReference());
+      vnTwoDiff(h_v22pTL, ec22pTL);
+      ec22pTKc->setReference(ec22pT->getReference());
+      vnTwoDiff(h_v22pTKc, ec22pTKc);
+      ec22pTLc->setReference(ec22pT->getReference());
+      vnTwoDiff(h_v22pTLc, ec22pTLc);
+
+    }
+
+    //@}
+    Scatter2DPtr h_v22;
+    Scatter2DPtr h_v32;
+    Scatter2DPtr h_v22pT;
+    Scatter2DPtr h_v22pTh;
+    Scatter2DPtr h_v22pTK;
+    Scatter2DPtr h_v22pTL;
+    Scatter2DPtr h_v22pTKc;
+    Scatter2DPtr h_v22pTLc;
+    Scatter2DPtr h_c24;
+    Scatter2DPtr h_c26;
+
+    ECorrPtr ec22;
+    ECorrPtr ec32;
+
+    ECorrPtr ec22_4;
+    ECorrPtr ec24_4;
+
+    ECorrPtr ec22_6;
+    ECorrPtr ec24_6;
+    ECorrPtr ec26_6;
+
+    ECorrPtr ec22pT;
+    ECorrPtr ec22pTh;
+    ECorrPtr ec22pTK;
+    ECorrPtr ec22pTL;
+    ECorrPtr ec22pTKc;
+    ECorrPtr ec22pTLc;
+
+  };
+
+
+  // The hook for the plugin system
+  DECLARE_RIVET_PLUGIN(CMS_2017_I1471287);
+
+
+}
diff --git a/analyses/pluginCMS/CMS_2017_I1471287.info b/analyses/pluginCMS/CMS_2017_I1471287.info
new file mode 100644
--- /dev/null
+++ b/analyses/pluginCMS/CMS_2017_I1471287.info
@@ -0,0 +1,36 @@
+Name: CMS_2017_I1471287
+Year: 2017
+Summary: Two- and multi-particle angular correlations in pp collisions at $\sqrt{s} = 13$ TeV.
+Experiment: CMS
+Collider: LHC
+InspireID: 1471287
+Status: UNVALIDATED
+Authors:
+ - Christian Bierlich
+References:
+- Phys.Lett. B765 (2017) 193-220
+- DOI:10.1016/j.physletb.2016.12.009
+- arXiv:1606.06198
+RunInfo: Minimum bias QCD events.
+NeedCrossSection: no
+Beams: [p+, p+]
+Energies: [13000]
+Description:
+  'Measurement of two- and multi-particle angular correlations of charged
+  particles, as well as of $K^0_s$ and $\Lambda + \bar{\Lambda}$. The
+  measurements are performed $p_\perp$-integrated, as a function of event
+  multiplicity, and $p_\perp$-differential in two event-multiplicity bins
+  (high and low). The experimental analysis also subtracts the
+  low-multiplicity results from the high-multiplicity ones; this
+  subtraction is not performed in the Rivet analysis, since it is
+  difficult to reproduce on generated events, so only unsubtracted values
+  are used. Bin edges for the integrated correlations are not reported in
+  HepData and are therefore taken as the midpoints between the reported
+  points.'
+BibKey: Khachatryan:2016txc
+BibTeX: '@article{Khachatryan:2016txc,
+      author         = "Khachatryan, Vardan and others",
+      title          = "{Evidence for collectivity in pp collisions at the LHC}",
+      collaboration  = "CMS",
+      journal        = "Phys.
Lett.", + volume = "B765", + year = "2017", + pages = "193-220", + doi = "10.1016/j.physletb.2016.12.009", + eprint = "1606.06198", + archivePrefix = "arXiv", + primaryClass = "nucl-ex", + reportNumber = "CMS-HIN-16-010, CERN-EP-2016-147", + SLACcitation = "%%CITATION = ARXIV:1606.06198;%%" +}' + diff --git a/analyses/pluginCMS/CMS_2017_I1471287.plot b/analyses/pluginCMS/CMS_2017_I1471287.plot new file mode 100644 --- /dev/null +++ b/analyses/pluginCMS/CMS_2017_I1471287.plot @@ -0,0 +1,62 @@ +# BEGIN PLOT /CMS_2017_I1471287/d01-x01-y01 +Title=$c_2\{2,|\Delta \eta > 2|\}$ $(0.3 GeV < p_\perp < 3 GeV)$ $\sqrt{s} = 13$ TeV +XLabel=$N_{ch}$ $(|\eta| < 2.4, p_\perp > 0.4$ GeV) +YLabel=$V_{2\Delta}$ +LogY=0 +# END PLOT +# BEGIN PLOT /CMS_2017_I1471287/d03-x01-y01 +Title=$c_3\{2,|\Delta \eta > 2|\}$ $(0.3 GeV < p_\perp < 3 GeV)$ $\sqrt{s} = 13$ TeV +XLabel=$N_{ch}$ $(|\eta| < 2.4, p_\perp > 0.4$ GeV) +YLabel=$V_{3\Delta}$ +YMin=-0.0004 +YMax=0.00025 +LogY=0 +# END PLOT +# BEGIN PLOT /CMS_2017_I1471287/d11-x01-y01 +Title=$v_2\{2,|\Delta \eta > 2|\}$ $(105 < N_{ch} < 150)$ $\sqrt{s} = 13$ TeV +XLabel=$p_{\perp}$ [GeV] +YLabel=$v_{2}\{2,|\Delta \eta > 2|\}$ +LogY=0 +# END PLOT +# BEGIN PLOT /CMS_2017_I1471287/d17-x01-y01 +Title=$v_2\{2,|\Delta \eta > 2|\}$ $(N_{ch} < 20)$ $\sqrt{s} = 13$ TeV +XLabel=$p_{\perp}$ [GeV] +YLabel=$v_{2}\{2,|\Delta \eta > 2|\}$ +LogY=0 +# END PLOT +# BEGIN PLOT /CMS_2017_I1471287/d18-x01-y01 +Title=$K^0_s$ $v_2\{2,|\Delta \eta > 2|\}$ $(N_{ch} < 20)$ $\sqrt{s} = 13$ TeV +XLabel=$p_{\perp}$ [GeV] +YLabel=$v_{2}\{2,|\Delta \eta > 2|\}$ +LogY=0 +# END PLOT +# BEGIN PLOT /CMS_2017_I1471287/d19-x01-y01 +Title=$\Lambda+\bar{\Lambda}$ $v_2\{2,|\Delta \eta > 2|\}$ $(N_{ch} < 20)$ $\sqrt{s} = 13$ TeV +XLabel=$p_{\perp}$ [GeV] +YLabel=$v_{2}\{2,|\Delta \eta > 2|\}$ +LogY=0 +# END PLOT +# BEGIN PLOT /CMS_2017_I1471287/d21-x01-y01 +Title=$K^0_s$ $v_2\{2,|\Delta \eta > 2|\}$ $(105 < N_{ch} < 150)$ $\sqrt{s} = 13$ TeV +XLabel=$p_{\perp}$ [GeV] +YLabel=$v_{2}\{2,|\Delta \eta > 2|\}$ +LogY=0 +# END PLOT +# BEGIN PLOT /CMS_2017_I1471287/d22-x01-y01 +Title=$\Lambda+\bar{\Lambda}$ $v_2\{2,|\Delta \eta > 2|\}$ $(105 < N_{ch} < 150)$ $\sqrt{s} = 13$ TeV +XLabel=$p_{\perp}$ [GeV] +YLabel=$v_{2}\{2,|\Delta \eta > 2|\}$ +LogY=0 +# END PLOT +# BEGIN PLOT /CMS_2017_I1471287/d28-x01-y01 +Title=$c_2\{4\}$ $(0.3 GeV < p_\perp < 3 GeV)$ $\sqrt{s} = 13$ TeV +XLabel=$N_{ch}$ $(|\eta| < 2.4, p_\perp > 0.4$ GeV) +YLabel=$c_2\{4\}$ +LogY=0 +# END PLOT +# BEGIN PLOT /CMS_2017_I1471287/d31-x01-y01 +Title=$c_2\{6\}$ $(0.3 GeV < p_\perp < 3 GeV)$ $\sqrt{s} = 13$ TeV +XLabel=$N_{ch}$ $(|\eta| < 2.4, p_\perp > 0.4$ GeV) +YLabel=$c_2\{6\}$ +LogY=0 +# END PLOT diff --git a/analyses/pluginCMS/CMS_2017_I1471287.yoda b/analyses/pluginCMS/CMS_2017_I1471287.yoda new file mode 100644 --- /dev/null +++ b/analyses/pluginCMS/CMS_2017_I1471287.yoda @@ -0,0 +1,210 @@ +BEGIN YODA_SCATTER2D_V2 /REF/CMS_2017_I1471287/d01-x01-y01 +Variations: [""] +IsRef: 1 +Path: /REF/CMS_2017_I1471287/d01-x01-y01 +Title: doi:10.17182/hepdata.76506.v1/t1 +Type: Scatter2D +--- +# xval xerr- xerr+ yval yerr- yerr- +1.400000e+01 4.000000e+00 5.000000e+00 3.070000e-03 1.448878e-04 1.448878e-04 +2.400000e+01 5.000000e+00 5.000000e+00 2.930000e-03 1.382960e-04 1.382960e-04 +3.400000e+01 5.000000e+00 6.500000e+00 2.930000e-03 1.377227e-04 1.377227e-04 +4.700000e+01 6.500000e+00 1.050000e+01 2.950000e-03 1.389461e-04 1.389461e-04 +6.800000e+01 1.050000e+01 1.050000e+01 3.020000e-03 1.421521e-04 1.421521e-04 +8.900000e+01 1.050000e+01 5.000000e+00 
3.140000e-03 1.947334e-04 1.947334e-04 +9.900000e+01 5.000000e+00 5.000000e+00 3.130000e-03 1.942036e-04 1.942036e-04 +1.090000e+02 5.000000e+00 5.000000e+00 3.180000e-03 1.973362e-04 1.973362e-04 +1.190000e+02 5.000000e+00 5.000000e+00 3.170000e-03 1.966424e-04 1.966424e-04 +1.290000e+02 5.000000e+00 5.500000e+00 3.230000e-03 2.014785e-04 2.014785e-04 +1.400000e+02 5.500000e+00 9.000000e+00 3.120000e-03 1.946101e-04 1.946101e-04 +1.580000e+02 9.000000e+00 1.700000e+01 3.120000e-03 1.975759e-04 1.975759e-04 +END YODA_SCATTER2D_V2 + + +BEGIN YODA_SCATTER2D_V2 /REF/CMS_2017_I1471287/d03-x01-y01 +Variations: [""] +IsRef: 1 +Path: /REF/CMS_2017_I1471287/d03-x01-y01 +Title: doi:10.17182/hepdata.76506.v1/t3 +Type: Scatter2D +--- +# xval xerr- xerr+ yval yerr- yerr- +1.400000e+01 4.000000e+00 5.000000e+00 -3.250000e-04 1.991422e-05 1.991422e-05 +2.400000e+01 5.000000e+00 5.000000e+00 -2.080000e-04 1.394812e-05 1.394812e-05 +3.400000e+01 5.000000e+00 6.500000e+00 -1.690000e-04 1.189599e-05 1.189599e-05 +4.700000e+01 6.500000e+00 1.050000e+01 -9.700000e-05 8.649868e-06 8.649868e-06 +6.800000e+01 1.050000e+01 1.200000e+01 -4.520000e-05 1.105052e-05 1.105052e-05 +9.200000e+01 1.200000e+01 8.500000e+00 3.910000e-05 7.819692e-06 7.819692e-06 +1.090000e+02 8.500000e+00 6.000000e+00 4.310000e-05 7.382700e-06 7.382700e-06 +1.210000e+02 6.000000e+00 9.500000e+00 4.590000e-05 9.892870e-06 9.892870e-06 +1.400000e+02 9.500000e+00 9.000000e+00 8.820000e-05 2.055712e-05 2.055712e-05 +1.580000e+02 9.000000e+00 1.700000e+01 1.490000e-04 3.889140e-05 3.889140e-05 +END YODA_SCATTER2D_V2 + +BEGIN YODA_SCATTER2D_V2 /REF/CMS_2017_I1471287/d11-x01-y01 +Variations: [""] +IsRef: 1 +Path: /REF/CMS_2017_I1471287/d11-x01-y01 +Title: doi:10.17182/hepdata.76506.v1/t11 +Type: Scatter2D +--- +# xval xerr- xerr+ yval yerr- yerr- +2.960000e-01 2.960000e-01 9.900000e-02 2.340000e-02 1.185118e-03 1.185118e-03 +4.940000e-01 9.900000e-02 1.000000e-01 3.640000e-02 1.832929e-03 1.832929e-03 +6.940000e-01 1.000000e-01 1.005000e-01 4.730000e-02 2.376919e-03 2.376919e-03 +8.950000e-01 1.005000e-01 1.425000e-01 5.790000e-02 2.907922e-03 2.907922e-03 +1.180000e+00 1.425000e-01 2.000000e-01 7.140000e-02 3.576802e-03 3.576802e-03 +1.580000e+00 2.000000e-01 2.000000e-01 8.880000e-02 4.451634e-03 4.451634e-03 +1.980000e+00 2.000000e-01 2.400000e-01 1.050000e-01 5.252398e-03 5.252398e-03 +2.460000e+00 2.400000e-01 3.400000e-01 1.190000e-01 5.993003e-03 5.993003e-03 +3.140000e+00 3.400000e-01 4.400000e-01 1.390000e-01 7.001948e-03 7.001948e-03 +4.020000e+00 4.400000e-01 5.750000e-01 1.520000e-01 7.696012e-03 7.696012e-03 +5.170000e+00 5.750000e-01 8.300000e-01 1.660000e-01 8.436335e-03 8.436335e-03 +END YODA_SCATTER2D_V2 + +BEGIN YODA_SCATTER2D_V2 /REF/CMS_2017_I1471287/d17-x01-y01 +Variations: [""] +IsRef: 1 +Path: /REF/CMS_2017_I1471287/d17-x01-y01 +Title: doi:10.17182/hepdata.76506.v1/t17 +Type: Scatter2D +--- +# xval xerr- xerr+ yval yerr- yerr- +2.930000e-01 2.930000e-01 9.900000e-02 1.680000e-02 6.410631e-04 6.410631e-04 +4.910000e-01 9.900000e-02 1.000000e-01 3.520000e-02 1.247391e-03 1.247391e-03 +6.910000e-01 1.000000e-01 1.005000e-01 5.270000e-02 1.810787e-03 1.810787e-03 +8.920000e-01 1.005000e-01 1.390000e-01 6.980000e-02 2.384057e-03 2.384057e-03 +1.170000e+00 1.390000e-01 2.000000e-01 9.080000e-02 2.959653e-03 2.959653e-03 +1.570000e+00 2.000000e-01 2.050000e-01 1.260000e-01 4.145082e-03 4.145082e-03 +1.980000e+00 2.050000e-01 2.350000e-01 1.660000e-01 5.535146e-03 5.535146e-03 +2.450000e+00 2.350000e-01 3.400000e-01 2.130000e-01 
7.071177e-03 7.071177e-03 +3.130000e+00 3.400000e-01 4.350000e-01 2.850000e-01 9.603130e-03 9.603130e-03 +4.000000e+00 4.350000e-01 5.750000e-01 3.840000e-01 1.342065e-02 1.342065e-02 +5.150000e+00 5.750000e-01 5.750000e-01 5.230000e-01 1.908538e-02 1.908538e-02 +END YODA_SCATTER2D_V2 + +BEGIN YODA_SCATTER2D_V2 /REF/CMS_2017_I1471287/d18-x01-y01 +Variations: [""] +IsRef: 1 +Path: /REF/CMS_2017_I1471287/d18-x01-y01 +Title: doi:10.17182/hepdata.76506.v1/t18 +Type: Scatter2D +--- +# xval xerr- xerr+ yval yerr- yerr- +2.940000e-01 2.940000e-01 1.185000e-01 1.580000e-02 6.997278e-03 6.997278e-03 +5.310000e-01 1.185000e-01 1.515000e-01 2.680000e-02 5.154437e-03 5.154437e-03 +8.340000e-01 1.515000e-01 1.530000e-01 5.560000e-02 6.040278e-03 6.040278e-03 +1.140000e+00 1.530000e-01 1.650000e-01 7.410000e-02 7.377115e-03 7.377115e-03 +1.470000e+00 1.650000e-01 2.250000e-01 9.330000e-02 8.514602e-03 8.514602e-03 +1.920000e+00 2.250000e-01 2.700000e-01 1.530000e-01 1.203772e-02 1.203772e-02 +2.460000e+00 2.700000e-01 3.950000e-01 1.900000e-01 1.578818e-02 1.578818e-02 +3.250000e+00 3.950000e-01 7.100000e-01 2.670000e-01 2.113853e-02 2.113853e-02 +4.670000e+00 7.100000e-01 8.300000e-01 4.370000e-01 3.766808e-02 3.766808e-02 +END YODA_SCATTER2D_V2 + +BEGIN YODA_SCATTER2D_V2 /REF/CMS_2017_I1471287/d19-x01-y01 +Variations: [""] +IsRef: 1 +Path: /REF/CMS_2017_I1471287/d19-x01-y01 +Title: doi:10.17182/hepdata.76506.v1/t19 +Type: Scatter2D +--- +# xval xerr- xerr+ yval yerr- yerr- +8.280000e-01 8.280000e-01 1.560000e-01 5.000000e-02 1.163311e-02 1.163311e-02 +1.140000e+00 1.560000e-01 1.650000e-01 4.760000e-02 1.215013e-02 1.215013e-02 +1.470000e+00 1.650000e-01 2.200000e-01 1.070000e-01 1.353647e-02 1.353647e-02 +1.910000e+00 2.200000e-01 2.700000e-01 1.160000e-01 1.613309e-02 1.613309e-02 +2.450000e+00 2.700000e-01 3.900000e-01 1.960000e-01 2.258414e-02 2.258414e-02 +3.230000e+00 3.900000e-01 6.950000e-01 2.650000e-01 2.981822e-02 2.981822e-02 +4.620000e+00 6.950000e-01 8.800000e-01 4.830000e-01 6.249298e-02 6.249298e-02 +END YODA_SCATTER2D_V2 + +BEGIN YODA_SCATTER2D_V2 /REF/CMS_2017_I1471287/d20-x01-y01 +Variations: [""] +IsRef: 1 +Path: /REF/CMS_2017_I1471287/d20-x01-y01 +Title: doi:10.17182/hepdata.76506.v1/t20 +Type: Scatter2D +--- +# xval xerr- xerr+ yval yerr- yerr- +2.960000e-01 2.960000e-01 9.900000e-02 2.340000e-02 1.185118e-03 1.185118e-03 +4.940000e-01 9.900000e-02 1.000000e-01 3.640000e-02 1.832929e-03 1.832929e-03 +6.940000e-01 1.000000e-01 1.005000e-01 4.730000e-02 2.376919e-03 2.376919e-03 +8.950000e-01 1.005000e-01 1.425000e-01 5.790000e-02 2.907922e-03 2.907922e-03 +1.180000e+00 1.425000e-01 2.000000e-01 7.140000e-02 3.576802e-03 3.576802e-03 +1.580000e+00 2.000000e-01 2.000000e-01 8.880000e-02 4.451634e-03 4.451634e-03 +1.980000e+00 2.000000e-01 2.400000e-01 1.050000e-01 5.252398e-03 5.252398e-03 +2.460000e+00 2.400000e-01 3.400000e-01 1.190000e-01 5.993003e-03 5.993003e-03 +3.140000e+00 3.400000e-01 4.400000e-01 1.390000e-01 7.001948e-03 7.001948e-03 +4.020000e+00 4.400000e-01 5.750000e-01 1.520000e-01 7.696012e-03 7.696012e-03 +5.170000e+00 5.750000e-01 5.750000e-01 1.660000e-01 8.436335e-03 8.436335e-03 +END YODA_SCATTER2D_V2 + +BEGIN YODA_SCATTER2D_V2 /REF/CMS_2017_I1471287/d21-x01-y01 +Variations: [""] +IsRef: 1 +Path: /REF/CMS_2017_I1471287/d21-x01-y01 +Title: doi:10.17182/hepdata.76506.v1/t21 +Type: Scatter2D +--- +# xval xerr- xerr+ yval yerr- yerr- +2.980000e-01 2.980000e-01 1.215000e-01 1.720000e-02 4.728536e-03 4.728536e-03 +5.410000e-01 1.215000e-01 1.505000e-01 2.710000e-02 
3.203778e-03 3.203778e-03 +8.420000e-01 1.505000e-01 1.490000e-01 4.030000e-02 3.374707e-03 3.374707e-03 +1.140000e+00 1.490000e-01 1.700000e-01 5.880000e-02 4.187732e-03 4.187732e-03 +1.480000e+00 1.700000e-01 2.200000e-01 7.490000e-02 4.906341e-03 4.906341e-03 +1.920000e+00 2.200000e-01 2.700000e-01 9.600000e-02 6.085243e-03 6.085243e-03 +2.460000e+00 2.700000e-01 4.100000e-01 1.110000e-01 7.157379e-03 7.157379e-03 +3.280000e+00 4.100000e-01 7.200000e-01 1.300000e-01 8.346303e-03 8.346303e-03 +4.720000e+00 7.200000e-01 7.200000e-01 1.460000e-01 1.068133e-02 1.068133e-02 +END YODA_SCATTER2D_V2 + +BEGIN YODA_SCATTER2D_V2 /REF/CMS_2017_I1471287/d22-x01-y01 +Variations: [""] +IsRef: 1 +Path: /REF/CMS_2017_I1471287/d22-x01-y01 +Title: doi:10.17182/hepdata.76506.v1/t22 +Type: Scatter2D +--- +# xval xerr- xerr+ yval yerr- yerr- +8.370000e-01 8.370000e-01 1.515000e-01 2.100000e-02 7.603899e-03 7.603899e-03 +1.140000e+00 1.515000e-01 1.700000e-01 2.210000e-02 6.416394e-03 6.416394e-03 +1.480000e+00 1.700000e-01 2.250000e-01 5.250000e-02 5.976662e-03 5.976662e-03 +1.930000e+00 2.250000e-01 2.700000e-01 7.790000e-02 6.597289e-03 6.597289e-03 +2.470000e+00 2.700000e-01 4.050000e-01 1.110000e-01 8.181653e-03 8.181653e-03 +3.280000e+00 4.050000e-01 6.950000e-01 1.340000e-01 9.337150e-03 9.337150e-03 +4.670000e+00 6.950000e-01 6.950000e-01 1.770000e-01 1.384855e-02 1.384855e-02 +END YODA_SCATTER2D_V2 + +BEGIN YODA_SCATTER2D_V2 /REF/CMS_2017_I1471287/d28-x01-y01 +Variations: [""] +IsRef: 1 +Path: /REF/CMS_2017_I1471287/d28-x01-y01 +Title: doi:10.17182/hepdata.76506.v1/t28 +Type: Scatter2D +--- +# xval xerr- xerr+ yval yerr- yerr- +1.400000e+01 1.400000e+01 5.000000e+00 3.000000e-05 6.269979e-06 6.269979e-06 +2.400000e+01 5.000000e+00 5.000000e+00 1.720000e-05 3.159413e-06 3.159413e-06 +3.400000e+01 5.000000e+00 6.000000e+00 8.810000e-06 1.954818e-06 1.954818e-06 +4.600000e+01 6.000000e+00 1.250000e+01 1.850000e-06 5.392488e-07 5.392488e-07 +7.100000e+01 1.250000e+01 8.500000e+00 -1.610000e-06 4.250619e-07 4.250619e-07 +8.800000e+01 8.500000e+00 5.000000e+00 -3.510000e-06 9.156514e-07 9.156514e-07 +9.800000e+01 5.000000e+00 6.500000e+00 -3.870000e-06 9.839949e-07 9.839949e-07 +1.110000e+02 6.500000e+00 7.000000e+00 -3.380000e-06 8.780067e-07 8.780067e-07 +1.250000e+02 7.000000e+00 7.500000e+00 -4.120000e-06 1.098815e-06 1.098815e-06 +1.400000e+02 7.500000e+00 1.000000e+01 -4.060000e-06 8.761764e-07 8.761764e-07 +END YODA_SCATTER2D_V2 + +BEGIN YODA_SCATTER2D_V2 /REF/CMS_2017_I1471287/d31-x01-y01 +Variations: [""] +IsRef: 1 +Path: /REF/CMS_2017_I1471287/d31-x01-y01 +Title: doi:10.17182/hepdata.76506.v1/t31 +Type: Scatter2D +--- +# xval xerr- xerr+ yval yerr- yerr- +9.800000e+01 8.000000e+00 6.500000e+00 4.811000e-08 1.707479e-08 1.707479e-08 +1.110000e+02 6.500000e+00 7.000000e+00 2.830000e-08 1.024164e-08 1.024164e-08 +1.250000e+02 7.000000e+00 3.500000e+01 2.170000e-08 9.262327e-09 9.262327e-09 +END YODA_SCATTER2D_V2 diff --git a/analyses/pluginRHIC/BRAHMS_2004_I647076.cc b/analyses/pluginRHIC/BRAHMS_2004_I647076.cc --- a/analyses/pluginRHIC/BRAHMS_2004_I647076.cc +++ b/analyses/pluginRHIC/BRAHMS_2004_I647076.cc @@ -1,217 +1,217 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/SingleValueProjection.hh" #include "Rivet/Projections/ImpactParameterProjection.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/UnstableFinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { /// @brief BRAHMS Centrality projection. 
class BRAHMSCentrality : public SingleValueProjection { public: // Constructor BRAHMSCentrality() : SingleValueProjection() { // Using here the BRAHMS reaction centrality from eg. 1602.01183, which // might not be correct. declare(ChargedFinalState(Cuts::pT > 0.1*GeV && Cuts::abseta < 2.2), "ChargedFinalState"); } // Destructor virtual ~BRAHMSCentrality() {} // Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(BRAHMSCentrality); protected: // Do the projection. Count the number of charged particles in // the specified range. virtual void project(const Event& e) { clear(); set(apply (e, "ChargedFinalState").particles().size()); } // Compare to another projection. virtual int compare(const Projection& p) const { // This projection is only used for the analysis below. return UNDEFINED; } }; /// @brief Brahms centrality calibration analysis based on the // BrahmsCentrality projection. No data is given for this // analysis, so one MUST do a calibration run. class BRAHMS_2004_CENTRALITY : public Analysis { public: // Constructor BRAHMS_2004_CENTRALITY() : Analysis("BRAHMS_2004_CENTRALITY") {} // Initialize the analysis void init() { declare(BRAHMSCentrality(),"Centrality"); declare(ImpactParameterProjection(), "IMP"); // The central multiplicity. mult = bookHisto1D("mult",450,0,4500); // Safeguard against filling preloaded histograms. done = (mult->numEntries() > 0); // The impact parameter. imp = bookHisto1D("mult_IMP",100,0,20); } // Analyse a single event void analyze(const Event& event) { if (done) return; // Fill impact parameter. imp->fill(apply(event,"IMP")(), event.weight()); // Fill multiplicity. mult->fill(apply(event,"Centrality")(), event.weight()); } // Finalize the analysis void finalize() { // Normalize the distributions, safeguarding against // yoda normalization error. if(mult->numEntries() > 0) mult->normalize(); if(imp->numEntries() > 0) imp->normalize(); } private: // Histograms. Histo1DPtr mult; Histo1DPtr imp; // Flag to test if we have preloaded histograms. bool done; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(BRAHMS_2004_CENTRALITY); /// @brief Brahms pT spectra for id particles (pi+, pi-, K+, K-) // in small bins of rapidity, 5% central collisions. // System: AuAu @ 200GeV/nn. class BRAHMS_2004_I647076 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(BRAHMS_2004_I647076); /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { // Initialise and register projections // Centrality Projection. declareCentrality(BRAHMSCentrality(), "BRAHMS_2004_CENTRALITY","mult","BCEN"); // TODO: Feed down correction is unclear. - declare(UnstableFinalState(Cuts::abseta < 5 && Cuts::pT > 100*MeV), "FS"); + declare(FinalState(Cuts::rap < 4 && Cuts::rap > -0.1 && Cuts::pT > 100*MeV), "FS"); // The measured rapidity intervals for pions. rapIntervalsPi = {{-0.1,0.},{0.,0.1},{0.4,0.6},{0.6,0.8},{0.8,1.0}, {1.0,1.2},{1.2,1.4},{2.1,2.3},{2.4,2.6},{3.0,3.1},{3.1,3.2},{3.2,3.3}, {3.3,3.4},{3.4,3.66}}; // The measured rapidity intervals for kaons. 
rapIntervalsK = {{-0.1,0.},{0.,0.1},{0.4,0.6},{0.6,0.8},{0.8,1.0}, {1.0,1.2},{2.0,2.2},{2.3,2.5},{2.9,3.0},{3.0,3.1},{3.1,3.2},{3.2,3.4}}; // Book histograms for (int i = 1, N = rapIntervalsPi.size(); i <= N; ++i) { piPlus.push_back(bookHisto1D(1, 1, i)); piMinus.push_back(bookHisto1D(1, 1, 14 + i)); } for (int i = 1, N = rapIntervalsK.size(); i <= N; ++i) { kPlus.push_back(bookHisto1D(2, 1, i)); kMinus.push_back(bookHisto1D(2, 1, 12 + i)); } // Counter for accepted sum of weights (centrality cut). centSow = bookCounter("centSow"); } /// Perform the per-event analysis void analyze(const Event& event) { const double w = event.weight(); // Reject all non-central events. The paper does not speak of // any other event trigger, which in any case should matter // little for central events. if(apply(event,"BCEN")() > 5.0) return; // Keep track of sum of weights. centSow->fill(w); - const UnstableFinalState& fs = apply(event,"FS"); + const FinalState& fs = apply(event,"FS"); // Loop over particles. for (const auto& p : fs.particles()) { const double y = p.rapidity(); const double pT = p.pT(); const int id = p.pid(); // First pions. if (abs(id) == 211) { // Protect against decaying K0S and Lambda if (p.hasAncestor(310) || p.hasAncestor(-310) || p.hasAncestor(3122) || p.hasAncestor(3122)) continue; for (int i = 0, N = rapIntervalsPi.size(); i < N; ++i) { if (y > rapIntervalsPi[i].first && y <= rapIntervalsPi[i].second) { const double dy = rapIntervalsPi[i].second - rapIntervalsPi[i].first; const double nWeight = w / ( 2*M_PI*pT*dy); if (id == 211) piPlus[i]->fill(pT, nWeight); else piMinus[i]->fill(pT, nWeight); break; } } } // Then kaons. else if (abs(id) == 321) { for (int i = 0, N = rapIntervalsK.size(); i < N; ++i) { if (y > rapIntervalsK[i].first && y <= rapIntervalsK[i].second) { const double dy = rapIntervalsK[i].second - rapIntervalsK[i].first; const double nWeight = w / ( 2*M_PI*pT*dy); if (id == 321) kPlus[i]->fill(pT, nWeight); else kMinus[i]->fill(pT, nWeight); break; } } } } } /// Normalise histograms etc., after the run void finalize() { // Normalize all histograms to per-event yields. for (int i = 0, N = rapIntervalsPi.size(); i < N; ++i) { piPlus[i]->scaleW(1./centSow->sumW()); piMinus[i]->scaleW(1./centSow->sumW()); } for (int i = 0, N = rapIntervalsK.size(); i < N; ++i) { kPlus[i]->scaleW(1./centSow->sumW()); kMinus[i]->scaleW(1./centSow->sumW()); } } //@} // The rapidity intervals. vector > rapIntervalsPi; vector > rapIntervalsK; /// @name Histograms //@{ vector piPlus; vector piMinus; vector kPlus; vector kMinus; CounterPtr centSow; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(BRAHMS_2004_I647076); } diff --git a/include/Rivet/Tools/AtlasCommon.hh b/include/Rivet/Tools/AtlasCommon.hh --- a/include/Rivet/Tools/AtlasCommon.hh +++ b/include/Rivet/Tools/AtlasCommon.hh @@ -1,85 +1,120 @@ // -*- C++ -*- #ifndef RIVET_ATLAS_COMMON_HH #define RIVET_ATLAS_COMMON_HH #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/SingleValueProjection.hh" #include "Rivet/Projections/TriggerProjection.hh" namespace Rivet { /// Common projections for ATLAS trigger conditions and centrality. namespace ATLAS { /// Centrality projection for pPb collisions (one sided) class SumET_PB_Centrality: public SingleValueProjection { public: /// Constructor. SumET_PB_Centrality() { declare(FinalState(Cuts::eta < -3.2 && Cuts::eta > -4.9 && Cuts::pT > 0.1*GeV), "SumET_PB_Centrality"); } /// Clone on the heap. 
DEFAULT_RIVET_PROJ_CLONE(SumET_PB_Centrality); protected: /// Perform the projection on the Event void project(const Event& e) { clear(); const FinalState & fsfwd = apply(e, "SumET_PB_Centrality"); double estimate = 0.0; for ( const Particle & p : fsfwd.particles() ) { estimate += p.Et(); } set(estimate); } /// Compare projections int compare(const Projection& p) const { return mkNamedPCmp(p, "SumET_PB_Centrality"); } }; +/// Centrality projection for PbPb collisions (two sided) +class SumET_PBPB_Centrality: public SingleValueProjection { + +public: + + /// Constructor. + SumET_PBPB_Centrality() { + declare(FinalState(Cuts::abseta > 3.2 && Cuts::abseta < 4.9 && Cuts::pT > 0.1*GeV), + "SumET_PBPB_Centrality"); + } + + /// Clone on the heap. + DEFAULT_RIVET_PROJ_CLONE(SumET_PBPB_Centrality); + +protected: + + /// Perform the projection on the Event + void project(const Event& e) { + clear(); + const FinalState & fsfwd = + apply(e, "SumET_PBPB_Centrality"); + double estimate = 0.0; + for ( const Particle & p : fsfwd.particles() ) { + estimate += p.Et(); + } + set(estimate); + } + + /// Compare projections + int compare(const Projection& p) const { + return mkNamedPCmp(p, "SumET_PBPB_Centrality"); + } + +}; + /// ATLAS min bias trigger conditions. class MinBiasTrigger: public TriggerProjection { public: /// Constructor. MinBiasTrigger() { declare(ChargedFinalState(Cuts::eta > 2.09 && Cuts::eta < 3.84 && Cuts::pT > 0.1*GeV), "MBB"); declare(ChargedFinalState(Cuts::eta < -2.09 && Cuts::eta > -3.84 && Cuts::pT > 0.1*GeV), "MBF"); } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(MinBiasTrigger); protected: /// Perform the projection on the Event void project(const Event& event) { pass(); if ( applyProjection(event,"MBF").particles().empty() || applyProjection(event,"MBB").particles().empty() ) fail(); } /// Compare projections int compare(const Projection& p) const { return mkNamedPCmp(p, "MBF") || mkNamedPCmp(p, "MBB"); } }; } } #endif diff --git a/include/Rivet/Tools/Correlators.hh b/include/Rivet/Tools/Correlators.hh --- a/include/Rivet/Tools/Correlators.hh +++ b/include/Rivet/Tools/Correlators.hh @@ -1,1136 +1,1136 @@ // -*- C++ -*- #ifndef RIVET_Correlators_HH #define RIVET_Correlators_HH #include "Rivet/Projection.hh" #include "Rivet/Projections/ParticleFinder.hh" #include #include "YODA/Scatter2D.h" #include "Rivet/Analysis.hh" /* File contains tools for calculating flow coefficients using * correlators. * Classes: * Correlators: Calculates single event correlators of a given harmonic. * Cumulants: An additional base class for flow analyses * (use as: class MyAnalysis : public Analysis, Cumulants {};) * Includes a framework for calculating cumulants and flow coefficients * from single event correlators, including automatic handling of statistical * errors. Contains multiple internal sub-classes: * CorBinBase: Base class for correlators binned in event or particle observables. * CorSingleBin: A simple bin for correlators. * CorBin: Has the interface of a simple bin, but does automatic calculation * of statistical errors by a bootstrap method. * ECorrelator: Data type for event averaged correlators. * @author Vytautas Vislavicius, Christine O. Rasmussen, Christian Bierlich. */ namespace Rivet { /* @brief Projection for calculating correlators for flow measurements. * * A projection which calculates Q-vectors and P-vectors, and projects * them out as correlators. Implementation follows the description of * the ''Generic Framework'' * Phys. Rev. C 83 (2011) 044913, arXiv: 1010.0233 * Phys. 
Rev. C 89 (2014) 064904, arXiv: 1312.3572 * */ class Correlators : public Projection { public: // Constructor. Takes parameters @parm fsp, the Final State // projection correlators should be constructed from, @parm nMaxIn, // the maximal sum of harmonics, eg. for // c_2{2} = {2,-2} = 2 + 2 = 4 // c_2{4} = {2,2,-2,-2} = 2 + 2 + 2 + 2 = 8 // c_4{2} = {4,-4} = 4 + 4 = 8 // c_4{4} = {4,4,-4,-4} = 4 + 4 + 4 + 4 = 16. // @parm pMaxIn is the maximal number of particles // you want to correlate and @parm pTbinEdgesIn is the (lower) // edges of pT bins, the last one the upper edge of the final bin. Correlators(const ParticleFinder& fsp, int nMaxIn = 2, int pMaxIn = 0, vector pTbinEdgesIn = {}); // Constructor which takes a Scatter2D to estimate bin edges. Correlators(const ParticleFinder& fsp, int nMaxIn, int pMaxIn, const Scatter2DPtr hIn); /// @brief Integrated correlator of @parm n harmonic, with the /// number of powers being the size of @parm n. /// Eg. @parm n should be: /// <<2>>_2 => n = {2, -2} /// <<4>>_2 => n = {2, 2, -2, -2} /// <<2>>_4 => n = {4, -4} /// <<4>>_4 => n = {4, 4, -4, 4} and so on. const pair intCorrelator(vector n) const; /// @brief pT differential correlator of @parm n harmonic, with the /// number of powers being the size of @parm n. /// The method can include overflow/underflow bins in the /// beginning/end of the returned vector, by toggling /// @parm overflow = true. const vector> pTBinnedCorrelators(vector n, bool overflow = false) const; /// @brief Integrated correlator of @parm n1 harmonic, with the /// number of powers being the size of @parm n1. This method /// imposes an eta gap, correlating with another phase space, /// where another Correlators projection @parm other should be /// defined. The harmonics of the other phase space is given /// as @parm n2. /// To get eg. integrated <<4>>_2, @parm n1 should be: /// n1 = {2, 2} and n2 = {-2, -2} const pair intCorrelatorGap(const Correlators& other, vector n1, vector n2) const; /// @brief pT differential correlators of @parm n1 harmonic, with the /// number of powers being the size of @parm n1. This method /// imposes an eta gap, correlating with another phase space, /// where another Correlators projection @parm other should be /// defined. The harmonics of the other phase space is given /// as @parm n2. /// To get eg. differential <<4'>_2, @parm n1 should be: /// n1 = {2, 2} and @parm n2: n2 = {-2, -2}. /// To get eg. differential <<2'>>_4, @parm n1 should be: /// n1 = {4} and @parm n2: n2 = {-4}. /// The method can include overflow/underflow /// bins in the beginning/end of the returned vector, by toggling /// @parm overflow = true. const vector> pTBinnedCorrelatorsGap( const Correlators& other, vector n1, vector n2, bool oveflow = false) const; /// @brief Construct a harmonic vectors from @parm n harmonics /// and @parm m number of particles. /// TODO: In C++14 this can be done much nicer with TMP. static vector hVec(int n, int m) { if (m%2 != 0) { cout << "Harmonic Vector: Number of particles " "must be an even number." 
<< endl; return {}; } vector ret = {}; for (int i = 0; i < m; ++i) { if (i < m/2) ret.push_back(n); else ret.push_back(-n); } return ret; } /// @brief Return the maximal values for n, p to be used in the /// constructor of Correlators(xxx, nMax, pMax, xxxx) static pair getMaxValues(vector< vector >& hList){ int nMax = 0, pMax = 0; for (vector h : hList) { int tmpN = 0, tmpP = 0; for ( int i = 0; i < int(h.size()); ++i) { tmpN += abs(h[i]); ++tmpP; } if (tmpN > nMax) nMax = tmpN; if (tmpP > pMax) pMax = tmpP; } return make_pair(nMax,pMax); } // Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(Correlators); protected: // @brief Project function. Loops over array and calculates Q vectors // and P vectors if needed void project(const Event& e); // @brief Compare against other projection. Test if harmonics, pT-bins // and underlying final state are similar. int compare(const Projection& p) const { const Correlators* other = dynamic_cast(&p); if (nMax != other->nMax) return UNDEFINED; if (pMax != other->pMax) return UNDEFINED; if (pTbinEdges != other->pTbinEdges) return UNDEFINED; return mkPCmp(p, "FS"); }; // @brief Calculate correlators from one particle. void fillCorrelators(const Particle& p, const double& weight); // @brief Return a Q-vector. const complex getQ(int n, int p) const { bool isNeg = (n < 0); if (isNeg) return conj( qVec[abs(n)][p] ); else return qVec[n][p]; }; // @brief Return a P-vector. const complex getP(int n, int p, double pT = 0.) const { bool isNeg = (n < 0); map::const_iterator pTitr = pVec.lower_bound(pT); if (pTitr == pVec.end()) return DBL_NAN; if (isNeg) return conj( pTitr->second[abs(n)][p] ); else return pTitr->second[n][p]; }; private: // Find correlators by recursion. Order = M (# of particles), // n's are harmonics, p's are the powers of the weights const complex recCorr(int order, vector n, vector p, bool useP, double pT = 0.) const; // Two-particle correlator Eq. (19) p. 6 // Flag if p-vectors or q-vectors should be used to // calculate the correlator. const complex twoPartCorr(int n1, int n2, int p1 = 1, int p2 = 1, double pT = 0., bool useP = false) const; // Set elements in vectors to zero. void setToZero(); // Shorthands for setting and comparing to zero. const complex _ZERO = {0., 0.}; const double _TINY = 1e-10; // Shorthand typedefs for vec>. typedef vector< vector> > Vec2D; // Define Q-vectors and p-vectors Vec2D qVec; // Q[n][p] map pVec; // p[pT][n][p] // The max values of n and p to be calculated. int nMax, pMax; // pT bin-edges vector pTbinEdges; bool isPtDiff; /// End class Correlators }; /// @brief Tools for flow analyses. /// The following are helper classes to construct event averaged correlators /// as well as cummulants and flow coefficents from the basic event // correlators defined above. They are all encapsulated in a Cumulants // class, which can be used as a(nother) base class for flow analyses, // to ensure access. class CumulantAnalysis : public Analysis { private: // Number of bins used for bootstrap calculation of statistical // uncertainties. It is hard coded, and shout NOT be changed unless there // are good reasons to do so. static const int BOOT_BINS = 9; // Enum for choosing the method of error calculation. enum Error { VARIANCE, ENVELOPE }; // The desired error method. Can be changed in the analysis constructor // by setting it appropriately. Error errorMethod; /// @brief Base class for correlator bins. class CorBinBase { public: CorBinBase() {} virtual ~CorBinBase() {}; // Derived class should have fill and mean defined. 
virtual void fill(const pair& cor, const double& weight) = 0; virtual const double mean() const = 0; }; /// @brief The CorSingleBin is the basic quantity filled in an ECorrelator. /// It is a simple counter with an even simpler structure than normal /// YODA type DBNs, but added functionality to test for out of /// bounds correlators. class CorSingleBin : public CorBinBase { public: /// @brief The default constructor. CorSingleBin() : _sumWX(0.), _sumW(0.), _sumW2(0.), _numEntries(0.) {} ~CorSingleBin() {} /// @brief Fill a correlator bin with the return type from a /// Correlator (a pair giving numerator and denominator of _event). void fill(const pair& cor, const double& weight) { // Test if denominator for the single event average is zero. if (cor.second < 1e-10) return; // The single event average is then cor.first / cor.second. // With weights this becomes just: _sumWX += cor.first * weight; _sumW += weight * cor.second; _sumW2 += weight * weight * cor.second * cor.second; _numEntries += 1.; } const double mean() const { if (_sumW < 1e-10) return 0; return _sumWX / _sumW; } // @brief Sum of weights. const double sumW() const { return _sumW; } const double sumW2() const { return _sumW2; } // @brief Sum of weight * X. const double sumWX() const { return _sumWX; } // @brief Number of entries. const double numEntries() const { return _numEntries; } void addContent(double ne, double sw, double sw2, double swx) { _numEntries += ne; _sumW += sw; _sumW2 += sw2; _sumWX += swx; } private: double _sumWX, _sumW, _sumW2, _numEntries; }; // End of CorSingleBin sub-class. /// @brief The CorBin is the basic bin quantity in ECorrelators. /// It consists of several CorSingleBins, to facilitate /// bootstrapping calculation of statistical uncertainties. class CorBin : public CorBinBase { public: // @brief The constructor. nBins signifies the period of the bootstrap // calculation, and should never be changed here, but in its definition // above -- and only if there are good reasons to do so. CorBin() : binIndex(0), nBins(BOOT_BINS) { for(size_t i = 0; i < nBins; ++i) bins.push_back(CorSingleBin()); } // Destructor must be implemented. ~CorBin() {} // @brief Fill the correct underlying bin and take a step. void fill(const pair& cor, const double& weight) { // Test if denominator for the single event average is zero. if (cor.second < 1e-10) return; // Fill the correct bin. bins[binIndex].fill(cor, weight); if (binIndex == nBins - 1) binIndex = 0; else ++binIndex; } // @brief Calculate the total sample mean with all // available statistics. const double mean() const { double sow = 0; double sowx = 0; for(auto b : bins) { if (b.sumW() < 1e-10) continue; sow += b.sumW(); sowx += b.sumWX(); } return sowx / sow; } // @brief Return a copy of the bins. const vector getBins() const { return bins; } // @brief Return the bins as pointers to the base class. template const vector getBinPtrs() { vector ret(bins.size()); transform(bins.begin(), bins.end(), ret.begin(), [](CorSingleBin& b) {return &b;}); return ret; } private: vector bins; size_t binIndex; size_t nBins; }; // End of CorBin sub-class. public: /// @brief The ECorrelator is a helper class to calculate all event /// averages of correlators, in order to construct cumulants. /// It can be binned in any variable. class ECorrelator { public: /// @brief Constructor. Takes as argument the desired harmonic and number /// of correlated particles as a generic framework style vector, eg, /// {2, -2} for <<2>>_2, no binning. 
/// TODO: Implement functionality for this if needed. //ECorrelator(vector h) : h1(h), h2({}), // binX(0), binContent(0), reference() { //}; /// @brief Constructor. Takes as argument the desired harmonic and number /// of correlated particles as a generic framework style vector, eg, /// {2, -2} for <<2>>_2 and binning. ECorrelator(vector h, vector binIn) : h1(h), h2({}), binX(binIn), binContent(binIn.size() - 1), reference() {}; /// @brief Constructor for gapped correlator. Takes as argument the /// desired harmonics for the two final states, and binning. ECorrelator(vector h1In, vector h2In, vector binIn) : h1(h1In), h2(h2In), binX(binIn), binContent(binIn.size() - 1), reference() {}; /// @brief Fill the appropriate bin given an input (per event) /// observable, eg. centrality. void fill(const double& obs, const Correlators& c, const double weight = 1.0) { int index = getBinIndex(obs); if (index < 0) return; binContent[index].fill(c.intCorrelator(h1), weight); } /// @brief Fill the appropriate bin given an input (per event) /// observable, eg. centrality. Using a rapidity gap between /// two Correlators. void fill(const double& obs, const Correlators& c1, const Correlators& c2, const double weight = 1.0) { if (!h2.size()) { cout << "Trying to fill gapped correlator, but harmonics behind " "the gap (h2) are not given!" << endl; return; } int index = getBinIndex(obs); if (index < 0) return; binContent[index].fill(c1.intCorrelatorGap(c2, h1, h2), weight); } /// @brief Fill the bins with the appropriate correlator, taking the /// binning directly from the Correlators object, and filling also the /// reference flow. void fill(const Correlators& c, const double& weight = 1.0) { vector< pair > diffCorr = c.pTBinnedCorrelators(h1); // We always skip overflow when calculating the all event average. if (diffCorr.size() != binX.size() - 1) cout << "Tried to fill event with wrong binning (ungapped)" << endl; for (size_t i = 0; i < diffCorr.size(); ++i) { int index = getBinIndex(binX[i]); if (index < 0) return; binContent[index].fill(diffCorr[i], weight); } reference.fill(c.intCorrelator(h1), weight); } /// @brief Fill bins with the appropriate correlator, taking the binning /// directly from the Correlators object, and also the reference flow. /// Using a rapidity gap between two Correlators. void fill(const Correlators& c1, const Correlators& c2, const double& weight = 1.0) { if (!h2.size()) { cout << "Trying to fill gapped correlator, but harmonics behind " "the gap (h2) are not given!" << endl; return; } vector< pair > diffCorr = c1.pTBinnedCorrelatorsGap(c2, h1, h2); // We always skip overflow when calculating the all event average. if (diffCorr.size() != binX.size() - 1) cout << "Tried to fill event with wrong binning (gapped)" << endl; for (size_t i = 0; i < diffCorr.size(); ++i) { int index = getBinIndex(binX[i]); if (index < 0) return; binContent[index].fill(diffCorr[i], weight); } reference.fill(c1.intCorrelatorGap(c2, h1, h2), weight); } /// @brief Get a copy of the bin contents. const vector getBins() const { return binContent; } // @brief Return the bins as pointers to the base class. const vector getBinPtrs() { vector ret(binContent.size()); transform(binContent.begin(), binContent.end(), ret.begin(), [](CorBin& b) {return &b;}); return ret; } /// @brief Get a copy of the bin x-values. const vector getBinX() const { return binX; } /// @brief Get a copy of the @ret h1 harmonic vector. const vector getH1() const { return h1; } /// @brief Get a copy of the @ret h2 harmonic vector. 
    const vector<int> getH2() const { return h2; }

    /// @brief Replace the reference flow bin with another reference
    /// flow bin, e.g. calculated in another phase space or with
    /// another PID.
    void setReference(CorBin refIn) { reference = refIn; }

    /// @brief Extract the reference flow from a differential event
    /// averaged correlator.
    const CorBin getReference() const {
      if (reference.mean() < 1e-10)
        cout << "Warning: ECorrelator, reference bin is zero." << endl;
      return reference;
    }

    /// @brief Set the prIn list of profile histograms associated
    /// with the internal bins. Is called automatically when booking, no
    /// need to call it yourself.
    void setProfs(list<Profile1DPtr> prIn) { profs = prIn; }

    /// @brief Fill bins with content from preloaded histograms.
    void fillFromProfs() {
      list<Profile1DPtr>::iterator hItr = profs.begin();
      auto refs = reference.getBinPtrs<CorSingleBin>();
      for (size_t i = 0; i < profs.size(); ++i, ++hItr) {
        for (size_t j = 0; j < binX.size() - 1; ++j) {
          const YODA::ProfileBin1D& pBin = (*hItr)->binAt(binX[j]);
          auto tmp = binContent[j].getBinPtrs<CorSingleBin>();
          tmp[i]->addContent(pBin.numEntries(), pBin.sumW(), pBin.sumW2(),
            pBin.sumWY());
        }
        // Get the reference flow from the underflow bin of the histogram.
        const YODA::Dbn2D& uBin = (*hItr)->underflow();
        refs[i]->addContent(uBin.numEntries(), uBin.sumW(), uBin.sumW2(),
          uBin.sumWY());
      } // End loop of bootstrapped correlators.
    }

    /// @brief begin() iterator for the list of associated profile histograms.
    list<Profile1DPtr>::iterator profBegin() { return profs.begin(); }

    /// @brief end() iterator for the list of associated profile histograms.
    list<Profile1DPtr>::iterator profEnd() { return profs.end(); }

  private:

    /// @brief Get the correct bin index for a given obs value.
    const int getBinIndex(const double& obs) const {
      // Find the correct index of binContent.
      // If we are in overflow, just skip.
      if (obs >= binX.back()) return -1;
      // If we are in underflow, ditto.
      if (obs < binX[0]) return -1;
      int index = 0;
      for (int i = 0, N = binX.size() - 1; i < N; ++i, ++index)
        if (obs >= binX[i] && obs < binX[i + 1]) break;
      return index;
    }

    // The harmonics vectors.
    vector<int> h1;
    vector<int> h2;
    // The bins.
    vector<double> binX;
    vector<CorBin> binContent;
    // The reference flow.
    CorBin reference;
    // The profile histograms associated with the CorBins for streaming.
    list<Profile1DPtr> profs;

  }; // End of ECorrelator sub-class.

  // @brief Get the correct max N and max P for the set of
  // booked correlators.
  const pair<int, int> getMaxValues() const {
    vector< vector<int> > harmVecs;
    for (auto eItr = eCorrPtrs.begin(); eItr != eCorrPtrs.end(); ++eItr) {
      vector<int> h1 = (*eItr)->getH1();
      vector<int> h2 = (*eItr)->getH2();
      if (h1.size() > 0) harmVecs.push_back(h1);
      if (h2.size() > 0) harmVecs.push_back(h2);
    }
    if (harmVecs.size() == 0) {
      cout << "Warning: You tried to extract max values from harmonic "
        "vectors, but have not booked any." << endl;
      return pair<int, int>();
    }
    return Correlators::getMaxValues(harmVecs);
  }

  // Typedef of a shared pointer to ECorrelator.
  typedef shared_ptr<ECorrelator> ECorrPtr;
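  // A sketch of per-event filling of a booked ECorrPtr, assuming an analysis
  // that books a gapped correlator (ec22gap, hypothetical name, booked with the
  // helpers below) and declares two Correlators projections, one for each
  // sub-event on either side of the rapidity gap (projection names are
  // placeholders; the Correlators projection itself is defined elsewhere):
  //
  //   const Correlators& cPos = apply<Correlators>(event, "CorrelatorsPos");
  //   const Correlators& cNeg = apply<Correlators>(event, "CorrelatorsNeg");
  //   ec22gap->fill(centrality, cPos, cNeg, event.weight());
  //
  // The ungapped fill() overloads are used the same way with a single
  // Correlators projection.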
  // @brief Book an ECorrelator in the same way as a histogram.
  ECorrPtr bookECorrelator(const string name, const vector<int>& h,
    const Scatter2DPtr hIn) {
    vector<double> binIn;
    for (auto b : hIn->points()) binIn.push_back(b.xMin());
    binIn.push_back(hIn->points().back().xMax());
    ECorrPtr ecPtr = ECorrPtr(new ECorrelator(h, binIn));
    list<Profile1DPtr> eCorrProfs;
    for (int i = 0; i < BOOT_BINS; ++i) {
      Profile1DPtr tmp = bookProfile1D(name+"-"+to_string(i),*hIn);
-     tmp->setPath("/TMP/"+this->name()+"/FINAL/" + name+"-"+to_string(i));
+     tmp->setPath(this->name()+"/FINAL/" + name+"-"+to_string(i));
      //tmp->setPath(tmp->path()+"FINAL");
      eCorrProfs.push_back(tmp);
    }
    ecPtr->setProfs(eCorrProfs);
    eCorrPtrs.push_back(ecPtr);
    return ecPtr;
  }

  // @brief Book a gapped ECorrelator with two harmonic vectors.
  ECorrPtr bookECorrelator(const string name, const vector<int>& h1,
    const vector<int>& h2, const Scatter2DPtr hIn) {
    vector<double> binIn;
    for (auto b : hIn->points()) binIn.push_back(b.xMin());
    binIn.push_back(hIn->points().back().xMax());
    ECorrPtr ecPtr = ECorrPtr(new ECorrelator(h1, h2, binIn));
    list<Profile1DPtr> eCorrProfs;
    for (int i = 0; i < BOOT_BINS; ++i) {
      Profile1DPtr tmp = bookProfile1D(name+"-"+to_string(i),*hIn);
-     tmp->setPath("/TMP/"+this->name()+"/FINAL/" + name+"-"+to_string(i));
+     tmp->setPath(this->name()+"/FINAL/" + name+"-"+to_string(i));
      //tmp->setPath(tmp->path()+"FINAL");
      eCorrProfs.push_back(tmp);
    }
    ecPtr->setProfs(eCorrProfs);
    eCorrPtrs.push_back(ecPtr);
    return ecPtr;
  }

  // @brief Shorthand for gapped correlators which splits the harmonic
  // vector into negative and positive components.
  ECorrPtr bookECorrelatorGap(const string name, const vector<int>& h,
    const Scatter2DPtr hIn) {
    const vector<int> h1(h.begin(), h.begin() + h.size() / 2);
    const vector<int> h2(h.begin() + h.size() / 2, h.end());
    return bookECorrelator(name, h1, h2, hIn);
  }

  // @brief Templated version of correlator booking which takes the
  // desired harmonic N and the number of correlated particles M as
  // template arguments.
  template<unsigned int N, unsigned int M>
  ECorrPtr bookECorrelator(const string name, const Scatter2DPtr hIn) {
    return bookECorrelator(name, Correlators::hVec(N, M), hIn);
  }

  // @brief Templated version of gapped correlator booking which takes the
  // desired harmonic N and the number of correlated particles M as
  // template arguments.
  template<unsigned int N, unsigned int M>
  ECorrPtr bookECorrelatorGap(const string name, const Scatter2DPtr hIn) {
    return bookECorrelatorGap(name, Correlators::hVec(N, M), hIn);
  }

  // @brief The stream method MUST be called in finalize() if one wants to
  // stream correlators to the yoda file, in order to do a reentrant finalize
  // (i.e. multi-histogram merging) for the analysis.
  void stream() {
    for (auto ecItr = eCorrPtrs.begin(); ecItr != eCorrPtrs.end(); ++ecItr) {
      (*ecItr)->fillFromProfs();
      corrPlot(list<Profile1DPtr>((*ecItr)->profBegin(), (*ecItr)->profEnd()),
        *ecItr);
    }
  }

private:

  /// Bookkeeping of the event averaged correlators.
  list<ECorrPtr> eCorrPtrs;

public:

  // @brief Constructor. Use CumulantAnalysis as base class for the
  // analysis to have access to its functionality.
  CumulantAnalysis(string n) : Analysis(n), errorMethod(VARIANCE) {};
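  // A minimal end-to-end sketch of how the booking helpers above are meant to
  // be used in a derived analysis. Names (h_v22, ec22, centrality, "c22",
  // "d01-x01-y01", "Correlators") are placeholders, and the setup of the
  // Correlators projection is not shown in this header:
  //
  //   // init():
  //   h_v22 = bookScatter2D("d01-x01-y01", true);
  //   ec22  = bookECorrelator<2, 2>("c22", h_v22);
  //
  //   // analyze(const Event& event):
  //   const Correlators& c = apply<Correlators>(event, "Correlators");
  //   ec22->fill(centrality, c, event.weight());
  //
  //   // finalize():
  //   stream();               // stream correlators for reentrant finalize
  //   vnTwoInt(h_v22, ec22);  // turn the event-averaged <<2>>_2 into v_2{2}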
  // @brief Helper method for turning correlators into Scatter2Ds.
  // Takes a pointer h to the resulting Scatter2D, the x-bins binx and a
  // function func defining the transformation.
  // Makes no attempt at statistical errors.
  // See usage in the methods below.
  // Can also be used directly in the analysis if a user wants to
  // perform an unforeseen transformation from correlators to
  // Scatter2D.
  template<typename T>
  static void fillScatter(Scatter2DPtr h, vector<double>& binx, T func) {
    vector<YODA::Point2D> points;
    for (int i = 0, N = binx.size() - 1; i < N; ++i) {
      double xMid = (binx[i] + binx[i + 1]) / 2.0;
      double xeMin = fabs(xMid - binx[i]);
      double xeMax = fabs(xMid - binx[i + 1]);
      double yVal = func(i);
      if (std::isnan(yVal)) yVal = 0.;
      double yErr = 0;
      points.push_back(YODA::Point2D(xMid, yVal, xeMin, xeMax, yErr, yErr));
    }
    h->reset();
    h->points().clear();
    for (int i = 0, N = points.size(); i < N; ++i) h->addPoint(points[i]);
  }

  // @brief Helper method for turning correlators into Scatter2Ds
  // with error estimates.
  // Takes a pointer h to the resulting Scatter2D, the x-bins binx, a
  // function func defining the transformation and a vector of errors yErr.
  // See usage in the methods below.
  // Can also be used directly in the analysis if a user wants to
  // perform an unforeseen transformation from correlators to
  // Scatter2D.
  template<typename F>
  const void fillScatter(Scatter2DPtr h, vector<double>& binx, F func,
    vector<pair<double, double> >& yErr) const {
    vector<YODA::Point2D> points;
    for (int i = 0, N = binx.size() - 1; i < N; ++i) {
      double xMid = (binx[i] + binx[i + 1]) / 2.0;
      double xeMin = fabs(xMid - binx[i]);
      double xeMax = fabs(xMid - binx[i + 1]);
      double yVal = func(i);
      if (std::isnan(yVal))
        points.push_back(YODA::Point2D(xMid, 0., xeMin, xeMax, 0., 0.));
      else
        points.push_back(YODA::Point2D(xMid, yVal, xeMin, xeMax,
          yErr[i].first, yErr[i].second));
    }
    h->reset();
    h->points().clear();
    for (int i = 0, N = points.size(); i < N; ++i) h->addPoint(points[i]);
  }

  /// @brief Take the nth power of all points in hIn,
  /// and put the result in hOut. Optionally put a
  /// constant k below the root.
  static void nthPow(Scatter2DPtr hOut, const Scatter2DPtr hIn,
    const double& n, const double& k = 1.0) {
    if (n == 0 || n == 1) {
      cout << "Error: Do not take the 0th or 1st power of a Scatter2D,"
        " use scale instead." << endl;
      return;
    }
    if (hIn->points().size() != hOut->points().size()) {
      cout << "nthRoot: Scatterplots: " << hIn->name() << " and "
        << hOut->name() << " not initialized with same length." << endl;
      return;
    }
    vector<YODA::Point2D> points;
    // The error pre-factor is k^(1/n) / n by Taylor's formula.
    double eFac = pow(k, 1./n)/n;
    for (auto b : hIn->points()) {
      double yVal = pow(k * b.y(), n);
      if (std::isnan(yVal))
        points.push_back(YODA::Point2D(b.x(), 0., b.xErrMinus(),
          b.xErrPlus(), 0, 0));
      else {
        double yemin = abs(eFac * pow(yVal, 1./(n - 1.))) * b.yErrMinus();
        if (std::isnan(yemin)) yemin = b.yErrMinus();
        double yemax = abs(eFac * pow(yVal, 1./(n - 1.))) * b.yErrPlus();
        if (std::isnan(yemax)) yemax = b.yErrPlus();
        points.push_back(YODA::Point2D(b.x(), yVal, b.xErrMinus(),
          b.xErrPlus(), yemin, yemax));
      }
    }
    hOut->reset();
    hOut->points().clear();
    for (int i = 0, N = points.size(); i < N; ++i) hOut->addPoint(points[i]);
  }
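  // Sketch of using fillScatter() directly for a transformation not covered by
  // the helpers below, here the bin-by-bin ratio <<4>>_2 / <<2>>_2 (h, ec22 and
  // ec24 are assumed to be a booked Scatter2DPtr and two booked ECorrPtrs):
  //
  //   vector<double> binx = ec22->getBinX();
  //   auto b2 = ec22->getBinPtrs();
  //   auto b4 = ec24->getBinPtrs();
  //   auto ratio = [&](int i) {
  //     return b2[i]->mean() != 0 ? b4[i]->mean() / b2[i]->mean() : 0.;
  //   };
  //   fillScatter(h, binx, ratio);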
  /// @brief Take the nth power of all points in h,
  /// and put the result back in the same Scatter2D. Optionally put a
  /// constant k below the root.
  static void nthPow(Scatter2DPtr h, const double& n, const double& k = 1.0) {
    if (n == 0 || n == 1) {
      cout << "Error: Do not take the 0th or 1st power of a Scatter2D,"
        " use scale instead." << endl;
      return;
    }
    vector<YODA::Point2D> points;
    vector<YODA::Point2D> pIn = h->points();
    // The error pre-factor is k^(1/n) / n by Taylor's formula.
    double eFac = pow(k, 1./n)/n;
    for (auto b : pIn) {
      double yVal = pow(k * b.y(), n);
      if (std::isnan(yVal))
        points.push_back(YODA::Point2D(b.x(), 0., b.xErrMinus(),
          b.xErrPlus(), 0, 0));
      else {
        double yemin = abs(eFac * pow(yVal, 1./(n - 1.))) * b.yErrMinus();
        if (std::isnan(yemin)) yemin = b.yErrMinus();
        double yemax = abs(eFac * pow(yVal, 1./(n - 1.))) * b.yErrPlus();
        if (std::isnan(yemax)) yemax = b.yErrPlus();
        points.push_back(YODA::Point2D(b.x(), yVal, b.xErrMinus(),
          b.xErrPlus(), yemin, yemax));
      }
    }
    h->reset();
    h->points().clear();
    for (int i = 0, N = points.size(); i < N; ++i) h->addPoint(points[i]);
  }

  // @brief Calculate the bootstrapped sample variance for the observable
  // given by correlators and the transformation func.
  template<typename T>
  static pair<double, double> sampleVariance(T func) {
    // First we calculate the mean (two pass calculation).
    double avg = 0.;
    for (int i = 0; i < BOOT_BINS; ++i) avg += func(i);
    avg /= double(BOOT_BINS);
    // Then we find the variance.
    double var = 0.;
    for (int i = 0; i < BOOT_BINS; ++i) var += pow(func(i) - avg, 2.);
    var /= (double(BOOT_BINS) - 1);
    return pair<double, double>(var, var);
  }

  // @brief Calculate the bootstrapped sample envelope for the observable
  // given by correlators and the transformation func.
  template<typename T>
  static pair<double, double> sampleEnvelope(T func) {
    // First we calculate the mean.
    double avg = 0.;
    for (int i = 0; i < BOOT_BINS; ++i) avg += func(i);
    avg /= double(BOOT_BINS);
    double yMax = avg;
    double yMin = avg;
    // Then we find the envelope using the mean as initial value.
    for (int i = 0; i < BOOT_BINS; ++i) {
      double yVal = func(i);
      if (yMin > yVal) yMin = yVal;
      else if (yMax < yVal) yMax = yVal;
    }
    return pair<double, double>(fabs(avg - yMin), fabs(yMax - avg));
  }

  // @brief Selection method for which sample error to use, given
  // in the constructor.
  template<typename T>
  const pair<double, double> sampleError(T func) const {
    if (errorMethod == VARIANCE) return sampleVariance(func);
    else if (errorMethod == ENVELOPE) return sampleEnvelope(func);
    else cout << "Error: Error method not found!" << endl;
    return pair<double, double>(0., 0.);
  }

  // @brief Two particle integrated cn.
  const void cnTwoInt(Scatter2DPtr h, ECorrPtr e2) const {
    vector<CorBin> bins = e2->getBins();
    vector<double> binx = e2->getBinX();
    // Assert bin size.
    if (binx.size() - 1 != bins.size()) {
      cout << "cnTwoInt: Bin size (x,y) differs!" << endl;
      return;
    }
    vector<CorBinBase*> binPtrs;
    // The mean value of the cumulant.
    auto cn = [&] (int i) { return binPtrs[i]->mean(); };
    // Error calculation.
    vector<pair<double, double> > yErr;
    for (int j = 0, N = bins.size(); j < N; ++j) {
      binPtrs = bins[j].getBinPtrs();
      yErr.push_back(sampleError(cn));
    }
    binPtrs = e2->getBinPtrs();
    fillScatter(h, binx, cn, yErr);
  }

  // @brief Two particle integrated vn.
  const void vnTwoInt(Scatter2DPtr h, ECorrPtr e2) const {
    cnTwoInt(h, e2);
    nthPow(h, 0.5);
  }

  // @brief Put an event averaged correlator into a Scatter2D.
  // Reduces to cnTwoInt, but better with a proper name.
  const void corrPlot(Scatter2DPtr h, ECorrPtr e) const {
    cnTwoInt(h, e);
  }

  // @brief Put an event averaged correlator into Profile1Ds, one
  // for each bootstrapping bin.
  const void corrPlot(list<Profile1DPtr> profs, ECorrPtr e) const {
    vector<CorBin> corBins = e->getBins();
    vector<double> binx = e->getBinX();
    auto ref = e->getReference();
    auto refBins = ref.getBinPtrs<CorSingleBin>();
    // Assert bin size.
    if (binx.size() - 1 != corBins.size()) {
      cout << "corrPlot: Bin size (x,y) differs!" << endl;
      return;
    }
    list<Profile1DPtr>::iterator hItr = profs.begin();
    // Loop over the bootstrapped correlators.
    for (size_t i = 0; i < profs.size(); ++i, ++hItr) {
      vector<YODA::ProfileBin1D> profBins;
      // Numbers for the summary distribution.
      double ne = 0., sow = 0., sow2 = 0.;
      for (size_t j = 0, N = binx.size() - 1; j < N; ++j) {
        vector<CorSingleBin*> binPtrs = corBins[j].getBinPtrs<CorSingleBin>();
        // Construct bin of the profiled quantities. We have no information
        // (and no desire to add it) of sumWX of the profile, so really
        // we should use a Dbn1D -- but that does not work for Profile1Ds.
        profBins.push_back(YODA::ProfileBin1D((*hItr)->bin(j).xEdges(),
          YODA::Dbn2D(binPtrs[i]->numEntries(), binPtrs[i]->sumW(),
            binPtrs[i]->sumW2(), 0., 0., binPtrs[i]->sumWX(), 0, 0)));
        ne += binPtrs[i]->numEntries();
        sow += binPtrs[i]->sumW();
        sow2 += binPtrs[i]->sumW2();
      }
      // Reset the bins of the profiles.
      (*hItr)->reset();
      (*hItr)->bins().clear();
      // Add our new bins:
      // The total distribution.
      (*hItr)->setTotalDbn(YODA::Dbn2D(ne, sow, sow2, 0., 0., 0., 0., 0.));
      // The bins.
      for (int j = 0, N = profBins.size(); j < N; ++j)
        (*hItr)->addBin(profBins[j]);
      // The reference flow in the underflow bin.
      (*hItr)->setUnderflow(YODA::Dbn2D(refBins[i]->numEntries(),
        refBins[i]->sumW(), refBins[i]->sumW2(), 0., 0.,
        refBins[i]->sumWX(), 0., 0.));
    } // End loop of bootstrapped correlators.
  }

  // @brief Four particle integrated cn.
  const void cnFourInt(Scatter2DPtr h, ECorrPtr e2, ECorrPtr e4) const {
    auto e2bins = e2->getBins();
    auto e4bins = e4->getBins();
    auto binx = e2->getBinX();
    if (binx.size() - 1 != e2bins.size()) {
      cout << "cnFourInt: Bin size (x,y) differs!" << endl;
      return;
    }
    if (binx != e4->getBinX()) {
      cout << "Error in cnFourInt: Correlator x-binning differs!" << endl;
      return;
    }
    vector<CorBinBase*> e2binPtrs;
    vector<CorBinBase*> e4binPtrs;
    auto cn = [&] (int i) {
      double e22 = e2binPtrs[i]->mean() * e2binPtrs[i]->mean();
      return e4binPtrs[i]->mean() - 2. * e22;
    };
    // Error calculation.
    vector<pair<double, double> > yErr;
    for (int j = 0, N = e2bins.size(); j < N; ++j) {
      e2binPtrs = e2bins[j].getBinPtrs();
      e4binPtrs = e4bins[j].getBinPtrs();
      yErr.push_back(sampleError(cn));
    }
    // Put the bin ptrs back in place.
    e2binPtrs = e2->getBinPtrs();
    e4binPtrs = e4->getBinPtrs();
    fillScatter(h, binx, cn, yErr);
  }

  // @brief Four particle integrated vn.
  const void vnFourInt(Scatter2DPtr h, ECorrPtr e2, ECorrPtr e4) const {
    cnFourInt(h, e2, e4);
    nthPow(h, 0.25, -1.0);
  }

  // @brief Six particle integrated cn.
  const void cnSixInt(Scatter2DPtr h, ECorrPtr e2, ECorrPtr e4,
    ECorrPtr e6) const {
    auto e2bins = e2->getBins();
    auto e4bins = e4->getBins();
    auto e6bins = e6->getBins();
    auto binx = e2->getBinX();
    if (binx.size() - 1 != e2bins.size()) {
      cout << "cnSixInt: Bin size (x,y) differs!" << endl;
      return;
    }
    if (binx != e4->getBinX() || binx != e6->getBinX()) {
      cout << "Error in cnSixInt: Correlator x-binning differs!" << endl;
      return;
    }
    vector<CorBinBase*> e2binPtrs;
    vector<CorBinBase*> e4binPtrs;
    vector<CorBinBase*> e6binPtrs;
    auto cn = [&] (int i) {
      double e23 = pow(e2binPtrs[i]->mean(), 3.0);
      return e6binPtrs[i]->mean() -
        9.*e2binPtrs[i]->mean()*e4binPtrs[i]->mean() + 12.*e23;
    };
    // Error calculation.
    vector<pair<double, double> > yErr;
    for (int j = 0, N = e2bins.size(); j < N; ++j) {
      e2binPtrs = e2bins[j].getBinPtrs();
      e4binPtrs = e4bins[j].getBinPtrs();
      e6binPtrs = e6bins[j].getBinPtrs();
      yErr.push_back(sampleError(cn));
    }
    // Put the bin ptrs back in place.
    e2binPtrs = e2->getBinPtrs();
    e4binPtrs = e4->getBinPtrs();
    e6binPtrs = e6->getBinPtrs();
    fillScatter(h, binx, cn, yErr);
  }

  // @brief Six particle integrated vn.
  const void vnSixInt(Scatter2DPtr h, ECorrPtr e2, ECorrPtr e4,
    ECorrPtr e6) const {
    cnSixInt(h, e2, e4, e6);
    nthPow(h, 1./6., 0.25);
  }
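  // For reference, the cumulant relations implemented by the cn/vn helpers
  // above and below are (with <<k>> the event-averaged k-particle correlator):
  //   c_n{2} = <<2>>                                v_n{2} = c_n{2}^(1/2)
  //   c_n{4} = <<4>> - 2<<2>>^2                     v_n{4} = (-c_n{4})^(1/4)
  //   c_n{6} = <<6>> - 9<<2>><<4>> + 12<<2>>^3      v_n{6} = (c_n{6}/4)^(1/6)
  //   c_n{8} = <<8>> - 16<<6>><<2>> - 18<<4>>^2 + 144<<4>><<2>>^2 - 144<<2>>^4
  //                                                 v_n{8} = (-c_n{8}/33)^(1/8)
  // matching the nthPow() powers and constants used in the vn methods.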
  // @brief Eight particle integrated cn.
  const void cnEightInt(Scatter2DPtr h, ECorrPtr e2, ECorrPtr e4,
    ECorrPtr e6, ECorrPtr e8) const {
    auto e2bins = e2->getBins();
    auto e4bins = e4->getBins();
    auto e6bins = e6->getBins();
    auto e8bins = e8->getBins();
    auto binx = e2->getBinX();
    if (binx.size() - 1 != e2bins.size()) {
      cout << "cnEightInt: Bin size (x,y) differs!" << endl;
      return;
    }
    if (binx != e4->getBinX() || binx != e6->getBinX() ||
        binx != e8->getBinX()) {
      cout << "Error in cnEightInt: Correlator x-binning differs!" << endl;
      return;
    }
    vector<CorBinBase*> e2binPtrs;
    vector<CorBinBase*> e4binPtrs;
    vector<CorBinBase*> e6binPtrs;
    vector<CorBinBase*> e8binPtrs;
    auto cn = [&] (int i) {
      double e22 = e2binPtrs[i]->mean() * e2binPtrs[i]->mean();
      double e24 = e22 * e22;
      double e42 = e4binPtrs[i]->mean() * e4binPtrs[i]->mean();
      return e8binPtrs[i]->mean() -
        16. * e6binPtrs[i]->mean() * e2binPtrs[i]->mean() -
        18. * e42 + 144. * e4binPtrs[i]->mean()*e22 - 144. * e24;
    };
    // Error calculation.
    vector<pair<double, double> > yErr;
    for (int j = 0, N = e2bins.size(); j < N; ++j) {
      e2binPtrs = e2bins[j].getBinPtrs();
      e4binPtrs = e4bins[j].getBinPtrs();
      e6binPtrs = e6bins[j].getBinPtrs();
      e8binPtrs = e8bins[j].getBinPtrs();
      yErr.push_back(sampleError(cn));
    }
    // Put the bin ptrs back in place.
    e2binPtrs = e2->getBinPtrs();
    e4binPtrs = e4->getBinPtrs();
    e6binPtrs = e6->getBinPtrs();
    e8binPtrs = e8->getBinPtrs();
    fillScatter(h, binx, cn, yErr);
  }

  // @brief Eight particle integrated vn.
  const void vnEightInt(Scatter2DPtr h, ECorrPtr e2, ECorrPtr e4,
    ECorrPtr e6, ECorrPtr e8) const {
    cnEightInt(h, e2, e4, e6, e8);
    nthPow(h, 1./8., -1./33.);
  }

  // @brief Two particle differential vn.
  const void vnTwoDiff(Scatter2DPtr h, ECorrPtr e2Dif) const {
    auto e2bins = e2Dif->getBins();
    auto ref = e2Dif->getReference();
    auto binx = e2Dif->getBinX();
    if (binx.size() - 1 != e2bins.size()) {
      cout << "vnTwoDif: Bin size (x,y) differs!" << endl;
      return;
    }
    vector<CorBinBase*> e2binPtrs;
    vector<CorBinBase*> refPtrs;
    auto vn = [&] (int i) {
      // Test reference flow.
      if (ref.mean() <= 0) return 0.;
      return e2binPtrs[i]->mean() / sqrt(ref.mean());
    };
    // We need a separate error function here, as we don't
    // iterate over the reference flow.
    auto vnerr = [&] (int i) {
      // Test reference flow.
      if (refPtrs[i]->mean() <= 0) return 0.;
      return e2binPtrs[i]->mean() / sqrt(refPtrs[i]->mean());
    };
    // Error calculation.
    vector<pair<double, double> > yErr;
    refPtrs = ref.getBinPtrs();
    for (int j = 0, N = e2bins.size(); j < N; ++j) {
      e2binPtrs = e2bins[j].getBinPtrs();
      yErr.push_back(sampleError(vnerr));
    }
    // Put the e2binPtrs back in place.
    e2binPtrs = e2Dif->getBinPtrs();
    fillScatter(h, binx, vn);
  }

  // @brief Four particle differential vn.
  const void vnFourDiff(Scatter2DPtr h, ECorrPtr e2Dif,
    ECorrPtr e4Dif) const {
    auto e2bins = e2Dif->getBins();
    auto e4bins = e4Dif->getBins();
    auto ref2 = e2Dif->getReference();
    auto ref4 = e4Dif->getReference();
    auto binx = e2Dif->getBinX();
    if (binx.size() - 1 != e2bins.size()) {
      cout << "vnFourDif: Bin size (x,y) differs!" << endl;
      return;
    }
    if (binx != e4Dif->getBinX()) {
      cout << "Error in vnFourDif: Correlator x-binning differs!" << endl;
      return;
    }
    vector<CorBinBase*> e2binPtrs;
    vector<CorBinBase*> e4binPtrs;
    vector<CorBinBase*> ref2Ptrs;
    vector<CorBinBase*> ref4Ptrs;
    double denom = 2 * ref2.mean() * ref2.mean() - ref4.mean();
    auto vn = [&] (int i) {
      // Test denominator.
      if (denom <= 0) return 0.;
      return ((2 * ref2.mean() * e2bins[i].mean() - e4bins[i].mean()) /
        pow(denom, 0.75));
    };
    // We need a separate error function here, as we don't
    // iterate over the reference flow.
    auto vnerr = [&] (int i) {
      double denom2 = 2 * ref2Ptrs[i]->mean() * ref2Ptrs[i]->mean() -
        ref4Ptrs[i]->mean();
      // Test denominator.
      if (denom2 <= 0) return 0.;
      return ((2 * ref2Ptrs[i]->mean() * e2binPtrs[i]->mean() -
        e4binPtrs[i]->mean()) / pow(denom2, 0.75));
    };
    // Error calculation.
    vector<pair<double, double> > yErr;
    ref2Ptrs = ref2.getBinPtrs();
    ref4Ptrs = ref4.getBinPtrs();
    for (int j = 0, N = e2bins.size(); j < N; ++j) {
      e2binPtrs = e2bins[j].getBinPtrs();
      e4binPtrs = e4bins[j].getBinPtrs();
      yErr.push_back(sampleError(vnerr));
    }
    // Put the binPtrs back in place.
    e2binPtrs = e2Dif->getBinPtrs();
    e4binPtrs = e4Dif->getBinPtrs();
    fillScatter(h, binx, vn, yErr);
  }

}; // End class CumulantAnalysis.

} // End Rivet namespace.

#endif