diff --git a/analyses/pluginCMS/CMS_2016_I1486238.cc b/analyses/pluginCMS/CMS_2016_I1486238.cc
--- a/analyses/pluginCMS/CMS_2016_I1486238.cc
+++ b/analyses/pluginCMS/CMS_2016_I1486238.cc
@@ -1,124 +1,126 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
-#include "Rivet/Projections/InitialQuarks.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
#include "Rivet/Projections/FastJets.hh"
+#define I_KNOW_THE_INITIAL_QUARKS_PROJECTION_IS_DODGY_BUT_NEED_TO_USE_IT
+#include "Rivet/Projections/InitialQuarks.hh"
+
namespace Rivet {
/// Studies of 2 b-jet + 2 jet production in proton-proton collisions at 7 TeV
class CMS_2016_I1486238 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2016_I1486238);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
FastJets akt(FinalState(), FastJets::ANTIKT, 0.5);
declare(akt, "antikT");
book(_h_Deltaphi_newway, 1,1,1);
book(_h_deltaphiafterlight, 9,1,1);
book(_h_SumPLight, 5,1,1);
book(_h_LeadingBJetpt, 11,1,1);
book(_h_SubleadingBJetpt, 15,1,1);
book(_h_LeadingLightJetpt, 13,1,1);
book(_h_SubleadingLightJetpt, 17,1,1);
book(_h_LeadingBJeteta, 10,1,1);
book(_h_SubleadingBJeteta, 14,1,1);
book(_h_LeadingLightJeteta, 12,1,1);
book(_h_SubleadingLightJeteta, 16,1,1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
const Jets& jets = apply<JetAlg>(event, "antikT").jetsByPt(Cuts::absrap < 4.7 && Cuts::pT > 20*GeV);
if (jets.size() < 4) vetoEvent;
// Initial quarks
/// @note Quark-level tagging...
Particles bquarks;
for (const GenParticle* p : particles(event.genEvent())) {
if (abs(p->pdg_id()) == PID::BQUARK) bquarks += Particle(p);
}
Jets bjets, ljets;
for (const Jet& j : jets) {
const bool btag = any(bquarks, deltaRLess(j, 0.3));
// for (const Particle& b : bquarks) if (deltaR(j, b) < 0.3) btag = true;
(btag && j.abseta() < 2.4 ? bjets : ljets).push_back(j);
}
// Fill histograms
const double weight = 1.0;
if (bjets.size() >= 2 && ljets.size() >= 2) {
_h_LeadingBJetpt->fill(bjets[0].pT()/GeV, weight);
_h_SubleadingBJetpt->fill(bjets[1].pT()/GeV, weight);
_h_LeadingLightJetpt->fill(ljets[0].pT()/GeV, weight);
_h_SubleadingLightJetpt->fill(ljets[1].pT()/GeV, weight);
//
_h_LeadingBJeteta->fill(bjets[0].eta(), weight);
_h_SubleadingBJeteta->fill(bjets[1].eta(), weight);
_h_LeadingLightJeteta->fill(ljets[0].eta(), weight);
_h_SubleadingLightJeteta->fill(ljets[1].eta(), weight);
const double lightdphi = deltaPhi(ljets[0], ljets[1]);
_h_deltaphiafterlight->fill(lightdphi, weight);
const double vecsumlightjets = sqrt(sqr(ljets[0].px()+ljets[1].px()) + sqr(ljets[0].py()+ljets[1].py())); //< @todo Just (lj0+lj1).pT()? Or use add_quad
const double term2 = vecsumlightjets/(sqrt(sqr(ljets[0].px()) + sqr(ljets[0].py())) + sqrt(sqr(ljets[1].px()) + sqr(ljets[1].py()))); //< @todo lj0.pT() + lj1.pT()? Or add_quad
_h_SumPLight->fill(term2, weight);
const double pxBsyst2 = bjets[0].px()+bjets[1].px(); // @todo (bj0+bj1).px()
const double pyBsyst2 = bjets[0].py()+bjets[1].py(); // @todo (bj0+bj1).py()
const double pxJetssyst2 = ljets[0].px()+ljets[1].px(); // @todo (lj0+lj1).px()
const double pyJetssyst2 = ljets[0].py()+ljets[1].py(); // @todo (lj0+lj1).py()
const double modulusB2 = sqrt(sqr(pxBsyst2)+sqr(pyBsyst2)); //< @todo add_quad
const double modulusJets2 = sqrt(sqr(pxJetssyst2)+sqr(pyJetssyst2)); //< @todo add_quad
const double cosphiBsyst2 = pxBsyst2/modulusB2;
const double cosphiJetssyst2 = pxJetssyst2/modulusJets2;
const double phiBsyst2 = ((pyBsyst2 > 0) ? 1 : -1) * acos(cosphiBsyst2); //< @todo sign(pyBsyst2)
const double phiJetssyst2 = sign(pyJetssyst2) * acos(cosphiJetssyst2);
const double Dphi2 = deltaPhi(phiBsyst2, phiJetssyst2);
_h_Deltaphi_newway->fill(Dphi2,weight);
}
}
/// Normalise histograms etc., after the run
void finalize() {
const double invlumi = crossSection()/picobarn/sumOfWeights();
normalize({_h_SumPLight, _h_deltaphiafterlight, _h_Deltaphi_newway});
scale({_h_LeadingLightJetpt, _h_SubleadingLightJetpt, _h_LeadingBJetpt, _h_SubleadingBJetpt}, invlumi);
scale({_h_LeadingLightJeteta, _h_SubleadingLightJeteta, _h_LeadingBJeteta, _h_SubleadingBJeteta}, invlumi);
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_deltaphiafterlight, _h_Deltaphi_newway, _h_SumPLight;
Histo1DPtr _h_LeadingBJetpt, _h_SubleadingBJetpt, _h_LeadingLightJetpt, _h_SubleadingLightJetpt;
Histo1DPtr _h_LeadingBJeteta, _h_SubleadingBJeteta, _h_LeadingLightJeteta, _h_SubleadingLightJeteta;
};
// Hook for the plugin system
DECLARE_RIVET_PLUGIN(CMS_2016_I1486238);
}
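The @todo notes in analyze() above all point the same way: the hand-rolled px/py sums, acos and sign calls reduce to plain four-momentum arithmetic. A minimal sketch of that simplification, assuming the standard Rivet FourMomentum operators and deltaPhi overloads (the names lj0/lj1/bj0/bj1 are shorthand for ljets[0] etc. and are not part of the patch):

// Sketch only, not part of this diff.
const FourMomentum lsys = lj0.momentum() + lj1.momentum();  // light dijet system
const FourMomentum bsys = bj0.momentum() + bj1.momentum();  // b dijet system
const double Dphi2 = deltaPhi(bsys, lsys);                  // same as the acos/sign construction above
const double term2 = lsys.pT() / (lj0.pT() + lj1.pT());     // vector-sum pT over scalar-sum pT

The same two quantities would then fill _h_Deltaphi_newway and _h_SumPLight exactly as in the existing code.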
diff --git a/analyses/pluginLEP/DELPHI_2000_S4328825.cc b/analyses/pluginLEP/DELPHI_2000_S4328825.cc
--- a/analyses/pluginLEP/DELPHI_2000_S4328825.cc
+++ b/analyses/pluginLEP/DELPHI_2000_S4328825.cc
@@ -1,143 +1,145 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/Sphericity.hh"
#include "Rivet/Projections/Thrust.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/ParisiTensor.hh"
#include "Rivet/Projections/Hemispheres.hh"
+#include <cmath>
+
+#define I_KNOW_THE_INITIAL_QUARKS_PROJECTION_IS_DODGY_BUT_NEED_TO_USE_IT
#include "Rivet/Projections/InitialQuarks.hh"
-#include <cmath>
namespace Rivet {
/// @brief DELPHI multiplicities at various energies
/// @author Peter Richardson
class DELPHI_2000_S4328825 : public Analysis {
public:
/// Constructor
DELPHI_2000_S4328825()
: Analysis("DELPHI_2000_S4328825")
{}
/// @name Analysis methods
//@{
void init() {
// Projections
declare(Beam(), "Beams");
declare(ChargedFinalState(), "CFS");
declare(InitialQuarks(), "IQF");
book(_weightedTotalChargedPartNumLight,"weight_totalch_light");
book(_weightedTotalChargedPartNumCharm,"weight_totalch_charm");
book(_weightedTotalChargedPartNumBottom,"weight_totalch_bottom");
book(_weightLight,"weight_light");
book(_weightCharm,"weight_charm");
book(_weightBottom,"weight_bottom");
book(h_bottom, 1, 1, 1);
book(h_charm, 1, 1, 2);
book(h_light, 1, 1, 3);
book(h_diff, 1, 1, 4); // bottom minus light
}
void analyze(const Event& event) {
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
const FinalState& cfs = apply<FinalState>(event, "CFS");
if (cfs.size() < 2) vetoEvent;
int flavour = 0;
const InitialQuarks& iqf = apply<InitialQuarks>(event, "IQF");
// If we only have two quarks (qqbar), just take the flavour.
// If we have more than two quarks, look for the highest energetic q-qbar pair.
if (iqf.particles().size() == 2) {
flavour = iqf.particles().front().abspid();
}
else {
map<int, double> quarkmap;
for (const Particle& p : iqf.particles()) {
if (quarkmap[p.pid()] < p.E()) {
quarkmap[p.pid()] = p.E();
}
}
double maxenergy = 0.;
for (int i = 1; i <= 5; ++i) {
if (quarkmap[i]+quarkmap[-i] > maxenergy) {
maxenergy = quarkmap[i]+quarkmap[-i];
flavour = i;
}
}
}
const size_t numParticles = cfs.particles().size();
switch (flavour) {
case 1: case 2: case 3:
_weightLight->fill();
_weightedTotalChargedPartNumLight ->fill(numParticles);
break;
case 4:
_weightCharm->fill();
_weightedTotalChargedPartNumCharm ->fill(numParticles);
break;
case 5:
_weightBottom->fill();
_weightedTotalChargedPartNumBottom->fill(numParticles);
break;
}
}
void finalize() {
Histo1D temphisto(refData(1, 1, 1));
const double avgNumPartsBottom = dbl(*_weightedTotalChargedPartNumBottom / *_weightBottom);
const double avgNumPartsCharm = dbl(*_weightedTotalChargedPartNumCharm / *_weightCharm);
const double avgNumPartsLight = dbl(*_weightedTotalChargedPartNumLight / *_weightLight);
for (size_t b = 0; b < temphisto.numBins(); b++) {
const double x = temphisto.bin(b).xMid();
const double ex = temphisto.bin(b).xWidth()/2.;
if (inRange(sqrtS()/GeV, x-ex, x+ex)) {
// @TODO: Fix y-error:
h_bottom->addPoint(x, avgNumPartsBottom, ex, 0.);
h_charm->addPoint(x, avgNumPartsCharm, ex, 0.);
h_light->addPoint(x, avgNumPartsLight, ex, 0.);
h_diff->addPoint(x, avgNumPartsBottom-avgNumPartsLight, ex, 0.);
}
}
}
//@}
private:
Scatter2DPtr h_bottom, h_charm, h_light, h_diff;
/// @name Multiplicities
//@{
CounterPtr _weightedTotalChargedPartNumLight;
CounterPtr _weightedTotalChargedPartNumCharm;
CounterPtr _weightedTotalChargedPartNumBottom;
//@}
/// @name Weights
//@{
CounterPtr _weightLight;
CounterPtr _weightCharm;
CounterPtr _weightBottom;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(DELPHI_2000_S4328825);
}
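The "highest energetic q-qbar pair" loop above, with its running-maximum bookkeeping, is repeated almost verbatim in every LEP/SLD analysis touched by this patch. A standalone restatement of that logic, as a sketch only (the analyses keep their inline copies):

// Sketch: return the flavour (1..5) whose quark and antiquark carry the largest summed energy.
int dominantFlavour(const Particles& iqparts) {
  map<int, double> emax;                          // highest energy seen per PID
  for (const Particle& p : iqparts) {
    if (emax[p.pid()] < p.E()) emax[p.pid()] = p.E();
  }
  int flavour = 0;
  double maxenergy = 0.;
  for (int i = 1; i <= 5; ++i) {
    if (emax[i] + emax[-i] > maxenergy) {
      maxenergy = emax[i] + emax[-i];
      flavour = i;
    }
  }
  return flavour;
}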
diff --git a/analyses/pluginLEP/L3_2004_I652683.cc b/analyses/pluginLEP/L3_2004_I652683.cc
--- a/analyses/pluginLEP/L3_2004_I652683.cc
+++ b/analyses/pluginLEP/L3_2004_I652683.cc
@@ -1,210 +1,212 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
-#include "Rivet/Projections/InitialQuarks.hh"
#include "Rivet/Projections/Thrust.hh"
#include "Rivet/Projections/ParisiTensor.hh"
#include "Rivet/Projections/Hemispheres.hh"
+#define I_KNOW_THE_INITIAL_QUARKS_PROJECTION_IS_DODGY_BUT_NEED_TO_USE_IT
+#include "Rivet/Projections/InitialQuarks.hh"
+
namespace Rivet {
/// Jet rates and event shapes at LEP I+II
class L3_2004_I652683 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(L3_2004_I652683);
// L3_2004_I652683() : Analysis("L3_2004_I652683")
// { }
/// Book histograms and initialise projections before the run
void init() {
// Projections to use
const FinalState FS;
declare(FS, "FS");
declare(Beam(), "beams");
const ChargedFinalState CFS;
declare(CFS, "CFS");
const Thrust thrust(FS);
declare(thrust, "thrust");
declare(ParisiTensor(FS), "Parisi");
declare(Hemispheres(thrust), "Hemispheres");
declare(InitialQuarks(), "initialquarks");
// Book the histograms
book(_h_Thrust_udsc , 47, 1, 1);
book(_h_Thrust_bottom , 47, 1, 2);
book(_h_heavyJetmass_udsc , 48, 1, 1);
book(_h_heavyJetmass_bottom , 48, 1, 2);
book(_h_totalJetbroad_udsc , 49, 1, 1);
book(_h_totalJetbroad_bottom , 49, 1, 2);
book(_h_wideJetbroad_udsc , 50, 1, 1);
book(_h_wideJetbroad_bottom , 50, 1, 2);
book(_h_Cparameter_udsc , 51, 1, 1);
book(_h_Cparameter_bottom , 51, 1, 2);
book(_h_Dparameter_udsc , 52, 1, 1);
book(_h_Dparameter_bottom , 52, 1, 2);
book(_h_Ncharged , 59, 1, 1);
book(_h_Ncharged_udsc , 59, 1, 2);
book(_h_Ncharged_bottom , 59, 1, 3);
book(_h_scaledMomentum , 65, 1, 1);
book(_h_scaledMomentum_udsc , 65, 1, 2);
book(_h_scaledMomentum_bottom, 65, 1, 3);
book(_sumW_udsc, "sumW_udsc");
book(_sumW_b, "sumW_b");
book(_sumW_ch, "sumW_ch");
book(_sumW_ch_udsc, "sumW_ch_udsc");
book(_sumW_ch_b, "sumW_ch_b");
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// Get beam average momentum
const ParticlePair& beams = apply<Beam>(event, "beams").beams();
const double beamMomentum = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0;
// InitialQuarks projection to have udsc events separated from b events
/// @todo Yuck!!! Eliminate when possible...
int flavour = 0;
const InitialQuarks& iqf = apply<InitialQuarks>(event, "initialquarks");
Particles quarks;
if ( iqf.particles().size() == 2 ) {
flavour = iqf.particles().front().abspid();
quarks = iqf.particles();
} else {
map<int, Particle> quarkmap;
for (const Particle& p : iqf.particles()) {
if (quarkmap.find(p.pid()) == quarkmap.end()) quarkmap[p.pid()] = p;
else if (quarkmap[p.pid()].E() < p.E()) quarkmap[p.pid()] = p;
}
double max_energy = 0.;
for (int i = 1; i <= 5; ++i) {
double energy = 0.;
if (quarkmap.find(i) != quarkmap.end())
energy += quarkmap[ i].E();
if (quarkmap.find(-i) != quarkmap.end())
energy += quarkmap[-i].E();
if (energy > max_energy) {
max_energy = energy;
flavour = i;
}
}
if (quarkmap.find(flavour) != quarkmap.end())
quarks.push_back(quarkmap[flavour]);
if (quarkmap.find(-flavour) != quarkmap.end())
quarks.push_back(quarkmap[-flavour]);
}
// Flavour label
/// @todo Change to a bool?
const int iflav = (flavour == PID::DQUARK || flavour == PID::UQUARK || flavour == PID::SQUARK || flavour == PID::CQUARK) ? 1 : (flavour == PID::BQUARK) ? 5 : 0;
// Update weight sums
if (iflav == 1) {
_sumW_udsc->fill();
} else if (iflav == 5) {
_sumW_b->fill();
}
_sumW_ch->fill();
// Charged multiplicity
const FinalState& cfs = applyProjection<FinalState>(event, "CFS");
_h_Ncharged->fill(cfs.size());
if (iflav == 1) {
_sumW_ch_udsc->fill();
_h_Ncharged_udsc->fill(cfs.size());
} else if (iflav == 5) {
_sumW_ch_b->fill();
_h_Ncharged_bottom->fill(cfs.size());
}
// Scaled momentum
const Particles& chparticles = cfs.particlesByPt();
for (const Particle& p : chparticles) {
const Vector3 momentum3 = p.p3();
const double mom = momentum3.mod();
const double scaledMom = mom/beamMomentum;
const double logScaledMom = std::log(scaledMom);
_h_scaledMomentum->fill(-logScaledMom);
if (iflav == 1) {
_h_scaledMomentum_udsc->fill(-logScaledMom);
} else if (iflav == 5) {
_h_scaledMomentum_bottom->fill(-logScaledMom);
}
}
// Thrust
const Thrust& thrust = applyProjection<Thrust>(event, "thrust");
if (iflav == 1) {
_h_Thrust_udsc->fill(thrust.thrust());
} else if (iflav == 5) {
_h_Thrust_bottom->fill(thrust.thrust());
}
// C and D Parisi parameters
const ParisiTensor& parisi = applyProjection<ParisiTensor>(event, "Parisi");
if (iflav == 1) {
_h_Cparameter_udsc->fill(parisi.C());
_h_Dparameter_udsc->fill(parisi.D());
} else if (iflav == 5) {
_h_Cparameter_bottom->fill(parisi.C());
_h_Dparameter_bottom->fill(parisi.D());
}
// The hemisphere variables
const Hemispheres& hemisphere = applyProjection<Hemispheres>(event, "Hemispheres");
if (iflav == 1) {
_h_heavyJetmass_udsc->fill(hemisphere.scaledM2high());
_h_totalJetbroad_udsc->fill(hemisphere.Bsum());
_h_wideJetbroad_udsc->fill(hemisphere.Bmax());
} else if (iflav == 5) {
_h_heavyJetmass_bottom->fill(hemisphere.scaledM2high());
_h_totalJetbroad_bottom->fill(hemisphere.Bsum());
_h_wideJetbroad_bottom->fill(hemisphere.Bmax());
}
}
/// Normalise histograms etc., after the run
void finalize() {
scale({_h_Thrust_udsc, _h_heavyJetmass_udsc, _h_totalJetbroad_udsc, _h_wideJetbroad_udsc, _h_Cparameter_udsc, _h_Dparameter_udsc}, 1/ *_sumW_udsc);
scale({_h_Thrust_bottom, _h_heavyJetmass_bottom, _h_totalJetbroad_bottom, _h_wideJetbroad_bottom, _h_Cparameter_bottom, _h_Dparameter_bottom}, 1./ *_sumW_b);
scale(_h_Ncharged, 2/ *_sumW_ch);
scale(_h_Ncharged_udsc, 2/ *_sumW_ch_udsc);
scale(_h_Ncharged_bottom, 2/ *_sumW_ch_b);
scale(_h_scaledMomentum, 1/ *_sumW_ch);
scale(_h_scaledMomentum_udsc, 1/ *_sumW_ch_udsc);
scale(_h_scaledMomentum_bottom, 1/ *_sumW_ch_b);
}
/// Weight counters
CounterPtr _sumW_udsc, _sumW_b, _sumW_ch, _sumW_ch_udsc, _sumW_ch_b;
/// @name Histograms
//@{
Histo1DPtr _h_Thrust_udsc, _h_Thrust_bottom;
Histo1DPtr _h_heavyJetmass_udsc, _h_heavyJetmass_bottom;
Histo1DPtr _h_totalJetbroad_udsc, _h_totalJetbroad_bottom;
Histo1DPtr _h_wideJetbroad_udsc, _h_wideJetbroad_bottom;
Histo1DPtr _h_Cparameter_udsc, _h_Cparameter_bottom;
Histo1DPtr _h_Dparameter_udsc, _h_Dparameter_bottom;
Histo1DPtr _h_Ncharged, _h_Ncharged_udsc, _h_Ncharged_bottom;
Histo1DPtr _h_scaledMomentum, _h_scaledMomentum_udsc, _h_scaledMomentum_bottom;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(L3_2004_I652683);
}
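The /// @todo "Change to a bool?" above has a catch that the code already reflects: three outcomes are needed, because events that are neither udsc nor b must fill neither flavour-tagged set (iflav == 0). A hedged sketch of an explicit three-valued label, purely illustrative and not part of the patch:

// Sketch: replace the magic values 1/5/0 with a named classification.
enum class FlavClass { UDSC, BOTTOM, OTHER };
FlavClass classify(int flavour) {
  if (flavour == PID::DQUARK || flavour == PID::UQUARK ||
      flavour == PID::SQUARK || flavour == PID::CQUARK) return FlavClass::UDSC;
  if (flavour == PID::BQUARK) return FlavClass::BOTTOM;
  return FlavClass::OTHER;
}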
diff --git a/analyses/pluginLEP/OPAL_1998_S3780481.cc b/analyses/pluginLEP/OPAL_1998_S3780481.cc
--- a/analyses/pluginLEP/OPAL_1998_S3780481.cc
+++ b/analyses/pluginLEP/OPAL_1998_S3780481.cc
@@ -1,193 +1,195 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
+
+#define I_KNOW_THE_INITIAL_QUARKS_PROJECTION_IS_DODGY_BUT_NEED_TO_USE_IT
#include "Rivet/Projections/InitialQuarks.hh"
namespace Rivet {
/// @brief OPAL flavour-dependent fragmentation paper
/// @author Hendrik Hoeth
class OPAL_1998_S3780481 : public Analysis {
public:
/// Constructor
OPAL_1998_S3780481() : Analysis("OPAL_1998_S3780481") {
}
/// @name Analysis methods
//@{
void analyze(const Event& e) {
// First, veto on leptonic events by requiring at least 2 charged FS particles
const FinalState& fs = apply<FinalState>(e, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) {
MSG_DEBUG("Failed ncharged cut");
vetoEvent;
}
MSG_DEBUG("Passed ncharged cut");
_weightedTotalPartNum->fill(numParticles);
// Get beams and average beam momentum
const ParticlePair& beams = apply<Beam>(e, "Beams").beams();
const double meanBeamMom = ( beams.first.p3().mod() +
beams.second.p3().mod() ) / 2.0;
MSG_DEBUG("Avg beam momentum = " << meanBeamMom);
int flavour = 0;
const InitialQuarks& iqf = apply<InitialQuarks>(e, "IQF");
// If we only have two quarks (qqbar), just take the flavour.
// If we have more than two quarks, look for the highest energetic q-qbar pair.
/// @todo Yuck... does this *really* have to be quark-based?!?
if (iqf.particles().size() == 2) {
flavour = iqf.particles().front().abspid();
} else {
map<int, double> quarkmap;
for (const Particle& p : iqf.particles()) {
if (quarkmap[p.pid()] < p.E()) {
quarkmap[p.pid()] = p.E();
}
}
double maxenergy = 0.;
for (int i = 1; i <= 5; ++i) {
if (quarkmap[i]+quarkmap[-i] > maxenergy) {
maxenergy = quarkmap[i]+quarkmap[-i];
flavour = i;
}
}
}
switch (flavour) {
case 1:
case 2:
case 3:
_SumOfudsWeights->fill();
break;
case 4:
_SumOfcWeights->fill();
break;
case 5:
_SumOfbWeights->fill();
break;
}
for (const Particle& p : fs.particles()) {
const double xp = p.p3().mod()/meanBeamMom;
const double logxp = -std::log(xp);
_histXpall->fill(xp);
_histLogXpall->fill(logxp);
_histMultiChargedall->fill(_histMultiChargedall->bin(0).xMid());
switch (flavour) {
/// @todo Use PDG code enums
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_histXpuds->fill(xp);
_histLogXpuds->fill(logxp);
_histMultiChargeduds->fill(_histMultiChargeduds->bin(0).xMid());
break;
case PID::CQUARK:
_histXpc->fill(xp);
_histLogXpc->fill(logxp);
_histMultiChargedc->fill(_histMultiChargedc->bin(0).xMid());
break;
case PID::BQUARK:
_histXpb->fill(xp);
_histLogXpb->fill(logxp);
_histMultiChargedb->fill(_histMultiChargedb->bin(0).xMid());
break;
}
}
}
void init() {
// Projections
declare(Beam(), "Beams");
declare(ChargedFinalState(), "FS");
declare(InitialQuarks(), "IQF");
// Book histos
book(_histXpuds ,1, 1, 1);
book(_histXpc ,2, 1, 1);
book(_histXpb ,3, 1, 1);
book(_histXpall ,4, 1, 1);
book(_histLogXpuds ,5, 1, 1);
book(_histLogXpc ,6, 1, 1);
book(_histLogXpb ,7, 1, 1);
book(_histLogXpall ,8, 1, 1);
book(_histMultiChargeduds ,9, 1, 1);
book(_histMultiChargedc ,9, 1, 2);
book(_histMultiChargedb ,9, 1, 3);
book(_histMultiChargedall ,9, 1, 4);
// Counters
book(_weightedTotalPartNum, "TotalPartNum");
book(_SumOfudsWeights, "udsWeights");
book(_SumOfcWeights, "cWeights");
book(_SumOfbWeights, "bWeights");
}
/// Finalize
void finalize() {
const double avgNumParts = dbl(*_weightedTotalPartNum) / sumOfWeights();
normalize(_histXpuds , avgNumParts);
normalize(_histXpc , avgNumParts);
normalize(_histXpb , avgNumParts);
normalize(_histXpall , avgNumParts);
normalize(_histLogXpuds , avgNumParts);
normalize(_histLogXpc , avgNumParts);
normalize(_histLogXpb , avgNumParts);
normalize(_histLogXpall , avgNumParts);
scale(_histMultiChargeduds, 1.0/ *_SumOfudsWeights);
scale(_histMultiChargedc , 1.0/ *_SumOfcWeights);
scale(_histMultiChargedb , 1.0/ *_SumOfbWeights);
scale(_histMultiChargedall, 1.0/sumOfWeights());
}
//@}
private:
/// Store the weighted sums of numbers of charged / charged+neutral
/// particles - used to calculate average number of particles for the
/// inclusive single particle distributions' normalisations.
CounterPtr _weightedTotalPartNum;
CounterPtr _SumOfudsWeights;
CounterPtr _SumOfcWeights;
CounterPtr _SumOfbWeights;
Histo1DPtr _histXpuds;
Histo1DPtr _histXpc;
Histo1DPtr _histXpb;
Histo1DPtr _histXpall;
Histo1DPtr _histLogXpuds;
Histo1DPtr _histLogXpc;
Histo1DPtr _histLogXpb;
Histo1DPtr _histLogXpall;
Histo1DPtr _histMultiChargeduds;
Histo1DPtr _histMultiChargedc;
Histo1DPtr _histMultiChargedb;
Histo1DPtr _histMultiChargedall;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(OPAL_1998_S3780481);
}
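The normalisation choice in finalize() above is worth a note: assuming the usual YODA convention that normalize(h, c) rescales a histogram so its area equals c, every x_p and -log(x_p) spectrum here is scaled to the same avgNumParts, the weighted mean charged multiplicity of the full sample, so each distribution integrates to the mean multiplicity rather than to unity:

\int \frac{dn_{\mathrm{ch}}}{dx_p}\, dx_p = \langle n_{\mathrm{ch}} \rangle, \qquad x_p = \frac{p}{p_{\mathrm{beam}}}, \quad \xi_p = -\ln x_p .

The per-flavour multiplicity histograms, by contrast, are scaled by the inverse of their own weight counters.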
diff --git a/analyses/pluginLEP/OPAL_2002_S5361494.cc b/analyses/pluginLEP/OPAL_2002_S5361494.cc
--- a/analyses/pluginLEP/OPAL_2002_S5361494.cc
+++ b/analyses/pluginLEP/OPAL_2002_S5361494.cc
@@ -1,145 +1,147 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/Sphericity.hh"
#include "Rivet/Projections/Thrust.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/ParisiTensor.hh"
#include "Rivet/Projections/Hemispheres.hh"
+#include <cmath>
+
+#define I_KNOW_THE_INITIAL_QUARKS_PROJECTION_IS_DODGY_BUT_NEED_TO_USE_IT
#include "Rivet/Projections/InitialQuarks.hh"
-#include <cmath>
namespace Rivet {
/// @brief OPAL multiplicities at various energies
/// @author Peter Richardson
class OPAL_2002_S5361494 : public Analysis {
public:
/// Constructor
OPAL_2002_S5361494()
: Analysis("OPAL_2002_S5361494")
{}
/// @name Analysis methods
//@{
void init() {
// Projections
declare(Beam(), "Beams");
declare(ChargedFinalState(), "CFS");
declare(InitialQuarks(), "IQF");
book(h_bottom, 1, 1, 1);
book(h_charm, 1, 1, 2);
book(h_light, 1, 1, 3);
book(h_diff, 1, 1, 4); // bottom minus light
book(_weightedTotalChargedPartNumLight, "TotalChargedPartNumLight");
book(_weightedTotalChargedPartNumCharm, "TotalChargedPartNumCharm");
book(_weightedTotalChargedPartNumBottom, "TotalChargedPartNumBottom");
book(_weightLight, "Light");
book(_weightCharm, "Charm");
book(_weightBottom, "Bottom");
}
void analyze(const Event& event) {
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
const FinalState& cfs = apply<FinalState>(event, "CFS");
if (cfs.size() < 2) vetoEvent;
int flavour = 0;
const InitialQuarks& iqf = apply<InitialQuarks>(event, "IQF");
// If we only have two quarks (qqbar), just take the flavour.
// If we have more than two quarks, look for the highest energetic q-qbar pair.
if (iqf.particles().size() == 2) {
flavour = iqf.particles().front().abspid();
}
else {
map<int, double> quarkmap;
for (const Particle& p : iqf.particles()) {
if (quarkmap[p.pid()] < p.E()) {
quarkmap[p.pid()] = p.E();
}
}
double maxenergy = 0.;
for (int i = 1; i <= 5; ++i) {
if (quarkmap[i]+quarkmap[-i] > maxenergy) {
maxenergy = quarkmap[i]+quarkmap[-i];
flavour = i;
}
}
}
const size_t numParticles = cfs.particles().size();
switch (flavour) {
case 1: case 2: case 3:
_weightLight ->fill();
_weightedTotalChargedPartNumLight ->fill(numParticles);
break;
case 4:
_weightCharm ->fill();
_weightedTotalChargedPartNumCharm ->fill(numParticles);
break;
case 5:
_weightBottom->fill();
_weightedTotalChargedPartNumBottom->fill(numParticles);
break;
}
}
void finalize() {
Histo1D temphisto(refData(1, 1, 1));
const double avgNumPartsBottom = _weightBottom->val() != 0. ? dbl(*_weightedTotalChargedPartNumBottom / *_weightBottom) : 0.;
const double avgNumPartsCharm = _weightCharm->val() != 0. ? dbl(*_weightedTotalChargedPartNumCharm / *_weightCharm ) : 0.;
const double avgNumPartsLight = _weightLight->val() != 0. ? dbl(*_weightedTotalChargedPartNumLight / *_weightLight ) : 0.;
for (size_t b = 0; b < temphisto.numBins(); b++) {
const double x = temphisto.bin(b).xMid();
const double ex = temphisto.bin(b).xWidth()/2.;
if (inRange(sqrtS()/GeV, x-ex, x+ex)) {
// @TODO: Fix y-error:
h_bottom->addPoint(x, avgNumPartsBottom, ex, 0.);
h_charm->addPoint(x, avgNumPartsCharm, ex, 0.);
h_light->addPoint(x, avgNumPartsLight, ex, 0.);
h_diff->addPoint(x, avgNumPartsBottom-avgNumPartsLight, ex, 0.);
}
}
}
//@}
private:
Scatter2DPtr h_bottom;
Scatter2DPtr h_charm ;
Scatter2DPtr h_light ;
Scatter2DPtr h_diff ;
/// @name Multiplicities
//@{
CounterPtr _weightedTotalChargedPartNumLight;
CounterPtr _weightedTotalChargedPartNumCharm;
CounterPtr _weightedTotalChargedPartNumBottom;
//@}
/// @name Weights
//@{
CounterPtr _weightLight;
CounterPtr _weightCharm;
CounterPtr _weightBottom;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(OPAL_2002_S5361494);
}
diff --git a/analyses/pluginLEP/SLD_1996_S3398250.cc b/analyses/pluginLEP/SLD_1996_S3398250.cc
--- a/analyses/pluginLEP/SLD_1996_S3398250.cc
+++ b/analyses/pluginLEP/SLD_1996_S3398250.cc
@@ -1,139 +1,141 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/Sphericity.hh"
#include "Rivet/Projections/Thrust.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/ParisiTensor.hh"
#include "Rivet/Projections/Hemispheres.hh"
+#include <cmath>
+
+#define I_KNOW_THE_INITIAL_QUARKS_PROJECTION_IS_DODGY_BUT_NEED_TO_USE_IT
#include "Rivet/Projections/InitialQuarks.hh"
-#include <cmath>
namespace Rivet {
/// @brief SLD multiplicities at mZ
/// @author Peter Richardson
class SLD_1996_S3398250 : public Analysis {
public:
/// Constructor
SLD_1996_S3398250()
: Analysis("SLD_1996_S3398250")
{}
/// @name Analysis methods
//@{
void init() {
// Projections
declare(Beam(), "Beams");
declare(ChargedFinalState(), "CFS");
declare(InitialQuarks(), "IQF");
book(_h_bottom ,1, 1, 1);
book(_h_charm ,2, 1, 1);
book(_h_light ,3, 1, 1);
book(_weightLight, "weightLight");
book(_weightCharm, "weightCharm");
book(_weightBottom, "weightBottom");
book(scatter_c, 4,1,1);
book(scatter_b, 5,1,1);
}
void analyze(const Event& event) {
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
const FinalState& cfs = apply<FinalState>(event, "CFS");
if (cfs.size() < 2) vetoEvent;
int flavour = 0;
const InitialQuarks& iqf = apply<InitialQuarks>(event, "IQF");
// If we only have two quarks (qqbar), just take the flavour.
// If we have more than two quarks, look for the highest energetic q-qbar pair.
if (iqf.particles().size() == 2) {
flavour = iqf.particles().front().abspid();
}
else {
map<int, double> quarkmap;
for (const Particle& p : iqf.particles()) {
if (quarkmap[p.pid()] < p.E()) {
quarkmap[p.pid()] = p.E();
}
}
double maxenergy = 0.;
for (int i = 1; i <= 5; ++i) {
if (quarkmap[i]+quarkmap[-i] > maxenergy) {
maxenergy = quarkmap[i]+quarkmap[-i];
flavour = i;
}
}
}
const size_t numParticles = cfs.particles().size();
switch (flavour) {
case 1: case 2: case 3:
_weightLight ->fill();
_h_light->fillBin(0, numParticles);
break;
case 4:
_weightCharm ->fill();
_h_charm->fillBin(0, numParticles);
break;
case 5:
_weightBottom->fill();
_h_bottom->fillBin(0, numParticles);
break;
}
}
void multiplicity_subtract(const Histo1DPtr first, const Histo1DPtr second, Scatter2DPtr & scatter) {
const double x = first->bin(0).xMid();
const double ex = first->bin(0).xWidth()/2.;
const double y = first->bin(0).area() - second->bin(0).area();
const double ey = sqrt(sqr(first->bin(0).areaErr()) + sqr(second->bin(0).areaErr()));
scatter->addPoint(x, y, ex, ey);
}
void finalize() {
if (_weightBottom->val() != 0) scale(_h_bottom, 1./ *_weightBottom);
if (_weightCharm->val() != 0) scale(_h_charm, 1./ *_weightCharm );
if (_weightLight->val() != 0) scale(_h_light, 1./ *_weightLight );
multiplicity_subtract(_h_charm, _h_light, scatter_c);
multiplicity_subtract(_h_bottom, _h_light, scatter_b);
}
//@}
private:
Scatter2DPtr scatter_c, scatter_b;
/// @name Weights
//@{
CounterPtr _weightLight;
CounterPtr _weightCharm;
CounterPtr _weightBottom;
//@}
Histo1DPtr _h_bottom;
Histo1DPtr _h_charm;
Histo1DPtr _h_light;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(SLD_1996_S3398250);
}
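The single-bin histograms above act as weighted counters: each fillBin(0, numParticles) call adds the event's charged multiplicity as the fill weight, so after the per-flavour scaling in finalize() the bin area is the mean multiplicity for that flavour class, and multiplicity_subtract() combines the errors in quadrature. In formulas (a restatement of the code, assuming area() returns the accumulated sum of fill weights):

\langle N_{\mathrm{ch}} \rangle_f = \frac{\sum_{\mathrm{events} \in f} w\, N_{\mathrm{ch}}}{\sum_{\mathrm{events} \in f} w}, \qquad \Delta = \langle N_{\mathrm{ch}} \rangle_{f_1} - \langle N_{\mathrm{ch}} \rangle_{f_2}, \quad \sigma_\Delta = \sqrt{\sigma_{f_1}^2 + \sigma_{f_2}^2} .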
diff --git a/analyses/pluginLEP/SLD_1999_S3743934.cc b/analyses/pluginLEP/SLD_1999_S3743934.cc
--- a/analyses/pluginLEP/SLD_1999_S3743934.cc
+++ b/analyses/pluginLEP/SLD_1999_S3743934.cc
@@ -1,736 +1,738 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
+#include "Rivet/Projections/Thrust.hh"
+
+#define I_KNOW_THE_INITIAL_QUARKS_PROJECTION_IS_DODGY_BUT_NEED_TO_USE_IT
#include "Rivet/Projections/InitialQuarks.hh"
-#include "Rivet/Projections/Thrust.hh"
namespace Rivet {
/// @brief SLD flavour-dependent fragmentation paper
/// @author Peter Richardson
class SLD_1999_S3743934 : public Analysis {
public:
/// Constructor
SLD_1999_S3743934()
: Analysis("SLD_1999_S3743934"),
_multPiPlus(4),_multKPlus(4),_multK0(4),
_multKStar0(4),_multPhi(4),
_multProton(4),_multLambda(4)
{ }
/// @name Analysis methods
//@{
void analyze(const Event& e) {
// First, veto on leptonic events by requiring at least 2 charged FS particles
const FinalState& fs = apply<FinalState>(e, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) {
MSG_DEBUG("Failed ncharged cut");
vetoEvent;
}
MSG_DEBUG("Passed ncharged cut");
// Get beams and average beam momentum
const ParticlePair& beams = apply<Beam>(e, "Beams").beams();
const double meanBeamMom = ( beams.first.p3().mod() +
beams.second.p3().mod() ) / 2.0;
MSG_DEBUG("Avg beam momentum = " << meanBeamMom);
int flavour = 0;
const InitialQuarks& iqf = apply<InitialQuarks>(e, "IQF");
// If we only have two quarks (qqbar), just take the flavour.
// If we have more than two quarks, look for the highest energetic q-qbar pair.
/// @todo Can we make this based on hadron flavour instead?
Particles quarks;
if (iqf.particles().size() == 2) {
flavour = iqf.particles().front().abspid();
quarks = iqf.particles();
} else {
map<int, Particle > quarkmap;
for (const Particle& p : iqf.particles()) {
if (quarkmap.find(p.pid()) == quarkmap.end()) quarkmap[p.pid()] = p;
else if (quarkmap[p.pid()].E() < p.E()) quarkmap[p.pid()] = p;
}
double maxenergy = 0.;
for (int i = 1; i <= 5; ++i) {
double energy(0.);
if (quarkmap.find( i) != quarkmap.end())
energy += quarkmap[ i].E();
if (quarkmap.find(-i) != quarkmap.end())
energy += quarkmap[-i].E();
if (energy > maxenergy) {
maxenergy = energy;
flavour = i;
}
}
if (quarkmap.find(flavour) != quarkmap.end())
quarks.push_back(quarkmap[flavour]);
if (quarkmap.find(-flavour) != quarkmap.end())
quarks.push_back(quarkmap[-flavour]);
}
switch (flavour) {
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_SumOfudsWeights->fill();
break;
case PID::CQUARK:
_SumOfcWeights->fill();
break;
case PID::BQUARK:
_SumOfbWeights->fill();
break;
}
// thrust axis for projections
Vector3 axis = apply<Thrust>(e, "Thrust").thrustAxis();
double dot(0.);
if (!quarks.empty()) {
dot = quarks[0].p3().dot(axis);
if (quarks[0].pid() < 0) dot *= -1;
}
for (const Particle& p : fs.particles()) {
const double xp = p.p3().mod()/meanBeamMom;
// if in quark or antiquark hemisphere
bool quark = p.p3().dot(axis)*dot > 0.;
_h_XpChargedN->fill(xp);
_temp_XpChargedN1->fill(xp);
_temp_XpChargedN2->fill(xp);
_temp_XpChargedN3->fill(xp);
int id = p.abspid();
// charged pions
if (id == PID::PIPLUS) {
_h_XpPiPlusN->fill(xp);
_multPiPlus[0]->fill();
switch (flavour) {
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_multPiPlus[1]->fill();
_h_XpPiPlusLight->fill(xp);
if( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 ))
_h_RPiPlus->fill(xp);
else
_h_RPiMinus->fill(xp);
break;
case PID::CQUARK:
_multPiPlus[2]->fill();
_h_XpPiPlusCharm->fill(xp);
break;
case PID::BQUARK:
_multPiPlus[3]->fill();
_h_XpPiPlusBottom->fill(xp);
break;
}
}
else if (id == PID::KPLUS) {
_h_XpKPlusN->fill(xp);
_multKPlus[0]->fill();
switch (flavour) {
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_multKPlus[1]->fill();
_temp_XpKPlusLight->fill(xp);
_h_XpKPlusLight->fill(xp);
if( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 ))
_h_RKPlus->fill(xp);
else
_h_RKMinus->fill(xp);
break;
case PID::CQUARK:
_multKPlus[2]->fill();
_h_XpKPlusCharm->fill(xp);
_temp_XpKPlusCharm->fill(xp);
break;
case PID::BQUARK:
_multKPlus[3]->fill();
_h_XpKPlusBottom->fill(xp);
break;
}
}
else if (id == PID::PROTON) {
_h_XpProtonN->fill(xp);
_multProton[0]->fill();
switch (flavour) {
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_multProton[1]->fill();
_temp_XpProtonLight->fill(xp);
_h_XpProtonLight->fill(xp);
if( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 ))
_h_RProton->fill(xp);
else
_h_RPBar ->fill(xp);
break;
case PID::CQUARK:
_multProton[2]->fill();
_temp_XpProtonCharm->fill(xp);
_h_XpProtonCharm->fill(xp);
break;
case PID::BQUARK:
_multProton[3]->fill();
_h_XpProtonBottom->fill(xp);
break;
}
}
}
const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
for (const Particle& p : ufs.particles()) {
const double xp = p.p3().mod()/meanBeamMom;
// if in quark or antiquark hemisphere
bool quark = p.p3().dot(axis)*dot>0.;
int id = p.abspid();
if (id == PID::LAMBDA) {
_multLambda[0]->fill();
_h_XpLambdaN->fill(xp);
switch (flavour) {
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_multLambda[1]->fill();
_h_XpLambdaLight->fill(xp);
if( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 ))
_h_RLambda->fill(xp);
else
_h_RLBar ->fill(xp);
break;
case PID::CQUARK:
_multLambda[2]->fill();
_h_XpLambdaCharm->fill(xp);
break;
case PID::BQUARK:
_multLambda[3]->fill();
_h_XpLambdaBottom->fill(xp);
break;
}
}
else if (id == 313) {
_multKStar0[0]->fill();
_h_XpKStar0N->fill(xp);
switch (flavour) {
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_multKStar0[1]->fill();
_temp_XpKStar0Light->fill(xp);
_h_XpKStar0Light->fill(xp);
if ( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 ))
_h_RKS0 ->fill(xp);
else
_h_RKSBar0->fill(xp);
break;
case PID::CQUARK:
_multKStar0[2]->fill();
_temp_XpKStar0Charm->fill(xp);
_h_XpKStar0Charm->fill(xp);
break;
case PID::BQUARK:
_multKStar0[3]->fill();
_h_XpKStar0Bottom->fill(xp);
break;
}
}
else if (id == 333) {
_multPhi[0]->fill();
_h_XpPhiN->fill(xp);
switch (flavour) {
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_multPhi[1]->fill();
_h_XpPhiLight->fill(xp);
break;
case PID::CQUARK:
_multPhi[2]->fill();
_h_XpPhiCharm->fill(xp);
break;
case PID::BQUARK:
_multPhi[3]->fill();
_h_XpPhiBottom->fill(xp);
break;
}
}
else if (id == PID::K0S || id == PID::K0L) {
_multK0[0]->fill();
_h_XpK0N->fill(xp);
switch (flavour) {
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_multK0[1]->fill();
_h_XpK0Light->fill(xp);
break;
case PID::CQUARK:
_multK0[2]->fill();
_h_XpK0Charm->fill(xp);
break;
case PID::BQUARK:
_multK0[3]->fill();
_h_XpK0Bottom->fill(xp);
break;
}
}
}
}
void init() {
// Projections
declare(Beam(), "Beams");
declare(ChargedFinalState(), "FS");
declare(UnstableFinalState(), "UFS");
declare(InitialQuarks(), "IQF");
declare(Thrust(FinalState()), "Thrust");
book(_temp_XpChargedN1 ,"TMP/XpChargedN1", refData( 1, 1, 1));
book(_temp_XpChargedN2 ,"TMP/XpChargedN2", refData( 2, 1, 1));
book(_temp_XpChargedN3 ,"TMP/XpChargedN3", refData( 3, 1, 1));
book(_h_XpPiPlusN , 1, 1, 2);
book(_h_XpKPlusN , 2, 1, 2);
book(_h_XpProtonN , 3, 1, 2);
book(_h_XpChargedN , 4, 1, 1);
book(_h_XpK0N , 5, 1, 1);
book(_h_XpLambdaN , 7, 1, 1);
book(_h_XpKStar0N , 8, 1, 1);
book(_h_XpPhiN , 9, 1, 1);
book(_h_XpPiPlusLight ,10, 1, 1);
book(_h_XpPiPlusCharm ,10, 1, 2);
book(_h_XpPiPlusBottom ,10, 1, 3);
book(_h_XpKPlusLight ,12, 1, 1);
book(_h_XpKPlusCharm ,12, 1, 2);
book(_h_XpKPlusBottom ,12, 1, 3);
book(_h_XpKStar0Light ,14, 1, 1);
book(_h_XpKStar0Charm ,14, 1, 2);
book(_h_XpKStar0Bottom ,14, 1, 3);
book(_h_XpProtonLight ,16, 1, 1);
book(_h_XpProtonCharm ,16, 1, 2);
book(_h_XpProtonBottom ,16, 1, 3);
book(_h_XpLambdaLight ,18, 1, 1);
book(_h_XpLambdaCharm ,18, 1, 2);
book(_h_XpLambdaBottom ,18, 1, 3);
book(_h_XpK0Light ,20, 1, 1);
book(_h_XpK0Charm ,20, 1, 2);
book(_h_XpK0Bottom ,20, 1, 3);
book(_h_XpPhiLight ,22, 1, 1);
book(_h_XpPhiCharm ,22, 1, 2);
book(_h_XpPhiBottom ,22, 1, 3);
book(_temp_XpKPlusCharm ,"TMP/XpKPlusCharm", refData(13, 1, 1));
book(_temp_XpKPlusLight ,"TMP/XpKPlusLight", refData(13, 1, 1));
book(_temp_XpKStar0Charm ,"TMP/XpKStar0Charm", refData(15, 1, 1));
book(_temp_XpKStar0Light ,"TMP/XpKStar0Light", refData(15, 1, 1));
book(_temp_XpProtonCharm ,"TMP/XpProtonCharm", refData(17, 1, 1));
book(_temp_XpProtonLight ,"TMP/XpProtonLight", refData(17, 1, 1));
book(_h_RPiPlus , 26, 1, 1);
book(_h_RPiMinus , 26, 1, 2);
book(_h_RKS0 , 28, 1, 1);
book(_h_RKSBar0 , 28, 1, 2);
book(_h_RKPlus , 30, 1, 1);
book(_h_RKMinus , 30, 1, 2);
book(_h_RProton , 32, 1, 1);
book(_h_RPBar , 32, 1, 2);
book(_h_RLambda , 34, 1, 1);
book(_h_RLBar , 34, 1, 2);
book(_s_Xp_PiPl_Ch , 1, 1, 1);
book(_s_Xp_KPl_Ch , 2, 1, 1);
book(_s_Xp_Pr_Ch , 3, 1, 1);
book(_s_Xp_PiPlCh_PiPlLi, 11, 1, 1);
book(_s_Xp_PiPlBo_PiPlLi, 11, 1, 2);
book(_s_Xp_KPlCh_KPlLi , 13, 1, 1);
book(_s_Xp_KPlBo_KPlLi , 13, 1, 2);
book(_s_Xp_KS0Ch_KS0Li , 15, 1, 1);
book(_s_Xp_KS0Bo_KS0Li , 15, 1, 2);
book(_s_Xp_PrCh_PrLi , 17, 1, 1);
book(_s_Xp_PrBo_PrLi , 17, 1, 2);
book(_s_Xp_LaCh_LaLi , 19, 1, 1);
book(_s_Xp_LaBo_LaLi , 19, 1, 2);
book(_s_Xp_K0Ch_K0Li , 21, 1, 1);
book(_s_Xp_K0Bo_K0Li , 21, 1, 2);
book(_s_Xp_PhiCh_PhiLi , 23, 1, 1);
book(_s_Xp_PhiBo_PhiLi , 23, 1, 2);
book(_s_PiM_PiP , 27, 1, 1);
book(_s_KSBar0_KS0, 29, 1, 1);
book(_s_KM_KP , 31, 1, 1);
book(_s_Pr_PBar , 33, 1, 1);
book(_s_Lam_LBar , 35, 1, 1);
book(_SumOfudsWeights, "SumOfudsWeights");
book(_SumOfcWeights, "SumOfcWeights");
book(_SumOfbWeights, "SumOfbWeights");
for ( size_t i=0; i<4; ++i) {
book(_multPiPlus[i], "multPiPlus_"+to_str(i));
book(_multKPlus[i], "multKPlus_"+to_str(i));
book(_multK0[i], "multK0_"+to_str(i));
book(_multKStar0[i], "multKStar0_"+to_str(i));
book(_multPhi[i], "multPhi_"+to_str(i));
book(_multProton[i], "multProton_"+to_str(i));
book(_multLambda[i], "multLambda_"+to_str(i));
}
book(tmp1, 24, 1, 1, true);
book(tmp2, 24, 1, 2, true);
book(tmp3, 24, 1, 3, true);
book(tmp4, 24, 1, 4, true);
book(tmp5, 25, 1, 1, true);
book(tmp6, 25, 1, 2, true);
book(tmp7, 24, 2, 1, true);
book(tmp8, 24, 2, 2, true);
book(tmp9, 24, 2, 3, true);
book(tmp10, 24, 2, 4, true);
book(tmp11, 25, 2, 1, true);
book(tmp12, 25, 2, 2, true);
book(tmp13, 24, 3, 1, true);
book(tmp14, 24, 3, 2, true);
book(tmp15, 24, 3, 3, true);
book(tmp16, 24, 3, 4, true);
book(tmp17, 25, 3, 1, true);
book(tmp18, 25, 3, 2, true);
book(tmp19, 24, 4, 1, true);
book(tmp20, 24, 4, 2, true);
book(tmp21, 24, 4, 3, true);
book(tmp22, 24, 4, 4, true);
book(tmp23, 25, 4, 1, true);
book(tmp24, 25, 4, 2, true);
book(tmp25, 24, 5, 1, true);
book(tmp26, 24, 5, 2, true);
book(tmp27, 24, 5, 3, true);
book(tmp28, 24, 5, 4, true);
book(tmp29, 25, 5, 1, true);
book(tmp30, 25, 5, 2, true);
book(tmp31, 24, 6, 1, true);
book(tmp32, 24, 6, 2, true);
book(tmp33, 24, 6, 3, true);
book(tmp34, 24, 6, 4, true);
book(tmp35, 25, 6, 1, true);
book(tmp36, 25, 6, 2, true);
book(tmp37, 24, 7, 1, true);
book(tmp38, 24, 7, 2, true);
book(tmp39, 24, 7, 3, true);
book(tmp40, 24, 7, 4, true);
book(tmp41, 25, 7, 1, true);
book(tmp42, 25, 7, 2, true);
}
/// Finalize
void finalize() {
// Get the ratio plots sorted out first
divide(_h_XpPiPlusN, _temp_XpChargedN1, _s_Xp_PiPl_Ch);
divide(_h_XpKPlusN, _temp_XpChargedN2, _s_Xp_KPl_Ch);
divide(_h_XpProtonN, _temp_XpChargedN3, _s_Xp_Pr_Ch);
divide(_h_XpPiPlusCharm, _h_XpPiPlusLight, _s_Xp_PiPlCh_PiPlLi);
_s_Xp_PiPlCh_PiPlLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfcWeights));
divide(_h_XpPiPlusBottom, _h_XpPiPlusLight, _s_Xp_PiPlBo_PiPlLi);
_s_Xp_PiPlBo_PiPlLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfbWeights));
divide(_temp_XpKPlusCharm , _temp_XpKPlusLight, _s_Xp_KPlCh_KPlLi);
_s_Xp_KPlCh_KPlLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfcWeights));
divide(_h_XpKPlusBottom, _h_XpKPlusLight, _s_Xp_KPlBo_KPlLi);
_s_Xp_KPlBo_KPlLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfbWeights));
divide(_temp_XpKStar0Charm, _temp_XpKStar0Light, _s_Xp_KS0Ch_KS0Li);
_s_Xp_KS0Ch_KS0Li->scale(1., dbl(*_SumOfudsWeights / *_SumOfcWeights));
divide(_h_XpKStar0Bottom, _h_XpKStar0Light, _s_Xp_KS0Bo_KS0Li);
_s_Xp_KS0Bo_KS0Li->scale(1., dbl(*_SumOfudsWeights / *_SumOfbWeights));
divide(_temp_XpProtonCharm, _temp_XpProtonLight, _s_Xp_PrCh_PrLi);
_s_Xp_PrCh_PrLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfcWeights));
divide(_h_XpProtonBottom, _h_XpProtonLight, _s_Xp_PrBo_PrLi);
_s_Xp_PrBo_PrLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfbWeights));
divide(_h_XpLambdaCharm, _h_XpLambdaLight, _s_Xp_LaCh_LaLi);
_s_Xp_LaCh_LaLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfcWeights));
divide(_h_XpLambdaBottom, _h_XpLambdaLight, _s_Xp_LaBo_LaLi);
_s_Xp_LaBo_LaLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfbWeights));
divide(_h_XpK0Charm, _h_XpK0Light, _s_Xp_K0Ch_K0Li);
_s_Xp_K0Ch_K0Li->scale(1., dbl(*_SumOfudsWeights / *_SumOfcWeights));
divide(_h_XpK0Bottom, _h_XpK0Light, _s_Xp_K0Bo_K0Li);
_s_Xp_K0Bo_K0Li->scale(1., dbl(*_SumOfudsWeights / *_SumOfbWeights));
divide(_h_XpPhiCharm, _h_XpPhiLight, _s_Xp_PhiCh_PhiLi);
_s_Xp_PhiCh_PhiLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfcWeights));
divide(_h_XpPhiBottom, _h_XpPhiLight, _s_Xp_PhiBo_PhiLi);
_s_Xp_PhiBo_PhiLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfbWeights));
// Then the leading particles
divide(*_h_RPiMinus - *_h_RPiPlus, *_h_RPiMinus + *_h_RPiPlus, _s_PiM_PiP);
divide(*_h_RKSBar0 - *_h_RKS0, *_h_RKSBar0 + *_h_RKS0, _s_KSBar0_KS0);
divide(*_h_RKMinus - *_h_RKPlus, *_h_RKMinus + *_h_RKPlus, _s_KM_KP);
divide(*_h_RProton - *_h_RPBar, *_h_RProton + *_h_RPBar, _s_Pr_PBar);
divide(*_h_RLambda - *_h_RLBar, *_h_RLambda + *_h_RLBar, _s_Lam_LBar);
// Then the rest
scale(_h_XpPiPlusN, 1/sumOfWeights());
scale(_h_XpKPlusN, 1/sumOfWeights());
scale(_h_XpProtonN, 1/sumOfWeights());
scale(_h_XpChargedN, 1/sumOfWeights());
scale(_h_XpK0N, 1/sumOfWeights());
scale(_h_XpLambdaN, 1/sumOfWeights());
scale(_h_XpKStar0N, 1/sumOfWeights());
scale(_h_XpPhiN, 1/sumOfWeights());
scale(_h_XpPiPlusLight, 1 / *_SumOfudsWeights);
scale(_h_XpPiPlusCharm, 1 / *_SumOfcWeights);
scale(_h_XpPiPlusBottom, 1 / *_SumOfbWeights);
scale(_h_XpKPlusLight, 1 / *_SumOfudsWeights);
scale(_h_XpKPlusCharm, 1 / *_SumOfcWeights);
scale(_h_XpKPlusBottom, 1 / *_SumOfbWeights);
scale(_h_XpKStar0Light, 1 / *_SumOfudsWeights);
scale(_h_XpKStar0Charm, 1 / *_SumOfcWeights);
scale(_h_XpKStar0Bottom, 1 / *_SumOfbWeights);
scale(_h_XpProtonLight, 1 / *_SumOfudsWeights);
scale(_h_XpProtonCharm, 1 / *_SumOfcWeights);
scale(_h_XpProtonBottom, 1 / *_SumOfbWeights);
scale(_h_XpLambdaLight, 1 / *_SumOfudsWeights);
scale(_h_XpLambdaCharm, 1 / *_SumOfcWeights);
scale(_h_XpLambdaBottom, 1 / *_SumOfbWeights);
scale(_h_XpK0Light, 1 / *_SumOfudsWeights);
scale(_h_XpK0Charm, 1 / *_SumOfcWeights);
scale(_h_XpK0Bottom, 1 / *_SumOfbWeights);
scale(_h_XpPhiLight, 1 / *_SumOfudsWeights);
scale(_h_XpPhiCharm , 1 / *_SumOfcWeights);
scale(_h_XpPhiBottom, 1 / *_SumOfbWeights);
scale(_h_RPiPlus, 1 / *_SumOfudsWeights);
scale(_h_RPiMinus, 1 / *_SumOfudsWeights);
scale(_h_RKS0, 1 / *_SumOfudsWeights);
scale(_h_RKSBar0, 1 / *_SumOfudsWeights);
scale(_h_RKPlus, 1 / *_SumOfudsWeights);
scale(_h_RKMinus, 1 / *_SumOfudsWeights);
scale(_h_RProton, 1 / *_SumOfudsWeights);
scale(_h_RPBar, 1 / *_SumOfudsWeights);
scale(_h_RLambda, 1 / *_SumOfudsWeights);
scale(_h_RLBar, 1 / *_SumOfudsWeights);
// Multiplicities
double avgNumPartsAll, avgNumPartsLight,avgNumPartsCharm, avgNumPartsBottom;
// pi+/-
// all
avgNumPartsAll = dbl(*_multPiPlus[0])/sumOfWeights();
tmp1->point(0).setY(avgNumPartsAll);
// light
avgNumPartsLight = dbl(*_multPiPlus[1] / *_SumOfudsWeights);
tmp2->point(0).setY(avgNumPartsLight);
// charm
avgNumPartsCharm = dbl(*_multPiPlus[2] / *_SumOfcWeights);
tmp3->point(0).setY(avgNumPartsCharm);
// bottom
avgNumPartsBottom = dbl(*_multPiPlus[3] / *_SumOfbWeights);
tmp4->point(0).setY(avgNumPartsBottom);
// charm-light
tmp5->point(0).setY(avgNumPartsCharm - avgNumPartsLight);
// bottom-light
tmp6->point(0).setY(avgNumPartsBottom - avgNumPartsLight);
// K+/-
// all
avgNumPartsAll = dbl(*_multKPlus[0])/sumOfWeights();
tmp7->point(0).setY(avgNumPartsAll);
// light
avgNumPartsLight = dbl(*_multKPlus[1] / *_SumOfudsWeights);
tmp8->point(0).setY(avgNumPartsLight);
// charm
avgNumPartsCharm = dbl(*_multKPlus[2] / *_SumOfcWeights);
tmp9->point(0).setY(avgNumPartsCharm);
// bottom
avgNumPartsBottom = dbl(*_multKPlus[3] / *_SumOfbWeights);
tmp10->point(0).setY(avgNumPartsBottom);
// charm-light
tmp11->point(0).setY(avgNumPartsCharm - avgNumPartsLight);
// bottom-light
tmp12->point(0).setY(avgNumPartsBottom - avgNumPartsLight);
// K0
// all
avgNumPartsAll = dbl(*_multK0[0])/sumOfWeights();
tmp13->point(0).setY(avgNumPartsAll);
// light
avgNumPartsLight = dbl(*_multK0[1] / *_SumOfudsWeights);
tmp14->point(0).setY(avgNumPartsLight);
// charm
avgNumPartsCharm = dbl(*_multK0[2] / *_SumOfcWeights);
tmp15->point(0).setY(avgNumPartsCharm);
// bottom
avgNumPartsBottom = dbl(*_multK0[3] / *_SumOfbWeights);
tmp16->point(0).setY(avgNumPartsBottom);
// charm-light
tmp17->point(0).setY(avgNumPartsCharm - avgNumPartsLight);
// bottom-light
tmp18->point(0).setY(avgNumPartsBottom - avgNumPartsLight);
// K*0
// all
avgNumPartsAll = dbl(*_multKStar0[0])/sumOfWeights();
tmp19->point(0).setY(avgNumPartsAll);
// light
avgNumPartsLight = dbl(*_multKStar0[1] / *_SumOfudsWeights);
tmp20->point(0).setY(avgNumPartsLight);
// charm
avgNumPartsCharm = dbl(*_multKStar0[2] / *_SumOfcWeights);
tmp21->point(0).setY(avgNumPartsCharm);
// bottom
avgNumPartsBottom = dbl(*_multKStar0[3] / *_SumOfbWeights);
tmp22->point(0).setY(avgNumPartsBottom);
// charm-light
tmp23->point(0).setY(avgNumPartsCharm - avgNumPartsLight);
// bottom-light
tmp24->point(0).setY(avgNumPartsBottom - avgNumPartsLight);
// phi
// all
avgNumPartsAll = dbl(*_multPhi[0])/sumOfWeights();
tmp25->point(0).setY(avgNumPartsAll);
// light
avgNumPartsLight = dbl(*_multPhi[1] / *_SumOfudsWeights);
tmp26->point(0).setY(avgNumPartsLight);
// charm
avgNumPartsCharm = dbl(*_multPhi[2] / *_SumOfcWeights);
tmp27->point(0).setY(avgNumPartsCharm);
// bottom
avgNumPartsBottom = dbl(*_multPhi[3] / *_SumOfbWeights);
tmp28->point(0).setY(avgNumPartsBottom);
// charm-light
tmp29->point(0).setY(avgNumPartsCharm - avgNumPartsLight);
// bottom-light
tmp30->point(0).setY(avgNumPartsBottom - avgNumPartsLight);
// p
// all
avgNumPartsAll = dbl(*_multProton[0])/sumOfWeights();
tmp31->point(0).setY(avgNumPartsAll);
// light
avgNumPartsLight = dbl(*_multProton[1] / *_SumOfudsWeights);
tmp32->point(0).setY(avgNumPartsLight);
// charm
avgNumPartsCharm = dbl(*_multProton[2] / *_SumOfcWeights);
tmp33->point(0).setY(avgNumPartsCharm);
// bottom
avgNumPartsBottom = dbl(*_multProton[3] / *_SumOfbWeights);
tmp34->point(0).setY(avgNumPartsBottom);
// charm-light
tmp35->point(0).setY(avgNumPartsCharm - avgNumPartsLight);
// bottom-light
tmp36->point(0).setY(avgNumPartsBottom - avgNumPartsLight);
// Lambda
// all
avgNumPartsAll = dbl(*_multLambda[0])/sumOfWeights();
tmp37->point(0).setY(avgNumPartsAll);
// light
avgNumPartsLight = dbl(*_multLambda[1] / *_SumOfudsWeights);
tmp38->point(0).setY(avgNumPartsLight);
// charm
avgNumPartsCharm = dbl(*_multLambda[2] / *_SumOfcWeights);
tmp39->point(0).setY(avgNumPartsCharm);
// bottom
avgNumPartsBottom = dbl(*_multLambda[3] / *_SumOfbWeights);
tmp40->point(0).setY(avgNumPartsBottom);
// charm-light
tmp41->point(0).setY(avgNumPartsCharm - avgNumPartsLight);
// bottom-light
tmp42->point(0).setY(avgNumPartsBottom - avgNumPartsLight);
}
//@}
private:
/// Store the weighted sums of numbers of charged / charged+neutral
/// particles. Used to calculate average number of particles for the
/// inclusive single particle distributions' normalisations.
CounterPtr _SumOfudsWeights, _SumOfcWeights, _SumOfbWeights;
vector<CounterPtr> _multPiPlus, _multKPlus, _multK0,
_multKStar0, _multPhi, _multProton, _multLambda;
Histo1DPtr _h_XpPiPlusSig, _h_XpPiPlusN;
Histo1DPtr _h_XpKPlusSig, _h_XpKPlusN;
Histo1DPtr _h_XpProtonSig, _h_XpProtonN;
Histo1DPtr _h_XpChargedN;
Histo1DPtr _h_XpK0N, _h_XpLambdaN;
Histo1DPtr _h_XpKStar0N, _h_XpPhiN;
Histo1DPtr _h_XpPiPlusLight, _h_XpPiPlusCharm, _h_XpPiPlusBottom;
Histo1DPtr _h_XpKPlusLight, _h_XpKPlusCharm, _h_XpKPlusBottom;
Histo1DPtr _h_XpKStar0Light, _h_XpKStar0Charm, _h_XpKStar0Bottom;
Histo1DPtr _h_XpProtonLight, _h_XpProtonCharm, _h_XpProtonBottom;
Histo1DPtr _h_XpLambdaLight, _h_XpLambdaCharm, _h_XpLambdaBottom;
Histo1DPtr _h_XpK0Light, _h_XpK0Charm, _h_XpK0Bottom;
Histo1DPtr _h_XpPhiLight, _h_XpPhiCharm, _h_XpPhiBottom;
Histo1DPtr _temp_XpChargedN1, _temp_XpChargedN2, _temp_XpChargedN3;
Histo1DPtr _temp_XpKPlusCharm , _temp_XpKPlusLight;
Histo1DPtr _temp_XpKStar0Charm, _temp_XpKStar0Light;
Histo1DPtr _temp_XpProtonCharm, _temp_XpProtonLight;
Histo1DPtr _h_RPiPlus, _h_RPiMinus;
Histo1DPtr _h_RKS0, _h_RKSBar0;
Histo1DPtr _h_RKPlus, _h_RKMinus;
Histo1DPtr _h_RProton, _h_RPBar;
Histo1DPtr _h_RLambda, _h_RLBar;
Scatter2DPtr _s_Xp_PiPl_Ch, _s_Xp_KPl_Ch, _s_Xp_Pr_Ch;
Scatter2DPtr _s_Xp_PiPlCh_PiPlLi, _s_Xp_PiPlBo_PiPlLi;
Scatter2DPtr _s_Xp_KPlCh_KPlLi, _s_Xp_KPlBo_KPlLi;
Scatter2DPtr _s_Xp_KS0Ch_KS0Li, _s_Xp_KS0Bo_KS0Li;
Scatter2DPtr _s_Xp_PrCh_PrLi, _s_Xp_PrBo_PrLi;
Scatter2DPtr _s_Xp_LaCh_LaLi, _s_Xp_LaBo_LaLi;
Scatter2DPtr _s_Xp_K0Ch_K0Li, _s_Xp_K0Bo_K0Li;
Scatter2DPtr _s_Xp_PhiCh_PhiLi, _s_Xp_PhiBo_PhiLi;
Scatter2DPtr _s_PiM_PiP, _s_KSBar0_KS0, _s_KM_KP, _s_Pr_PBar, _s_Lam_LBar;
//@}
Scatter2DPtr tmp1;
Scatter2DPtr tmp2;
Scatter2DPtr tmp3;
Scatter2DPtr tmp4;
Scatter2DPtr tmp5;
Scatter2DPtr tmp6;
Scatter2DPtr tmp7;
Scatter2DPtr tmp8;
Scatter2DPtr tmp9;
Scatter2DPtr tmp10;
Scatter2DPtr tmp11;
Scatter2DPtr tmp12;
Scatter2DPtr tmp13;
Scatter2DPtr tmp14;
Scatter2DPtr tmp15;
Scatter2DPtr tmp16;
Scatter2DPtr tmp17;
Scatter2DPtr tmp18;
Scatter2DPtr tmp19;
Scatter2DPtr tmp20;
Scatter2DPtr tmp21;
Scatter2DPtr tmp22;
Scatter2DPtr tmp23;
Scatter2DPtr tmp24;
Scatter2DPtr tmp25;
Scatter2DPtr tmp26;
Scatter2DPtr tmp27;
Scatter2DPtr tmp28;
Scatter2DPtr tmp29;
Scatter2DPtr tmp30;
Scatter2DPtr tmp31;
Scatter2DPtr tmp32;
Scatter2DPtr tmp33;
Scatter2DPtr tmp34;
Scatter2DPtr tmp35;
Scatter2DPtr tmp36;
Scatter2DPtr tmp37;
Scatter2DPtr tmp38;
Scatter2DPtr tmp39;
Scatter2DPtr tmp40;
Scatter2DPtr tmp41;
Scatter2DPtr tmp42;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(SLD_1999_S3743934);
}
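The hemisphere test in analyze() above compresses into one sign convention: dot is the thrust-axis projection of the highest-energy quark, flipped if quarks[0] happens to be the antiquark, so a hadron is counted in the quark hemisphere when its own projection has the same sign. A restatement as a tiny helper, for illustration only:

// Sketch: true if p lies in the primary-quark hemisphere, given the thrust axis
// and the signed quark projection ("dot") computed as in analyze() above.
bool inQuarkHemisphere(const Particle& p, const Vector3& thrustAxis, double quarkDot) {
  return p.p3().dot(thrustAxis) * quarkDot > 0.;
}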
diff --git a/analyses/pluginLEP/SLD_2004_S5693039.cc b/analyses/pluginLEP/SLD_2004_S5693039.cc
--- a/analyses/pluginLEP/SLD_2004_S5693039.cc
+++ b/analyses/pluginLEP/SLD_2004_S5693039.cc
@@ -1,377 +1,379 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
+#include "Rivet/Projections/Thrust.hh"
+
+#define I_KNOW_THE_INITIAL_QUARKS_PROJECTION_IS_DODGY_BUT_NEED_TO_USE_IT
#include "Rivet/Projections/InitialQuarks.hh"
-#include "Rivet/Projections/Thrust.hh"
namespace Rivet {
/// @brief SLD flavour-dependent fragmentation paper
/// @author Peter Richardson
class SLD_2004_S5693039 : public Analysis {
public:
/// Constructor
SLD_2004_S5693039() : Analysis("SLD_2004_S5693039")
{}
/// @name Analysis methods
//@{
void analyze(const Event& e) {
// First, veto on leptonic events by requiring at least 2 charged FS particles
const FinalState& fs = apply<FinalState>(e, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) {
MSG_DEBUG("Failed ncharged cut");
vetoEvent;
}
MSG_DEBUG("Passed ncharged cut");
// Get beams and average beam momentum
const ParticlePair& beams = apply<Beam>(e, "Beams").beams();
const double meanBeamMom = ( beams.first.p3().mod() +
beams.second.p3().mod() ) / 2.0;
MSG_DEBUG("Avg beam momentum = " << meanBeamMom);
int flavour = 0;
const InitialQuarks& iqf = apply<InitialQuarks>(e, "IQF");
// If we only have two quarks (qqbar), just take the flavour.
// If we have more than two quarks, look for the highest energetic q-qbar pair.
Particles quarks;
if (iqf.particles().size() == 2) {
flavour = iqf.particles().front().abspid();
quarks = iqf.particles();
}
else {
map<int, Particle > quarkmap;
for (const Particle& p : iqf.particles()) {
if (quarkmap.find(p.pid())==quarkmap.end())
quarkmap[p.pid()] = p;
else if (quarkmap[p.pid()].E() < p.E())
quarkmap[p.pid()] = p;
}
double maxenergy = 0.;
for (int i = 1; i <= 5; ++i) {
double energy(0.);
if(quarkmap.find( i)!=quarkmap.end())
energy += quarkmap[ i].E();
if(quarkmap.find(-i)!=quarkmap.end())
energy += quarkmap[-i].E();
if (energy > maxenergy) { maxenergy = energy; flavour = i; }
}
if(quarkmap.find( flavour)!=quarkmap.end())
quarks.push_back(quarkmap[ flavour]);
if(quarkmap.find(-flavour)!=quarkmap.end())
quarks.push_back(quarkmap[-flavour]);
}
// total multiplicities
switch (flavour) {
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_weightLight ->fill();
_weightedTotalChargedPartNumLight ->fill(numParticles);
break;
case PID::CQUARK:
_weightCharm ->fill();
_weightedTotalChargedPartNumCharm ->fill(numParticles);
break;
case PID::BQUARK:
_weightBottom ->fill();
_weightedTotalChargedPartNumBottom ->fill(numParticles);
break;
}
// thrust axis for projections
Vector3 axis = apply<Thrust>(e, "Thrust").thrustAxis();
double dot(0.);
if(!quarks.empty()) {
dot = quarks[0].p3().dot(axis);
if(quarks[0].pid()<0) dot *= -1.;
}
// spectra and individual multiplicities
for (const Particle& p : fs.particles()) {
double pcm = p.p3().mod();
const double xp = pcm/meanBeamMom;
// if in quark or antiquark hemisphere
bool quark = p.p3().dot(axis)*dot>0.;
_h_PCharged ->fill(pcm );
// all charged
switch (flavour) {
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_h_XpChargedL->fill(xp);
break;
case PID::CQUARK:
_h_XpChargedC->fill(xp);
break;
case PID::BQUARK:
_h_XpChargedB->fill(xp);
break;
}
int id = p.abspid();
// charged pions
if (id == PID::PIPLUS) {
_h_XpPiPlus->fill(xp);
_h_XpPiPlusTotal->fill(xp);
switch (flavour) {
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_h_XpPiPlusL->fill(xp);
_h_NPiPlusL->fill(sqrtS());
if( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 ))
_h_RPiPlus->fill(xp);
else
_h_RPiMinus->fill(xp);
break;
case PID::CQUARK:
_h_XpPiPlusC->fill(xp);
_h_NPiPlusC->fill(sqrtS());
break;
case PID::BQUARK:
_h_XpPiPlusB->fill(xp);
_h_NPiPlusB->fill(sqrtS());
break;
}
}
else if (id == PID::KPLUS) {
_h_XpKPlus->fill(xp);
_h_XpKPlusTotal->fill(xp);
switch (flavour) {
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_h_XpKPlusL->fill(xp);
_h_NKPlusL->fill(sqrtS());
if( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 ))
_h_RKPlus->fill(xp);
else
_h_RKMinus->fill(xp);
break;
case PID::CQUARK:
_h_XpKPlusC->fill(xp);
_h_NKPlusC->fill(sqrtS());
break;
case PID::BQUARK:
_h_XpKPlusB->fill(xp);
_h_NKPlusB->fill(sqrtS());
break;
}
}
else if (id == PID::PROTON) {
_h_XpProton->fill(xp);
_h_XpProtonTotal->fill(xp);
switch (flavour) {
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_h_XpProtonL->fill(xp);
_h_NProtonL->fill(sqrtS());
if( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 ))
_h_RProton->fill(xp);
else
_h_RPBar ->fill(xp);
break;
case PID::CQUARK:
_h_XpProtonC->fill(xp);
_h_NProtonC->fill(sqrtS());
break;
case PID::BQUARK:
_h_XpProtonB->fill(xp);
_h_NProtonB->fill(sqrtS());
break;
}
}
}
}
void init() {
// Projections
declare(Beam(), "Beams");
declare(ChargedFinalState(), "FS");
declare(InitialQuarks(), "IQF");
declare(Thrust(FinalState()), "Thrust");
// Book histograms
book(_h_PCharged , 1, 1, 1);
book(_h_XpPiPlus , 2, 1, 2);
book(_h_XpKPlus , 3, 1, 2);
book(_h_XpProton , 4, 1, 2);
book(_h_XpPiPlusTotal , 2, 2, 2);
book(_h_XpKPlusTotal , 3, 2, 2);
book(_h_XpProtonTotal , 4, 2, 2);
book(_h_XpPiPlusL , 5, 1, 1);
book(_h_XpPiPlusC , 5, 1, 2);
book(_h_XpPiPlusB , 5, 1, 3);
book(_h_XpKPlusL , 6, 1, 1);
book(_h_XpKPlusC , 6, 1, 2);
book(_h_XpKPlusB , 6, 1, 3);
book(_h_XpProtonL , 7, 1, 1);
book(_h_XpProtonC , 7, 1, 2);
book(_h_XpProtonB , 7, 1, 3);
book(_h_XpChargedL , 8, 1, 1);
book(_h_XpChargedC , 8, 1, 2);
book(_h_XpChargedB , 8, 1, 3);
book(_h_NPiPlusL , 5, 2, 1);
book(_h_NPiPlusC , 5, 2, 2);
book(_h_NPiPlusB , 5, 2, 3);
book(_h_NKPlusL , 6, 2, 1);
book(_h_NKPlusC , 6, 2, 2);
book(_h_NKPlusB , 6, 2, 3);
book(_h_NProtonL , 7, 2, 1);
book(_h_NProtonC , 7, 2, 2);
book(_h_NProtonB , 7, 2, 3);
book(_h_RPiPlus , 9, 1, 1);
book(_h_RPiMinus , 9, 1, 2);
book(_h_RKPlus ,10, 1, 1);
book(_h_RKMinus ,10, 1, 2);
book(_h_RProton ,11, 1, 1);
book(_h_RPBar ,11, 1, 2);
// Ratios: used as target of divide() later
book(_s_PiM_PiP, 9, 1, 3);
book(_s_KM_KP , 10, 1, 3);
book(_s_Pr_PBar, 11, 1, 3);
book(_weightedTotalChargedPartNumLight, "weightedTotalChargedPartNumLight");
book(_weightedTotalChargedPartNumCharm, "weightedTotalChargedPartNumCharm");
book(_weightedTotalChargedPartNumBottom, "weightedTotalChargedPartNumBottom");
book(_weightLight, "weightLight");
book(_weightCharm, "weightCharm");
book(_weightBottom, "weightBottom");
book(tmp1, 8, 2, 1, true);
book(tmp2, 8, 2, 2, true);
book(tmp3, 8, 2, 3, true);
book(tmp4, 8, 3, 2, true);
book(tmp5, 8, 3, 3, true);
}
/// Finalize
void finalize() {
// Multiplicities
/// @todo Include errors
const double avgNumPartsLight = _weightedTotalChargedPartNumLight->val() / _weightLight->val();
const double avgNumPartsCharm = _weightedTotalChargedPartNumCharm->val() / _weightCharm->val();
const double avgNumPartsBottom = _weightedTotalChargedPartNumBottom->val() / _weightBottom->val();
tmp1->point(0).setY(avgNumPartsLight);
tmp2->point(0).setY(avgNumPartsCharm);
tmp3->point(0).setY(avgNumPartsBottom);
tmp4->point(0).setY(avgNumPartsCharm - avgNumPartsLight);
tmp5->point(0).setY(avgNumPartsBottom - avgNumPartsLight);
// Do divisions
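// Each ratio is the normalised difference (R_minus - R_plus) / (R_minus + R_plus),
// filled into the pre-booked scatters and converted to a percentage below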
divide(*_h_RPiMinus - *_h_RPiPlus, *_h_RPiMinus + *_h_RPiPlus, _s_PiM_PiP);
divide(*_h_RKMinus - *_h_RKPlus, *_h_RKMinus + *_h_RKPlus, _s_KM_KP);
divide(*_h_RProton - *_h_RPBar, *_h_RProton + *_h_RPBar, _s_Pr_PBar);
// Scale histograms
scale(_h_PCharged, 1./sumOfWeights());
scale(_h_XpPiPlus, 1./sumOfWeights());
scale(_h_XpKPlus, 1./sumOfWeights());
scale(_h_XpProton, 1./sumOfWeights());
scale(_h_XpPiPlusTotal, 1./sumOfWeights());
scale(_h_XpKPlusTotal, 1./sumOfWeights());
scale(_h_XpProtonTotal, 1./sumOfWeights());
scale(_h_XpPiPlusL, 1. / *_weightLight);
scale(_h_XpPiPlusC, 1. / *_weightCharm);
scale(_h_XpPiPlusB, 1. / *_weightBottom);
scale(_h_XpKPlusL, 1. / *_weightLight);
scale(_h_XpKPlusC, 1. / *_weightCharm);
scale(_h_XpKPlusB, 1. / *_weightBottom);
scale(_h_XpProtonL, 1. / *_weightLight);
scale(_h_XpProtonC, 1. / *_weightCharm);
scale(_h_XpProtonB, 1. / *_weightBottom);
scale(_h_XpChargedL, 1. / *_weightLight);
scale(_h_XpChargedC, 1. / *_weightCharm);
scale(_h_XpChargedB, 1. / *_weightBottom);
scale(_h_NPiPlusL, 1. / *_weightLight);
scale(_h_NPiPlusC, 1. / *_weightCharm);
scale(_h_NPiPlusB, 1. / *_weightBottom);
scale(_h_NKPlusL, 1. / *_weightLight);
scale(_h_NKPlusC, 1. / *_weightCharm);
scale(_h_NKPlusB, 1. / *_weightBottom);
scale(_h_NProtonL, 1. / *_weightLight);
scale(_h_NProtonC, 1. / *_weightCharm);
scale(_h_NProtonB, 1. / *_weightBottom);
// Paper suggests this should be 0.5/weight but it has to be 1.0 to get normalisations right...
scale(_h_RPiPlus, 1. / *_weightLight);
scale(_h_RPiMinus, 1. / *_weightLight);
scale(_h_RKPlus, 1. / *_weightLight);
scale(_h_RKMinus, 1. / *_weightLight);
scale(_h_RProton, 1. / *_weightLight);
scale(_h_RPBar, 1. / *_weightLight);
// convert ratio to %
_s_PiM_PiP->scale(1.,100.);
_s_KM_KP ->scale(1.,100.);
_s_Pr_PBar->scale(1.,100.);
}
//@}
private:
Scatter2DPtr tmp1;
Scatter2DPtr tmp2;
Scatter2DPtr tmp3;
Scatter2DPtr tmp4;
Scatter2DPtr tmp5;
/// @name Multiplicities
//@{
CounterPtr _weightedTotalChargedPartNumLight;
CounterPtr _weightedTotalChargedPartNumCharm;
CounterPtr _weightedTotalChargedPartNumBottom;
//@}
/// @name Weights
//@{
CounterPtr _weightLight, _weightCharm, _weightBottom;
//@}
// Histograms
//@{
Histo1DPtr _h_PCharged;
Histo1DPtr _h_XpPiPlus, _h_XpKPlus, _h_XpProton;
Histo1DPtr _h_XpPiPlusTotal, _h_XpKPlusTotal, _h_XpProtonTotal;
Histo1DPtr _h_XpPiPlusL, _h_XpPiPlusC, _h_XpPiPlusB;
Histo1DPtr _h_XpKPlusL, _h_XpKPlusC, _h_XpKPlusB;
Histo1DPtr _h_XpProtonL, _h_XpProtonC, _h_XpProtonB;
Histo1DPtr _h_XpChargedL, _h_XpChargedC, _h_XpChargedB;
Histo1DPtr _h_NPiPlusL, _h_NPiPlusC, _h_NPiPlusB;
Histo1DPtr _h_NKPlusL, _h_NKPlusC, _h_NKPlusB;
Histo1DPtr _h_NProtonL, _h_NProtonC, _h_NProtonB;
Histo1DPtr _h_RPiPlus, _h_RPiMinus, _h_RKPlus;
Histo1DPtr _h_RKMinus, _h_RProton, _h_RPBar;
Scatter2DPtr _s_PiM_PiP, _s_KM_KP, _s_Pr_PBar;
//@}
};
// Hook for the plugin system
DECLARE_RIVET_PLUGIN(SLD_2004_S5693039);
}
diff --git a/analyses/pluginMisc/TPC_1987_I235694.cc b/analyses/pluginMisc/TPC_1987_I235694.cc
--- a/analyses/pluginMisc/TPC_1987_I235694.cc
+++ b/analyses/pluginMisc/TPC_1987_I235694.cc
@@ -1,102 +1,104 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
+
+#define I_KNOW_THE_INITIAL_QUARKS_PROJECTION_IS_DODGY_BUT_NEED_TO_USE_IT
#include "Rivet/Projections/InitialQuarks.hh"
namespace Rivet {
/// @brief TPC flavour-separated mean charged multiplicity at 29 GeV
class TPC_1987_I235694 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(TPC_1987_I235694);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
// Histograms
book(_h_all , 5, 1, 4);
book(_h_light , 4, 1, 4);
book(_h_charm , 3, 1, 4);
book(_h_bottom, 2, 1, 4);
// Projections
declare(Beam(), "Beams");
declare(ChargedFinalState(), "CFS");
declare(InitialQuarks(), "IQF");
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
const FinalState& cfs = apply<FinalState>(event, "CFS");
if (cfs.size() < 2) vetoEvent;
int flavour = 0;
const InitialQuarks& iqf = apply<InitialQuarks>(event, "IQF");
// If we only have two quarks (qqbar), just take the flavour.
// If we have more than two quarks, look for the highest energetic q-qbar pair.
if (iqf.particles().size() == 2) {
flavour = iqf.particles().front().abspid();
}
else {
map<int, double> quarkmap;
for (const Particle& p : iqf.particles()) {
if (quarkmap[p.pid()] < p.E()) {
quarkmap[p.pid()] = p.E();
}
}
double maxenergy = 0.;
for (int i = 1; i <= 5; ++i) {
if (quarkmap[i]+quarkmap[-i] > maxenergy) {
maxenergy = quarkmap[i]+quarkmap[-i];
flavour = i;
}
}
}
const size_t numParticles = cfs.particles().size();
switch (flavour) {
case 1: case 2: case 3:
_h_light->fill(sqrtS()/GeV,numParticles);
break;
case 4:
_h_charm->fill(sqrtS()/GeV,numParticles);
break;
case 5:
_h_bottom->fill(sqrtS()/GeV,numParticles);
break;
}
_h_all->fill(sqrtS()/GeV,numParticles);
}
/// Normalise histograms etc., after the run
void finalize() { }
//@}
private:
/// @name Multiplicities
//@{
Profile1DPtr _h_all;
Profile1DPtr _h_light;
Profile1DPtr _h_charm;
Profile1DPtr _h_bottom;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(TPC_1987_I235694);
}
diff --git a/include/Rivet/Projections/InitialQuarks.hh b/include/Rivet/Projections/InitialQuarks.hh
--- a/include/Rivet/Projections/InitialQuarks.hh
+++ b/include/Rivet/Projections/InitialQuarks.hh
@@ -1,61 +1,63 @@
// -*- C++ -*-
#ifndef RIVET_InitialQuarks_HH
#define RIVET_InitialQuarks_HH
+#ifndef I_KNOW_THE_INITIAL_QUARKS_PROJECTION_IS_DODGY_BUT_NEED_TO_USE_IT
#warning "This is a dangerous projection for a few specific old analyses. Not for general use!"
+#endif
#include "Rivet/Projection.hh"
#include "Rivet/Particle.hh"
#include "Rivet/Event.hh"
namespace Rivet {
/// @brief Project out quarks from the hard process in \f$ e^+ e^- \to Z^0 \f$ events
///
/// @deprecated We're not sure exactly when we'll get rid of this, but it's going to happen...
///
/// @warning This is a very dangerous and specific projection!
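///
/// A minimal usage sketch (not part of this patch): analyses that still rely on
/// this projection must opt in by defining the acknowledgement macro before the
/// include, exactly as done in the analyses patched above, e.g.
///
///   #define I_KNOW_THE_INITIAL_QUARKS_PROJECTION_IS_DODGY_BUT_NEED_TO_USE_IT
///   #include "Rivet/Projections/InitialQuarks.hh"
///   // ... in init():    declare(InitialQuarks(), "IQF");
///   // ... in analyze(): const InitialQuarks& iqf = apply<InitialQuarks>(event, "IQF");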
class InitialQuarks : public Projection {
public:
/// @name Standard constructors and destructors.
//@{
/// The default constructor. May specify the minimum and maximum
/// pseudorapidity \f$ \eta \f$ and the min \f$ p_T \f$ (in GeV).
InitialQuarks() {
setName("InitialQuarks");
}
/// Clone on the heap.
DEFAULT_RIVET_PROJ_CLONE(InitialQuarks);
//@}
/// Access the projected final-state particles.
virtual const Particles& particles() const { return _theParticles; }
/// Is this final state empty?
virtual bool empty() const { return _theParticles.empty(); }
protected:
/// Apply the projection to the event.
virtual void project(const Event& e);
/// Compare projections.
virtual CmpState compare(const Projection& p) const;
protected:
/// The final-state particles.
Particles _theParticles;
};
}
#endif
diff --git a/include/Rivet/Projections/JetAlg.hh b/include/Rivet/Projections/JetAlg.hh
--- a/include/Rivet/Projections/JetAlg.hh
+++ b/include/Rivet/Projections/JetAlg.hh
@@ -1,219 +1,214 @@
// -*- C++ -*-
#ifndef RIVET_JetAlg_HH
#define RIVET_JetAlg_HH
#include "Rivet/Projection.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/VisibleFinalState.hh"
#include "Rivet/Particle.hh"
#include "Rivet/Jet.hh"
namespace Rivet {
/// Abstract base class for projections which can return a set of {@link Jet}s.
class JetAlg : public Projection {
public:
/// Enum for the treatment of muons: whether to include all, some, or none in jet-finding
enum class Muons { NONE, DECAY, ALL };
/// Enum for the treatment of invisible particles: whether to include all, some, or none in jet-finding
enum class Invisibles { NONE, DECAY, ALL };
/// Constructor
JetAlg(const FinalState& fs,
Muons usemuons = Muons::ALL,
Invisibles useinvis = Invisibles::NONE);
/// Default constructor
JetAlg() = default;
/// Clone on the heap.
virtual unique_ptr<Projection> clone() const = 0;
/// Destructor
virtual ~JetAlg() = default;
/// @name Control the treatment of muons and invisible particles
///
/// MC-based jet calibration (and/or particle flow) can add back in particles
/// that weren't seen in calorimeters/trackers, so their treatment is configurable here.
//@{
/// @brief Include (some) muons in jet construction.
///
/// The default behaviour is that jets are constructed from visible
/// particles only. Some jet studies, including those from ATLAS, use a definition
/// in which muons from hadron decays are included via MC-based calibrations.
/// Use this switch to control which muons are passed to the jet clustering.
void useMuons(Muons usemuons = Muons::ALL) {
_useMuons = usemuons;
}
/// @brief Include (some) invisible particles in jet construction.
///
/// The default behaviour is that jets are only constructed from visible
/// particles. Some jet studies, including those from ATLAS, use a definition
/// in which neutrinos from hadron decays are included via MC-based calibrations.
/// Choosing a mode other than Invisibles::NONE avoids the automatic restriction to a VisibleFinalState.
void useInvisibles(Invisibles useinvis = Invisibles::DECAY) {
_useInvisibles = useinvis;
}
/// @brief obsolete chooser
DEPRECATED("make an explicit choice from Invisibles::{NONE,DECAY,ALL}. This boolean call does not allow for ALL")
void useInvisibles(bool useinvis) {
_useInvisibles = useinvis ? Invisibles::DECAY : Invisibles::NONE;
}
//@}
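// A minimal usage sketch (assumed, not part of this patch): a concrete JetAlg
// such as FastJets can have its muon/invisible treatment adjusted before being
// declared, e.g.
//
//   FastJets jetpro(FinalState(), FastJets::ANTIKT, 0.4);
//   jetpro.useMuons(JetAlg::Muons::NONE);            // drop all muons from the clustering inputs
//   jetpro.useInvisibles(JetAlg::Invisibles::DECAY); // keep neutrinos from hadron decays
//   declare(jetpro, "Jets");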
/// @name Access to jet objects
//@{
/// Get jets in no guaranteed order, with an optional Cut
/// @note Returns a copy rather than a reference, due to cuts
virtual Jets jets(const Cut& c=Cuts::open()) const {
return filter_select(_jets(), c);
}
/// Get jets in no guaranteed order, with a selection functor
/// @note Returns a copy rather than a reference, due to cuts
virtual Jets jets(const JetSelector& selector) const {
return filter_select(_jets(), selector);
}
/// Get the jets with a Cut applied, and ordered by supplied sorting functor
/// @note Returns a copy rather than a reference, due to cuts and sorting
Jets jets(const Cut& c, const JetSorter& sorter) const {
/// @todo Will the vector be efficiently std::move'd by value through this function chain?
return sortBy(jets(c), sorter);
}
/// Get the jets, ordered by supplied sorting functor, with an optional Cut
/// @note Returns a copy rather than a reference, due to cuts and sorting
Jets jets(const JetSorter& sorter, const Cut& c=Cuts::open()) const {
/// @todo Will the vector be efficiently std::move'd by value through this function chain?
return jets(c, sorter);
}
/// Get the jets, ordered by supplied sorting function object, with optional cuts on \f$ p_\perp \f$ and rapidity.
/// @note Returns a copy rather than a reference, due to cuts and sorting
Jets jets(const JetSelector& selector, const JetSorter& sorter) const {
/// @todo Will the vector be efficiently std::move'd by value through this function chain?
return sortBy(jets(selector), sorter);
}
/// Get the jets, ordered by supplied sorting functor and with a selection functor applied
/// @note Returns a copy rather than a reference, due to cuts and sorting
Jets jets(const JetSorter& sorter, const JetSelector selector) const {
- /// @todo Will the vector be efficiently std::move'd by value through this function chain?
return jets(selector, sorter);
}
/// Get the jets, ordered by \f$ p_T \f$, with optional cuts.
///
/// @note Returns a copy rather than a reference, due to cuts and sorting
///
/// This is a very common use-case, so is available as syntactic sugar for jets(c, cmpMomByPt).
- /// @todo The other sorted accessors should be removed in a cleanup.
Jets jetsByPt(const Cut& c=Cuts::open()) const {
return jets(c, cmpMomByPt);
}
/// Get the jets, ordered by \f$ p_T \f$, with cuts via a selection functor.
///
/// @note Returns a copy rather than a reference, due to cuts and sorting
///
/// This is a very common use-case, so is available as syntactic sugar for jets(selector, cmpMomByPt).
- /// @todo The other sorted accessors should be removed in a cleanup.
Jets jetsByPt(const JetSelector& selector) const {
return jets(selector, cmpMomByPt);
}
/// Get the jets, ordered by \f$ p_T \f$, with a cut on \f$ p_\perp \f$.
///
/// @deprecated Use the version with a Cut argument
/// @note Returns a copy rather than a reference, due to cuts and sorting
///
/// This is a very common use-case, so is available as syntactic sugar for jets(Cuts::pT >= ptmin, cmpMomByPt).
- /// @todo The other sorted accessors should be removed in a cleanup.
- DEPRECATED("Use the version with a Cut argument")
Jets jetsByPt(double ptmin) const {
return jets(Cuts::pT >= ptmin, cmpMomByPt);
}
//@}
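// A minimal usage sketch (assumed, not part of this patch): the Cut-based
// accessors are the preferred interface in analysis code, e.g.
//
//   const Jets jets = apply<FastJets>(event, "Jets").jetsByPt(Cuts::pT > 30*GeV && Cuts::absrap < 2.5);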
protected:
/// @brief Internal pure virtual method for getting jets in no guaranteed order.
virtual Jets _jets() const = 0;
public:
/// Count the jets
size_t size() const { return jets().size(); }
/// Count the jets after a Cut is applied.
size_t size(const Cut& c) const { return jets(c).size(); }
/// Count the jets after a selection functor is applied.
size_t size(const JetSelector& s) const { return jets(s).size(); }
/// Is this jet finder empty?
bool empty() const { return size() == 0; }
/// Is this jet finder empty after a Cut is applied?
bool empty(const Cut& c) const { return size(c) == 0; }
/// Is this jet finder empty after a selection functor is applied?
bool empty(const JetSelector& s) const { return size(s) == 0; }
/// Clear the projection.
virtual void reset() = 0;
typedef Jet entity_type;
typedef Jets collection_type;
/// Template-usable interface common to FinalState.
collection_type entities() const { return jets(); }
// /// Do the calculation locally (no caching).
// virtual void calc(const Particles& constituents, const Particles& tagparticles=Particles()) = 0;
protected:
/// Perform the projection on the Event.
virtual void project(const Event& e) = 0;
/// Compare projections.
virtual CmpState compare(const Projection& p) const = 0;
protected:
/// Flag to determine whether or not to exclude (some) muons from the would-be constituents.
Muons _useMuons;
/// Flag to determine whether or not to exclude (some) invisible particles from the would-be constituents.
Invisibles _useInvisibles;
};
/// Compatibility typedef, for equivalence with ParticleFinder
/// @todo Should we make this the canonical name? Would "require" a header filename change -> breakage or ugly.
using JetFinder = JetAlg;
}
#endif
diff --git a/src/Projections/InitialQuarks.cc b/src/Projections/InitialQuarks.cc
--- a/src/Projections/InitialQuarks.cc
+++ b/src/Projections/InitialQuarks.cc
@@ -1,61 +1,62 @@
// -*- C++ -*-
+#define I_KNOW_THE_INITIAL_QUARKS_PROJECTION_IS_DODGY_BUT_NEED_TO_USE_IT
#include "Rivet/Projections/InitialQuarks.hh"
namespace Rivet {
CmpState InitialQuarks::compare(const Projection& p) const {
return CmpState::EQ;
}
void InitialQuarks::project(const Event& e) {
_theParticles.clear();
for (const GenParticle* p : Rivet::particles(e.genEvent())) {
const GenVertex* pv = p->production_vertex();
const GenVertex* dv = p->end_vertex();
const PdgId pid = abs(p->pdg_id());
bool passed = inRange((long)pid, 1, 6);
if (passed) {
if (pv != 0) {
for (const GenParticle* pp : particles_in(pv)) {
// Only accept if the parent is an electron, Z0 or photon
const PdgId parentId = abs(pp->pdg_id());
passed = (parentId == PID::ELECTRON || parentId == PID::ZBOSON || parentId == PID::GAMMA);
}
} else {
passed = false;
}
}
if (getLog().isActive(Log::TRACE)) {
const int st = p->status();
const double pT = p->momentum().perp();
const double eta = p->momentum().eta();
MSG_TRACE(std::boolalpha
<< "ID = " << p->pdg_id() << ", status = " << st << ", pT = " << pT
<< ", eta = " << eta << ": result = " << passed);
if (pv != 0) {
for (const GenParticle* pp : particles_in(pv)) {
MSG_TRACE(std::boolalpha << " parent ID = " << pp->pdg_id());
}
}
if (dv != 0) {
for (const GenParticle* pp : particles_out(dv)) {
MSG_TRACE(std::boolalpha << " child ID = " << pp->pdg_id());
}
}
}
if (passed) _theParticles.push_back(Particle(*p));
}
MSG_DEBUG("Number of initial quarks = " << _theParticles.size());
if (!_theParticles.empty()) {
for (size_t i = 0; i < _theParticles.size(); i++) {
MSG_DEBUG("Initial quark[" << i << "] = " << _theParticles[i].pid());
}
}
}
}