Page MenuHomeHEPForge

No OneTemporary

diff --git a/analyses/pluginLEP/DELPHI_1991_I324035.cc b/analyses/pluginLEP/DELPHI_1991_I324035.cc
--- a/analyses/pluginLEP/DELPHI_1991_I324035.cc
+++ b/analyses/pluginLEP/DELPHI_1991_I324035.cc
@@ -1,183 +1,182 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Thrust.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
/// @brief Charged particle multiplicities in different regions
class DELPHI_1991_I324035 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(DELPHI_1991_I324035);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
// Initialise and register projections
const ChargedFinalState cfs;
declare(cfs, "FS");
const Thrust thrust(cfs);
declare(thrust, "Thrust");
// Book histograms
- _h_all_05 = bookHisto1D( 1, 1, 1);
- _h_all_10 = bookHisto1D( 2, 1, 1);
- _h_all_15 = bookHisto1D( 3, 1, 1);
- _h_all_20 = bookHisto1D( 4, 1, 1);
- _h_all_all = bookHisto1D( 5, 1, 1);
- _h_hemi_05 = bookHisto1D( 6, 1, 1);
- _h_hemi_10 = bookHisto1D( 7, 1, 1);
- _h_hemi_15 = bookHisto1D( 8, 1, 1);
- _h_hemi_20 = bookHisto1D( 9, 1, 1);
- _h_hemi_30 = bookHisto1D(10, 1, 1);
- _h_hemi_40 = bookHisto1D(11, 1, 1);
- _h_hemi_50 = bookHisto1D(12, 1, 1);
- _h_hemi_all = bookHisto1D(13, 1, 1);
+ book(_h_all_05 , 1, 1, 1);
+ book(_h_all_10 , 2, 1, 1);
+ book(_h_all_15 , 3, 1, 1);
+ book(_h_all_20 , 4, 1, 1);
+ book(_h_all_all , 5, 1, 1);
+ book(_h_hemi_05 , 6, 1, 1);
+ book(_h_hemi_10 , 7, 1, 1);
+ book(_h_hemi_15 , 8, 1, 1);
+ book(_h_hemi_20 , 9, 1, 1);
+ book(_h_hemi_30 , 10, 1, 1);
+ book(_h_hemi_40 , 11, 1, 1);
+ book(_h_hemi_50 , 12, 1, 1);
+ book(_h_hemi_all, 13, 1, 1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// First, veto on leptonic events by requiring at least 4 charged FS particles
const FinalState& fs = apply<FinalState>(event, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) {
MSG_DEBUG("Failed leptonic event cut");
vetoEvent;
}
MSG_DEBUG("Passed leptonic event cut");
- const double weight = event.weight();
// Thrusts
MSG_DEBUG("Calculating thrust");
const Thrust& thrust = apply<Thrust>(event, "Thrust");
Vector3 axis=thrust.thrustAxis();
unsigned int n_all_05(0),n_all_10(0),n_all_15(0),n_all_20(0),n_all_all(0);
unsigned int n_pos_05(0),n_pos_10(0),n_pos_15(0),n_pos_20(0),n_pos_30(0),n_pos_40(0),n_pos_50(0),n_pos_all(0);
unsigned int n_neg_05(0),n_neg_10(0),n_neg_15(0),n_neg_20(0),n_neg_30(0),n_neg_40(0),n_neg_50(0),n_neg_all(0);
for(const Particle &p : fs.particles()) {
const Vector3 mom3 = p.p3();
const double energy = p.E();
const double momT = dot(axis, mom3);
const double rapidityT = 0.5 * std::log((energy + momT) / (energy - momT));
++n_all_all;
if(abs(rapidityT)<0.5) {
++n_all_05;
if(rapidityT>0)
++n_pos_05;
else
++n_neg_05;
}
if(abs(rapidityT)<1.0) {
++n_all_10;
if(rapidityT>0)
++n_pos_10;
else
++n_neg_10;
}
if(abs(rapidityT)<1.5) {
++n_all_15;
if(rapidityT>0)
++n_pos_15;
else
++n_neg_15;
}
if(abs(rapidityT)<2.0) {
++n_all_20;
if(rapidityT>0)
++n_pos_20;
else
++n_neg_20;
}
if(abs(rapidityT)<3.0) {
if(rapidityT>0)
++n_pos_30;
else
++n_neg_30;
}
if(abs(rapidityT)<4.0) {
if(rapidityT>0)
++n_pos_40;
else
++n_neg_40;
}
if(abs(rapidityT)<5.0) {
if(rapidityT>0)
++n_pos_50;
else
++n_neg_50;
}
if(rapidityT>0)
++n_pos_all;
else
++n_neg_all;
}
- _h_all_05 ->fill(n_all_05 ,weight);
- _h_all_10 ->fill(n_all_10 ,weight);
- _h_all_15 ->fill(n_all_15 ,weight);
- _h_all_20 ->fill(n_all_20 ,weight);
- _h_all_all->fill(n_all_all,weight);
- _h_hemi_05 ->fill(n_pos_05 ,weight);
- _h_hemi_10 ->fill(n_pos_10 ,weight);
- _h_hemi_15 ->fill(n_pos_15 ,weight);
- _h_hemi_20 ->fill(n_pos_20 ,weight);
- _h_hemi_30 ->fill(n_pos_30 ,weight);
- _h_hemi_40 ->fill(n_pos_40 ,weight);
- _h_hemi_50 ->fill(n_pos_50 ,weight);
- _h_hemi_all->fill(n_pos_all,weight);
- _h_hemi_05 ->fill(n_neg_05 ,weight);
- _h_hemi_10 ->fill(n_neg_10 ,weight);
- _h_hemi_15 ->fill(n_neg_15 ,weight);
- _h_hemi_20 ->fill(n_neg_20 ,weight);
- _h_hemi_30 ->fill(n_neg_30 ,weight);
- _h_hemi_40 ->fill(n_neg_40 ,weight);
- _h_hemi_50 ->fill(n_neg_50 ,weight);
- _h_hemi_all->fill(n_neg_all,weight);
+ _h_all_05 ->fill(n_all_05 );
+ _h_all_10 ->fill(n_all_10 );
+ _h_all_15 ->fill(n_all_15 );
+ _h_all_20 ->fill(n_all_20 );
+ _h_all_all->fill(n_all_all);
+ _h_hemi_05 ->fill(n_pos_05 );
+ _h_hemi_10 ->fill(n_pos_10 );
+ _h_hemi_15 ->fill(n_pos_15 );
+ _h_hemi_20 ->fill(n_pos_20 );
+ _h_hemi_30 ->fill(n_pos_30 );
+ _h_hemi_40 ->fill(n_pos_40 );
+ _h_hemi_50 ->fill(n_pos_50 );
+ _h_hemi_all->fill(n_pos_all);
+ _h_hemi_05 ->fill(n_neg_05 );
+ _h_hemi_10 ->fill(n_neg_10 );
+ _h_hemi_15 ->fill(n_neg_15 );
+ _h_hemi_20 ->fill(n_neg_20 );
+ _h_hemi_30 ->fill(n_neg_30 );
+ _h_hemi_40 ->fill(n_neg_40 );
+ _h_hemi_50 ->fill(n_neg_50 );
+ _h_hemi_all->fill(n_neg_all);
}
/// Normalise histograms etc., after the run
void finalize() {
normalize( _h_all_05 , 1000.);
normalize( _h_all_10 , 1000.);
normalize( _h_all_15 , 1000.);
normalize( _h_all_20 , 1000.);
normalize( _h_all_all , 2000.);
normalize( _h_hemi_05 , 1000.);
normalize( _h_hemi_10 , 1000.);
normalize( _h_hemi_15 , 1000.);
normalize( _h_hemi_20 , 1000.);
normalize( _h_hemi_30 , 1000.);
normalize( _h_hemi_40 , 1000.);
normalize( _h_hemi_50 , 1000.);
normalize( _h_hemi_all, 1000.);
}
//@}
/// @name Histograms
//@{
Histo1DPtr _h_all_05, _h_all_10, _h_all_15, _h_all_20, _h_all_all;
Histo1DPtr _h_hemi_05, _h_hemi_10, _h_hemi_15, _h_hemi_20,
_h_hemi_30, _h_hemi_40, _h_hemi_50, _h_hemi_all;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(DELPHI_1991_I324035);
}
diff --git a/analyses/pluginLEP/DELPHI_1992_I334948.cc b/analyses/pluginLEP/DELPHI_1992_I334948.cc
--- a/analyses/pluginLEP/DELPHI_1992_I334948.cc
+++ b/analyses/pluginLEP/DELPHI_1992_I334948.cc
@@ -1,86 +1,85 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
/// @brief Charged multiplicity for different numbers of final state jets
class DELPHI_1992_I334948 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(DELPHI_1992_I334948);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
// Initialise and register projections
const ChargedFinalState cfs;
declare(cfs, "FS");
declare(FastJets(cfs, FastJets::JADE, 0.7), "Jets");
// Book histograms
for(unsigned int ih=0;ih<3;++ih) {
for(unsigned int iy=0;iy<3;++iy) {
- _h_mult[ih][iy] = bookHisto1D(ih+1,1,iy+1);
+ book(_h_mult[ih][iy],ih+1,1,iy+1);
}
}
}
/// Perform the per-event analysis
void analyze(const Event& event) {
const FinalState& fs = apply<FinalState>(event, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) {
MSG_DEBUG("Failed leptonic event cut");
vetoEvent;
}
MSG_DEBUG("Passed leptonic event cut");
- const double weight = event.weight();
const FastJets& jets = apply<FastJets>(event, "Jets");
if (jets.clusterSeq()) {
vector<double> ycut = {0.01,0.02,0.04};
for (unsigned int ih=0;ih<3;++ih) {
int nbin = jets.clusterSeq()->n_exclusive_jets_ycut(ycut[ih])-2;
if(nbin<0 || nbin>2) continue;
- _h_mult[ih][nbin]->fill(numParticles,weight);
+ _h_mult[ih][nbin]->fill(numParticles);
}
}
}
/// Normalise histograms etc., after the run
void finalize() {
for(unsigned int ih=0;ih<3;++ih) {
for(unsigned int iy=0;iy<3;++iy) {
normalize(_h_mult[ih][iy],2000.);
}
}
}
//@}
/// @name Histograms
//@{
Histo1DPtr _h_mult[3][3];
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(DELPHI_1992_I334948);
}
diff --git a/analyses/pluginLEP/DELPHI_1993_I360638.cc b/analyses/pluginLEP/DELPHI_1993_I360638.cc
--- a/analyses/pluginLEP/DELPHI_1993_I360638.cc
+++ b/analyses/pluginLEP/DELPHI_1993_I360638.cc
@@ -1,120 +1,119 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Sphericity.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/UnstableParticles.hh"
namespace Rivet {
/// @brief Lambda and Lambdabar distributions
class DELPHI_1993_I360638 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(DELPHI_1993_I360638);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
// Initialise and register projections
const ChargedFinalState cfs;
declare(cfs, "FS");
declare(UnstableParticles(), "UFS");
declare(Sphericity(cfs), "Sphericity");
// Book histograms
- _h_x = bookHisto1D(1, 1, 1);
- _h_rap = bookHisto1D(3, 1, 1);
- _h_cos = bookHisto1D(4, 1, 1);
- _m_single = bookHisto1D(2, 1, 1);
- _m_like = bookHisto1D(5, 1, 1);
- _m_opposite = bookHisto1D(6, 1, 1);
+ book(_h_x , 1, 1, 1);
+ book(_h_rap , 3, 1, 1);
+ book(_h_cos , 4, 1, 1);
+ book(_m_single , 2, 1, 1);
+ book(_m_like , 5, 1, 1);
+ book(_m_opposite, 6, 1, 1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// First, veto on leptonic events by requiring at least 4 charged FS particles
const FinalState& fs = apply<FinalState>(event, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) vetoEvent;
- const double weight = event.weight();
const UnstableParticles& ufs = apply<UnstableFinalState>(event, "UFS");
// lambda
Particles lambda = ufs.particles(Cuts::pid== PID::LAMBDA);
Particles lambdabar = ufs.particles(Cuts::pid==-PID::LAMBDA);
// multiplicities
- _m_single->fill(91.2,(lambda.size()+lambdabar.size())*weight);
+ _m_single->fill(91.2,(lambda.size()+lambdabar.size()));
if(lambda.empty()&&lambdabar.empty()) vetoEvent;
for(const Particle& p : lambda) {
double xP = 2.*p.p3().mod()/sqrtS();
- _h_x->fill(xP,weight);
+ _h_x->fill(xP);
}
for(const Particle& p : lambdabar) {
double xP = 2.*p.p3().mod()/sqrtS();
- _h_x->fill(xP,weight);
+ _h_x->fill(xP);
}
if(lambda.size()>=2) {
unsigned int npair=lambda.size()/2;
- _m_like->fill(91.2,weight*double(npair));
+ _m_like->fill(91.2,double(npair));
}
if(lambdabar.size()>=2) {
unsigned int npair=lambdabar.size()/2;
- _m_like->fill(91.2,weight*double(npair));
+ _m_like->fill(91.2,double(npair));
}
if(lambda.size()==0 || lambdabar.size()==0)
return;
- _m_opposite->fill(91.2,weight*double(max(lambda.size(),lambdabar.size())));
+ _m_opposite->fill(91.2,double(max(lambda.size(),lambdabar.size())));
const Sphericity& sphericity = apply<Sphericity>(event, "Sphericity");
for(const Particle & p : lambda) {
const Vector3 momP = p.p3();
const double enP = p.E();
const double modP = dot(sphericity.sphericityAxis(), momP);
const double rapP = 0.5 * std::log((enP + modP) / (enP - modP));
for(const Particle & pb : lambdabar) {
const Vector3 momB = pb.p3();
const double enB = pb.E();
const double modB = dot(sphericity.sphericityAxis(), momB);
const double rapB = 0.5 * std::log((enB + modB) / (enB - modB));
- _h_rap->fill(abs(rapP-rapB),weight);
- _h_cos->fill(momP.unit().dot(momB.unit()),weight);
+ _h_rap->fill(abs(rapP-rapB));
+ _h_cos->fill(momP.unit().dot(momB.unit()));
}
}
}
/// Normalise histograms etc., after the run
void finalize() {
scale( _h_x , 1./sumOfWeights());
scale( _h_rap , 1./sumOfWeights());
scale( _h_cos , 1./sumOfWeights());
scale( _m_single , 1./sumOfWeights());
scale( _m_like , 1./sumOfWeights());
scale( _m_opposite, 1./sumOfWeights());
}
//@}
/// @name Histograms
//@{
Histo1DPtr _h_x, _h_rap ,_h_cos;
Histo1DPtr _m_single, _m_like, _m_opposite;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(DELPHI_1993_I360638);
}
diff --git a/analyses/pluginLEP/DELPHI_1995_I377487.cc b/analyses/pluginLEP/DELPHI_1995_I377487.cc
--- a/analyses/pluginLEP/DELPHI_1995_I377487.cc
+++ b/analyses/pluginLEP/DELPHI_1995_I377487.cc
@@ -1,100 +1,97 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/UnstableParticles.hh"
#include "Rivet/Projections/Beam.hh"
namespace Rivet {
/// @brief K0 and K*(892)+/- momentum spectra
class DELPHI_1995_I377487 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(DELPHI_1995_I377487);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
declare(Beam(), "Beams");
declare(ChargedFinalState(), "FS");
declare(UnstableParticles(), "UFS");
// Book histograms
- _h_K0_x = bookHisto1D( 8, 1, 1);
- _h_K0_xi = bookHisto1D( 9, 1, 1);
- _h_Ks_x = bookHisto1D(10, 1, 1);
+ book(_h_K0_x , 8, 1, 1);
+ book(_h_K0_xi, 9, 1, 1);
+ book(_h_Ks_x ,10, 1, 1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// First, veto on leptonic events by requiring at least 4 charged FS particles
const FinalState& fs = apply<FinalState>(event, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) {
MSG_DEBUG("Failed leptonic event cut");
vetoEvent;
}
MSG_DEBUG("Passed leptonic event cut");
- // Get event weight for histo filling
- const double weight = event.weight();
-
// Get beams and average beam momentum
const ParticlePair& beams = apply<Beam>(event, "Beams").beams();
const double meanBeamMom = ( beams.first.p3().mod() +
beams.second.p3().mod() ) / 2.0;
MSG_DEBUG("Avg beam momentum = " << meanBeamMom);
// Final state of unstable particles to get particle spectra
const UnstableParticles& ufs = apply<UnstableFinalState>(event, "UFS");
- foreach (const Particle& p, ufs.particles(Cuts::pid==130 or
+ for (const Particle& p : ufs.particles(Cuts::pid==130 or
Cuts::pid==310 or
Cuts::abspid==323)) {
double xp = p.p3().mod()/meanBeamMom;
- if(abs(p.pdgId())==323)
- _h_Ks_x->fill(xp,weight);
+ if(abs(p.pid())==323)
+ _h_Ks_x->fill(xp);
else {
- _h_K0_x->fill(xp,weight);
- _h_K0_xi->fill(-log(xp),weight);
+ _h_K0_x->fill(xp);
+ _h_K0_xi->fill(-log(xp));
}
}
}
/// Normalise histograms etc., after the run
void finalize() {
scale(_h_K0_x, 1./sumOfWeights());
scale(_h_K0_xi, 1./sumOfWeights());
scale(_h_Ks_x, 1./sumOfWeights());
}
//@}
/// @name Histograms
//@{
Histo1DPtr _h_K0_x,_h_K0_xi,_h_Ks_x;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(DELPHI_1995_I377487);
}
diff --git a/analyses/pluginLEP/DELPHI_1995_I394052.cc b/analyses/pluginLEP/DELPHI_1995_I394052.cc
--- a/analyses/pluginLEP/DELPHI_1995_I394052.cc
+++ b/analyses/pluginLEP/DELPHI_1995_I394052.cc
@@ -1,96 +1,93 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/Beam.hh"
namespace Rivet {
/// @brief K+/- and proton spectra
class DELPHI_1995_I394052 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(DELPHI_1995_I394052);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
declare(Beam(), "Beams");
declare(ChargedFinalState(), "FS");
// Book histograms
- _h_kaon_p = bookHisto1D(3, 1, 1);
- _h_kaon_x = bookHisto1D(5, 1, 1);
- _h_proton_p = bookHisto1D(4, 1, 1);
- _h_proton_x = bookHisto1D(6, 1, 1);
+ book(_h_kaon_p , 3, 1, 1);
+ book(_h_kaon_x , 5, 1, 1);
+ book(_h_proton_p , 4, 1, 1);
+ book(_h_proton_x , 6, 1, 1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// First, veto on leptonic events by requiring at least 4 charged FS particles
const FinalState& fs = apply<FinalState>(event, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) {
MSG_DEBUG("Failed leptonic event cut");
vetoEvent;
}
MSG_DEBUG("Passed leptonic event cut");
- // Get event weight for histo filling
- const double weight = event.weight();
-
// Get beams and average beam momentum
const ParticlePair& beams = apply<Beam>(event, "Beams").beams();
const double meanBeamMom = ( beams.first.p3().mod() +
beams.second.p3().mod() ) / 2.0;
MSG_DEBUG("Avg beam momentum = " << meanBeamMom);
- foreach (const Particle& p, fs.particles(Cuts::abspid==321 or
+ for (const Particle& p : fs.particles(Cuts::abspid==321 or
Cuts::abspid==2212)) {
double modp = p.p3().mod();
double xp = modp/meanBeamMom;
- if(abs(p.pdgId())==321) {
- _h_kaon_p ->fill(modp,weight);
- _h_kaon_x ->fill(xp ,weight);
+ if(abs(p.pid())==321) {
+ _h_kaon_p ->fill(modp);
+ _h_kaon_x ->fill(xp );
}
else {
- _h_proton_p->fill(modp,weight);
- _h_proton_x->fill(xp ,weight);
+ _h_proton_p->fill(modp);
+ _h_proton_x->fill(xp );
}
}
}
/// Normalise histograms etc., after the run
void finalize() {
scale(_h_kaon_p ,1./sumOfWeights());
scale(_h_kaon_x ,1./sumOfWeights());
scale(_h_proton_p,1./sumOfWeights());
scale(_h_proton_x,1./sumOfWeights());
}
//@}
/// @name Histograms
//@{
Histo1DPtr _h_kaon_p, _h_kaon_x, _h_proton_p, _h_proton_x;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(DELPHI_1995_I394052);
}
diff --git a/analyses/pluginLEP/DELPHI_1995_I399737.cc b/analyses/pluginLEP/DELPHI_1995_I399737.cc
--- a/analyses/pluginLEP/DELPHI_1995_I399737.cc
+++ b/analyses/pluginLEP/DELPHI_1995_I399737.cc
@@ -1,90 +1,87 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/UnstableParticles.hh"
#include "Rivet/Projections/Beam.hh"
namespace Rivet {
/// @brief Delta++ spectrum
class DELPHI_1995_I399737 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(DELPHI_1995_I399737);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
declare(Beam(), "Beams");
declare(ChargedFinalState(), "FS");
declare(UnstableParticles(), "UFS");
// Book histograms
- _h_delta = bookHisto1D(1, 1, 1);
+ book(_h_delta, 1, 1, 1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// First, veto on leptonic events by requiring at least 4 charged FS particles
const FinalState& fs = apply<FinalState>(event, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) {
MSG_DEBUG("Failed leptonic event cut");
vetoEvent;
}
MSG_DEBUG("Passed leptonic event cut");
- // Get event weight for histo filling
- const double weight = event.weight();
-
// Get beams and average beam momentum
const ParticlePair& beams = apply<Beam>(event, "Beams").beams();
const double meanBeamMom = ( beams.first.p3().mod() +
beams.second.p3().mod() ) / 2.0;
MSG_DEBUG("Avg beam momentum = " << meanBeamMom);
// Final state of unstable particles to get particle spectra
const UnstableParticles& ufs = apply<UnstableFinalState>(event, "UFS");
- foreach (const Particle& p, ufs.particles(Cuts::abspid==2224)) {
+ for (const Particle& p : ufs.particles(Cuts::abspid==2224)) {
double xp = p.p3().mod()/meanBeamMom;
- _h_delta->fill(xp, weight);
+ _h_delta->fill(xp);
}
}
/// Normalise histograms etc., after the run
void finalize() {
scale(_h_delta, 1./sumOfWeights());
}
//@}
/// @name Histograms
//@{
Histo1DPtr _h_delta;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(DELPHI_1995_I399737);
}
diff --git a/analyses/pluginLEP/DELPHI_1996_I401100.cc b/analyses/pluginLEP/DELPHI_1996_I401100.cc
--- a/analyses/pluginLEP/DELPHI_1996_I401100.cc
+++ b/analyses/pluginLEP/DELPHI_1996_I401100.cc
@@ -1,121 +1,118 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/InitialQuarks.hh"
#include "Rivet/Projections/UnstableParticles.hh"
namespace Rivet {
/// @brief pi0 spectrum
class DELPHI_1996_I401100 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(DELPHI_1996_I401100);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
declare(Beam(), "Beams");
declare(ChargedFinalState(), "FS");
declare(UnstableParticles(), "UFS");
declare(InitialQuarks(), "IQF");
// Book histograms
- _h_pi_all = bookHisto1D(1, 1, 1);
- _h_pi_bot = bookHisto1D(3, 1, 1);
+ book(_h_pi_all, 1, 1, 1);
+ book(_h_pi_bot, 3, 1, 1);
- _wAll=0.;
- _wBot=0.;
+ book(_wAll,"TMP/wAll");
+ book(_wBot,"TMP/wBot");
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// First, veto on leptonic events by requiring at least 4 charged FS particles
const FinalState& fs = apply<FinalState>(event, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) {
MSG_DEBUG("Failed leptonic event cut");
vetoEvent;
}
MSG_DEBUG("Passed leptonic event cut");
- // Get event weight for histo filling
- const double weight = event.weight();
-
int flavour = 0;
const InitialQuarks& iqf = apply<InitialQuarks>(event, "IQF");
// If we only have two quarks (qqbar), just take the flavour.
// If we have more than two quarks, look for the highest energetic q-qbar pair.
if (iqf.particles().size() == 2) {
flavour = iqf.particles().front().abspid();
}
else {
map<int, double> quarkmap;
- foreach (const Particle& p, iqf.particles()) {
+ for (const Particle& p : iqf.particles()) {
if (quarkmap[p.pid()] < p.E()) {
quarkmap[p.pid()] = p.E();
}
}
double maxenergy = 0.;
for (int i = 1; i <= 5; ++i) {
if (quarkmap[i]+quarkmap[-i] > maxenergy) {
flavour = i;
}
}
}
- _wAll += weight;
- if(flavour==5) _wBot += weight;
+ _wAll->fill();
+ if(flavour==5) _wBot->fill();
// Get beams and average beam momentum
const ParticlePair& beams = apply<Beam>(event, "Beams").beams();
const double meanBeamMom = ( beams.first.p3().mod() +
beams.second.p3().mod() ) / 2.0;
MSG_DEBUG("Avg beam momentum = " << meanBeamMom);
// Final state of unstable particles to get particle spectra
const UnstableParticles& ufs = apply<UnstableFinalState>(event, "UFS");
- foreach (const Particle& p, ufs.particles(Cuts::pid==PID::PI0)) {
+ for (const Particle& p : ufs.particles(Cuts::pid==PID::PI0)) {
double xp = p.p3().mod()/meanBeamMom;
- _h_pi_all->fill(xp,weight);
- if(flavour==5) _h_pi_bot->fill(xp,weight);
+ _h_pi_all->fill(xp);
+ if(flavour==5) _h_pi_bot->fill(xp);
}
}
/// Normalise histograms etc., after the run
void finalize() {
- scale(_h_pi_all, 1./_wAll);
- scale(_h_pi_bot, 1./_wBot);
+ scale(_h_pi_all, 1./ *_wAll);
+ scale(_h_pi_bot, 1./ *_wBot);
}
//@}
/// @name Histograms
//@{
Histo1DPtr _h_pi_all, _h_pi_bot;
- double _wAll,_wBot;
+ CounterPtr _wAll,_wBot;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(DELPHI_1996_I401100);
}
diff --git a/analyses/pluginLEP/DELPHI_1996_I416741.cc b/analyses/pluginLEP/DELPHI_1996_I416741.cc
--- a/analyses/pluginLEP/DELPHI_1996_I416741.cc
+++ b/analyses/pluginLEP/DELPHI_1996_I416741.cc
@@ -1,88 +1,85 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/UnstableParticles.hh"
#include "Rivet/Projections/Beam.hh"
namespace Rivet {
/// @brief f'2 spectrum
class DELPHI_1996_I416741 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(DELPHI_1996_I416741);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
declare(Beam(), "Beams");
declare(ChargedFinalState(), "FS");
declare(UnstableParticles(), "UFS");
// Book histograms
- _h_f2prime = bookHisto1D(1, 1, 1);
+ book(_h_f2prime, 1, 1, 1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// First, veto on leptonic events by requiring at least 4 charged FS particles
const FinalState& fs = apply<FinalState>(event, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) {
MSG_DEBUG("Failed leptonic event cut");
vetoEvent;
}
MSG_DEBUG("Passed leptonic event cut");
- // Get event weight for histo filling
- const double weight = event.weight();
-
// Get beams and average beam momentum
const ParticlePair& beams = apply<Beam>(event, "Beams").beams();
const double meanBeamMom = ( beams.first.p3().mod() +
beams.second.p3().mod() ) / 2.0;
MSG_DEBUG("Avg beam momentum = " << meanBeamMom);
// Final state of unstable particles to get particle spectra
const UnstableParticles& ufs = apply<UnstableFinalState>(event, "UFS");
- foreach (const Particle& p, ufs.particles(Cuts::abspid==335)) {
+ for (const Particle& p : ufs.particles(Cuts::abspid==335)) {
double xp = p.p3().mod()/meanBeamMom;
- _h_f2prime->fill(xp, weight);
+ _h_f2prime->fill(xp);
}
}
/// Normalise histograms etc., after the run
void finalize() {
scale(_h_f2prime, 1./sumOfWeights());
}
//@}
/// @name Histograms
//@{
Histo1DPtr _h_f2prime;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(DELPHI_1996_I416741);
}
diff --git a/analyses/pluginLEP/DELPHI_1996_I420528.cc b/analyses/pluginLEP/DELPHI_1996_I420528.cc
--- a/analyses/pluginLEP/DELPHI_1996_I420528.cc
+++ b/analyses/pluginLEP/DELPHI_1996_I420528.cc
@@ -1,97 +1,94 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/UnstableParticles.hh"
namespace Rivet {
/// @brief K*0 and Phi production
class DELPHI_1996_I420528 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(DELPHI_1996_I420528);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
declare(Beam(), "Beams");
declare(ChargedFinalState(), "FS");
declare(UnstableParticles(), "UFS");
// Book histograms
- _h_Kstar = bookHisto1D(1, 1, 1);
- _h_phi = bookHisto1D(3, 1, 1);
+ book(_h_Kstar,1, 1, 1);
+ book(_h_phi ,3, 1, 1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// First, veto on leptonic events by requiring at least 4 charged FS particles
const FinalState& fs = apply<FinalState>(event, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) {
MSG_DEBUG("Failed leptonic event cut");
vetoEvent;
}
MSG_DEBUG("Passed leptonic event cut");
- // Get event weight for histo filling
- const double weight = event.weight();
-
// Get beams and average beam momentum
const ParticlePair& beams = apply<Beam>(event, "Beams").beams();
const double meanBeamMom = ( beams.first.p3().mod() +
beams.second.p3().mod() ) / 2.0;
MSG_DEBUG("Avg beam momentum = " << meanBeamMom);
// Final state of unstable particles to get particle spectra
const UnstableParticles& ufs = apply<UnstableFinalState>(event, "UFS");
- foreach (const Particle& p, ufs.particles(Cuts::abspid==313 or Cuts::pid==333)) {
+ for (const Particle& p : ufs.particles(Cuts::abspid==313 or Cuts::pid==333)) {
const int id = p.abspid();
double xp = p.p3().mod()/meanBeamMom;
switch (id) {
case 333:
- _h_phi->fill(xp, weight);
+ _h_phi->fill(xp);
break;
case 313:
- _h_Kstar->fill(xp, weight);
+ _h_Kstar->fill(xp);
break;
}
}
}
/// Normalise histograms etc., after the run
void finalize() {
scale(_h_Kstar , 1./sumOfWeights());
scale(_h_phi , 1./sumOfWeights());
}
//@}
/// @name Histograms
//@{
Histo1DPtr _h_Kstar, _h_phi;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(DELPHI_1996_I420528);
}
diff --git a/analyses/pluginLEP/DELPHI_1997_I428178.cc b/analyses/pluginLEP/DELPHI_1997_I428178.cc
--- a/analyses/pluginLEP/DELPHI_1997_I428178.cc
+++ b/analyses/pluginLEP/DELPHI_1997_I428178.cc
@@ -1,117 +1,116 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/InitialQuarks.hh"
namespace Rivet {
/// @brief charged particle spectra
class DELPHI_1997_I428178 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(DELPHI_1997_I428178);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
declare(Beam(), "Beams");
declare(ChargedFinalState(), "FS");
declare(InitialQuarks(), "IQF");
// Book histograms
- _h_bottom = bookHisto1D(1, 1, 1);
- _h_charm = bookHisto1D(1, 1, 2);
- _h_light = bookHisto1D(1, 1, 3);
+ book(_h_bottom, 1, 1, 1);
+ book(_h_charm , 1, 1, 2);
+ book(_h_light , 1, 1, 3);
- _wBottom = _wCharm = _wLight = 0.;
+ book(_wBottom,"TMP/wBottom");
+ book(_wCharm ,"TMP/wCharm" );
+ book(_wLight ,"TMP/wLight" );
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// First, veto on leptonic events by requiring at least 4 charged FS particles
const FinalState& fs = apply<FinalState>(event, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) {
MSG_DEBUG("Failed leptonic event cut");
vetoEvent;
}
MSG_DEBUG("Passed leptonic event cut");
- // Get event weight for histo filling
- const double weight = event.weight();
-
int flavour = 0;
const InitialQuarks& iqf = apply<InitialQuarks>(event, "IQF");
// If we only have two quarks (qqbar), just take the flavour.
// If we have more than two quarks, look for the highest energetic q-qbar pair.
if (iqf.particles().size() == 2) {
flavour = iqf.particles().front().abspid();
}
else {
map<int, double> quarkmap;
- foreach (const Particle& p, iqf.particles()) {
+ for (const Particle& p : iqf.particles()) {
if (quarkmap[p.pid()] < p.E()) {
quarkmap[p.pid()] = p.E();
}
}
double maxenergy = 0.;
for (int i = 1; i <= 5; ++i) {
if (quarkmap[i]+quarkmap[-i] > maxenergy) {
flavour = i;
}
}
}
- if (flavour==5) _wBottom += weight;
- else if(flavour==4) _wCharm += weight;
- else _wLight += weight;
+ if (flavour==5) _wBottom->fill();
+ else if(flavour==4) _wCharm ->fill();
+ else _wLight ->fill();
// Get beams and average beam momentum
const ParticlePair& beams = apply<Beam>(event, "Beams").beams();
const double meanBeamMom = ( beams.first.p3().mod() +
beams.second.p3().mod() ) / 2.0;
MSG_DEBUG("Avg beam momentum = " << meanBeamMom);
- foreach (const Particle& p, fs.particles()) {
+ for (const Particle& p : fs.particles()) {
double xp = p.p3().mod()/meanBeamMom;
- if (flavour==5) _h_bottom->fill(xp,weight);
- else if(flavour==4) _h_charm ->fill(xp,weight);
- else _h_light ->fill(xp,weight);
+ if (flavour==5) _h_bottom->fill(xp);
+ else if(flavour==4) _h_charm ->fill(xp);
+ else _h_light ->fill(xp);
}
}
/// Normalise histograms etc., after the run
void finalize() {
- scale(_h_bottom, 1./_wBottom);
- scale(_h_charm , 1./_wCharm);
- scale(_h_light , 1./_wLight);
+ scale(_h_bottom, 1./ *_wBottom);
+ scale(_h_charm , 1./ *_wCharm);
+ scale(_h_light , 1./ *_wLight);
}
//@}
/// @name Histograms
//@{
Histo1DPtr _h_bottom, _h_charm, _h_light;
- double _wBottom, _wCharm, _wLight;
+ CounterPtr _wBottom, _wCharm, _wLight;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(DELPHI_1997_I428178);
}
diff --git a/analyses/pluginLEP/DELPHI_1998_I473409.cc b/analyses/pluginLEP/DELPHI_1998_I473409.cc
--- a/analyses/pluginLEP/DELPHI_1998_I473409.cc
+++ b/analyses/pluginLEP/DELPHI_1998_I473409.cc
@@ -1,324 +1,338 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/InitialQuarks.hh"
namespace Rivet {
/// @brief flavour separated pi, K, p spectra
class DELPHI_1998_I473409 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(DELPHI_1998_I473409);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
// Initialise and register projections
declare(Beam(), "Beams");
declare(ChargedFinalState(), "FS");
- declare(InitialQuarks(), "IQF");
+ declare(InitialQuarks(), "IQF");
+ // Book histograms
+ book(_h_all_pi, "TMP/h_all_pi",refData( 4,1,1));
+ book(_h_all_K , "TMP/h_all_K ",refData( 5,1,1));
+ book(_h_all_p , "TMP/h_all_p ",refData( 6,1,1));
+ book(_h_all_Kp, "TMP/h_all_Kp",refData( 7,1,1));
+ book(_d_all , "TMP/d_all ",refData( 4,1,1));
+
+ book(_h_bot_pi, "TMP/h_bot_pi",refData( 8,1,1));
+ book(_h_bot_K , "TMP/h_bot_K ",refData( 9,1,1));
+ book(_h_bot_p , "TMP/h_bot_p ",refData(10,1,1));
+ book(_h_bot_Kp, "TMP/h_bot_Kp",refData(11,1,1));
+ book(_d_bot , "TMP/d_bot ",refData( 8,1,1));
+
+ book(_h_lgt_pi, "TMP/h_lgt_pi",refData(12,1,1));
+ book(_h_lgt_K , "TMP/h_lgt_K ",refData(13,1,1));
+ book(_h_lgt_p , "TMP/h_lgt_p ",refData(14,1,1));
+ book(_h_lgt_Kp, "TMP/h_lgt_Kp",refData(15,1,1));
+ book(_d_lgt , "TMP/d_lgt ",refData(12,1,1));
- // Book histograms
- _h_all_pi = std::make_shared<YODA::Histo1D>(Histo1D(refData( 4,1,1)));
- _h_all_K = std::make_shared<YODA::Histo1D>(Histo1D(refData( 5,1,1)));
- _h_all_p = std::make_shared<YODA::Histo1D>(Histo1D(refData( 6,1,1)));
- _h_all_Kp = std::make_shared<YODA::Histo1D>(Histo1D(refData( 7,1,1)));
- _d_all = std::make_shared<YODA::Histo1D>(Histo1D(refData( 4,1,1)));
-
- _h_bot_pi = std::make_shared<YODA::Histo1D>(Histo1D(refData( 8,1,1)));
- _h_bot_K = std::make_shared<YODA::Histo1D>(Histo1D(refData( 9,1,1)));
- _h_bot_p = std::make_shared<YODA::Histo1D>(Histo1D(refData(10,1,1)));
- _h_bot_Kp = std::make_shared<YODA::Histo1D>(Histo1D(refData(11,1,1)));
- _d_bot = std::make_shared<YODA::Histo1D>(Histo1D(refData( 8,1,1)));
-
- _h_lgt_pi = std::make_shared<YODA::Histo1D>(Histo1D(refData(12,1,1)));
- _h_lgt_K = std::make_shared<YODA::Histo1D>(Histo1D(refData(13,1,1)));
- _h_lgt_p = std::make_shared<YODA::Histo1D>(Histo1D(refData(14,1,1)));
- _h_lgt_Kp = std::make_shared<YODA::Histo1D>(Histo1D(refData(15,1,1)));
- _d_lgt = std::make_shared<YODA::Histo1D>(Histo1D(refData(12,1,1)));
-
- _h_all_ch_p = bookHisto1D(16,1,1);
- _h_all_ch_x = bookHisto1D(17,1,1);
- _h_all_pi_p = bookHisto1D(18,1,1);
- _h_all_pi_x = bookHisto1D(19,1,1);
- _h_all_K_p = bookHisto1D(20,1,1);
- _h_all_k_x = bookHisto1D(21,1,1);
- _h_all_p_p = bookHisto1D(22,1,1);
- _h_all_p_x = bookHisto1D(23,1,1);
-
- _h_bot_ch_p = bookHisto1D(24,1,1);
- _h_bot_ch_x = bookHisto1D(25,1,1);
- _h_bot_pi_p = bookHisto1D(26,1,1);
- _h_bot_pi_x = bookHisto1D(27,1,1);
- _h_bot_K_p = bookHisto1D(28,1,1);
- _h_bot_k_x = bookHisto1D(29,1,1);
- _h_bot_p_p = bookHisto1D(30,1,1);
- _h_bot_p_x = bookHisto1D(31,1,1);
-
- _h_lgt_ch_p = bookHisto1D(32,1,1);
- _h_lgt_ch_x = bookHisto1D(33,1,1);
- _h_lgt_pi_p = bookHisto1D(34,1,1);
- _h_lgt_pi_x = bookHisto1D(35,1,1);
- _h_lgt_K_p = bookHisto1D(36,1,1);
- _h_lgt_k_x = bookHisto1D(37,1,1);
- _h_lgt_p_p = bookHisto1D(38,1,1);
- _h_lgt_p_x = bookHisto1D(39,1,1);
+ book(_h_all_ch_p, 16,1,1);
+ book(_h_all_ch_x, 17,1,1);
+ book(_h_all_pi_p, 18,1,1);
+ book(_h_all_pi_x, 19,1,1);
+ book(_h_all_K_p , 20,1,1);
+ book(_h_all_k_x , 21,1,1);
+ book(_h_all_p_p , 22,1,1);
+ book(_h_all_p_x , 23,1,1);
+
+ book(_h_bot_ch_p, 24,1,1);
+ book(_h_bot_ch_x, 25,1,1);
+ book(_h_bot_pi_p, 26,1,1);
+ book(_h_bot_pi_x, 27,1,1);
+ book(_h_bot_K_p , 28,1,1);
+ book(_h_bot_k_x , 29,1,1);
+ book(_h_bot_p_p , 30,1,1);
+ book(_h_bot_p_x , 31,1,1);
+
+ book(_h_lgt_ch_p, 32,1,1);
+ book(_h_lgt_ch_x, 33,1,1);
+ book(_h_lgt_pi_p, 34,1,1);
+ book(_h_lgt_pi_x, 35,1,1);
+ book(_h_lgt_K_p , 36,1,1);
+ book(_h_lgt_k_x , 37,1,1);
+ book(_h_lgt_p_p , 38,1,1);
+ book(_h_lgt_p_x , 39,1,1);
for(unsigned int ix=0;ix<3;++ix) {
for(unsigned int iy=0;iy<5;++iy) {
- ostringstream title;
+ std::ostringstream title;
title << "/TMP/MULT_" << ix << "_" << iy;
- _mult[ix][iy] = bookCounter(title.str());
+ book(_mult[ix][iy],title.str());
}
}
- _wLgt = _wBot = _wAll =0.;
+ book(_wLgt,"TMP/wLgt");
+ book(_wBot,"TMP/wBot");
+ book(_wAll,"TMP/wAll");
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// First, veto on leptonic events by requiring at least 4 charged FS particles
const FinalState& fs = apply<ChargedFinalState>(event, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) {
MSG_DEBUG("Failed leptonic event cut");
vetoEvent;
}
MSG_DEBUG("Passed leptonic event cut");
int flavour = 0;
const InitialQuarks& iqf = apply<InitialQuarks>(event, "IQF");
// If we only have two quarks (qqbar), just take the flavour.
// If we have more than two quarks, look for the highest energetic q-qbar pair.
if (iqf.particles().size() == 2) {
flavour = iqf.particles().front().abspid();
}
else {
map<int, double> quarkmap;
- foreach (const Particle& p, iqf.particles()) {
+ for (const Particle& p : iqf.particles()) {
if (quarkmap[p.pid()] < p.E()) {
quarkmap[p.pid()] = p.E();
}
}
double maxenergy = 0.;
for (int i = 1; i <= 5; ++i) {
if (quarkmap[i]+quarkmap[-i] > maxenergy) {
flavour = i;
}
}
}
// Get event weight for histo filling
- const double weight = event.weight();
- _wAll += weight;
- if(flavour<=3) _wLgt += weight;
- else if(flavour==5) _wBot += weight;
+ _wAll->fill();
+ if(flavour<=3) _wLgt->fill();
+ else if(flavour==5) _wBot->fill();
// Get beams and average beam momentum
const ParticlePair& beams = apply<Beam>(event, "Beams").beams();
const double meanBeamMom = ( beams.first.p3().mod() +
beams.second.p3().mod() ) / 2.0;
MSG_DEBUG("Avg beam momentum = " << meanBeamMom);
// loop over the charged particles
- foreach (const Particle& p, fs.particles()) {
+ for (const Particle& p : fs.particles()) {
double modp = p.p3().mod();
double xp = modp/meanBeamMom;
- int id = abs(p.pdgId());
- _d_all->fill(modp,weight);
- _mult[0][0]->fill(weight);
- _h_all_ch_p->fill(modp,weight);
- _h_all_ch_x->fill(xp ,weight);
+ int id = abs(p.pid());
+ _d_all->fill(modp);
+ _mult[0][0]->fill();
+ _h_all_ch_p->fill(modp);
+ _h_all_ch_x->fill(xp );
if(flavour<=3) {
- _d_lgt->fill(modp,weight);
- _mult[2][0]->fill(weight);
- _h_lgt_ch_p->fill(modp,weight);
- _h_lgt_ch_x->fill(xp ,weight);
+ _d_lgt->fill(modp);
+ _mult[2][0]->fill();
+ _h_lgt_ch_p->fill(modp);
+ _h_lgt_ch_x->fill(xp );
}
else if(flavour==5) {
- _d_bot ->fill(modp,weight);
- _mult[1][0]->fill(weight);
- _h_bot_ch_p->fill(modp,weight);
- _h_bot_ch_x->fill(xp ,weight);
+ _d_bot ->fill(modp);
+ _mult[1][0]->fill();
+ _h_bot_ch_p->fill(modp);
+ _h_bot_ch_x->fill(xp );
}
if(id==211) {
- _h_all_pi ->fill(modp,weight);
- _mult[0][1]->fill(weight);
- _h_all_pi_p->fill(modp,weight);
- _h_all_pi_x->fill(xp ,weight);
+ _h_all_pi ->fill(modp);
+ _mult[0][1]->fill();
+ _h_all_pi_p->fill(modp);
+ _h_all_pi_x->fill(xp );
if(flavour<=3) {
- _h_lgt_pi ->fill(modp,weight);
- _mult[2][1]->fill(weight);
- _h_lgt_pi_p->fill(modp,weight);
- _h_lgt_pi_x->fill(xp ,weight);
+ _h_lgt_pi ->fill(modp);
+ _mult[2][1]->fill();
+ _h_lgt_pi_p->fill(modp);
+ _h_lgt_pi_x->fill(xp );
}
else if(flavour==5) {
- _h_bot_pi ->fill(modp,weight);
- _mult[1][1]->fill(weight);
- _h_bot_pi_p->fill(modp,weight);
- _h_bot_pi_x->fill(xp ,weight);
+ _h_bot_pi ->fill(modp);
+ _mult[1][1]->fill();
+ _h_bot_pi_p->fill(modp);
+ _h_bot_pi_x->fill(xp );
}
}
else if(id==321) {
- _h_all_K ->fill(modp,weight);
- _h_all_Kp->fill(modp,weight);
- _mult[0][2]->fill(weight);
- _mult[0][4]->fill(weight);
- _h_all_K_p ->fill(modp,weight);
- _h_all_k_x ->fill(xp ,weight);
+ _h_all_K ->fill(modp);
+ _h_all_Kp->fill(modp);
+ _mult[0][2]->fill();
+ _mult[0][4]->fill();
+ _h_all_K_p ->fill(modp);
+ _h_all_k_x ->fill(xp );
if(flavour<=3) {
- _h_lgt_K->fill(modp,weight);
- _h_lgt_Kp->fill(modp,weight);
- _mult[2][2]->fill(weight);
- _mult[2][4]->fill(weight);
- _h_lgt_K_p ->fill(modp,weight);
- _h_lgt_k_x ->fill(xp ,weight);
+ _h_lgt_K->fill(modp);
+ _h_lgt_Kp->fill(modp);
+ _mult[2][2]->fill();
+ _mult[2][4]->fill();
+ _h_lgt_K_p ->fill(modp);
+ _h_lgt_k_x ->fill(xp );
}
else if(flavour==5) {
- _h_bot_K ->fill(modp,weight);
- _h_bot_Kp->fill(modp,weight);
- _mult[1][2]->fill(weight);
- _mult[1][4]->fill(weight);
- _h_bot_K_p ->fill(modp,weight);
- _h_bot_k_x ->fill(xp ,weight);
+ _h_bot_K ->fill(modp);
+ _h_bot_Kp->fill(modp);
+ _mult[1][2]->fill();
+ _mult[1][4]->fill();
+ _h_bot_K_p ->fill(modp);
+ _h_bot_k_x ->fill(xp );
}
}
else if(id==2212) {
- _h_all_p ->fill(modp,weight);
- _h_all_Kp->fill(modp,weight);
- _mult[0][3]->fill(weight);
- _mult[0][4]->fill(weight);
- _h_all_p_p ->fill(modp,weight);
- _h_all_p_x ->fill(xp ,weight);
+ _h_all_p ->fill(modp);
+ _h_all_Kp->fill(modp);
+ _mult[0][3]->fill();
+ _mult[0][4]->fill();
+ _h_all_p_p ->fill(modp);
+ _h_all_p_x ->fill(xp );
if(flavour<=3) {
- _h_lgt_p ->fill(modp,weight);
- _h_lgt_Kp->fill(modp,weight);
- _mult[2][3]->fill(weight);
- _mult[2][4]->fill(weight);
- _h_lgt_p_p ->fill(modp,weight);
- _h_lgt_p_x ->fill(xp ,weight);
+ _h_lgt_p ->fill(modp);
+ _h_lgt_Kp->fill(modp);
+ _mult[2][3]->fill();
+ _mult[2][4]->fill();
+ _h_lgt_p_p ->fill(modp);
+ _h_lgt_p_x ->fill(xp );
}
else if(flavour==5) {
- _h_bot_p ->fill(modp,weight);
- _h_bot_Kp->fill(modp,weight);
- _mult[1][3]->fill(weight);
- _mult[1][4]->fill(weight);
- _h_bot_p_p ->fill(modp,weight);
- _h_bot_p_x ->fill(xp ,weight);
+ _h_bot_p ->fill(modp);
+ _h_bot_Kp->fill(modp);
+ _mult[1][3]->fill();
+ _mult[1][4]->fill();
+ _h_bot_p_p ->fill(modp);
+ _h_bot_p_x ->fill(xp );
}
}
}
}
/// Normalise histograms etc., after the run
void finalize() {
// // Book histograms
scale(_h_all_pi,100.);
scale(_h_all_K ,100.);
scale(_h_all_p ,100.);
scale(_h_all_Kp,100.);
- divide(_h_all_pi, _d_all, bookScatter2D( 4,1,1));
- divide(_h_all_K , _d_all, bookScatter2D( 5,1,1));
- divide(_h_all_p , _d_all, bookScatter2D( 6,1,1));
- divide(_h_all_Kp, _d_all, bookScatter2D( 7,1,1));
+ Scatter2DPtr temp;
+ book(temp,4,1,1);
+ divide(_h_all_pi, _d_all, temp);
+ book(temp,5,1,1);
+ divide(_h_all_K , _d_all, temp);
+ book(temp,6,1,1);
+ divide(_h_all_p , _d_all, temp);
+ book(temp,7,1,1);
+ divide(_h_all_Kp, _d_all, temp);
scale(_h_bot_pi,100.);
scale(_h_bot_K ,100.);
scale(_h_bot_p ,100.);
scale(_h_bot_Kp,100.);
- divide(_h_bot_pi, _d_bot, bookScatter2D( 8,1,1));
- divide(_h_bot_K , _d_bot, bookScatter2D( 9,1,1));
- divide(_h_bot_p , _d_bot, bookScatter2D(10,1,1));
- divide(_h_bot_Kp, _d_bot, bookScatter2D(11,1,1));
+ book(temp, 8,1,1);
+ divide(_h_bot_pi, _d_bot, temp);
+ book(temp, 9,1,1);
+ divide(_h_bot_K , _d_bot, temp);
+ book(temp,10,1,1);
+ divide(_h_bot_p , _d_bot, temp);
+ book(temp,11,1,1);
+ divide(_h_bot_Kp, _d_bot, temp);
scale(_h_lgt_pi,100.);
scale(_h_lgt_K ,100.);
scale(_h_lgt_p ,100.);
scale(_h_lgt_Kp,100.);
- divide(_h_lgt_pi, _d_lgt, bookScatter2D(12,1,1));
- divide(_h_lgt_K , _d_lgt, bookScatter2D(13,1,1));
- divide(_h_lgt_p , _d_lgt, bookScatter2D(14,1,1));
- divide(_h_lgt_Kp, _d_lgt, bookScatter2D(15,1,1));
+ book(temp,12,1,1);
+ divide(_h_lgt_pi, _d_lgt, temp);
+ book(temp,13,1,1);
+ divide(_h_lgt_K , _d_lgt, temp);
+ book(temp,14,1,1);
+ divide(_h_lgt_p , _d_lgt, temp);
+ book(temp,15,1,1);
+ divide(_h_lgt_Kp, _d_lgt, temp);
- scale(_h_all_ch_p, 1./_wAll);
- scale(_h_all_ch_x, 1./_wAll);
- scale(_h_all_pi_p, 1./_wAll);
- scale(_h_all_pi_x, 1./_wAll);
- scale(_h_all_K_p , 1./_wAll);
- scale(_h_all_k_x , 1./_wAll);
- scale(_h_all_p_p , 1./_wAll);
- scale(_h_all_p_x , 1./_wAll);
+ scale(_h_all_ch_p, 1./ *_wAll);
+ scale(_h_all_ch_x, 1./ *_wAll);
+ scale(_h_all_pi_p, 1./ *_wAll);
+ scale(_h_all_pi_x, 1./ *_wAll);
+ scale(_h_all_K_p , 1./ *_wAll);
+ scale(_h_all_k_x , 1./ *_wAll);
+ scale(_h_all_p_p , 1./ *_wAll);
+ scale(_h_all_p_x , 1./ *_wAll);
- scale(_h_bot_ch_p, 1./_wBot);
- scale(_h_bot_ch_x, 1./_wBot);
- scale(_h_bot_pi_p, 1./_wBot);
- scale(_h_bot_pi_x, 1./_wBot);
- scale(_h_bot_K_p , 1./_wBot);
- scale(_h_bot_k_x , 1./_wBot);
- scale(_h_bot_p_p , 1./_wBot);
- scale(_h_bot_p_x , 1./_wBot);
+ scale(_h_bot_ch_p, 1./ *_wBot);
+ scale(_h_bot_ch_x, 1./ *_wBot);
+ scale(_h_bot_pi_p, 1./ *_wBot);
+ scale(_h_bot_pi_x, 1./ *_wBot);
+ scale(_h_bot_K_p , 1./ *_wBot);
+ scale(_h_bot_k_x , 1./ *_wBot);
+ scale(_h_bot_p_p , 1./ *_wBot);
+ scale(_h_bot_p_x , 1./ *_wBot);
- scale(_h_lgt_ch_p, 1./_wLgt);
- scale(_h_lgt_ch_x, 1./_wLgt);
- scale(_h_lgt_pi_p, 1./_wLgt);
- scale(_h_lgt_pi_x, 1./_wLgt);
- scale(_h_lgt_K_p , 1./_wLgt);
- scale(_h_lgt_k_x , 1./_wLgt);
- scale(_h_lgt_p_p , 1./_wLgt);
- scale(_h_lgt_p_x , 1./_wLgt);
+ scale(_h_lgt_ch_p, 1./ *_wLgt);
+ scale(_h_lgt_ch_x, 1./ *_wLgt);
+ scale(_h_lgt_pi_p, 1./ *_wLgt);
+ scale(_h_lgt_pi_x, 1./ *_wLgt);
+ scale(_h_lgt_K_p , 1./ *_wLgt);
+ scale(_h_lgt_k_x , 1./ *_wLgt);
+ scale(_h_lgt_p_p , 1./ *_wLgt);
+ scale(_h_lgt_p_x , 1./ *_wLgt);
// multiplicities
- vector<double> scales = {_wAll,_wBot,_wLgt};
+ vector<CounterPtr> scales = {_wAll,_wBot,_wLgt};
for(unsigned int ix=0;ix<3;++ix) {
- if(scales[ix]<=0.) continue;
+ if(scales[ix]->effNumEntries()<=0.) continue;
for(unsigned int iy=0;iy<5;++iy) {
- Scatter2DPtr scatter = bookScatter2D(ix+1, 1, iy+1, true);
- scale(_mult[ix][iy],1./scales[ix]);
+ Scatter2DPtr scatter;
+ book(scatter, ix+1, 1, iy+1, true);
+ scale(_mult[ix][iy],1./ *scales[ix]);
scatter->point(0).setY(_mult[ix][iy]->val(),_mult[ix][iy]->err());
}
}
}
//@}
/// @name Histograms
//@{
Histo1DPtr _h_all_pi , _h_all_K , _h_all_p , _h_all_Kp , _d_all;
Histo1DPtr _h_bot_pi , _h_bot_K , _h_bot_p , _h_bot_Kp , _d_bot;
Histo1DPtr _h_lgt_pi , _h_lgt_K , _h_lgt_p , _h_lgt_Kp , _d_lgt;
Histo1DPtr _h_all_ch_p, _h_all_ch_x , _h_all_pi_p , _h_all_pi_x ;
Histo1DPtr _h_all_K_p , _h_all_k_x , _h_all_p_p , _h_all_p_x ;
Histo1DPtr _h_bot_ch_p , _h_bot_ch_x , _h_bot_pi_p , _h_bot_pi_x;
Histo1DPtr _h_bot_K_p , _h_bot_k_x , _h_bot_p_p , _h_bot_p_x ;
Histo1DPtr _h_lgt_ch_p , _h_lgt_ch_x , _h_lgt_pi_p , _h_lgt_pi_x;
Histo1DPtr _h_lgt_K_p , _h_lgt_k_x , _h_lgt_p_p , _h_lgt_p_x ;
CounterPtr _mult[3][5];
- double _wLgt, _wBot, _wAll;
+ CounterPtr _wLgt, _wBot, _wAll;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(DELPHI_1998_I473409);
}
diff --git a/analyses/pluginLEP/DELPHI_2003_I620250.cc b/analyses/pluginLEP/DELPHI_2003_I620250.cc
--- a/analyses/pluginLEP/DELPHI_2003_I620250.cc
+++ b/analyses/pluginLEP/DELPHI_2003_I620250.cc
@@ -1,285 +1,285 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/Thrust.hh"
#include "Rivet/Projections/Sphericity.hh"
#include "Rivet/Projections/Hemispheres.hh"
#include "Rivet/Projections/ParisiTensor.hh"
namespace Rivet {
/// @brief DELPHI event shapes below the Z pole
class DELPHI_2003_I620250 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(DELPHI_2003_I620250);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
// Initialise and register projections
declare(Beam(), "Beams");
const FinalState fs;
declare(fs, "FS");
const Thrust thrust(fs);
declare(thrust, "Thrust");
declare(Sphericity(fs), "Sphericity");
declare(ParisiTensor(fs), "Parisi");
declare(Hemispheres(thrust), "Hemispheres");
// find the beam energy
unsigned int offset = 0;
int offset2 = -1;
if (fuzzyEquals(sqrtS()/GeV, 45, 1E-3)) offset = 1;
else if (fuzzyEquals(sqrtS()/GeV, 66, 1E-3)) offset = 2;
else if (fuzzyEquals(sqrtS()/GeV, 76, 1E-3)) offset = 3;
else if (fuzzyEquals(sqrtS()/GeV, 183 , 1E-3)) {
offset2= 0;
offset = 1;
}
else if (fuzzyEquals(sqrtS()/GeV, 189 , 1E-3)) {
offset2= 0;
offset = 2;
}
else if (fuzzyEquals(sqrtS()/GeV, 192 , 1E-3)) {
offset2= 0;
offset = 3;
}
else if (fuzzyEquals(sqrtS()/GeV, 196 , 1E-3)) {
offset2= 0;
offset = 4;
}
else if (fuzzyEquals(sqrtS()/GeV, 200 , 1E-3)) {
offset2= 1;
offset = 1;
}
else if (fuzzyEquals(sqrtS()/GeV, 202 , 1E-3)) {
offset2= 1;
offset = 2;
}
else if (fuzzyEquals(sqrtS()/GeV, 205 , 1E-3)) {
offset2= 1;
offset = 3;
}
else if (fuzzyEquals(sqrtS()/GeV, 207 , 1E-3)) {
offset2= 1;
offset = 4;
}
else MSG_ERROR("Beam energy not supported!");
// Book the histograms
if(offset2<0) {
book(_h_thrust, 1, 1, offset);
book(_h_major , 2, 1, offset);
book(_h_minor , 3, 1, offset);
book(_h_sphericity, 4, 1, offset);
book(_h_planarity, 5, 1, offset);
book(_h_oblateness, 6, 1, offset);
book(_h_heavy_jet_mass, 7, 1, offset);
book(_h_light_jet_mass, 9, 1, offset);
book(_h_diff_jet_mass, 10, 1, offset);
book(_h_total_jet_mass, 11, 1, offset);
book(_h_heavy_jet_mass_E, 8, 1, offset);
book(_h_total_jet_mass_E, 12, 1, offset);
book(_h_wide_broading, 13, 1, offset);
book(_h_narrow_broading, 14, 1, offset);
book(_h_total_broading, 15, 1, offset);
book(_h_diff_broading, 16, 1, offset);
book(_h_CParam, 17, 1, offset);
}
else {
book(_h_rap , 30+offset2, 1, offset);
book(_h_xi , 32+offset2, 1, offset);
book(_h_pTIn , 34+offset2, 1, offset);
book(_h_pTOut , 36+offset2, 1, offset);
book(_h_thrust , 38+offset2, 1, offset);
book(_h_major , 40+offset2, 1, offset);
book(_h_minor , 42+offset2, 1, offset);
book(_h_oblateness , 44+offset2, 1, offset);
book(_h_wide_broading , 46+offset2, 1, offset);
book(_h_total_broading , 48+offset2, 1, offset);
book(_h_diff_broading , 50+offset2, 1, offset);
book(_h_CParam , 52+offset2, 1, offset);
book(_h_DParam , 54+offset2, 1, offset);
book(_h_heavy_jet_mass , 56+offset2, 1, offset);
book(_h_heavy_jet_mass_P, 58+offset2, 1, offset);
book(_h_heavy_jet_mass_E, 60+offset2, 1, offset);
book(_h_light_jet_mass , 62+offset2, 1, offset);
book(_h_diff_jet_mass , 64+offset2, 1, offset);
book(_h_sphericity , 66+offset2, 1, offset);
book(_h_planarity , 68+offset2, 1, offset);
book(_h_aplanarity , 70+offset2, 1, offset);
}
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// Get beams and average beam momentum
const ParticlePair& beams = apply<Beam>(event, "Beams").beams();
const double meanBeamMom = ( beams.first.p3().mod() +
beams.second.p3().mod() ) / 2.0;
MSG_DEBUG("Avg beam momentum = " << meanBeamMom);
const Thrust& thrust = apply<Thrust>(event, "Thrust");
// thrust related observables
_h_thrust ->fill(1.-thrust.thrust() );
_h_major ->fill(thrust.thrustMajor());
_h_minor ->fill(thrust.thrustMinor());
_h_oblateness->fill(thrust.oblateness() );
// sphericity related
const Sphericity& sphericity = apply<Sphericity>(event, "Sphericity");
_h_sphericity->fill(sphericity.sphericity());
_h_planarity ->fill(sphericity.planarity() );
if(_h_aplanarity) _h_aplanarity->fill(sphericity.aplanarity());
// hemisphere related
const Hemispheres& hemi = apply<Hemispheres>(event, "Hemispheres");
// standard jet masses
_h_heavy_jet_mass->fill(hemi.scaledM2high());
_h_light_jet_mass->fill(hemi.scaledM2low() );
_h_diff_jet_mass ->fill(hemi.scaledM2diff());
if(_h_total_jet_mass) _h_total_jet_mass->fill(hemi.scaledM2low()+hemi.scaledM2high());
// jet broadening
_h_wide_broading ->fill(hemi.Bmax() );
if(_h_narrow_broading) _h_narrow_broading->fill(hemi.Bmin() );
_h_total_broading ->fill(hemi.Bsum() );
_h_diff_broading ->fill(hemi.Bdiff());
// E and p scheme jet masses
Vector3 axis = thrust.thrustAxis();
FourMomentum p4WithE, p4AgainstE;
FourMomentum p4WithP, p4AgainstP;
double Evis(0);
for (const Particle& p : apply<FinalState>(event, "FS").particles()) {
Vector3 p3 = p.momentum().vector3().unitVec();
const double E = p.momentum().E();
Evis += E;
p3 = E*p3;
const double p3Para = dot(p3, axis);
FourMomentum p4E(E,p3.x(),p3.y(),p3.z());
FourMomentum p4P(p.p3().mod(),p.p3().x(),p.p3().y(),p.p3().z());
if (p3Para > 0) {
p4WithE += p4E;
p4WithP += p4P;
}
else if (p3Para < 0) {
p4AgainstE += p4E;
p4AgainstP += p4P;
}
else {
MSG_WARNING("Particle split between hemispheres");
p4WithE += 0.5 * p4E;
p4AgainstE += 0.5 * p4E;
p4WithP += 0.5 * p4P;
p4AgainstP += 0.5 * p4P;
}
}
// E scheme
const double mass2With_E = p4WithE.mass2()/sqr(Evis);
const double mass2Against_E = p4AgainstE.mass2()/sqr(Evis);
// fill the histograms
_h_heavy_jet_mass_E->fill(max(mass2With_E,mass2Against_E));
if(_h_total_jet_mass_E) _h_total_jet_mass_E->fill(mass2With_E+mass2Against_E);
// pscheme
const double mass2With_P = p4WithP.mass2()/sqr(Evis);
const double mass2Against_P = p4AgainstP.mass2()/sqr(Evis);
// fill the histograms
if(_h_heavy_jet_mass_P) _h_heavy_jet_mass_P->fill(max(mass2With_P,mass2Against_P));
MSG_DEBUG("Calculating Parisi params");
const ParisiTensor& parisi = apply<ParisiTensor>(event, "Parisi");
- _h_CParam->fill(parisi.C(), weight);
- if(_h_DParam) _h_DParam->fill(parisi.D(), weight);
+ _h_CParam->fill(parisi.C());
+ if(_h_DParam) _h_DParam->fill(parisi.D());
// single particle distributions
const FinalState& fs = apply<FinalState>(event, "FS");
if(_h_xi) {
- foreach (const Particle& p, fs.particles()) {
- if( ! PID::isCharged(p.pdgId())) continue;
+ for (const Particle& p : fs.particles()) {
+ if( ! PID::isCharged(p.pid())) continue;
// Get momentum and energy of each particle.
const Vector3 mom3 = p.p3();
const double energy = p.E();
// Scaled momenta.
const double mom = mom3.mod();
const double scaledMom = mom/meanBeamMom;
const double logInvScaledMom = -std::log(scaledMom);
- _h_xi->fill(logInvScaledMom, weight);
+ _h_xi->fill(logInvScaledMom);
// Get momenta components w.r.t. thrust and sphericity.
const double momT = dot(thrust.thrustAxis(), mom3);
const double pTinT = dot(mom3, thrust.thrustMajorAxis());
const double pToutT = dot(mom3, thrust.thrustMinorAxis());
- _h_pTIn ->fill(fabs(pTinT/GeV), weight);
- _h_pTOut->fill(fabs(pToutT/GeV), weight);
+ _h_pTIn ->fill(fabs(pTinT/GeV));
+ _h_pTOut->fill(fabs(pToutT/GeV));
// Calculate rapidities w.r.t. thrust and sphericity.
const double rapidityT = 0.5 * std::log((energy + momT) / (energy - momT));
- _h_rap->fill(fabs(rapidityT), weight);
+ _h_rap->fill(fabs(rapidityT));
MSG_TRACE(fabs(rapidityT) << " " << scaledMom/GeV);
}
}
}
/// Normalise histograms etc., after the run
void finalize() {
normalize(_h_thrust );
normalize(_h_major );
normalize(_h_minor );
normalize(_h_sphericity );
normalize(_h_planarity );
if(_h_aplanarity) normalize(_h_aplanarity );
normalize(_h_oblateness );
normalize(_h_heavy_jet_mass );
normalize(_h_light_jet_mass );
normalize(_h_diff_jet_mass );
if(_h_total_jet_mass) normalize(_h_total_jet_mass );
normalize(_h_heavy_jet_mass_E);
if(_h_total_jet_mass_E) normalize(_h_total_jet_mass_E);
if(_h_heavy_jet_mass_P) normalize(_h_heavy_jet_mass_P);
normalize(_h_wide_broading );
if(_h_narrow_broading) normalize(_h_narrow_broading );
normalize(_h_total_broading );
normalize(_h_diff_broading );
normalize(_h_CParam );
if(_h_DParam) normalize(_h_DParam );
if(_h_xi) {
scale(_h_xi ,1./sumOfWeights());
scale(_h_pTIn ,1./sumOfWeights());
scale(_h_pTOut,1./sumOfWeights());
scale(_h_rap ,1./sumOfWeights());
}
}
//@}
/// @name Histograms
//@{
Histo1DPtr _h_thrust,_h_major,_h_minor;
Histo1DPtr _h_sphericity,_h_planarity,_h_aplanarity,_h_oblateness;
Histo1DPtr _h_heavy_jet_mass,_h_light_jet_mass,_h_diff_jet_mass,_h_total_jet_mass;
Histo1DPtr _h_heavy_jet_mass_E,_h_total_jet_mass_E;
Histo1DPtr _h_heavy_jet_mass_P;
Histo1DPtr _h_wide_broading,_h_narrow_broading,_h_total_broading,_h_diff_broading;
Histo1DPtr _h_CParam,_h_DParam;
Histo1DPtr _h_xi, _h_pTIn, _h_pTOut,_h_rap;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(DELPHI_2003_I620250);
}
diff --git a/analyses/pluginLEP/DELPHI_2006_I719387.cc b/analyses/pluginLEP/DELPHI_2006_I719387.cc
--- a/analyses/pluginLEP/DELPHI_2006_I719387.cc
+++ b/analyses/pluginLEP/DELPHI_2006_I719387.cc
@@ -1,86 +1,83 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/UnstableParticles.hh"
namespace Rivet {
/// @brief Add a short analysis description here
class DELPHI_2006_I719387 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(DELPHI_2006_I719387);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
declare(Beam(), "Beams");
declare(ChargedFinalState(), "FS");
declare(UnstableParticles(), "UFS");
// Book histograms
- _histXi = bookHisto1D(1, 3, 1);
+ book(_histXi, 1, 3, 1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// First, veto on leptonic events by requiring at least 4 charged FS particles
const FinalState& fs = apply<FinalState>(event, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) {
MSG_DEBUG("Failed leptonic event cut");
vetoEvent;
}
MSG_DEBUG("Passed leptonic event cut");
- // Get event weight for histo filling
- const double weight = event.weight();
-
// Get beams and average beam momentum
const ParticlePair& beams = apply<Beam>(event, "Beams").beams();
const double meanBeamMom = ( beams.first.p3().mod() +
beams.second.p3().mod() ) / 2.0;
MSG_DEBUG("Avg beam momentum = " << meanBeamMom);
// Final state of unstable particles to get particle spectra
const UnstableParticles& ufs = apply<UnstableFinalState>(event, "UFS");
- foreach (const Particle& p, ufs.particles(Cuts::abspid==3312)) {
+ for (const Particle& p : ufs.particles(Cuts::abspid==3312)) {
const double xi = -log(p.p3().mod()/meanBeamMom);
- _histXi->fill(xi,weight);
+ _histXi->fill(xi);
}
}
/// Normalise histograms etc., after the run
void finalize() {
// mult by bin width
scale(_histXi, 1./sumOfWeights()*.2);
}
//@}
/// @name Histograms
//@{
Histo1DPtr _histXi;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(DELPHI_2006_I719387);
}
diff --git a/analyses/pluginLEP/L3_1997_I427107.cc b/analyses/pluginLEP/L3_1997_I427107.cc
--- a/analyses/pluginLEP/L3_1997_I427107.cc
+++ b/analyses/pluginLEP/L3_1997_I427107.cc
@@ -1,108 +1,105 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
/// @brief eta' and omega production at mz
class L3_1997_I427107 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(L3_1997_I427107);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
// Initialise and register projections
declare(Beam(), "Beams");
declare(ChargedFinalState(), "FS");
declare(UnstableParticles(), "UFS");
// // Book histograms
- _histXpOmega = bookHisto1D( 5, 1, 1);
- _histLnXpOmega = bookHisto1D( 6, 1, 1);
- _histXpEtaP1 = bookHisto1D( 7, 1, 1);
- _histLnXpEtaP1 = bookHisto1D( 8, 1, 1);
- _histXpEtaP2 = bookHisto1D( 9, 1, 1);
- _histLnXpEtaP2 = bookHisto1D(10, 1, 1);
+ book(_histXpOmega , 5, 1, 1);
+ book(_histLnXpOmega, 6, 1, 1);
+ book(_histXpEtaP1 , 7, 1, 1);
+ book(_histLnXpEtaP1, 8, 1, 1);
+ book(_histXpEtaP2 , 9, 1, 1);
+ book(_histLnXpEtaP2, 10, 1, 1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
const FinalState& fs = apply<FinalState>(event, "FS");
if (fs.particles().size() < 2) {
MSG_DEBUG("Failed ncharged cut");
vetoEvent;
}
MSG_DEBUG("Passed ncharged cut");
- // Get event weight for histo filling
- const double weight = event.weight();
-
// Get beams and average beam momentum
const ParticlePair& beams = apply<Beam>(event, "Beams").beams();
const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0;
MSG_DEBUG("Avg beam momentum = " << meanBeamMom);
// Final state of unstable particles to get particle spectra
const Particles& mesons = apply<UnstableParticles>(event, "UFS").particles(Cuts::pid==PID::ETAPRIME or
Cuts::pid==PID::OMEGA);
- foreach (const Particle& p, mesons) {
+ for (const Particle& p : mesons) {
double xp = p.p3().mod()/meanBeamMom;
double xi = log(1./xp);
- if(p.pdgId()==PID::ETAPRIME) {
- _histXpEtaP1->fill(xp, weight);
- _histLnXpEtaP1->fill(xi, weight);
- _histXpEtaP2->fill(xp, weight);
- _histLnXpEtaP2->fill(xi, weight);
+ if(p.pid()==PID::ETAPRIME) {
+ _histXpEtaP1->fill(xp);
+ _histLnXpEtaP1->fill(xi);
+ _histXpEtaP2->fill(xp);
+ _histLnXpEtaP2->fill(xi);
}
else {
- _histXpOmega->fill(xp, weight);
- _histLnXpOmega->fill(xi, weight);
+ _histXpOmega->fill(xp);
+ _histLnXpOmega->fill(xi);
}
}
}
/// Normalise histograms etc., after the run
void finalize() {
scale(_histXpEtaP1 , 1./sumOfWeights());
scale(_histLnXpEtaP1, 1./sumOfWeights());
scale(_histXpEtaP2 , 1./sumOfWeights());
scale(_histLnXpEtaP2, 1./sumOfWeights());
scale(_histXpOmega , 1./sumOfWeights());
scale(_histLnXpOmega, 1./sumOfWeights());
}
//@}
/// @name Histograms
//@{
Histo1DPtr _histXpEtaP1,_histXpEtaP2;
Histo1DPtr _histLnXpEtaP1,_histLnXpEtaP2;
Histo1DPtr _histXpOmega;
Histo1DPtr _histLnXpOmega;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(L3_1997_I427107);
}
diff --git a/analyses/pluginLEP/L3_2004_I645127.cc b/analyses/pluginLEP/L3_2004_I645127.cc
--- a/analyses/pluginLEP/L3_2004_I645127.cc
+++ b/analyses/pluginLEP/L3_2004_I645127.cc
@@ -1,189 +1,189 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/GammaGammaFinalState.hh"
#include "Rivet/Projections/UnstableParticles.hh"
namespace Rivet {
/// @brief mu+mu- and tau+tau- in gamma gamma
class L3_2004_I645127 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(L3_2004_I645127);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
// get the mode and options
_mode =0;
if( getOption("PROCESS") == "EE" ) _mode = 0;
else if( getOption("PROCESS") == "GG") _mode = 1;
// Initialise and register projections
if(_mode==0) {
const GammaGammaKinematics& diskin = declare(GammaGammaKinematics(), "Kinematics");
- declare(GammaGammaFinalState(diskin, GammaGammaFinalState::LAB), "FS");
+ declare(GammaGammaFinalState(diskin), "FS");
declare(UnstableParticles(),"UFS");
}
else if(_mode==1) {
declare(FinalState(), "FS");
}
// Book counters
- _c_sigma_mu1 = bookCounter("/TMP/sigma_mu_1");
- _c_sigma_mu2 = bookCounter("/TMP/sigma_mu_2");
- _c_sigma_tau = bookCounter("/TMP/sigma_tau");
+ book(_c_sigma_mu1, "/TMP/sigma_mu_1");
+ book(_c_sigma_mu2, "/TMP/sigma_mu_2");
+ book(_c_sigma_tau, "/TMP/sigma_tau");
}
void findChildren(const Particle & p,map<long,int> & nRes, int &ncount) {
- foreach(const Particle &child, p.children()) {
+ for(const Particle &child : p.children()) {
if(child.children().empty()) {
- --nRes[child.pdgId()];
+ --nRes[child.pid()];
--ncount;
}
else
findChildren(child,nRes,ncount);
}
}
/// Perform the per-event analysis
void analyze(const Event& event) {
- double weight = event.weight();
// stuff for e+e- collisions
double W2 = sqr(sqrtS());
if(_mode==0) {
const GammaGammaKinematics& kin = apply<GammaGammaKinematics>(event, "Kinematics");
W2 = kin.W2();
if(W2<9.*sqr(GeV) ) vetoEvent;
}
const FinalState& fs = apply<FinalState>(event, "FS");
map<long,int> nCount;
int ntotal(0);
bool fiducal = true;
for(const Particle & p : fs.particles()) {
- nCount[p.pdgId()] += 1;
+ nCount[p.pid()] += 1;
++ntotal;
- if(abs(p.pdgId())==13) {
+ if(abs(p.pid())==13) {
if(abs(cos(p.momentum().polarAngle()))>0.8) fiducal = false;
}
}
if( nCount[-13]==1 && nCount[13]==1 && ntotal==2+nCount[22]) {
if(W2<1600.*sqr(GeV)) {
- _c_sigma_mu1->fill(weight);
+ _c_sigma_mu1->fill();
if(fiducal) {
- _c_sigma_mu2->fill(weight);
+ _c_sigma_mu2->fill();
}
}
}
if(_mode==1) return;
bool foundTauPlus = false, foundTauMinus = true;
const UnstableParticles & ufs = apply<UnstableParticles>(event, "UFS");
- foreach (const Particle& p, ufs.particles()) {
+ for (const Particle& p : ufs.particles()) {
if(p.children().empty()) continue;
// find the taus
- if(abs(p.pdgId())==15) {
- if(p.pdgId()== 15) foundTauMinus=true;
- if(p.pdgId()==-15) foundTauPlus =true;
+ if(abs(p.pid())==15) {
+ if(p.pid()== 15) foundTauMinus=true;
+ if(p.pid()==-15) foundTauPlus =true;
findChildren(p,nCount,ntotal);
}
}
if(!foundTauPlus || !foundTauMinus)
vetoEvent;
bool matched = true;
for(auto const & val : nCount) {
if(val.first==22) {
continue;
}
else if(val.second!=0) {
matched = false;
break;
}
}
if(matched)
- _c_sigma_tau->fill(weight);
+ _c_sigma_tau->fill();
}
/// Normalise histograms etc., after the run
void finalize() {
// prefactor for the cross sections
double fact = crossSection()/picobarn/sumOfWeights();
if(_mode==1) fact /= 1000.;
scale(_c_sigma_mu1,fact);
scale(_c_sigma_mu2,fact);
scale(_c_sigma_tau,fact);
unsigned int imin=0, imax = 3;
if(_mode==1) {
imin=3;
imax=4;
}
for(unsigned int ihist=imin;ihist<imax;++ihist) {
unsigned int id=0, iy=0;
double sigma = 0., error = 0.;
if(ihist==0) {
id=1;
iy=1;
sigma = _c_sigma_mu2->val();
error = _c_sigma_mu2->err();
}
else if(ihist==1) {
id=1;
iy=2;
sigma = _c_sigma_mu1->val();
error = _c_sigma_mu1->err();
}
else if(ihist==2) {
id=2;
iy=1;
sigma = _c_sigma_tau->val();
error = _c_sigma_tau->err();
}
else if(ihist==3) {
id=3;
iy=5;
sigma = _c_sigma_mu1->val();
error = _c_sigma_mu1->err();
}
Scatter2D temphisto(refData(id, 1, iy));
- Scatter2DPtr mult = bookScatter2D(id, 1, iy);
+ Scatter2DPtr mult;
+ book(mult, id, 1, iy);
for (size_t b = 0; b < temphisto.numPoints(); b++) {
const double x = temphisto.point(b).x();
pair<double,double> ex = temphisto.point(b).xErrs();
pair<double,double> ex2 = ex;
if(ex2.first ==0.) ex2. first=0.0001;
if(ex2.second==0.) ex2.second=0.0001;
if (inRange(sqrtS()/GeV, x-ex2.first, x+ex2.second)) {
mult->addPoint(x, sigma, ex, make_pair(error,error));
}
else {
mult->addPoint(x, 0., ex, make_pair(0.,.0));
}
}
}
}
//@}
/// @name Histograms
//@{
CounterPtr _c_sigma_mu1,_c_sigma_mu2,_c_sigma_tau;
unsigned int _mode;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(L3_2004_I645127);
}
diff --git a/analyses/pluginLEP/L3_2004_I661114.cc b/analyses/pluginLEP/L3_2004_I661114.cc
--- a/analyses/pluginLEP/L3_2004_I661114.cc
+++ b/analyses/pluginLEP/L3_2004_I661114.cc
@@ -1,67 +1,66 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/GammaGammaFinalState.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
/// @brief Add a short analysis description here
class L3_2004_I661114 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(L3_2004_I661114);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
// get the hadronic final state
const GammaGammaKinematics& diskin = declare(GammaGammaKinematics(), "Kinematics");
- const FinalState & fs = declare(GammaGammaFinalState(diskin, GammaGammaFinalState::LAB), "FS");
+ const FinalState & fs = declare(GammaGammaFinalState(diskin), "FS");
declare(FastJets(fs, FastJets::KT,1.),"Jets");
// Book histograms
- _h_y = bookHisto1D(1, 1, 1);
+ book(_h_y, 1, 1, 1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
- double weight = event.weight();
Jets jets = apply<FastJets>(event, "Jets").jetsByPt(Cuts::pT > 3*GeV and Cuts::abseta < 1.0);
if(jets.empty()) vetoEvent;
for(const Jet & jet : jets) {
- _h_y->fill(jet.pT(),weight);
+ _h_y->fill(jet.pT());
}
}
/// Normalise histograms etc., after the run
void finalize() {
scale(_h_y, crossSection()/picobarn/sumOfWeights());
}
//@}
/// @name Histograms
//@{
Histo1DPtr _h_y;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(L3_2004_I661114);
}
diff --git a/analyses/pluginLEP/OPAL_1995_I382219.cc b/analyses/pluginLEP/OPAL_1995_I382219.cc
--- a/analyses/pluginLEP/OPAL_1995_I382219.cc
+++ b/analyses/pluginLEP/OPAL_1995_I382219.cc
@@ -1,84 +1,81 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/UnstableParticles.hh"
namespace Rivet {
/// @brief D* production
class OPAL_1995_I382219 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(OPAL_1995_I382219);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
declare(Beam(), "Beams");
declare(UnstableParticles(), "UFS");
- _h_Xe_Ds = bookHisto1D(3, 1, 1);
- _h_Xe_Ds_b = bookHisto1D(4, 1, 1);
- _h_Xe_Ds_c = bookHisto1D(5, 1, 1);
+ book(_h_Xe_Ds , 3, 1, 1);
+ book(_h_Xe_Ds_b, 4, 1, 1);
+ book(_h_Xe_Ds_c, 5, 1, 1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
- const double weight = event.weight();
-
-
const UnstableParticles& ufs = apply<UnstableFinalState>(event, "UFS");
// Get beams and average beam momentum
const ParticlePair& beams = apply<Beam>(event, "Beams").beams();
const double meanBeamMom = ( beams.first.p3().mod() +
beams.second.p3().mod() ) / 2.0/GeV;
// check if b hadrons or not
unsigned int nB= (filter_select(ufs.particles(), isBottomHadron)).size();
// Accept all D*+- decays.
for (const Particle& p : filter_select(ufs.particles(), Cuts::abspid==PID::DSTARPLUS)) {
// Scaled energy.
const double energy = p.E()/GeV;
const double scaledEnergy = energy/meanBeamMom;
- _h_Xe_Ds->fill(scaledEnergy, weight);
+ _h_Xe_Ds->fill(scaledEnergy);
if(nB==0)
- _h_Xe_Ds_c->fill(scaledEnergy, weight);
+ _h_Xe_Ds_c->fill(scaledEnergy);
else
- _h_Xe_Ds_b->fill(scaledEnergy, weight);
+ _h_Xe_Ds_b->fill(scaledEnergy);
}
}
/// Normalise histograms etc., after the run
void finalize() {
// brs for D*+/- -> D0 pi+/- and D0->K+pi-
double br = 0.677*0.03950;
scale(_h_Xe_Ds , 1./sumOfWeights()*br);
scale(_h_Xe_Ds_b, 1./sumOfWeights()*br);
scale(_h_Xe_Ds_c, 1./sumOfWeights()*br);
}
//@}
/// @name Histograms
//@{
Histo1DPtr _h_Xe_Ds,_h_Xe_Ds_b,_h_Xe_Ds_c;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(OPAL_1995_I382219);
}
diff --git a/analyses/pluginLEP/OPAL_2003_I595335.cc b/analyses/pluginLEP/OPAL_2003_I595335.cc
--- a/analyses/pluginLEP/OPAL_2003_I595335.cc
+++ b/analyses/pluginLEP/OPAL_2003_I595335.cc
@@ -1,108 +1,107 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/Thrust.hh"
namespace Rivet {
/// @brief Add a short analysis description here
class OPAL_2003_I595335 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(OPAL_2003_I595335);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
// Initialise and register projections
declare(Beam(), "Beams");
const ChargedFinalState cfs = ChargedFinalState();
declare(cfs, "CFS");
declare(Thrust(cfs), "Thrust");
- _h_pT_in = bookHisto1D(1, 1, 1);
- _h_pT_out = bookHisto1D(2, 1, 1);
- _h_y = bookHisto1D(3, 1, 1);
- _h_x = bookHisto1D(4, 1, 1);
- _h_xi = bookHisto1D(5, 1, 1);
- _wSum = 0.;
+ book(_h_pT_in , 1, 1, 1);
+ book(_h_pT_out, 2, 1, 1);
+ book(_h_y , 3, 1, 1);
+ book(_h_x , 4, 1, 1);
+ book(_h_xi , 5, 1, 1);
+ book(_wSum,"TMP/wSum");
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// First, veto on leptonic events by requiring at least 4 charged FS particles
const ChargedFinalState& cfs = apply<ChargedFinalState>(event, "CFS");
const size_t numParticles = cfs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) {
MSG_DEBUG("Failed leptonic event cut");
vetoEvent;
}
MSG_DEBUG("Passed leptonic event cut");
- const double weight = event.weight();
- _wSum += weight;
+ _wSum->fill();
// Get beams and average beam momentum
const ParticlePair& beams = apply<Beam>(event, "Beams").beams();
const double meanBeamMom = ( beams.first.p3().mod() +
beams.second.p3().mod() ) / 2.0;
MSG_DEBUG("Avg beam momentum = " << meanBeamMom);
// Thrusts
MSG_DEBUG("Calculating thrust");
const Thrust& thrust = apply<Thrust>(event, "Thrust");
- foreach (const Particle& p, cfs.particles()) {
+ for (const Particle& p : cfs.particles()) {
const Vector3 mom3 = p.p3();
const double energy = p.E();
const double pTinT = dot(mom3, thrust.thrustMajorAxis());
const double pToutT = dot(mom3, thrust.thrustMinorAxis());
- _h_pT_in ->fill(fabs(pTinT/GeV) ,weight);
- _h_pT_out->fill(fabs(pToutT/GeV),weight);
+ _h_pT_in ->fill(fabs(pTinT/GeV) );
+ _h_pT_out->fill(fabs(pToutT/GeV));
const double momT = dot(thrust.thrustAxis(), mom3);
const double rapidityT = 0.5 * std::log((energy + momT) / (energy - momT));
- _h_y->fill(fabs(rapidityT),weight);
+ _h_y->fill(fabs(rapidityT));
const double mom = mom3.mod();
const double scaledMom = mom/meanBeamMom;
const double logInvScaledMom = -std::log(scaledMom);
- _h_xi->fill(logInvScaledMom, weight);
- _h_x ->fill(scaledMom , weight);
+ _h_xi->fill(logInvScaledMom);
+ _h_x ->fill(scaledMom );
}
}
/// Normalise histograms etc., after the run
void finalize() {
- scale(_h_pT_in , 1./_wSum);
- scale(_h_pT_out, 1./_wSum);
- scale(_h_y , 1./_wSum);
- scale(_h_x , 1./_wSum);
- scale(_h_xi , 1./_wSum);
+ scale(_h_pT_in , 1./ *_wSum);
+ scale(_h_pT_out, 1./ *_wSum);
+ scale(_h_y , 1./ *_wSum);
+ scale(_h_x , 1./ *_wSum);
+ scale(_h_xi , 1./ *_wSum);
}
//@}
/// @name Histograms
//@{
Histo1DPtr _h_pT_in,_h_pT_out,_h_y,_h_x,_h_xi;
- double _wSum;
+ CounterPtr _wSum;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(OPAL_2003_I595335);
}
diff --git a/analyses/pluginLEP/OPAL_2003_I611415.cc b/analyses/pluginLEP/OPAL_2003_I611415.cc
--- a/analyses/pluginLEP/OPAL_2003_I611415.cc
+++ b/analyses/pluginLEP/OPAL_2003_I611415.cc
@@ -1,173 +1,172 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/GammaGammaFinalState.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
/// @brief Add a short analysis description here
class OPAL_2003_I611415 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(OPAL_2003_I611415);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
// get the hadronic final state
const GammaGammaKinematics& diskin = declare(GammaGammaKinematics(), "Kinematics");
- const FinalState & fs = declare(GammaGammaFinalState(diskin, GammaGammaFinalState::LAB), "FS");
+ const FinalState & fs = declare(GammaGammaFinalState(diskin), "FS");
declare(FastJets(fs, FastJets::KT,1.),"Jets");
- _h_theta[0] = bookHisto1D( 1,1,1);
- _h_theta[1] = bookHisto1D( 2,1,1);
- _h_ET[0] = bookHisto1D( 3,1,1);
- _h_ET[1] = bookHisto1D( 4,1,1);
- _h_ET[2] = bookHisto1D( 5,1,1);
- _h_xg[0][0] = bookHisto1D( 6,1,1);
- _h_xg[0][1] = bookHisto1D( 7,1,1);
- _h_xg[1][0] = bookHisto1D( 9,1,1);
- _h_xg[1][1] = bookHisto1D(10,1,1);
- _h_xg[2][0] = bookHisto1D(11,1,1);
- _h_xg[2][1] = bookHisto1D(12,1,1);
- _h_xg_high = bookHisto1D( 8,1,1);
- _h_xlog[0] = bookHisto1D(13,1,1);
- _h_xlog[1] = bookHisto1D(14,1,1);
- _h_xlog[2] = bookHisto1D(15,1,1);
- _h_eta_diff[0] = bookHisto1D(16,1,1);
- _h_eta_diff[1] = bookHisto1D(17,1,1);
- _h_eta_min[0] = bookHisto1D(18,1,1);
- _h_eta_min[1] = bookHisto1D(19,1,1);
- _h_eta_min[2] = bookHisto1D(20,1,1);
- _h_eta_min[3] = bookHisto1D(21,1,1);
+ book(_h_theta[0] , 1,1,1);
+ book(_h_theta[1] , 2,1,1);
+ book(_h_ET[0] , 3,1,1);
+ book(_h_ET[1] , 4,1,1);
+ book(_h_ET[2] , 5,1,1);
+ book(_h_xg[0][0] , 6,1,1);
+ book(_h_xg[0][1] , 7,1,1);
+ book(_h_xg[1][0] , 9,1,1);
+ book(_h_xg[1][1] , 10,1,1);
+ book(_h_xg[2][0] , 11,1,1);
+ book(_h_xg[2][1] , 12,1,1);
+ book(_h_xg_high , 8,1,1);
+ book(_h_xlog[0] , 13,1,1);
+ book(_h_xlog[1] , 14,1,1);
+ book(_h_xlog[2] , 15,1,1);
+ book(_h_eta_diff[0], 16,1,1);
+ book(_h_eta_diff[1], 17,1,1);
+ book(_h_eta_min[0] , 18,1,1);
+ book(_h_eta_min[1] , 19,1,1);
+ book(_h_eta_max[0] , 20,1,1);
+ book(_h_eta_max[1] , 21,1,1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
- double weight = event.weight();
// need at least two jets with |eta|<2 and pT>3
Jets jets = apply<FastJets>(event, "Jets").jetsByPt(Cuts::Et > 3.*GeV and Cuts::abseta < 2.);
if(jets.size()<2) vetoEvent;
if(jets[0].Et()<jets[1].Et()) swap(jets[0],jets[1]);
// Ets of jets
double Et1 = jets[0].Et(), Et2 = jets[1].Et();
// average Et
double Etbar = 0.5*(Et1+Et2);
double etaBar = 0.5*(jets[0].eta()+jets[1].eta());
if(Etbar<5.) vetoEvent;
// assymetry cut
if((Et1-Et2)/(Et1+Et2)>.25) vetoEvent;
// calculate x_gamma
FourMomentum psum;
for(const Particle & part : apply<FinalState>(event,"FS").particles()) {
psum += part.momentum();
}
FourMomentum pj = jets[0].momentum()+jets[1].momentum();
double xp = (pj.E()+pj.pz())/(psum.E()+psum.pz());
double xm = (pj.E()-pj.pz())/(psum.E()-psum.pz());
double cost = tanh(0.5*(jets[0].eta()-jets[1].eta()));
// cost distributions
if(pj.mass()>15.*GeV && etaBar<=1.) {
if(xp>0.75 && xm>0.75)
- _h_theta[0]->fill(abs(cost),weight);
+ _h_theta[0]->fill(abs(cost));
else if(xp<0.75 && xm<0.75)
- _h_theta[1]->fill(abs(cost),weight);
+ _h_theta[1]->fill(abs(cost));
}
// ET distributions
- _h_ET[0]->fill(Etbar,weight);
+ _h_ET[0]->fill(Etbar);
if((xp<0.75 && xm>0.75)|| (xm<0.75&&xp>0.75))
- _h_ET[1]->fill(Etbar,weight);
+ _h_ET[1]->fill(Etbar);
else if(xp<0.75 && xm <0.75)
- _h_ET[2]->fill(Etbar,weight);
+ _h_ET[2]->fill(Etbar);
if(Etbar>=5.&&Etbar<7.) {
- _h_xg[0][0]->fill(xp,weight);
- _h_xg[0][0]->fill(xm,weight);
- _h_xlog[0]->fill(log(xp),weight);
- _h_xlog[0]->fill(log(xm),weight);
+ _h_xg[0][0]->fill(xp);
+ _h_xg[0][0]->fill(xm);
+ _h_xlog[0]->fill(log(xp));
+ _h_xlog[0]->fill(log(xm));
if((xp<0.75 && xm>0.75)|| (xm<0.75&&xp>0.75)) {
- _h_xg[1][0]->fill(xp,weight);
- _h_xg[1][0]->fill(xm,weight);
- _h_xlog[1]->fill(log(xp),weight);
- _h_xlog[1]->fill(log(xm),weight);
+ _h_xg[1][0]->fill(xp);
+ _h_xg[1][0]->fill(xm);
+ _h_xlog[1]->fill(log(xp));
+ _h_xlog[1]->fill(log(xm));
}
else if(xp<0.75 && xm <0.75) {
- _h_xg[2][0]->fill(xp,weight);
- _h_xg[2][0]->fill(xm,weight);
- _h_xlog[2]->fill(log(xp),weight);
- _h_xlog[2]->fill(log(xm),weight);
+ _h_xg[2][0]->fill(xp);
+ _h_xg[2][0]->fill(xm);
+ _h_xlog[2]->fill(log(xp));
+ _h_xlog[2]->fill(log(xm));
}
}
else if(Etbar>=7.&& Etbar<11.) {
- _h_xg[0][1]->fill(xp,weight);
- _h_xg[0][1]->fill(xm,weight);
+ _h_xg[0][1]->fill(xp);
+ _h_xg[0][1]->fill(xm);
if((xp<0.75 && xm>0.75)|| (xm<0.75&&xp>0.75)) {
- _h_xg[1][1]->fill(xp,weight);
- _h_xg[1][1]->fill(xm,weight);
+ _h_xg[1][1]->fill(xp);
+ _h_xg[1][1]->fill(xm);
}
else if(xp<0.75 && xm <0.75) {
- _h_xg[2][1]->fill(xp,weight);
- _h_xg[2][1]->fill(xm,weight);
+ _h_xg[2][1]->fill(xp);
+ _h_xg[2][1]->fill(xm);
}
}
else if(Etbar>=11.&& Etbar<25.) {
- _h_xg_high->fill(xp,weight);
- _h_xg_high->fill(xm,weight);
+ _h_xg_high->fill(xp);
+ _h_xg_high->fill(xm);
}
// vs eta
double etaMin = min(abs(jets[0].eta()),abs(jets[1].eta()));
double etaMax = max(abs(jets[0].eta()),abs(jets[1].eta()));
if((xp<0.75 && xm>0.75)|| (xm<0.75&&xp>0.75)) {
- _h_eta_diff[0]->fill(abs(jets[0].eta()-jets[1].eta()),weight);
- _h_eta_min[0]->fill(etaMin,weight);
- _h_eta_max[0]->fill(etaMax,weight);
+ _h_eta_diff[0]->fill(abs(jets[0].eta()-jets[1].eta()));
+ _h_eta_min[0]->fill(etaMin);
+ _h_eta_max[0]->fill(etaMax);
}
else if(xp<0.75 && xm <0.75) {
- _h_eta_diff[1]->fill(abs(jets[0].eta()-jets[1].eta()),weight);
- _h_eta_min[1]->fill(etaMin,weight);
- _h_eta_max[1]->fill(etaMax,weight);
+ _h_eta_diff[1]->fill(abs(jets[0].eta()-jets[1].eta()));
+ _h_eta_min[1]->fill(etaMin);
+ _h_eta_max[1]->fill(etaMax);
}
}
/// Normalise histograms etc., after the run
void finalize() {
double fact = crossSection()/picobarn/sumOfWeights();
for(unsigned int ix=0;ix<2;++ix) {
scale(_h_theta[ix], fact);
scale(_h_eta_diff[ix], fact);
scale(_h_eta_min[ix], fact);
scale(_h_eta_max[ix], fact);
for(unsigned int iy=0;iy<3;++iy) {
scale(_h_xg[iy][ix],fact);
}
}
for(unsigned int ix=0;ix<3;++ix) {
scale(_h_ET[ix],fact);
scale(_h_xlog[ix],fact);
}
scale(_h_xg_high,fact);
}
//@}
/// @name Histograms
//@{
Histo1DPtr _h_theta[2],_h_ET[3],_h_xg[3][2],_h_xg_high;
Histo1DPtr _h_xlog[3],_h_eta_diff[2],_h_eta_min[2],_h_eta_max[2];
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(OPAL_2003_I611415);
}
diff --git a/analyses/pluginLEP/OPAL_2008_I754316.cc b/analyses/pluginLEP/OPAL_2008_I754316.cc
--- a/analyses/pluginLEP/OPAL_2008_I754316.cc
+++ b/analyses/pluginLEP/OPAL_2008_I754316.cc
@@ -1,71 +1,70 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/GammaGammaFinalState.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
/// @brief Add a short analysis description here
class OPAL_2008_I754316 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(OPAL_2008_I754316);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
// get the hadronic final state
- const GammaGammaKinematics& diskin = declare(GammaGammaKinematics(), "Kinematics");
- const FinalState & fs = declare(GammaGammaFinalState(diskin, GammaGammaFinalState::LAB), "FS");
+ const GammaGammaKinematics& gammakin = declare(GammaGammaKinematics(), "Kinematics");
+ const FinalState & fs = declare(GammaGammaFinalState(gammakin), "FS");
declare(FastJets(fs, FastJets::KT,1.),"Jets");
// Book histograms
- _h_y1 = bookHisto1D(1, 1, 1);
- _h_y2 = bookHisto1D(2, 1, 1);
+ book(_h_y1,1, 1, 1);
+ book(_h_y2,2, 1, 1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
- double weight = event.weight();
Jets jets = apply<FastJets>(event, "Jets").jetsByPt(Cuts::pT > 5*GeV and Cuts::abseta < 1.5);
if(jets.empty()) vetoEvent;
for(const Jet & jet : jets) {
- _h_y2->fill(jet.pT(),weight);
+ _h_y2->fill(jet.pT());
if(abs(jet.eta())<1.0)
- _h_y1->fill(jet.pT(),weight);
+ _h_y1->fill(jet.pT());
}
}
/// Normalise histograms etc., after the run
void finalize() {
scale(_h_y1, crossSection()/picobarn/sumOfWeights());
scale(_h_y2, crossSection()/picobarn/sumOfWeights());
}
//@}
/// @name Histograms
//@{
Histo1DPtr _h_y1, _h_y2;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(OPAL_2008_I754316);
}
diff --git a/include/Rivet/Projections/GammaGammaFinalState.hh b/include/Rivet/Projections/GammaGammaFinalState.hh
--- a/include/Rivet/Projections/GammaGammaFinalState.hh
+++ b/include/Rivet/Projections/GammaGammaFinalState.hh
@@ -1,92 +1,69 @@
// -*- C++ -*-
#ifndef RIVET_GammaGammaFinalState_HH
#define RIVET_GammaGammaFinalState_HH
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/GammaGammaKinematics.hh"
namespace Rivet {
/// @brief Final state particles boosted to the hadronic center of mass system.
///
- /// NB. The GammaGamma scattered lepton is not included in the final state particles.
+ /// NB. The GammaGamma scattered leptons are not included in the final state particles.
class GammaGammaFinalState: public FinalState {
public:
- /// Type of GammaGamma boost to apply
- enum BoostType { HCM, BREIT, LAB };
-
-
/// @name Constructors
//@{
-
- /// Constructor with explicit FinalState
+ /// Constructor with optional FinalState
/// @note The GammaGammaKinematics has no parameters, hence explicitly passing it as an arg shouldn't be necessary.
- GammaGammaFinalState(const FinalState& fs, BoostType boosttype, const GammaGammaKinematics& kinematicsp=GammaGammaKinematics())
- : _boosttype(boosttype)
+ GammaGammaFinalState(const FinalState& fs=FinalState(), const GammaGammaKinematics& kinematicsp=GammaGammaKinematics())
{
setName("GammaGammaFinalState");
declare(fs, "FS");
declare(kinematicsp, "Kinematics");
}
- /// Constructor with optional FinalState
- /// @note The GammaGammaKinematics has no parameters, hence explicitly passing it as an arg shouldn't be necessary.
- GammaGammaFinalState(BoostType boosttype, const FinalState& fs=FinalState(), const GammaGammaKinematics& kinematicsp=GammaGammaKinematics())
- : GammaGammaFinalState(fs, boosttype, kinematicsp)
- { }
-
/// Constructor with explicit cuts to define final-state particles
/// @note The GammaGammaKinematics has no parameters, hence explicitly passing it as an arg shouldn't be necessary.
- GammaGammaFinalState(const Cut& c, BoostType boosttype, const GammaGammaKinematics& kinematicsp=GammaGammaKinematics())
- : GammaGammaFinalState(FinalState(c), boosttype, kinematicsp)
- { }
-
- /// Constructor with explicit cuts to define final-state particles
- /// @note The GammaGammaKinematics has no parameters, hence explicitly passing it as an arg shouldn't be necessary.
- GammaGammaFinalState(BoostType boosttype, const Cut& c, const GammaGammaKinematics& kinematicsp=GammaGammaKinematics())
- : GammaGammaFinalState(FinalState(c), boosttype, kinematicsp)
+ GammaGammaFinalState(const Cut& c, const GammaGammaKinematics& kinematicsp=GammaGammaKinematics())
+ : GammaGammaFinalState(FinalState(c), kinematicsp)
{ }
// /// @brief Constructor with default FinalState
// /// @note The GammaGammaKinematics has no parameters, hence explicitly passing it as an arg shouldn't be necessary.
- // GammaGammaFinalState(BoostType boosttype, const GammaGammaKinematics& kinematicsp=GammaGammaKinematics())
- // : GammaGammaFinalState(FinalState(), boosttype, kinematicsp)
+ // GammaGammaFinalState(const GammaGammaKinematics& kinematicsp=GammaGammaKinematics())
+ // : GammaGammaFinalState(FinalState(), kinematicsp)
// { }
/// Backward compatible constructor with default FinalState
/// @deprecated Prefer a version that doesn't need a GammaGammaKinematics argument
- GammaGammaFinalState(const GammaGammaKinematics& kinematicsp, BoostType boosttype)
- : GammaGammaFinalState(FinalState(), boosttype, kinematicsp)
+ GammaGammaFinalState(const GammaGammaKinematics& kinematicsp)
+ : GammaGammaFinalState(FinalState(), kinematicsp)
{ }
/// Clone on the heap.
DEFAULT_RIVET_PROJ_CLONE(GammaGammaFinalState);
//@}
protected:
/// Apply the projection on the supplied event.
void project(const Event& e);
/// Compare projections.
- int compare(const Projection& p) const {
- const GammaGammaFinalState& other = dynamic_cast<const GammaGammaFinalState&>(p);
- return mkNamedPCmp(p, "Kinematics") || mkNamedPCmp(p, "FS") || cmp(_boosttype, other._boosttype);
+ CmpState compare(const Projection& p) const {
+ return mkNamedPCmp(p, "Kinematics") || mkNamedPCmp(p, "FS");
}
- private:
-
- BoostType _boosttype;
-
};
}
#endif
diff --git a/include/Rivet/Projections/GammaGammaKinematics.hh b/include/Rivet/Projections/GammaGammaKinematics.hh
--- a/include/Rivet/Projections/GammaGammaKinematics.hh
+++ b/include/Rivet/Projections/GammaGammaKinematics.hh
@@ -1,115 +1,79 @@
// -*- C++ -*-
#ifndef RIVET_GammaGammaKinematics_HH
#define RIVET_GammaGammaKinematics_HH
#include "Rivet/Particle.hh"
#include "Rivet/Event.hh"
#include "Rivet/Projection.hh"
#include "Rivet/Projections/GammaGammaLeptons.hh"
#include "Rivet/Projections/Beam.hh"
namespace Rivet {
/// @brief Get the gamma gamma kinematic variables and relevant boosts for an event.
class GammaGammaKinematics : public Projection {
public:
/// The default constructor.
GammaGammaKinematics(const GammaGammaLeptons & lepton = GammaGammaLeptons(),
const std::map<std::string,std::string> & opts =
std::map<std::string,std::string>())
: _theQ2(make_pair(-1.0,-1.0)), _theW2(-1.0) //,_theX(-1.0), _theY(-1.0), _theS(-1.0)
{
setName("GammaGammaKinematics");
//addPdgIdPair(ANY, hadid);
- addProjection(Beam(), "Beam");
- addProjection(lepton, "Lepton");
+ declare(Beam(), "Beam");
+ declare(lepton, "Lepton");
}
/// Clone on the heap.
DEFAULT_RIVET_PROJ_CLONE(GammaGammaKinematics);
protected:
/// Perform the projection operation on the supplied event.
virtual void project(const Event& e);
/// Compare with other projections.
- virtual int compare(const Projection& p) const;
+ virtual CmpState compare(const Projection& p) const;
public:
/// The \f$Q^2\f$.
pair<double,double> Q2() const { return _theQ2; }
/// The \f$W^2\f$.
double W2() const { return _theW2; }
- // /// The Bjorken \f$x\f$.
- // double x() const { return _theX; }
-
- // /// The inelasticity \f$y\f$
- // double y() const { return _theY; }
-
- // /// The centre of mass energy \f$s\f$
- // double s() const { return _theS; }
-
-
-
- // /// The LorentzRotation needed to boost a particle to the hadronic CM frame.
- // const LorentzTransform& boostHCM() const {
- // return _hcm;
- // }
-
- // /// The LorentzRotation needed to boost a particle to the hadronic Breit frame.
- // const LorentzTransform& boostBreit() const {
- // return _breit;
- // }
-
/// The incoming lepton beam particle
const ParticlePair& beamLeptons() const {
return _inLepton;
}
/// The scattered GammaGamma lepton
const ParticlePair & scatteredLeptons() const {
return _outLepton;
}
private:
/// The \f$Q^2\f$.
pair<double,double> _theQ2;
/// The \f$W^2\f$.
double _theW2;
- // /// The Bjorken \f$x\f$.
- // double _theX;
-
- // /// The Inelasticity \f$y\f$
- // double _theY;
-
- // /// The centre of mass energy \f$s\f$
- // double _theS;
-
/// Incoming and outgoing GammaGamma particles
ParticlePair _inLepton, _outLepton;
- // /// The LorentzRotation needed to boost a particle to the hadronic CM frame.
- // LorentzTransform _hcm;
-
- // /// The LorentzRotation needed to boost a particle to the hadronic Breit frame.
- // LorentzTransform _breit;
-
};
}
#endif
diff --git a/include/Rivet/Projections/GammaGammaLeptons.hh b/include/Rivet/Projections/GammaGammaLeptons.hh
--- a/include/Rivet/Projections/GammaGammaLeptons.hh
+++ b/include/Rivet/Projections/GammaGammaLeptons.hh
@@ -1,135 +1,135 @@
// -*- C++ -*-
#ifndef RIVET_GammaGammaLeptons_HH
#define RIVET_GammaGammaLeptons_HH
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/PromptFinalState.hh"
#include "Rivet/Projections/HadronicFinalState.hh"
#include "Rivet/Projections/DressedLeptons.hh"
#include "Rivet/Projections/UndressBeamLeptons.hh"
#include "Rivet/Particle.hh"
#include "Rivet/Event.hh"
namespace Rivet {
/// @brief Get the incoming and outgoing leptons in a gamma gamma collision event in e+e-
// Heavily based on DISLepton
class GammaGammaLeptons : public Projection {
public:
/// Enum to enable different orderings for selecting scattered
/// leptons in case several were found.
enum SortOrder { ENERGY, ETA, ET };
/// @name Constructors.
//@{
/// Default constructor taking general options. The recognised
/// options are: LMODE, taking the options "prompt", "any" and
/// "dressed"; DressDR giving a delta-R cone radius where photon
/// momenta are added to the lepton candidates for LMODE=dressed;
/// IsolDR giving a cone in delta-R where no hadrons are allowed
/// around a lepton candidate; and Undress giving a cone around
/// the incoming beam in which photons are considered
/// initial-state radiation for which the momentum is subtracted
/// from the beam momentum.
GammaGammaLeptons(const std::map<std::string,std::string> & opts =
std::map<std::string,std::string>())
: _isolDR(0.0), _sort(ENERGY) {
setName("GammaGammaLeptons");
- addProjection(HadronicFinalState(), "IFS");
+ declare(HadronicFinalState(), "IFS");
auto sorting = opts.find("LSort");
if ( sorting != opts.end() && sorting->second == "ETA" )
_sort = ETA;
else if ( sorting != opts.end() && sorting->second == "ET" )
_sort = ET;
double undresstheta = 0.0;
auto undress = opts.find("Undress");
if ( undress != opts.end() )
undresstheta = std::stod(undress->second);
if ( undresstheta > 0.0 )
- addProjection(UndressBeamLeptons(undresstheta), "Beam");
+ declare(UndressBeamLeptons(undresstheta), "Beam");
else
- addProjection(Beam(), "Beam");
+ declare(Beam(), "Beam");
auto isol = opts.find("IsolDR");
if ( isol != opts.end() ) _isolDR = std::stod(isol->second);
double dressdr = 0.0;
auto dress = opts.find("DressDR");
if ( dress != opts.end() )
dressdr = std::stod(dress->second);
auto lmode = opts.find("LMode");
if ( lmode != opts.end() && lmode->second == "any" )
- addProjection(FinalState(), "LFS");
+ declare(FinalState(), "LFS");
else if ( lmode != opts.end() && lmode->second == "dressed" )
- addProjection(DressedLeptons(dressdr), "LFS");
+ declare(DressedLeptons(dressdr), "LFS");
else
- addProjection(PromptFinalState(), "LFS");
+ declare(PromptFinalState(), "LFS");
}
/// Constructor taking the following arguments: a final state
/// projection defining which lepton candidates to consider; a
/// beam projection defining the momenta of the incoming lepton
/// beam, and a final state projection defining the particles not
/// allowed within a delta-R of @a isolationcut of a lepton
/// candidate.
GammaGammaLeptons(const FinalState & leptoncandidates,
const Beam & beamproj = Beam(),
const FinalState & isolationfs = FinalState(),
double isolationcut = 0.0, SortOrder sorting = ENERGY)
: _isolDR(isolationcut), _sort(sorting) {
- addProjection(leptoncandidates, "LFS");
- addProjection(isolationfs, "IFS");
- addProjection(beamproj, "Beam");
+ declare(leptoncandidates, "LFS");
+ declare(isolationfs, "IFS");
+ declare(beamproj, "Beam");
}
/// Clone on the heap.
DEFAULT_RIVET_PROJ_CLONE(GammaGammaLeptons);
//@}
protected:
/// Perform the projection operation on the supplied event.
virtual void project(const Event& e);
/// Compare with other projections.
- virtual int compare(const Projection& p) const;
+ virtual CmpState compare(const Projection& p) const;
public:
/// The incoming lepton
const ParticlePair & in() const { return _incoming; }
/// The outgoing lepton
const ParticlePair & out() const { return _outgoing; }
private:
/// The incoming leptons
ParticlePair _incoming;
/// The outgoing leptons
ParticlePair _outgoing;
/// If larger than zero, an isolation cut around the lepton is required.
double _isolDR;
/// How to sort leptons
SortOrder _sort;
};
}
#endif
diff --git a/src/Projections/GammaGammaFinalState.cc b/src/Projections/GammaGammaFinalState.cc
--- a/src/Projections/GammaGammaFinalState.cc
+++ b/src/Projections/GammaGammaFinalState.cc
@@ -1,41 +1,36 @@
// -*- C++ -*-
#include "Rivet/Projections/GammaGammaFinalState.hh"
namespace Rivet {
void GammaGammaFinalState::project(const Event& e) {
const GammaGammaKinematics& ggkin = apply<GammaGammaKinematics>(e, "Kinematics");
if ( ggkin.failed() ) {
fail();
return;
}
- LorentzTransform hcmboost; //< Null boost = LAB frame by default
- // if (_boosttype == HCM) hcmboost = ggkin.boostHCM();
- // else if (_boosttype == BREIT) hcmboost = ggkin.boostBreit();
const GammaGammaLeptons& gglep = ggkin.apply<GammaGammaLeptons>(e, "Lepton");
if ( ggkin.failed() ) {
fail();
return;
}
const FinalState& fs = apply<FinalState>(e, "FS");
// Fill the particle list with all particles _other_ than the GammaGamma scattered
// lepton, with momenta boosted into the appropriate frame.
_theParticles.clear();
_theParticles.reserve(fs.particles().size()-1);
ConstGenParticlePtr lep1 = gglep.out().first .genParticle();
ConstGenParticlePtr lep2 = gglep.out().second.genParticle();
// Ensure that we skip the GammaGamma leptons
for (const Particle& p : fs.particles()) {
- Particle temp = p;
- if (_boosttype != LAB) temp.setMomentum(hcmboost.transform(temp.momentum()));
if (p.genParticle() != lep1 &&
- p.genParticle() != lep2) _theParticles.push_back(temp);
+ p.genParticle() != lep2) _theParticles.push_back(p);
}
}
}
diff --git a/src/Projections/GammaGammaKinematics.cc b/src/Projections/GammaGammaKinematics.cc
--- a/src/Projections/GammaGammaKinematics.cc
+++ b/src/Projections/GammaGammaKinematics.cc
@@ -1,73 +1,73 @@
// -*- C++ -*-
#include "Rivet/Projections/GammaGammaKinematics.hh"
#include "Rivet/Math/Constants.hh"
namespace Rivet {
/// Compute event-by-event two-photon (gamma-gamma) kinematics.
///
/// Retrieves the incoming/outgoing lepton pairs from the registered
/// "Lepton" (GammaGammaLeptons) projection, forms the two photon
/// four-momenta as the (incoming - outgoing) lepton differences, and
/// stores:
///   _theQ2 : pair of photon virtualities  Q2_i = -q_i^2
///   _theW2 : squared invariant mass of the two-photon system (q1+q2)^2
/// If the lepton projection failed, this projection fails too and
/// returns without touching any member state beyond the fail flag.
void GammaGammaKinematics::project(const Event& e) {
// Find appropriate GammaGamma leptons
const GammaGammaLeptons& gglep = applyProjection<GammaGammaLeptons>(e, "Lepton");
if ( gglep.failed() ) {
fail();
return;
}
// Cache the lepton pairs for accessor methods.
_inLepton = gglep. in();
_outLepton = gglep.out();
// // Get the GammaGamma lepton and store some of its properties
// const FourMomentum pHad = _inHadron.momentum();
const pair<FourMomentum,FourMomentum> pLepIn = make_pair(_inLepton.first.momentum(),
_inLepton.second.momentum());
const pair<FourMomentum,FourMomentum> pLepOut = make_pair(_outLepton.first.momentum(),
_outLepton.second.momentum());
// Each emitted photon carries the momentum lost by its beam lepton.
const pair<FourMomentum,FourMomentum> pGamma = make_pair(pLepIn.first - pLepOut.first,
pLepIn.second - pLepOut.second);
const FourMomentum tothad = pGamma.first+pGamma.second;
// Virtualities are positive for spacelike photons (hence the minus signs).
_theQ2 = make_pair(-pGamma.first.mass2(),-pGamma.second.mass2());
_theW2 = tothad.mass2();
// NOTE(review): the block below appears to be disabled DIS-style frame
// machinery (x, y, HCM/Breit boosts) kept for reference — confirm before
// removing.
// _theX = Q2()/(2.0 * pGamma * pHad);
// _theY = (pGamma * pHad) / (pLepIn * pHad);
// _theS = invariant(pLepIn + pHad);
// // Calculate boost vector for boost into HCM-system
// LorentzTransform tmp;
// tmp.setBetaVec(-tothad.boostVector());
// // Rotate so the photon is in x-z plane in HCM rest frame
// FourMomentum pGammaHCM = tmp.transform(pGamma);
// tmp.preMult(Matrix3(Vector3::mkZ(), -pGammaHCM.azimuthalAngle()));
// pGammaHCM = tmp.transform(pGamma);
// assert(isZero(dot(pGammaHCM.vector3(), Vector3::mkY())));
// // Rotate so the photon is along the positive z-axis
// const double rot_angle = pGammaHCM.polarAngle() * (pGammaHCM.px() >= 0 ? -1 : 1);
// tmp.preMult(Matrix3(Vector3::mkY(), rot_angle));
// // Check that final HCM photon lies along +ve z as expected
// pGammaHCM = tmp.transform(pGamma);
// assert(isZero(dot(pGammaHCM.vector3(), Vector3::mkX()), 1e-3));
// assert(isZero(dot(pGammaHCM.vector3(), Vector3::mkY()), 1e-3));
// assert(isZero(angle(pGammaHCM.vector3(), Vector3::mkZ()), 1e-3));
// // Finally rotate so outgoing lepton at phi = 0
// FourMomentum pLepOutHCM = tmp.transform(pLepOut);
// tmp.preMult(Matrix3(Vector3::mkZ(), -pLepOutHCM.azimuthalAngle()));
// assert(isZero(tmp.transform(pLepOut).azimuthalAngle()));
// _hcm = tmp;
// // Boost to Breit frame (use opposite convention for photon --- along *minus* z)
// tmp.preMult(Matrix3(Vector3::mkX(), PI));
// const double bz = 1 - 2*x();
// _breit = LorentzTransform::mkObjTransformFromBeta(Vector3::mkZ() * bz).combine(tmp);
// assert(isZero(angle(_breit.transform(pGamma).vector3(), -Vector3::mkZ()), 1e-3));
// assert(isZero(_breit.transform(pLepOut).azimuthalAngle(), 1e-3));
}
- int GammaGammaKinematics::compare(const Projection & p) const {
+ CmpState GammaGammaKinematics::compare(const Projection & p) const {
const GammaGammaKinematics& other = pcast<GammaGammaKinematics>(p);
return mkNamedPCmp(other, "Lepton");
}
}
diff --git a/src/Projections/GammaGammaLeptons.cc b/src/Projections/GammaGammaLeptons.cc
--- a/src/Projections/GammaGammaLeptons.cc
+++ b/src/Projections/GammaGammaLeptons.cc
@@ -1,87 +1,87 @@
// -*- C++ -*-
#include "Rivet/Projections/GammaGammaLeptons.hh"
namespace Rivet {
- int GammaGammaLeptons::compare(const Projection& p) const {
+ CmpState GammaGammaLeptons::compare(const Projection& p) const {
const GammaGammaLeptons& other = pcast<GammaGammaLeptons>(p);
return mkNamedPCmp(other, "Beam") || mkNamedPCmp(other, "LFS") ||
mkNamedPCmp(other, "IFS") || cmp(_sort, other._sort);
}
/// Identify the incoming beam leptons and the two outgoing (scattered)
/// leptons of a two-photon event.
///
/// Fails immediately unless both beam particles are leptons. Candidate
/// scattered leptons are taken from the "LFS" final state, ordered
/// according to _sort (ET, ETA, or energy); for ETA ordering the sign of
/// the first beam's pz decides ascending vs descending pseudorapidity.
/// For each beam, same-flavour candidates are preferred (falling back to
/// all leptons), optionally subject to an isolation requirement of
/// _isolDR against the "IFS" particles. Results are stored in _outgoing.
void GammaGammaLeptons::project(const Event& e) {
// Find incoming lepton beams
_incoming = applyProjection<Beam>(e, "Beam").beams();
// need two leptonic beams
if(! PID::isLepton(_incoming. first.pid()) ||
! PID::isLepton(_incoming.second.pid()) ) {
fail();
return;
}
// If no graph-connected scattered lepton, use the hardest
// (preferably same-flavour) prompt FS lepton in the event.
const FinalState & fs = applyProjection<FinalState>(e, "LFS");
Particles fsleptons;
// Choose the candidate ordering. Only the ETA orderings depend on the
// beam direction; ET and E orderings are beam-independent.
if ( _sort == ET )
fsleptons = fs.particles(isLepton, cmpMomByEt);
else if ( _sort == ETA && _incoming.first.momentum().pz() >= 0.0 )
fsleptons = fs.particles(isLepton, cmpMomByDescEta);
else if ( _sort == ETA && _incoming.first.momentum().pz() < 0.0 )
fsleptons = fs.particles(isLepton, cmpMomByEta);
else
fsleptons = fs.particles(isLepton, cmpMomByE);
// loop over the two beam particles
for(unsigned int ix=0;ix<2;++ix) {
Particle inc = ix == 0 ? _incoming.first : _incoming.second;
// resort if required
// (only needed for ETA: the second beam may point the other way)
if(ix==1 && _sort ==ETA ) {
if ( _sort == ETA && _incoming.second.momentum().pz() >= 0.0 )
sort(fsleptons.begin(),fsleptons.end(), cmpMomByDescEta);
else if ( _sort == ETA && _incoming.second.momentum().pz() < 0.0 )
sort(fsleptons.begin(),fsleptons.end(), cmpMomByEta);
}
// Prefer candidates with the same PID as this beam lepton; if none,
// fall back to the full lepton list.
Particles sfleptons =
filter_select(fsleptons, Cuts::pid == inc.pid());
if ( sfleptons.empty() ) sfleptons = fsleptons;
if ( _isolDR > 0.0 ) {
// Isolation: drop leading candidates that have another "IFS"
// particle within _isolDR, until an isolated one is found.
const Particles & other =
applyProjection<FinalState>(e, "IFS").particles();
while (!sfleptons.empty()) {
bool skip = false;
Particle testlepton = sfleptons.front();
for ( auto p: other ) {
if ( skip ) break;
if ( deltaR(p, testlepton) < _isolDR ) skip = true;
// A particle that is one of the lepton's own constituents must
// not veto it, so un-set the flag in that case.
for ( auto c : testlepton.constituents() ) {
if ( c.genParticle() == p.genParticle() ) {
skip = false;
break;
}
}
}
if ( !skip ) break;
// Front candidate is not isolated: discard and try the next one.
sfleptons.erase(sfleptons.begin());
}
}
if ( !sfleptons.empty() ) {
// Store the surviving leading candidate for this beam.
if(ix==0) {
_outgoing.first = sfleptons.front();
}
else {
_outgoing.second = sfleptons.front();
}
}
else {
// NOTE(review): fail() here does not return, so the second beam is
// still processed after a first-beam failure — confirm this is the
// intended behaviour.
fail();
}
}
}
}

File Metadata

Mime Type
application/vnd.ms-fontobject
Expires
Sun, Nov 10, 2:42 PM (1 d, 22 h)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
3758749
Default Alt Text
(114 KB)

Event Timeline