diff --git a/analyses/pluginALICE/ALICE_2010_S8625980.cc b/analyses/pluginALICE/ALICE_2010_S8625980.cc
--- a/analyses/pluginALICE/ALICE_2010_S8625980.cc
+++ b/analyses/pluginALICE/ALICE_2010_S8625980.cc
@@ -1,97 +1,97 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
class ALICE_2010_S8625980 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
ALICE_2010_S8625980()
: Analysis("ALICE_2010_S8625980")
{ }
//@}
public:
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
ChargedFinalState cfs(-1.0, 1.0);
declare(cfs, "CFS");
if (fuzzyEquals(sqrtS()/GeV, 900, 1E-3)) {
book(_h_dN_deta ,4, 1, 1);
} else if (fuzzyEquals(sqrtS()/GeV, 2360, 1E-3)) {
book(_h_dN_deta ,5, 1, 1);
} else if (fuzzyEquals(sqrtS()/GeV, 7000, 1E-3)) {
book(_h_dN_deta ,6, 1, 1);
book(_h_dN_dNch ,3, 1, 1);
}
book(_Nevt_after_cuts, "Nevt_after_cuts");
}
/// Perform the per-event analysis
void analyze(const Event& event) {
const ChargedFinalState& charged = apply<ChargedFinalState>(event, "CFS");
if (charged.size() < 1) {
vetoEvent;
}
_Nevt_after_cuts->fill();
foreach (const Particle& p, charged.particles()) {
const double eta = p.eta();
_h_dN_deta->fill(eta);
}
if (fuzzyEquals(sqrtS()/GeV, 7000, 1E-3)) {
_h_dN_dNch->fill(charged.size());
}
}
/// Normalise histograms etc., after the run
void finalize() {
if (fuzzyEquals(sqrtS()/GeV, 7000, 1E-3)) {
normalize(_h_dN_dNch);
}
- scale(_h_dN_deta, 1.0/_Nevt_after_cuts);
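+ // _Nevt_after_cuts is now a CounterPtr: dereferencing it makes the
+ // scale factor divide by the counter's sum of weights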
+ scale(_h_dN_deta, 1.0/ *_Nevt_after_cuts);
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_dN_deta;
Histo1DPtr _h_dN_dNch;
CounterPtr _Nevt_after_cuts;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ALICE_2010_S8625980);
}
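The remaining files below repeat the pattern fixed above: per-event weight sums are CounterPtr handles to YODA counters rather than plain doubles, so finalize() must dereference them before dividing. A minimal sketch of that pattern, assembled only from Rivet calls already used in this diff (declare, book, fill, val, dbl, scale); the analysis name MY_COUNTER_EXAMPLE and its booking names are placeholders, and the snippet assumes a working Rivet installation to compile against:

// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"

namespace Rivet {

  /// Hypothetical analysis illustrating the CounterPtr pattern in this diff
  class MY_COUNTER_EXAMPLE : public Analysis {
  public:
    MY_COUNTER_EXAMPLE() : Analysis("MY_COUNTER_EXAMPLE") { }

    void init() {
      declare(ChargedFinalState(-1.0, 1.0), "CFS");
      book(_h_eta, "eta", 10, -1.0, 1.0);  // histogram booked by name and binning
      book(_nEvt, "Nevt_after_cuts");      // counter is booked, not default-constructed
    }

    void analyze(const Event& event) {
      const ChargedFinalState& cfs = apply<ChargedFinalState>(event, "CFS");
      if (cfs.size() < 1) vetoEvent;
      _nEvt->fill();                       // accumulates the event weight
      for (const Particle& p : cfs.particles()) _h_eta->fill(p.eta());
    }

    void finalize() {
      MSG_DEBUG("Events after cuts: " << dbl(*_nEvt));  // raw value for logging
      if (_nEvt->val() != 0)               // val() for comparisons
        scale(_h_eta, 1.0 / *_nEvt);       // dereference: divide by the sum of weights
    }

  private:
    Histo1DPtr _h_eta;
    CounterPtr _nEvt;
  };

  DECLARE_RIVET_PLUGIN(MY_COUNTER_EXAMPLE);
}

Note that 1.0 / *counter divides by the counter's sum of weights, which coincides with the event count only for unweighted generation.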
diff --git a/analyses/pluginALICE/ALICE_2010_S8706239.cc b/analyses/pluginALICE/ALICE_2010_S8706239.cc
--- a/analyses/pluginALICE/ALICE_2010_S8706239.cc
+++ b/analyses/pluginALICE/ALICE_2010_S8706239.cc
@@ -1,100 +1,100 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
class ALICE_2010_S8706239 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
ALICE_2010_S8706239()
: Analysis("ALICE_2010_S8706239")
{ }
//@}
public:
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
ChargedFinalState cfs(-0.8, 0.8, 0.15);
declare(cfs, "CFS");
book(_h_pT ,4, 1, 1);
book(_h_pT_Nch_015 ,11, 1, 1);
book(_h_pT_Nch_05 ,12, 1, 1);
book(_Nevt_after_cuts,"Nevt_after_cuts");
}
/// Perform the per-event analysis
void analyze(const Event& event) {
const ChargedFinalState& charged = apply<ChargedFinalState>(event, "CFS");
_Nevt_after_cuts->fill();
// Get number of particles that fulfill certain pT requirements
int Nch_015 = 0;
int Nch_05 = 0;
foreach (const Particle& p, charged.particles()) {
double pT = p.pT()/GeV;
if (pT < 4.0) Nch_015++;
if (pT > 0.5 && pT < 4.0) Nch_05++;
}
// Now we can fill histograms
foreach (const Particle& p, charged.particles()) {
double pT = p.pT()/GeV;
if (pT < 4.0) _h_pT_Nch_015 ->fill(Nch_015, pT);
if (pT > 0.5 && pT < 4.0) _h_pT_Nch_05 ->fill(Nch_05, pT);
// To get the yield, fill with the appropriate weight 1/(2*PI * pT * d eta)
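// (here d eta = 1.6, from the |eta| < 0.8 acceptance declared in init())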
_h_pT->fill(pT, 1.0 /(TWOPI*pT*1.6) );
}
}
/// Normalise histograms etc., after the run
void finalize() {
- scale(_h_pT, 1.0/_Nevt_after_cuts);
+ scale(_h_pT, 1.0/ *_Nevt_after_cuts);
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_pT;
Profile1DPtr _h_pT_Nch_015 ;
Profile1DPtr _h_pT_Nch_05 ;
CounterPtr _Nevt_after_cuts;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ALICE_2010_S8706239);
}
diff --git a/analyses/pluginATLAS/ATLAS_2011_I944826.cc b/analyses/pluginATLAS/ATLAS_2011_I944826.cc
--- a/analyses/pluginATLAS/ATLAS_2011_I944826.cc
+++ b/analyses/pluginATLAS/ATLAS_2011_I944826.cc
@@ -1,258 +1,258 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/IdentifiedFinalState.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
class ATLAS_2011_I944826 : public Analysis {
public:
/// Constructor
ATLAS_2011_I944826()
: Analysis("ATLAS_2011_I944826")
{}
/// Book histograms and initialise projections before the run
void init() {
book(_sum_w_ks , "ks");
book(_sum_w_lambda, "lambda");
book(_sum_w_passed, "passed");
UnstableFinalState ufs(Cuts::pT > 100*MeV);
declare(ufs, "UFS");
ChargedFinalState mbts(Cuts::absetaIn(2.09, 3.84));
declare(mbts, "MBTS");
IdentifiedFinalState nstable(Cuts::abseta < 2.5 && Cuts::pT >= 100*MeV);
nstable.acceptIdPair(PID::ELECTRON)
.acceptIdPair(PID::MUON)
.acceptIdPair(PID::PIPLUS)
.acceptIdPair(PID::KPLUS)
.acceptIdPair(PID::PROTON);
declare(nstable, "nstable");
if (fuzzyEquals(sqrtS()/GeV, 7000, 1e-3)) {
book(_hist_Ks_pT ,1, 1, 1);
book(_hist_Ks_y ,2, 1, 1);
book(_hist_Ks_mult ,3, 1, 1);
book(_hist_L_pT ,7, 1, 1);
book(_hist_L_y ,8, 1, 1);
book(_hist_L_mult ,9, 1, 1);
book(_hist_Ratio_v_y ,13, 1, 1);
book(_hist_Ratio_v_pT,14, 1, 1);
//
book(_temp_lambda_v_y, "TMP/lambda_v_y", 10, 0.0, 2.5);
book(_temp_lambdabar_v_y, "TMP/lambdabar_v_y", 10, 0.0, 2.5);
book(_temp_lambda_v_pT, "TMP/lambda_v_pT", 18, 0.5, 4.1);
book(_temp_lambdabar_v_pT, "TMP/lambdabar_v_pT", 18, 0.5, 4.1);
}
else if (fuzzyEquals(sqrtS()/GeV, 900, 1E-3)) {
book(_hist_Ks_pT ,4, 1, 1);
book(_hist_Ks_y ,5, 1, 1);
book(_hist_Ks_mult ,6, 1, 1);
book(_hist_L_pT ,10, 1, 1);
book(_hist_L_y ,11, 1, 1);
book(_hist_L_mult ,12, 1, 1);
book(_hist_Ratio_v_y ,15, 1, 1);
book(_hist_Ratio_v_pT,16, 1, 1);
//
book(_temp_lambda_v_y, "TMP/lambda_v_y", 5, 0.0, 2.5);
book(_temp_lambdabar_v_y, "TMP/lambdabar_v_y", 5, 0.0, 2.5);
book(_temp_lambda_v_pT, "TMP/lambda_v_pT", 8, 0.5, 3.7);
book(_temp_lambdabar_v_pT, "TMP/lambdabar_v_pT", 8, 0.5, 3.7);
}
}
// This function computes the transverse flight distance used to impose the decay-distance cuts on Kaons and Lambdas
double getPerpFlightDistance(const Rivet::Particle& p) {
const HepMC::GenParticle* genp = p.genParticle();
const HepMC::GenVertex* prodV = genp->production_vertex();
const HepMC::GenVertex* decV = genp->end_vertex();
const HepMC::ThreeVector prodPos = prodV->point3d();
if (decV) {
const HepMC::ThreeVector decPos = decV->point3d();
double dy = prodPos.y() - decPos.y();
double dx = prodPos.x() - decPos.x();
return add_quad(dx, dy);
}
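// No decay vertex: treat the flight distance as effectively infinite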
return numeric_limits<double>::max();
}
bool daughtersSurviveCuts(const Rivet::Particle& p) {
// We require the Kshort or Lambda to decay into two charged
// particles with at least pT = 100 MeV inside acceptance region
const HepMC::GenParticle* genp = p.genParticle();
const HepMC::GenVertex* decV = genp->end_vertex();
bool decision = true;
if (!decV) return false;
if (decV->particles_out_size() == 2) {
std::vector<double> pTs;
std::vector<int> charges;
std::vector<double> etas;
foreach (const HepMC::GenParticle* gp, particles(decV, HepMC::children)) {
pTs.push_back(gp->momentum().perp());
etas.push_back(fabs(gp->momentum().eta()));
charges.push_back( Rivet::PID::threeCharge(gp->pdg_id()) );
// gp->print();
}
if ( (pTs[0]/Rivet::GeV < 0.1) || (pTs[1]/Rivet::GeV < 0.1) ) {
decision = false;
MSG_DEBUG("Failed pT cut: " << pTs[0]/Rivet::GeV << " " << pTs[1]/Rivet::GeV);
}
if ( etas[0] > 2.5 || etas[1] > 2.5 ) {
decision = false;
MSG_DEBUG("Failed eta cut: " << etas[0] << " " << etas[1]);
}
if ( charges[0] * charges[1] >= 0 ) {
decision = false;
MSG_DEBUG("Failed opposite charge cut: " << charges[0] << " " << charges[1]);
}
}
else {
decision = false;
MSG_DEBUG("Failed nDaughters cut: " << decV->particles_out_size());
}
return decision;
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// ATLAS MBTS trigger requirement of at least one hit in either hemisphere
if (apply<FinalState>(event, "MBTS").size() < 1) {
MSG_DEBUG("Failed trigger cut");
vetoEvent;
}
// Also veto the event when we find fewer than 2 particles of type 211, 2212, 11, 13, 321 in the acceptance region
if (apply<FinalState>(event, "nstable").size() < 2) {
MSG_DEBUG("Failed stable particle cut");
vetoEvent;
}
_sum_w_passed->fill();
// This ufs holds all the Kaons and Lambdas
const UnstableFinalState& ufs = apply<UnstableFinalState>(event, "UFS");
// Some counters
int n_KS0 = 0;
int n_LAMBDA = 0;
// Particle loop
foreach (const Particle& p, ufs.particles()) {
// General particle quantities
const double pT = p.pT();
const double y = p.rapidity();
const PdgId apid = p.abspid();
double flightd = 0.0;
// Look for Kaons, Lambdas
switch (apid) {
case PID::K0S:
flightd = getPerpFlightDistance(p);
if (!inRange(flightd/mm, 4., 450.) ) {
MSG_DEBUG("Kaon failed flight distance cut:" << flightd);
break;
}
if (daughtersSurviveCuts(p) ) {
_hist_Ks_y ->fill(y);
_hist_Ks_pT->fill(pT/GeV);
_sum_w_ks->fill();
n_KS0++;
}
break;
case PID::LAMBDA:
if (pT < 0.5*GeV) { // Lambdas have an additional pT cut of 500 MeV
MSG_DEBUG("Lambda failed pT cut:" << pT/GeV << " GeV");
break;
}
flightd = getPerpFlightDistance(p);
if (!inRange(flightd/mm, 17., 450.)) {
MSG_DEBUG("Lambda failed flight distance cut:" << flightd/mm << " mm");
break;
}
if ( daughtersSurviveCuts(p) ) {
if (p.pid() == PID::LAMBDA) {
_temp_lambda_v_y->fill(fabs(y));
_temp_lambda_v_pT->fill(pT/GeV);
_hist_L_y->fill(y);
_hist_L_pT->fill(pT/GeV);
_sum_w_lambda->fill();
n_LAMBDA++;
} else if (p.pid() == -PID::LAMBDA) {
_temp_lambdabar_v_y->fill(fabs(y));
_temp_lambdabar_v_pT->fill(pT/GeV);
}
}
break;
}
}
// Fill multiplicity histos
_hist_Ks_mult->fill(n_KS0);
_hist_L_mult->fill(n_LAMBDA);
}
/// Normalise histograms etc., after the run
void finalize() {
- MSG_DEBUG("# Events that pass the trigger: " << double(_sum_w_passed));
- MSG_DEBUG("# Kshort events: " << double(_sum_w_ks));
- MSG_DEBUG("# Lambda events: " << double(_sum_w_lambda));
+ MSG_DEBUG("# Events that pass the trigger: " << dbl(*_sum_w_passed));
+ MSG_DEBUG("# Kshort events: " << dbl(*_sum_w_ks));
+ MSG_DEBUG("# Lambda events: " << dbl(*_sum_w_lambda));
/// @todo Replace with normalize()?
- scale(_hist_Ks_pT, 1.0/_sum_w_ks);
- scale(_hist_Ks_y, 1.0/_sum_w_ks);
- scale(_hist_Ks_mult, 1.0/_sum_w_passed);
+ scale(_hist_Ks_pT, 1.0 / *_sum_w_ks);
+ scale(_hist_Ks_y, 1.0 / *_sum_w_ks);
+ scale(_hist_Ks_mult, 1.0 / *_sum_w_passed);
/// @todo Replace with normalize()?
- scale(_hist_L_pT, 1.0/_sum_w_lambda);
- scale(_hist_L_y, 1.0/_sum_w_lambda);
- scale(_hist_L_mult, 1.0/_sum_w_passed);
+ scale(_hist_L_pT, 1.0 / *_sum_w_lambda);
+ scale(_hist_L_y, 1.0 / *_sum_w_lambda);
+ scale(_hist_L_mult, 1.0 / *_sum_w_passed);
// Division of histograms to obtain lambda_bar/lambda ratios
divide(_temp_lambdabar_v_y, _temp_lambda_v_y, _hist_Ratio_v_y);
divide(_temp_lambdabar_v_pT, _temp_lambda_v_pT, _hist_Ratio_v_pT);
}
private:
/// Counters
CounterPtr _sum_w_ks, _sum_w_lambda, _sum_w_passed;
/// @name Persistent histograms
//@{
Histo1DPtr _hist_Ks_pT, _hist_Ks_y, _hist_Ks_mult;
Histo1DPtr _hist_L_pT, _hist_L_y, _hist_L_mult;
Scatter2DPtr _hist_Ratio_v_pT, _hist_Ratio_v_y;
//@}
/// @name Temporary histograms
//@{
Histo1DPtr _temp_lambda_v_y, _temp_lambdabar_v_y;
Histo1DPtr _temp_lambda_v_pT, _temp_lambdabar_v_pT;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ATLAS_2011_I944826);
}
diff --git a/analyses/pluginATLAS/ATLAS_2011_I945498.cc b/analyses/pluginATLAS/ATLAS_2011_I945498.cc
--- a/analyses/pluginATLAS/ATLAS_2011_I945498.cc
+++ b/analyses/pluginATLAS/ATLAS_2011_I945498.cc
@@ -1,303 +1,303 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ZFinder.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
#include "Rivet/Projections/IdentifiedFinalState.hh"
#include "Rivet/Projections/LeadingParticlesFinalState.hh"
namespace Rivet {
/// ATLAS Z+jets in pp at 7 TeV
class ATLAS_2011_I945498 : public Analysis {
public:
/// Constructor
ATLAS_2011_I945498()
: Analysis("ATLAS_2011_I945498")
{ }
/// Book histograms and initialise projections before the run
void init() {
// Variable initialisation
_isZeeSample = false;
_isZmmSample = false;
for (size_t chn = 0; chn < 3; ++chn) {
book(weights_nj0[chn], "weights_nj0_" + to_str(chn));
book(weights_nj1[chn], "weights_nj1_" + to_str(chn));
book(weights_nj2[chn], "weights_nj2_" + to_str(chn));
book(weights_nj3[chn], "weights_nj3_" + to_str(chn));
book(weights_nj4[chn], "weights_nj4_" + to_str(chn));
}
// Set up projections
FinalState fs;
ZFinder zfinder_mu(fs, Cuts::abseta < 2.4 && Cuts::pT > 20*GeV, PID::MUON, 66*GeV, 116*GeV, 0.1, ZFinder::CLUSTERNODECAY);
declare(zfinder_mu, "ZFinder_mu");
Cut cuts = (Cuts::abseta < 1.37 || Cuts::absetaIn(1.52, 2.47)) && Cuts::pT > 20*GeV;
ZFinder zfinder_el(fs, cuts, PID::ELECTRON, 66*GeV, 116*GeV, 0.1, ZFinder::CLUSTERNODECAY);
declare(zfinder_el, "ZFinder_el");
Cut cuts25_20 = Cuts::abseta < 2.5 && Cuts::pT > 20*GeV;
// For combined cross-sections (combined phase space + dressed level)
ZFinder zfinder_comb_mu(fs, cuts25_20, PID::MUON, 66.0*GeV, 116.0*GeV, 0.1, ZFinder::CLUSTERNODECAY);
declare(zfinder_comb_mu, "ZFinder_comb_mu");
ZFinder zfinder_comb_el(fs, cuts25_20, PID::ELECTRON, 66.0*GeV, 116.0*GeV, 0.1, ZFinder::CLUSTERNODECAY);
declare(zfinder_comb_el, "ZFinder_comb_el");
// Define veto FS in order to prevent Z-decay products entering the jet algorithm
VetoedFinalState remfs;
remfs.addVetoOnThisFinalState(zfinder_el);
remfs.addVetoOnThisFinalState(zfinder_mu);
VetoedFinalState remfs_comb;
remfs_comb.addVetoOnThisFinalState(zfinder_comb_el);
remfs_comb.addVetoOnThisFinalState(zfinder_comb_mu);
FastJets jets(remfs, FastJets::ANTIKT, 0.4);
jets.useInvisibles();
declare(jets, "jets");
FastJets jets_comb(remfs_comb, FastJets::ANTIKT, 0.4);
jets_comb.useInvisibles();
declare(jets_comb, "jets_comb");
// 0=el, 1=mu, 2=comb
for (size_t chn = 0; chn < 3; ++chn) {
book(_h_njet_incl[chn] ,1, 1, chn+1);
book(_h_njet_ratio[chn] ,2, 1, chn+1);
book(_h_ptjet[chn] ,3, 1, chn+1);
book(_h_ptlead[chn] ,4, 1, chn+1);
book(_h_ptseclead[chn] ,5, 1, chn+1);
book(_h_yjet[chn] ,6, 1, chn+1);
book(_h_ylead[chn] ,7, 1, chn+1);
book(_h_yseclead[chn] ,8, 1, chn+1);
book(_h_mass[chn] ,9, 1, chn+1);
book(_h_deltay[chn] ,10, 1, chn+1);
book(_h_deltaphi[chn] ,11, 1, chn+1);
book(_h_deltaR[chn] ,12, 1, chn+1);
}
}
// Jet selection criteria common to the electron and muon channels
/// @todo Replace with a Cut passed to jetsByPt
Jets selectJets(const ZFinder* zf, const FastJets* allJets) {
const FourMomentum l1 = zf->constituents()[0].momentum();
const FourMomentum l2 = zf->constituents()[1].momentum();
Jets jets;
foreach (const Jet& jet, allJets->jetsByPt(30*GeV)) {
const FourMomentum jmom = jet.momentum();
if (jmom.absrap() < 4.4 &&
deltaR(l1, jmom) > 0.5 && deltaR(l2, jmom) > 0.5) {
jets.push_back(jet);
}
}
return jets;
}
/// Perform the per-event analysis
void analyze(const Event& event) {
vector<const ZFinder*> zfs;
zfs.push_back(& (apply<ZFinder>(event, "ZFinder_el")));
zfs.push_back(& (apply<ZFinder>(event, "ZFinder_mu")));
zfs.push_back(& (apply<ZFinder>(event, "ZFinder_comb_el")));
zfs.push_back(& (apply<ZFinder>(event, "ZFinder_comb_mu")));
vector<const FastJets*> fjs;
fjs.push_back(& (apply<FastJets>(event, "jets")));
fjs.push_back(& (apply<FastJets>(event, "jets_comb")));
// Determine what kind of MC sample this is
const bool isZee = (zfs[0]->bosons().size() == 1) || (zfs[2]->bosons().size() == 1);
const bool isZmm = (zfs[1]->bosons().size() == 1) || (zfs[3]->bosons().size() == 1);
if (isZee) _isZeeSample = true;
if (isZmm) _isZmmSample = true;
// Require exactly one electronic or muonic Z-decay in the event
bool isZeemm = ( (zfs[0]->bosons().size() == 1 && zfs[1]->bosons().size() != 1) ||
(zfs[1]->bosons().size() == 1 && zfs[0]->bosons().size() != 1) );
bool isZcomb = ( (zfs[2]->bosons().size() == 1 && zfs[3]->bosons().size() != 1) ||
(zfs[3]->bosons().size() == 1 && zfs[2]->bosons().size() != 1) );
if (!isZeemm && !isZcomb) vetoEvent;
vector<int> zfIDs;
vector<int> fjIDs;
if (isZeemm) {
int chn = zfs[0]->bosons().size() == 1 ? 0 : 1;
zfIDs.push_back(chn);
fjIDs.push_back(0);
}
if (isZcomb) {
int chn = zfs[2]->bosons().size() == 1 ? 2 : 3;
zfIDs.push_back(chn);
fjIDs.push_back(1);
}
for (size_t izf = 0; izf < zfIDs.size(); ++izf) {
int zfID = zfIDs[izf];
int fjID = fjIDs[izf];
int chn = zfID;
if (zfID == 2 || zfID == 3) chn = 2;
Jets jets = selectJets(zfs[zfID], fjs[fjID]);
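// Cumulative fill: an event with N selected jets increments all of
// weights_nj0..weights_njN, so each counter holds the inclusive
// >= n jet event count used for the ratio points in finalize()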
switch (jets.size()) {
case 0:
weights_nj0[chn]->fill();
break;
case 1:
weights_nj0[chn]->fill();
weights_nj1[chn]->fill();
break;
case 2:
weights_nj0[chn]->fill();
weights_nj1[chn]->fill();
weights_nj2[chn]->fill();
break;
case 3:
weights_nj0[chn]->fill();
weights_nj1[chn]->fill();
weights_nj2[chn]->fill();
weights_nj3[chn]->fill();
break;
default: // >= 4
weights_nj0[chn]->fill();
weights_nj1[chn]->fill();
weights_nj2[chn]->fill();
weights_nj3[chn]->fill();
weights_nj4[chn]->fill();
}
// Require at least one jet
if (jets.empty()) continue;
// Fill jet multiplicities
for (size_t ijet = 1; ijet <= jets.size(); ++ijet) {
_h_njet_incl[chn]->fill(ijet);
}
// Loop over selected jets, fill inclusive jet distributions
for (size_t ijet = 0; ijet < jets.size(); ++ijet) {
_h_ptjet[chn]->fill(jets[ijet].pT()/GeV);
_h_yjet [chn]->fill(fabs(jets[ijet].rapidity()));
}
// Leading jet histos
const double ptlead = jets[0].pT()/GeV;
const double yabslead = fabs(jets[0].rapidity());
_h_ptlead[chn]->fill(ptlead);
_h_ylead [chn]->fill(yabslead);
if (jets.size() >= 2) {
// Second jet histos
const double pt2ndlead = jets[1].pT()/GeV;
const double yabs2ndlead = fabs(jets[1].rapidity());
_h_ptseclead[chn] ->fill(pt2ndlead);
_h_yseclead [chn] ->fill(yabs2ndlead);
// Dijet histos
const double deltaphi = fabs(deltaPhi(jets[1], jets[0]));
const double deltarap = fabs(jets[0].rapidity() - jets[1].rapidity()) ;
const double deltar = fabs(deltaR(jets[0], jets[1], RAPIDITY));
const double mass = (jets[0].momentum() + jets[1].momentum()).mass();
_h_mass [chn] ->fill(mass/GeV);
_h_deltay [chn] ->fill(deltarap);
_h_deltaphi[chn] ->fill(deltaphi);
_h_deltaR [chn] ->fill(deltar);
}
}
}
/// @name Ratio calculator util functions
//@{
/// Calculate the ratio, being careful about div-by-zero
double ratio(double a, double b) {
return (b != 0) ? a/b : 0;
}
/// Calculate the ratio error, being careful about div-by-zero
double ratio_err(double a, double b) {
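// Propagates independent Poisson uncertainties sqrt(a) and sqrt(b)
// through r = a/b: sigma_r^2 = a/b^2 + a^2/b^3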
return (b != 0) ? sqrt(a/sqr(b) + sqr(a)/(b*b*b)) : 0;
}
//@}
void finalize() {
// Fill ratio histograms
for (size_t chn = 0; chn < 3; ++chn) {
_h_njet_ratio[chn]->addPoint(1, ratio(weights_nj1[chn]->val(), weights_nj0[chn]->val()), 0.5, ratio_err(weights_nj1[chn]->val(), weights_nj0[chn]->val()));
_h_njet_ratio[chn]->addPoint(2, ratio(weights_nj2[chn]->val(), weights_nj1[chn]->val()), 0.5, ratio_err(weights_nj2[chn]->val(), weights_nj1[chn]->val()));
_h_njet_ratio[chn]->addPoint(3, ratio(weights_nj3[chn]->val(), weights_nj2[chn]->val()), 0.5, ratio_err(weights_nj3[chn]->val(), weights_nj2[chn]->val()));
_h_njet_ratio[chn]->addPoint(4, ratio(weights_nj4[chn]->val(), weights_nj3[chn]->val()), 0.5, ratio_err(weights_nj4[chn]->val(), weights_nj3[chn]->val()));
}
// Scale other histos
for (size_t chn = 0; chn < 3; ++chn) {
// For ee and mumu channels: normalize to Njet inclusive cross-section
- double xs = (chn == 2) ? crossSectionPerEvent()/picobarn : 1 / weights_nj0[chn];
+ double xs = (chn == 2) ? crossSectionPerEvent()/picobarn : 1 / weights_nj0[chn]->val();
// For the inclusive MC sample (ee/mumu channels together) we want the single-lepton-flavor xsec
if (_isZeeSample && _isZmmSample) xs /= 2;
// Special case: this histogram is always left unnormalised
scale(_h_njet_incl[chn], (chn < 2) ? crossSectionPerEvent()/picobarn : xs);
scale(_h_ptjet[chn] , xs);
scale(_h_ptlead[chn] , xs);
scale(_h_ptseclead[chn], xs);
scale(_h_yjet[chn] , xs);
scale(_h_ylead[chn] , xs);
scale(_h_yseclead[chn] , xs);
scale(_h_deltaphi[chn] , xs);
scale(_h_deltay[chn] , xs);
scale(_h_deltaR[chn] , xs);
scale(_h_mass[chn] , xs);
}
}
//@}
private:
bool _isZeeSample;
bool _isZmmSample;
CounterPtr weights_nj0[3];
CounterPtr weights_nj1[3];
CounterPtr weights_nj2[3];
CounterPtr weights_nj3[3];
CounterPtr weights_nj4[3];
Scatter2DPtr _h_njet_ratio[3];
Histo1DPtr _h_njet_incl[3];
Histo1DPtr _h_ptjet[3];
Histo1DPtr _h_ptlead[3];
Histo1DPtr _h_ptseclead[3];
Histo1DPtr _h_yjet[3];
Histo1DPtr _h_ylead[3];
Histo1DPtr _h_yseclead[3];
Histo1DPtr _h_deltaphi[3];
Histo1DPtr _h_deltay[3];
Histo1DPtr _h_deltaR[3];
Histo1DPtr _h_mass[3];
};
DECLARE_RIVET_PLUGIN(ATLAS_2011_I945498);
}
diff --git a/analyses/pluginATLAS/ATLAS_2011_S9131140.cc b/analyses/pluginATLAS/ATLAS_2011_S9131140.cc
--- a/analyses/pluginATLAS/ATLAS_2011_S9131140.cc
+++ b/analyses/pluginATLAS/ATLAS_2011_S9131140.cc
@@ -1,109 +1,109 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ZFinder.hh"
namespace Rivet {
/// @brief ATLAS Z pT in Drell-Yan events at 7 TeV
/// @author Elena Yatsenko, Judith Katzy
class ATLAS_2011_S9131140 : public Analysis {
public:
/// Constructor
ATLAS_2011_S9131140()
: Analysis("ATLAS_2011_S9131140")
{
}
/// @name Analysis methods
//@{
void init() {
// Set up projections
FinalState fs;
Cut cut = Cuts::abseta < 2.4 && Cuts::pT > 20*GeV;
ZFinder zfinder_dressed_el(fs, cut, PID::ELECTRON, 66.0*GeV, 116.0*GeV, 0.1, ZFinder::CLUSTERNODECAY);
declare(zfinder_dressed_el, "ZFinder_dressed_el");
ZFinder zfinder_bare_el(fs, cut, PID::ELECTRON, 66.0*GeV, 116.0*GeV, 0.0, ZFinder::NOCLUSTER);
declare(zfinder_bare_el, "ZFinder_bare_el");
ZFinder zfinder_dressed_mu(fs, cut, PID::MUON, 66.0*GeV, 116.0*GeV, 0.1, ZFinder::CLUSTERNODECAY);
declare(zfinder_dressed_mu, "ZFinder_dressed_mu");
ZFinder zfinder_bare_mu(fs, cut, PID::MUON, 66.0*GeV, 116.0*GeV, 0.0, ZFinder::NOCLUSTER);
declare(zfinder_bare_mu, "ZFinder_bare_mu");
// Book histograms
book(_hist_zpt_el_dressed ,1, 1, 2); // electron "dressed"
book(_hist_zpt_el_bare ,1, 1, 3); // electron "bare"
book(_hist_zpt_mu_dressed ,2, 1, 2); // muon "dressed"
book(_hist_zpt_mu_bare ,2, 1, 3); // muon "bare"
book(_sumw_el_bare, "_sumw_el_bare");
book(_sumw_el_dressed, "_sumw_el_dressed");
book(_sumw_mu_bare, "_sumw_mu_bare");
book(_sumw_mu_dressed, "_sumw_mu_dressed");
}
/// Do the analysis
void analyze(const Event& evt) {
const ZFinder& zfinder_dressed_el = apply<ZFinder>(evt, "ZFinder_dressed_el");
if (!zfinder_dressed_el.bosons().empty()) {
_sumw_el_dressed->fill();
const FourMomentum pZ = zfinder_dressed_el.bosons()[0].momentum();
_hist_zpt_el_dressed->fill(pZ.pT()/GeV);
}
const ZFinder& zfinder_bare_el = apply<ZFinder>(evt, "ZFinder_bare_el");
if (!zfinder_bare_el.bosons().empty()) {
_sumw_el_bare->fill();
const FourMomentum pZ = zfinder_bare_el.bosons()[0].momentum();
_hist_zpt_el_bare->fill(pZ.pT()/GeV);
}
const ZFinder& zfinder_dressed_mu = apply<ZFinder>(evt, "ZFinder_dressed_mu");
if (!zfinder_dressed_mu.bosons().empty()) {
_sumw_mu_dressed->fill();
const FourMomentum pZ = zfinder_dressed_mu.bosons()[0].momentum();
_hist_zpt_mu_dressed->fill(pZ.pT()/GeV);
}
const ZFinder& zfinder_bare_mu = apply<ZFinder>(evt, "ZFinder_bare_mu");
if (!zfinder_bare_mu.bosons().empty()) {
_sumw_mu_bare->fill();
const FourMomentum pZ = zfinder_bare_mu.bosons()[0].momentum();
_hist_zpt_mu_bare->fill(pZ.pT()/GeV);
}
}
void finalize() {
- if (_sumw_el_dressed != 0) scale(_hist_zpt_el_dressed, 1/_sumw_el_dressed);
- if (_sumw_el_bare != 0) scale(_hist_zpt_el_bare, 1/_sumw_el_bare);
- if (_sumw_mu_dressed != 0) scale(_hist_zpt_mu_dressed, 1/_sumw_mu_dressed);
- if (_sumw_mu_bare != 0) scale(_hist_zpt_mu_bare, 1/_sumw_mu_bare);
+ if (_sumw_el_dressed->val() != 0) scale(_hist_zpt_el_dressed, 1/ *_sumw_el_dressed);
+ if (_sumw_el_bare->val() != 0) scale(_hist_zpt_el_bare, 1/ *_sumw_el_bare);
+ if (_sumw_mu_dressed->val() != 0) scale(_hist_zpt_mu_dressed, 1/ *_sumw_mu_dressed);
+ if (_sumw_mu_bare->val() != 0) scale(_hist_zpt_mu_bare, 1/ *_sumw_mu_bare);
}
//@}
private:
CounterPtr _sumw_el_bare, _sumw_el_dressed;
CounterPtr _sumw_mu_bare, _sumw_mu_dressed;
Histo1DPtr _hist_zpt_el_dressed;
Histo1DPtr _hist_zpt_el_bare;
Histo1DPtr _hist_zpt_mu_dressed;
Histo1DPtr _hist_zpt_mu_bare;
};
// Hook for the plugin system
DECLARE_RIVET_PLUGIN(ATLAS_2011_S9131140);
}
diff --git a/analyses/pluginATLAS/ATLAS_2012_I1082009.cc b/analyses/pluginATLAS/ATLAS_2012_I1082009.cc
--- a/analyses/pluginATLAS/ATLAS_2012_I1082009.cc
+++ b/analyses/pluginATLAS/ATLAS_2012_I1082009.cc
@@ -1,150 +1,150 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/IdentifiedFinalState.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
#include "Rivet/Projections/MissingMomentum.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/LeadingParticlesFinalState.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
class ATLAS_2012_I1082009 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
ATLAS_2012_I1082009()
: Analysis("ATLAS_2012_I1082009")
{ }
//@}
public:
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
// Input for the jets: No neutrinos, no muons
VetoedFinalState veto;
veto.addVetoPairId(PID::MUON);
veto.vetoNeutrinos();
FastJets jets(veto, FastJets::ANTIKT, 0.6);
declare(jets, "jets");
// unstable final-state for D*
declare(UnstableFinalState(), "UFS");
book(_weight25_30, "_weight_25_30");
book(_weight30_40, "_weight_30_40");
book(_weight40_50, "_weight_40_50");
book(_weight50_60, "_weight_50_60");
book(_weight60_70, "_weight_60_70");
book(_weight25_70, "_weight_25_70");
book(_h_pt25_30 , 8,1,1);
book(_h_pt30_40 , 9,1,1);
book(_h_pt40_50 ,10,1,1);
book(_h_pt50_60 ,11,1,1);
book(_h_pt60_70 ,12,1,1);
book(_h_pt25_70 ,13,1,1);
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// get the jets
Jets jets;
foreach (const Jet& jet, apply<FastJets>(event, "jets").jetsByPt(25.0*GeV)) {
if ( jet.abseta() < 2.5 ) jets.push_back(jet);
}
// get the D* mesons
const UnstableFinalState& ufs = apply<UnstableFinalState>(event, "UFS");
Particles Dstar;
foreach (const Particle& p, ufs.particles()) {
const int id = p.abspid();
if(id==413) Dstar.push_back(p);
}
// loop over the jets
foreach (const Jet& jet, jets ) {
double perp = jet.perp();
bool found = false;
double z(0.);
if(perp<25.||perp>70.) continue;
foreach(const Particle & p, Dstar) {
if(p.perp()<7.5) continue;
if(deltaR(p, jet.momentum())<0.6) {
Vector3 axis = jet.p3().unit();
z = axis.dot(p.p3())/jet.E();
if(z<0.3) continue;
found = true;
break;
}
}
_weight25_70->fill();
if(found) _h_pt25_70->fill(z);
if(perp>=25.&&perp<30.) {
_weight25_30->fill();
if(found) _h_pt25_30->fill(z);
}
else if(perp>=30.&&perp<40.) {
_weight30_40->fill();
if(found) _h_pt30_40->fill(z);
}
else if(perp>=40.&&perp<50.) {
_weight40_50->fill();
if(found) _h_pt40_50->fill(z);
}
else if(perp>=50.&&perp<60.) {
_weight50_60->fill();
if(found) _h_pt50_60->fill(z);
}
else if(perp>=60.&&perp<70.) {
_weight60_70->fill();
if(found) _h_pt60_70->fill(z);
}
}
}
/// Normalise histograms etc., after the run
void finalize() {
- scale(_h_pt25_30,1./_weight25_30);
- scale(_h_pt30_40,1./_weight30_40);
- scale(_h_pt40_50,1./_weight40_50);
- scale(_h_pt50_60,1./_weight50_60);
- scale(_h_pt60_70,1./_weight60_70);
- scale(_h_pt25_70,1./_weight25_70);
+ scale(_h_pt25_30,1./ *_weight25_30);
+ scale(_h_pt30_40,1./ *_weight30_40);
+ scale(_h_pt40_50,1./ *_weight40_50);
+ scale(_h_pt50_60,1./ *_weight50_60);
+ scale(_h_pt60_70,1./ *_weight60_70);
+ scale(_h_pt25_70,1./ *_weight25_70);
}
//@}
private:
/// @name Histograms
//@{
CounterPtr _weight25_30,_weight30_40,_weight40_50;
CounterPtr _weight50_60,_weight60_70,_weight25_70;
Histo1DPtr _h_pt25_30;
Histo1DPtr _h_pt30_40;
Histo1DPtr _h_pt40_50;
Histo1DPtr _h_pt50_60;
Histo1DPtr _h_pt60_70;
Histo1DPtr _h_pt25_70;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ATLAS_2012_I1082009);
}
diff --git a/analyses/pluginATLAS/ATLAS_2012_I1091481.cc b/analyses/pluginATLAS/ATLAS_2012_I1091481.cc
--- a/analyses/pluginATLAS/ATLAS_2012_I1091481.cc
+++ b/analyses/pluginATLAS/ATLAS_2012_I1091481.cc
@@ -1,177 +1,177 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
class ATLAS_2012_I1091481 : public Analysis {
public:
/// Constructor
ATLAS_2012_I1091481()
: Analysis("ATLAS_2012_I1091481")
{ }
/// Book histograms and initialise projections before the run
void init() {
ChargedFinalState cfs100(Cuts::abseta < 2.5 && Cuts::pT > 0.1*GeV);
declare(cfs100,"CFS100");
ChargedFinalState cfs500(Cuts::abseta < 2.5 && Cuts::pT > 0.5*GeV);
declare(cfs500,"CFS500");
// collision energy
int isqrts = -1;
if (fuzzyEquals(sqrtS(), 900*GeV)) isqrts = 2;
if (fuzzyEquals(sqrtS(), 7*TeV)) isqrts = 1;
assert(isqrts > 0);
book(_sE_10_100 ,isqrts, 1, 1);
book(_sE_1_100 ,isqrts, 1, 2);
book(_sE_10_500 ,isqrts, 1, 3);
book(_sEta_10_100 ,isqrts, 2, 1);
book(_sEta_1_100 ,isqrts, 2, 2);
book(_sEta_10_500 ,isqrts, 2, 3);
book(norm_inclusive, "norm_inclusive");
book(norm_lowPt, "norm_lowPt");
book(norm_pt500, "norm_pt500");
}
// Recalculate particle energy assuming pion mass
double getPionEnergy(const Particle& p) {
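// E = sqrt(m_pi^2 + |p|^2) under the charged-pion mass hypothesis (m_pi = 139.6 MeV)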
double m_pi = 0.1396*GeV;
double p2 = p.p3().mod2()/(GeV*GeV);
return sqrt(sqr(m_pi) + p2);
}
// S_eta core for one event
//
// -1 + 1/Nch * |sum_j^Nch exp[i*(xi eta_j - Phi_j)]|^2
//
double getSeta(const Particles& part, double xi) {
std::complex<double> c_eta (0.0, 0.0);
foreach (const Particle& p, part) {
double eta = p.eta();
double phi = p.phi();
double arg = xi*eta-phi;
std::complex<double> temp(cos(arg), sin(arg));
c_eta += temp;
}
return std::norm(c_eta)/part.size() - 1.0;
}
// S_E core for one event
//
// -1 + 1/Nch * |sum_j^Nch exp[i*(omega X_j - Phi_j)]|^2
//
double getSE(const Particles& part, double omega) {
double Xj = 0.0;
std::complex<double> c_E (0.0, 0.0);
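// Xj accumulates the running pion-hypothesis energy sum; adding half of
// each particle's energy before and half after the phase evaluation
// samples X_j at the midpoint of that particle's energy step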
for (unsigned int i=0; i < part.size(); ++i) {
Xj += 0.5*getPionEnergy(part[i]);
double phi = part[i].phi();
double arg = omega*Xj - phi;
std::complex<double> temp(cos(arg), sin(arg));
c_E += temp;
Xj += 0.5*getPionEnergy(part[i]);
}
return std::norm(c_E)/part.size() - 1.0;
}
// Convenient fill function
void fillS(Histo1DPtr h, const Particles& part, bool SE=true) {
// Loop over bins, take bin centers as parameter values
for(size_t i=0; i < h->numBins(); ++i) {
double x = h->bin(i).xMid();
double width = h->bin(i).xMax() - h->bin(i).xMin();
double y;
if(SE) y = getSE(part, x);
else y = getSeta(part, x);
h->fill(x, y * width);
// Histo1D objects will be converted to Scatter2D objects for plotting
// As part of this conversion, Rivet will divide by bin width
// However, we want the (x,y) of the Scatter2D to be the (binCenter, sumW) of
// the current Histo1D. This is why in the above line we multiply by bin width,
// so as to undo later division by bin width.
//
// Could have used Scatter2D objects in the first place, but they cannot be merged
// as easily as Histo1Ds can using yodamerge (missing ScaledBy attribute)
}
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// Charged fs
const ChargedFinalState& cfs100 = apply<ChargedFinalState>(event, "CFS100");
const Particles part100 = cfs100.particles(cmpMomByEta);
const ChargedFinalState& cfs500 = apply<ChargedFinalState>(event, "CFS500");
const Particles& part500 = cfs500.particles(cmpMomByEta);
// Veto the event if the most inclusive phase space has fewer than 11 particles or the max pT is > 10 GeV
if (part100.size() < 11) vetoEvent;
double ptmax = cfs100.particlesByPt()[0].pT()/GeV;
if (ptmax > 10.0) vetoEvent;
// Fill the pt>100, pTmax<10 GeV histos
fillS(_sE_10_100, part100, true);
fillS(_sEta_10_100, part100, false);
norm_inclusive->fill();
// Fill the pt>100, pTmax<1 GeV histos
if (ptmax < 1.0) {
fillS(_sE_1_100, part100, true);
fillS(_sEta_1_100, part100, false);
norm_lowPt->fill();
}
// Fill the pt>500, pTmax<10 GeV histos
if (part500.size() > 10) {
fillS(_sE_10_500, part500, true );
fillS(_sEta_10_500, part500, false);
norm_pt500->fill();
}
}
/// Normalise histograms etc., after the run
void finalize() {
// The scaling takes the multiple fills per event into account
- scale(_sE_10_100, 1.0/norm_inclusive);
- scale(_sE_1_100 , 1.0/norm_lowPt);
- scale(_sE_10_500, 1.0/norm_pt500);
+ scale(_sE_10_100, 1.0/ *norm_inclusive);
+ scale(_sE_1_100 , 1.0/ *norm_lowPt);
+ scale(_sE_10_500, 1.0/ *norm_pt500);
- scale(_sEta_10_100, 1.0/norm_inclusive);
- scale(_sEta_1_100 , 1.0/norm_lowPt);
- scale(_sEta_10_500, 1.0/norm_pt500);
+ scale(_sEta_10_100, 1.0/ *norm_inclusive);
+ scale(_sEta_1_100 , 1.0/ *norm_lowPt);
+ scale(_sEta_10_500, 1.0/ *norm_pt500);
}
//@}
private:
Histo1DPtr _sE_10_100;
Histo1DPtr _sE_1_100;
Histo1DPtr _sE_10_500;
Histo1DPtr _sEta_10_100;
Histo1DPtr _sEta_1_100;
Histo1DPtr _sEta_10_500;
CounterPtr norm_inclusive;
CounterPtr norm_lowPt;
CounterPtr norm_pt500;
};
DECLARE_RIVET_PLUGIN(ATLAS_2012_I1091481);
}
diff --git a/analyses/pluginATLAS/ATLAS_2012_I1125575.cc b/analyses/pluginATLAS/ATLAS_2012_I1125575.cc
--- a/analyses/pluginATLAS/ATLAS_2012_I1125575.cc
+++ b/analyses/pluginATLAS/ATLAS_2012_I1125575.cc
@@ -1,239 +1,239 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Tools/BinnedHistogram.hh"
namespace Rivet {
/// ATLAS charged particle jet underlying event and jet radius dependence
class ATLAS_2012_I1125575 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
ATLAS_2012_I1125575()
: Analysis("ATLAS_2012_I1125575")
{ }
//@}
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
const ChargedFinalState jet_input(-2.5, 2.5, 0.5*GeV);
declare(jet_input, "JET_INPUT");
const ChargedFinalState track_input(-1.5, 1.5, 0.5*GeV);
declare(track_input, "TRACK_INPUT");
const FastJets jets02(jet_input, FastJets::ANTIKT, 0.2);
declare(jets02, "JETS_02");
const FastJets jets04(jet_input, FastJets::ANTIKT, 0.4);
declare(jets04, "JETS_04");
const FastJets jets06(jet_input, FastJets::ANTIKT, 0.6);
declare(jets06, "JETS_06");
const FastJets jets08(jet_input, FastJets::ANTIKT, 0.8);
declare(jets08, "JETS_08");
const FastJets jets10(jet_input, FastJets::ANTIKT, 1.0);
declare(jets10, "JETS_10");
// Mean number of tracks
initializeProfiles(_h_meanNch, 1);
// Mean of the average track pT in each region
initializeProfiles(_h_meanPtAvg, 2);
// Mean of the scalar sum of track pT in each region
initializeProfiles(_h_meanPtSum, 3);
// Distribution of Nch, in bins of leading track-jet pT
initializeHistograms(_h_Nch, 4);
// Distribution of average track-jet pT, in bins of leading track-jet pT
initializeHistograms(_h_PtAvg, 5);
// Distribution of sum of track-jet pT, in bins of leading track-jet pT
initializeHistograms(_h_PtSum, 6);
for (int i = 0; i < 5; ++i)
book(_nEvents[i], "nEvents_"+to_str(i));
}
void initializeProfiles(Profile1DPtr plots[5][2], int distribution) {
for (int i = 0; i < 5; ++i) {
for (int j = 0; j < 2; ++j) {
book(plots[i][j] ,distribution, i+1, j+1);
}
}
}
void initializeHistograms(BinnedHistogram plots[5][2], int distribution) {
Scatter2D refscatter = refData(1, 1, 1);
for (int i = 0; i < 5; ++i) {
for (int y = 0; y < 2; ++y) {
for (size_t j = 0; j < refscatter.numPoints(); ++j) {
int histogram_number = ((j+1)*2)-((y+1)%2);
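// Interleaves the two regions in the histogram numbering: odd indices for y=0, even for y=1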
double low_edge = refscatter.point(j).xMin();
double high_edge = refscatter.point(j).xMax();
Histo1DPtr tmp;
plots[i][y].add(low_edge, high_edge, book(tmp, distribution, i+1, histogram_number));
}
}
}
}
/// Perform the per-event analysis
void analyze(const Event& event) {
vector<Jets*> all_jets;
Jets jets_02 = apply<FastJets>(event, "JETS_02").jetsByPt(Cuts::pT > 4*GeV && Cuts::abseta < 1.5);
all_jets.push_back(&jets_02);
Jets jets_04 = apply<FastJets>(event, "JETS_04").jetsByPt(Cuts::pT > 4*GeV && Cuts::abseta < 1.5);
all_jets.push_back(&jets_04);
Jets jets_06 = apply<FastJets>(event, "JETS_06").jetsByPt(Cuts::pT > 4*GeV && Cuts::abseta < 1.5);
all_jets.push_back(&jets_06);
Jets jets_08 = apply<FastJets>(event, "JETS_08").jetsByPt(Cuts::pT > 4*GeV && Cuts::abseta < 1.5);
all_jets.push_back(&jets_08);
Jets jets_10 = apply<FastJets>(event, "JETS_10").jetsByPt(Cuts::pT > 4*GeV && Cuts::abseta < 1.5);
all_jets.push_back(&jets_10);
// Count the number of tracks in the away and transverse regions, for each set of jets
double n_ch[5][2] = { {0,0}, {0,0}, {0,0}, {0,0}, {0,0} };
// Also add up the sum pT
double sumpt[5][2] = { {0,0}, {0,0}, {0,0}, {0,0}, {0,0} };
// ptmean = sumpt / n_ch
double ptavg[5][2] = { {0,0}, {0,0}, {0,0}, {0,0}, {0,0} };
// lead jet pT defines which bin we want to fill
double lead_jet_pts[5] = {0.0};
// Loop over each of the jet radii:
for (int i = 0; i < 5; ++i) {
if (all_jets[i]->size() < 1) continue;
// Find the lead jet pT
lead_jet_pts[i] = all_jets[i]->at(0).pT();
// Loop over each of the charged particles
const Particles& tracks = apply<ChargedFinalState>(event, "TRACK_INPUT").particlesByPt();
foreach(const Particle& t, tracks) {
// Get the delta-phi between the track and the leading jet
double dphi = deltaPhi(all_jets[i]->at(0), t);
// Find out which region this puts it in.
// 0 = away region, 1 = transverse region, 2 = toward region
int region = region_index(dphi);
// If the track is in the toward region, ignore it.
if (region == 2) continue;
// Otherwise, increment the relevant counters
++n_ch[i][region];
sumpt[i][region] += t.pT();
}
// Calculate the pT_avg for the away and transverse regions.
// (And make sure we don't try to divide by zero.)
ptavg[i][0] = (n_ch[i][0] == 0 ? 0.0 : sumpt[i][0] / n_ch[i][0]);
ptavg[i][1] = (n_ch[i][1] == 0 ? 0.0 : sumpt[i][1] / n_ch[i][1]);
_nEvents[i]->fill();
}
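// The 1/(2*PI) factor converts to densities per unit eta-phi area:
// delta-eta = 3.0 (|eta| < 1.5) times delta-phi = 2*PI/3 per region gives 2*PI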
fillProfiles(_h_meanNch, n_ch, lead_jet_pts, 1.0 / (2*PI));
fillProfiles(_h_meanPtAvg, ptavg, lead_jet_pts, 1.0);
fillProfiles(_h_meanPtSum, sumpt, lead_jet_pts, 1.0 / (2*PI));
fillHistograms(_h_Nch, n_ch, lead_jet_pts);
fillHistograms(_h_PtAvg, ptavg, lead_jet_pts);
fillHistograms(_h_PtSum, sumpt, lead_jet_pts);
}
void fillProfiles(Profile1DPtr plots[5][2], double var[5][2], double lead_pt[5], double scale) {
for (int i=0; i<5; ++i) {
double pt = lead_pt[i];
for (int j=0; j<2; ++j) {
double v = var[i][j];
plots[i][j]->fill(pt, v*scale);
}
}
}
void fillHistograms(BinnedHistogram plots[5][2], double var[5][2], double lead_pt[5]) {
for (int i=0; i<5; ++i) {
double pt = lead_pt[i];
for (int j=0; j<2; ++j) {
double v = var[i][j];
plots[i][j].fill(pt, v);
}
}
}
int region_index(double dphi) {
assert(inRange(dphi, 0.0, PI, CLOSED, CLOSED));
if (dphi < PI/3.0) return 2;
if (dphi < 2*PI/3.0) return 1;
return 0;
}
/// Normalise histograms etc., after the run
void finalize() {
finalizeHistograms(_h_Nch);
finalizeHistograms(_h_PtAvg);
finalizeHistograms(_h_PtSum);
}
void finalizeHistograms(BinnedHistogram plots[5][2]) {
for (int i = 0; i < 5; ++i) {
for (int j = 0; j < 2; ++j) {
vector<Histo1DPtr> histos = plots[i][j].histos();
foreach(Histo1DPtr h, histos) {
- scale(h, 1.0/_nEvents[i]);
+ scale(h, 1.0/ *_nEvents[i]);
}
}
}
}
//@}
private:
// Data members like post-cuts event weight counters go here
CounterPtr _nEvents[5];
Profile1DPtr _h_meanNch[5][2];
Profile1DPtr _h_meanPtAvg[5][2];
Profile1DPtr _h_meanPtSum[5][2];
BinnedHistogram _h_Nch[5][2];
BinnedHistogram _h_PtAvg[5][2];
BinnedHistogram _h_PtSum[5][2];
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ATLAS_2012_I1125575);
}
diff --git a/analyses/pluginATLAS/ATLAS_2016_I1419652.cc b/analyses/pluginATLAS/ATLAS_2016_I1419652.cc
--- a/analyses/pluginATLAS/ATLAS_2016_I1419652.cc
+++ b/analyses/pluginATLAS/ATLAS_2016_I1419652.cc
@@ -1,173 +1,173 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
class ATLAS_2016_I1419652 : public Analysis {
public:
/// Particle types included
enum PartTypes {
k_NoStrange,
k_AllCharged,
kNPartTypes
};
/// Phase space regions
enum RegionID {
k_pt500_nch1_eta25,
k_pt500_nch1_eta08,
kNregions
};
/// Nch cut for each region
const static int nchCut[kNregions];
/// Default constructor
ATLAS_2016_I1419652() : Analysis("ATLAS_2016_I1419652") {}
/// Initialization, called once before running
void init() {
// Projections
const ChargedFinalState cfs500_25(-2.5, 2.5, 500.0*MeV);
declare(cfs500_25, "CFS500_25");
const ChargedFinalState cfs500_08(-0.8, 0.8, 500.0*MeV);
declare(cfs500_08, "CFS500_08");
for (int iT = 0; iT < kNPartTypes; ++iT) {
for (int iR = 0; iR < kNregions; ++iR) {
book(_sumW[iT][iR], "_sumW" + to_str(iT) + to_str(iR));
book(_hist_nch [iT][iR] , 1, iR + 1, iT + 1);
book(_hist_pt [iT][iR] , 2, iR + 1, iT + 1);
book(_hist_eta [iT][iR] , 3, iR + 1, iT + 1);
book(_hist_ptnch[iT][iR] , 4, iR + 1, iT + 1);
}
}
}
void analyze(const Event& event) {
string fsName;
for (int iR = 0; iR < kNregions; ++iR) {
switch (iR) {
case k_pt500_nch1_eta25: fsName = "CFS500_25"; break;
case k_pt500_nch1_eta08: fsName = "CFS500_08"; break;
}
const ChargedFinalState& cfs = apply<ChargedFinalState>(event, fsName);
/// What's the benefit in separating this code which is only called from one place?!
fillPtEtaNch(cfs, iR);
}
}
void finalize() {
// Standard histograms
for (int iT = 0; iT < kNPartTypes; ++iT) {
for (int iR = 0; iR < kNregions; ++iR) {
double etaRangeSize = -999.0; //intentionally crazy
switch (iR) {
case k_pt500_nch1_eta25 : etaRangeSize = 5.0 ; break;
case k_pt500_nch1_eta08 : etaRangeSize = 1.6 ; break;
default: etaRangeSize = -999.0; break; //intentionally crazy
}
- if (_sumW[iT][iR] > 0) {
- scale(_hist_nch[iT][iR], 1.0/_sumW[iT][iR]);
- scale(_hist_pt [iT][iR], 1.0/_sumW[iT][iR]/TWOPI/etaRangeSize);
- scale(_hist_eta[iT][iR], 1.0/_sumW[iT][iR]);
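+ // dbl(*counter) exposes the sum of weights as a plain double, keeping the
+ // chained /TWOPI/etaRangeSize divisions ordinary floating-point arithmetic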
+ if (_sumW[iT][iR]->val() > 0) {
+ scale(_hist_nch[iT][iR], 1.0/ *_sumW[iT][iR]);
+ scale(_hist_pt [iT][iR], 1.0/ dbl(*_sumW[iT][iR])/TWOPI/etaRangeSize);
+ scale(_hist_eta[iT][iR], 1.0/ *_sumW[iT][iR]);
} else {
MSG_WARNING("Sum of weights is zero (!) in type/region: " << iT << " " << iR);
}
}
}
}
/// Helper for collectively filling Nch, pT, eta, and pT vs. Nch histograms
void fillPtEtaNch(const ChargedFinalState& cfs, int iRegion) {
// Get number of particles
int nch[kNPartTypes];
int nch_noStrange = 0;
foreach (const Particle& p, cfs.particles()) {
PdgId pdg = p.abspid ();
if ( pdg == 3112 || // Sigma-
pdg == 3222 || // Sigma+
pdg == 3312 || // Xi-
pdg == 3334 ) // Omega-
continue;
nch_noStrange++;
}
nch[k_AllCharged] = cfs.size();
nch[k_NoStrange ] = nch_noStrange;
// Skip if event fails cut for all charged (noStrange will always be less)
if (nch[k_AllCharged] < nchCut[iRegion]) return;
// Fill event weight info
_sumW[k_AllCharged][iRegion]->fill();
if (nch[k_NoStrange ] >= nchCut[iRegion]) {
_sumW[k_NoStrange][iRegion]->fill();
}
// Fill nch
_hist_nch[k_AllCharged][iRegion]->fill(nch[k_AllCharged]);
if (nch[k_NoStrange ] >= nchCut[iRegion]) {
_hist_nch [k_NoStrange][iRegion]->fill(nch[k_NoStrange ]);
}
// Loop over particles, fill pT, eta and ptnch
foreach (const Particle& p, cfs.particles()) {
const double pt = p.pT()/GeV;
const double eta = p.eta();
_hist_pt [k_AllCharged][iRegion]->fill(pt , 1.0/pt);
_hist_eta [k_AllCharged][iRegion]->fill(eta);
_hist_ptnch [k_AllCharged][iRegion]->fill(nch[k_AllCharged], pt);
// Make sure nch cut is passed for nonStrange particles!
if (nch[k_NoStrange ] >= nchCut[iRegion]) {
PdgId pdg = p.abspid ();
if ( pdg == 3112 || // Sigma-
pdg == 3222 || // Sigma+
pdg == 3312 || // Xi-
pdg == 3334 ) // Omega-
continue;
// Here we don't have strange particles anymore
_hist_pt [k_NoStrange][iRegion]->fill(pt , 1.0/pt);
_hist_eta [k_NoStrange][iRegion]->fill(eta);
_hist_ptnch[k_NoStrange][iRegion]->fill(nch[k_NoStrange], pt);
}
}
}
private:
CounterPtr _sumW[kNPartTypes][kNregions];
Histo1DPtr _hist_nch [kNPartTypes][kNregions];
Histo1DPtr _hist_pt [kNPartTypes][kNregions];
Histo1DPtr _hist_eta [kNPartTypes][kNregions];
Profile1DPtr _hist_ptnch[kNPartTypes][kNregions];
};
// Constants: pT & eta regions
const int ATLAS_2016_I1419652::nchCut[] = {1, 1};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ATLAS_2016_I1419652);
}
diff --git a/analyses/pluginATLAS/ATLAS_2016_I1426695.cc b/analyses/pluginATLAS/ATLAS_2016_I1426695.cc
--- a/analyses/pluginATLAS/ATLAS_2016_I1426695.cc
+++ b/analyses/pluginATLAS/ATLAS_2016_I1426695.cc
@@ -1,129 +1,129 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
/// @brief ATLAS charged-particle pT, eta and multiplicity distributions in minimum-bias events
class ATLAS_2016_I1426695 : public Analysis {
public:
//phase space regions
enum regionID {
k_pt100_nch2 = 0,
k_pt500_nch1,
k_pt500_nch6,
k_pt500_nch20,
k_pt500_nch50,
kNregions
};
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2016_I1426695);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
for (int iR=0; iR < kNregions; ++iR) {
book(_sumW[iR], "_sumW" + to_str(iR)) ;
}
// Initialise and register projections
declare(ChargedFinalState(Cuts::abseta < 2.5 && Cuts::pT > 100*MeV), "CFS_100");
declare(ChargedFinalState(Cuts::abseta < 2.5 && Cuts::pT > 500*MeV), "CFS_500");
// Book histograms
for (int iR=0; iR < kNregions; ++iR) {
if (iR == k_pt100_nch2 || iR == k_pt500_nch1) {
book(_hist_nch [iR] ,1, iR + 1, 1);
book(_hist_ptnch[iR] ,4, iR + 1, 1);
}
book(_hist_pt [iR] ,2, iR + 1, 1);
book(_hist_eta[iR] ,3, iR + 1, 1);
}
}
void fillPtEtaNch(const Particles& particles, int nMin, int iRegion) {
// Skip if the event fails the multiplicity cut
int nch = particles.size();
if (nch < nMin) return;
_sumW[iRegion]->fill();
// Fill nch
if (iRegion == k_pt100_nch2 || iRegion == k_pt500_nch1) {
_hist_nch[iRegion]->fill(nch);
}
for (const Particle& p : particles) {
// Loop over particles, fill pT, eta and ptnch
const double pt = p.pT()/GeV;
const double eta = p.eta();
_hist_pt [iRegion]->fill(pt , 1.0/pt);
_hist_eta[iRegion]->fill(eta);
if (iRegion == k_pt100_nch2 || iRegion == k_pt500_nch1) {
_hist_ptnch[iRegion]->fill(nch, pt);
}
} //end loop over particles
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// Get charged particles, omitting some strange heavies
const Cut& pcut = (
(Cuts::abspid!=PID::SIGMAMINUS) && (Cuts::abspid!=PID::SIGMAPLUS) &&
(Cuts::abspid!=PID::XIMINUS) && (Cuts::abspid!=PID::OMEGAMINUS));
const Particles& p_100 = apply<ChargedFinalState>(event, "CFS_100").particles(pcut);
const Particles& p_500 = apply<ChargedFinalState>(event, "CFS_500").particles(pcut);
fillPtEtaNch(p_100, 2, 0);
fillPtEtaNch(p_500, 1, 1);
fillPtEtaNch(p_500, 6, 2);
fillPtEtaNch(p_500, 20, 3);
fillPtEtaNch(p_500, 50, 4);
}
void finalize() {
for (int iR = 0; iR < kNregions; ++iR) {
- if (_sumW[iR] > 0) {
+ if (_sumW[iR]->val() > 0) {
if (iR == k_pt100_nch2 || iR == k_pt500_nch1) {
- scale(_hist_nch[iR], 1.0/_sumW[iR]);
+ scale(_hist_nch[iR], 1.0/ *_sumW[iR]);
}
- scale(_hist_pt [iR], 1.0/_sumW[iR]/TWOPI/5.);
- scale(_hist_eta[iR], 1.0/_sumW[iR]);
+ scale(_hist_pt [iR], 1.0/ dbl(*_sumW[iR])/TWOPI/5.);
+ scale(_hist_eta[iR], 1.0/ *_sumW[iR]);
}
}
}
//@}
private:
CounterPtr _sumW[kNregions];
/// @name Histograms
Histo1DPtr _hist_nch [kNregions];
Histo1DPtr _hist_pt [kNregions];
Histo1DPtr _hist_eta [kNregions];
Profile1DPtr _hist_ptnch [kNregions];
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ATLAS_2016_I1426695);
}
diff --git a/analyses/pluginATLAS/ATLAS_2016_I1467230.cc b/analyses/pluginATLAS/ATLAS_2016_I1467230.cc
--- a/analyses/pluginATLAS/ATLAS_2016_I1467230.cc
+++ b/analyses/pluginATLAS/ATLAS_2016_I1467230.cc
@@ -1,133 +1,133 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
/// @brief ATLAS 13 TeV minimum bias analysis for low-pT tracks
class ATLAS_2016_I1467230 : public Analysis {
public:
/// Particle types included
enum PartTypes {
k_NoStrange,
k_AllCharged,
kNPartTypes
};
/// Phase space regions
enum regionID {
k_pt100_nch2_eta25,
kNregions
};
/// Default constructor
DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2016_I1467230);
/// Initialization, called once before running
void init() {
for (int iT = 0; iT < kNPartTypes; ++iT) {
for (int iR = 0; iR < kNregions; ++iR) {
book(_sumW[iT][iR], "_sumW" + to_str(iT) + to_str(iR));
}
}
// Initialize and register projections
declare(ChargedFinalState(Cuts::abseta < 2.5 && Cuts::pT > 100*MeV), "CFS100_25");
for (int iT = 0; iT < kNPartTypes; ++iT) {
for (int iR = 0; iR < kNregions; ++iR) {
book(_hist_nch [iT][iR], 1, iR + 1, iT + 1);
book(_hist_pt [iT][iR], 2, iR + 1, iT + 1);
book(_hist_eta [iT][iR], 3, iR + 1, iT + 1);
book(_hist_ptnch[iT][iR], 4, iR + 1, iT + 1);
}
}
}
/// Fill histograms for the given particle selection and phase-space region
void fillPtEtaNch(const Particles& particles, int ptype, int iRegion) {
// Skip if event fails multiplicity cut
const size_t nch = particles.size();
if (nch < 2) return;
_sumW[ptype][iRegion]->fill();
// Fill nch
_hist_nch[ptype][iRegion]->fill(nch);
// Loop over particles, fill pT, eta and ptnch
for (const Particle& p : particles) {
const double pt = p.pT()/GeV;
const double eta = p.eta();
_hist_pt [ptype][iRegion]->fill(pt , 1.0/pt);
_hist_eta [ptype][iRegion]->fill(eta);
_hist_ptnch[ptype][iRegion]->fill(nch, pt);
}
}
/// Per-event analysis
void analyze(const Event& event) {
// Get all charged particles
const ChargedFinalState& cfs = apply<ChargedFinalState>(event, "CFS100_25");
const Particles& pall = cfs.particles();
// Get charged particles, filtered to omit charged strange baryons
const Cut& pcut = Cuts::abspid != PID::SIGMAMINUS && Cuts::abspid != PID::SIGMAPLUS && Cuts::abspid != PID::XIMINUS && Cuts::abspid != PID::OMEGAMINUS;
const Particles& pnostrange = cfs.particles(pcut);
// Fill all histograms
for (int iR = 0; iR < kNregions; ++iR) {
fillPtEtaNch(pall, k_AllCharged, iR);
fillPtEtaNch(pnostrange, k_NoStrange, iR);
}
}
/// Post-run data manipulation
void finalize() {
// Scale all histograms
for (int iT = 0; iT < kNPartTypes; ++iT) {
for (int iR = 0; iR < kNregions; ++iR) {
- if (_sumW[iT][iR] > 0) {
- scale(_hist_nch[iT][iR], 1.0/_sumW[iT][iR]);
- scale(_hist_pt [iT][iR], 1.0/_sumW[iT][iR]/TWOPI/5.);
- scale(_hist_eta[iT][iR], 1.0/_sumW[iT][iR]);
+ if (_sumW[iT][iR]->val() > 0) {
+ scale(_hist_nch[iT][iR], 1.0/ *_sumW[iT][iR]);
+ scale(_hist_pt [iT][iR], 1.0/ dbl(*_sumW[iT][iR])/TWOPI/5.);
+ scale(_hist_eta[iT][iR], 1.0/ *_sumW[iT][iR]);
}
}
}
}
private:
/// Weight sums
CounterPtr _sumW[kNPartTypes][kNregions];
/// @name Histogram arrays
//@{
Histo1DPtr _hist_nch [kNPartTypes][kNregions];
Histo1DPtr _hist_pt [kNPartTypes][kNregions];
Histo1DPtr _hist_eta [kNPartTypes][kNregions];
Profile1DPtr _hist_ptnch [kNPartTypes][kNregions];
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ATLAS_2016_I1467230);
}
diff --git a/analyses/pluginCDF/CDF_1990_S2089246.cc b/analyses/pluginCDF/CDF_1990_S2089246.cc
--- a/analyses/pluginCDF/CDF_1990_S2089246.cc
+++ b/analyses/pluginCDF/CDF_1990_S2089246.cc
@@ -1,84 +1,84 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/TriggerCDFRun0Run1.hh"
namespace Rivet {
/// @brief CDF pseudorapidity analysis at 630 and 1800 GeV
/// @author Andy Buckley
class CDF_1990_S2089246 : public Analysis {
public:
/// Constructor
CDF_1990_S2089246()
: Analysis("CDF_1990_S2089246")
{
}
/// @name Analysis methods
//@{
void init() {
// Setup projections
declare(TriggerCDFRun0Run1(), "Trigger");
declare(ChargedFinalState(-3.5, 3.5), "CFS");
// Book histo
if (fuzzyEquals(sqrtS()/GeV, 1800, 1E-3)) {
book(_hist_eta ,3, 1, 1);
} else if (fuzzyEquals(sqrtS()/GeV, 630, 1E-3)) {
book(_hist_eta ,4, 1, 1);
}
book(_sumWTrig, "sumWTrig");
}
/// Do the analysis
void analyze(const Event& event) {
// Trigger
const bool trigger = apply<TriggerCDFRun0Run1>(event, "Trigger").minBiasDecision();
if (!trigger) vetoEvent;
_sumWTrig->fill();
// Loop over final state charged particles to fill eta histos
const FinalState& fs = apply<FinalState>(event, "CFS");
foreach (const Particle& p, fs.particles()) {
const double eta = p.eta();
_hist_eta->fill(fabs(eta));
}
}
/// Finalize
void finalize() {
// Divide through by num events to get d<N>/d(eta) in bins
// Factor of 1/2 for |eta| -> eta
- scale(_hist_eta, 0.5/_sumWTrig);
+ scale(_hist_eta, 0.5/ *_sumWTrig);
}
//@}
private:
/// @name Weight counter
//@{
CounterPtr _sumWTrig;
//@}
/// @name Histogram collections
//@{
Histo1DPtr _hist_eta;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_1990_S2089246);
}
diff --git a/analyses/pluginCDF/CDF_1994_S2952106.cc b/analyses/pluginCDF/CDF_1994_S2952106.cc
--- a/analyses/pluginCDF/CDF_1994_S2952106.cc
+++ b/analyses/pluginCDF/CDF_1994_S2952106.cc
@@ -1,208 +1,208 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
#include "Rivet/Projections/VisibleFinalState.hh"
#include "Rivet/Projections/MissingMomentum.hh"
namespace Rivet {
/// @brief CDF Run I color coherence analysis
/// @author Andy Buckley
/// @author Lars Sonnenschein
class CDF_1994_S2952106 : public Analysis {
public:
/// Constructor
CDF_1994_S2952106() : Analysis("CDF_1994_S2952106")
{
}
/// @name Analysis methods
//@{
void init() {
const FinalState fs(-4.2, 4.2);
declare(fs, "FS");
declare(FastJets(fs, FastJets::CDFJETCLU, 0.7), "Jets");
// Zero passed-cuts event weight counters
book(_sumw, "sumW");
// Output histograms
book(_histJet1Et ,1,1,1);
book(_histJet2Et ,2,1,1);
book(_histJet3eta, 3,1,1);
book(_histR23 , 4,1,1);
book(_histAlpha , 5,1,1);
// Temporary histos: these are the ones we actually fill for the plots which require correction
book(_tmphistJet3eta, "TMP/Jet3eta", refData(3,1,1));
book(_tmphistR23, "TMP/R23", refData(4,1,1));
book(_tmphistAlpha, "TMP/Alpha", refData(5,1,1));
}
// Do the analysis
void analyze(const Event & event) {
const Jets jets = apply<FastJets>(event, "Jets").jets(cmpMomByEt);
MSG_DEBUG("Jet multiplicity before any cuts = " << jets.size());
// ETs only from jets:
double et_sinphi_sum = 0;
double et_cosphi_sum = 0;
double et_sum = 0;
for (size_t i = 0; i< jets.size(); ++i) {
et_sinphi_sum += jets[i].Et() * sin(jets[i].phi());
et_cosphi_sum += jets[i].Et() * cos(jets[i].phi());
et_sum += jets[i].Et();
}
// ET requirement
if (sqrt(sqr(et_sinphi_sum) + sqr(et_cosphi_sum))/et_sum > 6.0) vetoEvent;
// Check jet requirements
if (jets.size() < 3) vetoEvent;
if (jets[0].pT() < 110*GeV) vetoEvent;
if (jets[2].pT() < 10*GeV) vetoEvent;
// More jet 1,2,3 checks
FourMomentum pj1(jets[0].momentum()), pj2(jets[1].momentum()), pj3(jets[2].momentum());
if (fabs(pj1.eta()) > 0.7 || fabs(pj2.eta()) > 0.7) vetoEvent;
MSG_DEBUG("Jet 1 & 2 eta, pT requirements fulfilled");
// Require that jets are back-to-back within 20 degrees in phi
if ((PI - deltaPhi(pj1.phi(), pj2.phi())) > (20/180.0)*PI) vetoEvent;
MSG_DEBUG("Jet 1 & 2 phi requirement fulfilled");
_sumw->fill();
// Fill histos
_histJet1Et->fill(pj1.pT());
_histJet2Et->fill(pj2.pT());
_tmphistJet3eta->fill(pj3.eta());
_tmphistR23->fill(deltaR(pj2, pj3));
// Calc and plot alpha
const double dPhi = deltaPhi(pj3.phi(), pj2.phi());
const double dH = sign(pj2.eta()) * (pj3.eta() - pj2.eta());
const double alpha = atan(dH/dPhi);
_tmphistAlpha->fill(alpha*180./PI);
}
/// Apply bin-wise detector correction factors
void finalize() {
// Normal scalings
normalize(_histJet1Et, 12.3);
normalize(_histJet2Et, 12.3);
// eta3 correction
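// Bin-wise ratios of CDF detector simulation to ideal simulation: each raw
// bin is multiplied by CDF_sim/Ideal_sim and normalised by the weight sum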
const double eta3_CDF_sim[] =
{ 0.0013, 0.0037, 0.0047, 0.0071, 0.0093, 0.0117, 0.0151, 0.0149, 0.0197, 0.0257,
0.0344, 0.0409, 0.0481, 0.0454, 0.0394, 0.0409, 0.0387, 0.0387, 0.0322, 0.0313,
0.0290, 0.0309, 0.0412, 0.0417, 0.0412, 0.0397, 0.0417, 0.0414, 0.0376, 0.0316,
0.0270, 0.0186, 0.0186, 0.0132, 0.0127, 0.0106, 0.0071, 0.0040, 0.0020, 0.0013 };
const double eta3_CDF_sim_err[] =
{ 0.0009, 0.0009, 0.0007, 0.0007, 0.0007, 0.0010, 0.0012, 0.0012, 0.0013, 0.0016,
0.0017, 0.0020, 0.0020, 0.0022, 0.0020, 0.0020, 0.0018, 0.0018, 0.0016, 0.0017,
0.0017, 0.0019, 0.0020, 0.0021, 0.0020, 0.0020, 0.0019, 0.0020, 0.0018, 0.0017,
0.0017, 0.0014, 0.0014, 0.0009, 0.0010, 0.0009, 0.0009, 0.0008, 0.0008, 0.0009 };
const double eta3_Ideal_sim[] =
{ 0.0017, 0.0030, 0.0033, 0.0062, 0.0062, 0.0112, 0.0177, 0.0164, 0.0196, 0.0274,
0.0351, 0.0413, 0.0520, 0.0497, 0.0448, 0.0446, 0.0375, 0.0329, 0.0291, 0.0272,
0.0233, 0.0288, 0.0384, 0.0396, 0.0468, 0.0419, 0.0459, 0.0399, 0.0355, 0.0329,
0.0274, 0.0230, 0.0201, 0.0120, 0.0100, 0.0080, 0.0051, 0.0051, 0.0010, 0.0010 };
for (size_t i = 0; i < 40; ++i) {
const double yval = _tmphistJet3eta->bin(i).area() * (eta3_CDF_sim[i]/eta3_Ideal_sim[i]);
const double yerr = _tmphistJet3eta->bin(i).areaErr() * (eta3_CDF_sim_err[i]/eta3_Ideal_sim[i]);
- _histJet3eta->addPoint(_tmphistJet3eta->bin(i).xMid(), yval/_sumw,
- _tmphistJet3eta->bin(i).xWidth()/2.0, yerr/_sumw);
+ _histJet3eta->addPoint(_tmphistJet3eta->bin(i).xMid(), yval/dbl(*_sumw),
+ _tmphistJet3eta->bin(i).xWidth()/2.0, yerr/dbl(*_sumw));
}
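// NB. the pattern above (repeated for the R23 and alpha corrections below)
// rescales each bin by the ratio of CDF-simulated to ideal-simulated bin
// fractions before the per-event normalisation; dbl(*_sumw) dereferences the
// counter pointer and converts the counter to its double value, the sum of
// accepted event weights.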
// R23 correction
const double R23_CDF_sim[] =
{ 0.0005, 0.0161, 0.0570, 0.0762, 0.0723, 0.0705, 0.0598, 0.0563, 0.0557, 0.0579,
0.0538, 0.0522, 0.0486, 0.0449, 0.0418, 0.0361, 0.0326, 0.0304, 0.0252, 0.0212,
0.0173, 0.0176, 0.0145, 0.0127, 0.0103, 0.0065, 0.0049, 0.0045, 0.0035, 0.0029,
0.0024, 0.0014, 0.0011, 0.0010, 0.0009 };
const double R23_CDF_sim_err[] =
{ 0.0013, 0.0009, 0.0022, 0.0029, 0.0026, 0.0024, 0.0022, 0.0025, 0.0023, 0.0024,
0.0021, 0.0021, 0.0021, 0.0021, 0.0021, 0.0019, 0.0019, 0.0016, 0.0017, 0.0014,
0.0010, 0.0014, 0.0012, 0.0013, 0.0010, 0.0011, 0.0010, 0.0010, 0.0010, 0.0011,
0.0011, 0.0009, 0.0008, 0.0008, 0.0009 };
const double R23_Ideal_sim[] =
{ 0.0005, 0.0176, 0.0585, 0.0862, 0.0843, 0.0756, 0.0673, 0.0635, 0.0586, 0.0619,
0.0565, 0.0515, 0.0466, 0.0472, 0.0349, 0.0349, 0.0266, 0.0254, 0.0204, 0.0179,
0.0142, 0.0134, 0.0101, 0.0090, 0.0080, 0.0034, 0.0030, 0.0033, 0.0027, 0.0021,
0.0012, 0.0006, 0.0004, 0.0005, 0.0003 };
for (size_t i = 0; i < 35; ++i) {
const double yval = _tmphistR23->bin(i).area() * (R23_CDF_sim[i]/R23_Ideal_sim[i]);
const double yerr = _tmphistR23->bin(i).areaErr() * (R23_CDF_sim_err[i]/R23_Ideal_sim[i]);
- _histR23->addPoint(_tmphistR23->bin(i).xMid(), yval/_sumw,
- _tmphistR23->bin(i).xWidth()/2.0, yerr/_sumw);
+ _histR23->addPoint(_tmphistR23->bin(i).xMid(), yval/dbl(*_sumw),
+ _tmphistR23->bin(i).xWidth()/2.0, yerr/dbl(*_sumw));
}
// alpha correction
const double alpha_CDF_sim[] =
{ 0.0517, 0.0461, 0.0490, 0.0452, 0.0451, 0.0435, 0.0317, 0.0287, 0.0294, 0.0261,
0.0231, 0.0220, 0.0233, 0.0192, 0.0213, 0.0166, 0.0176, 0.0146, 0.0136, 0.0156,
0.0142, 0.0152, 0.0151, 0.0147, 0.0164, 0.0186, 0.0180, 0.0210, 0.0198, 0.0189,
0.0197, 0.0211, 0.0270, 0.0236, 0.0243, 0.0269, 0.0257, 0.0276, 0.0246, 0.0286 };
const double alpha_CDF_sim_err[] =
{ 0.0024, 0.0025, 0.0024, 0.0024, 0.0024, 0.0022, 0.0019, 0.0018, 0.0019, 0.0016,
0.0017, 0.0017, 0.0019, 0.0013, 0.0017, 0.0014, 0.0016, 0.0013, 0.0012, 0.0009,
0.0014, 0.0014, 0.0014, 0.0014, 0.0014, 0.0015, 0.0014, 0.0016, 0.0016, 0.0015,
0.0016, 0.0016, 0.0019, 0.0017, 0.0019, 0.0018, 0.0018, 0.0018, 0.0018, 0.0019 };
const double alpha_Ideal_sim[] =
{ 0.0552, 0.0558, 0.0583, 0.0550, 0.0495, 0.0433, 0.0393, 0.0346, 0.0331, 0.0296,
0.0258, 0.0196, 0.0171, 0.0179, 0.0174, 0.0141, 0.0114, 0.0096, 0.0076, 0.0087,
0.0099, 0.0079, 0.0102, 0.0114, 0.0124, 0.0130, 0.0165, 0.0160, 0.0177, 0.0190,
0.0232, 0.0243, 0.0238, 0.0248, 0.0235, 0.0298, 0.0292, 0.0291, 0.0268, 0.0316 };
for (size_t i = 0; i < 40; ++i) {
const double yval = _tmphistAlpha->bin(i).area() * (alpha_CDF_sim[i]/alpha_Ideal_sim[i]);
const double yerr = _tmphistAlpha->bin(i).areaErr() * (alpha_CDF_sim_err[i]/alpha_Ideal_sim[i]);
- _histAlpha->addPoint(_tmphistAlpha->bin(i).xMid(), yval/_sumw,
- _tmphistAlpha->bin(i).xWidth()/2.0, yerr/_sumw);
+ _histAlpha->addPoint(_tmphistAlpha->bin(i).xMid(), yval/dbl(*_sumw),
+ _tmphistAlpha->bin(i).xWidth()/2.0, yerr/dbl(*_sumw));
}
}
//@}
private:
/// @name Event weight counters
//@{
CounterPtr _sumw;
//@}
/// @name Histograms
//@{
/// Straightforward output histos
Histo1DPtr _histJet1Et, _histJet2Et;
/// Output histos which need to have correction factors applied
Scatter2DPtr _histR23, _histJet3eta, _histAlpha;
/// Temporary histos, to be converted to DPSes
Histo1DPtr _tmphistR23, _tmphistJet3eta, _tmphistAlpha;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_1994_S2952106);
}
diff --git a/analyses/pluginCDF/CDF_2001_S4751469.cc b/analyses/pluginCDF/CDF_2001_S4751469.cc
--- a/analyses/pluginCDF/CDF_2001_S4751469.cc
+++ b/analyses/pluginCDF/CDF_2001_S4751469.cc
@@ -1,264 +1,264 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/TriggerCDFRun0Run1.hh"
#include "Rivet/Projections/ConstLossyFinalState.hh"
//#include "Rivet/Projections/SmearedParticles.hh"
namespace Rivet {
/// @brief Field-Stuart CDF Run I track-jet underlying event analysis
///
/// @author Andy Buckley
///
/// The "original" underlying event analysis, using a non-standard track-jet algorithm.
///
/// @par Run conditions
///
/// @arg \f$ \sqrt{s} = \f$ 1800 GeV
/// @arg Run with generic QCD events.
/// @arg Several \f$ p_\perp^\text{min} \f$ cutoffs are probably required to fill the profile histograms:
/// @arg \f$ p_\perp^\text{min} = \f$ 0 (min bias), 10, 20 GeV
class CDF_2001_S4751469 : public Analysis {
public:
/// Constructor: cuts on final state are \f$ -1 < \eta < 1 \f$
/// and \f$ p_T > 0.5 \f$ GeV.
CDF_2001_S4751469()
: Analysis("CDF_2001_S4751469")
{ }
/// @name Analysis methods
//@{
// Book histograms
void init() {
declare(TriggerCDFRun0Run1(), "Trigger");
// Randomly discard 8% of charged particles as a kind of hacky detector correction.
const ChargedFinalState cfs(-1.0, 1.0, 0.5*GeV);
/// @todo Replace ConstLossyFinalState with SmearedParticles
const ConstLossyFinalState lossyfs(cfs, 0.08);
//const SmearedParticles lossyfs(cfs, [](const Particle&){ return 0.92; });
declare(lossyfs, "FS");
declare(FastJets(lossyfs, FastJets::TRACKJET, 0.7), "TrackJet");
book(_numvsDeltaPhi2 ,1, 1, 1);
book(_numvsDeltaPhi5 ,1, 1, 2);
book(_numvsDeltaPhi30 ,1, 1, 3);
book(_pTvsDeltaPhi2 ,2, 1, 1);
book(_pTvsDeltaPhi5 ,2, 1, 2);
book(_pTvsDeltaPhi30 ,2, 1, 3);
book(_numTowardMB ,3, 1, 1);
book(_numTransMB ,3, 1, 2);
book(_numAwayMB ,3, 1, 3);
book(_numTowardJ20 ,4, 1, 1);
book(_numTransJ20 ,4, 1, 2);
book(_numAwayJ20 ,4, 1, 3);
book(_ptsumTowardMB ,5, 1, 1);
book(_ptsumTransMB ,5, 1, 2);
book(_ptsumAwayMB ,5, 1, 3);
book(_ptsumTowardJ20 ,6, 1, 1);
book(_ptsumTransJ20 ,6, 1, 2);
book(_ptsumAwayJ20 ,6, 1, 3);
book(_ptTrans2 ,7, 1, 1);
book(_ptTrans5 ,7, 1, 2);
book(_ptTrans30 ,7, 1, 3);
book(_totalNumTrans2, "totalNumTrans2");
book(_totalNumTrans5, "totalNumTrans5");
book(_totalNumTrans30, "totalNumTrans30");
book(_sumWeightsPtLead2, "sumWeightsPtLead2");
book(_sumWeightsPtLead5, "sumWeightsPtLead5");
book(_sumWeightsPtLead30, "sumWeightsPtLead30");
}
/// Do the analysis
void analyze(const Event& event) {
// Trigger
const bool trigger = apply<TriggerCDFRun0Run1>(event, "Trigger").minBiasDecision();
if (!trigger) vetoEvent;
// Get jets, sorted by pT
const Jets jets = apply<JetAlg>(event, "TrackJet").jetsByPt();
if (jets.empty()) vetoEvent;
const Jet jet1 = jets.front();
const double ptLead = jet1.pT();
// Cut on highest pT jet: combined 0.5 GeV < pT(lead) < 50 GeV
if (ptLead/GeV < 0.5) vetoEvent;
if (ptLead/GeV > 50.0) vetoEvent;
// Count sum of all event weights in three pT_lead regions
if (ptLead/GeV > 2.0) _sumWeightsPtLead2->fill();
if (ptLead/GeV > 5.0) _sumWeightsPtLead5->fill();
if (ptLead/GeV > 30.0) _sumWeightsPtLead30->fill();
// Run over tracks
double ptSumToward(0.0), ptSumAway(0.0), ptSumTrans(0.0);
size_t numToward(0), numTrans(0), numAway(0);
// Temporary histos that bin N and pT in dphi
Profile1D htmp_num_dphi_2(refData(1, 1, 1)), htmp_num_dphi_5(refData(1, 1, 2)), htmp_num_dphi_30(refData(1, 1, 3));
Profile1D htmp_pt_dphi_2(refData(2, 1, 1)), htmp_pt_dphi_5(refData(2, 1, 2)), htmp_pt_dphi_30(refData(2, 1, 3));
// Final state charged particles
/// @todo Non-trackjet track efficiencies are corrected?
const Particles& tracks = apply<FinalState>(event, "FS").particles();
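// Classify each track by its azimuthal separation from the leading jet,
// following the usual Field-Stuart regions: "toward" for dphi < 60 degrees,
// "transverse" for 60-120 degrees, and "away" beyond 120 degrees.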
for (const Particle& p : tracks) {
const double dPhi = deltaPhi(p, jet1);
const double pT = p.pT();
if (dPhi < PI/3.0) {
ptSumToward += pT;
++numToward;
}
else if (dPhi < 2*PI/3.0) {
ptSumTrans += pT;
++numTrans;
// Fill transverse pT distributions
if (ptLead/GeV > 2.0) {
_ptTrans2->fill(pT/GeV);
_totalNumTrans2->fill();
}
if (ptLead/GeV > 5.0) {
_ptTrans5->fill(pT/GeV);
_totalNumTrans5->fill();
}
if (ptLead/GeV > 30.0) {
_ptTrans30->fill(pT/GeV);
_totalNumTrans30->fill();
}
}
else {
ptSumAway += pT;
++numAway;
}
// Fill tmp histos to bin event's track Nch & pT in dphi
const double dPhideg = 180*dPhi/M_PI;
if (ptLead/GeV > 2.0) {
htmp_num_dphi_2.fill(dPhideg, 1);
htmp_pt_dphi_2.fill (dPhideg, pT/GeV);
}
if (ptLead/GeV > 5.0) {
htmp_num_dphi_5.fill(dPhideg, 1);
htmp_pt_dphi_5.fill (dPhideg, pT/GeV);
}
if (ptLead/GeV > 30.0) {
htmp_num_dphi_30.fill(dPhideg, 1);
htmp_pt_dphi_30.fill (dPhideg, pT/GeV);
}
}
// Update the "proper" dphi profile histograms
for (int i = 0; i < 50; i++) { ///< @todo Should really explicitly iterate over nbins for each temp histo
if (ptLead/GeV > 2.0) {
const double x2 = htmp_pt_dphi_2.bin(i).xMid();
const double num2 = (htmp_num_dphi_2.bin(i).numEntries() > 0) ? htmp_num_dphi_2.bin(i).mean() : 0.0;
const double pt2 = (htmp_num_dphi_2.bin(i).numEntries() > 0) ? htmp_pt_dphi_2.bin(i).mean() : 0.0;
_numvsDeltaPhi2->fill(x2, num2);
_pTvsDeltaPhi2->fill(x2, pt2);
}
if (ptLead/GeV > 5.0) {
const double x5 = htmp_pt_dphi_5.bin(i).xMid();
const double num5 = (htmp_num_dphi_5.bin(i).numEntries() > 0) ? htmp_num_dphi_5.bin(i).mean() : 0.0;
const double pt5 = (htmp_num_dphi_5.bin(i).numEntries() > 0) ? htmp_pt_dphi_5.bin(i).mean() : 0.0;
_numvsDeltaPhi5->fill(x5, num5);
_pTvsDeltaPhi5->fill(x5, pt5);
}
if (ptLead/GeV > 30.0) {
const double x30 = htmp_pt_dphi_30.bin(i).xMid();
const double num30 = (htmp_num_dphi_30.bin(i).numEntries() > 0) ? htmp_num_dphi_30.bin(i).mean() : 0.0;
const double pt30 = (htmp_num_dphi_30.bin(i).numEntries() > 0) ? htmp_pt_dphi_30.bin(i).mean() : 0.0;
_numvsDeltaPhi30->fill(x30, num30);
_pTvsDeltaPhi30->fill(x30, pt30);
}
}
// Log some event details about pT
MSG_DEBUG("pT [lead; twd, away, trans] = [" << ptLead << "; "
<< ptSumToward << ", " << ptSumAway << ", " << ptSumTrans << "]");
// Update the pT profile histograms
_ptsumTowardMB->fill(ptLead/GeV, ptSumToward/GeV);
_ptsumTowardJ20->fill(ptLead/GeV, ptSumToward/GeV);
_ptsumTransMB->fill(ptLead/GeV, ptSumTrans/GeV);
_ptsumTransJ20->fill(ptLead/GeV, ptSumTrans/GeV);
_ptsumAwayMB->fill(ptLead/GeV, ptSumAway/GeV);
_ptsumAwayJ20->fill(ptLead/GeV, ptSumAway/GeV);
// Log some event details about Nch
MSG_DEBUG("N [twd, away, trans] = [" << ptLead << "; "
<< numToward << ", " << numTrans << ", " << numAway << "]");
// Update the N_track profile histograms
_numTowardMB->fill(ptLead/GeV, numToward);
_numTowardJ20->fill(ptLead/GeV, numToward);
_numTransMB->fill(ptLead/GeV, numTrans);
_numTransJ20->fill(ptLead/GeV, numTrans);
_numAwayMB->fill(ptLead/GeV, numAway);
_numAwayJ20->fill(ptLead/GeV, numAway);
}
/// Normalize histos
void finalize() {
- normalize(_ptTrans2, _totalNumTrans2 / _sumWeightsPtLead2);
- normalize(_ptTrans5, _totalNumTrans5 / _sumWeightsPtLead5);
- normalize(_ptTrans30, _totalNumTrans30 / _sumWeightsPtLead30);
+ normalize(_ptTrans2, *_totalNumTrans2 / *_sumWeightsPtLead2);
+ normalize(_ptTrans5, *_totalNumTrans5 / *_sumWeightsPtLead5);
+ normalize(_ptTrans30, *_totalNumTrans30 / *_sumWeightsPtLead30);
}
//@}
private:
/// Sum total number of charged particles in the trans region, in 3 \f$ p_\perp^\text{lead} \f$ bins.
CounterPtr _totalNumTrans2, _totalNumTrans5, _totalNumTrans30;
/// Sum the total number of events in 3 \f$ p_\perp^\text{lead} \f$ bins.
CounterPtr _sumWeightsPtLead2,_sumWeightsPtLead5, _sumWeightsPtLead30;
/// @name Histogram collections
//@{
// The sumpt vs. dphi and Nch vs. dphi histos
Profile1DPtr _numvsDeltaPhi2, _numvsDeltaPhi5, _numvsDeltaPhi30;
Profile1DPtr _pTvsDeltaPhi2, _pTvsDeltaPhi5, _pTvsDeltaPhi30;
/// Profile histograms, binned in the \f$ p_T \f$ of the leading jet, for
/// the \f$ p_T \f$ sum in the toward, transverse and away regions.
Profile1DPtr _ptsumTowardMB, _ptsumTransMB, _ptsumAwayMB;
Profile1DPtr _ptsumTowardJ20, _ptsumTransJ20, _ptsumAwayJ20;
/// Profile histograms, binned in the \f$ p_T \f$ of the leading jet, for
/// the number of charged particles per jet in the toward, transverse and
/// away regions.
Profile1DPtr _numTowardMB, _numTransMB, _numAwayMB;
Profile1DPtr _numTowardJ20, _numTransJ20, _numAwayJ20;
/// Histogram of \f$ p_T \f$ distribution for 3 different \f$ p_{T1} \f$ IR cutoffs.
Histo1DPtr _ptTrans2, _ptTrans5, _ptTrans30;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2001_S4751469);
}
diff --git a/analyses/pluginCDF/CDF_2006_S6653332.cc b/analyses/pluginCDF/CDF_2006_S6653332.cc
--- a/analyses/pluginCDF/CDF_2006_S6653332.cc
+++ b/analyses/pluginCDF/CDF_2006_S6653332.cc
@@ -1,180 +1,180 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/InvMassFinalState.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/ChargedLeptons.hh"
namespace Rivet {
/// @brief CDF Run II analysis: jet \f$ p_T \f$ and \f$ \eta \f$
/// distributions in Z + (b) jet production
/// @author Lars Sonnenschein
///
/// This CDF analysis provides \f$ p_T \f$ and \f$ \eta \f$ distributions of
/// jets in Z + (b) jet production, before and after tagging.
class CDF_2006_S6653332 : public Analysis {
public:
/// Constructor
CDF_2006_S6653332()
: Analysis("CDF_2006_S6653332"),
_Rjet(0.7), _JetPtCut(20.), _JetEtaCut(1.5), _Lep1PtCut(18.), _Lep2PtCut(10.), _LepEtaCut(1.1)
{ }
/// @name Analysis methods
//@{
void init() {
const FinalState fs(-3.6, 3.6);
declare(fs, "FS");
// Create a final state with any e+e- or mu+mu- pair with
// invariant mass 66 -> 116 GeV (Z decay products)
vector<pair<PdgId,PdgId> > vids;
vids.push_back(make_pair(PID::ELECTRON, PID::POSITRON));
vids.push_back(make_pair(PID::MUON, PID::ANTIMUON));
FinalState fs2(-3.6, 3.6);
InvMassFinalState invfs(fs2, vids, 66*GeV, 116*GeV);
declare(invfs, "INVFS");
// Make a final state without the Z decay products for jet clustering
VetoedFinalState vfs(fs);
vfs.addVetoOnThisFinalState(invfs);
declare(vfs, "VFS");
declare(FastJets(vfs, FastJets::CDFMIDPOINT, 0.7), "Jets");
// Book histograms
book(_sigmaBJet ,1, 1, 1);
book(_ratioBJetToZ ,2, 1, 1);
book(_ratioBJetToJet ,3, 1, 1);
book(_sumWeightsWithZ, "sumWeightsWithZ");
book(_sumWeightsWithZJet, "sumWeightsWithZJet");
}
/// Do the analysis
void analyze(const Event& event) {
// Check we have an l+l- pair that passes the kinematic cuts
// Get the Z decay products (mu+mu- or e+e- pair)
const InvMassFinalState& invMassFinalState = apply<InvMassFinalState>(event, "INVFS");
const Particles& ZDecayProducts = invMassFinalState.particles();
// Make sure we have at least 2 Z decay products (mumu or ee)
if (ZDecayProducts.size() < 2) vetoEvent;
//
double Lep1Pt = ZDecayProducts[0].pT();
double Lep2Pt = ZDecayProducts[1].pT();
double Lep1Eta = ZDecayProducts[0].absrap(); ///< @todo This is y... should be abseta()?
double Lep2Eta = ZDecayProducts[1].absrap(); ///< @todo This is y... should be abseta()?
if (Lep1Eta > _LepEtaCut && Lep2Eta > _LepEtaCut) vetoEvent;
if (ZDecayProducts[0].abspid()==13 && Lep1Eta > 1. && Lep2Eta > 1.) vetoEvent;
if (Lep1Pt < _Lep1PtCut && Lep2Pt < _Lep2PtCut) vetoEvent;
_sumWeightsWithZ->fill();
/// @todo Write out a warning if there are more than two decay products
FourMomentum Zmom = ZDecayProducts[0].momentum() + ZDecayProducts[1].momentum();
// Put all b-quarks in a vector
/// @todo Use jet contents rather than accessing quarks directly
Particles bquarks;
/// @todo Use nicer looping
for (GenEvent::particle_const_iterator p = event.genEvent()->particles_begin(); p != event.genEvent()->particles_end(); ++p) {
if ( std::abs((*p)->pdg_id()) == PID::BQUARK ) {
bquarks.push_back(Particle(**p));
}
}
// Get jets
const FastJets& jetpro = apply<FastJets>(event, "Jets");
MSG_DEBUG("Jet multiplicity before any pT cut = " << jetpro.size());
const PseudoJets& jets = jetpro.pseudoJetsByPt();
MSG_DEBUG("jetlist size = " << jets.size());
int numBJet = 0;
int numJet = 0;
// for each b-jet plot the ET and the eta of the jet, normalise to the total cross section at the end
// for each event plot N jet and pT(Z), normalise to the total cross section at the end
for (PseudoJets::const_iterator jt = jets.begin(); jt != jets.end(); ++jt) {
// select jets that pass the kinematic cuts
if (jt->perp() > _JetPtCut && fabs(jt->rapidity()) <= _JetEtaCut) {
++numJet;
// Does the jet contain a b-quark?
/// @todo Use jet contents rather than accessing quarks directly
bool bjet = false;
foreach (const Particle& bquark, bquarks) {
if (deltaR(jt->rapidity(), jt->phi(), bquark.rapidity(), bquark.phi()) <= _Rjet) {
bjet = true;
break;
}
} // end loop around b-jets
if (bjet) {
numBJet++;
}
}
} // end loop around jets
if (numJet > 0) _sumWeightsWithZJet->fill();
if (numBJet > 0) {
_sigmaBJet->fill(1960.0);
_ratioBJetToZ->fill(1960.0);
_ratioBJetToJet->fill(1960.0);
}
}
/// Finalize
void finalize() {
MSG_DEBUG("Total sum of weights = " << sumOfWeights());
- MSG_DEBUG("Sum of weights for Z production in mass range = " << double(_sumWeightsWithZ));
- MSG_DEBUG("Sum of weights for Z+jet production in mass range = " << double(_sumWeightsWithZJet));
+ MSG_DEBUG("Sum of weights for Z production in mass range = " << dbl(*_sumWeightsWithZ));
+ MSG_DEBUG("Sum of weights for Z+jet production in mass range = " << dbl(*_sumWeightsWithZJet));
scale(_sigmaBJet, crossSection()/sumOfWeights());
- scale(_ratioBJetToZ, 1.0/_sumWeightsWithZ);
- scale(_ratioBJetToJet, 1.0/_sumWeightsWithZJet);
+ scale(_ratioBJetToZ, 1.0/ *_sumWeightsWithZ);
+ scale(_ratioBJetToJet, 1.0/ *_sumWeightsWithZJet);
}
//@}
private:
/// @name Cuts and counters
//@{
double _Rjet;
double _JetPtCut;
double _JetEtaCut;
double _Lep1PtCut;
double _Lep2PtCut;
double _LepEtaCut;
CounterPtr _sumWeightsWithZ;
CounterPtr _sumWeightsWithZJet;
//@}
/// @name Histograms
//@{
Histo1DPtr _sigmaBJet;
Histo1DPtr _ratioBJetToZ;
Histo1DPtr _ratioBJetToJet;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2006_S6653332);
}
diff --git a/analyses/pluginCDF/CDF_2008_S7541902.cc b/analyses/pluginCDF/CDF_2008_S7541902.cc
--- a/analyses/pluginCDF/CDF_2008_S7541902.cc
+++ b/analyses/pluginCDF/CDF_2008_S7541902.cc
@@ -1,194 +1,194 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
#include "Rivet/Projections/InvMassFinalState.hh"
#include "Rivet/Projections/FastJets.hh"
#include <algorithm>
namespace Rivet {
/// @brief CDF jet pT and multiplicity distributions in W + jets events
///
/// This CDF analysis provides jet pT distributions for 4 jet multiplicity bins
/// as well as the jet multiplicity distribution in W + jets events.
/// e-Print: arXiv:0711.4044 [hep-ex]
class CDF_2008_S7541902 : public Analysis {
public:
/// Constructor
CDF_2008_S7541902()
: Analysis("CDF_2008_S7541902"),
_electronETCut(20.0*GeV), _electronETACut(1.1),
_eTmissCut(30.0*GeV), _mTCut(20.0*GeV),
_jetEtCutA(20.0*GeV), _jetEtCutB(25.0*GeV), _jetETA(2.0)
{ }
/// @name Analysis methods
//@{
void init() {
// Set up projections
// Basic FS
FinalState fs(-3.6, 3.6);
declare(fs, "FS");
// Create a final state with any e-nu pair with invariant mass 65 -> 95 GeV and ET > 20 (W decay products)
vector<pair<PdgId,PdgId> > vids;
vids += make_pair(PID::ELECTRON, PID::NU_EBAR);
vids += make_pair(PID::POSITRON, PID::NU_E);
FinalState fs2(-3.6, 3.6, 20*GeV);
InvMassFinalState invfs(fs2, vids, 65*GeV, 95*GeV);
declare(invfs, "INVFS");
// Make a final state without the W decay products for jet clustering
VetoedFinalState vfs(fs);
vfs.addVetoOnThisFinalState(invfs);
declare(vfs, "VFS");
declare(FastJets(vfs, FastJets::CDFJETCLU, 0.4), "Jets");
// Book histograms
for (int i = 0 ; i < 4 ; ++i) {
book(_histJetEt[i] ,1+i, 1, 1);
book(_histJetMultRatio[i], 5, 1, i+1, true);
/// @todo These would be better off as YODA::Counter until finalize()
book(_histJetMult[i] ,6+i, 1, 1); // _sumW is essentially the 0th "histo" counter
}
book(_sumW,"sumW");
}
/// Do the analysis
void analyze(const Event& event) {
// Get the W decay products (electron and neutrino)
const InvMassFinalState& invMassFinalState = apply<InvMassFinalState>(event, "INVFS");
const Particles& wDecayProducts = invMassFinalState.particles();
FourMomentum electronP, neutrinoP;
bool gotElectron(false), gotNeutrino(false);
foreach (const Particle& p, wDecayProducts) {
FourMomentum p4 = p.momentum();
if (p4.Et() > _electronETCut && fabs(p4.eta()) < _electronETACut && p.abspid() == PID::ELECTRON) {
electronP = p4;
gotElectron = true;
}
else if (p4.Et() > _eTmissCut && p.abspid() == PID::NU_E) {
neutrinoP = p4;
gotNeutrino = true;
}
}
// Veto event if the electron or MET cuts fail
if (!gotElectron || !gotNeutrino) vetoEvent;
// Veto event if the mT cut fails
double mT2 = 2.0 * ( electronP.pT()*neutrinoP.pT() - electronP.px()*neutrinoP.px() - electronP.py()*neutrinoP.py() );
if (sqrt(mT2) < _mTCut ) vetoEvent;
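// NB. 2*(pT1*pT2 - px1*px2 - py1*py2) = 2*pT1*pT2*(1 - cos(dphi)), so this is
// the standard transverse mass mT^2 = 2 pT^e pT^nu (1 - cos dphi(e,nu)).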
// Get the jets
const JetAlg& jetProj = apply<FastJets>(event, "Jets");
Jets theJets = jetProj.jets(cmpMomByEt, Cuts::Et > _jetEtCutA);
size_t njetsA(0), njetsB(0);
foreach (const Jet& j, theJets) {
const FourMomentum pj = j.momentum();
if (fabs(pj.rapidity()) < _jetETA) {
// Fill differential histograms for top 4 jets with Et > 20
if (njetsA < 4 && pj.Et() > _jetEtCutA) {
++njetsA;
_histJetEt[njetsA-1]->fill(pj.Et());
}
// Count number of jets with Et > 25 (for multiplicity histograms)
if (pj.Et() > _jetEtCutB) ++njetsB;
}
}
// Increment event counter
_sumW->fill();
// Jet multiplicity
for (size_t i = 1; i <= njetsB; ++i) {
/// @todo This isn't really a histogram: replace with a YODA::Counter when we have one!
_histJetMult[i-1]->fill(1960.);
if (i == 4) break;
}
}
/// Finalize
void finalize() {
// Fill the 0th ratio histogram specially
/// @todo This special case for 1-to-0 will disappear if we use Counters for all mults including 0.
- if (_sumW > 0) {
+ if (_sumW->val() > 0) {
const YODA::Histo1D::Bin& b0 = _histJetMult[0]->bin(0);
- double ratio = b0.area()/_sumW;
- double frac_err = 1/_sumW; ///< This 1/sqrt{N} error treatment isn't right for weighted events: use YODA::Counter
+ double ratio = b0.area()/dbl(*_sumW);
+ double frac_err = 1/dbl(*_sumW); ///< This 1/sqrt{N} error treatment isn't right for weighted events: use YODA::Counter
if (b0.area() > 0) frac_err = sqrt( sqr(frac_err) + sqr(b0.areaErr()/b0.area()) );
_histJetMultRatio[0]->point(0).setY(ratio, ratio*frac_err);
}
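// The fractional uncertainty combines the numerator and denominator errors in
// quadrature, sigma_r/r = sqrt((sigma_a/a)^2 + (sigma_b/b)^2), the same
// treatment as for the bin-to-bin ratios in the loop below.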
// Loop over the non-zero multiplicities
for (size_t i = 0; i < 3; ++i) {
const YODA::Histo1D::Bin& b1 = _histJetMult[i]->bin(0);
const YODA::Histo1D::Bin& b2 = _histJetMult[i+1]->bin(0);
if (b1.area() == 0.0) continue;
double ratio = b2.area()/b1.area();
double frac_err = b1.areaErr()/b1.area();
if (b2.area() > 0) frac_err = sqrt( sqr(frac_err) + sqr(b2.areaErr()/b2.area()) );
_histJetMultRatio[i+1]->point(0).setY(ratio, ratio*frac_err);
}
// Normalize the non-ratio histograms
for (size_t i = 0; i < 4; ++i) {
scale(_histJetEt[i], crossSection()/picobarn/sumOfWeights());
scale(_histJetMult[i], crossSection()/picobarn/sumOfWeights());
}
}
//@}
private:
/// @name Cuts
//@{
/// Cut on the electron ET:
double _electronETCut;
/// Cut on the electron ETA:
double _electronETACut;
/// Cut on the missing ET
double _eTmissCut;
/// Cut on the transverse mass squared
double _mTCut;
/// Cut on the jet ET for differential cross sections
double _jetEtCutA;
/// Cut on the jet ET for jet multiplicity
double _jetEtCutB;
/// Cut on the jet ETA
double _jetETA;
//@}
/// @name Histograms
//@{
Histo1DPtr _histJetEt[4];
Histo1DPtr _histJetMultNorm;
Scatter2DPtr _histJetMultRatio[4];
Histo1DPtr _histJetMult[4];
CounterPtr _sumW;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2008_S7541902);
}
diff --git a/analyses/pluginCDF/CDF_2008_S8095620.cc b/analyses/pluginCDF/CDF_2008_S8095620.cc
--- a/analyses/pluginCDF/CDF_2008_S8095620.cc
+++ b/analyses/pluginCDF/CDF_2008_S8095620.cc
@@ -1,187 +1,187 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
#include "Rivet/Projections/InvMassFinalState.hh"
namespace Rivet {
/// @brief CDF Run II Z + b-jet cross-section measurement
class CDF_2008_S8095620 : public Analysis {
public:
/// Constructor.
/// jet cuts: |eta| <= 1.5
CDF_2008_S8095620()
: Analysis("CDF_2008_S8095620"),
_Rjet(0.7), _JetPtCut(20.), _JetEtaCut(1.5), _Lep1PtCut(18.), _Lep2PtCut(10.), _LepEtaCut(3.2)
{ }
/// @name Analysis methods
//@{
void init() {
// Set up projections
const FinalState fs(-3.2, 3.2);
declare(fs, "FS");
// Create a final state with any e+e- or mu+mu- pair with
// invariant mass 76 -> 106 GeV and ET > 18 (Z decay products)
vector<pair<PdgId,PdgId> > vids;
vids.push_back(make_pair(PID::ELECTRON, PID::POSITRON));
vids.push_back(make_pair(PID::MUON, PID::ANTIMUON));
FinalState fs2(-3.2, 3.2);
InvMassFinalState invfs(fs2, vids, 76*GeV, 106*GeV);
declare(invfs, "INVFS");
// Make a final state without the Z decay products for jet clustering
VetoedFinalState vfs(fs);
vfs.addVetoOnThisFinalState(invfs);
declare(vfs, "VFS");
declare(FastJets(vfs, FastJets::CDFMIDPOINT, 0.7), "Jets");
// Book histograms
book(_dStot ,1, 1, 1);
book(_dSdET ,2, 1, 1);
book(_dSdETA ,3, 1, 1);
book(_dSdZpT ,4, 1, 1);
book(_dSdNJet ,5, 1, 1);
book(_dSdNbJet ,6, 1, 1);
book(_sumWeightSelected,"sumWeightSelected");
}
// Do the analysis
void analyze(const Event& event) {
// Check we have an l+l- pair that passes the kinematic cuts
// Get the Z decay products (mu+mu- or e+e- pair)
const InvMassFinalState& invMassFinalState = apply<InvMassFinalState>(event, "INVFS");
const Particles& ZDecayProducts = invMassFinalState.particles();
// make sure we have 2 Z decay products (mumu or ee)
if (ZDecayProducts.size() < 2) vetoEvent;
//new cuts
double Lep1Pt = ZDecayProducts[0].perp();
double Lep2Pt = ZDecayProducts[1].perp();
double Lep1Eta = fabs(ZDecayProducts[0].rapidity());
double Lep2Eta = fabs(ZDecayProducts[1].rapidity());
if (Lep1Eta > _LepEtaCut || Lep2Eta > _LepEtaCut) vetoEvent;
if (ZDecayProducts[0].abspid()==13 &&
((Lep1Eta > 1.5 || Lep2Eta > 1.5) || (Lep1Eta > 1.0 && Lep2Eta > 1.0))) {
vetoEvent;
}
if (Lep1Pt > Lep2Pt) {
if (Lep1Pt < _Lep1PtCut || Lep2Pt < _Lep2PtCut) vetoEvent;
}
else {
if (Lep1Pt < _Lep2PtCut || Lep2Pt < _Lep1PtCut) vetoEvent;
}
_sumWeightSelected->fill();
/// @todo Write out a warning if there are more than two decay products
FourMomentum Zmom = ZDecayProducts[0].momentum() + ZDecayProducts[1].momentum();
// Put all b-quarks in a vector
/// @todo Use a b-hadron search rather than b-quarks for tagging
Particles bquarks;
foreach (const GenParticle* p, particles(event.genEvent())) {
if (std::abs(p->pdg_id()) == PID::BQUARK) {
bquarks += Particle(*p);
}
}
// Get jets
const FastJets& jetpro = apply<FastJets>(event, "Jets");
MSG_DEBUG("Jet multiplicity before any pT cut = " << jetpro.size());
const PseudoJets& jets = jetpro.pseudoJetsByPt();
MSG_DEBUG("jetlist size = " << jets.size());
int numBJet = 0;
int numJet = 0;
// for each b-jet plot the ET and the eta of the jet, normalise to the total cross section at the end
// for each event plot N jet and pT(Z), normalise to the total cross section at the end
for (PseudoJets::const_iterator jt = jets.begin(); jt != jets.end(); ++jt) {
// select jets that pass the kinematic cuts
if (jt->perp() > _JetPtCut && fabs(jt->rapidity()) <= _JetEtaCut) {
numJet++;
// does the jet contain a b-quark?
bool bjet = false;
foreach (const Particle& bquark, bquarks) {
if (deltaR(jt->rapidity(), jt->phi(), bquark.rapidity(),bquark.phi()) <= _Rjet) {
bjet = true;
break;
}
} // end loop around b-jets
if (bjet) {
numBJet++;
_dSdET->fill(jt->perp());
_dSdETA->fill(fabs(jt->rapidity()));
}
}
} // end loop around jets
// NB. the b-jet requirement on the N-jet distribution was previously missing
if (numJet > 0 && numBJet > 0) _dSdNJet->fill(numJet);
if (numBJet > 0) {
_dStot->fill(1960.0);
_dSdNbJet->fill(numBJet);
_dSdZpT->fill(Zmom.pT());
}
}
// Finalize
void finalize() {
// normalise histograms
// scale by 1 / the sum-of-weights of events that pass the Z cuts
// since the cross sections are normalized to the inclusive
// Z cross sections.
double Scale = 1.0;
- if (_sumWeightSelected != 0.0) Scale = 1.0/_sumWeightSelected;
+ if (_sumWeightSelected->val() != 0.0) Scale = 1.0/dbl(*_sumWeightSelected);
scale(_dStot,Scale);
scale(_dSdET,Scale);
scale(_dSdETA,Scale);
scale(_dSdNJet,Scale);
scale(_dSdNbJet,Scale);
scale(_dSdZpT,Scale);
}
//@}
private:
double _Rjet;
double _JetPtCut;
double _JetEtaCut;
double _Lep1PtCut;
double _Lep2PtCut;
double _LepEtaCut;
CounterPtr _sumWeightSelected;
//@{
/// Histograms
Histo1DPtr _dStot;
Histo1DPtr _dSdET;
Histo1DPtr _dSdETA;
Histo1DPtr _dSdNJet;
Histo1DPtr _dSdNbJet;
Histo1DPtr _dSdZpT;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2008_S8095620);
}
diff --git a/analyses/pluginCDF/CDF_2009_S8233977.cc b/analyses/pluginCDF/CDF_2009_S8233977.cc
--- a/analyses/pluginCDF/CDF_2009_S8233977.cc
+++ b/analyses/pluginCDF/CDF_2009_S8233977.cc
@@ -1,122 +1,122 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/TriggerCDFRun2.hh"
namespace Rivet {
/// @brief CDF Run II min-bias cross-section
/// @author Hendrik Hoeth
///
/// Measurement of \f$ \langle p_T \rangle \f$ vs. \f$ n_\text{ch} \f$,
/// the track \f$ p_T \f$ distribution, and the \f$ \sum E_T \f$ distribution.
/// Particles are selected within |eta|<1 and with pT>0.4 GeV.
/// There is no pT cut for the \f$ \sum E_T \f$ measurement.
///
/// @par Run conditions
///
/// @arg \f$ \sqrt{s} = \f$ 1960 GeV
/// @arg Run with generic QCD events.
/// @arg Set particles with c*tau > 10 mm stable
class CDF_2009_S8233977 : public Analysis {
public:
/// Constructor
CDF_2009_S8233977()
: Analysis("CDF_2009_S8233977")
{ }
/// @name Analysis methods
//@{
/// Book histograms and projections
void init() {
declare(TriggerCDFRun2(), "Trigger");
declare(FinalState(-1.0, 1.0, 0.0*GeV), "EtFS");
declare(ChargedFinalState(-1.0, 1.0, 0.4*GeV), "CFS");
book(_hist_pt ,1, 1, 1);
book(_hist_pt_vs_multiplicity ,2, 1, 1);
book(_hist_sumEt ,3, 1, 1);
book(_sumWeightSelected,"_sumWeightSelected");
}
/// Do the analysis
void analyze(const Event& evt) {
// MinBias Trigger
const bool trigger = apply<TriggerCDFRun2>(evt, "Trigger").minBiasDecision();
if (!trigger) vetoEvent;
/// @todo The pT and sum(ET) distributions look slightly different from
/// Niccolo's Monte Carlo plots. Still waiting for his answer.
const ChargedFinalState& trackfs = apply<ChargedFinalState>(evt, "CFS");
const size_t numParticles = trackfs.size();
foreach (const Particle& p, trackfs.particles()) {
const double pT = p.pT() / GeV;
_hist_pt_vs_multiplicity->fill(numParticles, pT);
// The weight for entries in the pT distribution should be weight/(pT*dPhi*dy).
//
// - dPhi = 2*PI
//
// - dy depends on the pT: They calculate y assuming the particle has the
// pion mass and assuming that eta=1:
// dy = 2 * 1/2 * ln [(sqrt(m^2 + (a^2+1)*pT^2) + a*pT) / (sqrt(m^2 + (a^2+1)*pT^2) - a*pT)]
// with a = sinh(1).
//
// sinh(1) = 1.1752012
// m(charged pion)^2 = (139.57 MeV)^2 = 0.019479785 GeV^2
const double sinh1 = 1.1752012;
const double apT = sinh1 * pT;
const double mPi = 139.57*MeV;
const double root = sqrt(mPi*mPi + (1+sinh1*sinh1)*pT*pT);
const double dy = std::log((root+apT)/(root-apT));
const double dphi = TWOPI;
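// Illustrative check: for pT = 1 GeV, apT = 1.1752 and
// root = sqrt(0.01948 + 2.3811) = 1.5494, so dy = ln(2.7246/0.3742) = 1.985,
// approaching the massless limit dy -> 2 as m/pT -> 0.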
_hist_pt->fill(pT, 1.0/(pT*dphi*dy));
}
// Calc sum(Et) from calo particles
const FinalState& etfs = apply<FinalState>(evt, "EtFS");
double sumEt = 0.0;
foreach (const Particle& p, etfs.particles()) {
sumEt += p.Et();
}
_hist_sumEt->fill(sumEt);
_sumWeightSelected->fill();
}
/// Normalize histos
void finalize() {
- scale(_hist_sumEt, crossSection()/millibarn/(4*M_PI*double(_sumWeightSelected)));
- scale(_hist_pt, crossSection()/millibarn/_sumWeightSelected);
+ scale(_hist_sumEt, crossSection()/millibarn/(4*M_PI*dbl(*_sumWeightSelected)));
+ scale(_hist_pt, crossSection()/millibarn/dbl(*_sumWeightSelected));
MSG_DEBUG("sumOfWeights() = " << sumOfWeights());
- MSG_DEBUG("_sumWeightSelected = " << double(_sumWeightSelected));
+ MSG_DEBUG("_sumWeightSelected = " << dbl(*_sumWeightSelected));
}
//@}
private:
CounterPtr _sumWeightSelected;
Profile1DPtr _hist_pt_vs_multiplicity;
Histo1DPtr _hist_pt;
Histo1DPtr _hist_sumEt;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CDF_2009_S8233977);
}
diff --git a/analyses/pluginCMS/CMS_2013_I1256943.cc b/analyses/pluginCMS/CMS_2013_I1256943.cc
--- a/analyses/pluginCMS/CMS_2013_I1256943.cc
+++ b/analyses/pluginCMS/CMS_2013_I1256943.cc
@@ -1,183 +1,183 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ZFinder.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// CMS cross-section and angular correlations in Z boson + b-hadrons events at 7 TeV
class CMS_2013_I1256943 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2013_I1256943);
/// Add projections and book histograms
void init() {
book(_sumW, "sumW");
book(_sumW50, "sumW50");
book(_sumWpT, "sumWpT");
FinalState fs(Cuts::abseta < 2.4 && Cuts::pT > 20*GeV);
declare(fs, "FS");
UnstableFinalState ufs(Cuts::abseta < 2 && Cuts::pT > 15*GeV);
declare(ufs, "UFS");
Cut zetacut = Cuts::abseta < 2.4;
ZFinder zfindermu(fs, zetacut, PID::MUON, 81.0*GeV, 101.0*GeV, 0.1, ZFinder::NOCLUSTER, ZFinder::TRACK, 91.2*GeV);
declare(zfindermu, "ZFinderMu");
ZFinder zfinderel(fs, zetacut, PID::ELECTRON, 81.0*GeV, 101.0*GeV, 0.1, ZFinder::NOCLUSTER, ZFinder::TRACK, 91.2*GeV);
declare(zfinderel, "ZFinderEl");
// Histograms in non-boosted region of Z pT
book(_h_dR_BB ,1, 1, 1);
book(_h_dphi_BB ,2, 1, 1);
book(_h_min_dR_ZB ,3, 1, 1);
book(_h_A_ZBB ,4, 1, 1);
// Histograms in boosted region of Z pT (pT > 50 GeV)
book(_h_dR_BB_boost ,5, 1, 1);
book(_h_dphi_BB_boost ,6, 1, 1);
book(_h_min_dR_ZB_boost ,7, 1, 1);
book(_h_A_ZBB_boost ,8, 1, 1);
book(_h_min_ZpT ,9,1,1);
}
/// Do the analysis
void analyze(const Event& e) {
vector<FourMomentum> Bmom;
const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
const ZFinder& zfindermu = apply<ZFinder>(e, "ZFinderMu");
const ZFinder& zfinderel = apply<ZFinder>(e, "ZFinderEl");
// Look for a Z --> mu+ mu- event in the final state
if (zfindermu.empty() && zfinderel.empty()) vetoEvent;
const Particles& z = !zfindermu.empty() ? zfindermu.bosons() : zfinderel.bosons();
const bool is_boosted = ( z[0].pT() > 50*GeV );
// Loop over the unstable particles
for (const Particle& p : ufs.particles()) {
const PdgId pid = p.pid();
// Look for particles with a bottom quark
if (PID::hasBottom(pid)) {
bool good_B = false;
const GenParticle* pgen = p.genParticle();
const GenVertex* vgen = pgen -> end_vertex();
// Loop over the decay products of each unstable particle, looking for a b-hadron pair
/// @todo Avoid HepMC API
for (GenVertex::particles_out_const_iterator it = vgen->particles_out_const_begin(); it != vgen->particles_out_const_end(); ++it) {
// Count this b-hadron only if none of its decay products has a bottom quark, i.e. it is the last b-hadron in the decay chain.
if (!( PID::hasBottom( (*it)->pdg_id() ) ) ) {
good_B = true;
continue;
} else {
good_B = false;
break;
}
}
if (good_B ) Bmom.push_back( p.momentum() );
}
else continue;
}
// Veto the event unless exactly two b-hadrons were found
if (Bmom.size() != 2 ) vetoEvent;
// Calculate the observables
double dphiBB = deltaPhi(Bmom[0], Bmom[1]);
double dRBB = deltaR(Bmom[0], Bmom[1]);
const FourMomentum& pZ = z[0].momentum();
const bool closest_B = ( deltaR(pZ, Bmom[0]) < deltaR(pZ, Bmom[1]) );
const double mindR_ZB = closest_B ? deltaR(pZ, Bmom[0]) : deltaR(pZ, Bmom[1]);
const double maxdR_ZB = closest_B ? deltaR(pZ, Bmom[1]) : deltaR(pZ, Bmom[0]);
const double AZBB = ( maxdR_ZB - mindR_ZB ) / ( maxdR_ZB + mindR_ZB );
// Fill the histograms in the non-boosted region
_h_dphi_BB->fill(dphiBB);
_h_dR_BB->fill(dRBB);
_h_min_dR_ZB->fill(mindR_ZB);
_h_A_ZBB->fill(AZBB);
_sumW->fill();
_sumWpT->fill();
// Fill the histograms in the boosted region
if (is_boosted) {
_sumW50->fill();
_h_dphi_BB_boost->fill(dphiBB);
_h_dR_BB_boost->fill(dRBB);
_h_min_dR_ZB_boost->fill(mindR_ZB);
_h_A_ZBB_boost->fill(AZBB);
}
// Fill Z pT (cumulative) histogram
_h_min_ZpT->fill(0);
if (pZ.pT() > 40*GeV ) {
_sumWpT->fill();
_h_min_ZpT->fill(40);
}
if (pZ.pT() > 80*GeV ) {
_sumWpT->fill();
_h_min_ZpT->fill(80);
}
if (pZ.pT() > 120*GeV ) {
_sumWpT->fill();
_h_min_ZpT->fill(120);
}
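// Each event thus enters every cumulative Z pT bin below its Z pT, and
// _sumWpT accumulates one fill per cumulative bin, matching the entries of
// _h_min_ZpT for the normalisation in finalize().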
Bmom.clear();
}
/// Finalize
void finalize() {
// Normalize, excluding overflow bins
- normalize(_h_dR_BB, 0.7*crossSection()*double(_sumW)/sumOfWeights(), false); // d01-x01-y01
- normalize(_h_dphi_BB, 0.53*crossSection()*double(_sumW)/sumOfWeights(), false); // d02-x01-y01
- normalize(_h_min_dR_ZB, 0.84*crossSection()*double(_sumW)/sumOfWeights(), false); // d03-x01-y01
- normalize(_h_A_ZBB, 0.2*crossSection()*double(_sumW)/sumOfWeights(), false); // d04-x01-y01
+ normalize(_h_dR_BB, 0.7*crossSection()*dbl(*_sumW)/sumOfWeights(), false); // d01-x01-y01
+ normalize(_h_dphi_BB, 0.53*crossSection()*dbl(*_sumW)/sumOfWeights(), false); // d02-x01-y01
+ normalize(_h_min_dR_ZB, 0.84*crossSection()*dbl(*_sumW)/sumOfWeights(), false); // d03-x01-y01
+ normalize(_h_A_ZBB, 0.2*crossSection()*dbl(*_sumW)/sumOfWeights(), false); // d04-x01-y01
- normalize(_h_dR_BB_boost, 0.84*crossSection()*double(_sumW50)/sumOfWeights(), false); // d05-x01-y01
- normalize(_h_dphi_BB_boost, 0.63*crossSection()*double(_sumW50)/sumOfWeights(), false); // d06-x01-y01
- normalize(_h_min_dR_ZB_boost, 1*crossSection()*double(_sumW50)/sumOfWeights(), false); // d07-x01-y01
- normalize(_h_A_ZBB_boost, 0.25*crossSection()*double(_sumW50)/sumOfWeights(), false); // d08-x01-y01
+ normalize(_h_dR_BB_boost, 0.84*crossSection()*dbl(*_sumW50)/sumOfWeights(), false); // d05-x01-y01
+ normalize(_h_dphi_BB_boost, 0.63*crossSection()*dbl(*_sumW50)/sumOfWeights(), false); // d06-x01-y01
+ normalize(_h_min_dR_ZB_boost, 1*crossSection()*dbl(*_sumW50)/sumOfWeights(), false); // d07-x01-y01
+ normalize(_h_A_ZBB_boost, 0.25*crossSection()*dbl(*_sumW50)/sumOfWeights(), false); // d08-x01-y01
- normalize(_h_min_ZpT, 40*crossSection()*double(_sumWpT)/sumOfWeights(), false); // d09-x01-y01
+ normalize(_h_min_ZpT, 40*crossSection()*dbl(*_sumWpT)/sumOfWeights(), false); // d09-x01-y01
}
private:
/// @name Weight counters
//@{
CounterPtr _sumW, _sumW50, _sumWpT;
//@}
/// @name Histograms
//@{
Histo1DPtr _h_dphi_BB, _h_dR_BB, _h_min_dR_ZB, _h_A_ZBB;
Histo1DPtr _h_dphi_BB_boost, _h_dR_BB_boost, _h_min_dR_ZB_boost, _h_A_ZBB_boost, _h_min_ZpT;
//@}
};
// Hook for the plugin system
DECLARE_RIVET_PLUGIN(CMS_2013_I1256943);
}
diff --git a/analyses/pluginCMS/CMS_2014_I1303894.cc b/analyses/pluginCMS/CMS_2014_I1303894.cc
--- a/analyses/pluginCMS/CMS_2014_I1303894.cc
+++ b/analyses/pluginCMS/CMS_2014_I1303894.cc
@@ -1,236 +1,236 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
#include "Rivet/Projections/PromptFinalState.hh"
#include "Rivet/Projections/InvMassFinalState.hh"
#include "Rivet/Projections/MissingMomentum.hh"
#include "Rivet/Projections/WFinder.hh"
#include "Rivet/Projections/DressedLeptons.hh"
namespace Rivet {
/// @brief Differential cross-section of W bosons + jets in pp collisions at sqrt(s)=7 TeV
/// @author Darin Baumgartel (darinb@cern.ch)
///
/// Based on Rivet analysis originally created by Anil Singh (anil@cern.ch), Lovedeep Saini (lovedeep@cern.ch)
class CMS_2014_I1303894 : public Analysis {
public:
/// Constructor
CMS_2014_I1303894()
: Analysis("CMS_2014_I1303894")
{ }
// Book histograms and initialise projections before the run
void init() {
// Prompt leptons only, no test on nu flavour.
// Projections
const FinalState fs;
declare(fs, "FS");
MissingMomentum missing(fs);
declare(missing, "MET");
PromptFinalState pfs(fs);
IdentifiedFinalState bareMuons(pfs);
bareMuons.acceptIdPair(PID::MUON);
DressedLeptons muonClusters(fs, bareMuons, -1); //, Cuts::open(), false, false);
declare(muonClusters, "muonClusters");
IdentifiedFinalState neutrinos(pfs);
neutrinos.acceptIdPair(PID::NU_MU);
declare(neutrinos, "neutrinos");
VetoedFinalState jetFS(fs);
jetFS.addVetoOnThisFinalState(muonClusters);
jetFS.addVetoOnThisFinalState(neutrinos);
jetFS.vetoNeutrinos();
FastJets JetProjection(jetFS, FastJets::ANTIKT, 0.5);
JetProjection.useInvisibles(false);
declare(JetProjection, "Jets");
// Histograms
book(_histDPhiMuJet1 ,1,1,1);
book(_histDPhiMuJet2 ,2,1,1);
book(_histDPhiMuJet3 ,3,1,1);
book(_histDPhiMuJet4 ,4,1,1);
book(_histEtaJet1 ,5,1,1);
book(_histEtaJet2 ,6,1,1);
book(_histEtaJet3 ,7,1,1);
book(_histEtaJet4 ,8,1,1);
book(_histHT1JetInc ,9,1,1);
book(_histHT2JetInc ,10,1,1);
book(_histHT3JetInc ,11,1,1);
book(_histHT4JetInc ,12,1,1);
book(_histJet30MultExc ,13,1,1);
book(_histJet30MultInc ,14,1,1);
book(_histPtJet1 ,15,1,1);
book(_histPtJet2 ,16,1,1);
book(_histPtJet3 ,17,1,1);
book(_histPtJet4 ,18,1,1);
// Counters
book(_n_1jet, "n_1jet");
book(_n_2jet, "n_2jet");
book(_n_3jet, "n_3jet");
book(_n_4jet, "n_4jet");
book(_n_inclusivebinsummation, "n_inclusivebinsummation");
}
void analyze(const Event& event) {
// Get the dressed muon
const DressedLeptons& muonClusters = apply<DressedLeptons>(event, "muonClusters");
int nmu = muonClusters.dressedLeptons().size();
if (nmu < 1) vetoEvent;
DressedLepton dressedmuon = muonClusters.dressedLeptons()[0];
if (dressedmuon.momentum().abseta() > 2.1) vetoEvent;
if (dressedmuon.momentum().pT() < 25.0*GeV) vetoEvent;
// Get the muon neutrino
//const Particles& neutrinos = apply<FinalState>(event, "neutrinos").particlesByPt();
// Check that the muon and neutrino are not decay products of tau
if (dressedmuon.constituentLepton().hasAncestor( PID::TAU)) vetoEvent;
if (dressedmuon.constituentLepton().hasAncestor(-PID::TAU)) vetoEvent;
// Get the missing momentum
const MissingMomentum& met = apply<MissingMomentum>(event, "MET");
const double ptmet = met.visibleMomentum().pT();
const double phimet = (-met.visibleMomentum()).phi();
// Calculate MET and MT(mu,MET), and remove events with MT < 50 GeV
const double ptmuon = dressedmuon.pT();
const double phimuon = dressedmuon.phi();
const double mt_mumet = sqrt(2*ptmuon*ptmet*(1.0 - cos(phimet-phimuon)));
// Remove events in MT < 50 region
if (mt_mumet < 50*GeV) vetoEvent;
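// E.g. a 30 GeV muon back-to-back (dphi = pi) with 30 GeV of missing pT gives
// mT = sqrt(2*30*30*2) = 60 GeV, comfortably above the 50 GeV requirement.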
// Loop over jets and fill pt/eta/phi quantities in vectors
const Jets& jets_filtered = apply<FastJets>(event, "Jets").jetsByPt(0.0*GeV);
vector<float> finaljet_pT_list, finaljet_eta_list, finaljet_phi_list;
double htjets = 0.0;
for (size_t ii = 0; ii < jets_filtered.size(); ++ii) {
// Jet pT/eta/phi
double jet_pt = jets_filtered[ii].pT();
double jet_eta = jets_filtered[ii].eta();
double jet_phi = jets_filtered[ii].phi();
// Kinematic cuts for jet acceptance
if (fabs(jet_eta) > 2.4) continue;
if (jet_pt < 30.0*GeV) continue;
if (deltaR(dressedmuon, jets_filtered[ii]) < 0.5) continue;
// Add jet to jet list and increase the HT variable
finaljet_pT_list.push_back(jet_pt);
finaljet_eta_list.push_back(jet_eta);
finaljet_phi_list.push_back(jet_phi);
htjets += fabs(jet_pt);
}
// Filling of histograms:
// Fill as many jets as there are into the exclusive jet multiplicity
if (!finaljet_pT_list.empty())
_histJet30MultExc->fill(finaljet_pT_list.size());
for (size_t ij = 0; ij < finaljet_pT_list.size(); ++ij) {
_histJet30MultInc->fill(ij+1);
_n_inclusivebinsummation->fill();
}
if (finaljet_pT_list.size() >= 1) {
_histPtJet1->fill(finaljet_pT_list[0]);
_histEtaJet1->fill(fabs(finaljet_eta_list[0]));
_histDPhiMuJet1->fill(deltaPhi(finaljet_phi_list[0], phimuon));
_histHT1JetInc->fill(htjets);
_n_1jet->fill();
}
if (finaljet_pT_list.size() >= 2) {
_histPtJet2->fill(finaljet_pT_list[1]);
_histEtaJet2->fill(fabs(finaljet_eta_list[1]));
_histDPhiMuJet2->fill(deltaPhi(finaljet_phi_list[1], phimuon));
_histHT2JetInc->fill(htjets);
_n_2jet->fill();
}
if (finaljet_pT_list.size() >= 3) {
_histPtJet3->fill(finaljet_pT_list[2]);
_histEtaJet3->fill(fabs(finaljet_eta_list[2]));
_histDPhiMuJet3->fill(deltaPhi(finaljet_phi_list[2], phimuon));
_histHT3JetInc->fill(htjets);
_n_3jet->fill();
}
if (finaljet_pT_list.size() >=4 ) {
_histPtJet4->fill(finaljet_pT_list[3]);
_histEtaJet4->fill(fabs(finaljet_eta_list[3]));
_histDPhiMuJet4->fill(deltaPhi(finaljet_phi_list[3], phimuon));
_histHT4JetInc-> fill(htjets);
_n_4jet->fill();
}
}
// Finalize the histograms.
void finalize() {
const double inclusive_cross_section = crossSection();
- const double norm_1jet_histo = inclusive_cross_section*double(_n_1jet)/sumOfWeights();
- const double norm_2jet_histo = inclusive_cross_section*double(_n_2jet)/sumOfWeights();
- const double norm_3jet_histo = inclusive_cross_section*double(_n_3jet)/sumOfWeights();
- const double norm_4jet_histo = inclusive_cross_section*double(_n_4jet)/sumOfWeights();
- const double norm_incmultiplicity = inclusive_cross_section*double(_n_inclusivebinsummation)/sumOfWeights();
+ const double norm_1jet_histo = inclusive_cross_section*dbl(*_n_1jet)/sumOfWeights();
+ const double norm_2jet_histo = inclusive_cross_section*dbl(*_n_2jet)/sumOfWeights();
+ const double norm_3jet_histo = inclusive_cross_section*dbl(*_n_3jet)/sumOfWeights();
+ const double norm_4jet_histo = inclusive_cross_section*dbl(*_n_4jet)/sumOfWeights();
+ const double norm_incmultiplicity = inclusive_cross_section*dbl(*_n_inclusivebinsummation)/sumOfWeights();
normalize(_histJet30MultExc, norm_1jet_histo);
normalize(_histJet30MultInc, norm_incmultiplicity);
normalize(_histPtJet1, norm_1jet_histo);
normalize(_histHT1JetInc, norm_1jet_histo);
normalize(_histEtaJet1, norm_1jet_histo);
normalize(_histDPhiMuJet1, norm_1jet_histo);
normalize(_histPtJet2, norm_2jet_histo);
normalize(_histHT2JetInc, norm_2jet_histo);
normalize(_histEtaJet2, norm_2jet_histo);
normalize(_histDPhiMuJet2, norm_2jet_histo);
normalize(_histPtJet3, norm_3jet_histo);
normalize(_histHT3JetInc, norm_3jet_histo);
normalize(_histEtaJet3, norm_3jet_histo);
normalize(_histDPhiMuJet3, norm_3jet_histo);
normalize(_histPtJet4, norm_4jet_histo);
normalize(_histHT4JetInc, norm_4jet_histo);
normalize(_histEtaJet4, norm_4jet_histo);
normalize(_histDPhiMuJet4, norm_4jet_histo);
}
private:
Histo1DPtr _histJet30MultExc, _histJet30MultInc;
Histo1DPtr _histPtJet1, _histPtJet2, _histPtJet3, _histPtJet4;
Histo1DPtr _histEtaJet1, _histEtaJet2, _histEtaJet3, _histEtaJet4;
Histo1DPtr _histDPhiMuJet1, _histDPhiMuJet2, _histDPhiMuJet3, _histDPhiMuJet4;
Histo1DPtr _histHT1JetInc, _histHT2JetInc, _histHT3JetInc, _histHT4JetInc;
CounterPtr _n_1jet, _n_2jet, _n_3jet, _n_4jet, _n_inclusivebinsummation;
};
DECLARE_RIVET_PLUGIN(CMS_2014_I1303894);
}
diff --git a/analyses/pluginD0/D0_2001_S4674421.cc b/analyses/pluginD0/D0_2001_S4674421.cc
--- a/analyses/pluginD0/D0_2001_S4674421.cc
+++ b/analyses/pluginD0/D0_2001_S4674421.cc
@@ -1,189 +1,189 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/LeadingParticlesFinalState.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
namespace Rivet {
/// @brief D0 Run I differential W/Z boson cross-section analysis
/// @author Lars Sonnenschein
/// @author Andy Buckley
class D0_2001_S4674421 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor.
D0_2001_S4674421()
: Analysis("D0_2001_S4674421")
{ }
/// @name Analysis methods
//@{
void init() {
// Final state projection
FinalState fs(-5.0, 5.0); // corrected for detector acceptance
declare(fs, "FS");
// Z -> e- e+
LeadingParticlesFinalState eeFS(FinalState(-5.0, 5.0, 0.)); //20.);
eeFS.addParticleIdPair(PID::ELECTRON);
declare(eeFS, "eeFS");
// W- -> e- nu_e~
LeadingParticlesFinalState enuFS(FinalState(-5.0, 5.0, 0.)); //25.);
enuFS.addParticleId(PID::ELECTRON).addParticleId(PID::NU_EBAR);
declare(enuFS, "enuFS");
// W+ -> e+ nu_e
LeadingParticlesFinalState enubFS(FinalState(-5.0, 5.0, 0.)); //25.);
enubFS.addParticleId(PID::POSITRON).addParticleId(PID::NU_E);
declare(enubFS, "enubFS");
// Remove neutrinos for isolation of final state particles
VetoedFinalState vfs(fs);
vfs.vetoNeutrinos();
declare(vfs, "VFS");
// Counters
book(_eventsFilledW,"eventsFilledW");
book(_eventsFilledZ,"eventsFilledZ");
// Histograms
book(_h_dsigdpt_w ,1, 1, 1);
book(_h_dsigdpt_z ,1, 1, 2);
book(_h_dsigdpt_scaled_z, 2, 1, 1);
}
void analyze(const Event& event) {
const LeadingParticlesFinalState& eeFS = apply<LeadingParticlesFinalState>(event, "eeFS");
// Z boson analysis
if (eeFS.particles().size() >= 2) {
// If there is a Z candidate:
// Fill Z pT distributions
double deltaM2=1e30,mass2(0.);
double pT=-1.;
const Particles& Zdaughters = eeFS.particles();
for (size_t ix = 0; ix < Zdaughters.size(); ++ix) {
for (size_t iy = ix+1; iy < Zdaughters.size(); ++iy) {
if (Zdaughters[ix].pid()!=-Zdaughters[iy].pid()) continue;
const FourMomentum pmom = Zdaughters[ix].momentum() + Zdaughters[iy].momentum();
double mz2 = pmom.mass2();
double dm2 = fabs(mz2 - sqr(91.118*GeV));
if (dm2 < deltaM2) {
pT = pmom.pT();
deltaM2 = dm2;
mass2 = mz2;
}
}
}
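// The retained pair is the opposite-charge combination whose invariant mass is
// closest to 91.118 GeV; its pT is used only if the mass also falls in the
// 75-105 GeV window checked below.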
if (pT > 0. && mass2 > 0. && inRange(sqrt(mass2)/GeV, 75.0, 105.0)) {
_eventsFilledZ->fill();
MSG_DEBUG("Z pmom.pT() = " << pT/GeV << " GeV");
_h_dsigdpt_z->fill(pT/GeV);
// return if found a Z
return;
}
}
// There is no Z -> ee candidate... so this might be a W event
const LeadingParticlesFinalState& enuFS = apply<LeadingParticlesFinalState>(event, "enuFS");
const LeadingParticlesFinalState& enubFS = apply<LeadingParticlesFinalState>(event, "enubFS");
double deltaM2=1e30;
double pT=-1.;
for (size_t iw = 0; iw < 2; ++iw) {
Particles Wdaughters;
Wdaughters = (iw == 0) ? enuFS.particles() : enubFS.particles();
for (size_t ix = 0; ix < Wdaughters.size(); ++ix) {
for (size_t iy = ix+1; iy < Wdaughters.size(); ++iy) {
if (Wdaughters[ix].pid() == Wdaughters[iy].pid()) continue;
const FourMomentum pmom = Wdaughters[ix].momentum() + Wdaughters[iy].momentum();
double dm2 = abs(pmom.mass2() - sqr(80.4*GeV));
if (dm2 < deltaM2) {
pT = pmom.pT();
deltaM2 = dm2;
}
}
}
}
if (pT > 0.) {
_eventsFilledW->fill();
_h_dsigdpt_w->fill(pT/GeV);
}
}
void finalize() {
// Get cross-section per event (i.e. per unit weight) from generator
const double xSecPerEvent = crossSectionPerEvent()/picobarn;
// Correct W pT distribution to W cross-section
- const double xSecW = xSecPerEvent * double(_eventsFilledW);
+ const double xSecW = xSecPerEvent * dbl(*_eventsFilledW);
// Correct Z pT distribution to Z cross-section
- const double xSecZ = xSecPerEvent * double(_eventsFilledZ);
+ const double xSecZ = xSecPerEvent * dbl(*_eventsFilledZ);
// Get W and Z pT integrals
const double wpt_integral = _h_dsigdpt_w->integral();
const double zpt_integral = _h_dsigdpt_z->integral();
// Divide and scale ratio histos
if (xSecW == 0 || wpt_integral == 0 || xSecZ == 0 || zpt_integral == 0) {
MSG_WARNING("Not filling ratio plot because input histos are empty");
} else {
// Scale factor converts event counts to cross-sections, and inverts the
// branching ratios since only one decay channel has been analysed for each boson.
// Oh, and we put MW/MZ in, like they do in the paper.
const double MW_MZ = 0.8820; // Ratio M_W/M_Z
const double BRZEE_BRWENU = 0.033632 / 0.1073; // Ratio of branching fractions
const double scalefactor = (xSecW / wpt_integral) / (xSecZ / zpt_integral) * MW_MZ * BRZEE_BRWENU;
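// Numerically BRZEE_BRWENU = 0.033632/0.1073 = 0.3134, so the combined
// constant factor multiplying the cross-section ratio is 0.8820*0.3134 = 0.2765.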
for (size_t ibin = 0; ibin < _h_dsigdpt_w->numBins(); ibin++) {
const double xval = _h_dsigdpt_w->bin(ibin).xMid();
const double xerr = _h_dsigdpt_w->bin(ibin).xWidth() / 2.;
double yval(0), yerr(0);
if (_h_dsigdpt_w->bin(ibin).sumW() != 0 && _h_dsigdpt_z->bin(ibin).sumW() != 0) {
yval = scalefactor * _h_dsigdpt_w->bin(ibin).sumW() / _h_dsigdpt_z->bin(ibin).sumW();
yerr = yval * sqrt( sqr(_h_dsigdpt_w->bin(ibin).relErr()) + sqr(_h_dsigdpt_z->bin(ibin).relErr()) );
}
_h_dsigdpt_scaled_z->addPoint(xval, yval, xerr, yerr);
}
}
// Normalize non-ratio histos
normalize(_h_dsigdpt_w, xSecW);
normalize(_h_dsigdpt_z, xSecZ);
}
//@}
private:
/// @name Event counters for cross section normalizations
//@{
CounterPtr _eventsFilledW;
CounterPtr _eventsFilledZ;
//@}
//@{
/// Histograms
Histo1DPtr _h_dsigdpt_w;
Histo1DPtr _h_dsigdpt_z;
Scatter2DPtr _h_dsigdpt_scaled_z;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(D0_2001_S4674421);
}
diff --git a/analyses/pluginD0/D0_2008_S7863608.cc b/analyses/pluginD0/D0_2008_S7863608.cc
--- a/analyses/pluginD0/D0_2008_S7863608.cc
+++ b/analyses/pluginD0/D0_2008_S7863608.cc
@@ -1,132 +1,132 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ZFinder.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
/// @brief D0 differential Z/\f$ \gamma^* \f$ + jet + \f$ X \f$ cross sections
/// @author Gavin Hesketh, Andy Buckley, Frank Siegert
class D0_2008_S7863608 : public Analysis {
public:
/// Constructor
D0_2008_S7863608()
: Analysis("D0_2008_S7863608")
{ }
/// @name Analysis methods
//@{
/// Book histograms
void init() {
/// @todo These clustering arguments look odd: are they ok?
Cut cut = Cuts::abseta < 1.7 && Cuts::pT > 15*GeV;
ZFinder zfinder(FinalState(), cut, PID::MUON, 65*GeV, 115*GeV, 0.2, ZFinder::NOCLUSTER, ZFinder::TRACK);
declare(zfinder, "ZFinder");
FastJets conefinder(zfinder.remainingFinalState(), FastJets::D0ILCONE, 0.5);
declare(conefinder, "ConeFinder");
book(_sum_of_weights_inclusive, "sum_of_weights_inclusive");
book(_h_jet_pT_cross_section ,1, 1, 1);
book(_h_jet_pT_normalised ,1, 1, 2);
book(_h_jet_y_cross_section ,2, 1, 1);
book(_h_jet_y_normalised ,2, 1, 2);
book(_h_Z_pT_cross_section ,3, 1, 1);
book(_h_Z_pT_normalised ,3, 1, 2);
book(_h_Z_y_cross_section ,4, 1, 1);
book(_h_Z_y_normalised ,4, 1, 2);
book(_h_total_cross_section ,5, 1, 1);
}
// Do the analysis
void analyze(const Event& e) {
const ZFinder& zfinder = apply<ZFinder>(e, "ZFinder");
if (zfinder.bosons().size()==1) {
_sum_of_weights_inclusive->fill();
const JetAlg& jetpro = apply<JetAlg>(e, "ConeFinder");
const Jets& jets = jetpro.jetsByPt(20*GeV);
Jets jets_cut;
foreach (const Jet& j, jets) {
if (j.abseta() < 2.8) {
jets_cut.push_back(j);
}
}
// Return if there are no jets:
if(jets_cut.size()<1) {
MSG_DEBUG("Skipping event " << numEvents() << " because no jets pass cuts ");
vetoEvent;
}
const FourMomentum Zmom = zfinder.bosons()[0].momentum();
// In jet pT
_h_jet_pT_cross_section->fill( jets_cut[0].pT());
_h_jet_pT_normalised->fill( jets_cut[0].pT());
_h_jet_y_cross_section->fill( fabs(jets_cut[0].rapidity()));
_h_jet_y_normalised->fill( fabs(jets_cut[0].rapidity()));
// In Z pT
_h_Z_pT_cross_section->fill(Zmom.pT());
_h_Z_pT_normalised->fill(Zmom.pT());
_h_Z_y_cross_section->fill(Zmom.absrap());
_h_Z_y_normalised->fill(Zmom.absrap());
_h_total_cross_section->fill(1960);
}
}
/// Finalize
void finalize() {
const double invlumi = crossSection()/sumOfWeights();
scale(_h_total_cross_section, invlumi);
scale(_h_jet_pT_cross_section, invlumi);
scale(_h_jet_y_cross_section, invlumi);
scale(_h_Z_pT_cross_section, invlumi);
scale(_h_Z_y_cross_section, invlumi);
- double factor=1/_sum_of_weights_inclusive;
- if (_sum_of_weights_inclusive == 0) factor = 0;
+ double factor=1/dbl(*_sum_of_weights_inclusive);
+ if (_sum_of_weights_inclusive->val() == 0) factor = 0;
scale(_h_jet_pT_normalised, factor);
scale(_h_jet_y_normalised, factor);
scale(_h_Z_pT_normalised, factor);
scale(_h_Z_y_normalised, factor);
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_jet_pT_cross_section;
Histo1DPtr _h_jet_y_cross_section;
Histo1DPtr _h_Z_pT_cross_section;
Histo1DPtr _h_Z_y_cross_section;
Histo1DPtr _h_total_cross_section;
Histo1DPtr _h_jet_pT_normalised;
Histo1DPtr _h_jet_y_normalised;
Histo1DPtr _h_Z_pT_normalised;
Histo1DPtr _h_Z_y_normalised;
//@}
CounterPtr _sum_of_weights_inclusive;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(D0_2008_S7863608);
}
diff --git a/analyses/pluginD0/D0_2009_S8202443.cc b/analyses/pluginD0/D0_2009_S8202443.cc
--- a/analyses/pluginD0/D0_2009_S8202443.cc
+++ b/analyses/pluginD0/D0_2009_S8202443.cc
@@ -1,126 +1,126 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ZFinder.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
/// @brief D0 Z + jet + \f$ X \f$ cross-section / \f$ p_\perp \f$ distributions
class D0_2009_S8202443 : public Analysis {
public:
/// Constructor
D0_2009_S8202443()
: Analysis("D0_2009_S8202443")
{ }
/// @name Analysis methods
//@{
/// Book histograms
void init() {
FinalState fs;
// Leptons in constrained tracking acceptance
Cut cuts = (Cuts::abseta < 1.1 || Cuts::absetaIn(1.5, 2.5)) && Cuts::pT > 25*GeV;
ZFinder zfinder_constrained(fs, cuts, PID::ELECTRON, 65*GeV, 115*GeV, 0.2, ZFinder::CLUSTERNODECAY, ZFinder::TRACK);
declare(zfinder_constrained, "ZFinderConstrained");
FastJets conefinder_constrained(zfinder_constrained.remainingFinalState(), FastJets::D0ILCONE, 0.5);
declare(conefinder_constrained, "ConeFinderConstrained");
// Unconstrained leptons
ZFinder zfinder(fs, Cuts::open(), PID::ELECTRON, 65*GeV, 115*GeV, 0.2, ZFinder::CLUSTERNODECAY, ZFinder::TRACK);
declare(zfinder, "ZFinder");
FastJets conefinder(zfinder.remainingFinalState(), FastJets::D0ILCONE, 0.5);
declare(conefinder, "ConeFinder");
book(_h_jet1_pT_constrained ,1, 1, 1);
book(_h_jet2_pT_constrained ,3, 1, 1);
book(_h_jet3_pT_constrained ,5, 1, 1);
book(_h_jet1_pT ,2, 1, 1);
book(_h_jet2_pT ,4, 1, 1);
book(_h_jet3_pT ,6, 1, 1);
book(_sum_of_weights,"sum_of_weights");
book(_sum_of_weights_constrained, "sum_of_weights_constrained");
}
// Do the analysis
void analyze(const Event& e) {
// Unconstrained electrons
const ZFinder& zfinder = apply<ZFinder>(e, "ZFinder");
if (zfinder.bosons().size() == 0) {
MSG_DEBUG("No unique lepton pair found.");
vetoEvent;
}
_sum_of_weights->fill();
const Jets jets_cut = apply<JetAlg>(e, "ConeFinder").jetsByPt(Cuts::pT > 20*GeV && Cuts::abseta < 2.5);
if (jets_cut.size() > 0)
_h_jet1_pT->fill(jets_cut[0].pT()/GeV);
if (jets_cut.size() > 1)
_h_jet2_pT->fill(jets_cut[1].pT()/GeV);
if (jets_cut.size() > 2)
_h_jet3_pT->fill(jets_cut[2].pT()/GeV);
// Constrained electrons
const ZFinder& zfinder_constrained = apply<ZFinder>(e, "ZFinderConstrained");
if (zfinder_constrained.bosons().size() == 0) {
MSG_DEBUG("No unique constrained lepton pair found.");
return; // Not really a "veto", since if we got this far there is an unconstrained Z
}
_sum_of_weights_constrained->fill();
const Jets& jets_constrained = apply<JetAlg>(e, "ConeFinderConstrained").jetsByPt(20*GeV);
/// @todo Replace this explicit selection with a Cut
Jets jets_cut_constrained;
foreach (const Jet& j, jets_constrained) {
if (j.abseta() < 2.5) jets_cut_constrained.push_back(j);
}
if (jets_cut_constrained.size() > 0)
_h_jet1_pT_constrained->fill(jets_cut_constrained[0].pT()/GeV);
if (jets_cut_constrained.size() > 1)
_h_jet2_pT_constrained->fill(jets_cut_constrained[1].pT()/GeV);
if (jets_cut_constrained.size() > 2)
_h_jet3_pT_constrained->fill(jets_cut_constrained[2].pT()/GeV);
}
// Finalize
void finalize() {
- scale(_h_jet1_pT, 1/_sum_of_weights);
- scale(_h_jet2_pT, 1/_sum_of_weights);
- scale(_h_jet3_pT, 1/_sum_of_weights);
- scale(_h_jet1_pT_constrained, 1/_sum_of_weights_constrained);
- scale(_h_jet2_pT_constrained, 1/_sum_of_weights_constrained);
- scale(_h_jet3_pT_constrained, 1/_sum_of_weights_constrained);
+ scale(_h_jet1_pT, 1/ *_sum_of_weights);
+ scale(_h_jet2_pT, 1/ *_sum_of_weights);
+ scale(_h_jet3_pT, 1/ *_sum_of_weights);
+ scale(_h_jet1_pT_constrained, 1/ *_sum_of_weights_constrained);
+ scale(_h_jet2_pT_constrained, 1/ *_sum_of_weights_constrained);
+ scale(_h_jet3_pT_constrained, 1/ *_sum_of_weights_constrained);
}
//@}
private:
/// @name Histograms
//@{
Histo1DPtr _h_jet1_pT;
Histo1DPtr _h_jet2_pT;
Histo1DPtr _h_jet3_pT;
Histo1DPtr _h_jet1_pT_constrained;
Histo1DPtr _h_jet2_pT_constrained;
Histo1DPtr _h_jet3_pT_constrained;
//@}
CounterPtr _sum_of_weights, _sum_of_weights_constrained;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(D0_2009_S8202443);
}
diff --git a/analyses/pluginD0/D0_2009_S8349509.cc b/analyses/pluginD0/D0_2009_S8349509.cc
--- a/analyses/pluginD0/D0_2009_S8349509.cc
+++ b/analyses/pluginD0/D0_2009_S8349509.cc
@@ -1,168 +1,168 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ZFinder.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
/// @brief D0 Z+jets angular distributions
class D0_2009_S8349509 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
D0_2009_S8349509()
: Analysis("D0_2009_S8349509")
{ }
//@}
/// @name Analysis methods
//@{
/// Book histograms
void init() {
Cut cut = Cuts::abseta < 1.7 && Cuts::pT > 15*GeV;
ZFinder zfinder(FinalState(), cut, PID::MUON, 65*GeV, 115*GeV, 0.2, ZFinder::NOCLUSTER, ZFinder::TRACK);
declare(zfinder, "ZFinder");
FastJets conefinder(zfinder.remainingFinalState(), FastJets::D0ILCONE, 0.5);
declare(conefinder, "ConeFinder");
book(_h_dphi_jet_Z25 ,1, 1, 1);
book(_h_dphi_jet_Z45 ,2, 1, 1);
book(_h_dy_jet_Z25 ,3, 1, 1);
book(_h_dy_jet_Z45 ,4, 1, 1);
book(_h_yboost_jet_Z25 ,5, 1, 1);
book(_h_yboost_jet_Z45 ,6, 1, 1);
book(_h_dphi_jet_Z25_xs ,1, 1, 2);
book(_h_dphi_jet_Z45_xs ,2, 1, 2);
book(_h_dy_jet_Z25_xs ,3, 1, 2);
book(_h_dy_jet_Z45_xs ,4, 1, 2);
book(_h_yboost_jet_Z25_xs ,5, 1, 2);
book(_h_yboost_jet_Z45_xs ,6, 1, 2);
book(_inclusive_Z_sumofweights, "_inclusive_Z_sumofweights");
}
void analyze(const Event& event) {
const ZFinder& zfinder = apply<ZFinder>(event, "ZFinder");
if (zfinder.bosons().size() == 1) {
// count inclusive sum of weights for histogram normalisation
_inclusive_Z_sumofweights->fill();
const FourMomentum& zmom = zfinder.bosons()[0].momentum();
if (zmom.pT() < 25*GeV) vetoEvent;
Jets jets;
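// Keep only the hardest jet within |eta| < 2.8 (the jets are pT-ordered,
// hence the break after the first accepted one)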
foreach (const Jet& j, apply<JetAlg>(event, "ConeFinder").jetsByPt(20*GeV)) {
if (j.abseta() < 2.8) {
jets.push_back(j);
break;
}
}
// Return if there are no jets:
if (jets.size() < 1) {
MSG_DEBUG("Skipping event " << numEvents() << " because no jets pass cuts ");
vetoEvent;
}
const FourMomentum& jetmom = jets[0].momentum();
const double yZ = zmom.rapidity();
const double yjet = jetmom.rapidity();
const double dphi = deltaPhi(zmom, jetmom);
const double dy = deltaRap(zmom, jetmom);
const double yboost = fabs(yZ+yjet)/2;
if (zmom.pT() > 25*GeV) {
_h_dphi_jet_Z25->fill(dphi);
_h_dy_jet_Z25->fill(dy);
_h_yboost_jet_Z25->fill(yboost);
_h_dphi_jet_Z25_xs->fill(dphi);
_h_dy_jet_Z25_xs->fill(dy);
_h_yboost_jet_Z25_xs->fill(yboost);
}
if (zmom.pT() > 45*GeV) {
_h_dphi_jet_Z45->fill(dphi);
_h_dy_jet_Z45->fill(dy);
_h_yboost_jet_Z45->fill(yboost);
_h_dphi_jet_Z45_xs->fill(dphi);
_h_dy_jet_Z45_xs->fill(dy);
_h_yboost_jet_Z45_xs->fill(yboost);
}
}
}
void finalize() {
- if (_inclusive_Z_sumofweights == 0) return;
- scale(_h_dphi_jet_Z25, 1/_inclusive_Z_sumofweights);
- scale(_h_dphi_jet_Z45, 1/_inclusive_Z_sumofweights);
- scale(_h_dy_jet_Z25, 1/_inclusive_Z_sumofweights);
- scale(_h_dy_jet_Z45, 1/_inclusive_Z_sumofweights);
- scale(_h_yboost_jet_Z25, 1/_inclusive_Z_sumofweights);
- scale(_h_yboost_jet_Z45, 1/_inclusive_Z_sumofweights);
+ if (_inclusive_Z_sumofweights->val() == 0) return;
+ scale(_h_dphi_jet_Z25, 1/ *_inclusive_Z_sumofweights);
+ scale(_h_dphi_jet_Z45, 1/ *_inclusive_Z_sumofweights);
+ scale(_h_dy_jet_Z25, 1/ *_inclusive_Z_sumofweights);
+ scale(_h_dy_jet_Z45, 1/ *_inclusive_Z_sumofweights);
+ scale(_h_yboost_jet_Z25, 1/ *_inclusive_Z_sumofweights);
+ scale(_h_yboost_jet_Z45, 1/ *_inclusive_Z_sumofweights);
scale(_h_dphi_jet_Z25_xs, crossSectionPerEvent());
scale(_h_dphi_jet_Z45_xs, crossSectionPerEvent());
scale(_h_dy_jet_Z25_xs, crossSectionPerEvent());
scale(_h_dy_jet_Z45_xs, crossSectionPerEvent());
scale(_h_yboost_jet_Z25_xs, crossSectionPerEvent());
scale(_h_yboost_jet_Z45_xs, crossSectionPerEvent());
}
//@}
private:
// Data members like post-cuts event weight counters go here
private:
/// @name Histograms (normalised)
//@{
Histo1DPtr _h_dphi_jet_Z25;
Histo1DPtr _h_dphi_jet_Z45;
Histo1DPtr _h_dy_jet_Z25;
Histo1DPtr _h_dy_jet_Z45;
Histo1DPtr _h_yboost_jet_Z25;
Histo1DPtr _h_yboost_jet_Z45;
//@}
/// @name Histograms (absolute cross sections)
//@{
Histo1DPtr _h_dphi_jet_Z25_xs;
Histo1DPtr _h_dphi_jet_Z45_xs;
Histo1DPtr _h_dy_jet_Z25_xs;
Histo1DPtr _h_dy_jet_Z45_xs;
Histo1DPtr _h_yboost_jet_Z25_xs;
Histo1DPtr _h_yboost_jet_Z45_xs;
//@}
CounterPtr _inclusive_Z_sumofweights;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(D0_2009_S8349509);
}
diff --git a/analyses/pluginHERA/H1_1994_S2919893.cc b/analyses/pluginHERA/H1_1994_S2919893.cc
--- a/analyses/pluginHERA/H1_1994_S2919893.cc
+++ b/analyses/pluginHERA/H1_1994_S2919893.cc
@@ -1,229 +1,229 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Math/Constants.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/DISKinematics.hh"
namespace Rivet {
/// @brief H1 energy flow and charged particle spectra
/// @author Peter Richardson
/// Based on the equivalent HZTool analysis
class H1_1994_S2919893 : public Analysis {
public:
/// Constructor
H1_1994_S2919893()
: Analysis("H1_1994_S2919893")
{}
/// @name Analysis methods
//@{
/// Initialise projections and histograms
void init() {
// Projections
declare(DISLepton(), "Lepton");
declare(DISKinematics(), "Kinematics");
declare(FinalState(), "FS");
// Histos
book(_histEnergyFlowLowX ,1, 1, 1);
book(_histEnergyFlowHighX ,1, 1, 2);
book(_histEECLowX ,2, 1, 1);
book(_histEECHighX ,2, 1, 2);
book(_histSpectraW77 ,3, 1, 1);
book(_histSpectraW122 ,3, 1, 2);
book(_histSpectraW169 ,3, 1, 3);
book(_histSpectraW117 ,3, 1, 4);
book(_histPT2 ,4, 1, 1);
book(_w77 .first, "TMP/w77_1");
book(_w122.first, "TMP/w122_1");
book(_w169.first, "TMP/w169_1");
book(_w117.first, "TMP/w117_1");
book(_wEnergy.first, "TMP/wEnergy_1");
book(_w77 .second, "TMP/w77_2");
book(_w122.second, "TMP/w122_2");
book(_w169.second, "TMP/w169_2");
book(_w117.second, "TMP/w117_2");
book(_wEnergy.second, "TMP/wEnergy_2");
}
/// Analyse each event
void analyze(const Event& event) {
// Get the DIS kinematics
const DISKinematics& dk = apply<DISKinematics>(event, "Kinematics");
const double x = dk.x();
const double w2 = dk.W2();
const double w = sqrt(w2);
// Momentum of the scattered lepton
const DISLepton& dl = apply<DISLepton>(event,"Lepton");
const FourMomentum leptonMom = dl.out();
const double ptel = leptonMom.pT();
const double enel = leptonMom.E();
const double thel = leptonMom.angle(dk.beamHadron().mom())/degree;
// Extract the particles other than the lepton
const FinalState& fs = apply<FinalState>(event, "FS");
Particles particles;
particles.reserve(fs.particles().size());
const GenParticle* dislepGP = dl.out().genParticle();
foreach (const Particle& p, fs.particles()) {
const GenParticle* loopGP = p.genParticle();
if (loopGP == dislepGP) continue;
particles.push_back(p);
}
// Cut on the forward energy
double efwd = 0.0;
foreach (const Particle& p, particles) {
const double th = p.angle(dk.beamHadron())/degree;
if (inRange(th, 4.4, 15)) efwd += p.E();
}
// Apply the cuts
// Lepton energy and angle, w2 and forward energy
MSG_DEBUG("enel/GeV = " << enel/GeV << ", thel = " << thel
<< ", w2 = " << w2 << ", efwd/GeV = " << efwd/GeV);
bool cut = enel/GeV > 14. && thel > 157. && thel < 172.5 && w2 >= 3000. && efwd/GeV > 0.5;
if (!cut) vetoEvent;
// Weight of the event
(x < 1e-3 ? _wEnergy.first : _wEnergy.second)->fill();
// Boost to hadronic CM
const LorentzTransform hcmboost = dk.boostHCM();
// Loop over the particles
long ncharged(0);
for (size_t ip1 = 0; ip1 < particles.size(); ++ip1) {
const Particle& p = particles[ip1];
const double th = p.angle(dk.beamHadron().momentum()) / degree;
// Boost momentum to the hadronic CM frame
const FourMomentum hcmMom = hcmboost.transform(p.momentum());
// Angular cut
if (th <= 4.4) continue;
// Energy flow histogram
const double et = fabs(hcmMom.Et());
const double eta = hcmMom.eta();
(x < 1e-3 ? _histEnergyFlowLowX : _histEnergyFlowHighX)->fill(eta, et);
if (PID::threeCharge(p.pid()) != 0) {
/// @todo Use units in w comparisons... what are the units?
if (w > 50. && w <= 200.) {
double xf= 2 * hcmMom.z() / w;
double pt2 = hcmMom.pT2();
if (w > 50. && w <= 100.) {
_histSpectraW77 ->fill(xf);
} else if (w > 100. && w <= 150.) {
_histSpectraW122->fill(xf);
} else if (w > 150. && w <= 200.) {
_histSpectraW169->fill(xf);
}
_histSpectraW117->fill(xf);
/// @todo Is this profile meant to be filled with 2 weight factors?
_histPT2->fill(xf, pt2/GeV2);
++ncharged;
}
}
// Energy-energy correlation
if (th <= 8.) continue;
double phi1 = p.phi(ZERO_2PI);
double eta1 = p.eta();
double et1 = fabs(p.momentum().Et());
for (size_t ip2 = ip1+1; ip2 < particles.size(); ++ip2) {
const Particle& p2 = particles[ip2];
//double th2 = beamAngle(p2.momentum(), order);
double th2 = p2.angle(dk.beamHadron().momentum()) / degree;
if (th2 <= 8.) continue;
double phi2 = p2.phi(ZERO_2PI);
/// @todo Use angle function
double deltaphi = phi1 - phi2;
if (fabs(deltaphi) > PI) deltaphi = fabs(fabs(deltaphi) - TWOPI);
double eta2 = p2.eta();
double omega = sqrt(sqr(eta1-eta2) + sqr(deltaphi));
double et2 = fabs(p2.momentum().Et());
double wt = et1*et2 / sqr(ptel);
(x < 1e-3 ? _histEECLowX : _histEECHighX)->fill(omega, wt);
}
}
// Factors for normalization
if (w > 50. && w <= 200.) {
if (w <= 100.) {
_w77.first ->fill(ncharged);
_w77.second->fill();
} else if (w <= 150.) {
_w122.first ->fill(ncharged);
_w122.second->fill();
} else {
_w169.first ->fill(ncharged);
_w169.second->fill();
}
_w117.first ->fill(ncharged);
_w117.second->fill();
}
}
// Normalize inclusive single particle distributions to the average number of charged particles per event.
void finalize() {
- normalize(_histSpectraW77, _w77.first/_w77.second);
- normalize(_histSpectraW122, _w122.first/_w122.second);
- normalize(_histSpectraW169, _w169.first/_w169.second);
- normalize(_histSpectraW117, _w117.first/_w117.second);
+ normalize(_histSpectraW77, *_w77.first/ *_w77.second);
+ normalize(_histSpectraW122, *_w122.first/ *_w122.second);
+ normalize(_histSpectraW169, *_w169.first/ *_w169.second);
+ normalize(_histSpectraW117, *_w117.first/ *_w117.second);
- scale(_histEnergyFlowLowX , 1./_wEnergy.first );
- scale(_histEnergyFlowHighX, 1./_wEnergy.second);
+ scale(_histEnergyFlowLowX , 1./ *_wEnergy.first );
+ scale(_histEnergyFlowHighX, 1./ *_wEnergy.second);
- scale(_histEECLowX , 1./_wEnergy.first );
- scale(_histEECHighX, 1./_wEnergy.second);
+ scale(_histEECLowX , 1./ *_wEnergy.first );
+ scale(_histEECHighX, 1./ *_wEnergy.second);
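// Each counter pair holds (sum of charged multiplicities, number of events)
// for its W band, so *_w77.first / *_w77.second is the mean charged
// multiplicity to which the corresponding spectrum is normalised.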
}
//@}
private:
/// Polar angle with right direction of the beam
inline double beamAngle(const FourVector& v, bool order) {
double thel = v.polarAngle()/degree;
if (thel < 0) thel += 180.;
if (!order) thel = 180 - thel;
return thel;
}
/// @name Histograms
//@{
Histo1DPtr _histEnergyFlowLowX, _histEnergyFlowHighX;
Histo1DPtr _histEECLowX, _histEECHighX;
Histo1DPtr _histSpectraW77, _histSpectraW122, _histSpectraW169, _histSpectraW117;
Profile1DPtr _histPT2;
//@}
/// @name Storage of weights to calculate averages for normalisation
//@{
pair<CounterPtr,CounterPtr> _w77, _w122, _w169, _w117, _wEnergy;
//@}
};
DECLARE_RIVET_PLUGIN(H1_1994_S2919893);
}
diff --git a/analyses/pluginHERA/H1_1995_S3167097.cc b/analyses/pluginHERA/H1_1995_S3167097.cc
--- a/analyses/pluginHERA/H1_1995_S3167097.cc
+++ b/analyses/pluginHERA/H1_1995_S3167097.cc
@@ -1,128 +1,128 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/DISFinalState.hh"
#include "Rivet/Projections/CentralEtHCM.hh"
namespace Rivet {
/// H1 energy flow in DIS
///
/// @todo Make histograms match those in HepData and use autobooking
///
/// @author Leif Lonnblad
/// @author Andy Buckley
class H1_1995_S3167097 : public Analysis {
public:
/// Constructor
H1_1995_S3167097()
: Analysis("H1_1995_S3167097")
{ }
/// @name Analysis methods
//@{
void init() {
// Projections
const DISKinematics& diskin = declare(DISKinematics(), "Kinematics");
const DISFinalState& fshcm = declare(DISFinalState(diskin, DISFinalState::HCM), "FS");
declare(CentralEtHCM(fshcm), "Y1HCM");
// Histograms
/// @todo Convert to use autobooking and correspond to HepData data tables
_hEtFlow.resize(9);
for (size_t i = 0; i < 9; ++i) {
book(_sumw[i], "sumW_" + to_str(i));
book(_hEtFlow[i] ,to_str(i), 24, -6, 6);
}
book(_tmphAvEt, "TMP/hAvEt", 9, 1.0, 10.0);
book(_tmphAvX , "TMP/hAvX", 9, 1.0, 10.0);
book(_tmphAvQ2, "TMP/hAvQ2", 9, 1.0, 10.0);
book(_tmphN , "TMP/hN", 9, 1.0, 10.0);
}
/// Calculate the bin number from the DISKinematics projection
/// @todo Convert to use a HEPUtils Binning1D
int _getbin(const DISKinematics& dk) {
if (inRange(dk.Q2()/GeV2, 5.0, 10.0)) {
if (inRange(dk.x(), 1e-4, 2e-4)) return 0;
if (inRange(dk.x(), 2e-4, 5e-4) && dk.Q2() > 6.0*GeV2) return 1;
} else if (inRange(dk.Q2()/GeV2, 10.0, 20.0)) {
if (inRange(dk.x(), 2e-4, 5e-4)) return 2;
if (inRange(dk.x(), 5e-4, 8e-4)) return 3;
if (inRange(dk.x(), 8e-4, 1.5e-3)) return 4;
if (inRange(dk.x(), 1.5e-3, 4e-3)) return 5;
} else if (inRange(dk.Q2()/GeV2, 20.0, 50.0)) {
if (inRange(dk.x(), 5e-4, 1.4e-3)) return 6;
if (inRange(dk.x(), 1.4e-3, 3e-3)) return 7;
if (inRange(dk.x(), 3e-3, 1e-2)) return 8;
}
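// e.g. Q2 = 15 GeV2 with x = 6e-4 falls in the middle Q2 band and its second
// x slice, giving bin 3; anything outside all windows falls through to -1 and
// is vetoed in analyze()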
return -1;
}
void analyze(const Event& event) {
const FinalState& fs = apply<FinalState>(event, "FS");
const DISKinematics& dk = apply<DISKinematics>(event, "Kinematics");
const CentralEtHCM& y1 = apply<CentralEtHCM>(event, "Y1HCM");
const int ibin = _getbin(dk);
if (ibin < 0) vetoEvent;
_sumw[ibin]->fill();
for (size_t i = 0, N = fs.particles().size(); i < N; ++i) {
const double rap = fs.particles()[i].rapidity();
const double et = fs.particles()[i].Et();
_hEtFlow[ibin]->fill(rap, et/GeV);
}
/// @todo Use fillBin?
_tmphAvEt->fill(ibin + 1.5, y1.sumEt()/GeV);
_tmphAvX->fill(ibin + 1.5, dk.x());
_tmphAvQ2->fill(ibin + 1.5, dk.Q2()/GeV2);
_tmphN->fill(ibin + 1.5);
}
void finalize() {
for (size_t ibin = 0; ibin < 9; ++ibin)
- scale(_hEtFlow[ibin], 0.5/_sumw[ibin]);
+ scale(_hEtFlow[ibin], 0.5/ *_sumw[ibin]);
/// @todo Improve this!
Scatter2DPtr s21,s22,s23;
divide(_tmphAvEt,_tmphN,s21);
book(s21, "21");
divide(_tmphAvX,_tmphN,s22);
book(s22, "22");
divide(_tmphAvQ2,_tmphN,s23);
book(s23, "23");
// addAnalysisObject(make_shared<Scatter2D>(_tmphAvEt/_tmphN, histoPath("21")) );
// addAnalysisObject(make_shared<Scatter2D>(_tmphAvX/_tmphN, histoPath("22")) );
// addAnalysisObject(make_shared<Scatter2D>(_tmphAvQ2/_tmphN, histoPath("23")) );
}
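// divide() forms the bin-wise ratio of two histograms as a Scatter2D: e.g.
// s21 holds sum(Et)/N(events) in each of the nine kinematic bins, i.e. the
// mean transverse energy per event in that bin.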
//@}
private:
/// Histograms for the \f$ E_T \f$ flow
vector<Histo1DPtr> _hEtFlow;
/// Temporary histograms for averages in different kinematical bins.
Histo1DPtr _tmphAvEt, _tmphAvX, _tmphAvQ2, _tmphN;
/// Weights counters for each kinematic bin
array<CounterPtr, 9> _sumw;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(H1_1995_S3167097);
}
diff --git a/analyses/pluginHERA/H1_2000_S4129130.cc b/analyses/pluginHERA/H1_2000_S4129130.cc
--- a/analyses/pluginHERA/H1_2000_S4129130.cc
+++ b/analyses/pluginHERA/H1_2000_S4129130.cc
@@ -1,258 +1,258 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Math/Constants.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/DISKinematics.hh"
namespace Rivet {
/// @brief H1 energy flow and charged particle spectra
///
/// @author Peter Richardson
///
/// Based on the HZTOOL analysis HZ99091
class H1_2000_S4129130 : public Analysis {
public:
/// Constructor
H1_2000_S4129130()
: Analysis("H1_2000_S4129130")
{ }
/// @name Analysis methods
//@{
/// Initialise projections and histograms
void init() {
// Projections
declare(DISLepton(), "Lepton");
declare(DISKinematics(), "Kinematics");
declare(FinalState(), "FS");
// Histos
Histo1DPtr h;
// Histograms and weight vectors for low Q^2 a
for (size_t ix = 0; ix < 17; ++ix) {
book(h ,ix+1, 1, 1);
_histETLowQa.push_back(h);
book(_weightETLowQa[ix], "TMP/ETLowQa" + to_str(ix));
}
// Histograms and weight vectors for high Q^2 a
for (size_t ix = 0; ix < 7; ++ix) {
book(h ,ix+18, 1, 1);
_histETHighQa.push_back(h);
book(_weightETHighQa[ix], "TMP/ETHighQa" + to_str(ix));
}
// Histograms and weight vectors for low Q^2 b
for (size_t ix = 0; ix < 5; ++ix) {
book(h ,ix+25, 1, 1);
_histETLowQb.push_back(h);
book(_weightETLowQb[ix], "TMP/ETLowQb" + to_str(ix));
}
// Histograms and weight vectors for high Q^2 b
for (size_t ix = 0; ix < 3; ++ix) {
book(h ,30+ix, 1, 1);
_histETHighQb.push_back(h);
book(_weightETHighQb[ix], "TMP/ETHighQb" + to_str(ix));
}
// Histograms for the averages
book(_histAverETCentral ,33, 1, 1);
book(_histAverETFrag ,34, 1, 1);
}
/// Analyze each event
void analyze(const Event& event) {
// DIS kinematics
const DISKinematics& dk = apply<DISKinematics>(event, "Kinematics");
double q2 = dk.Q2();
double x = dk.x();
double y = dk.y();
double w2 = dk.W2();
// Kinematics of the scattered lepton
const DISLepton& dl = apply<DISLepton>(event,"Lepton");
const FourMomentum leptonMom = dl.out();
const double enel = leptonMom.E();
const double thel = 180 - leptonMom.angle(dl.in().mom())/degree;
// Extract the particles other than the lepton
const FinalState& fs = apply<FinalState>(event, "FS");
Particles particles; particles.reserve(fs.size());
const GenParticle* dislepGP = dl.out().genParticle(); ///< @todo Is the GenParticle stuff necessary? (Not included in Particle::==?)
foreach (const Particle& p, fs.particles()) {
const GenParticle* loopGP = p.genParticle();
if (loopGP == dislepGP) continue;
particles.push_back(p);
}
// Cut on the forward energy
double efwd = 0.;
foreach (const Particle& p, particles) {
const double th = 180 - p.angle(dl.in())/degree;
if (inRange(th, 4.4, 15.0)) efwd += p.E();
}
// There are four possible selections for events
bool evcut[4];
// Low Q2 selection a
evcut[0] = enel/GeV > 12. && w2 >= 4400.*GeV2 && efwd/GeV > 0.5 && inRange(thel,157.,176.);
// Low Q2 selection b
evcut[1] = enel/GeV > 12. && inRange(y,0.3,0.5);
// High Q2 selection a
evcut[2] = inRange(thel,12.,150.) && inRange(y,0.05,0.6) && w2 >= 4400.*GeV2 && efwd/GeV > 0.5;
// High Q2 selection b
evcut[3] = inRange(thel,12.,150.) && inRange(y,0.05,0.6) && inRange(w2,27110.*GeV2,45182.*GeV2);
// Veto if fails all cuts
/// @todo Can we use all()?
if (! (evcut[0] || evcut[1] || evcut[2] || evcut[3]) ) vetoEvent;
// Find the bins
int bin[4] = {-1,-1,-1,-1};
// For the low Q2 selection a)
if (q2 > 2.5*GeV && q2 <= 5.*GeV) {
if (x > 0.00005 && x <= 0.0001 ) bin[0] = 0;
if (x > 0.0001 && x <= 0.0002 ) bin[0] = 1;
if (x > 0.0002 && x <= 0.00035) bin[0] = 2;
if (x > 0.00035 && x <= 0.0010 ) bin[0] = 3;
}
else if (q2 > 5.*GeV && q2 <= 10.*GeV) {
if (x > 0.0001 && x <= 0.0002 ) bin[0] = 4;
if (x > 0.0002 && x <= 0.00035) bin[0] = 5;
if (x > 0.00035 && x <= 0.0007 ) bin[0] = 6;
if (x > 0.0007 && x <= 0.0020 ) bin[0] = 7;
}
else if (q2 > 10.*GeV && q2 <= 20.*GeV) {
if (x > 0.0002 && x <= 0.0005) bin[0] = 8;
if (x > 0.0005 && x <= 0.0008) bin[0] = 9;
if (x > 0.0008 && x <= 0.0015) bin[0] = 10;
if (x > 0.0015 && x <= 0.040 ) bin[0] = 11;
}
else if (q2 > 20.*GeV && q2 <= 50.*GeV) {
if (x > 0.0005 && x <= 0.0014) bin[0] = 12;
if (x > 0.0014 && x <= 0.0030) bin[0] = 13;
if (x > 0.0030 && x <= 0.0100) bin[0] = 14;
}
else if (q2 > 50.*GeV && q2 <= 100.*GeV) {
if (x >0.0008 && x <= 0.0030) bin[0] = 15;
if (x >0.0030 && x <= 0.0200) bin[0] = 16;
}
// check in one of the bins
evcut[0] &= bin[0] >= 0;
// For the low Q2 selection b)
if (q2 > 2.5*GeV && q2 <= 5. *GeV) bin[1] = 0;
if (q2 > 5. *GeV && q2 <= 10. *GeV) bin[1] = 1;
if (q2 > 10.*GeV && q2 <= 20. *GeV) bin[1] = 2;
if (q2 > 20.*GeV && q2 <= 50. *GeV) bin[1] = 3;
if (q2 > 50.*GeV && q2 <= 100.*GeV) bin[1] = 4;
// check in one of the bins
evcut[1] &= bin[1] >= 0;
// for the high Q2 selection a)
if (q2 > 100.*GeV && q2 <= 400.*GeV) {
if (x > 0.00251 && x <= 0.00631) bin[2] = 0;
if (x > 0.00631 && x <= 0.0158 ) bin[2] = 1;
if (x > 0.0158 && x <= 0.0398 ) bin[2] = 2;
}
else if (q2 > 400.*GeV && q2 <= 1100.*GeV) {
if (x > 0.00631 && x <= 0.0158 ) bin[2] = 3;
if (x > 0.0158 && x <= 0.0398 ) bin[2] = 4;
if (x > 0.0398 && x <= 1. ) bin[2] = 5;
}
else if (q2 > 1100.*GeV && q2 <= 100000.*GeV) {
if (x > 0. && x <= 1.) bin[2] = 6;
}
// check in one of the bins
evcut[2] &= bin[2] >= 0;
// for the high Q2 selection b)
if (q2 > 100.*GeV && q2 <= 220.*GeV) bin[3] = 0;
else if (q2 > 220.*GeV && q2 <= 400.*GeV) bin[3] = 1;
else if (q2 > 400.*GeV) bin[3] = 2;
// check in one of the bins
evcut[3] &= bin[3] >= 0;
// Veto if fails all cuts after bin selection
/// @todo Can we use all()?
if (! (evcut[0] || evcut[1] || evcut[2] || evcut[3])) vetoEvent;
// Increment the count for normalisation
if (evcut[0]) _weightETLowQa [bin[0]]->fill();
if (evcut[1]) _weightETLowQb [bin[1]]->fill();
if (evcut[2]) _weightETHighQa[bin[2]]->fill();
if (evcut[3]) _weightETHighQb[bin[3]]->fill();
// Boost to hadronic CoM
const LorentzTransform hcmboost = dk.boostHCM();
// Loop over the particles
double etcent = 0;
double etfrag = 0;
foreach (const Particle& p, particles) {
// Boost momentum to the hadronic CM frame
const FourMomentum hcmMom = hcmboost.transform(p.momentum());
double et = fabs(hcmMom.Et());
double eta = hcmMom.eta();
// Averages in central and forward region
if (fabs(eta) < .5 ) etcent += et;
if (eta > 2 && eta <= 3.) etfrag += et;
// Histograms of Et flow
if (evcut[0]) _histETLowQa [bin[0]]->fill(eta, et);
if (evcut[1]) _histETLowQb [bin[1]]->fill(eta, et);
if (evcut[2]) _histETHighQa[bin[2]]->fill(eta, et);
if (evcut[3]) _histETHighQb[bin[3]]->fill(eta, et);
}
// Fill histograms for the average quantities
if (evcut[1] || evcut[3]) {
_histAverETCentral->fill(q2, etcent);
_histAverETFrag ->fill(q2, etfrag);
}
}
// Finalize
void finalize() {
// Normalization of the Et distributions
/// @todo Simplify by using normalize() instead? Are all these being normalized to area=1?
- for (size_t ix = 0; ix < 17; ++ix) if (_weightETLowQa[ix] != 0) scale(_histETLowQa[ix], 1/_weightETLowQa[ix]);
- for (size_t ix = 0; ix < 7; ++ix) if (_weightETHighQa[ix] != 0) scale(_histETHighQa[ix], 1/_weightETHighQa[ix]);
- for (size_t ix = 0; ix < 5; ++ix) if (_weightETLowQb[ix] != 0) scale(_histETLowQb[ix], 1/_weightETLowQb[ix]);
- for (size_t ix = 0; ix < 3; ++ix) if (_weightETHighQb[ix] != 0) scale(_histETHighQb[ix], 1/_weightETHighQb[ix]);
+ for (size_t ix = 0; ix < 17; ++ix) if (_weightETLowQa[ix]->val() != 0) scale(_histETLowQa[ix], 1/ *_weightETLowQa[ix]);
+ for (size_t ix = 0; ix < 7; ++ix) if (_weightETHighQa[ix]->val() != 0) scale(_histETHighQa[ix], 1/ *_weightETHighQa[ix]);
+ for (size_t ix = 0; ix < 5; ++ix) if (_weightETLowQb[ix]->val() != 0) scale(_histETLowQb[ix], 1/ *_weightETLowQb[ix]);
+ for (size_t ix = 0; ix < 3; ++ix) if (_weightETHighQb[ix]->val() != 0) scale(_histETHighQb[ix], 1/ *_weightETHighQb[ix]);
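// Each Et-flow histogram carries its own event counter, so the scaling above
// turns it into an average transverse-energy flow per selected event in its
// (x, Q2) bin; empty selections are skipped to avoid dividing by zero.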
}
//@}
private:
/// @name Histograms
//@{
vector<Histo1DPtr> _histETLowQa;
vector<Histo1DPtr> _histETHighQa;
vector<Histo1DPtr> _histETLowQb;
vector<Histo1DPtr> _histETHighQb;
Profile1DPtr _histAverETCentral;
Profile1DPtr _histAverETFrag;
//@}
/// @name storage of weights for normalisation
//@{
array<CounterPtr,17> _weightETLowQa;
array<CounterPtr, 7> _weightETHighQa;
array<CounterPtr, 5> _weightETLowQb;
array<CounterPtr, 3> _weightETHighQb;
//@}
};
DECLARE_RIVET_PLUGIN(H1_2000_S4129130);
}
diff --git a/analyses/pluginLEP/ALEPH_1996_S3486095.cc b/analyses/pluginLEP/ALEPH_1996_S3486095.cc
--- a/analyses/pluginLEP/ALEPH_1996_S3486095.cc
+++ b/analyses/pluginLEP/ALEPH_1996_S3486095.cc
@@ -1,556 +1,555 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/Sphericity.hh"
#include "Rivet/Projections/Thrust.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/ParisiTensor.hh"
#include "Rivet/Projections/Hemispheres.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// @brief ALEPH QCD study with event shapes and identified particles
/// @author Holger Schulz
class ALEPH_1996_S3486095 : public Analysis {
public:
/// Constructor
ALEPH_1996_S3486095()
: Analysis("ALEPH_1996_S3486095")
{}
/// @name Analysis methods
//@{
void init() {
// Set up projections
declare(Beam(), "Beams");
const ChargedFinalState cfs;
declare(cfs, "FS");
declare(UnstableFinalState(), "UFS");
declare(FastJets(cfs, FastJets::DURHAM, 0.7), "DurhamJets");
declare(Sphericity(cfs), "Sphericity");
declare(ParisiTensor(cfs), "Parisi");
const Thrust thrust(cfs);
declare(thrust, "Thrust");
declare(Hemispheres(thrust), "Hemispheres");
// Book histograms
book(_histSphericity ,1, 1, 1);
book(_histAplanarity ,2, 1, 1);
book(_hist1MinusT ,3, 1, 1);
book(_histTMinor ,4, 1, 1);
book(_histY3 ,5, 1, 1);
book(_histHeavyJetMass ,6, 1, 1);
book(_histCParam ,7, 1, 1);
book(_histOblateness ,8, 1, 1);
book(_histScaledMom ,9, 1, 1);
book(_histRapidityT ,10, 1, 1);
book(_histPtSIn ,11, 1, 1);
book(_histPtSOut ,12, 1, 1);
book(_histLogScaledMom ,17, 1, 1);
book(_histChMult ,18, 1, 1);
book(_histMeanChMult ,19, 1, 1);
book(_histMeanChMultRapt05,20, 1, 1);
book(_histMeanChMultRapt10,21, 1, 1);
book(_histMeanChMultRapt15,22, 1, 1);
book(_histMeanChMultRapt20,23, 1, 1);
// Particle spectra
book(_histMultiPiPlus ,25, 1, 1);
book(_histMultiKPlus ,26, 1, 1);
book(_histMultiP ,27, 1, 1);
book(_histMultiPhoton ,28, 1, 1);
book(_histMultiPi0 ,29, 1, 1);
book(_histMultiEta ,30, 1, 1);
book(_histMultiEtaPrime ,31, 1, 1);
book(_histMultiK0 ,32, 1, 1);
book(_histMultiLambda0 ,33, 1, 1);
book(_histMultiXiMinus ,34, 1, 1);
book(_histMultiSigma1385Plus ,35, 1, 1);
book(_histMultiXi1530_0 ,36, 1, 1);
book(_histMultiRho ,37, 1, 1);
book(_histMultiOmega782 ,38, 1, 1);
book(_histMultiKStar892_0 ,39, 1, 1);
book(_histMultiPhi ,40, 1, 1);
book(_histMultiKStar892Plus ,43, 1, 1);
// Mean multiplicities
book(_histMeanMultiPi0 ,44, 1, 2);
book(_histMeanMultiEta ,44, 1, 3);
book(_histMeanMultiEtaPrime ,44, 1, 4);
book(_histMeanMultiK0 ,44, 1, 5);
book(_histMeanMultiRho ,44, 1, 6);
book(_histMeanMultiOmega782 ,44, 1, 7);
book(_histMeanMultiPhi ,44, 1, 8);
book(_histMeanMultiKStar892Plus ,44, 1, 9);
book(_histMeanMultiKStar892_0 ,44, 1, 10);
book(_histMeanMultiLambda0 ,44, 1, 11);
book(_histMeanMultiSigma0 ,44, 1, 12);
book(_histMeanMultiXiMinus ,44, 1, 13);
book(_histMeanMultiSigma1385Plus ,44, 1, 14);
book(_histMeanMultiXi1530_0 ,44, 1, 15);
book(_histMeanMultiOmegaOmegaBar ,44, 1, 16);
book(_weightedTotalPartNum, "weightedTotalPartNum");
book(_weightedTotalNumPiPlus, "weightedTotalNumPiPlus");
book(_weightedTotalNumKPlus, "weightedTotalNumKPlus");
book(_weightedTotalNumP, "weightedTotalNumP");
book(_weightedTotalNumPhoton, "weightedTotalNumPhoton");
book(_weightedTotalNumPi0, "weightedTotalNumPi0");
book(_weightedTotalNumEta, "weightedTotalNumEta");
book(_weightedTotalNumEtaPrime, "weightedTotalNumEtaPrime");
book(_weightedTotalNumK0, "weightedTotalNumK0");
book(_weightedTotalNumLambda0, "weightedTotalNumLambda0");
book(_weightedTotalNumXiMinus, "weightedTotalNumXiMinus");
book(_weightedTotalNumSigma1385Plus, "weightedTotalNumSigma1385Plus");
book(_weightedTotalNumXi1530_0, "weightedTotalNumXi1530_0");
book(_weightedTotalNumRho, "weightedTotalNumRho");
book(_weightedTotalNumOmega782, "weightedTotalNumOmega782");
book(_weightedTotalNumKStar892_0, "weightedTotalNumKStar892_0");
book(_weightedTotalNumPhi, "weightedTotalNumPhi");
book(_weightedTotalNumKStar892Plus, "weightedTotalNumKStar892Plus");
book(_numChParticles, "numChParticles");
}
void analyze(const Event& e) {
// First, veto on leptonic events by requiring at least 2 charged FS particles
const FinalState& fs = apply<FinalState>(e, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) {
MSG_DEBUG("Failed leptonic event cut");
vetoEvent;
}
MSG_DEBUG("Passed leptonic event cut");
_weightedTotalPartNum->fill(numParticles);
// Get beams and average beam momentum
const ParticlePair& beams = apply<Beam>(e, "Beams").beams();
const double meanBeamMom = ( beams.first.p3().mod() +
beams.second.p3().mod() ) / 2.0;
MSG_DEBUG("Avg beam momentum = " << meanBeamMom);
// Thrusts
MSG_DEBUG("Calculating thrust");
const Thrust& thrust = apply<Thrust>(e, "Thrust");
_hist1MinusT->fill(1 - thrust.thrust());
_histTMinor->fill(thrust.thrustMinor());
_histOblateness->fill(thrust.oblateness());
// Jets
MSG_DEBUG("Calculating differential jet rate plots:");
const FastJets& durjet = apply<FastJets>(e, "DurhamJets");
if (durjet.clusterSeq()) {
double y3 = durjet.clusterSeq()->exclusive_ymerge_max(2);
if (y3>0.0) _histY3->fill(-1. * std::log(y3));
}
// Sphericities
MSG_DEBUG("Calculating sphericity");
const Sphericity& sphericity = apply<Sphericity>(e, "Sphericity");
_histSphericity->fill(sphericity.sphericity());
_histAplanarity->fill(sphericity.aplanarity());
// C param
MSG_DEBUG("Calculating Parisi params");
const ParisiTensor& parisi = apply<ParisiTensor>(e, "Parisi");
_histCParam->fill(parisi.C());
// Hemispheres
MSG_DEBUG("Calculating hemisphere variables");
const Hemispheres& hemi = apply<Hemispheres>(e, "Hemispheres");
_histHeavyJetMass->fill(hemi.scaledM2high());
// Iterate over all the charged final state particles.
double Evis = 0.0;
double rapt05 = 0.;
double rapt10 = 0.;
double rapt15 = 0.;
double rapt20 = 0.;
//int numChParticles = 0;
MSG_DEBUG("About to iterate over charged FS particles");
foreach (const Particle& p, fs.particles()) {
// Get momentum and energy of each particle.
const Vector3 mom3 = p.p3();
const double energy = p.E();
Evis += energy;
_numChParticles->fill();
// Scaled momenta.
const double mom = mom3.mod();
const double scaledMom = mom/meanBeamMom;
const double logInvScaledMom = -std::log(scaledMom);
_histLogScaledMom->fill(logInvScaledMom);
_histScaledMom->fill(scaledMom);
// Get momenta components w.r.t. thrust and sphericity.
const double momT = dot(thrust.thrustAxis(), mom3);
const double pTinS = dot(mom3, sphericity.sphericityMajorAxis());
const double pToutS = dot(mom3, sphericity.sphericityMinorAxis());
_histPtSIn->fill(fabs(pTinS/GeV));
_histPtSOut->fill(fabs(pToutS/GeV));
// Calculate rapidities w.r.t. thrust.
const double rapidityT = 0.5 * std::log((energy + momT) / (energy - momT));
_histRapidityT->fill(fabs(rapidityT));
if (std::fabs(rapidityT) <= 0.5) {
rapt05 += 1.0;
}
if (std::fabs(rapidityT) <= 1.0) {
rapt10 += 1.0;
}
if (std::fabs(rapidityT) <= 1.5) {
rapt15 += 1.0;
}
if (std::fabs(rapidityT) <= 2.0) {
rapt20 += 1.0;
}
}
_histChMult->fill(numParticles);
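// The mean-multiplicity histograms are filled only at the centre of their
// (presumably single) bin, with the event's count as the weight; the
// 1/sumOfWeights scaling in finalize() then yields a per-event mean.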
_histMeanChMultRapt05->fill(_histMeanChMultRapt05->bin(0).xMid(), rapt05);
_histMeanChMultRapt10->fill(_histMeanChMultRapt10->bin(0).xMid(), rapt10);
_histMeanChMultRapt15->fill(_histMeanChMultRapt15->bin(0).xMid(), rapt15);
_histMeanChMultRapt20->fill(_histMeanChMultRapt20->bin(0).xMid(), rapt20);
_histMeanChMult->fill(_histMeanChMult->bin(0).xMid(), numParticles);
//// Final state of unstable particles to get particle spectra
const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
for (Particles::const_iterator p = ufs.particles().begin(); p != ufs.particles().end(); ++p) {
const Vector3 mom3 = p->momentum().p3();
int id = abs(p->pid());
const double mom = mom3.mod();
const double energy = p->momentum().E();
const double scaledMom = mom/meanBeamMom;
const double scaledEnergy = energy/meanBeamMom; // meanBeamMom is approximately beam energy
switch (id) {
case 22:
_histMultiPhoton->fill(-1.*std::log(scaledMom));
_weightedTotalNumPhoton->fill();
break;
case -321:
case 321:
_weightedTotalNumKPlus->fill();
_histMultiKPlus->fill(scaledMom);
break;
case 211:
case -211:
_histMultiPiPlus->fill(scaledMom);
_weightedTotalNumPiPlus->fill();
break;
case 2212:
case -2212:
_histMultiP->fill(scaledMom);
_weightedTotalNumP->fill();
break;
case 111:
_histMultiPi0->fill(scaledMom);
_histMeanMultiPi0->fill(_histMeanMultiPi0->bin(0).xMid());
_weightedTotalNumPi0->fill();
break;
case 221:
if (scaledMom >= 0.1) {
_histMultiEta->fill(scaledEnergy);
_histMeanMultiEta->fill(_histMeanMultiEta->bin(0).xMid());
_weightedTotalNumEta->fill();
}
break;
case 331:
if (scaledMom >= 0.1) {
_histMultiEtaPrime->fill(scaledEnergy);
_histMeanMultiEtaPrime->fill(_histMeanMultiEtaPrime->bin(0).xMid());
_weightedTotalNumEtaPrime->fill();
}
break;
case 130: //klong
case 310: //kshort
_histMultiK0->fill(scaledMom);
_histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid());
_weightedTotalNumK0->fill();
break;
case 113:
_histMultiRho->fill(scaledMom);
_histMeanMultiRho->fill(_histMeanMultiRho->bin(0).xMid());
_weightedTotalNumRho->fill();
break;
case 223:
_histMultiOmega782->fill(scaledMom);
_histMeanMultiOmega782->fill(_histMeanMultiOmega782->bin(0).xMid());
_weightedTotalNumOmega782->fill();
break;
case 333:
_histMultiPhi->fill(scaledMom);
_histMeanMultiPhi->fill(_histMeanMultiPhi->bin(0).xMid());
_weightedTotalNumPhi->fill();
break;
case 313:
case -313:
_histMultiKStar892_0->fill(scaledMom);
_histMeanMultiKStar892_0->fill(_histMeanMultiKStar892_0->bin(0).xMid());
_weightedTotalNumKStar892_0->fill();
break;
case 323:
case -323:
_histMultiKStar892Plus->fill(scaledEnergy);
_histMeanMultiKStar892Plus->fill(_histMeanMultiKStar892Plus->bin(0).xMid());
_weightedTotalNumKStar892Plus->fill();
break;
case 3122:
case -3122:
_histMultiLambda0->fill(scaledMom);
_histMeanMultiLambda0->fill(_histMeanMultiLambda0->bin(0).xMid());
_weightedTotalNumLambda0->fill();
break;
case 3212:
case -3212:
_histMeanMultiSigma0->fill(_histMeanMultiSigma0->bin(0).xMid());
break;
case 3312:
case -3312:
_histMultiXiMinus->fill(scaledEnergy);
_histMeanMultiXiMinus->fill(_histMeanMultiXiMinus->bin(0).xMid());
_weightedTotalNumXiMinus->fill();
break;
case 3114:
case -3114:
case 3224:
case -3224:
_histMultiSigma1385Plus->fill(scaledEnergy);
_histMeanMultiSigma1385Plus->fill(_histMeanMultiSigma1385Plus->bin(0).xMid());
_weightedTotalNumSigma1385Plus->fill();
break;
case 3324:
case -3324:
_histMultiXi1530_0->fill(scaledEnergy);
_histMeanMultiXi1530_0->fill(_histMeanMultiXi1530_0->bin(0).xMid());
_weightedTotalNumXi1530_0->fill();
break;
case 3334:
_histMeanMultiOmegaOmegaBar->fill(_histMeanMultiOmegaOmegaBar->bin(0).xMid());
break;
}
}
}
/// Finalize
void finalize() {
// Normalize inclusive single particle distributions to the average number
// of charged particles per event.
- const double avgNumParts = double(_weightedTotalPartNum) / sumOfWeights();
+ const double avgNumParts = dbl(*_weightedTotalPartNum) / sumOfWeights();
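// avgNumParts is the weighted mean charged multiplicity per event; the
// spectra below are normalised to integrate to this mean rather than to unity.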
normalize(_histPtSIn, avgNumParts);
normalize(_histPtSOut, avgNumParts);
normalize(_histRapidityT, avgNumParts);
normalize(_histY3);
normalize(_histLogScaledMom, avgNumParts);
normalize(_histScaledMom, avgNumParts);
// particle spectra
scale(_histMultiPiPlus ,1./sumOfWeights());
scale(_histMultiKPlus ,1./sumOfWeights());
scale(_histMultiP ,1./sumOfWeights());
scale(_histMultiPhoton ,1./sumOfWeights());
scale(_histMultiPi0 ,1./sumOfWeights());
scale(_histMultiEta ,1./sumOfWeights());
scale(_histMultiEtaPrime ,1./sumOfWeights());
scale(_histMultiK0 ,1./sumOfWeights());
scale(_histMultiLambda0 ,1./sumOfWeights());
scale(_histMultiXiMinus ,1./sumOfWeights());
scale(_histMultiSigma1385Plus ,1./sumOfWeights());
scale(_histMultiXi1530_0 ,1./sumOfWeights());
scale(_histMultiRho ,1./sumOfWeights());
scale(_histMultiOmega782 ,1./sumOfWeights());
scale(_histMultiKStar892_0 ,1./sumOfWeights());
scale(_histMultiPhi ,1./sumOfWeights());
scale(_histMultiKStar892Plus ,1./sumOfWeights());
- //normalize(_histMultiPiPlus ,_weightedTotalNumPiPlus / sumOfWeights());
- //normalize(_histMultiKPlus ,_weightedTotalNumKPlus/sumOfWeights());
- //normalize(_histMultiP ,_weightedTotalNumP/sumOfWeights());
- //normalize(_histMultiPhoton ,_weightedTotalNumPhoton/sumOfWeights());
- //normalize(_histMultiPi0 ,_weightedTotalNumPi0/sumOfWeights());
- //normalize(_histMultiEta ,_weightedTotalNumEta/sumOfWeights());
- //normalize(_histMultiEtaPrime ,_weightedTotalNumEtaPrime/sumOfWeights());
- //normalize(_histMultiK0 ,_weightedTotalNumK0/sumOfWeights());
- //normalize(_histMultiLambda0 ,_weightedTotalNumLambda0/sumOfWeights());
- //normalize(_histMultiXiMinus ,_weightedTotalNumXiMinus/sumOfWeights());
- //normalize(_histMultiSigma1385Plus ,_weightedTotalNumSigma1385Plus/sumOfWeights());
- //normalize(_histMultiXi1530_0 ,_weightedTotalNumXi1530_0 /sumOfWeights());
- //normalize(_histMultiRho ,_weightedTotalNumRho/sumOfWeights());
- //normalize(_histMultiOmegaMinus ,_weightedTotalNumOmegaMinus/sumOfWeights());
- //normalize(_histMultiKStar892_0 ,_weightedTotalNumKStar892_0/sumOfWeights());
- //normalize(_histMultiPhi ,_weightedTotalNumPhi/sumOfWeights());
-
- //normalize(_histMultiKStar892Plus ,_weightedTotalNumKStar892Plus/sumOfWeights());
+ normalize(_histMultiPiPlus ,dbl(*_weightedTotalNumPiPlus)/sumOfWeights());
+ normalize(_histMultiKPlus ,dbl(*_weightedTotalNumKPlus)/sumOfWeights());
+ normalize(_histMultiP ,dbl(*_weightedTotalNumP)/sumOfWeights());
+ normalize(_histMultiPhoton ,dbl(*_weightedTotalNumPhoton)/sumOfWeights());
+ normalize(_histMultiPi0 ,dbl(*_weightedTotalNumPi0)/sumOfWeights());
+ normalize(_histMultiEta ,dbl(*_weightedTotalNumEta)/sumOfWeights());
+ normalize(_histMultiEtaPrime ,dbl(*_weightedTotalNumEtaPrime)/sumOfWeights());
+ normalize(_histMultiK0 ,dbl(*_weightedTotalNumK0)/sumOfWeights());
+ normalize(_histMultiLambda0 ,dbl(*_weightedTotalNumLambda0)/sumOfWeights());
+ normalize(_histMultiXiMinus ,dbl(*_weightedTotalNumXiMinus)/sumOfWeights());
+ normalize(_histMultiSigma1385Plus ,dbl(*_weightedTotalNumSigma1385Plus)/sumOfWeights());
+ normalize(_histMultiXi1530_0 ,dbl(*_weightedTotalNumXi1530_0 )/sumOfWeights());
+ normalize(_histMultiRho ,dbl(*_weightedTotalNumRho)/sumOfWeights());
+ normalize(_histMultiOmega782 ,dbl(*_weightedTotalNumOmega782)/sumOfWeights());
+ normalize(_histMultiKStar892_0 ,dbl(*_weightedTotalNumKStar892_0)/sumOfWeights());
+ normalize(_histMultiPhi ,dbl(*_weightedTotalNumPhi)/sumOfWeights());
+ normalize(_histMultiKStar892Plus ,dbl(*_weightedTotalNumKStar892Plus)/sumOfWeights());
// event shape
normalize(_hist1MinusT);
normalize(_histTMinor);
normalize(_histOblateness);
normalize(_histSphericity);
normalize(_histAplanarity);
normalize(_histHeavyJetMass);
normalize(_histCParam);
// mean multiplicities
scale(_histChMult , 2.0/sumOfWeights()); // taking into account the binwidth of 2
scale(_histMeanChMult , 1.0/sumOfWeights());
scale(_histMeanChMultRapt05 , 1.0/sumOfWeights());
scale(_histMeanChMultRapt10 , 1.0/sumOfWeights());
scale(_histMeanChMultRapt15 , 1.0/sumOfWeights());
scale(_histMeanChMultRapt20 , 1.0/sumOfWeights());
scale(_histMeanMultiPi0 , 1.0/sumOfWeights());
scale(_histMeanMultiEta , 1.0/sumOfWeights());
scale(_histMeanMultiEtaPrime , 1.0/sumOfWeights());
scale(_histMeanMultiK0 , 1.0/sumOfWeights());
scale(_histMeanMultiRho , 1.0/sumOfWeights());
scale(_histMeanMultiOmega782 , 1.0/sumOfWeights());
scale(_histMeanMultiPhi , 1.0/sumOfWeights());
scale(_histMeanMultiKStar892Plus , 1.0/sumOfWeights());
scale(_histMeanMultiKStar892_0 , 1.0/sumOfWeights());
scale(_histMeanMultiLambda0 , 1.0/sumOfWeights());
scale(_histMeanMultiSigma0 , 1.0/sumOfWeights());
scale(_histMeanMultiXiMinus , 1.0/sumOfWeights());
scale(_histMeanMultiSigma1385Plus, 1.0/sumOfWeights());
scale(_histMeanMultiXi1530_0 , 1.0/sumOfWeights());
scale(_histMeanMultiOmegaOmegaBar, 1.0/sumOfWeights());
}
//@}
private:
/// Store the weighted sums of numbers of charged / charged+neutral
/// particles - used to calculate average number of particles for the
/// inclusive single particle distributions' normalisations.
CounterPtr _weightedTotalPartNum;
CounterPtr _weightedTotalNumPiPlus;
CounterPtr _weightedTotalNumKPlus;
CounterPtr _weightedTotalNumP;
CounterPtr _weightedTotalNumPhoton;
CounterPtr _weightedTotalNumPi0;
CounterPtr _weightedTotalNumEta;
CounterPtr _weightedTotalNumEtaPrime;
CounterPtr _weightedTotalNumK0;
CounterPtr _weightedTotalNumLambda0;
CounterPtr _weightedTotalNumXiMinus;
CounterPtr _weightedTotalNumSigma1385Plus;
CounterPtr _weightedTotalNumXi1530_0;
CounterPtr _weightedTotalNumRho;
CounterPtr _weightedTotalNumOmega782;
CounterPtr _weightedTotalNumKStar892_0;
CounterPtr _weightedTotalNumPhi;
CounterPtr _weightedTotalNumKStar892Plus;
CounterPtr _numChParticles;
/// @name Histograms
//@{
Histo1DPtr _histSphericity;
Histo1DPtr _histAplanarity;
Histo1DPtr _hist1MinusT;
Histo1DPtr _histTMinor;
Histo1DPtr _histY3;
Histo1DPtr _histHeavyJetMass;
Histo1DPtr _histCParam;
Histo1DPtr _histOblateness;
Histo1DPtr _histScaledMom;
Histo1DPtr _histRapidityT;
Histo1DPtr _histPtSIn;
Histo1DPtr _histPtSOut;
Histo1DPtr _histJetRate2Durham;
Histo1DPtr _histJetRate3Durham;
Histo1DPtr _histJetRate4Durham;
Histo1DPtr _histJetRate5Durham;
Histo1DPtr _histLogScaledMom;
Histo1DPtr _histChMult;
Histo1DPtr _histMultiPiPlus;
Histo1DPtr _histMultiKPlus;
Histo1DPtr _histMultiP;
Histo1DPtr _histMultiPhoton;
Histo1DPtr _histMultiPi0;
Histo1DPtr _histMultiEta;
Histo1DPtr _histMultiEtaPrime;
Histo1DPtr _histMultiK0;
Histo1DPtr _histMultiLambda0;
Histo1DPtr _histMultiXiMinus;
Histo1DPtr _histMultiSigma1385Plus;
Histo1DPtr _histMultiXi1530_0;
Histo1DPtr _histMultiRho;
Histo1DPtr _histMultiOmega782;
Histo1DPtr _histMultiKStar892_0;
Histo1DPtr _histMultiPhi;
Histo1DPtr _histMultiKStar892Plus;
// mean multiplicities
Histo1DPtr _histMeanChMult;
Histo1DPtr _histMeanChMultRapt05;
Histo1DPtr _histMeanChMultRapt10;
Histo1DPtr _histMeanChMultRapt15;
Histo1DPtr _histMeanChMultRapt20;
Histo1DPtr _histMeanMultiPi0;
Histo1DPtr _histMeanMultiEta;
Histo1DPtr _histMeanMultiEtaPrime;
Histo1DPtr _histMeanMultiK0;
Histo1DPtr _histMeanMultiRho;
Histo1DPtr _histMeanMultiOmega782;
Histo1DPtr _histMeanMultiPhi;
Histo1DPtr _histMeanMultiKStar892Plus;
Histo1DPtr _histMeanMultiKStar892_0;
Histo1DPtr _histMeanMultiLambda0;
Histo1DPtr _histMeanMultiSigma0;
Histo1DPtr _histMeanMultiXiMinus;
Histo1DPtr _histMeanMultiSigma1385Plus;
Histo1DPtr _histMeanMultiXi1530_0;
Histo1DPtr _histMeanMultiOmegaOmegaBar;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ALEPH_1996_S3486095);
}
diff --git a/analyses/pluginLEP/ALEPH_2004_S5765862.cc b/analyses/pluginLEP/ALEPH_2004_S5765862.cc
--- a/analyses/pluginLEP/ALEPH_2004_S5765862.cc
+++ b/analyses/pluginLEP/ALEPH_2004_S5765862.cc
@@ -1,332 +1,332 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/Thrust.hh"
#include "Rivet/Projections/Sphericity.hh"
#include "Rivet/Projections/ParisiTensor.hh"
#include "Rivet/Projections/Hemispheres.hh"
#include "Rivet/Projections/Beam.hh"
namespace Rivet {
/// @brief ALEPH jet rates and event shapes at LEP 1 and 2
class ALEPH_2004_S5765862 : public Analysis {
public:
ALEPH_2004_S5765862()
: Analysis("ALEPH_2004_S5765862") , _initialisedJets(false),
_initialisedSpectra(false)
{
}
public:
void init() {
_initialisedJets = true;
_initialisedSpectra = true;
// TODO: According to the paper they seem to discard neutral particles
// between 1 and 2 GeV. That correction is included in the systematic
// uncertainties and overly complicated to program, so we ignore it.
const FinalState fs;
declare(fs, "FS");
FastJets durhamjets(fs, FastJets::DURHAM, 0.7);
durhamjets.useInvisibles(true);
declare(durhamjets, "DurhamJets");
const Thrust thrust(fs);
declare(thrust, "Thrust");
declare(Sphericity(fs), "Sphericity");
declare(ParisiTensor(fs), "Parisi");
declare(Hemispheres(thrust), "Hemispheres");
const ChargedFinalState cfs;
declare(Beam(), "Beams");
declare(cfs, "CFS");
// Histos
// offset for the event shapes and jets
int offset = 0;
switch (int(sqrtS()/GeV + 0.5)) {
case 91: offset = 0; break;
case 133: offset = 1; break;
case 161: offset = 2; break;
case 172: offset = 3; break;
case 183: offset = 4; break;
case 189: offset = 5; break;
case 200: offset = 6; break;
case 206: offset = 7; break;
default:
_initialisedJets = false;
}
// event shapes
if(_initialisedJets) {
book(_h_thrust ,offset+54, 1, 1);
book(_h_heavyjetmass ,offset+62, 1, 1);
book(_h_totaljetbroadening ,offset+70, 1, 1);
book(_h_widejetbroadening ,offset+78, 1, 1);
book(_h_cparameter ,offset+86, 1, 1);
book(_h_thrustmajor ,offset+94, 1, 1);
book(_h_thrustminor ,offset+102, 1, 1);
book(_h_jetmassdifference ,offset+110, 1, 1);
book(_h_aplanarity ,offset+118, 1, 1);
if ( offset != 0 )
book(_h_planarity, offset+125, 1, 1);
book(_h_oblateness ,offset+133, 1, 1);
book(_h_sphericity ,offset+141, 1, 1);
// Durham n->m jet resolutions
book(_h_y_Durham[0] ,offset+149, 1, 1); // y12 d149 ... d156
book(_h_y_Durham[1] ,offset+157, 1, 1); // y23 d157 ... d164
if (offset<6) { // there are no y34, y45 or y56 histograms at 200 GeV
book(_h_y_Durham[2] ,offset+165, 1, 1); // y34 d165 ... d172, but not 171
book(_h_y_Durham[3] ,offset+173, 1, 1); // y45 d173 ... d179
book(_h_y_Durham[4] ,offset+180, 1, 1); // y56 d180 ... d186
}
else if (offset==6) {
_h_y_Durham[2] = Histo1DPtr();
_h_y_Durham[3] = Histo1DPtr();
_h_y_Durham[4] = Histo1DPtr();
}
else if (offset==7) {
book(_h_y_Durham[2] ,172, 1, 1);
book(_h_y_Durham[3] ,179, 1, 1);
book(_h_y_Durham[4] ,186, 1, 1);
}
// Durham n-jet fractions
book(_h_R_Durham[0] ,offset+187, 1, 1); // R1 d187 ... d194
book(_h_R_Durham[1] ,offset+195, 1, 1); // R2 d195 ... d202
book(_h_R_Durham[2] ,offset+203, 1, 1); // R3 d203 ... d210
book(_h_R_Durham[3] ,offset+211, 1, 1); // R4 d211 ... d218
book(_h_R_Durham[4] ,offset+219, 1, 1); // R5 d219 ... d226
book(_h_R_Durham[5] ,offset+227, 1, 1); // R>=6 d227 ... d234
}
// offset for the charged particle distributions
offset = 0;
switch (int(sqrtS()/GeV + 0.5)) {
case 133: offset = 0; break;
case 161: offset = 1; break;
case 172: offset = 2; break;
case 183: offset = 3; break;
case 189: offset = 4; break;
case 196: offset = 5; break;
case 200: offset = 6; break;
case 206: offset = 7; break;
default:
_initialisedSpectra=false;
}
if (_initialisedSpectra) {
book(_h_xp , 2+offset, 1, 1);
book(_h_xi ,11+offset, 1, 1);
book(_h_xe ,19+offset, 1, 1);
book(_h_pTin ,27+offset, 1, 1);
if (offset == 7)
book(_h_pTout, 35, 1, 1);
book(_h_rapidityT ,36+offset, 1, 1);
book(_h_rapidityS ,44+offset, 1, 1);
}
book(_weightedTotalChargedPartNum, "weightedTotalChargedPartNum");
if (!_initialisedSpectra && !_initialisedJets) {
MSG_WARNING("CoM energy of events sqrt(s) = " << sqrtS()/GeV
<< " doesn't match any available analysis energy .");
}
book(mult, 1, 1, 1);
}
void analyze(const Event& e) {
const Thrust& thrust = apply<Thrust>(e, "Thrust");
const Sphericity& sphericity = apply<Sphericity>(e, "Sphericity");
if(_initialisedJets) {
bool LEP1 = fuzzyEquals(sqrtS(),91.2*GeV,0.01);
// event shapes
double thr = LEP1 ? thrust.thrust() : 1.0 - thrust.thrust();
_h_thrust->fill(thr);
_h_thrustmajor->fill(thrust.thrustMajor());
if(LEP1)
_h_thrustminor->fill(log(thrust.thrustMinor()));
else
_h_thrustminor->fill(thrust.thrustMinor());
_h_oblateness->fill(thrust.oblateness());
const Hemispheres& hemi = apply<Hemispheres>(e, "Hemispheres");
_h_heavyjetmass->fill(hemi.scaledM2high());
_h_jetmassdifference->fill(hemi.scaledM2diff());
_h_totaljetbroadening->fill(hemi.Bsum());
_h_widejetbroadening->fill(hemi.Bmax());
const ParisiTensor& parisi = apply<ParisiTensor>(e, "Parisi");
_h_cparameter->fill(parisi.C());
_h_aplanarity->fill(sphericity.aplanarity());
if(_h_planarity)
_h_planarity->fill(sphericity.planarity());
_h_sphericity->fill(sphericity.sphericity());
// Jet rates
const FastJets& durjet = apply<FastJets>(e, "DurhamJets");
double log10e = log10(exp(1.));
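// The loops below convert the exclusive y_merge values into integrated n-jet
// fractions: each R_Durham bin is filled with a weight equal to its width, so
// after the 1/sumOfWeights scaling in finalize() the bin value is the
// fraction of events classified as n-jet at that y_cut.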
if (durjet.clusterSeq()) {
double logynm1=0.;
double logyn;
for (size_t i=0; i<5; ++i) {
double yn = durjet.clusterSeq()->exclusive_ymerge_max(i+1);
if (yn<=0.0) continue;
logyn = -log(yn);
if (_h_y_Durham[i]) {
_h_y_Durham[i]->fill(logyn);
}
if(!LEP1) logyn *= log10e;
for (size_t j = 0; j < _h_R_Durham[i]->numBins(); ++j) {
double val = _h_R_Durham[i]->bin(j).xMin();
double width = _h_R_Durham[i]->bin(j).xWidth();
if(-val<=logynm1) break;
if(-val<logyn) {
_h_R_Durham[i]->fill(val+0.5*width, width);
}
}
logynm1 = logyn;
}
for (size_t j = 0; j < _h_R_Durham[5]->numBins(); ++j) {
double val = _h_R_Durham[5]->bin(j).xMin();
double width = _h_R_Durham[5]->bin(j).xWidth();
if(-val<=logynm1) break;
_h_R_Durham[5]->fill(val+0.5*width, width);
}
}
if( !_initialisedSpectra) {
const ChargedFinalState& cfs = apply<ChargedFinalState>(e, "CFS");
const size_t numParticles = cfs.particles().size();
_weightedTotalChargedPartNum->fill(numParticles);
}
}
// charged particle distributions
if(_initialisedSpectra) {
const ChargedFinalState& cfs = apply<ChargedFinalState>(e, "CFS");
const size_t numParticles = cfs.particles().size();
_weightedTotalChargedPartNum->fill(numParticles);
const ParticlePair& beams = apply<Beam>(e, "Beams").beams();
const double meanBeamMom = ( beams.first.p3().mod() +
beams.second.p3().mod() ) / 2.0;
foreach (const Particle& p, cfs.particles()) {
const double xp = p.p3().mod()/meanBeamMom;
_h_xp->fill(xp );
const double logxp = -std::log(xp);
_h_xi->fill(logxp);
const double xe = p.E()/meanBeamMom;
_h_xe->fill(xe );
const double pTinT = dot(p.p3(), thrust.thrustMajorAxis());
const double pToutT = dot(p.p3(), thrust.thrustMinorAxis());
_h_pTin->fill(fabs(pTinT/GeV));
if(_h_pTout) _h_pTout->fill(fabs(pToutT/GeV));
const double momT = dot(thrust.thrustAxis() ,p.p3());
const double rapidityT = 0.5 * std::log((p.E() + momT) /
(p.E() - momT));
_h_rapidityT->fill(fabs(rapidityT));
const double momS = dot(sphericity.sphericityAxis(),p.p3());
const double rapidityS = 0.5 * std::log((p.E() + momS) /
(p.E() - momS));
_h_rapidityS->fill(fabs(rapidityS));
}
}
}
void finalize() {
if(!_initialisedJets && !_initialisedSpectra) return;
if (_initialisedJets) {
normalize(_h_thrust);
normalize(_h_heavyjetmass);
normalize(_h_totaljetbroadening);
normalize(_h_widejetbroadening);
normalize(_h_cparameter);
normalize(_h_thrustmajor);
normalize(_h_thrustminor);
normalize(_h_jetmassdifference);
normalize(_h_aplanarity);
if(_h_planarity) normalize(_h_planarity);
normalize(_h_oblateness);
normalize(_h_sphericity);
for (size_t n=0; n<6; ++n) {
scale(_h_R_Durham[n], 1./sumOfWeights());
}
for (size_t n = 0; n < 5; ++n) {
if (_h_y_Durham[n]) {
scale(_h_y_Durham[n], 1.0/sumOfWeights());
}
}
}
Histo1D temphisto(refData(1, 1, 1));
- const double avgNumParts = double(_weightedTotalChargedPartNum) / sumOfWeights();
+ const double avgNumParts = dbl(*_weightedTotalChargedPartNum) / sumOfWeights();
for (size_t b = 0; b < temphisto.numBins(); b++) {
const double x = temphisto.bin(b).xMid();
const double ex = temphisto.bin(b).xWidth()/2.;
if (inRange(sqrtS()/GeV, x-ex, x+ex)) {
mult->addPoint(x, avgNumParts, ex, 0.);
}
}
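// refData(1,1,1) supplies the sqrt(s) binning only; a single point holding
// the mean charged multiplicity is added at whichever energy bin contains the
// current sqrt(s).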
if (_initialisedSpectra) {
normalize(_h_xp, avgNumParts);
normalize(_h_xi, avgNumParts);
normalize(_h_xe, avgNumParts);
normalize(_h_pTin , avgNumParts);
if (_h_pTout) normalize(_h_pTout, avgNumParts);
normalize(_h_rapidityT, avgNumParts);
normalize(_h_rapidityS, avgNumParts);
}
}
private:
bool _initialisedJets;
bool _initialisedSpectra;
Scatter2DPtr mult;
Histo1DPtr _h_xp;
Histo1DPtr _h_xi;
Histo1DPtr _h_xe;
Histo1DPtr _h_pTin;
Histo1DPtr _h_pTout;
Histo1DPtr _h_rapidityT;
Histo1DPtr _h_rapidityS;
Histo1DPtr _h_thrust;
Histo1DPtr _h_heavyjetmass;
Histo1DPtr _h_totaljetbroadening;
Histo1DPtr _h_widejetbroadening;
Histo1DPtr _h_cparameter;
Histo1DPtr _h_thrustmajor;
Histo1DPtr _h_thrustminor;
Histo1DPtr _h_jetmassdifference;
Histo1DPtr _h_aplanarity;
Histo1DPtr _h_planarity;
Histo1DPtr _h_oblateness;
Histo1DPtr _h_sphericity;
Histo1DPtr _h_R_Durham[6];
Histo1DPtr _h_y_Durham[5];
CounterPtr _weightedTotalChargedPartNum;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ALEPH_2004_S5765862);
}
diff --git a/analyses/pluginLEP/DELPHI_1995_S3137023.cc b/analyses/pluginLEP/DELPHI_1995_S3137023.cc
--- a/analyses/pluginLEP/DELPHI_1995_S3137023.cc
+++ b/analyses/pluginLEP/DELPHI_1995_S3137023.cc
@@ -1,104 +1,104 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// @brief DELPHI strange baryon paper
/// @author Hendrik Hoeth
class DELPHI_1995_S3137023 : public Analysis {
public:
/// Constructor
DELPHI_1995_S3137023()
: Analysis("DELPHI_1995_S3137023")
{}
/// @name Analysis methods
//@{
void init() {
declare(Beam(), "Beams");
declare(ChargedFinalState(), "FS");
declare(UnstableFinalState(), "UFS");
book(_histXpXiMinus ,2, 1, 1);
book(_histXpSigma1385Plus ,3, 1, 1);
book(_weightedTotalNumXiMinus, "weightedTotalNumXiMinus");
book(_weightedTotalNumSigma1385Plus, "weightedTotalNumSigma1385Plus");
}
void analyze(const Event& e) {
// First, veto on leptonic events by requiring at least 2 charged FS particles
const FinalState& fs = apply<FinalState>(e, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) {
MSG_DEBUG("Failed leptonic event cut");
vetoEvent;
}
MSG_DEBUG("Passed leptonic event cut");
// Get beams and average beam momentum
const ParticlePair& beams = apply<Beam>(e, "Beams").beams();
const double meanBeamMom = ( beams.first.p3().mod() +
beams.second.p3().mod() ) / 2.0;
MSG_DEBUG("Avg beam momentum = " << meanBeamMom);
// Final state of unstable particles to get particle spectra
const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
foreach (const Particle& p, ufs.particles()) {
const int id = p.abspid();
switch (id) {
case 3312:
_histXpXiMinus->fill(p.p3().mod()/meanBeamMom);
_weightedTotalNumXiMinus->fill();
break;
case 3114: case 3224:
_histXpSigma1385Plus->fill(p.p3().mod()/meanBeamMom);
_weightedTotalNumSigma1385Plus->fill();
break;
}
}
}
/// Finalize
void finalize() {
- normalize(_histXpXiMinus , double(_weightedTotalNumXiMinus)/sumOfWeights());
- normalize(_histXpSigma1385Plus , double(_weightedTotalNumSigma1385Plus)/sumOfWeights());
+ normalize(_histXpXiMinus , dbl(*_weightedTotalNumXiMinus)/sumOfWeights());
+ normalize(_histXpSigma1385Plus , dbl(*_weightedTotalNumSigma1385Plus)/sumOfWeights());
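// dbl(*counter)/sumOfWeights() is the mean number of the given baryon per
// hadronic event, used as the target area of the corresponding xp spectrum.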
}
//@}
private:
/// Store the weighted sums of numbers of charged / charged+neutral
/// particles - used to calculate average number of particles for the
/// inclusive single particle distributions' normalisations.
CounterPtr _weightedTotalNumXiMinus;
CounterPtr _weightedTotalNumSigma1385Plus;
Histo1DPtr _histXpXiMinus;
Histo1DPtr _histXpSigma1385Plus;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(DELPHI_1995_S3137023);
}
diff --git a/analyses/pluginLEP/DELPHI_1996_S3430090.cc b/analyses/pluginLEP/DELPHI_1996_S3430090.cc
--- a/analyses/pluginLEP/DELPHI_1996_S3430090.cc
+++ b/analyses/pluginLEP/DELPHI_1996_S3430090.cc
@@ -1,553 +1,553 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/Sphericity.hh"
#include "Rivet/Projections/Thrust.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/ParisiTensor.hh"
#include "Rivet/Projections/Hemispheres.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/**
* @brief DELPHI event shapes and identified particle spectra
* @author Andy Buckley
* @author Hendrik Hoeth
*
* This is the paper used for the original PROFESSOR MC tuning study. It
* covers a wide range of e+e- event-shape variables and differential jet
* rates in the Durham and JADE schemes, and incorporates identified-particle
* spectra from other LEP analyses.
*
* @par Run conditions
*
* @arg LEP1 beam energy: \f$ \sqrt{s} = \f$ 91.2 GeV
* @arg Run with generic QCD events.
* @arg No \f$ p_\perp^\text{min} \f$ cutoff is required
*/
class DELPHI_1996_S3430090 : public Analysis {
public:
/// Constructor
DELPHI_1996_S3430090()
: Analysis("DELPHI_1996_S3430090")
{ }
/// @name Analysis methods
//@{
void init() {
declare(Beam(), "Beams");
// Don't try to introduce a pT or eta cut here. It's all corrected
// back. (See Section 2 of the paper.)
const ChargedFinalState cfs;
declare(cfs, "FS");
declare(UnstableFinalState(), "UFS");
declare(FastJets(cfs, FastJets::JADE, 0.7), "JadeJets");
declare(FastJets(cfs, FastJets::DURHAM, 0.7), "DurhamJets");
declare(Sphericity(cfs), "Sphericity");
declare(ParisiTensor(cfs), "Parisi");
const Thrust thrust(cfs);
declare(thrust, "Thrust");
declare(Hemispheres(thrust), "Hemispheres");
book(_histPtTIn, 1, 1, 1);
book(_histPtTOut,2, 1, 1);
book(_histPtSIn, 3, 1, 1);
book(_histPtSOut,4, 1, 1);
book(_histRapidityT, 5, 1, 1);
book(_histRapidityS, 6, 1, 1);
book(_histScaledMom, 7, 1, 1);
book(_histLogScaledMom, 8, 1, 1);
book(_histPtTOutVsXp ,9, 1, 1);
book(_histPtVsXp ,10, 1, 1);
book(_hist1MinusT, 11, 1, 1);
book(_histTMajor, 12, 1, 1);
book(_histTMinor, 13, 1, 1);
book(_histOblateness, 14, 1, 1);
book(_histSphericity, 15, 1, 1);
book(_histAplanarity, 16, 1, 1);
book(_histPlanarity, 17, 1, 1);
book(_histCParam, 18, 1, 1);
book(_histDParam, 19, 1, 1);
book(_histHemiMassH, 20, 1, 1);
book(_histHemiMassL, 21, 1, 1);
book(_histHemiMassD, 22, 1, 1);
book(_histHemiBroadW, 23, 1, 1);
book(_histHemiBroadN, 24, 1, 1);
book(_histHemiBroadT, 25, 1, 1);
book(_histHemiBroadD, 26, 1, 1);
// Binned in y_cut
book(_histDiffRate2Durham, 27, 1, 1);
book(_histDiffRate2Jade, 28, 1, 1);
book(_histDiffRate3Durham, 29, 1, 1);
book(_histDiffRate3Jade, 30, 1, 1);
book(_histDiffRate4Durham, 31, 1, 1);
book(_histDiffRate4Jade, 32, 1, 1);
// Binned in cos(chi)
book(_histEEC, 33, 1, 1);
book(_histAEEC, 34, 1, 1);
book(_histMultiCharged, 35, 1, 1);
book(_histMultiPiPlus, 36, 1, 1);
book(_histMultiPi0, 36, 1, 2);
book(_histMultiKPlus, 36, 1, 3);
book(_histMultiK0, 36, 1, 4);
book(_histMultiEta, 36, 1, 5);
book(_histMultiEtaPrime, 36, 1, 6);
book(_histMultiDPlus, 36, 1, 7);
book(_histMultiD0, 36, 1, 8);
book(_histMultiBPlus0, 36, 1, 9);
book(_histMultiF0, 37, 1, 1);
book(_histMultiRho, 38, 1, 1);
book(_histMultiKStar892Plus, 38, 1, 2);
book(_histMultiKStar892_0, 38, 1, 3);
book(_histMultiPhi, 38, 1, 4);
book(_histMultiDStar2010Plus, 38, 1, 5);
book(_histMultiF2, 39, 1, 1);
book(_histMultiK2Star1430_0, 39, 1, 2);
book(_histMultiP, 40, 1, 1);
book(_histMultiLambda0, 40, 1, 2);
book(_histMultiXiMinus, 40, 1, 3);
book(_histMultiOmegaMinus, 40, 1, 4);
book(_histMultiDeltaPlusPlus, 40, 1, 5);
book(_histMultiSigma1385Plus, 40, 1, 6);
book(_histMultiXi1530_0, 40, 1, 7);
book(_histMultiLambdaB0, 40, 1, 8);
book(_weightedTotalPartNum,"TotalPartNum");
book(_passedCutWeightSum, "passedCutWeightSum");
book(_passedCut3WeightSum, "passedCut3WeightSum");
book(_passedCut4WeightSum, "passedCut4WeightSum");
book(_passedCut5WeightSum, "passedCut5WeightSum");
}
void analyze(const Event& e) {
// First, veto on leptonic events by cutting on the number of charged FS particles
const FinalState& fs = apply<FinalState>(e, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) {
MSG_DEBUG("Failed leptonic event cut");
vetoEvent;
}
MSG_DEBUG("Passed leptonic event cut");
_passedCutWeightSum->fill();
_weightedTotalPartNum->fill(numParticles);
// Get beams and average beam momentum
const ParticlePair& beams = apply<Beam>(e, "Beams").beams();
const double meanBeamMom = ( beams.first.p3().mod() +
beams.second.p3().mod() ) / 2.0;
MSG_DEBUG("Avg beam momentum = " << meanBeamMom);
// Thrusts
MSG_DEBUG("Calculating thrust");
const Thrust& thrust = apply<Thrust>(e, "Thrust");
_hist1MinusT->fill(1 - thrust.thrust());
_histTMajor->fill(thrust.thrustMajor());
_histTMinor->fill(thrust.thrustMinor());
_histOblateness->fill(thrust.oblateness());
// Jets
const FastJets& durjet = apply<FastJets>(e, "DurhamJets");
const FastJets& jadejet = apply<FastJets>(e, "JadeJets");
if (numParticles >= 3) {
_passedCut3WeightSum->fill();
if (durjet.clusterSeq()) _histDiffRate2Durham->fill(durjet.clusterSeq()->exclusive_ymerge_max(2));
if (jadejet.clusterSeq()) _histDiffRate2Jade->fill(jadejet.clusterSeq()->exclusive_ymerge_max(2));
}
if (numParticles >= 4) {
_passedCut4WeightSum->fill();
if (durjet.clusterSeq()) _histDiffRate3Durham->fill(durjet.clusterSeq()->exclusive_ymerge_max(3));
if (jadejet.clusterSeq()) _histDiffRate3Jade->fill(jadejet.clusterSeq()->exclusive_ymerge_max(3));
}
if (numParticles >= 5) {
_passedCut5WeightSum->fill();
if (durjet.clusterSeq()) _histDiffRate4Durham->fill(durjet.clusterSeq()->exclusive_ymerge_max(4));
if (jadejet.clusterSeq()) _histDiffRate4Jade->fill(jadejet.clusterSeq()->exclusive_ymerge_max(4));
}
// Sphericities
MSG_DEBUG("Calculating sphericity");
const Sphericity& sphericity = apply<Sphericity>(e, "Sphericity");
_histSphericity->fill(sphericity.sphericity());
_histAplanarity->fill(sphericity.aplanarity());
_histPlanarity->fill(sphericity.planarity());
// C & D params
MSG_DEBUG("Calculating Parisi params");
const ParisiTensor& parisi = apply<ParisiTensor>(e, "Parisi");
_histCParam->fill(parisi.C());
_histDParam->fill(parisi.D());
// Hemispheres
MSG_DEBUG("Calculating hemisphere variables");
const Hemispheres& hemi = apply<Hemispheres>(e, "Hemispheres");
_histHemiMassH->fill(hemi.scaledM2high());
_histHemiMassL->fill(hemi.scaledM2low());
_histHemiMassD->fill(hemi.scaledM2diff());
_histHemiBroadW->fill(hemi.Bmax());
_histHemiBroadN->fill(hemi.Bmin());
_histHemiBroadT->fill(hemi.Bsum());
_histHemiBroadD->fill(hemi.Bdiff());
// Iterate over all the charged final state particles.
double Evis = 0.0;
double Evis2 = 0.0;
MSG_DEBUG("About to iterate over charged FS particles");
foreach (const Particle& p, fs.particles()) {
// Get momentum and energy of each particle.
const Vector3 mom3 = p.p3();
const double energy = p.E();
Evis += energy;
// Scaled momenta.
const double mom = mom3.mod();
const double scaledMom = mom/meanBeamMom;
const double logInvScaledMom = -std::log(scaledMom);
_histLogScaledMom->fill(logInvScaledMom);
_histScaledMom->fill(scaledMom);
// Get momenta components w.r.t. thrust and sphericity.
const double momT = dot(thrust.thrustAxis(), mom3);
const double momS = dot(sphericity.sphericityAxis(), mom3);
const double pTinT = dot(mom3, thrust.thrustMajorAxis());
const double pToutT = dot(mom3, thrust.thrustMinorAxis());
const double pTinS = dot(mom3, sphericity.sphericityMajorAxis());
const double pToutS = dot(mom3, sphericity.sphericityMinorAxis());
const double pT = sqrt(pow(pTinT, 2) + pow(pToutT, 2));
_histPtTIn->fill(fabs(pTinT/GeV));
_histPtTOut->fill(fabs(pToutT/GeV));
_histPtSIn->fill(fabs(pTinS/GeV));
_histPtSOut->fill(fabs(pToutS/GeV));
_histPtVsXp->fill(scaledMom, fabs(pT/GeV));
_histPtTOutVsXp->fill(scaledMom, fabs(pToutT/GeV));
// Calculate rapidities w.r.t. thrust and sphericity.
const double rapidityT = 0.5 * std::log((energy + momT) / (energy - momT));
const double rapidityS = 0.5 * std::log((energy + momS) / (energy - momS));
_histRapidityT->fill(fabs(rapidityT));
_histRapidityS->fill(fabs(rapidityS));
MSG_TRACE(fabs(rapidityT) << " " << scaledMom);
}
Evis2 = Evis*Evis;
// (A)EEC
// Need iterators since second loop starts at current outer loop iterator, i.e. no "foreach" here!
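// Each pair contributes w_ij = E_i E_j / E_vis^2 at cos(chi_ij); the AEEC is
// then built by filling backward-hemisphere pairs with +w and mirroring
// forward-hemisphere pairs in with -w, i.e.
// AEEC(cos chi) = EEC(cos chi) - EEC(-cos chi) for cos chi < 0.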
for (Particles::const_iterator p_i = fs.particles().begin(); p_i != fs.particles().end(); ++p_i) {
for (Particles::const_iterator p_j = p_i; p_j != fs.particles().end(); ++p_j) {
if (p_i == p_j) continue;
const Vector3 mom3_i = p_i->momentum().p3();
const Vector3 mom3_j = p_j->momentum().p3();
const double energy_i = p_i->momentum().E();
const double energy_j = p_j->momentum().E();
const double cosij = dot(mom3_i.unit(), mom3_j.unit());
const double eec = (energy_i*energy_j) / Evis2;
_histEEC->fill(cosij, eec);
if (cosij < 0)
_histAEEC->fill( cosij, eec);
else
_histAEEC->fill(-cosij, -eec);
}
}
_histMultiCharged->fill(_histMultiCharged->bin(0).xMid(), numParticles);
// Final state of unstable particles to get particle spectra
const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
foreach (const Particle& p, ufs.particles()) {
int id = p.abspid();
switch (id) {
case 211:
_histMultiPiPlus->fill(_histMultiPiPlus->bin(0).xMid());
break;
case 111:
_histMultiPi0->fill(_histMultiPi0->bin(0).xMid());
break;
case 321:
_histMultiKPlus->fill(_histMultiKPlus->bin(0).xMid());
break;
case 130:
case 310:
_histMultiK0->fill(_histMultiK0->bin(0).xMid());
break;
case 221:
_histMultiEta->fill(_histMultiEta->bin(0).xMid());
break;
case 331:
_histMultiEtaPrime->fill(_histMultiEtaPrime->bin(0).xMid());
break;
case 411:
_histMultiDPlus->fill(_histMultiDPlus->bin(0).xMid());
break;
case 421:
_histMultiD0->fill(_histMultiD0->bin(0).xMid());
break;
case 511:
case 521:
case 531:
_histMultiBPlus0->fill(_histMultiBPlus0->bin(0).xMid());
break;
case 9010221:
_histMultiF0->fill(_histMultiF0->bin(0).xMid());
break;
case 113:
_histMultiRho->fill(_histMultiRho->bin(0).xMid());
break;
case 323:
_histMultiKStar892Plus->fill(_histMultiKStar892Plus->bin(0).xMid());
break;
case 313:
_histMultiKStar892_0->fill(_histMultiKStar892_0->bin(0).xMid());
break;
case 333:
_histMultiPhi->fill(_histMultiPhi->bin(0).xMid());
break;
case 413:
_histMultiDStar2010Plus->fill(_histMultiDStar2010Plus->bin(0).xMid());
break;
case 225:
_histMultiF2->fill(_histMultiF2->bin(0).xMid());
break;
case 315:
_histMultiK2Star1430_0->fill(_histMultiK2Star1430_0->bin(0).xMid());
break;
case 2212:
_histMultiP->fill(_histMultiP->bin(0).xMid());
break;
case 3122:
_histMultiLambda0->fill(_histMultiLambda0->bin(0).xMid());
break;
case 3312:
_histMultiXiMinus->fill(_histMultiXiMinus->bin(0).xMid());
break;
case 3334:
_histMultiOmegaMinus->fill(_histMultiOmegaMinus->bin(0).xMid());
break;
case 2224:
_histMultiDeltaPlusPlus->fill(_histMultiDeltaPlusPlus->bin(0).xMid());
break;
case 3114:
_histMultiSigma1385Plus->fill(_histMultiSigma1385Plus->bin(0).xMid());
break;
case 3324:
_histMultiXi1530_0->fill(_histMultiXi1530_0->bin(0).xMid());
break;
case 5122:
_histMultiLambdaB0->fill(_histMultiLambdaB0->bin(0).xMid());
break;
}
}
}
// Finalize
void finalize() {
// Normalize inclusive single particle distributions to the average number
// of charged particles per event.
- const double avgNumParts = _weightedTotalPartNum / _passedCutWeightSum;
+ const double avgNumParts = dbl(*_weightedTotalPartNum / *_passedCutWeightSum);
normalize(_histPtTIn, avgNumParts);
normalize(_histPtTOut, avgNumParts);
normalize(_histPtSIn, avgNumParts);
normalize(_histPtSOut, avgNumParts);
normalize(_histRapidityT, avgNumParts);
normalize(_histRapidityS, avgNumParts);
normalize(_histLogScaledMom, avgNumParts);
normalize(_histScaledMom, avgNumParts);
- scale(_histEEC, 1.0/_passedCutWeightSum);
- scale(_histAEEC, 1.0/_passedCutWeightSum);
- scale(_histMultiCharged, 1.0/_passedCutWeightSum);
+ scale(_histEEC, 1.0 / *_passedCutWeightSum);
+ scale(_histAEEC, 1.0 / *_passedCutWeightSum);
+ scale(_histMultiCharged, 1.0 / *_passedCutWeightSum);
- scale(_histMultiPiPlus, 1.0/_passedCutWeightSum);
- scale(_histMultiPi0, 1.0/_passedCutWeightSum);
- scale(_histMultiKPlus, 1.0/_passedCutWeightSum);
- scale(_histMultiK0, 1.0/_passedCutWeightSum);
- scale(_histMultiEta, 1.0/_passedCutWeightSum);
- scale(_histMultiEtaPrime, 1.0/_passedCutWeightSum);
- scale(_histMultiDPlus, 1.0/_passedCutWeightSum);
- scale(_histMultiD0, 1.0/_passedCutWeightSum);
- scale(_histMultiBPlus0, 1.0/_passedCutWeightSum);
+ scale(_histMultiPiPlus, 1.0 / *_passedCutWeightSum);
+ scale(_histMultiPi0, 1.0 / *_passedCutWeightSum);
+ scale(_histMultiKPlus, 1.0 / *_passedCutWeightSum);
+ scale(_histMultiK0, 1.0 / *_passedCutWeightSum);
+ scale(_histMultiEta, 1.0 / *_passedCutWeightSum);
+ scale(_histMultiEtaPrime, 1.0 / *_passedCutWeightSum);
+ scale(_histMultiDPlus, 1.0 / *_passedCutWeightSum);
+ scale(_histMultiD0, 1.0 / *_passedCutWeightSum);
+ scale(_histMultiBPlus0, 1.0 / *_passedCutWeightSum);
- scale(_histMultiF0, 1.0/_passedCutWeightSum);
+ scale(_histMultiF0, 1.0 / *_passedCutWeightSum);
- scale(_histMultiRho, 1.0/_passedCutWeightSum);
- scale(_histMultiKStar892Plus, 1.0/_passedCutWeightSum);
- scale(_histMultiKStar892_0, 1.0/_passedCutWeightSum);
- scale(_histMultiPhi, 1.0/_passedCutWeightSum);
- scale(_histMultiDStar2010Plus, 1.0/_passedCutWeightSum);
+ scale(_histMultiRho, 1.0 / *_passedCutWeightSum);
+ scale(_histMultiKStar892Plus, 1.0 / *_passedCutWeightSum);
+ scale(_histMultiKStar892_0, 1.0 / *_passedCutWeightSum);
+ scale(_histMultiPhi, 1.0 / *_passedCutWeightSum);
+ scale(_histMultiDStar2010Plus, 1.0 / *_passedCutWeightSum);
- scale(_histMultiF2, 1.0/_passedCutWeightSum);
- scale(_histMultiK2Star1430_0, 1.0/_passedCutWeightSum);
+ scale(_histMultiF2, 1.0 / *_passedCutWeightSum);
+ scale(_histMultiK2Star1430_0, 1.0 / *_passedCutWeightSum);
- scale(_histMultiP, 1.0/_passedCutWeightSum);
- scale(_histMultiLambda0, 1.0/_passedCutWeightSum);
- scale(_histMultiXiMinus, 1.0/_passedCutWeightSum);
- scale(_histMultiOmegaMinus, 1.0/_passedCutWeightSum);
- scale(_histMultiDeltaPlusPlus, 1.0/_passedCutWeightSum);
- scale(_histMultiSigma1385Plus, 1.0/_passedCutWeightSum);
- scale(_histMultiXi1530_0, 1.0/_passedCutWeightSum);
- scale(_histMultiLambdaB0, 1.0/_passedCutWeightSum);
+ scale(_histMultiP, 1.0 / *_passedCutWeightSum);
+ scale(_histMultiLambda0, 1.0 / *_passedCutWeightSum);
+ scale(_histMultiXiMinus, 1.0 / *_passedCutWeightSum);
+ scale(_histMultiOmegaMinus, 1.0 / *_passedCutWeightSum);
+ scale(_histMultiDeltaPlusPlus, 1.0 / *_passedCutWeightSum);
+ scale(_histMultiSigma1385Plus, 1.0 / *_passedCutWeightSum);
+ scale(_histMultiXi1530_0, 1.0 / *_passedCutWeightSum);
+ scale(_histMultiLambdaB0, 1.0 / *_passedCutWeightSum);
- scale(_hist1MinusT, 1.0/_passedCutWeightSum);
- scale(_histTMajor, 1.0/_passedCutWeightSum);
- scale(_histTMinor, 1.0/_passedCutWeightSum);
- scale(_histOblateness, 1.0/_passedCutWeightSum);
+ scale(_hist1MinusT, 1.0 / *_passedCutWeightSum);
+ scale(_histTMajor, 1.0 / *_passedCutWeightSum);
+ scale(_histTMinor, 1.0 / *_passedCutWeightSum);
+ scale(_histOblateness, 1.0 / *_passedCutWeightSum);
- scale(_histSphericity, 1.0/_passedCutWeightSum);
- scale(_histAplanarity, 1.0/_passedCutWeightSum);
- scale(_histPlanarity, 1.0/_passedCutWeightSum);
+ scale(_histSphericity, 1.0 / *_passedCutWeightSum);
+ scale(_histAplanarity, 1.0 / *_passedCutWeightSum);
+ scale(_histPlanarity, 1.0 / *_passedCutWeightSum);
- scale(_histHemiMassD, 1.0/_passedCutWeightSum);
- scale(_histHemiMassH, 1.0/_passedCutWeightSum);
- scale(_histHemiMassL, 1.0/_passedCutWeightSum);
+ scale(_histHemiMassD, 1.0 / *_passedCutWeightSum);
+ scale(_histHemiMassH, 1.0 / *_passedCutWeightSum);
+ scale(_histHemiMassL, 1.0 / *_passedCutWeightSum);
- scale(_histHemiBroadW, 1.0/_passedCutWeightSum);
- scale(_histHemiBroadN, 1.0/_passedCutWeightSum);
- scale(_histHemiBroadT, 1.0/_passedCutWeightSum);
- scale(_histHemiBroadD, 1.0/_passedCutWeightSum);
+ scale(_histHemiBroadW, 1.0 / *_passedCutWeightSum);
+ scale(_histHemiBroadN, 1.0 / *_passedCutWeightSum);
+ scale(_histHemiBroadT, 1.0 / *_passedCutWeightSum);
+ scale(_histHemiBroadD, 1.0 / *_passedCutWeightSum);
- scale(_histCParam, 1.0/_passedCutWeightSum);
- scale(_histDParam, 1.0/_passedCutWeightSum);
+ scale(_histCParam, 1.0 / *_passedCutWeightSum);
+ scale(_histDParam, 1.0 / *_passedCutWeightSum);
- scale(_histDiffRate2Durham, 1.0/_passedCut3WeightSum);
- scale(_histDiffRate2Jade, 1.0/_passedCut3WeightSum);
- scale(_histDiffRate3Durham, 1.0/_passedCut4WeightSum);
- scale(_histDiffRate3Jade, 1.0/_passedCut4WeightSum);
- scale(_histDiffRate4Durham, 1.0/_passedCut5WeightSum);
- scale(_histDiffRate4Jade, 1.0/_passedCut5WeightSum);
+ scale(_histDiffRate2Durham, 1.0 / *_passedCut3WeightSum);
+ scale(_histDiffRate2Jade, 1.0 / *_passedCut3WeightSum);
+ scale(_histDiffRate3Durham, 1.0 / *_passedCut4WeightSum);
+ scale(_histDiffRate3Jade, 1.0 / *_passedCut4WeightSum);
+ scale(_histDiffRate4Durham, 1.0 / *_passedCut5WeightSum);
+ scale(_histDiffRate4Jade, 1.0 / *_passedCut5WeightSum);
}
//@}
private:
/// Store the weighted sums of numbers of charged / charged+neutral
/// particles - used to calculate average number of particles for the
/// inclusive single particle distributions' normalisations.
CounterPtr _weightedTotalPartNum;
/// @name Sums of weights past various cuts
//@{
CounterPtr _passedCutWeightSum;
CounterPtr _passedCut3WeightSum;
CounterPtr _passedCut4WeightSum;
CounterPtr _passedCut5WeightSum;
//@}
/// @name Histograms
//@{
Histo1DPtr _histPtTIn;
Histo1DPtr _histPtTOut;
Histo1DPtr _histPtSIn;
Histo1DPtr _histPtSOut;
Histo1DPtr _histRapidityT;
Histo1DPtr _histRapidityS;
Histo1DPtr _histScaledMom, _histLogScaledMom;
Profile1DPtr _histPtTOutVsXp, _histPtVsXp;
Histo1DPtr _hist1MinusT;
Histo1DPtr _histTMajor;
Histo1DPtr _histTMinor;
Histo1DPtr _histOblateness;
Histo1DPtr _histSphericity;
Histo1DPtr _histAplanarity;
Histo1DPtr _histPlanarity;
Histo1DPtr _histCParam;
Histo1DPtr _histDParam;
Histo1DPtr _histHemiMassD;
Histo1DPtr _histHemiMassH;
Histo1DPtr _histHemiMassL;
Histo1DPtr _histHemiBroadW;
Histo1DPtr _histHemiBroadN;
Histo1DPtr _histHemiBroadT;
Histo1DPtr _histHemiBroadD;
Histo1DPtr _histDiffRate2Durham;
Histo1DPtr _histDiffRate2Jade;
Histo1DPtr _histDiffRate3Durham;
Histo1DPtr _histDiffRate3Jade;
Histo1DPtr _histDiffRate4Durham;
Histo1DPtr _histDiffRate4Jade;
Histo1DPtr _histEEC, _histAEEC;
Histo1DPtr _histMultiCharged;
Histo1DPtr _histMultiPiPlus;
Histo1DPtr _histMultiPi0;
Histo1DPtr _histMultiKPlus;
Histo1DPtr _histMultiK0;
Histo1DPtr _histMultiEta;
Histo1DPtr _histMultiEtaPrime;
Histo1DPtr _histMultiDPlus;
Histo1DPtr _histMultiD0;
Histo1DPtr _histMultiBPlus0;
Histo1DPtr _histMultiF0;
Histo1DPtr _histMultiRho;
Histo1DPtr _histMultiKStar892Plus;
Histo1DPtr _histMultiKStar892_0;
Histo1DPtr _histMultiPhi;
Histo1DPtr _histMultiDStar2010Plus;
Histo1DPtr _histMultiF2;
Histo1DPtr _histMultiK2Star1430_0;
Histo1DPtr _histMultiP;
Histo1DPtr _histMultiLambda0;
Histo1DPtr _histMultiXiMinus;
Histo1DPtr _histMultiOmegaMinus;
Histo1DPtr _histMultiDeltaPlusPlus;
Histo1DPtr _histMultiSigma1385Plus;
Histo1DPtr _histMultiXi1530_0;
Histo1DPtr _histMultiLambdaB0;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(DELPHI_1996_S3430090);
}
diff --git a/analyses/pluginLEP/DELPHI_2000_S4328825.cc b/analyses/pluginLEP/DELPHI_2000_S4328825.cc
--- a/analyses/pluginLEP/DELPHI_2000_S4328825.cc
+++ b/analyses/pluginLEP/DELPHI_2000_S4328825.cc
@@ -1,143 +1,143 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/Sphericity.hh"
#include "Rivet/Projections/Thrust.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/ParisiTensor.hh"
#include "Rivet/Projections/Hemispheres.hh"
#include "Rivet/Projections/InitialQuarks.hh"
#include <cmath>
namespace Rivet {
/// @brief DELPHI charged multiplicities in light, charm and bottom quark events
/// @author Peter Richardson
class DELPHI_2000_S4328825 : public Analysis {
public:
/// Constructor
DELPHI_2000_S4328825()
: Analysis("DELPHI_2000_S4328825")
{}
/// @name Analysis methods
//@{
void init() {
// Projections
declare(Beam(), "Beams");
declare(ChargedFinalState(), "CFS");
declare(InitialQuarks(), "IQF");
book(_weightedTotalChargedPartNumLight,"weight_totalch_light");
book(_weightedTotalChargedPartNumCharm,"weight_totalch_charm");
book(_weightedTotalChargedPartNumBottom,"weight_totalch_bottom");
book(_weightLight,"weight_light");
book(_weightCharm,"weight_charm");
book(_weightBottom,"weight_bottom");
book(h_bottom, 1, 1, 1);
book(h_charm, 1, 1, 2);
book(h_light, 1, 1, 3);
book(h_diff, 1, 1, 4); // bottom minus light
}
void analyze(const Event& event) {
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
const FinalState& cfs = apply<FinalState>(event, "CFS");
if (cfs.size() < 2) vetoEvent;
int flavour = 0;
const InitialQuarks& iqf = apply<InitialQuarks>(event, "IQF");
// If we only have two quarks (qqbar), just take the flavour.
// If we have more than two quarks, look for the most energetic q-qbar pair.
if (iqf.particles().size() == 2) {
flavour = iqf.particles().front().abspid();
}
else {
map<int, double> quarkmap;
foreach (const Particle& p, iqf.particles()) {
if (quarkmap[p.pid()] < p.E()) {
quarkmap[p.pid()] = p.E();
}
}
double maxenergy = 0.;
for (int i = 1; i <= 5; ++i) {
if (quarkmap[i]+quarkmap[-i] > maxenergy) {
maxenergy = quarkmap[i]+quarkmap[-i]; // keep track of the running maximum
flavour = i;
}
}
}
const size_t numParticles = cfs.particles().size();
switch (flavour) {
case 1: case 2: case 3:
_weightLight->fill();
_weightedTotalChargedPartNumLight ->fill(numParticles);
break;
case 4:
_weightCharm->fill();
_weightedTotalChargedPartNumCharm ->fill(numParticles);
break;
case 5:
_weightBottom->fill();
_weightedTotalChargedPartNumBottom->fill(numParticles);
break;
}
}
void finalize() {
Histo1D temphisto(refData(1, 1, 1));
- const double avgNumPartsBottom = _weightedTotalChargedPartNumBottom / _weightBottom;
- const double avgNumPartsCharm = _weightedTotalChargedPartNumCharm / _weightCharm;
- const double avgNumPartsLight = _weightedTotalChargedPartNumLight / _weightLight;
+ const double avgNumPartsBottom = dbl(*_weightedTotalChargedPartNumBottom / *_weightBottom);
+ const double avgNumPartsCharm = dbl(*_weightedTotalChargedPartNumCharm / *_weightCharm);
+ const double avgNumPartsLight = dbl(*_weightedTotalChargedPartNumLight / *_weightLight);
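// NB: unlike OPAL_2002_S5361494 below, there is no guard here against an
// unfilled flavour class -- a run with no events of a given flavour would
// divide by a zero counter.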
for (size_t b = 0; b < temphisto.numBins(); b++) {
const double x = temphisto.bin(b).xMid();
const double ex = temphisto.bin(b).xWidth()/2.;
if (inRange(sqrtS()/GeV, x-ex, x+ex)) {
// @TODO: Fix y-error:
h_bottom->addPoint(x, avgNumPartsBottom, ex, 0.);
h_charm->addPoint(x, avgNumPartsCharm, ex, 0.);
h_light->addPoint(x, avgNumPartsLight, ex, 0.);
h_diff->addPoint(x, avgNumPartsBottom-avgNumPartsLight, ex, 0.);
}
}
}
//@}
private:
Scatter2DPtr h_bottom, h_charm, h_light, h_diff;
/// @name Multiplicities
//@{
CounterPtr _weightedTotalChargedPartNumLight;
CounterPtr _weightedTotalChargedPartNumCharm;
CounterPtr _weightedTotalChargedPartNumBottom;
//@}
/// @name Weights
//@{
CounterPtr _weightLight;
CounterPtr _weightCharm;
CounterPtr _weightBottom;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(DELPHI_2000_S4328825);
}
diff --git a/analyses/pluginLEP/L3_2004_I652683.cc b/analyses/pluginLEP/L3_2004_I652683.cc
--- a/analyses/pluginLEP/L3_2004_I652683.cc
+++ b/analyses/pluginLEP/L3_2004_I652683.cc
@@ -1,210 +1,210 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/InitialQuarks.hh"
#include "Rivet/Projections/Thrust.hh"
#include "Rivet/Projections/ParisiTensor.hh"
#include "Rivet/Projections/Hemispheres.hh"
namespace Rivet {
/// Jet rates and event shapes at LEP I+II
class L3_2004_I652683 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(L3_2004_I652683);
/// Book histograms and initialise projections before the run
void init() {
// Projections to use
const FinalState FS;
declare(FS, "FS");
declare(Beam(), "beams");
const ChargedFinalState CFS;
declare(CFS, "CFS");
const Thrust thrust(FS);
declare(thrust, "thrust");
declare(ParisiTensor(FS), "Parisi");
declare(Hemispheres(thrust), "Hemispheres");
declare(InitialQuarks(), "initialquarks");
// Book the histograms
book(_h_Thrust_udsc , 47, 1, 1);
book(_h_Thrust_bottom , 47, 1, 2);
book(_h_heavyJetmass_udsc , 48, 1, 1);
book(_h_heavyJetmass_bottom , 48, 1, 2);
book(_h_totalJetbroad_udsc , 49, 1, 1);
book(_h_totalJetbroad_bottom , 49, 1, 2);
book(_h_wideJetbroad_udsc , 50, 1, 1);
book(_h_wideJetbroad_bottom , 50, 1, 2);
book(_h_Cparameter_udsc , 51, 1, 1);
book(_h_Cparameter_bottom , 51, 1, 2);
book(_h_Dparameter_udsc , 52, 1, 1);
book(_h_Dparameter_bottom , 52, 1, 2);
book(_h_Ncharged , 59, 1, 1);
book(_h_Ncharged_udsc , 59, 1, 2);
book(_h_Ncharged_bottom , 59, 1, 3);
book(_h_scaledMomentum , 65, 1, 1);
book(_h_scaledMomentum_udsc , 65, 1, 2);
book(_h_scaledMomentum_bottom, 65, 1, 3);
book(_sumW_udsc, "sumW_udsc");
book(_sumW_b, "sumW_b");
book(_sumW_ch, "sumW_ch");
book(_sumW_ch_udsc, "sumW_ch_udsc");
book(_sumW_ch_b, "sumW_ch_b");
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// Get beam average momentum
const ParticlePair& beams = apply<Beam>(event, "beams").beams();
const double beamMomentum = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0;
// InitialQuarks projection to have udsc events separated from b events
/// @todo Yuck!!! Eliminate when possible...
int flavour = 0;
const InitialQuarks& iqf = apply<InitialQuarks>(event, "initialquarks");
Particles quarks;
if ( iqf.particles().size() == 2 ) {
flavour = iqf.particles().front().abspid();
quarks = iqf.particles();
} else {
map<int, Particle> quarkmap;
for (const Particle& p : iqf.particles()) {
if (quarkmap.find(p.pid()) == quarkmap.end()) quarkmap[p.pid()] = p;
else if (quarkmap[p.pid()].E() < p.E()) quarkmap[p.pid()] = p;
}
double max_energy = 0.;
for (int i = 1; i <= 5; ++i) {
double energy = 0.;
if (quarkmap.find(i) != quarkmap.end())
energy += quarkmap[ i].E();
if (quarkmap.find(-i) != quarkmap.end())
energy += quarkmap[-i].E();
if (energy > max_energy) {
max_energy = energy; // keep track of the running maximum
flavour = i;
}
}
if (quarkmap.find(flavour) != quarkmap.end())
quarks.push_back(quarkmap[flavour]);
if (quarkmap.find(-flavour) != quarkmap.end())
quarks.push_back(quarkmap[-flavour]);
}
// Flavour label
/// @todo Change to a bool?
const int iflav = (flavour == PID::DQUARK || flavour == PID::UQUARK || flavour == PID::SQUARK || flavour == PID::CQUARK) ? 1 : (flavour == PID::BQUARK) ? 5 : 0;
// Update weight sums
if (iflav == 1) {
_sumW_udsc->fill();
} else if (iflav == 5) {
_sumW_b->fill();
}
_sumW_ch->fill();
// Charged multiplicity
const FinalState& cfs = applyProjection<FinalState>(event, "CFS");
_h_Ncharged->fill(cfs.size());
if (iflav == 1) {
_sumW_ch_udsc->fill();
_h_Ncharged_udsc->fill(cfs.size());
} else if (iflav == 5) {
_sumW_ch_b->fill();
_h_Ncharged_bottom->fill(cfs.size());
}
// Scaled momentum
const Particles& chparticles = cfs.particlesByPt();
for (const Particle& p : chparticles) {
const Vector3 momentum3 = p.p3();
const double mom = momentum3.mod();
const double scaledMom = mom/beamMomentum;
const double logScaledMom = std::log(scaledMom);
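// -log(x_p) is the standard fragmentation variable xi = ln(1/x_p)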
_h_scaledMomentum->fill(-logScaledMom);
if (iflav == 1) {
_h_scaledMomentum_udsc->fill(-logScaledMom);
} else if (iflav == 5) {
_h_scaledMomentum_bottom->fill(-logScaledMom);
}
}
// Thrust
const Thrust& thrust = applyProjection<Thrust>(event, "thrust");
if (iflav == 1) {
_h_Thrust_udsc->fill(thrust.thrust());
} else if (iflav == 5) {
_h_Thrust_bottom->fill(thrust.thrust());
}
// C and D Parisi parameters
const ParisiTensor& parisi = applyProjection<ParisiTensor>(event, "Parisi");
if (iflav == 1) {
_h_Cparameter_udsc->fill(parisi.C());
_h_Dparameter_udsc->fill(parisi.D());
} else if (iflav == 5) {
_h_Cparameter_bottom->fill(parisi.C());
_h_Dparameter_bottom->fill(parisi.D());
}
// The hemisphere variables
const Hemispheres& hemisphere = applyProjection<Hemispheres>(event, "Hemispheres");
if (iflav == 1) {
_h_heavyJetmass_udsc->fill(hemisphere.scaledM2high());
_h_totalJetbroad_udsc->fill(hemisphere.Bsum());
_h_wideJetbroad_udsc->fill(hemisphere.Bmax());
} else if (iflav == 5) {
_h_heavyJetmass_bottom->fill(hemisphere.scaledM2high());
_h_totalJetbroad_bottom->fill(hemisphere.Bsum());
_h_wideJetbroad_bottom->fill(hemisphere.Bmax());
}
}
/// Normalise histograms etc., after the run
void finalize() {
- scale({_h_Thrust_udsc, _h_heavyJetmass_udsc, _h_totalJetbroad_udsc, _h_wideJetbroad_udsc, _h_Cparameter_udsc, _h_Dparameter_udsc}, 1/_sumW_udsc);
- scale({_h_Thrust_bottom, _h_heavyJetmass_bottom, _h_totalJetbroad_bottom, _h_wideJetbroad_bottom, _h_Cparameter_bottom, _h_Dparameter_bottom}, 1./_sumW_b);
- scale(_h_Ncharged, 2/_sumW_ch);
- scale(_h_Ncharged_udsc, 2/_sumW_ch_udsc);
- scale(_h_Ncharged_bottom, 2/_sumW_ch_b);
- scale(_h_scaledMomentum, 1/_sumW_ch);
- scale(_h_scaledMomentum_udsc, 1/_sumW_ch_udsc);
- scale(_h_scaledMomentum_bottom, 1/_sumW_ch_b);
+ scale({_h_Thrust_udsc, _h_heavyJetmass_udsc, _h_totalJetbroad_udsc, _h_wideJetbroad_udsc, _h_Cparameter_udsc, _h_Dparameter_udsc}, 1/ *_sumW_udsc);
+ scale({_h_Thrust_bottom, _h_heavyJetmass_bottom, _h_totalJetbroad_bottom, _h_wideJetbroad_bottom, _h_Cparameter_bottom, _h_Dparameter_bottom}, 1./ *_sumW_b);
+ scale(_h_Ncharged, 2/ *_sumW_ch);
+ scale(_h_Ncharged_udsc, 2/ *_sumW_ch_udsc);
+ scale(_h_Ncharged_bottom, 2/ *_sumW_ch_b);
+ scale(_h_scaledMomentum, 1/ *_sumW_ch);
+ scale(_h_scaledMomentum_udsc, 1/ *_sumW_ch_udsc);
+ scale(_h_scaledMomentum_bottom, 1/ *_sumW_ch_b);
}
/// Weight counters
CounterPtr _sumW_udsc, _sumW_b, _sumW_ch, _sumW_ch_udsc, _sumW_ch_b;
/// @name Histograms
//@{
Histo1DPtr _h_Thrust_udsc, _h_Thrust_bottom;
Histo1DPtr _h_heavyJetmass_udsc, _h_heavyJetmass_bottom;
Histo1DPtr _h_totalJetbroad_udsc, _h_totalJetbroad_bottom;
Histo1DPtr _h_wideJetbroad_udsc, _h_wideJetbroad_bottom;
Histo1DPtr _h_Cparameter_udsc, _h_Cparameter_bottom;
Histo1DPtr _h_Dparameter_udsc, _h_Dparameter_bottom;
Histo1DPtr _h_Ncharged, _h_Ncharged_udsc, _h_Ncharged_bottom;
Histo1DPtr _h_scaledMomentum, _h_scaledMomentum_udsc, _h_scaledMomentum_bottom;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(L3_2004_I652683);
}
diff --git a/analyses/pluginLEP/OPAL_1996_S3257789.cc b/analyses/pluginLEP/OPAL_1996_S3257789.cc
--- a/analyses/pluginLEP/OPAL_1996_S3257789.cc
+++ b/analyses/pluginLEP/OPAL_1996_S3257789.cc
@@ -1,95 +1,95 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// @brief OPAL J/Psi fragmentation function paper
/// @author Peter Richardson
class OPAL_1996_S3257789 : public Analysis {
public:
/// Constructor
OPAL_1996_S3257789()
: Analysis("OPAL_1996_S3257789")
{}
/// @name Analysis methods
//@{
void init() {
declare(Beam(), "Beams");
declare(ChargedFinalState(), "FS");
declare(UnstableFinalState(), "UFS");
book(_histXpJPsi , 1, 1, 1);
book(_multJPsi , 2, 1, 1);
book(_multPsiPrime , 2, 1, 2);
book(_weightSum,"weightSum");
}
void analyze(const Event& e) {
// First, veto on leptonic events by cutting on the number of charged FS particles
const FinalState& fs = apply<FinalState>(e, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) {
MSG_DEBUG("Failed leptonic event cut");
vetoEvent;
}
MSG_DEBUG("Passed leptonic event cut");
// Get beams and average beam momentum
const ParticlePair& beams = apply<Beam>(e, "Beams").beams();
const double meanBeamMom = ( beams.first.p3().mod() +
beams.second.p3().mod() ) / 2.0;
MSG_DEBUG("Avg beam momentum = " << meanBeamMom);
// Final state of unstable particles to get particle spectra
const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
foreach (const Particle& p, ufs.particles()) {
if(p.abspid()==443) {
double xp = p.p3().mod()/meanBeamMom;
_histXpJPsi->fill(xp);
_multJPsi->fill(91.2);
_weightSum->fill();
}
else if(p.abspid()==100443) {
_multPsiPrime->fill(91.2);
}
}
}
/// Finalize
void finalize() {
- if(_weightSum>0.)
- scale(_histXpJPsi , 0.1/_weightSum);
+ if(_weightSum->val()>0.)
+ scale(_histXpJPsi , 0.1/ *_weightSum);
scale(_multJPsi , 1./sumOfWeights());
scale(_multPsiPrime, 1./sumOfWeights());
}
//@}
private:
CounterPtr _weightSum;
Histo1DPtr _histXpJPsi;
Histo1DPtr _multJPsi;
Histo1DPtr _multPsiPrime;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(OPAL_1996_S3257789);
}
diff --git a/analyses/pluginLEP/OPAL_1997_S3396100.cc b/analyses/pluginLEP/OPAL_1997_S3396100.cc
--- a/analyses/pluginLEP/OPAL_1997_S3396100.cc
+++ b/analyses/pluginLEP/OPAL_1997_S3396100.cc
@@ -1,164 +1,164 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// @brief OPAL strange baryon paper
/// @author Peter Richardson
class OPAL_1997_S3396100 : public Analysis {
public:
/// Constructor
OPAL_1997_S3396100()
: Analysis("OPAL_1997_S3396100")
{}
/// @name Analysis methods
//@{
void init() {
declare(Beam(), "Beams");
declare(ChargedFinalState(), "FS");
declare(UnstableFinalState(), "UFS");
book(_histXpLambda , 1, 1, 1);
book(_histXiLambda , 2, 1, 1);
book(_histXpXiMinus , 3, 1, 1);
book(_histXiXiMinus , 4, 1, 1);
book(_histXpSigma1385Plus , 5, 1, 1);
book(_histXiSigma1385Plus , 6, 1, 1);
book(_histXpSigma1385Minus , 7, 1, 1);
book(_histXiSigma1385Minus , 8, 1, 1);
book(_histXpXi1530 , 9, 1, 1);
book(_histXiXi1530 ,10, 1, 1);
book(_histXpLambda1520 ,11, 1, 1);
book(_histXiLambda1520 ,12, 1, 1);
book(_weightedTotalNumLambda, "weightedTotalNumLambda");
book(_weightedTotalNumXiMinus, "weightedTotalNumXiMinus");
book(_weightedTotalNumSigma1385Plus, "weightedTotalNumSigma1385Plus");
book(_weightedTotalNumSigma1385Minus, "weightedTotalNumSigma1385Minus");
book(_weightedTotalNumXi1530, "weightedTotalNumXi1530");
book(_weightedTotalNumLambda1520, "weightedTotalNumLambda1520");
}
void analyze(const Event& e) {
// First, veto on leptonic events by cutting on the number of charged FS particles
const FinalState& fs = apply<FinalState>(e, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) {
MSG_DEBUG("Failed leptonic event cut");
vetoEvent;
}
MSG_DEBUG("Passed leptonic event cut");
// Get beams and average beam momentum
const ParticlePair& beams = apply<Beam>(e, "Beams").beams();
const double meanBeamMom = ( beams.first.p3().mod() +
beams.second.p3().mod() ) / 2.0;
MSG_DEBUG("Avg beam momentum = " << meanBeamMom);
// Final state of unstable particles to get particle spectra
const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
foreach (const Particle& p, ufs.particles()) {
const int id = p.abspid();
double xp = p.p3().mod()/meanBeamMom;
double xi = -log(xp);
switch (id) {
case 3312:
_histXpXiMinus->fill(xp);
_histXiXiMinus->fill(xi);
_weightedTotalNumXiMinus->fill();
break;
case 3224:
_histXpSigma1385Plus->fill(xp);
_histXiSigma1385Plus->fill(xi);
_weightedTotalNumSigma1385Plus->fill();
break;
case 3114:
_histXpSigma1385Minus->fill(xp);
_histXiSigma1385Minus->fill(xi);
_weightedTotalNumSigma1385Minus->fill();
break;
case 3122:
_histXpLambda->fill(xp);
_histXiLambda->fill(xi);
_weightedTotalNumLambda->fill();
break;
case 3324:
_histXpXi1530->fill(xp);
_histXiXi1530->fill(xi);
_weightedTotalNumXi1530->fill();
break;
case 3124:
_histXpLambda1520->fill(xp);
_histXiLambda1520->fill(xi);
_weightedTotalNumLambda1520->fill();
break;
}
}
}
/// Finalize
void finalize() {
- normalize(_histXpLambda , double(_weightedTotalNumLambda )/sumOfWeights());
- normalize(_histXiLambda , double(_weightedTotalNumLambda )/sumOfWeights());
- normalize(_histXpXiMinus , double(_weightedTotalNumXiMinus )/sumOfWeights());
- normalize(_histXiXiMinus , double(_weightedTotalNumXiMinus )/sumOfWeights());
- normalize(_histXpSigma1385Plus , double(_weightedTotalNumSigma1385Plus)/sumOfWeights());
- normalize(_histXiSigma1385Plus , double(_weightedTotalNumSigma1385Plus)/sumOfWeights());
- normalize(_histXpSigma1385Minus, double(_weightedTotalNumSigma1385Plus)/sumOfWeights());
- normalize(_histXiSigma1385Minus, double(_weightedTotalNumSigma1385Plus)/sumOfWeights());
- normalize(_histXpXi1530 , double(_weightedTotalNumXi1530 )/sumOfWeights());
- normalize(_histXiXi1530 , double(_weightedTotalNumXi1530 )/sumOfWeights());
- normalize(_histXpLambda1520 , double(_weightedTotalNumLambda1520 )/sumOfWeights());
- normalize(_histXiLambda1520 , double(_weightedTotalNumLambda1520 )/sumOfWeights());
+ normalize(_histXpLambda , dbl(*_weightedTotalNumLambda )/sumOfWeights());
+ normalize(_histXiLambda , dbl(*_weightedTotalNumLambda )/sumOfWeights());
+ normalize(_histXpXiMinus , dbl(*_weightedTotalNumXiMinus )/sumOfWeights());
+ normalize(_histXiXiMinus , dbl(*_weightedTotalNumXiMinus )/sumOfWeights());
+ normalize(_histXpSigma1385Plus , dbl(*_weightedTotalNumSigma1385Plus)/sumOfWeights());
+ normalize(_histXiSigma1385Plus , dbl(*_weightedTotalNumSigma1385Plus)/sumOfWeights());
+ normalize(_histXpSigma1385Minus, dbl(*_weightedTotalNumSigma1385Plus)/sumOfWeights());
+ normalize(_histXiSigma1385Minus, dbl(*_weightedTotalNumSigma1385Plus)/sumOfWeights());
+ normalize(_histXpXi1530 , dbl(*_weightedTotalNumXi1530 )/sumOfWeights());
+ normalize(_histXiXi1530 , dbl(*_weightedTotalNumXi1530 )/sumOfWeights());
+ normalize(_histXpLambda1520 , dbl(*_weightedTotalNumLambda1520 )/sumOfWeights());
+ normalize(_histXiLambda1520 , dbl(*_weightedTotalNumLambda1520 )/sumOfWeights());
}
//@}
private:
/// Store the weighted sums of numbers of charged / charged+neutral
/// particles - used to calculate average number of particles for the
/// inclusive single particle distributions' normalisations.
CounterPtr _weightedTotalNumLambda;
CounterPtr _weightedTotalNumXiMinus;
CounterPtr _weightedTotalNumSigma1385Plus;
CounterPtr _weightedTotalNumSigma1385Minus;
CounterPtr _weightedTotalNumXi1530;
CounterPtr _weightedTotalNumLambda1520;
Histo1DPtr _histXpLambda ;
Histo1DPtr _histXiLambda ;
Histo1DPtr _histXpXiMinus ;
Histo1DPtr _histXiXiMinus ;
Histo1DPtr _histXpSigma1385Plus ;
Histo1DPtr _histXiSigma1385Plus ;
Histo1DPtr _histXpSigma1385Minus;
Histo1DPtr _histXiSigma1385Minus;
Histo1DPtr _histXpXi1530 ;
Histo1DPtr _histXiXi1530 ;
Histo1DPtr _histXpLambda1520 ;
Histo1DPtr _histXiLambda1520 ;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(OPAL_1997_S3396100);
}
diff --git a/analyses/pluginLEP/OPAL_1998_S3780481.cc b/analyses/pluginLEP/OPAL_1998_S3780481.cc
--- a/analyses/pluginLEP/OPAL_1998_S3780481.cc
+++ b/analyses/pluginLEP/OPAL_1998_S3780481.cc
@@ -1,193 +1,193 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/InitialQuarks.hh"
namespace Rivet {
/// @brief OPAL flavour-dependent fragmentation paper
/// @author Hendrik Hoeth
class OPAL_1998_S3780481 : public Analysis {
public:
/// Constructor
OPAL_1998_S3780481() : Analysis("OPAL_1998_S3780481") {
}
/// @name Analysis methods
//@{
void analyze(const Event& e) {
// First, veto on leptonic events by cutting on the number of charged FS particles
const FinalState& fs = apply<FinalState>(e, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) {
MSG_DEBUG("Failed ncharged cut");
vetoEvent;
}
MSG_DEBUG("Passed ncharged cut");
_weightedTotalPartNum->fill(numParticles);
// Get beams and average beam momentum
const ParticlePair& beams = apply<Beam>(e, "Beams").beams();
const double meanBeamMom = ( beams.first.p3().mod() +
beams.second.p3().mod() ) / 2.0;
MSG_DEBUG("Avg beam momentum = " << meanBeamMom);
int flavour = 0;
const InitialQuarks& iqf = apply<InitialQuarks>(e, "IQF");
// If we only have two quarks (qqbar), just take the flavour.
// If we have more than two quarks, look for the most energetic q-qbar pair.
/// @todo Yuck... does this *really* have to be quark-based?!?
if (iqf.particles().size() == 2) {
flavour = iqf.particles().front().abspid();
} else {
map<int, double> quarkmap;
foreach (const Particle& p, iqf.particles()) {
if (quarkmap[p.pid()] < p.E()) {
quarkmap[p.pid()] = p.E();
}
}
double maxenergy = 0.;
for (int i = 1; i <= 5; ++i) {
if (quarkmap[i]+quarkmap[-i] > maxenergy) {
maxenergy = quarkmap[i]+quarkmap[-i]; // keep track of the running maximum
flavour = i;
}
}
}
switch (flavour) {
case 1:
case 2:
case 3:
_SumOfudsWeights->fill();
break;
case 4:
_SumOfcWeights->fill();
break;
case 5:
_SumOfbWeights->fill();
break;
}
foreach (const Particle& p, fs.particles()) {
const double xp = p.p3().mod()/meanBeamMom;
const double logxp = -std::log(xp);
_histXpall->fill(xp);
_histLogXpall->fill(logxp);
_histMultiChargedall->fill(_histMultiChargedall->bin(0).xMid());
switch (flavour) {
/// @todo Use PDG code enums
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_histXpuds->fill(xp);
_histLogXpuds->fill(logxp);
_histMultiChargeduds->fill(_histMultiChargeduds->bin(0).xMid());
break;
case PID::CQUARK:
_histXpc->fill(xp);
_histLogXpc->fill(logxp);
_histMultiChargedc->fill(_histMultiChargedc->bin(0).xMid());
break;
case PID::BQUARK:
_histXpb->fill(xp);
_histLogXpb->fill(logxp);
_histMultiChargedb->fill(_histMultiChargedb->bin(0).xMid());
break;
}
}
}
void init() {
// Projections
declare(Beam(), "Beams");
declare(ChargedFinalState(), "FS");
declare(InitialQuarks(), "IQF");
// Book histos
book(_histXpuds ,1, 1, 1);
book(_histXpc ,2, 1, 1);
book(_histXpb ,3, 1, 1);
book(_histXpall ,4, 1, 1);
book(_histLogXpuds ,5, 1, 1);
book(_histLogXpc ,6, 1, 1);
book(_histLogXpb ,7, 1, 1);
book(_histLogXpall ,8, 1, 1);
book(_histMultiChargeduds ,9, 1, 1);
book(_histMultiChargedc ,9, 1, 2);
book(_histMultiChargedb ,9, 1, 3);
book(_histMultiChargedall ,9, 1, 4);
// Counters
book(_weightedTotalPartNum, "TotalPartNum");
book(_SumOfudsWeights, "udsWeights");
book(_SumOfcWeights, "cWeights");
book(_SumOfbWeights, "bWeights");
}
/// Finalize
void finalize() {
- const double avgNumParts = double(_weightedTotalPartNum) / sumOfWeights();
+ const double avgNumParts = dbl(*_weightedTotalPartNum) / sumOfWeights();
normalize(_histXpuds , avgNumParts);
normalize(_histXpc , avgNumParts);
normalize(_histXpb , avgNumParts);
normalize(_histXpall , avgNumParts);
normalize(_histLogXpuds , avgNumParts);
normalize(_histLogXpc , avgNumParts);
normalize(_histLogXpb , avgNumParts);
normalize(_histLogXpall , avgNumParts);
- scale(_histMultiChargeduds, 1.0/_SumOfudsWeights);
- scale(_histMultiChargedc , 1.0/_SumOfcWeights);
- scale(_histMultiChargedb , 1.0/_SumOfbWeights);
+ scale(_histMultiChargeduds, 1.0/ *_SumOfudsWeights);
+ scale(_histMultiChargedc , 1.0/ *_SumOfcWeights);
+ scale(_histMultiChargedb , 1.0/ *_SumOfbWeights);
scale(_histMultiChargedall, 1.0/sumOfWeights());
}
//@}
private:
/// Store the weighted sums of numbers of charged / charged+neutral
/// particles - used to calculate average number of particles for the
/// inclusive single particle distributions' normalisations.
CounterPtr _weightedTotalPartNum;
CounterPtr _SumOfudsWeights;
CounterPtr _SumOfcWeights;
CounterPtr _SumOfbWeights;
Histo1DPtr _histXpuds;
Histo1DPtr _histXpc;
Histo1DPtr _histXpb;
Histo1DPtr _histXpall;
Histo1DPtr _histLogXpuds;
Histo1DPtr _histLogXpc;
Histo1DPtr _histLogXpb;
Histo1DPtr _histLogXpall;
Histo1DPtr _histMultiChargeduds;
Histo1DPtr _histMultiChargedc;
Histo1DPtr _histMultiChargedb;
Histo1DPtr _histMultiChargedall;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(OPAL_1998_S3780481);
}
diff --git a/analyses/pluginLEP/OPAL_2002_S5361494.cc b/analyses/pluginLEP/OPAL_2002_S5361494.cc
--- a/analyses/pluginLEP/OPAL_2002_S5361494.cc
+++ b/analyses/pluginLEP/OPAL_2002_S5361494.cc
@@ -1,145 +1,145 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/Sphericity.hh"
#include "Rivet/Projections/Thrust.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/ParisiTensor.hh"
#include "Rivet/Projections/Hemispheres.hh"
#include "Rivet/Projections/InitialQuarks.hh"
#include <cmath>
namespace Rivet {
/// @brief OPAL multiplicities at various energies
/// @author Peter Richardson
class OPAL_2002_S5361494 : public Analysis {
public:
/// Constructor
OPAL_2002_S5361494()
: Analysis("OPAL_2002_S5361494")
{}
/// @name Analysis methods
//@{
void init() {
// Projections
declare(Beam(), "Beams");
declare(ChargedFinalState(), "CFS");
declare(InitialQuarks(), "IQF");
book(h_bottom, 1, 1, 1);
book(h_charm, 1, 1, 2);
book(h_light, 1, 1, 3);
book(h_diff, 1, 1, 4); // bottom minus light
book(_weightedTotalChargedPartNumLight, "TotalChargedPartNumLight");
book(_weightedTotalChargedPartNumCharm, "TotalChargedPartNumCharm");
book(_weightedTotalChargedPartNumBottom, "TotalChargedPartNumBottom");
book(_weightLight, "Light");
book(_weightCharm, "Charm");
book(_weightBottom, "Bottom");
}
void analyze(const Event& event) {
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
const FinalState& cfs = apply<FinalState>(event, "CFS");
if (cfs.size() < 2) vetoEvent;
int flavour = 0;
const InitialQuarks& iqf = apply<InitialQuarks>(event, "IQF");
// If we only have two quarks (qqbar), just take the flavour.
// If we have more than two quarks, look for the most energetic q-qbar pair.
if (iqf.particles().size() == 2) {
flavour = iqf.particles().front().abspid();
}
else {
map<int, double> quarkmap;
foreach (const Particle& p, iqf.particles()) {
if (quarkmap[p.pid()] < p.E()) {
quarkmap[p.pid()] = p.E();
}
}
double maxenergy = 0.;
for (int i = 1; i <= 5; ++i) {
if (quarkmap[i]+quarkmap[-i] > maxenergy) {
maxenergy = quarkmap[i]+quarkmap[-i]; // keep track of the running maximum
flavour = i;
}
}
}
const size_t numParticles = cfs.particles().size();
switch (flavour) {
case 1: case 2: case 3:
_weightLight ->fill();
_weightedTotalChargedPartNumLight ->fill(numParticles);
break;
case 4:
_weightCharm ->fill();
_weightedTotalChargedPartNumCharm ->fill(numParticles);
break;
case 5:
_weightBottom->fill();
_weightedTotalChargedPartNumBottom->fill(numParticles);
break;
}
}
void finalize() {
Histo1D temphisto(refData(1, 1, 1));
- const double avgNumPartsBottom = _weightBottom != 0. ? _weightedTotalChargedPartNumBottom / _weightBottom : 0.;
- const double avgNumPartsCharm = _weightCharm != 0. ? _weightedTotalChargedPartNumCharm / _weightCharm : 0.;
- const double avgNumPartsLight = _weightLight != 0. ? _weightedTotalChargedPartNumLight / _weightLight : 0.;
+ const double avgNumPartsBottom = _weightBottom->val() != 0. ? dbl(*_weightedTotalChargedPartNumBottom / *_weightBottom) : 0.;
+ const double avgNumPartsCharm = _weightCharm->val() != 0. ? dbl(*_weightedTotalChargedPartNumCharm / *_weightCharm ) : 0.;
+ const double avgNumPartsLight = _weightLight->val() != 0. ? dbl(*_weightedTotalChargedPartNumLight / *_weightLight ) : 0.;
for (size_t b = 0; b < temphisto.numBins(); b++) {
const double x = temphisto.bin(b).xMid();
const double ex = temphisto.bin(b).xWidth()/2.;
if (inRange(sqrtS()/GeV, x-ex, x+ex)) {
// @TODO: Fix y-error:
h_bottom->addPoint(x, avgNumPartsBottom, ex, 0.);
h_charm->addPoint(x, avgNumPartsCharm, ex, 0.);
h_light->addPoint(x, avgNumPartsLight, ex, 0.);
h_diff->addPoint(x, avgNumPartsBottom-avgNumPartsLight, ex, 0.);
}
}
}
//@}
private:
Scatter2DPtr h_bottom;
Scatter2DPtr h_charm ;
Scatter2DPtr h_light ;
Scatter2DPtr h_diff ;
/// @name Multiplicities
//@{
CounterPtr _weightedTotalChargedPartNumLight;
CounterPtr _weightedTotalChargedPartNumCharm;
CounterPtr _weightedTotalChargedPartNumBottom;
//@}
/// @name Weights
//@{
CounterPtr _weightLight;
CounterPtr _weightCharm;
CounterPtr _weightBottom;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(OPAL_2002_S5361494);
}
diff --git a/analyses/pluginLEP/OPAL_2004_I648738.cc b/analyses/pluginLEP/OPAL_2004_I648738.cc
--- a/analyses/pluginLEP/OPAL_2004_I648738.cc
+++ b/analyses/pluginLEP/OPAL_2004_I648738.cc
@@ -1,118 +1,118 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
class OPAL_2004_I648738 : public Analysis {
public:
/// Constructor
OPAL_2004_I648738()
: Analysis("OPAL_2004_I648738"), _sumW(3), _histo_xE(3)
{ }
/// @name Analysis methods
//@{
void init() {
declare(FinalState(), "FS");
declare(ChargedFinalState(), "CFS");
unsigned int ih=0;
if (inRange(0.5*sqrtS()/GeV, 4.0, 9.0)) {
ih = 1;
}
else if (inRange(0.5*sqrtS()/GeV, 9.0, 19.0)) {
ih = 2;
}
else if (inRange(0.5*sqrtS()/GeV, 19.0, 30.0)) {
ih = 3;
}
else if (inRange(0.5*sqrtS()/GeV, 45.5, 45.7)) {
ih = 5;
}
else if (inRange(0.5*sqrtS()/GeV, 30.0, 70.0)) {
ih = 4;
}
else if (inRange(0.5*sqrtS()/GeV, 91.5, 104.5)) {
ih = 6;
}
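// NB: the narrow 45.5-45.7 GeV window is tested before the enclosing
// 30-70 GeV range, so beam energies of ~45.6 GeV (the Z pole,
// sqrt(s) = 91.2 GeV) get their own histogram set rather than falling
// into the 30-70 bin.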
assert(ih>0);
// book the histograms
book(_histo_xE[0], ih+5,1,1);
book(_histo_xE[1], ih+5,1,2);
if(ih<5) book(_histo_xE[2] ,ih+5,1,3);
book(_sumW[0], "sumW_0");
book(_sumW[1], "sumW_1");
book(_sumW[2], "sumW_2");
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// find the initial quarks/gluons
ParticleVector initial;
for (const GenParticle* p : Rivet::particles(event.genEvent())) {
const GenVertex* pv = p->production_vertex();
const PdgId pid = abs(p->pdg_id());
if(!( (pid>=1&&pid<=5) || pid ==21) ) continue;
bool passed = false;
for (const GenParticle* pp : particles_in(pv)) {
const PdgId ppid = abs(pp->pdg_id());
passed = (ppid == PID::ELECTRON || ppid == PID::HIGGS ||
ppid == PID::ZBOSON || ppid == PID::GAMMA);
if(passed) break;
}
if(passed) initial.push_back(Particle(*p));
}
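// Only partons coming straight off an electroweak vertex (e, gamma, Z or H
// parent) survive, i.e. the primary parton pair of the hard process.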
if(initial.size()!=2) {
vetoEvent;
}
// Event type: 0 = light/charm (udsc) qqbar, 1 = bbbar, 2 = anything else (e.g. gg)
unsigned int itype=2;
if(initial[0].pdgId()==-initial[1].pdgId()) {
PdgId pid = abs(initial[0].pdgId());
if(pid>=1&&pid<=4)
itype=0;
else
itype=1;
}
assert(itype<_histo_xE.size());
// fill histograms
_sumW[itype]->fill(2.);
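// Weight 2 per event: presumably one unit per initial parton, so that the
// 1/sumW scaling in finalize() normalises the xE spectra per parton rather
// than per event.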
const Particles& chps = applyProjection<FinalState>(event, "CFS").particles();
foreach(const Particle& p, chps) {
double xE = 2.*p.E()/sqrtS();
_histo_xE[itype]->fill(xE);
}
}
/// Normalise histograms etc., after the run
void finalize() {
for(unsigned int ix=0;ix<_histo_xE.size();++ix) {
- if(_sumW[ix]>0.) scale(_histo_xE[ix],1./_sumW[ix]);
+ if(_sumW[ix]->val()>0.) scale(_histo_xE[ix],1./ *_sumW[ix]);
}
}
//@}
private:
vector<CounterPtr> _sumW;
/// @name Histograms
//@{
vector<Histo1DPtr> _histo_xE;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(OPAL_2004_I648738);
}
diff --git a/analyses/pluginLEP/OPAL_2004_S6132243.cc b/analyses/pluginLEP/OPAL_2004_S6132243.cc
--- a/analyses/pluginLEP/OPAL_2004_S6132243.cc
+++ b/analyses/pluginLEP/OPAL_2004_S6132243.cc
@@ -1,274 +1,274 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/Sphericity.hh"
#include "Rivet/Projections/Thrust.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/ParisiTensor.hh"
#include "Rivet/Projections/Hemispheres.hh"
#include <cmath>
namespace Rivet {
/// @brief OPAL event shapes and moments at 91, 133, 177, and 197 GeV
/// @author Andy Buckley
class OPAL_2004_S6132243 : public Analysis {
public:
/// Constructor
OPAL_2004_S6132243()
: Analysis("OPAL_2004_S6132243"),
_isqrts(-1)
{
//
}
/// @name Analysis methods
//@{
/// Energies: 91, 133, 177 (161-183), 197 (189-209) => index 0..3
int getHistIndex(double sqrts) {
int ih = -1;
if (inRange(sqrts/GeV, 89.9, 91.5)) {
ih = 0;
} else if (fuzzyEquals(sqrts/GeV, 133)) {
ih = 1;
} else if (fuzzyEquals(sqrts/GeV, 177)) { // (161-183)
ih = 2;
} else if (fuzzyEquals(sqrts/GeV, 197)) { // (189-209)
ih = 3;
} else {
stringstream ss;
ss << "Invalid energy for OPAL_2004 analysis: "
<< sqrts/GeV << " GeV != 91, 133, 177, or 197 GeV";
throw Error(ss.str());
}
assert(ih >= 0);
return ih;
}
void init() {
// Projections
declare(Beam(), "Beams");
const FinalState fs;
declare(fs, "FS");
const ChargedFinalState cfs;
declare(cfs, "CFS");
declare(FastJets(fs, FastJets::DURHAM, 0.7), "DurhamJets");
declare(Sphericity(fs), "Sphericity");
declare(ParisiTensor(fs), "Parisi");
const Thrust thrust(fs);
declare(thrust, "Thrust");
declare(Hemispheres(thrust), "Hemispheres");
// Get beam energy index
_isqrts = getHistIndex(sqrtS());
// Book histograms
book(_hist1MinusT[_isqrts] ,1, 1, _isqrts+1);
book(_histHemiMassH[_isqrts] ,2, 1, _isqrts+1);
book(_histCParam[_isqrts] ,3, 1, _isqrts+1);
book(_histHemiBroadT[_isqrts] ,4, 1, _isqrts+1);
book(_histHemiBroadW[_isqrts] ,5, 1, _isqrts+1);
book(_histY23Durham[_isqrts] ,6, 1, _isqrts+1);
book(_histTMajor[_isqrts] ,7, 1, _isqrts+1);
book(_histTMinor[_isqrts] ,8, 1, _isqrts+1);
book(_histAplanarity[_isqrts] ,9, 1, _isqrts+1);
book(_histSphericity[_isqrts] ,10, 1, _isqrts+1);
book(_histOblateness[_isqrts] ,11, 1, _isqrts+1);
book(_histHemiMassL[_isqrts] ,12, 1, _isqrts+1);
book(_histHemiBroadN[_isqrts] ,13, 1, _isqrts+1);
book(_histDParam[_isqrts] ,14, 1, _isqrts+1);
//
book(_hist1MinusTMom[_isqrts] ,15, 1, _isqrts+1);
book(_histHemiMassHMom[_isqrts] ,16, 1, _isqrts+1);
book(_histCParamMom[_isqrts] ,17, 1, _isqrts+1);
book(_histHemiBroadTMom[_isqrts] ,18, 1, _isqrts+1);
book(_histHemiBroadWMom[_isqrts] ,19, 1, _isqrts+1);
book(_histY23DurhamMom[_isqrts] ,20, 1, _isqrts+1);
book(_histTMajorMom[_isqrts] ,21, 1, _isqrts+1);
book(_histTMinorMom[_isqrts] ,22, 1, _isqrts+1);
book(_histSphericityMom[_isqrts] ,23, 1, _isqrts+1);
book(_histOblatenessMom[_isqrts] ,24, 1, _isqrts+1);
book(_histHemiMassLMom[_isqrts] ,25, 1, _isqrts+1);
book(_histHemiBroadNMom[_isqrts] ,26, 1, _isqrts+1);
book(_sumWTrack2, "sumWTrack2");
book(_sumWJet3, "sumWJet3");
}
void analyze(const Event& event) {
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
const FinalState& cfs = apply<FinalState>(event, "CFS");
if (cfs.size() < 2) vetoEvent;
_sumWTrack2->fill();
// Thrusts
const Thrust& thrust = apply<Thrust>(event, "Thrust");
_hist1MinusT[_isqrts]->fill(1-thrust.thrust());
_histTMajor[_isqrts]->fill(thrust.thrustMajor());
_histTMinor[_isqrts]->fill(thrust.thrustMinor());
_histOblateness[_isqrts]->fill(thrust.oblateness());
for (int n = 1; n <= 5; ++n) {
_hist1MinusTMom[_isqrts]->fill(n, pow(1-thrust.thrust(), n));
_histTMajorMom[_isqrts]->fill(n, pow(thrust.thrustMajor(), n));
_histTMinorMom[_isqrts]->fill(n, pow(thrust.thrustMinor(), n));
_histOblatenessMom[_isqrts]->fill(n, pow(thrust.oblateness(), n));
}
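// Each moment histogram accumulates pow(obs, n) as the weight in bin n, so
// after the 1/sumW scaling in finalize() bin n holds (up to bin width) the
// n-th moment <obs^n>.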
// Jets
const FastJets& durjet = apply<FastJets>(event, "DurhamJets");
if (durjet.clusterSeq()) {
_sumWJet3->fill();
const double y23 = durjet.clusterSeq()->exclusive_ymerge_max(2);
if (y23>0.0) {
_histY23Durham[_isqrts]->fill(y23);
for (int n = 1; n <= 5; ++n) {
_histY23DurhamMom[_isqrts]->fill(n, pow(y23, n));
}
}
}
// Sphericities
const Sphericity& sphericity = apply<Sphericity>(event, "Sphericity");
const double sph = sphericity.sphericity();
const double apl = sphericity.aplanarity();
_histSphericity[_isqrts]->fill(sph);
_histAplanarity[_isqrts]->fill(apl);
for (int n = 1; n <= 5; ++n) {
_histSphericityMom[_isqrts]->fill(n, pow(sph, n));
}
// C & D params
const ParisiTensor& parisi = apply<ParisiTensor>(event, "Parisi");
const double cparam = parisi.C();
const double dparam = parisi.D();
_histCParam[_isqrts]->fill(cparam);
_histDParam[_isqrts]->fill(dparam);
for (int n = 1; n <= 5; ++n) {
_histCParamMom[_isqrts]->fill(n, pow(cparam, n));
}
// Hemispheres
const Hemispheres& hemi = apply<Hemispheres>(event, "Hemispheres");
// The paper says that M_H/L are scaled by sqrt(s), but scaling by E_vis is the way that fits the data...
const double hemi_mh = hemi.scaledMhigh();
const double hemi_ml = hemi.scaledMlow();
/// @todo This shouldn't be necessary... what's going on? Memory corruption suspected :(
// if (std::isnan(hemi_ml)) {
// MSG_ERROR("NaN in HemiL! Event = " << numEvents());
// MSG_ERROR(hemi.M2low() << ", " << hemi.E2vis());
// }
if (!std::isnan(hemi_mh) && !std::isnan(hemi_ml)) {
const double hemi_bmax = hemi.Bmax();
const double hemi_bmin = hemi.Bmin();
const double hemi_bsum = hemi.Bsum();
_histHemiMassH[_isqrts]->fill(hemi_mh);
_histHemiMassL[_isqrts]->fill(hemi_ml);
_histHemiBroadW[_isqrts]->fill(hemi_bmax);
_histHemiBroadN[_isqrts]->fill(hemi_bmin);
_histHemiBroadT[_isqrts]->fill(hemi_bsum);
for (int n = 1; n <= 5; ++n) {
// if (std::isnan(pow(hemi_ml, n))) MSG_ERROR("NaN in HemiL moment! Event = " << numEvents());
_histHemiMassHMom[_isqrts]->fill(n, pow(hemi_mh, n));
_histHemiMassLMom[_isqrts]->fill(n, pow(hemi_ml, n));
_histHemiBroadWMom[_isqrts]->fill(n, pow(hemi_bmax, n));
_histHemiBroadNMom[_isqrts]->fill(n, pow(hemi_bmin, n));
_histHemiBroadTMom[_isqrts]->fill(n, pow(hemi_bsum, n));
}
}
}
void finalize() {
- scale(_hist1MinusT[_isqrts], 1.0/_sumWTrack2);
- scale(_histTMajor[_isqrts], 1.0/_sumWTrack2);
- scale(_histTMinor[_isqrts], 1.0/_sumWTrack2);
- scale(_histOblateness[_isqrts], 1.0/_sumWTrack2);
- scale(_histSphericity[_isqrts], 1.0/_sumWTrack2);
- scale(_histAplanarity[_isqrts], 1.0/_sumWTrack2);
- scale(_histHemiMassH[_isqrts], 1.0/_sumWTrack2);
- scale(_histHemiMassL[_isqrts], 1.0/_sumWTrack2);
- scale(_histHemiBroadW[_isqrts], 1.0/_sumWTrack2);
- scale(_histHemiBroadN[_isqrts], 1.0/_sumWTrack2);
- scale(_histHemiBroadT[_isqrts], 1.0/_sumWTrack2);
- scale(_histCParam[_isqrts], 1.0/_sumWTrack2);
- scale(_histDParam[_isqrts], 1.0/_sumWTrack2);
- scale(_histY23Durham[_isqrts], 1.0/_sumWJet3);
+ scale(_hist1MinusT[_isqrts], 1.0 / *_sumWTrack2);
+ scale(_histTMajor[_isqrts], 1.0 / *_sumWTrack2);
+ scale(_histTMinor[_isqrts], 1.0 / *_sumWTrack2);
+ scale(_histOblateness[_isqrts], 1.0 / *_sumWTrack2);
+ scale(_histSphericity[_isqrts], 1.0 / *_sumWTrack2);
+ scale(_histAplanarity[_isqrts], 1.0 / *_sumWTrack2);
+ scale(_histHemiMassH[_isqrts], 1.0 / *_sumWTrack2);
+ scale(_histHemiMassL[_isqrts], 1.0 / *_sumWTrack2);
+ scale(_histHemiBroadW[_isqrts], 1.0 / *_sumWTrack2);
+ scale(_histHemiBroadN[_isqrts], 1.0 / *_sumWTrack2);
+ scale(_histHemiBroadT[_isqrts], 1.0 / *_sumWTrack2);
+ scale(_histCParam[_isqrts], 1.0 / *_sumWTrack2);
+ scale(_histDParam[_isqrts], 1.0 / *_sumWTrack2);
+ scale(_histY23Durham[_isqrts], 1.0 / *_sumWJet3);
//
- scale(_hist1MinusTMom[_isqrts], 1.0/_sumWTrack2);
- scale(_histTMajorMom[_isqrts], 1.0/_sumWTrack2);
- scale(_histTMinorMom[_isqrts], 1.0/_sumWTrack2);
- scale(_histOblatenessMom[_isqrts], 1.0/_sumWTrack2);
- scale(_histSphericityMom[_isqrts], 1.0/_sumWTrack2);
- scale(_histHemiMassHMom[_isqrts], 1.0/_sumWTrack2);
- scale(_histHemiMassLMom[_isqrts], 1.0/_sumWTrack2);
- scale(_histHemiBroadWMom[_isqrts], 1.0/_sumWTrack2);
- scale(_histHemiBroadNMom[_isqrts], 1.0/_sumWTrack2);
- scale(_histHemiBroadTMom[_isqrts], 1.0/_sumWTrack2);
- scale(_histCParamMom[_isqrts], 1.0/_sumWTrack2);
- scale(_histY23DurhamMom[_isqrts], 1.0/_sumWJet3);
+ scale(_hist1MinusTMom[_isqrts], 1.0 / *_sumWTrack2);
+ scale(_histTMajorMom[_isqrts], 1.0 / *_sumWTrack2);
+ scale(_histTMinorMom[_isqrts], 1.0 / *_sumWTrack2);
+ scale(_histOblatenessMom[_isqrts], 1.0 / *_sumWTrack2);
+ scale(_histSphericityMom[_isqrts], 1.0 / *_sumWTrack2);
+ scale(_histHemiMassHMom[_isqrts], 1.0 / *_sumWTrack2);
+ scale(_histHemiMassLMom[_isqrts], 1.0 / *_sumWTrack2);
+ scale(_histHemiBroadWMom[_isqrts], 1.0 / *_sumWTrack2);
+ scale(_histHemiBroadNMom[_isqrts], 1.0 / *_sumWTrack2);
+ scale(_histHemiBroadTMom[_isqrts], 1.0 / *_sumWTrack2);
+ scale(_histCParamMom[_isqrts], 1.0 / *_sumWTrack2);
+ scale(_histY23DurhamMom[_isqrts], 1.0 / *_sumWJet3);
}
//@}
private:
/// Beam energy index for histograms
int _isqrts;
/// @name Counters of event weights passing the cuts
//@{
CounterPtr _sumWTrack2, _sumWJet3;
//@}
/// @name Event shape histos at 4 energies
//@{
Histo1DPtr _hist1MinusT[4];
Histo1DPtr _histHemiMassH[4];
Histo1DPtr _histCParam[4];
Histo1DPtr _histHemiBroadT[4];
Histo1DPtr _histHemiBroadW[4];
Histo1DPtr _histY23Durham[4];
Histo1DPtr _histTMajor[4];
Histo1DPtr _histTMinor[4];
Histo1DPtr _histAplanarity[4];
Histo1DPtr _histSphericity[4];
Histo1DPtr _histOblateness[4];
Histo1DPtr _histHemiMassL[4];
Histo1DPtr _histHemiBroadN[4];
Histo1DPtr _histDParam[4];
//@}
/// @name Event shape moment histos at 4 energies
//@{
Histo1DPtr _hist1MinusTMom[4];
Histo1DPtr _histHemiMassHMom[4];
Histo1DPtr _histCParamMom[4];
Histo1DPtr _histHemiBroadTMom[4];
Histo1DPtr _histHemiBroadWMom[4];
Histo1DPtr _histY23DurhamMom[4];
Histo1DPtr _histTMajorMom[4];
Histo1DPtr _histTMinorMom[4];
Histo1DPtr _histSphericityMom[4];
Histo1DPtr _histOblatenessMom[4];
Histo1DPtr _histHemiMassLMom[4];
Histo1DPtr _histHemiBroadNMom[4];
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(OPAL_2004_S6132243);
}
diff --git a/analyses/pluginLEP/SLD_1996_S3398250.cc b/analyses/pluginLEP/SLD_1996_S3398250.cc
--- a/analyses/pluginLEP/SLD_1996_S3398250.cc
+++ b/analyses/pluginLEP/SLD_1996_S3398250.cc
@@ -1,139 +1,139 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/Sphericity.hh"
#include "Rivet/Projections/Thrust.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/ParisiTensor.hh"
#include "Rivet/Projections/Hemispheres.hh"
#include "Rivet/Projections/InitialQuarks.hh"
#include <cmath>
namespace Rivet {
/// @brief SLD multiplicities at mZ
/// @author Peter Richardson
class SLD_1996_S3398250 : public Analysis {
public:
/// Constructor
SLD_1996_S3398250()
: Analysis("SLD_1996_S3398250")
{}
/// @name Analysis methods
//@{
void init() {
// Projections
declare(Beam(), "Beams");
declare(ChargedFinalState(), "CFS");
declare(InitialQuarks(), "IQF");
book(_h_bottom ,1, 1, 1);
book(_h_charm ,2, 1, 1);
book(_h_light ,3, 1, 1);
book(_weightLight, "weightLight");
book(_weightCharm, "weightCharm");
book(_weightBottom, "weightBottom");
book(scatter_c, 4,1,1);
book(scatter_b, 5,1,1);
}
void analyze(const Event& event) {
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
const FinalState& cfs = apply<FinalState>(event, "CFS");
if (cfs.size() < 2) vetoEvent;
int flavour = 0;
const InitialQuarks& iqf = apply<InitialQuarks>(event, "IQF");
// If we only have two quarks (qqbar), just take the flavour.
// If we have more than two quarks, look for the most energetic q-qbar pair.
if (iqf.particles().size() == 2) {
flavour = iqf.particles().front().abspid();
}
else {
map<int, double> quarkmap;
foreach (const Particle& p, iqf.particles()) {
if (quarkmap[p.pid()] < p.E()) {
quarkmap[p.pid()] = p.E();
}
}
double maxenergy = 0.;
for (int i = 1; i <= 5; ++i) {
if (quarkmap[i]+quarkmap[-i] > maxenergy) {
maxenergy = quarkmap[i]+quarkmap[-i];
flavour = i;
}
}
}
const size_t numParticles = cfs.particles().size();
switch (flavour) {
case 1: case 2: case 3:
_weightLight ->fill();
_h_light->fillBin(0, numParticles);
break;
case 4:
_weightCharm ->fill();
_h_charm->fillBin(0, numParticles);
break;
case 5:
_weightBottom->fill();
_h_bottom->fillBin(0, numParticles);
break;
}
}
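// Helper: build a one-point scatter from the difference of two single-bin multiplicity
// histograms, combining the two bin uncertainties in quadrature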
void multiplicity_subtract(const Histo1DPtr first, const Histo1DPtr second, Scatter2DPtr & scatter) {
const double x = first->bin(0).xMid();
const double ex = first->bin(0).xWidth()/2.;
const double y = first->bin(0).area() - second->bin(0).area();
const double ey = sqrt(sqr(first->bin(0).areaErr()) + sqr(second->bin(0).areaErr()));
scatter->addPoint(x, y, ex, ey);
}
void finalize() {
- if (_weightBottom != 0) scale(_h_bottom, 1./_weightBottom);
- if (_weightCharm != 0) scale(_h_charm, 1./_weightCharm );
- if (_weightLight != 0) scale(_h_light, 1./_weightLight );
+ if (_weightBottom->val() != 0) scale(_h_bottom, 1./ *_weightBottom);
+ if (_weightCharm->val() != 0) scale(_h_charm, 1./ *_weightCharm );
+ if (_weightLight->val() != 0) scale(_h_light, 1./ *_weightLight );
multiplicity_subtract(_h_charm, _h_light, scatter_c);
multiplicity_subtract(_h_bottom, _h_light, scatter_b);
}
//@}
private:
Scatter2DPtr scatter_c, scatter_b;
/// @name Weights
//@{
CounterPtr _weightLight;
CounterPtr _weightCharm;
CounterPtr _weightBottom;
//@}
Histo1DPtr _h_bottom;
Histo1DPtr _h_charm;
Histo1DPtr _h_light;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(SLD_1996_S3398250);
}
diff --git a/analyses/pluginLEP/SLD_1999_S3743934.cc b/analyses/pluginLEP/SLD_1999_S3743934.cc
--- a/analyses/pluginLEP/SLD_1999_S3743934.cc
+++ b/analyses/pluginLEP/SLD_1999_S3743934.cc
@@ -1,736 +1,736 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/InitialQuarks.hh"
#include "Rivet/Projections/Thrust.hh"
namespace Rivet {
/// @brief SLD flavour-dependent fragmentation paper
/// @author Peter Richardson
class SLD_1999_S3743934 : public Analysis {
public:
/// Constructor
SLD_1999_S3743934()
: Analysis("SLD_1999_S3743934"),
_multPiPlus(4),_multKPlus(4),_multK0(4),
_multKStar0(4),_multPhi(4),
_multProton(4),_multLambda(4)
{ }
/// @name Analysis methods
//@{
void analyze(const Event& e) {
// First, veto on leptonic events by requiring at least 2 charged FS particles
const FinalState& fs = apply<FinalState>(e, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) {
MSG_DEBUG("Failed ncharged cut");
vetoEvent;
}
MSG_DEBUG("Passed ncharged cut");
// Get beams and average beam momentum
const ParticlePair& beams = apply<Beam>(e, "Beams").beams();
const double meanBeamMom = ( beams.first.p3().mod() +
beams.second.p3().mod() ) / 2.0;
MSG_DEBUG("Avg beam momentum = " << meanBeamMom);
int flavour = 0;
const InitialQuarks& iqf = apply<InitialQuarks>(e, "IQF");
// If we only have two quarks (qqbar), just take the flavour.
// If we have more than two quarks, look for the most energetic q-qbar pair.
/// @todo Can we make this based on hadron flavour instead?
Particles quarks;
if (iqf.particles().size() == 2) {
flavour = iqf.particles().front().abspid();
quarks = iqf.particles();
} else {
map<int, Particle > quarkmap;
foreach (const Particle& p, iqf.particles()) {
if (quarkmap.find(p.pid()) == quarkmap.end()) quarkmap[p.pid()] = p;
else if (quarkmap[p.pid()].E() < p.E()) quarkmap[p.pid()] = p;
}
double maxenergy = 0.;
for (int i = 1; i <= 5; ++i) {
double energy(0.);
if (quarkmap.find( i) != quarkmap.end())
energy += quarkmap[ i].E();
if (quarkmap.find(-i) != quarkmap.end())
energy += quarkmap[-i].E();
if (energy > maxenergy) {
maxenergy = energy;
flavour = i;
}
}
if (quarkmap.find(flavour) != quarkmap.end())
quarks.push_back(quarkmap[flavour]);
if (quarkmap.find(-flavour) != quarkmap.end())
quarks.push_back(quarkmap[-flavour]);
}
switch (flavour) {
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_SumOfudsWeights->fill();
break;
case PID::CQUARK:
_SumOfcWeights->fill();
break;
case PID::BQUARK:
_SumOfbWeights->fill();
break;
}
// thrust axis for projections
Vector3 axis = apply<Thrust>(e, "Thrust").thrustAxis();
double dot(0.);
if (!quarks.empty()) {
dot = quarks[0].p3().dot(axis);
if (quarks[0].pid() < 0) dot *= -1;
}
foreach (const Particle& p, fs.particles()) {
const double xp = p.p3().mod()/meanBeamMom;
// if in quark or antiquark hemisphere
bool quark = p.p3().dot(axis)*dot > 0.;
_h_XpChargedN->fill(xp);
_temp_XpChargedN1->fill(xp);
_temp_XpChargedN2->fill(xp);
_temp_XpChargedN3->fill(xp);
int id = p.abspid();
// charged pions
if (id == PID::PIPLUS) {
_h_XpPiPlusN->fill(xp);
_multPiPlus[0]->fill();
switch (flavour) {
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_multPiPlus[1]->fill();
_h_XpPiPlusLight->fill(xp);
if( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 ))
_h_RPiPlus->fill(xp);
else
_h_RPiMinus->fill(xp);
break;
case PID::CQUARK:
_multPiPlus[2]->fill();
_h_XpPiPlusCharm->fill(xp);
break;
case PID::BQUARK:
_multPiPlus[3]->fill();
_h_XpPiPlusBottom->fill(xp);
break;
}
}
else if (id == PID::KPLUS) {
_h_XpKPlusN->fill(xp);
_multKPlus[0]->fill();
switch (flavour) {
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_multKPlus[1]->fill();
_temp_XpKPlusLight->fill(xp);
_h_XpKPlusLight->fill(xp);
if( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 ))
_h_RKPlus->fill(xp);
else
_h_RKMinus->fill(xp);
break;
case PID::CQUARK:
_multKPlus[2]->fill();
_h_XpKPlusCharm->fill(xp);
_temp_XpKPlusCharm->fill(xp);
break;
case PID::BQUARK:
_multKPlus[3]->fill();
_h_XpKPlusBottom->fill(xp);
break;
}
}
else if (id == PID::PROTON) {
_h_XpProtonN->fill(xp);
_multProton[0]->fill();
switch (flavour) {
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_multProton[1]->fill();
_temp_XpProtonLight->fill(xp);
_h_XpProtonLight->fill(xp);
if( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 ))
_h_RProton->fill(xp);
else
_h_RPBar ->fill(xp);
break;
case PID::CQUARK:
_multProton[2]->fill();
_temp_XpProtonCharm->fill(xp);
_h_XpProtonCharm->fill(xp);
break;
case PID::BQUARK:
_multProton[3]->fill();
_h_XpProtonBottom->fill(xp);
break;
}
}
}
const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
foreach (const Particle& p, ufs.particles()) {
const double xp = p.p3().mod()/meanBeamMom;
// if in quark or antiquark hemisphere
bool quark = p.p3().dot(axis)*dot>0.;
int id = p.abspid();
if (id == PID::LAMBDA) {
_multLambda[0]->fill();
_h_XpLambdaN->fill(xp);
switch (flavour) {
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_multLambda[1]->fill();
_h_XpLambdaLight->fill(xp);
if( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 ))
_h_RLambda->fill(xp);
else
_h_RLBar ->fill(xp);
break;
case PID::CQUARK:
_multLambda[2]->fill();
_h_XpLambdaCharm->fill(xp);
break;
case PID::BQUARK:
_multLambda[3]->fill();
_h_XpLambdaBottom->fill(xp);
break;
}
}
else if (id == 313) {
_multKStar0[0]->fill();
_h_XpKStar0N->fill(xp);
switch (flavour) {
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_multKStar0[1]->fill();
_temp_XpKStar0Light->fill(xp);
_h_XpKStar0Light->fill(xp);
if ( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 ))
_h_RKS0 ->fill(xp);
else
_h_RKSBar0->fill(xp);
break;
case PID::CQUARK:
_multKStar0[2]->fill();
_temp_XpKStar0Charm->fill(xp);
_h_XpKStar0Charm->fill(xp);
break;
case PID::BQUARK:
_multKStar0[3]->fill();
_h_XpKStar0Bottom->fill(xp);
break;
}
}
else if (id == 333) {
_multPhi[0]->fill();
_h_XpPhiN->fill(xp);
switch (flavour) {
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_multPhi[1]->fill();
_h_XpPhiLight->fill(xp);
break;
case PID::CQUARK:
_multPhi[2]->fill();
_h_XpPhiCharm->fill(xp);
break;
case PID::BQUARK:
_multPhi[3]->fill();
_h_XpPhiBottom->fill(xp);
break;
}
}
else if (id == PID::K0S || id == PID::K0L) {
_multK0[0]->fill();
_h_XpK0N->fill(xp);
switch (flavour) {
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_multK0[1]->fill();
_h_XpK0Light->fill(xp);
break;
case PID::CQUARK:
_multK0[2]->fill();
_h_XpK0Charm->fill(xp);
break;
case PID::BQUARK:
_multK0[3]->fill();
_h_XpK0Bottom->fill(xp);
break;
}
}
}
}
void init() {
// Projections
declare(Beam(), "Beams");
declare(ChargedFinalState(), "FS");
declare(UnstableFinalState(), "UFS");
declare(InitialQuarks(), "IQF");
declare(Thrust(FinalState()), "Thrust");
book(_temp_XpChargedN1 ,"TMP/XpChargedN1", refData( 1, 1, 1));
book(_temp_XpChargedN2 ,"TMP/XpChargedN2", refData( 2, 1, 1));
book(_temp_XpChargedN3 ,"TMP/XpChargedN3", refData( 3, 1, 1));
book(_h_XpPiPlusN , 1, 1, 2);
book(_h_XpKPlusN , 2, 1, 2);
book(_h_XpProtonN , 3, 1, 2);
book(_h_XpChargedN , 4, 1, 1);
book(_h_XpK0N , 5, 1, 1);
book(_h_XpLambdaN , 7, 1, 1);
book(_h_XpKStar0N , 8, 1, 1);
book(_h_XpPhiN , 9, 1, 1);
book(_h_XpPiPlusLight ,10, 1, 1);
book(_h_XpPiPlusCharm ,10, 1, 2);
book(_h_XpPiPlusBottom ,10, 1, 3);
book(_h_XpKPlusLight ,12, 1, 1);
book(_h_XpKPlusCharm ,12, 1, 2);
book(_h_XpKPlusBottom ,12, 1, 3);
book(_h_XpKStar0Light ,14, 1, 1);
book(_h_XpKStar0Charm ,14, 1, 2);
book(_h_XpKStar0Bottom ,14, 1, 3);
book(_h_XpProtonLight ,16, 1, 1);
book(_h_XpProtonCharm ,16, 1, 2);
book(_h_XpProtonBottom ,16, 1, 3);
book(_h_XpLambdaLight ,18, 1, 1);
book(_h_XpLambdaCharm ,18, 1, 2);
book(_h_XpLambdaBottom ,18, 1, 3);
book(_h_XpK0Light ,20, 1, 1);
book(_h_XpK0Charm ,20, 1, 2);
book(_h_XpK0Bottom ,20, 1, 3);
book(_h_XpPhiLight ,22, 1, 1);
book(_h_XpPhiCharm ,22, 1, 2);
book(_h_XpPhiBottom ,22, 1, 3);
book(_temp_XpKPlusCharm ,"TMP/XpKPlusCharm", refData(13, 1, 1));
book(_temp_XpKPlusLight ,"TMP/XpKPlusLight", refData(13, 1, 1));
book(_temp_XpKStar0Charm ,"TMP/XpKStar0Charm", refData(15, 1, 1));
book(_temp_XpKStar0Light ,"TMP/XpKStar0Light", refData(15, 1, 1));
book(_temp_XpProtonCharm ,"TMP/XpProtonCharm", refData(17, 1, 1));
book(_temp_XpProtonLight ,"TMP/XpProtonLight", refData(17, 1, 1));
book(_h_RPiPlus , 26, 1, 1);
book(_h_RPiMinus , 26, 1, 2);
book(_h_RKS0 , 28, 1, 1);
book(_h_RKSBar0 , 28, 1, 2);
book(_h_RKPlus , 30, 1, 1);
book(_h_RKMinus , 30, 1, 2);
book(_h_RProton , 32, 1, 1);
book(_h_RPBar , 32, 1, 2);
book(_h_RLambda , 34, 1, 1);
book(_h_RLBar , 34, 1, 2);
book(_s_Xp_PiPl_Ch , 1, 1, 1);
book(_s_Xp_KPl_Ch , 2, 1, 1);
book(_s_Xp_Pr_Ch , 3, 1, 1);
book(_s_Xp_PiPlCh_PiPlLi, 11, 1, 1);
book(_s_Xp_PiPlBo_PiPlLi, 11, 1, 2);
book(_s_Xp_KPlCh_KPlLi , 13, 1, 1);
book(_s_Xp_KPlBo_KPlLi , 13, 1, 2);
book(_s_Xp_KS0Ch_KS0Li , 15, 1, 1);
book(_s_Xp_KS0Bo_KS0Li , 15, 1, 2);
book(_s_Xp_PrCh_PrLi , 17, 1, 1);
book(_s_Xp_PrBo_PrLi , 17, 1, 2);
book(_s_Xp_LaCh_LaLi , 19, 1, 1);
book(_s_Xp_LaBo_LaLi , 19, 1, 2);
book(_s_Xp_K0Ch_K0Li , 21, 1, 1);
book(_s_Xp_K0Bo_K0Li , 21, 1, 2);
book(_s_Xp_PhiCh_PhiLi , 23, 1, 1);
book(_s_Xp_PhiBo_PhiLi , 23, 1, 2);
book(_s_PiM_PiP , 27, 1, 1);
book(_s_KSBar0_KS0, 29, 1, 1);
book(_s_KM_KP , 31, 1, 1);
book(_s_Pr_PBar , 33, 1, 1);
book(_s_Lam_LBar , 35, 1, 1);
book(_SumOfudsWeights, "SumOfudsWeights");
book(_SumOfcWeights, "SumOfcWeights");
book(_SumOfbWeights, "SumOfbWeights");
for (size_t i = 0; i < 4; ++i) {
book(_multPiPlus[i], "multPiPlus_"+to_str(i));
book(_multKPlus[i], "multKPlus_"+to_str(i));
book(_multK0[i], "multK0_"+to_str(i));
book(_multKStar0[i], "multKStar0_"+to_str(i));
book(_multPhi[i], "multPhi_"+to_str(i));
book(_multProton[i], "multProton_"+to_str(i));
book(_multLambda[i], "multLambda_"+to_str(i));
}
book(tmp1, 24, 1, 1, true);
book(tmp2, 24, 1, 2, true);
book(tmp3, 24, 1, 3, true);
book(tmp4, 24, 1, 4, true);
book(tmp5, 25, 1, 1, true);
book(tmp6, 25, 1, 2, true);
book(tmp7, 24, 2, 1, true);
book(tmp8, 24, 2, 2, true);
book(tmp9, 24, 2, 3, true);
book(tmp10, 24, 2, 4, true);
book(tmp11, 25, 2, 1, true);
book(tmp12, 25, 2, 2, true);
book(tmp13, 24, 3, 1, true);
book(tmp14, 24, 3, 2, true);
book(tmp15, 24, 3, 3, true);
book(tmp16, 24, 3, 4, true);
book(tmp17, 25, 3, 1, true);
book(tmp18, 25, 3, 2, true);
book(tmp19, 24, 4, 1, true);
book(tmp20, 24, 4, 2, true);
book(tmp21, 24, 4, 3, true);
book(tmp22, 24, 4, 4, true);
book(tmp23, 25, 4, 1, true);
book(tmp24, 25, 4, 2, true);
book(tmp25, 24, 5, 1, true);
book(tmp26, 24, 5, 2, true);
book(tmp27, 24, 5, 3, true);
book(tmp28, 24, 5, 4, true);
book(tmp29, 25, 5, 1, true);
book(tmp30, 25, 5, 2, true);
book(tmp31, 24, 6, 1, true);
book(tmp32, 24, 6, 2, true);
book(tmp33, 24, 6, 3, true);
book(tmp34, 24, 6, 4, true);
book(tmp35, 25, 6, 1, true);
book(tmp36, 25, 6, 2, true);
book(tmp37, 24, 7, 1, true);
book(tmp38, 24, 7, 2, true);
book(tmp39, 24, 7, 3, true);
book(tmp40, 24, 7, 4, true);
book(tmp41, 25, 7, 1, true);
book(tmp42, 25, 7, 2, true);
}
/// Finalize
void finalize() {
// Get the ratio plots sorted out first
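// divide() fills each pre-booked scatter with the bin-by-bin ratio of the two histograms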
divide(_h_XpPiPlusN, _temp_XpChargedN1, _s_Xp_PiPl_Ch);
divide(_h_XpKPlusN, _temp_XpChargedN2, _s_Xp_KPl_Ch);
divide(_h_XpProtonN, _temp_XpChargedN3, _s_Xp_Pr_Ch);
divide(_h_XpPiPlusCharm, _h_XpPiPlusLight, _s_Xp_PiPlCh_PiPlLi);
- _s_Xp_PiPlCh_PiPlLi->scale(1.,_SumOfudsWeights/_SumOfcWeights);
+ _s_Xp_PiPlCh_PiPlLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfcWeights));
divide(_h_XpPiPlusBottom, _h_XpPiPlusLight, _s_Xp_PiPlBo_PiPlLi);
- _s_Xp_PiPlBo_PiPlLi->scale(1.,_SumOfudsWeights/_SumOfbWeights);
+ _s_Xp_PiPlBo_PiPlLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfbWeights));
divide(_temp_XpKPlusCharm , _temp_XpKPlusLight, _s_Xp_KPlCh_KPlLi);
- _s_Xp_KPlCh_KPlLi->scale(1.,_SumOfudsWeights/_SumOfcWeights);
+ _s_Xp_KPlCh_KPlLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfcWeights));
divide(_h_XpKPlusBottom, _h_XpKPlusLight, _s_Xp_KPlBo_KPlLi);
- _s_Xp_KPlBo_KPlLi->scale(1.,_SumOfudsWeights/_SumOfbWeights);
+ _s_Xp_KPlBo_KPlLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfbWeights));
divide(_temp_XpKStar0Charm, _temp_XpKStar0Light, _s_Xp_KS0Ch_KS0Li);
- _s_Xp_KS0Ch_KS0Li->scale(1.,_SumOfudsWeights/_SumOfcWeights);
+ _s_Xp_KS0Ch_KS0Li->scale(1., dbl(*_SumOfudsWeights / *_SumOfcWeights));
divide(_h_XpKStar0Bottom, _h_XpKStar0Light, _s_Xp_KS0Bo_KS0Li);
- _s_Xp_KS0Bo_KS0Li->scale(1.,_SumOfudsWeights/_SumOfbWeights);
+ _s_Xp_KS0Bo_KS0Li->scale(1., dbl(*_SumOfudsWeights / *_SumOfbWeights));
divide(_temp_XpProtonCharm, _temp_XpProtonLight, _s_Xp_PrCh_PrLi);
- _s_Xp_PrCh_PrLi->scale(1.,_SumOfudsWeights/_SumOfcWeights);
+ _s_Xp_PrCh_PrLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfcWeights));
divide(_h_XpProtonBottom, _h_XpProtonLight, _s_Xp_PrBo_PrLi);
- _s_Xp_PrBo_PrLi->scale(1.,_SumOfudsWeights/_SumOfbWeights);
+ _s_Xp_PrBo_PrLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfbWeights));
divide(_h_XpLambdaCharm, _h_XpLambdaLight, _s_Xp_LaCh_LaLi);
- _s_Xp_LaCh_LaLi->scale(1.,_SumOfudsWeights/_SumOfcWeights);
+ _s_Xp_LaCh_LaLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfcWeights));
divide(_h_XpLambdaBottom, _h_XpLambdaLight, _s_Xp_LaBo_LaLi);
- _s_Xp_LaBo_LaLi->scale(1.,_SumOfudsWeights/_SumOfbWeights);
+ _s_Xp_LaBo_LaLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfbWeights));
divide(_h_XpK0Charm, _h_XpK0Light, _s_Xp_K0Ch_K0Li);
- _s_Xp_K0Ch_K0Li->scale(1.,_SumOfudsWeights/_SumOfcWeights);
+ _s_Xp_K0Ch_K0Li->scale(1., dbl(*_SumOfudsWeights / *_SumOfcWeights));
divide(_h_XpK0Bottom, _h_XpK0Light, _s_Xp_K0Bo_K0Li);
- _s_Xp_K0Bo_K0Li->scale(1.,_SumOfudsWeights/_SumOfbWeights);
+ _s_Xp_K0Bo_K0Li->scale(1., dbl(*_SumOfudsWeights / *_SumOfbWeights));
divide(_h_XpPhiCharm, _h_XpPhiLight, _s_Xp_PhiCh_PhiLi);
- _s_Xp_PhiCh_PhiLi->scale(1.,_SumOfudsWeights/_SumOfcWeights);
+ _s_Xp_PhiCh_PhiLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfcWeights));
divide(_h_XpPhiBottom, _h_XpPhiLight, _s_Xp_PhiBo_PhiLi);
- _s_Xp_PhiBo_PhiLi->scale(1.,_SumOfudsWeights/_SumOfbWeights);
+ _s_Xp_PhiBo_PhiLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfbWeights));
// Then the leading particles
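// Normalised differences (A - B)/(A + B) of the hemisphere-tagged R histograms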
divide(*_h_RPiMinus - *_h_RPiPlus, *_h_RPiMinus + *_h_RPiPlus, _s_PiM_PiP);
divide(*_h_RKSBar0 - *_h_RKS0, *_h_RKSBar0 + *_h_RKS0, _s_KSBar0_KS0);
divide(*_h_RKMinus - *_h_RKPlus, *_h_RKMinus + *_h_RKPlus, _s_KM_KP);
divide(*_h_RProton - *_h_RPBar, *_h_RProton + *_h_RPBar, _s_Pr_PBar);
divide(*_h_RLambda - *_h_RLBar, *_h_RLambda + *_h_RLBar, _s_Lam_LBar);
// Then the rest
scale(_h_XpPiPlusN, 1/sumOfWeights());
scale(_h_XpKPlusN, 1/sumOfWeights());
scale(_h_XpProtonN, 1/sumOfWeights());
scale(_h_XpChargedN, 1/sumOfWeights());
scale(_h_XpK0N, 1/sumOfWeights());
scale(_h_XpLambdaN, 1/sumOfWeights());
scale(_h_XpKStar0N, 1/sumOfWeights());
scale(_h_XpPhiN, 1/sumOfWeights());
- scale(_h_XpPiPlusLight, 1/_SumOfudsWeights);
- scale(_h_XpPiPlusCharm, 1/_SumOfcWeights);
- scale(_h_XpPiPlusBottom, 1/_SumOfbWeights);
- scale(_h_XpKPlusLight, 1/_SumOfudsWeights);
- scale(_h_XpKPlusCharm, 1/_SumOfcWeights);
- scale(_h_XpKPlusBottom, 1/_SumOfbWeights);
- scale(_h_XpKStar0Light, 1/_SumOfudsWeights);
- scale(_h_XpKStar0Charm, 1/_SumOfcWeights);
- scale(_h_XpKStar0Bottom, 1/_SumOfbWeights);
- scale(_h_XpProtonLight, 1/_SumOfudsWeights);
- scale(_h_XpProtonCharm, 1/_SumOfcWeights);
- scale(_h_XpProtonBottom, 1/_SumOfbWeights);
- scale(_h_XpLambdaLight, 1/_SumOfudsWeights);
- scale(_h_XpLambdaCharm, 1/_SumOfcWeights);
- scale(_h_XpLambdaBottom, 1/_SumOfbWeights);
- scale(_h_XpK0Light, 1/_SumOfudsWeights);
- scale(_h_XpK0Charm, 1/_SumOfcWeights);
- scale(_h_XpK0Bottom, 1/_SumOfbWeights);
- scale(_h_XpPhiLight, 1/_SumOfudsWeights);
- scale(_h_XpPhiCharm , 1/_SumOfcWeights);
- scale(_h_XpPhiBottom, 1/_SumOfbWeights);
- scale(_h_RPiPlus, 1/_SumOfudsWeights);
- scale(_h_RPiMinus, 1/_SumOfudsWeights);
- scale(_h_RKS0, 1/_SumOfudsWeights);
- scale(_h_RKSBar0, 1/_SumOfudsWeights);
- scale(_h_RKPlus, 1/_SumOfudsWeights);
- scale(_h_RKMinus, 1/_SumOfudsWeights);
- scale(_h_RProton, 1/_SumOfudsWeights);
- scale(_h_RPBar, 1/_SumOfudsWeights);
- scale(_h_RLambda, 1/_SumOfudsWeights);
- scale(_h_RLBar, 1/_SumOfudsWeights);
+ scale(_h_XpPiPlusLight, 1 / *_SumOfudsWeights);
+ scale(_h_XpPiPlusCharm, 1 / *_SumOfcWeights);
+ scale(_h_XpPiPlusBottom, 1 / *_SumOfbWeights);
+ scale(_h_XpKPlusLight, 1 / *_SumOfudsWeights);
+ scale(_h_XpKPlusCharm, 1 / *_SumOfcWeights);
+ scale(_h_XpKPlusBottom, 1 / *_SumOfbWeights);
+ scale(_h_XpKStar0Light, 1 / *_SumOfudsWeights);
+ scale(_h_XpKStar0Charm, 1 / *_SumOfcWeights);
+ scale(_h_XpKStar0Bottom, 1 / *_SumOfbWeights);
+ scale(_h_XpProtonLight, 1 / *_SumOfudsWeights);
+ scale(_h_XpProtonCharm, 1 / *_SumOfcWeights);
+ scale(_h_XpProtonBottom, 1 / *_SumOfbWeights);
+ scale(_h_XpLambdaLight, 1 / *_SumOfudsWeights);
+ scale(_h_XpLambdaCharm, 1 / *_SumOfcWeights);
+ scale(_h_XpLambdaBottom, 1 / *_SumOfbWeights);
+ scale(_h_XpK0Light, 1 / *_SumOfudsWeights);
+ scale(_h_XpK0Charm, 1 / *_SumOfcWeights);
+ scale(_h_XpK0Bottom, 1 / *_SumOfbWeights);
+ scale(_h_XpPhiLight, 1 / *_SumOfudsWeights);
+ scale(_h_XpPhiCharm , 1 / *_SumOfcWeights);
+ scale(_h_XpPhiBottom, 1 / *_SumOfbWeights);
+ scale(_h_RPiPlus, 1 / *_SumOfudsWeights);
+ scale(_h_RPiMinus, 1 / *_SumOfudsWeights);
+ scale(_h_RKS0, 1 / *_SumOfudsWeights);
+ scale(_h_RKSBar0, 1 / *_SumOfudsWeights);
+ scale(_h_RKPlus, 1 / *_SumOfudsWeights);
+ scale(_h_RKMinus, 1 / *_SumOfudsWeights);
+ scale(_h_RProton, 1 / *_SumOfudsWeights);
+ scale(_h_RPBar, 1 / *_SumOfudsWeights);
+ scale(_h_RLambda, 1 / *_SumOfudsWeights);
+ scale(_h_RLBar, 1 / *_SumOfudsWeights);
// Multiplicities
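// Mean multiplicity per flavour class = multiplicity counter / corresponding flavour weight sum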
double avgNumPartsAll, avgNumPartsLight, avgNumPartsCharm, avgNumPartsBottom;
// pi+/-
// all
- avgNumPartsAll = double(_multPiPlus[0])/sumOfWeights();
+ avgNumPartsAll = dbl(*_multPiPlus[0])/sumOfWeights();
tmp1->point(0).setY(avgNumPartsAll);
// light
- avgNumPartsLight = _multPiPlus[1]/_SumOfudsWeights;
+ avgNumPartsLight = dbl(*_multPiPlus[1] / *_SumOfudsWeights);
tmp2->point(0).setY(avgNumPartsLight);
// charm
- avgNumPartsCharm = _multPiPlus[2]/_SumOfcWeights;
+ avgNumPartsCharm = dbl(*_multPiPlus[2] / *_SumOfcWeights);
tmp3->point(0).setY(avgNumPartsCharm);
// bottom
- avgNumPartsBottom = _multPiPlus[3]/_SumOfbWeights;
+ avgNumPartsBottom = dbl(*_multPiPlus[3] / *_SumOfbWeights);
tmp4->point(0).setY(avgNumPartsBottom);
// charm-light
tmp5->point(0).setY(avgNumPartsCharm - avgNumPartsLight);
// bottom-light
tmp6->point(0).setY(avgNumPartsBottom - avgNumPartsLight);
// K+/-
// all
- avgNumPartsAll = double(_multKPlus[0])/sumOfWeights();
+ avgNumPartsAll = dbl(*_multKPlus[0])/sumOfWeights();
tmp7->point(0).setY(avgNumPartsAll);
// light
- avgNumPartsLight = _multKPlus[1]/_SumOfudsWeights;
+ avgNumPartsLight = dbl(*_multKPlus[1] / *_SumOfudsWeights);
tmp8->point(0).setY(avgNumPartsLight);
// charm
- avgNumPartsCharm = _multKPlus[2]/_SumOfcWeights;
+ avgNumPartsCharm = dbl(*_multKPlus[2] / *_SumOfcWeights);
tmp9->point(0).setY(avgNumPartsCharm);
// bottom
- avgNumPartsBottom = _multKPlus[3]/_SumOfbWeights;
+ avgNumPartsBottom = dbl(*_multKPlus[3] / *_SumOfbWeights);
tmp10->point(0).setY(avgNumPartsBottom);
// charm-light
tmp11->point(0).setY(avgNumPartsCharm - avgNumPartsLight);
// bottom-light
tmp12->point(0).setY(avgNumPartsBottom - avgNumPartsLight);
// K0
// all
- avgNumPartsAll = double(_multK0[0])/sumOfWeights();
+ avgNumPartsAll = dbl(*_multK0[0])/sumOfWeights();
tmp13->point(0).setY(avgNumPartsAll);
// light
- avgNumPartsLight = _multK0[1]/_SumOfudsWeights;
+ avgNumPartsLight = dbl(*_multK0[1] / *_SumOfudsWeights);
tmp14->point(0).setY(avgNumPartsLight);
// charm
- avgNumPartsCharm = _multK0[2]/_SumOfcWeights;
+ avgNumPartsCharm = dbl(*_multK0[2] / *_SumOfcWeights);
tmp15->point(0).setY(avgNumPartsCharm);
// bottom
- avgNumPartsBottom = _multK0[3]/_SumOfbWeights;
+ avgNumPartsBottom = dbl(*_multK0[3] / *_SumOfbWeights);
tmp16->point(0).setY(avgNumPartsBottom);
// charm-light
tmp17->point(0).setY(avgNumPartsCharm - avgNumPartsLight);
// bottom-light
tmp18->point(0).setY(avgNumPartsBottom - avgNumPartsLight);
// K*0
// all
- avgNumPartsAll = double(_multKStar0[0])/sumOfWeights();
+ avgNumPartsAll = dbl(*_multKStar0[0])/sumOfWeights();
tmp19->point(0).setY(avgNumPartsAll);
// light
- avgNumPartsLight = _multKStar0[1]/_SumOfudsWeights;
+ avgNumPartsLight = dbl(*_multKStar0[1] / *_SumOfudsWeights);
tmp20->point(0).setY(avgNumPartsLight);
// charm
- avgNumPartsCharm = _multKStar0[2]/_SumOfcWeights;
+ avgNumPartsCharm = dbl(*_multKStar0[2] / *_SumOfcWeights);
tmp21->point(0).setY(avgNumPartsCharm);
// bottom
- avgNumPartsBottom = _multKStar0[3]/_SumOfbWeights;
+ avgNumPartsBottom = dbl(*_multKStar0[3] / *_SumOfbWeights);
tmp22->point(0).setY(avgNumPartsBottom);
// charm-light
tmp23->point(0).setY(avgNumPartsCharm - avgNumPartsLight);
// bottom-light
tmp24->point(0).setY(avgNumPartsBottom - avgNumPartsLight);
// phi
// all
- avgNumPartsAll = double(_multPhi[0])/sumOfWeights();
+ avgNumPartsAll = dbl(*_multPhi[0])/sumOfWeights();
tmp25->point(0).setY(avgNumPartsAll);
// light
- avgNumPartsLight = _multPhi[1]/_SumOfudsWeights;
+ avgNumPartsLight = dbl(*_multPhi[1] / *_SumOfudsWeights);
tmp26->point(0).setY(avgNumPartsLight);
// charm
- avgNumPartsCharm = _multPhi[2]/_SumOfcWeights;
+ avgNumPartsCharm = dbl(*_multPhi[2] / *_SumOfcWeights);
tmp27->point(0).setY(avgNumPartsCharm);
// bottom
- avgNumPartsBottom = _multPhi[3]/_SumOfbWeights;
+ avgNumPartsBottom = dbl(*_multPhi[3] / *_SumOfbWeights);
tmp28->point(0).setY(avgNumPartsBottom);
// charm-light
tmp29->point(0).setY(avgNumPartsCharm - avgNumPartsLight);
// bottom-light
tmp30->point(0).setY(avgNumPartsBottom - avgNumPartsLight);
// p
// all
- avgNumPartsAll = double(_multProton[0])/sumOfWeights();
+ avgNumPartsAll = dbl(*_multProton[0])/sumOfWeights();
tmp31->point(0).setY(avgNumPartsAll);
// light
- avgNumPartsLight = _multProton[1]/_SumOfudsWeights;
+ avgNumPartsLight = dbl(*_multProton[1] / *_SumOfudsWeights);
tmp32->point(0).setY(avgNumPartsLight);
// charm
- avgNumPartsCharm = _multProton[2]/_SumOfcWeights;
+ avgNumPartsCharm = dbl(*_multProton[2] / *_SumOfcWeights);
tmp33->point(0).setY(avgNumPartsCharm);
// bottom
- avgNumPartsBottom = _multProton[3]/_SumOfbWeights;
+ avgNumPartsBottom = dbl(*_multProton[3] / *_SumOfbWeights);
tmp34->point(0).setY(avgNumPartsBottom);
// charm-light
tmp35->point(0).setY(avgNumPartsCharm - avgNumPartsLight);
// bottom-light
tmp36->point(0).setY(avgNumPartsBottom - avgNumPartsLight);
// Lambda
// all
- avgNumPartsAll = double(_multLambda[0])/sumOfWeights();
+ avgNumPartsAll = dbl(*_multLambda[0])/sumOfWeights();
tmp37->point(0).setY(avgNumPartsAll);
// light
- avgNumPartsLight = _multLambda[1]/_SumOfudsWeights;
+ avgNumPartsLight = dbl(*_multLambda[1] / *_SumOfudsWeights);
tmp38->point(0).setY(avgNumPartsLight);
// charm
- avgNumPartsCharm = _multLambda[2]/_SumOfcWeights;
+ avgNumPartsCharm = dbl(*_multLambda[2] / *_SumOfcWeights);
tmp39->point(0).setY(avgNumPartsCharm);
// bottom
- avgNumPartsBottom = _multLambda[3]/_SumOfbWeights;
+ avgNumPartsBottom = dbl(*_multLambda[3] / *_SumOfbWeights);
tmp40->point(0).setY(avgNumPartsBottom);
// charm-light
tmp41->point(0).setY(avgNumPartsCharm - avgNumPartsLight);
// bottom-light
tmp42->point(0).setY(avgNumPartsBottom - avgNumPartsLight);
}
//@}
private:
/// Store the weighted sums of numbers of charged / charged+neutral
/// particles. Used to calculate average number of particles for the
/// inclusive single particle distributions' normalisations.
CounterPtr _SumOfudsWeights, _SumOfcWeights, _SumOfbWeights;
vector<CounterPtr> _multPiPlus, _multKPlus, _multK0,
_multKStar0, _multPhi, _multProton, _multLambda;
Histo1DPtr _h_XpPiPlusSig, _h_XpPiPlusN;
Histo1DPtr _h_XpKPlusSig, _h_XpKPlusN;
Histo1DPtr _h_XpProtonSig, _h_XpProtonN;
Histo1DPtr _h_XpChargedN;
Histo1DPtr _h_XpK0N, _h_XpLambdaN;
Histo1DPtr _h_XpKStar0N, _h_XpPhiN;
Histo1DPtr _h_XpPiPlusLight, _h_XpPiPlusCharm, _h_XpPiPlusBottom;
Histo1DPtr _h_XpKPlusLight, _h_XpKPlusCharm, _h_XpKPlusBottom;
Histo1DPtr _h_XpKStar0Light, _h_XpKStar0Charm, _h_XpKStar0Bottom;
Histo1DPtr _h_XpProtonLight, _h_XpProtonCharm, _h_XpProtonBottom;
Histo1DPtr _h_XpLambdaLight, _h_XpLambdaCharm, _h_XpLambdaBottom;
Histo1DPtr _h_XpK0Light, _h_XpK0Charm, _h_XpK0Bottom;
Histo1DPtr _h_XpPhiLight, _h_XpPhiCharm, _h_XpPhiBottom;
Histo1DPtr _temp_XpChargedN1, _temp_XpChargedN2, _temp_XpChargedN3;
Histo1DPtr _temp_XpKPlusCharm , _temp_XpKPlusLight;
Histo1DPtr _temp_XpKStar0Charm, _temp_XpKStar0Light;
Histo1DPtr _temp_XpProtonCharm, _temp_XpProtonLight;
Histo1DPtr _h_RPiPlus, _h_RPiMinus;
Histo1DPtr _h_RKS0, _h_RKSBar0;
Histo1DPtr _h_RKPlus, _h_RKMinus;
Histo1DPtr _h_RProton, _h_RPBar;
Histo1DPtr _h_RLambda, _h_RLBar;
Scatter2DPtr _s_Xp_PiPl_Ch, _s_Xp_KPl_Ch, _s_Xp_Pr_Ch;
Scatter2DPtr _s_Xp_PiPlCh_PiPlLi, _s_Xp_PiPlBo_PiPlLi;
Scatter2DPtr _s_Xp_KPlCh_KPlLi, _s_Xp_KPlBo_KPlLi;
Scatter2DPtr _s_Xp_KS0Ch_KS0Li, _s_Xp_KS0Bo_KS0Li;
Scatter2DPtr _s_Xp_PrCh_PrLi, _s_Xp_PrBo_PrLi;
Scatter2DPtr _s_Xp_LaCh_LaLi, _s_Xp_LaBo_LaLi;
Scatter2DPtr _s_Xp_K0Ch_K0Li, _s_Xp_K0Bo_K0Li;
Scatter2DPtr _s_Xp_PhiCh_PhiLi, _s_Xp_PhiBo_PhiLi;
Scatter2DPtr _s_PiM_PiP, _s_KSBar0_KS0, _s_KM_KP, _s_Pr_PBar, _s_Lam_LBar;
//@}
Scatter2DPtr tmp1;
Scatter2DPtr tmp2;
Scatter2DPtr tmp3;
Scatter2DPtr tmp4;
Scatter2DPtr tmp5;
Scatter2DPtr tmp6;
Scatter2DPtr tmp7;
Scatter2DPtr tmp8;
Scatter2DPtr tmp9;
Scatter2DPtr tmp10;
Scatter2DPtr tmp11;
Scatter2DPtr tmp12;
Scatter2DPtr tmp13;
Scatter2DPtr tmp14;
Scatter2DPtr tmp15;
Scatter2DPtr tmp16;
Scatter2DPtr tmp17;
Scatter2DPtr tmp18;
Scatter2DPtr tmp19;
Scatter2DPtr tmp20;
Scatter2DPtr tmp21;
Scatter2DPtr tmp22;
Scatter2DPtr tmp23;
Scatter2DPtr tmp24;
Scatter2DPtr tmp25;
Scatter2DPtr tmp26;
Scatter2DPtr tmp27;
Scatter2DPtr tmp28;
Scatter2DPtr tmp29;
Scatter2DPtr tmp30;
Scatter2DPtr tmp31;
Scatter2DPtr tmp32;
Scatter2DPtr tmp33;
Scatter2DPtr tmp34;
Scatter2DPtr tmp35;
Scatter2DPtr tmp36;
Scatter2DPtr tmp37;
Scatter2DPtr tmp38;
Scatter2DPtr tmp39;
Scatter2DPtr tmp40;
Scatter2DPtr tmp41;
Scatter2DPtr tmp42;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(SLD_1999_S3743934);
}
diff --git a/analyses/pluginLEP/SLD_2004_S5693039.cc b/analyses/pluginLEP/SLD_2004_S5693039.cc
--- a/analyses/pluginLEP/SLD_2004_S5693039.cc
+++ b/analyses/pluginLEP/SLD_2004_S5693039.cc
@@ -1,377 +1,377 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/InitialQuarks.hh"
#include "Rivet/Projections/Thrust.hh"
namespace Rivet {
/// @brief SLD flavour-dependent fragmentation paper
/// @author Peter Richardson
class SLD_2004_S5693039 : public Analysis {
public:
/// Constructor
SLD_2004_S5693039() : Analysis("SLD_2004_S5693039")
{}
/// @name Analysis methods
//@{
void analyze(const Event& e) {
// First, veto on leptonic events by requiring at least 2 charged FS particles
const FinalState& fs = apply<FinalState>(e, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) {
MSG_DEBUG("Failed ncharged cut");
vetoEvent;
}
MSG_DEBUG("Passed ncharged cut");
// Get beams and average beam momentum
const ParticlePair& beams = apply<Beam>(e, "Beams").beams();
const double meanBeamMom = ( beams.first.p3().mod() +
beams.second.p3().mod() ) / 2.0;
MSG_DEBUG("Avg beam momentum = " << meanBeamMom);
int flavour = 0;
const InitialQuarks& iqf = apply<InitialQuarks>(e, "IQF");
// If we only have two quarks (qqbar), just take the flavour.
// If we have more than two quarks, look for the most energetic q-qbar pair.
Particles quarks;
if (iqf.particles().size() == 2) {
flavour = iqf.particles().front().abspid();
quarks = iqf.particles();
}
else {
map<int, Particle > quarkmap;
foreach (const Particle& p, iqf.particles()) {
if (quarkmap.find(p.pid())==quarkmap.end())
quarkmap[p.pid()] = p;
else if (quarkmap[p.pid()].E() < p.E())
quarkmap[p.pid()] = p;
}
double maxenergy = 0.;
for (int i = 1; i <= 5; ++i) {
double energy(0.);
if(quarkmap.find( i)!=quarkmap.end())
energy += quarkmap[ i].E();
if(quarkmap.find(-i)!=quarkmap.end())
energy += quarkmap[-i].E();
if (energy > maxenergy) { maxenergy = energy; flavour = i; }
}
if(quarkmap.find( flavour)!=quarkmap.end())
quarks.push_back(quarkmap[ flavour]);
if(quarkmap.find(-flavour)!=quarkmap.end())
quarks.push_back(quarkmap[-flavour]);
}
// total multiplicities
switch (flavour) {
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_weightLight ->fill();
_weightedTotalChargedPartNumLight ->fill(numParticles);
break;
case PID::CQUARK:
_weightCharm ->fill();
_weightedTotalChargedPartNumCharm ->fill(numParticles);
break;
case PID::BQUARK:
_weightBottom ->fill();
_weightedTotalChargedPartNumBottom ->fill(numParticles);
break;
}
// thrust axis for projections
Vector3 axis = apply<Thrust>(e, "Thrust").thrustAxis();
double dot(0.);
if(!quarks.empty()) {
dot = quarks[0].p3().dot(axis);
if(quarks[0].pid()<0) dot *= -1.;
}
// spectra and individual multiplicities
foreach (const Particle& p, fs.particles()) {
double pcm = p.p3().mod();
const double xp = pcm/meanBeamMom;
// if in quark or antiquark hemisphere
bool quark = p.p3().dot(axis)*dot>0.;
_h_PCharged ->fill(pcm );
// all charged
switch (flavour) {
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_h_XpChargedL->fill(xp);
break;
case PID::CQUARK:
_h_XpChargedC->fill(xp);
break;
case PID::BQUARK:
_h_XpChargedB->fill(xp);
break;
}
int id = p.abspid();
// charged pions
if (id == PID::PIPLUS) {
_h_XpPiPlus->fill(xp);
_h_XpPiPlusTotal->fill(xp);
switch (flavour) {
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_h_XpPiPlusL->fill(xp);
_h_NPiPlusL->fill(sqrtS());
if( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 ))
_h_RPiPlus->fill(xp);
else
_h_RPiMinus->fill(xp);
break;
case PID::CQUARK:
_h_XpPiPlusC->fill(xp);
_h_NPiPlusC->fill(sqrtS());
break;
case PID::BQUARK:
_h_XpPiPlusB->fill(xp);
_h_NPiPlusB->fill(sqrtS());
break;
}
}
else if (id == PID::KPLUS) {
_h_XpKPlus->fill(xp);
_h_XpKPlusTotal->fill(xp);
switch (flavour) {
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_h_XpKPlusL->fill(xp);
_h_NKPlusL->fill(sqrtS());
if( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 ))
_h_RKPlus->fill(xp);
else
_h_RKMinus->fill(xp);
break;
case PID::CQUARK:
_h_XpKPlusC->fill(xp);
_h_NKPlusC->fill(sqrtS());
break;
case PID::BQUARK:
_h_XpKPlusB->fill(xp);
_h_NKPlusB->fill(sqrtS());
break;
}
}
else if (id == PID::PROTON) {
_h_XpProton->fill(xp);
_h_XpProtonTotal->fill(xp);
switch (flavour) {
case PID::DQUARK:
case PID::UQUARK:
case PID::SQUARK:
_h_XpProtonL->fill(xp);
_h_NProtonL->fill(sqrtS());
if( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 ))
_h_RProton->fill(xp);
else
_h_RPBar ->fill(xp);
break;
case PID::CQUARK:
_h_XpProtonC->fill(xp);
_h_NProtonC->fill(sqrtS());
break;
case PID::BQUARK:
_h_XpProtonB->fill(xp);
_h_NProtonB->fill(sqrtS());
break;
}
}
}
}
void init() {
// Projections
declare(Beam(), "Beams");
declare(ChargedFinalState(), "FS");
declare(InitialQuarks(), "IQF");
declare(Thrust(FinalState()), "Thrust");
// Book histograms
book(_h_PCharged , 1, 1, 1);
book(_h_XpPiPlus , 2, 1, 2);
book(_h_XpKPlus , 3, 1, 2);
book(_h_XpProton , 4, 1, 2);
book(_h_XpPiPlusTotal , 2, 2, 2);
book(_h_XpKPlusTotal , 3, 2, 2);
book(_h_XpProtonTotal , 4, 2, 2);
book(_h_XpPiPlusL , 5, 1, 1);
book(_h_XpPiPlusC , 5, 1, 2);
book(_h_XpPiPlusB , 5, 1, 3);
book(_h_XpKPlusL , 6, 1, 1);
book(_h_XpKPlusC , 6, 1, 2);
book(_h_XpKPlusB , 6, 1, 3);
book(_h_XpProtonL , 7, 1, 1);
book(_h_XpProtonC , 7, 1, 2);
book(_h_XpProtonB , 7, 1, 3);
book(_h_XpChargedL , 8, 1, 1);
book(_h_XpChargedC , 8, 1, 2);
book(_h_XpChargedB , 8, 1, 3);
book(_h_NPiPlusL , 5, 2, 1);
book(_h_NPiPlusC , 5, 2, 2);
book(_h_NPiPlusB , 5, 2, 3);
book(_h_NKPlusL , 6, 2, 1);
book(_h_NKPlusC , 6, 2, 2);
book(_h_NKPlusB , 6, 2, 3);
book(_h_NProtonL , 7, 2, 1);
book(_h_NProtonC , 7, 2, 2);
book(_h_NProtonB , 7, 2, 3);
book(_h_RPiPlus , 9, 1, 1);
book(_h_RPiMinus , 9, 1, 2);
book(_h_RKPlus ,10, 1, 1);
book(_h_RKMinus ,10, 1, 2);
book(_h_RProton ,11, 1, 1);
book(_h_RPBar ,11, 1, 2);
// Ratios: used as target of divide() later
book(_s_PiM_PiP, 9, 1, 3);
book(_s_KM_KP , 10, 1, 3);
book(_s_Pr_PBar, 11, 1, 3);
book(_weightedTotalChargedPartNumLight, "weightedTotalChargedPartNumLight");
book(_weightedTotalChargedPartNumCharm, "weightedTotalChargedPartNumCharm");
book(_weightedTotalChargedPartNumBottom, "weightedTotalChargedPartNumBottom");
book(_weightLight, "weightLight");
book(_weightCharm, "weightCharm");
book(_weightBottom, "weightBottom");
book(tmp1, 8, 2, 1, true);
book(tmp2, 8, 2, 2, true);
book(tmp3, 8, 2, 3, true);
book(tmp4, 8, 3, 2, true);
book(tmp5, 8, 3, 3, true);
}
/// Finalize
void finalize() {
// Multiplicities
/// @todo Include errors
- const double avgNumPartsLight = _weightedTotalChargedPartNumLight / _weightLight;
- const double avgNumPartsCharm = _weightedTotalChargedPartNumCharm / _weightCharm;
- const double avgNumPartsBottom = _weightedTotalChargedPartNumBottom / _weightBottom;
+ const double avgNumPartsLight = _weightedTotalChargedPartNumLight->val() / _weightLight->val();
+ const double avgNumPartsCharm = _weightedTotalChargedPartNumCharm->val() / _weightCharm->val();
+ const double avgNumPartsBottom = _weightedTotalChargedPartNumBottom->val() / _weightBottom->val();
tmp1->point(0).setY(avgNumPartsLight);
tmp2->point(0).setY(avgNumPartsCharm);
tmp3->point(0).setY(avgNumPartsBottom);
tmp4->point(0).setY(avgNumPartsCharm - avgNumPartsLight);
tmp5->point(0).setY(avgNumPartsBottom - avgNumPartsLight);
// Do divisions
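// Charge asymmetries (N- - N+)/(N- + N+), converted to percent at the end of finalize()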
divide(*_h_RPiMinus - *_h_RPiPlus, *_h_RPiMinus + *_h_RPiPlus, _s_PiM_PiP);
divide(*_h_RKMinus - *_h_RKPlus, *_h_RKMinus + *_h_RKPlus, _s_KM_KP);
divide(*_h_RProton - *_h_RPBar, *_h_RProton + *_h_RPBar, _s_Pr_PBar);
// Scale histograms
scale(_h_PCharged, 1./sumOfWeights());
scale(_h_XpPiPlus, 1./sumOfWeights());
scale(_h_XpKPlus, 1./sumOfWeights());
scale(_h_XpProton, 1./sumOfWeights());
scale(_h_XpPiPlusTotal, 1./sumOfWeights());
scale(_h_XpKPlusTotal, 1./sumOfWeights());
scale(_h_XpProtonTotal, 1./sumOfWeights());
- scale(_h_XpPiPlusL, 1./_weightLight);
- scale(_h_XpPiPlusC, 1./_weightCharm);
- scale(_h_XpPiPlusB, 1./_weightBottom);
- scale(_h_XpKPlusL, 1./_weightLight);
- scale(_h_XpKPlusC, 1./_weightCharm);
- scale(_h_XpKPlusB, 1./_weightBottom);
- scale(_h_XpProtonL, 1./_weightLight);
- scale(_h_XpProtonC, 1./_weightCharm);
- scale(_h_XpProtonB, 1./_weightBottom);
+ scale(_h_XpPiPlusL, 1. / *_weightLight);
+ scale(_h_XpPiPlusC, 1. / *_weightCharm);
+ scale(_h_XpPiPlusB, 1. / *_weightBottom);
+ scale(_h_XpKPlusL, 1. / *_weightLight);
+ scale(_h_XpKPlusC, 1. / *_weightCharm);
+ scale(_h_XpKPlusB, 1. / *_weightBottom);
+ scale(_h_XpProtonL, 1. / *_weightLight);
+ scale(_h_XpProtonC, 1. / *_weightCharm);
+ scale(_h_XpProtonB, 1. / *_weightBottom);
- scale(_h_XpChargedL, 1./_weightLight);
- scale(_h_XpChargedC, 1./_weightCharm);
- scale(_h_XpChargedB, 1./_weightBottom);
+ scale(_h_XpChargedL, 1. / *_weightLight);
+ scale(_h_XpChargedC, 1. / *_weightCharm);
+ scale(_h_XpChargedB, 1. / *_weightBottom);
- scale(_h_NPiPlusL, 1./_weightLight);
- scale(_h_NPiPlusC, 1./_weightCharm);
- scale(_h_NPiPlusB, 1./_weightBottom);
- scale(_h_NKPlusL, 1./_weightLight);
- scale(_h_NKPlusC, 1./_weightCharm);
- scale(_h_NKPlusB, 1./_weightBottom);
- scale(_h_NProtonL, 1./_weightLight);
- scale(_h_NProtonC, 1./_weightCharm);
- scale(_h_NProtonB, 1./_weightBottom);
+ scale(_h_NPiPlusL, 1. / *_weightLight);
+ scale(_h_NPiPlusC, 1. / *_weightCharm);
+ scale(_h_NPiPlusB, 1. / *_weightBottom);
+ scale(_h_NKPlusL, 1. / *_weightLight);
+ scale(_h_NKPlusC, 1. / *_weightCharm);
+ scale(_h_NKPlusB, 1. / *_weightBottom);
+ scale(_h_NProtonL, 1. / *_weightLight);
+ scale(_h_NProtonC, 1. / *_weightCharm);
+ scale(_h_NProtonB, 1. / *_weightBottom);
// Paper suggests this should be 0.5/weight but it has to be 1.0 to get normalisations right...
- scale(_h_RPiPlus, 1./_weightLight);
- scale(_h_RPiMinus, 1./_weightLight);
- scale(_h_RKPlus, 1./_weightLight);
- scale(_h_RKMinus, 1./_weightLight);
- scale(_h_RProton, 1./_weightLight);
- scale(_h_RPBar, 1./_weightLight);
+ scale(_h_RPiPlus, 1. / *_weightLight);
+ scale(_h_RPiMinus, 1. / *_weightLight);
+ scale(_h_RKPlus, 1. / *_weightLight);
+ scale(_h_RKMinus, 1. / *_weightLight);
+ scale(_h_RProton, 1. / *_weightLight);
+ scale(_h_RPBar, 1. / *_weightLight);
// convert ratio to %
_s_PiM_PiP->scale(1.,100.);
_s_KM_KP ->scale(1.,100.);
_s_Pr_PBar->scale(1.,100.);
}
//@}
private:
Scatter2DPtr tmp1;
Scatter2DPtr tmp2;
Scatter2DPtr tmp3;
Scatter2DPtr tmp4;
Scatter2DPtr tmp5;
/// @name Multiplicities
//@{
CounterPtr _weightedTotalChargedPartNumLight;
CounterPtr _weightedTotalChargedPartNumCharm;
CounterPtr _weightedTotalChargedPartNumBottom;
//@}
/// @name Weights
//@{
CounterPtr _weightLight, _weightCharm, _weightBottom;
//@}
// Histograms
//@{
Histo1DPtr _h_PCharged;
Histo1DPtr _h_XpPiPlus, _h_XpKPlus, _h_XpProton;
Histo1DPtr _h_XpPiPlusTotal, _h_XpKPlusTotal, _h_XpProtonTotal;
Histo1DPtr _h_XpPiPlusL, _h_XpPiPlusC, _h_XpPiPlusB;
Histo1DPtr _h_XpKPlusL, _h_XpKPlusC, _h_XpKPlusB;
Histo1DPtr _h_XpProtonL, _h_XpProtonC, _h_XpProtonB;
Histo1DPtr _h_XpChargedL, _h_XpChargedC, _h_XpChargedB;
Histo1DPtr _h_NPiPlusL, _h_NPiPlusC, _h_NPiPlusB;
Histo1DPtr _h_NKPlusL, _h_NKPlusC, _h_NKPlusB;
Histo1DPtr _h_NProtonL, _h_NProtonC, _h_NProtonB;
Histo1DPtr _h_RPiPlus, _h_RPiMinus, _h_RKPlus;
Histo1DPtr _h_RKMinus, _h_RProton, _h_RPBar;
Scatter2DPtr _s_PiM_PiP, _s_KM_KP, _s_Pr_PBar;
//@}
};
// Hook for the plugin system
DECLARE_RIVET_PLUGIN(SLD_2004_S5693039);
}
diff --git a/analyses/pluginLHCb/LHCB_2010_S8758301.cc b/analyses/pluginLHCb/LHCB_2010_S8758301.cc
--- a/analyses/pluginLHCb/LHCB_2010_S8758301.cc
+++ b/analyses/pluginLHCb/LHCB_2010_S8758301.cc
@@ -1,554 +1,554 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
#include "Rivet/Math/Constants.hh"
#include "Rivet/Math/Units.hh"
#include "HepMC/GenEvent.h"
#include "HepMC/GenParticle.h"
#include "HepMC/GenVertex.h"
#include "HepMC/SimpleVector.h"
namespace Rivet {
using namespace HepMC;
// Lifetime cut: longest-lived ancestor ctau < 10^-11 [m]
namespace {
const double MAX_CTAU = 1.0E-11; // [m]
const double MIN_PT = 0.0001; // [GeV/c]
}
class LHCB_2010_S8758301 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
LHCB_2010_S8758301()
: Analysis("LHCB_2010_S8758301"),
sumKs0_badnull(0),
sumKs0_badlft(0), sumKs0_all(0),
sumKs0_outup(0), sumKs0_outdwn(0),
sum_low_pt_loss(0), sum_high_pt_loss(0)
{ }
//@}
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
MSG_DEBUG("Initializing analysis!");
fillMap(partLftMap);
book(_h_K0s_pt_30 ,1,1,1);
book(_h_K0s_pt_35 ,1,1,2);
book(_h_K0s_pt_40 ,1,1,3);
book(_h_K0s_pt_y_30 ,2,1,1);
book(_h_K0s_pt_y_35 ,2,1,2);
book(_h_K0s_pt_y_40 ,2,1,3);
book(_h_K0s_pt_y_all ,3,1,1);
book(sumKs0_30, "TMP/sumKs0_30");
book(sumKs0_35, "TMP/sumKs0_35");
book(sumKs0_40, "TMP/sumKs0_40");
declare(UnstableFinalState(), "UFS");
}
/// Perform the per-event analysis
void analyze(const Event& event) {
int id;
double y, pT;
const UnstableFinalState& ufs = apply<UnstableFinalState>(event, "UFS");
double ancestor_lftime;
foreach (const Particle& p, ufs.particles()) {
id = p.pid();
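// Keep only K0_S candidates: PDG id 310 (the -310 check below is defensive, the K0_S being self-conjugate)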
if ((id != 310) && (id != -310)) continue;
sumKs0_all ++;
ancestor_lftime = 0.;
const GenParticle* long_ancestor = getLongestLivedAncestor(p, ancestor_lftime);
if ( !(long_ancestor) ) {
sumKs0_badnull ++;
continue;
}
if ( ancestor_lftime > MAX_CTAU ) {
sumKs0_badlft ++;
MSG_DEBUG("Ancestor " << long_ancestor->pdg_id() << ", ctau: " << ancestor_lftime << " [m]");
continue;
}
const FourMomentum& qmom = p.momentum();
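// Rapidity y = 0.5 * ln[(E + pz)/(E - pz)] and pT computed from the K0_S four-momentum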
y = 0.5 * log((qmom.E() + qmom.pz())/(qmom.E() - qmom.pz()));
pT = sqrt((qmom.px() * qmom.px()) + (qmom.py() * qmom.py()));
if (pT < MIN_PT) {
sum_low_pt_loss ++;
MSG_DEBUG("Small pT K^0_S: " << pT << " GeV/c.");
}
if (pT > 1.6) {
sum_high_pt_loss ++;
}
if (y > 2.5 && y < 4.0) {
_h_K0s_pt_y_all->fill(pT);
if (y > 2.5 && y < 3.0) {
_h_K0s_pt_y_30->fill(pT);
_h_K0s_pt_30->fill(pT);
sumKs0_30->fill();
} else if (y > 3.0 && y < 3.5) {
_h_K0s_pt_y_35->fill(pT);
_h_K0s_pt_35->fill(pT);
sumKs0_35->fill();
} else if (y > 3.5 && y < 4.0) {
_h_K0s_pt_y_40->fill(pT);
_h_K0s_pt_40->fill(pT);
sumKs0_40->fill();
}
} else if (y < 2.5) {
sumKs0_outdwn ++;
} else if (y > 4.0) {
sumKs0_outup ++;
}
}
}
/// Normalise histograms etc., after the run
void finalize() {
MSG_DEBUG("Total number Ks0: " << sumKs0_all << endl
<< "Sum of weights: " << sumOfWeights() << endl
- << "Weight Ks0 (2.5 < y < 3.0): " << double(sumKs0_30) << endl
- << "Weight Ks0 (3.0 < y < 3.5): " << double(sumKs0_35) << endl
- << "Weight Ks0 (3.5 < y < 4.0): " << double(sumKs0_40) << endl
+ << "Weight Ks0 (2.5 < y < 3.0): " << sumKs0_30->val() << endl
+ << "Weight Ks0 (3.0 < y < 3.5): " << sumKs0_35->val() << endl
+ << "Weight Ks0 (3.5 < y < 4.0): " << sumKs0_40->val() << endl
<< "Nb. unprompt Ks0 [null mother]: " << sumKs0_badnull << endl
<< "Nb. unprompt Ks0 [mother lifetime exceeded]: " << sumKs0_badlft << endl
<< "Nb. Ks0 (y > 4.0): " << sumKs0_outup << endl
<< "Nb. Ks0 (y < 2.5): " << sumKs0_outdwn << endl
<< "Nb. Ks0 (pT < " << (MIN_PT/MeV) << " MeV/c): " << sum_low_pt_loss << endl
<< "Nb. Ks0 (pT > 1.6 GeV/c): " << sum_high_pt_loss << endl
<< "Cross-section [mb]: " << crossSection()/millibarn << endl
<< "Nb. events: " << numEvents());
// Compute the per-event cross-section factor (Rivet reports the cross-section in pb)
double xsection_factor = crossSection()/sumOfWeights();
// Multiply by bin width for correct scaling; cross-section in microbarn
scale(_h_K0s_pt_30, 0.2*xsection_factor/microbarn);
scale(_h_K0s_pt_35, 0.2*xsection_factor/microbarn);
scale(_h_K0s_pt_40, 0.2*xsection_factor/microbarn);
// Divide by dy (rapidity window width), xsection in mb
scale(_h_K0s_pt_y_30, xsection_factor/0.5/millibarn);
scale(_h_K0s_pt_y_35, xsection_factor/0.5/millibarn);
scale(_h_K0s_pt_y_40, xsection_factor/0.5/millibarn);
scale(_h_K0s_pt_y_all, xsection_factor/1.5/millibarn);
}
//@}
private:
/// Get particle lifetime from hardcoded data
double getLifeTime(int pid) {
double lft = -1.0;
if (pid < 0) pid = - pid;
// Correct Pythia6 PIDs for f0(980), f0(1370) mesons
if (pid == 10331) pid = 30221;
if (pid == 10221) pid = 9010221;
map<int, double>::iterator pPartLft = partLftMap.find(pid);
// search stable particle list
if (pPartLft == partLftMap.end()) {
if (pid <= 100 || pid == 990) return 0.0;
for ( auto id : stablePDGIds ) {
if (pid == id) { lft = 0.0; break; }
}
} else {
lft = (*pPartLft).second;
}
if (lft < 0.0)
MSG_ERROR("Could not determine lifetime for particle with PID " << pid
<< "... This K_s^0 will be considered unprompt!");
return lft;
}
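// Walk up the chain of production vertices and return the longest-lived ancestor;
// on success, lifeTime is set to that ancestor's ctau in metres (lifetime [s] * c_light)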
const GenParticle* getLongestLivedAncestor(const Particle& p, double& lifeTime) {
const GenParticle* ret = nullptr;
lifeTime = 1.;
if (p.genParticle() == nullptr) return nullptr;
const GenParticle* pmother = p.genParticle();
double longest_ctau = 0.;
double mother_ctau;
int mother_pid, n_inparts;
const GenVertex* ivertex = pmother->production_vertex();
while (ivertex) {
n_inparts = ivertex->particles_in_size();
if (n_inparts < 1) { ret = nullptr; break; } // error: should never happen!
const auto iPart_invtx = ivertex->particles_in_const_begin();
pmother = (*iPart_invtx); // first mother particle
mother_pid = pmother->pdg_id();
ivertex = pmother->production_vertex(); // get next vertex
if ( (mother_pid == 2212) || (mother_pid <= 100) ) {
if (ret == nullptr) ret = pmother;
continue;
}
mother_ctau = getLifeTime(mother_pid);
if (mother_ctau < 0.) { ret = nullptr; break; } // error: should never happen!
if (mother_ctau > longest_ctau) {
longest_ctau = mother_ctau;
ret = pmother;
}
}
if (ret) lifeTime = longest_ctau * c_light;
return ret;
}
// Fill the PDG ID -> lifetime [seconds] map
// Data were extracted from the LHCb Particle Table using ParticleSvc
bool fillMap(map<int, double> &m) {
m[6] = 4.707703E-25;
m[11] = 1.E+16;
m[12] = 1.E+16;
m[13] = 2.197019E-06;
m[14] = 1.E+16;
m[15] = 2.906E-13;
m[16] = 1.E+16;
m[22] = 1.E+16;
m[23] = 2.637914E-25;
m[24] = 3.075758E-25;
m[25] = 9.4E-26;
m[35] = 9.4E-26;
m[36] = 9.4E-26;
m[37] = 9.4E-26;
m[84] = 3.335641E-13;
m[85] = 1.290893E-12;
m[111] = 8.4E-17;
m[113] = 4.405704E-24;
m[115] = 6.151516E-24;
m[117] = 4.088275E-24;
m[119] = 2.102914E-24;
m[130] = 5.116E-08;
m[150] = 1.525E-12;
m[211] = 2.6033E-08;
m[213] = 4.405704E-24;
m[215] = 6.151516E-24;
m[217] = 4.088275E-24;
m[219] = 2.102914E-24;
m[221] = 5.063171E-19;
m[223] = 7.752794E-23;
m[225] = 3.555982E-24;
m[227] = 3.91793E-24;
m[229] = 2.777267E-24;
m[310] = 8.953E-11;
m[313] = 1.308573E-23;
m[315] = 6.038644E-24;
m[317] = 4.139699E-24;
m[319] = 3.324304E-24;
m[321] = 1.238E-08;
m[323] = 1.295693E-23;
m[325] = 6.682357E-24;
m[327] = 4.139699E-24;
m[329] = 3.324304E-24;
m[331] = 3.210791E-21;
m[333] = 1.545099E-22;
m[335] = 9.016605E-24;
m[337] = 7.565657E-24;
m[350] = 1.407125E-12;
m[411] = 1.04E-12;
m[413] = 6.856377E-21;
m[415] = 1.778952E-23;
m[421] = 4.101E-13;
m[423] = 1.000003E-19;
m[425] = 1.530726E-23;
m[431] = 5.E-13;
m[433] = 1.000003E-19;
m[435] = 3.291061E-23;
m[441] = 2.465214E-23;
m[443] = 7.062363E-21;
m[445] = 3.242425E-22;
m[510] = 1.525E-12;
m[511] = 1.525E-12;
m[513] = 1.000019E-19;
m[515] = 1.31E-23;
m[521] = 1.638E-12;
m[523] = 1.000019E-19;
m[525] = 1.31E-23;
m[530] = 1.536875E-12;
m[531] = 1.472E-12;
m[533] = 1.E-19;
m[535] = 1.31E-23;
m[541] = 4.5E-13;
m[553] = 1.218911E-20;
m[1112] = 4.539394E-24;
m[1114] = 5.578069E-24;
m[1116] = 1.994582E-24;
m[1118] = 2.269697E-24;
m[1212] = 4.539394E-24;
m[1214] = 5.723584E-24;
m[1216] = 1.994582E-24;
m[1218] = 1.316424E-24;
m[2112] = 8.857E+02;
m[2114] = 5.578069E-24;
m[2116] = 4.388081E-24;
m[2118] = 2.269697E-24;
m[2122] = 4.539394E-24;
m[2124] = 5.723584E-24;
m[2126] = 1.994582E-24;
m[2128] = 1.316424E-24;
m[2212] = 1.E+16;
m[2214] = 5.578069E-24;
m[2216] = 4.388081E-24;
m[2218] = 2.269697E-24;
m[2222] = 4.539394E-24;
m[2224] = 5.578069E-24;
m[2226] = 1.994582E-24;
m[2228] = 2.269697E-24;
m[3112] = 1.479E-10;
m[3114] = 1.670589E-23;
m[3116] = 5.485102E-24;
m[3118] = 3.656734E-24;
m[3122] = 2.631E-10;
m[3124] = 4.219309E-23;
m[3126] = 8.227653E-24;
m[3128] = 3.291061E-24;
m[3212] = 7.4E-20;
m[3214] = 1.828367E-23;
m[3216] = 5.485102E-24;
m[3218] = 3.656734E-24;
m[3222] = 8.018E-11;
m[3224] = 1.838582E-23;
m[3226] = 5.485102E-24;
m[3228] = 3.656734E-24;
m[3312] = 1.639E-10;
m[3314] = 6.648608E-23;
m[3322] = 2.9E-10;
m[3324] = 7.233101E-23;
m[3334] = 8.21E-11;
m[4112] = 2.991874E-22;
m[4114] = 4.088274E-23;
m[4122] = 2.E-13;
m[4132] = 1.12E-13;
m[4212] = 3.999999E-22;
m[4214] = 3.291061E-22;
m[4222] = 2.951624E-22;
m[4224] = 4.417531E-23;
m[4232] = 4.42E-13;
m[4332] = 6.9E-14;
m[4412] = 3.335641E-13;
m[4422] = 3.335641E-13;
m[4432] = 3.335641E-13;
m[5112] = 1.E-19;
m[5122] = 1.38E-12;
m[5132] = 1.42E-12;
m[5142] = 1.290893E-12;
m[5212] = 1.E-19;
m[5222] = 1.E-19;
m[5232] = 1.42E-12;
m[5242] = 1.290893E-12;
m[5312] = 1.E-19;
m[5322] = 1.E-19;
m[5332] = 1.55E-12;
m[5342] = 1.290893E-12;
m[5442] = 1.290893E-12;
m[5512] = 1.290893E-12;
m[5522] = 1.290893E-12;
m[5532] = 1.290893E-12;
m[5542] = 1.290893E-12;
m[10111] = 2.48382E-24;
m[10113] = 4.635297E-24;
m[10115] = 2.54136E-24;
m[10211] = 2.48382E-24;
m[10213] = 4.635297E-24;
m[10215] = 2.54136E-24;
m[10223] = 1.828367E-24;
m[10225] = 3.636531E-24;
m[10311] = 2.437823E-24;
m[10313] = 7.313469E-24;
m[10315] = 3.538775E-24;
m[10321] = 2.437823E-24;
m[10323] = 7.313469E-24;
m[10325] = 3.538775E-24;
m[10331] = 4.804469E-24;
m[10411] = 4.38E-24;
m[10413] = 3.29E-23;
m[10421] = 4.38E-24;
m[10423] = 3.22653E-23;
m[10431] = 6.5821E-22;
m[10433] = 6.5821E-22;
m[10441] = 6.453061E-23;
m[10511] = 4.39E-24;
m[10513] = 1.65E-23;
m[10521] = 4.39E-24;
m[10523] = 1.65E-23;
m[10531] = 4.39E-24;
m[10533] = 1.65E-23;
m[11114] = 2.194041E-24;
m[11116] = 1.828367E-24;
m[11212] = 1.880606E-24;
m[11216] = 1.828367E-24;
m[12112] = 2.194041E-24;
m[12114] = 2.194041E-24;
m[12116] = 5.063171E-24;
m[12126] = 1.828367E-24;
m[12212] = 2.194041E-24;
m[12214] = 2.194041E-24;
m[12216] = 5.063171E-24;
m[12224] = 2.194041E-24;
m[12226] = 1.828367E-24;
m[13112] = 6.582122E-24;
m[13114] = 1.09702E-23;
m[13116] = 5.485102E-24;
m[13122] = 1.316424E-23;
m[13124] = 1.09702E-23;
m[13126] = 6.928549E-24;
m[13212] = 6.582122E-24;
m[13214] = 1.09702E-23;
m[13216] = 5.485102E-24;
m[13222] = 6.582122E-24;
m[13224] = 1.09702E-23;
m[13226] = 5.485102E-24;
m[13312] = 4.135667E-22;
m[13314] = 2.742551E-23;
m[13324] = 2.742551E-23;
m[14122] = 1.828367E-22;
m[20022] = 1.E+16;
m[20113] = 1.567172E-24;
m[20213] = 1.567172E-24;
m[20223] = 2.708692E-23;
m[20313] = 3.782829E-24;
m[20315] = 2.384827E-24;
m[20323] = 3.782829E-24;
m[20325] = 2.384827E-24;
m[20333] = 1.198929E-23;
m[20413] = 2.63E-24;
m[20423] = 2.63E-24;
m[20433] = 6.5821E-22;
m[20443] = 7.395643E-22;
m[20513] = 2.63E-24;
m[20523] = 2.63E-24;
m[20533] = 2.63E-24;
m[21112] = 2.632849E-24;
m[21114] = 3.291061E-24;
m[21212] = 2.632849E-24;
m[21214] = 6.582122E-24;
m[22112] = 4.388081E-24;
m[22114] = 3.291061E-24;
m[22122] = 2.632849E-24;
m[22124] = 6.582122E-24;
m[22212] = 4.388081E-24;
m[22214] = 3.291061E-24;
m[22222] = 2.632849E-24;
m[22224] = 3.291061E-24;
m[23112] = 7.313469E-24;
m[23114] = 2.991874E-24;
m[23122] = 4.388081E-24;
m[23124] = 6.582122E-24;
m[23126] = 3.291061E-24;
m[23212] = 7.313469E-24;
m[23214] = 2.991874E-24;
m[23222] = 7.313469E-24;
m[23224] = 2.991874E-24;
m[30113] = 2.632849E-24;
m[30213] = 2.632849E-24;
m[30221] = 1.880606E-24;
m[30223] = 2.089563E-24;
m[30313] = 2.056913E-24;
m[30323] = 2.056913E-24;
m[30443] = 2.419898E-23;
m[31114] = 1.880606E-24;
m[31214] = 3.291061E-24;
m[32112] = 3.989164E-24;
m[32114] = 1.880606E-24;
m[32124] = 3.291061E-24;
m[32212] = 3.989164E-24;
m[32214] = 1.880606E-24;
m[32224] = 1.880606E-24;
m[33122] = 1.880606E-23;
m[42112] = 6.582122E-24;
m[42212] = 6.582122E-24;
m[43122] = 2.194041E-24;
m[53122] = 4.388081E-24;
m[100111] = 1.645531E-24;
m[100113] = 1.64553E-24;
m[100211] = 1.645531E-24;
m[100213] = 1.64553E-24;
m[100221] = 1.196749E-23;
m[100223] = 3.061452E-24;
m[100313] = 2.837122E-24;
m[100323] = 2.837122E-24;
m[100331] = 4.459432E-25;
m[100333] = 4.388081E-24;
m[100441] = 4.701516E-23;
m[100443] = 2.076379E-21;
m[100553] = 2.056913E-20;
m[200553] = 3.242425E-20;
m[300553] = 3.210791E-23;
m[9000111] = 8.776163E-24;
m[9000211] = 8.776163E-24;
m[9000443] = 8.227652E-24;
m[9000553] = 5.983747E-24;
m[9010111] = 3.164482E-24;
m[9010211] = 3.164482E-24;
m[9010221] = 9.403031E-24;
m[9010443] = 8.438618E-24;
m[9010553] = 8.3318E-24;
m[9020221] = 8.093281E-23;
m[9020443] = 1.061633E-23;
m[9030221] = 6.038644E-24;
m[9042413] = 2.07634E-21;
m[9050225] = 1.394517E-24;
m[9060225] = 3.291061E-24;
m[9080225] = 4.388081E-24;
m[9090225] = 2.056913E-24;
m[9910445] = 2.07634E-21;
m[9920443] = 2.07634E-21;
return true;
}
/// @name Histograms and counters
//@{
Histo1DPtr _h_K0s_pt_y_30; // histogram for 2.5 < y < 3.0 (d2sigma)
Histo1DPtr _h_K0s_pt_y_35; // histogram for 3.0 < y < 3.5 (d2sigma)
Histo1DPtr _h_K0s_pt_y_40; // histogram for 3.5 < y < 4.0 (d2sigma)
Histo1DPtr _h_K0s_pt_30; // histogram for 2.5 < y < 3.0 (sigma)
Histo1DPtr _h_K0s_pt_35; // histogram for 3.0 < y < 3.5 (sigma)
Histo1DPtr _h_K0s_pt_40; // histogram for 3.5 < y < 4.0 (sigma)
Histo1DPtr _h_K0s_pt_y_all; // histogram for 2.5 < y < 4.0 (d2sigma)
CounterPtr sumKs0_30; // Sum of weights 2.5 < y < 3.0
CounterPtr sumKs0_35; // Sum of weights 3.0 < y < 3.5
CounterPtr sumKs0_40; // Sum of weights 3.5 < y < 4.0
// Various counters mainly for debugging and comparisons between different generators
size_t sumKs0_badnull; // Number of particles whose mother could not be identified
size_t sumKs0_badlft; // Number of mesons with long-lived mothers
size_t sumKs0_all; // Number of all Ks0 generated
size_t sumKs0_outup; // Number of mesons with y > 4.0
size_t sumKs0_outdwn; // Number of mesons with y < 2.5
size_t sum_low_pt_loss; // Number of mesons with very low pT (indicates mixed-up units)
size_t sum_high_pt_loss; // Number of mesons with pT > 1.6 GeV/c
// Map between PDG id and particle lifetimes in seconds
std::map<int, double> partLftMap;
// Set of PDG IDs for stable particles (PDG IDs <= 100 are considered stable)
static const array<int,171> stablePDGIds;
//@}
};
// Actual initialization according to ISO C++ requirements
const array<int,171> LHCB_2010_S8758301::stablePDGIds{{
311, 543, 545, 551, 555, 557, 1103, 2101, 2103, 2203, 3101, 3103, 3201, 3203, 3303,
4101, 4103, 4124, 4201, 4203, 4301, 4303, 4312, 4314, 4322, 4324, 4334, 4403, 4414,
4424, 4434, 4444, 5101, 5103, 5114, 5201, 5203, 5214, 5224, 5301, 5303, 5314, 5324,
5334, 5401, 5403, 5412, 5414, 5422, 5424, 5432, 5434, 5444, 5503, 5514, 5524, 5534,
5544, 5554, 10022, 10333, 10335, 10443, 10541, 10543, 10551, 10553, 10555, 11112,
12118, 12122, 12218, 12222, 13316, 13326, 20543, 20553, 20555, 23314, 23324, 30343,
30353, 30363, 30553, 33314, 33324, 41214, 42124, 52114, 52214, 100311, 100315, 100321,
100325, 100411, 100413, 100421, 100423, 100551, 100555, 100557, 110551, 110553, 110555,
120553, 120555, 130553, 200551, 200555, 210551, 210553, 220553, 1000001, 1000002,
1000003, 1000004, 1000005, 1000006, 1000011, 1000012, 1000013, 1000014, 1000015,
1000016, 1000021, 1000022, 1000023, 1000024, 1000025, 1000035, 1000037, 1000039,
2000001, 2000002, 2000003, 2000004, 2000005, 2000006, 2000011, 2000012, 2000013,
2000014, 2000015, 2000016, 3000111, 3000113, 3000211, 3000213, 3000221, 3000223,
3000331, 3100021, 3100111, 3100113, 3200111, 3200113, 3300113, 3400113, 4000001,
4000002, 4000011, 4000012, 5000039, 9000221, 9900012, 9900014, 9900016, 9900023,
9900024, 9900041, 9900042}};
// Hook for the plugin system
DECLARE_RIVET_PLUGIN(LHCB_2010_S8758301);
}
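// A minimal standalone sketch of the counter-normalisation pattern changed
// throughout this patch. The toy Counter below is illustrative only (not
// the real YODA/Rivet class); it just shows why "1.0/_sumW" no longer
// compiles once the counter sits behind a pointer-like CounterPtr handle,
// while "1.0/ *_sumW" and "1.0/_sumW->val()" both do.
#include <iostream>
#include <memory>

struct Counter {
  double sumW = 0.0;
  void fill(double w = 1.0) { sumW += w; }
  double val() const { return sumW; }
};

// Division of a plain double by a Counter, as assumed by the "1.0/ *ptr" form
double operator/(double num, const Counter& c) { return num / c.val(); }

using CounterPtr = std::shared_ptr<Counter>;

int main() {
  CounterPtr nevt = std::make_shared<Counter>();
  for (int i = 0; i < 4; ++i) nevt->fill();  // four accepted "events"
  std::cout << 1.0 / *nevt << "\n";          // 0.25: dereference the handle
  std::cout << 1.0 / nevt->val() << "\n";    // 0.25: explicit value access
  return 0;
}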
diff --git a/analyses/pluginLHCb/LHCB_2013_I1208105.cc b/analyses/pluginLHCb/LHCB_2013_I1208105.cc
--- a/analyses/pluginLHCb/LHCB_2013_I1208105.cc
+++ b/analyses/pluginLHCb/LHCB_2013_I1208105.cc
@@ -1,235 +1,235 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
class LHCB_2013_I1208105 : public Analysis {
public:
LHCB_2013_I1208105()
: Analysis("LHCB_2013_I1208105")
{ }
void init() {
// Projections
declare(FinalState(1.9, 4.9), "forwardFS");
declare(FinalState(-3.5,-1.5), "backwardFS");
declare(ChargedFinalState(1.9, 4.9), "forwardCFS");
declare(ChargedFinalState(-3.5,-1.5), "backwardCFS");
// Histos
book(_s_chEF_minbias, 1, 1, 1, true);
book(_s_chEF_hard, 2, 1, 1, true);
book(_s_chEF_diff, 3, 1, 1, true);
book(_s_chEF_nondiff, 4, 1, 1, true);
book(_s_totEF_minbias, 5, 1, 1, true);
book(_s_totEF_hard, 6, 1, 1, true);
book(_s_totEF_diff, 7, 1, 1, true);
book(_s_totEF_nondiff, 8, 1, 1, true);
// Temporary profiles and histos
/// @todo Convert to declared/registered temp histos
book(_tp_chEF_minbias, "TMP/chEF_minbias", refData(1,1,1));
book(_tp_chEF_hard, "TMP/chEF_hard", refData(2,1,1));
book(_tp_chEF_diff, "TMP/chEF_diff", refData(3,1,1));
book(_tp_chEF_nondiff, "TMP/chEF_nondiff", refData(4,1,1));
book(_tp_totEF_minbias, "TMP/totEF_minbias", refData(5,1,1));
book(_tp_totEF_hard, "TMP/totEF_hard", refData(6,1,1));
book(_tp_totEF_diff, "TMP/totEF_diff", refData(7,1,1));
book(_tp_totEF_nondiff, "TMP/totEF_nondiff", refData(8,1,1));
book(_th_chN_minbias, "TMP/chN_minbias", refData(1,1,1));
book(_th_chN_hard, "TMP/chN_hard", refData(2,1,1));
book(_th_chN_diff, "TMP/chN_diff", refData(3,1,1));
book(_th_chN_nondiff, "TMP/chN_nondiff", refData(4,1,1));
book(_th_totN_minbias, "TMP/totN_minbias", refData(5,1,1));
book(_th_totN_hard, "TMP/totN_hard", refData(6,1,1));
book(_th_totN_diff, "TMP/totN_diff", refData(7,1,1));
book(_th_totN_nondiff, "TMP/totN_nondiff", refData(8,1,1));
// Counters
book(_mbSumW, "TMP/mbSumW");
book(_hdSumW, "TMP/hdSumW");
book(_dfSumW, "TMP/dfSumW");
book(_ndSumW, "TMP/ndSumW");
book(_mbchSumW, "TMP/mbchSumW");
book(_hdchSumW, "TMP/hdchSumW");
book(_dfchSumW, "TMP/dfchSumW");
book(_ndchSumW, "TMP/ndchSumW");
}
/// Perform the per-event analysis
void analyze(const Event& event) {
const FinalState& ffs = apply<FinalState>(event, "forwardFS");
const FinalState& bfs = apply<FinalState>(event, "backwardFS");
const ChargedFinalState& fcfs = apply<ChargedFinalState>(event, "forwardCFS");
const ChargedFinalState& bcfs = apply<ChargedFinalState>(event, "backwardCFS");
// Veto this event completely if there are no forward *charged* particles
if (fcfs.empty()) vetoEvent;
// Charged and neutral version
{
// Decide empirically if this is a "hard" event
bool ishardEvt = false;
foreach (const Particle& p, ffs.particles()) {
if (p.pT() > 3.0*GeV) { ishardEvt = true; break; }
}
// Decide empirically if this is a "diffractive" event
/// @todo Can be "diffractive" *and* "hard"?
bool isdiffEvt = (bfs.size() == 0);
// Update event-type weight counters
_mbSumW->fill();
(isdiffEvt ? _dfSumW : _ndSumW)->fill();
if (ishardEvt) _hdSumW->fill();
// Plot energy flow
foreach (const Particle& p, ffs.particles()) {
const double eta = p.eta();
const double energy = p.E();
_tp_totEF_minbias->fill(eta, energy);
_th_totN_minbias->fill(eta);
if (ishardEvt) {
_tp_totEF_hard->fill(eta, energy);
_th_totN_hard->fill(eta);
}
if (isdiffEvt) {
_tp_totEF_diff->fill(eta, energy);
_th_totN_diff->fill(eta);
} else {
_tp_totEF_nondiff->fill(eta, energy);
_th_totN_nondiff->fill(eta);
}
}
}
// Charged-only version
{
bool ishardEvt = false;
foreach (const Particle& p, fcfs.particles()) {
if (p.pT() > 3.0*GeV) { ishardEvt = true; break; }
}
// Decide empirically if this is a "diffractive" event
/// @todo Can be "diffractive" *and* "hard"?
bool isdiffEvt = (bcfs.size() == 0);
// Update event-type weight counters
_mbchSumW->fill();
(isdiffEvt ? _dfchSumW : _ndchSumW)->fill();
if (ishardEvt) _hdchSumW->fill();
// Plot energy flow
foreach (const Particle& p, fcfs.particles()) {
const double eta = p.eta();
const double energy = p.E();
_tp_chEF_minbias->fill(eta, energy);
_th_chN_minbias->fill(eta);
if (ishardEvt) {
_tp_chEF_hard->fill(eta, energy);
_th_chN_hard->fill(eta);
}
if (isdiffEvt) {
_tp_chEF_diff->fill(eta, energy);
_th_chN_diff->fill(eta);
} else {
_tp_chEF_nondiff->fill(eta, energy);
_th_chN_nondiff->fill(eta);
}
}
}
}
void finalize() {
for (size_t i = 0; i < _s_totEF_minbias->numPoints(); ++i) {
const double val = _tp_totEF_minbias->bin(i).mean() * _th_totN_minbias->bin(i).height();
const double err = (_tp_totEF_minbias->bin(i).mean() * _th_totN_minbias->bin(i).heightErr() +
_tp_totEF_minbias->bin(i).stdErr() * _th_totN_minbias->bin(i).height());
- _s_totEF_minbias->point(i).setY(val/_mbSumW, err/_mbSumW);
+ _s_totEF_minbias->point(i).setY(val/_mbSumW->val(), err/_mbSumW->val());
}
for (size_t i = 0; i < _s_totEF_hard->numPoints(); ++i) {
const double val = _tp_totEF_hard->bin(i).mean() * _th_totN_hard->bin(i).height();
const double err = (_tp_totEF_hard->bin(i).mean() * _th_totN_hard->bin(i).heightErr() +
_tp_totEF_hard->bin(i).stdErr() * _th_totN_hard->bin(i).height());
- _s_totEF_hard->point(i).setY(val/_hdSumW, err/_hdSumW);
+ _s_totEF_hard->point(i).setY(val/_hdSumW->val(), err/_hdSumW->val());
}
for (size_t i = 0; i < _s_totEF_diff->numPoints(); ++i) {
const double val = _tp_totEF_diff->bin(i).mean() * _th_totN_diff->bin(i).height();
const double err = (_tp_totEF_diff->bin(i).mean() * _th_totN_diff->bin(i).heightErr() +
_tp_totEF_diff->bin(i).stdErr() * _th_totN_diff->bin(i).height());
- _s_totEF_diff->point(i).setY(val/_dfSumW, err/_dfSumW);
+ _s_totEF_diff->point(i).setY(val/_dfSumW->val(), err/_dfSumW->val());
}
for (size_t i = 0; i < _s_totEF_nondiff->numPoints(); ++i) {
const double val = _tp_totEF_nondiff->bin(i).mean() * _th_totN_nondiff->bin(i).height();
const double err = (_tp_totEF_nondiff->bin(i).mean() * _th_totN_nondiff->bin(i).heightErr() +
_tp_totEF_nondiff->bin(i).stdErr() * _th_totN_nondiff->bin(i).height());
- _s_totEF_nondiff->point(i).setY(val/_ndSumW, err/_ndSumW);
+ _s_totEF_nondiff->point(i).setY(val/_ndSumW->val(), err/_ndSumW->val());
}
for (size_t i = 0; i < _s_chEF_minbias->numPoints(); ++i) {
const double val = _tp_chEF_minbias->bin(i).mean() * _th_chN_minbias->bin(i).height();
const double err = (_tp_chEF_minbias->bin(i).mean() * _th_chN_minbias->bin(i).heightErr() +
_tp_chEF_minbias->bin(i).stdErr() * _th_chN_minbias->bin(i).height());
- _s_chEF_minbias->point(i).setY(val/_mbchSumW, err/_mbchSumW);
+ _s_chEF_minbias->point(i).setY(val/_mbchSumW->val(), err/_mbchSumW->val());
}
for (size_t i = 0; i < _s_chEF_hard->numPoints(); ++i) {
const double val = _tp_chEF_hard->bin(i).mean() * _th_chN_hard->bin(i).height();
const double err = (_tp_chEF_hard->bin(i).mean() * _th_chN_hard->bin(i).heightErr() +
_tp_chEF_hard->bin(i).stdErr() * _th_chN_hard->bin(i).height());
- _s_chEF_hard->point(i).setY(val/_hdchSumW, err/_hdchSumW);
+ _s_chEF_hard->point(i).setY(val/_hdchSumW->val(), err/_hdchSumW->val());
}
for (size_t i = 0; i < _s_chEF_diff->numPoints(); ++i) {
const double val = _tp_chEF_diff->bin(i).mean() * _th_chN_diff->bin(i).height();
const double err = (_tp_chEF_diff->bin(i).mean() * _th_chN_diff->bin(i).heightErr() +
_tp_chEF_diff->bin(i).stdErr() * _th_chN_diff->bin(i).height());
- _s_chEF_diff->point(i).setY(val/_dfchSumW, err/_dfchSumW);
+ _s_chEF_diff->point(i).setY(val/_dfchSumW->val(), err/_dfchSumW->val());
}
for (size_t i = 0; i < _s_chEF_nondiff->numPoints(); ++i) {
const double val = _tp_chEF_nondiff->bin(i).mean() * _th_chN_nondiff->bin(i).height();
const double err = (_tp_chEF_nondiff->bin(i).mean() * _th_chN_nondiff->bin(i).heightErr() +
_tp_chEF_nondiff->bin(i).stdErr() * _th_chN_nondiff->bin(i).height());
- _s_chEF_nondiff->point(i).setY(val/_ndchSumW, err/_ndchSumW);
+ _s_chEF_nondiff->point(i).setY(val/_ndchSumW->val(), err/_ndchSumW->val());
}
}
private:
/// @name Histograms and counters
///
/// @note Histograms correspond to charged and total EF for each class of events:
/// minimum bias, hard scattering, diffractive enriched and non-diffractive enriched.
//@{
// Scatters to be filled in finalize with 1/d_eta <N(eta)><E(eta)>
Scatter2DPtr _s_totEF_minbias, _s_totEF_hard, _s_totEF_diff, _s_totEF_nondiff;
Scatter2DPtr _s_chEF_minbias, _s_chEF_hard, _s_chEF_diff, _s_chEF_nondiff;
// Temp profiles containing <E(eta)>
Profile1DPtr _tp_totEF_minbias, _tp_totEF_hard, _tp_totEF_diff, _tp_totEF_nondiff;
Profile1DPtr _tp_chEF_minbias, _tp_chEF_hard, _tp_chEF_diff, _tp_chEF_nondiff;
// Temp histos containing N(eta)
Histo1DPtr _th_totN_minbias, _th_totN_hard, _th_totN_diff, _th_totN_nondiff;
Histo1DPtr _th_chN_minbias, _th_chN_hard, _th_chN_diff, _th_chN_nondiff;
// Sums of weights (~ #events) in each event class
CounterPtr _mbSumW, _hdSumW, _dfSumW, _ndSumW;
CounterPtr _mbchSumW, _hdchSumW, _dfchSumW, _ndchSumW;
//@}
};
// Hook for the plugin system
DECLARE_RIVET_PLUGIN(LHCB_2013_I1208105);
}
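// A standalone sketch of the point-building arithmetic in the finalize()
// loops above: each scatter point is set to y = <E(eta)> * N(eta) / sumW,
// with the uncertainty propagated linearly (|dy| = <E>*dN + dE*N, then
// divided by sumW) rather than in quadrature. All names are illustrative.
#include <iostream>

struct PointY { double y, yerr; };

PointY energyFlowPoint(double meanE, double meanEErr,
                       double n, double nErr, double sumW) {
  const double val = meanE * n;
  const double err = meanE * nErr + meanEErr * n;  // linear error sum
  return { val / sumW, err / sumW };
}

int main() {
  const PointY p = energyFlowPoint(5.2, 0.3, 120.0, 11.0, 1000.0);
  std::cout << p.y << " +- " << p.yerr << "\n";    // 0.624 +- 0.0932
  return 0;
}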
diff --git a/analyses/pluginLHCb/LHCB_2014_I1281685.cc b/analyses/pluginLHCb/LHCB_2014_I1281685.cc
--- a/analyses/pluginLHCb/LHCB_2014_I1281685.cc
+++ b/analyses/pluginLHCb/LHCB_2014_I1281685.cc
@@ -1,1177 +1,1177 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
/// Charged particle multiplicities and densities in $pp$ collisions at $\sqrt{s} = 7$ TeV
class LHCB_2014_I1281685 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
LHCB_2014_I1281685()
: Analysis("LHCB_2014_I1281685"),
_p_min(2.0),
_pt_min(0.2),
_eta_min(2.0),
_eta_max(4.8),
_maxlft(1.0e-11)
{ }
//@}
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
fillMap(_partLftMap);
// Projections
declare(ChargedFinalState(_eta_min, _eta_max, _pt_min*GeV), "CFS");
// Book histograms
book(_h_mult_total ,"d03-x01-y01", 50, 0.5, 50.5);
book(_h_mult_eta[0] ,"d04-x01-y01", 21, -0.5, 20.5); //eta=[2.0,2.5]
book(_h_mult_eta[1] ,"d04-x01-y02", 21, -0.5, 20.5); //eta=[2.5,3.0]
book(_h_mult_eta[2] ,"d04-x01-y03", 21, -0.5, 20.5); //eta=[3.0,3.5]
book(_h_mult_eta[3] ,"d04-x01-y04", 21, -0.5, 20.5); //eta=[3.5,4.0]
book(_h_mult_eta[4] ,"d04-x01-y05", 21, -0.5, 20.5); //eta=[4.0,4.5]
book(_h_mult_pt[0] ,"d05-x01-y01", 21, -0.5, 20.5); //pT=[0.2,0.3]GeV
book(_h_mult_pt[1] ,"d05-x01-y02", 21, -0.5, 20.5); //pT=[0.3,0.4]GeV
book(_h_mult_pt[2] ,"d05-x01-y03", 21, -0.5, 20.5); //pT=[0.4,0.6]GeV
book(_h_mult_pt[3] ,"d05-x01-y04", 21, -0.5, 20.5); //pT=[0.6,1.0]GeV
book(_h_mult_pt[4] ,"d05-x01-y05", 21, -0.5, 20.5); //pT=[1.0,2.0]GeV
book(_h_dndeta ,"d01-x01-y01", 14, 2.0, 4.8); //eta=[2,4.8]
book(_h_dndpt ,"d02-x01-y01", 18, 0.2, 2.0); //pT=[0.2,2.0]GeV
// Counters
book(_sumW, "TMP/sumW");
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// Variable to store multiplicities per event
int LHCbcountAll = 0; //count particles fulfilling all requirements
int LHCbcountEta[8] = {0,0,0,0,0,0,0,0}; //count per eta-bin
int LHCbcountPt[7] = {0,0,0,0,0,0,0}; //count per pT-bin
vector<double> val_dNdEta;
vector<double> val_dNdPt;
val_dNdEta.clear();
val_dNdPt.clear();
const ChargedFinalState& cfs = apply<ChargedFinalState>(event, "CFS");
foreach (const Particle& p, cfs.particles()) {
int id = p.pdgId();
// continue if particle is not a pion, kaon, proton, muon or electron
if ( !( (abs(id) == 211) || (abs(id) == 321) || (abs(id) == 2212) || (abs(id) == 13) || (abs(id) == 11)) ) {
continue;
}
const FourMomentum& qmom = p.momentum();
const double eta = p.momentum().eta();
const double pT = p.momentum().pT();
//minimum momentum
if (qmom.p3().mod() < _p_min) continue;
//minimum tr. momentum
if (pT < _pt_min) continue;
//eta range
if ((eta < _eta_min) || (eta > _eta_max)) continue;
// Select only prompt particles via lifetime:
// sum of all mother (PDG) lifetimes must be < 10 ps
double ancestors_sumlft = getAncestorSumLifetime(p);
if( (ancestors_sumlft > _maxlft) || (ancestors_sumlft < 0) ) continue;
// After all cuts
LHCbcountAll++; //count particles in whole kin. range
//in eta bins
if( eta >2.0 && eta <= 2.5) LHCbcountEta[0]++;
if( eta >2.5 && eta <= 3.0) LHCbcountEta[1]++;
if( eta >3.0 && eta <= 3.5) LHCbcountEta[2]++;
if( eta >3.5 && eta <= 4.0) LHCbcountEta[3]++;
if( eta >4.0 && eta <= 4.5) LHCbcountEta[4]++;
if( eta >2.0 && eta <= 4.8) LHCbcountEta[5]++; //cross-check
//in pT bins
if( pT > 0.2 && pT <= 0.3) LHCbcountPt[0]++;
if( pT > 0.3 && pT <= 0.4) LHCbcountPt[1]++;
if( pT > 0.4 && pT <= 0.6) LHCbcountPt[2]++;
if( pT > 0.6 && pT <= 1.0) LHCbcountPt[3]++;
if( pT > 1.0 && pT <= 2.0) LHCbcountPt[4]++;
if( pT > 0.2) LHCbcountPt[5]++; //cross-check
//particle densities -> need proper normalization (finalize)
val_dNdPt.push_back( pT );
val_dNdEta.push_back( eta );
}//end foreach
// Fill histograms only if at least one particle per event was within the
// kinematic range of the analysis!
if (LHCbcountAll) {
_sumW->fill();
_h_mult_total->fill(LHCbcountAll);
_h_mult_eta[0]->fill(LHCbcountEta[0]);
_h_mult_eta[1]->fill(LHCbcountEta[1]);
_h_mult_eta[2]->fill(LHCbcountEta[2]);
_h_mult_eta[3]->fill(LHCbcountEta[3]);
_h_mult_eta[4]->fill(LHCbcountEta[4]);
_h_mult_pt[0]->fill(LHCbcountPt[0]);
_h_mult_pt[1]->fill(LHCbcountPt[1]);
_h_mult_pt[2]->fill(LHCbcountPt[2]);
_h_mult_pt[3]->fill(LHCbcountPt[3]);
_h_mult_pt[4]->fill(LHCbcountPt[4]);
for (size_t part = 0; part < val_dNdEta.size(); part++)
_h_dndeta->fill(val_dNdEta[part]);
for (size_t part = 0; part < val_dNdPt.size(); part++)
_h_dndpt->fill(val_dNdPt[part]);
}
}
/// Normalise histograms etc., after the run
void finalize() {
- const double scalefactor = 1.0/_sumW; // normalize multiplicity histograms by nEvents
+ const double scalefactor = 1.0/_sumW->val(); // normalize multiplicity histograms by nEvents
const double scale1k = 1000.; // to match '10^3' scale in reference histograms
scale( _h_dndeta, scalefactor );
scale( _h_dndpt, scalefactor*0.1 ); //additional factor 0.1 for [0.1 GeV/c]
scale( _h_mult_total, scalefactor*scale1k);
_h_mult_eta[0]->scaleW( scalefactor*scale1k );
_h_mult_eta[1]->scaleW( scalefactor*scale1k );
_h_mult_eta[2]->scaleW( scalefactor*scale1k );
_h_mult_eta[3]->scaleW( scalefactor*scale1k );
_h_mult_eta[4]->scaleW( scalefactor*scale1k );
_h_mult_pt[0]->scaleW( scalefactor*scale1k );
_h_mult_pt[1]->scaleW( scalefactor*scale1k );
_h_mult_pt[2]->scaleW( scalefactor*scale1k );
_h_mult_pt[3]->scaleW( scalefactor*scale1k );
_h_mult_pt[4]->scaleW( scalefactor*scale1k );
}
//@}
private:
// Get mean PDG lifetime for particle with PID
double getLifetime(int pid) {
double lft = 0.;
map<int, double>::iterator pPartLft = _partLftMap.find(pid);
if (pPartLft != _partLftMap.end()) {
lft = (*pPartLft).second;
} else {
// Report missing lifetimes only in debug mode
MSG_DEBUG("Could not determine lifetime for particle with PID " << pid << "... Assume non-prompt particle");
lft = -1;
}
return lft;
}
// Get summed PDG lifetime of all ancestor particles
double getAncestorSumLifetime(const Particle& p) {
double lftSum = 0.;
double plft = 0.;
const GenParticle* part = p.genParticle();
if ( 0 == part ) return -1;
const GenVertex* ivtx = part->production_vertex();
while(ivtx) {
if (ivtx->particles_in_size() < 1) { lftSum = -1.; break; }
const GenVertex::particles_in_const_iterator iPart_invtx = ivtx->particles_in_const_begin();
part = (*iPart_invtx);
if ( !(part) ) { lftSum = -1.; break; }
ivtx = part->production_vertex();
if ( (part->pdg_id() == 2212) || !(ivtx) ) break; // reached beam
plft = getLifetime(part->pdg_id());
if (plft < 0.) { lftSum = -1.; break; }
lftSum += plft;
}
return (lftSum);
}
/// Hard-coded map linking PDG ID with PDG lifetime in seconds (converted from ParticleTable.txt)
bool fillMap(map<int, double>& m) {
// PDGID = LIFETIME
m[22] = 1.000000e+016;
m[-11] = 1.000000e+016;
m[11] = 1.000000e+016;
m[12] = 1.000000e+016;
m[-13] = 2.197036e-006;
m[13] = 2.197036e-006;
m[111] = 8.438618e-017;
m[211] = 2.603276e-008;
m[-211] = 2.603276e-008;
m[130] = 5.174624e-008;
m[321] = 1.238405e-008;
m[-321] = 1.238405e-008;
m[2112] = 885.646128;
m[2212] = 1.000000e+016;
m[-2212] = 1.000000e+016;
m[310] = 8.934603e-011;
m[221] = 5.578070e-019;
m[3122] = 2.631796e-010;
m[3222] = 8.018178e-011;
m[3212] = 7.395643e-020;
m[3112] = 1.479129e-010;
m[3322] = 2.899613e-010;
m[3312] = 1.637344e-010;
m[3334] = 8.207135e-011;
m[-2112] = 885.646128;
m[-3122] = 2.631796e-010;
m[-3222] = 8.018178e-011;
m[-3212] = 7.395643e-020;
m[-3112] = 1.479129e-010;
m[-3322] = 2.899613e-010;
m[-3312] = 1.637344e-010;
m[-3334] = 8.207135e-011;
m[113] = 4.411610e-024;
m[213] = 4.411610e-024;
m[-213] = 4.411610e-024;
m[223] = 7.798723e-023;
m[333] = 1.545099e-022;
m[323] = 1.295693e-023;
m[-323] = 1.295693e-023;
m[313] = 1.298249e-023;
m[-313] = 1.298249e-023;
m[20213] = 1.500000e-024;
m[-20213] = 1.500000e-024;
m[450000000] = 1.000000e+015;
m[460000000] = 1.000000e+015;
m[470000000] = 1.000000e+015;
m[480000000] = 1.000000e+015;
m[490000000] = 1.000000e+015;
m[20022] = 1.000000e+016;
m[-15] = 2.906014e-013;
m[15] = 2.906014e-013;
m[24] = 3.104775e-025;
m[-24] = 3.104775e-025;
m[23] = 2.637914e-025;
m[411] = 1.051457e-012;
m[-411] = 1.051457e-012;
m[421] = 4.116399e-013;
m[-421] = 4.116399e-013;
m[431] = 4.904711e-013;
m[-431] = 4.904711e-013;
m[4122] = 1.994582e-013;
m[-4122] = 1.994582e-013;
m[443] = 7.565657e-021;
m[413] = 6.856377e-021;
m[-413] = 6.856377e-021;
m[423] = 1.000003e-019;
m[-423] = 1.000003e-019;
m[433] = 1.000003e-019;
m[-433] = 1.000003e-019;
m[521] = 1.671000e-012;
m[-521] = 1.671000e-012;
m[511] = 1.536000e-012;
m[-511] = 1.536000e-012;
m[531] = 1.461000e-012;
m[-531] = 1.461000e-012;
m[541] = 4.600000e-013;
m[-541] = 4.600000e-013;
m[5122] = 1.229000e-012;
m[-5122] = 1.229000e-012;
m[4112] = 4.388081e-022;
m[-4112] = 4.388081e-022;
m[4212] = 3.999999e-022;
m[-4212] = 3.999999e-022;
m[4222] = 3.291060e-022;
m[-4222] = 3.291060e-022;
m[25] = 9.400000e-026;
m[35] = 9.400000e-026;
m[36] = 9.400000e-026;
m[37] = 9.400000e-026;
m[-37] = 9.400000e-026;
m[4312] = 9.800002e-014;
m[-4312] = 9.800002e-014;
m[4322] = 3.500001e-013;
m[-4322] = 3.500001e-013;
m[4332] = 6.453061e-014;
m[-4332] = 6.453061e-014;
m[4132] = 9.824063e-014;
m[-4132] = 9.824063e-014;
m[4232] = 4.417532e-013;
m[-4232] = 4.417532e-013;
m[5222] = 1.000000e-019;
m[-5222] = 1.000000e-019;
m[5212] = 1.000000e-019;
m[-5212] = 1.000000e-019;
m[5112] = 1.000000e-019;
m[-5112] = 1.000000e-019;
m[5312] = 1.000000e-019;
m[-5312] = 1.000000e-019;
m[5322] = 1.000000e-019;
m[-5322] = 1.000000e-019;
m[5332] = 1.550000e-012;
m[-5332] = 1.550000e-012;
m[5132] = 1.390000e-012;
m[-5132] = 1.390000e-012;
m[5232] = 1.390000e-012;
m[-5232] = 1.390000e-012;
m[100443] = 2.194041e-021;
m[331] = 3.258476e-021;
m[441] = 4.113826e-023;
m[10441] = 4.063038e-023;
m[20443] = 7.154480e-022;
m[445] = 3.164482e-022;
m[9000111] = 1.149997e-023;
m[9000211] = 1.149997e-023;
m[-9000211] = 1.149997e-023;
m[20113] = 1.500000e-024;
m[115] = 6.151516e-024;
m[215] = 6.151516e-024;
m[-215] = 6.151516e-024;
m[10323] = 7.313469e-024;
m[-10323] = 7.313469e-024;
m[10313] = 7.313469e-024;
m[-10313] = 7.313469e-024;
m[20323] = 3.782829e-024;
m[-20323] = 3.782829e-024;
m[20313] = 3.782829e-024;
m[-20313] = 3.782829e-024;
m[10321] = 2.238817e-024;
m[-10321] = 2.238817e-024;
m[10311] = 2.238817e-024;
m[-10311] = 2.238817e-024;
m[325] = 6.682357e-024;
m[-325] = 6.682357e-024;
m[315] = 6.038644e-024;
m[-315] = 6.038644e-024;
m[10411] = 4.380000e-024;
m[20413] = 2.630000e-024;
m[10413] = 3.290000e-023;
m[-415] = 2.632849e-023;
m[-10411] = 4.380000e-024;
m[-20413] = 2.630000e-024;
m[-10413] = 3.290000e-023;
m[415] = 2.632849e-023;
m[10421] = 4.380000e-024;
m[20423] = 2.630000e-024;
m[10423] = 3.482604e-023;
m[-425] = 2.861792e-023;
m[-10421] = 4.380000e-024;
m[-20423] = 2.630000e-024;
m[-10423] = 3.482604e-023;
m[425] = 2.861792e-023;
m[10431] = 6.582100e-022;
m[20433] = 6.582100e-022;
m[10433] = 6.582100e-022;
m[435] = 4.388100e-023;
m[-10431] = 6.582100e-022;
m[-20433] = 6.582100e-022;
m[-10433] = 6.582100e-022;
m[-435] = 4.388100e-023;
m[2224] = 5.485102e-024;
m[2214] = 5.485102e-024;
m[2114] = 5.485102e-024;
m[1114] = 5.485102e-024;
m[-2224] = 5.485102e-024;
m[-2214] = 5.485102e-024;
m[-2114] = 5.485102e-024;
m[-1114] = 5.485102e-024;
m[-523] = 1.000019e-019;
m[523] = 1.000019e-019;
m[513] = 1.000019e-019;
m[-513] = 1.000019e-019;
m[533] = 1.000000e-019;
m[-533] = 1.000000e-019;
m[10521] = 4.390000e-024;
m[20523] = 2.630000e-024;
m[10523] = 1.650000e-023;
m[525] = 1.310000e-023;
m[-10521] = 4.390000e-024;
m[-20523] = 2.630000e-024;
m[-10523] = 1.650000e-023;
m[-525] = 1.310000e-023;
m[10511] = 4.390000e-024;
m[20513] = 2.630000e-024;
m[10513] = 1.650000e-023;
m[515] = 1.310000e-023;
m[-10511] = 4.390000e-024;
m[-20513] = 2.630000e-024;
m[-10513] = 1.650000e-023;
m[-515] = 1.310000e-023;
m[10531] = 4.390000e-024;
m[20533] = 2.630000e-024;
m[10533] = 1.650000e-023;
m[535] = 1.310000e-023;
m[-10531] = 4.390000e-024;
m[-20533] = 2.630000e-024;
m[-10533] = 1.650000e-023;
m[-535] = 1.310000e-023;
m[14] = 1.000000e+016;
m[-14] = 1.000000e+016;
m[-12] = 1.000000e+016;
m[1] = 0.000000e+000;
m[-1] = 0.000000e+000;
m[2] = 0.000000e+000;
m[-2] = 0.000000e+000;
m[3] = 0.000000e+000;
m[-3] = 0.000000e+000;
m[4] = 0.000000e+000;
m[-4] = 0.000000e+000;
m[5] = 0.000000e+000;
m[-5] = 0.000000e+000;
m[6] = 4.707703e-025;
m[-6] = 4.707703e-025;
m[7] = 0.000000e+000;
m[-7] = 0.000000e+000;
m[8] = 0.000000e+000;
m[-8] = 0.000000e+000;
m[16] = 1.000000e+016;
m[-16] = 1.000000e+016;
m[17] = 0.000000e+000;
m[-17] = 0.000000e+000;
m[18] = 0.000000e+000;
m[-18] = 0.000000e+000;
m[21] = 0.000000e+000;
m[32] = 0.000000e+000;
m[33] = 0.000000e+000;
m[34] = 0.000000e+000;
m[-34] = 0.000000e+000;
m[39] = 0.000000e+000;
m[41] = 0.000000e+000;
m[-41] = 0.000000e+000;
m[42] = 0.000000e+000;
m[-42] = 0.000000e+000;
m[43] = 0.000000e+000;
m[44] = 0.000000e+000;
m[-44] = 0.000000e+000;
m[81] = 0.000000e+000;
m[82] = 0.000000e+000;
m[-82] = 0.000000e+000;
m[83] = 0.000000e+000;
m[84] = 3.335641e-013;
m[-84] = 3.335641e-013;
m[85] = 1.290893e-012;
m[-85] = 1.290893e-012;
m[86] = 0.000000e+000;
m[-86] = 0.000000e+000;
m[87] = 0.000000e+000;
m[-87] = 0.000000e+000;
m[88] = 0.000000e+000;
m[90] = 0.000000e+000;
m[91] = 0.000000e+000;
m[92] = 0.000000e+000;
m[93] = 0.000000e+000;
m[94] = 0.000000e+000;
m[95] = 0.000000e+000;
m[96] = 0.000000e+000;
m[97] = 0.000000e+000;
m[98] = 0.000000e+000;
m[99] = 0.000000e+000;
m[117] = 4.088275e-024;
m[119] = 1.828367e-024;
m[217] = 4.088275e-024;
m[-217] = 4.088275e-024;
m[219] = 1.828367e-024;
m[-219] = 1.828367e-024;
m[225] = 3.555982e-024;
m[227] = 3.917930e-024;
m[229] = 3.392846e-024;
m[311] = 1.000000e+016;
m[-311] = 1.000000e+016;
m[317] = 4.139699e-024;
m[-317] = 4.139699e-024;
m[319] = 3.324304e-024;
m[-319] = 3.324304e-024;
m[327] = 4.139699e-024;
m[-327] = 4.139699e-024;
m[329] = 3.324304e-024;
m[-329] = 3.324304e-024;
m[335] = 8.660687e-024;
m[337] = 7.565657e-024;
m[543] = 0.000000e+000;
m[-543] = 0.000000e+000;
m[545] = 0.000000e+000;
m[-545] = 0.000000e+000;
m[551] = 0.000000e+000;
m[553] = 1.253738e-020;
m[555] = 1.000000e+016;
m[557] = 0.000000e+000;
m[-450000000] = 0.000000e+000;
m[-490000000] = 0.000000e+000;
m[-460000000] = 0.000000e+000;
m[-470000000] = 0.000000e+000;
m[1103] = 0.000000e+000;
m[-1103] = 0.000000e+000;
m[1112] = 4.388081e-024;
m[-1112] = 4.388081e-024;
m[1116] = 1.880606e-024;
m[-1116] = 1.880606e-024;
m[1118] = 2.194041e-024;
m[-1118] = 2.194041e-024;
m[1212] = 4.388081e-024;
m[-1212] = 4.388081e-024;
m[1214] = 5.485102e-024;
m[-1214] = 5.485102e-024;
m[1216] = 1.880606e-024;
m[-1216] = 1.880606e-024;
m[1218] = 1.462694e-024;
m[-1218] = 1.462694e-024;
m[2101] = 0.000000e+000;
m[-2101] = 0.000000e+000;
m[2103] = 0.000000e+000;
m[-2103] = 0.000000e+000;
m[2116] = 4.388081e-024;
m[-2116] = 4.388081e-024;
m[2118] = 2.194041e-024;
m[-2118] = 2.194041e-024;
m[2122] = 4.388081e-024;
m[-2122] = 4.388081e-024;
m[2124] = 5.485102e-024;
m[-2124] = 5.485102e-024;
m[2126] = 1.880606e-024;
m[-2126] = 1.880606e-024;
m[2128] = 1.462694e-024;
m[-2128] = 1.462694e-024;
m[2203] = 0.000000e+000;
m[-2203] = 0.000000e+000;
m[2216] = 4.388081e-024;
m[-2216] = 4.388081e-024;
m[2218] = 2.194041e-024;
m[-2218] = 2.194041e-024;
m[2222] = 4.388081e-024;
m[-2222] = 4.388081e-024;
m[2226] = 1.880606e-024;
m[-2226] = 1.880606e-024;
m[2228] = 2.194041e-024;
m[-2228] = 2.194041e-024;
m[3101] = 0.000000e+000;
m[-3101] = 0.000000e+000;
m[3103] = 0.000000e+000;
m[-3103] = 0.000000e+000;
m[3114] = 1.670589e-023;
m[-3114] = 1.670589e-023;
m[3116] = 5.485102e-024;
m[-3116] = 5.485102e-024;
m[3118] = 3.656734e-024;
m[-3118] = 3.656734e-024;
m[3124] = 4.219309e-023;
m[-3124] = 4.219309e-023;
m[3126] = 8.227653e-024;
m[-3126] = 8.227653e-024;
m[3128] = 3.291061e-024;
m[-3128] = 3.291061e-024;
m[3201] = 0.000000e+000;
m[-3201] = 0.000000e+000;
m[3203] = 0.000000e+000;
m[-3203] = 0.000000e+000;
m[3214] = 1.828367e-023;
m[-3214] = 1.828367e-023;
m[3216] = 5.485102e-024;
m[-3216] = 5.485102e-024;
m[3218] = 3.656734e-024;
m[-3218] = 3.656734e-024;
m[3224] = 1.838582e-023;
m[-3224] = 1.838582e-023;
m[3226] = 5.485102e-024;
m[-3226] = 5.485102e-024;
m[3228] = 3.656734e-024;
m[-3228] = 3.656734e-024;
m[3303] = 0.000000e+000;
m[-3303] = 0.000000e+000;
m[3314] = 6.648608e-023;
m[-3314] = 6.648608e-023;
m[3324] = 7.233101e-023;
m[-3324] = 7.233101e-023;
m[4101] = 0.000000e+000;
m[-4101] = 0.000000e+000;
m[4103] = 0.000000e+000;
m[-4103] = 0.000000e+000;
m[4114] = 0.000000e+000;
m[-4114] = 0.000000e+000;
m[4201] = 0.000000e+000;
m[-4201] = 0.000000e+000;
m[4203] = 0.000000e+000;
m[-4203] = 0.000000e+000;
m[4214] = 3.291061e-022;
m[-4214] = 3.291061e-022;
m[4224] = 0.000000e+000;
m[-4224] = 0.000000e+000;
m[4301] = 0.000000e+000;
m[-4301] = 0.000000e+000;
m[4303] = 0.000000e+000;
m[-4303] = 0.000000e+000;
m[4314] = 0.000000e+000;
m[-4314] = 0.000000e+000;
m[4324] = 0.000000e+000;
m[-4324] = 0.000000e+000;
m[4334] = 0.000000e+000;
m[-4334] = 0.000000e+000;
m[4403] = 0.000000e+000;
m[-4403] = 0.000000e+000;
m[4412] = 3.335641e-013;
m[-4412] = 3.335641e-013;
m[4414] = 3.335641e-013;
m[-4414] = 3.335641e-013;
m[4422] = 3.335641e-013;
m[-4422] = 3.335641e-013;
m[4424] = 3.335641e-013;
m[-4424] = 3.335641e-013;
m[4432] = 3.335641e-013;
m[-4432] = 3.335641e-013;
m[4434] = 3.335641e-013;
m[-4434] = 3.335641e-013;
m[4444] = 3.335641e-013;
m[-4444] = 3.335641e-013;
m[5101] = 0.000000e+000;
m[-5101] = 0.000000e+000;
m[5103] = 0.000000e+000;
m[-5103] = 0.000000e+000;
m[5114] = 0.000000e+000;
m[-5114] = 0.000000e+000;
m[5142] = 1.290893e-012;
m[-5142] = 1.290893e-012;
m[5201] = 0.000000e+000;
m[-5201] = 0.000000e+000;
m[5203] = 0.000000e+000;
m[-5203] = 0.000000e+000;
m[5214] = 0.000000e+000;
m[-5214] = 0.000000e+000;
m[5224] = 0.000000e+000;
m[-5224] = 0.000000e+000;
m[5242] = 1.290893e-012;
m[-5242] = 1.290893e-012;
m[5301] = 0.000000e+000;
m[-5301] = 0.000000e+000;
m[5303] = 0.000000e+000;
m[-5303] = 0.000000e+000;
m[5314] = 0.000000e+000;
m[-5314] = 0.000000e+000;
m[5324] = 0.000000e+000;
m[-5324] = 0.000000e+000;
m[5334] = 0.000000e+000;
m[-5334] = 0.000000e+000;
m[5342] = 1.290893e-012;
m[-5342] = 1.290893e-012;
m[5401] = 0.000000e+000;
m[-5401] = 0.000000e+000;
m[5403] = 0.000000e+000;
m[-5403] = 0.000000e+000;
m[5412] = 1.290893e-012;
m[-5412] = 1.290893e-012;
m[5414] = 1.290893e-012;
m[-5414] = 1.290893e-012;
m[5422] = 1.290893e-012;
m[-5422] = 1.290893e-012;
m[5424] = 1.290893e-012;
m[-5424] = 1.290893e-012;
m[5432] = 1.290893e-012;
m[-5432] = 1.290893e-012;
m[5434] = 1.290893e-012;
m[-5434] = 1.290893e-012;
m[5442] = 1.290893e-012;
m[-5442] = 1.290893e-012;
m[5444] = 1.290893e-012;
m[-5444] = 1.290893e-012;
m[5503] = 0.000000e+000;
m[-5503] = 0.000000e+000;
m[5512] = 1.290893e-012;
m[-5512] = 1.290893e-012;
m[5514] = 1.290893e-012;
m[-5514] = 1.290893e-012;
m[5522] = 1.290893e-012;
m[-5522] = 1.290893e-012;
m[5524] = 1.290893e-012;
m[-5524] = 1.290893e-012;
m[5532] = 1.290893e-012;
m[-5532] = 1.290893e-012;
m[5534] = 1.290893e-012;
m[-5534] = 1.290893e-012;
m[5542] = 1.290893e-012;
m[-5542] = 1.290893e-012;
m[5544] = 1.290893e-012;
m[-5544] = 1.290893e-012;
m[5554] = 1.290893e-012;
m[-5554] = 1.290893e-012;
m[10022] = 0.000000e+000;
m[10111] = 2.483820e-024;
m[10113] = 4.635297e-024;
m[10115] = 2.541360e-024;
m[10211] = 2.483820e-024;
m[-10211] = 2.483820e-024;
m[10213] = 4.635297e-024;
m[-10213] = 4.635297e-024;
m[10215] = 2.541360e-024;
m[-10215] = 2.541360e-024;
m[9010221] = 1.316424e-023;
m[10223] = 1.828367e-024;
m[10225] = 0.000000e+000;
m[10315] = 3.538775e-024;
m[-10315] = 3.538775e-024;
m[10325] = 3.538775e-024;
m[-10325] = 3.538775e-024;
m[10331] = 5.265698e-024;
m[10333] = 0.000000e+000;
m[10335] = 0.000000e+000;
m[10443] = 0.000000e+000;
m[10541] = 0.000000e+000;
m[-10541] = 0.000000e+000;
m[10543] = 0.000000e+000;
m[-10543] = 0.000000e+000;
m[10551] = 1.000000e+016;
m[10553] = 0.000000e+000;
m[10555] = 0.000000e+000;
m[11112] = 0.000000e+000;
m[-11112] = 0.000000e+000;
m[11114] = 2.194041e-024;
m[-11114] = 2.194041e-024;
m[11116] = 1.880606e-024;
m[-11116] = 1.880606e-024;
m[11212] = 1.880606e-024;
m[-11212] = 1.880606e-024;
m[11216] = 0.000000e+000;
m[-11216] = 0.000000e+000;
m[12112] = 1.880606e-024;
m[-12112] = 1.880606e-024;
m[12114] = 2.194041e-024;
m[-12114] = 2.194041e-024;
m[12116] = 5.063171e-024;
m[-12116] = 5.063171e-024;
m[12118] = 0.000000e+000;
m[-12118] = 0.000000e+000;
m[12122] = 0.000000e+000;
m[-12122] = 0.000000e+000;
m[12126] = 1.880606e-024;
m[-12126] = 1.880606e-024;
m[12212] = 1.880606e-024;
m[-12212] = 1.880606e-024;
m[12214] = 2.194041e-024;
m[-12214] = 2.194041e-024;
m[12216] = 5.063171e-024;
m[-12216] = 5.063171e-024;
m[12218] = 0.000000e+000;
m[-12218] = 0.000000e+000;
m[12222] = 0.000000e+000;
m[-12222] = 0.000000e+000;
m[12224] = 2.194041e-024;
m[-12224] = 2.194041e-024;
m[12226] = 1.880606e-024;
m[-12226] = 1.880606e-024;
m[13112] = 6.582122e-024;
m[-13112] = 6.582122e-024;
m[13114] = 1.097020e-023;
m[-13114] = 1.097020e-023;
m[13116] = 5.485102e-024;
m[-13116] = 5.485102e-024;
m[13122] = 1.316424e-023;
m[-13122] = 1.316424e-023;
m[13124] = 1.097020e-023;
m[-13124] = 1.097020e-023;
m[13126] = 6.928549e-024;
m[-13126] = 6.928549e-024;
m[13212] = 6.582122e-024;
m[-13212] = 6.582122e-024;
m[13214] = 1.097020e-023;
m[-13214] = 1.097020e-023;
m[13216] = 5.485102e-024;
m[-13216] = 5.485102e-024;
m[13222] = 6.582122e-024;
m[-13222] = 6.582122e-024;
m[13224] = 1.097020e-023;
m[-13224] = 1.097020e-023;
m[13226] = 5.485102e-024;
m[-13226] = 5.485102e-024;
m[13314] = 2.742551e-023;
m[-13314] = 2.742551e-023;
m[13316] = 0.000000e+000;
m[-13316] = 0.000000e+000;
m[13324] = 2.742551e-023;
m[-13324] = 2.742551e-023;
m[13326] = 0.000000e+000;
m[-13326] = 0.000000e+000;
m[14122] = 1.828367e-022;
m[-14122] = 1.828367e-022;
m[14124] = 0.000000e+000;
m[-14124] = 0.000000e+000;
m[10221] = 2.194040e-024;
m[20223] = 2.742551e-023;
m[20315] = 2.384827e-024;
m[-20315] = 2.384827e-024;
m[20325] = 2.384827e-024;
m[-20325] = 2.384827e-024;
m[20333] = 1.185968e-023;
m[20543] = 0.000000e+000;
m[-20543] = 0.000000e+000;
m[20553] = 1.000000e+016;
m[20555] = 0.000000e+000;
m[21112] = 2.632849e-024;
m[-21112] = 2.632849e-024;
m[21114] = 3.291061e-024;
m[-21114] = 3.291061e-024;
m[21212] = 2.632849e-024;
m[-21212] = 2.632849e-024;
m[21214] = 6.582122e-024;
m[-21214] = 6.582122e-024;
m[22112] = 4.388081e-024;
m[-22112] = 4.388081e-024;
m[22114] = 3.291061e-024;
m[-22114] = 3.291061e-024;
m[22122] = 2.632849e-024;
m[-22122] = 2.632849e-024;
m[22124] = 6.582122e-024;
m[-22124] = 6.582122e-024;
m[22212] = 4.388081e-024;
m[-22212] = 4.388081e-024;
m[22214] = 3.291061e-024;
m[-22214] = 3.291061e-024;
m[22222] = 2.632849e-024;
m[-22222] = 2.632849e-024;
m[22224] = 3.291061e-024;
m[-22224] = 3.291061e-024;
m[23112] = 7.313469e-024;
m[-23112] = 7.313469e-024;
m[23114] = 2.991874e-024;
m[-23114] = 2.991874e-024;
m[23122] = 4.388081e-024;
m[-23122] = 4.388081e-024;
m[23124] = 6.582122e-024;
m[-23124] = 6.582122e-024;
m[23126] = 3.291061e-024;
m[-23126] = 3.291061e-024;
m[23212] = 7.313469e-024;
m[-23212] = 7.313469e-024;
m[23214] = 2.991874e-024;
m[-23214] = 2.991874e-024;
m[23222] = 7.313469e-024;
m[-23222] = 7.313469e-024;
m[23224] = 2.991874e-024;
m[-23224] = 2.991874e-024;
m[23314] = 0.000000e+000;
m[-23314] = 0.000000e+000;
m[23324] = 0.000000e+000;
m[-23324] = 0.000000e+000;
m[30113] = 2.742551e-024;
m[30213] = 2.742551e-024;
m[-30213] = 2.742551e-024;
m[30223] = 2.991874e-024;
m[30313] = 2.056913e-024;
m[-30313] = 2.056913e-024;
m[30323] = 2.056913e-024;
m[-30323] = 2.056913e-024;
m[30343] = 0.000000e+000;
m[-30343] = 0.000000e+000;
m[30353] = 0.000000e+000;
m[-30353] = 0.000000e+000;
m[30363] = 0.000000e+000;
m[-30363] = 0.000000e+000;
m[30411] = 0.000000e+000;
m[-30411] = 0.000000e+000;
m[30413] = 0.000000e+000;
m[-30413] = 0.000000e+000;
m[30421] = 0.000000e+000;
m[-30421] = 0.000000e+000;
m[30423] = 0.000000e+000;
m[-30423] = 0.000000e+000;
m[30443] = 2.789035e-023;
m[30553] = 0.000000e+000;
m[31114] = 1.880606e-024;
m[-31114] = 1.880606e-024;
m[31214] = 4.388081e-024;
m[-31214] = 4.388081e-024;
m[32112] = 4.388081e-024;
m[-32112] = 4.388081e-024;
m[32114] = 1.880606e-024;
m[-32114] = 1.880606e-024;
m[32124] = 4.388081e-024;
m[-32124] = 4.388081e-024;
m[32212] = 4.388081e-024;
m[-32212] = 4.388081e-024;
m[32214] = 1.880606e-024;
m[-32214] = 1.880606e-024;
m[32224] = 1.880606e-024;
m[-32224] = 1.880606e-024;
m[33122] = 1.880606e-023;
m[-33122] = 1.880606e-023;
m[33314] = 0.000000e+000;
m[-33314] = 0.000000e+000;
m[33324] = 0.000000e+000;
m[-33324] = 0.000000e+000;
m[41214] = 0.000000e+000;
m[-41214] = 0.000000e+000;
m[42112] = 6.582122e-024;
m[-42112] = 6.582122e-024;
m[42124] = 0.000000e+000;
m[-42124] = 0.000000e+000;
m[42212] = 6.582122e-024;
m[-42212] = 6.582122e-024;
m[43122] = 2.194041e-024;
m[-43122] = 2.194041e-024;
m[52114] = 0.000000e+000;
m[-52114] = 0.000000e+000;
m[52214] = 0.000000e+000;
m[-52214] = 0.000000e+000;
m[53122] = 4.388081e-024;
m[-53122] = 4.388081e-024;
m[100111] = 1.645531e-024;
m[100113] = 2.123265e-024;
m[100211] = 1.645531e-024;
m[-100211] = 1.645531e-024;
m[100213] = 2.123265e-024;
m[-100213] = 2.123265e-024;
m[100221] = 1.196749e-023;
m[100223] = 3.871836e-024;
m[100225] = 0.000000e+000;
m[100311] = 0.000000e+000;
m[-100311] = 0.000000e+000;
m[100313] = 2.837122e-024;
m[-100313] = 2.837122e-024;
m[100315] = 0.000000e+000;
m[-100315] = 0.000000e+000;
m[100321] = 0.000000e+000;
m[-100321] = 0.000000e+000;
m[100323] = 2.837122e-024;
m[-100323] = 2.837122e-024;
m[100325] = 0.000000e+000;
m[-100325] = 0.000000e+000;
m[100331] = 0.000000e+000;
m[100333] = 4.388081e-024;
m[100335] = 3.291061e-024;
m[100441] = 0.000000e+000;
m[100551] = 0.000000e+000;
m[100553] = 1.495937e-020;
m[100555] = 1.000000e+016;
m[100557] = 0.000000e+000;
m[110551] = 1.000000e+016;
m[110553] = 0.000000e+000;
m[110555] = 0.000000e+000;
m[120553] = 1.000000e+016;
m[120555] = 0.000000e+000;
m[130553] = 0.000000e+000;
m[200111] = 3.134344e-024;
m[200211] = 3.134344e-024;
m[-200211] = 3.134344e-024;
m[200551] = 0.000000e+000;
m[200553] = 2.502708e-020;
m[200555] = 0.000000e+000;
m[210551] = 0.000000e+000;
m[210553] = 0.000000e+000;
m[220553] = 0.000000e+000;
m[300553] = 4.701516e-023;
m[9000221] = 0.000000e+000;
m[9000443] = 1.265793e-023;
m[9000553] = 5.983747e-024;
m[9010443] = 8.438618e-024;
m[9010553] = 8.331800e-024;
m[9020221] = 6.038644e-024;
m[9020443] = 1.530726e-023;
m[9060225] = 4.388081e-024;
m[9070225] = 2.056913e-024;
m[1000001] = 0.000000e+000;
m[-1000001] = 0.000000e+000;
m[1000002] = 0.000000e+000;
m[-1000002] = 0.000000e+000;
m[1000003] = 0.000000e+000;
m[-1000003] = 0.000000e+000;
m[1000004] = 0.000000e+000;
m[-1000004] = 0.000000e+000;
m[1000005] = 0.000000e+000;
m[-1000005] = 0.000000e+000;
m[1000006] = 0.000000e+000;
m[-1000006] = 0.000000e+000;
m[1000011] = 0.000000e+000;
m[-1000011] = 0.000000e+000;
m[1000012] = 0.000000e+000;
m[-1000012] = 0.000000e+000;
m[1000013] = 0.000000e+000;
m[-1000013] = 0.000000e+000;
m[1000014] = 0.000000e+000;
m[-1000014] = 0.000000e+000;
m[1000015] = 0.000000e+000;
m[-1000015] = 0.000000e+000;
m[1000016] = 0.000000e+000;
m[-1000016] = 0.000000e+000;
m[1000021] = 0.000000e+000;
m[1000022] = 0.000000e+000;
m[1000023] = 0.000000e+000;
m[1000024] = 0.000000e+000;
m[-1000024] = 0.000000e+000;
m[1000025] = 0.000000e+000;
m[1000035] = 0.000000e+000;
m[1000037] = 0.000000e+000;
m[-1000037] = 0.000000e+000;
m[1000039] = 0.000000e+000;
m[2000001] = 0.000000e+000;
m[-2000001] = 0.000000e+000;
m[2000002] = 0.000000e+000;
m[-2000002] = 0.000000e+000;
m[2000003] = 0.000000e+000;
m[-2000003] = 0.000000e+000;
m[2000004] = 0.000000e+000;
m[-2000004] = 0.000000e+000;
m[2000005] = 0.000000e+000;
m[-2000005] = 0.000000e+000;
m[2000006] = 0.000000e+000;
m[-2000006] = 0.000000e+000;
m[2000011] = 0.000000e+000;
m[-2000011] = 0.000000e+000;
m[2000012] = 0.000000e+000;
m[-2000012] = 0.000000e+000;
m[2000013] = 0.000000e+000;
m[-2000013] = 0.000000e+000;
m[2000014] = 0.000000e+000;
m[-2000014] = 0.000000e+000;
m[2000015] = 0.000000e+000;
m[-2000015] = 0.000000e+000;
m[2000016] = 0.000000e+000;
m[-2000016] = 0.000000e+000;
m[3000111] = 0.000000e+000;
m[3000113] = 0.000000e+000;
m[3000211] = 0.000000e+000;
m[-3000211] = 0.000000e+000;
m[3000213] = 0.000000e+000;
m[-3000213] = 0.000000e+000;
m[3000221] = 0.000000e+000;
m[3000223] = 0.000000e+000;
m[3000331] = 0.000000e+000;
m[3100021] = 0.000000e+000;
m[3100111] = 0.000000e+000;
m[3100113] = 0.000000e+000;
m[3200111] = 0.000000e+000;
m[3200113] = 0.000000e+000;
m[3300113] = 0.000000e+000;
m[3400113] = 0.000000e+000;
m[4000001] = 0.000000e+000;
m[-4000001] = 0.000000e+000;
m[4000002] = 0.000000e+000;
m[-4000002] = 0.000000e+000;
m[4000011] = 0.000000e+000;
m[-4000011] = 0.000000e+000;
m[4000012] = 0.000000e+000;
m[-4000012] = 0.000000e+000;
m[5000039] = 0.000000e+000;
m[9900012] = 0.000000e+000;
m[9900014] = 0.000000e+000;
m[9900016] = 0.000000e+000;
m[9900023] = 0.000000e+000;
m[9900024] = 0.000000e+000;
m[-9900024] = 0.000000e+000;
m[9900041] = 0.000000e+000;
m[-9900041] = 0.000000e+000;
m[9900042] = 0.000000e+000;
m[-9900042] = 0.000000e+000;
m[1027013000] = 0.000000e+000;
m[1012006000] = 0.000000e+000;
m[1063029000] = 0.000000e+000;
m[1014007000] = 0.000000e+000;
m[1016008000] = 0.000000e+000;
m[1028014000] = 0.000000e+000;
m[1065029000] = 0.000000e+000;
m[1009004000] = 0.000000e+000;
m[1019009000] = 0.000000e+000;
m[1056026000] = 0.000000e+000;
m[1207082000] = 0.000000e+000;
m[1208082000] = 0.000000e+000;
m[1029014000] = 0.000000e+000;
m[1206082000] = 0.000000e+000;
m[1054026000] = 0.000000e+000;
m[1018008000] = 0.000000e+000;
m[1030014000] = 0.000000e+000;
m[1057026000] = 0.000000e+000;
m[1204082000] = 0.000000e+000;
m[-99000000] = 0.000000e+000;
m[1028013000] = 0.000000e+000;
m[1040018000] = 0.000000e+000;
m[1011005000] = 0.000000e+000;
m[1012005000] = 0.000000e+000;
m[1013006000] = 0.000000e+000;
m[1014006000] = 0.000000e+000;
m[1052024000] = 0.000000e+000;
m[1024012000] = 0.000000e+000;
m[1026012000] = 0.000000e+000;
m[1027012000] = 0.000000e+000;
m[1015007000] = 0.000000e+000;
m[1022010000] = 0.000000e+000;
m[1058028000] = 0.000000e+000;
m[1060028000] = 0.000000e+000;
m[1062028000] = 0.000000e+000;
m[1064028000] = 0.000000e+000;
m[1007003000] = 0.000000e+000;
m[1025012000] = 0.000000e+000;
m[1053024000] = 0.000000e+000;
m[1055025000] = 0.000000e+000;
m[1008004000] = 0.000000e+000;
m[1010004000] = 0.000000e+000;
m[1010005000] = 0.000000e+000;
m[1016007000] = 0.000000e+000;
m[1017008000] = 0.000000e+000;
m[1019008000] = 0.000000e+000;
m[1023010000] = 0.000000e+000;
m[1024011000] = 0.000000e+000;
m[1031015000] = 0.000000e+000;
m[1039017000] = 0.000000e+000;
m[1040017000] = 0.000000e+000;
m[1036018000] = 0.000000e+000;
m[1050024000] = 0.000000e+000;
m[1054024000] = 0.000000e+000;
m[1059026000] = 0.000000e+000;
m[1061028000] = 0.000000e+000;
m[1063028000] = 0.000000e+000;
m[1092042000] = 0.000000e+000;
m[1095042000] = 0.000000e+000;
m[1096042000] = 0.000000e+000;
m[1097042000] = 0.000000e+000;
m[1098042000] = 0.000000e+000;
m[1100042000] = 0.000000e+000;
m[1108046000] = 0.000000e+000;
// Added by hand:
m[9902210] = 0.000000e+000; //diffractive p-state -> assume no lifetime
return true;
}
private:
/// @name Histograms
//@{
Histo1DPtr _h_mult_total; // full kinematic range
Histo1DPtr _h_mult_eta[5]; // in eta bins
Histo1DPtr _h_mult_pt[5]; // in pT bins
Histo1DPtr _h_dndeta; // density dn/deta
Histo1DPtr _h_dndpt; // density dn/dpT
//@}
/// @name Private variables
double _p_min;
double _pt_min;
double _eta_min;
double _eta_max;
double _maxlft;
/// Count selected events
CounterPtr _sumW;
map<int, double> _partLftMap; // Map <PDGID, PDGLIFETIME>
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(LHCB_2014_I1281685);
}
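// A standalone sketch of the prompt-particle selection used above: walk up
// the mother chain, summing tabulated PDG lifetimes, and keep the candidate
// only if the sum stays below the 10 ps cut. The toy types are illustrative;
// the analysis itself walks HepMC production vertices.
#include <cassert>
#include <map>

struct ToyParticle { int pid; const ToyParticle* mother; };

// Summed ancestor lifetime in seconds, or -1 on failure, mirroring
// getAncestorSumLifetime() in the analysis above
double ancestorSumLifetime(const ToyParticle* p,
                           const std::map<int, double>& lft) {
  double sum = 0.0;
  for (const ToyParticle* m = p ? p->mother : nullptr; m; m = m->mother) {
    if (m->pid == 2212) break;         // reached a beam proton
    const auto it = lft.find(m->pid);
    if (it == lft.end()) return -1.0;  // unknown ancestor => treat as non-prompt
    sum += it->second;
  }
  return sum;
}

bool isPrompt(const ToyParticle* p, const std::map<int, double>& lft,
              double maxlft = 1.0e-11 /* 10 ps, as in the analysis */) {
  const double s = ancestorSumLifetime(p, lft);
  return s >= 0.0 && s <= maxlft;
}

int main() {
  const std::map<int, double> lft{{310, 8.934603e-11}};  // K0S lifetime [s]
  ToyParticle beam{2212, nullptr};
  ToyParticle ks{310, &beam};
  ToyParticle pion{211, &ks};        // pion from a K0S decay
  assert(!isPrompt(&pion, lft));     // K0S lifetime is far above 10 ps
  return 0;
}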
diff --git a/analyses/pluginMisc/ARGUS_1993_S2653028.cc b/analyses/pluginMisc/ARGUS_1993_S2653028.cc
--- a/analyses/pluginMisc/ARGUS_1993_S2653028.cc
+++ b/analyses/pluginMisc/ARGUS_1993_S2653028.cc
@@ -1,175 +1,175 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// @brief ARGUS pi+/-, K+/- and proton/antiproton spectrum at the Upsilon(4S)
/// @author Peter Richardson
class ARGUS_1993_S2653028 : public Analysis {
public:
ARGUS_1993_S2653028()
: Analysis("ARGUS_1993_S2653028"){ }
void analyze(const Event& e) {
// Find the upsilons
Particles upsilons;
// First in unstable final state
const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
foreach (const Particle& p, ufs.particles()) {
if (p.pid() == 300553) upsilons.push_back(p);
}
// Then in whole event if that failed
if (upsilons.empty()) {
foreach (const GenParticle* p, particles(e.genEvent())) {
if (p->pdg_id() != 300553) continue;
const GenVertex* pv = p->production_vertex();
bool passed = true;
if (pv) {
foreach (const GenParticle* pp, particles_in(pv)) {
if ( p->pdg_id() == pp->pdg_id() ) {
passed = false;
break;
}
}
}
if (passed) upsilons.push_back(Particle(*p));
}
}
// Find an upsilon
foreach (const Particle& p, upsilons) {
_weightSum->fill();
vector<GenParticle *> pionsA,pionsB,protonsA,protonsB,kaons;
// Find the decay products we want
findDecayProducts(p.genParticle(), pionsA, pionsB, protonsA, protonsB, kaons);
LorentzTransform cms_boost;
if (p.p3().mod() > 1*MeV)
cms_boost = LorentzTransform::mkFrameTransformFromBeta(p.momentum().betaVec());
for (size_t ix = 0; ix < pionsA.size(); ++ix) {
FourMomentum ptemp(pionsA[ix]->momentum());
double pcm = cms_boost.transform(ptemp).vector3().mod();
_histPiA->fill(pcm);
}
_multPiA->fill(10.58,double(pionsA.size()));
for (size_t ix = 0; ix < pionsB.size(); ++ix) {
double pcm = cms_boost.transform(FourMomentum(pionsB[ix]->momentum())).vector3().mod();
_histPiB->fill(pcm);
}
_multPiB->fill(10.58,double(pionsB.size()));
for (size_t ix = 0; ix < protonsA.size(); ++ix) {
double pcm = cms_boost.transform(FourMomentum(protonsA[ix]->momentum())).vector3().mod();
_histpA->fill(pcm);
}
_multpA->fill(10.58,double(protonsA.size()));
for (size_t ix = 0; ix < protonsB.size(); ++ix) {
double pcm = cms_boost.transform(FourMomentum(protonsB[ix]->momentum())).vector3().mod();
_histpB->fill(pcm);
}
_multpB->fill(10.58,double(protonsB.size()));
for (size_t ix = 0 ;ix < kaons.size(); ++ix) {
double pcm = cms_boost.transform(FourMomentum(kaons[ix]->momentum())).vector3().mod();
_histKA->fill(pcm);
_histKB->fill(pcm);
}
_multK->fill(10.58,double(kaons.size()));
}
}
void finalize() {
- if (_weightSum > 0.) {
- scale(_histPiA, 1./_weightSum);
- scale(_histPiB, 1./_weightSum);
- scale(_histKA , 1./_weightSum);
- scale(_histKB , 1./_weightSum);
- scale(_histpA , 1./_weightSum);
- scale(_histpB , 1./_weightSum);
- scale(_multPiA, 1./_weightSum);
- scale(_multPiB, 1./_weightSum);
- scale(_multK , 1./_weightSum);
- scale(_multpA , 1./_weightSum);
- scale(_multpB , 1./_weightSum);
+ if (_weightSum->val() > 0.) {
+ scale(_histPiA, 1. / *_weightSum);
+ scale(_histPiB, 1. / *_weightSum);
+ scale(_histKA , 1. / *_weightSum);
+ scale(_histKB , 1. / *_weightSum);
+ scale(_histpA , 1. / *_weightSum);
+ scale(_histpB , 1. / *_weightSum);
+ scale(_multPiA, 1. / *_weightSum);
+ scale(_multPiB, 1. / *_weightSum);
+ scale(_multK , 1. / *_weightSum);
+ scale(_multpA , 1. / *_weightSum);
+ scale(_multpB , 1. / *_weightSum);
}
}
void init() {
declare(UnstableFinalState(), "UFS");
// spectra
book(_histPiA ,1, 1, 1);
book(_histPiB ,2, 1, 1);
book(_histKA ,3, 1, 1);
book(_histKB ,6, 1, 1);
book(_histpA ,4, 1, 1);
book(_histpB ,5, 1, 1);
// multiplicities
book(_multPiA , 7, 1, 1);
book(_multPiB , 8, 1, 1);
book(_multK , 9, 1, 1);
book(_multpA ,10, 1, 1);
book(_multpB ,11, 1, 1);
book(_weightSum, "TMP/weightSum");
} // init
private:
//@{
/// Sum of weights
CounterPtr _weightSum;
/// Spectra
Histo1DPtr _histPiA, _histPiB, _histKA, _histKB, _histpA, _histpB;
/// Multiplicities
Histo1DPtr _multPiA, _multPiB, _multK, _multpA, _multpB;
//@}
void findDecayProducts(const GenParticle* p,
vector<GenParticle*>& pionsA, vector<GenParticle*>& pionsB,
vector<GenParticle*>& protonsA, vector<GenParticle*>& protonsB,
vector<GenParticle*>& kaons)
{
int parentId = p->pdg_id();
const GenVertex* dv = p->end_vertex();
/// @todo Use better looping
for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) {
int id = abs((*pp)->pdg_id());
if (id == PID::PIPLUS) {
if (parentId != PID::LAMBDA && parentId != PID::K0S) {
pionsA.push_back(*pp);
pionsB.push_back(*pp);
}
else
pionsB.push_back(*pp);
}
else if (id == PID::PROTON) {
if (parentId != PID::LAMBDA && parentId != PID::K0S) {
protonsA.push_back(*pp);
protonsB.push_back(*pp);
}
else
protonsB.push_back(*pp);
}
else if (id == PID::KPLUS) {
kaons.push_back(*pp);
}
else if ((*pp)->end_vertex())
findDecayProducts(*pp, pionsA, pionsB, protonsA, protonsB, kaons);
}
}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ARGUS_1993_S2653028);
}
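// A standalone sketch of the rest-frame boost used in analyze() above: each
// decay-product momentum is transformed with the boost velocity beta = p/E
// of the Upsilon before the spectrum is filled. The plain boost along z
// below is a 1D special case for illustration, not Rivet's general
// LorentzTransform::mkFrameTransformFromBeta().
#include <cmath>
#include <iostream>

struct FourMom { double E, px, py, pz; };

// Pure boost along z with velocity beta (natural units, c = 1)
FourMom boostZ(const FourMom& p, double beta) {
  const double gamma = 1.0 / std::sqrt(1.0 - beta * beta);
  return { gamma * (p.E - beta * p.pz), p.px, p.py,
           gamma * (p.pz - beta * p.E) };
}

int main() {
  const FourMom ups{10.60, 0.0, 0.0, 0.65};  // slightly moving resonance
  const double beta = ups.pz / ups.E;        // analogue of betaVec()
  const FourMom rest = boostZ(ups, beta);
  std::cout << "pz in own rest frame: " << rest.pz << "\n";  // ~0
  return 0;
}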
diff --git a/analyses/pluginMisc/ARGUS_1993_S2669951.cc b/analyses/pluginMisc/ARGUS_1993_S2669951.cc
--- a/analyses/pluginMisc/ARGUS_1993_S2669951.cc
+++ b/analyses/pluginMisc/ARGUS_1993_S2669951.cc
@@ -1,199 +1,199 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// @brief Production of the $\eta'(958)$ and $f_0(980)$ in $e^+e^-$ annihilation in the Upsilon region
/// @author Peter Richardson
class ARGUS_1993_S2669951 : public Analysis {
public:
ARGUS_1993_S2669951()
: Analysis("ARGUS_1993_S2669951")
{ }
void init() {
declare(UnstableFinalState(), "UFS");
book(_weightSum_cont, "weightSum_cont");
book(_weightSum_Ups1, "weightSum_Ups1");
book(_weightSum_Ups2, "weightSum_Ups2");
for ( auto i : {0,1,2} ) {
if ( i < 2 )
book(_count_etaPrime_highZ[i], "count_etaPrime_highz_" + to_str(i));
book(_count_etaPrime_allZ[i], "count_etaPrime_allz_" + to_str(i));
book(_count_f0[i], "count_f0_" + to_str(i));
}
book(_hist_cont_f0 ,2, 1, 1);
book(_hist_Ups1_f0 ,3, 1, 1);
book(_hist_Ups2_f0 ,4, 1, 1);
book(s111, 1, 1, 1, true);
book(s112, 1, 1, 2, true);
book(s511, 5, 1, 1, true);
}
void analyze(const Event& e) {
// Find the Upsilons among the unstables
const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
Particles upsilons;
// First in unstable final state
foreach (const Particle& p, ufs.particles())
if (p.pid() == 553 || p.pid() == 100553)
upsilons.push_back(p);
// Then in whole event if that failed
if (upsilons.empty()) {
/// @todo Replace HepMC digging with Particle::descendents etc. calls
foreach (const GenParticle* p, Rivet::particles(e.genEvent())) {
if ( p->pdg_id() != 553 && p->pdg_id() != 100553 ) continue;
// Discard it if its parent has the same PDG ID code (avoid duplicates)
const GenVertex* pv = p->production_vertex();
bool passed = true;
if (pv) {
foreach (const GenParticle* pp, particles_in(pv)) {
if ( p->pdg_id() == pp->pdg_id() ) {
passed = false;
break;
}
}
}
if (passed) upsilons.push_back(Particle(*p));
}
}
// Finding done, now fill counters
if (upsilons.empty()) { // Continuum
MSG_DEBUG("No Upsilons found => continuum event");
_weightSum_cont->fill();
unsigned int nEtaA(0), nEtaB(0), nf0(0);
foreach (const Particle& p, ufs.particles()) {
const int id = p.abspid();
const double xp = 2.*p.E()/sqrtS();
const double beta = p.p3().mod() / p.E();
if (id == 9010221) {
_hist_cont_f0->fill(xp, 1.0/beta);
nf0 += 1;
} else if (id == 331) {
if (xp > 0.35) nEtaA += 1;
nEtaB += 1;
}
}
_count_f0[2] ->fill(nf0);
_count_etaPrime_highZ[1]->fill(nEtaA);
_count_etaPrime_allZ[2] ->fill(nEtaB);
} else { // Upsilon(s) found
MSG_DEBUG("Upsilons found => resonance event");
foreach (const Particle& ups, upsilons) {
const int parentId = ups.pid();
((parentId == 553) ? _weightSum_Ups1 : _weightSum_Ups2)->fill();
Particles unstable;
// Find the decay products we want
findDecayProducts(ups.genParticle(), unstable);
LorentzTransform cms_boost;
if (ups.p3().mod() > 1*MeV)
cms_boost = LorentzTransform::mkFrameTransformFromBeta(ups.momentum().betaVec());
const double mass = ups.mass();
unsigned int nEtaA(0), nEtaB(0), nf0(0);
foreach(const Particle& p, unstable) {
const int id = p.abspid();
const FourMomentum p2 = cms_boost.transform(p.momentum());
const double xp = 2.*p2.E()/mass;
const double beta = p2.p3().mod()/p2.E();
if (id == 9010221) { //< ?
((parentId == 553) ? _hist_Ups1_f0 : _hist_Ups2_f0)->fill(xp, 1.0/beta);
nf0 += 1;
} else if (id == 331) { //< ?
if (xp > 0.35) nEtaA += 1;
nEtaB += 1;
}
}
if (parentId == 553) {
_count_f0[0] ->fill( nf0);
_count_etaPrime_highZ[0]->fill(nEtaA);
_count_etaPrime_allZ[0] ->fill(nEtaB);
} else {
_count_f0[1]->fill(nf0);
_count_etaPrime_allZ[1] ->fill(nEtaB);
}
}
}
}
void finalize() {
// High-Z eta' multiplicity
- if (_weightSum_Ups1 > 0) // Point at 9.460
- s111->point(0).setY(_count_etaPrime_highZ[0] / _weightSum_Ups1, 0);
- if (_weightSum_cont > 0) // Point at 9.905
- s111->point(1).setY(_count_etaPrime_highZ[1] / _weightSum_cont, 0);
+ if (_weightSum_Ups1->val() > 0) // Point at 9.460
+ s111->point(0).setY(_count_etaPrime_highZ[0]->val() / _weightSum_Ups1->val(), 0);
+ if (_weightSum_cont->val() > 0) // Point at 9.905
+ s111->point(1).setY(_count_etaPrime_highZ[1]->val() / _weightSum_cont->val(), 0);
// All-Z eta' multiplicity
- if (_weightSum_Ups1 > 0) // Point at 9.460
- s112->point(0).setY(_count_etaPrime_allZ[0] / _weightSum_Ups1, 0);
- if (_weightSum_cont > 0) // Point at 9.905
- s112->point(1).setY(_count_etaPrime_allZ[2] / _weightSum_cont, 0);
- if (_weightSum_Ups2 > 0) // Point at 10.02
- s112->point(2).setY(_count_etaPrime_allZ[1] / _weightSum_Ups2, 0);
+ if (_weightSum_Ups1->val() > 0) // Point at 9.460
+ s112->point(0).setY(_count_etaPrime_allZ[0]->val() / _weightSum_Ups1->val(), 0);
+ if (_weightSum_cont->val() > 0) // Point at 9.905
+ s112->point(1).setY(_count_etaPrime_allZ[2]->val() / _weightSum_cont->val(), 0);
+ if (_weightSum_Ups2->val() > 0) // Point at 10.02
+ s112->point(2).setY(_count_etaPrime_allZ[1]->val() / _weightSum_Ups2->val(), 0);
// f0 multiplicity
- if (_weightSum_Ups1 > 0) // Point at 9.46
- s511->point(0).setY(_count_f0[0] / _weightSum_Ups1, 0);
- if (_weightSum_Ups2 > 0) // Point at 10.02
- s511->point(1).setY(_count_f0[1] / _weightSum_Ups2, 0);
- if (_weightSum_cont > 0) // Point at 10.45
- s511->point(2).setY(_count_f0[2] / _weightSum_cont, 0);
+ if (_weightSum_Ups1->val() > 0) // Point at 9.46
+ s511->point(0).setY(_count_f0[0]->val() / _weightSum_Ups1->val(), 0);
+ if (_weightSum_Ups2->val() > 0) // Point at 10.02
+ s511->point(1).setY(_count_f0[1]->val() / _weightSum_Ups2->val(), 0);
+ if (_weightSum_cont->val() > 0) // Point at 10.45
+ s511->point(2).setY(_count_f0[2]->val() / _weightSum_cont->val(), 0);
// Scale histos
- if (_weightSum_cont > 0.) scale(_hist_cont_f0, 1./_weightSum_cont);
- if (_weightSum_Ups1 > 0.) scale(_hist_Ups1_f0, 1./_weightSum_Ups1);
- if (_weightSum_Ups2 > 0.) scale(_hist_Ups2_f0, 1./_weightSum_Ups2);
+ if (_weightSum_cont->val() > 0.) scale(_hist_cont_f0, 1./ *_weightSum_cont);
+ if (_weightSum_Ups1->val() > 0.) scale(_hist_Ups1_f0, 1./ *_weightSum_Ups1);
+ if (_weightSum_Ups2->val() > 0.) scale(_hist_Ups2_f0, 1./ *_weightSum_Ups2);
}
private:
/// @name Counters
//@{
array<CounterPtr,3> _count_etaPrime_highZ, _count_etaPrime_allZ, _count_f0;
CounterPtr _weightSum_cont,_weightSum_Ups1,_weightSum_Ups2;
//@}
Scatter2DPtr s111, s112, s511;
/// Histos
Histo1DPtr _hist_cont_f0, _hist_Ups1_f0, _hist_Ups2_f0;
/// Recursively walk the HepMC tree to find decay products of @a p
void findDecayProducts(const GenParticle* p, Particles& unstable) {
const GenVertex* dv = p->end_vertex();
/// @todo Use better looping
for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) {
const int id = abs((*pp)->pdg_id());
if (id == 331 || id == 9010221) unstable.push_back(Particle(*pp));
else if ((*pp)->end_vertex()) findDecayProducts(*pp, unstable);
}
}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ARGUS_1993_S2669951);
}
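The change running through this whole patch is mechanical: weight sums that used to be plain doubles are now booked CounterPtr handles, so comparisons and arithmetic must go through the pointee, either counter->val() for a scalar or the dereferenced counter in expressions such as 1.0 / *counter. A minimal self-contained sketch of the idiom, using a toy Counter in place of the real Rivet/YODA types (the real classes are assumed to provide equivalent accessors and operator overloads):

#include <iostream>
#include <memory>

// Toy stand-in for YODA::Counter: accumulates a sum of event weights.
struct Counter {
  double sumW = 0.0;
  void fill(double w = 1.0) { sumW += w; }
  double val() const { return sumW; }
};
using CounterPtr = std::shared_ptr<Counter>;

// Toy overload mirroring the "1.0 / *counter" spelling used in the patch.
double operator/(double num, const Counter& c) { return num / c.val(); }

int main() {
  CounterPtr weightSum = std::make_shared<Counter>();
  for (int i = 0; i < 4; ++i) weightSum->fill();   // four unit-weight "events"
  // The old code compared the double directly; the handle needs ->val():
  if (weightSum->val() > 0) {
    const double norm = 1.0 / *weightSum;          // 0.25
    std::cout << "scale factor: " << norm << "\n";
  }
}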
diff --git a/analyses/pluginMisc/ARGUS_1993_S2789213.cc b/analyses/pluginMisc/ARGUS_1993_S2789213.cc
--- a/analyses/pluginMisc/ARGUS_1993_S2789213.cc
+++ b/analyses/pluginMisc/ARGUS_1993_S2789213.cc
@@ -1,258 +1,258 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// @brief ARGUS vector meson production
/// @author Peter Richardson
class ARGUS_1993_S2789213 : public Analysis {
public:
ARGUS_1993_S2789213()
: Analysis("ARGUS_1993_S2789213")
{ }
void init() {
declare(UnstableFinalState(), "UFS");
book(_mult_cont_Omega , 1, 1, 1);
book(_mult_cont_Rho0 , 1, 1, 2);
book(_mult_cont_KStar0 , 1, 1, 3);
book(_mult_cont_KStarPlus , 1, 1, 4);
book(_mult_cont_Phi , 1, 1, 5);
book(_mult_Ups1_Omega , 2, 1, 1);
book(_mult_Ups1_Rho0 , 2, 1, 2);
book(_mult_Ups1_KStar0 , 2, 1, 3);
book(_mult_Ups1_KStarPlus , 2, 1, 4);
book(_mult_Ups1_Phi , 2, 1, 5);
book(_mult_Ups4_Omega , 3, 1, 1);
book(_mult_Ups4_Rho0 , 3, 1, 2);
book(_mult_Ups4_KStar0 , 3, 1, 3);
book(_mult_Ups4_KStarPlus , 3, 1, 4);
book(_mult_Ups4_Phi , 3, 1, 5);
book(_hist_cont_KStarPlus , 4, 1, 1);
book(_hist_Ups1_KStarPlus , 5, 1, 1);
book(_hist_Ups4_KStarPlus , 6, 1, 1);
book(_hist_cont_KStar0 , 7, 1, 1);
book(_hist_Ups1_KStar0 , 8, 1, 1);
book(_hist_Ups4_KStar0 , 9, 1, 1);
book(_hist_cont_Rho0 ,10, 1, 1);
book(_hist_Ups1_Rho0 ,11, 1, 1);
book(_hist_Ups4_Rho0 ,12, 1, 1);
book(_hist_cont_Omega ,13, 1, 1);
book(_hist_Ups1_Omega ,14, 1, 1);
book(_weightSum_cont,"TMP/weightSumcont");
book(_weightSum_Ups1,"TMP/weightSumUps1");
book(_weightSum_Ups4,"TMP/weightSumUps4");
}
void analyze(const Event& e) {
// Find the upsilons
Particles upsilons;
// First in unstable final state
const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
foreach (const Particle& p, ufs.particles())
if (p.pid() == 300553 || p.pid() == 553) upsilons.push_back(p);
// Then in the whole event if that failed
if (upsilons.empty()) {
foreach (const GenParticle* p, Rivet::particles(e.genEvent())) {
if (p->pdg_id() != 300553 && p->pdg_id() != 553) continue;
const GenVertex* pv = p->production_vertex();
bool passed = true;
if (pv) {
foreach (const GenParticle* pp, particles_in(pv)) {
if ( p->pdg_id() == pp->pdg_id() ) {
passed = false;
break;
}
}
}
if (passed) upsilons.push_back(Particle(*p));
}
}
if (upsilons.empty()) { // continuum
_weightSum_cont->fill();
unsigned int nOmega(0), nRho0(0), nKStar0(0), nKStarPlus(0), nPhi(0);
foreach (const Particle& p, ufs.particles()) {
int id = p.abspid();
double xp = 2.*p.E()/sqrtS();
double beta = p.p3().mod()/p.E();
if (id == 113) {
_hist_cont_Rho0->fill(xp, 1.0/beta);
++nRho0;
}
else if (id == 313) {
_hist_cont_KStar0->fill(xp, 1.0/beta);
++nKStar0;
}
else if (id == 223) {
_hist_cont_Omega->fill(xp, 1.0/beta);
++nOmega;
}
else if (id == 323) {
_hist_cont_KStarPlus->fill(xp,1.0/beta);
++nKStarPlus;
}
else if (id == 333) {
++nPhi;
}
}
/// @todo Replace with Counters and fill one-point Scatters at the end
_mult_cont_Omega ->fill(10.45, nOmega );
_mult_cont_Rho0 ->fill(10.45, nRho0 );
_mult_cont_KStar0 ->fill(10.45, nKStar0 );
_mult_cont_KStarPlus->fill(10.45, nKStarPlus);
_mult_cont_Phi ->fill(10.45, nPhi );
} else { // found an upsilon
foreach (const Particle& ups, upsilons) {
const int parentId = ups.pid();
(parentId == 553 ? _weightSum_Ups1 : _weightSum_Ups4)->fill();
Particles unstable;
// Find the decay products we want
findDecayProducts(ups.genParticle(),unstable);
LorentzTransform cms_boost;
if (ups.p3().mod() > 1*MeV)
cms_boost = LorentzTransform::mkFrameTransformFromBeta(ups.momentum().betaVec());
double mass = ups.mass();
unsigned int nOmega(0),nRho0(0),nKStar0(0),nKStarPlus(0),nPhi(0);
foreach(const Particle & p , unstable) {
int id = p.abspid();
FourMomentum p2 = cms_boost.transform(p.momentum());
double xp = 2.*p2.E()/mass;
double beta = p2.p3().mod()/p2.E();
if (id == 113) {
if (parentId == 553) _hist_Ups1_Rho0->fill(xp,1.0/beta);
else _hist_Ups4_Rho0->fill(xp,1.0/beta);
++nRho0;
}
else if (id == 313) {
if (parentId == 553) _hist_Ups1_KStar0->fill(xp,1.0/beta);
else _hist_Ups4_KStar0->fill(xp,1.0/beta);
++nKStar0;
}
else if (id == 223) {
if (parentId == 553) _hist_Ups1_Omega->fill(xp,1.0/beta);
++nOmega;
}
else if (id == 323) {
if (parentId == 553) _hist_Ups1_KStarPlus->fill(xp,1.0/beta);
else _hist_Ups4_KStarPlus->fill(xp,1.0/beta);
++nKStarPlus;
}
else if (id == 333) {
++nPhi;
}
}
if (parentId == 553) {
_mult_Ups1_Omega ->fill(9.46,nOmega );
_mult_Ups1_Rho0 ->fill(9.46,nRho0 );
_mult_Ups1_KStar0 ->fill(9.46,nKStar0 );
_mult_Ups1_KStarPlus->fill(9.46,nKStarPlus);
_mult_Ups1_Phi ->fill(9.46,nPhi );
}
else {
_mult_Ups4_Omega ->fill(10.58,nOmega );
_mult_Ups4_Rho0 ->fill(10.58,nRho0 );
_mult_Ups4_KStar0 ->fill(10.58,nKStar0 );
_mult_Ups4_KStarPlus->fill(10.58,nKStarPlus);
_mult_Ups4_Phi ->fill(10.58,nPhi );
}
}
}
}
void finalize() {
- if (_weightSum_cont > 0.) {
+ if (_weightSum_cont->val() > 0.) {
/// @todo Replace with Counters and fill one-point Scatters at the end
- scale(_mult_cont_Omega , 1./_weightSum_cont);
- scale(_mult_cont_Rho0 , 1./_weightSum_cont);
- scale(_mult_cont_KStar0 , 1./_weightSum_cont);
- scale(_mult_cont_KStarPlus, 1./_weightSum_cont);
- scale(_mult_cont_Phi , 1./_weightSum_cont);
- scale(_hist_cont_KStarPlus, 1./_weightSum_cont);
- scale(_hist_cont_KStar0 , 1./_weightSum_cont);
- scale(_hist_cont_Rho0 , 1./_weightSum_cont);
- scale(_hist_cont_Omega , 1./_weightSum_cont);
+ scale(_mult_cont_Omega , 1. / *_weightSum_cont);
+ scale(_mult_cont_Rho0 , 1. / *_weightSum_cont);
+ scale(_mult_cont_KStar0 , 1. / *_weightSum_cont);
+ scale(_mult_cont_KStarPlus, 1. / *_weightSum_cont);
+ scale(_mult_cont_Phi , 1. / *_weightSum_cont);
+ scale(_hist_cont_KStarPlus, 1. / *_weightSum_cont);
+ scale(_hist_cont_KStar0 , 1. / *_weightSum_cont);
+ scale(_hist_cont_Rho0 , 1. / *_weightSum_cont);
+ scale(_hist_cont_Omega , 1. / *_weightSum_cont);
}
- if (_weightSum_Ups1 > 0.) {
+ if (_weightSum_Ups1->val() > 0.) {
/// @todo Replace with Counters and fill one-point Scatters at the end
- scale(_mult_Ups1_Omega , 1./_weightSum_Ups1);
- scale(_mult_Ups1_Rho0 , 1./_weightSum_Ups1);
- scale(_mult_Ups1_KStar0 , 1./_weightSum_Ups1);
- scale(_mult_Ups1_KStarPlus, 1./_weightSum_Ups1);
- scale(_mult_Ups1_Phi , 1./_weightSum_Ups1);
- scale(_hist_Ups1_KStarPlus, 1./_weightSum_Ups1);
- scale(_hist_Ups1_KStar0 , 1./_weightSum_Ups1);
- scale(_hist_Ups1_Rho0 , 1./_weightSum_Ups1);
- scale(_hist_Ups1_Omega , 1./_weightSum_Ups1);
+ scale(_mult_Ups1_Omega , 1. / *_weightSum_Ups1);
+ scale(_mult_Ups1_Rho0 , 1. / *_weightSum_Ups1);
+ scale(_mult_Ups1_KStar0 , 1. / *_weightSum_Ups1);
+ scale(_mult_Ups1_KStarPlus, 1. / *_weightSum_Ups1);
+ scale(_mult_Ups1_Phi , 1. / *_weightSum_Ups1);
+ scale(_hist_Ups1_KStarPlus, 1. / *_weightSum_Ups1);
+ scale(_hist_Ups1_KStar0 , 1. / *_weightSum_Ups1);
+ scale(_hist_Ups1_Rho0 , 1. / *_weightSum_Ups1);
+ scale(_hist_Ups1_Omega , 1. / *_weightSum_Ups1);
}
- if (_weightSum_Ups4 > 0.) {
+ if (_weightSum_Ups4->val() > 0.) {
/// @todo Replace with Counters and fill one-point Scatters at the end
- scale(_mult_Ups4_Omega , 1./_weightSum_Ups4);
- scale(_mult_Ups4_Rho0 , 1./_weightSum_Ups4);
- scale(_mult_Ups4_KStar0 , 1./_weightSum_Ups4);
- scale(_mult_Ups4_KStarPlus, 1./_weightSum_Ups4);
- scale(_mult_Ups4_Phi , 1./_weightSum_Ups4);
- scale(_hist_Ups4_KStarPlus, 1./_weightSum_Ups4);
- scale(_hist_Ups4_KStar0 , 1./_weightSum_Ups4);
- scale(_hist_Ups4_Rho0 , 1./_weightSum_Ups4);
+ scale(_mult_Ups4_Omega , 1. / *_weightSum_Ups4);
+ scale(_mult_Ups4_Rho0 , 1. / *_weightSum_Ups4);
+ scale(_mult_Ups4_KStar0 , 1. / *_weightSum_Ups4);
+ scale(_mult_Ups4_KStarPlus, 1. / *_weightSum_Ups4);
+ scale(_mult_Ups4_Phi , 1. / *_weightSum_Ups4);
+ scale(_hist_Ups4_KStarPlus, 1. / *_weightSum_Ups4);
+ scale(_hist_Ups4_KStar0 , 1. / *_weightSum_Ups4);
+ scale(_hist_Ups4_Rho0 , 1. / *_weightSum_Ups4);
}
}
private:
//@{
Histo1DPtr _mult_cont_Omega, _mult_cont_Rho0, _mult_cont_KStar0, _mult_cont_KStarPlus, _mult_cont_Phi;
Histo1DPtr _mult_Ups1_Omega, _mult_Ups1_Rho0, _mult_Ups1_KStar0, _mult_Ups1_KStarPlus, _mult_Ups1_Phi;
Histo1DPtr _mult_Ups4_Omega, _mult_Ups4_Rho0, _mult_Ups4_KStar0, _mult_Ups4_KStarPlus, _mult_Ups4_Phi;
Histo1DPtr _hist_cont_KStarPlus, _hist_Ups1_KStarPlus, _hist_Ups4_KStarPlus;
Histo1DPtr _hist_cont_KStar0, _hist_Ups1_KStar0, _hist_Ups4_KStar0 ;
Histo1DPtr _hist_cont_Rho0, _hist_Ups1_Rho0, _hist_Ups4_Rho0;
Histo1DPtr _hist_cont_Omega, _hist_Ups1_Omega;
CounterPtr _weightSum_cont,_weightSum_Ups1,_weightSum_Ups4;
//@}
void findDecayProducts(const GenParticle* p, Particles& unstable) {
const GenVertex* dv = p->end_vertex();
/// @todo Use better looping
for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) {
int id = abs((*pp)->pdg_id());
if (id == 113 || id == 313 || id == 323 ||
id == 333 || id == 223 ) {
unstable.push_back(Particle(*pp));
}
else if ((*pp)->end_vertex())
findDecayProducts(*pp, unstable);
}
}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ARGUS_1993_S2789213);
}
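The finalize() above repeats nine nearly identical scale() calls per counter. A hypothetical batching helper (not part of the Rivet API touched by this patch; Hist and scale below are toy stand-ins) is one way such blocks could be compressed:

#include <functional>
#include <initializer_list>
#include <iostream>

struct Hist { double integral = 1.0; };            // toy histogram handle
void scale(Hist& h, double f) { h.integral *= f; } // toy Analysis::scale

// Apply one factor to a batch of histograms in a single call.
void scaleAll(std::initializer_list<std::reference_wrapper<Hist>> hists, double f) {
  for (Hist& h : hists) scale(h, f);
}

int main() {
  Hist hOmega, hRho0, hKStar0;
  const double weightSumCont = 4.0;                // stand-in for _weightSum_cont->val()
  scaleAll({hOmega, hRho0, hKStar0}, 1.0/weightSumCont);
  std::cout << hOmega.integral << "\n";            // 0.25
}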
diff --git a/analyses/pluginMisc/BABAR_2003_I593379.cc b/analyses/pluginMisc/BABAR_2003_I593379.cc
--- a/analyses/pluginMisc/BABAR_2003_I593379.cc
+++ b/analyses/pluginMisc/BABAR_2003_I593379.cc
@@ -1,186 +1,186 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// @brief Babar charmonium spectra
/// @author Peter Richardson
class BABAR_2003_I593379 : public Analysis {
public:
BABAR_2003_I593379()
: Analysis("BABAR_2003_I593379")
{ }
void analyze(const Event& e) {
// Find the Upsilon(4S)
Particles upsilons;
// First in unstable final state
const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
foreach (const Particle& p, ufs.particles())
if (p.pid() == 300553) upsilons.push_back(p);
// Then in the whole event if that fails
if (upsilons.empty()) {
foreach (const GenParticle* p, Rivet::particles(e.genEvent())) {
if (p->pdg_id() != 300553) continue;
const GenVertex* pv = p->production_vertex();
bool passed = true;
if (pv) {
foreach (const GenParticle* pp, particles_in(pv)) {
if ( p->pdg_id() == pp->pdg_id() ) {
passed = false;
break;
}
}
}
if (passed) upsilons.push_back(Particle(*p));
}
}
// Loop over the upsilons
foreach (const Particle& p, upsilons) {
_weightSum->fill();
// Find the charmonium resonances
/// @todo Use Rivet::Particles
vector<const GenParticle*> allJpsi, primaryJpsi, Psiprime, all_chi_c1, all_chi_c2, primary_chi_c1, primary_chi_c2;
findDecayProducts(p.genParticle(), allJpsi, primaryJpsi, Psiprime,
all_chi_c1, all_chi_c2, primary_chi_c1, primary_chi_c2);
const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(p.mom().betaVec());
for (size_t i = 0; i < allJpsi.size(); i++) {
const double pcm = cms_boost.transform(FourMomentum(allJpsi[i]->momentum())).p();
_hist_all_Jpsi->fill(pcm);
}
_mult_JPsi->fill(10.58, double(allJpsi.size()));
for (size_t i = 0; i < primaryJpsi.size(); i++) {
const double pcm = cms_boost.transform(FourMomentum(primaryJpsi[i]->momentum())).p();
_hist_primary_Jpsi->fill(pcm);
}
_mult_JPsi_direct->fill(10.58, double(primaryJpsi.size()));
for (size_t i=0; i<Psiprime.size(); i++) {
const double pcm = cms_boost.transform(FourMomentum(Psiprime[i]->momentum())).p();
_hist_Psi_prime->fill(pcm);
}
_mult_Psi2S->fill(10.58, double(Psiprime.size()));
for (size_t i = 0; i < all_chi_c1.size(); i++) {
const double pcm = cms_boost.transform(FourMomentum(all_chi_c1[i]->momentum())).p();
_hist_chi_c1->fill(pcm);
}
_mult_chi_c1->fill(10.58, double(all_chi_c1.size()));
_mult_chi_c1_direct->fill(10.58, double(primary_chi_c1.size()));
for (size_t i = 0; i < all_chi_c2.size(); i++) {
const double pcm = cms_boost.transform(FourMomentum(all_chi_c2[i]->momentum())).p();
_hist_chi_c2->fill(pcm);
}
_mult_chi_c2->fill(10.58, double(all_chi_c2.size()));
_mult_chi_c2_direct->fill(10.58, double(primary_chi_c2.size()));
}
} // analyze
void finalize() {
- scale(_hist_all_Jpsi , 0.5*0.1/_weightSum);
- scale(_hist_chi_c1 , 0.5*0.1/_weightSum);
- scale(_hist_chi_c2 , 0.5*0.1/_weightSum);
- scale(_hist_Psi_prime , 0.5*0.1/_weightSum);
- scale(_hist_primary_Jpsi , 0.5*0.1/_weightSum);
- scale(_mult_JPsi , 0.5*100./_weightSum);
- scale(_mult_JPsi_direct , 0.5*100./_weightSum);
- scale(_mult_chi_c1 , 0.5*100./_weightSum);
- scale(_mult_chi_c1_direct, 0.5*100./_weightSum);
- scale(_mult_chi_c2 , 0.5*100./_weightSum);
- scale(_mult_chi_c2_direct, 0.5*100./_weightSum);
- scale(_mult_Psi2S , 0.5*100./_weightSum);
+ scale(_hist_all_Jpsi , 0.5*0.1 / *_weightSum);
+ scale(_hist_chi_c1 , 0.5*0.1 / *_weightSum);
+ scale(_hist_chi_c2 , 0.5*0.1 / *_weightSum);
+ scale(_hist_Psi_prime , 0.5*0.1 / *_weightSum);
+ scale(_hist_primary_Jpsi , 0.5*0.1 / *_weightSum);
+ scale(_mult_JPsi , 0.5*100. / *_weightSum);
+ scale(_mult_JPsi_direct , 0.5*100. / *_weightSum);
+ scale(_mult_chi_c1 , 0.5*100. / *_weightSum);
+ scale(_mult_chi_c1_direct, 0.5*100. / *_weightSum);
+ scale(_mult_chi_c2 , 0.5*100. / *_weightSum);
+ scale(_mult_chi_c2_direct, 0.5*100. / *_weightSum);
+ scale(_mult_Psi2S , 0.5*100. / *_weightSum);
} // finalize
void init() {
declare(UnstableFinalState(), "UFS");
book(_mult_JPsi ,1, 1, 1);
book(_mult_JPsi_direct ,1, 1, 2);
book(_mult_chi_c1 ,1, 1, 3);
book(_mult_chi_c1_direct ,1, 1, 4);
book(_mult_chi_c2 ,1, 1, 5);
book(_mult_chi_c2_direct ,1, 1, 6);
book(_mult_Psi2S ,1, 1, 7);
book(_hist_all_Jpsi ,6, 1, 1);
book(_hist_chi_c1 ,7, 1, 1);
book(_hist_chi_c2 ,7, 1, 2);
book(_hist_Psi_prime ,8, 1, 1);
book(_hist_primary_Jpsi ,10, 1, 1);
book(_weightSum, "TMP/weightSum");
} // init
private:
//@{
// Sum of event weights
CounterPtr _weightSum;
/// Histograms
Histo1DPtr _hist_all_Jpsi;
Histo1DPtr _hist_chi_c1;
Histo1DPtr _hist_chi_c2;
Histo1DPtr _hist_Psi_prime;
Histo1DPtr _hist_primary_Jpsi;
Histo1DPtr _mult_JPsi;
Histo1DPtr _mult_JPsi_direct;
Histo1DPtr _mult_chi_c1;
Histo1DPtr _mult_chi_c1_direct;
Histo1DPtr _mult_chi_c2;
Histo1DPtr _mult_chi_c2_direct;
Histo1DPtr _mult_Psi2S;
//@}
void findDecayProducts(const GenParticle* p,
vector<const GenParticle*>& allJpsi,
vector<const GenParticle*>& primaryJpsi,
vector<const GenParticle*>& Psiprime,
vector<const GenParticle*>& all_chi_c1, vector<const GenParticle*>& all_chi_c2,
vector<const GenParticle*>& primary_chi_c1, vector<const GenParticle*>& primary_chi_c2) {
const GenVertex* dv = p->end_vertex();
bool isOnium = false;
/// @todo Use better looping
for (GenVertex::particles_in_const_iterator pp = dv->particles_in_const_begin() ; pp != dv->particles_in_const_end() ; ++pp) {
int id = (*pp)->pdg_id();
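// Digit gymnastics: id%1000 keeps the three lowest PDG digits, subtracting
// id%10 zeroes the spin digit, and /10 leaves the two quark digits, so
// id == 44 means a c-cbar (charmonium) parent at this vertex.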
id = id%1000;
id -= id%10;
id /= 10;
if (id==44) isOnium = true;
}
/// @todo Use better looping
for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) {
int id = (*pp)->pdg_id();
if (id==100443) {
Psiprime.push_back(*pp);
}
else if (id==20443) {
all_chi_c1.push_back(*pp);
if (!isOnium) primary_chi_c1.push_back(*pp);
}
else if (id==445) {
all_chi_c2.push_back(*pp);
if (!isOnium) primary_chi_c2.push_back(*pp);
}
else if (id==443) {
allJpsi.push_back(*pp);
if (!isOnium) primaryJpsi.push_back(*pp);
}
if ((*pp)->end_vertex()) {
findDecayProducts(*pp, allJpsi, primaryJpsi, Psiprime, all_chi_c1, all_chi_c2, primary_chi_c1, primary_chi_c2);
}
}
}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(BABAR_2003_I593379);
}
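All of these analyses boost decay products into a parent rest frame before histogramming p* or xp = 2E*/m. A self-contained sketch of the kinematics behind mkFrameTransformFromBeta, using plain four-vectors rather than Rivet types (standard boost along beta = p/E of the parent):

#include <array>
#include <cmath>
#include <iostream>

using Vec4 = std::array<double, 4>;   // (E, px, py, pz) in GeV

// |p| of "child" in the rest frame of "parent".
double pInRestFrame(const Vec4& parent, const Vec4& child) {
  const double E  = parent[0];
  const double bx = parent[1]/E, by = parent[2]/E, bz = parent[3]/E;
  const double b2 = bx*bx + by*by + bz*bz;
  if (b2 < 1e-12)                       // parent already (nearly) at rest
    return std::sqrt(child[1]*child[1] + child[2]*child[2] + child[3]*child[3]);
  const double gamma = 1.0/std::sqrt(1.0 - b2);
  const double bp = bx*child[1] + by*child[2] + bz*child[3];  // beta.p
  const double k  = (gamma - 1.0)*bp/b2 - gamma*child[0];
  const double px = child[1] + k*bx, py = child[2] + k*by, pz = child[3] + k*bz;
  return std::sqrt(px*px + py*py + pz*pz);
}

int main() {
  const Vec4 parent = {10.58, 0.0, 0.0, 0.30};  // toy Upsilon(4S)-like momentum
  const Vec4 child  = { 2.00, 0.0, 0.0, 0.50};
  std::cout << "p* = " << pInRestFrame(parent, child) << " GeV\n";
}

With E* = sqrt(p*^2 + m_child^2) and m the parent mass, xp = 2E*/m follows directly.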
diff --git a/analyses/pluginMisc/BABAR_2007_S7266081.cc b/analyses/pluginMisc/BABAR_2007_S7266081.cc
--- a/analyses/pluginMisc/BABAR_2007_S7266081.cc
+++ b/analyses/pluginMisc/BABAR_2007_S7266081.cc
@@ -1,190 +1,190 @@
// -*- C++ -*-
#include <iostream>
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// @brief BABAR tau lepton to three charged hadrons
/// @author Peter Richardson
class BABAR_2007_S7266081 : public Analysis {
public:
BABAR_2007_S7266081()
: Analysis("BABAR_2007_S7266081")
{ }
void init() {
declare(UnstableFinalState(), "UFS");
book(_hist_pipipi_pipipi , 1, 1, 1);
book(_hist_pipipi_pipi , 2, 1, 1);
book(_hist_Kpipi_Kpipi , 3, 1, 1);
book(_hist_Kpipi_Kpi , 4, 1, 1);
book(_hist_Kpipi_pipi , 5, 1, 1);
book(_hist_KpiK_KpiK , 6, 1, 1);
book(_hist_KpiK_KK , 7, 1, 1);
book(_hist_KpiK_piK , 8, 1, 1);
book(_hist_KKK_KKK , 9, 1, 1);
book(_hist_KKK_KK ,10, 1, 1);
book(_weight_total, "weight_total");
book(_weight_pipipi, "weight_pipipi");
book(_weight_Kpipi, "weight_Kpipi");
book(_weight_KpiK, "weight_KpiK");
book(_weight_KKK, "weight_KKK");
/// @note Using autobooking for these scatters since their x values are not really obtainable from the MC data
book(tmp11, 11, 1, 1, true);
book(tmp12, 12, 1, 1, true);
book(tmp13, 13, 1, 1, true);
book(tmp14, 14, 1, 1, true);
}
void analyze(const Event& e) {
// Find the taus
Particles taus;
foreach(const Particle& p, apply<UnstableFinalState>(e, "UFS").particles(Cuts::pid==PID::TAU)) {
_weight_total->fill();
Particles pip, pim, Kp, Km;
unsigned int nstable = 0;
// Get the boost to the rest frame
LorentzTransform cms_boost;
if (p.p3().mod() > 1*MeV)
cms_boost = LorentzTransform::mkFrameTransformFromBeta(p.momentum().betaVec());
// Find the decay products we want
findDecayProducts(p.genParticle(), nstable, pip, pim, Kp, Km);
if (p.pid() < 0) {
swap(pip, pim);
swap(Kp, Km );
}
if (nstable != 4) continue;
// pipipi
if (pim.size() == 2 && pip.size() == 1) {
_weight_pipipi->fill();
_hist_pipipi_pipipi->
fill((pip[0].momentum()+pim[0].momentum()+pim[1].momentum()).mass());
_hist_pipipi_pipi->
fill((pip[0].momentum()+pim[0].momentum()).mass());
_hist_pipipi_pipi->
fill((pip[0].momentum()+pim[1].momentum()).mass());
}
else if (pim.size() == 1 && pip.size() == 1 && Km.size() == 1) {
_weight_Kpipi->fill();
_hist_Kpipi_Kpipi->
fill((pim[0].momentum()+pip[0].momentum()+Km[0].momentum()).mass());
_hist_Kpipi_Kpi->
fill((pip[0].momentum()+Km[0].momentum()).mass());
_hist_Kpipi_pipi->
fill((pim[0].momentum()+pip[0].momentum()).mass());
}
else if (Kp.size() == 1 && Km.size() == 1 && pim.size() == 1) {
_weight_KpiK->fill();
_hist_KpiK_KpiK->
fill((Kp[0].momentum()+Km[0].momentum()+pim[0].momentum()).mass());
_hist_KpiK_KK->
fill((Kp[0].momentum()+Km[0].momentum()).mass());
_hist_KpiK_piK->
fill((Kp[0].momentum()+pim[0].momentum()).mass());
}
else if (Kp.size() == 1 && Km.size() == 2) {
_weight_KKK->fill();
_hist_KKK_KKK->
fill((Kp[0].momentum()+Km[0].momentum()+Km[1].momentum()).mass());
_hist_KKK_KK->
fill((Kp[0].momentum()+Km[0].momentum()).mass());
_hist_KKK_KK->
fill((Kp[0].momentum()+Km[1].momentum()).mass());
}
}
}
void finalize() {
- if (_weight_pipipi > 0.) {
- scale(_hist_pipipi_pipipi, 1.0/_weight_pipipi);
- scale(_hist_pipipi_pipi , 0.5/_weight_pipipi);
+ if (_weight_pipipi->val() > 0.) {
+ scale(_hist_pipipi_pipipi, 1.0 / *_weight_pipipi);
+ scale(_hist_pipipi_pipi , 0.5 / *_weight_pipipi);
}
- if (_weight_Kpipi > 0.) {
- scale(_hist_Kpipi_Kpipi , 1.0/_weight_Kpipi);
- scale(_hist_Kpipi_Kpi , 1.0/_weight_Kpipi);
- scale(_hist_Kpipi_pipi , 1.0/_weight_Kpipi);
+ if (_weight_Kpipi->val() > 0.) {
+ scale(_hist_Kpipi_Kpipi , 1.0 / *_weight_Kpipi);
+ scale(_hist_Kpipi_Kpi , 1.0 / *_weight_Kpipi);
+ scale(_hist_Kpipi_pipi , 1.0 / *_weight_Kpipi);
}
- if (_weight_KpiK > 0.) {
- scale(_hist_KpiK_KpiK , 1.0/_weight_KpiK);
- scale(_hist_KpiK_KK , 1.0/_weight_KpiK);
- scale(_hist_KpiK_piK , 1.0/_weight_KpiK);
+ if (_weight_KpiK->val() > 0.) {
+ scale(_hist_KpiK_KpiK , 1.0 / *_weight_KpiK);
+ scale(_hist_KpiK_KK , 1.0 / *_weight_KpiK);
+ scale(_hist_KpiK_piK , 1.0 / *_weight_KpiK);
}
- if (_weight_KKK > 0.) {
- scale(_hist_KKK_KKK , 1.0/_weight_KKK);
- scale(_hist_KKK_KK , 0.5/_weight_KKK);
+ if (_weight_KKK->val() > 0.) {
+ scale(_hist_KKK_KKK , 1.0 / *_weight_KKK);
+ scale(_hist_KKK_KK , 0.5 / *_weight_KKK);
}
- tmp11->point(0).setY(100*_weight_pipipi/_weight_total, 100*sqrt(double(_weight_pipipi))/_weight_total);
- tmp12->point(0).setY(100*_weight_Kpipi/_weight_total, 100*sqrt(double(_weight_Kpipi))/_weight_total);
- tmp13->point(0).setY(100*_weight_KpiK/_weight_total, 100*sqrt(double(_weight_KpiK))/_weight_total);
- tmp14->point(0).setY(100*_weight_KKK/_weight_total, 100*sqrt(double(_weight_KKK))/_weight_total);
+ tmp11->point(0).setY(100*_weight_pipipi->val()/_weight_total->val(), 100*sqrt(double(_weight_pipipi->val()))/_weight_total->val());
+ tmp12->point(0).setY(100*_weight_Kpipi->val()/_weight_total->val(), 100*sqrt(double(_weight_Kpipi->val()))/_weight_total->val());
+ tmp13->point(0).setY(100*_weight_KpiK->val()/_weight_total->val(), 100*sqrt(double(_weight_KpiK->val()))/_weight_total->val());
+ tmp14->point(0).setY(100*_weight_KKK->val()/_weight_total->val(), 100*sqrt(double(_weight_KKK->val()))/_weight_total->val());
}
private:
//@{
Scatter2DPtr tmp11, tmp12, tmp13, tmp14;
// Histograms
Histo1DPtr _hist_pipipi_pipipi, _hist_pipipi_pipi;
Histo1DPtr _hist_Kpipi_Kpipi, _hist_Kpipi_Kpi, _hist_Kpipi_pipi;
Histo1DPtr _hist_KpiK_KpiK, _hist_KpiK_KK, _hist_KpiK_piK;
Histo1DPtr _hist_KKK_KKK, _hist_KKK_KK;
// Weight counters
CounterPtr _weight_total, _weight_pipipi, _weight_Kpipi, _weight_KpiK, _weight_KKK;
//@}
void findDecayProducts(const GenParticle* p,
unsigned int & nstable,
Particles& pip, Particles& pim,
Particles& Kp, Particles& Km) {
const GenVertex* dv = p->end_vertex();
/// @todo Use better looping
for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) {
int id = (*pp)->pdg_id();
if (id == PID::PI0 )
++nstable;
else if (id == PID::K0S)
++nstable;
else if (id == PID::PIPLUS) {
pip.push_back(Particle(**pp));
++nstable;
}
else if (id == PID::PIMINUS) {
pim.push_back(Particle(**pp));
++nstable;
}
else if (id == PID::KPLUS) {
Kp.push_back(Particle(**pp));
++nstable;
}
else if (id == PID::KMINUS) {
Km.push_back(Particle(**pp));
++nstable;
}
else if ((*pp)->end_vertex()) {
findDecayProducts(*pp, nstable, pip, pim, Kp, Km);
}
else
++nstable;
}
}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(BABAR_2007_S7266081);
}
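The four autobooked scatter points are set to 100*w_mode/w_total with an uncertainty of 100*sqrt(w_mode)/w_total, i.e. a percentage with a plain Poisson error on the numerator; note this is only meaningful for unit event weights. In miniature:

#include <cmath>
#include <iostream>
#include <utility>

// Percentage fraction and its Poisson (sqrt N) error, as in the setY() calls
// above. With non-unit weights one would propagate sumW2 instead.
std::pair<double, double> fracPercent(double nMode, double nTotal) {
  return { 100.0*nMode/nTotal, 100.0*std::sqrt(nMode)/nTotal };
}

int main() {
  const auto [f, df] = fracPercent(90.0, 1000.0);  // e.g. 90 pipipi decays in 1000 taus
  std::cout << f << " +- " << df << " %\n";        // 9 +- 0.949 %
}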
diff --git a/analyses/pluginMisc/BELLE_2001_S4598261.cc b/analyses/pluginMisc/BELLE_2001_S4598261.cc
--- a/analyses/pluginMisc/BELLE_2001_S4598261.cc
+++ b/analyses/pluginMisc/BELLE_2001_S4598261.cc
@@ -1,105 +1,105 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// @brief BELLE pi0 spectrum at Upsilon(4S)
/// @author Peter Richardson
class BELLE_2001_S4598261 : public Analysis {
public:
BELLE_2001_S4598261()
: Analysis("BELLE_2001_S4598261")
{ }
void init() {
declare(UnstableFinalState(), "UFS");
book(_histdSigDp ,1, 1, 1); // spectrum
book(_histMult ,2, 1, 1); // multiplicity
book(_weightSum, "TMP/weightSum");
}
void analyze(const Event& e) {
// Find the upsilons
Particles upsilons;
// First in unstable final state
const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
foreach (const Particle& p, ufs.particles())
if (p.pid()==300553) upsilons.push_back(p);
// Then in the whole event if that fails
if (upsilons.empty()) {
foreach (const GenParticle* p, Rivet::particles(e.genEvent())) {
if (p->pdg_id() != 300553) continue;
const GenVertex* pv = p->production_vertex();
bool passed = true;
if (pv) {
/// @todo Use better looping
for (GenVertex::particles_in_const_iterator pp = pv->particles_in_const_begin() ; pp != pv->particles_in_const_end() ; ++pp) {
if ( p->pdg_id() == (*pp)->pdg_id() ) {
passed = false;
break;
}
}
}
if (passed) upsilons.push_back(Particle(p));
}
}
// Loop over the upsilons
foreach (const Particle& p, upsilons) {
_weightSum->fill();
// Find the neutral pions from the decay
vector<GenParticle *> pions;
findDecayProducts(p.genParticle(), pions);
const LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(p.momentum().betaVec());
for (size_t ix=0; ix<pions.size(); ++ix) {
const double pcm = cms_boost.transform(FourMomentum(pions[ix]->momentum())).p();
_histdSigDp->fill(pcm);
}
_histMult->fill(0., pions.size());
}
}
void finalize() {
- scale(_histdSigDp, 1./_weightSum);
- scale(_histMult , 1./_weightSum);
+ scale(_histdSigDp, 1./ *_weightSum);
+ scale(_histMult , 1./ *_weightSum);
}
private:
//@{
// Sum of event weights
CounterPtr _weightSum;
/// Histograms
Histo1DPtr _histdSigDp;
Histo1DPtr _histMult;
//@}
void findDecayProducts(const GenParticle* p, vector<GenParticle*>& pions) {
const GenVertex* dv = p->end_vertex();
/// @todo Use better looping
for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) {
const int id = (*pp)->pdg_id();
if (id == 111) {
pions.push_back(*pp);
} else if ((*pp)->end_vertex())
findDecayProducts(*pp, pions);
}
}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(BELLE_2001_S4598261);
}
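Each analysis in this patch carries its own findDecayProducts() walker, and all of them dereference p->end_vertex() without a null check; that is safe only because every caller passes a particle known to decay. A guarded sketch of the same recursion on a toy tree (plain structs standing in for HepMC, and a made-up decay just to exercise the walk):

#include <cstdlib>
#include <iostream>
#include <vector>

// Toy decay-tree node in place of HepMC::GenParticle / GenVertex.
struct Node {
  int pdgId;
  std::vector<const Node*> children;   // empty <=> stable (no end vertex)
};

// Collect all descendants with |PDG id| == wanted; recurse through the rest.
void findDecayProducts(const Node* p, int wanted, std::vector<const Node*>& out) {
  if (p->children.empty()) return;     // guard that the Rivet walkers omit
  for (const Node* c : p->children) {
    if (std::abs(c->pdgId) == wanted) out.push_back(c);
    else if (!c->children.empty()) findDecayProducts(c, wanted, out);
  }
}

int main() {
  const Node pi0a{111, {}}, pi0b{111, {}}, gamma{22, {}};
  const Node x{113, {&pi0a, &gamma}};
  const Node ups{300553, {&x, &pi0b}};
  std::vector<const Node*> pions;
  findDecayProducts(&ups, 111, pions);
  std::cout << pions.size() << " pi0 found\n";   // 2
}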
diff --git a/analyses/pluginMisc/BELLE_2008_I786560.cc b/analyses/pluginMisc/BELLE_2008_I786560.cc
--- a/analyses/pluginMisc/BELLE_2008_I786560.cc
+++ b/analyses/pluginMisc/BELLE_2008_I786560.cc
@@ -1,112 +1,112 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// @brief BELLE tau lepton to pi pi
/// @author Peter Richardson
class BELLE_2008_I786560 : public Analysis {
public:
BELLE_2008_I786560()
: Analysis("BELLE_2008_I786560")
{ }
void init() {
declare(UnstableFinalState(), "UFS");
book(_hist_pipi , 1, 1, 1);
book(_weight_total, "TMP/weight_total");
book(_weight_pipi, "TMP/weight_pipi");
}
void analyze(const Event& e) {
// Find the taus
Particles taus;
const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
foreach (const Particle& p, ufs.particles()) {
if (p.abspid() != PID::TAU) continue;
_weight_total->fill();
Particles pip, pim, pi0;
unsigned int nstable = 0;
// get the boost to the rest frame
LorentzTransform cms_boost;
if (p.p3().mod() > 1*MeV)
cms_boost = LorentzTransform::mkFrameTransformFromBeta(p.momentum().betaVec());
// find the decay products we want
findDecayProducts(p.genParticle(), nstable, pip, pim, pi0);
if (p.pid() < 0) {
swap(pip, pim);
}
if (nstable != 3) continue;
// pipi
if (pim.size() == 1 && pi0.size() == 1) {
_weight_pipi->fill();
_hist_pipi->fill((pi0[0].momentum()+pim[0].momentum()).mass2());
}
}
}
void finalize() {
- if (_weight_pipi > 0.) scale(_hist_pipi, 1./_weight_pipi);
+ if (_weight_pipi->val() > 0.) scale(_hist_pipi, 1. / *_weight_pipi);
}
private:
//@{
// Histograms
Histo1DPtr _hist_pipi;
// Weight counters
CounterPtr _weight_total, _weight_pipi;
//@}
void findDecayProducts(const GenParticle* p,
unsigned int & nstable,
Particles& pip, Particles& pim,
Particles& pi0) {
const GenVertex* dv = p->end_vertex();
/// @todo Use better looping
for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) {
int id = (*pp)->pdg_id();
if (id == PID::PI0 ) {
pi0.push_back(Particle(**pp));
++nstable;
}
else if (id == PID::K0S)
++nstable;
else if (id == PID::PIPLUS) {
pip.push_back(Particle(**pp));
++nstable;
}
else if (id == PID::PIMINUS) {
pim.push_back(Particle(**pp));
++nstable;
}
else if (id == PID::KPLUS) {
++nstable;
}
else if (id == PID::KMINUS) {
++nstable;
}
else if ((*pp)->end_vertex()) {
findDecayProducts(*pp, nstable, pip, pim, pi0);
}
else
++nstable;
}
}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(BELLE_2008_I786560);
}
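The fill above histograms the pi- pi0 invariant mass squared. For reference, m2 of a two-particle system from its four-momenta, in a self-contained sketch (toy numbers, GeV):

#include <array>
#include <iostream>

using Vec4 = std::array<double, 4>;   // (E, px, py, pz) in GeV

// m^2 = (E1+E2)^2 - |p1+p2|^2
double mass2(const Vec4& a, const Vec4& b) {
  const double E  = a[0] + b[0];
  const double px = a[1] + b[1], py = a[2] + b[2], pz = a[3] + b[3];
  return E*E - px*px - py*py - pz*pz;
}

int main() {
  const Vec4 pim = {0.4776, 0.0, 0.0,  0.4564};  // roughly a pi-
  const Vec4 pi0 = {0.4266, 0.0, 0.0, -0.4049};  // roughly a pi0
  std::cout << "m^2(pi pi0) = " << mass2(pim, pi0) << " GeV^2\n";
}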
diff --git a/analyses/pluginMisc/E735_1998_S3905616.cc b/analyses/pluginMisc/E735_1998_S3905616.cc
--- a/analyses/pluginMisc/E735_1998_S3905616.cc
+++ b/analyses/pluginMisc/E735_1998_S3905616.cc
@@ -1,71 +1,71 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/TriggerCDFRun0Run1.hh"
#include "Rivet/Projections/TriggerUA5.hh"
namespace Rivet {
/// @brief E735 charged multiplicity in NSD-triggered events
class E735_1998_S3905616 : public Analysis {
public:
/// Constructor
E735_1998_S3905616() : Analysis("E735_1998_S3905616") {}
/// @name Analysis methods
//@{
void init() {
// Projections
declare(TriggerUA5(), "Trigger");
declare(ChargedFinalState(), "FS");
// Histo
book(_hist_multiplicity ,1, 1, 1);
book(_sumWTrig, "TMP/sumWtrig");
}
void analyze(const Event& event) {
const bool trigger = apply<TriggerUA5>(event, "Trigger").nsdDecision();
if (!trigger) vetoEvent;
_sumWTrig->fill();
const ChargedFinalState& fs = apply<ChargedFinalState>(event, "FS");
const size_t numParticles = fs.particles().size();
_hist_multiplicity->fill(numParticles);
}
void finalize() {
- scale(_hist_multiplicity, 1/_sumWTrig);
+ scale(_hist_multiplicity, 1 / *_sumWTrig);
}
//@}
private:
/// @name Weight counter
//@{
CounterPtr _sumWTrig;
//@}
/// @name Histograms
//@{
Histo1DPtr _hist_multiplicity;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(E735_1998_S3905616);
}
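Dividing by the trigger counter turns the raw multiplicity histogram into the fraction of NSD-triggered events in each multiplicity bin. In miniature, assuming unit weights:

#include <iostream>
#include <map>

int main() {
  std::map<int, double> hist;    // toy multiplicity histogram: nCh -> sumW
  double sumWTrig = 0.0;         // stand-in for *_sumWTrig
  for (int nch : {12, 12, 30, 30, 30, 54}) { sumWTrig += 1.0; hist[nch] += 1.0; }
  for (auto& kv : hist) kv.second /= sumWTrig;   // scale(hist, 1 / *_sumWTrig)
  for (const auto& [nch, w] : hist)
    std::cout << "P(nCh = " << nch << ") = " << w << "\n";  // 1/3, 1/2, 1/6
}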
diff --git a/analyses/pluginMisc/PDG_HADRON_MULTIPLICITIES_RATIOS.cc b/analyses/pluginMisc/PDG_HADRON_MULTIPLICITIES_RATIOS.cc
--- a/analyses/pluginMisc/PDG_HADRON_MULTIPLICITIES_RATIOS.cc
+++ b/analyses/pluginMisc/PDG_HADRON_MULTIPLICITIES_RATIOS.cc
@@ -1,761 +1,761 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// @brief Implementation of PDG hadron multiplicities as ratios to \f$ \pi^\pm \f$ multiplicity
/// @author Holger Schulz
class PDG_HADRON_MULTIPLICITIES_RATIOS : public Analysis {
public:
/// Constructor
PDG_HADRON_MULTIPLICITIES_RATIOS() : Analysis("PDG_HADRON_MULTIPLICITIES_RATIOS")
{}
/// @name Analysis methods
//@{
void analyze(const Event& e) {
// First, veto on leptonic events by requiring at least 2 charged FS particles
const FinalState& fs = apply<FinalState>(e, "FS");
const size_t numParticles = fs.particles().size();
// Even if we only generate hadronic events, we still need a cut on numCharged >= 2.
if (numParticles < 2) {
MSG_DEBUG("Failed leptonic event cut");
vetoEvent;
}
MSG_DEBUG("Passed leptonic event cut");
MSG_DEBUG("sqrt(S) = " << sqrtS()/GeV << " GeV");
// Final state of unstable particles to get particle spectra
const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
if (sqrtS()/GeV >= 9.5 && sqrtS()/GeV <= 10.5) {
foreach (const Particle& p, ufs.particles()) {
const PdgId id = p.abspid();
switch (id) {
case 211:
_weightedTotalNumPiPlus->fill();
break;
case 111:
_histMeanMultiPi0->fill(_histMeanMultiPi0->bin(0).xMid());
break;
case 321:
_histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid());
break;
case 130:
case 310:
_histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid());
break;
case 221:
_histMeanMultiEta->fill(_histMeanMultiEta->bin(0).xMid());
break;
case 331:
_histMeanMultiEtaPrime->fill(_histMeanMultiEtaPrime->bin(0).xMid());
break;
case 411:
_histMeanMultiDPlus->fill(_histMeanMultiDPlus->bin(0).xMid());
break;
case 421:
_histMeanMultiD0->fill(_histMeanMultiD0->bin(0).xMid());
break;
case 431:
_histMeanMultiDPlus_s->fill(_histMeanMultiDPlus_s->bin(0).xMid());
break;
case 9010221:
_histMeanMultiF0_980->fill(_histMeanMultiF0_980->bin(0).xMid());
break;
case 113:
_histMeanMultiRho770_0->fill(_histMeanMultiRho770_0->bin(0).xMid());
break;
case 223:
_histMeanMultiOmega782->fill(_histMeanMultiOmega782->bin(0).xMid());
break;
case 323:
_histMeanMultiKStar892Plus->fill(_histMeanMultiKStar892Plus->bin(0).xMid());
break;
case 313:
_histMeanMultiKStar892_0->fill(_histMeanMultiKStar892_0->bin(0).xMid());
break;
case 333:
_histMeanMultiPhi1020->fill(_histMeanMultiPhi1020->bin(0).xMid());
break;
case 413:
_histMeanMultiDStar2010Plus->fill(_histMeanMultiDStar2010Plus->bin(0).xMid());
break;
case 423:
_histMeanMultiDStar2007_0->fill(_histMeanMultiDStar2007_0->bin(0).xMid());
break;
case 433:
_histMeanMultiDStar_s2112Plus->fill(_histMeanMultiDStar_s2112Plus->bin(0).xMid());
break;
case 443:
_histMeanMultiJPsi1S->fill(_histMeanMultiJPsi1S->bin(0).xMid());
break;
case 225:
_histMeanMultiF2_1270->fill(_histMeanMultiF2_1270->bin(0).xMid());
break;
case 2212:
_histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid());
break;
case 3122:
_histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid());
break;
case 3212:
_histMeanMultiSigma0->fill(_histMeanMultiSigma0->bin(0).xMid());
break;
case 3312:
_histMeanMultiXiMinus->fill(_histMeanMultiXiMinus->bin(0).xMid());
break;
case 2224:
_histMeanMultiDelta1232PlusPlus->fill(_histMeanMultiDelta1232PlusPlus->bin(0).xMid());
break;
case 3114:
_histMeanMultiSigma1385Minus->fill(_histMeanMultiSigma1385Minus->bin(0).xMid());
_histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid());
break;
case 3224:
_histMeanMultiSigma1385Plus->fill(_histMeanMultiSigma1385Plus->bin(0).xMid());
_histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid());
break;
case 3324:
_histMeanMultiXi1530_0->fill(_histMeanMultiXi1530_0->bin(0).xMid());
break;
case 3334:
_histMeanMultiOmegaMinus->fill(_histMeanMultiOmegaMinus->bin(0).xMid());
break;
case 4122:
_histMeanMultiLambda_c_Plus->fill(_histMeanMultiLambda_c_Plus->bin(0).xMid());
break;
case 4222:
case 4112:
_histMeanMultiSigma_c_PlusPlus_0->fill(_histMeanMultiSigma_c_PlusPlus_0->bin(0).xMid());
break;
case 3124:
_histMeanMultiLambda1520->fill(_histMeanMultiLambda1520->bin(0).xMid());
break;
}
}
}
if (sqrtS()/GeV >= 29 && sqrtS()/GeV <= 35) {
foreach (const Particle& p, ufs.particles()) {
const PdgId id = p.abspid();
switch (id) {
case 211:
_weightedTotalNumPiPlus->fill();
break;
case 111:
_histMeanMultiPi0->fill(_histMeanMultiPi0->bin(0).xMid());
break;
case 321:
_histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid());
break;
case 130:
case 310:
_histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid());
break;
case 221:
_histMeanMultiEta->fill(_histMeanMultiEta->bin(0).xMid());
break;
case 331:
_histMeanMultiEtaPrime->fill(_histMeanMultiEtaPrime->bin(0).xMid());
break;
case 411:
_histMeanMultiDPlus->fill(_histMeanMultiDPlus->bin(0).xMid());
break;
case 421:
_histMeanMultiD0->fill(_histMeanMultiD0->bin(0).xMid());
break;
case 431:
_histMeanMultiDPlus_s->fill(_histMeanMultiDPlus_s->bin(0).xMid());
break;
case 9010221:
_histMeanMultiF0_980->fill(_histMeanMultiF0_980->bin(0).xMid());
break;
case 113:
_histMeanMultiRho770_0->fill(_histMeanMultiRho770_0->bin(0).xMid());
break;
case 323:
_histMeanMultiKStar892Plus->fill(_histMeanMultiKStar892Plus->bin(0).xMid());
break;
case 313:
_histMeanMultiKStar892_0->fill(_histMeanMultiKStar892_0->bin(0).xMid());
break;
case 333:
_histMeanMultiPhi1020->fill(_histMeanMultiPhi1020->bin(0).xMid());
break;
case 413:
_histMeanMultiDStar2010Plus->fill(_histMeanMultiDStar2010Plus->bin(0).xMid());
break;
case 423:
_histMeanMultiDStar2007_0->fill(_histMeanMultiDStar2007_0->bin(0).xMid());
break;
case 225:
_histMeanMultiF2_1270->fill(_histMeanMultiF2_1270->bin(0).xMid());
break;
case 325:
_histMeanMultiK2Star1430Plus->fill(_histMeanMultiK2Star1430Plus->bin(0).xMid());
break;
case 315:
_histMeanMultiK2Star1430_0->fill(_histMeanMultiK2Star1430_0->bin(0).xMid());
break;
case 2212:
_histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid());
break;
case 3122:
_histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid());
break;
case 3312:
_histMeanMultiXiMinus->fill(_histMeanMultiXiMinus->bin(0).xMid());
break;
case 3114:
_histMeanMultiSigma1385Minus->fill(_histMeanMultiSigma1385Minus->bin(0).xMid());
_histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid());
break;
case 3224:
_histMeanMultiSigma1385Plus->fill(_histMeanMultiSigma1385Plus->bin(0).xMid());
_histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid());
break;
case 3334:
_histMeanMultiOmegaMinus->fill(_histMeanMultiOmegaMinus->bin(0).xMid());
break;
case 4122:
_histMeanMultiLambda_c_Plus->fill(_histMeanMultiLambda_c_Plus->bin(0).xMid());
break;
}
}
}
if (sqrtS()/GeV >= 89.5 && sqrtS()/GeV <= 91.8) {
foreach (const Particle& p, ufs.particles()) {
const PdgId id = p.abspid();
switch (id) {
case 211:
_weightedTotalNumPiPlus->fill();
break;
case 111:
_histMeanMultiPi0->fill(_histMeanMultiPi0->bin(0).xMid());
break;
case 321:
_histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid());
break;
case 130:
case 310:
_histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid());
break;
case 221:
_histMeanMultiEta->fill(_histMeanMultiEta->bin(0).xMid());
break;
case 331:
_histMeanMultiEtaPrime->fill(_histMeanMultiEtaPrime->bin(0).xMid());
break;
case 411:
_histMeanMultiDPlus->fill(_histMeanMultiDPlus->bin(0).xMid());
break;
case 421:
_histMeanMultiD0->fill(_histMeanMultiD0->bin(0).xMid());
break;
case 431:
_histMeanMultiDPlus_s->fill(_histMeanMultiDPlus_s->bin(0).xMid());
break;
case 511:
_histMeanMultiBPlus_B0_d->fill(_histMeanMultiBPlus_B0_d->bin(0).xMid());
break;
case 521:
_histMeanMultiBPlus_B0_d->fill(_histMeanMultiBPlus_B0_d->bin(0).xMid());
_histMeanMultiBPlus_u->fill(_histMeanMultiBPlus_u->bin(0).xMid());
break;
case 531:
_histMeanMultiB0_s->fill(_histMeanMultiB0_s->bin(0).xMid());
break;
case 9010221:
_histMeanMultiF0_980->fill(_histMeanMultiF0_980->bin(0).xMid());
break;
case 9000211:
_histMeanMultiA0_980Plus->fill(_histMeanMultiA0_980Plus->bin(0).xMid());
break;
case 113:
_histMeanMultiRho770_0->fill(_histMeanMultiRho770_0->bin(0).xMid());
break;
case 213:
_histMeanMultiRho770Plus->fill(_histMeanMultiRho770Plus->bin(0).xMid());
break;
case 223:
_histMeanMultiOmega782->fill(_histMeanMultiOmega782->bin(0).xMid());
break;
case 323:
_histMeanMultiKStar892Plus->fill(_histMeanMultiKStar892Plus->bin(0).xMid());
break;
case 313:
_histMeanMultiKStar892_0->fill(_histMeanMultiKStar892_0->bin(0).xMid());
break;
case 333:
_histMeanMultiPhi1020->fill(_histMeanMultiPhi1020->bin(0).xMid());
break;
case 413:
_histMeanMultiDStar2010Plus->fill(_histMeanMultiDStar2010Plus->bin(0).xMid());
break;
case 433:
_histMeanMultiDStar_s2112Plus->fill(_histMeanMultiDStar_s2112Plus->bin(0).xMid());
break;
case 513:
case 523:
case 533:
_histMeanMultiBStar->fill(_histMeanMultiBStar->bin(0).xMid());
break;
case 443:
_histMeanMultiJPsi1S->fill(_histMeanMultiJPsi1S->bin(0).xMid());
break;
case 100443:
_histMeanMultiPsi2S->fill(_histMeanMultiPsi2S->bin(0).xMid());
break;
case 553:
_histMeanMultiUpsilon1S->fill(_histMeanMultiUpsilon1S->bin(0).xMid());
break;
case 20223:
_histMeanMultiF1_1285->fill(_histMeanMultiF1_1285->bin(0).xMid());
break;
case 20333:
_histMeanMultiF1_1420->fill(_histMeanMultiF1_1420->bin(0).xMid());
break;
case 445:
_histMeanMultiChi_c1_3510->fill(_histMeanMultiChi_c1_3510->bin(0).xMid());
break;
case 225:
_histMeanMultiF2_1270->fill(_histMeanMultiF2_1270->bin(0).xMid());
break;
case 335:
_histMeanMultiF2Prime1525->fill(_histMeanMultiF2Prime1525->bin(0).xMid());
break;
case 315:
_histMeanMultiK2Star1430_0->fill(_histMeanMultiK2Star1430_0->bin(0).xMid());
break;
case 515:
case 525:
case 535:
_histMeanMultiBStarStar->fill(_histMeanMultiBStarStar->bin(0).xMid());
break;
case 10433:
case 20433:
_histMeanMultiDs1Plus->fill(_histMeanMultiDs1Plus->bin(0).xMid());
break;
case 435:
_histMeanMultiDs2Plus->fill(_histMeanMultiDs2Plus->bin(0).xMid());
break;
case 2212:
_histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid());
break;
case 3122:
_histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid());
break;
case 3212:
_histMeanMultiSigma0->fill(_histMeanMultiSigma0->bin(0).xMid());
break;
case 3112:
_histMeanMultiSigmaMinus->fill(_histMeanMultiSigmaMinus->bin(0).xMid());
_histMeanMultiSigmaPlusMinus->fill(_histMeanMultiSigmaPlusMinus->bin(0).xMid());
break;
case 3222:
_histMeanMultiSigmaPlus->fill(_histMeanMultiSigmaPlus->bin(0).xMid());
_histMeanMultiSigmaPlusMinus->fill(_histMeanMultiSigmaPlusMinus->bin(0).xMid());
break;
case 3312:
_histMeanMultiXiMinus->fill(_histMeanMultiXiMinus->bin(0).xMid());
break;
case 2224:
_histMeanMultiDelta1232PlusPlus->fill(_histMeanMultiDelta1232PlusPlus->bin(0).xMid());
break;
case 3114:
_histMeanMultiSigma1385Minus->fill(_histMeanMultiSigma1385Minus->bin(0).xMid());
_histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid());
break;
case 3224:
_histMeanMultiSigma1385Plus->fill(_histMeanMultiSigma1385Plus->bin(0).xMid());
_histMeanMultiSigma1385PlusMinus->fill(_histMeanMultiSigma1385PlusMinus->bin(0).xMid());
break;
case 3324:
_histMeanMultiXi1530_0->fill(_histMeanMultiXi1530_0->bin(0).xMid());
break;
case 3334:
_histMeanMultiOmegaMinus->fill(_histMeanMultiOmegaMinus->bin(0).xMid());
break;
case 4122:
_histMeanMultiLambda_c_Plus->fill(_histMeanMultiLambda_c_Plus->bin(0).xMid());
break;
case 5122:
_histMeanMultiLambda_b_0->fill(_histMeanMultiLambda_b_0->bin(0).xMid());
break;
case 3124:
_histMeanMultiLambda1520->fill(_histMeanMultiLambda1520->bin(0).xMid());
break;
}
}
}
if (sqrtS()/GeV >= 130 && sqrtS()/GeV <= 200) {
foreach (const Particle& p, ufs.particles()) {
const PdgId id = p.abspid();
switch (id) {
case 211:
_weightedTotalNumPiPlus->fill();
break;
case 321:
_histMeanMultiKPlus->fill(_histMeanMultiKPlus->bin(0).xMid());
break;
case 130:
case 310:
_histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid());
break;
case 2212:
_histMeanMultiP->fill(_histMeanMultiP->bin(0).xMid());
break;
case 3122:
_histMeanMultiLambda->fill(_histMeanMultiLambda->bin(0).xMid());
break;
}
}
}
}
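// (Idiom note: fill(hist->bin(0).xMid()) drops one unit of weight into the
// only bin of a one-bin histogram; once finalize() below divides by the
// weighted pi+- count, the bin content is, up to the constant prefactor,
// the mean multiplicity ratio <N_X>/<N_pi+->.)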
void init() {
declare(ChargedFinalState(), "FS");
declare(UnstableFinalState(), "UFS");
book(_weightedTotalNumPiPlus, "TMP/PiPlus");
if (sqrtS()/GeV >= 9.5 && sqrtS()/GeV <= 10.5) {
book(_histMeanMultiPi0 , 2, 1, 1);
book(_histMeanMultiKPlus , 3, 1, 1);
book(_histMeanMultiK0 , 4, 1, 1);
book(_histMeanMultiEta , 5, 1, 1);
book(_histMeanMultiEtaPrime , 6, 1, 1);
book(_histMeanMultiDPlus , 7, 1, 1);
book(_histMeanMultiD0 , 8, 1, 1);
book(_histMeanMultiDPlus_s , 9, 1, 1);
book(_histMeanMultiF0_980 ,13, 1, 1);
book(_histMeanMultiRho770_0 ,15, 1, 1);
book(_histMeanMultiOmega782 ,17, 1, 1);
book(_histMeanMultiKStar892Plus ,18, 1, 1);
book(_histMeanMultiKStar892_0 ,19, 1, 1);
book(_histMeanMultiPhi1020 ,20, 1, 1);
book(_histMeanMultiDStar2010Plus ,21, 1, 1);
book(_histMeanMultiDStar2007_0 ,22, 1, 1);
book(_histMeanMultiDStar_s2112Plus ,23, 1, 1);
book(_histMeanMultiJPsi1S ,25, 1, 1);
book(_histMeanMultiF2_1270 ,31, 1, 1);
book(_histMeanMultiP ,38, 1, 1);
book(_histMeanMultiLambda ,39, 1, 1);
book(_histMeanMultiSigma0 ,40, 1, 1);
book(_histMeanMultiXiMinus ,44, 1, 1);
book(_histMeanMultiDelta1232PlusPlus ,45, 1, 1);
book(_histMeanMultiSigma1385Minus ,46, 1, 1);
book(_histMeanMultiSigma1385Plus ,47, 1, 1);
book(_histMeanMultiSigma1385PlusMinus ,48, 1, 1);
book(_histMeanMultiXi1530_0 ,49, 1, 1);
book(_histMeanMultiOmegaMinus ,50, 1, 1);
book(_histMeanMultiLambda_c_Plus ,51, 1, 1);
book(_histMeanMultiSigma_c_PlusPlus_0 ,53, 1, 1);
book(_histMeanMultiLambda1520 ,54, 1, 1);
}
if (sqrtS()/GeV >= 29 && sqrtS()/GeV <= 35) {
book(_histMeanMultiPi0 , 2, 1, 2);
book(_histMeanMultiKPlus , 3, 1, 2);
book(_histMeanMultiK0 , 4, 1, 2);
book(_histMeanMultiEta , 5, 1, 2);
book(_histMeanMultiEtaPrime , 6, 1, 2);
book(_histMeanMultiDPlus , 7, 1, 2);
book(_histMeanMultiD0 , 8, 1, 2);
book(_histMeanMultiDPlus_s , 9, 1, 2);
book(_histMeanMultiF0_980 ,13, 1, 2);
book(_histMeanMultiRho770_0 ,15, 1, 2);
book(_histMeanMultiKStar892Plus ,18, 1, 2);
book(_histMeanMultiKStar892_0 ,19, 1, 2);
book(_histMeanMultiPhi1020 ,20, 1, 2);
book(_histMeanMultiDStar2010Plus ,21, 1, 2);
book(_histMeanMultiDStar2007_0 ,22, 1, 2);
book(_histMeanMultiF2_1270 ,31, 1, 2);
book(_histMeanMultiK2Star1430Plus ,33, 1, 1);
book(_histMeanMultiK2Star1430_0 ,34, 1, 1);
book(_histMeanMultiP ,38, 1, 2);
book(_histMeanMultiLambda ,39, 1, 2);
book(_histMeanMultiXiMinus ,44, 1, 2);
book(_histMeanMultiSigma1385Minus ,46, 1, 2);
book(_histMeanMultiSigma1385Plus ,47, 1, 2);
book(_histMeanMultiSigma1385PlusMinus ,48, 1, 2);
book(_histMeanMultiOmegaMinus ,50, 1, 2);
book(_histMeanMultiLambda_c_Plus ,51, 1, 2);
}
if (sqrtS()/GeV >= 89.5 && sqrtS()/GeV <= 91.8) {
book(_histMeanMultiPi0 , 2, 1, 3);
book(_histMeanMultiKPlus , 3, 1, 3);
book(_histMeanMultiK0 , 4, 1, 3);
book(_histMeanMultiEta , 5, 1, 3);
book(_histMeanMultiEtaPrime , 6, 1, 3);
book(_histMeanMultiDPlus , 7, 1, 3);
book(_histMeanMultiD0 , 8, 1, 3);
book(_histMeanMultiDPlus_s , 9, 1, 3);
book(_histMeanMultiBPlus_B0_d ,10, 1, 1);
book(_histMeanMultiBPlus_u ,11, 1, 1);
book(_histMeanMultiB0_s ,12, 1, 1);
book(_histMeanMultiF0_980 ,13, 1, 3);
book(_histMeanMultiA0_980Plus ,14, 1, 1);
book(_histMeanMultiRho770_0 ,15, 1, 3);
book(_histMeanMultiRho770Plus ,16, 1, 1);
book(_histMeanMultiOmega782 ,17, 1, 2);
book(_histMeanMultiKStar892Plus ,18, 1, 3);
book(_histMeanMultiKStar892_0 ,19, 1, 3);
book(_histMeanMultiPhi1020 ,20, 1, 3);
book(_histMeanMultiDStar2010Plus ,21, 1, 3);
book(_histMeanMultiDStar_s2112Plus ,23, 1, 2);
book(_histMeanMultiBStar ,24, 1, 1);
book(_histMeanMultiJPsi1S ,25, 1, 2);
book(_histMeanMultiPsi2S ,26, 1, 1);
book(_histMeanMultiUpsilon1S ,27, 1, 1);
book(_histMeanMultiF1_1285 ,28, 1, 1);
book(_histMeanMultiF1_1420 ,29, 1, 1);
book(_histMeanMultiChi_c1_3510 ,30, 1, 1);
book(_histMeanMultiF2_1270 ,31, 1, 3);
book(_histMeanMultiF2Prime1525 ,32, 1, 1);
book(_histMeanMultiK2Star1430_0 ,34, 1, 2);
book(_histMeanMultiBStarStar ,35, 1, 1);
book(_histMeanMultiDs1Plus ,36, 1, 1);
book(_histMeanMultiDs2Plus ,37, 1, 1);
book(_histMeanMultiP ,38, 1, 3);
book(_histMeanMultiLambda ,39, 1, 3);
book(_histMeanMultiSigma0 ,40, 1, 2);
book(_histMeanMultiSigmaMinus ,41, 1, 1);
book(_histMeanMultiSigmaPlus ,42, 1, 1);
book(_histMeanMultiSigmaPlusMinus ,43, 1, 1);
book(_histMeanMultiXiMinus ,44, 1, 3);
book(_histMeanMultiDelta1232PlusPlus ,45, 1, 2);
book(_histMeanMultiSigma1385Minus ,46, 1, 3);
book(_histMeanMultiSigma1385Plus ,47, 1, 3);
book(_histMeanMultiSigma1385PlusMinus ,48, 1, 3);
book(_histMeanMultiXi1530_0 ,49, 1, 2);
book(_histMeanMultiOmegaMinus ,50, 1, 3);
book(_histMeanMultiLambda_c_Plus ,51, 1, 3);
book(_histMeanMultiLambda_b_0 ,52, 1, 1);
book(_histMeanMultiLambda1520 ,54, 1, 2);
}
if (sqrtS()/GeV >= 130 && sqrtS()/GeV <= 200) {
book(_histMeanMultiKPlus , 3, 1, 4);
book(_histMeanMultiK0 , 4, 1, 4);
book(_histMeanMultiP ,38, 1, 4);
book(_histMeanMultiLambda ,39, 1, 4);
}
}
// Finalize
void finalize() {
if (sqrtS()/GeV >= 9.5 && sqrtS()/GeV <= 10.5) {
- scale(_histMeanMultiPi0 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiKPlus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiK0 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiEta , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiEtaPrime , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiDPlus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiD0 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiDPlus_s , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiF0_980 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiRho770_0 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiOmega782 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiKStar892Plus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiKStar892_0 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiPhi1020 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiDStar2010Plus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiDStar2007_0 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiDStar_s2112Plus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiJPsi1S , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiF2_1270 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiP , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiLambda , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiSigma0 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiXiMinus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiDelta1232PlusPlus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiSigma1385Minus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiSigma1385Plus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiSigma1385PlusMinus, 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiXi1530_0 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiOmegaMinus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiLambda_c_Plus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiSigma_c_PlusPlus_0, 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiLambda1520 , 1.0/_weightedTotalNumPiPlus);
+ scale(_histMeanMultiPi0 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiKPlus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiK0 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiEta , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiEtaPrime , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiDPlus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiD0 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiDPlus_s , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiF0_980 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiRho770_0 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiOmega782 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiKStar892Plus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiKStar892_0 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiPhi1020 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiDStar2010Plus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiDStar2007_0 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiDStar_s2112Plus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiJPsi1S , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiF2_1270 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiP , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiLambda , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiSigma0 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiXiMinus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiDelta1232PlusPlus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiSigma1385Minus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiSigma1385Plus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiSigma1385PlusMinus, 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiXi1530_0 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiOmegaMinus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiLambda_c_Plus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiSigma_c_PlusPlus_0, 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiLambda1520 , 1.0 / *_weightedTotalNumPiPlus);
}
if (sqrtS()/GeV >= 29 && sqrtS()/GeV <= 35) {
- scale(_histMeanMultiPi0 , 5.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiKPlus , 5.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiK0 , 5.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiEta , 5.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiEtaPrime , 5.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiDPlus , 5.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiD0 , 5.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiDPlus_s , 5.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiF0_980 , 5.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiRho770_0 , 5.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiKStar892Plus , 5.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiKStar892_0 , 5.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiPhi1020 , 5.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiDStar2010Plus , 5.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiDStar2007_0 , 5.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiF2_1270 , 5.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiK2Star1430Plus , 5.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiK2Star1430_0 , 5.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiP , 5.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiLambda , 5.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiXiMinus , 5.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiSigma1385Minus , 5.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiSigma1385Plus , 5.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiSigma1385PlusMinus, 5.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiOmegaMinus , 5.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiLambda_c_Plus , 5.0/_weightedTotalNumPiPlus);
+ scale(_histMeanMultiPi0 , 5.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiKPlus , 5.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiK0 , 5.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiEta , 5.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiEtaPrime , 5.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiDPlus , 5.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiD0 , 5.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiDPlus_s , 5.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiF0_980 , 5.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiRho770_0 , 5.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiKStar892Plus , 5.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiKStar892_0 , 5.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiPhi1020 , 5.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiDStar2010Plus , 5.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiDStar2007_0 , 5.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiF2_1270 , 5.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiK2Star1430Plus , 5.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiK2Star1430_0 , 5.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiP , 5.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiLambda , 5.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiXiMinus , 5.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiSigma1385Minus , 5.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiSigma1385Plus , 5.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiSigma1385PlusMinus, 5.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiOmegaMinus , 5.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiLambda_c_Plus , 5.0 / *_weightedTotalNumPiPlus);
}
if (sqrtS()/GeV >= 89.5 && sqrtS()/GeV <= 91.8) {
- scale(_histMeanMultiPi0 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiKPlus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiK0 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiEta , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiEtaPrime , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiDPlus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiD0 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiDPlus_s , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiBPlus_B0_d , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiBPlus_u , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiB0_s , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiF0_980 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiA0_980Plus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiRho770_0 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiRho770Plus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiOmega782 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiKStar892Plus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiKStar892_0 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiPhi1020 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiDStar2010Plus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiDStar_s2112Plus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiBStar , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiJPsi1S , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiPsi2S , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiUpsilon1S , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiF1_1285 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiF1_1420 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiChi_c1_3510 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiF2_1270 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiF2Prime1525 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiK2Star1430_0 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiBStarStar , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiDs1Plus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiDs2Plus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiP , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiLambda , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiSigma0 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiSigmaMinus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiSigmaPlus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiSigmaPlusMinus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiXiMinus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiDelta1232PlusPlus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiSigma1385Minus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiSigma1385Plus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiSigma1385PlusMinus, 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiXi1530_0 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiOmegaMinus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiLambda_c_Plus , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiLambda_b_0 , 1.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiLambda1520 , 1.0/_weightedTotalNumPiPlus);
+ scale(_histMeanMultiPi0 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiKPlus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiK0 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiEta , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiEtaPrime , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiDPlus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiD0 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiDPlus_s , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiBPlus_B0_d , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiBPlus_u , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiB0_s , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiF0_980 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiA0_980Plus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiRho770_0 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiRho770Plus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiOmega782 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiKStar892Plus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiKStar892_0 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiPhi1020 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiDStar2010Plus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiDStar_s2112Plus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiBStar , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiJPsi1S , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiPsi2S , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiUpsilon1S , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiF1_1285 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiF1_1420 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiChi_c1_3510 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiF2_1270 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiF2Prime1525 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiK2Star1430_0 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiBStarStar , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiDs1Plus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiDs2Plus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiP , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiLambda , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiSigma0 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiSigmaMinus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiSigmaPlus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiSigmaPlusMinus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiXiMinus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiDelta1232PlusPlus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiSigma1385Minus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiSigma1385Plus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiSigma1385PlusMinus, 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiXi1530_0 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiOmegaMinus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiLambda_c_Plus , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiLambda_b_0 , 1.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiLambda1520 , 1.0 / *_weightedTotalNumPiPlus);
}
if (sqrtS()/GeV >= 130 && sqrtS()/GeV <= 200) {
- scale(_histMeanMultiKPlus , 70.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiK0 , 70.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiP , 70.0/_weightedTotalNumPiPlus);
- scale(_histMeanMultiLambda , 70.0/_weightedTotalNumPiPlus);
+ scale(_histMeanMultiKPlus , 70.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiK0 , 70.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiP , 70.0 / *_weightedTotalNumPiPlus);
+ scale(_histMeanMultiLambda , 70.0 / *_weightedTotalNumPiPlus);
}
}
//@}
private:
CounterPtr _weightedTotalNumPiPlus;
Histo1DPtr _histMeanMultiPi0;
Histo1DPtr _histMeanMultiKPlus;
Histo1DPtr _histMeanMultiK0;
Histo1DPtr _histMeanMultiEta;
Histo1DPtr _histMeanMultiEtaPrime;
Histo1DPtr _histMeanMultiDPlus;
Histo1DPtr _histMeanMultiD0;
Histo1DPtr _histMeanMultiDPlus_s;
Histo1DPtr _histMeanMultiBPlus_B0_d;
Histo1DPtr _histMeanMultiBPlus_u;
Histo1DPtr _histMeanMultiB0_s;
Histo1DPtr _histMeanMultiF0_980;
Histo1DPtr _histMeanMultiA0_980Plus;
Histo1DPtr _histMeanMultiRho770_0;
Histo1DPtr _histMeanMultiRho770Plus;
Histo1DPtr _histMeanMultiOmega782;
Histo1DPtr _histMeanMultiKStar892Plus;
Histo1DPtr _histMeanMultiKStar892_0;
Histo1DPtr _histMeanMultiPhi1020;
Histo1DPtr _histMeanMultiDStar2010Plus;
Histo1DPtr _histMeanMultiDStar2007_0;
Histo1DPtr _histMeanMultiDStar_s2112Plus;
Histo1DPtr _histMeanMultiBStar;
Histo1DPtr _histMeanMultiJPsi1S;
Histo1DPtr _histMeanMultiPsi2S;
Histo1DPtr _histMeanMultiUpsilon1S;
Histo1DPtr _histMeanMultiF1_1285;
Histo1DPtr _histMeanMultiF1_1420;
Histo1DPtr _histMeanMultiChi_c1_3510;
Histo1DPtr _histMeanMultiF2_1270;
Histo1DPtr _histMeanMultiF2Prime1525;
Histo1DPtr _histMeanMultiK2Star1430Plus;
Histo1DPtr _histMeanMultiK2Star1430_0;
Histo1DPtr _histMeanMultiBStarStar;
Histo1DPtr _histMeanMultiDs1Plus;
Histo1DPtr _histMeanMultiDs2Plus;
Histo1DPtr _histMeanMultiP;
Histo1DPtr _histMeanMultiLambda;
Histo1DPtr _histMeanMultiSigma0;
Histo1DPtr _histMeanMultiSigmaMinus;
Histo1DPtr _histMeanMultiSigmaPlus;
Histo1DPtr _histMeanMultiSigmaPlusMinus;
Histo1DPtr _histMeanMultiXiMinus;
Histo1DPtr _histMeanMultiDelta1232PlusPlus;
Histo1DPtr _histMeanMultiSigma1385Minus;
Histo1DPtr _histMeanMultiSigma1385Plus;
Histo1DPtr _histMeanMultiSigma1385PlusMinus;
Histo1DPtr _histMeanMultiXi1530_0;
Histo1DPtr _histMeanMultiOmegaMinus;
Histo1DPtr _histMeanMultiLambda_c_Plus;
Histo1DPtr _histMeanMultiLambda_b_0;
Histo1DPtr _histMeanMultiSigma_c_PlusPlus_0;
Histo1DPtr _histMeanMultiLambda1520;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(PDG_HADRON_MULTIPLICITIES_RATIOS);
}
diff --git a/analyses/pluginMisc/PDG_TAUS.cc b/analyses/pluginMisc/PDG_TAUS.cc
--- a/analyses/pluginMisc/PDG_TAUS.cc
+++ b/analyses/pluginMisc/PDG_TAUS.cc
@@ -1,212 +1,212 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/TauFinder.hh"
namespace Rivet {
class PDG_TAUS : public Analysis {
public:
/// Constructor
PDG_TAUS()
: Analysis("PDG_TAUS")
{ }
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
TauFinder tauleptonic(TauFinder::LEPTONIC); // open cuts, leptonic decays
declare(tauleptonic, "TauLeptonic");
TauFinder tauhadronic(TauFinder::HADRONIC); // open cuts, hadronic decays
declare(tauhadronic, "TauHadronic");
populateDecayMap();
book(_h_ratio_mu ,1, 1, 1);
book(_h_ratio_el ,1, 1, 2);
book(_h_1prong_pinu ,2, 1, 1);
book(_h_1prong_Kpnu ,2, 1, 2);
book(_h_1prong_pipinu ,2, 1, 3);
book(_h_1prong_Kppinu ,2, 1, 4);
book(_h_1prong_pipipinu ,2, 1, 5);
book(_h_1prong_Knpinu ,2, 1, 6);
book(_h_3prong_pipipinu ,2, 2, 1);
book(_h_5prong ,2, 3, 1);
book(_weights_had, "TMP/weights_had");
book(_weights_mu, "TMP/weights_mu");
book(_weights_el, "TMP/weights_el");
}
/// Perform the per-event analysis
void analyze(const Event& e) {
const TauFinder& taulep = apply<TauFinder>(e, "TauLeptonic");
const TauFinder& tauhad = apply<TauFinder>(e, "TauHadronic");
// Hadronic tau decays --- prong decays
foreach(const Particle& tau, tauhad.taus()) {
_weights_had->fill();
int prongs = countProngs(tau); // number of charged particles among decay products
// Only do 1 prong decays here
if (prongs == 1) {
////// Exclusive decay modes "1-prong"
if (analyzeDecay(tau, decay_pids["pinu"], true)) _h_1prong_pinu->fill(1);
if (analyzeDecay(tau, decay_pids["Kpnu"], true)) _h_1prong_Kpnu->fill(1);
if (analyzeDecay(tau, decay_pids["pipinu"], true)) _h_1prong_pipinu->fill(1);
if (analyzeDecay(tau, decay_pids["Kppinu"] , true)) _h_1prong_Kppinu->fill(1);
if (analyzeDecay(tau, decay_pids["pipipinu"], true)) _h_1prong_pipipinu->fill(1);
// Kshort, Klong --- (twice) filling the K0 labelled PDG histo
if (analyzeDecay(tau, decay_pids["KSpinu"] , true)) _h_1prong_Knpinu->fill(1);
if (analyzeDecay(tau, decay_pids["KLpinu"] , true)) _h_1prong_Knpinu->fill(1);
}
else if (prongs == 3) {
if (analyzeDecay(tau, decay_pids["3pipipinu"], true)) _h_3prong_pipipinu->fill(1);
}
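// Inclusive 5-prong decays, excluding candidates with a K0S (PID 310)
// among the direct daughters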
else if (prongs == 5 && !any(tau.children(), HasAbsPID(310))) _h_5prong->fill(1);
}
// Leptonic tau decays --- look for radiative and non-radiative 1 prong decays
foreach(const Particle& tau, taulep.taus()) {
int prongs = countProngs(tau); // number of charged particles among decay products
// Only do 1 prong decays here
if (prongs == 1) {
analyzeRadiativeDecay(tau, decay_pids["muids"], _weights_mu, true, _h_ratio_mu);
analyzeRadiativeDecay(tau, decay_pids["elids"], _weights_el, true, _h_ratio_el);
}
}
}
/// Normalise histograms etc., after the run
void finalize() {
- scale(_h_ratio_mu, 1./_weights_mu);
- scale(_h_ratio_el, 1./_weights_el);
+ scale(_h_ratio_mu, 1. / *_weights_mu);
+ scale(_h_ratio_el, 1. / *_weights_el);
- const double norm = double(_weights_had) + double(_weights_mu) + double(_weights_el);
+ const YODA::Counter norm = *_weights_had + *_weights_mu + *_weights_el;
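// Total weight of all recorded tau decays (hadronic plus both leptonic
// modes), used to normalise the exclusive decay-mode histograms below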
scale(_h_1prong_pinu, 1./norm);
scale(_h_1prong_Kpnu, 1./norm);
scale(_h_1prong_pipinu, 1./norm);
scale(_h_1prong_Kppinu, 1./norm);
scale(_h_1prong_pipipinu, 1./norm);
scale(_h_1prong_Knpinu, 1./norm);
scale(_h_3prong_pipipinu, 1./norm);
scale(_h_5prong, 1./norm);
}
// Short hand
bool contains(Particle& mother, int id, bool abs=false) {
if (abs) return any(mother.children(), HasAbsPID(id));
return any(mother.children(), HasPID(id));
}
// Count charged decay products
int countProngs(Particle mother) {
int n_prongs = 0;
foreach(Particle p, mother.children())
if (p.threeCharge()!=0) ++n_prongs;
return n_prongs;
}
// Set up a lookup table for decays
void populateDecayMap() {
decay_pids["muids"] = {{ 13,14,16 }};
decay_pids["elids"] = {{ 11,12,16 }};
decay_pids["pinu"] = {{ 211,16 }};
decay_pids["Kpnu"] = {{ 321,16 }};
decay_pids["pipinu"] = {{ 111,211,16 }};
decay_pids["Kppinu"] = {{ 111,321,16 }};
decay_pids["pipipinu"] = {{ 111,111,211,16 }};
decay_pids["KSpinu"] = {{ 211,310,16 }};
decay_pids["KLpinu"] = {{ 211,130,16 }};
decay_pids["3pipipinu"] = {{ 211,211,211,16 }};
}
bool analyzeDecay(Particle mother, vector<int> ids, bool absolute) {
// There is no point in looking for decays with fewer particles than are to be analysed
if (mother.children().size() == ids.size()) {
bool decayfound = true;
foreach (int id, ids) {
if (!contains(mother, id, absolute)) decayfound = false;
}
return decayfound;
} // end of first if
return false;
}
// Look for radiative (and non-radiative) tau decays to fill a ratio histo
void analyzeRadiativeDecay(Particle mother, vector<int> ids, CounterPtr &w_incl, bool absolute, Histo1DPtr h_ratio) {
// w_incl ... reference to a global weight counter for all leptonic tau decays
// h_ratio ... pointer to ratio histo
// There is no point in looking for decays with fewer particles than are to be analysed
if (mother.children().size() >= ids.size()) {
bool decayfound = true;
foreach (int id, ids) {
if (!contains(mother, id, absolute)) decayfound = false;
}
// Do not increment counters if the specified decay products were not found
if (decayfound) {
w_incl->fill(); // the (global) weight counter for leptonic decays
bool radiative = any(mother.children(), HasPID(PID::PHOTON));
// Only fill the histo if there is a radiative decay
if (radiative) {
// Iterate over the decay products to find a photon with at least 5 MeV of energy
foreach (const Particle& son, mother.children()) {
if (son.pid() == PID::PHOTON) {
// Require photons to have at least 5 MeV energy in the rest frame of the tau
// boosted taus
if (!mother.momentum().betaVec().isZero()) {
LorentzTransform cms_boost = LorentzTransform::mkFrameTransformFromBeta(mother.momentum().betaVec());
if (cms_boost.transform(son.momentum())[0]/MeV > 5.) {
h_ratio->fill(1);
break;
}
}
// not boosted taus
else {
if (son.momentum()[0]/MeV > 5.) {
h_ratio->fill(1);
break;
}
}
}
} // end loop over decay products
} // end of radiative
} // end of decayfound
} // end of first if
}
private:
/// @name Histograms
//@{
Histo1DPtr _h_ratio_mu, _h_ratio_el;
Histo1DPtr _h_1prong_pinu, _h_1prong_Kpnu, _h_1prong_Kppinu, _h_1prong_pipinu, _h_1prong_pipipinu, _h_1prong_Knpinu;
Histo1DPtr _h_3prong_pipipinu;
Histo1DPtr _h_5prong;
//@}
CounterPtr _weights_had, _weights_mu, _weights_el;
map<string, vector<int> > decay_pids;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(PDG_TAUS);
}
diff --git a/analyses/pluginRHIC/STAR_2006_S6500200.cc b/analyses/pluginRHIC/STAR_2006_S6500200.cc
--- a/analyses/pluginRHIC/STAR_2006_S6500200.cc
+++ b/analyses/pluginRHIC/STAR_2006_S6500200.cc
@@ -1,108 +1,108 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/IdentifiedFinalState.hh"
namespace Rivet {
/// @brief STAR identified hadron spectra in pp at 200 GeV
class STAR_2006_S6500200 : public Analysis {
public:
/// Constructor
STAR_2006_S6500200()
: Analysis("STAR_2006_S6500200")
{ }
/// Book projections and histograms
void init() {
ChargedFinalState bbc1(-5.0,-3.3, 0.0*GeV); // beam-beam-counter trigger
ChargedFinalState bbc2( 3.3, 5.0, 0.0*GeV); // beam-beam-counter trigger
declare(bbc1, "BBC1");
declare(bbc2, "BBC2");
IdentifiedFinalState pionfs(Cuts::abseta < 2.5 && Cuts::pT > 0.3*GeV);
IdentifiedFinalState protonfs(Cuts::abseta < 2.5 && Cuts::pT > 0.4*GeV);
pionfs.acceptIdPair(PID::PIPLUS);
protonfs.acceptIdPair(PID::PROTON);
declare(pionfs, "PionFS");
declare(protonfs, "ProtonFS");
book(_h_pT_piplus ,1, 1, 1); // full range pion binning
book(_h_pT_piminus ,1, 2, 1); // full range pion binning
book(_tmp_pT_piplus ,"TMP/pT_piplus", refData(2, 3, 1)); // pi histo compatible with more restricted proton binning
book(_tmp_pT_piminus ,"TMP/pT_piminus", refData(2, 4, 1)); // pi histo compatible with more restricted proton binning
book(_h_pT_proton ,1, 3, 1);
book(_h_pT_antiproton ,1, 4, 1);
book(_s_piminus_piplus, 2, 1, 1);
book(_s_antipr_pr , 2, 2, 1);
book(_s_pr_piplus , 2, 3, 1);
book(_s_antipr_piminus, 2, 4, 1);
book(_sumWeightSelected, "sumWeightSelected");
}
/// Do the analysis
void analyze(const Event& event) {
const ChargedFinalState& bbc1 = apply<ChargedFinalState>(event, "BBC1");
const ChargedFinalState& bbc2 = apply<ChargedFinalState>(event, "BBC2");
if (bbc1.size() < 1 || bbc2.size() < 1) {
MSG_DEBUG("Failed beam-beam-counter trigger");
vetoEvent;
}
const IdentifiedFinalState& pionfs = apply<IdentifiedFinalState>(event, "PionFS");
foreach (const Particle& p, pionfs.particles()) {
if (p.absrap() < 0.5) {
/// @todo Use a binned counter to avoid this bin width cancellation hack
const double pT = p.pT() / GeV;
((p.pid() > 0) ? _h_pT_piplus : _h_pT_piminus)->fill(pT, 1.0/pT);
((p.pid() > 0) ? _tmp_pT_piplus : _tmp_pT_piminus)->fill(pT, 1.0/pT);
}
}
const IdentifiedFinalState& protonfs = apply<IdentifiedFinalState>(event, "ProtonFS");
foreach (const Particle& p, protonfs.particles()) {
if (p.absrap() < 0.5) {
/// @todo Use a binned counter to avoid this bin width cancellation hack
const double pT = p.pT() / GeV;
((p.pid() > 0) ? _h_pT_proton : _h_pT_antiproton)->fill(pT, 1.0/pT);
}
}
_sumWeightSelected->fill();
}
/// Finalize
void finalize() {
divide(_h_pT_piminus, _h_pT_piplus, _s_piminus_piplus);
divide(_h_pT_antiproton, _h_pT_proton, _s_antipr_pr);
divide(_h_pT_proton, _tmp_pT_piplus, _s_pr_piplus);
divide(_h_pT_antiproton, _tmp_pT_piminus, _s_antipr_piminus);
- const double factor = 1/(2*M_PI*double(_sumWeightSelected));
+ const YODA::Scatter1D factor = (1.0/(2*M_PI)) / *_sumWeightSelected;
scale(_h_pT_piplus, factor);
scale(_h_pT_piminus, factor);
scale(_h_pT_proton, factor);
scale(_h_pT_antiproton, factor);
}
private:
CounterPtr _sumWeightSelected;
Histo1DPtr _h_pT_piplus, _h_pT_piminus, _h_pT_proton, _h_pT_antiproton;
Histo1DPtr _tmp_pT_piplus, _tmp_pT_piminus;
Scatter2DPtr _s_piminus_piplus, _s_antipr_pr, _s_pr_piplus, _s_antipr_piminus;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(STAR_2006_S6500200);
}
diff --git a/analyses/pluginRHIC/STAR_2006_S6860818.cc b/analyses/pluginRHIC/STAR_2006_S6860818.cc
--- a/analyses/pluginRHIC/STAR_2006_S6860818.cc
+++ b/analyses/pluginRHIC/STAR_2006_S6860818.cc
@@ -1,195 +1,195 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/IdentifiedFinalState.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
namespace Rivet {
/// @brief STAR strange particle spectra in pp at 200 GeV
class STAR_2006_S6860818 : public Analysis {
public:
/// Constructor
STAR_2006_S6860818()
: Analysis("STAR_2006_S6860818")
{
for (size_t i = 0; i < 4; i++) {
_nBaryon[i] = 0;
_nAntiBaryon[i] = 0;
}
}
/// Book projections and histograms
void init() {
ChargedFinalState bbc1(Cuts::etaIn(-5.0, -3.5)); // beam-beam-counter trigger
ChargedFinalState bbc2(Cuts::etaIn( 3.5, 5.0)); // beam-beam-counter trigger
declare(bbc1, "BBC1");
declare(bbc2, "BBC2");
UnstableFinalState ufs(Cuts::abseta < 2.5);
declare(ufs, "UFS");
book(_h_pT_k0s ,1, 1, 1);
book(_h_pT_kminus ,1, 2, 1);
book(_h_pT_kplus ,1, 3, 1);
book(_h_pT_lambda ,1, 4, 1);
book(_h_pT_lambdabar ,1, 5, 1);
book(_h_pT_ximinus ,1, 6, 1);
book(_h_pT_xiplus ,1, 7, 1);
//book(_h_pT_omega ,1, 8, 1);
book(_h_antibaryon_baryon_ratio, 2, 1, 1);
book(_h_lambar_lam, 2, 2, 1);
book(_h_xiplus_ximinus, 2, 3, 1);
book(_h_pT_vs_mass ,3, 1, 1);
for (size_t i = 0; i < 4; i++) {
book(_nWeightedBaryon[i], "TMP/nWeightedBaryon"+to_str(i));
book(_nWeightedAntiBaryon[i], "TMP/nWeightedAntiBaryon"+to_str(i));
}
book(_sumWeightSelected, "sumWselected");
}
/// Do the analysis
void analyze(const Event& event) {
const ChargedFinalState& bbc1 = apply<ChargedFinalState>(event, "BBC1");
const ChargedFinalState& bbc2 = apply<ChargedFinalState>(event, "BBC2");
if (bbc1.size()<1 || bbc2.size()<1) {
MSG_DEBUG("Failed beam-beam-counter trigger");
vetoEvent;
}
const UnstableFinalState& ufs = apply<UnstableFinalState>(event, "UFS");
foreach (const Particle& p, ufs.particles()) {
if (p.absrap() < 0.5) {
const PdgId pid = p.pid();
const double pT = p.pT() / GeV;
switch (abs(pid)) {
case PID::PIPLUS:
if (pid < 0) _h_pT_vs_mass->fill(0.1396, pT);
break;
case PID::PROTON:
if (pid < 0) _h_pT_vs_mass->fill(0.9383, pT);
if (pT > 0.4) {
pid > 0 ? _nBaryon[0]++ : _nAntiBaryon[0]++;
pid > 0 ? _nWeightedBaryon[0]->fill() : _nWeightedAntiBaryon[0]->fill();
}
break;
case PID::K0S:
if (pT > 0.2) {
_h_pT_k0s->fill(pT, 1.0/pT);
}
_h_pT_vs_mass->fill(0.5056, pT);
break;
case PID::K0L:
_h_pT_vs_mass->fill(0.5056, pT);
break;
case 113: // rho0(770)
_h_pT_vs_mass->fill(0.7755, pT);
break;
case 313: // K0*(892)
_h_pT_vs_mass->fill(0.8960, pT);
break;
case 333: // phi(1020)
_h_pT_vs_mass->fill(1.0190, pT);
break;
case 3214: // Sigma(1385)
_h_pT_vs_mass->fill(1.3840, pT);
break;
case 3124: // Lambda(1520)
_h_pT_vs_mass->fill(1.5200, pT);
break;
case PID::KPLUS:
if (pid < 0) _h_pT_vs_mass->fill(0.4856, pT);
if (pT > 0.2) {
pid > 0 ? _h_pT_kplus->fill(pT, 1.0/pT) : _h_pT_kminus->fill(pT, 1.0/pT);
}
break;
case PID::LAMBDA:
pid > 0 ? _h_pT_vs_mass->fill(1.1050, pT) : _h_pT_vs_mass->fill(1.1250, pT);
if (pT > 0.3) {
pid > 0 ? _h_pT_lambda->fill(pT, 1.0/pT) : _h_pT_lambdabar->fill(pT, 1.0/pT);
pid > 0 ? _nBaryon[1]++ : _nAntiBaryon[1]++;
pid > 0 ? _nWeightedBaryon[1]->fill() : _nWeightedAntiBaryon[1]->fill();
}
break;
case PID::XIMINUS:
pid > 0 ? _h_pT_vs_mass->fill(1.3120, pT) : _h_pT_vs_mass->fill(1.3320, pT);
if (pT > 0.5) {
pid > 0 ? _h_pT_ximinus->fill(pT, 1.0/pT) : _h_pT_xiplus->fill(pT, 1.0/pT);
pid > 0 ? _nBaryon[2]++ : _nAntiBaryon[2]++;
pid > 0 ? _nWeightedBaryon[2]->fill() : _nWeightedAntiBaryon[2]->fill();
}
break;
case PID::OMEGAMINUS:
_h_pT_vs_mass->fill(1.6720, pT);
if (pT > 0.5) {
//_h_pT_omega->fill(pT, 1.0/pT);
pid > 0 ? _nBaryon[3]++ : _nAntiBaryon[3]++;
pid > 0 ? _nWeightedBaryon[3]->fill() : _nWeightedAntiBaryon[3]->fill();
}
break;
}
}
}
_sumWeightSelected->fill();
}
/// Finalize
void finalize() {
std::vector<Point2D> points;
for (size_t i=0 ; i<4 ; i++) {
- if (_nWeightedBaryon[i]==0 || _nWeightedAntiBaryon[i]==0) {
+ if (_nWeightedBaryon[i]->val()==0 || _nWeightedAntiBaryon[i]->val()==0) {
points.push_back(Point2D(i,0,0.5,0));
} else {
- double y = _nWeightedAntiBaryon[i]/_nWeightedBaryon[i];
+ double y = _nWeightedAntiBaryon[i]->val()/_nWeightedBaryon[i]->val();
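// Poisson relative errors on the two raw counts, added in quadrature
// for the antibaryon/baryon ratio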
double dy = sqrt( 1./_nAntiBaryon[i] + 1./_nBaryon[i] );
points.push_back(Point2D(i,y,0.5,y*dy));
}
}
_h_antibaryon_baryon_ratio->addPoints( points );
divide(_h_pT_lambdabar,_h_pT_lambda, _h_lambar_lam);
divide(_h_pT_xiplus,_h_pT_ximinus, _h_xiplus_ximinus);
- const double factor = 1./(2*M_PI*double(_sumWeightSelected));
+ const YODA::Scatter1D factor = (1./(2.0 * M_PI)) / *_sumWeightSelected;
scale(_h_pT_k0s, factor);
scale(_h_pT_kminus, factor);
scale(_h_pT_kplus, factor);
scale(_h_pT_lambda, factor);
scale(_h_pT_lambdabar, factor);
scale(_h_pT_ximinus, factor);
scale(_h_pT_xiplus, factor);
//scale(_h_pT_omega, 1./(2*M_PI*_sumWeightSelected));
MSG_DEBUG("sumOfWeights() = " << sumOfWeights());
- MSG_DEBUG("_sumWeightSelected = " << double(_sumWeightSelected));
+ MSG_DEBUG("_sumWeightSelected = " << _sumWeightSelected->val());
}
private:
CounterPtr _sumWeightSelected;
array<int,4> _nBaryon;
array<int,4> _nAntiBaryon;
array<CounterPtr, 4> _nWeightedBaryon;
array<CounterPtr, 4> _nWeightedAntiBaryon;
Histo1DPtr _h_pT_k0s, _h_pT_kminus, _h_pT_kplus, _h_pT_lambda, _h_pT_lambdabar, _h_pT_ximinus, _h_pT_xiplus;
//Histo1DPtr _h_pT_omega;
Scatter2DPtr _h_antibaryon_baryon_ratio;
Profile1DPtr _h_pT_vs_mass;
Scatter2DPtr _h_lambar_lam;
Scatter2DPtr _h_xiplus_ximinus;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(STAR_2006_S6860818);
}
diff --git a/analyses/pluginRHIC/STAR_2008_S7869363.cc b/analyses/pluginRHIC/STAR_2008_S7869363.cc
--- a/analyses/pluginRHIC/STAR_2008_S7869363.cc
+++ b/analyses/pluginRHIC/STAR_2008_S7869363.cc
@@ -1,173 +1,173 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/LossyFinalState.hh"
namespace Rivet {
/// @todo Replace with SmearedParticles
class STARRandomFilter {
public:
STARRandomFilter() { }
// Return true to throw away a particle
bool operator()(const Particle& p) {
/// @todo Use a better RNG?
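// Map pT into the 50 MeV-wide bins of the _trkeff efficiency table,
// saturating at the last bin (pT >= 600 MeV)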
size_t idx = int(floor(p.pT()/MeV/50));
if (idx > 11) idx = 11;
return (rand()/static_cast<double>(RAND_MAX) > _trkeff[idx]);
}
int compare(const STARRandomFilter& other) const {
return true;
}
private:
const static double _trkeff[12];
};
// Here we have the track reconstruction efficiencies for tracks with pT from 0 to 600 MeV
// in steps of 50 MeV. The efficiency is assumed to be 0.88 for pT >= 600 MeV
const double STARRandomFilter::_trkeff[12] = {0,0,0.38,0.72,0.78,0.81,0.82,0.84,0.85,0.86,0.87,0.88};
class STAR_2008_S7869363 : public Analysis {
public:
/// @name Constructors etc.
//@{
/// Constructor
STAR_2008_S7869363()
: Analysis("STAR_2008_S7869363")
{ }
//@}
public:
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
const ChargedFinalState cfs(-0.5, 0.5, 0.2*GeV);
const LossyFinalState<STARRandomFilter> lfs(cfs, STARRandomFilter());
declare(lfs, "FS");
book(_h_dNch ,1, 1, 1);
book(_h_dpT_Pi ,2, 1, 1);
book(_h_dpT_Piplus ,2, 1, 2);
book(_h_dpT_Kaon ,2, 1, 3);
book(_h_dpT_Kaonplus ,2, 1, 4);
book(_h_dpT_AntiProton ,2, 1, 5);
book(_h_dpT_Proton ,2, 1, 6);
- book(nCutsPassed, "nCutsPassed");
- book(nPi, "nPi");
- book(nPiPlus, "nPiPlus");
- book(nKaon, "nKaon");
- book(nKaonPlus, "nKaonPlus");
- book(nProton, "nProton");
- book(nAntiProton, "nAntiProton");
+ // book(nCutsPassed, "nCutsPassed");
+ // book(nPi, "nPi");
+ // book(nPiPlus, "nPiPlus");
+ // book(nKaon, "nKaon");
+ // book(nKaonPlus, "nKaonPlus");
+ // book(nProton, "nProton");
+ // book(nAntiProton, "nAntiProton");
}
/// Perform the per-event analysis
void analyze(const Event& event) {
const FinalState& charged = apply<FinalState>(event, "FS");
// Vertex reconstruction efficiencies as a function of charged multiplicity.
// For events with more than 23 reconstructed tracks the efficiency is 100%.
double vtxeffs[24] = { 0.000000,0.512667,0.739365,0.847131,0.906946,0.940922,0.959328,0.96997,
0.975838,0.984432,0.988311,0.990327,0.990758,0.995767,0.99412,0.992271,
0.996631,0.994802,0.99635,0.997384,0.998986,0.996441,0.994513,1.000000 };
double vtxeff = 1.0;
if (charged.particles().size() < 24) {
vtxeff = vtxeffs[charged.particles().size()];
}
const double weight = vtxeff;
foreach (const Particle& p, charged.particles()) {
double pT = p.pT()/GeV;
double y = p.rapidity();
if (fabs(y) < 0.1) {
- nCutsPassed->fill(weight);
+ // nCutsPassed->fill(weight);
const PdgId id = p.pid();
switch (id) {
case -211:
_h_dpT_Pi->fill(pT, weight/(TWOPI*pT*0.2));
- nPi->fill(weight);
+ // nPi->fill(weight);
break;
case 211:
_h_dpT_Piplus->fill(pT, weight/(TWOPI*pT*0.2));
- nPiPlus->fill(weight);
+ // nPiPlus->fill(weight);
break;
case -321:
_h_dpT_Kaon->fill(pT, weight/(TWOPI*pT*0.2));
- nKaon->fill(weight);
+ // nKaon->fill(weight);
break;
case 321:
_h_dpT_Kaonplus->fill(pT, weight/(TWOPI*pT*0.2));
- nKaonPlus->fill(weight);
+ // nKaonPlus->fill(weight);
break;
case -2212:
_h_dpT_AntiProton->fill(pT, weight/(TWOPI*pT*0.2));
- nAntiProton->fill(weight);
+ // nAntiProton->fill(weight);
break;
case 2212:
_h_dpT_Proton->fill(pT, weight/(TWOPI*pT*0.2));
- nProton->fill(weight);
+ // nProton->fill(weight);
break;
}
}
else {
continue;
}
}
_h_dNch->fill(charged.particles().size(), weight);
}
/// Normalise histograms etc., after the run
void finalize() {
//double nTot = nPi + nPiPlus + nKaon + nKaonPlus + nProton + nAntiProton;
normalize(_h_dNch);
/// @todo Norm to data!
normalize(_h_dpT_Pi , 0.389825 );
normalize(_h_dpT_Piplus , 0.396025 );
normalize(_h_dpT_Kaon , 0.03897 );
normalize(_h_dpT_Kaonplus , 0.04046 );
normalize(_h_dpT_AntiProton, 0.0187255);
normalize(_h_dpT_Proton , 0.016511 );
}
private:
Histo1DPtr _h_dNch;
Histo1DPtr _h_dpT_Pi, _h_dpT_Piplus;
Histo1DPtr _h_dpT_Kaon, _h_dpT_Kaonplus;
Histo1DPtr _h_dpT_AntiProton, _h_dpT_Proton;
Profile1DPtr _h_pT_vs_Nch;
- CounterPtr nCutsPassed, nPi, nPiPlus, nKaon, nKaonPlus, nProton, nAntiProton;
+ //CounterPtr nCutsPassed, nPi, nPiPlus, nKaon, nKaonPlus, nProton, nAntiProton;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(STAR_2008_S7869363);
}
diff --git a/analyses/pluginSPS/UA1_1990_S2044935.cc b/analyses/pluginSPS/UA1_1990_S2044935.cc
--- a/analyses/pluginSPS/UA1_1990_S2044935.cc
+++ b/analyses/pluginSPS/UA1_1990_S2044935.cc
@@ -1,176 +1,176 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/MissingMomentum.hh"
namespace Rivet {
/// @brief UA1 minbias track multiplicities, \f$ p_\perp \f$ and \f$ E_\perp \f$
class UA1_1990_S2044935 : public Analysis {
public:
/// Constructor
UA1_1990_S2044935() : Analysis("UA1_1990_S2044935") {
}
/// @name Analysis methods
//@{
/// Book projections and histograms
void init() {
declare(ChargedFinalState(-5.5, 5.5), "TriggerFS");
declare(ChargedFinalState(-2.5, 2.5), "TrackFS");
const FinalState trkcalofs(-2.5, 2.5);
declare(MissingMomentum(trkcalofs), "MET25");
const FinalState calofs(-6.0, 6.0);
declare(MissingMomentum(calofs), "MET60");
if (fuzzyEquals(sqrtS()/GeV, 63)) {
book(_hist_Pt ,8,1,1);
} else if (fuzzyEquals(sqrtS()/GeV, 200)) {
book(_hist_Nch ,1,1,1);
book(_hist_Esigd3p ,2,1,1);
book(_hist_Pt ,6,1,1);
book(_hist_Et ,9,1,1);
book(_hist_Etavg ,12,1,1);
} else if (fuzzyEquals(sqrtS()/GeV, 500)) {
book(_hist_Nch ,1,1,2);
book(_hist_Esigd3p ,2,1,2);
book(_hist_Et ,10,1,1);
book(_hist_Etavg ,12,1,2);
} else if (fuzzyEquals(sqrtS()/GeV, 900)) {
book(_hist_Nch ,1,1,3);
book(_hist_Esigd3p ,2,1,3);
book(_hist_Pt ,7,1,1);
book(_hist_Et ,11,1,1);
book(_hist_Etavg ,12,1,3);
book(_hist_Esigd3p08 ,3,1,1);
book(_hist_Esigd3p40 ,4,1,1);
book(_hist_Esigd3p80 ,5,1,1);
}
book(_sumwTrig, "TMP/sumwTrig");
- book(_sumwTrig08, "TMP/sumwTrig08");
- book(_sumwTrig40, "TMP/sumwTrig40");
- book(_sumwTrig80, "TMP/sumwTrig80");
+ // book(_sumwTrig08, "TMP/sumwTrig08");
+ // book(_sumwTrig40, "TMP/sumwTrig40");
+ // book(_sumwTrig80, "TMP/sumwTrig80");
}
void analyze(const Event& event) {
// Trigger
const FinalState& trigfs = apply<FinalState>(event, "TriggerFS");
unsigned int n_minus(0), n_plus(0);
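// Count charged tracks in the two trigger arms, 1.5 < |eta| < 5.5;
// events without a hit in each arm are vetoed below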
foreach (const Particle& p, trigfs.particles()) {
const double eta = p.eta();
if (inRange(eta, -5.5, -1.5)) n_minus++;
else if (inRange(eta, 1.5, 5.5)) n_plus++;
}
MSG_DEBUG("Trigger -: " << n_minus << ", Trigger +: " << n_plus);
if (n_plus == 0 || n_minus == 0) vetoEvent;
_sumwTrig->fill();
// Use good central detector tracks
const FinalState& cfs = apply<FinalState>(event, "TrackFS");
const double Et25 = apply<MissingMomentum>(event, "MET25").scalarEt();
const double Et60 = apply<MissingMomentum>(event, "MET60").scalarEt();
const unsigned int nch = cfs.size();
// Event level histos
if (!fuzzyEquals(sqrtS()/GeV, 63, 1E-3)) {
_hist_Nch->fill(nch);
_hist_Et->fill(Et60/GeV);
_hist_Etavg->fill(nch, Et25/GeV);
}
// Particle/track level histos
const double deta = 2 * 5.0;
const double dphi = TWOPI;
const double dnch_deta = nch/deta;
foreach (const Particle& p, cfs.particles()) {
const double pt = p.pT();
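// The 1/(deta * dphi * pT) weight converts the pT spectrum into an
// invariant yield, approximating E d^3(sigma)/dp^3 once the
// cross-section factor is applied in finalize()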
const double scaled_weight = 1.0/(deta*dphi*pt/GeV);
if (!fuzzyEquals(sqrtS()/GeV, 500, 1E-3)) {
_hist_Pt->fill(nch, pt/GeV);
}
if (!fuzzyEquals(sqrtS()/GeV, 63, 1E-3)) {
_hist_Esigd3p->fill(pt/GeV, scaled_weight);
}
// Also fill for specific dn/deta ranges at 900 GeV
if (fuzzyEquals(sqrtS()/GeV, 900, 1E-3)) {
if (inRange(dnch_deta, 0.8, 4.0)) {
- _sumwTrig08 ->fill();
+ //_sumwTrig08 ->fill();
_hist_Esigd3p08->fill(pt/GeV, scaled_weight);
} else if (inRange(dnch_deta, 4.0, 8.0)) {
- _sumwTrig40 ->fill();
+ //_sumwTrig40 ->fill();
_hist_Esigd3p40->fill(pt/GeV, scaled_weight);
} else {
//MSG_WARNING(dnch_deta);
if (dnch_deta > 8.0) {
- _sumwTrig80 ->fill();
+ //_sumwTrig80 ->fill();
_hist_Esigd3p80->fill(pt/GeV, scaled_weight);
}
}
}
}
}
void finalize() {
- if (_sumwTrig <= 0) {
+ if (_sumwTrig->val() <= 0) {
MSG_WARNING("No events passed the trigger!");
return;
}
const double xsec = crossSectionPerEvent();
if (!fuzzyEquals(sqrtS()/GeV, 63, 1E-3)) {
scale(_hist_Nch, 2*xsec/millibarn); ///< Factor of 2 for Nch bin widths?
scale(_hist_Esigd3p, xsec/millibarn);
scale(_hist_Et, xsec/millibarn);
}
if (fuzzyEquals(sqrtS()/GeV, 900, 1E-3)) {
// NB. Ref data is normalised to a fixed value not reproducible from MC.
const double scale08 = (_hist_Esigd3p08->bin(0).area() > 0) ?
0.933e5/_hist_Esigd3p08->bin(0).height() : 0;
scale(_hist_Esigd3p08, scale08);
const double scale40 = (_hist_Esigd3p40->bin(0).area() > 0) ?
1.369e5/_hist_Esigd3p40->bin(0).height() : 0;
scale(_hist_Esigd3p40, scale40);
const double scale80 = (_hist_Esigd3p80->bin(0).area() > 0) ?
1.657e5/_hist_Esigd3p80->bin(0).height() : 0;
scale(_hist_Esigd3p80, scale80);
}
}
//@}
private:
/// @name Weight counters
//@{
- CounterPtr _sumwTrig, _sumwTrig08, _sumwTrig40, _sumwTrig80;
+ CounterPtr _sumwTrig; //, _sumwTrig08, _sumwTrig40, _sumwTrig80;
//@}
/// @name Histogram collections
//@{
Histo1DPtr _hist_Nch;
Histo1DPtr _hist_Esigd3p;
Histo1DPtr _hist_Esigd3p08;
Histo1DPtr _hist_Esigd3p40;
Histo1DPtr _hist_Esigd3p80;
Profile1DPtr _hist_Pt;
Profile1DPtr _hist_Etavg;
Histo1DPtr _hist_Et;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(UA1_1990_S2044935);
}
diff --git a/analyses/pluginSPS/UA5_1982_S875503.cc b/analyses/pluginSPS/UA5_1982_S875503.cc
--- a/analyses/pluginSPS/UA5_1982_S875503.cc
+++ b/analyses/pluginSPS/UA5_1982_S875503.cc
@@ -1,93 +1,93 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/TriggerUA5.hh"
namespace Rivet {
/// @brief UA5 multiplicity and \f$ \eta \f$ distributions
class UA5_1982_S875503 : public Analysis {
public:
/// Default constructor
UA5_1982_S875503() : Analysis("UA5_1982_S875503") {
}
/// @name Analysis methods
//@{
/// Set up projections and book histos
void init() {
declare(TriggerUA5(), "Trigger");
declare(ChargedFinalState(-3.5, 3.5), "CFS");
// Book histos based on pp or ppbar beams
if (beamIds().first == beamIds().second) {
book(_hist_nch ,2,1,1);
book(_hist_eta ,3,1,1);
} else {
book(_hist_nch ,2,1,2);
book(_hist_eta ,4,1,1);
}
book(_sumWTrig, "sumW");
}
void analyze(const Event& event) {
// Trigger
const TriggerUA5& trigger = apply<TriggerUA5>(event, "Trigger");
if (!trigger.nsdDecision()) vetoEvent;
_sumWTrig->fill();
// Get tracks
const ChargedFinalState& cfs = apply<ChargedFinalState>(event, "CFS");
// Fill mean charged multiplicity histos
_hist_nch->fill(_hist_nch->bin(0).xMid(), cfs.size());
// Iterate over all tracks and fill eta histograms
foreach (const Particle& p, cfs.particles()) {
const double eta = p.abseta();
_hist_eta->fill(eta);
}
}
void finalize() {
/// @todo Why the factor of 2 on Nch for ppbar?
if (beamIds().first == beamIds().second) {
- scale(_hist_nch, 1.0/_sumWTrig);
+ scale(_hist_nch, 1.0 / *_sumWTrig);
} else {
- scale(_hist_nch, 0.5/_sumWTrig);
+ scale(_hist_nch, 0.5 / *_sumWTrig);
}
- scale(_hist_eta, 0.5/_sumWTrig);
+ scale(_hist_eta, 0.5 / *_sumWTrig);
}
//@}
private:
/// @name Counters
//@{
CounterPtr _sumWTrig;
//@}
/// @name Histogram collections
//@{
Histo1DPtr _hist_nch;
Histo1DPtr _hist_eta;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(UA5_1982_S875503);
}
diff --git a/analyses/pluginSPS/UA5_1986_S1583476.cc b/analyses/pluginSPS/UA5_1986_S1583476.cc
--- a/analyses/pluginSPS/UA5_1986_S1583476.cc
+++ b/analyses/pluginSPS/UA5_1986_S1583476.cc
@@ -1,123 +1,123 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/TriggerUA5.hh"
namespace Rivet {
/// @brief UA5 \f$ \eta \f$ distributions at 200 and 900 GeV
class UA5_1986_S1583476 : public Analysis {
public:
/// Constructor
UA5_1986_S1583476() : Analysis("UA5_1986_S1583476") {
}
/// @name Analysis methods
//@{
/// Set up projections and histograms
void init() {
declare(TriggerUA5(), "Trigger");
declare(Beam(), "Beams");
declare(ChargedFinalState(-5.0, 5.0), "CFS50");
// Histograms
if (fuzzyEquals(sqrtS()/GeV, 200.0, 1E-4)) {
book(_hist_eta_nsd ,1,1,1);
book(_hist_eta_inelastic ,1,1,2);
_hists_eta_nsd.resize(6);
for (int i = 1; i <= 6; ++i) {
_sumWn.push_back({});
book(_sumWn.back(), "TMP/sumWn"+to_str(i));
book(_hists_eta_nsd[i-1],2,1,i);
}
} else if (fuzzyEquals(sqrtS()/GeV, 900.0, 1E-4)) {
book(_hist_eta_nsd ,1,1,3);
book(_hist_eta_inelastic ,1,1,4);
_hists_eta_nsd.resize(9);
for (int i = 1; i <= 9; ++i) {
_sumWn.push_back({});
book(_sumWn.back(), "TMP/sumWn"+to_str(i));
book(_hists_eta_nsd[i-1],3,1,i);
}
}
book(_sumWTrig, "sumWtrig");
book(_sumWTrigNSD, "sumWtrigNSD");
}
/// Fill eta histograms (in Nch bins)
void analyze(const Event& event) {
// Trigger
const TriggerUA5& trigger = apply<TriggerUA5>(event, "Trigger");
if (!trigger.sdDecision()) vetoEvent;
const bool isNSD = trigger.nsdDecision();
// Get the index corresponding to the max Nch range histo/sum(w) vector index
const ChargedFinalState& cfs50 = apply<ChargedFinalState>(event, "CFS50");
const int numP = cfs50.size();
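// Index for the Nch-binned eta histograms: multiplicity bins are
// 10 tracks wide, starting at Nch = 2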
const int ni = (int)floor(static_cast<float>(numP-2)/10.0);
const int num_idx = min(ni, (int)_sumWn.size()-1);
MSG_TRACE("Multiplicity index: " << numP << " charged particles -> #" << num_idx);
// Update weights
_sumWTrig->fill();
if (isNSD) {
_sumWTrigNSD->fill();
if (num_idx >= 0) _sumWn[num_idx]->fill();
}
// Fill histos
foreach (const Particle& p, cfs50.particles()) {
const double eta = p.abseta();
_hist_eta_inelastic->fill(eta);
if (isNSD) {
_hist_eta_nsd->fill(eta);
if (num_idx >= 0) _hists_eta_nsd[num_idx]->fill(eta);
}
}
}
/// Scale histos
void finalize() {
- MSG_DEBUG("sumW_NSD,inel = " << double(_sumWTrigNSD) << ", " << double(_sumWTrig));
- scale(_hist_eta_nsd, 0.5/_sumWTrigNSD);
- scale(_hist_eta_inelastic, 0.5/_sumWTrig);
+ MSG_DEBUG("sumW_NSD,inel = " << _sumWTrigNSD->val() << ", " << _sumWTrig->val());
+ scale(_hist_eta_nsd, 0.5 / *_sumWTrigNSD);
+ scale(_hist_eta_inelastic, 0.5 / *_sumWTrig);
//
for (size_t i = 0; i < _hists_eta_nsd.size(); ++i) {
- MSG_DEBUG("sumW[n] = " << double(_sumWn[i]));
- scale(_hists_eta_nsd[i], 0.5/_sumWn[i]);
+ MSG_DEBUG("sumW[n] = " << _sumWn[i]->val());
+ scale(_hists_eta_nsd[i], 0.5 / *_sumWn[i]);
}
}
private:
/// @name Weight counters
//@{
CounterPtr _sumWTrig;
CounterPtr _sumWTrigNSD;
vector<CounterPtr> _sumWn;
//@}
/// @name Histograms
//@{
Histo1DPtr _hist_eta_nsd;
Histo1DPtr _hist_eta_inelastic;
vector<Histo1DPtr> _hists_eta_nsd;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(UA5_1986_S1583476);
}
diff --git a/analyses/pluginSPS/UA5_1987_S1640666.cc b/analyses/pluginSPS/UA5_1987_S1640666.cc
--- a/analyses/pluginSPS/UA5_1987_S1640666.cc
+++ b/analyses/pluginSPS/UA5_1987_S1640666.cc
@@ -1,72 +1,72 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/TriggerUA5.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
class UA5_1987_S1640666 : public Analysis {
public:
/// Constructor
UA5_1987_S1640666()
: Analysis("UA5_1987_S1640666")
{
}
/// Book histograms and initialise projections before the run
void init() {
declare(TriggerUA5(), "Trigger");
declare(ChargedFinalState(-5.0, 5.0), "CFS");
book(_hist_mean_nch ,1, 1, 1);
book(_hist_nch ,3, 1, 1);
book(_sumWPassed, "SumW");
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// Trigger
const TriggerUA5& trigger = apply<TriggerUA5>(event, "Trigger");
if (!trigger.nsdDecision()) vetoEvent;
_sumWPassed->fill();
// Count final state particles in several eta regions
const int Nch = apply<ChargedFinalState>(event, "CFS").size();
// Fill histograms
_hist_nch->fill(Nch);
_hist_mean_nch->fill(_hist_mean_nch->bin(0).xMid(), Nch);
}
/// Normalise histograms etc., after the run
void finalize() {
- scale(_hist_nch, 1.0/_sumWPassed);
- scale(_hist_mean_nch, 1.0/_sumWPassed);
+ scale(_hist_nch, 1.0 / *_sumWPassed);
+ scale(_hist_mean_nch, 1.0 / *_sumWPassed);
}
private:
CounterPtr _sumWPassed;
Histo1DPtr _hist_mean_nch;
Histo1DPtr _hist_nch;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(UA5_1987_S1640666);
}
diff --git a/analyses/pluginSPS/UA5_1988_S1867512.cc b/analyses/pluginSPS/UA5_1988_S1867512.cc
--- a/analyses/pluginSPS/UA5_1988_S1867512.cc
+++ b/analyses/pluginSPS/UA5_1988_S1867512.cc
@@ -1,195 +1,195 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/Beam.hh"
#include "Rivet/Projections/TriggerUA5.hh"
namespace Rivet {
namespace {
/// @brief Helper function to fill correlation points into scatter plot
- Point2D correlation_helper(double x, double xerr, const vector<int> & nf, const vector<int> & nb, double sumWPassed) {
- return Point2D(x, correlation(nf, nb), xerr, correlation_err(nf, nb)/sqrt(sumWPassed));
+ Point2D correlation_helper(double x, double xerr, const vector<int> & nf, const vector<int> & nb, CounterPtr sumWPassed) {
+ return Point2D(x, correlation(nf, nb), xerr, correlation_err(nf, nb)/sqrt(sumWPassed->val()));
}
}
/// @brief UA5 charged particle correlations at 200, 546 and 900 GeV
class UA5_1988_S1867512 : public Analysis {
public:
UA5_1988_S1867512()
: Analysis("UA5_1988_S1867512")
{ }
/// @name Analysis methods
//@{
void init() {
// Projections
declare(TriggerUA5(), "Trigger");
// Symmetric eta interval
declare(ChargedFinalState(-0.5, 0.5), "CFS05");
// Asymmetric intervals first
// Forward eta intervals
declare(ChargedFinalState(0.0, 1.0), "CFS10F");
declare(ChargedFinalState(0.5, 1.5), "CFS15F");
declare(ChargedFinalState(1.0, 2.0), "CFS20F");
declare(ChargedFinalState(1.5, 2.5), "CFS25F");
declare(ChargedFinalState(2.0, 3.0), "CFS30F");
declare(ChargedFinalState(2.5, 3.5), "CFS35F");
declare(ChargedFinalState(3.0, 4.0), "CFS40F");
// Backward eta intervals
declare(ChargedFinalState(-1.0, 0.0), "CFS10B");
declare(ChargedFinalState(-1.5, -0.5), "CFS15B");
declare(ChargedFinalState(-2.0, -1.0), "CFS20B");
declare(ChargedFinalState(-2.5, -1.5), "CFS25B");
declare(ChargedFinalState(-3.0, -2.0), "CFS30B");
declare(ChargedFinalState(-3.5, -2.5), "CFS35B");
declare(ChargedFinalState(-4.0, -3.0), "CFS40B");
// Histogram booking, we have sqrt(s) = 200, 546 and 900 GeV
// TODO use Scatter2D to be able to output errors
if (fuzzyEquals(sqrtS()/GeV, 200.0, 1E-4)) {
book(_hist_correl, 2, 1, 1);
book(_hist_correl_asym, 3, 1, 1);
} else if (fuzzyEquals(sqrtS()/GeV, 546.0, 1E-4)) {
book(_hist_correl, 2, 1, 2);
book(_hist_correl_asym, 3, 1, 2);
} else if (fuzzyEquals(sqrtS()/GeV, 900.0, 1E-4)) {
book(_hist_correl, 2, 1, 3);
book(_hist_correl_asym, 3, 1, 3);
}
book(_sumWPassed, "sumW");
}
void analyze(const Event& event) {
// Trigger
const bool trigger = apply<TriggerUA5>(event, "Trigger").nsdDecision();
if (!trigger) vetoEvent;
_sumWPassed->fill();
// Count forward/backward particles
n_10f.push_back(apply<ChargedFinalState>(event, "CFS10F").size());
n_15f.push_back(apply<ChargedFinalState>(event, "CFS15F").size());
n_20f.push_back(apply<ChargedFinalState>(event, "CFS20F").size());
n_25f.push_back(apply<ChargedFinalState>(event, "CFS25F").size());
n_30f.push_back(apply<ChargedFinalState>(event, "CFS30F").size());
n_35f.push_back(apply<ChargedFinalState>(event, "CFS35F").size());
n_40f.push_back(apply<ChargedFinalState>(event, "CFS40F").size());
//
n_10b.push_back(apply<ChargedFinalState>(event, "CFS10B").size());
n_15b.push_back(apply<ChargedFinalState>(event, "CFS15B").size());
n_20b.push_back(apply<ChargedFinalState>(event, "CFS20B").size());
n_25b.push_back(apply<ChargedFinalState>(event, "CFS25B").size());
n_30b.push_back(apply<ChargedFinalState>(event, "CFS30B").size());
n_35b.push_back(apply<ChargedFinalState>(event, "CFS35B").size());
n_40b.push_back(apply<ChargedFinalState>(event, "CFS40B").size());
//
n_05 .push_back(apply<ChargedFinalState>(event, "CFS05").size());
}
void finalize() {
// The correlation strength is defined in formulas
// 4.1 and 4.2
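//   b = cov(n_F, n_B) / sqrt(var(n_F) * var(n_B))
// i.e. the Pearson correlation of the forward and backward
// multiplicities (assumed here to be what the correlation()
// helper above computes)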
// Fill histos, gap width histo comes first
// * Set the errors as Delta b / sqrt(sumWPassed) with
// Delta b being the absolute uncertainty of b according to
// Gaussian error-propagation (linear limit) and assuming
// Poissonian uncertainties for the number of particles in
// the eta-intervals
//
// Define vectors to be able to fill Scatter2Ds
vector<Point2D> points;
// Fill the y-value vector
points.push_back(correlation_helper(0, 0.5, n_10f, n_10b, _sumWPassed));
points.push_back(correlation_helper(1, 0.5, n_15f, n_15b, _sumWPassed));
points.push_back(correlation_helper(2, 0.5, n_20f, n_20b, _sumWPassed));
points.push_back(correlation_helper(3, 0.5, n_25f, n_25b, _sumWPassed));
points.push_back(correlation_helper(4, 0.5, n_30f, n_30b, _sumWPassed));
points.push_back(correlation_helper(5, 0.5, n_35f, n_35b, _sumWPassed));
points.push_back(correlation_helper(6, 0.5, n_40f, n_40b, _sumWPassed));
// Fill the DPS
_hist_correl->addPoints(points);
// Fill gap-center histo (Fig 15)
//
// The first bin contains the correlation strengths of
// the gap-size histo that has an eta gap of two
//
// Now do the other histo -- clear already defined vectors first
points.clear();
points.push_back(correlation_helper(0, 0.25, n_20f, n_20b, _sumWPassed));
points.push_back(correlation_helper(0.5, 0.25, n_25f, n_15b, _sumWPassed));
points.push_back(correlation_helper(1, 0.25, n_30f, n_10b, _sumWPassed));
points.push_back(correlation_helper(1.5, 0.25, n_35f, n_05 , _sumWPassed));
points.push_back(correlation_helper(2, 0.25, n_40f, n_10f, _sumWPassed));
// Fill in correlation strength for asymmetric intervals,
// see Tab. 5
// Fill the DPS
_hist_correl_asym->addPoints(points);
}
//@}
private:
/// @name Counters
//@{
CounterPtr _sumWPassed;
//@}
/// @name Vectors for storing the number of particles in the different eta intervals per event.
/// @todo Is there a better way?
//@{
std::vector<int> n_10f;
std::vector<int> n_15f;
std::vector<int> n_20f;
std::vector<int> n_25f;
std::vector<int> n_30f;
std::vector<int> n_35f;
std::vector<int> n_40f;
//
std::vector<int> n_10b;
std::vector<int> n_15b;
std::vector<int> n_20b;
std::vector<int> n_25b;
std::vector<int> n_30b;
std::vector<int> n_35b;
std::vector<int> n_40b;
//
std::vector<int> n_05;
//@}
/// @name Histograms
//@{
// Symmetric eta intervals
Scatter2DPtr _hist_correl;
// For asymmetric eta intervals
Scatter2DPtr _hist_correl_asym;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(UA5_1988_S1867512);
}
diff --git a/analyses/pluginSPS/UA5_1989_S1926373.cc b/analyses/pluginSPS/UA5_1989_S1926373.cc
--- a/analyses/pluginSPS/UA5_1989_S1926373.cc
+++ b/analyses/pluginSPS/UA5_1989_S1926373.cc
@@ -1,110 +1,110 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/TriggerUA5.hh"
namespace Rivet {
/// @brief UA5 min bias charged multiplicities in central \f$ \eta \f$ ranges
class UA5_1989_S1926373 : public Analysis {
public:
/// Constructor
UA5_1989_S1926373() : Analysis("UA5_1989_S1926373") {
}
/// @name Analysis methods
//@{
/// Book histograms and projections
void init() {
declare(TriggerUA5(), "Trigger");
declare(ChargedFinalState(-0.5, 0.5), "CFS05");
declare(ChargedFinalState(-1.5, 1.5), "CFS15");
declare(ChargedFinalState(-3.0, 3.0), "CFS30");
declare(ChargedFinalState(-5.0, 5.0), "CFS50");
// NB. _hist_nch and _hist_nch_eta50 use the same data but different binning
if (fuzzyEquals(sqrtS()/GeV, 200, 1E-3)) {
book(_hist_nch ,1, 1, 1);
book(_hist_nch_eta05 ,3, 1, 1);
book(_hist_nch_eta15 ,4, 1, 1);
book(_hist_nch_eta30 ,5, 1, 1);
book(_hist_nch_eta50 ,6, 1, 1);
book(_hist_mean_nch ,11, 1, 1);
} else if (fuzzyEquals(sqrtS()/GeV, 900, 1E-3)) {
book(_hist_nch ,2, 1, 1);
book(_hist_nch_eta05 ,7, 1, 1);
book(_hist_nch_eta15 ,8, 1, 1);
book(_hist_nch_eta30 ,9, 1, 1);
book(_hist_nch_eta50 ,10, 1, 1);
book(_hist_mean_nch ,12, 1, 1);
}
book(_sumWPassed, "SumW");
/// @todo Moments of distributions
}
/// Do the analysis
void analyze(const Event& event) {
// Trigger
const TriggerUA5& trigger = apply<TriggerUA5>(event, "Trigger");
if (!trigger.nsdDecision()) vetoEvent;
_sumWPassed->fill();
// Count final state particles in several eta regions
const int numP05 = apply<ChargedFinalState>(event, "CFS05").size();
const int numP15 = apply<ChargedFinalState>(event, "CFS15").size();
const int numP30 = apply<ChargedFinalState>(event, "CFS30").size();
const int numP50 = apply<ChargedFinalState>(event, "CFS50").size();
// Fill histograms
_hist_nch->fill(numP50);
_hist_nch_eta05->fill(numP05);
_hist_nch_eta15->fill(numP15);
_hist_nch_eta30->fill(numP30);
_hist_nch_eta50->fill(numP50);
_hist_mean_nch->fill(_hist_mean_nch->bin(0).xMid(), numP50);
}
void finalize() {
- scale(_hist_nch, 1.0/_sumWPassed);
- scale(_hist_nch_eta05, 1.0/_sumWPassed);
- scale(_hist_nch_eta15, 1.0/_sumWPassed);
- scale(_hist_nch_eta30, 1.0/_sumWPassed);
- scale(_hist_nch_eta50, 1.0/_sumWPassed);
- scale(_hist_mean_nch, 1.0/_sumWPassed);
+ scale(_hist_nch, 1.0 / *_sumWPassed);
+ scale(_hist_nch_eta05, 1.0 / *_sumWPassed);
+ scale(_hist_nch_eta15, 1.0 / *_sumWPassed);
+ scale(_hist_nch_eta30, 1.0 / *_sumWPassed);
+ scale(_hist_nch_eta50, 1.0 / *_sumWPassed);
+ scale(_hist_mean_nch, 1.0 / *_sumWPassed);
}
//@}
private:
/// @name Counters
//@{
CounterPtr _sumWPassed;
//@}
/// @name Histograms
//@{
Histo1DPtr _hist_nch;
Histo1DPtr _hist_nch_eta05;
Histo1DPtr _hist_nch_eta15;
Histo1DPtr _hist_nch_eta30;
Histo1DPtr _hist_nch_eta50;
Histo1DPtr _hist_mean_nch;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(UA5_1989_S1926373);
}
diff --git a/analyses/pluginTOTEM/CMSTOTEM_2014_I1294140.cc b/analyses/pluginTOTEM/CMSTOTEM_2014_I1294140.cc
--- a/analyses/pluginTOTEM/CMSTOTEM_2014_I1294140.cc
+++ b/analyses/pluginTOTEM/CMSTOTEM_2014_I1294140.cc
@@ -1,78 +1,78 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/Beam.hh"
namespace Rivet {
class CMSTOTEM_2014_I1294140 : public Analysis {
public:
CMSTOTEM_2014_I1294140()
: Analysis("CMSTOTEM_2014_I1294140")
{ }
void init() {
ChargedFinalState cfs(-7.0, 7.0, 0.0*GeV);
declare(cfs, "CFS");
book(_Nevt_after_cuts_or, "Nevt_or");
book(_Nevt_after_cuts_and, "Nevt_and");
book(_Nevt_after_cuts_xor, "Nevt_xor");
if (fuzzyEquals(sqrtS(), 8000*GeV, 1E-3)) {
book(_h_dNch_dEta_OR ,1, 1, 1);
book(_h_dNch_dEta_AND ,2, 1, 1);
book(_h_dNch_dEta_XOR ,3, 1, 1);
}
}
void analyze(const Event& event) {
// Count forward and backward charged particles
const ChargedFinalState& charged = apply<ChargedFinalState>(event, "CFS");
int count_plus = 0, count_minus = 0;
foreach (const Particle& p, charged.particles()) {
if (inRange(p.eta(), 5.3, 6.5)) count_plus++;
if (inRange(p.eta(), -6.5, -5.3)) count_minus++;
}
// Cut combinations
const bool cutsor = (count_plus > 0 || count_minus > 0);
const bool cutsand = (count_plus > 0 && count_minus > 0);
const bool cutsxor = ( (count_plus > 0 && count_minus == 0) || (count_plus == 0 && count_minus > 0) );
// Increment counters and fill histos
if (cutsor) _Nevt_after_cuts_or ->fill();
if (cutsand) _Nevt_after_cuts_and ->fill();
if (cutsxor) _Nevt_after_cuts_xor ->fill();
foreach (const Particle& p, charged.particles()) {
if (cutsor) _h_dNch_dEta_OR ->fill(p.abseta());
if (cutsand) _h_dNch_dEta_AND->fill(p.abseta());
if (cutsxor) _h_dNch_dEta_XOR->fill(p.abseta());
}
}
void finalize() {
- scale(_h_dNch_dEta_OR, 0.5/_Nevt_after_cuts_or);
- scale(_h_dNch_dEta_AND, 0.5/_Nevt_after_cuts_and);
- scale(_h_dNch_dEta_XOR, 0.5/_Nevt_after_cuts_xor);
+ scale(_h_dNch_dEta_OR, 0.5 / *_Nevt_after_cuts_or);
+ scale(_h_dNch_dEta_AND, 0.5 / *_Nevt_after_cuts_and);
+ scale(_h_dNch_dEta_XOR, 0.5 / *_Nevt_after_cuts_xor);
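// The factor 0.5 accounts for folding the +eta and -eta hemispheres
// into the same |eta| bins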
}
private:
Histo1DPtr _h_dNch_dEta_OR, _h_dNch_dEta_AND, _h_dNch_dEta_XOR;
CounterPtr _Nevt_after_cuts_or, _Nevt_after_cuts_and, _Nevt_after_cuts_xor;
};
// Hook for the plugin system
DECLARE_RIVET_PLUGIN(CMSTOTEM_2014_I1294140);
}
diff --git a/analyses/pluginTOTEM/TOTEM_2012_I1115294.cc b/analyses/pluginTOTEM/TOTEM_2012_I1115294.cc
--- a/analyses/pluginTOTEM/TOTEM_2012_I1115294.cc
+++ b/analyses/pluginTOTEM/TOTEM_2012_I1115294.cc
@@ -1,62 +1,62 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
class TOTEM_2012_I1115294 : public Analysis {
public:
TOTEM_2012_I1115294()
: Analysis("TOTEM_2012_I1115294")
{ }
public:
void init() {
ChargedFinalState cfsm(-6.50, -5.35, 40.*MeV);
ChargedFinalState cfsp( 5.35, 6.50, 40.*MeV);
declare(cfsm, "CFSM");
declare(cfsp, "CFSP");
book(_h_eta ,1, 1, 1);
book(_sumofweights, "sumofweights");
}
void analyze(const Event& event) {
const ChargedFinalState cfsm = apply<ChargedFinalState>(event, "CFSM");
const ChargedFinalState cfsp = apply<ChargedFinalState>(event, "CFSP");
if (cfsm.size() == 0 && cfsp.size() == 0) vetoEvent;
_sumofweights->fill();
foreach (const Particle& p, cfsm.particles() + cfsp.particles()) {
_h_eta->fill(p.abseta());
}
}
void finalize() {
- scale(_h_eta, 1./(2*_sumofweights));
+ scale(_h_eta, 0.5 / *_sumofweights);
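// 0.5 / sumW is equivalent to the old 1/(2*sumW): both eta arms are
// folded into the same |eta| histogram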
}
private:
CounterPtr _sumofweights;
Histo1DPtr _h_eta;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(TOTEM_2012_I1115294);
}
diff --git a/analyses/pluginTOTEM/TOTEM_2014_I1328627.cc b/analyses/pluginTOTEM/TOTEM_2014_I1328627.cc
--- a/analyses/pluginTOTEM/TOTEM_2014_I1328627.cc
+++ b/analyses/pluginTOTEM/TOTEM_2014_I1328627.cc
@@ -1,58 +1,58 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
namespace Rivet {
class TOTEM_2014_I1328627 : public Analysis {
public:
TOTEM_2014_I1328627()
: Analysis("TOTEM_2014_I1328627")
{ }
void init() {
ChargedFinalState cfsm(-7.0, -6.0, 0.0*GeV);
ChargedFinalState cfsp( 3.7, 4.8, 0.0*GeV);
declare(cfsm, "CFSM");
declare(cfsp, "CFSP");
book(_h_eta ,1, 1, 1);
book(_sumofweights, "sumofweights");
}
void analyze(const Event& event) {
const ChargedFinalState cfsm = apply<ChargedFinalState>(event, "CFSM");
const ChargedFinalState cfsp = apply<ChargedFinalState>(event, "CFSP");
if (cfsm.size() == 0 && cfsp.size() == 0) vetoEvent;
_sumofweights->fill();
foreach (const Particle& p, cfsm.particles() + cfsp.particles()) {
_h_eta->fill(p.abseta());
}
}
void finalize() {
- scale(_h_eta, 1./_sumofweights);
+ scale(_h_eta, 1. / *_sumofweights);
}
private:
CounterPtr _sumofweights;
Histo1DPtr _h_eta;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(TOTEM_2014_I1328627);
}
diff --git a/include/Rivet/Analysis.hh b/include/Rivet/Analysis.hh
--- a/include/Rivet/Analysis.hh
+++ b/include/Rivet/Analysis.hh
@@ -1,924 +1,954 @@
// -*- C++ -*-
#ifndef RIVET_Analysis_HH
#define RIVET_Analysis_HH
#include "Rivet/Config/RivetCommon.hh"
#include "Rivet/AnalysisInfo.hh"
#include "Rivet/Event.hh"
#include "Rivet/Projection.hh"
#include "Rivet/ProjectionApplier.hh"
#include "Rivet/ProjectionHandler.hh"
#include "Rivet/AnalysisLoader.hh"
#include "Rivet/Tools/Cuts.hh"
#include "Rivet/Tools/Logging.hh"
#include "Rivet/Tools/ParticleUtils.hh"
#include "Rivet/Tools/BinnedHistogram.hh"
#include "Rivet/Tools/RivetMT2.hh"
#include "Rivet/Tools/RivetYODA.hh"
/// @def vetoEvent
/// Preprocessor define for vetoing events, including the log message and return.
#define vetoEvent \
do { MSG_DEBUG("Vetoing event on line " << __LINE__ << " of " << __FILE__); return; } while(0)
namespace Rivet {
// Forward declaration
class AnalysisHandler;
/// @brief This is the base class of all analysis classes in Rivet.
///
/// There are
/// three virtual functions which should be implemented in derived classes:
///
/// void init() is called by Rivet before a run is started. Here the
/// analysis class should book the necessary histograms and declare
/// the projections it needs.
///
/// void analyze(const Event&) is called once for each event. Here the
/// analysis class should apply the necessary Projections and fill the
/// histograms.
///
/// void finalize() is called after a run is finished. Here the analysis
/// class should do whatever manipulations are necessary on the
/// histograms. Writing the histograms to a file is, however, done by
/// the Rivet class.
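///
/// A minimal sketch of a concrete analysis (illustrative only: the
/// analysis name, projection and histogram identifiers here are
/// hypothetical):
///
/// @code
/// class MY_TOY_ANALYSIS : public Analysis {
/// public:
///   MY_TOY_ANALYSIS() : Analysis("MY_TOY_ANALYSIS") { }
///   void init() {
///     declare(ChargedFinalState(-2.5, 2.5), "CFS");
///     book(_h_nch, "nch", 50, -0.5, 49.5);
///   }
///   void analyze(const Event& e) {
///     _h_nch->fill(apply<ChargedFinalState>(e, "CFS").size());
///   }
///   void finalize() { normalize(_h_nch); }
/// private:
///   Histo1DPtr _h_nch;
/// };
/// DECLARE_RIVET_PLUGIN(MY_TOY_ANALYSIS);
/// @endcode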
class Analysis : public ProjectionApplier {
/// The AnalysisHandler is a friend.
friend class AnalysisHandler;
public:
/// @name Standard constructors and destructors.
//@{
// /// The default constructor.
// Analysis();
/// Constructor
Analysis(const std::string& name);
/// The destructor.
virtual ~Analysis() {}
//@}
public:
/// @name Main analysis methods
//@{
/// Initialize this analysis object. A concrete class should here
/// book all necessary histograms. An overridden function must make
/// sure it first calls the base class function.
virtual void init() { }
/// Analyze one event. A concrete class should here apply the
/// necessary projections on the \a event and fill the relevant
/// histograms. An overridden function must make sure it first calls
/// the base class function.
virtual void analyze(const Event& event) = 0;
/// Finalize this analysis object. A concrete class should here make
/// all necessary operations on the histograms. Writing the
/// histograms to a file is, however, done by the Rivet class. An
/// overridden function must make sure it first calls the base class
/// function.
virtual void finalize() { }
//@}
public:
/// @name Metadata
/// Metadata is used for querying from the command line and also for
/// building web pages and the analysis pages in the Rivet manual.
//@{
/// Get the actual AnalysisInfo object in which all this metadata is stored.
const AnalysisInfo& info() const {
assert(_info && "No AnalysisInfo object :O");
return *_info;
}
/// @brief Get the name of the analysis.
///
/// By default this is computed by combining the results of the experiment,
/// year and Spires ID metadata methods and you should only override it if
/// there's a good reason why those won't work.
virtual std::string name() const {
return (info().name().empty()) ? _defaultname : info().name();
}
/// Get the Inspire ID code for this analysis.
virtual std::string inspireId() const {
return info().inspireId();
}
/// Get the SPIRES ID code for this analysis (~deprecated).
virtual std::string spiresId() const {
return info().spiresId();
}
/// @brief Names & emails of paper/analysis authors.
///
/// Names and emails of authors in 'NAME \<EMAIL\>' format. The first
/// name in the list should be the primary contact person.
virtual std::vector<std::string> authors() const {
return info().authors();
}
/// @brief Get a short description of the analysis.
///
/// Short (one sentence) description used as an index entry.
/// Use @a description() to provide full descriptive paragraphs
/// of analysis details.
virtual std::string summary() const {
return info().summary();
}
/// @brief Get a full description of the analysis.
///
/// Full textual description of this analysis, what it is useful for,
/// what experimental techniques are applied, etc. Should be treated
/// as a chunk of restructuredText (http://docutils.sourceforge.net/rst.html),
/// with equations to be rendered as LaTeX with amsmath operators.
virtual std::string description() const {
return info().description();
}
/// @brief Information about the events needed as input for this analysis.
///
/// Event types, energies, kinematic cuts, particles to be considered
/// stable, etc. etc. Should be treated as a restructuredText bullet list
/// (http://docutils.sourceforge.net/rst.html)
virtual std::string runInfo() const {
return info().runInfo();
}
/// Experiment which performed and published this analysis.
virtual std::string experiment() const {
return info().experiment();
}
/// Collider on which the experiment ran.
virtual std::string collider() const {
return info().collider();
}
/// When the original experimental analysis was published.
virtual std::string year() const {
return info().year();
}
/// The luminosity in inverse femtobarn
virtual std::string luminosityfb() const {
return info().luminosityfb();
}
/// Journal, and preprint references.
virtual std::vector<std::string> references() const {
return info().references();
}
/// BibTeX citation key for this article.
virtual std::string bibKey() const {
return info().bibKey();
}
/// BibTeX citation entry for this article.
virtual std::string bibTeX() const {
return info().bibTeX();
}
/// Whether this analysis is trusted (in any way!)
virtual std::string status() const {
return (info().status().empty()) ? "UNVALIDATED" : info().status();
}
/// Any work to be done on this analysis.
virtual std::vector<std::string> todos() const {
return info().todos();
}
/// Return the allowed pairs of incoming beams required by this analysis.
virtual const std::vector<PdgIdPair>& requiredBeams() const {
return info().beams();
}
/// Declare the allowed pairs of incoming beams required by this analysis.
virtual Analysis& setRequiredBeams(const std::vector<PdgIdPair>& requiredBeams) {
info().setBeams(requiredBeams);
return *this;
}
/// Sets of valid beam energy pairs, in GeV
virtual const std::vector<std::pair<double, double> >& requiredEnergies() const {
return info().energies();
}
/// Get vector of analysis keywords
virtual const std::vector<std::string> & keywords() const {
return info().keywords();
}
/// Declare the list of valid beam energy pairs, in GeV
virtual Analysis& setRequiredEnergies(const std::vector<std::pair<double, double> >& requiredEnergies) {
info().setEnergies(requiredEnergies);
return *this;
}
//@}
/// @name Internal metadata modifying methods
//@{
/// Get the actual AnalysisInfo object in which all this metadata is stored (non-const).
AnalysisInfo& info() {
assert(_info && "No AnalysisInfo object :O");
return *_info;
}
//@}
/// @name Run conditions
//@{
/// Incoming beams for this run
const ParticlePair& beams() const;
/// Incoming beam IDs for this run
const PdgIdPair beamIds() const;
/// Centre of mass energy for this run
double sqrtS() const;
//@}
/// @name Analysis / beam compatibility testing
//@{
/// Check if analysis is compatible with the provided beam particle IDs and energies
bool isCompatible(const ParticlePair& beams) const;
/// Check if analysis is compatible with the provided beam particle IDs and energies
bool isCompatible(PdgId beam1, PdgId beam2, double e1, double e2) const;
/// Check if analysis is compatible with the provided beam particle IDs and energies
bool isCompatible(const PdgIdPair& beams, const std::pair<double,double>& energies) const;
//@}
/// Access the controlling AnalysisHandler object.
AnalysisHandler& handler() const { return *_analysishandler; }
protected:
/// Get a Log object based on the name() property of the calling analysis object.
Log& getLog() const;
/// Get the process cross-section in pb. Throws if this hasn't been set.
double crossSection() const;
/// Get the process cross-section per generated event in pb. Throws if this
/// hasn't been set.
double crossSectionPerEvent() const;
/// @brief Get the number of events seen (via the analysis handler).
///
/// @note Use in the finalize phase only.
size_t numEvents() const;
/// @brief Get the sum of event weights seen (via the analysis handler).
///
/// @note Use in the finalize phase only.
double sumOfWeights() const;
protected:
/// @name Histogram paths
//@{
/// Get the canonical histogram "directory" path for this analysis.
const std::string histoDir() const;
/// Get the canonical histogram path for the named histogram in this analysis.
const std::string histoPath(const std::string& hname) const;
/// Get the canonical histogram path for the numbered histogram in this analysis.
const std::string histoPath(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const;
/// Get the internal histogram name for given d, x and y (cf. HepData)
const std::string mkAxisCode(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const;
/// Alias
/// @deprecated Prefer the "mk" form, consistent with other "making function" names
const std::string makeAxisCode(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
return mkAxisCode(datasetId, xAxisId, yAxisId);
}
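/// For orientation (illustrative calls): the IDs are zero-padded to two
/// digits, matching the HepData-style reference object names, e.g.
/// @code
/// mkAxisCode(3, 1, 1);   // -> "d03-x01-y01"
/// mkAxisCode(12, 1, 2);  // -> "d12-x01-y02"
/// @endcode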
//@}
/// @name Histogram reference data
//@{
/// Get reference data for a named histo
/// @todo SFINAE to ensure that the type inherits from YODA::AnalysisObject?
template <typename T=YODA::Scatter2D>
const T& refData(const string& hname) const {
_cacheRefData();
MSG_TRACE("Using histo bin edges for " << name() << ":" << hname);
if (!_refdata[hname]) {
MSG_ERROR("Can't find reference histogram " << hname);
throw Exception("Reference data " + hname + " not found.");
}
return dynamic_cast<T&>(*_refdata[hname]);
}
/// Get reference data for a numbered histo
/// @todo SFINAE to ensure that the type inherits from YODA::AnalysisObject?
template <typename T=YODA::Scatter2D>
const T& refData(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
const string hname = mkAxisCode(datasetId, xAxisId, yAxisId);
return refData(hname);
}
//@}
/// @name Counter booking
//@{
/// Book a counter.
CounterPtr & book(CounterPtr &, const std::string& name,
const std::string& title="");
// const std::string& valtitle=""
/// Book a counter, using a path generated from the dataset and axis ID codes
///
/// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
CounterPtr & book(CounterPtr &, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
const std::string& title="");
// const std::string& valtitle=""
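/// A sketch of the usual counter lifecycle (names illustrative):
/// @code
/// CounterPtr _nevt;                 // member variable
/// book(_nevt, "Nevt_after_cuts");   // in init()
/// _nevt->fill();                    // once per accepted event, in analyze()
/// @endcode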
//@}
/// @name 1D histogram booking
//@{
/// Book a 1D histogram with @a nbins uniformly distributed across the range @a lower - @a upper .
Histo1DPtr & book(Histo1DPtr &,const std::string& name,
size_t nbins, double lower, double upper,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D histogram with non-uniform bins defined by the vector of bin edges @a binedges .
Histo1DPtr & book(Histo1DPtr &,const std::string& name,
const std::vector<double>& binedges,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D histogram with non-uniform bins defined by the vector of bin edges @a binedges .
Histo1DPtr & book(Histo1DPtr &,const std::string& name,
const std::initializer_list<double>& binedges,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D histogram with binning from a reference scatter.
Histo1DPtr & book(Histo1DPtr &,const std::string& name,
const Scatter2D& refscatter,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D histogram, using the binnings in the reference data histogram.
Histo1DPtr & book(Histo1DPtr &,const std::string& name,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D histogram, using the binnings in the reference data histogram.
///
/// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
Histo1DPtr & book(Histo1DPtr &,unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
//@}
/// @name 2D histogram booking
//@{
/// Book a 2D histogram with @a nxbins and @a nybins uniformly
/// distributed across the ranges @a xlower - @a xupper and @a
/// ylower - @a yupper respectively along the x- and y-axis.
Histo2DPtr & book(Histo2DPtr &,const std::string& name,
size_t nxbins, double xlower, double xupper,
size_t nybins, double ylower, double yupper,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="",
const std::string& ztitle="");
/// Book a 2D histogram with non-uniform bins defined by the
/// vectors of bin edges @a xbinedges and @a ybinedges.
Histo2DPtr & book(Histo2DPtr &,const std::string& name,
const std::vector<double>& xbinedges,
const std::vector<double>& ybinedges,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="",
const std::string& ztitle="");
/// Book a 2D histogram with non-uniform bins defined by the
/// vectors of bin edges @a xbinedges and @a ybinedges.
Histo2DPtr & book(Histo2DPtr &,const std::string& name,
const std::initializer_list<double>& xbinedges,
const std::initializer_list<double>& ybinedges,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="",
const std::string& ztitle="");
// /// Book a 2D histogram with binning from a reference scatter.
// Histo2DPtr bookHisto2D(const std::string& name,
// const Scatter3D& refscatter,
// const std::string& title="",
// const std::string& xtitle="",
// const std::string& ytitle="",
// const std::string& ztitle="");
// /// Book a 2D histogram, using the binnings in the reference data histogram.
// Histo2DPtr bookHisto2D(const std::string& name,
// const std::string& title="",
// const std::string& xtitle="",
// const std::string& ytitle="",
// const std::string& ztitle="");
// /// Book a 2D histogram, using the binnings in the reference data histogram.
// ///
// /// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
// Histo2DPtr bookHisto2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
// const std::string& title="",
// const std::string& xtitle="",
// const std::string& ytitle="",
// const std::string& ztitle="");
//@}
/// @name 1D profile histogram booking
//@{
/// Book a 1D profile histogram with @a nbins uniformly distributed across the range @a lower - @a upper .
Profile1DPtr & book(Profile1DPtr &, const std::string& name,
size_t nbins, double lower, double upper,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D profile histogram with non-uniform bins defined by the vector of bin edges @a binedges .
Profile1DPtr & book(Profile1DPtr &, const std::string& name,
const std::vector<double>& binedges,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D profile histogram with non-uniform bins defined by the vector of bin edges @a binedges .
Profile1DPtr & book(Profile1DPtr &, const std::string& name,
const std::initializer_list<double>& binedges,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D profile histogram with binning from a reference scatter.
Profile1DPtr & book(Profile1DPtr &, const std::string& name,
const Scatter2D& refscatter,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D profile histogram, using the binnings in the reference data histogram.
Profile1DPtr & book(Profile1DPtr &, const std::string& name,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// Book a 1D profile histogram, using the binnings in the reference data histogram.
///
/// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
Profile1DPtr & book(Profile1DPtr &, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
//@}
/// @name 2D profile histogram booking
//@{
/// Book a 2D profile histogram with @a nxbins and @a nybins uniformly
/// distributed across the ranges @a xlower - @a xupper and @a ylower - @a
/// yupper respectively along the x- and y-axis.
Profile2DPtr & book(Profile2DPtr &, const std::string& name,
size_t nxbins, double xlower, double xupper,
size_t nybins, double ylower, double yupper,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="",
const std::string& ztitle="");
/// Book a 2D profile histogram with non-uniform bins defined by the vectors
/// of bin edges @a xbinedges and @a ybinedges.
Profile2DPtr & book(Profile2DPtr &, const std::string& name,
const std::vector<double>& xbinedges,
const std::vector<double>& ybinedges,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="",
const std::string& ztitle="");
/// Book a 2D profile histogram with non-uniform bins defined by the vectors
/// of bin edges @a xbinedges and @a ybinedges.
Profile2DPtr & book(Profile2DPtr &, const std::string& name,
const std::initializer_list<double>& xbinedges,
const std::initializer_list<double>& ybinedges,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="",
const std::string& ztitle="");
/// Book a 2D profile histogram with binning from a reference scatter.
// Profile2DPtr bookProfile2D(const std::string& name,
// const Scatter3D& refscatter,
// const std::string& title="",
// const std::string& xtitle="",
// const std::string& ytitle="",
// const std::string& ztitle="");
// /// Book a 2D profile histogram, using the binnings in the reference data histogram.
// Profile2DPtr bookProfile2D(const std::string& name,
// const std::string& title="",
// const std::string& xtitle="",
// const std::string& ytitle="",
// const std::string& ztitle="");
// /// Book a 2D profile histogram, using the binnings in the reference data histogram.
// ///
// /// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
// Profile2DPtr bookProfile2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
// const std::string& title="",
// const std::string& xtitle="",
// const std::string& ytitle="",
// const std::string& ztitle="");
//@}
/// @name 2D scatter booking
//@{
/// @brief Book a 2-dimensional data point set with the given name.
///
/// @note Unlike histogram booking, scatter booking by default makes no
/// attempt to use reference data to pre-fill the data object. If you want
/// this, which is sometimes useful e.g. when the x-position is not really
/// meaningful and can't be extracted from the data, then set the @a
/// copy_pts parameter to true. This creates points to match the reference
/// data's x values and errors, but with the y values and errors zeroed...
/// assuming that there is a reference histo with the same name: if there
/// isn't, an exception will be thrown.
Scatter2DPtr & book(Scatter2DPtr & s2d, const string& hname,
bool copy_pts=false,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
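/// A sketch (assumes reference data named "d01-x01-y01" exists for this
/// analysis):
/// @code
/// Scatter2DPtr _s;
/// book(_s, "d01-x01-y01", true);  // x points/errors copied from ref, y zeroed
/// @endcode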
/// @brief Book a 2-dimensional data point set, using the binnings in the reference data histogram.
///
/// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
///
/// @note Unlike histogram booking, scatter booking by default makes no
/// attempt to use reference data to pre-fill the data object. If you want
/// this, which is sometimes useful e.g. when the x-position is not really
/// meaningful and can't be extracted from the data, then set the @a
/// copy_pts parameter to true. This creates points to match the reference
/// data's x values and errors, but with the y values and errors zeroed.
Scatter2DPtr & book(Scatter2DPtr & s2d, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
bool copy_pts=false,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// @brief Book a 2-dimensional data point set with equally spaced x-points in a range.
///
/// The y values and errors will be set to 0.
Scatter2DPtr & book(Scatter2DPtr & s2d, const string& hname,
size_t npts, double lower, double upper,
const std::string& title="",
const std::string& xtitle="",
const std::string& ytitle="");
/// @brief Book a 2-dimensional data point set based on provided contiguous "bin edges".
///
/// The y values and errors will be set to 0.
Scatter2DPtr & book(Scatter2DPtr & s2d, const string& hname,
const std::vector<double>& binedges,
const std::string& title,
const std::string& xtitle,
const std::string& ytitle);
//@}
+ private:
+ /// to be used in finalize context only
+ class CounterAdapter {
+ public:
+ CounterAdapter(double x) : x_(x) {}
+
+ CounterAdapter(const YODA::Counter& c) : x_(c.val()) {}
+
+ // CounterAdapter(CounterPtr cp) : x_(cp->val() ) {}
+
+ CounterAdapter(const YODA::Scatter1D & s) : x_(s.points()[0].x()) {
+ assert( s.numPoints() == 1 && "Can only scale by a single value.");
+ }
+
+ // CounterAdapter(Scatter1DPtr sp) : x_(sp->points()[0].x()) {
+ // assert( sp->numPoints() == 1 && "Can only scale by a single value.");
+ // }
+
+ operator double() const { return x_; }
+
+ private:
+ double x_;
+ };
+
public:
+ double dbl(double x) { return x; }
+ double dbl(const YODA::Counter & c) { return c.val(); }
+ double dbl(const YODA::Scatter1D & s) {
+ assert( s.numPoints() == 1 );
+ return s.points()[0].x();
+ }
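+ /// With the CounterAdapter conversions above, the scale() and normalize()
+ /// helpers below accept plain doubles, dereferenced counters, or
+ /// single-point scatters interchangeably. A finalize() sketch (the
+ /// histogram and counter names are illustrative):
+ /// @code
+ /// void finalize() {
+ ///   scale(_h_eta, 1.0 / *_sumofweights);  // scale by 1/sum-of-weights counter
+ ///   normalize(_h_mult);                   // plain doubles still work (norm=1.0)
+ /// }
+ /// @endcode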
/// @name Analysis object manipulation
/// @todo Should really be protected: only public to keep BinnedHistogram happy for now...
//@{
/// Multiplicatively scale the given counter, @a cnt, by factor @a factor.
- void scale(CounterPtr cnt, double factor);
+ void scale(CounterPtr cnt, CounterAdapter factor);
/// Multiplicatively scale the given counters, @a cnts, by factor @a factor.
/// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh)
/// @todo Use SFINAE for a generic iterable of CounterPtrs
- void scale(const std::vector<CounterPtr>& cnts, double factor) {
+ void scale(const std::vector<CounterPtr>& cnts, CounterAdapter factor) {
for (auto& c : cnts) scale(c, factor);
}
/// @todo YUCK!
template <std::size_t array_size>
- void scale(const CounterPtr (&cnts)[array_size], double factor) {
+ void scale(const CounterPtr (&cnts)[array_size], CounterAdapter factor) {
// for (size_t i = 0; i < std::extent<decltype(cnts)>::value; ++i) scale(cnts[i], factor);
for (auto& c : cnts) scale(c, factor);
}
/// Normalize the given histogram, @a histo, to area = @a norm.
- void normalize(Histo1DPtr histo, double norm=1.0, bool includeoverflows=true);
+ void normalize(Histo1DPtr histo, CounterAdapter norm=1.0, bool includeoverflows=true);
/// Normalize the given histograms, @a histos, to area = @a norm.
/// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh)
/// @todo Use SFINAE for a generic iterable of Histo1DPtrs
- void normalize(const std::vector<Histo1DPtr>& histos, double norm=1.0, bool includeoverflows=true) {
+ void normalize(const std::vector<Histo1DPtr>& histos, CounterAdapter norm=1.0, bool includeoverflows=true) {
for (auto& h : histos) normalize(h, norm, includeoverflows);
}
/// @todo YUCK!
template <std::size_t array_size>
- void normalize(const Histo1DPtr (&histos)[array_size], double norm=1.0, bool includeoverflows=true) {
+ void normalize(const Histo1DPtr (&histos)[array_size], CounterAdapter norm=1.0, bool includeoverflows=true) {
for (auto& h : histos) normalize(h, norm, includeoverflows);
}
/// Multiplicatively scale the given histogram, @a histo, by factor @a factor.
- void scale(Histo1DPtr histo, double factor);
+ void scale(Histo1DPtr histo, CounterAdapter factor);
/// Multiplicatively scale the given histograms, @a histos, by factor @a factor.
/// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh)
/// @todo Use SFINAE for a generic iterable of Histo1DPtrs
- void scale(const std::vector<Histo1DPtr>& histos, double factor) {
+ void scale(const std::vector<Histo1DPtr>& histos, CounterAdapter factor) {
for (auto& h : histos) scale(h, factor);
}
/// @todo YUCK!
template <std::size_t array_size>
- void scale(const Histo1DPtr (&histos)[array_size], double factor) {
+ void scale(const Histo1DPtr (&histos)[array_size], CounterAdapter factor) {
for (auto& h : histos) scale(h, factor);
}
/// Normalize the given histogram, @a histo, to area = @a norm.
- void normalize(Histo2DPtr histo, double norm=1.0, bool includeoverflows=true);
+ void normalize(Histo2DPtr histo, CounterAdapter norm=1.0, bool includeoverflows=true);
/// Normalize the given histograms, @a histos, to area = @a norm.
/// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh)
/// @todo Use SFINAE for a generic iterable of Histo2DPtrs
- void normalize(const std::vector<Histo2DPtr>& histos, double norm=1.0, bool includeoverflows=true) {
+ void normalize(const std::vector<Histo2DPtr>& histos, CounterAdapter norm=1.0, bool includeoverflows=true) {
for (auto& h : histos) normalize(h, norm, includeoverflows);
}
/// @todo YUCK!
template <std::size_t array_size>
- void normalize(const Histo2DPtr (&histos)[array_size], double norm=1.0, bool includeoverflows=true) {
+ void normalize(const Histo2DPtr (&histos)[array_size], CounterAdapter norm=1.0, bool includeoverflows=true) {
for (auto& h : histos) normalize(h, norm, includeoverflows);
}
/// Multiplicatively scale the given histogram, @a histo, by factor @a factor.
- void scale(Histo2DPtr histo, double factor);
+ void scale(Histo2DPtr histo, CounterAdapter factor);
/// Multiplicatively scale the given histograms, @a histos, by factor @a factor.
/// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh)
/// @todo Use SFINAE for a generic iterable of Histo2DPtrs
- void scale(const std::vector<Histo2DPtr>& histos, double factor) {
+ void scale(const std::vector<Histo2DPtr>& histos, CounterAdapter factor) {
for (auto& h : histos) scale(h, factor);
}
/// @todo YUCK!
template <std::size_t array_size>
- void scale(const Histo2DPtr (&histos)[array_size], double factor) {
+ void scale(const Histo2DPtr (&histos)[array_size], CounterAdapter factor) {
for (auto& h : histos) scale(h, factor);
}
/// Helper for counter division.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(CounterPtr c1, CounterPtr c2, Scatter1DPtr s) const;
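/// A finalize() sketch (names illustrative): fill an already-booked ratio
/// scatter from two booked counters, preserving the scatter's path:
/// @code
/// divide(_c_passed, _c_total, _s_ratio);
/// @endcode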
/// Helper for histogram division with raw YODA objects.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(const YODA::Counter& c1, const YODA::Counter& c2, Scatter1DPtr s) const;
/// Helper for histogram division.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const;
/// Helper for histogram division with raw YODA objects.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(const YODA::Histo1D& h1, const YODA::Histo1D& h2, Scatter2DPtr s) const;
/// Helper for profile histogram division.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(Profile1DPtr p1, Profile1DPtr p2, Scatter2DPtr s) const;
/// Helper for profile histogram division with raw YODA objects.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(const YODA::Profile1D& p1, const YODA::Profile1D& p2, Scatter2DPtr s) const;
/// Helper for 2D histogram division.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(Histo2DPtr h1, Histo2DPtr h2, Scatter3DPtr s) const;
/// Helper for 2D histogram division with raw YODA objects.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(const YODA::Histo2D& h1, const YODA::Histo2D& h2, Scatter3DPtr s) const;
/// Helper for 2D profile histogram division.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(Profile2DPtr p1, Profile2DPtr p2, Scatter3DPtr s) const;
/// Helper for 2D profile histogram division with raw YODA objects
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(const YODA::Profile2D& p1, const YODA::Profile2D& p2, Scatter3DPtr s) const;
/// Helper for histogram efficiency calculation.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void efficiency(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const;
/// Helper for histogram efficiency calculation.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void efficiency(const YODA::Histo1D& h1, const YODA::Histo1D& h2, Scatter2DPtr s) const;
/// Helper for histogram asymmetry calculation.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void asymm(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const;
/// Helper for histogram asymmetry calculation.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void asymm(const YODA::Histo1D& h1, const YODA::Histo1D& h2, Scatter2DPtr s) const;
/// Helper for converting a differential histo to an integral one.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void integrate(Histo1DPtr h, Scatter2DPtr s) const;
/// Helper for converting a differential histo to an integral one.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void integrate(const Histo1D& h, Scatter2DPtr s) const;
//@}
public:
/// List of registered analysis data objects
const vector<MultiweightAOPtr>& analysisObjects() const {
return _analysisobjects;
}
protected:
/// @name Data object registration, and removal
//@{
/// Register a data object in the histogram system
void addAnalysisObject(const MultiweightAOPtr & ao);
/// Unregister a data object from the histogram system (by name)
void removeAnalysisObject(const std::string& path);
/// Unregister a data object from the histogram system (by pointer)
void removeAnalysisObject(const MultiweightAOPtr & ao);
//@}
private:
/// Name passed to constructor (used to find .info analysis data file, and as a fallback)
string _defaultname;
/// Pointer to analysis metadata object
unique_ptr<AnalysisInfo> _info;
/// Storage of all plot objects
/// @todo Make this a map for fast lookup by path?
vector<MultiweightAOPtr> _analysisobjects;
/// @name Cross-section variables
//@{
double _crossSection;
//@}
/// The controlling AnalysisHandler object.
AnalysisHandler* _analysishandler;
/// Collection of cached refdata to speed up many autobookings: the
/// reference data file should only be read once.
mutable std::map<std::string, YODA::AnalysisObjectPtr> _refdata;
private:
/// @name Utility functions
//@{
/// Get the reference data for this paper and cache it.
void _cacheRefData() const;
//@}
/// The assignment operator is private and must never be called.
/// In fact, it should not even be implemented.
Analysis& operator=(const Analysis&);
};
}
// Include definition of analysis plugin system so that analyses automatically see it when including Analysis.hh
#include "Rivet/AnalysisBuilder.hh"
/// @def DECLARE_RIVET_PLUGIN
/// Preprocessor define to prettify the global-object plugin hook mechanism.
#define DECLARE_RIVET_PLUGIN(clsname) Rivet::AnalysisBuilder<clsname> plugin_ ## clsname
/// @def DECLARE_ALIASED_RIVET_PLUGIN
/// Preprocessor define to prettify the global-object plugin hook mechanism, with an extra alias name for this analysis.
// #define DECLARE_ALIASED_RIVET_PLUGIN(clsname, alias) Rivet::AnalysisBuilder<clsname> plugin_ ## clsname ## ( ## #alias ## )
#define DECLARE_ALIASED_RIVET_PLUGIN(clsname, alias) DECLARE_RIVET_PLUGIN(clsname)( #alias )
/// @def DEFAULT_RIVET_ANALYSIS_CONSTRUCTOR
/// Preprocessor define to prettify the manky constructor with name string argument
#define DEFAULT_RIVET_ANALYSIS_CONSTRUCTOR(clsname) clsname() : Analysis(# clsname) {}
/// @def DEFAULT_RIVET_ANALYSIS_CTOR
/// Slight abbreviation for DEFAULT_RIVET_ANALYSIS_CONSTRUCTOR
#define DEFAULT_RIVET_ANALYSIS_CTOR(clsname) DEFAULT_RIVET_ANALYSIS_CONSTRUCTOR(clsname)
#endif
diff --git a/src/Core/Analysis.cc b/src/Core/Analysis.cc
--- a/src/Core/Analysis.cc
+++ b/src/Core/Analysis.cc
@@ -1,857 +1,857 @@
// -*- C++ -*-
#include "Rivet/Config/RivetCommon.hh"
#include "Rivet/Analysis.hh"
#include "Rivet/AnalysisHandler.hh"
#include "Rivet/AnalysisInfo.hh"
#include "Rivet/Tools/BeamConstraint.hh"
// #include "DummyConfig.hh"
// #ifdef HAVE_EXECINFO_H
// #include <execinfo.h>
// #endif
namespace Rivet {
Analysis::Analysis(const string& name)
: _analysishandler(NULL)
{
ProjectionApplier::_allowProjReg = false;
_defaultname = name;
unique_ptr<AnalysisInfo> ai = AnalysisInfo::make(name);
assert(ai);
_info = move(ai);
assert(_info);
}
double Analysis::sqrtS() const {
return handler().sqrtS();
}
const ParticlePair& Analysis::beams() const {
return handler().beams();
}
const PdgIdPair Analysis::beamIds() const {
return handler().beamIds();
}
const string Analysis::histoDir() const {
/// @todo Cache in a member variable
string dir = "/" + name();
if (handler().runName().length() > 0) {
dir = "/" + handler().runName() + dir;
}
replace_all(dir, "//", "/"); //< iterates until none
return dir;
}
const string Analysis::histoPath(const string& hname) const {
const string path = histoDir() + "/" + hname;
return path;
}
const string Analysis::histoPath(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
return histoDir() + "/" + mkAxisCode(datasetId, xAxisId, yAxisId);
}
const string Analysis::mkAxisCode(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
stringstream axisCode;
axisCode << "d";
if (datasetId < 10) axisCode << 0;
axisCode << datasetId;
axisCode << "-x";
if (xAxisId < 10) axisCode << 0;
axisCode << xAxisId;
axisCode << "-y";
if (yAxisId < 10) axisCode << 0;
axisCode << yAxisId;
return axisCode.str();
}
Log& Analysis::getLog() const {
string logname = "Rivet.Analysis." + name();
return Log::getLog(logname);
}
///////////////////////////////////////////
size_t Analysis::numEvents() const {
return handler().numEvents();
}
double Analysis::sumOfWeights() const {
return handler().sumOfWeights();
}
///////////////////////////////////////////
bool Analysis::isCompatible(const ParticlePair& beams) const {
return isCompatible(beams.first.pid(), beams.second.pid(),
beams.first.energy(), beams.second.energy());
}
bool Analysis::isCompatible(PdgId beam1, PdgId beam2, double e1, double e2) const {
PdgIdPair beams(beam1, beam2);
pair<double,double> energies(e1, e2);
return isCompatible(beams, energies);
}
bool Analysis::isCompatible(const PdgIdPair& beams, const pair<double,double>& energies) const {
// First check the beam IDs
bool beamIdsOk = false;
foreach (const PdgIdPair& bp, requiredBeams()) {
if (compatible(beams, bp)) {
beamIdsOk = true;
break;
}
}
if (!beamIdsOk) return false;
// Next check that the energies are compatible (within 1% or 1 GeV, whichever is larger, for a bit of UI forgiveness)
/// @todo Use some sort of standard ordering to improve comparisons, esp. when the two beams are different particles
bool beamEnergiesOk = requiredEnergies().empty();
typedef pair<double,double> DoublePair;
foreach (const DoublePair& ep, requiredEnergies()) {
if ((fuzzyEquals(ep.first, energies.first, 0.01) && fuzzyEquals(ep.second, energies.second, 0.01)) ||
(fuzzyEquals(ep.first, energies.second, 0.01) && fuzzyEquals(ep.second, energies.first, 0.01)) ||
(abs(ep.first - energies.first) < 1*GeV && abs(ep.second - energies.second) < 1*GeV) ||
(abs(ep.first - energies.second) < 1*GeV && abs(ep.second - energies.first) < 1*GeV)) {
beamEnergiesOk = true;
break;
}
}
return beamEnergiesOk;
/// @todo Need to also check internal consistency of the analysis'
/// beam requirements with those of the projections it uses.
}
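// Illustrative numbers: with requiredEnergies() = {(7000, 7000)}, beam
// energies of (6950, 6950) GeV are accepted (within the 1% tolerance),
// while (6000, 6000) GeV are rejected.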
///////////////////////////////////////////
double Analysis::crossSection() const {
const YODA::Scatter1D::Points& ps = handler().crossSection()->points();
if (ps.size() != 1) {
string errMsg = "cross section missing for analysis " + name();
throw Error(errMsg);
}
return ps[0].x();
}
double Analysis::crossSectionPerEvent() const {
return crossSection()/sumOfWeights();
}
////////////////////////////////////////////////////////////
// Histogramming
void Analysis::_cacheRefData() const {
if (_refdata.empty()) {
MSG_TRACE("Getting refdata cache for paper " << name());
_refdata = getRefData(name());
}
}
CounterPtr & Analysis::book(CounterPtr & ctr,
const string& cname,
const string& title) {
const string path = histoPath(cname);
ctr = CounterPtr(handler().weightNames(), Counter(path, title));
addAnalysisObject(ctr);
MSG_TRACE("Made counter " << cname << " for " << name());
return ctr;
}
CounterPtr & Analysis::book(CounterPtr & ctr, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
const string& title) {
// const string& xtitle,
// const string& ytitle) {
const string axisCode = mkAxisCode(datasetId, xAxisId, yAxisId);
return book(ctr, axisCode, title);
}
Histo1DPtr & Analysis::book(Histo1DPtr & histo, const string& hname,
size_t nbins, double lower, double upper,
const string& title,
const string& xtitle,
const string& ytitle) {
const string path = histoPath(hname);
Histo1D hist = Histo1D(nbins, lower, upper, path, title);
hist.setAnnotation("XLabel", xtitle);
hist.setAnnotation("YLabel", ytitle);
histo = Histo1DPtr(handler().weightNames(), hist);
addAnalysisObject(histo);
MSG_TRACE("Made histogram " << hname << " for " << name());
return histo;
// Histo1DPtr hist;
// try { // try to bind to pre-existing
// // AnalysisObjectPtr ao = getAnalysisObject(path);
// // hist = dynamic_pointer_cast<Histo1D>(ao);
// hist = getHisto1D(hname);
// /// @todo Test that cast worked
// /// @todo Also test that binning is as expected?
// MSG_TRACE("Bound pre-existing histogram " << hname << " for " << name());
// } catch (...) { // binding failed; make it from scratch
// hist = make_shared<Histo1D>(nbins, lower, upper, histoPath(hname), title);
// addAnalysisObject(hist);
// MSG_TRACE("Made histogram " << hname << " for " << name());
// }
}
Histo1DPtr & Analysis::book(Histo1DPtr & histo, const string& hname,
const initializer_list<double>& binedges,
const string& title,
const string& xtitle,
const string& ytitle) {
return book(histo, hname, vector<double>{binedges}, title, xtitle, ytitle);
}
Histo1DPtr & Analysis::book(Histo1DPtr & histo, const string& hname,
const vector<double>& binedges,
const string& title,
const string& xtitle,
const string& ytitle) {
const string path = histoPath(hname);
// Histo1DPtr hist;
// try { // try to bind to pre-existing
// // AnalysisObjectPtr ao = getAnalysisObject(path);
// // hist = dynamic_pointer_cast<Histo1D>(ao);
// hist = getHisto1D(hname);
// /// @todo Test that cast worked
// /// @todo Also test that binning is as expected?
// MSG_TRACE("Bound pre-existing histogram " << hname << " for " << name());
// } catch (...) { // binding failed; make it from scratch
// hist = make_shared<Histo1D>(binedges, histoPath(hname), title);
// addAnalysisObject(hist);
// MSG_TRACE("Made histogram " << hname << " for " << name());
// }
Histo1D hist = Histo1D(binedges, path, title);
hist.setAnnotation("XLabel", xtitle);
hist.setAnnotation("YLabel", ytitle);
histo = Histo1DPtr(handler().weightNames(), hist);
addAnalysisObject(histo);
MSG_TRACE("Made histogram " << hname << " for " << name());
return histo;
}
Histo1DPtr & Analysis::book(Histo1DPtr & histo, const string& hname,
const Scatter2D& refscatter,
const string& title,
const string& xtitle,
const string& ytitle) {
const string path = histoPath(hname);
// Histo1DPtr hist;
// try { // try to bind to pre-existing
// // AnalysisObjectPtr ao = getAnalysisObject(path);
// // hist = dynamic_pointer_cast<Histo1D>(ao);
// hist = getHisto1D(hname);
// /// @todo Test that cast worked
// /// @todo Also test that binning is as expected?
// MSG_TRACE("Bound pre-existing histogram " << hname << " for " << name());
// } catch (...) { // binding failed; make it from scratch
// hist = make_shared<Histo1D>(refscatter, histoPath(hname));
// if (hist->hasAnnotation("IsRef")) hist->rmAnnotation("IsRef");
// addAnalysisObject(hist);
// MSG_TRACE("Made histogram " << hname << " for " << name());
// }
Histo1D hist = Histo1D(refscatter, path);
hist.setTitle(title);
hist.setAnnotation("XLabel", xtitle);
hist.setAnnotation("YLabel", ytitle);
histo = Histo1DPtr(handler().weightNames(), hist);
addAnalysisObject(histo);
MSG_TRACE("Made histogram " << hname << " for " << name());
return histo;
}
Histo1DPtr & Analysis::book(Histo1DPtr & histo, const string& hname,
const string& title,
const string& xtitle,
const string& ytitle) {
const Scatter2D& refdata = refData(hname);
return book(histo, hname, refdata, title, xtitle, ytitle);
}
Histo1DPtr & Analysis::book(Histo1DPtr & histo, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
const string& title,
const string& xtitle,
const string& ytitle) {
const string axisCode = mkAxisCode(datasetId, xAxisId, yAxisId);
return book(histo, axisCode, title, xtitle, ytitle);
}
/// @todo Add booking methods which take a path, titles and *a reference Scatter from which to book*
/////////////////
Histo2DPtr & Analysis::book(Histo2DPtr & h2d,const string& hname,
size_t nxbins, double xlower, double xupper,
size_t nybins, double ylower, double yupper,
const string& title,
const string& xtitle,
const string& ytitle,
const string& ztitle)
{
const string path = histoPath(hname);
Histo2D hist(nxbins, xlower, xupper, nybins, ylower, yupper, path, title);
hist.setAnnotation("XLabel", xtitle);
hist.setAnnotation("YLabel", ytitle);
hist.setAnnotation("ZLabel", ztitle);
h2d = Histo2DPtr(handler().weightNames(), hist);
addAnalysisObject(h2d);
MSG_TRACE("Made 2D histogram " << hname << " for " << name());
return h2d;
}
Histo2DPtr & Analysis::book(Histo2DPtr & h2d,const string& hname,
const initializer_list<double>& xbinedges,
const initializer_list<double>& ybinedges,
const string& title,
const string& xtitle,
const string& ytitle,
const string& ztitle)
{
return book(h2d, hname, vector<double>{xbinedges}, vector<double>{ybinedges}, title, xtitle, ytitle, ztitle);
}
Histo2DPtr & Analysis::book(Histo2DPtr & h2d,const string& hname,
const vector<double>& xbinedges,
const vector<double>& ybinedges,
const string& title,
const string& xtitle,
const string& ytitle,
const string& ztitle)
{
const string path = histoPath(hname);
Histo2D hist(xbinedges, ybinedges, path, title);
hist.setAnnotation("XLabel", xtitle);
hist.setAnnotation("YLabel", ytitle);
hist.setAnnotation("ZLabel", ztitle);
h2d = Histo2DPtr(handler().weightNames(), hist);
addAnalysisObject(h2d);
MSG_TRACE("Made 2D histogram " << hname << " for " << name());
return h2d;
}
Profile1DPtr & Analysis::book(Profile1DPtr & p1d,const string& hname,
size_t nbins, double lower, double upper,
const string& title,
const string& xtitle,
const string& ytitle) {
const string path = histoPath(hname);
Profile1D prof(nbins, lower, upper, path, title);
prof.setAnnotation("XLabel", xtitle);
prof.setAnnotation("YLabel", ytitle);
p1d = Profile1DPtr(handler().weightNames(), prof);
addAnalysisObject(p1d);
MSG_TRACE("Made profile histogram " << hname << " for " << name());
return p1d;
}
Profile1DPtr & Analysis::book(Profile1DPtr & p1d,const string& hname,
const initializer_list<double>& binedges,
const string& title,
const string& xtitle,
const string& ytitle) {
return book(p1d, hname, vector<double>{binedges}, title, xtitle, ytitle);
}
Profile1DPtr & Analysis::book(Profile1DPtr & p1d,const string& hname,
const vector<double>& binedges,
const string& title,
const string& xtitle,
const string& ytitle) {
const string path = histoPath(hname);
Profile1D prof(binedges, path, title);
prof.setAnnotation("XLabel", xtitle);
prof.setAnnotation("YLabel", ytitle);
p1d = Profile1DPtr(handler().weightNames(), prof);
addAnalysisObject(p1d);
MSG_TRACE("Made profile histogram " << hname << " for " << name());
return p1d;
}
Profile1DPtr & Analysis::book(Profile1DPtr & p1d,const string& hname,
const Scatter2D& refscatter,
const string& title,
const string& xtitle,
const string& ytitle) {
const string path = histoPath(hname);
Profile1D prof(refscatter, path);
prof.setTitle(title);
prof.setAnnotation("XLabel", xtitle);
prof.setAnnotation("YLabel", ytitle);
p1d = Profile1DPtr(handler().weightNames(), prof);
addAnalysisObject(p1d);
MSG_TRACE("Made profile histogram " << hname << " for " << name());
return p1d;
// if (prof.hasAnnotation("IsRef")) prof.rmAnnotation("IsRef");
}
Profile1DPtr & Analysis::book(Profile1DPtr & p1d,const string& hname,
const string& title,
const string& xtitle,
const string& ytitle) {
const Scatter2D& refdata = refData(hname);
book(p1d, hname, refdata, title, xtitle, ytitle);
return p1d;
}
Profile1DPtr & Analysis::book(Profile1DPtr & p1d,unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
const string& title,
const string& xtitle,
const string& ytitle) {
const string axisCode = mkAxisCode(datasetId, xAxisId, yAxisId);
return book(p1d, axisCode, title, xtitle, ytitle);
}
Profile2DPtr & Analysis::book(Profile2DPtr & p2d, const string& hname,
size_t nxbins, double xlower, double xupper,
size_t nybins, double ylower, double yupper,
const string& title,
const string& xtitle,
const string& ytitle,
const string& ztitle)
{
const string path = histoPath(hname);
Profile2D prof(nxbins, xlower, xupper, nybins, ylower, yupper, path, title);
prof.setAnnotation("XLabel", xtitle);
prof.setAnnotation("YLabel", ytitle);
prof.setAnnotation("ZLabel", ztitle);
p2d = Profile2DPtr(handler().weightNames(), prof);
addAnalysisObject(p2d);
MSG_TRACE("Made 2D profile histogram " << hname << " for " << name());
return p2d;
}
Profile2DPtr & Analysis::book(Profile2DPtr & p2d, const string& hname,
const initializer_list<double>& xbinedges,
const initializer_list<double>& ybinedges,
const string& title,
const string& xtitle,
const string& ytitle,
const string& ztitle)
{
return book(p2d, hname, vector<double>{xbinedges}, vector<double>{ybinedges}, title, xtitle, ytitle, ztitle);
}
Profile2DPtr & Analysis::book(Profile2DPtr & p2d, const string& hname,
const vector<double>& xbinedges,
const vector<double>& ybinedges,
const string& title,
const string& xtitle,
const string& ytitle,
const string& ztitle)
{
const string path = histoPath(hname);
Profile2D prof(xbinedges, ybinedges, path, title);
prof.setAnnotation("XLabel", xtitle);
prof.setAnnotation("YLabel", ytitle);
prof.setAnnotation("ZLabel", ztitle);
p2d = Profile2DPtr(handler().weightNames(), prof);
addAnalysisObject(p2d);
MSG_TRACE("Made 2D profile histogram " << hname << " for " << name());
return p2d;
}
Scatter2DPtr & Analysis::book(Scatter2DPtr & s2d, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
bool copy_pts,
const string& title,
const string& xtitle,
const string& ytitle) {
const string axisCode = mkAxisCode(datasetId, xAxisId, yAxisId);
return book(s2d, axisCode, copy_pts, title, xtitle, ytitle);
}
Scatter2DPtr & Analysis::book(Scatter2DPtr & s2d, const string& hname,
bool copy_pts,
const string& title,
const string& xtitle,
const string& ytitle) {
Scatter2D scat;
const string path = histoPath(hname);
if (copy_pts) {
const Scatter2D& refdata = refData(hname);
scat = Scatter2D(refdata, path);
for (Point2D& p : scat.points()) p.setY(0, 0);
} else {
scat = Scatter2D(path);
}
scat.setTitle(title);
scat.setAnnotation("XLabel", xtitle);
scat.setAnnotation("YLabel", ytitle);
s2d = Scatter2DPtr(handler().weightNames(), scat);
addAnalysisObject(s2d);
MSG_TRACE("Made scatter " << hname << " for " << name());
// if (scat.hasAnnotation("IsRef")) scat.rmAnnotation("IsRef");
return s2d;
}
Scatter2DPtr & Analysis::book(Scatter2DPtr & s2d, const string& hname,
size_t npts, double lower, double upper,
const string& title,
const string& xtitle,
const string& ytitle) {
// TODO: the default branch has a read mechanism implemented, to start from an existing AO;
// need to work out how to implement that for multiweights.
const string path = histoPath(hname);
Scatter2D scat;
const double binwidth = (upper-lower)/npts;
for (size_t pt = 0; pt < npts; ++pt) {
const double bincentre = lower + (pt + 0.5) * binwidth;
scat.addPoint(bincentre, 0, binwidth/2.0, 0);
}
scat.setTitle(title);
scat.setAnnotation("XLabel", xtitle);
scat.setAnnotation("YLabel", ytitle);
s2d = Scatter2DPtr(handler().weightNames(), scat);
addAnalysisObject(s2d);
MSG_TRACE("Made scatter " << hname << " for " << name());
return s2d;
}
Scatter2DPtr & Analysis::book(Scatter2DPtr & s2d, const string& hname,
const vector<double>& binedges,
const string& title,
const string& xtitle,
const string& ytitle) {
const string path = histoPath(hname);
Scatter2D scat;
for (size_t pt = 0; pt < binedges.size()-1; ++pt) {
const double bincentre = (binedges[pt] + binedges[pt+1]) / 2.0;
const double binwidth = binedges[pt+1] - binedges[pt];
scat.addPoint(bincentre, 0, binwidth/2.0, 0);
}
scat.setTitle(title);
scat.setAnnotation("XLabel", xtitle);
scat.setAnnotation("YLabel", ytitle);
s2d = Scatter2DPtr(handler().weightNames(), scat);
addAnalysisObject(s2d);
MSG_TRACE("Made scatter " << hname << " for " << name());
return s2d;
}
void Analysis::divide(CounterPtr c1, CounterPtr c2, Scatter1DPtr s) const {
const string path = s->path();
*s = *c1 / *c2;
s->setPath(path);
}
void Analysis::divide(const Counter& c1, const Counter& c2, Scatter1DPtr s) const {
const string path = s->path();
*s = c1 / c2;
s->setPath(path);
}
void Analysis::divide(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const {
const string path = s->path();
*s = *h1 / *h2;
s->setPath(path);
}
void Analysis::divide(const Histo1D& h1, const Histo1D& h2, Scatter2DPtr s) const {
const string path = s->path();
*s = h1 / h2;
s->setPath(path);
}
void Analysis::divide(Profile1DPtr p1, Profile1DPtr p2, Scatter2DPtr s) const {
const string path = s->path();
*s = *p1 / *p2;
s->setPath(path);
}
void Analysis::divide(const Profile1D& p1, const Profile1D& p2, Scatter2DPtr s) const {
const string path = s->path();
*s = p1 / p2;
s->setPath(path);
}
void Analysis::divide(Histo2DPtr h1, Histo2DPtr h2, Scatter3DPtr s) const {
const string path = s->path();
*s = *h1 / *h2;
s->setPath(path);
}
void Analysis::divide(const Histo2D& h1, const Histo2D& h2, Scatter3DPtr s) const {
const string path = s->path();
*s = h1 / h2;
s->setPath(path);
}
void Analysis::divide(Profile2DPtr p1, Profile2DPtr p2, Scatter3DPtr s) const {
const string path = s->path();
*s = *p1 / *p2;
s->setPath(path);
}
void Analysis::divide(const Profile2D& p1, const Profile2D& p2, Scatter3DPtr s) const {
const string path = s->path();
*s = p1 / p2;
s->setPath(path);
}
/// @todo Counter and Histo2D efficiencies and asymms
void Analysis::efficiency(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const {
const string path = s->path();
*s = YODA::efficiency(*h1, *h2);
s->setPath(path);
}
void Analysis::efficiency(const Histo1D& h1, const Histo1D& h2, Scatter2DPtr s) const {
const string path = s->path();
*s = YODA::efficiency(h1, h2);
s->setPath(path);
}
void Analysis::asymm(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const {
const string path = s->path();
*s = YODA::asymm(*h1, *h2);
s->setPath(path);
}
void Analysis::asymm(const Histo1D& h1, const Histo1D& h2, Scatter2DPtr s) const {
const string path = s->path();
*s = YODA::asymm(h1, h2);
s->setPath(path);
}
- void Analysis::scale(CounterPtr cnt, double factor) {
+ void Analysis::scale(CounterPtr cnt, Analysis::CounterAdapter factor) {
if (!cnt) {
- MSG_WARNING("Failed to scale counter=NULL in analysis " << name() << " (scale=" << factor << ")");
+ MSG_WARNING("Failed to scale counter=NULL in analysis " << name() << " (scale=" << double(factor) << ")");
return;
}
if (std::isnan(factor) || std::isinf(factor)) {
- MSG_WARNING("Failed to scale counter=" << cnt->path() << " in analysis: " << name() << " (invalid scale factor = " << factor << ")");
+ MSG_WARNING("Failed to scale counter=" << cnt->path() << " in analysis: " << name() << " (invalid scale factor = " << double(factor) << ")");
factor = 0;
}
- MSG_TRACE("Scaling counter " << cnt->path() << " by factor " << factor);
+ MSG_TRACE("Scaling counter " << cnt->path() << " by factor " << double(factor));
try {
cnt->scaleW(factor);
} catch (YODA::Exception& we) {
MSG_WARNING("Could not scale counter " << cnt->path());
return;
}
}
- void Analysis::normalize(Histo1DPtr histo, double norm, bool includeoverflows) {
+ void Analysis::normalize(Histo1DPtr histo, Analysis::CounterAdapter norm, bool includeoverflows) {
if (!histo) {
- MSG_WARNING("Failed to normalize histo=NULL in analysis " << name() << " (norm=" << norm << ")");
+ MSG_WARNING("Failed to normalize histo=NULL in analysis " << name() << " (norm=" << double(norm) << ")");
return;
}
- MSG_TRACE("Normalizing histo " << histo->path() << " to " << norm);
+ MSG_TRACE("Normalizing histo " << histo->path() << " to " << double(norm));
try {
histo->normalize(norm, includeoverflows);
} catch (YODA::Exception& we) {
MSG_WARNING("Could not normalize histo " << histo->path());
return;
}
}
- void Analysis::scale(Histo1DPtr histo, double factor) {
+ void Analysis::scale(Histo1DPtr histo, Analysis::CounterAdapter factor) {
if (!histo) {
- MSG_WARNING("Failed to scale histo=NULL in analysis " << name() << " (scale=" << factor << ")");
+ MSG_WARNING("Failed to scale histo=NULL in analysis " << name() << " (scale=" << double(factor) << ")");
return;
}
if (std::isnan(factor) || std::isinf(factor)) {
- MSG_WARNING("Failed to scale histo=" << histo->path() << " in analysis: " << name() << " (invalid scale factor = " << factor << ")");
+ MSG_WARNING("Failed to scale histo=" << histo->path() << " in analysis: " << name() << " (invalid scale factor = " << double(factor) << ")");
factor = 0;
}
- MSG_TRACE("Scaling histo " << histo->path() << " by factor " << factor);
+ MSG_TRACE("Scaling histo " << histo->path() << " by factor " << double(factor));
try {
histo->scaleW(factor);
} catch (YODA::Exception& we) {
MSG_WARNING("Could not scale histo " << histo->path());
return;
}
}
- void Analysis::normalize(Histo2DPtr histo, double norm, bool includeoverflows) {
+ void Analysis::normalize(Histo2DPtr histo, Analysis::CounterAdapter norm, bool includeoverflows) {
if (!histo) {
- MSG_ERROR("Failed to normalize histo=NULL in analysis " << name() << " (norm=" << norm << ")");
+ MSG_ERROR("Failed to normalize histo=NULL in analysis " << name() << " (norm=" << double(norm) << ")");
return;
}
- MSG_TRACE("Normalizing histo " << histo->path() << " to " << norm);
+ MSG_TRACE("Normalizing histo " << histo->path() << " to " << double(norm));
try {
histo->normalize(norm, includeoverflows);
} catch (YODA::Exception& we) {
MSG_WARNING("Could not normalize histo " << histo->path());
return;
}
}
- void Analysis::scale(Histo2DPtr histo, double factor) {
+ void Analysis::scale(Histo2DPtr histo, Analysis::CounterAdapter factor) {
if (!histo) {
- MSG_ERROR("Failed to scale histo=NULL in analysis " << name() << " (scale=" << factor << ")");
+ MSG_ERROR("Failed to scale histo=NULL in analysis " << name() << " (scale=" << double(factor) << ")");
return;
}
if (std::isnan(factor) || std::isinf(factor)) {
- MSG_ERROR("Failed to scale histo=" << histo->path() << " in analysis: " << name() << " (invalid scale factor = " << factor << ")");
+ MSG_ERROR("Failed to scale histo=" << histo->path() << " in analysis: " << name() << " (invalid scale factor = " << double(factor) << ")");
factor = 0;
}
- MSG_TRACE("Scaling histo " << histo->path() << " by factor " << factor);
+ MSG_TRACE("Scaling histo " << histo->path() << " by factor " << double(factor));
try {
histo->scaleW(factor);
} catch (YODA::Exception& we) {
MSG_WARNING("Could not scale histo " << histo->path());
return;
}
}
void Analysis::integrate(Histo1DPtr h, Scatter2DPtr s) const {
// preserve the path info
const string path = s->path();
*s = toIntegralHisto(*h);
s->setPath(path);
}
void Analysis::integrate(const Histo1D& h, Scatter2DPtr s) const {
// preserve the path info
const string path = s->path();
*s = toIntegralHisto(h);
s->setPath(path);
}
}
/// @todo 2D versions of integrate... defined how, exactly?!?
//////////////////////////////////
namespace {
void errormsg(const std::string& name) {
// #ifdef HAVE_BACKTRACE
// void * buffer[4];
// backtrace(buffer, 4);
// backtrace_symbols_fd(buffer, 4 , 1);
// #endif
std::cerr << name << ": Can't book objects outside of init().\n";
assert(false);
}
}
namespace Rivet {
void Analysis::addAnalysisObject(const MultiweightAOPtr & ao) {
if (handler().stage() == AnalysisHandler::Stage::INIT) {
_analysisobjects.push_back(ao);
}
else {
errormsg(name());
}
}
void Analysis::removeAnalysisObject(const string& path) {
for (auto it = _analysisobjects.begin();
it != _analysisobjects.end(); ++it) {
if ((*it)->path() == path) {
_analysisobjects.erase(it);
break;
}
}
}
void Analysis::removeAnalysisObject(const MultiweightAOPtr & ao) {
for (auto it = _analysisobjects.begin(); it != _analysisobjects.end(); ++it) {
if ((*it) == ao) {
_analysisobjects.erase(it);
break;
}
}
}
}