
diff --git a/analyses/pluginALICE/ALICE_2012_I930312.cc b/analyses/pluginALICE/ALICE_2012_I930312.cc
--- a/analyses/pluginALICE/ALICE_2012_I930312.cc
+++ b/analyses/pluginALICE/ALICE_2012_I930312.cc
@@ -1,335 +1,316 @@
// -*- C++ -*-
#include "Rivet/Projections/ChargedFinalState.hh"
-#include "Rivet/Tools/Cuts.hh"
#include "Rivet/Projections/SingleValueProjection.hh"
#include "Rivet/Tools/AliceCommon.hh"
#include "Rivet/Projections/AliceCommon.hh"
-#include <fstream>
-
-#define _USE_MATH_DEFINES
-#include <math.h>
namespace Rivet {
-
- /// Analysis
+
+
class ALICE_2012_I930312 : public Analysis {
-
public:
-
+
+ // Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(ALICE_2012_I930312);
-
+
+
/// Book histograms and initialise projections before the run
void init() {
// Declare centrality projection
- declareCentrality(ALICE::V0MMultiplicity(), "ALICE_2015_PBPBCentrality",
- "V0M", "V0M");
+ declareCentrality(ALICE::V0MMultiplicity(), "ALICE_2015_PBPBCentrality", "V0M", "V0M");
- // Charged final states with |eta| < 1.0 and 8 < pT < 15 GeV/c for
- // trigger particles
- const Cut& cutTrigger =
- Cuts::abseta < 1.0 && Cuts::pT > 8*GeV && Cuts::pT < 15*GeV;
+ // Charged final states with |eta| < 1.0 and 8 < pT < 15 GeV/c for trigger particles
+ const Cut& cutTrigger = Cuts::abseta < 1.0 && Cuts::pT > 8*GeV && Cuts::pT < 15*GeV;
const ChargedFinalState cfsTrigger(cutTrigger);
addProjection(cfsTrigger,"CFSTrigger");
-
+
// Set limit values of pT bins
pt_limits[0] = 3.;
pt_limits[1] = 4.;
pt_limits[2] = 6.;
pt_limits[3] = 8.;
pt_limits[4] = 10.;
-
- // Charged final states with |eta| < 1.0 and different pT bins for
- // associated particles.
+
+ // Charged final states with |eta| < 1.0 and different pT bins for associated particles.
for (int ipt = 0; ipt < PT_BINS; ipt++) {
- Cut mycut = Cuts::abseta < 1.0 && Cuts::pT > pt_limits[ipt]*GeV &&
- Cuts::pT < pt_limits[ipt + 1]*GeV;
- declare(ChargedFinalState(mycut), "CFSAssoc" + std::to_string(ipt));
+ Cut mycut = Cuts::abseta < 1.0 && Cuts::pT > pt_limits[ipt]*GeV && Cuts::pT < pt_limits[ipt + 1]*GeV;
+ declare(ChargedFinalState(mycut), "CFSAssoc" + std::to_string(ipt));
}
-
+
// Create event strings
event_string[0] = "pp";
event_string[1] = "central";
event_string[2] = "peripheral";
event_string[3] = "other";
-
+
// For each event type
for (int itype = 0; itype < EVENT_TYPES; itype++) {
- // For each pT range
- for (int ipt = 0; ipt < PT_BINS; ipt++) {
- // Initialize yield histograms
- _histYield[itype][ipt] = bookHisto1D("Yield_" + event_string[itype]
- + "_" + std::to_string(ipt), 36, -0.5 * M_PI, 1.5 * M_PI,
- "Associated particle per trigger particle yield",
- "$\\Delta\\eta$ (rad)", "$1 / N_{trig} dN_{assoc} / d\\Delta\\eta$ (rad$^-1$)");
- _histYieldBkgRemoved[itype][ipt] = bookHisto1D("Yield_" +
- event_string[itype] + "_nobkg_" + std::to_string(ipt), 36, -0.5*M_PI,
- 1.5 * M_PI, "Associated particle per trigger particle yield no bkg",
- "$\\Delta\\eta$ (rad)", "$1 / N_{trig} dN_{assoc} / d\\Delta\\eta$ (rad$^-1$)");
- }
+ // For each pT range
+ for (int ipt = 0; ipt < PT_BINS; ipt++) {
+ // Initialize yield histograms
+ _histYield[itype][ipt] = bookHisto1D("Yield_" + event_string[itype]
+ + "_" + std::to_string(ipt), 36, -0.5 * M_PI, 1.5 * M_PI,
+ "Associated particle per trigger particle yield",
+ "$\\Delta\\eta$ (rad)", "$1 / N_{trig} dN_{assoc} / d\\Delta\\eta$ (rad$^-1$)");
+ _histYieldBkgRemoved[itype][ipt] = bookHisto1D("Yield_" +
+ event_string[itype] + "_nobkg_" + std::to_string(ipt), 36, -0.5*M_PI,
+ 1.5 * M_PI, "Associated particle per trigger particle yield no bkg",
+ "$\\Delta\\eta$ (rad)", "$1 / N_{trig} dN_{assoc} / d\\Delta\\eta$ (rad$^-1$)");
+ }
}
-
+
// Histogram for counting trigger particles for each event type
- _histTriggerCounter = bookHisto1D("Trigger", EVENT_TYPES, 0.0,
- EVENT_TYPES, "Trigger counter", "event type", "N");
-
+ _histTriggerCounter = bookHisto1D("Trigger", EVENT_TYPES, 0.0,
+ EVENT_TYPES, "Trigger counter", "event type", "N");
+
}
+
/// Perform the per-event analysis
void analyze(const Event& event) {
-
+
const double weight = event.weight();
-
+
// Create charged final state for trigger particle
- const ChargedFinalState& triggerFinalState =
- applyProjection<ChargedFinalState>(event, "CFSTrigger");
+ const ChargedFinalState& triggerFinalState = apply<ChargedFinalState>(event, "CFSTrigger");
Particles triggerParticles = triggerFinalState.particlesByPt();
- // Create charged final state for associated particle
+ // Create charged final state for associated particle
ChargedFinalState associatedFinalState[PT_BINS];
Particles associatedParticles[PT_BINS];
for (int ipt = 0; ipt < PT_BINS; ipt++) {
- associatedFinalState[ipt] = applyProjection<ChargedFinalState>(event,
- "CFSAssoc" + std::to_string(ipt));
- associatedParticles[ipt] = associatedFinalState[ipt].particlesByPt();
+ associatedFinalState[ipt] = apply<ChargedFinalState>(event, "CFSAssoc" + std::to_string(ipt));
+ associatedParticles[ipt] = associatedFinalState[ipt].particlesByPt();
}
-
+
// Check event type
if (event.genEvent()->heavy_ion()) {
- // Prepare centrality projection and value
- const CentralityProjection& centrProj =
- apply<CentralityProjection>(event, "V0M");
- double centr = centrProj();
-
- // Set the flag for the type of the event
- if (centr > 0.0 && centr < 5.0)
- event_type = 1; // PbPb, central
- else if (centr > 60.0 && centr < 90.0)
- event_type = 2; // PbPb, peripherial
- else
- event_type = 3; // PbPb, other
+ // Prepare centrality projection and value
+ const CentralityProjection& centrProj = apply<CentralityProjection>(event, "V0M");
+ double centr = centrProj();
+
+ // Set the flag for the type of the event
+ if (centr > 0.0 && centr < 5.0)
+ event_type = 1; // PbPb, central
+ else if (centr > 60.0 && centr < 90.0)
+ event_type = 2; // PbPb, peripheral
+ else
+ event_type = 3; // PbPb, other
}
else {
- event_type = 0; // pp
+ event_type = 0; // pp
}
-
+
// Veto event if not valid event type
- if (event_type == 3)
- vetoEvent;
-
+ if (event_type == 3) vetoEvent;
+
// Fill trigger histogram for a proper event type
_histTriggerCounter->fill(event_type, triggerParticles.size());
-
+
// Loop over trigger particles
- for (const auto& triggerParticle : triggerParticles) {
- // For each pt bin
- for (int ipt = 0; ipt < PT_BINS; ipt++) {
- // Loop over associated particles
- for (const auto& associatedParticle : associatedParticles[ipt]) {
- // If associated and trigger particle are not the same particles.
- if (associatedParticle != triggerParticle) {
- // Test trigger particle.
- if (triggerParticle.pt() > associatedParticle.pt()) {
- // Calculate delta phi in range (-0.5*PI, 1.5*PI).
- double dPhi = triggerParticle.phi() -
- associatedParticle.phi();
- while (dPhi > 1.5 * M_PI) { dPhi -= 2 * M_PI; }
- while (dPhi < -0.5 * M_PI) { dPhi += 2 * M_PI; }
- // Fill yield histogram for calculated delta phi
- _histYield[event_type][ipt]->fill(dPhi, weight);
- }
- }
- }
- }
+ for (const Particle& triggerParticle : triggerParticles) {
+ // For each pt bin
+ for (int ipt = 0; ipt < PT_BINS; ipt++) {
+ // Loop over associated particles
+ for (const Particle& associatedParticle : associatedParticles[ipt]) {
+ // If the associated and trigger particles are not the same particle.
+ if (!isSame(triggerParticle, associatedParticle)) {
+ // Test trigger particle.
+ if (triggerParticle.pt() > associatedParticle.pt()) {
+ // Calculate delta phi in range (-0.5*PI, 1.5*PI).
+ const double dPhi = deltaPhi(triggerParticle, associatedParticle, true);
+ // Fill yield histogram for calculated delta phi
+ _histYield[event_type][ipt]->fill(dPhi, weight);
+ }
+ }
+ }
+ }
}
}
+
/// Normalise histograms etc., after the run
void finalize() {
-
+
// Check for the reentrant finalize
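// The I_AA / I_CP ratios formed below need both the pp reference and the PbPb yields,
// so the postprocessing only runs once histograms for both are filled (e.g. when
// finalize is re-run over merged pp and PbPb outputs).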
bool pp_available = false, PbPb_available = false;
reentrant_flag = false;
// For each event type
for (int itype = 0; itype < EVENT_TYPES; itype++) {
- // For each pT range
- for (int ipt = 0; ipt < PT_BINS; ipt++) {
- if (_histYield[itype][ipt]->numEntries() > 0)
- itype == 0 ? pp_available = true : PbPb_available = true;
- }
+ // For each pT range
+ for (int ipt = 0; ipt < PT_BINS; ipt++) {
+ if (_histYield[itype][ipt]->numEntries() > 0)
+ itype == 0 ? pp_available = true : PbPb_available = true;
+ }
}
reentrant_flag = (pp_available && PbPb_available);
-
+
// Postprocessing of the histograms
if (reentrant_flag == true) {
-
+
// Initialize IAA and ICP histograms
_histIAA[0] = bookScatter2D(1, 1, 1);
_histIAA[1] = bookScatter2D(2, 1, 1);
_histIAA[2] = bookScatter2D(5, 1, 1);
-
_histIAA[3] = bookScatter2D(3, 1, 1);
_histIAA[4] = bookScatter2D(4, 1, 1);
_histIAA[5] = bookScatter2D(6, 1, 1);
- // Variables for near and away side peak calculation
- double nearSide[EVENT_TYPES][PT_BINS] = { {0.0} };
- double awaySide[EVENT_TYPES][PT_BINS] = { {0.0} };
-
- // Variables for background error calculation
- double background[EVENT_TYPES][PT_BINS] = { {0.0} };
- double backgroundError[EVENT_TYPES][PT_BINS] = { {0.0} };
-
- // Variables for integration error calculation
- double scalingFactor[EVENT_TYPES] = {0.0};
- double numberOfEntries[EVENT_TYPES][PT_BINS][2] = { { {0.0} } };
- int numberOfBins[EVENT_TYPES][PT_BINS][2] = { { {0} } };
-
- // For each event type
- for (int itype = 0; itype < EVENT_TYPES; itype++) {
- // For each pT range
- for (int ipt = 0; ipt < PT_BINS; ipt++) {
- // Check if histograms are fine
- if (_histTriggerCounter->numEntries() == 0 ||
- _histYield[itype][ipt]->numEntries() == 0) {
- cout << "There are no entries in one of the histograms" << endl;
- continue;
- }
-
- // Scale yield histogram
- if ((_histTriggerCounter->bin(itype).sumW() != 0)) {
- scalingFactor[itype] = 1. /
- _histTriggerCounter->bin(itype).sumW();
- scale(_histYield[itype][ipt],
- (1. / _histTriggerCounter->bin(itype).sumW()));
- }
-
- // Calculate background
- double sum = 0.0;
- int nbins = 0;
- for (unsigned int ibin = 0; ibin < _histYield[itype][ipt]->numBins(); ibin++) {
- if ((_histYield[itype][ipt]->bin(ibin).xMid() > (-0.5 * M_PI) &&
- _histYield[itype][ipt]->bin(ibin).xMid() < (-0.5 * M_PI + 0.4)) ||
- (_histYield[itype][ipt]->bin(ibin).xMid() > (0.5 * M_PI - 0.4) &&
- _histYield[itype][ipt]->bin(ibin).xMid() < (0.5 * M_PI + 0.4)) ||
- (_histYield[itype][ipt]->bin(ibin).xMid() > (1.5 * M_PI - 0.4) &&
- _histYield[itype][ipt]->bin(ibin).xMid() < (1.5 * M_PI))) {
- sum += _histYield[itype][ipt]->bin(ibin).sumW();
- nbins++;
- }
- }
- if (nbins == 0) {
- std::cout << "Failed to estimate background!" << std::endl;
- continue;
- }
- background[itype][ipt] = sum / nbins;
-
- // Calculate background error
- sum = 0.0;
- nbins = 0;
- for (unsigned int ibin = 0; ibin < _histYield[itype][ipt]->numBins(); ibin++) {
- if (_histYield[itype][ipt]->bin(ibin).xMid() > (0.5 * M_PI - 0.4) &&
- _histYield[itype][ipt]->bin(ibin).xMid() < (0.5 * M_PI + 0.4)) {
- sum += (_histYield[itype][ipt]->bin(ibin).sumW() - background[itype][ipt]) *
- (_histYield[itype][ipt]->bin(ibin).sumW() - background[itype][ipt]);
- nbins++;
- }
- }
- backgroundError[itype][ipt] = sqrt(sum / (nbins - 1));
-
- // Fill histograms with removed background
- for (unsigned int ibin = 0; ibin < _histYield[itype][ipt]->numBins(); ibin++) {
- _histYieldBkgRemoved[itype][ipt]->fillBin(ibin,
- _histYield[itype][ipt]->bin(ibin).sumW() - background[itype][ipt]);
- }
-
- // Integrate near-side yield
- unsigned int lowerBin = _histYield[itype][ipt]->binIndexAt(-0.7 + 0.02);
- unsigned int upperBin = _histYield[itype][ipt]->binIndexAt( 0.7 - 0.02) + 1;
- nbins = upperBin - lowerBin;
- numberOfBins[itype][ipt][0] = nbins;
- nearSide[itype][ipt] = _histYield[itype][ipt]->integralRange(lowerBin,
- upperBin) - nbins * background[itype][ipt];
- numberOfEntries[itype][ipt][0] = _histYield[itype][ipt]->integralRange(
- lowerBin, upperBin) * _histTriggerCounter->bin(itype).sumW();
-
- // Integrate away-side yield
- lowerBin = _histYield[itype][ipt]->binIndexAt(M_PI - 0.7 + 0.02);
- upperBin = _histYield[itype][ipt]->binIndexAt(M_PI + 0.7 - 0.02) + 1;
- nbins = upperBin - lowerBin;
- numberOfBins[itype][ipt][1] = nbins;
- awaySide[itype][ipt] = _histYield[itype][ipt]->integralRange(
- lowerBin, upperBin) - nbins * background[itype][ipt];
- numberOfEntries[itype][ipt][1] =
- _histYield[itype][ipt]->integralRange(lowerBin, upperBin) *
- _histTriggerCounter->bin(itype).sumW();
-
- }
- }
-
- // Variables for IAA/ICP plots
- double dI = 0.0;
- int near = 0;
- int away = 1;
- double xval[PT_BINS] = { 3.5, 5.0, 7.0, 9.0 };
- double xerr[PT_BINS] = { 0.5, 1.0, 1.0, 1.0 };
-
- int types1[3] = {1, 2, 1};
- int types2[3] = {0, 0, 2};
- // Fill IAA/ICP plots for near side peak
- for (int ihist = 0; ihist < 3; ihist++) {
- int type1 = types1[ihist];
- int type2 = types2[ihist];
- for (int ipt = 0; ipt < PT_BINS; ipt++) {
- dI = scalingFactor[type1] * scalingFactor[type1] * numberOfEntries[type1][ipt][near] +
- scalingFactor[type2] * scalingFactor[type2] * numberOfEntries[type2][ipt][near] *
- nearSide[type1][ipt] * nearSide[type1][ipt] / (nearSide[type2][ipt] * nearSide[type2][ipt]) +
- numberOfBins[type1][ipt][near] * numberOfBins[type1][ipt][near] * backgroundError[type1][ipt] *
- backgroundError[type1][ipt] + numberOfBins[type2][ipt][near] * numberOfBins[type2][ipt][near] *
- backgroundError[type2][ipt] * backgroundError[type2][ipt] * nearSide[type1][ipt] *
- nearSide[type1][ipt] / (nearSide[type2][ipt] * nearSide[type2][ipt]);
+ // Variables for near and away side peak calculation
+ double nearSide[EVENT_TYPES][PT_BINS] = { {0.0} };
+ double awaySide[EVENT_TYPES][PT_BINS] = { {0.0} };
- dI = sqrt(dI)/nearSide[type2][ipt];
- _histIAA[ihist]->addPoint(xval[ipt], nearSide[type1][ipt] / nearSide[type2][ipt], xerr[ipt], dI);
- }
- }
-
- // Fill IAA/ICP plots for away side peak
- for (int ihist = 0; ihist < 3; ihist++) {
- int type1 = types1[ihist];
- int type2 = types2[ihist];
- for (int ipt = 0; ipt < PT_BINS; ipt++) {
- dI = scalingFactor[type1] * scalingFactor[type1] * numberOfEntries[type1][ipt][away] +
- scalingFactor[type2] * scalingFactor[type2] * numberOfEntries[type2][ipt][away] *
- awaySide[type1][ipt] * awaySide[type1][ipt] / (awaySide[type2][ipt] * awaySide[type2][ipt]) +
- numberOfBins[type1][ipt][away] * numberOfBins[type1][ipt][away] * backgroundError[type1][ipt] *
- backgroundError[type1][ipt] + numberOfBins[type2][ipt][away] * numberOfBins[type2][ipt][away] *
- backgroundError[type2][ipt] * backgroundError[type2][ipt] * awaySide[type1][ipt] *
- awaySide[type1][ipt] / (awaySide[type2][ipt] * awaySide[type2][ipt]);
+ // Variables for background error calculation
+ double background[EVENT_TYPES][PT_BINS] = { {0.0} };
+ double backgroundError[EVENT_TYPES][PT_BINS] = { {0.0} };
- dI = sqrt(dI)/awaySide[type2][ipt];
- _histIAA[ihist + 3]->addPoint(xval[ipt], awaySide[type1][ipt] / awaySide[type2][ipt], xerr[ipt], dI);
- }
- }
+ // Variables for integration error calculation
+ double scalingFactor[EVENT_TYPES] = {0.0};
+ double numberOfEntries[EVENT_TYPES][PT_BINS][2] = { { {0.0} } };
+ int numberOfBins[EVENT_TYPES][PT_BINS][2] = { { {0} } };
+
+ // For each event type
+ for (int itype = 0; itype < EVENT_TYPES; itype++) {
+ // For each pT range
+ for (int ipt = 0; ipt < PT_BINS; ipt++) {
+ // Check if histograms are fine
+ if (_histTriggerCounter->numEntries() == 0 || _histYield[itype][ipt]->numEntries() == 0) {
+ MSG_WARNING("There are no entries in one of the histograms");
+ continue;
+ }
+
+ // Scale yield histogram
+ if ((_histTriggerCounter->bin(itype).sumW() != 0)) {
+ scalingFactor[itype] = 1. / _histTriggerCounter->bin(itype).sumW();
+ scale(_histYield[itype][ipt], (1. / _histTriggerCounter->bin(itype).sumW()));
+ }
+
+ // Calculate background
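+ // The flat pedestal is estimated as the mean bin content in the regions around
+ // dPhi = -pi/2, +pi/2 and 3pi/2 (within 0.4 rad), i.e. away from both the
+ // near-side and away-side peaks.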
+ double sum = 0.0;
+ int nbins = 0;
+ for (size_t ibin = 0; ibin < _histYield[itype][ipt]->numBins(); ibin++) {
+ if ((_histYield[itype][ipt]->bin(ibin).xMid() > (-0.5 * M_PI) &&
+ _histYield[itype][ipt]->bin(ibin).xMid() < (-0.5 * M_PI + 0.4)) ||
+ (_histYield[itype][ipt]->bin(ibin).xMid() > (0.5 * M_PI - 0.4) &&
+ _histYield[itype][ipt]->bin(ibin).xMid() < (0.5 * M_PI + 0.4)) ||
+ (_histYield[itype][ipt]->bin(ibin).xMid() > (1.5 * M_PI - 0.4) &&
+ _histYield[itype][ipt]->bin(ibin).xMid() < (1.5 * M_PI))) {
+ sum += _histYield[itype][ipt]->bin(ibin).sumW();
+ nbins += 1;
+ }
+ }
+ if (nbins == 0) {
+ MSG_WARNING("Failed to estimate background!");
+ continue;
+ }
+ background[itype][ipt] = sum / nbins;
+
+ // Calculate background error
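+ // Taken as the standard deviation of the bin contents in the dPhi = pi/2 +- 0.4 rad
+ // region with respect to the pedestal estimated above.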
+ sum = 0.0;
+ nbins = 0;
+ for (size_t ibin = 0; ibin < _histYield[itype][ipt]->numBins(); ibin++) {
+ if (_histYield[itype][ipt]->bin(ibin).xMid() > (0.5 * M_PI - 0.4) &&
+ _histYield[itype][ipt]->bin(ibin).xMid() < (0.5 * M_PI + 0.4)) {
+ sum += (_histYield[itype][ipt]->bin(ibin).sumW() - background[itype][ipt]) *
+ (_histYield[itype][ipt]->bin(ibin).sumW() - background[itype][ipt]);
+ nbins++;
+ }
+ }
+ backgroundError[itype][ipt] = sqrt(sum / (nbins - 1));
+
+ // Fill histograms with removed background
+ for (size_t ibin = 0; ibin < _histYield[itype][ipt]->numBins(); ibin++) {
+ _histYieldBkgRemoved[itype][ipt]->fillBin(ibin,
+ _histYield[itype][ipt]->bin(ibin).sumW() - background[itype][ipt]);
+ }
+
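+ // Peak yields: integrate the per-trigger yield over |dPhi| < 0.7 (near side) and
+ // |dPhi - pi| < 0.7 (away side), subtracting the pedestal times the number of
+ // integrated bins.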
+ // Integrate near-side yield
+ size_t lowerBin = _histYield[itype][ipt]->binIndexAt(-0.7 + 0.02);
+ size_t upperBin = _histYield[itype][ipt]->binIndexAt( 0.7 - 0.02) + 1;
+ nbins = upperBin - lowerBin;
+ numberOfBins[itype][ipt][0] = nbins;
+ nearSide[itype][ipt] = _histYield[itype][ipt]->integralRange(lowerBin, upperBin) - nbins * background[itype][ipt];
+ numberOfEntries[itype][ipt][0] = _histYield[itype][ipt]->integralRange(lowerBin, upperBin) * _histTriggerCounter->bin(itype).sumW();
+
+ // Integrate away-side yield
+ lowerBin = _histYield[itype][ipt]->binIndexAt(M_PI - 0.7 + 0.02);
+ upperBin = _histYield[itype][ipt]->binIndexAt(M_PI + 0.7 - 0.02) + 1;
+ nbins = upperBin - lowerBin;
+ numberOfBins[itype][ipt][1] = nbins;
+ awaySide[itype][ipt] = _histYield[itype][ipt]->integralRange(lowerBin, upperBin) - nbins * background[itype][ipt];
+ numberOfEntries[itype][ipt][1] = _histYield[itype][ipt]->integralRange(lowerBin, upperBin) * _histTriggerCounter->bin(itype).sumW();
+
+ }
+ }
+
+ // Variables for IAA/ICP plots
+ double dI = 0.0;
+ int near = 0;
+ int away = 1;
+ double xval[PT_BINS] = { 3.5, 5.0, 7.0, 9.0 };
+ double xerr[PT_BINS] = { 0.5, 1.0, 1.0, 1.0 };
+
+ int types1[3] = {1, 2, 1};
+ int types2[3] = {0, 0, 2};
+
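+ // Error propagation for the yield ratio I = Y1/Y2: dI = sqrt(dY1^2 + I^2*dY2^2) / Y2,
+ // where each dY^2 combines a Poisson term (scalingFactor^2 * raw pair count) and the
+ // pedestal-subtraction term (numberOfBins^2 * backgroundError^2).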
+ // Fill IAA/ICP plots for near side peak
+ for (int ihist = 0; ihist < 3; ihist++) {
+ int type1 = types1[ihist];
+ int type2 = types2[ihist];
+ for (int ipt = 0; ipt < PT_BINS; ipt++) {
+ dI = scalingFactor[type1] * scalingFactor[type1] * numberOfEntries[type1][ipt][near] +
+ scalingFactor[type2] * scalingFactor[type2] * numberOfEntries[type2][ipt][near] *
+ nearSide[type1][ipt] * nearSide[type1][ipt] / (nearSide[type2][ipt] * nearSide[type2][ipt]) +
+ numberOfBins[type1][ipt][near] * numberOfBins[type1][ipt][near] * backgroundError[type1][ipt] *
+ backgroundError[type1][ipt] + numberOfBins[type2][ipt][near] * numberOfBins[type2][ipt][near] *
+ backgroundError[type2][ipt] * backgroundError[type2][ipt] * nearSide[type1][ipt] *
+ nearSide[type1][ipt] / (nearSide[type2][ipt] * nearSide[type2][ipt]);
+
+ dI = sqrt(dI)/nearSide[type2][ipt];
+ _histIAA[ihist]->addPoint(xval[ipt], nearSide[type1][ipt] / nearSide[type2][ipt], xerr[ipt], dI);
+ }
+ }
+
+ // Fill IAA/ICP plots for away side peak
+ for (int ihist = 0; ihist < 3; ihist++) {
+ int type1 = types1[ihist];
+ int type2 = types2[ihist];
+ for (int ipt = 0; ipt < PT_BINS; ipt++) {
+ dI = scalingFactor[type1] * scalingFactor[type1] * numberOfEntries[type1][ipt][away] +
+ scalingFactor[type2] * scalingFactor[type2] * numberOfEntries[type2][ipt][away] *
+ awaySide[type1][ipt] * awaySide[type1][ipt] / (awaySide[type2][ipt] * awaySide[type2][ipt]) +
+ numberOfBins[type1][ipt][away] * numberOfBins[type1][ipt][away] * backgroundError[type1][ipt] *
+ backgroundError[type1][ipt] + numberOfBins[type2][ipt][away] * numberOfBins[type2][ipt][away] *
+ backgroundError[type2][ipt] * backgroundError[type2][ipt] * awaySide[type1][ipt] *
+ awaySide[type1][ipt] / (awaySide[type2][ipt] * awaySide[type2][ipt]);
+
+ dI = sqrt(dI)/awaySide[type2][ipt];
+ _histIAA[ihist + 3]->addPoint(xval[ipt], awaySide[type1][ipt] / awaySide[type2][ipt], xerr[ipt], dI);
+ }
+ }
}
-
+
}
-
+
+
private:
-
+
static const int PT_BINS = 4;
static const int EVENT_TYPES = 3;
Histo1DPtr _histYield[EVENT_TYPES][PT_BINS];
Histo1DPtr _histYieldBkgRemoved[EVENT_TYPES][PT_BINS];
Histo1DPtr _histTriggerCounter;
Scatter2DPtr _histIAA[6];
double pt_limits[5];
int event_type;
string event_string[EVENT_TYPES + 1];
bool reentrant_flag;
+
};
- // The hook for the plugin system
+
DECLARE_RIVET_PLUGIN(ALICE_2012_I930312);
+
}
diff --git a/analyses/pluginALICE/ALICE_2016_I1507157.cc b/analyses/pluginALICE/ALICE_2016_I1507157.cc
--- a/analyses/pluginALICE/ALICE_2016_I1507157.cc
+++ b/analyses/pluginALICE/ALICE_2016_I1507157.cc
@@ -1,202 +1,167 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/AliceCommon.hh"
#include "Rivet/Projections/PrimaryParticles.hh"
#include "Rivet/Projections/ChargedFinalState.hh"
#include "Rivet/Projections/EventMixingFinalState.hh"
namespace Rivet {
- /// @brief Correlations of identified particles in pp.
+ /// @brief ALICE correlations of identified particles in pp
/// Also showcasing use of EventMixingFinalState.
-
class ALICE_2016_I1507157 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(ALICE_2016_I1507157);
/// @name Analysis methods
//@{
-
- /// @brief Calculate angular distance between particles.
- double phaseDif(double a1, double a2){
- double dif = a1 - a2;
- while (dif < -M_PI/2)
- dif += 2*M_PI;
- while (dif > 3*M_PI/2)
- dif -= 2*M_PI;
- return dif;
- }
/// Book histograms and initialise projections before the run
void init() {
double etamax = 0.8;
- double pTmin = 0.5; // GeV
-
+ double pTmin = 0.5; // GeV
+
// Trigger
declare(ALICE::V0AndTrigger(), "V0-AND");
// Charged tracks used to manage the mixing observable.
ChargedFinalState cfsMult(Cuts::abseta < etamax);
addProjection(cfsMult, "CFSMult");
-
+
// Primary particles.
- PrimaryParticles pp({Rivet::PID::PIPLUS, Rivet::PID::KPLUS,
- Rivet::PID::K0S, Rivet::PID::K0L, Rivet::PID::PROTON,
- Rivet::PID::NEUTRON, Rivet::PID::LAMBDA, Rivet::PID::SIGMAMINUS,
- Rivet::PID::SIGMAPLUS, Rivet::PID::XIMINUS, Rivet::PID::XI0,
- Rivet::PID::OMEGAMINUS},Cuts::abseta < etamax && Cuts::pT > pTmin*GeV);
+ PrimaryParticles pp({Rivet::PID::PIPLUS, Rivet::PID::KPLUS,
+ Rivet::PID::K0S, Rivet::PID::K0L, Rivet::PID::PROTON,
+ Rivet::PID::NEUTRON, Rivet::PID::LAMBDA, Rivet::PID::SIGMAMINUS,
+ Rivet::PID::SIGMAPLUS, Rivet::PID::XIMINUS, Rivet::PID::XI0,
+ Rivet::PID::OMEGAMINUS},Cuts::abseta < etamax && Cuts::pT > pTmin*GeV);
addProjection(pp,"APRIM");
// The event mixing projection
declare(EventMixingFinalState(cfsMult, pp, 5, 0, 100, 10),"EVM");
// The particle pairs.
pid = {{211, -211}, {321, -321}, {2212, -2212}, {3122, -3122}, {211, 211},
{321, 321}, {2212, 2212}, {3122, 3122}, {2212, 3122}, {2212, -3122}};
// The associated histograms in the data file.
vector<string> refdata = {"d04-x01-y01","d04-x01-y02","d04-x01-y03",
- "d06-x01-y02","d05-x01-y01","d05-x01-y02","d05-x01-y03","d06-x01-y01",
- "d01-x01-y02","d02-x01-y02"};
+ "d06-x01-y02","d05-x01-y01","d05-x01-y02","d05-x01-y03","d06-x01-y01",
+ "d01-x01-y02","d02-x01-y02"};
for (int i = 0, N = refdata.size(); i < N; ++i) {
// The ratio plots.
- ratio.push_back(bookScatter2D(refdata[i], true));
- // Signal and mixed background.
+ ratio.push_back(bookScatter2D(refdata[i], true));
+ // Signal and mixed background.
signal.push_back(bookHisto1D("/TMP/" + refdata[i] +
- "-s", *ratio[i], refdata[i] + "-s"));
- background.push_back(bookHisto1D("/TMP/" + refdata[i] +
- "-b", *ratio[i], refdata[i] + "-b"));
+ "-s", *ratio[i], refdata[i] + "-s"));
+ background.push_back(bookHisto1D("/TMP/" + refdata[i] +
+ "-b", *ratio[i], refdata[i] + "-b"));
// Number of signal and mixed pairs.
- nsp.push_back(0.);
+ nsp.push_back(0.);
nmp.push_back(0.);
- }
+ }
}
/// Perform the per-event analysis
void analyze(const Event& event) {
const double weight = event.weight();
-
+
// Triggering
if (!apply<ALICE::V0AndTrigger>(event, "V0-AND")()) return;
- // The projections
- const PrimaryParticles& pp = applyProjection<PrimaryParticles>(event,"APRIM");
- const EventMixingFinalState& evm =
- applyProjection<EventMixingFinalState>(event, "EVM");
- // Get all mixing events
- vector<Particles> mixEvents = evm.getMixingEvents();
-
- // If there are not enough events to mix, don't fill any histograms
- if(mixEvents.size() == 0)
- return;
+ // The primary particles
+ const PrimaryParticles& pp = apply<PrimaryParticles>(event, "APRIM");
+ const Particles pparticles = pp.particles();
+
+ // The mixed events
+ const EventMixingFinalState& evm = apply<EventMixingFinalState>(event, "EVM");
+ const vector<Particles> mixEvents = evm.getMixingEvents();
+ if (mixEvents.empty()) vetoEvent;
// Make a vector of mixed event particles
vector<Particle> mixParticles;
size_t pSize = 0;
- for(size_t i = 0; i < mixEvents.size(); ++i)
- pSize+=mixEvents[i].size();
+ for (size_t i = 0; i < mixEvents.size(); ++i)
+ pSize += mixEvents[i].size();
mixParticles.reserve(pSize);
- for(size_t i = 0; i < mixEvents.size(); ++i)
- mixParticles.insert(mixParticles.end(), mixEvents[i].begin(),
- mixEvents[i].end());
-
- // Shuffle the particles in the mixing events
+ for (size_t i = 0; i < mixEvents.size(); ++i)
+ mixParticles.insert(mixParticles.end(), mixEvents[i].begin(), mixEvents[i].end());
random_shuffle(mixParticles.begin(), mixParticles.end());
-
- for(const Particle& p1 : pp.particles()) {
+
+ for (size_t ip1 = 0; ip1 < pparticles.size(); ++ip1) {
+ const Particle& p1 = pparticles[ip1];
+
// Start by doing the signal distributions
- for(const Particle& p2 : pp.particles()) {
- if(p1 == p2)
- continue;
- double dEta = abs(p1.eta() - p2.eta());
- double dPhi = phaseDif(p1.phi(), p2.phi());
- if(dEta < 1.3) {
- for (int i = 0, N = pid.size(); i < N; ++i) {
- int pid1 = pid[i].first;
- int pid2 = pid[i].second;
- bool samesign = (pid1 * pid2 > 0);
- if (samesign && ((pid1 == p1.pid() && pid2 == p2.pid()) ||
- (pid1 == -p1.pid() && pid2 == -p2.pid()))) {
- signal[i]->fill(dPhi, weight);
- nsp[i] += 1.0;
- }
- if (!samesign && abs(pid1) == abs(pid2) &&
- pid1 == p1.pid() && pid2 == p2.pid()) {
- signal[i]->fill(dPhi, weight);
- nsp[i] += 1.0;
- }
- if (!samesign && abs(pid1) != abs(pid2) &&
- ( (pid1 == p1.pid() && pid2 == p2.pid()) ||
- (pid2 == p1.pid() && pid1 == p2.pid()) ) ) {
- signal[i]->fill(dPhi, weight);
- nsp[i] += 1.0;
- }
- }
- }
- }
- // Then do the background distribution
- for(const Particle& pMix : mixParticles){
- double dEta = abs(p1.eta() - pMix.eta());
- double dPhi = phaseDif(p1.phi(), pMix.phi());
- if(dEta < 1.3) {
- for (int i = 0, N = pid.size(); i < N; ++i) {
- int pid1 = pid[i].first;
- int pid2 = pid[i].second;
- bool samesign = (pid1 * pid2 > 0);
- if (samesign && ((pid1 == p1.pid() && pid2 == pMix.pid()) ||
- (pid1 == -p1.pid() && pid2 == -pMix.pid()))) {
- background[i]->fill(dPhi, weight);
- nmp[i] += 1.0;
- }
- if (!samesign && abs(pid1) == abs(pid2) &&
- pid1 == p1.pid() && pid2 == pMix.pid()) {
- background[i]->fill(dPhi, weight);
- nmp[i] += 1.0;
- }
- if (!samesign && abs(pid1) != abs(pid2) &&
- ( (pid1 == p1.pid() && pid2 == pMix.pid()) ||
- (pid2 == p1.pid() && pid1 == pMix.pid()) ) ) {
- background[i]->fill(dPhi, weight);
- nmp[i] += 1.0;
- }
- }
- }
- }
+ for (size_t ip2 = 0; ip2 < pparticles.size(); ++ip2) {
+ if (ip1 == ip2) continue;
+ const Particle& p2 = pparticles[ip2];
+ const double dEta = deltaEta(p1, p2);
+ const double dPhi = deltaPhi(p1, p2, true);
+ if (dEta > 1.3) continue;
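+ // Match the pair against each configured PID combination: equal-sign PID pairs
+ // (e.g. pi+ pi+, p Lambda) also accept their charge conjugates, unlike-sign
+ // same-species pairs (e.g. pi+ pi-) require an exact match, and unlike-sign
+ // mixed-species pairs (e.g. p anti-Lambda) accept either ordering.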
+ for (int i = 0, N = pid.size(); i < N; ++i) {
+ const int pid1 = pid[i].first;
+ const int pid2 = pid[i].second;
+ const bool samesign = (pid1 * pid2 > 0);
+ const bool pidmatch1 = (pid1 == p1.pid() && pid2 == p2.pid()) || (pid1 == -p1.pid() && pid2 == -p2.pid());
+ const bool pidmatch2 = abs(pid1) == abs(pid2) && pid1 == p1.pid() && pid2 == p2.pid();
+ const bool pidmatch3 = abs(pid1) != abs(pid2) && ( (pid1 == p1.pid() && pid2 == p2.pid()) || (pid2 == p1.pid() && pid1 == p2.pid()) );
+ if ((samesign && pidmatch1) || (!samesign && (pidmatch2 || pidmatch3))) {
+ signal[i]->fill(dPhi, weight);
+ nsp[i] += 1.0;
+ }
+ }
+ }
+
+ // Then do the background distribution
+ for (const Particle& pMix : mixParticles){
+ const double dEta = deltaEta(p1, pMix);
+ const double dPhi = deltaPhi(p1, pMix, true);
+ if (dEta > 1.3) continue;
+ for (int i = 0, N = pid.size(); i < N; ++i) {
+ const int pid1 = pid[i].first;
+ const int pid2 = pid[i].second;
+ const bool samesign = (pid1 * pid2 > 0);
+ const bool pidmatch1 = (pid1 == p1.pid() && pid2 == pMix.pid()) || (pid1 == -p1.pid() && pid2 == -pMix.pid());
+ const bool pidmatch2 = abs(pid1) == abs(pid2) && pid1 == p1.pid() && pid2 == pMix.pid();
+ const bool pidmatch3 = abs(pid1) != abs(pid2) && ( (pid1 == p1.pid() && pid2 == pMix.pid()) || (pid2 == p1.pid() && pid1 == pMix.pid()) );
+ if ((samesign && pidmatch1) || (!samesign && (pidmatch2 || pidmatch3))) {
+ background[i]->fill(dPhi, weight);
+ nmp[i] += 1.0;
+ }
+ }
+ }
}
}
/// Normalise histograms etc., after the run
void finalize() {
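// Scale the same-event (signal) pair distribution to the mixed-event (background) one
// via the ratio of accepted pair counts, then divide to obtain the correlation ratio.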
for (int i = 0, N = pid.size(); i < N; ++i) {
- double sc = nmp[i] / nsp[i];
- signal[i]->scaleW(sc);
- divide(signal[i],background[i],ratio[i]);
+ const double sc = nmp[i] / nsp[i];
+ signal[i]->scaleW(sc);
+ divide(signal[i],background[i],ratio[i]);
}
}
//@}
/// @name Histograms
//@{
vector<pair<int, int> > pid;
vector<Histo1DPtr> signal;
vector<Histo1DPtr> background;
vector<Scatter2DPtr> ratio;
vector<double> nsp;
vector<double> nmp;
//@}
};
- // The hook for the plugin system
DECLARE_RIVET_PLUGIN(ALICE_2016_I1507157);
-
}
diff --git a/analyses/pluginATLAS/ATLAS_2012_I1203852.cc b/analyses/pluginATLAS/ATLAS_2012_I1203852.cc
--- a/analyses/pluginATLAS/ATLAS_2012_I1203852.cc
+++ b/analyses/pluginATLAS/ATLAS_2012_I1203852.cc
@@ -1,373 +1,375 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/IdentifiedFinalState.hh"
#include "Rivet/Projections/WFinder.hh"
#include "Rivet/Projections/LeadingParticlesFinalState.hh"
#include "Rivet/Projections/UnstableFinalState.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
#include "Rivet/Projections/DressedLeptons.hh"
#include "Rivet/Projections/MergedFinalState.hh"
#include "Rivet/Projections/MissingMomentum.hh"
#include "Rivet/Projections/InvMassFinalState.hh"
namespace Rivet {
+
/// Generic Z candidate
struct Zstate : public ParticlePair {
Zstate() { }
Zstate(ParticlePair _particlepair) : ParticlePair(_particlepair) { }
FourMomentum mom() const { return first.momentum() + second.momentum(); }
operator FourMomentum() const { return mom(); }
static bool cmppT(const Zstate& lx, const Zstate& rx) { return lx.mom().pT() < rx.mom().pT(); }
};
- /// @name ZZ analysis
+ /// ZZ analysis
class ATLAS_2012_I1203852 : public Analysis {
public:
/// Default constructor
ATLAS_2012_I1203852()
: Analysis("ATLAS_2012_I1203852")
{ }
+
void init() {
// NB Missing ET is not required to be neutrinos
FinalState fs(-5.0, 5.0, 0.0*GeV);
// Final states to form Z bosons
vids.push_back(make_pair(PID::ELECTRON, PID::POSITRON));
vids.push_back(make_pair(PID::MUON, PID::ANTIMUON));
IdentifiedFinalState Photon(fs);
Photon.acceptIdPair(PID::PHOTON);
IdentifiedFinalState bare_EL(fs);
bare_EL.acceptIdPair(PID::ELECTRON);
IdentifiedFinalState bare_MU(fs);
bare_MU.acceptIdPair(PID::MUON);
// Selection 1: ZZ-> llll selection
Cut etaranges_lep = Cuts::abseta < 3.16 && Cuts::pT > 7*GeV;
DressedLeptons electron_sel4l(Photon, bare_EL, 0.1, etaranges_lep);
declare(electron_sel4l, "ELECTRON_sel4l");
DressedLeptons muon_sel4l(Photon, bare_MU, 0.1, etaranges_lep);
declare(muon_sel4l, "MUON_sel4l");
// Selection 2: ZZ-> llnunu selection
Cut etaranges_lep2 = Cuts::abseta < 2.5 && Cuts::pT > 10*GeV;
DressedLeptons electron_sel2l2nu(Photon, bare_EL, 0.1, etaranges_lep2);
declare(electron_sel2l2nu, "ELECTRON_sel2l2nu");
DressedLeptons muon_sel2l2nu(Photon, bare_MU, 0.1, etaranges_lep2);
declare(muon_sel2l2nu, "MUON_sel2l2nu");
/// Get all neutrinos. These will not be used to form jets.
IdentifiedFinalState neutrino_fs(Cuts::abseta < 4.5);
neutrino_fs.acceptNeutrinos();
declare(neutrino_fs, "NEUTRINO_FS");
// Calculate missing ET from the visible final state, not by requiring neutrinos
addProjection(MissingMomentum(Cuts::abseta < 4.5), "MISSING");
VetoedFinalState jetinput;
jetinput.addVetoOnThisFinalState(bare_MU);
jetinput.addVetoOnThisFinalState(neutrino_fs);
FastJets jetpro(fs, FastJets::ANTIKT, 0.4);
declare(jetpro, "jet");
// Both ZZ on-shell histos
_h_ZZ_xsect = bookHisto1D(1, 1, 1);
_h_ZZ_ZpT = bookHisto1D(3, 1, 1);
_h_ZZ_phill = bookHisto1D(5, 1, 1);
_h_ZZ_mZZ = bookHisto1D(7, 1, 1);
// One Z off-shell (ZZstar) histos
_h_ZZs_xsect = bookHisto1D(1, 1, 2);
// ZZ -> llnunu histos
_h_ZZnunu_xsect = bookHisto1D(1, 1, 3);
_h_ZZnunu_ZpT = bookHisto1D(4, 1, 1);
_h_ZZnunu_phill = bookHisto1D(6, 1, 1);
_h_ZZnunu_mZZ = bookHisto1D(8, 1, 1);
}
/// Do the analysis
void analyze(const Event& e) {
const double weight = e.weight();
////////////////////////////////////////////////////////////////////
// preselection of leptons for ZZ-> llll final state
////////////////////////////////////////////////////////////////////
Particles leptons_sel4l;
const vector<DressedLepton>& mu_sel4l = apply<DressedLeptons>(e, "MUON_sel4l").dressedLeptons();
const vector<DressedLepton>& el_sel4l = apply<DressedLeptons>(e, "ELECTRON_sel4l").dressedLeptons();
vector<DressedLepton> leptonsFS_sel4l;
leptonsFS_sel4l.insert( leptonsFS_sel4l.end(), mu_sel4l.begin(), mu_sel4l.end() );
leptonsFS_sel4l.insert( leptonsFS_sel4l.end(), el_sel4l.begin(), el_sel4l.end() );
////////////////////////////////////////////////////////////////////
// OVERLAP removal dR(l,l)>0.2
////////////////////////////////////////////////////////////////////
- foreach ( const DressedLepton& l1, leptonsFS_sel4l) {
+ for (const DressedLepton& l1 : leptonsFS_sel4l) {
bool isolated = true;
- foreach (DressedLepton& l2, leptonsFS_sel4l) {
+ for (DressedLepton& l2 : leptonsFS_sel4l) {
const double dR = deltaR(l1, l2);
- if (dR < 0.2 && l1 != l2) { isolated = false; break; }
+ if (dR < 0.2 && !isSame(l1, l2)) { isolated = false; break; }
}
if (isolated) leptons_sel4l.push_back(l1);
}
//////////////////////////////////////////////////////////////////
// Exactly two opposite charged leptons
//////////////////////////////////////////////////////////////////
// calculate total 'flavour' charge
double totalcharge = 0;
- foreach (Particle& l, leptons_sel4l) totalcharge += l.pid();
+ for (const Particle& l : leptons_sel4l) totalcharge += l.pid();
// Analyze 4 lepton events
if (leptons_sel4l.size() == 4 && totalcharge == 0 ) {
Zstate Z1, Z2;
// Identifies Z states from 4 lepton pairs
identifyZstates(Z1, Z2,leptons_sel4l);
////////////////////////////////////////////////////////////////////////////
// Z MASS WINDOW
// -ZZ: for both Z: 66<mZ<116 GeV
// -ZZ*: one Z on-shell: 66<mZ<116 GeV, one Z off-shell: mZ>20 GeV
///////////////////////////////////////////////////////////////////////////
Zstate leadPtZ = std::max(Z1, Z2, Zstate::cmppT);
double mZ1 = Z1.mom().mass();
double mZ2 = Z2.mom().mass();
double ZpT = leadPtZ.mom().pT();
double phill = fabs(deltaPhi(leadPtZ.first, leadPtZ.second));
if (phill > M_PI) phill = 2*M_PI-phill;
double mZZ = (Z1.mom() + Z2.mom()).mass();
if (mZ1 > 20*GeV && mZ2 > 20*GeV) {
// ZZ* selection
if (inRange(mZ1, 66*GeV, 116*GeV) || inRange(mZ2, 66*GeV, 116*GeV)) {
_h_ZZs_xsect -> fill(sqrtS()*GeV, weight);
}
// ZZ selection
if (inRange(mZ1, 66*GeV, 116*GeV) && inRange(mZ2, 66*GeV, 116*GeV)) {
_h_ZZ_xsect -> fill(sqrtS()*GeV, weight);
_h_ZZ_ZpT -> fill(ZpT , weight);
_h_ZZ_phill -> fill(phill , weight);
_h_ZZ_mZZ -> fill(mZZ , weight);
}
}
}
////////////////////////////////////////////////////////////////////
/// preselection of leptons for ZZ-> llnunu final state
////////////////////////////////////////////////////////////////////
Particles leptons_sel2l2nu; // output
const vector<DressedLepton>& mu_sel2l2nu = apply<DressedLeptons>(e, "MUON_sel2l2nu").dressedLeptons();
const vector<DressedLepton>& el_sel2l2nu = apply<DressedLeptons>(e, "ELECTRON_sel2l2nu").dressedLeptons();
vector<DressedLepton> leptonsFS_sel2l2nu;
leptonsFS_sel2l2nu.insert( leptonsFS_sel2l2nu.end(), mu_sel2l2nu.begin(), mu_sel2l2nu.end() );
leptonsFS_sel2l2nu.insert( leptonsFS_sel2l2nu.end(), el_sel2l2nu.begin(), el_sel2l2nu.end() );
// Lepton preselection for ZZ-> llnunu
if ((mu_sel2l2nu.empty() || el_sel2l2nu.empty()) // cannot have opposite flavour
&& (leptonsFS_sel2l2nu.size() == 2) // exactly two leptons
&& (leptonsFS_sel2l2nu[0].charge() * leptonsFS_sel2l2nu[1].charge() < 1 ) // opposite charge
&& (deltaR(leptonsFS_sel2l2nu[0], leptonsFS_sel2l2nu[1]) > 0.3) // overlap removal
&& (leptonsFS_sel2l2nu[0].pT() > 20*GeV && leptonsFS_sel2l2nu[1].pT() > 20*GeV)) { // trigger requirement
leptons_sel2l2nu.insert(leptons_sel2l2nu.end(), leptonsFS_sel2l2nu.begin(), leptonsFS_sel2l2nu.end());
}
if (leptons_sel2l2nu.empty()) vetoEvent; // no further analysis, fine to veto
Particles leptons_sel2l2nu_jetveto;
foreach (const DressedLepton& l, mu_sel2l2nu) leptons_sel2l2nu_jetveto.push_back(l.constituentLepton());
foreach (const DressedLepton& l, el_sel2l2nu) leptons_sel2l2nu_jetveto.push_back(l.constituentLepton());
double ptll = (leptons_sel2l2nu[0].momentum() + leptons_sel2l2nu[1].momentum()).pT();
// Find Z1-> ll
FinalState fs2(-3.2, 3.2);
InvMassFinalState imfs(fs2, vids, 20*GeV, sqrtS());
imfs.calc(leptons_sel2l2nu);
if (imfs.particlePairs().size() != 1) vetoEvent;
const ParticlePair& Z1constituents = imfs.particlePairs()[0];
FourMomentum Z1 = Z1constituents.first.momentum() + Z1constituents.second.momentum();
// Z to neutrinos candidate from missing ET
const MissingMomentum & missmom = applyProjection<MissingMomentum>(e, "MISSING");
const FourMomentum Z2 = missmom.missingMomentum(ZMASS);
double met_Znunu = missmom.missingEt(); //Z2.pT();
// mTZZ
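// mT(ZZ)^2 = [sqrt(mZ^2 + pT(ll)^2) + sqrt(mZ^2 + ETmiss^2)]^2 - (px(ll)+px(miss))^2 - (py(ll)+py(miss))^2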
const double mT2_1st_term = add_quad(ZMASS, ptll) + add_quad(ZMASS, met_Znunu);
const double mT2_2nd_term = Z1.px() + Z2.px();
const double mT2_3rd_term = Z1.py() + Z2.py();
const double mTZZ = sqrt(sqr(mT2_1st_term) - sqr(mT2_2nd_term) - sqr(mT2_3rd_term));
if (!inRange(Z2.mass(), 66*GeV, 116*GeV)) vetoEvent;
if (!inRange(Z1.mass(), 76*GeV, 106*GeV)) vetoEvent;
/////////////////////////////////////////////////////////////
// AXIAL MET: require axial-MET > 75 GeV (events below are vetoed)
////////////////////////////////////////////////////////////
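// Axial-MET = -|ETmiss| * cos(dPhi(ll, ETmiss)), i.e. the ETmiss component anti-parallel to the dilepton system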
double dPhiZ1Z2 = fabs(deltaPhi(Z1, Z2));
if (dPhiZ1Z2 > M_PI) dPhiZ1Z2 = 2*M_PI - dPhiZ1Z2;
const double axialEtmiss = -Z2.pT()*cos(dPhiZ1Z2);
if (axialEtmiss < 75*GeV) vetoEvent;
const double ZpT = Z1.pT();
double phill = fabs(deltaPhi(Z1constituents.first, Z1constituents.second));
if (phill > M_PI) phill = 2*M_PI - phill;
////////////////////////////////////////////////////////////////////////////
// JETS
// -"j": found by "jetpro" projection && pT() > 25 GeV && |eta| < 4.5
// -"goodjets": "j" && dR(electron/muon,jet) > 0.3
//
// JETVETO: veto all events with at least one good jet
///////////////////////////////////////////////////////////////////////////
vector<Jet> good_jets;
foreach (const Jet& j, apply<FastJets>(e, "jet").jetsByPt(25)) {
if (j.abseta() > 4.5) continue;
bool isLepton = false;
foreach (const Particle& l, leptons_sel2l2nu_jetveto) {
const double dR = deltaR(l.momentum(), j.momentum());
if (dR < 0.3) { isLepton = true; break; }
}
if (!isLepton) good_jets.push_back(j);
}
size_t n_sel_jets = good_jets.size();
if (n_sel_jets != 0) vetoEvent;
/////////////////////////////////////////////////////////////
// Fractional MET and lepton pair difference: "RatioMet"< 0.4
////////////////////////////////////////////////////////////
double ratioMet = fabs(Z2.pT() - Z1.pT()) / Z1.pT();
if (ratioMet > 0.4 ) vetoEvent;
// End of ZZllnunu selection: now fill histograms
_h_ZZnunu_xsect->fill(sqrtS()/GeV, weight);
_h_ZZnunu_ZpT ->fill(ZpT, weight);
_h_ZZnunu_phill->fill(phill, weight);
_h_ZZnunu_mZZ ->fill(mTZZ, weight);
}
/// Finalize
void finalize() {
const double norm = crossSection()/sumOfWeights()/femtobarn;
scale(_h_ZZ_xsect, norm);
normalize(_h_ZZ_ZpT);
normalize(_h_ZZ_phill);
normalize(_h_ZZ_mZZ);
scale(_h_ZZs_xsect, norm);
scale(_h_ZZnunu_xsect, norm);
normalize(_h_ZZnunu_ZpT);
normalize(_h_ZZnunu_phill);
normalize(_h_ZZnunu_mZZ);
}
private:
void identifyZstates(Zstate& Z1, Zstate& Z2, const Particles& leptons_sel4l);
Histo1DPtr _h_ZZ_xsect, _h_ZZ_ZpT, _h_ZZ_phill, _h_ZZ_mZZ;
Histo1DPtr _h_ZZs_xsect;
Histo1DPtr _h_ZZnunu_xsect, _h_ZZnunu_ZpT, _h_ZZnunu_phill, _h_ZZnunu_mZZ;
vector< pair<PdgId,PdgId> > vids;
const double ZMASS = 91.1876; // GeV
};
/// 4l to ZZ assignment -- algorithm
void ATLAS_2012_I1203852::identifyZstates(Zstate& Z1, Zstate& Z2, const Particles& leptons_sel4l) {
/////////////////////////////////////////////////////////////////////////////
/// ZZ->4l pairing
/// - Exactly two same flavour opposite charged leptons
/// - Ambiguities in pairing are resolved by choosing the combination
/// that results in the smaller value of the sum |mll - mZ| for the two pairs
/////////////////////////////////////////////////////////////////////////////
Particles part_pos_el, part_neg_el, part_pos_mu, part_neg_mu;
foreach (const Particle& l , leptons_sel4l) {
if (l.abspid() == PID::ELECTRON) {
if (l.pid() < 0) part_neg_el.push_back(l);
if (l.pid() > 0) part_pos_el.push_back(l);
}
else if (l.abspid() == PID::MUON) {
if (l.pid() < 0) part_neg_mu.push_back(l);
if (l.pid() > 0) part_pos_mu.push_back(l);
}
}
// ee/mm channel
if ( part_neg_el.size() == 2 || part_neg_mu.size() == 2) {
Zstate Zcand_1, Zcand_2, Zcand_3, Zcand_4;
if (part_neg_el.size() == 2) { // ee
Zcand_1 = Zstate( ParticlePair( part_neg_el[0], part_pos_el[0] ) );
Zcand_2 = Zstate( ParticlePair( part_neg_el[0], part_pos_el[1] ) );
Zcand_3 = Zstate( ParticlePair( part_neg_el[1], part_pos_el[0] ) );
Zcand_4 = Zstate( ParticlePair( part_neg_el[1], part_pos_el[1] ) );
} else { // mumu
Zcand_1 = Zstate( ParticlePair( part_neg_mu[0], part_pos_mu[0] ) );
Zcand_2 = Zstate( ParticlePair( part_neg_mu[0], part_pos_mu[1] ) );
Zcand_3 = Zstate( ParticlePair( part_neg_mu[1], part_pos_mu[0] ) );
Zcand_4 = Zstate( ParticlePair( part_neg_mu[1], part_pos_mu[1] ) );
}
// We can have the following pairs: (Z1 + Z4) || (Z2 + Z3)
double minValue_1, minValue_2;
minValue_1 = fabs( Zcand_1.mom().mass() - ZMASS ) + fabs( Zcand_4.mom().mass() - ZMASS);
minValue_2 = fabs( Zcand_2.mom().mass() - ZMASS ) + fabs( Zcand_3.mom().mass() - ZMASS);
if (minValue_1 < minValue_2 ) {
Z1 = Zcand_1;
Z2 = Zcand_4;
} else {
Z1 = Zcand_2;
Z2 = Zcand_3;
}
// emu channel
} else if (part_neg_mu.size() == 1 && part_neg_el.size() == 1) {
Z1 = Zstate ( ParticlePair (part_neg_mu[0], part_pos_mu[0] ) );
Z2 = Zstate ( ParticlePair (part_neg_el[0], part_pos_el[0] ) );
}
}
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ATLAS_2012_I1203852);
}
diff --git a/analyses/pluginATLAS/ATLAS_2013_I1190187.cc b/analyses/pluginATLAS/ATLAS_2013_I1190187.cc
--- a/analyses/pluginATLAS/ATLAS_2013_I1190187.cc
+++ b/analyses/pluginATLAS/ATLAS_2013_I1190187.cc
@@ -1,261 +1,261 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/IdentifiedFinalState.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/DressedLeptons.hh"
#include "Rivet/Projections/MissingMomentum.hh"
namespace Rivet {
/// ATLAS WW production analysis (ee, emu and mumu channels) at 7 TeV
class ATLAS_2013_I1190187 : public Analysis {
public:
/// Default constructor
ATLAS_2013_I1190187()
: Analysis("ATLAS_2013_I1190187")
{ }
void init() {
FinalState fs;
Cut etaRanges_EL = (Cuts::abseta < 1.37 || Cuts::absetaIn(1.52, 2.47)) && Cuts::pT > 20*GeV;
Cut etaRanges_MU = Cuts::abseta < 2.4 && Cuts::pT > 20*GeV;
MissingMomentum met(fs);
declare(met, "MET");
IdentifiedFinalState Photon(fs);
Photon.acceptIdPair(PID::PHOTON);
IdentifiedFinalState bare_EL(fs);
bare_EL.acceptIdPair(PID::ELECTRON);
IdentifiedFinalState bare_MU(fs);
bare_MU.acceptIdPair(PID::MUON);
IdentifiedFinalState neutrinoFS(fs);
neutrinoFS.acceptNeutrinos();
declare(neutrinoFS, "Neutrinos");
////////////////////////////////////////////////////////
// DRESSED LEPTONS
// 3.arg: 0.1 = dR(lep,phot)
// 4.arg: true = do clustering
// 7.arg: false = ignore photons from hadron or tau
//
//////////////////////////////////////////////////////////
DressedLeptons electronFS(Photon, bare_EL, 0.1, etaRanges_EL);
declare(electronFS, "ELECTRON_FS");
DressedLeptons muonFS(Photon, bare_MU, 0.1, etaRanges_MU);
declare(muonFS, "MUON_FS");
VetoedFinalState jetinput;
jetinput.addVetoOnThisFinalState(bare_MU);
jetinput.addVetoOnThisFinalState(neutrinoFS);
FastJets jetpro(jetinput, FastJets::ANTIKT, 0.4);
declare(jetpro, "jet");
// Book histograms
_h_Wl1_pT_mumu = bookHisto1D(1, 1, 2);
_h_Wl1_pT_ee = bookHisto1D(1, 1, 1);
_h_Wl1_pT_emu = bookHisto1D(1, 1, 3);
_h_Wl1_pT_inclusive = bookHisto1D(4, 1, 1);
}
/// Do the analysis
void analyze(const Event& e) {
const vector<DressedLepton>& muonFS = apply<DressedLeptons>(e, "MUON_FS").dressedLeptons();
const vector<DressedLepton>& electronFS = apply<DressedLeptons>(e, "ELECTRON_FS").dressedLeptons();
const MissingMomentum& met = apply<MissingMomentum>(e, "MET");
vector<DressedLepton> dressed_lepton, isolated_lepton, fiducial_lepton;
dressed_lepton.insert(dressed_lepton.end(), muonFS.begin(), muonFS.end());
dressed_lepton.insert(dressed_lepton.end(), electronFS.begin(), electronFS.end());
////////////////////////////////////////////////////////////////////////////
// OVERLAP REMOVAL
// -electrons with dR(e,mu)<0.1 are removed
// -lower pT electrons with dR(e,e)<0.1 are removed
//
////////////////////////////////////////////////////////////////////////////
- foreach (DressedLepton& l1, dressed_lepton) {
+ for (DressedLepton& l1 : dressed_lepton) {
bool l_isolated = true;
- foreach (DressedLepton& l2, dressed_lepton) {
- if (l1 != l2 && l2.constituentLepton().abspid() == PID::ELECTRON) {
+ for (DressedLepton& l2 : dressed_lepton) {
+ if (!isSame(l1, l2) && l2.constituentLepton().abspid() == PID::ELECTRON) {
double overlapControl_ll= deltaR(l1.constituentLepton(),l2.constituentLepton());
if (overlapControl_ll < 0.1) {
l_isolated = false;
// e/e overlap removal
if (l1.constituentLepton().abspid() == PID::ELECTRON) {
if (l1.constituentLepton().pT()>l2.constituentLepton().pT()) {
isolated_lepton.push_back(l1);//keep e with highest pT
} else {
isolated_lepton.push_back(l2);//keep e with highest pT
}
}
// e/mu overlap removal
if (l1.constituentLepton().abspid() == PID::MUON) isolated_lepton.push_back(l1); //keep mu
}
}
}
if (l_isolated) isolated_lepton.push_back(l1);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// PRESELECTION:
// "isolated_lepton:"
// * electron: pt>20 GeV, |eta|<1.37, 1.52<|eta|<2.47, dR(electron,muon)>0.1
// * muon: pt>20 GeV, |eta|<2.4
// * dR(l,l)>0.1
//
// "fiducial_lepton"= isolated_lepton with
// * 2 leptons (e or mu)
// * leading lepton pt (pT_l1) >25 GeV
// * opposite charged leptons
//
///////////////////////////////////////////////////////////////////////////////////////////////////////////
if (isolated_lepton.size() != 2) vetoEvent;
sort(isolated_lepton.begin(), isolated_lepton.end(), cmpMomByPt);
if (isolated_lepton[0].pT() > 25*GeV && threeCharge(isolated_lepton[0]) != threeCharge(isolated_lepton[1])) {
fiducial_lepton.insert(fiducial_lepton.end(), isolated_lepton.begin(), isolated_lepton.end());
}
if (fiducial_lepton.size() == 0) vetoEvent;
double pT_l1 = fiducial_lepton[0].pT();
double M_l1l2 = (fiducial_lepton[0].momentum() + fiducial_lepton[1].momentum()).mass();
double pT_l1l2 = (fiducial_lepton[0].momentum() + fiducial_lepton[1].momentum()).pT();
/////////////////////////////////////////////////////////////////////////
// JETS
// -"alljets": found by "jetpro" projection && pT()>25 GeV && |y|<4.5
// -"vetojets": "alljets" && dR(electron,jet)>0.3
//
/////////////////////////////////////////////////////////////////////////
Jets alljets, vetojets;
foreach (const Jet& j, apply<FastJets>(e, "jet").jetsByPt(25)) {
if (j.absrap() > 4.5 ) continue;
alljets.push_back(j);
bool deltaRcontrol = true;
foreach (DressedLepton& fl,fiducial_lepton) {
if (fl.constituentLepton().abspid() == PID::ELECTRON) { //electrons
double deltaRjets = deltaR(fl.constituentLepton().momentum(), j.momentum(), RAPIDITY);
if (deltaRjets <= 0.3) deltaRcontrol = false; //false if at least one electron is in the overlap region
}
}
if (deltaRcontrol) vetojets.push_back(j);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// MISSING ETrel
// -"mismom": fourvector of invisible momentum found by "met" projection
// -"delta_phi": delta phi between mismom and the nearest "fiducial_lepton" or "vetojet"
// -"MET_rel": missing transverse energy defined as:
// *"mismom" for "delta_phi" >= (0.5*pi)
// *"mismom.pT()*sin(delta_phi)" for "delta_phi" < (0.5*pi)
//
/////////////////////////////////////////////////////////////////////////////////////////////////
FourMomentum mismom;
double MET_rel = 0, delta_phi = 0;
mismom = -met.visibleMomentum();
vector<double> vL_MET_angle, vJet_MET_angle;
vL_MET_angle.push_back(fabs(deltaPhi(fiducial_lepton[0].momentum(), mismom)));
vL_MET_angle.push_back(fabs(deltaPhi(fiducial_lepton[1].momentum(), mismom)));
foreach (double& lM, vL_MET_angle) if (lM > M_PI) lM = 2*M_PI - lM;
std::sort(vL_MET_angle.begin(), vL_MET_angle.end());
if (vetojets.size() == 0) delta_phi = vL_MET_angle[0];
if (vetojets.size() > 0) {
foreach (Jet& vj, vetojets) {
double jet_MET_angle = fabs(deltaPhi(vj.momentum(), mismom));
if (jet_MET_angle > M_PI) jet_MET_angle = 2*M_PI - jet_MET_angle;
vJet_MET_angle.push_back(jet_MET_angle);
}
std::sort(vJet_MET_angle.begin(), vJet_MET_angle.end());
if (vL_MET_angle[0] <= vJet_MET_angle[0]) delta_phi = vL_MET_angle[0];
if (vL_MET_angle[0] > vJet_MET_angle[0]) delta_phi = vJet_MET_angle[0];
}
if (delta_phi >= (0.5*M_PI)) delta_phi = 0.5*M_PI;
MET_rel = mismom.pT()*sin(delta_phi);
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// CUTS
// -jetveto: event with at least one vetojet is vetoed
// -M_Z: Z mass M_Z=91.1876*GeV
//
// * ee channel: MET_rel > 45 GeV, M_l1l2 > 15 GeV, |M_l1l2-M_Z| > 15 GeV, jetveto, pT_l1l2 > 30 GeV
// * mumu channel: MET_rel > 45 GeV, M_l1l2 > 15 GeV, |M_l1l2-M_Z| > 15 GeV, jetveto, pT_l1l2 > 30 GeV
// * emu channel: MET_rel > 25 GeV, M_l1l2 > 10 GeV, |M_l1l2-M_Z| > 0 GeV, jetveto, pT_l1l2 > 30 GeV
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Get event weight for histo filling
const double weight = e.weight();
// ee channel
if (fiducial_lepton[0].abspid() == PID::ELECTRON && fiducial_lepton[1].abspid() == PID::ELECTRON) {
if (MET_rel <= 45*GeV) vetoEvent;
if (M_l1l2 <= 15*GeV) vetoEvent;
if (fabs(M_l1l2 - 91.1876*GeV) <= 15*GeV) vetoEvent;
if (vetojets.size() != 0) vetoEvent;
if (pT_l1l2 <= 30*GeV) vetoEvent;
_h_Wl1_pT_ee->fill(sqrtS()*GeV, weight);
_h_Wl1_pT_inclusive->fill(pT_l1, weight);
}
// mumu channel
else if (fiducial_lepton[0].abspid() == PID::MUON && fiducial_lepton[1].abspid() == PID::MUON) {
if (MET_rel <= 45*GeV) vetoEvent;
if (M_l1l2 <= 15*GeV) vetoEvent;
if (fabs(M_l1l2-91.1876*GeV) <= 15*GeV) vetoEvent;
if (vetojets.size() != 0) vetoEvent;
if (pT_l1l2 <= 30*GeV) vetoEvent;
_h_Wl1_pT_mumu->fill(sqrtS()*GeV, weight);
_h_Wl1_pT_inclusive->fill(pT_l1, weight);
}
// emu channel
else if (fiducial_lepton[0].abspid() != fiducial_lepton[1].abspid()) {
if (MET_rel <= 25*GeV) vetoEvent;
if (M_l1l2 <= 10*GeV) vetoEvent;
if (vetojets.size() != 0) vetoEvent;
if (pT_l1l2 <= 30*GeV) vetoEvent;
_h_Wl1_pT_emu->fill(sqrtS()*GeV, weight);
_h_Wl1_pT_inclusive->fill(pT_l1, weight);
}
}
/// Finalize
void finalize() {
const double norm = crossSection()/sumOfWeights()/femtobarn;
scale(_h_Wl1_pT_ee, norm);
scale(_h_Wl1_pT_mumu, norm);
scale(_h_Wl1_pT_emu, norm);
normalize(_h_Wl1_pT_inclusive, 1);
}
private:
Histo1DPtr _h_Wl1_pT_ee, _h_Wl1_pT_mumu, _h_Wl1_pT_emu, _h_Wl1_pT_inclusive;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ATLAS_2013_I1190187);
}
diff --git a/analyses/pluginATLAS/ATLAS_2017_I1625109.cc b/analyses/pluginATLAS/ATLAS_2017_I1625109.cc
--- a/analyses/pluginATLAS/ATLAS_2017_I1625109.cc
+++ b/analyses/pluginATLAS/ATLAS_2017_I1625109.cc
@@ -1,309 +1,311 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/PromptFinalState.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
#include "Rivet/Projections/DressedLeptons.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
-
+
+
class ATLAS_2017_I1625109 : public Analysis {
public:
/// Constructor
/// @brief measurement of on-shell ZZ at 13 TeV
DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2017_I1625109);
/// @name Analysis methods
//@{
struct Dilepton {
Dilepton() {};
Dilepton(const ParticlePair & _leptons) : leptons(_leptons) {}
FourMomentum momentum() const {
return leptons.first.mom() + leptons.second.mom();
}
ParticlePair leptons;
};
-
-
+
+
struct Quadruplet {
-
+
vector<DressedLepton> getLeptonsSortedByPt() const {
- vector<DressedLepton> out = { leadingDilepton.leptons.first, leadingDilepton.leptons.second,
+ vector<DressedLepton> out = { leadingDilepton.leptons.first, leadingDilepton.leptons.second,
subleadingDilepton.leptons.first, subleadingDilepton.leptons.second };
std::sort(out.begin(), out.end(), cmpMomByPt);
return out;
}
-
+
Quadruplet(const Dilepton& dilepton1, const Dilepton& dilepton2) {
if (dilepton1.momentum().pt() > dilepton2.momentum().pt()) {
leadingDilepton = dilepton1;
subleadingDilepton = dilepton2;
}
else {
leadingDilepton = dilepton2;
subleadingDilepton = dilepton1;
}
leptonsSortedByPt = getLeptonsSortedByPt();
}
FourMomentum momentum() const {
return leadingDilepton.momentum() + subleadingDilepton.momentum();
}
-
+
double distanceFromZMass() const {
return abs(leadingDilepton.momentum().mass() - Z_mass) + abs(subleadingDilepton.momentum().mass() - Z_mass);
- }
+ }
Dilepton leadingDilepton;
Dilepton subleadingDilepton;
vector<DressedLepton> leptonsSortedByPt;
};
-
+
typedef vector<Quadruplet> Quadruplets;
-
+
typedef std::pair<size_t, size_t> IndexPair;
-
+
+
vector<IndexPair> getOppositeChargePairsIndices(const vector<DressedLepton>& leptons) {
vector<IndexPair> indices = {};
if (leptons.size() < 2) return indices;
for (size_t i = 0; i < leptons.size(); ++i) {
for (size_t k = i+1; k < leptons.size(); ++k) {
const auto charge_i = leptons.at(i).charge();
const auto charge_k = leptons.at(k).charge();
if (charge_i == -charge_k) {
indices.push_back(std::make_pair(i, k));
}
}
}
return indices;
}
-
+
bool indicesOverlap(IndexPair a, IndexPair b) {
return (a.first == b.first || a.first == b.second || a.second == b.first || a.second == b.second);
}
-
-
+
+
bool passesHierarchicalPtRequirements(const Quadruplet& quadruplet) {
const auto& sorted_leptons = quadruplet.leptonsSortedByPt;
if (sorted_leptons.at(0).pt() < 20*GeV) return false;
if (sorted_leptons.at(1).pt() < 15*GeV) return false;
if (sorted_leptons.at(2).pt() < 10*GeV) return false;
return true;
}
-
+
bool passesDileptonMinimumMassRequirement(const Quadruplet& quadruplet) {
const auto& leptons = quadruplet.leptonsSortedByPt;
- for (const auto& l1 : leptons) {
- for (const auto& l2 : leptons) {
- if (l1 == l2) continue;
- if ((l1.pdgId() + l2.pdgId() == 0) && ((l1.mom() + l2.mom()).mass() < 5.0*GeV)) return false;
+ for (const Particle& l1 : leptons) {
+ for (const Particle& l2 : leptons) {
+ if (isSame(l1, l2)) continue;
+ if ((l1.pid() + l2.pid() == 0) && ((l1.mom() + l2.mom()).mass() < 5.0*GeV)) return false;
}
}
return true;
}
-
+
bool passesLeptonDeltaRRequirements(const Quadruplet& quadruplet) {
const auto& leptons = quadruplet.leptonsSortedByPt;
- for (const auto& l1 : leptons) {
- for (const auto& l2 : leptons) {
- if (l1 == l2) continue;
+ for (const Particle& l1 : leptons) {
+ for (const Particle& l2 : leptons) {
+ if (isSame(l1, l2)) continue;
// Any lepton flavour:
if (deltaR(l1.mom(), l2.mom()) < 0.1) return false;
// Different lepton flavour:
if ((l1.abspid() - l2.abspid() != 0) && (deltaR(l1.mom(), l2.mom()) < 0.2)) return false;
}
}
return true;
}
-
+
Quadruplets formQuadrupletsByChannel(const vector<DressedLepton>& same_flavour_leptons, vector<IndexPair> indices) {
Quadruplets quadruplets = {};
for (size_t i = 0; i < indices.size(); ++i) {
for (size_t k = i+1; k < indices.size(); ++k) {
const auto& pair_i = indices.at(i);
const auto& pair_k = indices.at(k);
if (indicesOverlap(pair_i, pair_k)) continue;
const auto d1 = Dilepton({same_flavour_leptons.at(pair_i.first), same_flavour_leptons.at(pair_i.second)});
const auto d2 = Dilepton({same_flavour_leptons.at(pair_k.first), same_flavour_leptons.at(pair_k.second)});
const auto quadruplet = Quadruplet(d1, d2);
if (passesHierarchicalPtRequirements(quadruplet)) quadruplets.push_back(quadruplet);
}
}
return quadruplets;
}
-
- Quadruplets formQuadrupletsByChannel(const vector<DressedLepton>& electrons, vector<IndexPair> e_indices,
+
+ Quadruplets formQuadrupletsByChannel(const vector<DressedLepton>& electrons, vector<IndexPair> e_indices,
const vector<DressedLepton>& muons, vector<IndexPair> m_indices) {
Quadruplets quadruplets = {};
for (const auto& pair_e : e_indices) {
for (const auto& pair_m : m_indices) {
const auto d1 = Dilepton({electrons.at(pair_e.first), electrons.at(pair_e.second)});
const auto d2 = Dilepton({muons.at(pair_m.first), muons.at(pair_m.second)});
const auto quadruplet = Quadruplet(d1, d2);
if (passesHierarchicalPtRequirements(quadruplet)) quadruplets.push_back(quadruplet);
}
}
return quadruplets;
}
-
-
+
+
Quadruplets getQuadruplets(const vector<DressedLepton>& electrons, const vector<DressedLepton>& muons) {
const auto oc_electrons_indices = getOppositeChargePairsIndices(electrons);
const auto oc_muons_indices = getOppositeChargePairsIndices(muons);
const auto electron_quadruplets = formQuadrupletsByChannel(electrons, oc_electrons_indices);
const auto muon_quadruplets = formQuadrupletsByChannel(muons, oc_muons_indices);
const auto mixed_quadruplets = formQuadrupletsByChannel(electrons, oc_electrons_indices, muons, oc_muons_indices);
auto quadruplets = electron_quadruplets;
quadruplets.insert(quadruplets.end(), muon_quadruplets.begin(), muon_quadruplets.end());
quadruplets.insert(quadruplets.end(), mixed_quadruplets.begin(), mixed_quadruplets.end());
-
+
return quadruplets;
}
-
+
Quadruplet selectQuadruplet(const Quadruplets& quadruplets) {
if (quadruplets.empty()) throw std::logic_error("Expect at least one quadruplet! The user should veto events without quadruplets.");
Quadruplets sortedQuadruplets = quadruplets;
std::sort(sortedQuadruplets.begin(), sortedQuadruplets.end(), [](const Quadruplet& a, const Quadruplet& b) {
return a.distanceFromZMass() < b.distanceFromZMass();
});
return sortedQuadruplets.at(0);
}
/// Book histograms and initialise projections before the run
void init() {
const Cut presel = Cuts::abseta < 5 && Cuts::pT > 100*MeV;
const FinalState fs(presel);
-
+
// Prompt leptons, photons, neutrinos
// Excluding those from tau decay
const PromptFinalState photons(presel && Cuts::abspid == PID::PHOTON, false);
const PromptFinalState bare_elecs(presel && Cuts::abspid == PID::ELECTRON, false);
const PromptFinalState bare_muons(presel && Cuts::abspid == PID::MUON, false);
// Baseline lepton and jet declaration
const Cut lepton_baseline_cuts = Cuts::abseta < 2.7 && Cuts::pT > 5*GeV;
const DressedLeptons elecs = DressedLeptons(photons, bare_elecs, 0.1, lepton_baseline_cuts);
const DressedLeptons muons = DressedLeptons(photons, bare_muons, 0.1, lepton_baseline_cuts);
declare(elecs, "electrons");
declare(muons, "muons");
-
+
VetoedFinalState jet_input(fs);
jet_input.addVetoOnThisFinalState(elecs);
jet_input.addVetoOnThisFinalState(muons);
declare(FastJets(jet_input, FastJets::ANTIKT, 0.4), "jets");
 // Book histograms
_h["pT_4l"] = bookHisto1D(2, 1, 1);
_h["pT_leading_dilepton"] = bookHisto1D(8, 1, 1);
_h["pT_subleading_dilepton"] = bookHisto1D(14, 1, 1);
_h["pT_lepton1"] = bookHisto1D(20, 1, 1);
_h["pT_lepton2"] = bookHisto1D(26, 1, 1);
_h["pT_lepton3"] = bookHisto1D(32, 1, 1);
_h["pT_lepton4"] = bookHisto1D(38, 1, 1);
_h["absy_4l"] = bookHisto1D(44, 1, 1);
_h["deltay_dileptons"] = bookHisto1D(50, 1, 1);
_h["deltaphi_dileptons"] = bookHisto1D(56, 1, 1);
_h["N_jets"] = bookHisto1D(62, 1, 1);
_h["N_central_jets"] = bookHisto1D(68, 1, 1);
_h["N_jets60"] = bookHisto1D(74, 1, 1);
_h["mass_dijet"] = bookHisto1D(80, 1, 1);
_h["deltay_dijet"] = bookHisto1D(86, 1, 1);
_h["scalarpTsum_jets"] = bookHisto1D(92, 1, 1);
_h["abseta_jet1"] = bookHisto1D(98, 1, 1);
_h["abseta_jet2"] = bookHisto1D(104, 1, 1);
_h["pT_jet1"] = bookHisto1D(110, 1, 1);
_h["pT_jet2"] = bookHisto1D(116, 1, 1);
}
/// Perform the per-event analysis
void analyze(Event const & event) {
const double weight = event.weight();
-
+
const auto& baseline_electrons = apply<DressedLeptons>(event, "electrons").dressedLeptons();
const auto& baseline_muons = apply<DressedLeptons>(event, "muons").dressedLeptons();
-
+
// Form all possible quadruplets passing hierarchical lepton pT cuts
const auto quadruplets = getQuadruplets(baseline_electrons, baseline_muons);
-
+
if (quadruplets.empty()) vetoEvent;
-
+
// Select the best quadruplet, the one minimising the total distance from the Z pole mass
auto const quadruplet = selectQuadruplet(quadruplets);
-
+
// Event selection on the best quadruplet
if (!passesDileptonMinimumMassRequirement(quadruplet)) vetoEvent;
if (!passesLeptonDeltaRRequirements(quadruplet)) vetoEvent;
if (!inRange(quadruplet.leadingDilepton.momentum().mass(), 66*GeV, 116*GeV)) vetoEvent;
if (!inRange(quadruplet.subleadingDilepton.momentum().mass(), 66*GeV, 116*GeV)) vetoEvent;
-
+
// Select jets
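      // Keep anti-kT R=0.4 jets with pT > 30 GeV, discarding any jet within dR < 0.4 of a quadruplet lepton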
Jets alljets = apply<JetAlg>(event, "jets").jetsByPt(Cuts::pT > 30*GeV);
for (const DressedLepton& lep : quadruplet.leptonsSortedByPt)
ifilter_discard(alljets, deltaRLess(lep, 0.4));
const Jets jets = alljets;
const Jets centralJets = filterBy(jets, Cuts::abseta < 2.4);
const Jets pt60Jets = filterBy(jets, Cuts::pT > 60*GeV);
-
+
const auto& leadingDilepton = quadruplet.leadingDilepton.momentum();
const auto& subleadingDilepton = quadruplet.subleadingDilepton.momentum();
-
+
_h["pT_4l"]->fill((leadingDilepton + subleadingDilepton).pt()/GeV, weight);
_h["pT_leading_dilepton"]->fill(leadingDilepton.pt()/GeV, weight);
_h["pT_subleading_dilepton"]->fill(subleadingDilepton.pt()/GeV, weight);
_h["pT_lepton1"]->fill(quadruplet.leptonsSortedByPt.at(0).pt()/GeV, weight);
_h["pT_lepton2"]->fill(quadruplet.leptonsSortedByPt.at(1).pt()/GeV, weight);
_h["pT_lepton3"]->fill(quadruplet.leptonsSortedByPt.at(2).pt()/GeV, weight);
_h["pT_lepton4"]->fill(quadruplet.leptonsSortedByPt.at(3).pt()/GeV, weight);
_h["absy_4l"]->fill((leadingDilepton + subleadingDilepton).absrapidity(), weight);
_h["deltay_dileptons"]->fill(fabs(leadingDilepton.rapidity() - subleadingDilepton.rapidity()), weight);
_h["deltaphi_dileptons"]->fill(deltaPhi(leadingDilepton, subleadingDilepton)/pi, weight);
_h["N_jets"]->fill(jets.size(), weight);
_h["N_central_jets"]->fill(centralJets.size(), weight);
_h["N_jets60"]->fill(pt60Jets.size(), weight);
-
+
      // Require at least one jet for the jet observables
if (jets.empty()) vetoEvent;
_h["scalarpTsum_jets"]->fill(sum(jets, pT, 0.)/GeV, weight);
_h["abseta_jet1"]->fill(jets.front().abseta(), weight);
_h["pT_jet1"]->fill(jets.front().pt()/GeV, weight);
-
+
      // Require at least two jets for the dijet observables
- if (jets.size() < 2) vetoEvent;
+ if (jets.size() < 2) vetoEvent;
_h["mass_dijet"]->fill((jets.at(0).mom() + jets.at(1).mom()).mass()/GeV, weight);
_h["deltay_dijet"]->fill(fabs(jets.at(0).rapidity() - jets.at(1).rapidity()), weight);
_h["abseta_jet2"]->fill(jets.at(1).abseta(), weight);
_h["pT_jet2"]->fill(jets.at(1).pt()/GeV, weight);
}
/// Normalise histograms etc., after the run
void finalize() {
// Normalise histograms to cross section
const double sf = crossSectionPerEvent() / femtobarn;
for (map<string, Histo1DPtr>::iterator it = _h.begin(); it != _h.end(); ++it) {
scale(it->second, sf);
}
}
//@}
-
+
private:
/// @name Histograms
//@{
map<string, Histo1DPtr> _h;
static constexpr double Z_mass = 91.1876;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(ATLAS_2017_I1625109);
}
diff --git a/analyses/pluginATLAS/ATLAS_2017_I1626105.cc b/analyses/pluginATLAS/ATLAS_2017_I1626105.cc
--- a/analyses/pluginATLAS/ATLAS_2017_I1626105.cc
+++ b/analyses/pluginATLAS/ATLAS_2017_I1626105.cc
@@ -1,130 +1,129 @@
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
#include "Rivet/Projections/IdentifiedFinalState.hh"
#include "Rivet/Projections/PromptFinalState.hh"
#include "Rivet/Projections/DressedLeptons.hh"
#include "Rivet/Projections/FastJets.hh"
namespace Rivet {
- /// @brief lepton differential ttbar analysis at 8 TeV
+
+ /// Lepton differential ttbar analysis at 8 TeV
class ATLAS_2017_I1626105 : public Analysis {
public:
-
+
+
DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2017_I1626105);
-
+
+
void init() {
Cut eta_full = Cuts::abseta < 5.0 && Cuts::pT > 1.0*MeV;
// All final state particles
const FinalState fs;
// Get photons to dress leptons
IdentifiedFinalState photons(fs);
photons.acceptIdPair(PID::PHOTON);
// Projection to find the electrons
PromptFinalState prompt_el(Cuts::abspid == PID::ELECTRON, true);
DressedLeptons elecs(photons, prompt_el, 0.1, (Cuts::abseta < 2.5) && (Cuts::pT > 25*GeV));
DressedLeptons veto_elecs(photons, prompt_el, 0.1, eta_full, false);
declare(elecs, "elecs");
// Projection to find the muons
PromptFinalState prompt_mu(Cuts::abspid == PID::MUON, true);
DressedLeptons muons(photons, prompt_mu, 0.1, (Cuts::abseta < 2.5) && (Cuts::pT > 25*GeV));
DressedLeptons veto_muons(photons, prompt_mu, 0.1, eta_full, false);
declare(muons, "muons");
// Jet clustering.
VetoedFinalState vfs;
vfs.addVetoOnThisFinalState(veto_elecs);
vfs.addVetoOnThisFinalState(veto_muons);
FastJets jets(vfs, FastJets::ANTIKT, 0.4);
jets.useInvisibles();
declare(jets, "jets");
// Book histograms
bookHistos("lep_pt", 1);
bookHistos("lep_eta", 3);
bookHistos("dilep_pt", 5);
bookHistos("dilep_mass", 7);
bookHistos("dilep_rap", 9);
bookHistos("dilep_dphi", 11);
bookHistos("dilep_sumpt", 13);
bookHistos("dilep_sumE", 15);
}
+
void analyze(const Event& event) {
vector<DressedLepton> elecs = sortByPt(apply<DressedLeptons>(event, "elecs").dressedLeptons());
- vector<DressedLepton> muons = sortByPt(apply<DressedLeptons>(event, "muons").dressedLeptons());
+ vector<DressedLepton> muons = sortByPt(apply<DressedLeptons>(event, "muons").dressedLeptons());
Jets jets = apply<FastJets>(event, "jets").jetsByPt(Cuts::pT > 25*GeV && Cuts::abseta < 2.5);
-
+
// Check overlap of jets/leptons.
for (const Jet& jet : jets) {
- for (const DressedLepton& el : elecs) {
- if (deltaR(jet, el) < 0.4) delete el;
- }
- for (const DressedLepton& mu : muons) {
- if (deltaR(jet, mu) < 0.4) delete mu;
- }
+ ifilter_discard(elecs, deltaRLess(jet, 0.4));
+ ifilter_discard(muons, deltaRLess(jet, 0.4));
}
- if (elecs.empty() || muons.empty()) vetoEvent;
-
- if (elecs[0].charge() == muons[0].charge()) vetoEvent;
-
+ if (elecs.empty() || muons.empty()) vetoEvent;
+ if (elecs[0].charge() == muons[0].charge()) vetoEvent;
+
FourMomentum el = elecs[0].momentum();
FourMomentum mu = muons[0].momentum();
FourMomentum ll = elecs[0].momentum() + muons[0].momentum();
-
+
// Fill histograms
const double weight = event.weight();
fillHistos("lep_pt", el.pT()/GeV, weight);
fillHistos("lep_pt", mu.pT()/GeV, weight);
fillHistos("lep_eta", el.abseta(), weight);
fillHistos("lep_eta", mu.abseta(), weight);
fillHistos("dilep_pt", ll.pT()/GeV, weight);
fillHistos("dilep_mass", ll.mass()/GeV, weight);
fillHistos("dilep_rap", ll.absrap(), weight);
fillHistos("dilep_dphi", deltaPhi(el, mu), weight);
fillHistos("dilep_sumpt", (el.pT() + mu.pT())/GeV, weight);
fillHistos("dilep_sumE", (el.E() + mu.E())/GeV, weight);
}
void finalize() {
// Normalize to cross-section
const double sf = crossSection()/femtobarn/sumOfWeights();
for (auto& hist : _h) {
const double norm = 1.0 / hist.second->integral();
- // add overflow to last bin
+ // add overflow to last bin
double overflow = hist.second->overflow().effNumEntries();
hist.second->fillBin(hist.second->numBins() - 1, overflow);
// histogram normalisation
if (hist.first.find("norm") != string::npos) scale(hist.second, norm);
else scale(hist.second, sf);
}
}
private:
/// @name Histogram helper functions
//@{
void bookHistos(const std::string name, unsigned int index) {
_h[name] = bookHisto1D(index, 1, 1);
_h["norm_" + name] = bookHisto1D(index + 1, 1, 1);
}
void fillHistos(const std::string name, double value, double weight) {
_h[name]->fill(value, weight);
_h["norm_" + name]->fill(value, weight);
}
map<string, Histo1DPtr> _h;
//@}
};
// Declare the class as a hook for the plugin system
DECLARE_RIVET_PLUGIN(ATLAS_2017_I1626105);
}
diff --git a/analyses/pluginCMS/CMS_2016_I1491950.cc b/analyses/pluginCMS/CMS_2016_I1491950.cc
--- a/analyses/pluginCMS/CMS_2016_I1491950.cc
+++ b/analyses/pluginCMS/CMS_2016_I1491950.cc
@@ -1,505 +1,503 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Tools/Logging.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/DressedLeptons.hh"
#include "Rivet/Projections/PromptFinalState.hh"
#include "Rivet/Projections/IdentifiedFinalState.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
#include "Rivet/Tools/ParticleName.hh"
#include "Rivet/Tools/ParticleIdUtils.hh"
-namespace Rivet
-{
- namespace CMS_2016_I1491950_INTERNAL { //< only visible in this compilation unit
+namespace Rivet {
+
+
+ namespace { //< only visible in this compilation unit
/// @brief Special dressed lepton finder
///
/// Find dressed leptons by clustering all leptons and photons
class SpecialDressedLeptons : public FinalState {
public:
/// The default constructor. May specify cuts
SpecialDressedLeptons(const FinalState& fs, const Cut& cut)
: FinalState(cut)
{
setName("SpecialDressedLeptons");
IdentifiedFinalState ifs(fs);
ifs.acceptIdPair(PID::PHOTON);
ifs.acceptIdPair(PID::ELECTRON);
ifs.acceptIdPair(PID::MUON);
addProjection(ifs, "IFS");
addProjection(FastJets(ifs, FastJets::ANTIKT, 0.1), "LeptonJets");
}
-
+
/// Clone on the heap.
virtual unique_ptr<Projection> clone() const {
return unique_ptr<Projection>(new SpecialDressedLeptons(*this));
}
-
+
/// Retrieve the dressed leptons
const vector<DressedLepton>& dressedLeptons() const { return _clusteredLeptons; }
-
+
private:
/// Container which stores the clustered lepton objects
vector<DressedLepton> _clusteredLeptons;
-
+
public:
void project(const Event& e) {
_theParticles.clear();
_clusteredLeptons.clear();
-
+
vector<DressedLepton> allClusteredLeptons;
-
+
const Jets jets = applyProjection<FastJets>(e, "LeptonJets").jetsByPt(5.*GeV);
foreach (const Jet& jet, jets) {
Particle lepCand;
for (const Particle& cand : jet.particles()) {
const int absPdgId = abs(cand.pdgId());
if (absPdgId == PID::ELECTRON || absPdgId == PID::MUON) {
if (cand.pt() > lepCand.pt()) lepCand = cand;
}
}
//Central lepton must be the major component
if ((lepCand.pt() < jet.pt()/2.) || (lepCand.pdgId() == 0)) continue;
-
- DressedLepton lepton = DressedLepton(lepCand);
-
+
+ DressedLepton lepton(lepCand);
for (const Particle& cand : jet.particles()) {
- if (cand == lepCand) continue;
- if (cand.pid() != PID::PHOTON) continue;
+ if (isSame(cand, lepCand)) continue;
+ if (cand.pid() != PID::PHOTON) continue;
lepton.addPhoton(cand, true);
}
allClusteredLeptons.push_back(lepton);
}
-
+
for (const DressedLepton& lepton : allClusteredLeptons) {
if (accept(lepton)) {
_clusteredLeptons.push_back(lepton);
_theParticles.push_back(lepton.constituentLepton());
_theParticles += lepton.constituentPhotons();
}
}
}
};
}
- class CMS_2016_I1491950 : public Analysis
- {
- public:
- typedef CMS_2016_I1491950_INTERNAL::SpecialDressedLeptons SpecialDressedLeptons;
+ class CMS_2016_I1491950 : public Analysis {
+ public:
+
/// Constructor
CMS_2016_I1491950()
: Analysis("CMS_2016_I1491950")
{ }
/// Book histograms and initialise projections before the run
void init()
{
FinalState fs(Cuts::pT > 0. && Cuts::abseta < 6.);
PromptFinalState prompt_fs(fs);
prompt_fs.acceptMuonDecays(true);
prompt_fs.acceptTauDecays(true);
-
+
// Projection for dressed electrons and muons
Cut leptonCuts = Cuts::abseta < 2.5 and Cuts::pt > 30.*GeV;
SpecialDressedLeptons dressedleptons(prompt_fs, leptonCuts);
addProjection(dressedleptons, "DressedLeptons");
-
+
// Neutrinos
IdentifiedFinalState neutrinos(prompt_fs);
neutrinos.acceptNeutrinos();
addProjection(neutrinos, "Neutrinos");
-
+
// Projection for jets
VetoedFinalState fsForJets(fs);
fsForJets.addVetoOnThisFinalState(dressedleptons);
fsForJets.addVetoOnThisFinalState(neutrinos);
addProjection(FastJets(fsForJets, FastJets::ANTIKT, 0.4, JetAlg::DECAY_MUONS, JetAlg::DECAY_INVISIBLES), "Jets");
-
+
       // Book histograms
_hist_thadpt = bookHisto1D("d01-x02-y01");
_hist_thady = bookHisto1D("d03-x02-y01");
_hist_tleppt = bookHisto1D("d05-x02-y01");
_hist_tlepy = bookHisto1D("d07-x02-y01");
_hist_ttpt = bookHisto1D("d09-x02-y01");
_hist_tty = bookHisto1D("d13-x02-y01");
_hist_ttm = bookHisto1D("d11-x02-y01");
_hist_njet = bookHisto1D("d15-x02-y01");
_hist_njets_thadpt_1 = bookHisto1D("d17-x02-y01");
_hist_njets_thadpt_2 = bookHisto1D("d18-x02-y01");
_hist_njets_thadpt_3 = bookHisto1D("d19-x02-y01");
_hist_njets_thadpt_4 = bookHisto1D("d20-x02-y01");
_hist_njets_ttpt_1 = bookHisto1D("d22-x02-y01");
_hist_njets_ttpt_2 = bookHisto1D("d23-x02-y01");
_hist_njets_ttpt_3 = bookHisto1D("d24-x02-y01");
_hist_njets_ttpt_4 = bookHisto1D("d25-x02-y01");
_hist_thady_thadpt_1 = bookHisto1D("d27-x02-y01");
_hist_thady_thadpt_2 = bookHisto1D("d28-x02-y01");
_hist_thady_thadpt_3 = bookHisto1D("d29-x02-y01");
_hist_thady_thadpt_4 = bookHisto1D("d30-x02-y01");
_hist_ttm_tty_1 = bookHisto1D("d32-x02-y01");
_hist_ttm_tty_2 = bookHisto1D("d33-x02-y01");
_hist_ttm_tty_3 = bookHisto1D("d34-x02-y01");
_hist_ttm_tty_4 = bookHisto1D("d35-x02-y01");
_hist_ttpt_ttm_1 = bookHisto1D("d37-x02-y01");
_hist_ttpt_ttm_2 = bookHisto1D("d38-x02-y01");
_hist_ttpt_ttm_3 = bookHisto1D("d39-x02-y01");
_hist_ttpt_ttm_4 = bookHisto1D("d40-x02-y01");
_histnorm_thadpt = bookHisto1D("d42-x02-y01");
_histnorm_thady = bookHisto1D("d44-x02-y01");
_histnorm_tleppt = bookHisto1D("d46-x02-y01");
_histnorm_tlepy = bookHisto1D("d48-x02-y01");
_histnorm_ttpt = bookHisto1D("d50-x02-y01");
_histnorm_tty = bookHisto1D("d54-x02-y01");
_histnorm_ttm = bookHisto1D("d52-x02-y01");
_histnorm_njet = bookHisto1D("d56-x02-y01");
_histnorm_njets_thadpt_1 = bookHisto1D("d58-x02-y01");
_histnorm_njets_thadpt_2 = bookHisto1D("d59-x02-y01");
_histnorm_njets_thadpt_3 = bookHisto1D("d60-x02-y01");
_histnorm_njets_thadpt_4 = bookHisto1D("d61-x02-y01");
_histnorm_njets_ttpt_1 = bookHisto1D("d63-x02-y01");
_histnorm_njets_ttpt_2 = bookHisto1D("d64-x02-y01");
_histnorm_njets_ttpt_3 = bookHisto1D("d65-x02-y01");
_histnorm_njets_ttpt_4 = bookHisto1D("d66-x02-y01");
_histnorm_thady_thadpt_1 = bookHisto1D("d68-x02-y01");
_histnorm_thady_thadpt_2 = bookHisto1D("d69-x02-y01");
_histnorm_thady_thadpt_3 = bookHisto1D("d70-x02-y01");
_histnorm_thady_thadpt_4 = bookHisto1D("d71-x02-y01");
_histnorm_ttm_tty_1 = bookHisto1D("d73-x02-y01");
_histnorm_ttm_tty_2 = bookHisto1D("d74-x02-y01");
_histnorm_ttm_tty_3 = bookHisto1D("d75-x02-y01");
_histnorm_ttm_tty_4 = bookHisto1D("d76-x02-y01");
_histnorm_ttpt_ttm_1 = bookHisto1D("d78-x02-y01");
_histnorm_ttpt_ttm_2 = bookHisto1D("d79-x02-y01");
_histnorm_ttpt_ttm_3 = bookHisto1D("d80-x02-y01");
_histnorm_ttpt_ttm_4 = bookHisto1D("d81-x02-y01");
}
/// Perform the per-event analysis
void analyze(const Event& event)
{
const double weight = event.weight();
-
+
// leptons
const SpecialDressedLeptons& dressedleptons_proj = applyProjection<SpecialDressedLeptons>(event, "DressedLeptons");
std::vector<DressedLepton> dressedLeptons = dressedleptons_proj.dressedLeptons();
-
+
if(dressedLeptons.size() != 1) return;
-
+
// neutrinos
const Particles neutrinos = applyProjection<FinalState>(event, "Neutrinos").particlesByPt();
_nusum = FourMomentum(0., 0., 0., 0.);
for(const Particle& neutrino : neutrinos)
{
_nusum += neutrino.momentum();
}
_wl = _nusum + dressedLeptons[0].momentum();
-
+
// jets
Cut jet_cut = (Cuts::abseta < 2.5) and (Cuts::pT > 25.*GeV);
const Jets jets = applyProjection<FastJets>(event, "Jets").jetsByPt(jet_cut);
Jets allJets;
for (const Jet& jet : jets) {
allJets.push_back(jet);
- }
+ }
Jets bJets;
for (const Jet& jet : allJets) {
if (jet.bTagged()) bJets.push_back(jet);
}
if(bJets.size() < 2 || allJets.size() < 4) return;
-
+
//construct top quark proxies
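      // Try all pairs of jets for the hadronic W plus one b-jet each for the hadronic and leptonic top,
      // and keep the combination minimising K = (m_Wh - 80.4)^2 + (m_th - 172.5)^2 + (m_tl - 172.5)^2,
      // i.e. the assignment closest to the W and top masses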
double Kmin = numeric_limits<double>::max();
for(const Jet& itaj : allJets)
{
for(const Jet& itbj : allJets)
{
if (itaj.momentum() == itbj.momentum()) continue;
FourMomentum wh(itaj.momentum() + itbj.momentum());
for(const Jet& ithbj : bJets)
{
if(itaj.momentum() == ithbj.momentum() || itbj.momentum() == ithbj.momentum()) continue;
FourMomentum th(wh + ithbj.momentum());
for(const Jet& itlbj : bJets)
{
if(itaj.momentum() == itlbj.momentum() || itbj.momentum() == itlbj.momentum() || ithbj.momentum() == itlbj.momentum()) continue;
FourMomentum tl(_wl + itlbj.momentum());
double K = pow(wh.mass() - 80.4, 2) + pow(th.mass() - 172.5, 2) + pow(tl.mass() - 172.5, 2);
if(K < Kmin)
{
Kmin = K;
_tl = tl;
_th = th;
_wh = wh;
}
}
}
}
}
- _hist_thadpt->fill(_th.pt(), weight);
+ _hist_thadpt->fill(_th.pt(), weight);
_hist_thady->fill(abs(_th.rapidity()) , weight);
_hist_tleppt->fill(_tl.pt() , weight);
_hist_tlepy->fill(abs(_tl.rapidity()) , weight);
- _histnorm_thadpt->fill(_th.pt(), weight);
+ _histnorm_thadpt->fill(_th.pt(), weight);
_histnorm_thady->fill(abs(_th.rapidity()) , weight);
_histnorm_tleppt->fill(_tl.pt() , weight);
_histnorm_tlepy->fill(abs(_tl.rapidity()) , weight);
FourMomentum tt(_tl+_th);
_hist_ttpt->fill(tt.pt() , weight);
_hist_tty->fill(abs(tt.rapidity()) , weight);
_hist_ttm->fill(tt.mass() , weight);
_hist_njet->fill(min(allJets.size()-4., 4.), weight);
_histnorm_ttpt->fill(tt.pt() , weight);
_histnorm_tty->fill(abs(tt.rapidity()) , weight);
_histnorm_ttm->fill(tt.mass() , weight);
_histnorm_njet->fill(min(allJets.size()-4., 4.), weight);
if(allJets.size() == 4)
{
_hist_njets_thadpt_1->fill(_th.pt(), weight);
_hist_njets_ttpt_1->fill(tt.pt(), weight);
_histnorm_njets_thadpt_1->fill(_th.pt(), weight);
_histnorm_njets_ttpt_1->fill(tt.pt(), weight);
}
else if(allJets.size() == 5)
{
_hist_njets_thadpt_2->fill(_th.pt(), weight);
_hist_njets_ttpt_2->fill(tt.pt(), weight);
_histnorm_njets_thadpt_2->fill(_th.pt(), weight);
_histnorm_njets_ttpt_2->fill(tt.pt(), weight);
}
else if(allJets.size() == 6)
{
_hist_njets_thadpt_3->fill(_th.pt(), weight);
_hist_njets_ttpt_3->fill(tt.pt(), weight);
_histnorm_njets_thadpt_3->fill(_th.pt(), weight);
_histnorm_njets_ttpt_3->fill(tt.pt(), weight);
}
      else //>= 7 jets
{
_hist_njets_thadpt_4->fill(_th.pt(), weight);
_hist_njets_ttpt_4->fill(tt.pt(), weight);
_histnorm_njets_thadpt_4->fill(_th.pt(), weight);
_histnorm_njets_ttpt_4->fill(tt.pt(), weight);
}
if(abs(_th.rapidity()) < 0.5)
{
_hist_thady_thadpt_1->fill(_th.pt(), weight);
_histnorm_thady_thadpt_1->fill(_th.pt(), weight);
}
else if(abs(_th.rapidity()) < 1.0)
{
_hist_thady_thadpt_2->fill(_th.pt(), weight);
_histnorm_thady_thadpt_2->fill(_th.pt(), weight);
}
else if(abs(_th.rapidity()) < 1.5)
{
_hist_thady_thadpt_3->fill(_th.pt(), weight);
_histnorm_thady_thadpt_3->fill(_th.pt(), weight);
}
else if(abs(_th.rapidity()) < 2.5)
{
_hist_thady_thadpt_4->fill(_th.pt(), weight);
_histnorm_thady_thadpt_4->fill(_th.pt(), weight);
}
if(tt.mass() >= 300. && tt.mass() < 450.)
{
_hist_ttm_tty_1->fill(abs(tt.rapidity()), weight);
_histnorm_ttm_tty_1->fill(abs(tt.rapidity()), weight);
}
else if(tt.mass() >= 450. && tt.mass() < 625.)
{
_hist_ttm_tty_2->fill(abs(tt.rapidity()), weight);
_histnorm_ttm_tty_2->fill(abs(tt.rapidity()), weight);
}
else if(tt.mass() >= 625. && tt.mass() < 850.)
{
_hist_ttm_tty_3->fill(abs(tt.rapidity()), weight);
_histnorm_ttm_tty_3->fill(abs(tt.rapidity()), weight);
}
else if(tt.mass() >= 850. && tt.mass() < 2000.)
{
_hist_ttm_tty_4->fill(abs(tt.rapidity()), weight);
_histnorm_ttm_tty_4->fill(abs(tt.rapidity()), weight);
}
if(tt.pt() < 35.)
{
_hist_ttpt_ttm_1->fill(tt.mass(), weight);
_histnorm_ttpt_ttm_1->fill(tt.mass(), weight);
}
else if(tt.pt() < 80.)
{
_hist_ttpt_ttm_2->fill(tt.mass(), weight);
_histnorm_ttpt_ttm_2->fill(tt.mass(), weight);
}
else if(tt.pt() < 140.)
{
_hist_ttpt_ttm_3->fill(tt.mass(), weight);
_histnorm_ttpt_ttm_3->fill(tt.mass(), weight);
}
else if(tt.pt() < 500.)
{
_hist_ttpt_ttm_4->fill(tt.mass(), weight);
_histnorm_ttpt_ttm_4->fill(tt.mass(), weight);
}
}
/// Normalise histograms etc., after the run
void finalize()
{
scale(_hist_thadpt, crossSection()/sumOfWeights());
scale(_hist_thady, crossSection()/sumOfWeights());
scale(_hist_tleppt, crossSection()/sumOfWeights());
scale(_hist_tlepy, crossSection()/sumOfWeights());
scale(_hist_ttpt, crossSection()/sumOfWeights());
scale(_hist_tty, crossSection()/sumOfWeights());
scale(_hist_ttm, crossSection()/sumOfWeights());
scale(_hist_njet, crossSection()/sumOfWeights());
scale(_hist_njets_thadpt_1, crossSection()/sumOfWeights());
scale(_hist_njets_thadpt_2, crossSection()/sumOfWeights());
scale(_hist_njets_thadpt_3, crossSection()/sumOfWeights());
scale(_hist_njets_thadpt_4, crossSection()/sumOfWeights());
scale(_hist_njets_ttpt_1, crossSection()/sumOfWeights());
scale(_hist_njets_ttpt_2, crossSection()/sumOfWeights());
scale(_hist_njets_ttpt_3, crossSection()/sumOfWeights());
scale(_hist_njets_ttpt_4, crossSection()/sumOfWeights());
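      // Double-differential distributions: additionally divide by the width of the outer bin
      // (|y(t_had)| widths 0.5/0.5/0.5/1.0; m(ttbar) widths 150/175/225/1150 GeV; pT(ttbar) widths 35/45/60/360 GeV)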
scale(_hist_thady_thadpt_1, crossSection()/sumOfWeights()/0.5);
scale(_hist_thady_thadpt_2, crossSection()/sumOfWeights()/0.5);
scale(_hist_thady_thadpt_3, crossSection()/sumOfWeights()/0.5);
scale(_hist_thady_thadpt_4, crossSection()/sumOfWeights()/1.0);
scale(_hist_ttm_tty_1, crossSection()/sumOfWeights()/150.);
scale(_hist_ttm_tty_2, crossSection()/sumOfWeights()/175.);
scale(_hist_ttm_tty_3, crossSection()/sumOfWeights()/225.);
scale(_hist_ttm_tty_4, crossSection()/sumOfWeights()/1150.);
scale(_hist_ttpt_ttm_1, crossSection()/sumOfWeights()/35.);
scale(_hist_ttpt_ttm_2, crossSection()/sumOfWeights()/45.);
scale(_hist_ttpt_ttm_3, crossSection()/sumOfWeights()/60.);
scale(_hist_ttpt_ttm_4, crossSection()/sumOfWeights()/360.);
scale(_histnorm_thadpt, 1./_histnorm_thadpt->sumW(false));
scale(_histnorm_thady, 1./_histnorm_thady->sumW(false));
scale(_histnorm_tleppt, 1./_histnorm_tleppt->sumW(false));
scale(_histnorm_tlepy, 1./_histnorm_tlepy->sumW(false));
scale(_histnorm_ttpt, 1./_histnorm_ttpt->sumW(false));
scale(_histnorm_tty, 1./_histnorm_tty->sumW(false));
scale(_histnorm_ttm, 1./_histnorm_ttm->sumW(false));
scale(_histnorm_njet, 1./_histnorm_njet->sumW(false));
double sum_njets_thadpt = _histnorm_njets_thadpt_1->sumW(false) + _histnorm_njets_thadpt_2->sumW(false) + _histnorm_njets_thadpt_3->sumW(false) + _histnorm_njets_thadpt_4->sumW(false);
scale(_histnorm_njets_thadpt_1, 1./sum_njets_thadpt);
scale(_histnorm_njets_thadpt_2, 1./sum_njets_thadpt);
scale(_histnorm_njets_thadpt_3, 1./sum_njets_thadpt);
scale(_histnorm_njets_thadpt_4, 1./sum_njets_thadpt);
double sum_njets_ttpt = _histnorm_njets_ttpt_1->sumW(false) + _histnorm_njets_ttpt_2->sumW(false) + _histnorm_njets_ttpt_3->sumW(false) + _histnorm_njets_ttpt_4->sumW(false);
scale(_histnorm_njets_ttpt_1, 1./sum_njets_ttpt);
scale(_histnorm_njets_ttpt_2, 1./sum_njets_ttpt);
scale(_histnorm_njets_ttpt_3, 1./sum_njets_ttpt);
scale(_histnorm_njets_ttpt_4, 1./sum_njets_ttpt);
double sum_thady_thadpt = _histnorm_thady_thadpt_1->sumW(false) + _histnorm_thady_thadpt_2->sumW(false) + _histnorm_thady_thadpt_3->sumW(false) + _histnorm_thady_thadpt_4->sumW(false);
scale(_histnorm_thady_thadpt_1, 1./sum_thady_thadpt/0.5);
scale(_histnorm_thady_thadpt_2, 1./sum_thady_thadpt/0.5);
scale(_histnorm_thady_thadpt_3, 1./sum_thady_thadpt/0.5);
scale(_histnorm_thady_thadpt_4, 1./sum_thady_thadpt/1.0);
double sum_ttm_tty = _histnorm_ttm_tty_1->sumW(false) + _histnorm_ttm_tty_2->sumW(false) + _histnorm_ttm_tty_3->sumW(false) + _histnorm_ttm_tty_4->sumW(false);
scale(_histnorm_ttm_tty_1, 1./sum_ttm_tty/150.);
scale(_histnorm_ttm_tty_2, 1./sum_ttm_tty/175.);
scale(_histnorm_ttm_tty_3, 1./sum_ttm_tty/225.);
scale(_histnorm_ttm_tty_4, 1./sum_ttm_tty/1150.);
double sum_ttpt_ttm = _histnorm_ttpt_ttm_1->sumW(false) + _histnorm_ttpt_ttm_2->sumW(false) + _histnorm_ttpt_ttm_3->sumW(false) + _histnorm_ttpt_ttm_4->sumW(false);
scale(_histnorm_ttpt_ttm_1, 1./sum_ttpt_ttm/35.);
scale(_histnorm_ttpt_ttm_2, 1./sum_ttpt_ttm/45.);
scale(_histnorm_ttpt_ttm_3, 1./sum_ttpt_ttm/60.);
scale(_histnorm_ttpt_ttm_4, 1./sum_ttpt_ttm/360.);
}
private:
FourMomentum _tl;
FourMomentum _th;
FourMomentum _wl;
FourMomentum _wh;
FourMomentum _nusum;
Histo1DPtr _hist_thadpt;
Histo1DPtr _hist_thady;
Histo1DPtr _hist_tleppt;
Histo1DPtr _hist_tlepy;
Histo1DPtr _hist_ttpt;
Histo1DPtr _hist_tty;
Histo1DPtr _hist_ttm;
Histo1DPtr _hist_njet;
Histo1DPtr _hist_njets_thadpt_1;
Histo1DPtr _hist_njets_thadpt_2;
Histo1DPtr _hist_njets_thadpt_3;
Histo1DPtr _hist_njets_thadpt_4;
Histo1DPtr _hist_njets_ttpt_1;
Histo1DPtr _hist_njets_ttpt_2;
Histo1DPtr _hist_njets_ttpt_3;
Histo1DPtr _hist_njets_ttpt_4;
Histo1DPtr _hist_thady_thadpt_1;
Histo1DPtr _hist_thady_thadpt_2;
Histo1DPtr _hist_thady_thadpt_3;
Histo1DPtr _hist_thady_thadpt_4;
Histo1DPtr _hist_ttm_tty_1;
Histo1DPtr _hist_ttm_tty_2;
Histo1DPtr _hist_ttm_tty_3;
Histo1DPtr _hist_ttm_tty_4;
Histo1DPtr _hist_ttpt_ttm_1;
Histo1DPtr _hist_ttpt_ttm_2;
Histo1DPtr _hist_ttpt_ttm_3;
Histo1DPtr _hist_ttpt_ttm_4;
Histo1DPtr _histnorm_thadpt;
Histo1DPtr _histnorm_thady;
Histo1DPtr _histnorm_tleppt;
Histo1DPtr _histnorm_tlepy;
Histo1DPtr _histnorm_ttpt;
Histo1DPtr _histnorm_tty;
Histo1DPtr _histnorm_ttm;
Histo1DPtr _histnorm_njet;
Histo1DPtr _histnorm_njets_thadpt_1;
Histo1DPtr _histnorm_njets_thadpt_2;
Histo1DPtr _histnorm_njets_thadpt_3;
Histo1DPtr _histnorm_njets_thadpt_4;
Histo1DPtr _histnorm_njets_ttpt_1;
Histo1DPtr _histnorm_njets_ttpt_2;
Histo1DPtr _histnorm_njets_ttpt_3;
Histo1DPtr _histnorm_njets_ttpt_4;
Histo1DPtr _histnorm_thady_thadpt_1;
Histo1DPtr _histnorm_thady_thadpt_2;
Histo1DPtr _histnorm_thady_thadpt_3;
Histo1DPtr _histnorm_thady_thadpt_4;
Histo1DPtr _histnorm_ttm_tty_1;
Histo1DPtr _histnorm_ttm_tty_2;
Histo1DPtr _histnorm_ttm_tty_3;
Histo1DPtr _histnorm_ttm_tty_4;
Histo1DPtr _histnorm_ttpt_ttm_1;
Histo1DPtr _histnorm_ttpt_ttm_2;
Histo1DPtr _histnorm_ttpt_ttm_3;
- Histo1DPtr _histnorm_ttpt_ttm_4;
-
+ Histo1DPtr _histnorm_ttpt_ttm_4;
+
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CMS_2016_I1491950);
}
-
diff --git a/analyses/pluginCMS/CMS_2016_PAS_TOP_15_006.cc b/analyses/pluginCMS/CMS_2016_PAS_TOP_15_006.cc
--- a/analyses/pluginCMS/CMS_2016_PAS_TOP_15_006.cc
+++ b/analyses/pluginCMS/CMS_2016_PAS_TOP_15_006.cc
@@ -1,186 +1,182 @@
#include "Rivet/Analysis.hh"
-#include "Rivet/Tools/Logging.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/DressedLeptons.hh"
#include "Rivet/Projections/IdentifiedFinalState.hh"
#include "Rivet/Projections/VetoedFinalState.hh"
-#include "Rivet/Tools/ParticleName.hh"
-#include "Rivet/Tools/ParticleIdUtils.hh"
namespace Rivet {
- namespace CMS_2016_PAS_TOP_15_006_INTERNAL { //< only visible in this compilation unit
+ namespace { //< only visible in this compilation unit
/// @brief Special dressed lepton finder
///
/// Find dressed leptons by clustering all leptons and photons
class SpecialDressedLeptons : public FinalState {
public:
/// Constructor
SpecialDressedLeptons(const FinalState& fs, const Cut& cut)
: FinalState(cut)
{
setName("SpecialDressedLeptons");
IdentifiedFinalState ifs(fs);
ifs.acceptIdPair(PID::PHOTON);
ifs.acceptIdPair(PID::ELECTRON);
ifs.acceptIdPair(PID::MUON);
addProjection(ifs, "IFS");
addProjection(FastJets(ifs, FastJets::ANTIKT, 0.1), "LeptonJets");
}
/// Clone on the heap
virtual unique_ptr<Projection> clone() const {
return unique_ptr<Projection>(new SpecialDressedLeptons(*this));
}
/// Retrieve the dressed leptons
const vector<DressedLepton>& dressedLeptons() const { return _clusteredLeptons; }
/// Perform the calculation
void project(const Event& e) {
_theParticles.clear();
_clusteredLeptons.clear();
vector<DressedLepton> allClusteredLeptons;
const Jets jets = applyProjection<FastJets>(e, "LeptonJets").jetsByPt(5*GeV);
for (const Jet& jet : jets) {
Particle lepCand;
for (const Particle& cand : jet.particles()) {
const int absPdgId = cand.abspid();
if (absPdgId == PID::ELECTRON || absPdgId == PID::MUON) {
if (cand.pt() > lepCand.pt()) lepCand = cand;
}
}
// Central lepton must be the major component
if ((lepCand.pt() < jet.pt()/2.) || (lepCand.pdgId() == 0)) continue;
DressedLepton lepton = DressedLepton(lepCand);
for (const Particle& cand : jet.particles()) {
- if (cand == lepCand) continue;
+ if (isSame(cand, lepCand)) continue;
lepton.addPhoton(cand, true);
}
allClusteredLeptons.push_back(lepton);
}
for (const DressedLepton& lepton : allClusteredLeptons) {
if (accept(lepton)) {
_clusteredLeptons.push_back(lepton);
_theParticles.push_back(lepton.constituentLepton());
_theParticles += lepton.constituentPhotons();
}
}
}
+
private:
/// Container which stores the clustered lepton objects
vector<DressedLepton> _clusteredLeptons;
};
}
/// Jet multiplicity in lepton+jets ttbar at 8 TeV
class CMS_2016_PAS_TOP_15_006 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2016_PAS_TOP_15_006);
- typedef CMS_2016_PAS_TOP_15_006_INTERNAL::SpecialDressedLeptons SpecialDressedLeptons;
-
/// @name Analysis methods
//@{
/// Set up projections and book histograms
void init() {
// Complete final state
FinalState fs;
Cut superLooseLeptonCuts = Cuts::pt > 5*GeV;
SpecialDressedLeptons dressedleptons(fs, superLooseLeptonCuts);
addProjection(dressedleptons, "DressedLeptons");
// Projection for jets
VetoedFinalState fsForJets(fs);
fsForJets.addVetoOnThisFinalState(dressedleptons);
addProjection(FastJets(fsForJets, FastJets::ANTIKT, 0.5), "Jets");
// Booking of histograms
_normedElectronMuonHisto = bookHisto1D("normedElectronMuonHisto", 7, 3.5, 10.5,
"Normalized differential cross section in lepton+jets channel", "Jet multiplicity", "Normed units");
_absXSElectronMuonHisto = bookHisto1D("absXSElectronMuonHisto", 7, 3.5, 10.5,
"Differential cross section in lepton+jets channel", "Jet multiplicity", "pb");
}
/// Per-event analysis
void analyze(const Event& event) {
const double weight = event.weight();
// Select ttbar -> lepton+jets
const SpecialDressedLeptons& dressedleptons = applyProjection<SpecialDressedLeptons>(event, "DressedLeptons");
vector<FourMomentum> selleptons;
for (const DressedLepton& dressedlepton : dressedleptons.dressedLeptons()) {
// Select good leptons
if (dressedlepton.pT() > 30*GeV && dressedlepton.abseta() < 2.4) selleptons += dressedlepton.mom();
// Veto loose leptons
else if (dressedlepton.pT() > 15*GeV && dressedlepton.abseta() < 2.5) vetoEvent;
}
if (selleptons.size() != 1) vetoEvent;
// Identify hardest tight lepton
const FourMomentum lepton = selleptons[0];
// Jets
const FastJets& jets = applyProjection<FastJets>(event, "Jets");
const Jets jets30 = jets.jetsByPt(30*GeV);
int nJets = 0, nBJets = 0;
for (const Jet& jet : jets30) {
if (jet.abseta() > 2.5) continue;
if (deltaR(jet.momentum(), lepton) < 0.5) continue;
nJets += 1;
if (jet.bTagged(Cuts::pT > 5*GeV)) nBJets += 1;
}
// Require >= 4 resolved jets, of which two must be b-tagged
if (nJets < 4 || nBJets < 2) vetoEvent;
// Fill histograms
_normedElectronMuonHisto->fill(min(nJets, 10), weight);
_absXSElectronMuonHisto ->fill(min(nJets, 10), weight);
}
void finalize() {
const double ttbarXS = !std::isnan(crossSectionPerEvent()) ? crossSection() : 252.89*picobarn;
if (std::isnan(crossSectionPerEvent()))
MSG_INFO("No valid cross-section given, using NNLO (arXiv:1303.6254; sqrt(s)=8 TeV, m_t=172.5 GeV): " <<
ttbarXS/picobarn << " pb");
const double xsPerWeight = ttbarXS/picobarn / sumOfWeights();
scale(_absXSElectronMuonHisto, xsPerWeight);
normalize(_normedElectronMuonHisto);
}
//@}
private:
/// Histograms
Histo1DPtr _normedElectronMuonHisto, _absXSElectronMuonHisto;
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CMS_2016_PAS_TOP_15_006);
}
