diff --git a/analyses/pluginLEP/ALEPH_1996_S3486095.cc b/analyses/pluginLEP/ALEPH_1996_S3486095.cc --- a/analyses/pluginLEP/ALEPH_1996_S3486095.cc +++ b/analyses/pluginLEP/ALEPH_1996_S3486095.cc @@ -1,477 +1,478 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/Sphericity.hh" #include "Rivet/Projections/Thrust.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/ParisiTensor.hh" #include "Rivet/Projections/Hemispheres.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief ALEPH QCD study with event shapes and identified particles /// @author Holger Schulz class ALEPH_1996_S3486095 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(ALEPH_1996_S3486095); /// @name Analysis methods //@{ void init() { // Set up projections declare(Beam(), "Beams"); const ChargedFinalState cfs; declare(cfs, "FS"); declare(UnstableParticles(), "UFS"); declare(FastJets(cfs, FastJets::DURHAM, 0.7), "DurhamJets"); declare(Sphericity(cfs), "Sphericity"); declare(ParisiTensor(cfs), "Parisi"); const Thrust thrust(cfs); declare(thrust, "Thrust"); declare(Hemispheres(thrust), "Hemispheres"); // Book histograms book(_histSphericity ,1, 1, 1); book(_histAplanarity ,2, 1, 1); book(_hist1MinusT ,3, 1, 1); book(_histTMinor ,4, 1, 1); book(_histY3 ,5, 1, 1); book(_histHeavyJetMass ,6, 1, 1); book(_histCParam ,7, 1, 1); book(_histOblateness ,8, 1, 1); book(_histScaledMom ,9, 1, 1); book(_histRapidityT ,10, 1, 1); book(_histPtSIn ,11, 1, 1); book(_histPtSOut ,12, 1, 1); book(_histLogScaledMom ,17, 1, 1); book(_histChMult ,18, 1, 1); book(_histMeanChMult ,19, 1, 1); book(_histMeanChMultRapt05,20, 1, 1); book(_histMeanChMultRapt10,21, 1, 1); book(_histMeanChMultRapt15,22, 1, 1); book(_histMeanChMultRapt20,23, 1, 1); // Particle spectra book(_histMultiPiPlus ,25, 1, 1); 
book(_histMultiKPlus ,26, 1, 1); book(_histMultiP ,27, 1, 1); book(_histMultiPhoton ,28, 1, 1); book(_histMultiPi0 ,29, 1, 1); book(_histMultiEta ,30, 1, 1); book(_histMultiEtaPrime ,31, 1, 1); book(_histMultiK0 ,32, 1, 1); book(_histMultiLambda0 ,33, 1, 1); book(_histMultiXiMinus ,34, 1, 1); book(_histMultiSigma1385Plus ,35, 1, 1); book(_histMultiXi1530_0 ,36, 1, 1); book(_histMultiRho ,37, 1, 1); book(_histMultiOmega782 ,38, 1, 1); book(_histMultiKStar892_0 ,39, 1, 1); book(_histMultiPhi ,40, 1, 1); book(_histMultiKStar892Plus ,43, 1, 1); // Mean multiplicities book(_histMeanMultiPi0 ,44, 1, 2); book(_histMeanMultiEta ,44, 1, 3); book(_histMeanMultiEtaPrime ,44, 1, 4); book(_histMeanMultiK0 ,44, 1, 5); book(_histMeanMultiRho ,44, 1, 6); book(_histMeanMultiOmega782 ,44, 1, 7); book(_histMeanMultiPhi ,44, 1, 8); book(_histMeanMultiKStar892Plus ,44, 1, 9); book(_histMeanMultiKStar892_0 ,44, 1, 10); book(_histMeanMultiLambda0 ,44, 1, 11); book(_histMeanMultiSigma0 ,44, 1, 12); book(_histMeanMultiXiMinus ,44, 1, 13); book(_histMeanMultiSigma1385Plus ,44, 1, 14); book(_histMeanMultiXi1530_0 ,44, 1, 15); book(_histMeanMultiOmegaOmegaBar ,44, 1, 16); + book(_weightedTotalPartNum, "/TMP/weightedTotalPartNum"); } void analyze(const Event& e) { // First, veto on leptonic events by requiring at least 4 charged FS particles const FinalState& fs = apply(e, "FS"); const size_t numParticles = fs.particles().size(); // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. 
if (numParticles < 2) { MSG_DEBUG("Failed leptonic event cut"); vetoEvent; } MSG_DEBUG("Passed leptonic event cut"); _weightedTotalPartNum->fill(numParticles); // Get beams and average beam momentum const ParticlePair& beams = apply(e, "Beams").beams(); const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0; MSG_DEBUG("Avg beam momentum = " << meanBeamMom); // Thrusts MSG_DEBUG("Calculating thrust"); const Thrust& thrust = apply(e, "Thrust"); _hist1MinusT->fill(1 - thrust.thrust()); _histTMinor->fill(thrust.thrustMinor()); _histOblateness->fill(thrust.oblateness()); // Jets MSG_DEBUG("Calculating differential jet rate plots:"); const FastJets& durjet = apply(e, "DurhamJets"); if (durjet.clusterSeq()) { double y3 = durjet.clusterSeq()->exclusive_ymerge_max(2); if (y3>0.0) _histY3->fill(-1. * std::log(y3)); } // Sphericities MSG_DEBUG("Calculating sphericity"); const Sphericity& sphericity = apply(e, "Sphericity"); _histSphericity->fill(sphericity.sphericity()); _histAplanarity->fill(sphericity.aplanarity()); // C param MSG_DEBUG("Calculating Parisi params"); const ParisiTensor& parisi = apply(e, "Parisi"); _histCParam->fill(parisi.C()); // Hemispheres MSG_DEBUG("Calculating hemisphere variables"); const Hemispheres& hemi = apply(e, "Hemispheres"); _histHeavyJetMass->fill(hemi.scaledM2high()); // Iterate over all the charged final state particles. double Evis = 0.0; double rapt05 = 0.; double rapt10 = 0.; double rapt15 = 0.; double rapt20 = 0.; MSG_DEBUG("About to iterate over charged FS particles"); for (const Particle& p : fs.particles()) { // Get momentum and energy of each particle. const Vector3 mom3 = p.p3(); const double energy = p.E(); Evis += energy; // Scaled momenta. const double mom = mom3.mod(); const double scaledMom = mom/meanBeamMom; const double logInvScaledMom = -std::log(scaledMom); _histLogScaledMom->fill(logInvScaledMom); _histScaledMom->fill(scaledMom); // Get momenta components w.r.t. thrust and sphericity. 
const double momT = dot(thrust.thrustAxis(), mom3); const double pTinS = dot(mom3, sphericity.sphericityMajorAxis()); const double pToutS = dot(mom3, sphericity.sphericityMinorAxis()); _histPtSIn->fill(fabs(pTinS/GeV)); _histPtSOut->fill(fabs(pToutS/GeV)); // Calculate rapidities w.r.t. thrust. const double rapidityT = 0.5 * std::log((energy + momT) / (energy - momT)); _histRapidityT->fill(fabs(rapidityT)); if (std::fabs(rapidityT) <= 0.5) { rapt05 += 1.0; } if (std::fabs(rapidityT) <= 1.0) { rapt10 += 1.0; } if (std::fabs(rapidityT) <= 1.5) { rapt15 += 1.0; } if (std::fabs(rapidityT) <= 2.0) { rapt20 += 1.0; } } _histChMult->fill(numParticles); _histMeanChMultRapt05->fill(_histMeanChMultRapt05->bin(0).xMid(), rapt05); _histMeanChMultRapt10->fill(_histMeanChMultRapt10->bin(0).xMid(), rapt10); _histMeanChMultRapt15->fill(_histMeanChMultRapt15->bin(0).xMid(), rapt15); _histMeanChMultRapt20->fill(_histMeanChMultRapt20->bin(0).xMid(), rapt20); _histMeanChMult->fill(_histMeanChMult->bin(0).xMid(), numParticles); //// Final state of unstable particles to get particle spectra const UnstableParticles& ufs = apply(e, "UFS"); for (Particles::const_iterator p = ufs.particles().begin(); p != ufs.particles().end(); ++p) { const Vector3 mom3 = p->momentum().p3(); int id = abs(p->pid()); const double mom = mom3.mod(); const double energy = p->momentum().E(); const double scaledMom = mom/meanBeamMom; const double scaledEnergy = energy/meanBeamMom; // meanBeamMom is approximately beam energy switch (id) { case 22: _histMultiPhoton->fill(-1.*std::log(scaledMom)); break; case -321: case 321: _histMultiKPlus->fill(scaledMom); break; case 211: case -211: _histMultiPiPlus->fill(scaledMom); break; case 2212: case -2212: _histMultiP->fill(scaledMom); break; case 111: _histMultiPi0->fill(scaledMom); _histMeanMultiPi0->fill(_histMeanMultiPi0->bin(0).xMid()); break; case 221: if (scaledMom >= 0.1) { _histMultiEta->fill(scaledEnergy); _histMeanMultiEta->fill(_histMeanMultiEta->bin(0).xMid()); 
} break; case 331: if (scaledMom >= 0.1) { _histMultiEtaPrime->fill(scaledEnergy); _histMeanMultiEtaPrime->fill(_histMeanMultiEtaPrime->bin(0).xMid()); } break; case 130: //klong case 310: //kshort _histMultiK0->fill(scaledMom); _histMeanMultiK0->fill(_histMeanMultiK0->bin(0).xMid()); break; case 113: _histMultiRho->fill(scaledMom); _histMeanMultiRho->fill(_histMeanMultiRho->bin(0).xMid()); break; case 223: _histMultiOmega782->fill(scaledMom); _histMeanMultiOmega782->fill(_histMeanMultiOmega782->bin(0).xMid()); break; case 333: _histMultiPhi->fill(scaledMom); _histMeanMultiPhi->fill(_histMeanMultiPhi->bin(0).xMid()); break; case 313: case -313: _histMultiKStar892_0->fill(scaledMom); _histMeanMultiKStar892_0->fill(_histMeanMultiKStar892_0->bin(0).xMid()); break; case 323: case -323: _histMultiKStar892Plus->fill(scaledEnergy); _histMeanMultiKStar892Plus->fill(_histMeanMultiKStar892Plus->bin(0).xMid()); break; case 3122: case -3122: _histMultiLambda0->fill(scaledMom); _histMeanMultiLambda0->fill(_histMeanMultiLambda0->bin(0).xMid()); break; case 3212: case -3212: _histMeanMultiSigma0->fill(_histMeanMultiSigma0->bin(0).xMid()); break; case 3312: case -3312: _histMultiXiMinus->fill(scaledEnergy); _histMeanMultiXiMinus->fill(_histMeanMultiXiMinus->bin(0).xMid()); break; case 3114: case -3114: case 3224: case -3224: _histMultiSigma1385Plus->fill(scaledEnergy); _histMeanMultiSigma1385Plus->fill(_histMeanMultiSigma1385Plus->bin(0).xMid()); break; case 3324: case -3324: _histMultiXi1530_0->fill(scaledEnergy); _histMeanMultiXi1530_0->fill(_histMeanMultiXi1530_0->bin(0).xMid()); break; case 3334: _histMeanMultiOmegaOmegaBar->fill(_histMeanMultiOmegaOmegaBar->bin(0).xMid()); break; } } } /// Finalize void finalize() { // Normalize inclusive single particle distributions to the average number // of charged particles per event. 
const double avgNumParts = _weightedTotalPartNum->sumW() / sumOfWeights(); normalize(_histPtSIn, avgNumParts); normalize(_histPtSOut, avgNumParts); normalize(_histRapidityT, avgNumParts); normalize(_histY3); normalize(_histLogScaledMom, avgNumParts); normalize(_histScaledMom, avgNumParts); // particle spectra scale(_histMultiPiPlus ,1./sumOfWeights()); scale(_histMultiKPlus ,1./sumOfWeights()); scale(_histMultiP ,1./sumOfWeights()); scale(_histMultiPhoton ,1./sumOfWeights()); scale(_histMultiPi0 ,1./sumOfWeights()); scale(_histMultiEta ,1./sumOfWeights()); scale(_histMultiEtaPrime ,1./sumOfWeights()); scale(_histMultiK0 ,1./sumOfWeights()); scale(_histMultiLambda0 ,1./sumOfWeights()); scale(_histMultiXiMinus ,1./sumOfWeights()); scale(_histMultiSigma1385Plus ,1./sumOfWeights()); scale(_histMultiXi1530_0 ,1./sumOfWeights()); scale(_histMultiRho ,1./sumOfWeights()); scale(_histMultiOmega782 ,1./sumOfWeights()); scale(_histMultiKStar892_0 ,1./sumOfWeights()); scale(_histMultiPhi ,1./sumOfWeights()); scale(_histMultiKStar892Plus ,1./sumOfWeights()); // event shape normalize(_hist1MinusT); normalize(_histTMinor); normalize(_histOblateness); normalize(_histSphericity); normalize(_histAplanarity); normalize(_histHeavyJetMass); normalize(_histCParam); // mean multiplicities scale(_histChMult , 2.0/sumOfWeights()); // taking into account the binwidth of 2 scale(_histMeanChMult , 1.0/sumOfWeights()); scale(_histMeanChMultRapt05 , 1.0/sumOfWeights()); scale(_histMeanChMultRapt10 , 1.0/sumOfWeights()); scale(_histMeanChMultRapt15 , 1.0/sumOfWeights()); scale(_histMeanChMultRapt20 , 1.0/sumOfWeights()); scale(_histMeanMultiPi0 , 1.0/sumOfWeights()); scale(_histMeanMultiEta , 1.0/sumOfWeights()); scale(_histMeanMultiEtaPrime , 1.0/sumOfWeights()); scale(_histMeanMultiK0 , 1.0/sumOfWeights()); scale(_histMeanMultiRho , 1.0/sumOfWeights()); scale(_histMeanMultiOmega782 , 1.0/sumOfWeights()); scale(_histMeanMultiPhi , 1.0/sumOfWeights()); scale(_histMeanMultiKStar892Plus , 
1.0/sumOfWeights()); scale(_histMeanMultiKStar892_0 , 1.0/sumOfWeights()); scale(_histMeanMultiLambda0 , 1.0/sumOfWeights()); scale(_histMeanMultiSigma0 , 1.0/sumOfWeights()); scale(_histMeanMultiXiMinus , 1.0/sumOfWeights()); scale(_histMeanMultiSigma1385Plus, 1.0/sumOfWeights()); scale(_histMeanMultiXi1530_0 , 1.0/sumOfWeights()); scale(_histMeanMultiOmegaOmegaBar, 1.0/sumOfWeights()); } //@} private: /// Store the weighted sums of numbers of charged / charged+neutral /// particles - used to calculate average number of particles for the /// inclusive single particle distributions' normalisations. CounterPtr _weightedTotalPartNum; /// @name Histograms //@{ Histo1DPtr _histSphericity; Histo1DPtr _histAplanarity; Histo1DPtr _hist1MinusT; Histo1DPtr _histTMinor; Histo1DPtr _histY3; Histo1DPtr _histHeavyJetMass; Histo1DPtr _histCParam; Histo1DPtr _histOblateness; Histo1DPtr _histScaledMom; Histo1DPtr _histRapidityT; Histo1DPtr _histPtSIn; Histo1DPtr _histPtSOut; Histo1DPtr _histJetRate2Durham; Histo1DPtr _histJetRate3Durham; Histo1DPtr _histJetRate4Durham; Histo1DPtr _histJetRate5Durham; Histo1DPtr _histLogScaledMom; Histo1DPtr _histChMult; Histo1DPtr _histMultiPiPlus; Histo1DPtr _histMultiKPlus; Histo1DPtr _histMultiP; Histo1DPtr _histMultiPhoton; Histo1DPtr _histMultiPi0; Histo1DPtr _histMultiEta; Histo1DPtr _histMultiEtaPrime; Histo1DPtr _histMultiK0; Histo1DPtr _histMultiLambda0; Histo1DPtr _histMultiXiMinus; Histo1DPtr _histMultiSigma1385Plus; Histo1DPtr _histMultiXi1530_0; Histo1DPtr _histMultiRho; Histo1DPtr _histMultiOmega782; Histo1DPtr _histMultiKStar892_0; Histo1DPtr _histMultiPhi; Histo1DPtr _histMultiKStar892Plus; // mean multiplicities Histo1DPtr _histMeanChMult; Histo1DPtr _histMeanChMultRapt05; Histo1DPtr _histMeanChMultRapt10; Histo1DPtr _histMeanChMultRapt15; Histo1DPtr _histMeanChMultRapt20; Histo1DPtr _histMeanMultiPi0; Histo1DPtr _histMeanMultiEta; Histo1DPtr _histMeanMultiEtaPrime; Histo1DPtr _histMeanMultiK0; Histo1DPtr _histMeanMultiRho; 
Histo1DPtr _histMeanMultiOmega782; Histo1DPtr _histMeanMultiPhi; Histo1DPtr _histMeanMultiKStar892Plus; Histo1DPtr _histMeanMultiKStar892_0; Histo1DPtr _histMeanMultiLambda0; Histo1DPtr _histMeanMultiSigma0; Histo1DPtr _histMeanMultiXiMinus; Histo1DPtr _histMeanMultiSigma1385Plus; Histo1DPtr _histMeanMultiXi1530_0; Histo1DPtr _histMeanMultiOmegaOmegaBar; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALEPH_1996_S3486095); } diff --git a/analyses/pluginLEP/ALEPH_2004_S5765862.cc b/analyses/pluginLEP/ALEPH_2004_S5765862.cc --- a/analyses/pluginLEP/ALEPH_2004_S5765862.cc +++ b/analyses/pluginLEP/ALEPH_2004_S5765862.cc @@ -1,331 +1,331 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/Thrust.hh" #include "Rivet/Projections/Sphericity.hh" #include "Rivet/Projections/ParisiTensor.hh" #include "Rivet/Projections/Hemispheres.hh" #include "Rivet/Projections/Beam.hh" namespace Rivet { /// @brief ALEPH jet rates and event shapes at LEP 1 and 2 class ALEPH_2004_S5765862 : public Analysis { public: ALEPH_2004_S5765862() : Analysis("ALEPH_2004_S5765862") , _initialisedJets(false), _initialisedSpectra(false) { } public: void init() { _initialisedJets = true; _initialisedSpectra = true; // TODO: According to the paper they seem to discard neutral particles // between 1 and 2 GeV. That correction is included in the systematic // uncertainties and overly complicated to program, so we ignore it. 
const FinalState fs; declare(fs, "FS"); FastJets durhamjets(fs, FastJets::DURHAM, 0.7, JetAlg::Muons::ALL, JetAlg::Invisibles::ALL); declare(durhamjets, "DurhamJets"); const Thrust thrust(fs); declare(thrust, "Thrust"); declare(Sphericity(fs), "Sphericity"); declare(ParisiTensor(fs), "Parisi"); declare(Hemispheres(thrust), "Hemispheres"); const ChargedFinalState cfs; declare(Beam(), "Beams"); declare(cfs, "CFS"); // Histos // offset for the event shapes and jets int offset = 0; switch (int(sqrtS()/GeV + 0.5)) { case 91: offset = 0; break; case 133: offset = 1; break; case 161: offset = 2; break; case 172: offset = 3; break; case 183: offset = 4; break; case 189: offset = 5; break; case 200: offset = 6; break; case 206: offset = 7; break; default: _initialisedJets = false; } // event shapes if(_initialisedJets) { book(_h_thrust ,offset+54, 1, 1); book(_h_heavyjetmass ,offset+62, 1, 1); book(_h_totaljetbroadening ,offset+70, 1, 1); book(_h_widejetbroadening ,offset+78, 1, 1); book(_h_cparameter ,offset+86, 1, 1); book(_h_thrustmajor ,offset+94, 1, 1); book(_h_thrustminor ,offset+102, 1, 1); book(_h_jetmassdifference ,offset+110, 1, 1); book(_h_aplanarity ,offset+118, 1, 1); if ( offset != 0 ) book(_h_planarity, offset+125, 1, 1); book(_h_oblateness ,offset+133, 1, 1); book(_h_sphericity ,offset+141, 1, 1); // Durham n->m jet resolutions book(_h_y_Durham[0] ,offset+149, 1, 1); // y12 d149 ... d156 book(_h_y_Durham[1] ,offset+157, 1, 1); // y23 d157 ... d164 if (offset<6) { // there is no y34, y45 and y56 for 200 gev book(_h_y_Durham[2] ,offset+165, 1, 1); // y34 d165 ... d172, but not 171 book(_h_y_Durham[3] ,offset+173, 1, 1); // y45 d173 ... d179 book(_h_y_Durham[4] ,offset+180, 1, 1); // y56 d180 ... 
d186 } else if (offset==6) { _h_y_Durham[2] = Histo1DPtr(); _h_y_Durham[3] = Histo1DPtr(); _h_y_Durham[4] = Histo1DPtr(); } else if (offset==7) { book(_h_y_Durham[2] ,172, 1, 1); book(_h_y_Durham[3] ,179, 1, 1); book(_h_y_Durham[4] ,186, 1, 1); } // Durham n-jet fractions book(_h_R_Durham[0] ,offset+187, 1, 1); // R1 d187 ... d194 book(_h_R_Durham[1] ,offset+195, 1, 1); // R2 d195 ... d202 book(_h_R_Durham[2] ,offset+203, 1, 1); // R3 d203 ... d210 book(_h_R_Durham[3] ,offset+211, 1, 1); // R4 d211 ... d218 book(_h_R_Durham[4] ,offset+219, 1, 1); // R5 d219 ... d226 book(_h_R_Durham[5] ,offset+227, 1, 1); // R>=6 d227 ... d234 } // offset for the charged particle distributions offset = 0; switch (int(sqrtS()/GeV + 0.5)) { case 133: offset = 0; break; case 161: offset = 1; break; case 172: offset = 2; break; case 183: offset = 3; break; case 189: offset = 4; break; case 196: offset = 5; break; case 200: offset = 6; break; case 206: offset = 7; break; default: _initialisedSpectra=false; } if (_initialisedSpectra) { book(_h_xp , 2+offset, 1, 1); book(_h_xi ,11+offset, 1, 1); book(_h_xe ,19+offset, 1, 1); book(_h_pTin ,27+offset, 1, 1); if (offset == 7) book(_h_pTout, 35, 1, 1); book(_h_rapidityT ,36+offset, 1, 1); book(_h_rapidityS ,44+offset, 1, 1); } - book(_weightedTotalChargedPartNum, "weightedTotalChargedPartNum"); + book(_weightedTotalChargedPartNum, "_weightedTotalChargedPartNum"); if (!_initialisedSpectra && !_initialisedJets) { MSG_WARNING("CoM energy of events sqrt(s) = " << sqrtS()/GeV << " doesn't match any available analysis energy ."); } book(mult, 1, 1, 1); } void analyze(const Event& e) { const Thrust& thrust = apply(e, "Thrust"); const Sphericity& sphericity = apply(e, "Sphericity"); if(_initialisedJets) { bool LEP1 = fuzzyEquals(sqrtS(),91.2*GeV,0.01); // event shapes double thr = LEP1 ? 
thrust.thrust() : 1.0 - thrust.thrust();
        _h_thrust->fill(thr);
        _h_thrustmajor->fill(thrust.thrustMajor());
        if(LEP1)
          _h_thrustminor->fill(log(thrust.thrustMinor()));
        else
          _h_thrustminor->fill(thrust.thrustMinor());
        _h_oblateness->fill(thrust.oblateness());
        const Hemispheres& hemi = apply<Hemispheres>(e, "Hemispheres");
        _h_heavyjetmass->fill(hemi.scaledM2high());
        _h_jetmassdifference->fill(hemi.scaledM2diff());
        _h_totaljetbroadening->fill(hemi.Bsum());
        _h_widejetbroadening->fill(hemi.Bmax());
        const ParisiTensor& parisi = apply<ParisiTensor>(e, "Parisi");
        _h_cparameter->fill(parisi.C());
        _h_aplanarity->fill(sphericity.aplanarity());
        if(_h_planarity) _h_planarity->fill(sphericity.planarity());
        _h_sphericity->fill(sphericity.sphericity());

        // Jet rates
        const FastJets& durjet = apply<FastJets>(e, "DurhamJets");
        double log10e = log10(exp(1.));
        if (durjet.clusterSeq()) {
          double logynm1=0.;
          double logyn;
          for (size_t i=0; i<5; ++i) {
            double yn = durjet.clusterSeq()->exclusive_ymerge_max(i+1);
            if (yn<=0.0) continue;
            logyn = -log(yn);
            if (_h_y_Durham[i]) {
              _h_y_Durham[i]->fill(logyn);
            }
            if(!LEP1) logyn *= log10e;
            for (size_t j = 0; j < _h_R_Durham[i]->numBins(); ++j) {
              double val = _h_R_Durham[i]->bin(j).xMin();
              double width = _h_R_Durham[i]->bin(j).xWidth();
              if(-val<=logynm1) break;
              if(-val<logyn) {
                _h_R_Durham[i]->fill(val+0.5*width, width);
              }
            }
            logynm1 = logyn;
          }
          for (size_t j = 0; j < _h_R_Durham[5]->numBins(); ++j) {
            double val = _h_R_Durham[5]->bin(j).xMin();
            double width = _h_R_Durham[5]->bin(j).xWidth();
            if(-val<=logynm1) break;
            _h_R_Durham[5]->fill(val+0.5*width, width);
          }
        }
        if( !_initialisedSpectra) {
          const ChargedFinalState& cfs = apply<ChargedFinalState>(e, "CFS");
          const size_t numParticles = cfs.particles().size();
          _weightedTotalChargedPartNum->fill(numParticles);
        }
      }

      // charged particle distributions
      if(_initialisedSpectra) {
        const ChargedFinalState& cfs = apply<ChargedFinalState>(e, "CFS");
        const size_t numParticles = cfs.particles().size();
        _weightedTotalChargedPartNum->fill(numParticles);
        const ParticlePair& beams = apply<Beam>(e, "Beams").beams();
        const double
meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0; for (const Particle& p : cfs.particles()) { const double xp = p.p3().mod()/meanBeamMom; _h_xp->fill(xp ); const double logxp = -std::log(xp); _h_xi->fill(logxp); const double xe = p.E()/meanBeamMom; _h_xe->fill(xe ); const double pTinT = dot(p.p3(), thrust.thrustMajorAxis()); const double pToutT = dot(p.p3(), thrust.thrustMinorAxis()); _h_pTin->fill(fabs(pTinT/GeV)); if(_h_pTout) _h_pTout->fill(fabs(pToutT/GeV)); const double momT = dot(thrust.thrustAxis() ,p.p3()); const double rapidityT = 0.5 * std::log((p.E() + momT) / (p.E() - momT)); _h_rapidityT->fill(fabs(rapidityT)); const double momS = dot(sphericity.sphericityAxis(),p.p3()); const double rapidityS = 0.5 * std::log((p.E() + momS) / (p.E() - momS)); _h_rapidityS->fill(fabs(rapidityS)); } } } void finalize() { if(!_initialisedJets && !_initialisedSpectra) return; if (_initialisedJets) { normalize(_h_thrust); normalize(_h_heavyjetmass); normalize(_h_totaljetbroadening); normalize(_h_widejetbroadening); normalize(_h_cparameter); normalize(_h_thrustmajor); normalize(_h_thrustminor); normalize(_h_jetmassdifference); normalize(_h_aplanarity); if(_h_planarity) normalize(_h_planarity); normalize(_h_oblateness); normalize(_h_sphericity); for (size_t n=0; n<6; ++n) { scale(_h_R_Durham[n], 1./sumOfWeights()); } for (size_t n = 0; n < 5; ++n) { if (_h_y_Durham[n]) { scale(_h_y_Durham[n], 1.0/sumOfWeights()); } } } Histo1D temphisto(refData(1, 1, 1)); const double avgNumParts = dbl(*_weightedTotalChargedPartNum) / sumOfWeights(); for (size_t b = 0; b < temphisto.numBins(); b++) { const double x = temphisto.bin(b).xMid(); const double ex = temphisto.bin(b).xWidth()/2.; if (inRange(sqrtS()/GeV, x-ex, x+ex)) { mult->addPoint(x, avgNumParts, ex, 0.); } } if (_initialisedSpectra) { normalize(_h_xp, avgNumParts); normalize(_h_xi, avgNumParts); normalize(_h_xe, avgNumParts); normalize(_h_pTin , avgNumParts); if (_h_pTout) normalize(_h_pTout, 
avgNumParts); normalize(_h_rapidityT, avgNumParts); normalize(_h_rapidityS, avgNumParts); } } private: bool _initialisedJets; bool _initialisedSpectra; Scatter2DPtr mult; Histo1DPtr _h_xp; Histo1DPtr _h_xi; Histo1DPtr _h_xe; Histo1DPtr _h_pTin; Histo1DPtr _h_pTout; Histo1DPtr _h_rapidityT; Histo1DPtr _h_rapidityS; Histo1DPtr _h_thrust; Histo1DPtr _h_heavyjetmass; Histo1DPtr _h_totaljetbroadening; Histo1DPtr _h_widejetbroadening; Histo1DPtr _h_cparameter; Histo1DPtr _h_thrustmajor; Histo1DPtr _h_thrustminor; Histo1DPtr _h_jetmassdifference; Histo1DPtr _h_aplanarity; Histo1DPtr _h_planarity; Histo1DPtr _h_oblateness; Histo1DPtr _h_sphericity; Histo1DPtr _h_R_Durham[6]; Histo1DPtr _h_y_Durham[5]; CounterPtr _weightedTotalChargedPartNum; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ALEPH_2004_S5765862); } diff --git a/analyses/pluginLEP/DELPHI_1995_S3137023.cc b/analyses/pluginLEP/DELPHI_1995_S3137023.cc --- a/analyses/pluginLEP/DELPHI_1995_S3137023.cc +++ b/analyses/pluginLEP/DELPHI_1995_S3137023.cc @@ -1,104 +1,104 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief DELPHI strange baryon paper /// @author Hendrik Hoeth class DELPHI_1995_S3137023 : public Analysis { public: /// Constructor DELPHI_1995_S3137023() : Analysis("DELPHI_1995_S3137023") {} /// @name Analysis methods //@{ void init() { declare(Beam(), "Beams"); declare(ChargedFinalState(), "FS"); declare(UnstableParticles(), "UFS"); book(_histXpXiMinus ,2, 1, 1); book(_histXpSigma1385Plus ,3, 1, 1); - book(_weightedTotalNumXiMinus, "weightedTotalNumXiMinus"); - book(_weightedTotalNumSigma1385Plus, "weightedTotalNumSigma1385Plus"); + book(_weightedTotalNumXiMinus, "_weightedTotalNumXiMinus"); + book(_weightedTotalNumSigma1385Plus, "_weightedTotalNumSigma1385Plus"); } void analyze(const Event& e) { 
// First, veto on leptonic events by requiring at least 4 charged FS particles const FinalState& fs = apply(e, "FS"); const size_t numParticles = fs.particles().size(); // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. if (numParticles < 2) { MSG_DEBUG("Failed leptonic event cut"); vetoEvent; } MSG_DEBUG("Passed leptonic event cut"); // Get beams and average beam momentum const ParticlePair& beams = apply(e, "Beams").beams(); const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0; MSG_DEBUG("Avg beam momentum = " << meanBeamMom); // Final state of unstable particles to get particle spectra const UnstableParticles& ufs = apply(e, "UFS"); for (const Particle& p : ufs.particles()) { const int id = p.abspid(); switch (id) { case 3312: _histXpXiMinus->fill(p.p3().mod()/meanBeamMom); _weightedTotalNumXiMinus->fill(); break; case 3114: case 3224: _histXpSigma1385Plus->fill(p.p3().mod()/meanBeamMom); _weightedTotalNumSigma1385Plus->fill(); break; } } } /// Finalize void finalize() { normalize(_histXpXiMinus , dbl(*_weightedTotalNumXiMinus)/sumOfWeights()); normalize(_histXpSigma1385Plus , dbl(*_weightedTotalNumSigma1385Plus)/sumOfWeights()); } //@} private: /// Store the weighted sums of numbers of charged / charged+neutral /// particles - used to calculate average number of particles for the /// inclusive single particle distributions' normalisations. 
CounterPtr _weightedTotalNumXiMinus; CounterPtr _weightedTotalNumSigma1385Plus; Histo1DPtr _histXpXiMinus; Histo1DPtr _histXpSigma1385Plus; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(DELPHI_1995_S3137023); } diff --git a/analyses/pluginLEP/DELPHI_1996_S3430090.cc b/analyses/pluginLEP/DELPHI_1996_S3430090.cc --- a/analyses/pluginLEP/DELPHI_1996_S3430090.cc +++ b/analyses/pluginLEP/DELPHI_1996_S3430090.cc @@ -1,553 +1,553 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/Sphericity.hh" #include "Rivet/Projections/Thrust.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/ParisiTensor.hh" #include "Rivet/Projections/Hemispheres.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /** * @brief DELPHI event shapes and identified particle spectra * @author Andy Buckley * @author Hendrik Hoeth * * This is the paper which was used for the original PROFESSOR MC tuning * study. It studies a wide range of e+ e- event shape variables, differential * jet rates in the Durham and JADE schemes, and incorporates identified * particle spectra, from other LEP analyses. * * @par Run conditions * * @arg LEP1 beam energy: \f$ \sqrt{s} = \f$ 91.2 GeV * @arg Run with generic QCD events. * @arg No \f$ p_\perp^\text{min} \f$ cutoff is required */ class DELPHI_1996_S3430090 : public Analysis { public: /// Constructor DELPHI_1996_S3430090() : Analysis("DELPHI_1996_S3430090") { } /// @name Analysis methods //@{ void init() { declare(Beam(), "Beams"); // Don't try to introduce a pT or eta cut here. It's all corrected // back. (See Section 2 of the paper.) 
const ChargedFinalState cfs; declare(cfs, "FS"); declare(UnstableParticles(), "UFS"); declare(FastJets(cfs, FastJets::JADE, 0.7), "JadeJets"); declare(FastJets(cfs, FastJets::DURHAM, 0.7), "DurhamJets"); declare(Sphericity(cfs), "Sphericity"); declare(ParisiTensor(cfs), "Parisi"); const Thrust thrust(cfs); declare(thrust, "Thrust"); declare(Hemispheres(thrust), "Hemispheres"); book(_histPtTIn, 1, 1, 1); book(_histPtTOut,2, 1, 1); book(_histPtSIn, 3, 1, 1); book(_histPtSOut,4, 1, 1); book(_histRapidityT, 5, 1, 1); book(_histRapidityS, 6, 1, 1); book(_histScaledMom, 7, 1, 1); book(_histLogScaledMom, 8, 1, 1); book(_histPtTOutVsXp ,9, 1, 1); book(_histPtVsXp ,10, 1, 1); book(_hist1MinusT, 11, 1, 1); book(_histTMajor, 12, 1, 1); book(_histTMinor, 13, 1, 1); book(_histOblateness, 14, 1, 1); book(_histSphericity, 15, 1, 1); book(_histAplanarity, 16, 1, 1); book(_histPlanarity, 17, 1, 1); book(_histCParam, 18, 1, 1); book(_histDParam, 19, 1, 1); book(_histHemiMassH, 20, 1, 1); book(_histHemiMassL, 21, 1, 1); book(_histHemiMassD, 22, 1, 1); book(_histHemiBroadW, 23, 1, 1); book(_histHemiBroadN, 24, 1, 1); book(_histHemiBroadT, 25, 1, 1); book(_histHemiBroadD, 26, 1, 1); // Binned in y_cut book(_histDiffRate2Durham, 27, 1, 1); book(_histDiffRate2Jade, 28, 1, 1); book(_histDiffRate3Durham, 29, 1, 1); book(_histDiffRate3Jade, 30, 1, 1); book(_histDiffRate4Durham, 31, 1, 1); book(_histDiffRate4Jade, 32, 1, 1); // Binned in cos(chi) book(_histEEC, 33, 1, 1); book(_histAEEC, 34, 1, 1); book(_histMultiCharged, 35, 1, 1); book(_histMultiPiPlus, 36, 1, 1); book(_histMultiPi0, 36, 1, 2); book(_histMultiKPlus, 36, 1, 3); book(_histMultiK0, 36, 1, 4); book(_histMultiEta, 36, 1, 5); book(_histMultiEtaPrime, 36, 1, 6); book(_histMultiDPlus, 36, 1, 7); book(_histMultiD0, 36, 1, 8); book(_histMultiBPlus0, 36, 1, 9); book(_histMultiF0, 37, 1, 1); book(_histMultiRho, 38, 1, 1); book(_histMultiKStar892Plus, 38, 1, 2); book(_histMultiKStar892_0, 38, 1, 3); book(_histMultiPhi, 38, 1, 4); 
book(_histMultiDStar2010Plus, 38, 1, 5); book(_histMultiF2, 39, 1, 1); book(_histMultiK2Star1430_0, 39, 1, 2); book(_histMultiP, 40, 1, 1); book(_histMultiLambda0, 40, 1, 2); book(_histMultiXiMinus, 40, 1, 3); book(_histMultiOmegaMinus, 40, 1, 4); book(_histMultiDeltaPlusPlus, 40, 1, 5); book(_histMultiSigma1385Plus, 40, 1, 6); book(_histMultiXi1530_0, 40, 1, 7); book(_histMultiLambdaB0, 40, 1, 8); - book(_weightedTotalPartNum,"TotalPartNum"); - book(_passedCutWeightSum, "passedCutWeightSum"); - book(_passedCut3WeightSum, "passedCut3WeightSum"); - book(_passedCut4WeightSum, "passedCut4WeightSum"); - book(_passedCut5WeightSum, "passedCut5WeightSum"); + book(_weightedTotalPartNum,"_TotalPartNum"); + book(_passedCutWeightSum, "_passedCutWeightSum"); + book(_passedCut3WeightSum, "_passedCut3WeightSum"); + book(_passedCut4WeightSum, "_passedCut4WeightSum"); + book(_passedCut5WeightSum, "_passedCut5WeightSum"); } void analyze(const Event& e) { // First, veto on leptonic events by requiring at least 4 charged FS particles const FinalState& fs = apply(e, "FS"); const size_t numParticles = fs.particles().size(); // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. 
if (numParticles < 2) { MSG_DEBUG("Failed leptonic event cut"); vetoEvent; } MSG_DEBUG("Passed leptonic event cut"); _passedCutWeightSum->fill(); _weightedTotalPartNum->fill(numParticles); // Get beams and average beam momentum const ParticlePair& beams = apply(e, "Beams").beams(); const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0; MSG_DEBUG("Avg beam momentum = " << meanBeamMom); // Thrusts MSG_DEBUG("Calculating thrust"); const Thrust& thrust = apply(e, "Thrust"); _hist1MinusT->fill(1 - thrust.thrust()); _histTMajor->fill(thrust.thrustMajor()); _histTMinor->fill(thrust.thrustMinor()); _histOblateness->fill(thrust.oblateness()); // Jets const FastJets& durjet = apply(e, "DurhamJets"); const FastJets& jadejet = apply(e, "JadeJets"); if (numParticles >= 3) { _passedCut3WeightSum->fill(); if (durjet.clusterSeq()) _histDiffRate2Durham->fill(durjet.clusterSeq()->exclusive_ymerge_max(2)); if (jadejet.clusterSeq()) _histDiffRate2Jade->fill(jadejet.clusterSeq()->exclusive_ymerge_max(2)); } if (numParticles >= 4) { _passedCut4WeightSum->fill(); if (durjet.clusterSeq()) _histDiffRate3Durham->fill(durjet.clusterSeq()->exclusive_ymerge_max(3)); if (jadejet.clusterSeq()) _histDiffRate3Jade->fill(jadejet.clusterSeq()->exclusive_ymerge_max(3)); } if (numParticles >= 5) { _passedCut5WeightSum->fill(); if (durjet.clusterSeq()) _histDiffRate4Durham->fill(durjet.clusterSeq()->exclusive_ymerge_max(4)); if (jadejet.clusterSeq()) _histDiffRate4Jade->fill(jadejet.clusterSeq()->exclusive_ymerge_max(4)); } // Sphericities MSG_DEBUG("Calculating sphericity"); const Sphericity& sphericity = apply(e, "Sphericity"); _histSphericity->fill(sphericity.sphericity()); _histAplanarity->fill(sphericity.aplanarity()); _histPlanarity->fill(sphericity.planarity()); // C & D params MSG_DEBUG("Calculating Parisi params"); const ParisiTensor& parisi = apply(e, "Parisi"); _histCParam->fill(parisi.C()); _histDParam->fill(parisi.D()); // Hemispheres MSG_DEBUG("Calculating 
hemisphere variables"); const Hemispheres& hemi = apply(e, "Hemispheres"); _histHemiMassH->fill(hemi.scaledM2high()); _histHemiMassL->fill(hemi.scaledM2low()); _histHemiMassD->fill(hemi.scaledM2diff()); _histHemiBroadW->fill(hemi.Bmax()); _histHemiBroadN->fill(hemi.Bmin()); _histHemiBroadT->fill(hemi.Bsum()); _histHemiBroadD->fill(hemi.Bdiff()); // Iterate over all the charged final state particles. double Evis = 0.0; double Evis2 = 0.0; MSG_DEBUG("About to iterate over charged FS particles"); for (const Particle& p : fs.particles()) { // Get momentum and energy of each particle. const Vector3 mom3 = p.p3(); const double energy = p.E(); Evis += energy; // Scaled momenta. const double mom = mom3.mod(); const double scaledMom = mom/meanBeamMom; const double logInvScaledMom = -std::log(scaledMom); _histLogScaledMom->fill(logInvScaledMom); _histScaledMom->fill(scaledMom); // Get momenta components w.r.t. thrust and sphericity. const double momT = dot(thrust.thrustAxis(), mom3); const double momS = dot(sphericity.sphericityAxis(), mom3); const double pTinT = dot(mom3, thrust.thrustMajorAxis()); const double pToutT = dot(mom3, thrust.thrustMinorAxis()); const double pTinS = dot(mom3, sphericity.sphericityMajorAxis()); const double pToutS = dot(mom3, sphericity.sphericityMinorAxis()); const double pT = sqrt(pow(pTinT, 2) + pow(pToutT, 2)); _histPtTIn->fill(fabs(pTinT/GeV)); _histPtTOut->fill(fabs(pToutT/GeV)); _histPtSIn->fill(fabs(pTinS/GeV)); _histPtSOut->fill(fabs(pToutS/GeV)); _histPtVsXp->fill(scaledMom, fabs(pT/GeV)); _histPtTOutVsXp->fill(scaledMom, fabs(pToutT/GeV)); // Calculate rapidities w.r.t. thrust and sphericity. 
const double rapidityT = 0.5 * std::log((energy + momT) / (energy - momT)); const double rapidityS = 0.5 * std::log((energy + momS) / (energy - momS)); _histRapidityT->fill(fabs(rapidityT)); _histRapidityS->fill(fabs(rapidityS)); MSG_TRACE(fabs(rapidityT) << " " << scaledMom/GeV); } Evis2 = Evis*Evis; // (A)EEC // Need iterators since second loop starts at current outer loop iterator, i.e. no "for" here! for (Particles::const_iterator p_i = fs.particles().begin(); p_i != fs.particles().end(); ++p_i) { for (Particles::const_iterator p_j = p_i; p_j != fs.particles().end(); ++p_j) { if (p_i == p_j) continue; const Vector3 mom3_i = p_i->momentum().p3(); const Vector3 mom3_j = p_j->momentum().p3(); const double energy_i = p_i->momentum().E(); const double energy_j = p_j->momentum().E(); const double cosij = dot(mom3_i.unit(), mom3_j.unit()); const double eec = (energy_i*energy_j) / Evis2; _histEEC->fill(cosij, eec); if (cosij < 0) _histAEEC->fill( cosij, eec); else _histAEEC->fill(-cosij, -eec); } } _histMultiCharged->fill(_histMultiCharged->bin(0).xMid(), numParticles); // Final state of unstable particles to get particle spectra const UnstableParticles& ufs = apply(e, "UFS"); for (const Particle& p : ufs.particles()) { int id = p.abspid(); switch (id) { case 211: _histMultiPiPlus->fill(_histMultiPiPlus->bin(0).xMid()); break; case 111: _histMultiPi0->fill(_histMultiPi0->bin(0).xMid()); break; case 321: _histMultiKPlus->fill(_histMultiKPlus->bin(0).xMid()); break; case 130: case 310: _histMultiK0->fill(_histMultiK0->bin(0).xMid()); break; case 221: _histMultiEta->fill(_histMultiEta->bin(0).xMid()); break; case 331: _histMultiEtaPrime->fill(_histMultiEtaPrime->bin(0).xMid()); break; case 411: _histMultiDPlus->fill(_histMultiDPlus->bin(0).xMid()); break; case 421: _histMultiD0->fill(_histMultiD0->bin(0).xMid()); break; case 511: case 521: case 531: _histMultiBPlus0->fill(_histMultiBPlus0->bin(0).xMid()); break; case 9010221: 
_histMultiF0->fill(_histMultiF0->bin(0).xMid()); break; case 113: _histMultiRho->fill(_histMultiRho->bin(0).xMid()); break; case 323: _histMultiKStar892Plus->fill(_histMultiKStar892Plus->bin(0).xMid()); break; case 313: _histMultiKStar892_0->fill(_histMultiKStar892_0->bin(0).xMid()); break; case 333: _histMultiPhi->fill(_histMultiPhi->bin(0).xMid()); break; case 413: _histMultiDStar2010Plus->fill(_histMultiDStar2010Plus->bin(0).xMid()); break; case 225: _histMultiF2->fill(_histMultiF2->bin(0).xMid()); break; case 315: _histMultiK2Star1430_0->fill(_histMultiK2Star1430_0->bin(0).xMid()); break; case 2212: _histMultiP->fill(_histMultiP->bin(0).xMid()); break; case 3122: _histMultiLambda0->fill(_histMultiLambda0->bin(0).xMid()); break; case 3312: _histMultiXiMinus->fill(_histMultiXiMinus->bin(0).xMid()); break; case 3334: _histMultiOmegaMinus->fill(_histMultiOmegaMinus->bin(0).xMid()); break; case 2224: _histMultiDeltaPlusPlus->fill(_histMultiDeltaPlusPlus->bin(0).xMid()); break; case 3114: _histMultiSigma1385Plus->fill(_histMultiSigma1385Plus->bin(0).xMid()); break; case 3324: _histMultiXi1530_0->fill(_histMultiXi1530_0->bin(0).xMid()); break; case 5122: _histMultiLambdaB0->fill(_histMultiLambdaB0->bin(0).xMid()); break; } } } // Finalize void finalize() { // Normalize inclusive single particle distributions to the average number // of charged particles per event. 
const double avgNumParts = dbl(*_weightedTotalPartNum / *_passedCutWeightSum); normalize(_histPtTIn, avgNumParts); normalize(_histPtTOut, avgNumParts); normalize(_histPtSIn, avgNumParts); normalize(_histPtSOut, avgNumParts); normalize(_histRapidityT, avgNumParts); normalize(_histRapidityS, avgNumParts); normalize(_histLogScaledMom, avgNumParts); normalize(_histScaledMom, avgNumParts); scale(_histEEC, 1.0 / *_passedCutWeightSum); scale(_histAEEC, 1.0 / *_passedCutWeightSum); scale(_histMultiCharged, 1.0 / *_passedCutWeightSum); scale(_histMultiPiPlus, 1.0 / *_passedCutWeightSum); scale(_histMultiPi0, 1.0 / *_passedCutWeightSum); scale(_histMultiKPlus, 1.0 / *_passedCutWeightSum); scale(_histMultiK0, 1.0 / *_passedCutWeightSum); scale(_histMultiEta, 1.0 / *_passedCutWeightSum); scale(_histMultiEtaPrime, 1.0 / *_passedCutWeightSum); scale(_histMultiDPlus, 1.0 / *_passedCutWeightSum); scale(_histMultiD0, 1.0 / *_passedCutWeightSum); scale(_histMultiBPlus0, 1.0 / *_passedCutWeightSum); scale(_histMultiF0, 1.0 / *_passedCutWeightSum); scale(_histMultiRho, 1.0 / *_passedCutWeightSum); scale(_histMultiKStar892Plus, 1.0 / *_passedCutWeightSum); scale(_histMultiKStar892_0, 1.0 / *_passedCutWeightSum); scale(_histMultiPhi, 1.0 / *_passedCutWeightSum); scale(_histMultiDStar2010Plus, 1.0 / *_passedCutWeightSum); scale(_histMultiF2, 1.0 / *_passedCutWeightSum); scale(_histMultiK2Star1430_0, 1.0 / *_passedCutWeightSum); scale(_histMultiP, 1.0 / *_passedCutWeightSum); scale(_histMultiLambda0, 1.0 / *_passedCutWeightSum); scale(_histMultiXiMinus, 1.0 / *_passedCutWeightSum); scale(_histMultiOmegaMinus, 1.0 / *_passedCutWeightSum); scale(_histMultiDeltaPlusPlus, 1.0 / *_passedCutWeightSum); scale(_histMultiSigma1385Plus, 1.0 / *_passedCutWeightSum); scale(_histMultiXi1530_0, 1.0 / *_passedCutWeightSum); scale(_histMultiLambdaB0, 1.0 / *_passedCutWeightSum); scale(_hist1MinusT, 1.0 / *_passedCutWeightSum); scale(_histTMajor, 1.0 / *_passedCutWeightSum); scale(_histTMinor, 1.0 / 
*_passedCutWeightSum); scale(_histOblateness, 1.0 / *_passedCutWeightSum); scale(_histSphericity, 1.0 / *_passedCutWeightSum); scale(_histAplanarity, 1.0 / *_passedCutWeightSum); scale(_histPlanarity, 1.0 / *_passedCutWeightSum); scale(_histHemiMassD, 1.0 / *_passedCutWeightSum); scale(_histHemiMassH, 1.0 / *_passedCutWeightSum); scale(_histHemiMassL, 1.0 / *_passedCutWeightSum); scale(_histHemiBroadW, 1.0 / *_passedCutWeightSum); scale(_histHemiBroadN, 1.0 / *_passedCutWeightSum); scale(_histHemiBroadT, 1.0 / *_passedCutWeightSum); scale(_histHemiBroadD, 1.0 / *_passedCutWeightSum); scale(_histCParam, 1.0 / *_passedCutWeightSum); scale(_histDParam, 1.0 / *_passedCutWeightSum); scale(_histDiffRate2Durham, 1.0 / *_passedCut3WeightSum); scale(_histDiffRate2Jade, 1.0 / *_passedCut3WeightSum); scale(_histDiffRate3Durham, 1.0 / *_passedCut4WeightSum); scale(_histDiffRate3Jade, 1.0 / *_passedCut4WeightSum); scale(_histDiffRate4Durham, 1.0 / *_passedCut5WeightSum); scale(_histDiffRate4Jade, 1.0 / *_passedCut5WeightSum); } //@} private: /// Store the weighted sums of numbers of charged / charged+neutral /// particles - used to calculate average number of particles for the /// inclusive single particle distributions' normalisations. 
CounterPtr _weightedTotalPartNum; /// @name Sums of weights past various cuts //@{ CounterPtr _passedCutWeightSum; CounterPtr _passedCut3WeightSum; CounterPtr _passedCut4WeightSum; CounterPtr _passedCut5WeightSum; //@} /// @name Histograms //@{ Histo1DPtr _histPtTIn; Histo1DPtr _histPtTOut; Histo1DPtr _histPtSIn; Histo1DPtr _histPtSOut; Histo1DPtr _histRapidityT; Histo1DPtr _histRapidityS; Histo1DPtr _histScaledMom, _histLogScaledMom; Profile1DPtr _histPtTOutVsXp, _histPtVsXp; Histo1DPtr _hist1MinusT; Histo1DPtr _histTMajor; Histo1DPtr _histTMinor; Histo1DPtr _histOblateness; Histo1DPtr _histSphericity; Histo1DPtr _histAplanarity; Histo1DPtr _histPlanarity; Histo1DPtr _histCParam; Histo1DPtr _histDParam; Histo1DPtr _histHemiMassD; Histo1DPtr _histHemiMassH; Histo1DPtr _histHemiMassL; Histo1DPtr _histHemiBroadW; Histo1DPtr _histHemiBroadN; Histo1DPtr _histHemiBroadT; Histo1DPtr _histHemiBroadD; Histo1DPtr _histDiffRate2Durham; Histo1DPtr _histDiffRate2Jade; Histo1DPtr _histDiffRate3Durham; Histo1DPtr _histDiffRate3Jade; Histo1DPtr _histDiffRate4Durham; Histo1DPtr _histDiffRate4Jade; Histo1DPtr _histEEC, _histAEEC; Histo1DPtr _histMultiCharged; Histo1DPtr _histMultiPiPlus; Histo1DPtr _histMultiPi0; Histo1DPtr _histMultiKPlus; Histo1DPtr _histMultiK0; Histo1DPtr _histMultiEta; Histo1DPtr _histMultiEtaPrime; Histo1DPtr _histMultiDPlus; Histo1DPtr _histMultiD0; Histo1DPtr _histMultiBPlus0; Histo1DPtr _histMultiF0; Histo1DPtr _histMultiRho; Histo1DPtr _histMultiKStar892Plus; Histo1DPtr _histMultiKStar892_0; Histo1DPtr _histMultiPhi; Histo1DPtr _histMultiDStar2010Plus; Histo1DPtr _histMultiF2; Histo1DPtr _histMultiK2Star1430_0; Histo1DPtr _histMultiP; Histo1DPtr _histMultiLambda0; Histo1DPtr _histMultiXiMinus; Histo1DPtr _histMultiOmegaMinus; Histo1DPtr _histMultiDeltaPlusPlus; Histo1DPtr _histMultiSigma1385Plus; Histo1DPtr _histMultiXi1530_0; Histo1DPtr _histMultiLambdaB0; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(DELPHI_1996_S3430090); } diff --git 
a/analyses/pluginLEP/L3_2004_I652683.cc b/analyses/pluginLEP/L3_2004_I652683.cc --- a/analyses/pluginLEP/L3_2004_I652683.cc +++ b/analyses/pluginLEP/L3_2004_I652683.cc @@ -1,212 +1,212 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/Thrust.hh" #include "Rivet/Projections/ParisiTensor.hh" #include "Rivet/Projections/Hemispheres.hh" #define I_KNOW_THE_INITIAL_QUARKS_PROJECTION_IS_DODGY_BUT_NEED_TO_USE_IT #include "Rivet/Projections/InitialQuarks.hh" namespace Rivet { /// Jet rates and event shapes at LEP I+II class L3_2004_I652683 : public Analysis { public: /// Constructor DEFAULT_RIVET_ANALYSIS_CTOR(L3_2004_I652683); // L3_2004_I652683() : Analysis("L3_2004_I652683") // { } /// Book histograms and initialise projections before the run void init() { // Projections to use const FinalState FS; declare(FS, "FS"); declare(Beam(), "beams"); const ChargedFinalState CFS; declare(CFS, "CFS"); const Thrust thrust(FS); declare(thrust, "thrust"); declare(ParisiTensor(FS), "Parisi"); declare(Hemispheres(thrust), "Hemispheres"); declare(InitialQuarks(), "initialquarks"); // Book the histograms book(_h_Thrust_udsc , 47, 1, 1); book(_h_Thrust_bottom , 47, 1, 2); book(_h_heavyJetmass_udsc , 48, 1, 1); book(_h_heavyJetmass_bottom , 48, 1, 2); book(_h_totalJetbroad_udsc , 49, 1, 1); book(_h_totalJetbroad_bottom , 49, 1, 2); book(_h_wideJetbroad_udsc , 50, 1, 1); book(_h_wideJetbroad_bottom , 50, 1, 2); book(_h_Cparameter_udsc , 51, 1, 1); book(_h_Cparameter_bottom , 51, 1, 2); book(_h_Dparameter_udsc , 52, 1, 1); book(_h_Dparameter_bottom , 52, 1, 2); book(_h_Ncharged , 59, 1, 1); book(_h_Ncharged_udsc , 59, 1, 2); book(_h_Ncharged_bottom , 59, 1, 3); book(_h_scaledMomentum , 65, 1, 1); book(_h_scaledMomentum_udsc , 65, 1, 2); book(_h_scaledMomentum_bottom, 65, 1, 3); - book(_sumW_udsc, "sumW_udsc"); - book(_sumW_b, "sumW_b"); - 
book(_sumW_ch, "sumW_ch"); - book(_sumW_ch_udsc, "sumW_ch_udsc"); - book(_sumW_ch_b, "sumW_ch_b"); + book(_sumW_udsc, "_sumW_udsc"); + book(_sumW_b, "_sumW_b"); + book(_sumW_ch, "_sumW_ch"); + book(_sumW_ch_udsc, "_sumW_ch_udsc"); + book(_sumW_ch_b, "_sumW_ch_b"); } /// Perform the per-event analysis void analyze(const Event& event) { // Get beam average momentum const ParticlePair& beams = apply(event, "beams").beams(); const double beamMomentum = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0; // InitialQuarks projection to have udsc events separated from b events /// @todo Yuck!!! Eliminate when possible... int flavour = 0; const InitialQuarks& iqf = apply(event, "initialquarks"); Particles quarks; if ( iqf.particles().size() == 2 ) { flavour = iqf.particles().front().abspid(); quarks = iqf.particles(); } else { map quarkmap; for (const Particle& p : iqf.particles()) { if (quarkmap.find(p.pid()) == quarkmap.end()) quarkmap[p.pid()] = p; else if (quarkmap[p.pid()].E() < p.E()) quarkmap[p.pid()] = p; } double max_energy = 0.; for (int i = 1; i <= 5; ++i) { double energy = 0.; if (quarkmap.find(i) != quarkmap.end()) energy += quarkmap[ i].E(); if (quarkmap.find(-i) != quarkmap.end()) energy += quarkmap[-i].E(); if (energy > max_energy) flavour = i; } if (quarkmap.find(flavour) != quarkmap.end()) quarks.push_back(quarkmap[flavour]); if (quarkmap.find(-flavour) != quarkmap.end()) quarks.push_back(quarkmap[-flavour]); } // Flavour label /// @todo Change to a bool? const int iflav = (flavour == PID::DQUARK || flavour == PID::UQUARK || flavour == PID::SQUARK || flavour == PID::CQUARK) ? 1 : (flavour == PID::BQUARK) ? 
5 : 0; // Update weight sums if (iflav == 1) { _sumW_udsc->fill(); } else if (iflav == 5) { _sumW_b->fill(); } _sumW_ch->fill(); // Charged multiplicity const FinalState& cfs = applyProjection(event, "CFS"); _h_Ncharged->fill(cfs.size()); if (iflav == 1) { _sumW_ch_udsc->fill(); _h_Ncharged_udsc->fill(cfs.size()); } else if (iflav == 5) { _sumW_ch_b->fill(); _h_Ncharged_bottom->fill(cfs.size()); } // Scaled momentum const Particles& chparticles = cfs.particlesByPt(); for (const Particle& p : chparticles) { const Vector3 momentum3 = p.p3(); const double mom = momentum3.mod(); const double scaledMom = mom/beamMomentum; const double logScaledMom = std::log(scaledMom); _h_scaledMomentum->fill(-logScaledMom); if (iflav == 1) { _h_scaledMomentum_udsc->fill(-logScaledMom); } else if (iflav == 5) { _h_scaledMomentum_bottom->fill(-logScaledMom); } } // Thrust const Thrust& thrust = applyProjection(event, "thrust"); if (iflav == 1) { _h_Thrust_udsc->fill(thrust.thrust()); } else if (iflav == 5) { _h_Thrust_bottom->fill(thrust.thrust()); } // C and D Parisi parameters const ParisiTensor& parisi = applyProjection(event, "Parisi"); if (iflav == 1) { _h_Cparameter_udsc->fill(parisi.C()); _h_Dparameter_udsc->fill(parisi.D()); } else if (iflav == 5) { _h_Cparameter_bottom->fill(parisi.C()); _h_Dparameter_bottom->fill(parisi.D()); } // The hemisphere variables const Hemispheres& hemisphere = applyProjection(event, "Hemispheres"); if (iflav == 1) { _h_heavyJetmass_udsc->fill(hemisphere.scaledM2high()); _h_totalJetbroad_udsc->fill(hemisphere.Bsum()); _h_wideJetbroad_udsc->fill(hemisphere.Bmax()); } else if (iflav == 5) { _h_heavyJetmass_bottom->fill(hemisphere.scaledM2high()); _h_totalJetbroad_bottom->fill(hemisphere.Bsum()); _h_wideJetbroad_bottom->fill(hemisphere.Bmax()); } } /// Normalise histograms etc., after the run void finalize() { scale({_h_Thrust_udsc, _h_heavyJetmass_udsc, _h_totalJetbroad_udsc, _h_wideJetbroad_udsc, _h_Cparameter_udsc, _h_Dparameter_udsc}, 1/ 
*_sumW_udsc); scale({_h_Thrust_bottom, _h_heavyJetmass_bottom, _h_totalJetbroad_bottom, _h_wideJetbroad_bottom, _h_Cparameter_bottom, _h_Dparameter_bottom}, 1./ *_sumW_b); scale(_h_Ncharged, 2/ *_sumW_ch); scale(_h_Ncharged_udsc, 2/ *_sumW_ch_udsc); scale(_h_Ncharged_bottom, 2/ *_sumW_ch_b); scale(_h_scaledMomentum, 1/ *_sumW_ch); scale(_h_scaledMomentum_udsc, 1/ *_sumW_ch_udsc); scale(_h_scaledMomentum_bottom, 1/ *_sumW_ch_b); } /// Weight counters CounterPtr _sumW_udsc, _sumW_b, _sumW_ch, _sumW_ch_udsc, _sumW_ch_b; /// @name Histograms //@{ Histo1DPtr _h_Thrust_udsc, _h_Thrust_bottom; Histo1DPtr _h_heavyJetmass_udsc, _h_heavyJetmass_bottom; Histo1DPtr _h_totalJetbroad_udsc, _h_totalJetbroad_bottom; Histo1DPtr _h_wideJetbroad_udsc, _h_wideJetbroad_bottom; Histo1DPtr _h_Cparameter_udsc, _h_Cparameter_bottom; Histo1DPtr _h_Dparameter_udsc, _h_Dparameter_bottom; Histo1DPtr _h_Ncharged, _h_Ncharged_udsc, _h_Ncharged_bottom; Histo1DPtr _h_scaledMomentum, _h_scaledMomentum_udsc, _h_scaledMomentum_bottom; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(L3_2004_I652683); } diff --git a/analyses/pluginLEP/OPAL_1996_S3257789.cc b/analyses/pluginLEP/OPAL_1996_S3257789.cc --- a/analyses/pluginLEP/OPAL_1996_S3257789.cc +++ b/analyses/pluginLEP/OPAL_1996_S3257789.cc @@ -1,96 +1,96 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/UnstableParticles.hh" namespace Rivet { /// @brief OPAL J/Psi fragmentation function paper /// @author Peter Richardson class OPAL_1996_S3257789 : public Analysis { public: /// Constructor OPAL_1996_S3257789() : Analysis("OPAL_1996_S3257789") {} /// @name Analysis methods //@{ void init() { declare(Beam(), "Beams"); declare(ChargedFinalState(), "FS"); declare(UnstableParticles(), "UFS"); book(_histXpJPsi , 1, 1, 1); book(_multJPsi , 2, 1, 1); book(_multPsiPrime , 2, 1, 2); - 
book(_weightSum,"weightSum"); + book(_weightSum,"_weightSum"); } void analyze(const Event& e) { // First, veto on leptonic events by requiring at least 4 charged FS particles const FinalState& fs = apply(e, "FS"); const size_t numParticles = fs.particles().size(); // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. if (numParticles < 2) { MSG_DEBUG("Failed leptonic event cut"); vetoEvent; } MSG_DEBUG("Passed leptonic event cut"); // Get beams and average beam momentum const ParticlePair& beams = apply(e, "Beams").beams(); const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0; MSG_DEBUG("Avg beam momentum = " << meanBeamMom); // Final state of unstable particles to get particle spectra const UnstableParticles& ufs = apply(e, "UFS"); for (const Particle& p : ufs.particles()) { if(p.abspid()==443) { double xp = p.p3().mod()/meanBeamMom; _histXpJPsi->fill(xp); _multJPsi->fill(91.2); _weightSum->fill(); } else if(p.abspid()==100443) { _multPsiPrime->fill(91.2); } } } /// Finalize void finalize() { if(_weightSum->val()>0.) 
scale(_histXpJPsi , 0.1/ *_weightSum); scale(_multJPsi , 1./sumOfWeights()); scale(_multPsiPrime, 1./sumOfWeights()); } //@} private: CounterPtr _weightSum; Histo1DPtr _histXpJPsi; Histo1DPtr _multJPsi; Histo1DPtr _multPsiPrime; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(OPAL_1996_S3257789); } diff --git a/analyses/pluginLEP/OPAL_1998_S3780481.cc b/analyses/pluginLEP/OPAL_1998_S3780481.cc --- a/analyses/pluginLEP/OPAL_1998_S3780481.cc +++ b/analyses/pluginLEP/OPAL_1998_S3780481.cc @@ -1,195 +1,195 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #define I_KNOW_THE_INITIAL_QUARKS_PROJECTION_IS_DODGY_BUT_NEED_TO_USE_IT #include "Rivet/Projections/InitialQuarks.hh" namespace Rivet { /// @brief OPAL flavour-dependent fragmentation paper /// @author Hendrik Hoeth class OPAL_1998_S3780481 : public Analysis { public: /// Constructor OPAL_1998_S3780481() : Analysis("OPAL_1998_S3780481") { } /// @name Analysis methods //@{ void analyze(const Event& e) { // First, veto on leptonic events by requiring at least 4 charged FS particles const FinalState& fs = apply(e, "FS"); const size_t numParticles = fs.particles().size(); // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. if (numParticles < 2) { MSG_DEBUG("Failed ncharged cut"); vetoEvent; } MSG_DEBUG("Passed ncharged cut"); _weightedTotalPartNum->fill(numParticles); // Get beams and average beam momentum const ParticlePair& beams = apply(e, "Beams").beams(); const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0; MSG_DEBUG("Avg beam momentum = " << meanBeamMom); int flavour = 0; const InitialQuarks& iqf = apply(e, "IQF"); // If we only have two quarks (qqbar), just take the flavour. // If we have more than two quarks, look for the highest energetic q-qbar pair. /// @todo Yuck... 
does this *really* have to be quark-based?!? if (iqf.particles().size() == 2) { flavour = iqf.particles().front().abspid(); } else { map quarkmap; for (const Particle& p : iqf.particles()) { if (quarkmap[p.pid()] < p.E()) { quarkmap[p.pid()] = p.E(); } } double maxenergy = 0.; for (int i = 1; i <= 5; ++i) { if (quarkmap[i]+quarkmap[-i] > maxenergy) { flavour = i; } } } switch (flavour) { case 1: case 2: case 3: _SumOfudsWeights->fill(); break; case 4: _SumOfcWeights->fill(); break; case 5: _SumOfbWeights->fill(); break; } for (const Particle& p : fs.particles()) { const double xp = p.p3().mod()/meanBeamMom; const double logxp = -std::log(xp); _histXpall->fill(xp); _histLogXpall->fill(logxp); _histMultiChargedall->fill(_histMultiChargedall->bin(0).xMid()); switch (flavour) { /// @todo Use PDG code enums case PID::DQUARK: case PID::UQUARK: case PID::SQUARK: _histXpuds->fill(xp); _histLogXpuds->fill(logxp); _histMultiChargeduds->fill(_histMultiChargeduds->bin(0).xMid()); break; case PID::CQUARK: _histXpc->fill(xp); _histLogXpc->fill(logxp); _histMultiChargedc->fill(_histMultiChargedc->bin(0).xMid()); break; case PID::BQUARK: _histXpb->fill(xp); _histLogXpb->fill(logxp); _histMultiChargedb->fill(_histMultiChargedb->bin(0).xMid()); break; } } } void init() { // Projections declare(Beam(), "Beams"); declare(ChargedFinalState(), "FS"); declare(InitialQuarks(), "IQF"); // Book histos book(_histXpuds ,1, 1, 1); book(_histXpc ,2, 1, 1); book(_histXpb ,3, 1, 1); book(_histXpall ,4, 1, 1); book(_histLogXpuds ,5, 1, 1); book(_histLogXpc ,6, 1, 1); book(_histLogXpb ,7, 1, 1); book(_histLogXpall ,8, 1, 1); book(_histMultiChargeduds ,9, 1, 1); book(_histMultiChargedc ,9, 1, 2); book(_histMultiChargedb ,9, 1, 3); book(_histMultiChargedall ,9, 1, 4); // Counters - book(_weightedTotalPartNum, "TotalPartNum"); - book(_SumOfudsWeights, "udsWeights"); - book(_SumOfcWeights, "cWeights"); - book(_SumOfbWeights, "bWeights"); + book(_weightedTotalPartNum, "_TotalPartNum"); + 
book(_SumOfudsWeights, "_udsWeights"); + book(_SumOfcWeights, "_cWeights"); + book(_SumOfbWeights, "_bWeights"); } /// Finalize void finalize() { const double avgNumParts = dbl(*_weightedTotalPartNum) / sumOfWeights(); normalize(_histXpuds , avgNumParts); normalize(_histXpc , avgNumParts); normalize(_histXpb , avgNumParts); normalize(_histXpall , avgNumParts); normalize(_histLogXpuds , avgNumParts); normalize(_histLogXpc , avgNumParts); normalize(_histLogXpb , avgNumParts); normalize(_histLogXpall , avgNumParts); scale(_histMultiChargeduds, 1.0/ *_SumOfudsWeights); scale(_histMultiChargedc , 1.0/ *_SumOfcWeights); scale(_histMultiChargedb , 1.0/ *_SumOfbWeights); scale(_histMultiChargedall, 1.0/sumOfWeights()); } //@} private: /// Store the weighted sums of numbers of charged / charged+neutral /// particles - used to calculate average number of particles for the /// inclusive single particle distributions' normalisations. CounterPtr _weightedTotalPartNum; CounterPtr _SumOfudsWeights; CounterPtr _SumOfcWeights; CounterPtr _SumOfbWeights; Histo1DPtr _histXpuds; Histo1DPtr _histXpc; Histo1DPtr _histXpb; Histo1DPtr _histXpall; Histo1DPtr _histLogXpuds; Histo1DPtr _histLogXpc; Histo1DPtr _histLogXpb; Histo1DPtr _histLogXpall; Histo1DPtr _histMultiChargeduds; Histo1DPtr _histMultiChargedc; Histo1DPtr _histMultiChargedb; Histo1DPtr _histMultiChargedall; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(OPAL_1998_S3780481); } diff --git a/analyses/pluginLEP/OPAL_2004_I631361.cc b/analyses/pluginLEP/OPAL_2004_I631361.cc --- a/analyses/pluginLEP/OPAL_2004_I631361.cc +++ b/analyses/pluginLEP/OPAL_2004_I631361.cc @@ -1,362 +1,352 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/HadronicFinalState.hh" #include "Rivet/Tools/BinnedHistogram.hh" #include "fastjet/JetDefinition.hh" namespace fastjet { class 
P_scheme : public JetDefinition::Recombiner { public: std::string description() const {return "";} void recombine(const PseudoJet & pa, const PseudoJet & pb, PseudoJet & pab) const { PseudoJet tmp = pa + pb; double E = sqrt(tmp.px()*tmp.px() + tmp.py()*tmp.py() + tmp.pz()*tmp.pz()); pab.reset_momentum(tmp.px(), tmp.py(), tmp.pz(), E); } void preprocess(PseudoJet & p) const { double E = sqrt(p.px()*p.px() + p.py()*p.py() + p.pz()*p.pz()); p.reset_momentum(p.px(), p.py(), p.pz(), E); } ~P_scheme() { } }; } namespace Rivet { class OPAL_2004_I631361 : public Analysis { public: /// Constructor - OPAL_2004_I631361() - : Analysis("OPAL_2004_I631361"), _sumW(0.0) - { } - + DEFAULT_RIVET_ANALYSIS_CTOR(OPAL_2004_I631361); /// @name Analysis methods //@{ void init() { // Get options from the new option system _mode = 0; if ( getOption("PROCESS") == "GG" ) _mode = 0; if ( getOption("PROCESS") == "QQ" ) _mode = 1; // projections we need for both cases const FinalState fs; declare(fs, "FS"); const ChargedFinalState cfs; declare(cfs, "CFS"); // additional projections for q qbar if(_mode==1) { - declare(HadronicFinalState(fs), "HFS"); - declare(HadronicFinalState(cfs), "HCFS"); + declare(HadronicFinalState(fs), "HFS"); + declare(HadronicFinalState(cfs), "HCFS"); } // book the histograms if(_mode==0) { int ih(0), iy(0); if (inRange(0.5*sqrtS()/GeV, 5.0, 5.5)) { ih = 1; - iy = 1; + iy = 1; } else if (inRange(0.5*sqrtS()/GeV, 5.5, 6.5)) { ih = 1; - iy = 2; + iy = 2; } else if (inRange(0.5*sqrtS()/GeV, 6.5, 7.5)) { ih = 1; - iy = 3; + iy = 3; } else if (inRange(0.5*sqrtS()/GeV, 7.5, 9.5)) { ih = 2; - iy = 1; + iy = 1; } else if (inRange(0.5*sqrtS()/GeV, 9.5, 13.0)) { ih = 2; - iy = 2; + iy = 2; } else if (inRange(0.5*sqrtS()/GeV, 13.0, 16.0)) { ih = 3; - iy = 1; + iy = 1; } else if (inRange(0.5*sqrtS()/GeV, 16.0, 20.0)) { ih = 3; - iy = 2; + iy = 2; } + if (!ih) MSG_WARNING("Option \"PROCESS=GG\" not compatible with this beam energy!"); assert(ih>0); book(_h_chMult_gg, ih,1,iy); - 
if(ih==3) - book(_h_chFragFunc_gg, 5,1,iy); - else - _h_chFragFunc_gg = nullptr; + if(ih==3) book(_h_chFragFunc_gg, 5,1,iy); + else _h_chFragFunc_gg = nullptr; } else { Histo1DPtr dummy; - book(dummy, 1,1,1); - _h_chMult_qq.add(5.0, 5.5, dummy); - book(dummy, 1,1,2); - _h_chMult_qq.add(5.5, 6.5, dummy); - book(dummy, 1,1,3); - _h_chMult_qq.add(6.5, 7.5, dummy); - book(dummy, 2,1,1); - _h_chMult_qq.add(7.5, 9.5, dummy); - book(dummy, 2,1,2); - _h_chMult_qq.add(9.5, 13.0, dummy); - book(dummy, 3,1,1); - _h_chMult_qq.add(13.0, 16.0, dummy); - book(dummy, 3,1,2); - _h_chMult_qq.add(16.0, 20.0, dummy); + _h_chMult_qq.add( 5.0, 5.5, book(dummy, 1,1,1)); + _h_chMult_qq.add( 5.5, 6.5, book(dummy, 1,1,2)); + _h_chMult_qq.add( 6.5, 7.5, book(dummy, 1,1,3)); + _h_chMult_qq.add( 7.5, 9.5, book(dummy, 2,1,1)); + _h_chMult_qq.add( 9.5, 13.0, book(dummy, 2,1,2)); + _h_chMult_qq.add(13.0, 16.0, book(dummy, 3,1,1)); + _h_chMult_qq.add(16.0, 20.0, book(dummy, 3,1,2)); - book(dummy, 5,1,1); - _h_chFragFunc_qq.add(13.0, 16.0, dummy); - book(dummy, 5,1,2); - _h_chFragFunc_qq.add(16.0, 20.0, dummy); + _h_chFragFunc_qq.add(13.0, 16.0, book(dummy, 5,1,1)); + _h_chFragFunc_qq.add(16.0, 20.0, book(dummy, 5,1,2)); + + _sumWEbin.resize(7); + for (size_t i = 0; i < 7; ++i) { + book(_sumWEbin[i], "/TMP/sumWEbin" + to_string(i)); + } } } /// Perform the per-event analysis void analyze(const Event& event) { const double weight = 1.0; // gg mode if(_mode==0) { - // find the initial gluons - Particles initial; - for (ConstGenParticlePtr p : HepMCUtils::particles(event.genEvent())) { - ConstGenVertexPtr pv = p->production_vertex(); - const PdgId pid = p->pdg_id(); - if(pid!=21) continue; - bool passed = false; - for (ConstGenParticlePtr pp : HepMCUtils::particles(pv, Relatives::PARENTS)) { - const PdgId ppid = abs(pp->pdg_id()); - passed = (ppid == PID::ELECTRON || ppid == PID::HIGGS || - ppid == PID::ZBOSON || ppid == PID::GAMMA); - if(passed) break; - } - if(passed) 
initial.push_back(Particle(*p)); - } - if(initial.size()!=2) vetoEvent; - // use the direction for the event axis - Vector3 axis = initial[0].momentum().p3().unit(); - // fill histograms - const Particles& chps = applyProjection(event, "CFS").particles(); - unsigned int nMult[2] = {0,0}; - _sumW += 2.*weight; - // distribution - for (const Particle& p : chps) { - double xE = 2.*p.E()/sqrtS(); - if(_h_chFragFunc_gg) _h_chFragFunc_gg->fill(xE, weight); - if(p.momentum().p3().dot(axis)>0.) - ++nMult[0]; - else - ++nMult[1]; - } - // multiplicities in jet - _h_chMult_gg->fill(nMult[0],weight); - _h_chMult_gg->fill(nMult[1],weight); + // find the initial gluons + Particles initial; + for (ConstGenParticlePtr p : HepMCUtils::particles(event.genEvent())) { + ConstGenVertexPtr pv = p->production_vertex(); + const PdgId pid = p->pdg_id(); + if(pid!=21) continue; + bool passed = false; + for (ConstGenParticlePtr pp : HepMCUtils::particles(pv, Relatives::PARENTS)) { + const PdgId ppid = abs(pp->pdg_id()); + passed = (ppid == PID::ELECTRON || ppid == PID::HIGGS || + ppid == PID::ZBOSON || ppid == PID::GAMMA); + if(passed) break; + } + if(passed) initial.push_back(Particle(*p)); + } + if(initial.size()!=2) vetoEvent; + // use the direction for the event axis + Vector3 axis = initial[0].momentum().p3().unit(); + // fill histograms + const Particles& chps = applyProjection(event, "CFS").particles(); + unsigned int nMult[2] = {0,0}; + // distribution + for (const Particle& p : chps) { + double xE = 2.*p.E()/sqrtS(); + if(_h_chFragFunc_gg) _h_chFragFunc_gg->fill(xE, weight); + if(p.momentum().p3().dot(axis)>0.) 
+ ++nMult[0]; + else + ++nMult[1]; + } + // multiplicities in jet + _h_chMult_gg->fill(nMult[0],weight); + _h_chMult_gg->fill(nMult[1],weight); } // qqbar mode else { - // cut on the number of charged particles - const Particles& chParticles = applyProjection(event, "CFS").particles(); - if(chParticles.size() < 5) vetoEvent; - // cluster the jets - const Particles& particles = applyProjection(event, "FS").particles(); - fastjet::JetDefinition ee_kt_def(fastjet::ee_kt_algorithm, &p_scheme); - PseudoJets pParticles; - for (Particle p : particles) { - PseudoJet temp = p.pseudojet(); - if(p.fromBottom()) { - temp.set_user_index(5); - } - pParticles.push_back(temp); - } - fastjet::ClusterSequence cluster(pParticles, ee_kt_def); - // rescale energys to just keep the directions of the jets - // and keep track of b tags - PseudoJets pJets = sorted_by_E(cluster.exclusive_jets_up_to(3)); - if(pJets.size() < 3) vetoEvent; - array dirs; - for(int i=0; i<3; i++) { - dirs[i] = Vector3(pJets[i].px(),pJets[i].py(),pJets[i].pz()).unit(); - } - array bTagged; - Jets jets; - for(int i=0; i<3; i++) { - double Ejet = sqrtS()*sin(angle(dirs[(i+1)%3],dirs[(i+2)%3])) / - (sin(angle(dirs[i],dirs[(i+1)%3])) + sin(angle(dirs[i],dirs[(i+2)%3])) + sin(angle(dirs[(i+1)%3],dirs[(i+2)%3]))); - jets.push_back(FourMomentum(Ejet,Ejet*dirs[i].x(),Ejet*dirs[i].y(),Ejet*dirs[i].z())); - bTagged[i] = false; - for (PseudoJet particle : pJets[i].constituents()) { - if(particle.user_index() > 1 and !bTagged[i]) { - bTagged[i] = true; - } - } - } - - int QUARK1 = 0, QUARK2 = 1, GLUON = 2; - - if(jets[QUARK2].E() > jets[QUARK1].E()) swap(QUARK1, QUARK2); - if(jets[GLUON].E() > jets[QUARK1].E()) swap(QUARK1, GLUON); - if(!bTagged[QUARK2]) { - if(!bTagged[GLUON]) vetoEvent; - else swap(QUARK2, GLUON); - } - if(bTagged[GLUON]) vetoEvent; - - // exclude collinear or soft jets - double k1 = jets[QUARK1].E()*min(angle(jets[QUARK1].momentum(),jets[QUARK2].momentum()), - 
angle(jets[QUARK1].momentum(),jets[GLUON].momentum())); - double k2 = jets[QUARK2].E()*min(angle(jets[QUARK2].momentum(),jets[QUARK1].momentum()), - angle(jets[QUARK2].momentum(),jets[GLUON].momentum())); - if(k1<8.0*GeV || k2<8.0*GeV) vetoEvent; - - double sqg = (jets[QUARK1].momentum()+jets[GLUON].momentum()).mass2(); - double sgq = (jets[QUARK2].momentum()+jets[GLUON].momentum()).mass2(); - double s = (jets[QUARK1].momentum()+jets[QUARK2].momentum()+jets[GLUON].momentum()).mass2(); - - double Eg = 0.5*sqrt(sqg*sgq/s); - - if(Eg < 5.0 || Eg > 20.0) { vetoEvent; } - else if(Eg > 9.5) { - //requirements for experimental reconstructability raise as energy raises - if(!bTagged[QUARK1]) { - vetoEvent; - } - } - - // all cuts applied, increment sum of weights - _sumWEbin[getEbin(Eg)] += weight; - - - // transform to frame with event in y-z and glue jet in z direction - Matrix3 glueTOz(jets[GLUON].momentum().vector3(), Vector3(0,0,1)); - Vector3 transQuark = glueTOz*jets[QUARK2].momentum().vector3(); - Matrix3 quarksTOyz(Vector3(transQuark.x(), transQuark.y(), 0), Vector3(0,1,0)); - - // work out transformation to symmetric frame - array x_cm; - array x_cm_y; - array x_cm_z; - array x_pr; - for(int i=0; i<3; i++) { - x_cm[i] = 2*jets[i].E()/sqrt(s); - Vector3 p_transf = quarksTOyz*glueTOz*jets[i].p3(); - x_cm_y[i] = 2*p_transf.y()/sqrt(s); - x_cm_z[i] = 2*p_transf.z()/sqrt(s); - } - x_pr[GLUON] = sqrt(4*(1-x_cm[QUARK1])*(1-x_cm[QUARK2])/(3+x_cm[GLUON])); - x_pr[QUARK1] = x_pr[GLUON]/(1-x_cm[QUARK1]); - x_pr[QUARK2] = x_pr[GLUON]/(1-x_cm[QUARK2]); - double gamma = (x_pr[QUARK1] + x_pr[GLUON] + x_pr[QUARK2])/2; - double beta_z = x_pr[GLUON]/(gamma*x_cm[GLUON]) - 1; - double beta_y = (x_pr[QUARK2]/gamma - x_cm[QUARK2] - beta_z*x_cm_z[QUARK2])/x_cm_y[QUARK2]; - - LorentzTransform toSymmetric = LorentzTransform::mkObjTransformFromBeta(Vector3(0.,beta_y,beta_z)). 
- postMult(quarksTOyz*glueTOz); - - FourMomentum transGlue = toSymmetric.transform(jets[GLUON].momentum()); - double cutAngle = angle(toSymmetric.transform(jets[QUARK2].momentum()), transGlue)/2; - - int nCh = 0; - for (const Particle& chP : chParticles ) { - FourMomentum pSymmFrame = toSymmetric.transform(FourMomentum(chP.p3().mod(), chP.px(), chP.py(), chP.pz())); - if(angle(pSymmFrame, transGlue) < cutAngle) { - _h_chFragFunc_qq.fill(Eg, pSymmFrame.E()*sin(cutAngle)/Eg, weight); - nCh++; - } - } - _h_chMult_qq.fill(Eg, nCh, weight); + // cut on the number of charged particles + const Particles& chParticles = applyProjection(event, "CFS").particles(); + if(chParticles.size() < 5) vetoEvent; + // cluster the jets + const Particles& particles = applyProjection(event, "FS").particles(); + fastjet::JetDefinition ee_kt_def(fastjet::ee_kt_algorithm, &p_scheme); + PseudoJets pParticles; + for (Particle p : particles) { + PseudoJet temp = p.pseudojet(); + if(p.fromBottom()) { + temp.set_user_index(5); + } + pParticles.push_back(temp); + } + fastjet::ClusterSequence cluster(pParticles, ee_kt_def); + // rescale energys to just keep the directions of the jets + // and keep track of b tags + PseudoJets pJets = sorted_by_E(cluster.exclusive_jets_up_to(3)); + if(pJets.size() < 3) vetoEvent; + array dirs; + for(int i=0; i<3; i++) { + dirs[i] = Vector3(pJets[i].px(),pJets[i].py(),pJets[i].pz()).unit(); + } + array bTagged; + Jets jets; + for(int i=0; i<3; i++) { + double Ejet = sqrtS()*sin(angle(dirs[(i+1)%3],dirs[(i+2)%3])) / + (sin(angle(dirs[i],dirs[(i+1)%3])) + sin(angle(dirs[i],dirs[(i+2)%3])) + sin(angle(dirs[(i+1)%3],dirs[(i+2)%3]))); + jets.push_back(FourMomentum(Ejet,Ejet*dirs[i].x(),Ejet*dirs[i].y(),Ejet*dirs[i].z())); + bTagged[i] = false; + for (PseudoJet particle : pJets[i].constituents()) { + if(particle.user_index() > 1 and !bTagged[i]) { + bTagged[i] = true; + } + } + } + + int QUARK1 = 0, QUARK2 = 1, GLUON = 2; + + if(jets[QUARK2].E() > jets[QUARK1].E()) 
swap(QUARK1, QUARK2); + if(jets[GLUON].E() > jets[QUARK1].E()) swap(QUARK1, GLUON); + if(!bTagged[QUARK2]) { + if(!bTagged[GLUON]) vetoEvent; + else swap(QUARK2, GLUON); + } + if(bTagged[GLUON]) vetoEvent; + + // exclude collinear or soft jets + double k1 = jets[QUARK1].E()*min(angle(jets[QUARK1].momentum(),jets[QUARK2].momentum()), + angle(jets[QUARK1].momentum(),jets[GLUON].momentum())); + double k2 = jets[QUARK2].E()*min(angle(jets[QUARK2].momentum(),jets[QUARK1].momentum()), + angle(jets[QUARK2].momentum(),jets[GLUON].momentum())); + if(k1<8.0*GeV || k2<8.0*GeV) vetoEvent; + + double sqg = (jets[QUARK1].momentum()+jets[GLUON].momentum()).mass2(); + double sgq = (jets[QUARK2].momentum()+jets[GLUON].momentum()).mass2(); + double s = (jets[QUARK1].momentum()+jets[QUARK2].momentum()+jets[GLUON].momentum()).mass2(); + + double Eg = 0.5*sqrt(sqg*sgq/s); + + if(Eg < 5.0 || Eg > 46.) { vetoEvent; } + else if(Eg > 9.5) { + //requirements for experimental reconstructability raise as energy raises + if(!bTagged[QUARK1]) { + vetoEvent; + } + } + + // all cuts applied, increment sum of weights + _sumWEbin[getEbin(Eg)]->fill(); + + + // transform to frame with event in y-z and glue jet in z direction + Matrix3 glueTOz(jets[GLUON].momentum().vector3(), Vector3(0,0,1)); + Vector3 transQuark = glueTOz*jets[QUARK2].momentum().vector3(); + Matrix3 quarksTOyz(Vector3(transQuark.x(), transQuark.y(), 0), Vector3(0,1,0)); + + // work out transformation to symmetric frame + array x_cm; + array x_cm_y; + array x_cm_z; + array x_pr; + for(int i=0; i<3; i++) { + x_cm[i] = 2*jets[i].E()/sqrt(s); + Vector3 p_transf = quarksTOyz*glueTOz*jets[i].p3(); + x_cm_y[i] = 2*p_transf.y()/sqrt(s); + x_cm_z[i] = 2*p_transf.z()/sqrt(s); + } + x_pr[GLUON] = sqrt(4*(1-x_cm[QUARK1])*(1-x_cm[QUARK2])/(3+x_cm[GLUON])); + x_pr[QUARK1] = x_pr[GLUON]/(1-x_cm[QUARK1]); + x_pr[QUARK2] = x_pr[GLUON]/(1-x_cm[QUARK2]); + double gamma = (x_pr[QUARK1] + x_pr[GLUON] + x_pr[QUARK2])/2; + double beta_z = 
x_pr[GLUON]/(gamma*x_cm[GLUON]) - 1; + double beta_y = (x_pr[QUARK2]/gamma - x_cm[QUARK2] - beta_z*x_cm_z[QUARK2])/x_cm_y[QUARK2]; + + LorentzTransform toSymmetric = LorentzTransform::mkObjTransformFromBeta(Vector3(0.,beta_y,beta_z)). + postMult(quarksTOyz*glueTOz); + + FourMomentum transGlue = toSymmetric.transform(jets[GLUON].momentum()); + double cutAngle = angle(toSymmetric.transform(jets[QUARK2].momentum()), transGlue)/2; + + int nCh = 0; + for (const Particle& chP : chParticles ) { + FourMomentum pSymmFrame = toSymmetric.transform(FourMomentum(chP.p3().mod(), chP.px(), chP.py(), chP.pz())); + if(angle(pSymmFrame, transGlue) < cutAngle) { + _h_chFragFunc_qq.fill(Eg, pSymmFrame.E()*sin(cutAngle)/Eg, weight); + nCh++; + } + } + _h_chMult_qq.fill(Eg, nCh, weight); } } /// Normalise histograms etc., after the run void finalize() { if(_mode==0) { - normalize(_h_chMult_gg); - if(_h_chFragFunc_gg) scale(_h_chFragFunc_gg, 1./_sumW); + normalize(_h_chMult_gg); + if(_h_chFragFunc_gg) normalize(_h_chFragFunc_gg, 1.0, true); } else { - for (Histo1DPtr hist : _h_chMult_qq.histos()) { - normalize(hist); - } - for(int i=0; i<2; i++) { - if(!isZero(_sumWEbin[i+5])) { - scale(_h_chFragFunc_qq.histos()[i], 1./_sumWEbin[i+5]); - } - } + for (Histo1DPtr hist : _h_chMult_qq.histos()) { + normalize(hist); + } + for(int i=0; i<2; i++) { + if(!isZero(_sumWEbin[i+5]->val())) { + scale(_h_chFragFunc_qq.histos()[i], 1./(*_sumWEbin[i+5])); + } + } } } //@} private: int getEbin(double E_glue) { int ih = -1; if (inRange(E_glue/GeV, 5.0, 5.5)) { ih = 0; } else if (inRange(E_glue/GeV, 5.5, 6.5)) { ih = 1; } else if (inRange(E_glue/GeV, 6.5, 7.5)) { ih = 2; } else if (inRange(E_glue/GeV, 7.5, 9.5)) { ih = 3; } else if (inRange(E_glue/GeV, 9.5, 13.0)) { ih = 4; } else if (inRange(E_glue/GeV, 13.0, 16.0)) { ih = 5; } else if (inRange(E_glue/GeV, 16.0, 20.0)) { ih = 6; } assert(ih >= 0); return ih; } class PScheme : public JetDefinition::Recombiner { public: std::string description() const 
{return "";} void recombine(const PseudoJet & pa, const PseudoJet & pb, PseudoJet & pab) const { PseudoJet tmp = pa + pb; double E = sqrt(tmp.px()*tmp.px() + tmp.py()*tmp.py() + tmp.pz()*tmp.pz()); pab.reset_momentum(tmp.px(), tmp.py(), tmp.pz(), E); } void preprocess(PseudoJet & p) const { double E = sqrt(p.px()*p.px() + p.py()*p.py() + p.pz()*p.pz()); p.reset_momentum(p.px(), p.py(), p.pz(), E); } ~PScheme() { } }; private: // The mode unsigned int _mode; /// @todo IMPROVEMENT NEEDED? - double _sumW; - vector _sumWEbin; + vector _sumWEbin; // p scheme jet definition fastjet::P_scheme p_scheme; /// @name Histograms //@{ Histo1DPtr _h_chMult_gg; Histo1DPtr _h_chFragFunc_gg; BinnedHistogram _h_chMult_qq; BinnedHistogram _h_chFragFunc_qq; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(OPAL_2004_I631361); } diff --git a/analyses/pluginLEP/OPAL_2004_I631361.info b/analyses/pluginLEP/OPAL_2004_I631361.info --- a/analyses/pluginLEP/OPAL_2004_I631361.info +++ b/analyses/pluginLEP/OPAL_2004_I631361.info @@ -1,55 +1,55 @@ Name: OPAL_2004_I631361 Year: 2004 Summary: Gluon jet charged multiplicities and fragmentation functions Experiment: OPAL Collider: LEP InspireID: 631361 Status: VALIDATED Authors: - Daniel Reichelt References: - Phys. Rev. D69, 032002,2004 - hep-ex/0310048 RunInfo: The fictional $e^+e^-\to gg$ process NumEvents: 100000 NeedCrossSection: no Beams: [e+, e- ] -Energies: [10.50,11.96,13.96,16.86,21.84,28.48,35.44,91,2] +Energies: [10.50,11.96,13.96,16.86,21.84,28.48,35.44,91.2] Options: - PROCESS=GG,QQ NeedCrossSection: False Description: ' Measurement of gluon jet properties using the jet boost algorithm, a technique to select unbiased samples of gluon jets in $e^+e^-$ annihilation, i.e. gluon jets free of biases introduced by event selection or jet finding criteria. Two modes are provided, the preferred option is to produce the fictional $e^+e^-\to g g $ process to be used due to the corrections applied to the data, PROCESS=GG.
The original analysis technique to extract gluon jets from hadronic $e^+e^-$ events using $e^+e^-\to q\bar{q}$ events, PROCESS=QQ, is also provided but cannot be used for tuning as the data has been corrected for impurities, however it is still useful qualitatively in order to check the properties of gluon jets in the original way in which they were measured rather than using a fictitious process.' BibKey: Abbiendi:2004gh BibTeX: '@article{Abbiendi:2004gh, author = "Abbiendi, G. and others", title = "{Experimental studies of unbiased gluon jets from $e^{+} e^{-}$ annihilations using the jet boost algorithm}", collaboration = "OPAL", journal = "Phys. Rev.", volume = "D69", year = "2004", pages = "032002", doi = "10.1103/PhysRevD.69.032002", eprint = "hep-ex/0310048", archivePrefix = "arXiv", primaryClass = "hep-ex", reportNumber = "CERN-EP-2004-067", SLACcitation = "%%CITATION = HEP-EX/0310048;%%" }' Validation: - $A LEP-10.5-gg - $A-2 LEP-11.96-gg - $A-3 LEP-13.96-gg - $A-4 LEP-16.86-gg - $A-5 LEP-21.84-gg - $A-6 LEP-28.48-gg - $A-7 LEP-35.44-gg - $A-8 LEP-91; rivet $< -a $A:PROCESS=QQ -o $@ - $A-9 LEP-91 :PROCESS=QQ diff --git a/analyses/pluginLEP/OPAL_2004_I648738.cc b/analyses/pluginLEP/OPAL_2004_I648738.cc --- a/analyses/pluginLEP/OPAL_2004_I648738.cc +++ b/analyses/pluginLEP/OPAL_2004_I648738.cc @@ -1,118 +1,118 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { class OPAL_2004_I648738 : public Analysis { public: /// Constructor OPAL_2004_I648738() : Analysis("OPAL_2004_I648738"), _sumW(3), _histo_xE(3) { } /// @name Analysis methods //@{ void init() { declare(FinalState(), "FS"); declare(ChargedFinalState(), "CFS"); unsigned int ih=0; if (inRange(0.5*sqrtS()/GeV, 4.0, 9.0)) { ih = 1; } else if (inRange(0.5*sqrtS()/GeV, 9.0, 19.0)) { ih = 2; } else if (inRange(0.5*sqrtS()/GeV, 19.0, 30.0)) { ih = 3; } else if (inRange(0.5*sqrtS()/GeV, 45.5, 45.7)) { ih =
5; } else if (inRange(0.5*sqrtS()/GeV, 30.0, 70.0)) { ih = 4; } else if (inRange(0.5*sqrtS()/GeV, 91.5, 104.5)) { ih = 6; } assert(ih>0); // book the histograms book(_histo_xE[0], ih+5,1,1); book(_histo_xE[1], ih+5,1,2); if(ih<5) book(_histo_xE[2] ,ih+5,1,3); - book(_sumW[0], "sumW_0"); - book(_sumW[1], "sumW_1"); - book(_sumW[2], "sumW_2"); + book(_sumW[0], "_sumW_0"); + book(_sumW[1], "_sumW_1"); + book(_sumW[2], "_sumW_2"); } /// Perform the per-event analysis void analyze(const Event& event) { // find the initial quarks/gluons Particles initial; for (ConstGenParticlePtr p : HepMCUtils::particles(event.genEvent())) { ConstGenVertexPtr pv = p->production_vertex(); const PdgId pid = abs(p->pdg_id()); if(!( (pid>=1&&pid<=5) || pid ==21) ) continue; bool passed = false; for (ConstGenParticlePtr pp : HepMCUtils::particles(pv, Relatives::PARENTS)) { const PdgId ppid = abs(pp->pdg_id()); passed = (ppid == PID::ELECTRON || ppid == PID::HIGGS || ppid == PID::ZBOSON || ppid == PID::GAMMA); if(passed) break; } if(passed) initial.push_back(Particle(*p)); } if(initial.size()!=2) { vetoEvent; } // type of event unsigned int itype=2; if(initial[0].pid()==-initial[1].pid()) { PdgId pid = abs(initial[0].pid()); if(pid>=1&&pid<=4) itype=0; else itype=1; } assert(itype<_histo_xE.size()); // fill histograms _sumW[itype]->fill(2.); const Particles& chps = applyProjection(event, "CFS").particles(); for(const Particle& p : chps) { double xE = 2.*p.E()/sqrtS(); _histo_xE[itype]->fill(xE); } } /// Normalise histograms etc., after the run void finalize() { for(unsigned int ix=0;ix<_histo_xE.size();++ix) { if(_sumW[ix]->val()>0.) 
scale(_histo_xE[ix],1./ *_sumW[ix]); } } //@} private: vector _sumW; /// @name Histograms //@{ vector _histo_xE; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(OPAL_2004_I648738); } diff --git a/analyses/pluginLEP/OPAL_2004_S6132243.cc b/analyses/pluginLEP/OPAL_2004_S6132243.cc --- a/analyses/pluginLEP/OPAL_2004_S6132243.cc +++ b/analyses/pluginLEP/OPAL_2004_S6132243.cc @@ -1,274 +1,274 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/Sphericity.hh" #include "Rivet/Projections/Thrust.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/ParisiTensor.hh" #include "Rivet/Projections/Hemispheres.hh" #include namespace Rivet { /// @brief OPAL event shapes and moments at 91, 133, 177, and 197 GeV /// @author Andy Buckley class OPAL_2004_S6132243 : public Analysis { public: /// Constructor OPAL_2004_S6132243() : Analysis("OPAL_2004_S6132243"), _isqrts(-1) { // } /// @name Analysis methods //@{ /// Energies: 91, 133, 177 (161-183), 197 (189-209) => index 0..4 int getHistIndex(double sqrts) { int ih = -1; if (inRange(sqrts/GeV, 89.9, 91.5)) { ih = 0; } else if (fuzzyEquals(sqrts/GeV, 133)) { ih = 1; } else if (fuzzyEquals(sqrts/GeV, 177)) { // (161-183) ih = 2; } else if (fuzzyEquals(sqrts/GeV, 197)) { // (189-209) ih = 3; } else { stringstream ss; ss << "Invalid energy for OPAL_2004 analysis: " << sqrts/GeV << " GeV != 91, 133, 177, or 197 GeV"; throw Error(ss.str()); } assert(ih >= 0); return ih; } void init() { // Projections declare(Beam(), "Beams"); const FinalState fs; declare(fs, "FS"); const ChargedFinalState cfs; declare(cfs, "CFS"); declare(FastJets(fs, FastJets::DURHAM, 0.7), "DurhamJets"); declare(Sphericity(fs), "Sphericity"); declare(ParisiTensor(fs), "Parisi"); const Thrust thrust(fs); declare(thrust, "Thrust"); declare(Hemispheres(thrust), "Hemispheres"); // Get beam 
energy index _isqrts = getHistIndex(sqrtS()); // Book histograms book(_hist1MinusT[_isqrts] ,1, 1, _isqrts+1); book(_histHemiMassH[_isqrts] ,2, 1, _isqrts+1); book(_histCParam[_isqrts] ,3, 1, _isqrts+1); book(_histHemiBroadT[_isqrts] ,4, 1, _isqrts+1); book(_histHemiBroadW[_isqrts] ,5, 1, _isqrts+1); book(_histY23Durham[_isqrts] ,6, 1, _isqrts+1); book(_histTMajor[_isqrts] ,7, 1, _isqrts+1); book(_histTMinor[_isqrts] ,8, 1, _isqrts+1); book(_histAplanarity[_isqrts] ,9, 1, _isqrts+1); book(_histSphericity[_isqrts] ,10, 1, _isqrts+1); book(_histOblateness[_isqrts] ,11, 1, _isqrts+1); book(_histHemiMassL[_isqrts] ,12, 1, _isqrts+1); book(_histHemiBroadN[_isqrts] ,13, 1, _isqrts+1); book(_histDParam[_isqrts] ,14, 1, _isqrts+1); // book(_hist1MinusTMom[_isqrts] ,15, 1, _isqrts+1); book(_histHemiMassHMom[_isqrts] ,16, 1, _isqrts+1); book(_histCParamMom[_isqrts] ,17, 1, _isqrts+1); book(_histHemiBroadTMom[_isqrts] ,18, 1, _isqrts+1); book(_histHemiBroadWMom[_isqrts] ,19, 1, _isqrts+1); book(_histY23DurhamMom[_isqrts] ,20, 1, _isqrts+1); book(_histTMajorMom[_isqrts] ,21, 1, _isqrts+1); book(_histTMinorMom[_isqrts] ,22, 1, _isqrts+1); book(_histSphericityMom[_isqrts] ,23, 1, _isqrts+1); book(_histOblatenessMom[_isqrts] ,24, 1, _isqrts+1); book(_histHemiMassLMom[_isqrts] ,25, 1, _isqrts+1); book(_histHemiBroadNMom[_isqrts] ,26, 1, _isqrts+1); - book(_sumWTrack2, "sumWTrack2"); - book(_sumWJet3, "sumWJet3"); + book(_sumWTrack2, "_sumWTrack2"); + book(_sumWJet3, "_sumWJet3"); } void analyze(const Event& event) { // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. 
const FinalState& cfs = apply(event, "CFS"); if (cfs.size() < 2) vetoEvent; _sumWTrack2->fill(); // Thrusts const Thrust& thrust = apply(event, "Thrust"); _hist1MinusT[_isqrts]->fill(1-thrust.thrust()); _histTMajor[_isqrts]->fill(thrust.thrustMajor()); _histTMinor[_isqrts]->fill(thrust.thrustMinor()); _histOblateness[_isqrts]->fill(thrust.oblateness()); for (int n = 1; n <= 5; ++n) { _hist1MinusTMom[_isqrts]->fill(n, pow(1-thrust.thrust(), n)); _histTMajorMom[_isqrts]->fill(n, pow(thrust.thrustMajor(), n)); _histTMinorMom[_isqrts]->fill(n, pow(thrust.thrustMinor(), n)); _histOblatenessMom[_isqrts]->fill(n, pow(thrust.oblateness(), n)); } // Jets const FastJets& durjet = apply(event, "DurhamJets"); if (durjet.clusterSeq()) { _sumWJet3->fill(); const double y23 = durjet.clusterSeq()->exclusive_ymerge_max(2); if (y23>0.0) { _histY23Durham[_isqrts]->fill(y23); for (int n = 1; n <= 5; ++n) { _histY23DurhamMom[_isqrts]->fill(n, pow(y23, n)); } } } // Sphericities const Sphericity& sphericity = apply(event, "Sphericity"); const double sph = sphericity.sphericity(); const double apl = sphericity.aplanarity(); _histSphericity[_isqrts]->fill(sph); _histAplanarity[_isqrts]->fill(apl); for (int n = 1; n <= 5; ++n) { _histSphericityMom[_isqrts]->fill(n, pow(sph, n)); } // C & D params const ParisiTensor& parisi = apply(event, "Parisi"); const double cparam = parisi.C(); const double dparam = parisi.D(); _histCParam[_isqrts]->fill(cparam); _histDParam[_isqrts]->fill(dparam); for (int n = 1; n <= 5; ++n) { _histCParamMom[_isqrts]->fill(n, pow(cparam, n)); } // Hemispheres const Hemispheres& hemi = apply(event, "Hemispheres"); // The paper says that M_H/L are scaled by sqrt(s), but scaling by E_vis is the way that fits the data... const double hemi_mh = hemi.scaledMhigh(); const double hemi_ml = hemi.scaledMlow(); /// @todo This shouldn't be necessary... what's going on? Memory corruption suspected :( // if (std::isnan(hemi_ml)) { // MSG_ERROR("NaN in HemiL! 
Event = " << numEvents()); // MSG_ERROR(hemi.M2low() << ", " << hemi.E2vis()); // } if (!std::isnan(hemi_mh) && !std::isnan(hemi_ml)) { const double hemi_bmax = hemi.Bmax(); const double hemi_bmin = hemi.Bmin(); const double hemi_bsum = hemi.Bsum(); _histHemiMassH[_isqrts]->fill(hemi_mh); _histHemiMassL[_isqrts]->fill(hemi_ml); _histHemiBroadW[_isqrts]->fill(hemi_bmax); _histHemiBroadN[_isqrts]->fill(hemi_bmin); _histHemiBroadT[_isqrts]->fill(hemi_bsum); for (int n = 1; n <= 5; ++n) { // if (std::isnan(pow(hemi_ml, n))) MSG_ERROR("NaN in HemiL moment! Event = " << numEvents()); _histHemiMassHMom[_isqrts]->fill(n, pow(hemi_mh, n)); _histHemiMassLMom[_isqrts]->fill(n, pow(hemi_ml, n)); _histHemiBroadWMom[_isqrts]->fill(n, pow(hemi_bmax, n)); _histHemiBroadNMom[_isqrts]->fill(n, pow(hemi_bmin, n)); _histHemiBroadTMom[_isqrts]->fill(n, pow(hemi_bsum, n)); } } } void finalize() { scale(_hist1MinusT[_isqrts], 1.0 / *_sumWTrack2); scale(_histTMajor[_isqrts], 1.0 / *_sumWTrack2); scale(_histTMinor[_isqrts], 1.0 / *_sumWTrack2); scale(_histOblateness[_isqrts], 1.0 / *_sumWTrack2); scale(_histSphericity[_isqrts], 1.0 / *_sumWTrack2); scale(_histAplanarity[_isqrts], 1.0 / *_sumWTrack2); scale(_histHemiMassH[_isqrts], 1.0 / *_sumWTrack2); scale(_histHemiMassL[_isqrts], 1.0 / *_sumWTrack2); scale(_histHemiBroadW[_isqrts], 1.0 / *_sumWTrack2); scale(_histHemiBroadN[_isqrts], 1.0 / *_sumWTrack2); scale(_histHemiBroadT[_isqrts], 1.0 / *_sumWTrack2); scale(_histCParam[_isqrts], 1.0 / *_sumWTrack2); scale(_histDParam[_isqrts], 1.0 / *_sumWTrack2); scale(_histY23Durham[_isqrts], 1.0 / *_sumWJet3); // scale(_hist1MinusTMom[_isqrts], 1.0 / *_sumWTrack2); scale(_histTMajorMom[_isqrts], 1.0 / *_sumWTrack2); scale(_histTMinorMom[_isqrts], 1.0 / *_sumWTrack2); scale(_histOblatenessMom[_isqrts], 1.0 / *_sumWTrack2); scale(_histSphericityMom[_isqrts], 1.0 / *_sumWTrack2); scale(_histHemiMassHMom[_isqrts], 1.0 / *_sumWTrack2); scale(_histHemiMassLMom[_isqrts], 1.0 / *_sumWTrack2); 
scale(_histHemiBroadWMom[_isqrts], 1.0 / *_sumWTrack2); scale(_histHemiBroadNMom[_isqrts], 1.0 / *_sumWTrack2); scale(_histHemiBroadTMom[_isqrts], 1.0 / *_sumWTrack2); scale(_histCParamMom[_isqrts], 1.0 / *_sumWTrack2); scale(_histY23DurhamMom[_isqrts], 1.0 / *_sumWJet3); } //@} private: /// Beam energy index for histograms int _isqrts; /// @name Counters of event weights passing the cuts //@{ CounterPtr _sumWTrack2, _sumWJet3; //@} /// @name Event shape histos at 4 energies //@{ Histo1DPtr _hist1MinusT[4]; Histo1DPtr _histHemiMassH[4]; Histo1DPtr _histCParam[4]; Histo1DPtr _histHemiBroadT[4]; Histo1DPtr _histHemiBroadW[4]; Histo1DPtr _histY23Durham[4]; Histo1DPtr _histTMajor[4]; Histo1DPtr _histTMinor[4]; Histo1DPtr _histAplanarity[4]; Histo1DPtr _histSphericity[4]; Histo1DPtr _histOblateness[4]; Histo1DPtr _histHemiMassL[4]; Histo1DPtr _histHemiBroadN[4]; Histo1DPtr _histDParam[4]; //@} /// @name Event shape moment histos at 4 energies //@{ Histo1DPtr _hist1MinusTMom[4]; Histo1DPtr _histHemiMassHMom[4]; Histo1DPtr _histCParamMom[4]; Histo1DPtr _histHemiBroadTMom[4]; Histo1DPtr _histHemiBroadWMom[4]; Histo1DPtr _histY23DurhamMom[4]; Histo1DPtr _histTMajorMom[4]; Histo1DPtr _histTMinorMom[4]; Histo1DPtr _histSphericityMom[4]; Histo1DPtr _histOblatenessMom[4]; Histo1DPtr _histHemiMassLMom[4]; Histo1DPtr _histHemiBroadNMom[4]; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(OPAL_2004_S6132243); } diff --git a/analyses/pluginLEP/SLD_1996_S3398250.cc b/analyses/pluginLEP/SLD_1996_S3398250.cc --- a/analyses/pluginLEP/SLD_1996_S3398250.cc +++ b/analyses/pluginLEP/SLD_1996_S3398250.cc @@ -1,141 +1,141 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/Sphericity.hh" #include "Rivet/Projections/Thrust.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/ParisiTensor.hh" #include 
"Rivet/Projections/Hemispheres.hh" #include #define I_KNOW_THE_INITIAL_QUARKS_PROJECTION_IS_DODGY_BUT_NEED_TO_USE_IT #include "Rivet/Projections/InitialQuarks.hh" namespace Rivet { /// @brief SLD multiplicities at mZ /// @author Peter Richardson class SLD_1996_S3398250 : public Analysis { public: /// Constructor SLD_1996_S3398250() : Analysis("SLD_1996_S3398250") {} /// @name Analysis methods //@{ void init() { // Projections declare(Beam(), "Beams"); declare(ChargedFinalState(), "CFS"); declare(InitialQuarks(), "IQF"); book(_h_bottom ,1, 1, 1); book(_h_charm ,2, 1, 1); book(_h_light ,3, 1, 1); - book(_weightLight, "weightLight"); - book(_weightCharm, "weightCharm"); - book(_weightBottom, "weightBottom"); + book(_weightLight, "_weightLight"); + book(_weightCharm, "_weightCharm"); + book(_weightBottom, "_weightBottom"); book(scatter_c, 4,1,1); book(scatter_b, 5,1,1); } void analyze(const Event& event) { // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. const FinalState& cfs = apply(event, "CFS"); if (cfs.size() < 2) vetoEvent; int flavour = 0; const InitialQuarks& iqf = apply(event, "IQF"); // If we only have two quarks (qqbar), just take the flavour. // If we have more than two quarks, look for the highest energetic q-qbar pair. 
if (iqf.particles().size() == 2) { flavour = iqf.particles().front().abspid(); } else { map quarkmap; for (const Particle& p : iqf.particles()) { if (quarkmap[p.pid()] < p.E()) { quarkmap[p.pid()] = p.E(); } } double maxenergy = 0.; for (int i = 1; i <= 5; ++i) { if (quarkmap[i]+quarkmap[-i] > maxenergy) { flavour = i; } } } const size_t numParticles = cfs.particles().size(); switch (flavour) { case 1: case 2: case 3: _weightLight ->fill(); _h_light->fillBin(0, numParticles); break; case 4: _weightCharm ->fill(); _h_charm->fillBin(0, numParticles); break; case 5: _weightBottom->fill(); _h_bottom->fillBin(0, numParticles); break; } } void multiplicity_subtract(const Histo1DPtr first, const Histo1DPtr second, Scatter2DPtr & scatter) { const double x = first->bin(0).xMid(); const double ex = first->bin(0).xWidth()/2.; const double y = first->bin(0).area() - second->bin(0).area(); const double ey = sqrt(sqr(first->bin(0).areaErr()) + sqr(second->bin(0).areaErr())); scatter->addPoint(x, y, ex, ey); } void finalize() { if (_weightBottom->val() != 0) scale(_h_bottom, 1./ *_weightBottom); if (_weightCharm->val() != 0) scale(_h_charm, 1./ *_weightCharm ); if (_weightLight->val() != 0) scale(_h_light, 1./ *_weightLight ); multiplicity_subtract(_h_charm, _h_light, scatter_c); multiplicity_subtract(_h_bottom, _h_light, scatter_b); } //@} private: Scatter2DPtr scatter_c, scatter_b; /// @name Weights //@{ CounterPtr _weightLight; CounterPtr _weightCharm; CounterPtr _weightBottom; //@} Histo1DPtr _h_bottom; Histo1DPtr _h_charm; Histo1DPtr _h_light; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(SLD_1996_S3398250); } diff --git a/analyses/pluginLEP/SLD_1999_S3743934.cc b/analyses/pluginLEP/SLD_1999_S3743934.cc --- a/analyses/pluginLEP/SLD_1999_S3743934.cc +++ b/analyses/pluginLEP/SLD_1999_S3743934.cc @@ -1,738 +1,738 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/FinalState.hh" #include 
"Rivet/Projections/UnstableParticles.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/Thrust.hh" #define I_KNOW_THE_INITIAL_QUARKS_PROJECTION_IS_DODGY_BUT_NEED_TO_USE_IT #include "Rivet/Projections/InitialQuarks.hh" namespace Rivet { /// @brief SLD flavour-dependent fragmentation paper /// @author Peter Richardson class SLD_1999_S3743934 : public Analysis { public: /// Constructor SLD_1999_S3743934() : Analysis("SLD_1999_S3743934"), _multPiPlus(4),_multKPlus(4),_multK0(4), _multKStar0(4),_multPhi(4), _multProton(4),_multLambda(4) { } /// @name Analysis methods //@{ void analyze(const Event& e) { // First, veto on leptonic events by requiring at least 4 charged FS particles const FinalState& fs = apply(e, "FS"); const size_t numParticles = fs.particles().size(); // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. if (numParticles < 2) { MSG_DEBUG("Failed ncharged cut"); vetoEvent; } MSG_DEBUG("Passed ncharged cut"); // Get beams and average beam momentum const ParticlePair& beams = apply(e, "Beams").beams(); const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0; MSG_DEBUG("Avg beam momentum = " << meanBeamMom); int flavour = 0; const InitialQuarks& iqf = apply(e, "IQF"); // If we only have two quarks (qqbar), just take the flavour. // If we have more than two quarks, look for the highest energetic q-qbar pair. /// @todo Can we make this based on hadron flavour instead? 
Particles quarks; if (iqf.particles().size() == 2) { flavour = iqf.particles().front().abspid(); quarks = iqf.particles(); } else { map quarkmap; for (const Particle& p : iqf.particles()) { if (quarkmap.find(p.pid()) == quarkmap.end()) quarkmap[p.pid()] = p; else if (quarkmap[p.pid()].E() < p.E()) quarkmap[p.pid()] = p; } double maxenergy = 0.; for (int i = 1; i <= 5; ++i) { double energy(0.); if (quarkmap.find( i) != quarkmap.end()) energy += quarkmap[ i].E(); if (quarkmap.find(-i) != quarkmap.end()) energy += quarkmap[-i].E(); if (energy > maxenergy) flavour = i; } if (quarkmap.find(flavour) != quarkmap.end()) quarks.push_back(quarkmap[flavour]); if (quarkmap.find(-flavour) != quarkmap.end()) quarks.push_back(quarkmap[-flavour]); } switch (flavour) { case PID::DQUARK: case PID::UQUARK: case PID::SQUARK: _SumOfudsWeights->fill(); break; case PID::CQUARK: _SumOfcWeights->fill(); break; case PID::BQUARK: _SumOfbWeights->fill(); break; } // thrust axis for projections Vector3 axis = apply(e, "Thrust").thrustAxis(); double dot(0.); if (!quarks.empty()) { dot = quarks[0].p3().dot(axis); if (quarks[0].pid() < 0) dot *= -1; } for (const Particle& p : fs.particles()) { const double xp = p.p3().mod()/meanBeamMom; // if in quark or antiquark hemisphere bool quark = p.p3().dot(axis)*dot > 0.; _h_XpChargedN->fill(xp); _temp_XpChargedN1->fill(xp); _temp_XpChargedN2->fill(xp); _temp_XpChargedN3->fill(xp); int id = p.abspid(); // charged pions if (id == PID::PIPLUS) { _h_XpPiPlusN->fill(xp); _multPiPlus[0]->fill(); switch (flavour) { case PID::DQUARK: case PID::UQUARK: case PID::SQUARK: _multPiPlus[1]->fill(); _h_XpPiPlusLight->fill(xp); if( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 )) _h_RPiPlus->fill(xp); else _h_RPiMinus->fill(xp); break; case PID::CQUARK: _multPiPlus[2]->fill(); _h_XpPiPlusCharm->fill(xp); break; case PID::BQUARK: _multPiPlus[3]->fill(); _h_XpPiPlusBottom->fill(xp); break; } } else if (id == PID::KPLUS) { _h_XpKPlusN->fill(xp); _multKPlus[0]->fill(); 
switch (flavour) { case PID::DQUARK: case PID::UQUARK: case PID::SQUARK: _multKPlus[1]->fill(); _temp_XpKPlusLight->fill(xp); _h_XpKPlusLight->fill(xp); if( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 )) _h_RKPlus->fill(xp); else _h_RKMinus->fill(xp); break; break; case PID::CQUARK: _multKPlus[2]->fill(); _h_XpKPlusCharm->fill(xp); _temp_XpKPlusCharm->fill(xp); break; case PID::BQUARK: _multKPlus[3]->fill(); _h_XpKPlusBottom->fill(xp); break; } } else if (id == PID::PROTON) { _h_XpProtonN->fill(xp); _multProton[0]->fill(); switch (flavour) { case PID::DQUARK: case PID::UQUARK: case PID::SQUARK: _multProton[1]->fill(); _temp_XpProtonLight->fill(xp); _h_XpProtonLight->fill(xp); if( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 )) _h_RProton->fill(xp); else _h_RPBar ->fill(xp); break; break; case PID::CQUARK: _multProton[2]->fill(); _temp_XpProtonCharm->fill(xp); _h_XpProtonCharm->fill(xp); break; case PID::BQUARK: _multProton[3]->fill(); _h_XpProtonBottom->fill(xp); break; } } } const UnstableParticles& ufs = apply(e, "UFS"); for (const Particle& p : ufs.particles()) { const double xp = p.p3().mod()/meanBeamMom; // if in quark or antiquark hemisphere bool quark = p.p3().dot(axis)*dot>0.; int id = p.abspid(); if (id == PID::LAMBDA) { _multLambda[0]->fill(); _h_XpLambdaN->fill(xp); switch (flavour) { case PID::DQUARK: case PID::UQUARK: case PID::SQUARK: _multLambda[1]->fill(); _h_XpLambdaLight->fill(xp); if( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 )) _h_RLambda->fill(xp); else _h_RLBar ->fill(xp); break; case PID::CQUARK: _multLambda[2]->fill(); _h_XpLambdaCharm->fill(xp); break; case PID::BQUARK: _multLambda[3]->fill(); _h_XpLambdaBottom->fill(xp); break; } } else if (id == 313) { _multKStar0[0]->fill(); _h_XpKStar0N->fill(xp); switch (flavour) { case PID::DQUARK: case PID::UQUARK: case PID::SQUARK: _multKStar0[1]->fill(); _temp_XpKStar0Light->fill(xp); _h_XpKStar0Light->fill(xp); if ( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 )) _h_RKS0 
->fill(xp); else _h_RKSBar0->fill(xp); break; break; case PID::CQUARK: _multKStar0[2]->fill(); _temp_XpKStar0Charm->fill(xp); _h_XpKStar0Charm->fill(xp); break; case PID::BQUARK: _multKStar0[3]->fill(); _h_XpKStar0Bottom->fill(xp); break; } } else if (id == 333) { _multPhi[0]->fill(); _h_XpPhiN->fill(xp); switch (flavour) { case PID::DQUARK: case PID::UQUARK: case PID::SQUARK: _multPhi[1]->fill(); _h_XpPhiLight->fill(xp); break; case PID::CQUARK: _multPhi[2]->fill(); _h_XpPhiCharm->fill(xp); break; case PID::BQUARK: _multPhi[3]->fill(); _h_XpPhiBottom->fill(xp); break; } } else if (id == PID::K0S || id == PID::K0L) { _multK0[0]->fill(); _h_XpK0N->fill(xp); switch (flavour) { case PID::DQUARK: case PID::UQUARK: case PID::SQUARK: _multK0[1]->fill(); _h_XpK0Light->fill(xp); break; case PID::CQUARK: _multK0[2]->fill(); _h_XpK0Charm->fill(xp); break; case PID::BQUARK: _multK0[3]->fill(); _h_XpK0Bottom->fill(xp); break; } } } } void init() { // Projections declare(Beam(), "Beams"); declare(ChargedFinalState(), "FS"); declare(UnstableParticles(), "UFS"); declare(InitialQuarks(), "IQF"); declare(Thrust(FinalState()), "Thrust"); book(_temp_XpChargedN1 ,"TMP/XpChargedN1", refData( 1, 1, 1)); book(_temp_XpChargedN2 ,"TMP/XpChargedN2", refData( 2, 1, 1)); book(_temp_XpChargedN3 ,"TMP/XpChargedN3", refData( 3, 1, 1)); book(_h_XpPiPlusN , 1, 1, 2); book(_h_XpKPlusN , 2, 1, 2); book(_h_XpProtonN , 3, 1, 2); book(_h_XpChargedN , 4, 1, 1); book(_h_XpK0N , 5, 1, 1); book(_h_XpLambdaN , 7, 1, 1); book(_h_XpKStar0N , 8, 1, 1); book(_h_XpPhiN , 9, 1, 1); book(_h_XpPiPlusLight ,10, 1, 1); book(_h_XpPiPlusCharm ,10, 1, 2); book(_h_XpPiPlusBottom ,10, 1, 3); book(_h_XpKPlusLight ,12, 1, 1); book(_h_XpKPlusCharm ,12, 1, 2); book(_h_XpKPlusBottom ,12, 1, 3); book(_h_XpKStar0Light ,14, 1, 1); book(_h_XpKStar0Charm ,14, 1, 2); book(_h_XpKStar0Bottom ,14, 1, 3); book(_h_XpProtonLight ,16, 1, 1); book(_h_XpProtonCharm ,16, 1, 2); book(_h_XpProtonBottom ,16, 1, 3); book(_h_XpLambdaLight ,18, 1, 
1); book(_h_XpLambdaCharm ,18, 1, 2); book(_h_XpLambdaBottom ,18, 1, 3); book(_h_XpK0Light ,20, 1, 1); book(_h_XpK0Charm ,20, 1, 2); book(_h_XpK0Bottom ,20, 1, 3); book(_h_XpPhiLight ,22, 1, 1); book(_h_XpPhiCharm ,22, 1, 2); book(_h_XpPhiBottom ,22, 1, 3); book(_temp_XpKPlusCharm ,"TMP/XpKPlusCharm", refData(13, 1, 1)); book(_temp_XpKPlusLight ,"TMP/XpKPlusLight", refData(13, 1, 1)); book(_temp_XpKStar0Charm ,"TMP/XpKStar0Charm", refData(15, 1, 1)); book(_temp_XpKStar0Light ,"TMP/XpKStar0Light", refData(15, 1, 1)); book(_temp_XpProtonCharm ,"TMP/XpProtonCharm", refData(17, 1, 1)); book(_temp_XpProtonLight ,"TMP/XpProtonLight", refData(17, 1, 1)); book(_h_RPiPlus , 26, 1, 1); book(_h_RPiMinus , 26, 1, 2); book(_h_RKS0 , 28, 1, 1); book(_h_RKSBar0 , 28, 1, 2); book(_h_RKPlus , 30, 1, 1); book(_h_RKMinus , 30, 1, 2); book(_h_RProton , 32, 1, 1); book(_h_RPBar , 32, 1, 2); book(_h_RLambda , 34, 1, 1); book(_h_RLBar , 34, 1, 2); book(_s_Xp_PiPl_Ch , 1, 1, 1); book(_s_Xp_KPl_Ch , 2, 1, 1); book(_s_Xp_Pr_Ch , 3, 1, 1); book(_s_Xp_PiPlCh_PiPlLi, 11, 1, 1); book(_s_Xp_PiPlBo_PiPlLi, 11, 1, 2); book(_s_Xp_KPlCh_KPlLi , 13, 1, 1); book(_s_Xp_KPlBo_KPlLi , 13, 1, 2); book(_s_Xp_KS0Ch_KS0Li , 15, 1, 1); book(_s_Xp_KS0Bo_KS0Li , 15, 1, 2); book(_s_Xp_PrCh_PrLi , 17, 1, 1); book(_s_Xp_PrBo_PrLi , 17, 1, 2); book(_s_Xp_LaCh_LaLi , 19, 1, 1); book(_s_Xp_LaBo_LaLi , 19, 1, 2); book(_s_Xp_K0Ch_K0Li , 21, 1, 1); book(_s_Xp_K0Bo_K0Li , 21, 1, 2); book(_s_Xp_PhiCh_PhiLi , 23, 1, 1); book(_s_Xp_PhiBo_PhiLi , 23, 1, 2); book(_s_PiM_PiP , 27, 1, 1); book(_s_KSBar0_KS0, 29, 1, 1); book(_s_KM_KP , 31, 1, 1); book(_s_Pr_PBar , 33, 1, 1); book(_s_Lam_LBar , 35, 1, 1); - book(_SumOfudsWeights, "SumOfudsWeights"); - book(_SumOfcWeights, "SumOfcWeights"); - book(_SumOfbWeights, "SumOfbWeights"); + book(_SumOfudsWeights, "_SumOfudsWeights"); + book(_SumOfcWeights, "_SumOfcWeights"); + book(_SumOfbWeights, "_SumOfbWeights"); for ( size_t i=0; i<4; ++i) { - book(_multPiPlus[i], 
"multPiPlus_"+to_str(i)); - book(_multKPlus[i], "multKPlus_"+to_str(i)); - book(_multK0[i], "multK0_"+to_str(i)); - book(_multKStar0[i], "multKStar0_"+to_str(i)); - book(_multPhi[i], "multPhi_"+to_str(i)); - book(_multProton[i], "multProton_"+to_str(i)); - book(_multLambda[i], "multLambda_"+to_str(i)); + book(_multPiPlus[i], "_multPiPlus_"+to_str(i)); + book(_multKPlus[i], "_multKPlus_"+to_str(i)); + book(_multK0[i], "_multK0_"+to_str(i)); + book(_multKStar0[i], "_multKStar0_"+to_str(i)); + book(_multPhi[i], "_multPhi_"+to_str(i)); + book(_multProton[i], "_multProton_"+to_str(i)); + book(_multLambda[i], "_multLambda_"+to_str(i)); } book(tmp1, 24, 1, 1, true); book(tmp2, 24, 1, 2, true); book(tmp3, 24, 1, 3, true); book(tmp4, 24, 1, 4, true); book(tmp5, 25, 1, 1, true); book(tmp6, 25, 1, 2, true); book(tmp7, 24, 2, 1, true); book(tmp8, 24, 2, 2, true); book(tmp9, 24, 2, 3, true); book(tmp10, 24, 2, 4, true); book(tmp11, 25, 2, 1, true); book(tmp12, 25, 2, 2, true); book(tmp13, 24, 3, 1, true); book(tmp14, 24, 3, 2, true); book(tmp15, 24, 3, 3, true); book(tmp16, 24, 3, 4, true); book(tmp17, 25, 3, 1, true); book(tmp18, 25, 3, 2, true); book(tmp19, 24, 4, 1, true); book(tmp20, 24, 4, 2, true); book(tmp21, 24, 4, 3, true); book(tmp22, 24, 4, 4, true); book(tmp23, 25, 4, 1, true); book(tmp24, 25, 4, 2, true); book(tmp25, 24, 5, 1, true); book(tmp26, 24, 5, 2, true); book(tmp27, 24, 5, 3, true); book(tmp28, 24, 5, 4, true); book(tmp29, 25, 5, 1, true); book(tmp30, 25, 5, 2, true); book(tmp31, 24, 6, 1, true); book(tmp32, 24, 6, 2, true); book(tmp33, 24, 6, 3, true); book(tmp34, 24, 6, 4, true); book(tmp35, 25, 6, 1, true); book(tmp36, 25, 6, 2, true); book(tmp37, 24, 7, 1, true); book(tmp38, 24, 7, 2, true); book(tmp39, 24, 7, 3, true); book(tmp40, 24, 7, 4, true); book(tmp41, 25, 7, 1, true); book(tmp42, 25, 7, 2, true); } /// Finalize void finalize() { // Get the ratio plots sorted out first divide(_h_XpPiPlusN, _temp_XpChargedN1, _s_Xp_PiPl_Ch); divide(_h_XpKPlusN, 
_temp_XpChargedN2, _s_Xp_KPl_Ch); divide(_h_XpProtonN, _temp_XpChargedN3, _s_Xp_Pr_Ch); divide(_h_XpPiPlusCharm, _h_XpPiPlusLight, _s_Xp_PiPlCh_PiPlLi); _s_Xp_PiPlCh_PiPlLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfcWeights)); divide(_h_XpPiPlusBottom, _h_XpPiPlusLight, _s_Xp_PiPlBo_PiPlLi); _s_Xp_PiPlBo_PiPlLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfbWeights)); divide(_temp_XpKPlusCharm , _temp_XpKPlusLight, _s_Xp_KPlCh_KPlLi); _s_Xp_KPlCh_KPlLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfcWeights)); divide(_h_XpKPlusBottom, _h_XpKPlusLight, _s_Xp_KPlBo_KPlLi); _s_Xp_KPlBo_KPlLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfbWeights)); divide(_temp_XpKStar0Charm, _temp_XpKStar0Light, _s_Xp_KS0Ch_KS0Li); _s_Xp_KS0Ch_KS0Li->scale(1., dbl(*_SumOfudsWeights / *_SumOfcWeights)); divide(_h_XpKStar0Bottom, _h_XpKStar0Light, _s_Xp_KS0Bo_KS0Li); _s_Xp_KS0Bo_KS0Li->scale(1., dbl(*_SumOfudsWeights / *_SumOfbWeights)); divide(_temp_XpProtonCharm, _temp_XpProtonLight, _s_Xp_PrCh_PrLi); _s_Xp_PrCh_PrLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfcWeights)); divide(_h_XpProtonBottom, _h_XpProtonLight, _s_Xp_PrBo_PrLi); _s_Xp_PrBo_PrLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfbWeights)); divide(_h_XpLambdaCharm, _h_XpLambdaLight, _s_Xp_LaCh_LaLi); _s_Xp_LaCh_LaLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfcWeights)); divide(_h_XpLambdaBottom, _h_XpLambdaLight, _s_Xp_LaBo_LaLi); _s_Xp_LaBo_LaLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfbWeights)); divide(_h_XpK0Charm, _h_XpK0Light, _s_Xp_K0Ch_K0Li); _s_Xp_K0Ch_K0Li->scale(1., dbl(*_SumOfudsWeights / *_SumOfcWeights)); divide(_h_XpK0Bottom, _h_XpK0Light, _s_Xp_K0Bo_K0Li); _s_Xp_K0Bo_K0Li->scale(1., dbl(*_SumOfudsWeights / *_SumOfbWeights)); divide(_h_XpPhiCharm, _h_XpPhiLight, _s_Xp_PhiCh_PhiLi); _s_Xp_PhiCh_PhiLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfcWeights)); divide(_h_XpPhiBottom, _h_XpPhiLight, _s_Xp_PhiBo_PhiLi); _s_Xp_PhiBo_PhiLi->scale(1., dbl(*_SumOfudsWeights / *_SumOfbWeights)); // Then the leading particles 
divide(*_h_RPiMinus - *_h_RPiPlus, *_h_RPiMinus + *_h_RPiPlus, _s_PiM_PiP); divide(*_h_RKSBar0 - *_h_RKS0, *_h_RKSBar0 + *_h_RKS0, _s_KSBar0_KS0); divide(*_h_RKMinus - *_h_RKPlus, *_h_RKMinus + *_h_RKPlus, _s_KM_KP); divide(*_h_RProton - *_h_RPBar, *_h_RProton + *_h_RPBar, _s_Pr_PBar); divide(*_h_RLambda - *_h_RLBar, *_h_RLambda + *_h_RLBar, _s_Lam_LBar); // Then the rest scale(_h_XpPiPlusN, 1/sumOfWeights()); scale(_h_XpKPlusN, 1/sumOfWeights()); scale(_h_XpProtonN, 1/sumOfWeights()); scale(_h_XpChargedN, 1/sumOfWeights()); scale(_h_XpK0N, 1/sumOfWeights()); scale(_h_XpLambdaN, 1/sumOfWeights()); scale(_h_XpKStar0N, 1/sumOfWeights()); scale(_h_XpPhiN, 1/sumOfWeights()); scale(_h_XpPiPlusLight, 1 / *_SumOfudsWeights); scale(_h_XpPiPlusCharm, 1 / *_SumOfcWeights); scale(_h_XpPiPlusBottom, 1 / *_SumOfbWeights); scale(_h_XpKPlusLight, 1 / *_SumOfudsWeights); scale(_h_XpKPlusCharm, 1 / *_SumOfcWeights); scale(_h_XpKPlusBottom, 1 / *_SumOfbWeights); scale(_h_XpKStar0Light, 1 / *_SumOfudsWeights); scale(_h_XpKStar0Charm, 1 / *_SumOfcWeights); scale(_h_XpKStar0Bottom, 1 / *_SumOfbWeights); scale(_h_XpProtonLight, 1 / *_SumOfudsWeights); scale(_h_XpProtonCharm, 1 / *_SumOfcWeights); scale(_h_XpProtonBottom, 1 / *_SumOfbWeights); scale(_h_XpLambdaLight, 1 / *_SumOfudsWeights); scale(_h_XpLambdaCharm, 1 / *_SumOfcWeights); scale(_h_XpLambdaBottom, 1 / *_SumOfbWeights); scale(_h_XpK0Light, 1 / *_SumOfudsWeights); scale(_h_XpK0Charm, 1 / *_SumOfcWeights); scale(_h_XpK0Bottom, 1 / *_SumOfbWeights); scale(_h_XpPhiLight, 1 / *_SumOfudsWeights); scale(_h_XpPhiCharm , 1 / *_SumOfcWeights); scale(_h_XpPhiBottom, 1 / *_SumOfbWeights); scale(_h_RPiPlus, 1 / *_SumOfudsWeights); scale(_h_RPiMinus, 1 / *_SumOfudsWeights); scale(_h_RKS0, 1 / *_SumOfudsWeights); scale(_h_RKSBar0, 1 / *_SumOfudsWeights); scale(_h_RKPlus, 1 / *_SumOfudsWeights); scale(_h_RKMinus, 1 / *_SumOfudsWeights); scale(_h_RProton, 1 / *_SumOfudsWeights); scale(_h_RPBar, 1 / *_SumOfudsWeights); scale(_h_RLambda, 1 / 
*_SumOfudsWeights); scale(_h_RLBar, 1 / *_SumOfudsWeights); // Multiplicities double avgNumPartsAll, avgNumPartsLight,avgNumPartsCharm, avgNumPartsBottom; // pi+/- // all avgNumPartsAll = dbl(*_multPiPlus[0])/sumOfWeights(); tmp1->point(0).setY(avgNumPartsAll); // light avgNumPartsLight = dbl(*_multPiPlus[1] / *_SumOfudsWeights); tmp2->point(0).setY(avgNumPartsLight); // charm avgNumPartsCharm = dbl(*_multPiPlus[2] / *_SumOfcWeights); tmp3->point(0).setY(avgNumPartsCharm); // bottom avgNumPartsBottom = dbl(*_multPiPlus[3] / *_SumOfbWeights); tmp4->point(0).setY(avgNumPartsBottom); // charm-light tmp5->point(0).setY(avgNumPartsCharm - avgNumPartsLight); // bottom-light tmp6->point(0).setY(avgNumPartsBottom - avgNumPartsLight); // K+/- // all avgNumPartsAll = dbl(*_multKPlus[0])/sumOfWeights(); tmp7->point(0).setY(avgNumPartsAll); // light avgNumPartsLight = dbl(*_multKPlus[1] / *_SumOfudsWeights); tmp8->point(0).setY(avgNumPartsLight); // charm avgNumPartsCharm = dbl(*_multKPlus[2] / *_SumOfcWeights); tmp9->point(0).setY(avgNumPartsCharm); // bottom avgNumPartsBottom = dbl(*_multKPlus[3] / *_SumOfbWeights); tmp10->point(0).setY(avgNumPartsBottom); // charm-light tmp11->point(0).setY(avgNumPartsCharm - avgNumPartsLight); // bottom-light tmp12->point(0).setY(avgNumPartsBottom - avgNumPartsLight); // K0 // all avgNumPartsAll = dbl(*_multK0[0])/sumOfWeights(); tmp13->point(0).setY(avgNumPartsAll); // light avgNumPartsLight = dbl(*_multK0[1] / *_SumOfudsWeights); tmp14->point(0).setY(avgNumPartsLight); // charm avgNumPartsCharm = dbl(*_multK0[2] / *_SumOfcWeights); tmp15->point(0).setY(avgNumPartsCharm); // bottom avgNumPartsBottom = dbl(*_multK0[3] / *_SumOfbWeights); tmp16->point(0).setY(avgNumPartsBottom); // charm-light tmp17->point(0).setY(avgNumPartsCharm - avgNumPartsLight); // bottom-light tmp18->point(0).setY(avgNumPartsBottom - avgNumPartsLight); // K*0 // all avgNumPartsAll = dbl(*_multKStar0[0])/sumOfWeights(); tmp19->point(0).setY(avgNumPartsAll); // light 
avgNumPartsLight = dbl(*_multKStar0[1] / *_SumOfudsWeights); tmp20->point(0).setY(avgNumPartsLight); // charm avgNumPartsCharm = dbl(*_multKStar0[2] / *_SumOfcWeights); tmp21->point(0).setY(avgNumPartsCharm); // bottom avgNumPartsBottom = dbl(*_multKStar0[3] / *_SumOfbWeights); tmp22->point(0).setY(avgNumPartsBottom); // charm-light tmp23->point(0).setY(avgNumPartsCharm - avgNumPartsLight); // bottom-light tmp24->point(0).setY(avgNumPartsBottom - avgNumPartsLight); // phi // all avgNumPartsAll = dbl(*_multPhi[0])/sumOfWeights(); tmp25->point(0).setY(avgNumPartsAll); // light avgNumPartsLight = dbl(*_multPhi[1] / *_SumOfudsWeights); tmp26->point(0).setY(avgNumPartsLight); // charm avgNumPartsCharm = dbl(*_multPhi[2] / *_SumOfcWeights); tmp27->point(0).setY(avgNumPartsCharm); // bottom avgNumPartsBottom = dbl(*_multPhi[3] / *_SumOfbWeights); tmp28->point(0).setY(avgNumPartsBottom); // charm-light tmp29->point(0).setY(avgNumPartsCharm - avgNumPartsLight); // bottom-light tmp30->point(0).setY(avgNumPartsBottom - avgNumPartsLight); // p // all avgNumPartsAll = dbl(*_multProton[0])/sumOfWeights(); tmp31->point(0).setY(avgNumPartsAll); // light avgNumPartsLight = dbl(*_multProton[1] / *_SumOfudsWeights); tmp32->point(0).setY(avgNumPartsLight); // charm avgNumPartsCharm = dbl(*_multProton[2] / *_SumOfcWeights); tmp33->point(0).setY(avgNumPartsCharm); // bottom avgNumPartsBottom = dbl(*_multProton[3] / *_SumOfbWeights); tmp34->point(0).setY(avgNumPartsBottom); // charm-light tmp35->point(0).setY(avgNumPartsCharm - avgNumPartsLight); // bottom-light tmp36->point(0).setY(avgNumPartsBottom - avgNumPartsLight); // Lambda // all avgNumPartsAll = dbl(*_multLambda[0])/sumOfWeights(); tmp37->point(0).setY(avgNumPartsAll); // light avgNumPartsLight = dbl(*_multLambda[1] / *_SumOfudsWeights); tmp38->point(0).setY(avgNumPartsLight); // charm avgNumPartsCharm = dbl(*_multLambda[2] / *_SumOfcWeights); tmp39->point(0).setY(avgNumPartsCharm); // bottom avgNumPartsBottom = 
dbl(*_multLambda[3] / *_SumOfbWeights); tmp40->point(0).setY(avgNumPartsBottom); // charm-light tmp41->point(0).setY(avgNumPartsCharm - avgNumPartsLight); // bottom-light tmp42->point(0).setY(avgNumPartsBottom - avgNumPartsLight); } //@} private: /// Store the weighted sums of numbers of charged / charged+neutral /// particles. Used to calculate average number of particles for the /// inclusive single particle distributions' normalisations. CounterPtr _SumOfudsWeights, _SumOfcWeights, _SumOfbWeights; vector _multPiPlus, _multKPlus, _multK0, _multKStar0, _multPhi, _multProton, _multLambda; Histo1DPtr _h_XpPiPlusSig, _h_XpPiPlusN; Histo1DPtr _h_XpKPlusSig, _h_XpKPlusN; Histo1DPtr _h_XpProtonSig, _h_XpProtonN; Histo1DPtr _h_XpChargedN; Histo1DPtr _h_XpK0N, _h_XpLambdaN; Histo1DPtr _h_XpKStar0N, _h_XpPhiN; Histo1DPtr _h_XpPiPlusLight, _h_XpPiPlusCharm, _h_XpPiPlusBottom; Histo1DPtr _h_XpKPlusLight, _h_XpKPlusCharm, _h_XpKPlusBottom; Histo1DPtr _h_XpKStar0Light, _h_XpKStar0Charm, _h_XpKStar0Bottom; Histo1DPtr _h_XpProtonLight, _h_XpProtonCharm, _h_XpProtonBottom; Histo1DPtr _h_XpLambdaLight, _h_XpLambdaCharm, _h_XpLambdaBottom; Histo1DPtr _h_XpK0Light, _h_XpK0Charm, _h_XpK0Bottom; Histo1DPtr _h_XpPhiLight, _h_XpPhiCharm, _h_XpPhiBottom; Histo1DPtr _temp_XpChargedN1, _temp_XpChargedN2, _temp_XpChargedN3; Histo1DPtr _temp_XpKPlusCharm , _temp_XpKPlusLight; Histo1DPtr _temp_XpKStar0Charm, _temp_XpKStar0Light; Histo1DPtr _temp_XpProtonCharm, _temp_XpProtonLight; Histo1DPtr _h_RPiPlus, _h_RPiMinus; Histo1DPtr _h_RKS0, _h_RKSBar0; Histo1DPtr _h_RKPlus, _h_RKMinus; Histo1DPtr _h_RProton, _h_RPBar; Histo1DPtr _h_RLambda, _h_RLBar; Scatter2DPtr _s_Xp_PiPl_Ch, _s_Xp_KPl_Ch, _s_Xp_Pr_Ch; Scatter2DPtr _s_Xp_PiPlCh_PiPlLi, _s_Xp_PiPlBo_PiPlLi; Scatter2DPtr _s_Xp_KPlCh_KPlLi, _s_Xp_KPlBo_KPlLi; Scatter2DPtr _s_Xp_KS0Ch_KS0Li, _s_Xp_KS0Bo_KS0Li; Scatter2DPtr _s_Xp_PrCh_PrLi, _s_Xp_PrBo_PrLi; Scatter2DPtr _s_Xp_LaCh_LaLi, _s_Xp_LaBo_LaLi; Scatter2DPtr _s_Xp_K0Ch_K0Li, _s_Xp_K0Bo_K0Li; 
Scatter2DPtr _s_Xp_PhiCh_PhiLi, _s_Xp_PhiBo_PhiLi; Scatter2DPtr _s_PiM_PiP, _s_KSBar0_KS0, _s_KM_KP, _s_Pr_PBar, _s_Lam_LBar; //@} Scatter2DPtr tmp1; Scatter2DPtr tmp2; Scatter2DPtr tmp3; Scatter2DPtr tmp4; Scatter2DPtr tmp5; Scatter2DPtr tmp6; Scatter2DPtr tmp7; Scatter2DPtr tmp8; Scatter2DPtr tmp9; Scatter2DPtr tmp10; Scatter2DPtr tmp11; Scatter2DPtr tmp12; Scatter2DPtr tmp13; Scatter2DPtr tmp14; Scatter2DPtr tmp15; Scatter2DPtr tmp16; Scatter2DPtr tmp17; Scatter2DPtr tmp18; Scatter2DPtr tmp19; Scatter2DPtr tmp20; Scatter2DPtr tmp21; Scatter2DPtr tmp22; Scatter2DPtr tmp23; Scatter2DPtr tmp24; Scatter2DPtr tmp25; Scatter2DPtr tmp26; Scatter2DPtr tmp27; Scatter2DPtr tmp28; Scatter2DPtr tmp29; Scatter2DPtr tmp30; Scatter2DPtr tmp31; Scatter2DPtr tmp32; Scatter2DPtr tmp33; Scatter2DPtr tmp34; Scatter2DPtr tmp35; Scatter2DPtr tmp36; Scatter2DPtr tmp37; Scatter2DPtr tmp38; Scatter2DPtr tmp39; Scatter2DPtr tmp40; Scatter2DPtr tmp41; Scatter2DPtr tmp42; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(SLD_1999_S3743934); } diff --git a/analyses/pluginLEP/SLD_2004_S5693039.cc b/analyses/pluginLEP/SLD_2004_S5693039.cc --- a/analyses/pluginLEP/SLD_2004_S5693039.cc +++ b/analyses/pluginLEP/SLD_2004_S5693039.cc @@ -1,379 +1,379 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/Beam.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/Thrust.hh" #define I_KNOW_THE_INITIAL_QUARKS_PROJECTION_IS_DODGY_BUT_NEED_TO_USE_IT #include "Rivet/Projections/InitialQuarks.hh" namespace Rivet { /// @brief SLD flavour-dependent fragmentation paper /// @author Peter Richardson class SLD_2004_S5693039 : public Analysis { public: /// Constructor SLD_2004_S5693039() : Analysis("SLD_2004_S5693039") {} /// @name Analysis methods //@{ void analyze(const Event& e) { // First, veto on leptonic events by requiring at least 2 charged FS particles const FinalState& fs = apply(e, "FS"); const size_t 
numParticles = fs.particles().size(); // Even if we only generate hadronic events, we still need a cut on numCharged >= 2. if (numParticles < 2) { MSG_DEBUG("Failed ncharged cut"); vetoEvent; } MSG_DEBUG("Passed ncharged cut"); // Get beams and average beam momentum const ParticlePair& beams = apply(e, "Beams").beams(); const double meanBeamMom = ( beams.first.p3().mod() + beams.second.p3().mod() ) / 2.0; MSG_DEBUG("Avg beam momentum = " << meanBeamMom); int flavour = 0; const InitialQuarks& iqf = apply(e, "IQF"); // If we only have two quarks (qqbar), just take the flavour. // If we have more than two quarks, look for the highest energetic q-qbar pair. Particles quarks; if (iqf.particles().size() == 2) { flavour = iqf.particles().front().abspid(); quarks = iqf.particles(); } else { map quarkmap; for (const Particle& p : iqf.particles()) { if (quarkmap.find(p.pid())==quarkmap.end()) quarkmap[p.pid()] = p; else if (quarkmap[p.pid()].E() < p.E()) quarkmap[p.pid()] = p; } double maxenergy = 0.; for (int i = 1; i <= 5; ++i) { double energy(0.); if(quarkmap.find( i)!=quarkmap.end()) energy += quarkmap[ i].E(); if(quarkmap.find(-i)!=quarkmap.end()) energy += quarkmap[-i].E(); if (energy > maxenergy) flavour = i; } if(quarkmap.find( flavour)!=quarkmap.end()) quarks.push_back(quarkmap[ flavour]); if(quarkmap.find(-flavour)!=quarkmap.end()) quarks.push_back(quarkmap[-flavour]); } // total multiplicities switch (flavour) { case PID::DQUARK: case PID::UQUARK: case PID::SQUARK: _weightLight ->fill(); _weightedTotalChargedPartNumLight ->fill(numParticles); break; case PID::CQUARK: _weightCharm ->fill(); _weightedTotalChargedPartNumCharm ->fill(numParticles); break; case PID::BQUARK: _weightBottom ->fill(); _weightedTotalChargedPartNumBottom ->fill(numParticles); break; } // thrust axis for projections Vector3 axis = apply(e, "Thrust").thrustAxis(); double dot(0.); if(!quarks.empty()) { dot = quarks[0].p3().dot(axis); if(quarks[0].pid()<0) dot *= -1.; } // spectra and individual 
multiplicities for (const Particle& p : fs.particles()) { double pcm = p.p3().mod(); const double xp = pcm/meanBeamMom; // if in quark or antiquark hemisphere bool quark = p.p3().dot(axis)*dot>0.; _h_PCharged ->fill(pcm ); // all charged switch (flavour) { case PID::DQUARK: case PID::UQUARK: case PID::SQUARK: _h_XpChargedL->fill(xp); break; case PID::CQUARK: _h_XpChargedC->fill(xp); break; case PID::BQUARK: _h_XpChargedB->fill(xp); break; } int id = p.abspid(); // charged pions if (id == PID::PIPLUS) { _h_XpPiPlus->fill(xp); _h_XpPiPlusTotal->fill(xp); switch (flavour) { case PID::DQUARK: case PID::UQUARK: case PID::SQUARK: _h_XpPiPlusL->fill(xp); _h_NPiPlusL->fill(sqrtS()); if( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 )) _h_RPiPlus->fill(xp); else _h_RPiMinus->fill(xp); break; case PID::CQUARK: _h_XpPiPlusC->fill(xp); _h_NPiPlusC->fill(sqrtS()); break; case PID::BQUARK: _h_XpPiPlusB->fill(xp); _h_NPiPlusB->fill(sqrtS()); break; } } else if (id == PID::KPLUS) { _h_XpKPlus->fill(xp); _h_XpKPlusTotal->fill(xp); switch (flavour) { case PID::DQUARK: case PID::UQUARK: case PID::SQUARK: _h_XpKPlusL->fill(xp); _h_NKPlusL->fill(sqrtS()); if( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 )) _h_RKPlus->fill(xp); else _h_RKMinus->fill(xp); break; case PID::CQUARK: _h_XpKPlusC->fill(xp); _h_NKPlusC->fill(sqrtS()); break; case PID::BQUARK: _h_XpKPlusB->fill(xp); _h_NKPlusB->fill(sqrtS()); break; } } else if (id == PID::PROTON) { _h_XpProton->fill(xp); _h_XpProtonTotal->fill(xp); switch (flavour) { case PID::DQUARK: case PID::UQUARK: case PID::SQUARK: _h_XpProtonL->fill(xp); _h_NProtonL->fill(sqrtS()); if( ( quark && p.pid()>0 ) || ( !quark && p.pid()<0 )) _h_RProton->fill(xp); else _h_RPBar ->fill(xp); break; case PID::CQUARK: _h_XpProtonC->fill(xp); _h_NProtonC->fill(sqrtS()); break; case PID::BQUARK: _h_XpProtonB->fill(xp); _h_NProtonB->fill(sqrtS()); break; } } } } void init() { // Projections declare(Beam(), "Beams"); declare(ChargedFinalState(), "FS"); 
declare(InitialQuarks(), "IQF"); declare(Thrust(FinalState()), "Thrust"); // Book histograms book(_h_PCharged , 1, 1, 1); book(_h_XpPiPlus , 2, 1, 2); book(_h_XpKPlus , 3, 1, 2); book(_h_XpProton , 4, 1, 2); book(_h_XpPiPlusTotal , 2, 2, 2); book(_h_XpKPlusTotal , 3, 2, 2); book(_h_XpProtonTotal , 4, 2, 2); book(_h_XpPiPlusL , 5, 1, 1); book(_h_XpPiPlusC , 5, 1, 2); book(_h_XpPiPlusB , 5, 1, 3); book(_h_XpKPlusL , 6, 1, 1); book(_h_XpKPlusC , 6, 1, 2); book(_h_XpKPlusB , 6, 1, 3); book(_h_XpProtonL , 7, 1, 1); book(_h_XpProtonC , 7, 1, 2); book(_h_XpProtonB , 7, 1, 3); book(_h_XpChargedL , 8, 1, 1); book(_h_XpChargedC , 8, 1, 2); book(_h_XpChargedB , 8, 1, 3); book(_h_NPiPlusL , 5, 2, 1); book(_h_NPiPlusC , 5, 2, 2); book(_h_NPiPlusB , 5, 2, 3); book(_h_NKPlusL , 6, 2, 1); book(_h_NKPlusC , 6, 2, 2); book(_h_NKPlusB , 6, 2, 3); book(_h_NProtonL , 7, 2, 1); book(_h_NProtonC , 7, 2, 2); book(_h_NProtonB , 7, 2, 3); book(_h_RPiPlus , 9, 1, 1); book(_h_RPiMinus , 9, 1, 2); book(_h_RKPlus ,10, 1, 1); book(_h_RKMinus ,10, 1, 2); book(_h_RProton ,11, 1, 1); book(_h_RPBar ,11, 1, 2); // Ratios: used as target of divide() later book(_s_PiM_PiP, 9, 1, 3); book(_s_KM_KP , 10, 1, 3); book(_s_Pr_PBar, 11, 1, 3); - book(_weightedTotalChargedPartNumLight, "weightedTotalChargedPartNumLight"); - book(_weightedTotalChargedPartNumCharm, "weightedTotalChargedPartNumCharm"); - book(_weightedTotalChargedPartNumBottom, "weightedTotalChargedPartNumBottom"); - book(_weightLight, "weightLight"); - book(_weightCharm, "weightCharm"); - book(_weightBottom, "weightBottom"); + book(_weightedTotalChargedPartNumLight, "_weightedTotalChargedPartNumLight"); + book(_weightedTotalChargedPartNumCharm, "_weightedTotalChargedPartNumCharm"); + book(_weightedTotalChargedPartNumBottom, "_weightedTotalChargedPartNumBottom"); + book(_weightLight, "_weightLight"); + book(_weightCharm, "_weightCharm"); + book(_weightBottom, "_weightBottom"); book(tmp1, 8, 2, 1, true); book(tmp2, 8, 2, 2, true); book(tmp3, 8, 2, 
3, true); book(tmp4, 8, 3, 2, true); book(tmp5, 8, 3, 3, true); } /// Finalize void finalize() { // Multiplicities /// @todo Include errors const double avgNumPartsLight = _weightedTotalChargedPartNumLight->val() / _weightLight->val(); const double avgNumPartsCharm = _weightedTotalChargedPartNumCharm->val() / _weightCharm->val(); const double avgNumPartsBottom = _weightedTotalChargedPartNumBottom->val() / _weightBottom->val(); tmp1->point(0).setY(avgNumPartsLight); tmp2->point(0).setY(avgNumPartsCharm); tmp3->point(0).setY(avgNumPartsBottom); tmp4->point(0).setY(avgNumPartsCharm - avgNumPartsLight); tmp5->point(0).setY(avgNumPartsBottom - avgNumPartsLight); // Do divisions divide(*_h_RPiMinus - *_h_RPiPlus, *_h_RPiMinus + *_h_RPiPlus, _s_PiM_PiP); divide(*_h_RKMinus - *_h_RKPlus, *_h_RKMinus + *_h_RKPlus, _s_KM_KP); divide(*_h_RProton - *_h_RPBar, *_h_RProton + *_h_RPBar, _s_Pr_PBar); // Scale histograms scale(_h_PCharged, 1./sumOfWeights()); scale(_h_XpPiPlus, 1./sumOfWeights()); scale(_h_XpKPlus, 1./sumOfWeights()); scale(_h_XpProton, 1./sumOfWeights()); scale(_h_XpPiPlusTotal, 1./sumOfWeights()); scale(_h_XpKPlusTotal, 1./sumOfWeights()); scale(_h_XpProtonTotal, 1./sumOfWeights()); scale(_h_XpPiPlusL, 1. / *_weightLight); scale(_h_XpPiPlusC, 1. / *_weightCharm); scale(_h_XpPiPlusB, 1. / *_weightBottom); scale(_h_XpKPlusL, 1. / *_weightLight); scale(_h_XpKPlusC, 1. / *_weightCharm); scale(_h_XpKPlusB, 1. / *_weightBottom); scale(_h_XpProtonL, 1. / *_weightLight); scale(_h_XpProtonC, 1. / *_weightCharm); scale(_h_XpProtonB, 1. / *_weightBottom); scale(_h_XpChargedL, 1. / *_weightLight); scale(_h_XpChargedC, 1. / *_weightCharm); scale(_h_XpChargedB, 1. / *_weightBottom); scale(_h_NPiPlusL, 1. / *_weightLight); scale(_h_NPiPlusC, 1. / *_weightCharm); scale(_h_NPiPlusB, 1. / *_weightBottom); scale(_h_NKPlusL, 1. / *_weightLight); scale(_h_NKPlusC, 1. / *_weightCharm); scale(_h_NKPlusB, 1. / *_weightBottom); scale(_h_NProtonL, 1. 
/ *_weightLight); scale(_h_NProtonC, 1. / *_weightCharm); scale(_h_NProtonB, 1. / *_weightBottom); // Paper suggests this should be 0.5/weight but it has to be 1.0 to get normalisations right... scale(_h_RPiPlus, 1. / *_weightLight); scale(_h_RPiMinus, 1. / *_weightLight); scale(_h_RKPlus, 1. / *_weightLight); scale(_h_RKMinus, 1. / *_weightLight); scale(_h_RProton, 1. / *_weightLight); scale(_h_RPBar, 1. / *_weightLight); // convert ratio to % _s_PiM_PiP->scale(1.,100.); _s_KM_KP ->scale(1.,100.); _s_Pr_PBar->scale(1.,100.); } //@} private: Scatter2DPtr tmp1; Scatter2DPtr tmp2; Scatter2DPtr tmp3; Scatter2DPtr tmp4; Scatter2DPtr tmp5; /// @name Multiplicities //@{ CounterPtr _weightedTotalChargedPartNumLight; CounterPtr _weightedTotalChargedPartNumCharm; CounterPtr _weightedTotalChargedPartNumBottom; //@} /// @name Weights //@{ CounterPtr _weightLight, _weightCharm, _weightBottom; //@} // Histograms //@{ Histo1DPtr _h_PCharged; Histo1DPtr _h_XpPiPlus, _h_XpKPlus, _h_XpProton; Histo1DPtr _h_XpPiPlusTotal, _h_XpKPlusTotal, _h_XpProtonTotal; Histo1DPtr _h_XpPiPlusL, _h_XpPiPlusC, _h_XpPiPlusB; Histo1DPtr _h_XpKPlusL, _h_XpKPlusC, _h_XpKPlusB; Histo1DPtr _h_XpProtonL, _h_XpProtonC, _h_XpProtonB; Histo1DPtr _h_XpChargedL, _h_XpChargedC, _h_XpChargedB; Histo1DPtr _h_NPiPlusL, _h_NPiPlusC, _h_NPiPlusB; Histo1DPtr _h_NKPlusL, _h_NKPlusC, _h_NKPlusB; Histo1DPtr _h_NProtonL, _h_NProtonC, _h_NProtonB; Histo1DPtr _h_RPiPlus, _h_RPiMinus, _h_RKPlus; Histo1DPtr _h_RKMinus, _h_RProton, _h_RPBar; Scatter2DPtr _s_PiM_PiP, _s_KM_KP, _s_Pr_PBar; //@} }; // Hook for the plugin system DECLARE_RIVET_PLUGIN(SLD_2004_S5693039); }