diff --git a/analyses/pluginATLAS/ATLAS_2012_I1204447.cc b/analyses/pluginATLAS/ATLAS_2012_I1204447.cc --- a/analyses/pluginATLAS/ATLAS_2012_I1204447.cc +++ b/analyses/pluginATLAS/ATLAS_2012_I1204447.cc @@ -1,1058 +1,1057 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/ChargedFinalState.hh" #include "Rivet/Projections/VisibleFinalState.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/IdentifiedFinalState.hh" #include "Rivet/Projections/UnstableParticles.hh" #include "Rivet/Projections/FastJets.hh" namespace Rivet { class ATLAS_2012_I1204447 : public Analysis { public: /// Constructor ATLAS_2012_I1204447() : Analysis("ATLAS_2012_I1204447") { } /// Book histograms and initialise projections before the run void init() { // To calculate the acceptance without having the fiducial lepton efficiencies included, this part can be turned off _use_fiducial_lepton_efficiency = true; // Random numbers for simulation of ATLAS detector reconstruction efficiency srand(160385); // Read in all signal regions _signal_regions = getSignalRegions(); // Set number of events per signal region to 0 for (size_t i = 0; i < _signal_regions.size(); i++) book(_eventCountsPerSR[_signal_regions[i]], "_eventCountsPerSR_" + _signal_regions[i]); // Final state including all charged and neutral particles const FinalState fs((Cuts::etaIn(-5.0, 5.0) && Cuts::pT >= 1*GeV)); declare(fs, "FS"); // Final state including all charged particles declare(ChargedFinalState(Cuts::abseta < 2.5 && Cuts::pT > 1*GeV), "CFS"); // Final state including all visible particles (to calculate MET, Jets etc.) declare(VisibleFinalState(Cuts::abseta < 5.0), "VFS"); // Final state including all AntiKt 04 Jets VetoedFinalState vfs; vfs.addVetoPairId(PID::MUON); declare(FastJets(vfs, FastJets::ANTIKT, 0.4), "AntiKtJets04"); // Final state including all unstable particles (including taus) declare(UnstableParticles(Cuts::abseta < 5.0 && Cuts::pT > 5*GeV), "UFS"); // Final state including all electrons IdentifiedFinalState elecs(Cuts::abseta < 2.47 && Cuts::pT > 10*GeV); elecs.acceptIdPair(PID::ELECTRON); declare(elecs, "elecs"); // Final state including all muons IdentifiedFinalState muons(Cuts::abseta < 2.5 && Cuts::pT > 10*GeV); muons.acceptIdPair(PID::MUON); declare(muons, "muons"); // Book histograms book(_h_HTlep_all ,"HTlep_all" , 30, 0, 1500); book(_h_HTjets_all ,"HTjets_all", 30, 0, 1500); book(_h_MET_all ,"MET_all" , 20, 0, 1000); book(_h_Meff_all ,"Meff_all" , 30, 0, 3000); book(_h_e_n ,"e_n" , 10, -0.5, 9.5); book(_h_mu_n ,"mu_n" , 10, -0.5, 9.5); book(_h_tau_n ,"tau_n", 10, -0.5, 9.5); book(_h_pt_1_3l ,"pt_1_3l", 100, 0, 2000); book(_h_pt_2_3l ,"pt_2_3l", 100, 0, 2000); book(_h_pt_3_3l ,"pt_3_3l", 100, 0, 2000); book(_h_pt_1_2ltau ,"pt_1_2ltau", 100, 0, 2000); book(_h_pt_2_2ltau ,"pt_2_2ltau", 100, 0, 2000); book(_h_pt_3_2ltau ,"pt_3_2ltau", 100, 0, 2000); book(_h_excluded ,"excluded", 2, -0.5, 1.5); } /// Perform the per-event analysis void analyze(const Event& event) { // Muons Particles muon_candidates; const Particles charged_tracks = apply<ChargedFinalState>(event, "CFS").particles(); const Particles visible_particles = apply<VisibleFinalState>(event, "VFS").particles(); for (const Particle& mu : apply<IdentifiedFinalState>(event, "muons").particlesByPt()) { // Calculate pTCone30 variable (pT of all tracks within dR<0.3 - pT of muon itself) double pTinCone = -mu.pT(); for (const Particle& track : charged_tracks) { if (deltaR(mu.momentum(), track.momentum()) < 0.3) pTinCone += track.pT(); } // 
Calculate eTCone30 variable (pT of all visible particles within dR<0.3) double eTinCone = 0.; for (const Particle& visible_particle : visible_particles) { if (visible_particle.abspid() != PID::MUON && inRange(deltaR(mu.momentum(), visible_particle.momentum()), 0.1, 0.3)) eTinCone += visible_particle.pT(); } // Apply reconstruction efficiency and simulate reco int muon_id = 13; if ( mu.hasAncestor(15) || mu.hasAncestor(-15)) muon_id = 14; const double eff = (_use_fiducial_lepton_efficiency) ? apply_reco_eff(muon_id, mu) : 1.0; const bool keep_muon = rand()/static_cast<double>(RAND_MAX) <= eff; // Keep muon if pTCone30/pT < 0.15 and eTCone30/pT < 0.2 and reconstructed if (keep_muon && pTinCone/mu.pT() <= 0.15 && eTinCone/mu.pT() < 0.2) muon_candidates.push_back(mu); } // Electrons Particles electron_candidates; for (const Particle& e : apply<IdentifiedFinalState>(event, "elecs").particlesByPt()) { // Neglect electrons in crack regions if (inRange(e.abseta(), 1.37, 1.52)) continue; // Calculate pTCone30 variable (pT of all tracks within dR<0.3 - pT of electron itself) double pTinCone = -e.pT(); for (const Particle& track : charged_tracks) { if (deltaR(e.momentum(), track.momentum()) < 0.3) pTinCone += track.pT(); } // Calculate eTCone30 variable (pT of all visible particles (except muons) within dR<0.3) double eTinCone = 0.; for (const Particle& visible_particle : visible_particles) { if (visible_particle.abspid() != PID::MUON && inRange(deltaR(e.momentum(), visible_particle.momentum()), 0.1, 0.3)) eTinCone += visible_particle.pT(); } // Apply reconstruction efficiency and simulate reco int elec_id = 11; if (e.hasAncestor(15) || e.hasAncestor(-15)) elec_id = 12; const double eff = (_use_fiducial_lepton_efficiency) ? apply_reco_eff(elec_id, e) : 1.0; const bool keep_elec = rand()/static_cast<double>(RAND_MAX) <= eff; // Keep electron if pTCone30/pT < 0.13 and eTCone30/pT < 0.2 and reconstructed if (keep_elec && pTinCone/e.pT() <= 0.13 && eTinCone/e.pT() < 0.2) electron_candidates.push_back(e); } // Taus /// @todo This could benefit from a tau finder projection Particles tau_candidates; for (const Particle& tau : apply<UnstableParticles>(event, "UFS").particlesByPt()) { // Only pick taus out of all unstable particles if (tau.abspid() != PID::TAU) continue; // Check that tau has decayed into daughter particles /// @todo Huh? Unstable taus with no decay vtx? Can use Particle.isStable()? But why in this situation? if (tau.genParticle()->end_vertex() == 0) continue; // Calculate visible tau pT from pT of tau neutrino in tau decay for pT and |eta| cuts FourMomentum daughter_tau_neutrino_momentum = get_tau_neutrino_mom(tau); Particle tau_vis = tau; tau_vis.setMomentum(tau.momentum()-daughter_tau_neutrino_momentum); // keep only taus in certain eta region and above 15 GeV of visible tau pT if ( tau_vis.pT() <= 15.0*GeV || tau_vis.abseta() > 2.5) continue; // Get prong number (number of tracks) in tau decay and check if tau decays leptonically unsigned int nprong = 0; bool lep_decaying_tau = false; get_prong_number(tau.genParticle(), nprong, lep_decaying_tau); // Apply reconstruction efficiency int tau_id = 15; if (nprong == 1) tau_id = 15; else if (nprong == 3) tau_id = 16; // Get fiducial lepton efficiency and simulate reco efficiency const double eff = (_use_fiducial_lepton_efficiency) ? 
apply_reco_eff(tau_id, tau_vis) : 1.0; const bool keep_tau = rand()/static_cast<double>(RAND_MAX) <= eff; // Keep tau if nprong = 1, it decays hadronically, and it's reconstructed by the detector if ( !lep_decaying_tau && nprong == 1 && keep_tau) tau_candidates.push_back(tau_vis); } // Jets (all anti-kt R=0.4 jets with pT > 25 GeV and eta < 4.9) Jets jet_candidates; for (const Jet& jet : apply<FastJets>(event, "AntiKtJets04").jetsByPt(25*GeV)) { if (jet.abseta() < 4.9) jet_candidates.push_back(jet); } // ETmiss Particles vfs_particles = apply<VisibleFinalState>(event, "VFS").particles(); FourMomentum pTmiss; for (const Particle& p : vfs_particles) pTmiss -= p.momentum(); double eTmiss = pTmiss.pT()/GeV; //------------------ // Overlap removal // electron - electron Particles electron_candidates_2; for (size_t ie = 0; ie < electron_candidates.size(); ++ie) { const Particle & e = electron_candidates[ie]; bool away = true; // If electron pair within dR < 0.1: remove electron with lower pT for (size_t ie2=0; ie2 < electron_candidates_2.size(); ++ie2) { if ( deltaR( e.momentum(), electron_candidates_2[ie2].momentum()) < 0.1 ) { away = false; break; } } // If isolated keep it if ( away ) electron_candidates_2.push_back( e ); } // jet - electron Jets recon_jets; for (const Jet& jet : jet_candidates) { bool away = true; // if jet within dR < 0.2 of electron: remove jet for (const Particle& e : electron_candidates_2) { if (deltaR(e.momentum(), jet.momentum()) < 0.2) { away = false; break; } } // jet - tau if (away) { // If jet within dR < 0.2 of tau: remove jet for (const Particle& tau : tau_candidates) { if (deltaR(tau.momentum(), jet.momentum()) < 0.2) { away = false; break; } } } // If isolated keep it if ( away ) recon_jets.push_back( jet ); } // electron - jet Particles recon_leptons, recon_e; for (size_t ie = 0; ie < electron_candidates_2.size(); ++ie) { const Particle& e = electron_candidates_2[ie]; // If electron within 0.2 < dR < 0.4 from any jets: remove electron bool away = true; for (const Jet& jet : recon_jets) { if (deltaR(e.momentum(), jet.momentum()) < 0.4) { away = false; break; } } // electron - muon // if electron within dR < 0.1 of a muon: remove electron if (away) { for (const Particle& mu : muon_candidates) { if (deltaR(mu.momentum(), e.momentum()) < 0.1) { away = false; break; } } } // If isolated keep it if (away) { recon_e += e; recon_leptons += e; } } // tau - electron Particles recon_tau; for ( const Particle& tau : tau_candidates ) { bool away = true; // If tau within dR < 0.2 of an electron: remove tau for ( const Particle& e : recon_e ) { if (deltaR( tau.momentum(), e.momentum()) < 0.2) { away = false; break; } } // tau - muon // If tau within dR < 0.2 of a muon: remove tau if (away) { for (const Particle& mu : muon_candidates) { if (deltaR(tau.momentum(), mu.momentum()) < 0.2) { away = false; break; } } } // If isolated keep it if (away) recon_tau.push_back( tau ); } // Muon - jet isolation Particles recon_mu, trigger_mu; // If muon within dR < 0.4 of a jet, remove muon for (const Particle& mu : muon_candidates) { bool away = true; for (const Jet& jet : recon_jets) { if ( deltaR( mu.momentum(), jet.momentum()) < 0.4 ) { away = false; break; } } if (away) { recon_mu.push_back( mu ); recon_leptons.push_back( mu ); if (mu.abseta() < 2.4) trigger_mu.push_back( mu ); } } // End overlap removal //------------------ // Jet cleaning if (rand()/static_cast<double>(RAND_MAX) <= 0.42) { for (const Jet& jet : recon_jets) { const double eta = jet.rapidity(); const double phi = jet.azimuthalAngle(MINUSPI_PLUSPI); if (jet.pT() 
> 25*GeV && inRange(eta, -0.1, 1.5) && inRange(phi, -0.9, -0.5)) vetoEvent; } } // Post-isolation event cuts // Require at least 3 charged tracks in event if (charged_tracks.size() < 3) vetoEvent; // And at least one e/mu passing trigger if (!( !recon_e .empty() && recon_e[0] .pT() > 25*GeV) && !( !trigger_mu.empty() && trigger_mu[0].pT() > 25*GeV) ) { MSG_DEBUG("Hardest lepton fails trigger"); vetoEvent; } // And only accept events with at least 2 electrons and muons and at least 3 leptons in total if (recon_mu.size() + recon_e.size() + recon_tau.size() < 3 || recon_leptons.size() < 2) vetoEvent; // Sort leptons by decreasing pT sortByPt(recon_leptons); sortByPt(recon_tau); // Calculate HTlep, fill lepton pT histograms & store chosen combination of 3 leptons double HTlep = 0.; Particles chosen_leptons; if ( recon_leptons.size() > 2 ) { _h_pt_1_3l->fill(recon_leptons[0].perp()/GeV); _h_pt_2_3l->fill(recon_leptons[1].perp()/GeV); _h_pt_3_3l->fill(recon_leptons[2].perp()/GeV); HTlep = (recon_leptons[0].pT() + recon_leptons[1].pT() + recon_leptons[2].pT())/GeV; chosen_leptons.push_back( recon_leptons[0] ); chosen_leptons.push_back( recon_leptons[1] ); chosen_leptons.push_back( recon_leptons[2] ); } else { _h_pt_1_2ltau->fill(recon_leptons[0].perp()/GeV); _h_pt_2_2ltau->fill(recon_leptons[1].perp()/GeV); _h_pt_3_2ltau->fill(recon_tau[0].perp()/GeV); HTlep = (recon_leptons[0].pT() + recon_leptons[1].pT() + recon_tau[0].pT())/GeV ; chosen_leptons.push_back( recon_leptons[0] ); chosen_leptons.push_back( recon_leptons[1] ); chosen_leptons.push_back( recon_tau[0] ); } // Number of prompt e/mu and had taus _h_e_n ->fill(recon_e.size()); _h_mu_n ->fill(recon_mu.size()); _h_tau_n->fill(recon_tau.size()); // Calculate HTjets double HTjets = 0.; for ( const Jet & jet : recon_jets ) HTjets += jet.perp()/GeV; // Calculate meff double meff = eTmiss + HTjets; Particles all_leptons; for ( const Particle & e : recon_e ) { meff += e.perp()/GeV; all_leptons.push_back( e ); } for ( const Particle & mu : recon_mu ) { meff += mu.perp()/GeV; all_leptons.push_back( mu ); } for ( const Particle & tau : recon_tau ) { meff += tau.perp()/GeV; all_leptons.push_back( tau ); } // Fill histogram of kinematic variables _h_HTlep_all ->fill(HTlep); _h_HTjets_all->fill(HTjets); _h_MET_all ->fill(eTmiss); _h_Meff_all ->fill(meff); // Determine signal region (3l/2ltau, onZ/offZ) string basic_signal_region; if ( recon_mu.size() + recon_e.size() > 2 ) basic_signal_region += "3l_"; else if ( (recon_mu.size() + recon_e.size() == 2) && (recon_tau.size() > 0)) basic_signal_region += "2ltau_"; // Is there an OSSF pair or a three lepton combination with an invariant mass close to the Z mass int onZ = isonZ(chosen_leptons); if (onZ == 1) basic_signal_region += "onZ"; else if (onZ == 0) basic_signal_region += "offZ"; // Check in which signal regions this event falls and adjust event counters fillEventCountsPerSR(basic_signal_region, onZ, HTlep, eTmiss, HTjets, meff); } /// Normalise histograms etc., after the run void finalize() { // Normalize to an integrated luminosity of 1 fb-1 double norm = crossSection()/femtobarn/sumOfWeights(); string best_signal_region = ""; double ratio_best_SR = 0.; // Loop over all signal regions and find signal region with best sensitivity (ratio signal events/visible cross-section) for (size_t i = 0; i < _signal_regions.size(); i++) { double signal_events = _eventCountsPerSR[_signal_regions[i]]->val() * norm; // Use expected upper limits to find best signal region double UL95 = 
getUpperLimit(_signal_regions[i], false); double ratio = signal_events / UL95; if (ratio > ratio_best_SR) { best_signal_region = _signal_regions[i]; ratio_best_SR = ratio; } } double signal_events_best_SR = _eventCountsPerSR[best_signal_region]->val() * norm; double exp_UL_best_SR = getUpperLimit(best_signal_region, false); double obs_UL_best_SR = getUpperLimit(best_signal_region, true); // Print out result cout << "----------------------------------------------------------------------------------------" << endl; cout << "Best signal region: " << best_signal_region << endl; cout << "Normalized number of signal events in this best signal region (per fb-1): " << signal_events_best_SR << endl; cout << "Efficiency*Acceptance: " << _eventCountsPerSR[best_signal_region]->val()/sumOfWeights() << endl; cout << "Cross-section [fb]: " << crossSection()/femtobarn << endl; cout << "Expected visible cross-section (per fb-1): " << exp_UL_best_SR << endl; cout << "Ratio (signal events / expected visible cross-section): " << ratio_best_SR << endl; cout << "Observed visible cross-section (per fb-1): " << obs_UL_best_SR << endl; cout << "Ratio (signal events / observed visible cross-section): " << signal_events_best_SR/obs_UL_best_SR << endl; cout << "----------------------------------------------------------------------------------------" << endl; cout << "Using the EXPECTED limits (visible cross-section) of the analysis: " << endl; if (signal_events_best_SR > exp_UL_best_SR) { cout << "Since the number of signal events > the visible cross-section, this model/grid point is EXCLUDED with 95% CL." << endl; _h_excluded->fill(1); } else { cout << "Since the number of signal events < the visible cross-section, this model/grid point is NOT EXCLUDED." << endl; _h_excluded->fill(0); } cout << "----------------------------------------------------------------------------------------" << endl; cout << "Using the OBSERVED limits (visible cross-section) of the analysis: " << endl; if (signal_events_best_SR > obs_UL_best_SR) { cout << "Since the number of signal events > the visible cross-section, this model/grid point is EXCLUDED with 95% CL." << endl; _h_excluded->fill(1); } else { cout << "Since the number of signal events < the visible cross-section, this model/grid point is NOT EXCLUDED." 
<< endl; _h_excluded->fill(0); } cout << "----------------------------------------------------------------------------------------" << endl; // Normalize to cross section if (norm != 0) { scale(_h_HTlep_all, norm); scale(_h_HTjets_all, norm); scale(_h_MET_all, norm); scale(_h_Meff_all, norm); scale(_h_pt_1_3l, norm); scale(_h_pt_2_3l, norm); scale(_h_pt_3_3l, norm); scale(_h_pt_1_2ltau, norm); scale(_h_pt_2_2ltau, norm); scale(_h_pt_3_2ltau, norm); scale(_h_e_n, norm); scale(_h_mu_n, norm); scale(_h_tau_n, norm); scale(_h_excluded, signal_events_best_SR); } } /// Helper functions //@{ /// Function giving a list of all signal regions vector<string> getSignalRegions() { // List of basic signal regions vector<string> basic_signal_regions; basic_signal_regions.push_back("3l_offZ"); basic_signal_regions.push_back("3l_onZ"); basic_signal_regions.push_back("2ltau_offZ"); basic_signal_regions.push_back("2ltau_onZ"); // List of kinematic variables vector<string> kinematic_variables; kinematic_variables.push_back("HTlep"); kinematic_variables.push_back("METStrong"); kinematic_variables.push_back("METWeak"); kinematic_variables.push_back("Meff"); kinematic_variables.push_back("MeffStrong"); vector<string> signal_regions; // Loop over all kinematic variables and basic signal regions for (size_t i0 = 0; i0 < kinematic_variables.size(); i0++) { for (size_t i1 = 0; i1 < basic_signal_regions.size(); i1++) { // Is signal region onZ? int onZ = (basic_signal_regions[i1].find("onZ") != string::npos) ? 1 : 0; // Get cut values for this kinematic variable vector<int> cut_values = getCutsPerSignalRegion(kinematic_variables[i0], onZ); // Loop over all cut values for (size_t i2 = 0; i2 < cut_values.size(); i2++) { // Push signal region into vector, using the cut value in the name so that it matches the keys used in fillEventCountsPerSR() and getUpperLimit() signal_regions.push_back( (kinematic_variables[i0] + "_" + basic_signal_regions[i1] + "_cut_" + toString(cut_values[i2])) ); } } } return signal_regions; } /// Function giving all cut values per kinematic variable (taking onZ for MET into account) vector<int> getCutsPerSignalRegion(const string& signal_region, int onZ=0) { vector<int> cutValues; // Cut values for HTlep if (signal_region.compare("HTlep") == 0) { cutValues.push_back(0); cutValues.push_back(100); cutValues.push_back(150); cutValues.push_back(200); cutValues.push_back(300); } // Cut values for METStrong (HTjets > 100 GeV) and METWeak (HTjets < 100 GeV) else if (signal_region.compare("METStrong") == 0 || signal_region.compare("METWeak") == 0) { if (onZ == 0) cutValues.push_back(0); else if (onZ == 1) cutValues.push_back(20); cutValues.push_back(50); cutValues.push_back(75); } // Cut values for Meff and MeffStrong (MET > 75 GeV) if (signal_region.compare("Meff") == 0 || signal_region.compare("MeffStrong") == 0) { cutValues.push_back(0); cutValues.push_back(150); cutValues.push_back(300); cutValues.push_back(500); } return cutValues; } /// function fills map EventCountsPerSR by looping over all signal regions /// and looking if the event falls into this signal region void fillEventCountsPerSR(const string& basic_signal_region, int onZ, double HTlep, double eTmiss, double HTjets, double meff) { // Get cut values for HTlep, loop over them and add event if cut is passed vector<int> cut_values = getCutsPerSignalRegion("HTlep", onZ); for (size_t i = 0; i < cut_values.size(); i++) { if (HTlep > cut_values[i]) _eventCountsPerSR[("HTlep_" + basic_signal_region + "_cut_" + toString(cut_values[i]))]->fill(); } // Get cut values for METStrong, loop over them and add event if cut is passed cut_values = getCutsPerSignalRegion("METStrong", onZ); for (size_t i = 0; i < cut_values.size(); 
i++) { if (eTmiss > cut_values[i] && HTjets > 100.) _eventCountsPerSR[("METStrong_" + basic_signal_region + "_cut_" + toString(cut_values[i]))]->fill(); } // Get cut values for METWeak, loop over them and add event if cut is passed cut_values = getCutsPerSignalRegion("METWeak", onZ); for (size_t i = 0; i < cut_values.size(); i++) { if (eTmiss > cut_values[i] && HTjets <= 100.) _eventCountsPerSR[("METWeak_" + basic_signal_region + "_cut_" + toString(cut_values[i]))]->fill(); } // Get cut values for Meff, loop over them and add event if cut is passed cut_values = getCutsPerSignalRegion("Meff", onZ); for (size_t i = 0; i < cut_values.size(); i++) { if (meff > cut_values[i]) _eventCountsPerSR[("Meff_" + basic_signal_region + "_cut_" + toString(cut_values[i]))]->fill(); } // Get cut values for MeffStrong, loop over them and add event if cut is passed cut_values = getCutsPerSignalRegion("MeffStrong", onZ); for (size_t i = 0; i < cut_values.size(); i++) { if (meff > cut_values[i] && eTmiss > 75.) _eventCountsPerSR[("MeffStrong_" + basic_signal_region + "_cut_" + toString(cut_values[i]))]->fill(); } } /// Function returning 4-vector of daughter-particle if it is a tau neutrino /// @todo Move to TauFinder and make less HepMC-ish FourMomentum get_tau_neutrino_mom(const Particle& p) { assert(p.abspid() == PID::TAU); ConstGenVertexPtr dv = p.genParticle()->end_vertex(); assert(dv != nullptr); for(ConstGenParticlePtr pp: HepMCUtils::particles(dv, Relatives::CHILDREN)){ if (abs(pp->pdg_id()) == PID::NU_TAU) return FourMomentum(pp->momentum()); } return FourMomentum(); } /// Function calculating the prong number of taus /// @todo Move to TauFinder and make less HepMC-ish void get_prong_number(ConstGenParticlePtr p, unsigned int& nprong, bool& lep_decaying_tau) { assert(p != nullptr); //const int tau_barcode = p->barcode(); ConstGenVertexPtr dv = p->end_vertex(); assert(dv != nullptr); for(ConstGenParticlePtr pp: HepMCUtils::particles(dv, Relatives::CHILDREN)){ // If they have status 1 and are charged they will produce a track and the prong number is +1 if (pp->status() == 1 ) { const int id = pp->pdg_id(); if (Rivet::PID::charge(id) != 0 ) ++nprong; // Check if tau decays leptonically // @todo Can a tau decay include a tau in its decay daughters?! if ((abs(id) == PID::ELECTRON || abs(id) == PID::MUON || abs(id) == PID::TAU) && abs(p->pdg_id()) == PID::TAU) lep_decaying_tau = true; } // If the status of the daughter particle is 2 it is unstable and the further decays are checked else if (pp->status() == 2 ) { get_prong_number(pp, nprong, lep_decaying_tau); } } } /// Function giving fiducial lepton efficiency double apply_reco_eff(int flavor, const Particle& p) { float pt = p.pT()/GeV; float eta = p.eta(); double eff = 0.; //double err = 0.; if (flavor == 11) { // weight prompt electron -- now including data/MC ID SF in eff. 
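// Readable sketch of the model coded below: for electrons the pT-dependence is a saturating form, eff(pT) = p1 - p0/pT, while for prompt muons it is an error-function turn-on, eff(pT) = 0.5*p2*(1 + erf((pT-p0)/(2*p1))); where eta-binned tables are given, the result is additionally multiplied by the binned factor eff_eta and normalised by its average, eff -> eff*eff_eta/avgrate.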
//float rho = 0.820; float p0 = 7.34; float p1 = 0.8977; //float ep0= 0.5 ; float ep1= 0.0087; eff = p1 - p0/pt; //double err0 = ep0/pt; // d(eff)/dp0 //double err1 = ep1; // d(eff)/dp1 //err = sqrt(err0*err0 + err1*err1 - 2*rho*err0*err1); double avgrate = 0.6867; float wz_ele_eta[] = {0.588717,0.603674,0.666135,0.747493,0.762202,0.675051,0.751606,0.745569,0.665333,0.610432,0.592693,}; //float ewz_ele_eta[] ={0.00292902,0.002476,0.00241209,0.00182319,0.00194339,0.00299785,0.00197339,0.00182004,0.00241793,0.00245997,0.00290394,}; int ibin = 3; if (eta >= -2.5 && eta < -2.0) ibin = 0; if (eta >= -2.0 && eta < -1.5) ibin = 1; if (eta >= -1.5 && eta < -1.0) ibin = 2; if (eta >= -1.0 && eta < -0.5) ibin = 3; if (eta >= -0.5 && eta < -0.1) ibin = 4; if (eta >= -0.1 && eta < 0.1) ibin = 5; if (eta >= 0.1 && eta < 0.5) ibin = 6; if (eta >= 0.5 && eta < 1.0) ibin = 7; if (eta >= 1.0 && eta < 1.5) ibin = 8; if (eta >= 1.5 && eta < 2.0) ibin = 9; if (eta >= 2.0 && eta < 2.5) ibin = 10; double eff_eta = wz_ele_eta[ibin]; //double err_eta = ewz_ele_eta[ibin]; eff = (eff*eff_eta)/avgrate; } if (flavor == 12) { // weight electron from tau //float rho = 0.884; float p0 = 6.799; float p1 = 0.842; //float ep0= 0.664; float ep1= 0.016; eff = p1 - p0/pt; //double err0 = ep0/pt; // d(eff)/dp0 //double err1 = ep1; // d(eff)/dp1 //err = sqrt(err0*err0 + err1*err1 - 2*rho*err0*err1); double avgrate = 0.5319; float wz_elet_eta[] = {0.468945,0.465953,0.489545,0.58709,0.59669,0.515829,0.59284,0.575828,0.498181,0.463536,0.481738,}; //float ewz_elet_eta[] ={0.00933795,0.00780868,0.00792679,0.00642083,0.00692652,0.0101568,0.00698452,0.00643524,0.0080002,0.00776238,0.0094699,}; int ibin = 3; if (eta >= -2.5 && eta < -2.0) ibin = 0; if (eta >= -2.0 && eta < -1.5) ibin = 1; if (eta >= -1.5 && eta < -1.0) ibin = 2; if (eta >= -1.0 && eta < -0.5) ibin = 3; if (eta >= -0.5 && eta < -0.1) ibin = 4; if (eta >= -0.1 && eta < 0.1) ibin = 5; if (eta >= 0.1 && eta < 0.5) ibin = 6; if (eta >= 0.5 && eta < 1.0) ibin = 7; if (eta >= 1.0 && eta < 1.5) ibin = 8; if (eta >= 1.5 && eta < 2.0) ibin = 9; if (eta >= 2.0 && eta < 2.5) ibin = 10; double eff_eta = wz_elet_eta[ibin]; //double err_eta = ewz_elet_eta[ibin]; eff = (eff*eff_eta)/avgrate; } if (flavor == 13) {// weight prompt muon //if eta>0.1 float p0 = -18.21; float p1 = 14.83; float p2 = 0.9312; //float ep0= 5.06; float ep1= 1.9; float ep2=0.00069; if ( fabs(eta) < 0.1) { p0 = 7.459; p1 = 2.615; p2 = 0.5138; //ep0 = 10.4; ep1 = 4.934; ep2 = 0.0034; } double arg = ( pt-p0 )/( 2.*p1 ) ; eff = 0.5 * p2 * (1.+erf(arg)); //err = 0.1*eff; } if (flavor == 14) {// weight muon from tau if (fabs(eta) < 0.1) { float p0 = -1.756; float p1 = 12.38; float p2 = 0.4441; //float ep0= 10.39; float ep1= 7.9; float ep2=0.022; double arg = ( pt-p0 )/( 2.*p1 ) ; eff = 0.5 * p2 * (1.+erf(arg)); //err = 0.1*eff; } else { float p0 = 2.102; float p1 = 0.8293; //float ep0= 0.271; float ep1= 0.0083; eff = p1 - p0/pt; //double err0 = ep0/pt; // d(eff)/dp0 //double err1 = ep1; // d(eff)/dp1 //err = sqrt(err0*err0 + err1*err1 - 2*rho*err0*err1); } } if (flavor == 15) {// weight hadronic tau 1p float wz_tau1p[] = {0.0249278,0.146978,0.225049,0.229212,0.21519,0.206152,0.201559,0.197917,0.209249,0.228336,0.193548,}; //float ewz_tau1p[] ={0.00178577,0.00425252,0.00535052,0.00592126,0.00484684,0.00612941,0.00792099,0.0083006,0.0138307,0.015568,0.0501751,}; int ibin = 0; if (pt > 15) ibin = 1; if (pt > 20) ibin = 2; if (pt > 25) ibin = 3; if (pt > 30) ibin = 4; if (pt > 40) ibin = 5; if (pt > 50) ibin = 6; if (pt > 
60) ibin = 7; if (pt > 80) ibin = 8; if (pt > 100) ibin = 9; if (pt > 200) ibin = 10; eff = wz_tau1p[ibin]; //err = ewz_tau1p[ibin]; double avgrate = 0.1718; float wz_tau1p_eta[] = {0.162132,0.176393,0.139619,0.178813,0.185144,0.210027,0.203937,0.178688,0.137034,0.164216,0.163713,}; //float ewz_tau1p_eta[] ={0.00706705,0.00617989,0.00506798,0.00525172,0.00581865,0.00865675,0.00599245,0.00529877,0.00506368,0.00617025,0.00726219,}; ibin = 3; if (eta >= -2.5 && eta < -2.0) ibin = 0; if (eta >= -2.0 && eta < -1.5) ibin = 1; if (eta >= -1.5 && eta < -1.0) ibin = 2; if (eta >= -1.0 && eta < -0.5) ibin = 3; if (eta >= -0.5 && eta < -0.1) ibin = 4; if (eta >= -0.1 && eta < 0.1) ibin = 5; if (eta >= 0.1 && eta < 0.5) ibin = 6; if (eta >= 0.5 && eta < 1.0) ibin = 7; if (eta >= 1.0 && eta < 1.5) ibin = 8; if (eta >= 1.5 && eta < 2.0) ibin = 9; if (eta >= 2.0 && eta < 2.5) ibin = 10; double eff_eta = wz_tau1p_eta[ibin]; //double err_eta = ewz_tau1p_eta[ibin]; eff = (eff*eff_eta)/avgrate; } if (flavor == 16) { //weight hadronic tau 3p float wz_tau3p[] = {0.000587199,0.00247181,0.0013031,0.00280112,}; //float ewz_tau3p[] ={0.000415091,0.000617187,0.000582385,0.00197792,}; int ibin = 0; if (pt > 15) ibin = 1; if (pt > 20) ibin = 2; if (pt > 40) ibin = 3; if (pt > 80) ibin = 4; eff = wz_tau3p[ibin]; //err = ewz_tau3p[ibin]; } return eff; } /// Function giving observed or expected upper limit (visible cross-section) double getUpperLimit(const string& signal_region, bool observed) { map<string, double> upperLimitsObserved; upperLimitsObserved["HTlep_3l_offZ_cut_0"] = 11.; upperLimitsObserved["HTlep_3l_offZ_cut_100"] = 8.7; upperLimitsObserved["HTlep_3l_offZ_cut_150"] = 4.0; upperLimitsObserved["HTlep_3l_offZ_cut_200"] = 4.4; upperLimitsObserved["HTlep_3l_offZ_cut_300"] = 1.6; upperLimitsObserved["HTlep_2ltau_offZ_cut_0"] = 25.; upperLimitsObserved["HTlep_2ltau_offZ_cut_100"] = 14.; upperLimitsObserved["HTlep_2ltau_offZ_cut_150"] = 6.1; upperLimitsObserved["HTlep_2ltau_offZ_cut_200"] = 3.3; upperLimitsObserved["HTlep_2ltau_offZ_cut_300"] = 1.2; upperLimitsObserved["HTlep_3l_onZ_cut_0"] = 48.; upperLimitsObserved["HTlep_3l_onZ_cut_100"] = 38.; upperLimitsObserved["HTlep_3l_onZ_cut_150"] = 14.; upperLimitsObserved["HTlep_3l_onZ_cut_200"] = 7.2; upperLimitsObserved["HTlep_3l_onZ_cut_300"] = 4.5; upperLimitsObserved["HTlep_2ltau_onZ_cut_0"] = 85.; upperLimitsObserved["HTlep_2ltau_onZ_cut_100"] = 53.; upperLimitsObserved["HTlep_2ltau_onZ_cut_150"] = 11.0; upperLimitsObserved["HTlep_2ltau_onZ_cut_200"] = 5.2; upperLimitsObserved["HTlep_2ltau_onZ_cut_300"] = 3.0; upperLimitsObserved["METStrong_3l_offZ_cut_0"] = 2.6; upperLimitsObserved["METStrong_3l_offZ_cut_50"] = 2.1; upperLimitsObserved["METStrong_3l_offZ_cut_75"] = 2.1; upperLimitsObserved["METStrong_2ltau_offZ_cut_0"] = 4.2; upperLimitsObserved["METStrong_2ltau_offZ_cut_50"] = 3.1; upperLimitsObserved["METStrong_2ltau_offZ_cut_75"] = 2.6; upperLimitsObserved["METStrong_3l_onZ_cut_20"] = 11.0; upperLimitsObserved["METStrong_3l_onZ_cut_50"] = 6.4; upperLimitsObserved["METStrong_3l_onZ_cut_75"] = 5.1; upperLimitsObserved["METStrong_2ltau_onZ_cut_20"] = 5.9; upperLimitsObserved["METStrong_2ltau_onZ_cut_50"] = 3.4; upperLimitsObserved["METStrong_2ltau_onZ_cut_75"] = 1.2; upperLimitsObserved["METWeak_3l_offZ_cut_0"] = 11.; upperLimitsObserved["METWeak_3l_offZ_cut_50"] = 5.3; upperLimitsObserved["METWeak_3l_offZ_cut_75"] = 3.1; upperLimitsObserved["METWeak_2ltau_offZ_cut_0"] = 23.; upperLimitsObserved["METWeak_2ltau_offZ_cut_50"] = 4.3; upperLimitsObserved["METWeak_2ltau_offZ_cut_75"] = 3.1; 
upperLimitsObserved["METWeak_3l_onZ_cut_20"] = 41.; upperLimitsObserved["METWeak_3l_onZ_cut_50"] = 16.; upperLimitsObserved["METWeak_3l_onZ_cut_75"] = 8.0; upperLimitsObserved["METWeak_2ltau_onZ_cut_20"] = 80.; upperLimitsObserved["METWeak_2ltau_onZ_cut_50"] = 4.4; upperLimitsObserved["METWeak_2ltau_onZ_cut_75"] = 1.8; upperLimitsObserved["Meff_3l_offZ_cut_0"] = 11.; upperLimitsObserved["Meff_3l_offZ_cut_150"] = 8.1; upperLimitsObserved["Meff_3l_offZ_cut_300"] = 3.1; upperLimitsObserved["Meff_3l_offZ_cut_500"] = 2.1; upperLimitsObserved["Meff_2ltau_offZ_cut_0"] = 25.; upperLimitsObserved["Meff_2ltau_offZ_cut_150"] = 12.; upperLimitsObserved["Meff_2ltau_offZ_cut_300"] = 3.9; upperLimitsObserved["Meff_2ltau_offZ_cut_500"] = 2.2; upperLimitsObserved["Meff_3l_onZ_cut_0"] = 48.; upperLimitsObserved["Meff_3l_onZ_cut_150"] = 37.; upperLimitsObserved["Meff_3l_onZ_cut_300"] = 11.; upperLimitsObserved["Meff_3l_onZ_cut_500"] = 4.8; upperLimitsObserved["Meff_2ltau_onZ_cut_0"] = 85.; upperLimitsObserved["Meff_2ltau_onZ_cut_150"] = 28.; upperLimitsObserved["Meff_2ltau_onZ_cut_300"] = 5.9; upperLimitsObserved["Meff_2ltau_onZ_cut_500"] = 1.9; upperLimitsObserved["MeffStrong_3l_offZ_cut_0"] = 3.8; upperLimitsObserved["MeffStrong_3l_offZ_cut_150"] = 3.8; upperLimitsObserved["MeffStrong_3l_offZ_cut_300"] = 2.8; upperLimitsObserved["MeffStrong_3l_offZ_cut_500"] = 2.1; upperLimitsObserved["MeffStrong_2ltau_offZ_cut_0"] = 3.9; upperLimitsObserved["MeffStrong_2ltau_offZ_cut_150"] = 4.0; upperLimitsObserved["MeffStrong_2ltau_offZ_cut_300"] = 2.9; upperLimitsObserved["MeffStrong_2ltau_offZ_cut_500"] = 1.5; upperLimitsObserved["MeffStrong_3l_onZ_cut_0"] = 10.0; upperLimitsObserved["MeffStrong_3l_onZ_cut_150"] = 10.0; upperLimitsObserved["MeffStrong_3l_onZ_cut_300"] = 6.8; upperLimitsObserved["MeffStrong_3l_onZ_cut_500"] = 3.9; upperLimitsObserved["MeffStrong_2ltau_onZ_cut_0"] = 1.6; upperLimitsObserved["MeffStrong_2ltau_onZ_cut_150"] = 1.4; upperLimitsObserved["MeffStrong_2ltau_onZ_cut_300"] = 1.5; upperLimitsObserved["MeffStrong_2ltau_onZ_cut_500"] = 0.9; // Expected upper limits are used in finalize() to pick the most sensitive signal region map<string, double> upperLimitsExpected; upperLimitsExpected["HTlep_3l_offZ_cut_0"] = 11.; upperLimitsExpected["HTlep_3l_offZ_cut_100"] = 8.5; upperLimitsExpected["HTlep_3l_offZ_cut_150"] = 4.6; upperLimitsExpected["HTlep_3l_offZ_cut_200"] = 3.6; upperLimitsExpected["HTlep_3l_offZ_cut_300"] = 1.9; upperLimitsExpected["HTlep_2ltau_offZ_cut_0"] = 23.; upperLimitsExpected["HTlep_2ltau_offZ_cut_100"] = 14.; upperLimitsExpected["HTlep_2ltau_offZ_cut_150"] = 6.4; upperLimitsExpected["HTlep_2ltau_offZ_cut_200"] = 3.6; upperLimitsExpected["HTlep_2ltau_offZ_cut_300"] = 1.5; upperLimitsExpected["HTlep_3l_onZ_cut_0"] = 33.; upperLimitsExpected["HTlep_3l_onZ_cut_100"] = 25.; upperLimitsExpected["HTlep_3l_onZ_cut_150"] = 12.; upperLimitsExpected["HTlep_3l_onZ_cut_200"] = 6.5; upperLimitsExpected["HTlep_3l_onZ_cut_300"] = 3.1; upperLimitsExpected["HTlep_2ltau_onZ_cut_0"] = 94.; upperLimitsExpected["HTlep_2ltau_onZ_cut_100"] = 61.; upperLimitsExpected["HTlep_2ltau_onZ_cut_150"] = 9.9; upperLimitsExpected["HTlep_2ltau_onZ_cut_200"] = 4.5; upperLimitsExpected["HTlep_2ltau_onZ_cut_300"] = 1.9; upperLimitsExpected["METStrong_3l_offZ_cut_0"] = 3.1; upperLimitsExpected["METStrong_3l_offZ_cut_50"] = 2.4; upperLimitsExpected["METStrong_3l_offZ_cut_75"] = 2.3; upperLimitsExpected["METStrong_2ltau_offZ_cut_0"] = 4.8; upperLimitsExpected["METStrong_2ltau_offZ_cut_50"] = 3.3; upperLimitsExpected["METStrong_2ltau_offZ_cut_75"] = 2.1; 
upperLimitsExpected["METStrong_3l_onZ_cut_20"] = 8.7; upperLimitsExpected["METStrong_3l_onZ_cut_50"] = 4.9; upperLimitsExpected["METStrong_3l_onZ_cut_75"] = 3.8; upperLimitsExpected["METStrong_2ltau_onZ_cut_20"] = 7.3; upperLimitsExpected["METStrong_2ltau_onZ_cut_50"] = 2.8; upperLimitsExpected["METStrong_2ltau_onZ_cut_75"] = 1.5; upperLimitsExpected["METWeak_3l_offZ_cut_0"] = 10.; upperLimitsExpected["METWeak_3l_offZ_cut_50"] = 4.7; upperLimitsExpected["METWeak_3l_offZ_cut_75"] = 3.0; upperLimitsExpected["METWeak_2ltau_offZ_cut_0"] = 21.; upperLimitsExpected["METWeak_2ltau_offZ_cut_50"] = 4.0; upperLimitsExpected["METWeak_2ltau_offZ_cut_75"] = 2.6; upperLimitsExpected["METWeak_3l_onZ_cut_20"] = 30.; upperLimitsExpected["METWeak_3l_onZ_cut_50"] = 10.; upperLimitsExpected["METWeak_3l_onZ_cut_75"] = 5.4; upperLimitsExpected["METWeak_2ltau_onZ_cut_20"] = 88.; upperLimitsExpected["METWeak_2ltau_onZ_cut_50"] = 5.5; upperLimitsExpected["METWeak_2ltau_onZ_cut_75"] = 2.2; upperLimitsExpected["Meff_3l_offZ_cut_0"] = 11.; upperLimitsExpected["Meff_3l_offZ_cut_150"] = 8.8; upperLimitsExpected["Meff_3l_offZ_cut_300"] = 3.7; upperLimitsExpected["Meff_3l_offZ_cut_500"] = 2.1; upperLimitsExpected["Meff_2ltau_offZ_cut_0"] = 23.; upperLimitsExpected["Meff_2ltau_offZ_cut_150"] = 13.; upperLimitsExpected["Meff_2ltau_offZ_cut_300"] = 4.9; upperLimitsExpected["Meff_2ltau_offZ_cut_500"] = 2.4; upperLimitsExpected["Meff_3l_onZ_cut_0"] = 33.; upperLimitsExpected["Meff_3l_onZ_cut_150"] = 25.; upperLimitsExpected["Meff_3l_onZ_cut_300"] = 9.; upperLimitsExpected["Meff_3l_onZ_cut_500"] = 3.9; upperLimitsExpected["Meff_2ltau_onZ_cut_0"] = 94.; upperLimitsExpected["Meff_2ltau_onZ_cut_150"] = 35.; upperLimitsExpected["Meff_2ltau_onZ_cut_300"] = 6.8; upperLimitsExpected["Meff_2ltau_onZ_cut_500"] = 2.5; upperLimitsExpected["MeffStrong_3l_offZ_cut_0"] = 3.9; upperLimitsExpected["MeffStrong_3l_offZ_cut_150"] = 3.9; upperLimitsExpected["MeffStrong_3l_offZ_cut_300"] = 3.0; upperLimitsExpected["MeffStrong_3l_offZ_cut_500"] = 2.0; upperLimitsExpected["MeffStrong_2ltau_offZ_cut_0"] = 3.8; upperLimitsExpected["MeffStrong_2ltau_offZ_cut_150"] = 3.9; upperLimitsExpected["MeffStrong_2ltau_offZ_cut_300"] = 3.1; upperLimitsExpected["MeffStrong_2ltau_offZ_cut_500"] = 1.6; upperLimitsExpected["MeffStrong_3l_onZ_cut_0"] = 6.9; upperLimitsExpected["MeffStrong_3l_onZ_cut_150"] = 7.1; upperLimitsExpected["MeffStrong_3l_onZ_cut_300"] = 4.9; upperLimitsExpected["MeffStrong_3l_onZ_cut_500"] = 3.0; upperLimitsExpected["MeffStrong_2ltau_onZ_cut_0"] = 2.4; upperLimitsExpected["MeffStrong_2ltau_onZ_cut_150"] = 2.5; upperLimitsExpected["MeffStrong_2ltau_onZ_cut_300"] = 2.0; upperLimitsExpected["MeffStrong_2ltau_onZ_cut_500"] = 1.1; if (observed) return upperLimitsObserved[signal_region]; else return upperLimitsExpected[signal_region]; } /// Function checking if there is an OSSF lepton pair or a combination of 3 leptons with an invariant mass close to the Z mass /// @todo Should the reference Z mass be 91.2? 
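/// @note For illustration: the OSSF (opposite-sign same-flavour) test below uses the product of the two PDG IDs, since pid(e-)*pid(e+) = 11*(-11) = -121 and pid(mu-)*pid(mu+) = 13*(-13) = -169; no other e/mu pairing yields these products.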
int isonZ (const Particles& particles) { int onZ = 0; double best_mass_2 = 999.; double best_mass_3 = 999.; // Loop over all 2 particle combinations to find invariant mass of OSSF pair closest to Z mass for ( const Particle& p1 : particles ) { for ( const Particle& p2 : particles ) { double mass_difference_2_old = fabs(91.0 - best_mass_2); double mass_difference_2_new = fabs(91.0 - (p1.momentum() + p2.momentum()).mass()/GeV); // If particle combination is OSSF pair calculate mass difference to Z mass if ( (p1.pid()*p2.pid() == -121 || p1.pid()*p2.pid() == -169) ) { // Get invariant mass closest to Z mass if (mass_difference_2_new < mass_difference_2_old) best_mass_2 = (p1.momentum() + p2.momentum()).mass()/GeV; // In case there is an OSSF pair take also 3rd lepton into account (e.g. from FSR and photon to electron conversion) for ( const Particle & p3 : particles ) { double mass_difference_3_old = fabs(91.0 - best_mass_3); double mass_difference_3_new = fabs(91.0 - (p1.momentum() + p2.momentum() + p3.momentum()).mass()/GeV); if (mass_difference_3_new < mass_difference_3_old) best_mass_3 = (p1.momentum() + p2.momentum() + p3.momentum()).mass()/GeV; } } } } // Pick the minimum invariant mass of the best OSSF pair combination and the best 3 lepton combination // If this mass is in a 20 GeV window around the Z mass, the event is classified as onZ double best_mass = min(best_mass_2, best_mass_3); if (fabs(91.0 - best_mass) < 20) onZ = 1; return onZ; } //@} private: /// Histograms //@{ Histo1DPtr _h_HTlep_all, _h_HTjets_all, _h_MET_all, _h_Meff_all; Histo1DPtr _h_pt_1_3l, _h_pt_2_3l, _h_pt_3_3l, _h_pt_1_2ltau, _h_pt_2_2ltau, _h_pt_3_2ltau; Histo1DPtr _h_e_n, _h_mu_n, _h_tau_n; Histo1DPtr _h_excluded; //@} /// Fiducial efficiencies to model the effects of the ATLAS detector bool _use_fiducial_lepton_efficiency; /// List of signal regions and event counts per signal region vector<string> _signal_regions; - // *** LEIF *** i think this was intended map<string, CounterPtr> _eventCountsPerSR; }; DECLARE_RIVET_PLUGIN(ATLAS_2012_I1204447); } diff --git a/analyses/pluginATLAS/ATLAS_2013_I1219109.cc b/analyses/pluginATLAS/ATLAS_2013_I1219109.cc --- a/analyses/pluginATLAS/ATLAS_2013_I1219109.cc +++ b/analyses/pluginATLAS/ATLAS_2013_I1219109.cc @@ -1,147 +1,146 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/FinalState.hh" #include "Rivet/Projections/WFinder.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/HeavyHadrons.hh" namespace Rivet { /// @brief ATLAS W+b measurement class ATLAS_2013_I1219109: public Analysis { public: ///@brief: W + b-jet production at 7 TeV DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2013_I1219109); //@} void init() { // Get options from the new option system _mode = 0; if ( getOption("LMODE") == "EL" ) _mode = 1; if ( getOption("LMODE") == "MU" ) _mode = 2; const FinalState fs; Cut cuts = Cuts::abseta < 2.5 && Cuts::pT >= 25*GeV; // W finder for electrons and muons WFinder wf_mu(fs, cuts, PID::MUON, 0.0*GeV, DBL_MAX, 0.0, 0.1, WFinder::ChargedLeptons::PROMPT, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); WFinder wf_el(fs, cuts, PID::ELECTRON, 0.0*GeV, DBL_MAX, 0.0, 0.1, WFinder::ChargedLeptons::PROMPT, WFinder::ClusterPhotons::NODECAY, WFinder::AddPhotons::NO, WFinder::MassWindow::MT); declare(wf_mu, "WFmu"); declare(wf_el, "WFel"); // jets VetoedFinalState jet_fs(fs); jet_fs.addVetoOnThisFinalState(wf_el); jet_fs.addVetoOnThisFinalState(wf_mu); FastJets fj(jet_fs, 
FastJets::ANTIKT, 0.4); fj.useInvisibles(); declare(fj, "Jets"); declare(HeavyHadrons(Cuts::abseta < 2.5 && Cuts::pT > 5*GeV), "BHadrons"); // book histograms - // *** LEIF *** where did the y02 histos go? book(_njet ,1, 1, 1); // dSigma / dNjet book(_jet1_bPt ,3, 1, 1); // dSigma / dBjetPt for Njet = 1 book(_jet2_bPt ,8, 1, 1); // dSigma / dBjetPt for Njet = 2 } void analyze(const Event& event) { // retrieve W boson candidate const WFinder& wf_mu = apply<WFinder>(event, "WFmu"); const WFinder& wf_el = apply<WFinder>(event, "WFel"); size_t nWmu = wf_mu.size(); size_t nWel = wf_el.size(); if (_mode == 0 && !((nWmu == 1 && !nWel) || (!nWmu && nWel == 1))) vetoEvent; // one W->munu OR W->elnu candidate, otherwise veto if (_mode == 1 && !(!nWmu && nWel == 1)) vetoEvent; // one W->elnu candidate, otherwise veto if (_mode == 2 && !(nWmu == 1 && !nWel)) vetoEvent; // one W->munu candidate, otherwise veto if ( (nWmu? wf_mu : wf_el).bosons().size() != 1 ) vetoEvent; // only one W boson candidate if ( !((nWmu? wf_mu : wf_el).mT() > 60.0*GeV) ) vetoEvent; //const Particle& Wboson = wf.boson(); // retrieve constituent neutrino const Particle& neutrino = (nWmu? wf_mu : wf_el).constituentNeutrino(); if( !(neutrino.pT() > 25*GeV) ) vetoEvent; // retrieve constituent lepton const Particle& lepton = (nWmu? wf_mu : wf_el).constituentLepton(); // count good jets, check if good jet contains B hadron const Particles& bHadrons = apply<HeavyHadrons>(event, "BHadrons").bHadrons(); const Jets& jets = apply<FastJets>(event, "Jets").jetsByPt(25*GeV); int goodjets = 0, bjets = 0; double bPt = 0.; for(const Jet& j : jets) { if( (j.abseta() < 2.1) && (deltaR(lepton, j) > 0.5) ) { // this jet passes the selection! ++goodjets; // j.bTagged() uses ghost association which is // more elegant, but not what has been used in // this analysis originally, will match B had- // rons in eta-phi space instead for(const Particle& b : bHadrons) { if( deltaR(j, b) < 0.3 ) { // jet matched to B hadron! if(!bPt) bPt = j.pT() * GeV; // leading b-jet pT ++bjets; // count number of b-jets break; } } } } if( goodjets > 2 ) vetoEvent; // at most two jets if( !bjets ) vetoEvent; // at least one of them b-tagged double njets = double(goodjets); double ncomb = 3.0; _njet->fill(njets); _njet->fill(ncomb); if( goodjets == 1) _jet1_bPt->fill(bPt); else if(goodjets == 2) _jet2_bPt->fill(bPt); } void finalize() { const double sf = _mode? 1.0 : 0.5; const double xs_pb = sf * crossSection() / picobarn / sumOfWeights(); const double xs_fb = sf * crossSection() / femtobarn / sumOfWeights(); scale(_njet, xs_pb); scale(_jet1_bPt, xs_fb); scale(_jet2_bPt, xs_fb); } protected: size_t _mode; private: Histo1DPtr _njet; Histo1DPtr _jet1_bPt; Histo1DPtr _jet2_bPt; //bool _isMuon; }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(ATLAS_2013_I1219109); } diff --git a/analyses/pluginMC/MC_XS.cc b/analyses/pluginMC/MC_XS.cc --- a/analyses/pluginMC/MC_XS.cc +++ b/analyses/pluginMC/MC_XS.cc @@ -1,91 +1,89 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #ifndef ENABLE_HEPMC_3 #include "HepMC/HepMCDefs.h" #endif namespace Rivet { /// @brief Analysis for the generated cross section class MC_XS : public Analysis { public: /// @name Constructors etc. 
//@{ /// Constructor MC_XS() : Analysis("MC_XS") { } //@} public: /// @name Analysis methods //@{ /// Book histograms and initialise projections before the run void init() { /// @todo Convert to Scatter1D or Counter book(_h_XS, "XS"); book(_h_N, "N", 1, 0.0, 1.0); book(_h_pmXS, "pmXS", 2, -1.0, 1.0); book(_h_pmN, "pmN", 2, -1.0, 1.0); _mc_xs = _mc_error = 0.; } - /// Perform the per-event analysis void analyze(const Event& event) { - // *** LEIF *** This doesn't really make sense any more. _h_N->fill(0.5); _h_pmXS->fill(0.5); _h_pmN ->fill(0.5); #if defined ENABLE_HEPMC_3 //@todo HepMC3::GenCrossSection methods aren't const accessible :( RivetHepMC::GenCrossSection gcs = *(event.genEvent()->cross_section()); _mc_xs = gcs.xsec(); _mc_error = gcs.xsec_err(); #elif defined HEPMC_HAS_CROSS_SECTION _mc_xs = event.genEvent()->cross_section()->cross_section(); _mc_error = event.genEvent()->cross_section()->cross_section_error(); #endif // VERSION_CODE >= 3000000 } /// Normalise histograms etc., after the run void finalize() { scale(_h_pmXS, crossSection()/sumOfWeights()); #ifndef HEPMC_HAS_CROSS_SECTION _mc_xs = crossSection(); _mc_error = 0.0; #endif _h_XS->addPoint(0, _mc_xs, 0.5, _mc_error); } //@} private: /// @name Histograms //@{ Scatter2DPtr _h_XS; Histo1DPtr _h_N; Histo1DPtr _h_pmXS; Histo1DPtr _h_pmN; double _mc_xs, _mc_error; //@} }; // The hook for the plugin system DECLARE_RIVET_PLUGIN(MC_XS); } diff --git a/analyses/pluginMC/MC_XS.info b/analyses/pluginMC/MC_XS.info --- a/analyses/pluginMC/MC_XS.info +++ b/analyses/pluginMC/MC_XS.info @@ -1,12 +1,12 @@ Name: MC_XS Summary: MC analysis for process total cross section -Status: VALIDATED +Status: UNVALIDATED Authors: - Marek Schoenherr RunInfo: Suitable for any process. NumEvents: 1000 PtCuts: [0] Description: Analysis for bookkeeping of the total cross section, number of generated events and the ratio of events with positive and negative weights. NeedsCrossSection: yes diff --git a/include/Rivet/Analysis.hh b/include/Rivet/Analysis.hh --- a/include/Rivet/Analysis.hh +++ b/include/Rivet/Analysis.hh @@ -1,1424 +1,1440 @@ // -*- C++ -*- #ifndef RIVET_Analysis_HH #define RIVET_Analysis_HH #include "Rivet/Config/RivetCommon.hh" #include "Rivet/AnalysisInfo.hh" #include "Rivet/Event.hh" #include "Rivet/Projection.hh" #include "Rivet/ProjectionApplier.hh" #include "Rivet/ProjectionHandler.hh" #include "Rivet/AnalysisLoader.hh" #include "Rivet/Tools/Cuts.hh" #include "Rivet/Tools/Logging.hh" #include "Rivet/Tools/ParticleUtils.hh" #include "Rivet/Tools/BinnedHistogram.hh" #include "Rivet/Tools/RivetMT2.hh" #include "Rivet/Tools/RivetYODA.hh" #include "Rivet/Tools/Percentile.hh" #include "Rivet/Projections/CentralityProjection.hh" #include <tuple> /// @def vetoEvent /// Preprocessor define for vetoing events, including the log message and return. #define vetoEvent \ do { MSG_DEBUG("Vetoing event on line " << __LINE__ << " of " << __FILE__); return; } while(0) namespace Rivet { // Convenience for analysis writers using std::cout; using std::cerr; using std::endl; using std::tuple; using std::stringstream; using std::swap; using std::numeric_limits; // Forward declaration class AnalysisHandler; /// @brief This is the base class of all analysis classes in Rivet. /// /// There are /// three virtual functions which should be implemented in base classes: /// /// void init() is called by Rivet before a run is started. Here the /// analysis class should book necessary histograms. 
The needed /// projections should probably rather be constructed in the /// constructor. /// /// void analyze(const Event&) is called once for each event. Here the /// analysis class should apply the necessary Projections and fill the /// histograms. /// /// void finalize() is called after a run is finished. Here the analysis /// class should do whatever manipulations are necessary on the /// histograms. Writing the histograms to a file is, however, done by /// the Rivet class. class Analysis : public ProjectionApplier { /// The AnalysisHandler is a friend. friend class AnalysisHandler; public: /// @name Standard constructors and destructors. //@{ // /// The default constructor. // Analysis(); /// Constructor Analysis(const std::string& name); /// The destructor. virtual ~Analysis() {} //@} public: /// @name Main analysis methods //@{ /// Initialize this analysis object. A concrete class should here /// book all necessary histograms. An overridden function must make /// sure it first calls the base class function. virtual void init() { } /// Analyze one event. A concrete class should here apply the /// necessary projections on the \a event and fill the relevant /// histograms. An overridden function must make sure it first calls /// the base class function. virtual void analyze(const Event& event) = 0; /// Finalize this analysis object. A concrete class should here make /// all necessary operations on the histograms. Writing the /// histograms to a file is, however, done by the Rivet class. An /// overridden function must make sure it first calls the base class /// function. virtual void finalize() { } //@} public: /// @name Metadata /// Metadata is used for querying from the command line and also for /// building web pages and the analysis pages in the Rivet manual. //@{ /// Get the actual AnalysisInfo object in which all this metadata is stored. const AnalysisInfo& info() const { assert(_info && "No AnalysisInfo object :O"); return *_info; } /// @brief Get the name of the analysis. /// /// By default this is computed by combining the results of the /// experiment, year and Spires ID metadata methods and you should /// only override it if there's a good reason why those won't /// work. If options has been set for this instance, a /// corresponding string is appended at the end. virtual std::string name() const { return ( (info().name().empty()) ? _defaultname : info().name() ) + _optstring; } /// Get name of reference data file, which could be different from plugin name virtual std::string getRefDataName() const { return (info().getRefDataName().empty()) ? _defaultname : info().getRefDataName(); } /// Set name of reference data file, which could be different from plugin name virtual void setRefDataName(const std::string& ref_data="") { info().setRefDataName(!ref_data.empty() ? ref_data : name()); } /// Get the Inspire ID code for this analysis. virtual std::string inspireId() const { return info().inspireId(); } /// Get the SPIRES ID code for this analysis (~deprecated). virtual std::string spiresId() const { return info().spiresId(); } /// @brief Names & emails of paper/analysis authors. /// /// Names and email of authors in 'NAME \<EMAIL\>' format. The first /// name in the list should be the primary contact person. virtual std::vector<std::string> authors() const { return info().authors(); } /// @brief Get a short description of the analysis. /// /// Short (one sentence) description used as an index entry. /// Use @a description() to provide full descriptive paragraphs /// of analysis details. 
virtual std::string summary() const { return info().summary(); } /// @brief Get a full description of the analysis. /// /// Full textual description of this analysis, what it is useful for, /// what experimental techniques are applied, etc. Should be treated /// as a chunk of restructuredText (http://docutils.sourceforge.net/rst.html), /// with equations to be rendered as LaTeX with amsmath operators. virtual std::string description() const { return info().description(); } /// @brief Information about the events needed as input for this analysis. /// /// Event types, energies, kinematic cuts, particles to be considered /// stable, etc. etc. Should be treated as a restructuredText bullet list /// (http://docutils.sourceforge.net/rst.html) virtual std::string runInfo() const { return info().runInfo(); } /// Experiment which performed and published this analysis. virtual std::string experiment() const { return info().experiment(); } /// Collider on which the experiment ran. virtual std::string collider() const { return info().collider(); } /// When the original experimental analysis was published. virtual std::string year() const { return info().year(); } /// The luminosity in inverse femtobarn virtual std::string luminosityfb() const { return info().luminosityfb(); } /// Journal, and preprint references. virtual std::vector<std::string> references() const { return info().references(); } /// BibTeX citation key for this article. virtual std::string bibKey() const { return info().bibKey(); } /// BibTeX citation entry for this article. virtual std::string bibTeX() const { return info().bibTeX(); } /// Whether this analysis is trusted (in any way!) virtual std::string status() const { return (info().status().empty()) ? "UNVALIDATED" : info().status(); } /// Any work to be done on this analysis. virtual std::vector<std::string> todos() const { return info().todos(); } /// make-style commands for validating this analysis. virtual std::vector<std::string> validation() const { return info().validation(); } /// Return the allowed pairs of incoming beams required by this analysis. virtual const std::vector<PdgIdPair>& requiredBeams() const { return info().beams(); } /// Declare the allowed pairs of incoming beams required by this analysis. virtual Analysis& setRequiredBeams(const std::vector<PdgIdPair>& requiredBeams) { info().setBeams(requiredBeams); return *this; } /// Sets of valid beam energy pairs, in GeV virtual const std::vector<std::pair<double, double>>& requiredEnergies() const { return info().energies(); } /// Get vector of analysis keywords virtual const std::vector<std::string>& keywords() const { return info().keywords(); } /// Declare the list of valid beam energy pairs, in GeV virtual Analysis& setRequiredEnergies(const std::vector<std::pair<double, double>>& requiredEnergies) { info().setEnergies(requiredEnergies); return *this; } //@} /// @name Internal metadata modifying methods //@{ /// Get the actual AnalysisInfo object in which all this metadata is stored (non-const). AnalysisInfo& info() { assert(_info && "No AnalysisInfo object :O"); return *_info; } //@} /// @name Run conditions //@{ /// Incoming beams for this run const ParticlePair& beams() const; /// Incoming beam IDs for this run const PdgIdPair beamIds() const; /// Centre of mass energy for this run double sqrtS() const; /// Check if we are running rivet-merge. bool merging() const { return sqrtS() <= 0.0; } //@} /// @name Analysis / beam compatibility testing /// @todo Replace with beamsCompatible() with no args (calling beams() function internally) /// @todo Add beamsMatch() methods with same (shared-code?) 
tolerance as in beamsCompatible() //@{ /// Check if analysis is compatible with the provided beam particle IDs and energies bool isCompatible(const ParticlePair& beams) const; /// Check if analysis is compatible with the provided beam particle IDs and energies bool isCompatible(PdgId beam1, PdgId beam2, double e1, double e2) const; /// Check if analysis is compatible with the provided beam particle IDs and energies bool isCompatible(const PdgIdPair& beams, const std::pair<double, double>& energies) const; //@} /// Access the controlling AnalysisHandler object. AnalysisHandler& handler() const { return *_analysishandler; } protected: /// Get a Log object based on the name() property of the calling analysis object. Log& getLog() const; /// Get the process cross-section in pb. Throws if this hasn't been set. double crossSection() const; /// Get the process cross-section per generated event in pb. Throws if this /// hasn't been set. double crossSectionPerEvent() const; /// @brief Get the number of events seen (via the analysis handler). /// /// @note Use in the finalize phase only. size_t numEvents() const; /// @brief Get the sum of event weights seen (via the analysis handler). /// /// @note Use in the finalize phase only. double sumW() const; /// Alias double sumOfWeights() const { return sumW(); } /// @brief Get the sum of squared event weights seen (via the analysis handler). /// /// @note Use in the finalize phase only. double sumW2() const; protected: /// @name Histogram paths //@{ /// Get the canonical histogram "directory" path for this analysis. const std::string histoDir() const; /// Get the canonical histogram path for the named histogram in this analysis. const std::string histoPath(const std::string& hname) const; /// Get the canonical histogram path for the numbered histogram in this analysis. const std::string histoPath(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const; /// Get the internal histogram name for given d, x and y (cf. HepData) const std::string mkAxisCode(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const; //@} /// @name Histogram reference data //@{ /// Get reference data for a named histo /// @todo SFINAE to ensure that the type inherits from YODA::AnalysisObject? template <typename T=YODA::Scatter2D> const T& refData(const string& hname) const { _cacheRefData(); MSG_TRACE("Using histo bin edges for " << name() << ":" << hname); if (!_refdata[hname]) { MSG_ERROR("Can't find reference histogram " << hname); throw Exception("Reference data " + hname + " not found."); } return dynamic_cast<T&>(*_refdata[hname]); } /// Get reference data for a numbered histo /// @todo SFINAE to ensure that the type inherits from YODA::AnalysisObject? template <typename T=YODA::Scatter2D> const T& refData(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const { const string hname = mkAxisCode(datasetId, xAxisId, yAxisId); return refData<T>(hname); } //@} /// @name Counter booking //@{ /// Book a counter. CounterPtr & book(CounterPtr &, const std::string& name, const std::string& title=""); // const std::string& valtitle="" /// Book a counter, using a path generated from the dataset and axis ID codes /// /// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way. 
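/// For illustration (hypothetical counter name): a call like book(_c_n, 2, 1, 3) books the counter under the HepData-style axis code "d02-x01-y03", i.e. the string produced by mkAxisCode(2, 1, 3).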
CounterPtr & book(CounterPtr &, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, const std::string& title=""); // const std::string& valtitle="" //@} /// @name 1D histogram booking //@{ /// Book a 1D histogram with @a nbins uniformly distributed across the range @a lower - @a upper . Histo1DPtr & book(Histo1DPtr &,const std::string& name, size_t nbins, double lower, double upper, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// Book a 1D histogram with non-uniform bins defined by the vector of bin edges @a binedges . Histo1DPtr & book(Histo1DPtr &,const std::string& name, const std::vector<double>& binedges, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// Book a 1D histogram with non-uniform bins defined by the vector of bin edges @a binedges . Histo1DPtr & book(Histo1DPtr &,const std::string& name, const std::initializer_list<double>& binedges, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// Book a 1D histogram with binning from a reference scatter. Histo1DPtr & book(Histo1DPtr &,const std::string& name, const Scatter2D& refscatter, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// Book a 1D histogram, using the binnings in the reference data histogram. Histo1DPtr & book(Histo1DPtr &,const std::string& name, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// Book a 1D histogram, using the binnings in the reference data histogram. /// /// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way. Histo1DPtr & book(Histo1DPtr &,unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); //@} /// @name 2D histogram booking //@{ /// Book a 2D histogram with @a nxbins and @a nybins uniformly /// distributed across the ranges @a xlower - @a xupper and @a /// ylower - @a yupper respectively along the x- and y-axis. Histo2DPtr & book(Histo2DPtr &,const std::string& name, size_t nxbins, double xlower, double xupper, size_t nybins, double ylower, double yupper, const std::string& title="", const std::string& xtitle="", const std::string& ytitle="", const std::string& ztitle=""); /// Book a 2D histogram with non-uniform bins defined by the /// vectors of bin edges @a xbinedges and @a ybinedges. Histo2DPtr & book(Histo2DPtr &,const std::string& name, const std::vector<double>& xbinedges, const std::vector<double>& ybinedges, const std::string& title="", const std::string& xtitle="", const std::string& ytitle="", const std::string& ztitle=""); /// Book a 2D histogram with non-uniform bins defined by the /// vectors of bin edges @a xbinedges and @a ybinedges. Histo2DPtr & book(Histo2DPtr &,const std::string& name, const std::initializer_list<double>& xbinedges, const std::initializer_list<double>& ybinedges, const std::string& title="", const std::string& xtitle="", const std::string& ytitle="", const std::string& ztitle=""); /// Book a 2D histogram with binning from a reference scatter. Histo2DPtr & book(Histo2DPtr &,const std::string& name, const Scatter3D& refscatter, const std::string& title="", const std::string& xtitle="", const std::string& ytitle="", const std::string& ztitle=""); /// Book a 2D histogram, using the binnings in the reference data histogram. 
/// @name 2D histogram booking
//@{

/// Book a 2D histogram with @a nxbins and @a nybins uniformly
/// distributed across the ranges @a xlower - @a xupper and @a
/// ylower - @a yupper respectively along the x- and y-axis.
Histo2DPtr& book(Histo2DPtr&, const std::string& name,
                 size_t nxbins, double xlower, double xupper,
                 size_t nybins, double ylower, double yupper,
                 const std::string& title="", const std::string& xtitle="",
                 const std::string& ytitle="", const std::string& ztitle="");

/// Book a 2D histogram with non-uniform bins defined by the
/// vectors of bin edges @a xbinedges and @a ybinedges.
Histo2DPtr& book(Histo2DPtr&, const std::string& name,
                 const std::vector<double>& xbinedges, const std::vector<double>& ybinedges,
                 const std::string& title="", const std::string& xtitle="",
                 const std::string& ytitle="", const std::string& ztitle="");

/// Book a 2D histogram with non-uniform bins defined by the
/// vectors of bin edges @a xbinedges and @a ybinedges.
Histo2DPtr& book(Histo2DPtr&, const std::string& name,
                 const std::initializer_list<double>& xbinedges, const std::initializer_list<double>& ybinedges,
                 const std::string& title="", const std::string& xtitle="",
                 const std::string& ytitle="", const std::string& ztitle="");

/// Book a 2D histogram with binning from a reference scatter.
Histo2DPtr& book(Histo2DPtr&, const std::string& name, const Scatter3D& refscatter,
                 const std::string& title="", const std::string& xtitle="",
                 const std::string& ytitle="", const std::string& ztitle="");

/// Book a 2D histogram, using the binnings in the reference data histogram.
Histo2DPtr& book(Histo2DPtr&, const std::string& name,
                 const std::string& title="", const std::string& xtitle="",
                 const std::string& ytitle="", const std::string& ztitle="");

/// Book a 2D histogram, using the binnings in the reference data histogram.
///
/// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
Histo2DPtr& book(Histo2DPtr&, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
                 const std::string& title="", const std::string& xtitle="",
                 const std::string& ytitle="", const std::string& ztitle="");

//@}


/// @name 1D profile histogram booking
//@{

/// Book a 1D profile histogram with @a nbins uniformly distributed across the range @a lower - @a upper .
Profile1DPtr& book(Profile1DPtr&, const std::string& name, size_t nbins, double lower, double upper,
                   const std::string& title="", const std::string& xtitle="", const std::string& ytitle="");

/// Book a 1D profile histogram with non-uniform bins defined by the vector of bin edges @a binedges .
Profile1DPtr& book(Profile1DPtr&, const std::string& name, const std::vector<double>& binedges,
                   const std::string& title="", const std::string& xtitle="", const std::string& ytitle="");

/// Book a 1D profile histogram with non-uniform bins defined by the vector of bin edges @a binedges .
Profile1DPtr& book(Profile1DPtr&, const std::string& name, const std::initializer_list<double>& binedges,
                   const std::string& title="", const std::string& xtitle="", const std::string& ytitle="");

/// Book a 1D profile histogram with binning from a reference scatter.
Profile1DPtr& book(Profile1DPtr&, const std::string& name, const Scatter2D& refscatter,
                   const std::string& title="", const std::string& xtitle="", const std::string& ytitle="");

/// Book a 1D profile histogram, using the binnings in the reference data histogram.
Profile1DPtr& book(Profile1DPtr&, const std::string& name,
                   const std::string& title="", const std::string& xtitle="", const std::string& ytitle="");

/// Book a 1D profile histogram, using the binnings in the reference data histogram.
///
/// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
Profile1DPtr& book(Profile1DPtr&, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
                   const std::string& title="", const std::string& xtitle="", const std::string& ytitle="");

//@}
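// Profile booking follows the same pattern as 1D histograms; a sketch with a
// hypothetical member pointer:
//
//   book(_p_meanpt, "meanpt_vs_nch", 20, 0.5, 100.5);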
/// @name 2D profile histogram booking
//@{

/// Book a 2D profile histogram with @a nxbins and @a nybins uniformly
/// distributed across the ranges @a xlower - @a xupper and @a ylower - @a
/// yupper respectively along the x- and y-axis.
Profile2DPtr& book(Profile2DPtr&, const std::string& name,
                   size_t nxbins, double xlower, double xupper,
                   size_t nybins, double ylower, double yupper,
                   const std::string& title="", const std::string& xtitle="",
                   const std::string& ytitle="", const std::string& ztitle="");

/// Book a 2D profile histogram with non-uniform bins defined by the vectors
/// of bin edges @a xbinedges and @a ybinedges.
Profile2DPtr& book(Profile2DPtr&, const std::string& name,
                   const std::vector<double>& xbinedges, const std::vector<double>& ybinedges,
                   const std::string& title="", const std::string& xtitle="",
                   const std::string& ytitle="", const std::string& ztitle="");

/// Book a 2D profile histogram with non-uniform bins defined by the vectors
/// of bin edges @a xbinedges and @a ybinedges.
Profile2DPtr& book(Profile2DPtr&, const std::string& name,
                   const std::initializer_list<double>& xbinedges, const std::initializer_list<double>& ybinedges,
                   const std::string& title="", const std::string& xtitle="",
                   const std::string& ytitle="", const std::string& ztitle="");

/// @todo REINSTATE
// /// Book a 2D profile histogram with binning from a reference scatter.
// Profile2DPtr& book(const Profile2DPtr&, const std::string& name,
//                    const Scatter3D& refscatter,
//                    const std::string& title="",
//                    const std::string& xtitle="",
//                    const std::string& ytitle="",
//                    const std::string& ztitle="");

// /// Book a 2D profile histogram, using the binnings in the reference data histogram.
// Profile2DPtr& book(const Profile2DPtr&, const std::string& name,
//                    const std::string& title="",
//                    const std::string& xtitle="",
//                    const std::string& ytitle="",
//                    const std::string& ztitle="");

// /// Book a 2D profile histogram, using the binnings in the reference data histogram.
// ///
// /// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
// Profile2DPtr& book(const Profile2DPtr&, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
//                    const std::string& title="",
//                    const std::string& xtitle="",
//                    const std::string& ytitle="",
//                    const std::string& ztitle="");

//@}


/// @name 2D scatter booking
//@{

/// @brief Book a 2-dimensional data point set with the given name.
///
/// @note Unlike histogram booking, scatter booking by default makes no
/// attempt to use reference data to pre-fill the data object. If you want
/// this, which is sometimes useful e.g. when the x-position is not really
/// meaningful and can't be extracted from the data, then set the @a
/// copy_pts parameter to true. This creates points to match the reference
/// data's x values and errors, but with the y values and errors zeroed...
/// assuming that there is a reference histo with the same name: if there
/// isn't, an exception will be thrown.
Scatter2DPtr& book(Scatter2DPtr& s2d, const string& hname, bool copy_pts=false,
                   const std::string& title="", const std::string& xtitle="", const std::string& ytitle="");

/// @brief Book a 2-dimensional data point set, using the binnings in the reference data histogram.
///
/// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
///
/// @note Unlike histogram booking, scatter booking by default makes no
/// attempt to use reference data to pre-fill the data object. If you want
/// this, which is sometimes useful e.g. when the x-position is not really
/// meaningful and can't be extracted from the data, then set the @a
/// copy_pts parameter to true. This creates points to match the reference
/// data's x values and errors, but with the y values and errors zeroed.
Scatter2DPtr& book(Scatter2DPtr& s2d, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
                   bool copy_pts=false,
                   const std::string& title="", const std::string& xtitle="", const std::string& ytitle="");

/// @brief Book a 2-dimensional data point set with equally spaced x-points in a range.
///
/// The y values and errors will be set to 0.
Scatter2DPtr& book(Scatter2DPtr& s2d, const string& hname, size_t npts, double lower, double upper,
                   const std::string& title="", const std::string& xtitle="", const std::string& ytitle="");
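// Scatter-booking sketch (illustrative; names hypothetical). With copy_pts=true
// the x values and errors are copied from the reference data, with y zeroed:
//
//   book(_s_ratio, 2, 1, 1, true);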
/// @brief Book a 2-dimensional data point set based on provided contiguous "bin edges".
///
/// The y values and errors will be set to 0.
Scatter2DPtr& book(Scatter2DPtr& s2d, const string& hname, const std::vector<double>& binedges,
                   const std::string& title, const std::string& xtitle, const std::string& ytitle);

/// Book a 2-dimensional data point set with x-points from an existing scatter and a new path.
Scatter2DPtr book(Scatter2DPtr& s2d, const Scatter2DPtr& scPtr, const std::string& path,
                  const std::string& title="", const std::string& xtitle="", const std::string& ytitle="");

//@}


public:

/// @name Accessing options for this Analysis instance.
//@{

/// Return the map of all options given to this analysis.
const std::map<std::string, std::string>& options() { return _options; }

/// Get an option for this analysis instance as a string.
std::string getOption(std::string optname) {
  if ( _options.find(optname) != _options.end() )
    return _options.find(optname)->second;
  return "";
}

/// Get an option for this analysis instance converted to a
/// specific type (given by the specified @a def value).
template <typename T>
T getOption(std::string optname, T def) {
  if (_options.find(optname) == _options.end()) return def;
  std::stringstream ss;
  ss << _options.find(optname)->second;
  T ret;
  ss >> ret;
  return ret;
}

//@}
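// Option-reading sketch (option names hypothetical), e.g. for a run started
// with "rivet -a MyAnalysis:MODE=TUNE:PTCUT=5":
//
//   const string mode  = getOption("MODE");                // "" if unset
//   const double ptcut = getOption<double>("PTCUT", 10.0); // typed, with a default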
/// @brief Book a CentralityProjection
///
/// Using a SingleValueProjection, @a proj, giving the value of an
/// experimental observable to be used as a centrality estimator,
/// book a CentralityProjection based on the experimentally
/// measured percentiles of this observable (as given by the
/// reference data for the @a calHistName histogram in the @a
/// calAnaName analysis). If a preloaded file with the output of a
/// run using the @a calAnaName analysis contains a valid
/// generated @a calHistName histogram, it will be used as an
/// optional percentile binning. Also, if this preloaded file
/// contains a histogram with the name @a calHistName with an
/// appended "_IMP", this histogram will be used to add an optional
/// centrality percentile based on the generated impact
/// parameter. If @a increasing is true, a low (high) value of @a proj
/// is assumed to correspond to a more peripheral (central) event.
const CentralityProjection& declareCentrality(const SingleValueProjection& proj,
                                              string calAnaName, string calHistName,
                                              const string projName, bool increasing = false);

/// @brief Book a Percentile wrapper around AnalysisObjects.
///
/// Based on a previously registered CentralityProjection named @a
/// projName, book one AnalysisObject for each @a centralityBin and
/// name them according to the corresponding code in the @a ref
/// vector.
///
/// @todo Convert to just be called book() cf. others
template <class T>
Percentile<T> bookPercentile(string projName,
                             vector<pair<float, float>> centralityBins,
                             vector<tuple<int, int, int>> ref) {
  typedef typename ReferenceTraits<T>::RefT RefT;
  typedef rivet_shared_ptr<Wrapper<T>> WrapT;
  Percentile<T> pctl(this, projName);
  const int nCent = centralityBins.size();
  for (int iCent = 0; iCent < nCent; ++iCent) {
    const string axisCode = mkAxisCode(std::get<0>(ref[iCent]),
                                       std::get<1>(ref[iCent]),
                                       std::get<2>(ref[iCent]));
    const RefT& refscatter = refData<RefT>(axisCode);
    WrapT wtf(_weightNames(), T(refscatter, histoPath(axisCode)));
    wtf = addAnalysisObject(wtf);
    CounterPtr cnt(_weightNames(), Counter(histoPath("TMP/COUNTER/" + axisCode)));
    cnt = addAnalysisObject(cnt);
    pctl.add(wtf, cnt, centralityBins[iCent]);
  }
  return pctl;
}

// /// @brief Book Percentile wrappers around AnalysisObjects.
// ///
// /// Based on a previously registered CentralityProjection named @a
// /// projName book one (or several) AnalysisObject(s) named
// /// according to @a ref where the x-axis will be filled according
// /// to the percentile output(s) of the @a projName.
// ///
// /// @todo Convert to just be called book() cf. others
// template <class T>
// PercentileXaxis<T> bookPercentileXaxis(string projName,
//                                        tuple<int, int, int> ref) {
//   typedef typename ReferenceTraits<T>::RefT RefT;
//   typedef rivet_shared_ptr<Wrapper<T>> WrapT;
//   PercentileXaxis<T> pctl(this, projName);
//   const string axisCode = mkAxisCode(std::get<0>(ref),
//                                      std::get<1>(ref),
//                                      std::get<2>(ref));
//   const RefT& refscatter = refData<RefT>(axisCode);
//   WrapT wtf(_weightNames(), T(refscatter, histoPath(axisCode)));
//   wtf = addAnalysisObject(wtf);
//   CounterPtr cnt(_weightNames(), Counter());
//   cnt = addAnalysisObject(cnt);
//   pctl.add(wtf, cnt);
//   return pctl;
// }


private:

// Functions that have to be defined in the .cc file to avoid circular #includes

/// Get the list of weight names from the handler
vector<string> _weightNames() const;

/// Get a preloaded YODA object from the handler
YODA::AnalysisObjectPtr _getPreload(string name) const;

/// Get the default/nominal weight index
size_t _defaultWeightIndex() const;

/// Get an AO from another analysis
MultiweightAOPtr _getOtherAnalysisObject(const std::string& ananame, const std::string& name);

/// Check that analysis objects aren't being booked/registered outside the init stage
void _checkBookInit() const;

+    /// Check if we are in the init stage.
+    bool inInit() const;
+
+    /// Check if we are in the finalize stage.
+    bool inFinalize() const;
+

private:

/// To be used in finalize context only:
class CounterAdapter {
public:

  CounterAdapter(double x) : x_(x) {}

  CounterAdapter(const YODA::Counter& c) : x_(c.val()) {}
  // CounterAdapter(CounterPtr cp) : x_(cp->val()) {}

  CounterAdapter(const YODA::Scatter1D& s) : x_(s.points()[0].x()) {
    assert( s.numPoints() == 1 && "Can only scale by a single value." );
  }
  // CounterAdapter(Scatter1DPtr sp) : x_(sp->points()[0].x()) {
  //   assert( sp->numPoints() == 1 && "Can only scale by a single value." );
  // }

  operator double() const { return x_; }

private:

  double x_;

};


public:

double dbl(double x) { return x; }
double dbl(const YODA::Counter& c) { return c.val(); }
double dbl(const YODA::Scatter1D& s) {
  assert( s.numPoints() == 1 );
  return s.points()[0].x();
}


/// @name Analysis object manipulation
/// @todo Should really be protected: only public to keep BinnedHistogram happy for now...
//@{

/// Multiplicatively scale the given counter, @a cnt, by factor @a factor.
void scale(CounterPtr cnt, CounterAdapter factor);

/// Multiplicatively scale the given counters, @a cnts, by factor @a factor.
/// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh)
/// @todo Use SFINAE for a generic iterable of CounterPtrs
void scale(const std::vector<CounterPtr>& cnts, CounterAdapter factor) {
  for (auto& c : cnts) scale(c, factor);
}

/// @todo YUCK!
template <std::size_t array_size>
void scale(const CounterPtr (&cnts)[array_size], CounterAdapter factor) {
  // for (size_t i = 0; i < std::extent<decltype(cnts)>::value; ++i) scale(cnts[i], factor);
  for (auto& c : cnts) scale(c, factor);
}


/// Normalize the given histogram, @a histo, to area = @a norm.
void normalize(Histo1DPtr histo, CounterAdapter norm=1.0, bool includeoverflows=true);
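// Finalize-stage sketch (histogram names hypothetical): scaling to a
// cross-section and normalising a shape distribution:
//
//   void finalize() {
//     scale(_h_pt, crossSection()/picobarn/sumOfWeights());
//     normalize(_h_shape);   // unit area
//   }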
/// Normalize the given histograms, @a histos, to area = @a norm.
/// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh)
/// @todo Use SFINAE for a generic iterable of Histo1DPtrs
void normalize(const std::vector<Histo1DPtr>& histos, CounterAdapter norm=1.0, bool includeoverflows=true) {
  for (auto& h : histos) normalize(h, norm, includeoverflows);
}

/// @todo YUCK!
template <std::size_t array_size>
void normalize(const Histo1DPtr (&histos)[array_size], CounterAdapter norm=1.0, bool includeoverflows=true) {
  for (auto& h : histos) normalize(h, norm, includeoverflows);
}

/// Multiplicatively scale the given histogram, @a histo, by factor @a factor.
void scale(Histo1DPtr histo, CounterAdapter factor);

/// Multiplicatively scale the given histograms, @a histos, by factor @a factor.
/// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh)
/// @todo Use SFINAE for a generic iterable of Histo1DPtrs
void scale(const std::vector<Histo1DPtr>& histos, CounterAdapter factor) {
  for (auto& h : histos) scale(h, factor);
}

/// @todo YUCK!
template <std::size_t array_size>
void scale(const Histo1DPtr (&histos)[array_size], CounterAdapter factor) {
  for (auto& h : histos) scale(h, factor);
}


/// Normalize the given histogram, @a histo, to area = @a norm.
void normalize(Histo2DPtr histo, CounterAdapter norm=1.0, bool includeoverflows=true);

/// Normalize the given histograms, @a histos, to area = @a norm.
/// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh)
/// @todo Use SFINAE for a generic iterable of Histo2DPtrs
void normalize(const std::vector<Histo2DPtr>& histos, CounterAdapter norm=1.0, bool includeoverflows=true) {
  for (auto& h : histos) normalize(h, norm, includeoverflows);
}

/// @todo YUCK!
template <std::size_t array_size>
void normalize(const Histo2DPtr (&histos)[array_size], CounterAdapter norm=1.0, bool includeoverflows=true) {
  for (auto& h : histos) normalize(h, norm, includeoverflows);
}

/// Multiplicatively scale the given histogram, @a histo, by factor @a factor.
void scale(Histo2DPtr histo, CounterAdapter factor);

/// Multiplicatively scale the given histograms, @a histos, by factor @a factor.
/// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh)
/// @todo Use SFINAE for a generic iterable of Histo2DPtrs
void scale(const std::vector<Histo2DPtr>& histos, CounterAdapter factor) {
  for (auto& h : histos) scale(h, factor);
}

/// @todo YUCK!
template <std::size_t array_size>
void scale(const Histo2DPtr (&histos)[array_size], CounterAdapter factor) {
  for (auto& h : histos) scale(h, factor);
}


/// Helper for counter division.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(CounterPtr c1, CounterPtr c2, Scatter1DPtr s) const;

/// Helper for counter division with raw YODA objects.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(const YODA::Counter& c1, const YODA::Counter& c2, Scatter1DPtr s) const;

/// Helper for histogram division.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const;

/// Helper for histogram division with raw YODA objects.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(const YODA::Histo1D& h1, const YODA::Histo1D& h2, Scatter2DPtr s) const;
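// Division sketch (names hypothetical): filling a pre-booked ratio scatter
// from two histograms in finalize():
//
//   divide(_h_num, _h_den, _s_ratio);   // writes num/den into the registered scatter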
/// Helper for profile histogram division.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(Profile1DPtr p1, Profile1DPtr p2, Scatter2DPtr s) const;

/// Helper for profile histogram division with raw YODA objects.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(const YODA::Profile1D& p1, const YODA::Profile1D& p2, Scatter2DPtr s) const;

/// Helper for 2D histogram division.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(Histo2DPtr h1, Histo2DPtr h2, Scatter3DPtr s) const;

/// Helper for 2D histogram division with raw YODA objects.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(const YODA::Histo2D& h1, const YODA::Histo2D& h2, Scatter3DPtr s) const;

/// Helper for 2D profile histogram division.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(Profile2DPtr p1, Profile2DPtr p2, Scatter3DPtr s) const;

/// Helper for 2D profile histogram division with raw YODA objects.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void divide(const YODA::Profile2D& p1, const YODA::Profile2D& p2, Scatter3DPtr s) const;


/// Helper for histogram efficiency calculation.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void efficiency(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const;

/// Helper for histogram efficiency calculation.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void efficiency(const YODA::Histo1D& h1, const YODA::Histo1D& h2, Scatter2DPtr s) const;


/// Helper for histogram asymmetry calculation.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void asymm(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const;

/// Helper for histogram asymmetry calculation.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void asymm(const YODA::Histo1D& h1, const YODA::Histo1D& h2, Scatter2DPtr s) const;


/// Helper for converting a differential histo to an integral one.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void integrate(Histo1DPtr h, Scatter2DPtr s) const;

/// Helper for converting a differential histo to an integral one.
///
/// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
void integrate(const Histo1D& h, Scatter2DPtr s) const;

//@}


public:

/// List of registered analysis data objects
const vector<MultiweightAOPtr>& analysisObjects() const {
  return _analysisobjects;
}


protected:

/// @name Data object registration, retrieval, and removal
//@{

/// Get a preloaded YODA object.
template <typename YODAT>
shared_ptr<YODAT> getPreload(string path) const {
  return dynamic_pointer_cast<YODAT>(_getPreload(path));
}

/// Register a new data object, optionally read in preloaded data.
template <class T, class YODAT>
rivet_shared_ptr<Wrapper<T>> registerAO(const YODAT& yao) {
  typedef Wrapper<T> WrapperT;
  typedef shared_ptr<YODAT> YODAPtrT;
  typedef rivet_shared_ptr<WrapperT> RAOT;

+      if ( !inInit() && !inFinalize() ) {
+        MSG_ERROR("Can't book objects outside of init()");
+        throw UserError(name() + ": Can't book objects outside of init() or finalize().");
+      }
+
+      // First check that we haven't booked this before. This is
+      // allowed when booking in finalize.
+      for (auto& waold : analysisObjects()) {
+        if ( yao.path() == waold.get()->basePath() ) {
+          if ( !inInit() )
+            MSG_WARNING("Found double-booking of " << yao.path() << " in "
+                        << name() << ". Keeping previous booking");
+          return RAOT(dynamic_pointer_cast<WrapperT>(waold.get()));
+        }
+      }
+
  shared_ptr<WrapperT> wao = make_shared<WrapperT>();
  YODAPtrT yaop = make_shared<YODAT>(yao);

  for (const string& weightname : _weightNames()) {
    // Create two YODA objects for each weight. Copy from
    // preloaded YODAs if present. First the finalized yoda:
    string finalpath = yao.path();
    if ( weightname != "" ) finalpath += "[" + weightname + "]";
    YODAPtrT preload = getPreload<YODAT>(finalpath);
    if ( preload ) {
      if ( !bookingCompatible(preload, yaop) ) {
        MSG_WARNING("Found incompatible pre-existing data object with same base path "
                    << finalpath << " for " << name());
        preload = nullptr;
      } else {
        MSG_TRACE("Using preloaded " << finalpath << " in " << name());
        wao->_final.push_back(make_shared<YODAT>(*preload));
      }
    }
    if ( !preload ) {
      wao->_final.push_back(make_shared<YODAT>(yao));
      wao->_final.back()->setPath(finalpath);
    }

    // Then the raw filling yodas.
    string rawpath = "/RAW" + finalpath;
    preload = getPreload<YODAT>(rawpath);
    if ( preload ) {
      if ( !bookingCompatible(preload, yaop) ) {
        MSG_WARNING("Found incompatible pre-existing data object with same base path "
                    << rawpath << " for " << name());
        preload = nullptr;
      } else {
        MSG_TRACE("Using preloaded " << rawpath << " in " << name());
        wao->_persistent.push_back(make_shared<YODAT>(*preload));
      }
    }
    if ( !preload ) {
      wao->_persistent.push_back(make_shared<YODAT>(yao));
      wao->_persistent.back()->setPath(rawpath);
    }
  }

  rivet_shared_ptr<WrapperT> ret(wao);

-      // Now check that we haven't booked this before.
-      ret.get()->setActiveFinalWeightIdx(_defaultWeightIndex());
-      for (auto& waold : analysisObjects()) {
-        waold.get()->setActiveFinalWeightIdx(_defaultWeightIndex());
-        if ( ret->path() == waold->path() ) {
-          MSG_WARNING("Found double-booking of " << ret->path() << " in "
-                      << name() << ". Keeping previous booking");
-          waold.get()->unsetActiveWeight();
-          return RAOT(dynamic_pointer_cast<WrapperT>(waold.get()));
-        }
-        waold.get()->unsetActiveWeight();
+      ret.get()->unsetActiveWeight();
+      if ( inFinalize() ) {
+        // If booked in finalize() we assume it is the first time
+        // finalize is run.
+        ret.get()->pushToFinal();
+        ret.get()->setActiveFinalWeightIdx(0);
      }
-      ret.get()->unsetActiveWeight();

  _analysisobjects.push_back(ret);
  return ret;
}

/// Register a data object in the histogram system
template <typename AO=MultiweightAOPtr>
AO addAnalysisObject(const AO& aonew) {
  _checkBookInit();

  for (const MultiweightAOPtr& ao : analysisObjects()) {

    // Check AO base-name first
    ao.get()->setActiveWeightIdx(_defaultWeightIndex());
    aonew.get()->setActiveWeightIdx(_defaultWeightIndex());
    if (ao->path() != aonew->path()) continue;

    // If base-name matches, check compatibility
    // NB. This evil is because dynamic_ptr_cast can't work on rivet_shared_ptr directly
    AO aoold = AO(dynamic_pointer_cast<typename AO::element_type>(ao.get())); //< OMG
    if ( !aoold || !bookingCompatible(aonew, aoold) ) {
      MSG_WARNING("Found incompatible pre-existing data object with same base path "
                  << aonew->path() << " for " << name());
      throw LookupError("Found incompatible pre-existing data object with same base path during AO booking");
    }

    // Finally, check all weight variations
    for (size_t weightIdx = 0; weightIdx < _weightNames().size(); ++weightIdx) {
      aoold.get()->setActiveWeightIdx(weightIdx);
      aonew.get()->setActiveWeightIdx(weightIdx);
      if (aoold->path() != aonew->path()) {
        MSG_WARNING("Found incompatible pre-existing data object with different weight-path "
                    << aonew->path() << " for " << name());
        throw LookupError("Found incompatible pre-existing data object with same weight-path during AO booking");
      }
    }

    // They're fully compatible: bind and return
    aoold.get()->unsetActiveWeight();
    MSG_TRACE("Bound pre-existing data object " << aoold->path() << " for " << name());
    return aoold;
  }

  // No equivalent found
  MSG_TRACE("Registered " << aonew->annotation("Type") << " " << aonew->path() << " for " << name());
  aonew.get()->unsetActiveWeight();
  _analysisobjects.push_back(aonew);
  return aonew;
}

/// Unregister a data object from the histogram system (by name)
void removeAnalysisObject(const std::string& path);

/// Unregister a data object from the histogram system (by pointer)
void removeAnalysisObject(const MultiweightAOPtr& ao);

// /// Get all data objects, for all analyses, from the AnalysisHandler
// /// @todo Can we remove this? Why not call handler().getData()?
// vector<YODA::AnalysisObjectPtr> getAllData(bool includeorphans) const;

/// Get a Rivet data object from the histogram system
template <typename AO=MultiweightAOPtr>
const AO getAnalysisObject(const std::string& aoname) const {
  for (const MultiweightAOPtr& ao : analysisObjects()) {
    ao.get()->setActiveWeightIdx(_defaultWeightIndex());
    if (ao->path() == histoPath(aoname)) {
      // return dynamic_pointer_cast<AO>(ao);
      return AO(dynamic_pointer_cast<typename AO::element_type>(ao.get()));
    }
  }
  throw LookupError("Data object " + histoPath(aoname) + " not found");
}

// /// Get a data object from the histogram system
// template <typename AO=YODA::AnalysisObject>
// const std::shared_ptr<AO> getAnalysisObject(const std::string& name) const {
//   foreach (const AnalysisObjectPtr& ao, analysisObjects()) {
//     if (ao->path() == histoPath(name)) return dynamic_pointer_cast<AO>(ao);
//   }
//   throw LookupError("Data object " + histoPath(name) + " not found");
// }

// /// Get a data object from the histogram system (non-const)
// template <typename AO=YODA::AnalysisObject>
// std::shared_ptr<AO> getAnalysisObject(const std::string& name) {
//   foreach (const AnalysisObjectPtr& ao, analysisObjects()) {
//     if (ao->path() == histoPath(name)) return dynamic_pointer_cast<AO>(ao);
//   }
//   throw LookupError("Data object " + histoPath(name) + " not found");
// }

/// Get a data object from another analysis (e.g. preloaded
/// calibration histogram).
template <typename AO=MultiweightAOPtr>
AO getAnalysisObject(const std::string& ananame,
                     const std::string& aoname) {
  MultiweightAOPtr ao = _getOtherAnalysisObject(ananame, aoname);
  // return dynamic_pointer_cast<AO>(ao);
  return AO(dynamic_pointer_cast<typename AO::element_type>(ao.get()));
}

// /// Get a named Histo1D object from the histogram system
// const Histo1DPtr getHisto1D(const std::string& name) const {
//   return getAnalysisObject<Histo1D>(name);
// }
// /// Get a named Histo1D object from the histogram system (non-const)
// Histo1DPtr getHisto1D(const std::string& name) {
//   return getAnalysisObject<Histo1D>(name);
// }
// /// Get a Histo1D object from the histogram system by axis ID codes
// const Histo1DPtr getHisto1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
//   return getAnalysisObject<Histo1D>(makeAxisCode(datasetId, xAxisId, yAxisId));
// }
// /// Get a Histo1D object from the histogram system by axis ID codes (non-const)
// Histo1DPtr getHisto1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) {
//   return getAnalysisObject<Histo1D>(makeAxisCode(datasetId, xAxisId, yAxisId));
// }

// /// Get a named Histo2D object from the histogram system
// const Histo2DPtr getHisto2D(const std::string& name) const {
//   return getAnalysisObject<Histo2D>(name);
// }
// /// Get a named Histo2D object from the histogram system (non-const)
// Histo2DPtr getHisto2D(const std::string& name) {
//   return getAnalysisObject<Histo2D>(name);
// }
// /// Get a Histo2D object from the histogram system by axis ID codes
// const Histo2DPtr getHisto2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
//   return getAnalysisObject<Histo2D>(makeAxisCode(datasetId, xAxisId, yAxisId));
// }
// /// Get a Histo2D object from the histogram system by axis ID codes (non-const)
// Histo2DPtr getHisto2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) {
//   return getAnalysisObject<Histo2D>(makeAxisCode(datasetId, xAxisId, yAxisId));
// }

// /// Get a named Profile1D object from the histogram system
// const Profile1DPtr getProfile1D(const std::string& name) const {
//   return getAnalysisObject<Profile1D>(name);
// }
// /// Get a named Profile1D object from the histogram system (non-const)
// Profile1DPtr getProfile1D(const std::string& name) {
//   return getAnalysisObject<Profile1D>(name);
// }
// /// Get a Profile1D object from the histogram system by axis ID codes
// const Profile1DPtr getProfile1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
//   return getAnalysisObject<Profile1D>(makeAxisCode(datasetId, xAxisId, yAxisId));
// }
// /// Get a Profile1D object from the histogram system by axis ID codes (non-const)
// Profile1DPtr getProfile1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) {
//   return getAnalysisObject<Profile1D>(makeAxisCode(datasetId, xAxisId, yAxisId));
// }

// /// Get a named Profile2D object from the histogram system
// const Profile2DPtr getProfile2D(const std::string& name) const {
//   return getAnalysisObject<Profile2D>(name);
// }
// /// Get a named Profile2D object from the histogram system (non-const)
// Profile2DPtr getProfile2D(const std::string& name) {
//   return getAnalysisObject<Profile2D>(name);
// }
// /// Get a Profile2D object from the histogram system by axis ID codes
// const Profile2DPtr getProfile2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
//   return getAnalysisObject<Profile2D>(makeAxisCode(datasetId, xAxisId, yAxisId));
// }
// /// Get a Profile2D object from the histogram system by axis ID codes (non-const)
// Profile2DPtr getProfile2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) {
//   return getAnalysisObject<Profile2D>(makeAxisCode(datasetId, xAxisId, yAxisId));
// }

// /// Get a named Scatter2D object from the histogram system
// const Scatter2DPtr getScatter2D(const std::string& name) const {
//   return getAnalysisObject<Scatter2D>(name);
// }
// /// Get a named Scatter2D object from the histogram system (non-const)
// Scatter2DPtr getScatter2D(const std::string& name) {
//   return getAnalysisObject<Scatter2D>(name);
// }
// /// Get a Scatter2D object from the histogram system by axis ID codes
// const Scatter2DPtr getScatter2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
//   return getAnalysisObject<Scatter2D>(makeAxisCode(datasetId, xAxisId, yAxisId));
// }
// /// Get a Scatter2D object from the histogram system by axis ID codes (non-const)
// Scatter2DPtr getScatter2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) {
//   return getAnalysisObject<Scatter2D>(makeAxisCode(datasetId, xAxisId, yAxisId));
// }

//@}


private:

/// Name passed to constructor (used to find .info analysis data file, and as a fallback)
string _defaultname;

/// Pointer to analysis metadata object
unique_ptr<AnalysisInfo> _info;

/// Storage of all plot objects
/// @todo Make this a map for fast lookup by path?
vector<MultiweightAOPtr> _analysisobjects;

/// @name Cross-section variables
//@{
double _crossSection;
bool _gotCrossSection;
//@}

/// The controlling AnalysisHandler object.
AnalysisHandler* _analysishandler;

/// Collection of cached refdata to speed up many autobookings: the
/// reference data file should only be read once.
mutable std::map<std::string, YODA::AnalysisObjectPtr> _refdata;

/// Options for (this instance of) the analysis
map<string, string> _options;

/// The string of options.
string _optstring;


private:

/// @name Utility functions
//@{

/// Get the reference data for this paper and cache it.
void _cacheRefData() const;

//@}


/// The assignment operator is private and must never be called.
/// In fact, it should not even be implemented.
Analysis& operator=(const Analysis&);

};

}


// Include definition of analysis plugin system so that analyses automatically see it when including Analysis.hh
#include "Rivet/AnalysisBuilder.hh"

/// @def DECLARE_RIVET_PLUGIN
/// Preprocessor define to prettify the global-object plugin hook mechanism.
#define DECLARE_RIVET_PLUGIN(clsname) ::Rivet::AnalysisBuilder<clsname> plugin_ ## clsname

/// @def DECLARE_ALIASED_RIVET_PLUGIN
/// Preprocessor define to prettify the global-object plugin hook mechanism, with an extra alias name for this analysis.
// #define DECLARE_ALIASED_RIVET_PLUGIN(clsname, alias) Rivet::AnalysisBuilder<clsname> plugin_ ## clsname ## ( ## #alias ## )
#define DECLARE_ALIASED_RIVET_PLUGIN(clsname, alias) DECLARE_RIVET_PLUGIN(clsname)( #alias )

/// @def DEFAULT_RIVET_ANALYSIS_CONSTRUCTOR
/// Preprocessor define to prettify the manky constructor with name string argument
#define DEFAULT_RIVET_ANALYSIS_CONSTRUCTOR(clsname) clsname() : Analysis(# clsname) {}

/// @def DEFAULT_RIVET_ANALYSIS_CTOR
/// Slight abbreviation for DEFAULT_RIVET_ANALYSIS_CONSTRUCTOR
#define DEFAULT_RIVET_ANALYSIS_CTOR(clsname) DEFAULT_RIVET_ANALYSIS_CONSTRUCTOR(clsname)


#endif

diff --git a/include/Rivet/AnalysisHandler.hh b/include/Rivet/AnalysisHandler.hh
--- a/include/Rivet/AnalysisHandler.hh
+++ b/include/Rivet/AnalysisHandler.hh
@@ -1,374 +1,360 @@
// -*- C++ -*-
#ifndef RIVET_RivetHandler_HH
#define RIVET_RivetHandler_HH

#include "Rivet/Config/RivetCommon.hh"
#include "Rivet/Particle.hh"
#include "Rivet/AnalysisLoader.hh"
#include "Rivet/Tools/RivetYODA.hh"

namespace Rivet {

// Forward declaration and smart pointer for Analysis
class Analysis;
typedef std::shared_ptr<Analysis> AnaHandle;


/// A class which handles a number of analysis objects to be applied to
/// generated events. An {@link Analysis}' AnalysisHandler is also responsible
/// for handling the final writing-out of histograms.
class AnalysisHandler {
public:

/// @name Constructors and destructors.
//@{

/// Preferred constructor, with optional run name.
AnalysisHandler(const string& runname="");

/// @brief Destructor
/// The destructor is not virtual, as this class should not be inherited from.
~AnalysisHandler();

//@}


private:

/// Get a logger object.
Log& getLog() const;


public:

/// @name Run properties and weights
//@{

/// Get the name of this run.
string runName() const;

/// Get the number of events seen. Should only really be used by external
/// steering code or analyses in the finalize phase.
size_t numEvents() const;

/// @brief Access the sum of the event weights seen
///
/// This is the weighted equivalent of the number of events. It should only
/// be used by external steering code or analyses in the finalize phase.
double sumW() const { return _eventCounter->sumW(); }

/// Access to the sum of squared-weights
double sumW2() const { return _eventCounter->sumW2(); }

/// Names of event weight categories
const vector<string>& weightNames() const { return _weightNames; }

/// Number of event weight categories
size_t numWeights() const { return _weightNames.size(); }

/// Are any of the weights non-numeric?
bool haveNamedWeights() const;

/// Set the weight names from a GenEvent
void setWeightNames(const GenEvent& ge);

/// Get the index of the nominal weight-stream
size_t defaultWeightIndex() const { return _defaultWeightIdx; }

//@}


/// @name Cross-sections
//@{

/// Get the cross-section known to the handler.
Scatter1DPtr crossSection() const { return _xs; }

/// Set the cross-section for the process being generated.
AnalysisHandler& setCrossSection(double xs, double xserr);

/// Get the nominal cross-section
double nominalCrossSection() const {
  _xs.get()->setActiveWeightIdx(_defaultWeightIdx);
  const YODA::Scatter1D::Points& ps = _xs->points();
  if (ps.size() != 1) {
    string errMsg = "cross section missing when requesting nominal cross section";
    throw Error(errMsg);
  }
  double xs = ps[0].x();
  _xs.get()->unsetActiveWeight();
  return xs;
}

//@}


/// @name Beams
//@{

/// Set the beam particles for this run
AnalysisHandler& setRunBeams(const ParticlePair& beams) {
  _beams = beams;
  MSG_DEBUG("Setting run beams = " << beams << " @ " << sqrtS()/GeV << " GeV");
  return *this;
}

/// Get the beam particles for this run, usually determined from the first event.
const ParticlePair& beams() const { return _beams; }

/// Get beam IDs for this run, usually determined from the first event.
/// @deprecated Use standalone beamIds(ah.beams()), to clean AH interface
PdgIdPair beamIds() const;

/// Get energy for this run, usually determined from the first event.
/// @deprecated Use standalone sqrtS(ah.beams()), to clean AH interface
double sqrtS() const;

/// Setter for _ignoreBeams
void setIgnoreBeams(bool ignore=true);

//@}


/// @name Handle analyses
//@{

/// Get a list of the currently registered analyses' names.
std::vector<std::string> analysisNames() const;

/// Get the collection of currently registered analyses.
const std::map<std::string, AnaHandle>& analysesMap() const {
  return _analyses;
}

/// Get the collection of currently registered analyses.
std::vector<AnaHandle> analyses() const {
  std::vector<AnaHandle> rtn;
  rtn.reserve(_analyses.size());
  for (const auto& apair : _analyses) rtn.push_back(apair.second);
  return rtn;
}

/// Get a registered analysis by name.
AnaHandle analysis(const std::string& analysisname) {
  if ( _analyses.find(analysisname) == _analyses.end() )
    throw LookupError("No analysis named '" + analysisname + "' registered in AnalysisHandler");
  try {
    return _analyses[analysisname];
  } catch (...) {
    throw LookupError("No analysis named '" + analysisname + "' registered in AnalysisHandler");
  }
}

/// Add an analysis to the run list by object
AnalysisHandler& addAnalysis(Analysis* analysis);

/// @brief Add an analysis to the run list using its name.
///
/// The actual Analysis to be used will be obtained via
/// AnalysisLoader::getAnalysis(string). If no matching analysis is found,
/// no analysis is added (i.e. the null pointer is checked and discarded).
AnalysisHandler& addAnalysis(const std::string& analysisname);

/// @brief Add an analysis with a map of analysis options.
AnalysisHandler& addAnalysis(const std::string& analysisname, std::map<string, string> pars);

/// @brief Add analyses to the run list using their names.
///
/// The actual {@link Analysis}' to be used will be obtained via
/// AnalysisHandler::addAnalysis(string), which in turn uses
/// AnalysisLoader::getAnalysis(string). If no matching analysis is found
/// for a given name, no analysis is added, but also no error is thrown.
AnalysisHandler& addAnalyses(const std::vector<std::string>& analysisnames);

/// Remove an analysis from the run list using its name.
AnalysisHandler& removeAnalysis(const std::string& analysisname);

/// Remove analyses from the run list using their names.
AnalysisHandler& removeAnalyses(const std::vector<std::string>& analysisnames);

//@}


/// @name Main init/execute/finalise
//@{

/// Initialize a run, with the run beams taken from the example event.
void init(const GenEvent& event);

/// @brief Analyze the given \a event by reference.
///
/// This function will call the AnalysisBase::analyze() function of all
/// included analysis objects.
void analyze(const GenEvent& event);

/// @brief Analyze the given \a event by pointer.
///
/// This function will call the AnalysisBase::analyze() function of all
/// included analysis objects, after checking the event pointer validity.
void analyze(const GenEvent* event);

/// Finalize a run. This function calls the AnalysisBase::finalize()
/// functions of all included analysis objects.
void finalize();

//@}


/// @name Histogram / data object access
//@{

-    /// Add a vector of analysis objects to the current state.
-    // *** LEIF *** removed void addData(const std::vector<YODA::AnalysisObjectPtr>& aos);
-
/// Read analysis plots into the histo collection (via addData) from the named file.
void readData(const std::string& filename);

/// Get all multi-weight Rivet analysis object wrappers
vector<MultiweightAOPtr> getRivetAOs() const;

/// Get a pointer to a preloaded yoda object with the given path,
/// or null if path is not found.
const YODA::AnalysisObjectPtr getPreload(string path) const {
  auto it = _preloads.find(path);
  if ( it == _preloads.end() ) return nullptr;
  return it->second;
}

-    /// Get single-weight YODA analysis objects
-    // *** LEIF *** thinks This is not needed anymore
-    // vector<YODA::AnalysisObjectPtr> getYodaAOs(bool includeorphans,
-    //                                            bool includetmps,
-    //                                            bool usefinalized) const;
-
-    // /// Get all analyses' plots as a vector of analysis objects.
-    // std::vector<YODA::AnalysisObjectPtr> getData(bool includeorphans = false,
-    //                                              bool includetmps = false,
-    //                                              bool usefinalized = true) const;
-
/// Write all analyses' plots (via getData) to the named file.
void writeData(const std::string& filename) const;

/// Tell Rivet to dump intermediate result to a file named @a
/// dumpfile every @a period'th event. If @a period is not positive,
/// no dumping will be done.
void dump(string dumpfile, int period) {
  _dumpPeriod = period;
  _dumpFile = dumpfile;
}

/// Take the vector of yoda files and merge them together using
/// the cross section and weight information provided in each
/// file. Each file in @a aofiles is assumed to have been produced
/// by Rivet. By default the files are assumed to contain
/// different processes (or the same process but with mutually
/// exclusive cuts), but if @a equiv is true, the files are
/// assumed to contain output of completely equivalent (but
/// statistically independent) Rivet runs. The corresponding
/// analyses will be loaded and their analysis objects will be
/// filled with the merged result. finalize() will be run on each
/// relevant analysis. The resulting YODA file can then be written
/// out by writeData(). If @a delopts is non-empty, it is assumed to
/// contain names of different options to be merged into the same
/// analysis objects.
void mergeYodas(const vector<string>& aofiles,
                const vector<string>& delopts = vector<string>(),
                bool equiv = false);

/// Helper function to strip specific options from data object paths.
void stripOptions(YODA::AnalysisObjectPtr ao, const vector<string>& delopts) const;

//@}


/// Indicate which Rivet stage we're in.
/// At the moment, only INIT is used to enable booking.
-    enum class Stage { OTHER, INIT };
+    enum class Stage { OTHER, INIT, FINALIZE };

/// Which stage are we in?
Stage stage() const { return _stage; }
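// Steering sketch (illustrative; the GenEvent source "events" is assumed and
// not part of this interface):
//
//   AnalysisHandler ah("myrun");
//   ah.addAnalysis("MC_XS");
//   for (const GenEvent* ge : events) ah.analyze(ge);  // init() runs on the first event
//   ah.finalize();
//   ah.writeData("Rivet.yoda");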
private:

/// Current handler stage
Stage _stage = Stage::OTHER;

/// The collection of Analysis objects to be used.
std::map<std::string, AnaHandle> _analyses;

/// A vector of pre-loaded objects which do not have a valid
/// Analysis plugged in.
map<string, YODA::AnalysisObjectPtr> _preloads;

/// A vector containing copies of analysis objects after
/// finalize() has been run.
vector<MultiweightAOPtr> _finalizedAOs;


/// @name Run properties
//@{

/// Weight names
std::vector<std::string> _weightNames;
std::vector<std::valarray<double>> _subEventWeights;
size_t _numWeightTypes; // always == WeightVector.size()

/// Run name
std::string _runname;

/// Event counter
mutable CounterPtr _eventCounter;

/// Cross-section known to AH
Scatter1DPtr _xs;

/// Beams used by this run.
ParticlePair _beams;

/// Flag to check if init has been called
bool _initialised;

/// Flag whether input event beams should be ignored in compatibility check
bool _ignoreBeams;

/// Current event number
int _eventNumber;

/// The index in the weight vector for the nominal weight stream
size_t _defaultWeightIdx;

/// Determines how often Rivet runs finalize() and writes the
/// result to a YODA file.
int _dumpPeriod;

/// The name of a YODA file to which Rivet periodically dumps
/// results.
string _dumpFile;

/// Flag to indicate periodic dumping is in progress
bool _dumping;

//@}


private:

/// The assignment operator is private and must never be called.
/// In fact, it should not even be implemented.
AnalysisHandler& operator=(const AnalysisHandler&);

/// The copy constructor is private and must never be called. In
/// fact, it should not even be implemented.
AnalysisHandler(const AnalysisHandler&);

};

}

#endif

diff --git a/include/Rivet/Event.hh b/include/Rivet/Event.hh
--- a/include/Rivet/Event.hh
+++ b/include/Rivet/Event.hh
@@ -1,221 +1,212 @@
// -*- C++ -*-
#ifndef RIVET_Event_HH
#define RIVET_Event_HH

#include "Rivet/Config/RivetCommon.hh"
#include "Rivet/Particle.hh"
#include "Rivet/Projection.hh"

namespace Rivet {


/// Rivet wrapper for HepMC event and Projection references.
///
/// Event is a concrete class representing a generated event in Rivet. It is
/// constructed given a HepMC::GenEvent, a pointer to which is kept by the
/// Event object throughout its lifetime. The user must therefore make sure
/// that the corresponding HepMC::GenEvent will persist at least as long as
/// the Event object.
///
/// In addition to the HepMC::GenEvent object the Event also keeps track of
/// all Projection objects which have been applied to the Event so far.
class Event {
public:

/// @name Constructors and destructors.
//@{

/// Constructor from a HepMC GenEvent pointer
Event(const GenEvent* ge, bool strip = false)
  : _genevent_original(ge) {
  assert(ge);
  _genevent = *ge;
  if ( strip ) _strip(_genevent);
  _init(*ge);
}

/// Constructor from a HepMC GenEvent reference
/// @deprecated HepMC uses pointers, so we should talk to HepMC via pointers
Event(const GenEvent& ge, bool strip = false)
  : _genevent_original(&ge), _genevent(ge) {
  if ( strip ) _strip(_genevent);
  _init(ge);
}

/// Copy constructor
Event(const Event& e)
  : _genevent_original(e._genevent_original), _genevent(e._genevent) { }

//@}


/// @name Major event properties
//@{

/// The generated event obtained from an external event generator
const GenEvent* genEvent() const { return &_genevent; }

/// The generated event obtained from an external event generator
const GenEvent* originalGenEvent() const { return _genevent_original; }

/// Get the beam particles
ParticlePair beams() const;

/// Get the beam centre-of-mass energy
double sqrtS() const;

/// Get the beam centre-of-mass energy per nucleon
double asqrtS() const;

-    /// Get the generator centrality (impact-parameter quantile in [0,1]; or -1 if undefined (usual for non-HI generators))
-    //double centrality() const;
-
-    // /// Get the boost to the beam centre-of-mass
-    // Vector3 beamCMSBoost() const;
-
-    // /// Get the boost to the beam centre-of-mass
-    // LorentzTransform beamCMSTransform();
-
//@}


/// @name Access to event particles
//@{

/// All the raw GenEvent particles, wrapped in Rivet::Particle objects
const Particles& allParticles() const;

/// @brief All the raw GenEvent particles, wrapped in Rivet::Particle objects, but with a Cut applied
///
/// @note Due to the cut, this returns by value, i.e. involves an expensive copy
inline Particles allParticles(const Cut& c) const {
  return filter_select(allParticles(), c);
}

/// @brief All the raw GenEvent particles, wrapped in Rivet::Particle objects, but with a selection function applied
///
/// @note Due to the cut, this returns by value, i.e. involves an expensive copy
template <typename FN>
inline Particles allParticles(const FN& f) const {
  return filter_select(allParticles(), f);
}

/// @brief The generation weight associated with the event
///
/// @todo This needs to be revisited when we finally add the mechanism to
/// support NLO counter-events and weight vectors.
std::valarray<double> weights() const;

/// @brief Obsolete weight method. Always returns 1 now.
DEPRECATED("Event weight does not need to be included anymore. For compatibility, it's always == 1 now.")
double weight() const { return 1.0; }

//@}


/// @name Projection running
//@{

/// @brief Add a projection @a p to this Event.
///
/// If an equivalent Projection has been applied before, the
/// Projection::project(const Event&) of @a p is not called and a reference
/// to the previous equivalent projection is returned. If no previous
/// Projection was found, the Projection::project(const Event&) of @a p is
/// called and a reference to @a p is returned.
///
/// @todo Can make this non-templated, since only cares about ptr to Projection base class
///
/// @note Comparisons here are by direct pointer comparison, because
/// equivalence is guaranteed if pointers are equal, and inequivalence
/// guaranteed if they aren't, thanks to the ProjectionHandler registry
template <typename PROJ>
const PROJ& applyProjection(PROJ& p) const {
  Log& log = Log::getLog("Rivet.Event");
  static bool docaching = getEnvParam("RIVET_CACHE_PROJECTIONS", true);
  if (docaching) {
    log << Log::TRACE << "Applying projection " << &p << " (" << p.name()
        << ") -> comparing to projections " << _projections << std::endl;
    // First search for this projection *or an equivalent* in the already-executed list
    const Projection* cpp(&p);
    /// @note Currently using reint cast to integer type to bypass operator==(Proj*, Proj*)
    // std::set<const Projection*>::const_iterator old = _projections.find(cpp);
    std::set<const Projection*>::const_iterator old = std::begin(_projections);
    std::uintptr_t recpp = reinterpret_cast<std::uintptr_t>(cpp);
    for (; old != _projections.end(); ++old)
      if (reinterpret_cast<std::uintptr_t>(*old) == recpp) break;
    if (old != _projections.end()) {
      log << Log::TRACE << "Equivalent projection found -> returning already-run projection " << *old << std::endl;
      const Projection& pRef = **old;
      return pcast<PROJ>(pRef);
    }
    log << Log::TRACE << "No equivalent projection in the already-run list -> projecting now" << std::endl;
  } else {
    log << Log::TRACE << "Applying projection " << &p << " (" << p.name()
        << ") WITHOUT projection caching & comparison" << std::endl;
  }
  // If this one hasn't been run yet on this event, run it and add to the list
  Projection* pp = const_cast<Projection*>(static_cast<const Projection*>(&p));
  pp->_isValid = true;
  pp->project(*this);
  if (docaching) _projections.insert(pp);
  return p;
}

/// @brief Add a projection @a p to this Event by pointer.
template <typename PROJ>
const PROJ& applyProjection(PROJ* pp) const {
  if (!pp) throw Error("Event::applyProjection(PROJ*): Projection pointer is null.");
  return applyProjection(*pp);
}

//@}
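// Usage sketch: analyses normally reach this via the ProjectionApplier
// apply<>() helper rather than calling applyProjection() directly, e.g.
// (assuming a FinalState was declared as "FS" in the analysis init()):
//
//   const FinalState& fs = apply<FinalState>(event, "FS");  // cached after the first call
//   const Particles& particles = fs.particles();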
private:

/// @brief Actual (shared) implementation of the constructors from GenEvents
void _init(const GenEvent& ge);

/// @brief Remove uninteresting or unphysical particles in the
/// GenEvent to speed up searches.
void _strip(GenEvent& ge);

// /// @brief Convert the GenEvent to use conventional alignment
// ///
// /// For example, FHerwig only produces DIS events in the unconventional
// /// hadron-lepton orientation and has to be corrected for DIS analysis
// /// portability.
// void _geNormAlignment();

/// @brief The generated event, as obtained from an external generator.
///
/// This is the original GenEvent. In practise the version seen by users
/// will often/always be a modified one.
///
/// @todo Provide access to this via an Event::originalGenEvent() method? If requested...
const GenEvent* _genevent_original;

/// @brief The GenEvent used by Rivet analysis projections etc.
///
/// This version may be rotated to a "normal" alignment, have
/// generator-specific particles stripped out, etc. If an analysis is
/// affected by these modifications, it is probably an unphysical analysis!
///
/// Stored as a non-pointer since it may get overwritten, and memory for
/// copying and cleanup is neater this way.
/// @todo Change needed for HepMC3?
mutable GenEvent _genevent;

/// All the GenEvent particles, wrapped as Rivet::Particles
/// @note To be populated lazily, hence mutability
mutable Particles _particles;

/// The set of Projection objects applied so far
mutable std::set<const Projection*> _projections;

};

}

#endif

diff --git a/include/Rivet/Tools/CentralityBinner.hh b/include/Rivet/Tools/CentralityBinner.hh
--- a/include/Rivet/Tools/CentralityBinner.hh
+++ b/include/Rivet/Tools/CentralityBinner.hh
@@ -1,831 +1,829 @@
// -*- C++ -*-
#ifndef RIVET_CENTRALITYBINNER_HH
#define RIVET_CENTRALITYBINNER_HH

#include
#include "Rivet/Config/RivetCommon.hh"
#include "Rivet/Tools/RivetYODA.hh"
#include "Rivet/Projections/HepMCHeavyIon.hh"

namespace Rivet {

/**
 * @brief Base class for projections giving the value of an observable
 * sensitive to the centrality of a collision.
 *
 * @author Leif Lönnblad
 *
 * The centrality of a collision is not really an observable, but the
 * concept is anyway often used in the heavy ion community as if it were
 * just that.
 *
 * This base class can be used to provide an estimator for the centrality
 * by projecting down to a single number which then can be used by a
 * CentralityBinner object to select a histogram to be filled with another
 * observable depending on centrality percentile.
 *
 * The estimate() should be a non-negative number with large values
 * indicating a higher overlap than small ones. A negative value indicates
 * that the centrality estimate could not be calculated.
 *
 * In the best of all worlds the centrality estimator should be a proper
 * hadron-level observable corrected for detector effects, however, this
 * base class only returns the inverse of the impact_parameter member of
 * the GenHeavyIon object in a GenEvent if present and zero otherwise.
 */
class CentralityEstimator : public Projection {
public:

  /// Constructor.
  CentralityEstimator() : _estimate(-1.0) {
    setName("CentralityEstimator");
    declare(HepMCHeavyIon(), "HepMC");
  }

  /// Clone on the heap.
  DEFAULT_RIVET_PROJ_CLONE(CentralityEstimator);

protected:

  /// Perform the projection on the Event
  void project(const Event& e) {
    _estimate = -1.0;
    double imp = apply<HepMCHeavyIon>(e, "HepMC").impact_parameter();
    if ( imp < 0.0 ) return;
    _estimate = imp > 0.0 ? 1.0/imp : numeric_limits<double>::max();
  }

  /// Compare projections
  CmpState compare(const Projection& p) const {
    return mkNamedPCmp(p, "HepMC");
  }

public:

  /// The value of the centrality estimate.
  double estimate() const { return _estimate; }

protected:

  /// The value of the centrality estimate.
  double _estimate;

};


/// This is a traits class describing how to handle object handles by
/// CentralityBinner. The default implementation basically describes
/// what to do with Histo1DPtr.
template <typename T>
struct CentralityBinTraits {

  /// Make a clone of the given object.
  static T clone(const T& t) {
    return T(t->newclone());
  }

  /// Add the contents of @a o to @a t.
  static void add(T& t, const T& o) {
    *t += *o;
  }

  /// Scale the contents of a given object.
  static void scale(T& t, double f) {
    t->scaleW(f);
  }

  /// Normalize the AnalysisObject to the sum of weights in a
  /// centrality bin.
  static void normalize(T& t, double sumw) {
    if ( t->sumW() > 0.0 ) t->normalize(t->sumW()/sumw);
  }

  /// Return the path of an AnalysisObject.
  static string path(T t) {
    return t->path();
  }

};
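// Sketch of a concrete estimator (illustrative; the class name is hypothetical,
// and a real estimator would normally use a detector-corrected observable such
// as forward transverse energy):
//
//   class SumETFwdEstimator : public CentralityEstimator {
//   public:
//     SumETFwdEstimator() { declare(FinalState(Cuts::abseta > 3.2), "FS"); }
//     DEFAULT_RIVET_PROJ_CLONE(SumETFwdEstimator);
//   protected:
//     void project(const Event& e) {
//       _estimate = 0.0;
//       for (const Particle& p : apply<FinalState>(e, "FS").particles())
//         _estimate += p.Et();
//     }
//     CmpState compare(const Projection& p) const { return mkNamedPCmp(p, "FS"); }
//   };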
/// The sole purpose of the MergeDistance class is to provide a
/// "distance" for a potential merging of two neighboring bins in a
/// CentralityBinner.
struct MergeDistance {

  /// This function should return a generalized distance between two
  /// adjacent centrality bins to be merged. CentralityBinner will
  /// always try to merge bins with the smallest distance. @a cestLo
  /// and @a cestHi are the lower and upper edges of the resulting bin. @a
  /// weight is the resulting sum of event weights in the bin. @a
  /// clo and @a chi are the lower and upper percentile limits
  /// where the two bins currently reside. The two last arguments are
  /// the total number of events in the two bins and the total number
  /// of previous mergers respectively.
  static double dist(double cestLo, double cestHi, double weight,
                     double clo, double chi, double, double) {
    return (cestHi - cestLo)*weight/(cestHi*(chi - clo));
  }

};


/**
 * CentralityBinner contains a series of AnalysisObjects of the same
 * quantity, each in a different percentile of another quantity. For
 * example, a CentralityBinner may e.g. contain histograms of the
 * cross section differential in \f$ p_T \f$ in different centrality
 * regions for heavy ion collisions based on forward energy flow.
 **/
template <typename T = Histo1DPtr, typename MDist = MergeDistance>
class CentralityBinner : public ProjectionApplier {
public:

  /// Create a new empty CentralityBinner. @a maxbins is the maximum
  /// number of bins used by the binner. Default is 200, which is
  /// typically enough. @a wlim is the maximum allowed error of the
  /// centrality limits before a warning is emitted.
  CentralityBinner(int maxbins = 200, double wlim = 0.02)
    : _currentCEst(-1.0), _maxBins(maxbins), _warnlimit(wlim), _weightsum(0.0) {
    _percentiles.insert(0.0);
    _percentiles.insert(1.0);
  }

  /// Set the centrality projection to be used. Note that this
  /// projection must have already been declared to Rivet.
  void setProjection(const CentralityEstimator& p, string pname) {
    declare(p, pname);
    _estimator = pname;
  }

  /// Return the class name.
  virtual std::string name() const {
    return "Rivet::CentralityBinner";
  }

  /// Add an AnalysisObject in the region between @a cmin and @a cmax to
  /// this set of CentralityBinners. The range represents
  /// percentiles and must be between 0 and 100. No overlapping bins
  /// are allowed.
  ///
  /// Note that (cmin=0, cmax=5) means the five percent MOST central
  /// events, although the internal notation is reversed for
  /// convenience.
  ///
  /// Optionally supply corresponding limits @a cestmin and @a cestmax
  /// of the centrality estimator.
  void add(T t, double cmin, double cmax,
           double cestmin = -1.0, double cestmax = -1.0) {
    _percentiles.insert(max(1.0 - cmax/100.0, 0.0));
    _percentiles.insert(min(1.0 - cmin/100.0, 1.0));
    if ( _unfilled.empty() && _ready.empty() )
      _devnull = CentralityBinTraits<T>::clone(t);
    if ( cestmin < 0.0 )
      _unfilled.push_back(Bin(t, 1.0 - cmax/100.0, 1.0 - cmin/100.0));
    else
      _ready[t] = Bin(t, 1.0 - cmax/100.0, 1.0 - cmin/100.0, cestmin, cestmax);
  }

  /// Return one of the AnalysisObjects in the CentralityBinner for
  /// the given @a event. This version requires that a
  /// CentralityEstimator object has been assigned that can compute
  /// the value of the centrality estimator from the @a
  /// event. Optionally the @a weight of the event is given. This
  /// should be the weight that will be used to fill the
  /// AnalysisObject. If the centrality estimate is less than zero,
  /// the _devnull object will be returned.
  T select(const Event& event, double weight = 1.0) {
    return select(applyProjection<CentralityEstimator>(event, _estimator).estimate(), weight);
  }
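// Usage sketch (histogram members and projection name hypothetical):
//
//   CentralityBinner<Histo1DPtr> _cbinner;                 // analysis member
//   // init():
//   _cbinner.setProjection(SumETFwdEstimator(), "CEST");   // must already be declared to Rivet
//   _cbinner.add(_h_pt_c0005, 0., 5.);                     // 0-5% most central
//   _cbinner.add(_h_pt_c0510, 5., 10.);
//   // analyze():
//   _cbinner.select(event, weight)->fill(pT, weight);
//   // finalize():
//   _cbinner.finalize();
//   _cbinner.normalizePerEvent();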
This should be the weight that will be used to fill the /// AnalysisObject. If the centrality estimate is less than zero, /// the _devnull object will be returned. T select(double cest, double weight = 1.0); /// At the end of the run, calculate the percentiles and fill the /// AnalysisObjects provided with the add() function. This is /// typically called from the finalize method in an Analysis, but /// can also be called earlier, in which case the select /// functions can continue to be used as before, with the edges /// between the centrality regions now fixed. void finalize(); /// Normalize each AnalysisObject to the sum of event weights in the /// corresponding centrality bin. void normalizePerEvent() { for ( auto & b : _ready ) b.second.normalizePerEvent(); } /// Return a map of the bin edges of the centrality estimator indexed by /// the corresponding percentile. map<double,double> edges() const { map<double,double> ret; for ( auto & b : _ready ) { ret[1.0 - b.second._centLo] = b.second._cestLo; ret[1.0 - b.second._centHi] = b.second._cestHi; } return ret; } /// Return the current AnalysisObject from the latest call to select(). const T & current() const { return _currenT; } /// Return the value of the centrality estimator set in the latest /// call to select(). double estimator() const { return _currentCEst; } vector<T> allObjects() { vector<T> ret; for ( auto & fb : _flexiBins ) ret.push_back(fb._t); if ( !ret.empty() ) return ret; for ( auto b : _ready ) ret.push_back(b.second._t); return ret; } private: /// A flexible bin struct to be used to store temporary AnalysisObjects. struct FlexiBin { /// Construct with an initial centrality estimate and an event /// weight. FlexiBin(T & t, double cest = 0.0, double weight = 0.0) : _t(t), _cestLo(cest), _cestHi(cest), _weightsum(weight), _n(1), _m(0) {} /// Construct a temporary FlexiBin for finding a bin in a set. FlexiBin(double cest) : _cestLo(cest), _cestHi(cest), _weightsum(0.0), _n(0), _m(0) {} /// Merge in the contents of another FlexiBin into this. void merge(const FlexiBin & fb) { _cestLo = min(_cestLo, fb._cestLo); _cestHi = max(_cestHi, fb._cestHi); _weightsum += fb._weightsum; CentralityBinTraits<T>::add(_t, fb._t); _n += fb._n; _m += fb._m + 1; } /// Comparisons for containers. bool operator< (const FlexiBin & fb) const { return _cestLo < fb._cestLo; } /// Return true if the given centrality estimate is in the range /// of this bin. bool inRange(double cest) const { return cest == _cestLo || ( _cestLo < cest && cest < _cestHi ); } /// The associated AnalysisObject. T _t; /// Current lower and upper edge of the centrality estimator for /// the fills in the associated AnalysisObject. double _cestLo, _cestHi; /// The sum of weights for all events entering the associated /// AnalysisObject. mutable double _weightsum; /// The number of times this bin has been selected. mutable int _n; /// The number of times this bin has been merged. mutable int _m; }; struct Bin { /// Construct a completely empty bin. Bin() : _centLo(-1.0), _centHi(-1.0), _cestLo(-1.0), _cestHi(-1.0), _weightsum(0.0), _underflow(0.0), _overflow(0.0), _ambiguous(0), _ambweight(0.0) {} /// Constructor taking an AnalysisObject and centrality interval /// as argument. Optionally the interval in the estimator can be /// given, in which case this bin is considered to be /// "final".
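// ------------------------------------------------------------------
// Illustrative usage sketch (hypothetical analysis code, not part of
// this patch; the estimator class and histogram names are invented).
// An analysis holding a CentralityBinner<Histo1DPtr> member _cbinner
// might use the API documented above roughly as follows:
//
//   // in init(), after declaring a CentralityEstimator projection:
//   _cbinner.setProjection(myEstimator, "CEST");
//   book(_h_pt_central, "pt_c0005", 50, 0.0, 10.0);
//   _cbinner.add(_h_pt_central, 0, 5);      // the 5% most central events
//
//   // in analyze():
//   Histo1DPtr h = _cbinner.select(event);  // picks the percentile bin
//   h->fill(pT/GeV);
//
//   // in finalize():
//   _cbinner.finalize();                    // freeze the estimator edges
//   _cbinner.normalizePerEvent();
// ------------------------------------------------------------------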
Bin(T t, double centLo, double centHi, double cestLo = -1.0, double cestHi = -1.0) : _t(t), _centLo(centLo), _centHi(centHi), _cestLo(cestLo), _cestHi(cestHi), _weightsum(0.0), _underflow(0.0), _overflow(0.0), _ambiguous(0.0), _ambweight(0.0) {} /// Return true if the given centrality estimate is in the range /// of this AnalysisObject. bool inRange(double cest) const { return _cestLo >= 0 && _cestLo <= cest && ( _cestHi < 0.0 || cest <= _cestHi ); } /// Normalise the AnalysisObject to the sum of event weights in this bin. void normalizePerEvent() { CentralityBinTraits<T>::normalize(_t, _weightsum); } /// The AnalysisObject. T _t; /// The range in centrality. double _centLo, _centHi; /// The corresponding range in the centrality estimator. double _cestLo, _cestHi; /// The sum of event weights for this bin. double _weightsum; /// The weight in a final AnalysisObject that contains events /// below the centrality limit. double _underflow; /// The weight in a final AnalysisObject that contains events above /// the centrality limit. double _overflow; /// Number of ambiguous events in this bin. double _ambiguous; /// Sum of ambiguous weights. double _ambweight; }; protected: /// Convenient typedefs. typedef set<FlexiBin> FlexiBinSet; /// Find a bin corresponding to a given value of the centrality /// estimator. typename FlexiBinSet::iterator _findBin(double cest) { if ( _flexiBins.empty() ) return _flexiBins.end(); auto it = _flexiBins.lower_bound(FlexiBin(cest)); if ( it->_cestLo == cest ) return it; if ( it != _flexiBins.begin() ) --it; if ( it->_cestLo < cest && cest < it->_cestHi ) return it; return _flexiBins.end(); } /// The name of the CentralityEstimator projection to be used. string _estimator; /// The current temporary AnalysisObject selected for the centrality /// estimator calculated from the event presented in select(). T _currenT; /// The current value of the centrality estimator. double _currentCEst; /// The maximum number of dynamic bins to be used by the binner. int _maxBins; /// If the fraction of events in a bin that comes from adjacent /// centrality bins exceeds this, emit a warning. double _warnlimit; /// The unfilled AnalysisObjects where the estimator edges have not yet /// been determined. vector<Bin> _unfilled; /// The dynamic bins for ranges of centrality estimators. FlexiBinSet _flexiBins; /// The sum of all event weights so far. double _weightsum; /// The requested percentile limits. set<double> _percentiles; /// The filled AnalysisObjects where the estimator edges have been determined. map<T, Bin> _ready; /// A special AnalysisObject which will be filled if the centrality /// estimate is out of range (negative). T _devnull; public: /// Print out the _flexiBins to cerr. void debug(); void fulldebug(); }; /// Traits specialization for Profile histograms. template <> struct CentralityBinTraits<Profile1DPtr> { typedef Profile1DPtr T; /// Make a clone of the given object. static T clone(const T & t) { return Profile1DPtr(t->newclone()); } /// Add the contents of @a o to @a t. static void add(T & t, const T & o) { *t += *o; } /// Scale the contents of a given object. static void scale(T & t, double f) { t->scaleW(f); } static void normalize(T & t, double sumw) {} /// Return the path of an AnalysisObject. static string path(T t) { return t->path(); } }; /// Traits specialization for 2D Profile histograms. template <> struct CentralityBinTraits<Profile2DPtr> { typedef Profile2DPtr T; /// Make a clone of the given object.
static T clone(const T & t) { return Profile2DPtr(t->newclone()); } /// Add the contents of @a o to @a t. static void add(T & t, const T & o) { *t += *o; } /// Scale the contents of a given object. static void scale(T & t, double f) { t->scaleW(f); } static void normalize(T & t, double sumw) {} /// Return the path of an AnalysisObject. static string path(T t) { return t->path(); } }; template <typename T> struct CentralityBinTraits< vector<T> > { /// Make a clone of the given object. static vector<T> clone(const vector<T> & tv) { vector<T> rtv; for ( auto t : tv ) rtv.push_back(CentralityBinTraits<T>::clone(t)); return rtv; } /// Add the contents of @a o to @a t. static void add(vector<T> & tv, const vector<T> & ov) { for ( int i = 0, N = tv.size(); i < N; ++i ) CentralityBinTraits<T>::add(tv[i], ov[i]); } /// Scale the contents of a given object. static void scale(vector<T> & tv, double f) { for ( auto t : tv ) CentralityBinTraits<T>::scale(t, f); } static void normalize(vector<T> & tv, double sumw) { for ( auto t : tv ) CentralityBinTraits<T>::normalize(t, sumw); } /// Return the path of an AnalysisObject. static string path(const vector<T> & tv) { string ret = "(vector:"; for ( auto t : tv ) { ret += " "; ret += CentralityBinTraits<T>::path(t); } ret += ")"; return ret; } }; template <size_t I, typename... Types> struct TupleCentralityBinTraitsHelper { typedef tuple<Types...> Tuple; typedef typename tuple_element<I-1,Tuple>::type T; static void clone(Tuple & ret, const Tuple & tup) { get<I-1>(ret) = CentralityBinTraits<T>::clone(get<I-1>(tup)); TupleCentralityBinTraitsHelper<I-1,Types...>::clone(ret, tup); } static void add(Tuple & tup, const Tuple & otup) { CentralityBinTraits<T>::add(get<I-1>(tup),get<I-1>(otup)); TupleCentralityBinTraitsHelper<I-1,Types...>::add(tup, otup); } static void scale(Tuple & tup, double f) { CentralityBinTraits<T>::scale(get<I-1>(tup), f); TupleCentralityBinTraitsHelper<I-1,Types...>::scale(tup, f); } static void normalize(Tuple & tup, double sumw) { CentralityBinTraits<T>::normalize(get<I-1>(tup), sumw); TupleCentralityBinTraitsHelper<I-1,Types...>::normalize(tup, sumw); } static string path(const Tuple & tup) { return " " + CentralityBinTraits<T>::path(get<I-1>(tup)) + TupleCentralityBinTraitsHelper<I-1,Types...>::path(tup); } }; template <typename... Types> struct TupleCentralityBinTraitsHelper<0,Types...> { typedef tuple<Types...> Tuple; static void clone(Tuple &, const Tuple &) {} static void add(Tuple & tup, const Tuple & otup) {} static void scale(Tuple & tup, double f) {} static void normalize(Tuple & tup, double sumw) {} static string path(const Tuple & tup) {return "";} }; template <typename... Types> struct CentralityBinTraits< tuple<Types...> > { typedef tuple<Types...> Tuple; static const size_t N = tuple_size<Tuple>::value; /// Make a clone of the given object. static Tuple clone(const Tuple & tup) { Tuple ret; TupleCentralityBinTraitsHelper<N,Types...>::clone(ret, tup); return ret; } /// Add the contents of @a o to @a t. static void add(Tuple & tup, const Tuple & otup) { TupleCentralityBinTraitsHelper<N,Types...>::add(tup, otup); } /// Scale the contents of a given object. static void scale(Tuple & tup, double f) { TupleCentralityBinTraitsHelper<N,Types...>::scale(tup, f); } static void normalize(Tuple & tup, double sumw) { TupleCentralityBinTraitsHelper<N,Types...>::normalize(tup, sumw); } /// Return the path of an AnalysisObject. static string path(const Tuple & tup) { string ret = "(tuple:"; ret += TupleCentralityBinTraitsHelper<N,Types...>::path(tup); ret += ")"; return ret; } }; template <typename T, typename MDist> T CentralityBinner<T,MDist>::select(double cest, double weight) { _currenT = _devnull; _currentCEst = cest; _weightsum += weight; // If estimator is negative, something has gone wrong.
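// (Overview of the logic below: if finalize() has already fixed the
// estimator edges, the fill is simply routed to the matching fixed bin.
// Otherwise the fill goes into a dynamic FlexiBin; once the number of
// dynamic bins exceeds _maxBins, the pair of adjacent bins whose merge
// is cheapest according to MDist::dist -- while respecting the requested
// percentile boundaries -- is merged.)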
if ( _currentCEst < 0.0 ) return _currenT; // If we already have finalized the limits on the centrality // estimator, we just add the weights to their bins and return the // corresponding AnalysisObject. if ( _unfilled.empty() ) { for ( auto & b : _ready ) if ( b.second.inRange(_currentCEst) ) { b.second._weightsum += weight; return b.second._t; } return _currenT; } auto it = _findBin(cest); if ( it == _flexiBins.end() ) { _currenT = CentralityBinTraits<T>::clone(_unfilled.begin()->_t); it = _flexiBins.insert(FlexiBin(_currenT, _currentCEst, weight)).first; } else { it->_weightsum += weight; ++(it->_n); _currenT = it->_t; } if ( (int)_flexiBins.size() <= _maxBins ) return _currenT; set<double>::iterator citn = _percentiles.begin(); set<double>::iterator cit0 = citn++; auto selectit = _flexiBins.end(); double mindist = -1.0; double acc = 0.0; auto next = _flexiBins.begin(); auto prev = next++; for ( ; next != _flexiBins.end(); prev = next++ ) { acc += prev->_weightsum/_weightsum; if ( acc > *citn ) { cit0 = citn++; continue; } if ( acc + next->_weightsum/_weightsum > *citn ) continue; double dist = MDist::dist(prev->_cestLo, next->_cestHi, next->_weightsum + prev->_weightsum, *cit0, *citn, next->_n + prev->_n, next->_m + prev->_m); if ( mindist < 0.0 || dist < mindist ) { selectit = prev; mindist = dist; } } if ( selectit == _flexiBins.end() ) return _currenT; auto mergeit = selectit++; FlexiBin merged = *mergeit; merged.merge(*selectit); if ( merged.inRange(cest) || selectit->inRange(cest) ) _currenT = merged._t; _flexiBins.erase(mergeit); _flexiBins.erase(selectit); _flexiBins.insert(merged); return _currenT; } template <typename T, typename MDist> void CentralityBinner<T,MDist>::finalize() { // Take the contents of the dynamical binning and fill the original // AnalysisObjects. double clo = 0.0; for ( const FlexiBin & fb : _flexiBins ) { double chi = min(clo + fb._weightsum/_weightsum, 1.0); for ( Bin & bin : _unfilled ) { double olo = bin._centLo; double ohi = bin._centHi; if ( clo > ohi || chi <= olo ) continue; // If we only have partial overlap we need to scale double lo = max(olo, clo); double hi = min(ohi, chi); T t = CentralityBinTraits<T>::clone(fb._t); double frac = (hi - lo)/(chi - clo); CentralityBinTraits<T>::scale(t, frac); CentralityBinTraits<T>::add(bin._t, t); bin._weightsum += fb._weightsum*frac; if ( clo <= olo ) bin._cestLo = fb._cestLo + (fb._cestHi - fb._cestLo)*(olo - clo)/(chi - clo); if ( clo < olo ) { bin._underflow = clo; bin._ambiguous += fb._n*frac; bin._ambweight += fb._weightsum*frac*(1.0 - frac); } if ( chi > ohi ) { bin._cestHi = fb._cestLo + (fb._cestHi - fb._cestLo)*(ohi - clo)/(chi - clo); bin._overflow = chi; bin._ambiguous += fb._n*frac; bin._ambweight += fb._weightsum*frac*(1.0 - frac); } } clo = chi; } _flexiBins.clear(); for ( Bin & bin : _unfilled ) { if ( bin._overflow == 0.0 ) bin._overflow = 1.0; _ready[bin._t] = bin; if ( bin._ambweight/bin._weightsum > _warnlimit ) MSG_WARNING("Analysis object \"" << CentralityBinTraits<T>::path(bin._t) << "\", contains events with centralities between " << bin._underflow*100.0 << " and " << bin._overflow*100.0 << "% (" << int(bin._ambiguous + 0.5) << " ambiguous events with effectively " << 100.0*bin._ambweight/bin._weightsum << "% of the weights)."
<< "Consider increasing the number of bins."); } _unfilled.clear(); } template void CentralityBinner::fulldebug() { cerr << endl; double acc = 0.0; set::iterator citn = _percentiles.begin(); set::iterator cit0 = citn++; int i = 0; for ( auto it = _flexiBins.begin(); it != _flexiBins.end(); ) { ++i; auto curr = it++; double w = curr->_weightsum/_weightsum; acc += w; if ( curr == _flexiBins.begin() || it == _flexiBins.end() || acc > *citn ) cerr << "*"; else cerr << " "; if ( acc > *citn ) cit0 = citn++; cerr << setw(6) << i << setw(12) << acc - w << setw(12) << acc << setw(8) << curr->_n << setw(8) << curr->_m << setw(12) << curr->_cestLo << setw(12) << curr->_cestHi << endl; } cerr << "Number of sampler bins: " << _flexiBins.size() << endl; } template void CentralityBinner::debug() { cerr << endl; double acc = 0.0; int i = 0; set::iterator citn = _percentiles.begin(); set::iterator cit0 = citn++; for ( auto it = _flexiBins.begin(); it != _flexiBins.end(); ) { auto curr = it++; ++i; double w = curr->_weightsum/_weightsum; acc += w; if ( curr == _flexiBins.begin() || it == _flexiBins.end() || acc > *citn ) { if ( acc > *citn ) cit0 = citn++; cerr << setw(6) << i << setw(12) << acc - w << setw(12) << acc << setw(8) << curr->_n << setw(8) << curr->_m << setw(12) << curr->_cestLo << setw(12) << curr->_cestHi << endl; } } cerr << "Number of sampler bins: " << _flexiBins.size() << endl; } /// Example of CentralityEstimator projection that the generated /// centrality as given in the GenHeavyIon object in HepMC3. class GeneratedCentrality: public CentralityEstimator { public: /// Constructor. - GeneratedCentrality() {} + GeneratedCentrality() { + declare(HepMCHeavyIon(), "HI"); + } /// Clone on the heap. DEFAULT_RIVET_PROJ_CLONE(GeneratedCentrality); protected: /// Perform the projection on the Event void project(const Event& e) { - _estimate = -1.0; -#ifdef ENABLE_HEPMC_3 - RivetHepMC::ConstGenHeavyIonPtr hi = e.genEvent()->heavy_ion(); - if ( hi ) _estimate = 100.0 - hi->centrality; // @TODO We don't really know how to interpret this number! 
-#endif + _estimate = apply<HepMCHeavyIon>(e, "HI").centrality(); } /// Compare projections CmpState compare(const Projection& p) const { return mkNamedPCmp(p, "GeneratedCentrality"); } }; } #endif diff --git a/include/Rivet/Tools/RivetHepMC.hh b/include/Rivet/Tools/RivetHepMC.hh --- a/include/Rivet/Tools/RivetHepMC.hh +++ b/include/Rivet/Tools/RivetHepMC.hh @@ -1,100 +1,105 @@ // -*- C++ -*- #ifndef RIVET_RivetHepMC_HH #define RIVET_RivetHepMC_HH +#include <valarray> + #ifdef ENABLE_HEPMC_3 #include "HepMC3/HepMC3.h" #include "HepMC3/Relatives.h" #include "HepMC3/Reader.h" namespace Rivet{ namespace RivetHepMC = HepMC3; using RivetHepMC::ConstGenParticlePtr; using RivetHepMC::ConstGenVertexPtr; using RivetHepMC::Relatives; using RivetHepMC::ConstGenHeavyIonPtr; using HepMC_IO_type = RivetHepMC::Reader; using PdfInfo = RivetHepMC::GenPdfInfo; } #else #include "HepMC/GenEvent.h" #include "HepMC/GenParticle.h" #include "HepMC/HeavyIon.h" #include "HepMC/GenVertex.h" #include "HepMC/Version.h" #include "HepMC/GenRanges.h" #include "HepMC/IO_GenEvent.h" namespace Rivet{ namespace RivetHepMC = HepMC; // HepMC 2.07 provides its own #defines typedef const HepMC::GenParticle* ConstGenParticlePtr; typedef const HepMC::GenVertex* ConstGenVertexPtr; typedef const HepMC::HeavyIon* ConstGenHeavyIonPtr; /// @brief Replicates the HepMC3 Relatives syntax using HepMC2 IteratorRanges /// This is necessary mainly because of capitalisation differences class Relatives{ public: constexpr Relatives(HepMC::IteratorRange relo): _internal(relo){} constexpr HepMC::IteratorRange operator()() const {return _internal;} operator HepMC::IteratorRange() const {return _internal;} const static Relatives PARENTS; const static Relatives CHILDREN; const static Relatives ANCESTORS; const static Relatives DESCENDANTS; private: const HepMC::IteratorRange _internal; }; using HepMC_IO_type = HepMC::IO_GenEvent; using PdfInfo = RivetHepMC::PdfInfo; } #endif #include "Rivet/Tools/RivetSTL.hh" #include "Rivet/Tools/Exceptions.hh" namespace Rivet { using RivetHepMC::GenEvent; using ConstGenEventPtr = std::shared_ptr<const GenEvent>; /// @todo Use mcutils?
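// (Context note on the declarations that follow, illustrative only: the
// functions added to HepMCUtils in this patch give version-agnostic access
// to per-event metadata, e.g.
//
//   std::vector<std::string> names = HepMCUtils::weightNames(*ge);
//   std::valarray<double>    ws    = HepMCUtils::weights(*ge);
//   double                   xs    = HepMCUtils::crossSection(*ge);
//
// so that callers do not have to branch on ENABLE_HEPMC_3 themselves.)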
namespace HepMCUtils{ ConstGenParticlePtr getParticlePtr(const RivetHepMC::GenParticle & gp); std::vector<ConstGenParticlePtr> particles(ConstGenEventPtr ge); std::vector<ConstGenParticlePtr> particles(const GenEvent *ge); std::vector<ConstGenVertexPtr> vertices(ConstGenEventPtr ge); std::vector<ConstGenVertexPtr> vertices(const GenEvent *ge); std::vector<ConstGenParticlePtr> particles(ConstGenVertexPtr gv, const Relatives &relo); std::vector<ConstGenParticlePtr> particles(ConstGenParticlePtr gp, const Relatives &relo); int uniqueId(ConstGenParticlePtr gp); int particles_size(ConstGenEventPtr ge); int particles_size(const GenEvent *ge); std::pair<ConstGenParticlePtr,ConstGenParticlePtr> beams(const GenEvent *ge); std::shared_ptr<HepMC_IO_type> makeReader(std::istream &istr, std::string * errm = 0); bool readEvent(std::shared_ptr<HepMC_IO_type> io, std::shared_ptr<GenEvent> evt); void strip(GenEvent & ge, const set<long> & stripid = {1, -1, 2, -2, 3, -3, 21}); + vector<string> weightNames(const GenEvent & ge); + double crossSection(const GenEvent & ge); + std::valarray<double> weights(const GenEvent & ge); } } #endif diff --git a/include/Rivet/Tools/RivetYODA.hh b/include/Rivet/Tools/RivetYODA.hh --- a/include/Rivet/Tools/RivetYODA.hh +++ b/include/Rivet/Tools/RivetYODA.hh @@ -1,697 +1,703 @@ #ifndef RIVET_RIVETYODA_HH #define RIVET_RIVETYODA_HH #include "Rivet/Config/RivetCommon.hh" #include "YODA/AnalysisObject.h" #include "YODA/Counter.h" #include "YODA/Histo1D.h" #include "YODA/Histo2D.h" #include "YODA/Profile1D.h" #include "YODA/Profile2D.h" #include "YODA/Scatter1D.h" #include "YODA/Scatter2D.h" #include "YODA/Scatter3D.h" #include #include namespace YODA { typedef std::shared_ptr<AnalysisObject> AnalysisObjectPtr; typedef std::shared_ptr<Counter> CounterPtr; typedef std::shared_ptr<Histo1D> Histo1DPtr; typedef std::shared_ptr<Histo2D> Histo2DPtr; typedef std::shared_ptr<Profile1D> Profile1DPtr; typedef std::shared_ptr<Profile2D> Profile2DPtr; typedef std::shared_ptr<Scatter1D> Scatter1DPtr; typedef std::shared_ptr<Scatter2D> Scatter2DPtr; typedef std::shared_ptr<Scatter3D> Scatter3DPtr; } namespace Rivet { class AnalysisObjectWrapper { public: virtual ~AnalysisObjectWrapper() {} virtual YODA::AnalysisObject* operator->() = 0; virtual YODA::AnalysisObject* operator->() const = 0; virtual const YODA::AnalysisObject & operator*() const = 0; /// @todo Rename to setActive(idx) virtual void setActiveWeightIdx(unsigned int iWeight) = 0; /// @todo Set active object for finalize virtual void setActiveFinalWeightIdx(unsigned int iWeight) = 0; virtual void unsetActiveWeight() = 0; bool operator ==(const AnalysisObjectWrapper& p) { return (this == &p); } bool operator !=(const AnalysisObjectWrapper& p) { return (this != &p); } protected: /// @todo do we need this? // virtual void reset() = 0; }; /// @todo /// implement scatter1dptr and scatter2dptr here /// these need to be multi-weighted eventually.
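// (Conceptual sketch of the multi-weight machinery declared below, assuming
// the AnalysisHandler drives it: during an event group every fill is
// buffered by a TupleWrapper; pushToPersistent() then replays the buffered
// fills once per weight stream into the persistent YODA objects, and
// pushToFinal() copies those into the set that finalize() is allowed to
// modify.)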
/* class Scatter1DPtr : public AnalysisObjectPtr { public: Scatter1DPtr() : _persistent() { } Scatter1DPtr(size_t len_of_weightvec, const YODA::Scatter1D& p) { for (size_t m = 0; m < len_of_weightvec; ++m) _persistent.push_back(make_shared<YODA::Scatter1D>(p)); } bool operator!() const { return !_persistent; } explicit operator bool() const { return bool(_persistent); } YODA::Scatter1D* operator->() { return _persistent.get(); } YODA::Scatter1D* operator->() const { return _persistent.get(); } YODA::Scatter1D & operator*() { return *_persistent; } const YODA::Scatter1D & operator*() const { return *_persistent; } protected: vector<YODA::Scatter1DPtr> _persistent; }; class Scatter2DPtr : public AnalysisObjectPtr { public: Scatter2DPtr(size_t len_of_weightvec, const YODA::Scatter2D& p) { for (size_t m = 0; m < len_of_weightvec; ++m) _persistent.push_back(make_shared<YODA::Scatter2D>(p)); } Scatter2DPtr() : _persistent() { } bool operator!() { return !_persistent; } explicit operator bool() { return bool(_persistent); } YODA::Scatter2D* operator->() { return _persistent.get(); } YODA::Scatter2D* operator->() const { return _persistent.get(); } YODA::Scatter2D & operator*() { return *_persistent; } const YODA::Scatter2D & operator*() const { return *_persistent; } protected: vector<YODA::Scatter2DPtr> _persistent; }; class Scatter3DPtr : public AnalysisObjectPtr { public: Scatter3DPtr(size_t len_of_weightvec, const YODA::Scatter3D& p) { for (size_t m = 0; m < len_of_weightvec; ++m) _persistent.push_back(make_shared<YODA::Scatter3D>(p)); } Scatter3DPtr() : _persistent() { } bool operator!() { return !_persistent; } explicit operator bool() { return bool(_persistent); } YODA::Scatter3D* operator->() { return _persistent.get(); } YODA::Scatter3D* operator->() const { return _persistent.get(); } YODA::Scatter3D & operator*() { return *_persistent; } const YODA::Scatter3D & operator*() const { return *_persistent; } protected: vector<YODA::Scatter3DPtr> _persistent; }; */ class MultiweightAOWrapper : public AnalysisObjectWrapper { public: using Inner = YODA::AnalysisObject; virtual void newSubEvent() = 0; virtual void pushToPersistent(const vector<valarray<double> >& weight) = 0; virtual void pushToFinal() = 0; virtual YODA::AnalysisObjectPtr activeYODAPtr() const = 0; + + virtual string basePath() const = 0; }; using Weight = double; template <class T> using Fill = pair<typename T::FillType, Weight>; template <class T> using Fills = multiset<Fill<T>>; // TODO TODO TODO // need to override the old fill method too! // otherwise we can't intercept existing fill calls in analysis code // TODO TODO TODO /// Wrappers for analysis objects to store all fills unaggregated, until collapsed template <class T> class TupleWrapper; template<> class TupleWrapper<YODA::Counter> : public YODA::Counter { public: typedef shared_ptr<TupleWrapper<YODA::Counter>> Ptr; TupleWrapper(const YODA::Counter & h) : YODA::Counter(h) {} // todo: do we need to deal with users using fractions directly? void fill( double weight=1.0, double fraction=1.0 ) { fills_.insert( {YODA::Counter::FillType(),weight} ); } void reset() { fills_.clear(); } const Fills<YODA::Counter> & fills() const { return fills_; } private: // x / weight pairs Fills<YODA::Counter> fills_; }; template<> class TupleWrapper<YODA::Histo1D> : public YODA::Histo1D { public: typedef shared_ptr<TupleWrapper<YODA::Histo1D>> Ptr; TupleWrapper(const YODA::Histo1D & h) : YODA::Histo1D(h) {} // todo: do we need to deal with users using fractions directly?
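// (Illustrative note: TupleWrapper<T> overrides fill() to *record* the fill
// arguments instead of filling immediately; e.g. for a Histo1D the call
//
//   h.fill(3.2, 0.5);
//
// only inserts the pair {3.2, 0.5} into fills_, to be replayed later, once
// per weight, when the event group is collapsed.)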
void fill( double x, double weight=1.0, double fraction=1.0 ) { if ( std::isnan(x) ) throw YODA::RangeError("X is NaN"); fills_.insert( { x , weight } ); } void reset() { fills_.clear(); } const Fills<YODA::Histo1D> & fills() const { return fills_; } private: // x / weight pairs Fills<YODA::Histo1D> fills_; }; template<> class TupleWrapper<YODA::Profile1D> : public YODA::Profile1D { public: typedef shared_ptr<TupleWrapper<YODA::Profile1D>> Ptr; TupleWrapper(const YODA::Profile1D & h) : YODA::Profile1D(h) {} // todo: do we need to deal with users using fractions directly? void fill( double x, double y, double weight=1.0, double fraction=1.0 ) { if ( std::isnan(x) ) throw YODA::RangeError("X is NaN"); if ( std::isnan(y) ) throw YODA::RangeError("Y is NaN"); fills_.insert( { YODA::Profile1D::FillType{x,y}, weight } ); } void reset() { fills_.clear(); } const Fills<YODA::Profile1D> & fills() const { return fills_; } private: // x / weight pairs Fills<YODA::Profile1D> fills_; }; template<> class TupleWrapper<YODA::Histo2D> : public YODA::Histo2D { public: typedef shared_ptr<TupleWrapper<YODA::Histo2D>> Ptr; TupleWrapper(const YODA::Histo2D & h) : YODA::Histo2D(h) {} // todo: do we need to deal with users using fractions directly? void fill( double x, double y, double weight=1.0, double fraction=1.0 ) { if ( std::isnan(x) ) throw YODA::RangeError("X is NaN"); if ( std::isnan(y) ) throw YODA::RangeError("Y is NaN"); fills_.insert( { YODA::Histo2D::FillType{x,y}, weight } ); } void reset() { fills_.clear(); } const Fills<YODA::Histo2D> & fills() const { return fills_; } private: // x / weight pairs Fills<YODA::Histo2D> fills_; }; template<> class TupleWrapper<YODA::Profile2D> : public YODA::Profile2D { public: typedef shared_ptr<TupleWrapper<YODA::Profile2D>> Ptr; TupleWrapper(const YODA::Profile2D & h) : YODA::Profile2D(h) {} // todo: do we need to deal with users using fractions directly? void fill( double x, double y, double z, double weight=1.0, double fraction=1.0 ) { if ( std::isnan(x) ) throw YODA::RangeError("X is NaN"); if ( std::isnan(y) ) throw YODA::RangeError("Y is NaN"); if ( std::isnan(z) ) throw YODA::RangeError("Z is NaN"); fills_.insert( { YODA::Profile2D::FillType{x,y,z}, weight } ); } void reset() { fills_.clear(); } const Fills<YODA::Profile2D> & fills() const { return fills_; } private: // x / weight pairs Fills<YODA::Profile2D> fills_; }; template<> class TupleWrapper<YODA::Scatter1D> : public YODA::Scatter1D { public: typedef shared_ptr<TupleWrapper<YODA::Scatter1D>> Ptr; TupleWrapper(const YODA::Scatter1D & h) : YODA::Scatter1D(h) {} }; template<> class TupleWrapper<YODA::Scatter2D> : public YODA::Scatter2D { public: typedef shared_ptr<TupleWrapper<YODA::Scatter2D>> Ptr; TupleWrapper(const YODA::Scatter2D & h) : YODA::Scatter2D(h) {} }; template<> class TupleWrapper<YODA::Scatter3D> : public YODA::Scatter3D { public: typedef shared_ptr<TupleWrapper<YODA::Scatter3D>> Ptr; TupleWrapper(const YODA::Scatter3D & h) : YODA::Scatter3D(h) {} }; template <class T> class Wrapper : public MultiweightAOWrapper { friend class Analysis; public: using Inner = T; /* @todo * some things are not really well-defined here * for instance: fill() in the finalize() method and integral() in * the analyze() method. */ Wrapper() = default; Wrapper(const vector<string>& weightnames, const T & p); ~Wrapper(); typename T::Ptr active() const; /* @todo this probably needs to loop over all? */ bool operator!() const { return !_active; } // Don't use active() here, assert will catch explicit operator bool() const { return static_cast<bool>(_active); } // Don't use active() here, assert will catch T * operator->() { return active().get(); } T * operator->() const { return active().get(); } T & operator*() { return *active(); } const T & operator*() const { return *active(); } /* @todo * these need to be re-thought out. void reset() { active()->reset(); } */ /* @todo * these probably need to loop over all?
* do we even want to provide equality? */ /* @todo * how about no. friend bool operator==(Wrapper a, Wrapper b){ if (a._persistent.size() != b._persistent.size()) return false; for (size_t i = 0; i < a._persistent.size(); i++) { if (a._persistent.at(i) != b._persistent.at(i)) { return false; } } return true; } friend bool operator!=(Wrapper a, Wrapper b){ return !(a == b); } friend bool operator<(Wrapper a, Wrapper b){ if (a._persistent.size() >= b._persistent.size()) return false; for (size_t i = 0; i < a._persistent.size(); i++) { if (*(a._persistent.at(i)) >= *(b._persistent.at(i))) { return false; } } return true; } */ private: void setActiveWeightIdx(unsigned int iWeight) { _active = _persistent.at(iWeight); } void setActiveFinalWeightIdx(unsigned int iWeight) { _active = _final.at(iWeight); } /* this is for dev only---we shouldn't need this in real runs. */ void unsetActiveWeight() { _active.reset(); } void newSubEvent(); virtual YODA::AnalysisObjectPtr activeYODAPtr() const { return _active; } const vector<typename T::Ptr> & persistent() const { return _persistent; } const vector<typename T::Ptr> & final() const { return _final; } /* to be implemented for each type */ void pushToPersistent(const vector<valarray<double> >& weight); void pushToFinal(); /* M of these, one for each weight */ vector<typename T::Ptr> _persistent; /* This is the copy of _persistent that will be passed to finalize(). */ vector<typename T::Ptr> _final; /* N of these, one for each event in evgroup */ vector<typename TupleWrapper<T>::Ptr> _evgroup; typename T::Ptr _active; + string basePath() const { return _basePath; } + + string _basePath; + // do we need implicit cast? // operator typename T::Ptr () { // return _active; // } friend class AnalysisHandler; }; /// We need our own shared_ptr class, so we can dispatch -> and * /// all the way down to the inner YODA analysis objects /// /// TODO: provide remaining functionality that shared_ptr has (not needed right now) /// template <class T> class rivet_shared_ptr { public: typedef T value_type; rivet_shared_ptr() = default; rivet_shared_ptr(decltype(nullptr)) : _p(nullptr) {} /// Convenience constructor, pass through to the Wrapper constructor rivet_shared_ptr(const vector<string>& weightNames, const typename T::Inner & p) : _p( make_shared<T>(weightNames, p) ) {} template <class U> rivet_shared_ptr(const shared_ptr<U> & p) : _p(p) {} template <class U> rivet_shared_ptr(const rivet_shared_ptr<U> & p) : _p(p.get()) {} // Goes right through to the active YODA object's members T & operator->() { return *_p; } const T & operator->() const { return *_p; } // The active YODA object typename T::Inner & operator*() { return **_p; } const typename T::Inner & operator*() const { return **_p; } bool operator!() const { return !_p || !(*_p); } explicit operator bool() const { return _p && bool(*_p); } template <class U> bool operator==(const rivet_shared_ptr<U> & other) const { return _p == other._p; } template <class U> bool operator!=(const rivet_shared_ptr<U> & other) const { return _p != other._p; } template <class U> bool operator<(const rivet_shared_ptr<U> & other) const { return _p < other._p; } template <class U> bool operator>(const rivet_shared_ptr<U> & other) const { return _p > other._p; } template <class U> bool operator<=(const rivet_shared_ptr<U> & other) const { return _p <= other._p; } template <class U> bool operator>=(const rivet_shared_ptr<U> & other) const { return _p >= other._p; } shared_ptr<T> get() const { return _p; } private: shared_ptr<T> _p; }; // every object listed here needs a virtual fill method in YODA, // otherwise the Tuple fakery won't work.
using MultiweightAOPtr = rivet_shared_ptr<MultiweightAOWrapper>; using Histo1DPtr = rivet_shared_ptr<Wrapper<YODA::Histo1D>>; using Histo2DPtr = rivet_shared_ptr<Wrapper<YODA::Histo2D>>; using Profile1DPtr = rivet_shared_ptr<Wrapper<YODA::Profile1D>>; using Profile2DPtr = rivet_shared_ptr<Wrapper<YODA::Profile2D>>; using CounterPtr = rivet_shared_ptr<Wrapper<YODA::Counter>>; using Scatter1DPtr = rivet_shared_ptr<Wrapper<YODA::Scatter1D>>; using Scatter2DPtr = rivet_shared_ptr<Wrapper<YODA::Scatter2D>>; using Scatter3DPtr = rivet_shared_ptr<Wrapper<YODA::Scatter3D>>; using YODA::Counter; using YODA::Histo1D; using YODA::HistoBin1D; using YODA::Histo2D; using YODA::HistoBin2D; using YODA::Profile1D; using YODA::ProfileBin1D; using YODA::Profile2D; using YODA::ProfileBin2D; using YODA::Scatter1D; using YODA::Point1D; using YODA::Scatter2D; using YODA::Point2D; using YODA::Scatter3D; using YODA::Point3D; /// Function to get a map of all the refdata in a paper with the /// given @a papername. map<string, YODA::AnalysisObjectPtr> getRefData(const string& papername); /// @todo Also provide a Scatter3D getRefData() version? /// Get the file system path to the reference file for this paper. string getDatafilePath(const string& papername); /// Traits class to access the type of the AnalysisObject in the /// reference files. template <class T> struct ReferenceTraits {}; template<> struct ReferenceTraits<Counter> { typedef Counter RefT; }; template<> struct ReferenceTraits<Scatter1D> { typedef Scatter1D RefT; }; template<> struct ReferenceTraits<Histo1D> { typedef Scatter2D RefT; }; template<> struct ReferenceTraits<Profile1D> { typedef Scatter2D RefT; }; template<> struct ReferenceTraits<Scatter2D> { typedef Scatter2D RefT; }; template<> struct ReferenceTraits<Histo2D> { typedef Scatter3D RefT; }; template<> struct ReferenceTraits<Profile2D> { typedef Scatter3D RefT; }; template<> struct ReferenceTraits<Scatter3D> { typedef Scatter3D RefT; }; /// If @a dst and @a src are both of the same subclass T, copy the /// contents of @a src into @a dst and return true. Otherwise return /// false. template <class T> inline bool aocopy(YODA::AnalysisObjectPtr src, YODA::AnalysisObjectPtr dst) { shared_ptr<T> tsrc = dynamic_pointer_cast<T>(src); if ( !tsrc ) return false; shared_ptr<T> tdst = dynamic_pointer_cast<T>(dst); if ( !tdst ) return false; *tdst = *tsrc; return true; } /// If @a dst and @a src are both of the same subclass T, add the /// contents of @a src into @a dst and return true. Otherwise return /// false. template <class T> inline bool aoadd(YODA::AnalysisObjectPtr dst, YODA::AnalysisObjectPtr src, double scale) { shared_ptr<T> tsrc = dynamic_pointer_cast<T>(src); if ( !tsrc ) return false; shared_ptr<T> tdst = dynamic_pointer_cast<T>(dst); if ( !tdst ) return false; tsrc->scaleW(scale); *tdst += *tsrc; return true; } /// If @a dst is the same subclass as @a src, copy the contents of @a /// src into @a dst and return true. Otherwise return false. bool copyao(YODA::AnalysisObjectPtr src, YODA::AnalysisObjectPtr dst); /// If @a dst is the same subclass as @a src, scale the contents of /// @a src with @a scale and add it to @a dst and return true. Otherwise /// return false. bool addaos(YODA::AnalysisObjectPtr dst, YODA::AnalysisObjectPtr src, double scale); /// Check if two analysis objects have the same binning or, if not /// binned, are in other ways compatible.
template <typename TPtr> inline bool bookingCompatible(TPtr a, TPtr b) { return a->sameBinning(*b); } inline bool bookingCompatible(CounterPtr, CounterPtr) { return true; } inline bool bookingCompatible(Scatter1DPtr a, Scatter1DPtr b) { return a->numPoints() == b->numPoints(); } inline bool bookingCompatible(Scatter2DPtr a, Scatter2DPtr b) { return a->numPoints() == b->numPoints(); } inline bool bookingCompatible(Scatter3DPtr a, Scatter3DPtr b) { return a->numPoints() == b->numPoints(); } inline bool bookingCompatible(YODA::CounterPtr, YODA::CounterPtr) { return true; } inline bool bookingCompatible(YODA::Scatter1DPtr a, YODA::Scatter1DPtr b) { return a->numPoints() == b->numPoints(); } inline bool bookingCompatible(YODA::Scatter2DPtr a, YODA::Scatter2DPtr b) { return a->numPoints() == b->numPoints(); } inline bool bookingCompatible(YODA::Scatter3DPtr a, YODA::Scatter3DPtr b) { return a->numPoints() == b->numPoints(); } /// Class representing a YODA path with all its components. class AOPath { public: /// Constructor AOPath(string fullpath); /// The full path. string path() const { return _path; } /// The analysis name. string analysis() const { return _analysis; } /// The analysis name with options. string analysisWithOptions() const { return _analysis + _optionstring; } /// The base name of the analysis object. string name() const { return _name; } /// The weight name. string weight() const { return _weight; } /// Is this a RAW (filling) object? bool isRaw() const { return _raw; } /// Is this a temporary (filling) object? bool isTmp() const { return _tmp; } /// Is this a reference object? bool isRef() const { return _ref; } /// The string describing the options passed to the analysis. string optionString() const { return _optionstring; } /// Are there options passed to the analysis? bool hasOptions() const { return !_options.empty(); } /// Don't pass this option to the analysis. void removeOption(string opt) { _options.erase(opt); fixOptionString(); } /// Pass this option to the analysis. void setOption(string opt, string val) { _options[opt] = val; fixOptionString();} /// Was this option passed to the analysis? bool hasOption(string opt) const { return _options.find(opt) != _options.end(); } /// Get the value of this option. string getOption(string opt) const { auto it = _options.find(opt); if ( it != _options.end() ) return it->second; return ""; } /// Reset the option string after changes. void fixOptionString(); /// Create a full path (and set it) for this. string mkPath() const; string setPath() { return _path = mkPath(); } /// Print out information void debug() const; /// Make this class ordered. bool operator<(const AOPath & other) const { return _path < other._path; } /// Check if path is valid.
bool valid() const { return _valid; } bool operator!() const { return !valid(); } private: bool _valid; string _path; string _analysis; string _optionstring; string _name; string _weight; bool _raw; bool _tmp; bool _ref; map<string,string> _options; }; } #endif diff --git a/src/Core/Analysis.cc b/src/Core/Analysis.cc --- a/src/Core/Analysis.cc +++ b/src/Core/Analysis.cc @@ -1,1066 +1,1073 @@ // -*- C++ -*- #include "Rivet/Config/RivetCommon.hh" #include "Rivet/Analysis.hh" #include "Rivet/AnalysisHandler.hh" #include "Rivet/AnalysisInfo.hh" #include "Rivet/Tools/BeamConstraint.hh" #include "Rivet/Projections/ImpactParameterProjection.hh" #include "Rivet/Projections/GeneratedPercentileProjection.hh" #include "Rivet/Projections/UserCentEstimate.hh" #include "Rivet/Projections/CentralityProjection.hh" namespace Rivet { Analysis::Analysis(const string& name) : _analysishandler(nullptr) { ProjectionApplier::_allowProjReg = false; _defaultname = name; unique_ptr<AnalysisInfo> ai = AnalysisInfo::make(name); assert(ai); _info = move(ai); assert(_info); } double Analysis::sqrtS() const { return handler().sqrtS(); } const ParticlePair& Analysis::beams() const { return handler().beams(); } const PdgIdPair Analysis::beamIds() const { return handler().beamIds(); } const string Analysis::histoDir() const { /// @todo Cache in a member variable string _histoDir; if (_histoDir.empty()) { _histoDir = "/" + name(); if (handler().runName().length() > 0) { _histoDir = "/" + handler().runName() + _histoDir; } replace_all(_histoDir, "//", "/"); //< iterates until none } return _histoDir; } const string Analysis::histoPath(const string& hname) const { const string path = histoDir() + "/" + hname; return path; } const string Analysis::histoPath(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const { return histoDir() + "/" + mkAxisCode(datasetId, xAxisId, yAxisId); } const string Analysis::mkAxisCode(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const { std::stringstream axisCode; axisCode << "d"; if (datasetId < 10) axisCode << 0; axisCode << datasetId; axisCode << "-x"; if (xAxisId < 10) axisCode << 0; axisCode << xAxisId; axisCode << "-y"; if (yAxisId < 10) axisCode << 0; axisCode << yAxisId; return axisCode.str(); } Log& Analysis::getLog() const { string logname = "Rivet.Analysis." + name(); return Log::getLog(logname); } /////////////////////////////////////////// size_t Analysis::numEvents() const { return handler().numEvents(); } double Analysis::sumW() const { return handler().sumW(); } double Analysis::sumW2() const { return handler().sumW2(); } /////////////////////////////////////////// bool Analysis::isCompatible(const ParticlePair& beams) const { return isCompatible(beams.first.pid(), beams.second.pid(), beams.first.energy(), beams.second.energy()); } bool Analysis::isCompatible(PdgId beam1, PdgId beam2, double e1, double e2) const { PdgIdPair beams(beam1, beam2); pair<double,double> energies(e1, e2); return isCompatible(beams, energies); } // bool Analysis::beamIDsCompatible(const PdgIdPair& beams) const { // bool beamIdsOk = false; // for (const PdgIdPair& bp : requiredBeams()) { // if (compatible(beams, bp)) { // beamIdsOk = true; // break; // } // } // return beamIdsOk; // } // /// Check that the energies are compatible (within 1% or 1 GeV, whichever is larger, for a bit of UI forgiveness) // bool Analysis::beamEnergiesCompatible(const pair<double,double>& energies) const { // /// @todo Use some sort of standard ordering to improve comparisons, esp.
when the two beams are different particles // bool beamEnergiesOk = requiredEnergies().size() > 0 ? false : true; // typedef pair<double,double> DoublePair; // for (const DoublePair& ep : requiredEnergies()) { // if ((fuzzyEquals(ep.first, energies.first, 0.01) && fuzzyEquals(ep.second, energies.second, 0.01)) || // (fuzzyEquals(ep.first, energies.second, 0.01) && fuzzyEquals(ep.second, energies.first, 0.01)) || // (abs(ep.first - energies.first) < 1*GeV && abs(ep.second - energies.second) < 1*GeV) || // (abs(ep.first - energies.second) < 1*GeV && abs(ep.second - energies.first) < 1*GeV)) { // beamEnergiesOk = true; // break; // } // } // return beamEnergiesOk; // } // bool Analysis::beamsCompatible(const PdgIdPair& beams, const pair<double,double>& energies) const bool Analysis::isCompatible(const PdgIdPair& beams, const pair<double,double>& energies) const { // First check the beam IDs bool beamIdsOk = false; for (const PdgIdPair& bp : requiredBeams()) { if (compatible(beams, bp)) { beamIdsOk = true; break; } } if (!beamIdsOk) return false; // Next check that the energies are compatible (within 1% or 1 GeV, whichever is larger, for a bit of UI forgiveness) /// @todo Use some sort of standard ordering to improve comparisons, esp. when the two beams are different particles bool beamEnergiesOk = requiredEnergies().size() > 0 ? false : true; typedef pair<double,double> DoublePair; for (const DoublePair& ep : requiredEnergies()) { if ((fuzzyEquals(ep.first, energies.first, 0.01) && fuzzyEquals(ep.second, energies.second, 0.01)) || (fuzzyEquals(ep.first, energies.second, 0.01) && fuzzyEquals(ep.second, energies.first, 0.01)) || (abs(ep.first - energies.first) < 1*GeV && abs(ep.second - energies.second) < 1*GeV) || (abs(ep.first - energies.second) < 1*GeV && abs(ep.second - energies.first) < 1*GeV)) { beamEnergiesOk = true; break; } } return beamEnergiesOk; } /////////////////////////////////////////// double Analysis::crossSection() const { const YODA::Scatter1D::Points& ps = handler().crossSection()->points(); if (ps.size() != 1) { string errMsg = "cross section missing for analysis " + name(); throw Error(errMsg); } return ps[0].x(); } double Analysis::crossSectionPerEvent() const { return crossSection()/sumW(); } //////////////////////////////////////////////////////////// // Histogramming void Analysis::_cacheRefData() const { if (_refdata.empty()) { MSG_TRACE("Getting refdata cache for paper " << name()); _refdata = getRefData(getRefDataName()); } } // vector Analysis::getAllData(bool includeorphans) const{ // return handler().getData(includeorphans, false, false); // } CounterPtr & Analysis::book(CounterPtr & ctr, const string& cname, const string& title) { // const string path = histoPath(cname); // ctr = CounterPtr(handler().weightNames(), Counter(path, title)); // ctr = addAnalysisObject(ctr); // return ctr; return ctr = registerAO(Counter(histoPath(cname), title)); } CounterPtr & Analysis::book(CounterPtr & ctr, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, const string& title) { const string axisCode = mkAxisCode(datasetId, xAxisId, yAxisId); return book(ctr, axisCode, title); } Histo1DPtr & Analysis::book(Histo1DPtr & histo, const string& hname, size_t nbins, double lower, double upper, const string& title, const string& xtitle, const string& ytitle) { const string path = histoPath(hname); Histo1D hist = Histo1D(nbins, lower, upper, path, title); hist.setAnnotation("XLabel", xtitle); hist.setAnnotation("YLabel", ytitle); // histo = Histo1DPtr(handler().weightNames(), hist); // histo = addAnalysisObject(histo); //
return histo; return histo = registerAO(hist); } Histo1DPtr & Analysis::book(Histo1DPtr & histo, const string& hname, const initializer_list<double>& binedges, const string& title, const string& xtitle, const string& ytitle) { return book(histo, hname, vector<double>{binedges}, title, xtitle, ytitle); } Histo1DPtr & Analysis::book(Histo1DPtr & histo, const string& hname, const vector<double>& binedges, const string& title, const string& xtitle, const string& ytitle) { const string path = histoPath(hname); Histo1D hist = Histo1D(binedges, path, title); hist.setAnnotation("XLabel", xtitle); hist.setAnnotation("YLabel", ytitle); // histo = Histo1DPtr(handler().weightNames(), hist); // histo = addAnalysisObject(histo); // return histo; return histo = registerAO(hist); } Histo1DPtr & Analysis::book(Histo1DPtr & histo, const string& hname, const string& title, const string& xtitle, const string& ytitle) { const Scatter2D& refdata = refData(hname); return book(histo, hname, refdata, title, xtitle, ytitle); } Histo1DPtr & Analysis::book(Histo1DPtr & histo, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, const string& title, const string& xtitle, const string& ytitle) { const string axisCode = mkAxisCode(datasetId, xAxisId, yAxisId); return book(histo, axisCode, title, xtitle, ytitle); } Histo1DPtr & Analysis::book(Histo1DPtr& histo, const string& hname, const Scatter2D& refscatter, const string& title, const string& xtitle, const string& ytitle) { const string path = histoPath(hname); Histo1D hist = Histo1D(refscatter, path); hist.setTitle(title); hist.setAnnotation("XLabel", xtitle); hist.setAnnotation("YLabel", ytitle); if (hist.hasAnnotation("IsRef")) hist.rmAnnotation("IsRef"); // histo = Histo1DPtr(handler().weightNames(), hist); // histo = addAnalysisObject(histo); // return histo; return histo = registerAO(hist); } ///////////////// Histo2DPtr & Analysis::book(Histo2DPtr & h2d, const string& hname, size_t nxbins, double xlower, double xupper, size_t nybins, double ylower, double yupper, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { const string path = histoPath(hname); Histo2D hist(nxbins, xlower, xupper, nybins, ylower, yupper, path, title); hist.setAnnotation("XLabel", xtitle); hist.setAnnotation("YLabel", ytitle); hist.setAnnotation("ZLabel", ztitle); // h2d = Histo2DPtr(handler().weightNames(), hist); // h2d = addAnalysisObject(h2d); // return h2d; return h2d = registerAO(hist); } Histo2DPtr & Analysis::book(Histo2DPtr & h2d, const string& hname, const initializer_list<double>& xbinedges, const initializer_list<double>& ybinedges, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { return book(h2d, hname, vector<double>{xbinedges}, vector<double>{ybinedges}, title, xtitle, ytitle, ztitle); } Histo2DPtr & Analysis::book(Histo2DPtr & h2d, const string& hname, const vector<double>& xbinedges, const vector<double>& ybinedges, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { const string path = histoPath(hname); Histo2D hist(xbinedges, ybinedges, path, title); hist.setAnnotation("XLabel", xtitle); hist.setAnnotation("YLabel", ytitle); hist.setAnnotation("ZLabel", ztitle); // h2d = Histo2DPtr(handler().weightNames(), hist); // h2d = addAnalysisObject(h2d); // return h2d; return h2d = registerAO(hist); } Histo2DPtr & Analysis::book(Histo2DPtr & histo, const string& hname, const Scatter3D& refscatter, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { const string path = histoPath(hname);
Histo2D hist = Histo2D(refscatter, path); hist.setTitle(title); hist.setAnnotation("XLabel", xtitle); hist.setAnnotation("YLabel", ytitle); hist.setAnnotation("ZLabel", ztitle); if (hist.hasAnnotation("IsRef")) hist.rmAnnotation("IsRef"); // histo = Histo2DPtr(handler().weightNames(), hist); // histo = addAnalysisObject(histo); // return histo; return histo = registerAO(hist); } Histo2DPtr & Analysis::book(Histo2DPtr & histo, const string& hname, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { const Scatter3D& refdata = refData(hname); return book(histo, hname, refdata, title, xtitle, ytitle, ztitle); } Histo2DPtr & Analysis::book(Histo2DPtr & histo, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { const string axisCode = mkAxisCode(datasetId, xAxisId, yAxisId); return book(histo, axisCode, title, xtitle, ytitle, ztitle); } ///////////////// Profile1DPtr & Analysis::book(Profile1DPtr & p1d, const string& hname, size_t nbins, double lower, double upper, const string& title, const string& xtitle, const string& ytitle) { const string path = histoPath(hname); Profile1D prof(nbins, lower, upper, path, title); prof.setAnnotation("XLabel", xtitle); prof.setAnnotation("YLabel", ytitle); // p1d = Profile1DPtr(handler().weightNames(), prof); // p1d = addAnalysisObject(p1d); // return p1d; return p1d = registerAO(prof); } Profile1DPtr & Analysis::book(Profile1DPtr & p1d, const string& hname, const initializer_list<double>& binedges, const string& title, const string& xtitle, const string& ytitle) { return book(p1d, hname, vector<double>{binedges}, title, xtitle, ytitle); } Profile1DPtr & Analysis::book(Profile1DPtr & p1d, const string& hname, const vector<double>& binedges, const string& title, const string& xtitle, const string& ytitle) { const string path = histoPath(hname); Profile1D prof(binedges, path, title); prof.setAnnotation("XLabel", xtitle); prof.setAnnotation("YLabel", ytitle); // p1d = Profile1DPtr(handler().weightNames(), prof); // p1d = addAnalysisObject(p1d); // return p1d; return p1d = registerAO(prof); } Profile1DPtr & Analysis::book(Profile1DPtr & p1d, const string& hname, const Scatter2D& refscatter, const string& title, const string& xtitle, const string& ytitle) { const string path = histoPath(hname); Profile1D prof(refscatter, path); prof.setTitle(title); prof.setAnnotation("XLabel", xtitle); prof.setAnnotation("YLabel", ytitle); if (prof.hasAnnotation("IsRef")) prof.rmAnnotation("IsRef"); // p1d = Profile1DPtr(handler().weightNames(), prof); // p1d = addAnalysisObject(p1d); // return p1d; return p1d = registerAO(prof); } Profile1DPtr & Analysis::book(Profile1DPtr & p1d, const string& hname, const string& title, const string& xtitle, const string& ytitle) { const Scatter2D& refdata = refData(hname); book(p1d, hname, refdata, title, xtitle, ytitle); return p1d; } Profile1DPtr & Analysis::book(Profile1DPtr & p1d, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, const string& title, const string& xtitle, const string& ytitle) { const string axisCode = mkAxisCode(datasetId, xAxisId, yAxisId); return book(p1d, axisCode, title, xtitle, ytitle); } /////////////////// Profile2DPtr & Analysis::book(Profile2DPtr & p2d, const string& hname, size_t nxbins, double xlower, double xupper, size_t nybins, double ylower, double yupper, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { const string path =
histoPath(hname); Profile2D prof(nxbins, xlower, xupper, nybins, ylower, yupper, path, title); prof.setAnnotation("XLabel", xtitle); prof.setAnnotation("YLabel", ytitle); prof.setAnnotation("ZLabel", ztitle); // p2d = Profile2DPtr(handler().weightNames(), prof); // p2d = addAnalysisObject(p2d); // return p2d; return p2d = registerAO(prof); } Profile2DPtr & Analysis::book(Profile2DPtr & p2d, const string& hname, const initializer_list<double>& xbinedges, const initializer_list<double>& ybinedges, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { return book(p2d, hname, vector<double>{xbinedges}, vector<double>{ybinedges}, title, xtitle, ytitle, ztitle); } Profile2DPtr & Analysis::book(Profile2DPtr & p2d, const string& hname, const vector<double>& xbinedges, const vector<double>& ybinedges, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { const string path = histoPath(hname); Profile2D prof(xbinedges, ybinedges, path, title); prof.setAnnotation("XLabel", xtitle); prof.setAnnotation("YLabel", ytitle); prof.setAnnotation("ZLabel", ztitle); // p2d = Profile2DPtr(handler().weightNames(), prof); // p2d = addAnalysisObject(p2d); // return p2d; return p2d = registerAO(prof); } /// @todo REINSTATE // Profile2DPtr Analysis::book(Profile2DPtr& prof,const string& hname, // const Scatter3D& refscatter, // const string& title, // const string& xtitle, // const string& ytitle, // const string& ztitle) { // const string path = histoPath(hname); // /// @todo Add no-metadata argument to YODA copy constructors // Profile2D prof(refscatter, path); // prof.setTitle(title); // prof.setAnnotation("XLabel", xtitle); // prof.setAnnotation("YLabel", ytitle); // prof.setAnnotation("ZLabel", ztitle); // if (prof.hasAnnotation("IsRef")) prof.rmAnnotation("IsRef"); // p2d = Profile2DPtr(handler().weightNames(), prof); // p2d = addAnalysisObject(p2d); // return p2d; // } // Profile2DPtr Analysis::book(Profile2DPtr& prof, const string& hname, // const string& title, // const string& xtitle, // const string& ytitle, // const string& ztitle) { // const Scatter3D& refdata = refData(hname); // return book(prof, hname, refdata, title, xtitle, ytitle, ztitle); // } /////////////// /// @todo Should be able to book Scatter1Ds /////////////// Scatter2DPtr & Analysis::book(Scatter2DPtr & s2d, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, bool copy_pts, const string& title, const string& xtitle, const string& ytitle) { const string axisCode = mkAxisCode(datasetId, xAxisId, yAxisId); return book(s2d, axisCode, copy_pts, title, xtitle, ytitle); } Scatter2DPtr & Analysis::book(Scatter2DPtr & s2d, const string& hname, bool copy_pts, const string& title, const string& xtitle, const string& ytitle) { const string path = histoPath(hname); Scatter2D scat; if (copy_pts) { const Scatter2D& refdata = refData(hname); scat = Scatter2D(refdata, path); for (Point2D& p : scat.points()) p.setY(0, 0); } else { scat = Scatter2D(path); } scat.setTitle(title); scat.setAnnotation("XLabel", xtitle); scat.setAnnotation("YLabel", ytitle); if (scat.hasAnnotation("IsRef")) scat.rmAnnotation("IsRef"); // s2d = Scatter2DPtr(handler().weightNames(), scat); // s2d = addAnalysisObject(s2d); // return s2d; return s2d = registerAO(scat); } Scatter2DPtr & Analysis::book(Scatter2DPtr & s2d, const string& hname, size_t npts, double lower, double upper, const string& title, const string& xtitle, const string& ytitle) { const string path = histoPath(hname); Scatter2D scat; const double binwidth = (upper-lower)/npts;
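// Place npts equally spaced points at the bin centres, with
// half-bin-width errors in x and zero value and error in y.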
for (size_t pt = 0; pt < npts; ++pt) { const double bincentre = lower + (pt + 0.5) * binwidth; scat.addPoint(bincentre, 0, binwidth/2.0, 0); } scat.setTitle(title); scat.setAnnotation("XLabel", xtitle); scat.setAnnotation("YLabel", ytitle); // s2d = Scatter2DPtr(handler().weightNames(), scat); // s2d = addAnalysisObject(s2d); // return s2d; return s2d = registerAO(scat); } Scatter2DPtr & Analysis::book(Scatter2DPtr & s2d, const string& hname, const vector<double>& binedges, const string& title, const string& xtitle, const string& ytitle) { const string path = histoPath(hname); Scatter2D scat; for (size_t pt = 0; pt < binedges.size()-1; ++pt) { const double bincentre = (binedges[pt] + binedges[pt+1]) / 2.0; const double binwidth = binedges[pt+1] - binedges[pt]; scat.addPoint(bincentre, 0, binwidth/2.0, 0); } scat.setTitle(title); scat.setAnnotation("XLabel", xtitle); scat.setAnnotation("YLabel", ytitle); // s2d = Scatter2DPtr(handler().weightNames(), scat); // s2d = addAnalysisObject(s2d); // return s2d; return s2d = registerAO(scat); } /// @todo Book Scatter3Ds? ///////////////////// void Analysis::divide(CounterPtr c1, CounterPtr c2, Scatter1DPtr s) const { const string path = s->path(); *s = *c1 / *c2; s->setPath(path); } void Analysis::divide(const Counter& c1, const Counter& c2, Scatter1DPtr s) const { const string path = s->path(); *s = c1 / c2; s->setPath(path); } void Analysis::divide(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const { const string path = s->path(); *s = *h1 / *h2; s->setPath(path); } void Analysis::divide(const Histo1D& h1, const Histo1D& h2, Scatter2DPtr s) const { const string path = s->path(); *s = h1 / h2; s->setPath(path); } void Analysis::divide(Profile1DPtr p1, Profile1DPtr p2, Scatter2DPtr s) const { const string path = s->path(); *s = *p1 / *p2; s->setPath(path); } void Analysis::divide(const Profile1D& p1, const Profile1D& p2, Scatter2DPtr s) const { const string path = s->path(); *s = p1 / p2; s->setPath(path); } void Analysis::divide(Histo2DPtr h1, Histo2DPtr h2, Scatter3DPtr s) const { const string path = s->path(); *s = *h1 / *h2; s->setPath(path); } void Analysis::divide(const Histo2D& h1, const Histo2D& h2, Scatter3DPtr s) const { const string path = s->path(); *s = h1 / h2; s->setPath(path); } void Analysis::divide(Profile2DPtr p1, Profile2DPtr p2, Scatter3DPtr s) const { const string path = s->path(); *s = *p1 / *p2; s->setPath(path); } void Analysis::divide(const Profile2D& p1, const Profile2D& p2, Scatter3DPtr s) const { const string path = s->path(); *s = p1 / p2; s->setPath(path); } /// @todo Counter and Histo2D efficiencies and asymms void Analysis::efficiency(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const { const string path = s->path(); *s = YODA::efficiency(*h1, *h2); s->setPath(path); } void Analysis::efficiency(const Histo1D& h1, const Histo1D& h2, Scatter2DPtr s) const { const string path = s->path(); *s = YODA::efficiency(h1, h2); s->setPath(path); } void Analysis::asymm(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const { const string path = s->path(); *s = YODA::asymm(*h1, *h2); s->setPath(path); } void Analysis::asymm(const Histo1D& h1, const Histo1D& h2, Scatter2DPtr s) const { const string path = s->path(); *s = YODA::asymm(h1, h2); s->setPath(path); } void Analysis::scale(CounterPtr cnt, Analysis::CounterAdapter factor) { if (!cnt) { MSG_WARNING("Failed to scale counter=NULL in analysis " << name() << " (scale=" << double(factor) << ")"); return; } if (std::isnan(double(factor)) || std::isinf(double(factor))) {
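// Guard against NaN/inf scale factors: warn and zero the factor
// rather than corrupting the counter contents.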
MSG_WARNING("Failed to scale counter=" << cnt->path() << " in analysis: " << name() << " (invalid scale factor = " << double(factor) << ")"); factor = 0; } MSG_TRACE("Scaling counter " << cnt->path() << " by factor " << double(factor)); try { cnt->scaleW(factor); } catch (YODA::Exception& we) { MSG_WARNING("Could not scale counter " << cnt->path()); return; } } void Analysis::normalize(Histo1DPtr histo, Analysis::CounterAdapter norm, bool includeoverflows) { if (!histo) { MSG_WARNING("Failed to normalize histo=NULL in analysis " << name() << " (norm=" << double(norm) << ")"); return; } MSG_TRACE("Normalizing histo " << histo->path() << " to " << double(norm)); try { const double hint = histo->integral(includeoverflows); if (hint == 0) MSG_WARNING("Skipping histo with null area " << histo->path()); else histo->normalize(norm, includeoverflows); } catch (YODA::Exception& we) { MSG_WARNING("Could not normalize histo " << histo->path()); return; } } void Analysis::scale(Histo1DPtr histo, Analysis::CounterAdapter factor) { if (!histo) { MSG_WARNING("Failed to scale histo=NULL in analysis " << name() << " (scale=" << double(factor) << ")"); return; } if (std::isnan(double(factor)) || std::isinf(double(factor))) { MSG_WARNING("Failed to scale histo=" << histo->path() << " in analysis: " << name() << " (invalid scale factor = " << double(factor) << ")"); factor = 0; } MSG_TRACE("Scaling histo " << histo->path() << " by factor " << double(factor)); try { histo->scaleW(factor); } catch (YODA::Exception& we) { MSG_WARNING("Could not scale histo " << histo->path()); return; } } void Analysis::normalize(Histo2DPtr histo, Analysis::CounterAdapter norm, bool includeoverflows) { if (!histo) { MSG_ERROR("Failed to normalize histo=NULL in analysis " << name() << " (norm=" << double(norm) << ")"); return; } MSG_TRACE("Normalizing histo " << histo->path() << " to " << double(norm)); try { const double hint = histo->integral(includeoverflows); if (hint == 0) MSG_WARNING("Skipping histo with null area " << histo->path()); else histo->normalize(norm, includeoverflows); } catch (YODA::Exception& we) { MSG_WARNING("Could not normalize histo " << histo->path()); return; } } void Analysis::scale(Histo2DPtr histo, Analysis::CounterAdapter factor) { if (!histo) { MSG_ERROR("Failed to scale histo=NULL in analysis " << name() << " (scale=" << double(factor) << ")"); return; } if (std::isnan(double(factor)) || std::isinf(double(factor))) { MSG_ERROR("Failed to scale histo=" << histo->path() << " in analysis: " << name() << " (invalid scale factor = " << double(factor) << ")"); factor = 0; } MSG_TRACE("Scaling histo " << histo->path() << " by factor " << double(factor)); try { histo->scaleW(factor); } catch (YODA::Exception& we) { MSG_WARNING("Could not scale histo " << histo->path()); return; } } void Analysis::integrate(Histo1DPtr h, Scatter2DPtr s) const { // preserve the path info const string path = s->path(); *s = toIntegralHisto(*h); s->setPath(path); } void Analysis::integrate(const Histo1D& h, Scatter2DPtr s) const { // preserve the path info const string path = s->path(); *s = toIntegralHisto(h); s->setPath(path); } } /// @todo 2D versions of integrate... defined how, exactly?!? 
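// ------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the patch): the scale(),
// normalize() and divide() helpers above are typically combined in an
// analysis' finalize() step. The names _h_num, _h_den, _s_ratio and
// MY_ANALYSIS below are hypothetical; crossSection() and sumOfWeights()
// are standard Analysis accessors, and the normalisation convention shown
// is only an assumed example:
//
//   void MY_ANALYSIS::finalize() {
//     const double sf = crossSection()/sumOfWeights(); // assumed convention
//     scale(_h_num, sf);                // scaleW under the hood, NaN/inf guarded
//     scale(_h_den, sf);
//     divide(_h_num, _h_den, _s_ratio); // fills the booked scatter with h1/h2,
//   }                                   // preserving the scatter's path
// ------------------------------------------------------------------------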
////////////////////////////////// // namespace { // void errormsg(std::string name) { // // #ifdef HAVE_BACKTRACE // // void * buffer[4]; // // backtrace(buffer, 4); // // backtrace_symbols_fd(buffer, 4 , 1); // // #endif // std::cerr << name << ": Can't book objects outside of init().\n"; // assert(false); // } // } namespace Rivet { // void Analysis::addAnalysisObject(const MultiweightAOPtr & ao) { // if (handler().stage() == AnalysisHandler::Stage::INIT) { // _analysisobjects.push_back(ao); // } // else { // errormsg(name()); // } // } void Analysis::removeAnalysisObject(const string& path) { for (auto it = _analysisobjects.begin(); it != _analysisobjects.end(); ++it) { if ((*it)->path() == path) { _analysisobjects.erase(it); break; } } } void Analysis::removeAnalysisObject(const MultiweightAOPtr & ao) { for (auto it = _analysisobjects.begin(); it != _analysisobjects.end(); ++it) { if ((*it) == ao) { _analysisobjects.erase(it); break; } } } const CentralityProjection & Analysis::declareCentrality(const SingleValueProjection &proj, string calAnaName, string calHistName, const string projName, bool increasing) { CentralityProjection cproj; // Select the centrality variable from option. Use REF as default. // Other selections are "GEN", "IMP" and "USR" (USR only in HEPMC 3). string sel = getOption("cent","REF"); set<string> done; if ( sel == "REF" ) { YODA::Scatter2DPtr refscat; auto refmap = getRefData(calAnaName); if ( refmap.find(calHistName) != refmap.end() ) refscat = dynamic_pointer_cast<YODA::Scatter2D>(refmap.find(calHistName)->second); if ( !refscat ) { MSG_WARNING("No reference calibration histogram for " << "CentralityProjection " << projName << " found " << "(requested histogram " << calHistName << " in " << calAnaName << ")"); } else { MSG_INFO("Found calibration histogram " << sel << " " << refscat->path()); cproj.add(PercentileProjection(proj, *refscat, increasing), sel); } } else if ( sel == "GEN" ) { YODA::Histo1DPtr genhists = getPreload<YODA::Histo1D>("/" + calAnaName + "/" + calHistName); // for ( YODA::AnalysisObjectPtr ao : handler().getData(true) ) { // if ( ao->path() == histpath ) // genhist = dynamic_pointer_cast<YODA::Histo1D>(ao); // } if ( !genhists || genhists->numEntries() <= 1 ) { MSG_WARNING("No generated calibration histogram for " << "CentralityProjection " << projName << " found " << "(requested histogram " << calHistName << " in " << calAnaName << ")"); } else { MSG_INFO("Found calibration histogram " << sel << " " << genhists->path()); cproj.add(PercentileProjection(proj, *genhists, increasing), sel); } } else if ( sel == "IMP" ) { YODA::Histo1DPtr imphists = getPreload<YODA::Histo1D>("/" + calAnaName + "/" + calHistName + "_IMP"); if ( !imphists || imphists->numEntries() <= 1 ) { MSG_WARNING("No impact parameter calibration histogram for " << "CentralityProjection " << projName << " found " << "(requested histogram " << calHistName << "_IMP in " << calAnaName << ")"); } else { MSG_INFO("Found calibration histogram " << sel << " " << imphists->path()); cproj.add(PercentileProjection(ImpactParameterProjection(), *imphists, true), sel); } } else if ( sel == "USR" ) { #if HEPMC_VERSION_CODE >= 3000000 YODA::Histo1DPtr usrhists = getPreload<YODA::Histo1D>("/" + calAnaName + "/" + calHistName + "_USR"); if ( !usrhists || usrhists->numEntries() <= 1 ) { MSG_WARNING("No user-defined calibration histogram for " << "CentralityProjection " << projName << " found " << "(requested histogram " << calHistName << "_USR in " << calAnaName << ")"); } else { MSG_INFO("Found calibration histogram " << sel << " " << usrhists->path());
cproj.add(PercentileProjection(UserCentEstimate(), *usrhists, true), sel); } #else MSG_WARNING("UserCentEstimate is only available with HepMC3."); #endif } else if ( sel == "RAW" ) { #if HEPMC_VERSION_CODE >= 3000000 cproj.add(GeneratedCentrality(), sel); #else MSG_WARNING("GeneratedCentrality is only available with HepMC3."); #endif } else MSG_WARNING("'" << sel << "' is not a valid PercentileProjection tag."); if ( cproj.empty() ) MSG_WARNING("CentralityProjection " << projName << " did not contain any valid PercentileProjections."); return declare(cproj, projName); } vector<string> Analysis::_weightNames() const { return handler().weightNames(); } YODA::AnalysisObjectPtr Analysis::_getPreload(string path) const { return handler().getPreload(path); } size_t Analysis::_defaultWeightIndex() const { return handler().defaultWeightIndex(); } MultiweightAOPtr Analysis::_getOtherAnalysisObject(const std::string & ananame, const std::string& name) { std::string path = "/" + ananame + "/" + name; const auto& ana = handler().analysis(ananame); return ana->getAnalysisObject(name); //< @todo includeorphans check?? } void Analysis::_checkBookInit() const { if (handler().stage() != AnalysisHandler::Stage::INIT) { MSG_ERROR("Can't book objects outside of init()"); throw UserError(name() + ": Can't book objects outside of init()."); } } + bool Analysis::inInit() const { + return handler().stage() == AnalysisHandler::Stage::INIT; + } + + bool Analysis::inFinalize() const { + return handler().stage() == AnalysisHandler::Stage::FINALIZE; + } } diff --git a/src/Core/AnalysisHandler.cc b/src/Core/AnalysisHandler.cc --- a/src/Core/AnalysisHandler.cc +++ b/src/Core/AnalysisHandler.cc @@ -1,780 +1,686 @@ // -*- C++ -*- #include "Rivet/Config/RivetCommon.hh" #include "Rivet/AnalysisHandler.hh" #include "Rivet/Analysis.hh" #include "Rivet/Tools/ParticleName.hh" #include "Rivet/Tools/BeamConstraint.hh" #include "Rivet/Tools/Logging.hh" #include "Rivet/Projections/Beam.hh" #include "YODA/IO.h" #include <regex> #include <iostream> using std::cout; using std::cerr; namespace { inline std::vector<std::string> split(const std::string& input, const std::string& regex) { // passing -1 as the submatch index parameter performs splitting std::regex re(regex); std::sregex_token_iterator first{input.begin(), input.end(), re, -1}, last; return {first, last}; } } namespace Rivet { AnalysisHandler::AnalysisHandler(const string& runname) : _runname(runname), - // *** LEIF *** temporarily removed this - // _eventcounter("/_EVTCOUNT"), - // _xs(NAN), _xserr(NAN), _initialised(false), _ignoreBeams(false), _defaultWeightIdx(0), _dumpPeriod(0), _dumping(false) { } AnalysisHandler::~AnalysisHandler() { } Log& AnalysisHandler::getLog() const { return Log::getLog("Rivet.Analysis.Handler"); } /// http://stackoverflow.com/questions/4654636/how-to-determine-if-a-string-is-a-number-with-c namespace { bool is_number(const std::string& s) { std::string::const_iterator it = s.begin(); while (it != s.end() && std::isdigit(*it)) ++it; return !s.empty() && it == s.end(); } } /// Check if any of the weightnames is not a number bool AnalysisHandler::haveNamedWeights() const { bool dec=false; for (unsigned int i=0;i<_weightNames.size();++i) { string s = _weightNames[i]; if (!is_number(s)) { dec=true; break; } } return dec; } void AnalysisHandler::init(const GenEvent& ge) { if (_initialised) throw UserError("AnalysisHandler::init has already been called: cannot re-initialize!"); /// @todo Should the Rivet analysis objects know about weight names?
setRunBeams(Rivet::beams(ge)); MSG_DEBUG("Initialising the analysis handler"); _eventNumber = ge.event_number(); setWeightNames(ge); if (haveNamedWeights()) MSG_INFO("Using named weights"); else MSG_INFO("NOT using named weights. Using first weight as nominal weight"); _eventCounter = CounterPtr(weightNames(), Counter("_EVTCOUNT")); // Set the cross section based on what is reported by this event. if (ge.cross_section()) { MSG_TRACE("Getting cross section."); double xs = ge.cross_section()->cross_section(); double xserr = ge.cross_section()->cross_section_error(); setCrossSection(xs, xserr); } // Check that analyses are beam-compatible, and remove those that aren't const size_t num_anas_requested = analysisNames().size(); vector<string> anamestodelete; for (const AnaHandle a : analyses()) { if (!_ignoreBeams && !a->isCompatible(beams())) { //MSG_DEBUG(a->name() << " requires beams " << a->requiredBeams() << " @ " << a->requiredEnergies() << " GeV"); anamestodelete.push_back(a->name()); } } for (const string& aname : anamestodelete) { MSG_WARNING("Analysis '" << aname << "' is incompatible with the provided beams: removing"); removeAnalysis(aname); } if (num_anas_requested > 0 && analysisNames().empty()) { cerr << "All analyses were incompatible with the first event's beams\n" << "Exiting, since this probably wasn't intentional!" << endl; exit(1); } // Warn if any analysis' status is not unblemished for (const AnaHandle a : analyses()) { if ( a->info().preliminary() ) { MSG_WARNING("Analysis '" << a->name() << "' is preliminary: be careful, it may change and/or be renamed!"); } else if ( a->info().obsolete() ) { MSG_WARNING("Analysis '" << a->name() << "' is obsolete: please update!"); } else if ( a->info().unvalidated() ) { MSG_WARNING("Analysis '" << a->name() << "' is unvalidated: be careful, it may be broken!"); } } // Initialize the remaining analyses _stage = Stage::INIT; for (AnaHandle a : analyses()) { MSG_DEBUG("Initialising analysis: " << a->name()); try { // Allow projection registration in the init phase onwards a->_allowProjReg = true; a->init(); //MSG_DEBUG("Checking consistency of analysis: " << a->name()); //a->checkConsistency(); } catch (const Error& err) { cerr << "Error in " << a->name() << "::init method: " << err.what() << endl; exit(1); } MSG_DEBUG("Done initialising analysis: " << a->name()); } _stage = Stage::OTHER; _initialised = true; MSG_DEBUG("Analysis handler initialised"); } -// *** LEIF *** Reinstated this.
void AnalysisHandler::setWeightNames(const GenEvent& ge) { - /// reroute the print output to a std::stringstream and process - /// The iteration is done over a map in hepmc2 so this is safe - std::ostringstream stream; - ge.weights().print(stream); // Super lame, I know - string str = stream.str(); - - std::regex re("(([^()]+))"); // Regex for stuff enclosed by parentheses () - size_t idx = 0; - for (std::sregex_iterator i = std::sregex_iterator(str.begin(), str.end(), re); i != std::sregex_iterator(); ++i ) { - std::smatch m = *i; - vector temp = ::split(m.str(), "[,]"); - if (temp.size() ==2) { - MSG_DEBUG("Name of weight #" << _weightNames.size() << ": " << temp[0]); - - // store the default weight based on weight names - if (temp[0] == "Weight" || temp[0] == "0" || temp[0] == "Default") { - MSG_DEBUG(_weightNames.size() << " is being used as the nominal."); - _weightNames.push_back(""); - _defaultWeightIdx = idx; - } else - _weightNames.push_back(temp[0]); - - - idx++; - } - } + _weightNames = HepMCUtils::weightNames(ge); + if ( _weightNames.empty() ) _weightNames.push_back(""); + for ( int i = 0, N = _weightNames.size(); i < N; ++i ) + if ( _weightNames[i] == "" ) _defaultWeightIdx = i; } - - void AnalysisHandler::analyze(const GenEvent& ge) { // Call init with event as template if not already initialised if (!_initialised) init(ge); assert(_initialised); // Ensure that beam details match those from the first event (if we're checking beams) if ( !_ignoreBeams ) { const PdgIdPair beams = Rivet::beamIds(ge); const double sqrts = Rivet::sqrtS(ge); if (!compatible(beams, _beams) || !fuzzyEquals(sqrts, sqrtS())) { cerr << "Event beams mismatch: " << PID::toBeamsString(beams) << " @ " << sqrts/GeV << " GeV" << " vs. first beams " << this->beams() << " @ " << this->sqrtS()/GeV << " GeV" << endl; exit(1); } } // Create the Rivet event wrapper /// @todo Filter/normalize the event here bool strip = ( getEnvParam("RIVET_STRIP_HEPMC", string("NOOOO") ) != "NOOOO" ); Event event(ge, strip); - // Weights - /// @todo Drop this / just report first weight when we support multiweight events - // *** LEIF *** temporarily removed this - // _eventcounter.fill(event.weight()); - // MSG_DEBUG("Event #" << _eventcounter.numEntries() << " weight = " << event.weight()); - - // *** LEIF *** temporarily removed this - // // Cross-section - // #if defined ENABLE_HEPMC_3 - // if (ge.cross_section()) { - // //@todo HepMC3::GenCrossSection methods aren't const accessible :( - // RivetHepMC::GenCrossSection gcs = *(event.genEvent()->cross_section()); - // _xs = gcs.xsec(); - // _xserr = gcs.xsec_err(); - // } - // #elif defined HEPMC_HAS_CROSS_SECTION - // if (ge.cross_section()) { - // _xs = ge.cross_section()->cross_section(); - // _xserr = ge.cross_section()->cross_section_error(); - // } - // #endif - // Won't happen for first event because _eventNumber is set in init() if (_eventNumber != ge.event_number()) { /// @todo Can we get away with not passing a matrix? 
MSG_TRACE("AnalysisHandler::analyze(): Pushing _eventCounter to persistent."); _eventCounter.get()->pushToPersistent(_subEventWeights); // if this is indeed a new event, push the temporary // histograms and reset for (const AnaHandle& a : analyses()) { for (auto ao : a->analysisObjects()) { MSG_TRACE("AnalysisHandler::analyze(): Pushing " << a->name() << "'s " << ao->name() << " to persistent."); ao.get()->pushToPersistent(_subEventWeights); } MSG_TRACE("AnalysisHandler::analyze(): finished pushing " << a->name() << "'s objects to persistent."); } _eventNumber = ge.event_number(); MSG_DEBUG("nominal event # " << _eventCounter.get()->_persistent[0]->numEntries()); MSG_DEBUG("nominal sum of weights: " << _eventCounter.get()->_persistent[0]->sumW()); MSG_DEBUG("Event has " << _subEventWeights.size() << " sub events."); _subEventWeights.clear(); } MSG_TRACE("starting new sub event"); _eventCounter.get()->newSubEvent(); for (const AnaHandle& a : analyses()) { for (auto ao : a->analysisObjects()) { ao.get()->newSubEvent(); } } _subEventWeights.push_back(event.weights()); MSG_DEBUG("Analyzing subevent #" << _subEventWeights.size() - 1 << "."); _eventCounter->fill(); // Run the analyses for (AnaHandle a : analyses()) { MSG_TRACE("About to run analysis " << a->name()); try { a->analyze(event); } catch (const Error& err) { cerr << "Error in " << a->name() << "::analyze method: " << err.what() << endl; exit(1); } MSG_TRACE("Finished running analysis " << a->name()); } if ( _dumpPeriod > 0 && numEvents() > 0 && numEvents()%_dumpPeriod == 0 ) { MSG_INFO("Dumping intermediate results to " << _dumpFile << "."); _dumping = numEvents()/_dumpPeriod; finalize(); writeData(_dumpFile); _dumping = 0; } } void AnalysisHandler::analyze(const GenEvent* ge) { if (ge == nullptr) { MSG_ERROR("AnalysisHandler received null pointer to GenEvent"); //throw Error("AnalysisHandler received null pointer to GenEvent"); } analyze(*ge); } void AnalysisHandler::finalize() { if (!_initialised) return; MSG_INFO("Finalising analyses"); + _stage = Stage::FINALIZE; + // First push all analyses' objects to persistent and final MSG_TRACE("AnalysisHandler::finalize(): Pushing analysis objects to persistent."); _eventCounter.get()->pushToPersistent(_subEventWeights); _eventCounter.get()->pushToFinal(); _xs.get()->pushToFinal(); for (const AnaHandle& a : analyses()) { for (auto ao : a->analysisObjects()) { ao.get()->pushToPersistent(_subEventWeights); ao.get()->pushToFinal(); } } for (AnaHandle a : analyses()) { if ( _dumping && !a->info().reentrant() ) { if ( _dumping == 1 ) MSG_INFO("Skipping finalize in periodic dump of " << a->name() << " as it is not declared reentrant."); continue; } for (size_t iW = 0; iW < numWeights(); iW++) { _eventCounter.get()->setActiveFinalWeightIdx(iW); _xs.get()->setActiveFinalWeightIdx(iW); for (auto ao : a->analysisObjects()) ao.get()->setActiveFinalWeightIdx(iW); try { MSG_TRACE("running " << a->name() << "::finalize() for weight " << iW << "."); a->finalize(); } catch (const Error& err) { cerr << "Error in " << a->name() << "::finalize method: " << err.what() << '\n'; exit(1); } } } // Print out number of events processed const int nevts = numEvents(); MSG_INFO("Processed " << nevts << " event" << (nevts != 1 ? "s" : "")); + _stage = Stage::OTHER; + if ( _dumping ) return; // Print out MCnet boilerplate cout << endl; cout << "The MCnet usage guidelines apply to Rivet: see http://www.montecarlonet.org/GUIDELINES" << endl; cout << "Please acknowledge plots made with Rivet analyses, and cite arXiv:1003.0694 (http://arxiv.org/abs/1003.0694)" << endl; } AnalysisHandler& AnalysisHandler::addAnalysis(const string& analysisname, std::map<string, string> pars) { // Make an option handle. std::string parHandle = ""; for (map<string, string>::iterator par = pars.begin(); par != pars.end(); ++par) { parHandle +=":"; parHandle += par->first + "=" + par->second; } return addAnalysis(analysisname + parHandle); } AnalysisHandler& AnalysisHandler::addAnalysis(const string& analysisname) { // Check for a duplicate analysis /// @todo Might we want to be able to run an analysis twice, with different params? /// Requires avoiding histo tree clashes, i.e. storing the histos on the analysis objects. string ananame = analysisname; vector<string> anaopt = split(analysisname, ":"); if ( anaopt.size() > 1 ) ananame = anaopt[0]; AnaHandle analysis( AnalysisLoader::getAnalysis(ananame) ); if (analysis.get() != 0) { // < Check for null analysis. MSG_DEBUG("Adding analysis '" << analysisname << "'"); map<string, string> opts; for ( int i = 1, N = anaopt.size(); i < N; ++i ) { vector<string> opt = split(anaopt[i], "="); if ( opt.size() != 2 ) { MSG_WARNING("Error in option specification. Skipping analysis " << analysisname); return *this; } if ( !analysis->info().validOption(opt[0], opt[1]) ) { MSG_WARNING("Cannot set option '" << opt[0] << "' to '" << opt[1] << "'. Skipping analysis " << analysisname); return *this; } opts[opt[0]] = opt[1]; } for ( auto opt: opts) { analysis->_options[opt.first] = opt.second; analysis->_optstring += ":" + opt.first + "=" + opt.second; } for (const AnaHandle& a : analyses()) { if (a->name() == analysis->name() ) { MSG_WARNING("Analysis '" << analysisname << "' already registered: skipping duplicate"); return *this; } } analysis->_analysishandler = this; _analyses[analysisname] = analysis; } else { MSG_WARNING("Analysis '" << analysisname << "' not found."); } // MSG_WARNING(_analyses.size()); // for (const AnaHandle& a : _analyses) MSG_WARNING(a->name()); return *this; } AnalysisHandler& AnalysisHandler::removeAnalysis(const string& analysisname) { MSG_DEBUG("Removing analysis '" << analysisname << "'"); if (_analyses.find(analysisname) != _analyses.end()) _analyses.erase(analysisname); // } return *this; } // void AnalysisHandler::addData(const std::vector<YODA::AnalysisObjectPtr>& aos) { // for (const YODA::AnalysisObjectPtr ao : aos) { // string path = ao->path(); // if ( path.substr(0, 5) != "/RAW/" ) { // _orphanedPreloads.push_back(ao); // continue; // } // path = path.substr(4); // ao->setPath(path); // if (path.size() > 1) { // path > "/" // try { // const string ananame = ::split(path, "/")[0]; // AnaHandle a = analysis(ananame); // /// @todo FIXXXXX // //MultiweightAOPtr mao = ????; /// @todo generate right Multiweight object from ao // //a->addAnalysisObject(mao); /// @todo Need to statistically merge...
// } catch (const Error& e) { // MSG_TRACE("Adding analysis object " << path << // " to the list of orphans."); // _orphanedPreloads.push_back(ao); // } // } // } // } void AnalysisHandler::stripOptions(YODA::AnalysisObjectPtr ao, const vector<string> & delopts) const { string path = ao->path(); string ananame = split(path, "/")[0]; vector<string> anaopts = split(ananame, ":"); for ( int i = 1, N = anaopts.size(); i < N; ++i ) for ( auto opt : delopts ) if ( opt == "*" || anaopts[i].find(opt + "=") == 0 ) path.replace(path.find(":" + anaopts[i]), (":" + anaopts[i]).length(), ""); ao->setPath(path); } void AnalysisHandler::mergeYodas(const vector<string> & aofiles, const vector<string> & delopts, bool equiv) { // Convenient typedef; typedef multimap<string, YODA::AnalysisObjectPtr> AOMap; // Store all found weights here. set<string> foundWeightNames; // Store all found analyses. set<string> foundAnalyses; // Store all analysis objects here. vector<AOMap> allaos; // Go through all files and collect information. for ( auto file : aofiles ) { allaos.push_back(AOMap()); AOMap & aomap = allaos.back(); vector<YODA::AnalysisObject*> aos_raw; try { YODA::read(file, aos_raw); } catch (...) { //< YODA::ReadError& throw UserError("Unexpected error in reading file: " + file); } for (YODA::AnalysisObject* aor : aos_raw) { YODA::AnalysisObjectPtr ao(aor); AOPath path(ao->path()); if ( !path ) throw UserError("Invalid path name in file: " + file); if ( !path.isRaw() ) continue; foundWeightNames.insert(path.weight()); // Now check if any options should be removed. for ( string delopt : delopts ) if ( path.hasOption(delopt) ) path.removeOption(delopt); - path.setPath(); - if ( path.analysisWithOptions() != "" ) - foundAnalyses.insert(path.analysisWithOptions()); - aomap.insert(make_pair(path.path(), ao)); + path.setPath(); + if ( path.analysisWithOptions() != "" ) + foundAnalyses.insert(path.analysisWithOptions()); + aomap.insert(make_pair(path.path(), ao)); } } // Now make analysis handler aware of the weight names present. _weightNames.clear(); _weightNames.push_back(""); + _defaultWeightIdx = 0; for ( string name : foundWeightNames ) _weightNames.push_back(name); // Then we create and initialize all analyses for ( string ananame : foundAnalyses ) addAnalysis(ananame); for (AnaHandle a : analyses() ) { MSG_TRACE("Initialising analysis: " << a->name()); if ( !a->info().reentrant() ) MSG_WARNING("Analysis " << a->name() << " has not been validated to have " << "a reentrant finalize method. The merged result is unpredictable."); try { // Allow projection registration in the init phase onwards a->_allowProjReg = true; a->init(); } catch (const Error& err) { cerr << "Error in " << a->name() << "::init method: " << err.what() << endl; exit(1); } MSG_TRACE("Done initialising analysis: " << a->name()); } // Now get all booked analysis objects. vector<MultiweightAOPtr> raos; for (AnaHandle a : analyses()) { for (const auto & ao : a->analysisObjects()) { raos.push_back(ao); } } // Collect global weights and cross sections and fix scaling for // all files.
_eventCounter = CounterPtr(weightNames(), Counter("_EVTCOUNT")); _xs = Scatter1DPtr(weightNames(), Scatter1D("_XSEC")); for (size_t iW = 0; iW < numWeights(); iW++) { _eventCounter.get()->setActiveWeightIdx(iW); _xs.get()->setActiveWeightIdx(iW); YODA::Counter & sumw = *_eventCounter; YODA::Scatter1D & xsec = *_xs; vector<YODA::Scatter1DPtr> xsecs; vector<YODA::CounterPtr> sows; for ( auto & aomap : allaos ) { auto xit = aomap.find(xsec.path()); if ( xit != aomap.end() ) xsecs.push_back(dynamic_pointer_cast<YODA::Scatter1D>(xit->second)); else xsecs.push_back(YODA::Scatter1DPtr()); xit = aomap.find(sumw.path()); if ( xit != aomap.end() ) sows.push_back(dynamic_pointer_cast<YODA::Counter>(xit->second)); else sows.push_back(YODA::CounterPtr()); } double xs = 0.0, xserr = 0.0; for ( int i = 0, N = sows.size(); i < N; ++i ) { if ( !sows[i] || !xsecs[i] ) continue; double xseci = xsecs[i]->point(0).x(); double xsecerri = sqr(xsecs[i]->point(0).xErrAvg()); sumw += *sows[i]; double effnent = sows[i]->effNumEntries(); xs += (equiv? effnent: 1.0)*xseci; xserr += (equiv? sqr(effnent): 1.0)*xsecerri; } vector<double> scales(sows.size(), 1.0); if ( equiv ) { xs /= sumw.effNumEntries(); xserr = sqrt(xserr)/sumw.effNumEntries(); } else { xserr = sqrt(xserr); for ( int i = 0, N = sows.size(); i < N; ++i ) scales[i] = (sumw.sumW()/sows[i]->sumW())* (xsecs[i]->point(0).x()/xs); } xsec.point(0) = Point1D(xs, xserr); // Go through all analyses and add stuff to their analysis objects. for (AnaHandle a : analyses()) { for (const auto & ao : a->analysisObjects()) { ao.get()->setActiveWeightIdx(iW); YODA::AnalysisObjectPtr yao = ao.get()->activeYODAPtr(); for ( int i = 0, N = sows.size(); i < N; ++i ) { if ( !sows[i] || !xsecs[i] ) continue; auto range = allaos[i].equal_range(yao->path()); for ( auto aoit = range.first; aoit != range.second; ++aoit ) if ( !addaos(yao, aoit->second, scales[i]) ) MSG_WARNING("Cannot merge objects with path " << yao->path() <<" of type " << yao->annotation("Type") ); } ao.get()->unsetActiveWeight(); } } _eventCounter.get()->unsetActiveWeight(); _xs.get()->unsetActiveWeight(); } // Finally we just have to finalize all analyses, leaving to the // controlling program to write it out some yoda-file. finalize(); } void AnalysisHandler::readData(const string& filename) { try { /// @todo Use new YODA SFINAE to fill the smart ptr vector directly vector<YODA::AnalysisObject*> aos_raw; YODA::read(filename, aos_raw); for (YODA::AnalysisObject* aor : aos_raw) _preloads[aor->path()] = YODA::AnalysisObjectPtr(aor); } catch (...) { //< YODA::ReadError& throw UserError("Unexpected error in reading file: " + filename); } } vector<MultiweightAOPtr> AnalysisHandler::getRivetAOs() const { vector<MultiweightAOPtr> rtn; for (AnaHandle a : analyses()) { for (const auto & ao : a->analysisObjects()) { rtn.push_back(ao); } } rtn.push_back(_eventCounter); rtn.push_back(_xs); return rtn; } -// *** LEIF *** thinks This is not needed anymore - // vector AnalysisHandler::getYodaAOs(bool includeorphans, - // bool includetmps, - // bool usefinalized) const { - // vector rtn; - // if (usefinalized) - // rtn = _finalizedAOs; - // else { - // for (auto rao : getRivetAOs()) { - // // need to set the index - // // before we can search the PATH - // rao.get()->setActiveWeightIdx(_defaultWeightIdx); - // // Exclude paths from final write-out if they contain a "TMP" layer (i.e.
matching "/TMP/") - // if (!includetmps && rao->path().find("/TMP/") != string::npos) - // continue; - - // for (size_t iW = 0; iW < numWeights(); iW++) { - // rao.get()->setActiveWeightIdx(iW); - // rtn.push_back(rao.get()->activeYODAPtr()); - // } - // } - // } - - // // Sort histograms alphanumerically by path before write-out - // sort(rtn.begin(), rtn.end(), - // [](YODA::AnalysisObjectPtr a, YODA::AnalysisObjectPtr b) { - // return a->path() < b->path(); - // } - // ); - - // return rtn; - // } - - - // vector AnalysisHandler::getData(bool includeorphans, - // bool includetmps, - // bool usefinalized) const { - // return getYodaAOs(includeorphans, includetmps, usefinalized); - // } - - void AnalysisHandler::writeData(const string& filename) const { // This is where we store the AOs to be written. vector<YODA::AnalysisObjectPtr> output; // First get all multiweight AOs vector<MultiweightAOPtr> raos = getRivetAOs(); output.reserve(raos.size()*2*numWeights()); // Then we go through all finalized AOs one weight at a time for (size_t iW = 0; iW < numWeights(); iW++) { for ( auto rao : raos ) { rao.get()->setActiveFinalWeightIdx(iW); if ( rao->path().find("/TMP/") != string::npos ) continue; output.push_back(rao.get()->activeYODAPtr()); } } // Finally the RAW objects. for (size_t iW = 0; iW < numWeights(); iW++) { for ( auto rao : raos ) { rao.get()->setActiveFinalWeightIdx(iW); output.push_back(rao.get()->activeYODAPtr()); } } try { YODA::write(filename, output.begin(), output.end()); } catch (...) { //< YODA::WriteError& throw UserError("Unexpected error in writing file: " + filename); } } string AnalysisHandler::runName() const { return _runname; } size_t AnalysisHandler::numEvents() const { return _eventCounter->numEntries(); } std::vector<std::string> AnalysisHandler::analysisNames() const { std::vector<std::string> rtn; for (AnaHandle a : analyses()) { rtn.push_back(a->name()); } return rtn; } AnalysisHandler& AnalysisHandler::addAnalyses(const std::vector<std::string>& analysisnames) { for (const string& aname : analysisnames) { //MSG_DEBUG("Adding analysis '" << aname << "'"); addAnalysis(aname); } return *this; } AnalysisHandler& AnalysisHandler::removeAnalyses(const std::vector<std::string>& analysisnames) { for (const string& aname : analysisnames) { removeAnalysis(aname); } return *this; } AnalysisHandler& AnalysisHandler::setCrossSection(double xs, double xserr) { _xs = Scatter1DPtr(weightNames(), Scatter1D("_XSEC")); _eventCounter.get()->setActiveWeightIdx(_defaultWeightIdx); double nomwgt = sumW(); // The cross section of each weight variation is the nominal cross section // times the sumW(variation) / sumW(nominal).
// This way the cross section will work correctly for (size_t iW = 0; iW < numWeights(); iW++) { _eventCounter.get()->setActiveWeightIdx(iW); double s = sumW() / nomwgt; _xs.get()->setActiveWeightIdx(iW); _xs->addPoint(xs*s, xserr*s); } _eventCounter.get()->unsetActiveWeight(); _xs.get()->unsetActiveWeight(); return *this; } - - // *** LEIF *** temporarily removed this - // bool AnalysisHandler::hasCrossSection() const { - // return (!std::isnan(crossSection())); - // } - - AnalysisHandler& AnalysisHandler::addAnalysis(Analysis* analysis) { analysis->_analysishandler = this; // _analyses.insert(AnaHandle(analysis)); _analyses[analysis->name()] = AnaHandle(analysis); return *this; } - PdgIdPair AnalysisHandler::beamIds() const { return Rivet::beamIds(beams()); } double AnalysisHandler::sqrtS() const { return Rivet::sqrtS(beams()); } void AnalysisHandler::setIgnoreBeams(bool ignore) { _ignoreBeams=ignore; } } diff --git a/src/Core/Event.cc b/src/Core/Event.cc --- a/src/Core/Event.cc +++ b/src/Core/Event.cc @@ -1,64 +1,60 @@ // -*- C++ -*- #include "Rivet/Event.hh" #include "Rivet/Tools/BeamConstraint.hh" #include "Rivet/Tools/Logging.hh" #include "Rivet/Tools/Utils.hh" #include "Rivet/Projections/Beam.hh" namespace Rivet { /* double Event::weight() const { // Get the weight index, which defaults to 0, i.e. nominal // NB. This should normally only perform the slow env var lookup once per run // NB. An explicitly set index of -1 is used to ignore all weights, for debugging static int WEIGHT_INDEX = -2; if (WEIGHT_INDEX < -1) { WEIGHT_INDEX = getEnvParam("RIVET_WEIGHT_INDEX", 0); Log::getLog("Core.Weight") << Log::TRACE << "Got weight index from env/default = "<< WEIGHT_INDEX << endl; } // If RIVET_WEIGHT_INDEX=-1, or there are no event weights, return 1 if (WEIGHT_INDEX == -1 || genEvent()->weights().empty()) return 1.0; // Otherwise return the appropriate weight index return _genevent.weights()[WEIGHT_INDEX]; } */ ParticlePair Event::beams() const { return Rivet::beams(*this); } double Event::sqrtS() const { return Rivet::sqrtS(beams()); } double Event::asqrtS() const { return Rivet::asqrtS(beams()); } void Event::_init(const GenEvent& ge) { // Use Rivet's preferred units if possible #ifdef HEPMC_HAS_UNITS _genevent.use_units(HepMC::Units::GEV, HepMC::Units::MM); #endif } void Event::_strip(GenEvent & ge) { HepMCUtils::strip(ge); } const Particles& Event::allParticles() const { if (_particles.empty()) { //< assume that empty means no attempt yet made for (ConstGenParticlePtr gp : HepMCUtils::particles(genEvent())) { _particles += Particle(gp); } } return _particles; } std::valarray<double> Event::weights() const { - const size_t W = _genevent.weights().size(); - std::valarray wts(W); - for (unsigned int iw = 0; iw < W; ++iw) - wts[iw] = _genevent.weights()[iw]; - return wts; + return HepMCUtils::weights(_genevent); } } diff --git a/src/Core/Run.cc b/src/Core/Run.cc --- a/src/Core/Run.cc +++ b/src/Core/Run.cc @@ -1,176 +1,142 @@ // -*- C++ -*- #include "Rivet/Run.hh" #include "Rivet/AnalysisHandler.hh" #include "Rivet/Math/MathUtils.hh" #include "Rivet/Tools/RivetPaths.hh" #include "zstr/zstr.hpp" #include <fstream> #include <iostream> using std::cout; using std::endl; namespace Rivet { Run::Run(AnalysisHandler& ah) : _ah(ah), _fileweight(1.0), _xs(NAN) { } Run::~Run() { } Run& Run::setCrossSection(const double xs) { _xs = xs; return *this; } Run& Run::setListAnalyses(const bool dolist) { _listAnalyses = dolist; return *this; } // Fill event and check for a bad read state bool Run::readEvent() { /// @todo Clear rather
than new the GenEvent object per-event? _evt.reset(new GenEvent()); if(!HepMCUtils::readEvent(_hepmcReader, _evt)){ Log::getLog("Rivet.Run") << Log::DEBUG << "Read failed. End of file?" << endl; return false; } // Rescale event weights by file-level weight, if scaling is non-trivial if (!fuzzyEquals(_fileweight, 1.0)) { for (size_t i = 0; i < (size_t) _evt->weights().size(); ++i) { _evt->weights()[i] *= _fileweight; } } return true; } bool Run::openFile(const std::string& evtfile, double weight) { // Set current weight-scaling member _fileweight = weight; // In case makeReader fails. std::string errormessage; // Set up HepMC input reader objects if (evtfile == "-") { #ifdef HAVE_LIBZ _istr = make_shared<zstr::istream>(std::cin); _hepmcReader = HepMCUtils::makeReader(*_istr, &errormessage); #else _hepmcReader = HepMCUtils::makeReader(std::cin, &errormessage); #endif } else { if ( !fileexists(evtfile) ) throw Error("Event file '" + evtfile + "' not found"); #ifdef HAVE_LIBZ // NB. zstr auto-detects if file is deflated or plain-text _istr = make_shared<zstr::ifstream>(evtfile.c_str()); #else _istr = make_shared<std::ifstream>(evtfile.c_str()); #endif _hepmcReader = HepMCUtils::makeReader(*_istr, &errormessage); } if (_hepmcReader == nullptr) { Log::getLog("Rivet.Run") << Log::ERROR << "Read error on file '" << evtfile << "' " << errormessage << endl; return false; } return true; } bool Run::init(const std::string& evtfile, double weight) { if (!openFile(evtfile, weight)) return false; // Read first event to define run conditions bool ok = readEvent(); if (!ok) return false; if(HepMCUtils::particles(_evt).size() == 0){ Log::getLog("Rivet.Run") << Log::ERROR << "Empty first event." << endl; return false; } // Initialise AnalysisHandler with beam information from first event _ah.init(*_evt); // Set cross-section from command line if (!std::isnan(_xs)) { Log::getLog("Rivet.Run") << Log::DEBUG << "Setting user cross-section = " << _xs << " pb" << endl; _ah.setCrossSection(_xs, 0.0); - // *** LEIF *** check if second argument should be 0. } // List the chosen & compatible analyses if requested if (_listAnalyses) { for (const std::string& ana : _ah.analysisNames()) { cout << ana << endl; } } return true; } bool Run::processEvent() { - // Set cross-section if found in event and not from command line - - #if defined ENABLE_HEPMC_3 - if (std::isnan(_xs) && _evt->cross_section()) { - const double xs = _evt->cross_section()->xsec(); ///< in pb - Log::getLog("Rivet.Run") - << Log::DEBUG << "Setting cross-section = " << xs << " pb" << endl; - _ah.setCrossSection(xs); - } - #elif defined HEPMC_HAS_CROSS_SECTION - if (std::isnan(_xs) && _evt->cross_section()) { - const double xs = _evt->cross_section()->cross_section(); ///< in pb - Log::getLog("Rivet.Run") - << Log::DEBUG << "Setting cross-section = " << xs << " pb" << endl; - _ah.setCrossSection(xs, 0.0); - // *** LEIF *** check if second argument should be 0. - } - #endif - - // *** LEIF *** temporarily removed this - // // Complain about absence of cross-section if required! - // if (_ah.needCrossSection() && !_ah.hasCrossSection()) { - // Log::getLog("Rivet.Run") - // << Log::ERROR - // << "Total cross-section needed for at least one of the analyses. " - // << "Please set it (on the command line with '-x' if using the 'rivet' program)" << endl; - // return false; - // } - // Analyze event _ah.analyze(*_evt); return true; } bool Run::finalize() { _evt.reset(); - /// @todo reinstate for HepMC3 - //_istr.reset(); - // *** LEIF *** temporarily removed this - // if (!std::isnan(_xs)) _ah.setCrossSection(_xs); _ah.finalize(); return true; } } diff --git a/src/Projections/ChargedFinalState.cc b/src/Projections/ChargedFinalState.cc --- a/src/Projections/ChargedFinalState.cc +++ b/src/Projections/ChargedFinalState.cc @@ -1,47 +1,45 @@ // -*- C++ -*- #include "Rivet/Projections/ChargedFinalState.hh" namespace Rivet { ChargedFinalState::ChargedFinalState(const FinalState& fsp) { setName("ChargedFinalState"); declare(fsp, "FS"); } ChargedFinalState::ChargedFinalState(const Cut& c) { setName("ChargedFinalState"); declare(FinalState(c), "FS"); } CmpState ChargedFinalState::compare(const Projection& p) const { return mkNamedPCmp(p, "FS"); } } namespace { inline bool chargedParticleFilter(const Rivet::Particle& p) { - // *** LEIF *** temporarily changed this - // return Rivet::PID::threeCharge(p.pdgId()) == 0; return Rivet::PID::charge3(p.pid()) == 0; } } namespace Rivet { void ChargedFinalState::project(const Event& e) { const FinalState& fs = applyProjection<FinalState>(e, "FS"); // _theParticles.clear(); // std::remove_copy_if(fs.particles().begin(), fs.particles().end(), // std::back_inserter(_theParticles), // [](const Rivet::Particle& p) { return p.charge3() == 0; }); _theParticles = filter_select(fs.particles(), isCharged); MSG_DEBUG("Number of charged final-state particles = " << _theParticles.size()); if (getLog().isActive(Log::TRACE)) { for (const Particle& p : _theParticles) { MSG_TRACE("Selected: " << p.pid() << ", charge = " << p.charge()); } } } } diff --git a/src/Tools/RivetHepMC_2.cc b/src/Tools/RivetHepMC_2.cc --- a/src/Tools/RivetHepMC_2.cc +++ b/src/Tools/RivetHepMC_2.cc @@ -1,123 +1,161 @@ // -*- C++ -*- +#include <regex> +#include "Rivet/Tools/Utils.hh" #include "Rivet/Tools/RivetHepMC.hh" +#include "Rivet/Tools/Logging.hh" namespace Rivet{ const Relatives Relatives::PARENTS = HepMC::parents; const Relatives Relatives::CHILDREN = HepMC::children; const Relatives Relatives::ANCESTORS = HepMC::ancestors; const Relatives Relatives::DESCENDANTS = HepMC::descendants; namespace HepMCUtils{ ConstGenParticlePtr getParticlePtr(const RivetHepMC::GenParticle & gp) { return &gp; } std::vector<ConstGenParticlePtr> particles(ConstGenEventPtr ge){ std::vector<ConstGenParticlePtr> result; for(GenEvent::particle_const_iterator pi = ge->particles_begin(); pi != ge->particles_end(); ++pi){ result.push_back(*pi); } return result; } std::vector<ConstGenParticlePtr> particles(const GenEvent *ge){ std::vector<ConstGenParticlePtr> result; for(GenEvent::particle_const_iterator pi = ge->particles_begin(); pi != ge->particles_end(); ++pi){ result.push_back(*pi); } return result; } std::vector<ConstGenVertexPtr> vertices(ConstGenEventPtr ge){ std::vector<ConstGenVertexPtr> result; for(GenEvent::vertex_const_iterator vi = ge->vertices_begin(); vi != ge->vertices_end(); ++vi){ result.push_back(*vi); } return result; } std::vector<ConstGenVertexPtr> vertices(const GenEvent *ge){ std::vector<ConstGenVertexPtr> result; for(GenEvent::vertex_const_iterator vi = ge->vertices_begin(); vi != ge->vertices_end(); ++vi){ result.push_back(*vi); } return result; } std::vector<ConstGenParticlePtr> particles(ConstGenVertexPtr gv, const Relatives &relo){ std::vector<ConstGenParticlePtr> result; /// @todo A particle_const_iterator on GenVertex would be nice...
// Before HepMC 2.7.0 there were no GV::particles_const_iterators and constness consistency was all screwed up :-/ #if HEPMC_VERSION_CODE >= 2007000 for (HepMC::GenVertex::particle_iterator pi = gv->particles_begin(relo); pi != gv->particles_end(relo); ++pi) result.push_back(*pi); #else HepMC::GenVertex* gv2 = const_cast<HepMC::GenVertex*>(gv); for (HepMC::GenVertex::particle_iterator pi = gv2->particles_begin(relo); pi != gv2->particles_end(relo); ++pi) result.push_back(const_cast<ConstGenParticlePtr>(*pi)); #endif return result; } std::vector<ConstGenParticlePtr> particles(ConstGenParticlePtr gp, const Relatives &relo){ ConstGenVertexPtr vtx; switch(relo){ case HepMC::parents: case HepMC::ancestors: vtx = gp->production_vertex(); break; case HepMC::children: case HepMC::descendants: vtx = gp->end_vertex(); break; default: throw std::runtime_error("Not implemented!"); break; } return particles(vtx, relo); } int uniqueId(ConstGenParticlePtr gp){ return gp->barcode(); } int particles_size(ConstGenEventPtr ge){ return ge->particles_size(); } int particles_size(const GenEvent *ge){ return ge->particles_size(); } std::pair<ConstGenParticlePtr, ConstGenParticlePtr> beams(const GenEvent *ge){ return ge->beam_particles(); } std::shared_ptr<HepMC::IO_GenEvent> makeReader(std::istream &istr, std::string *) { return make_shared<HepMC::IO_GenEvent>(istr); } bool readEvent(std::shared_ptr<HepMC::IO_GenEvent> io, std::shared_ptr<GenEvent> evt){ if(io->rdstate() != 0) return false; if(!io->fill_next_event(evt.get())) return false; return true; } // These functions could be filled with code doing the same stuff as // in the HepMC3 version of this file. void strip(GenEvent &, const set<long> &) {} + vector<string> weightNames(const GenEvent & ge) { + /// reroute the print output to a std::stringstream and process + /// The iteration is done over a map in hepmc2 so this is safe + vector<string> ret; + std::ostringstream stream; + ge.weights().print(stream); // Super lame, I know + string str = stream.str(); + + std::regex re("(([^()]+))"); // Regex for stuff enclosed by parentheses () + for (std::sregex_iterator i = std::sregex_iterator(str.begin(), str.end(), re); + i != std::sregex_iterator(); ++i ) { + std::smatch m = *i; + vector<string> temp = Rivet::split(m.str(), "[,]"); + if (temp.size() == 2) { + // store the default weight based on weight names + if (temp[0] == "Weight" || temp[0] == "0" || temp[0] == "Default") { + ret.push_back(""); + } else + ret.push_back(temp[0]); + } + } + return ret; + } + + double crossSection(const GenEvent & ge) { + return ge.cross_section()->cross_section(); + } + + std::valarray<double> weights(const GenEvent & ge) { + const size_t W = ge.weights().size(); + std::valarray<double> wts(W); + for (unsigned int iw = 0; iw < W; ++iw) + wts[iw] = ge.weights()[iw]; + return wts; + } } } diff --git a/src/Tools/RivetYODA.cc b/src/Tools/RivetYODA.cc --- a/src/Tools/RivetYODA.cc +++ b/src/Tools/RivetYODA.cc @@ -1,504 +1,505 @@ #include "Rivet/Config/RivetCommon.hh" #include "Rivet/Tools/RivetYODA.hh" #include "Rivet/Tools/RivetPaths.hh" #include "YODA/ReaderYODA.h" #include "YODA/ReaderAIDA.h" // use execinfo for backtrace if available #include "DummyConfig.hh" #ifdef HAVE_EXECINFO_H #include <execinfo.h> #endif #include <regex> #include <iostream> using namespace std; namespace Rivet { template <class T> Wrapper<T>::~Wrapper() {} template <class T> Wrapper<T>::Wrapper(const vector<string>& weightNames, const T & p) { + _basePath = p.path(); for (const string& weightname : weightNames) { _persistent.push_back(make_shared<T>(p)); _final.push_back(make_shared<T>(p)); auto obj = _persistent.back(); auto final = _final.back(); if (weightname != "") { obj->setPath("/RAW" + obj->path() + "[" + weightname + "]"); final->setPath(final->path() + "[" + weightname + "]"); } }
} template <class T> typename T::Ptr Wrapper<T>::active() const { if ( !_active ) { #ifdef HAVE_BACKTRACE void * buffer[4]; backtrace(buffer, 4); backtrace_symbols_fd(buffer, 4 , 1); #endif assert(false && "No active pointer set. Was this object booked in init()?"); } return _active; } template <class T> void Wrapper<T>::newSubEvent() { typename TupleWrapper<T>::Ptr tmp = make_shared<TupleWrapper<T>>(_persistent[0]->clone()); tmp->reset(); _evgroup.push_back( tmp ); _active = _evgroup.back(); assert(_active); } string getDatafilePath(const string& papername) { /// Try to find YODA otherwise fall back to try AIDA const string path1 = findAnalysisRefFile(papername + ".yoda"); if (!path1.empty()) return path1; const string path2 = findAnalysisRefFile(papername + ".aida"); if (!path2.empty()) return path2; throw Rivet::Error("Couldn't find ref data file '" + papername + ".yoda" + " in data path, '" + getRivetDataPath() + "', or '.'"); } map<string, YODA::AnalysisObjectPtr> getRefData(const string& papername) { const string datafile = getDatafilePath(papername); // Make an appropriate data file reader and read the data objects /// @todo Remove AIDA support some day... YODA::Reader& reader = (datafile.find(".yoda") != string::npos) ? \ YODA::ReaderYODA::create() : YODA::ReaderAIDA::create(); vector<YODA::AnalysisObject*> aovec; reader.read(datafile, aovec); // Return value, to be populated map<string, YODA::AnalysisObjectPtr> rtn; for ( YODA::AnalysisObject* ao : aovec ) { YODA::AnalysisObjectPtr refdata(ao); if (!refdata) continue; const string plotpath = refdata->path(); // Split path at "/" and only return the last field, i.e. the histogram ID const size_t slashpos = plotpath.rfind("/"); const string plotname = (slashpos+1 < plotpath.size()) ? plotpath.substr(slashpos+1) : ""; rtn[plotname] = refdata; } return rtn; } } namespace { using Rivet::Fill; using Rivet::Fills; using Rivet::TupleWrapper; template <class T> double get_window_size(const typename T::Ptr & histo, typename T::BinType x) { // the bin index we fall in const auto binidx = histo->binIndexAt(x); // gaps, overflow, underflow don't contribute if ( binidx == -1 ) return 0; const auto & b = histo->bin(binidx); // if we don't have a valid neighbouring bin, // we use infinite width typename T::Bin b1(-1.0/0.0, 1.0/0.0); // points in the top half compare to the upper neighbour if ( x > b.xMid() ) { size_t nextidx = binidx + 1; if ( nextidx < histo->bins().size() ) b1 = histo->bin(nextidx); } else { // compare to the lower neighbour int nextidx = binidx - 1; if ( nextidx >= 0 ) b1 = histo->bin(nextidx); } // the factor 2 is arbitrary, could poss. be smaller return min( b.width(), b1.width() ) / 2.0; } template <class T> typename T::BinType fillT2binT(typename T::FillType a) { return a; } template <> YODA::Profile1D::BinType fillT2binT<YODA::Profile1D>(YODA::Profile1D::FillType a) { return get<0>(a); } template <> YODA::Profile2D::BinType fillT2binT<YODA::Profile2D>(YODA::Profile2D::FillType a) { return YODA::Profile2D::BinType{ get<0>(a), get<1>(a) }; } template <class T> void commit(vector<typename T::Ptr> & persistent, const vector< vector<Fill<T>> > & tuple, const vector<valarray<double>> & weights ) { // TODO check if all the xs are in the same bin anyway! // Then no windowing needed assert(persistent.size() == weights[0].size()); for ( const auto & x : tuple ) { double maxwindow = 0.0; for ( const auto & xi : x ) { // TODO check for NOFILL here // persistent[0] has the same binning as all the other persistent objects double window = get_window_size<T>(persistent[0], fillT2binT<T>(xi.first)); if ( window > maxwindow ) maxwindow = window; } const double wsize = maxwindow; // all windows have same size set<double> edgeset; // bin edges need to be in here!
for ( const auto & xi : x ) { edgeset.insert(fillT2binT<T>(xi.first) - wsize); edgeset.insert(fillT2binT<T>(xi.first) + wsize); } vector< std::tuple<double,valarray<double>,double> > hfill; double sumf = 0.0; auto edgit = edgeset.begin(); double ehi = *edgit; while ( ++edgit != edgeset.end() ) { double elo = ehi; ehi = *edgit; valarray<double> sumw(0.0, persistent.size()); // need m copies of this bool gap = true; // Check for gaps between the sub-windows. for ( size_t i = 0; i < x.size(); ++i ) { // check equals comparisons here! if ( fillT2binT<T>(x[i].first) + wsize >= ehi && fillT2binT<T>(x[i].first) - wsize <= elo ) { sumw += x[i].second * weights[i]; gap = false; } } if ( gap ) continue; hfill.push_back( make_tuple( (ehi + elo)/2.0, sumw, (ehi - elo) ) ); sumf += ehi - elo; } for ( auto f : hfill ) for ( size_t m = 0; m < persistent.size(); ++m ) persistent[m]->fill( get<0>(f), get<1>(f)[m], get<2>(f)/sumf ); // Note the scaling to one single fill } } template<> void commit<YODA::Histo2D>(vector<YODA::Histo2D::Ptr> & persistent, const vector< vector<Fill<YODA::Histo2D>> > & tuple, const vector<valarray<double>> & weights) {} template<> void commit<YODA::Profile2D>(vector<YODA::Profile2D::Ptr> & persistent, const vector< vector<Fill<YODA::Profile2D>> > & tuple, const vector<valarray<double>> & weights) {} template <class T> double distance(T a, T b) { return abs(a - b); } template <> double distance<tuple<double,double> >(tuple<double,double> a, tuple<double,double> b) { return Rivet::sqr(get<0>(a) - get<0>(b)) + Rivet::sqr(get<1>(a) - get<1>(b)); } } namespace Rivet { bool copyao(YODA::AnalysisObjectPtr src, YODA::AnalysisObjectPtr dst) { for (const std::string& a : src->annotations()) dst->setAnnotation(a, src->annotation(a)); if ( aocopy<YODA::Counter>(src,dst) ) return true; if ( aocopy<YODA::Histo1D>(src,dst) ) return true; if ( aocopy<YODA::Histo2D>(src,dst) ) return true; if ( aocopy<YODA::Profile1D>(src,dst) ) return true; if ( aocopy<YODA::Profile2D>(src,dst) ) return true; if ( aocopy<YODA::Scatter1D>(src,dst) ) return true; if ( aocopy<YODA::Scatter2D>(src,dst) ) return true; if ( aocopy<YODA::Scatter3D>(src,dst) ) return true; return false; } bool addaos(YODA::AnalysisObjectPtr dst, YODA::AnalysisObjectPtr src, double scale) { if ( aoadd<YODA::Counter>(dst,src,scale) ) return true; if ( aoadd<YODA::Histo1D>(dst,src,scale) ) return true; if ( aoadd<YODA::Histo2D>(dst,src,scale) ) return true; if ( aoadd<YODA::Profile1D>(dst,src,scale) ) return true; if ( aoadd<YODA::Profile2D>(dst,src,scale) ) return true; return false; } } namespace { /// fills is a vector of sub-events with an ordered set of x-values of /// the fills in each sub-event. NOFILL should be an "impossible" /// value for this histogram. Returns a vector of sub-events with /// an ordered vector of fills (including NOFILLs) for each sub-event. template <class T> vector< vector<Fill<T> > > match_fills(const vector<typename TupleWrapper<T>::Ptr> & evgroup, const Fill<T> & NOFILL) { vector< vector<Fill<T> > > matched; // First just copy subevents into vectors and find the longest vector. unsigned int maxfill = 0; // length of biggest vector int imax = 0; // index position of biggest vector for ( const auto & it : evgroup ) { const auto & subev = it->fills(); if ( subev.size() > maxfill ) { maxfill = subev.size(); imax = matched.size(); } matched.push_back(vector<Fill<T> >(subev.begin(), subev.end())); } // Now, go through all subevents with missing fills. const vector<Fill<T> > & full = matched[imax]; // the longest one for ( auto & subev : matched ) { if ( subev.size() == maxfill ) continue; // Add NOFILLs to the end; while ( subev.size() < maxfill ) subev.push_back(NOFILL); // Iterate from the back and shift all fill values backwards by // swapping with NOFILLs so that they better match the full // subevent.
for ( int i = maxfill - 1; i >= 0; --i ) { if ( subev[i] == NOFILL ) continue; size_t j = i; while ( j + 1 < maxfill && subev[j + 1] == NOFILL && distance(fillT2binT<T>(subev[j].first), fillT2binT<T>(full[j].first)) > distance(fillT2binT<T>(subev[j].first), fillT2binT<T>(full[j + 1].first)) ) { swap(subev[j], subev[j + 1]); ++j; } } } // transpose vector<vector<Fill<T>>> result(maxfill,vector<Fill<T>>(matched.size())); for (size_t i = 0; i < matched.size(); ++i) for (size_t j = 0; j < maxfill; ++j) result.at(j).at(i) = matched.at(i).at(j); return result; } } namespace Rivet { template <class T> void Wrapper<T>::pushToPersistent(const vector<valarray<double> >& weight) { assert( _evgroup.size() == weight.size() ); // have we had subevents at all? const bool have_subevents = _evgroup.size() > 1; if ( ! have_subevents ) { // simple replay of all tuple entries // each recorded fill is inserted into all persistent weightname histos for ( size_t m = 0; m < _persistent.size(); ++m ) for ( const auto & f : _evgroup[0]->fills() ) _persistent[m]->fill( f.first, f.second * weight[0][m] ); } else { // outer index is subevent, inner index is jets in the event vector<vector<Fill<T>>> linedUpXs = match_fills<T>(_evgroup, {typename T::FillType(), 0.0}); commit( _persistent, linedUpXs, weight ); } _evgroup.clear(); _active.reset(); } template <class T> void Wrapper<T>::pushToFinal() { for ( size_t m = 0; m < _persistent.size(); ++m ) { copyao(_persistent.at(m), _final.at(m)); } } template <> void Wrapper<YODA::Counter>::pushToPersistent(const vector<valarray<double> >& weight) { for ( size_t m = 0; m < _persistent.size(); ++m ) { for ( size_t n = 0; n < _evgroup.size(); ++n ) { for ( const auto & f : _evgroup[n]->fills() ) { _persistent[m]->fill( f.second * weight[n][m] ); } } } _evgroup.clear(); _active.reset(); } template <> void Wrapper<YODA::Scatter1D>::pushToPersistent(const vector<valarray<double> >& weight) { _evgroup.clear(); _active.reset(); } template <> void Wrapper<YODA::Scatter2D>::pushToPersistent(const vector<valarray<double> >& weight) { _evgroup.clear(); _active.reset(); } template <> void Wrapper<YODA::Scatter3D>::pushToPersistent(const vector<valarray<double> >& weight) { _evgroup.clear(); _active.reset(); } // explicitly instantiate all wrappers template class Wrapper<YODA::Counter>; template class Wrapper<YODA::Histo1D>; template class Wrapper<YODA::Histo2D>; template class Wrapper<YODA::Profile1D>; template class Wrapper<YODA::Profile2D>; template class Wrapper<YODA::Scatter1D>; template class Wrapper<YODA::Scatter2D>; template class Wrapper<YODA::Scatter3D>; AOPath::AOPath(string fullpath) { // First check if this is a global system path _path = fullpath; std::regex resys("^(/RAW)?/([^\\[/]+)(\\[(.+)\\])?$"); smatch m; _valid = regex_search(fullpath, m, resys); if ( _valid ) { _raw = (m[1] == "/RAW"); _name = m[2]; _weight = m[4]; return; } // If not, assume it is a normal analysis path.
std::regex repath("^(/RAW)?(/REF)?/([^/:]+)(:[^/]+)?(/TMP)?/([^\\[]+)(\\[(.+)\\])?"); _valid = regex_search(fullpath, m, repath); if ( !_valid ) return; _raw = (m[1] == "/RAW"); _ref = (m[2] == "/REF"); _analysis = m[3]; _optionstring = m[4]; _tmp = (m[5] == "/TMP"); _name = m[6]; _weight = m[8]; std::regex reopt(":([^=]+)=([^:]+)"); string s = _optionstring; while ( regex_search(s, m, reopt) ) { _options[m[1]] = m[2]; s = m.suffix(); } } void AOPath::fixOptionString() { ostringstream oss; for ( auto optval : _options ) oss << ":" << optval.first << "=" << optval.second; _optionstring = oss.str(); } string AOPath::mkPath() const { ostringstream oss; if ( isRaw() ) oss << "/RAW"; else if ( isRef() ) oss << "/REF"; if ( _analysis != "" ) oss << "/" << analysis(); for ( auto optval : _options ) oss << ":" << optval.first << "=" << optval.second; if ( isTmp() ) oss << "/TMP"; oss << "/" << name(); if ( weight() != "" ) oss << "[" << weight() << "]"; return oss.str(); } void AOPath::debug() const { cout << "Full path: " << _path << endl; if ( !_valid ) { cout << "This is not a valid analysis object path" << endl << endl; return; } cout << "Check path: " << mkPath() << endl; cout << "Analysis: " << _analysis << endl; cout << "Name: " << _name << endl; cout << "Weight: " << _weight << endl; cout << "Properties: "; if ( _raw ) cout << "raw "; if ( _tmp ) cout << "tmp "; if ( _ref ) cout << "ref "; cout << endl; cout << "Options: "; for ( auto opt : _options ) cout << opt.first << "->" << opt.second << " "; cout << endl << endl; } }
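// ------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the patch): AOPath decomposes
// Rivet analysis-object paths of the assumed form
//   [/RAW][/REF]/<analysis>[:opt=val...][/TMP]/<name>[weight]
// The analysis and histogram names below are hypothetical:
//
//   AOPath p("/RAW/MY_ANALYSIS:OPT=2/TMP/myhist[nominal]");
//   if (p) {                            // parsed successfully
//     assert(p.isRaw() && p.isTmp() && !p.isRef());
//     assert(p.analysis() == "MY_ANALYSIS");
//     assert(p.name()     == "myhist");
//     assert(p.weight()   == "nominal");
//     assert(p.mkPath()   == p.path()); // round-trip reconstruction
//   }
// ------------------------------------------------------------------------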