
diff --git a/analyses/pluginCMS/CMS_2016_PAS_SUS_16_14.cc b/analyses/pluginCMS/CMS_2016_PAS_SUS_16_14.cc
--- a/analyses/pluginCMS/CMS_2016_PAS_SUS_16_14.cc
+++ b/analyses/pluginCMS/CMS_2016_PAS_SUS_16_14.cc
@@ -1,221 +1,221 @@
// -*- C++ -*-
#include "Rivet/Analysis.hh"
#include "Rivet/Projections/FinalState.hh"
#include "Rivet/Projections/PromptFinalState.hh"
#include "Rivet/Projections/FastJets.hh"
#include "Rivet/Projections/Sphericity.hh"
#include "Rivet/Projections/SmearedParticles.hh"
#include "Rivet/Projections/SmearedJets.hh"
#include "Rivet/Projections/SmearedMET.hh"
#include "Rivet/Tools/Cutflow.hh"
namespace Rivet {
/// @brief CMS 2016 0-lepton SUSY search, from 13/fb PAS note
class CMS_2016_PAS_SUS_16_14 : public Analysis {
public:
/// Constructor
DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2016_PAS_SUS_16_14);
/// @name Analysis methods
//@{
/// Book histograms and initialise projections before the run
void init() {
// Initialise and register projections
FinalState calofs(Cuts::abseta < 5.0);
FastJets fj(calofs, FastJets::ANTIKT, 0.4);
declare(fj, "TruthJets");
declare(SmearedJets(fj, JET_SMEAR_CMS_RUN2, [](const Jet& j) {
if (j.abseta() > 2.5) return 0.;
return j.bTagged() ? 0.55 : j.cTagged() ? 0.12 : 0.016; }), "Jets");
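// NB. b-tag working point encoded above: 55% b-tag efficiency, with 12% charm and 1.6% light-flavour mistag rates, for jets within |eta| < 2.5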
FinalState es(Cuts::abspid == PID::ELECTRON && Cuts::abseta < 2.5);
declare(es, "TruthElectrons");
declare(SmearedParticles(es, ELECTRON_EFF_CMS_RUN2, ELECTRON_SMEAR_CMS_RUN2), "Electrons");
FinalState mus(Cuts::abspid == PID::MUON && Cuts::abseta < 2.4);
declare(mus, "TruthMuons");
declare(SmearedParticles(mus, MUON_EFF_CMS_RUN2, MUON_SMEAR_CMS_RUN2), "Muons");
FinalState isofs(Cuts::abseta < 3.0 && Cuts::abspid != PID::ELECTRON && Cuts::abspid != PID::MUON);
declare(isofs, "IsoFS");
FinalState cfs(Cuts::abseta < 2.5 && Cuts::abscharge != 0);
declare(cfs, "TruthTracks");
declare(SmearedParticles(cfs, TRK_EFF_CMS_RUN2), "Tracks");
// Book histograms/counters
_h_srcounts.resize(160);
for (size_t ij = 0; ij < 4; ++ij) {
for (size_t ib = 0; ib < 4; ++ib) {
for (size_t ih = 0; ih < 10; ++ih) {
const size_t i = 40*ij + 10*ib + ih;
_h_srcounts[i] = bookCounter(toString(2*ij+3) + "j-" + toString(ib) + "b-" + toString(ih));
}
}
}
_h_srcountsagg.resize(12);
for (size_t ia = 0; ia < 12; ++ia) {
_h_srcountsagg[ia] = bookCounter("agg-" + toString(ia));
}
}
/// Perform the per-event analysis
void analyze(const Event& event) {
// Get jets and require Nj >= 3
const Jets jets24 = apply<JetAlg>(event, "Jets").jetsByPt(Cuts::pT > 30*GeV && Cuts::abseta < 2.4);
if (jets24.size() < 3) vetoEvent;
// HT cut
- vector<double> jetpts24; transform(jets24, jetpts24, pT);
+ vector<double> jetpts24; transform(jets24, jetpts24, Kin::pT);
const double ht = sum(jetpts24, 0.0);
if (ht < 300*GeV) vetoEvent;
// HTmiss cut
const Jets jets50 = apply<JetAlg>(event, "Jets").jetsByPt(Cuts::pT > 30*GeV && Cuts::abseta < 5.0);
const FourMomentum htmissvec = -sum(jets50, mom, FourMomentum());
const double htmiss = htmissvec.pT();
if (htmiss < 300*GeV) vetoEvent;
// Get baseline electrons & muons
Particles elecs = apply<ParticleFinder>(event, "Electrons").particles(Cuts::pT > 10*GeV);
Particles muons = apply<ParticleFinder>(event, "Muons").particles(Cuts::pT > 10*GeV);
// Electron/muon isolation
const Particles calofs = apply<ParticleFinder>(event, "IsoFS").particles();
ifilter_discard(elecs, [&](const Particle& e) {
const double R = max(0.05, min(0.2, 10*GeV/e.pT()));
double ptsum = -e.pT();
for (const Particle& p : calofs)
if (deltaR(p,e) < R) ptsum += p.pT();
return ptsum / e.pT() > 0.1;
});
ifilter_discard(muons, [&](const Particle& m) {
const double R = max(0.05, min(0.2, 10*GeV/m.pT()));
double ptsum = -m.pT();
for (const Particle& p : calofs)
if (deltaR(p,m) < R) ptsum += p.pT();
return ptsum / m.pT() > 0.2;
});
// Veto the event if there are any remaining baseline leptons
if (!elecs.empty()) vetoEvent;
if (!muons.empty()) vetoEvent;
// Get isolated tracks
Particles trks25 = apply<ParticleFinder>(event, "Tracks").particles();
ifilter_discard(trks25, [&](const Particle& t) {
double ptsum = -t.pT();
for (const Particle& p : trks25)
if (deltaR(p,t) < 0.3) ptsum += p.pT();
return ptsum/t.pT() > ((t.abspid() == PID::ELECTRON || t.abspid() == PID::MUON) ? 0.2 : 0.1);
});
const Particles trks = filter_select(trks25, Cuts::abseta < 2.4);
// Isolated track pT, pTmiss and mT cut
// mT^2 = m1^2 + m2^2 + 2(ET1 ET2 - pT1 . pT2)
// => mT0^2 = 2(ET1 |pT2| - pT1 . pT2) for m1, m2 -> 0
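// NB. the calculation below keeps the t.mass2() term, i.e. only the ptmiss "particle" is treated as massless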
FourMomentum ptmissvec = htmissvec; ///< @todo Can we do better? No e,mu left...
const double ptmiss = ptmissvec.pT();
for (const Particle& t : trks) {
const double ptcut = (t.abspid() == PID::ELECTRON || t.abspid() == PID::MUON) ? 5*GeV : 10*GeV;
const double mT = sqrt( t.mass2() + 2*(t.Et()*ptmiss - t.pT()*ptmiss*cos(deltaPhi(t,ptmissvec))) );
if (mT < 100*GeV && t.pT() < ptcut) vetoEvent;
}
// Require the leading jets to be well separated in phi from the HTmiss vector
if (deltaPhi(htmissvec, jets24[0]) < 0.5) vetoEvent;
if (deltaPhi(htmissvec, jets24[1]) < 0.5) vetoEvent;
if (deltaPhi(htmissvec, jets24[2]) < 0.3) vetoEvent;
if (jets24.size() >= 4 && deltaPhi(htmissvec, jets24[3]) < 0.3) vetoEvent;
// Count jet and b-jet multiplicities
const size_t nj = jets24.size();
size_t nbj = 0;
for (const Jet& j : jets24)
if (j.bTagged()) nbj += 1;
////////
// Fill the aggregate signal regions first
if (nj >= 3 && nbj == 0 && ht > 500*GeV && htmiss > 500*GeV) _h_srcountsagg[ 0]->fill(event.weight());
if (nj >= 3 && nbj == 0 && ht > 1500*GeV && htmiss > 750*GeV) _h_srcountsagg[ 1]->fill(event.weight());
if (nj >= 5 && nbj == 0 && ht > 500*GeV && htmiss > 500*GeV) _h_srcountsagg[ 2]->fill(event.weight());
if (nj >= 5 && nbj == 0 && ht > 1500*GeV && htmiss > 750*GeV) _h_srcountsagg[ 3]->fill(event.weight());
if (nj >= 9 && nbj == 0 && ht > 1500*GeV && htmiss > 750*GeV) _h_srcountsagg[ 4]->fill(event.weight());
if (nj >= 3 && nbj >= 2 && ht > 500*GeV && htmiss > 500*GeV) _h_srcountsagg[ 5]->fill(event.weight());
if (nj >= 3 && nbj >= 1 && ht > 750*GeV && htmiss > 750*GeV) _h_srcountsagg[ 6]->fill(event.weight());
if (nj >= 5 && nbj >= 3 && ht > 500*GeV && htmiss > 500*GeV) _h_srcountsagg[ 7]->fill(event.weight());
if (nj >= 5 && nbj >= 2 && ht > 1500*GeV && htmiss > 750*GeV) _h_srcountsagg[ 8]->fill(event.weight());
if (nj >= 9 && nbj >= 3 && ht > 750*GeV && htmiss > 750*GeV) _h_srcountsagg[ 9]->fill(event.weight());
if (nj >= 7 && nbj >= 1 && ht > 300*GeV && htmiss > 300*GeV) _h_srcountsagg[10]->fill(event.weight());
if (nj >= 5 && nbj >= 1 && ht > 750*GeV && htmiss > 750*GeV) _h_srcountsagg[11]->fill(event.weight());
// Nj bin and Nbj bins
static const vector<double> njedges = {3., 5., 7., 9.};
const size_t inj = binIndex(nj, njedges, true);
static const vector<double> njbedges = {0., 1., 2., 3.};
const size_t inbj = binIndex(nbj, njbedges, true);
// HTmiss vs HT 2D bin
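// Bin map (paper numbering 1-10), as encoded below:
//   HTmiss 300-350 GeV: HT 300-500 / 500-1000 / >1000 GeV -> bins 1-3
//   HTmiss 350-500 GeV: HT 350-500 / 500-1000 / >1000 GeV -> bins 4-6
//   HTmiss 500-750 GeV: HT 500-1000 / >1000 GeV -> bins 7-8
//   HTmiss >750 GeV: HT 750-1500 / >1500 GeV -> bins 9-10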
int iht = 0;
if (htmiss < 350*GeV) {
iht = ht < 500*GeV ? 1 : ht < 1000*GeV ? 2 : 3;
} else if (htmiss < 500*GeV && ht > 350*GeV) {
iht = ht < 500*GeV ? 4 : ht < 1000*GeV ? 5 : 6;
} else if (htmiss < 750*GeV && ht > 500*GeV) {
iht = ht < 1000*GeV ? 7 : 8;
} else if (ht > 750*GeV) {
iht = ht < 1500*GeV ? 9 : 10;
}
if (iht == 0) vetoEvent;
iht -= 1; //< change from the paper's indexing scheme to C++ zero-indexed
// Total bin number
const size_t ibin = 40*inj + 10*inbj + (size_t)iht;
// Fill SR counter
_h_srcounts[ibin]->fill(event.weight());
}
/// Normalise counters after the run
void finalize() {
const double sf = 12.9*crossSection()/femtobarn/sumOfWeights();
scale(_h_srcounts, sf);
scale(_h_srcountsagg, sf);
}
//@}
private:
/// @name Histograms
//@{
vector<CounterPtr> _h_srcounts, _h_srcountsagg;
//@}
};
// The hook for the plugin system
DECLARE_RIVET_PLUGIN(CMS_2016_PAS_SUS_16_14);
}
diff --git a/bin/rivet b/bin/rivet
--- a/bin/rivet
+++ b/bin/rivet
@@ -1,680 +1,674 @@
#! /usr/bin/env python
"""\
Run Rivet analyses on HepMC events read from a file or Unix pipe
Examples:
%(prog)s [options] <hepmcfile> [<hepmcfile2> ...]
or
mkfifo fifo.hepmc
my_generator -o fifo.hepmc &
%(prog)s [options] fifo.hepmc
ENVIRONMENT:
* RIVET_ANALYSIS_PATH: list of paths to be searched for plugin analysis libraries at runtime
* RIVET_DATA_PATH: list of paths to be searched for data files (defaults to use analysis path)
* RIVET_WEIGHT_INDEX: the numerical weight-vector index to use in this run (default = 0; -1 = ignore weights)
* See the documentation for more environment variables.
"""
from __future__ import print_function
import os, sys
## Load the rivet module
try:
import rivet
except:
## If rivet loading failed, try to bootstrap the Python path!
try:
# TODO: Is this a good idea? Maybe just notify the user that their PYTHONPATH is wrong?
import commands
modname = sys.modules[__name__].__file__
binpath = os.path.dirname(modname)
rivetconfigpath = os.path.join(binpath, "rivet-config")
rivetpypath = commands.getoutput(rivetconfigpath + " --pythonpath")
sys.path.append(rivetpypath)
import rivet
except:
sys.stderr.write("The rivet Python module could not be loaded: is your PYTHONPATH set correctly?\n")
sys.exit(5)
rivet.util.check_python_version()
rivet.util.set_process_name("rivet")
import time, datetime, logging, signal
## Parse command line options
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("ARGS", nargs="*")
parser.add_argument("--version", dest="SHOW_VERSION", action="store_true", default=False, help="show Rivet version")
anagroup = parser.add_argument_group("Analysis handling")
anagroup.add_argument("-a", "--analysis", "--analyses", dest="ANALYSES", action="append",
default=[], metavar="ANA",
help="add an analysis (or comma-separated list of analyses) to the processing list.")
anagroup.add_argument("--list-analyses", "--list", dest="LIST_ANALYSES", action="store_true",
default=False, help="show the list of available analyses' names. With -v, it shows the descriptions, too")
anagroup.add_argument("--list-keywords", "--keywords", dest="LIST_KEYWORDS", action="store_true",
default=False, help="show the list of available keywords.")
anagroup.add_argument("--list-used-analyses", action="store_true", dest="LIST_USED_ANALYSES",
default=False, help="list the analyses used by this command (after subtraction of inappropriate ones)")
anagroup.add_argument("--show-analysis", "--show-analyses", "--show", dest="SHOW_ANALYSES", action="append",
default=[], help="show the details of an analysis")
anagroup.add_argument("--show-bibtex", dest="SHOW_BIBTEX", action="store_true",
default=False, help="show BibTeX entries for all used analyses")
anagroup.add_argument("--analysis-path", dest="ANALYSIS_PATH", metavar="PATH", default=None,
help="specify the analysis search path (cf. $RIVET_ANALYSIS_PATH).")
# TODO: remove/deprecate the append?
anagroup.add_argument("--analysis-path-append", dest="ANALYSIS_PATH_APPEND", metavar="PATH", default=None,
help="append to the analysis search path (cf. $RIVET_ANALYSIS_PATH).")
anagroup.add_argument("--pwd", dest="ANALYSIS_PATH_PWD", action="store_true", default=False,
help="append the current directory (pwd) to the analysis/data search paths (cf. $RIVET_ANALYSIS_PATH).")
extragroup = parser.add_argument_group("Extra run settings")
extragroup.add_argument("-o", "-H", "--histo-file", dest="HISTOFILE",
default="Rivet.yoda", help="specify the output histo file path (default = %(default)s)")
extragroup.add_argument("-p", "--preload-file", dest="PRELOADFILE",
default=None, help="specify and old yoda file to initialize (default = %(default)s)")
extragroup.add_argument("--no-histo-file", dest="WRITE_DATA", action="store_false", default=True,
help="don't write out any histogram file at the end of the run (default = write)")
extragroup.add_argument("-x", "--cross-section", dest="CROSS_SECTION",
default=None, metavar="XS",
help="specify the signal process cross-section in pb")
extragroup.add_argument("-n", "--nevts", dest="MAXEVTNUM", type=int,
default=None, metavar="NUM",
help="restrict the max number of events to process")
extragroup.add_argument("--nskip", dest="EVTSKIPNUM", type=int,
default=0, metavar="NUM",
help="skip NUM events read from input before beginning processing")
extragroup.add_argument("--runname", dest="RUN_NAME", default=None, metavar="NAME",
help="give an optional run name, to be prepended as a 'top level directory' in histo paths")
extragroup.add_argument("--ignore-beams", dest="IGNORE_BEAMS", action="store_true", default=False,
help="ignore input event beams when checking analysis compatibility. "
"WARNING: analyses may not work correctly, or at all, with inappropriate beams")
-extragroup.add_argument("-d", "--dump", dest="DUMP_PERIOD", type=int,
- default=None, metavar="NUM",
- help="run finalize() and write out the resulting yoda-file at regular intervals")
+extragroup.add_argument("-d", "--dump", "--histo-interval", dest="DUMP_PERIOD", type=int,
+ default=1000, metavar="NUM",
+ help="specify the number of events between histogram file updates, "
+ "default = %default. Set to 0 to only write out at the end of the run. "
+ "Note that intermediate histograms will be those from the analyze step "
+ "only, except for analyses explicitly declared Reentrant for which the "
+ "finalize function is executed first.")
timinggroup = parser.add_argument_group("Timeouts and periodic operations")
timinggroup.add_argument("--event-timeout", dest="EVENT_TIMEOUT", type=int,
default=21600, metavar="NSECS",
help="max time in whole seconds to wait for an event to be generated from the specified source (default = %(default)s)")
timinggroup.add_argument("--run-timeout", dest="RUN_TIMEOUT", type=int,
default=None, metavar="NSECS",
help="max time in whole seconds to wait for the run to finish. This can be useful on batch systems such "
"as the LCG Grid where tokens expire on a fixed wall-clock and can render long Rivet runs unable to write "
"out the final histogram file (default = unlimited)")
-timinggroup.add_argument("--histo-interval", dest="HISTO_WRITE_INTERVAL", type=int,
- default=1000, help="specify the number of events between histogram file updates, default = %(default)s. "
- "Set to 0 to only write out at the end of the run. Note that intermediate histograms will be those "
- "from the analyze step only: analysis finalizing is currently not executed until the end of the run.")
verbgroup = parser.add_argument_group("Verbosity control")
parser.add_argument("-l", dest="NATIVE_LOG_STRS", action="append",
default=[], help="set a log level in the Rivet library")
verbgroup.add_argument("-v", "--verbose", action="store_const", const=logging.DEBUG, dest="LOGLEVEL",
default=logging.INFO, help="print debug (very verbose) messages")
verbgroup.add_argument("-q", "--quiet", action="store_const", const=logging.WARNING, dest="LOGLEVEL",
default=logging.INFO, help="be very quiet")
args = parser.parse_args()
## Print the version and exit
if args.SHOW_VERSION:
print("rivet v%s" % rivet.version())
sys.exit(0)
## Override/modify analysis search path
if args.ANALYSIS_PATH:
rivet.setAnalysisLibPaths(args.ANALYSIS_PATH.split(":"))
rivet.setAnalysisDataPaths(args.ANALYSIS_PATH.split(":"))
if args.ANALYSIS_PATH_APPEND:
for ap in args.ANALYSIS_PATH_APPEND.split(":"):
rivet.addAnalysisLibPath(ap)
rivet.addAnalysisDataPath(ap)
if args.ANALYSIS_PATH_PWD:
rivet.addAnalysisLibPath(os.path.abspath("."))
rivet.addAnalysisDataPath(os.path.abspath("."))
## Configure logging
logging.basicConfig(level=args.LOGLEVEL, format="%(message)s")
for l in args.NATIVE_LOG_STRS:
name, level = None, None
try:
name, level = l.split("=")
except:
name = "Rivet"
level = l
## Fix name
if name != "Rivet" and not name.startswith("Rivet."):
name = "Rivet." + name
try:
## Get right error type
level = rivet.LEVELS.get(level.upper(), None)
logging.debug("Setting log level: %s %d" % (name, level))
rivet.setLogLevel(name, level)
except:
logging.warning("Couldn't process logging string '%s'" % l)
############################
## Listing available analyses/keywords
def getAnalysesByKeyword(alist, kstring):
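"""Filter alist by a keyword string, e.g. (keywords illustrative) the
kstring "@susy^@mc" keeps analyses tagged 'susy' and drops any tagged 'mc'."""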
add, veto, ret = [], [], []
bits = [i for i in kstring.replace("^@", "@^").split("@") if len(i) > 0]
for b in bits:
if b.startswith("^"):
veto.append(b.strip("^"))
else:
add.append(b)
add = set(add)
veto = set(veto)
for a in alist:
kwds = set([i.lower() for i in rivet.AnalysisLoader.getAnalysis(a).keywords()])
if not kwds.intersection(veto) and len(kwds.intersection(add)) == len(add):
ret.append(a)
return ret
## List of analyses
all_analyses = rivet.AnalysisLoader.analysisNames()
if args.LIST_ANALYSES:
## Treat args as case-insensitive regexes if present
regexes = None
if args.ARGS:
import re
regexes = [re.compile(arg, re.I) for arg in args.ARGS]
# import tempfile, subprocess
# tf, tfpath = tempfile.mkstemp(prefix="rivet-list.")
names = []
msg = []
for aname in all_analyses:
if not regexes:
toshow = True
else:
toshow = False
for regex in regexes:
if regex.search(aname):
toshow = True
break
if toshow:
names.append(aname)
msg.append('')
if args.LOGLEVEL <= logging.INFO:
a = rivet.AnalysisLoader.getAnalysis(aname)
st = "" if a.status() == "VALIDATED" else ("[%s] " % a.status())
# detex will very likely introduce some non-ASCII chars from
# greek names in analysis titles.
# The u"" prefix and explicit print encoding are necessary for
# py2 to handle this properly
msg[-1] = u"%s%s" % (st, a.summary())
if args.LOGLEVEL < logging.INFO:
if a.keywords():
msg[-1] += u" [%s]" % " ".join(a.keywords())
if a.luminosityfb():
msg[-1] += u" [ \int L = %s fb^{-1} ]" % a.luminosityfb()
msg = rivet.util.detex(msg)
retlist = '\n'.join([ u"%-25s %s" % (a,m) for a,m in zip(names,msg) ])
if type(u'') is not str:
retlist = retlist.encode('utf-8')
print(retlist)
sys.exit(0)
def getKeywords(alist):
all_keywords = []
for a in alist:
all_keywords.extend(rivet.AnalysisLoader.getAnalysis(a).keywords())
all_keywords = [i.lower() for i in all_keywords]
return sorted(list(set(all_keywords)))
## List keywords
if args.LIST_KEYWORDS:
# a = rivet.AnalysisLoader.getAnalysis(aname)
for k in getKeywords(all_analyses):
print(k)
sys.exit(0)
## Show analyses' details
if len(args.SHOW_ANALYSES) > 0:
toshow = []
for i, a in enumerate(args.SHOW_ANALYSES):
a_up = a.upper()
if a_up in all_analyses and a_up not in toshow:
toshow.append(a_up)
else:
## Treat as a case-insensitive regex
import re
regex = re.compile(a, re.I)
for ana in all_analyses:
if regex.search(ana) and ana not in toshow:
toshow.append(ana)
msgs = []
for i, name in enumerate(sorted(toshow)):
import textwrap
ana = rivet.AnalysisLoader.getAnalysis(name)
msg = ""
msg += name + "\n"
msg += (len(name) * "=") + "\n\n"
msg += rivet.util.detex(ana.summary()) + "\n\n"
msg += "Status: " + ana.status() + "\n\n"
# TODO: reduce to only show Inspire in v3
if ana.inspireId():
msg += "Inspire ID: " + ana.inspireId() + "\n"
msg += "Inspire URL: http://inspire-hep.net/record/" + ana.inspireId() + "\n"
msg += "HepData URL: http://hepdata.net/record/ins" + ana.inspireId() + "\n"
elif ana.spiresId():
msg += "Spires ID: " + ana.spiresId() + "\n"
msg += "Inspire URL: http://inspire-hep.net/search?p=find+key+" + ana.spiresId() + "\n"
msg += "HepData URL: http://hepdata.cedar.ac.uk/view/irn" + ana.spiresId() + "\n"
if ana.year():
msg += "Year of publication: " + ana.year() + "\n"
if ana.bibKey():
msg += "BibTeX key: " + ana.bibKey() + "\n"
msg += "Authors:\n"
for a in ana.authors():
msg += " " + a + "\n"
msg += "\n"
msg += "Description:\n"
twrap = textwrap.TextWrapper(width=75, initial_indent=2*" ", subsequent_indent=2*" ")
msg += twrap.fill(rivet.util.detex(ana.description())) + "\n\n"
if ana.experiment():
msg += "Experiment: " + ana.experiment()
if ana.collider():
msg += "(%s)" % ana.collider()
msg += "\n"
# TODO: move this formatting into Analysis or a helper function?
if ana.requiredBeams():
def pid_to_str(pid):
if pid == 11:
return "e-"
elif pid == -11:
return "e+"
elif pid == 2212:
return "p+"
elif pid == -2212:
return "p-"
elif pid == 10000:
return "*"
else:
return str(pid)
beamstrs = []
for bp in ana.requiredBeams():
beamstrs.append(pid_to_str(bp[0]) + " " + pid_to_str(bp[1]))
msg += "Beams:" + ", ".join(beamstrs) + "\n"
if ana.requiredEnergies():
msg += "Beam energies:" + "; ".join(["(%0.1f, %0.1f) GeV\n" % (epair[0], epair[1]) for epair in ana.requiredEnergies()])
else:
msg += "Beam energies: ANY\n"
if ana.runInfo():
msg += "Run details:\n"
twrap = textwrap.TextWrapper(width=75, initial_indent=2*" ", subsequent_indent=4*" ")
for l in ana.runInfo().split("\n"):
msg += twrap.fill(l) + "\n"
if ana.luminosityfb():
msg+= "\nIntegrated data luminosity = %s inverse fb.\n"%ana.luminosityfb()
if ana.keywords():
msg += "\nAnalysis keywords:"
for k in ana.keywords():
msg += " %s"%k
msg+= "\n\n"
if ana.references():
msg += "\n" + "References:\n"
for r in ana.references():
url = None
if r.startswith("arXiv:"):
code = r.split()[0].replace("arXiv:", "")
url = "http://arxiv.org/abs/" + code
elif r.startswith("doi:"):
code = r.replace("doi:", "")
url = "http://dx.doi.org/" + code
if url is not None:
r += " - " + url
msg += " " + r + "\n"
## Add to the output
msgs.append(msg)
## Write the combined messages to a temporary file and page it
if msgs:
try:
import tempfile, subprocess
tffd, tfpath = tempfile.mkstemp(prefix="rivet-show.")
msgsum = u"\n\n".join(msgs)
if type(u'') is not str:
msgsum = msgsum.encode('utf-8')
os.write(tffd, msgsum)
if sys.stdout.isatty():
pager = subprocess.Popen(["less", "-FX", tfpath]) #, stdin=subprocess.PIPE)
pager.communicate()
else:
f = open(tfpath)
print(f.read())
f.close()
finally:
os.unlink(tfpath) #< always clean up
sys.exit(0)
############################
## Actual analysis runs
## We allow comma-separated lists of analysis names -- normalise the list here
newanas = []
for a in args.ANALYSES:
if "," in a:
newanas += a.split(",")
elif "@" in a: #< NB. this bans combination of ana lists and keywords in a single arg
temp = getAnalysesByKeyword(all_analyses, a)
for i in temp:
newanas.append(i)
else:
newanas.append(a)
args.ANALYSES = newanas
## Parse supplied cross-section
if args.CROSS_SECTION is not None:
xsstr = args.CROSS_SECTION
try:
args.CROSS_SECTION = float(xsstr)
except:
import re
suffmatch = re.search(r"[^\d.]", xsstr)
if not suffmatch:
raise ValueError("Bad cross-section string: %s" % xsstr)
factor = base = None
suffstart = suffmatch.start()
if suffstart != -1:
base = xsstr[:suffstart]
suffix = xsstr[suffstart:].lower()
if suffix == "mb":
factor = 1e+9
elif suffix == "mub":
factor = 1e+6
elif suffix == "nb":
factor = 1e+3
elif suffix == "pb":
factor = 1
elif suffix == "fb":
factor = 1e-3
elif suffix == "ab":
factor = 1e-6
if factor is None or base is None:
raise ValueError("Bad cross-section string: %s" % xsstr)
xs = float(base) * factor
args.CROSS_SECTION = xs
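# e.g. (illustrative values) "5nb" -> 5000.0 pb, "0.8fb" -> 0.0008 pb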
## Print the available CLI options!
#if args.LIST_OPTIONS:
# for o in parser.option_list:
# print(o.get_opt_string())
# sys.exit(0)
## Set up signal handling
RECVD_KILL_SIGNAL = None
def handleKillSignal(signum, frame):
"Declare us as having been signalled, and return to default handling behaviour"
global RECVD_KILL_SIGNAL
logging.critical("Signal handler called with signal " + str(signum))
RECVD_KILL_SIGNAL = signum
signal.signal(signum, signal.SIG_DFL)
## Signals to handle
signal.signal(signal.SIGTERM, handleKillSignal);
signal.signal(signal.SIGHUP, handleKillSignal);
signal.signal(signal.SIGINT, handleKillSignal);
signal.signal(signal.SIGUSR1, handleKillSignal);
signal.signal(signal.SIGUSR2, handleKillSignal);
try:
signal.signal(signal.SIGXCPU, handleKillSignal);
except:
pass
## Identify HepMC files/streams
## TODO: check readability, deal with stdin
if len(args.ARGS) > 0:
HEPMCFILES = args.ARGS
else:
HEPMCFILES = ["-"]
## Event number logging
def logNEvt(n, starttime, maxevtnum):
if n % 10000 == 0:
nevtloglevel = logging.CRITICAL
elif n % 1000 == 0:
nevtloglevel = logging.WARNING
elif n % 100 == 0:
nevtloglevel = logging.INFO
else:
nevtloglevel = logging.DEBUG
currenttime = datetime.datetime.now().replace(microsecond=0)
elapsedtime = currenttime - starttime
logging.log(nevtloglevel, "Event %d (%s elapsed)" % (n, str(elapsedtime)))
# if maxevtnum is None:
# logging.log(nevtloglevel, "Event %d (%s elapsed)" % (n, str(elapsedtime)))
# else:
# remainingtime = (maxevtnum-n) * elapsedtime.total_seconds() / float(n)
# eta = time.strftime("%a %b %d %H:%M", datetime.localtime(currenttime + remainingtime))
# logging.log(nevtloglevel, "Event %d (%d s elapsed / %d s left) -> ETA: %s" %
# (n, elapsedtime, remainingtime, eta))
## Do some checks on the output histo file, before we start the event loop
histo_parentdir = os.path.dirname(os.path.abspath(args.HISTOFILE))
if not os.path.exists(histo_parentdir):
logging.error('Parent path of output histogram file does not exist: %s\nExiting.' % histo_parentdir)
sys.exit(4)
if not os.access(histo_parentdir,os.W_OK):
logging.error('Insufficient permissions to write output histogram file to directory %s\nExiting.' % histo_parentdir)
sys.exit(4)
## Set up analysis handler
RUNNAME = args.RUN_NAME or ""
ah = rivet.AnalysisHandler(RUNNAME)
ah.setIgnoreBeams(args.IGNORE_BEAMS)
for a in args.ANALYSES:
## Print warning message and exit if not a valid analysis name
if not rivet.stripOptions(a) in all_analyses:
logging.warning("'%s' is not a known Rivet analysis! Do you need to set RIVET_ANALYSIS_PATH or use the --pwd switch?\n" % a)
# TODO: lay out more neatly, or even try for a "did you mean XXXX?" heuristic?
logging.warning("There are %d currently available analyses:\n" % len(all_analyses) + ", ".join(all_analyses))
sys.exit(1)
logging.debug("Adding analysis '%s'" % a)
ah.addAnalysis(a)
if args.PRELOADFILE is not None:
ah.readData(args.PRELOADFILE)
if args.DUMP_PERIOD:
ah.dump(args.HISTOFILE, args.DUMP_PERIOD)
if args.SHOW_BIBTEX:
bibs = []
for aname in sorted(ah.analysisNames()):
ana = rivet.AnalysisLoader.getAnalysis(aname)
bibs.append("% " + aname + "\n" + ana.bibTeX())
if bibs:
print("\nBibTeX for used Rivet analyses:\n")
print("% --------------------------\n")
print("\n\n".join(bibs) + "\n")
print("% --------------------------\n")
## Read and process events
run = rivet.Run(ah)
if args.CROSS_SECTION is not None:
logging.info("User-supplied cross-section = %e pb" % args.CROSS_SECTION)
run.setCrossSection(args.CROSS_SECTION)
if args.LIST_USED_ANALYSES:
run.setListAnalyses(args.LIST_USED_ANALYSES)
## Print platform type
import platform
starttime = datetime.datetime.now().replace(microsecond=0)
logging.info("Rivet %s running on machine %s (%s) at %s" % \
(rivet.version(), platform.node(), platform.machine(), str(starttime)))
def min_nonnull(a, b):
"A version of min which considers None to always be greater than a real number"
if a is None: return b
if b is None: return a
return min(a, b)
## Set up an event timeout handler
class TimeoutException(Exception):
pass
if args.EVENT_TIMEOUT or args.RUN_TIMEOUT:
def evttimeouthandler(signum, frame):
logging.warn("It has taken more than %d secs to get an event! Is the input event stream working?" %
min_nonnull(args.EVENT_TIMEOUT, args.RUN_TIMEOUT))
raise TimeoutException("Event timeout")
signal.signal(signal.SIGALRM, evttimeouthandler)
## Init run based on one event
hepmcfile = HEPMCFILES[0]
## Apply a file-level weight derived from the filename
hepmcfileweight = 1.0
if ":" in hepmcfile:
hepmcfile, hepmcfileweight = hepmcfile.rsplit(":", 1)
hepmcfileweight = float(hepmcfileweight)
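# e.g. (illustrative) "events.hepmc:0.5" reads events.hepmc with a file weight of 0.5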
try:
if args.EVENT_TIMEOUT or args.RUN_TIMEOUT:
signal.alarm(min_nonnull(args.EVENT_TIMEOUT, args.RUN_TIMEOUT))
init_ok = run.init(hepmcfile, hepmcfileweight)
signal.alarm(0)
if not init_ok:
logging.error("Failed to initialise using event file '%s'... exiting" % hepmcfile)
sys.exit(2)
except TimeoutException as te:
logging.error("Timeout in initialisation from event file '%s'... exiting" % hepmcfile)
sys.exit(3)
except Exception as ex:
logging.warning("Could not read from '%s' (error=%s)" % (hepmcfile, str(ex)))
sys.exit(3)
## Event loop
evtnum = 0
for fileidx, hepmcfile in enumerate(HEPMCFILES):
## Apply a file-level weight derived from the filename
hepmcfileweight = 1.0
if ":" in hepmcfile:
hepmcfile, hepmcfileweight = hepmcfile.rsplit(":", 1)
hepmcfileweight = float(hepmcfileweight)
## Open next HepMC file (NB. this doesn't apply to the first file: it was already used for the run init)
if fileidx > 0:
try:
run.openFile(hepmcfile, hepmcfileweight)
except Exception as ex:
logging.warning("Could not read from '%s' (error=%s)" % (hepmcfile, ex))
continue
if not run.readEvent():
logging.warning("Could not read events from '%s'" % hepmcfile)
continue
## Announce new file
msg = "Reading events from '%s'" % hepmcfile
if hepmcfileweight != 1.0:
msg += " (file weight = %e)" % hepmcfileweight
logging.info(msg)
## The event loop
while args.MAXEVTNUM is None or evtnum-args.EVTSKIPNUM < args.MAXEVTNUM:
evtnum += 1
## Optional event skipping
if evtnum <= args.EVTSKIPNUM:
logging.debug("Skipping event #%i" % evtnum)
run.skipEvent();
continue
## Only log the event number once we're actually processing
logNEvt(evtnum, starttime, args.MAXEVTNUM)
## Process this event
processed_ok = run.processEvent()
if not processed_ok:
logging.warn("Event processing failed for evt #%i!" % evtnum)
break
## Set flag to exit event loop if run timeout exceeded
if args.RUN_TIMEOUT and (time.time() - starttime) > args.RUN_TIMEOUT:
logging.warning("Run timeout of %d secs exceeded... exiting gracefully" % args.RUN_TIMEOUT)
RECVD_KILL_SIGNAL = True
## Exit the loop if signalled
if RECVD_KILL_SIGNAL is not None:
break
## Read next event (with timeout handling if requested)
try:
if args.EVENT_TIMEOUT:
signal.alarm(args.EVENT_TIMEOUT)
read_ok = run.readEvent()
signal.alarm(0)
if not read_ok:
break
except TimeoutException as te:
logging.error("Timeout in reading event from '%s'... exiting" % hepmcfile)
sys.exit(3)
- ## Write a histo file snapshot if appropriate
- if args.HISTO_WRITE_INTERVAL is not None and args.HISTO_WRITE_INTERVAL > 0:
- if evtnum % args.HISTO_WRITE_INTERVAL == 0:
- ah.writeData(args.HISTOFILE)
-
-
## Print end-of-loop messages
loopendtime = datetime.datetime.now().replace(microsecond=0)
logging.info("Finished event loop at %s" % str(loopendtime))
logging.info("Cross-section = %e pb" % ah.crossSection())
print()
## Finalize and write out data file
run.finalize()
if args.WRITE_DATA:
ah.writeData(args.HISTOFILE)
print()
endtime = datetime.datetime.now().replace(microsecond=0)
logging.info("Rivet run completed at %s, time elapsed = %s" % (str(endtime), str(endtime-starttime)))
print()
logging.info("Histograms written to %s" % os.path.abspath(args.HISTOFILE))
diff --git a/docker/rivet-herwig/Dockerfile b/docker/rivet-herwig/Dockerfile
--- a/docker/rivet-herwig/Dockerfile
+++ b/docker/rivet-herwig/Dockerfile
@@ -1,31 +1,31 @@
-FROM hepstore/rivet:2.7.0
+FROM hepstore/rivet:2.7.1
MAINTAINER Andy Buckley <andy.buckley@cern.ch>
RUN dnf install -y boost-devel
RUN mkdir /code && cd /code \
- && wget https://www.hepforge.org/archive/lhapdf/LHAPDF-6.2.1.tar.gz -O- | tar xz \
+ && wget https://www.hepforge.org/archive/lhapdf/LHAPDF-6.2.2.tar.gz -O- | tar xz \
&& cd LHAPDF-*/ && ./configure --prefix=/usr/local \
&& make -j5 && make install \
&& cd ../.. && rm -rf /code
-#RUN lhapdf install MMHT2014{,n}lo68cl
-RUN cd /usr/local/share/LHAPDF/ && \
- for pdf in MMHT2014{,n}lo68cl; do \
- wget https://lhapdf.hepforge.org/downloads?f=pdfsets/current/$pdf.tar.gz -O- | tar xz; \
- done
+RUN lhapdf install MMHT2014{,n}lo68cl
+# RUN cd /usr/local/share/LHAPDF/ && \
+# for pdf in MMHT2014{,n}lo68cl; do \
+# wget https://lhapdf.hepforge.org/downloads?f=pdfsets/current/$pdf.tar.gz -O- | tar xz; \
+# done
RUN mkdir /code && cd /code \
&& wget https://www.hepforge.org/archive/thepeg/ThePEG-2.1.4.tar.bz2 -O- | tar xj \
&& cd ThePEG-*/ && ./configure --enable-shared --{prefix,with-{fastjet,hepmc,lhapdf,rivet}}=/usr/local \
&& make -j5 && make install \
&& cd ../.. && rm -rf /code
RUN mkdir /code && cd /code \
&& wget https://www.hepforge.org/archive/herwig/Herwig-7.1.4.tar.bz2 -O- | tar xj \
&& cd Herwig-*/ \
&& ./configure --{prefix,with-{thepeg,fastjet}}=/usr/local \
&& make -j5 && make install \
&& cd ../.. && rm -rf /code
WORKDIR /work
diff --git a/docker/rivet-mg5amcnlo/Dockerfile b/docker/rivet-mg5amcnlo/Dockerfile
--- a/docker/rivet-mg5amcnlo/Dockerfile
+++ b/docker/rivet-mg5amcnlo/Dockerfile
@@ -1,25 +1,25 @@
-FROM hepstore/rivet:2.7.0
+FROM hepstore/rivet:2.7.1
MAINTAINER Andy Buckley <andy.buckley@cern.ch>
RUN dnf install -y rsync
RUN mkdir /code && cd /code \
&& wget https://www.hepforge.org/archive/lhapdf/LHAPDF-6.2.1.tar.gz -O- | tar xz \
&& cd LHAPDF-*/ && ./configure --prefix=/usr/local \
&& make -j5 && make install \
&& cd ../.. && rm -r /code
RUN mkdir /code && cd /code \
&& wget http://home.thep.lu.se/~torbjorn/pythia8/pythia8240.tgz -O- | tar xz \
&& cd pythia*/ && ./configure --enable-shared --{prefix,with-{hepmc2,lhapdf6}}=/usr/local \
&& make -j5 && make install \
&& cd ../.. && rm -r /code
RUN mkdir /code && cd /code \
&& wget https://agile.hepforge.org/downloads/?f=Sacrifice-1.1.2.tar.gz -O- | tar xz \
&& cd Sacrifice-*/ \
&& ./configure --{prefix,with-{pythia,hepmc,LHAPDF}}=/usr/local \
&& make -j5 && make install \
&& cd ../.. && rm -r /code
WORKDIR /work
diff --git a/docker/rivet-pythia/Dockerfile b/docker/rivet-pythia/Dockerfile
--- a/docker/rivet-pythia/Dockerfile
+++ b/docker/rivet-pythia/Dockerfile
@@ -1,25 +1,25 @@
-FROM hepstore/rivet:2.7.0
+FROM hepstore/rivet:2.7.1
MAINTAINER Andy Buckley <andy.buckley@cern.ch>
RUN dnf install -y rsync
RUN mkdir /code && cd /code \
- && wget https://www.hepforge.org/archive/lhapdf/LHAPDF-6.2.1.tar.gz -O- | tar xz \
+ && wget https://www.hepforge.org/archive/lhapdf/LHAPDF-6.2.2.tar.gz -O- | tar xz \
&& cd LHAPDF-*/ && ./configure --prefix=/usr/local \
&& make -j5 && make install \
&& cd ../.. && rm -r /code
RUN mkdir /code && cd /code \
&& wget http://home.thep.lu.se/~torbjorn/pythia8/pythia8240.tgz -O- | tar xz \
&& cd pythia*/ && ./configure --enable-shared --{prefix,with-{hepmc2,lhapdf6}}=/usr/local \
&& make -j5 && make install \
&& cd ../.. && rm -r /code
RUN mkdir /code && cd /code \
&& wget https://agile.hepforge.org/downloads/?f=Sacrifice-1.1.2.tar.gz -O- | tar xz \
&& cd Sacrifice-*/ \
&& ./configure --{prefix,with-{pythia,hepmc,LHAPDF}}=/usr/local \
&& make -j5 && make install \
&& cd ../.. && rm -r /code
WORKDIR /work
diff --git a/docker/rivet-sherpa/Dockerfile b/docker/rivet-sherpa/Dockerfile
--- a/docker/rivet-sherpa/Dockerfile
+++ b/docker/rivet-sherpa/Dockerfile
@@ -1,25 +1,25 @@
-FROM hepstore/rivet:2.7.0
+FROM hepstore/rivet:2.7.1
MAINTAINER Andy Buckley <andy.buckley@cern.ch>
RUN dnf install -y rsync
RUN mkdir /code && cd /code \
&& wget https://www.hepforge.org/archive/lhapdf/LHAPDF-6.2.1.tar.gz -O- | tar xz \
&& cd LHAPDF-*/ && ./configure --prefix=/usr/local \
&& make -j5 && make install \
&& cd ../.. && rm -r /code
RUN mkdir /code && cd /code \
&& wget http://home.thep.lu.se/~torbjorn/pythia8/pythia8240.tgz -O- | tar xz \
&& cd pythia*/ && ./configure --enable-shared --{prefix,with-{hepmc2,lhapdf6}}=/usr/local \
&& make -j5 && make install \
&& cd ../.. && rm -r /code
RUN mkdir /code && cd /code \
&& wget https://agile.hepforge.org/downloads/?f=Sacrifice-1.1.2.tar.gz -O- | tar xz \
&& cd Sacrifice-*/ \
&& ./configure --{prefix,with-{pythia,hepmc,LHAPDF}}=/usr/local \
&& make -j5 && make install \
&& cd ../.. && rm -r /code
WORKDIR /work
diff --git a/docker/rivet-tutorial/Dockerfile b/docker/rivet-tutorial/Dockerfile
--- a/docker/rivet-tutorial/Dockerfile
+++ b/docker/rivet-tutorial/Dockerfile
@@ -1,8 +1,9 @@
FROM hepstore/rivet-pythia
MAINTAINER Andy Buckley <andy.buckley@cern.ch>
CMD /bin/bash
WORKDIR /work
ADD . /work
+RUN rm /work/Dockerfile
diff --git a/docker/rivet/Dockerfile b/docker/rivet/Dockerfile
--- a/docker/rivet/Dockerfile
+++ b/docker/rivet/Dockerfile
@@ -1,26 +1,26 @@
FROM fedora:27
LABEL maintainer="rivet@projects.hepforge.org"
RUN dnf update -y \
&& dnf install -y \
make gcc-c++ gcc-gfortran redhat-rpm-config \
wget tar less bzip2 findutils which nano zlib-devel \
python python-devel file python-matplotlib gsl-devel \
texlive-latex-bin texlive-texconfig-bin texlive-pst-tools \
ghostscript ImageMagick texlive-dvips texlive-relsize \
texlive-cm texlive-hyphen-base texlive-collection-fontsrecommended \
&& dnf clean all
RUN mkdir /code && cd /code \
- && wget https://phab.hepforge.org/source/rivetbootstraphg/browse/2.7.0/rivet-bootstrap?view=raw -O rivet-bootstrap \
+ && wget https://phab.hepforge.org/source/rivetbootstraphg/browse/2.7.1/rivet-bootstrap?view=raw -O rivet-bootstrap \
&& chmod +x rivet-bootstrap \
&& INSTALL_PREFIX=/usr/local INSTALL_GSL=0 INSTALL_RIVETDEV=0 MAKE="make -j7" ./rivet-bootstrap \
&& echo "source /usr/local/share/Rivet/rivet-completion" > /etc/profile.d/rivet-completion.sh \
&& echo "source /usr/local/share/YODA/yoda-completion" > /etc/profile.d/yoda-completion.sh \
&& texconfig rehash \
&& rm -rf /code
ENV LD_LIBRARY_PATH /usr/local/lib
ENV PYTHONPATH /usr/local/lib64/python2.7/site-packages
WORKDIR /work
diff --git a/include/Rivet/AnalysisHandler.hh b/include/Rivet/AnalysisHandler.hh
--- a/include/Rivet/AnalysisHandler.hh
+++ b/include/Rivet/AnalysisHandler.hh
@@ -1,329 +1,329 @@
// -*- C++ -*-
#ifndef RIVET_RivetHandler_HH
#define RIVET_RivetHandler_HH
#include "Rivet/Config/RivetCommon.hh"
#include "Rivet/Particle.hh"
#include "Rivet/AnalysisLoader.hh"
#include "Rivet/Tools/RivetYODA.hh"
namespace Rivet {
// Forward declaration and smart pointer for Analysis
class Analysis;
typedef std::shared_ptr<Analysis> AnaHandle;
// // Needed to make smart pointers compare equivalent in the STL set
// struct CmpAnaHandle {
// bool operator() (const AnaHandle& a, const AnaHandle& b) const {
// return a.get() < b.get();
// }
// };
/// A class which handles a number of analysis objects to be applied to
/// generated events. An {@link Analysis}' AnalysisHandler is also responsible
/// for handling the final writing-out of histograms.
class AnalysisHandler {
public:
/// @name Constructors and destructors
//@{
/// Preferred constructor, with optional run name.
AnalysisHandler(const string& runname="");
/// @brief Destructor
/// The destructor is not virtual, as this class should not be inherited from.
~AnalysisHandler();
//@}
private:
/// Get a logger object.
Log& getLog() const;
public:
/// @name Run properties
//@{
/// Get the name of this run.
string runName() const { return _runname; }
/// Get the number of events seen. Should only really be used by external
/// steering code or analyses in the finalize phase.
size_t numEvents() const { return _eventcounter.numEntries(); }
/// @brief Access the sum of the event weights seen
///
/// This is the weighted equivalent of the number of events. It should only
/// be used by external steering code or analyses in the finalize phase.
double sumW() const { return _eventcounter.sumW(); }
/// Access to the sum of squared-weights
double sumW2() const { return _eventcounter.sumW2(); }
/// @brief Compatibility alias for sumOfWeights
///
/// @deprecated Prefer sumW
double sumOfWeights() const { return sumW(); }
/// @brief Set the sum of weights
///
/// This is useful if Rivet is steered externally and
/// the analyses are run for a sub-contribution of the events
/// (but of course have to be normalised to the total sum of weights)
///
/// @todo What about the sumW2 term? That needs to be set coherently. Need a
/// new version, with all three N,sumW,sumW2 numbers (or a counter)
/// supplied.
///
/// @deprecated Weight sums are no longer tracked this way...
void setSumOfWeights(const double& sum) {
//_sumOfWeights = sum;
throw Error("Can't set sum of weights independently, since it's now tracked by a Counter. "
"Please contact the Rivet authors if you need this.");
}
/// Is cross-section information required by at least one child analysis?
/// @deprecated Should no longer be an issue: does any generator not write the cross-section?
bool needCrossSection() const;
/// Whether the handler knows about a cross-section.
/// @deprecated Should no longer be an issue: does any generator not write the cross-section?
bool hasCrossSection() const;
/// Get the cross-section known to the handler.
double crossSection() const { return _xs; }
/// Set the cross-section for the process being generated.
AnalysisHandler& setCrossSection(double xs, double xserr=0);
/// Set the beam particles for this run
AnalysisHandler& setRunBeams(const ParticlePair& beams) {
_beams = beams;
MSG_DEBUG("Setting run beams = " << beams << " @ " << sqrtS()/GeV << " GeV");
return *this;
}
/// Get the beam particles for this run, usually determined from the first event.
const ParticlePair& beams() const { return _beams; }
/// Get beam IDs for this run, usually determined from the first event.
/// @deprecated Use standalone beamIds(ah.beams()), to clean AH interface
PdgIdPair beamIds() const;
/// Get energy for this run, usually determined from the first event.
/// @deprecated Use standalone sqrtS(ah.beams()), to clean AH interface
double sqrtS() const;
/// Setter for _ignoreBeams
void setIgnoreBeams(bool ignore=true);
//@}
/// @name Handle analyses
//@{
/// Get a list of the currently registered analyses' names.
std::vector<std::string> analysisNames() const;
/// Get the collection of currently registered analyses.
const std::map<std::string, AnaHandle>& analysesMap() const {
return _analyses;
}
/// Get the collection of currently registered analyses.
std::vector<AnaHandle> analyses() const {
std::vector<AnaHandle> rtn;
rtn.reserve(_analyses.size());
for (const auto& apair : _analyses) rtn.push_back(apair.second);
return rtn;
}
/// Get a registered analysis by name.
AnaHandle analysis(const std::string& analysisname) {
try {
return _analyses[analysisname];
} catch (...) {
throw LookupError("No analysis named '" + analysisname + "' registered in AnalysisHandler");
}
}
/// Add an analysis to the run list by object
AnalysisHandler& addAnalysis(Analysis* analysis);
/// @brief Add an analysis to the run list using its name.
///
/// The actual Analysis to be used will be obtained via
/// AnalysisLoader::getAnalysis(string). If no matching analysis is found,
/// no analysis is added (i.e. the null pointer is checked and discarded).
AnalysisHandler& addAnalysis(const std::string& analysisname);
/// @brief Add an analysis with a map of analysis options.
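/// e.g. (names illustrative) addAnalysis("MYANA", {{"OPT","VAL"}}) is
/// equivalent to addAnalysis("MYANA:OPT=VAL").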
AnalysisHandler& addAnalysis(const std::string& analysisname, std::map<string, string> pars);
/// @brief Add analyses to the run list using their names.
///
/// The actual {@link Analysis}' to be used will be obtained via
/// AnalysisHandler::addAnalysis(string), which in turn uses
/// AnalysisLoader::getAnalysis(string). If no matching analysis is found
/// for a given name, no analysis is added, but also no error is thrown.
AnalysisHandler& addAnalyses(const std::vector<std::string>& analysisnames);
/// Remove an analysis from the run list using its name.
AnalysisHandler& removeAnalysis(const std::string& analysisname);
/// Remove analyses from the run list using their names.
AnalysisHandler& removeAnalyses(const std::vector<std::string>& analysisnames);
//@}
/// @name Main init/execute/finalise
//@{
/// Initialize a run, with the run beams taken from the example event.
void init(const GenEvent& event);
/// @brief Analyze the given \a event by reference.
///
/// This function will call the AnalysisBase::analyze() function of all
/// included analysis objects.
void analyze(const GenEvent& event);
/// @brief Analyze the given \a event by pointer.
///
/// This function will call the AnalysisBase::analyze() function of all
/// included analysis objects, after checking the event pointer validity.
void analyze(const GenEvent* event);
/// Finalize a run. This function calls the AnalysisBase::finalize()
/// functions of all included analysis objects.
void finalize();
//@}
/// @name Histogram / data object access
//@{
/// Add a vector of analysis objects to the current state.
void addData(const std::vector<AnalysisObjectPtr>& aos);
/// Read analysis plots into the histo collection (via addData) from the named file.
void readData(const std::string& filename);
/// Get all analyses' plots as a vector of analysis objects.
std::vector<AnalysisObjectPtr> getData(bool includeorphans = false,
bool includetmps = false,
bool usefinalized = true) const;
/// Write all analyses' plots (via getData) to the named file.
void writeData(const std::string& filename) const;
/// Tell Rivet to dump intermediate results to a file named @a
/// dumpfile every @a period'th event. If @a period is not positive,
/// no dumping will be done.
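/// A minimal usage sketch (file name illustrative):
///   ah.dump("intermediate.yoda", 1000); //< re-finalize and write every 1000 events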
void dump(string dumpfile, int period) {
_dumpPeriod = period;
_dumpFile = dumpfile;
}
/// Take the vector of yoda files and merge them together using
/// the cross section and weight information provided in each
/// file. Each file in @a aofiles is assumed to have been produced
/// by Rivet. By default the files are assumed to contain
/// different processes (or the same process but mutually
/// exclusive cuts), but if @a equiv is true, the files are
/// assumed to contain output of completely equivalent (but
/// statistically independent) Rivet runs. The corresponding
/// analyses will be loaded and their analysis objects will be
/// filled with the merged result. finalize() will be run on each
/// relevant analysis. The resulting YODA file can then be written
/// out by writeData(). If @a delopts is non-empty, it is assumed
/// to contain names of different options to be merged into the
/// same analysis objects.
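/// A minimal usage sketch (file names illustrative):
///   ah.mergeYodas({"run1.yoda", "run2.yoda"}, {}, true); //< merge equivalent runs
///   ah.writeData("merged.yoda");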
void mergeYodas(const vector<string> & aofiles,
const vector<string> & delopts = vector<string>(),
bool equiv = false);
/// Helper function to strip specific options from data object paths.
void stripOptions(AnalysisObjectPtr ao,
const vector<string> & delopts) const;
//@}
private:
/// The collection of Analysis objects to be used.
std::map<std::string, AnaHandle> _analyses;
/// A vector of pre-loaded objects which do not have a valid
/// Analysis plugged in.
vector<AnalysisObjectPtr> _orphanedPreloads;
/// A vector containing copies of analysis objects after
/// finalize() has been run.
vector<AnalysisObjectPtr> _finalizedAOs;
/// @name Run properties
//@{
/// Run name
std::string _runname;
/// Event counter
Counter _eventcounter;
/// Cross-section known to AH
double _xs, _xserr;
/// Beams used by this run.
ParticlePair _beams;
/// Flag to check if init has been called
bool _initialised;
/// Flag whether input event beams should be ignored in compatibility check
bool _ignoreBeams;
/// Determines how often Rivet runs finalize() and writes the
/// result to a YODA file.
int _dumpPeriod;
/// The name of a YODA file to which Rivet periodically dumps
/// results.
string _dumpFile;
/// Flag to indicate periodic dumping is in progress
- bool _dumping;
+ int _dumping;
//@}
private:
/// The assignment operator is private and must never be called.
/// In fact, it should not even be implemented.
AnalysisHandler& operator=(const AnalysisHandler&);
/// The copy constructor is private and must never be called. In
/// fact, it should not even be implemented.
AnalysisHandler(const AnalysisHandler&);
};
}
#endif
diff --git a/src/Core/AnalysisHandler.cc b/src/Core/AnalysisHandler.cc
--- a/src/Core/AnalysisHandler.cc
+++ b/src/Core/AnalysisHandler.cc
@@ -1,576 +1,586 @@
// -*- C++ -*-
#include "Rivet/Config/RivetCommon.hh"
#include "Rivet/AnalysisHandler.hh"
#include "Rivet/Analysis.hh"
#include "Rivet/Tools/ParticleName.hh"
#include "Rivet/Tools/BeamConstraint.hh"
#include "Rivet/Tools/Logging.hh"
#include "Rivet/Projections/Beam.hh"
#include "YODA/IO.h"
namespace Rivet {
AnalysisHandler::AnalysisHandler(const string& runname)
: _runname(runname),
_eventcounter("/_EVTCOUNT"),
_xs(NAN), _xserr(NAN),
- _initialised(false), _ignoreBeams(false), _dumpPeriod(0), _dumping(false)
+ _initialised(false), _ignoreBeams(false), _dumpPeriod(0), _dumping(0)
{ }
AnalysisHandler::~AnalysisHandler()
{ }
Log& AnalysisHandler::getLog() const {
return Log::getLog("Rivet.Analysis.Handler");
}
void AnalysisHandler::init(const GenEvent& ge) {
if (_initialised)
throw UserError("AnalysisHandler::init has already been called: cannot re-initialize!");
setRunBeams(Rivet::beams(ge));
MSG_DEBUG("Initialising the analysis handler");
_eventcounter.reset();
// Check that analyses are beam-compatible, and remove those that aren't
const size_t num_anas_requested = analysisNames().size();
vector<string> anamestodelete;
for (const AnaHandle a : analyses()) {
if (!_ignoreBeams && !a->isCompatible(beams())) {
//MSG_DEBUG(a->name() << " requires beams " << a->requiredBeams() << " @ " << a->requiredEnergies() << " GeV");
anamestodelete.push_back(a->name());
}
}
for (const string& aname : anamestodelete) {
MSG_WARNING("Analysis '" << aname << "' is incompatible with the provided beams: removing");
removeAnalysis(aname);
}
if (num_anas_requested > 0 && analysisNames().empty()) {
cerr << "All analyses were incompatible with the first event's beams\n"
<< "Exiting, since this probably wasn't intentional!" << endl;
exit(1);
}
// Warn if any analysis' status is not unblemished
for (const AnaHandle a : analyses()) {
if (toUpper(a->status()) == "PRELIMINARY") {
MSG_WARNING("Analysis '" << a->name() << "' is preliminary: be careful, it may change and/or be renamed!");
} else if (toUpper(a->status()) == "OBSOLETE") {
MSG_WARNING("Analysis '" << a->name() << "' is obsolete: please update!");
} else if (toUpper(a->status()).find("UNVALIDATED") != string::npos) {
MSG_WARNING("Analysis '" << a->name() << "' is unvalidated: be careful, it may be broken!");
}
}
// Initialize the remaining analyses
for (AnaHandle a : analyses()) {
MSG_DEBUG("Initialising analysis: " << a->name());
try {
// Allow projection registration in the init phase onwards
a->_allowProjReg = true;
a->init();
//MSG_DEBUG("Checking consistency of analysis: " << a->name());
//a->checkConsistency();
} catch (const Error& err) {
cerr << "Error in " << a->name() << "::init method: " << err.what() << endl;
exit(1);
}
MSG_DEBUG("Done initialising analysis: " << a->name());
}
_initialised = true;
MSG_DEBUG("Analysis handler initialised");
}
void AnalysisHandler::analyze(const GenEvent& ge) {
// Call init with event as template if not already initialised
if (!_initialised) init(ge);
assert(_initialised);
// Ensure that beam details match those from the first event (if we're checking beams)
if ( !_ignoreBeams ) {
const PdgIdPair beams = Rivet::beamIds(ge);
const double sqrts = Rivet::sqrtS(ge);
if (!compatible(beams, _beams) || !fuzzyEquals(sqrts, sqrtS())) {
cerr << "Event beams mismatch: "
<< PID::toBeamsString(beams) << " @ " << sqrts/GeV << " GeV" << " vs. first beams "
<< this->beams() << " @ " << this->sqrtS()/GeV << " GeV" << endl;
exit(1);
}
}
// Create the Rivet event wrapper
/// @todo Filter/normalize the event here
Event event(ge);
// Weights
/// @todo Drop this / just report first weight when we support multiweight events
_eventcounter.fill(event.weight());
MSG_DEBUG("Event #" << _eventcounter.numEntries() << " weight = " << event.weight());
// Cross-section
#ifdef HEPMC_HAS_CROSS_SECTION
if (ge.cross_section()) {
_xs = ge.cross_section()->cross_section();
_xserr = ge.cross_section()->cross_section_error();
}
#endif
// Run the analyses
for (AnaHandle a : analyses()) {
MSG_TRACE("About to run analysis " << a->name());
try {
a->analyze(event);
} catch (const Error& err) {
cerr << "Error in " << a->name() << "::analyze method: " << err.what() << endl;
exit(1);
}
MSG_TRACE("Finished running analysis " << a->name());
}
if ( _dumpPeriod > 0 && numEvents()%_dumpPeriod == 0 ) {
MSG_INFO("Dumping intermediate results to " << _dumpFile << ".");
- _dumping = true;
+ _dumping = numEvents()/_dumpPeriod;
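+ // (_dumping holds the 1-based ordinal of this dump: finalize() uses
+ // _dumping == 1 to print its non-reentrant warning only on the first dump)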
finalize();
- _dumping = false;
writeData(_dumpFile);
+ _dumping = 0;
}
}
void AnalysisHandler::analyze(const GenEvent* ge) {
if (ge == nullptr) {
MSG_ERROR("AnalysisHandler received null pointer to GenEvent");
//throw Error("AnalysisHandler received null pointer to GenEvent");
}
analyze(*ge);
}
void AnalysisHandler::finalize() {
if (!_initialised) return;
// First we make copies of all analysis objects.
map<string,AnalysisObjectPtr> backupAOs;
for (auto ao : getData(false, true, false) )
backupAOs[ao->path()] = AnalysisObjectPtr(ao->newclone());
// Now we run the (re-entrant) finalize() functions for all analyses.
MSG_INFO("Finalising analyses");
for (AnaHandle a : analyses()) {
a->setCrossSection(_xs);
try {
if ( !_dumping || a->info().reentrant() ) a->finalize();
- else if ( _dumping )
- MSG_INFO("Skipping periodic dump of " << a->name()
+ else if ( _dumping == 1 )
+ MSG_INFO("Skipping finalize in periodic dump of " << a->name()
<< " as it is not declared reentrant.");
} catch (const Error& err) {
cerr << "Error in " << a->name() << "::finalize method: " << err.what() << endl;
exit(1);
}
}
// Now we copy all analysis objects to the list of finalized
// ones, and restore the value to their original ones.
_finalizedAOs.clear();
for ( auto ao : getData(false, false, false) )
_finalizedAOs.push_back(AnalysisObjectPtr(ao->newclone()));
for ( auto ao : getData(false, true, false) ) {
// TODO: This should be possible to do in a nicer way, with a flag etc.
if (ao->path().find("/FINAL") != std::string::npos) continue;
auto aoit = backupAOs.find(ao->path());
if ( aoit == backupAOs.end() ) {
AnaHandle ana = analysis(split(ao->path(), "/")[0]);
if ( ana ) ana->removeAnalysisObject(ao->path());
} else
copyao(aoit->second, ao);
}
// Print out number of events processed
const int nevts = _eventcounter.numEntries();
MSG_INFO("Processed " << nevts << " event" << (nevts != 1 ? "s" : ""));
// // Delete analyses
// MSG_DEBUG("Deleting analyses");
// _analyses.clear();
// Print out MCnet boilerplate
cout << endl;
cout << "The MCnet usage guidelines apply to Rivet: see http://www.montecarlonet.org/GUIDELINES" << endl;
cout << "Please acknowledge plots made with Rivet analyses, and cite arXiv:1003.0694 (http://arxiv.org/abs/1003.0694)" << endl;
}
AnalysisHandler& AnalysisHandler::addAnalysis(const string& analysisname, std::map<string, string> pars) {
// Make an option handle.
std::string parHandle = "";
for (map<string, string>::iterator par = pars.begin(); par != pars.end(); ++par) {
parHandle +=":";
parHandle += par->first + "=" + par->second;
}
return addAnalysis(analysisname + parHandle);
}
AnalysisHandler& AnalysisHandler::addAnalysis(const string& analysisname) {
// Check for a duplicate analysis
/// @todo Might we want to be able to run an analysis twice, with different params?
/// Requires avoiding histo tree clashes, i.e. storing the histos on the analysis objects.
string ananame = analysisname;
vector<string> anaopt = split(analysisname, ":");
if ( anaopt.size() > 1 ) ananame = anaopt[0];
AnaHandle analysis( AnalysisLoader::getAnalysis(ananame) );
if (analysis.get() != 0) { // < Check for null analysis.
MSG_DEBUG("Adding analysis '" << analysisname << "'");
map<string,string> opts;
for ( int i = 1, N = anaopt.size(); i < N; ++i ) {
vector<string> opt = split(anaopt[i], "=");
if ( opt.size() != 2 ) {
MSG_WARNING("Error in option specification. Skipping analysis " << analysisname);
return *this;
}
if ( !analysis->info().validOption(opt[0], opt[1]) ) {
MSG_WARNING("Cannot set option '" << opt[0] << "' to '" << opt[1]
<< "'. Skipping analysis " << analysisname);
return *this;
}
opts[opt[0]] = opt[1];
}
for ( auto opt: opts) {
analysis->_options[opt.first] = opt.second;
analysis->_optstring += ":" + opt.first + "=" + opt.second;
}
for (const AnaHandle& a : analyses()) {
if (a->name() == analysis->name() ) {
MSG_WARNING("Analysis '" << analysisname << "' already registered: skipping duplicate");
return *this;
}
}
analysis->_analysishandler = this;
_analyses[analysisname] = analysis;
} else {
MSG_WARNING("Analysis '" << analysisname << "' not found.");
}
// MSG_WARNING(_analyses.size());
// for (const AnaHandle& a : _analyses) MSG_WARNING(a->name());
return *this;
}
AnalysisHandler& AnalysisHandler::removeAnalysis(const string& analysisname) {
// std::shared_ptr<Analysis> toremove;
// for (const AnaHandle a : _analyses) {
// if (a->name() == analysisname) {
// toremove = a;
// break;
// }
// }
// if (toremove.get() != 0) {
MSG_DEBUG("Removing analysis '" << analysisname << "'");
if (_analyses.find(analysisname) != _analyses.end()) _analyses.erase(analysisname);
// }
return *this;
}
/////////////////////////////
void AnalysisHandler::addData(const std::vector<AnalysisObjectPtr>& aos) {
for (const AnalysisObjectPtr ao : aos) {
string path = ao->path();
if ( path.substr(0, 5) != "/RAW/" ) {
_orphanedPreloads.push_back(ao);
continue;
}
path = path.substr(4);
ao->setPath(path);
if (path.size() > 1) { // path > "/"
try {
const string ananame = split(path, "/")[0];
AnaHandle a = analysis(ananame);
a->addAnalysisObject(ao); /// @todo Need to statistically merge...
} catch (const Error& e) {
MSG_TRACE("Adding analysis object " << path <<
" to the list of orphans.");
_orphanedPreloads.push_back(ao);
}
}
}
}
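// Path-handling sketch (example paths are hypothetical): a preloaded object
// at "/RAW/MY_ANALYSIS/hist1" is re-pathed to "/MY_ANALYSIS/hist1" and handed
// to the registered MY_ANALYSIS instance; objects without the "/RAW/" prefix,
// or whose analysis is not (yet) registered, are kept in _orphanedPreloads.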
void AnalysisHandler::stripOptions(AnalysisObjectPtr ao,
const vector<string> & delopts) const {
string path = ao->path();
string ananame = split(path, "/")[0];
vector<string> anaopts = split(ananame, ":");
for ( int i = 1, N = anaopts.size(); i < N; ++i )
for ( auto opt : delopts )
if ( opt == "*" || anaopts[i].find(opt + "=") == 0 )
path.replace(path.find(":" + anaopts[i]), (":" + anaopts[i]).length(), "");
ao->setPath(path);
}
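// Worked example (hypothetical names): with delopts = {"OPT1"}, the path
// "/MY_ANALYSIS:OPT1=VAL1:OPT2=VAL2/hist" becomes
// "/MY_ANALYSIS:OPT2=VAL2/hist"; with delopts = {"*"}, every option segment
// is stripped, giving "/MY_ANALYSIS/hist".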
void AnalysisHandler::
mergeYodas(const vector<string> & aofiles, const vector<string> & delopts, bool equiv) {
vector< vector<AnalysisObjectPtr> > aosv;
vector<double> xsecs;
vector<double> xsecerrs;
vector<CounterPtr> sows;
set<string> ananames;
_eventcounter.reset();
// First scan all files and extract analysis objects and add the
// corresponding analyses.
for ( auto file : aofiles ) {
Scatter1DPtr xsec;
CounterPtr sow;
// For each file, make sure that cross-section and sum-of-weights
// objects are present, and store all RAW ones in a vector.
vector<AnalysisObjectPtr> aos;
try {
/// @todo Use new YODA SFINAE to fill the smart ptr vector directly
vector<YODA::AnalysisObject*> aos_raw;
YODA::read(file, aos_raw);
for (AnalysisObject* aor : aos_raw) {
AnalysisObjectPtr ao = AnalysisObjectPtr(aor);
if ( ao->path().substr(0, 5) != "/RAW/" ) continue;
ao->setPath(ao->path().substr(4));
if ( ao->path() == "/_XSEC" )
xsec = dynamic_pointer_cast<Scatter1D>(ao);
else if ( ao->path() == "/_EVTCOUNT" )
sow = dynamic_pointer_cast<Counter>(ao);
else {
stripOptions(ao, delopts);
string ananame = split(ao->path(), "/")[0];
if ( ananames.insert(ananame).second ) addAnalysis(ananame);
aos.push_back(ao);
}
}
if ( !xsec || !sow ) {
MSG_ERROR( "Error in AnalysisHandler::mergeYodas: The file " << file
<< " did not contain weights and cross section info.");
exit(1);
}
xsecs.push_back(xsec->point(0).x());
xsecerrs.push_back(sqr(xsec->point(0).xErrAvg()));
_eventcounter += *sow;
sows.push_back(sow);
aosv.push_back(aos);
} catch (...) { //< YODA::ReadError&
throw UserError("Unexpected error in reading file: " + file);
}
}
// Now calculate the scale to be applied for all bins in a file
// and get the common cross section and sum of weights.
_xs = _xserr = 0.0;
for ( int i = 0, N = sows.size(); i < N; ++i ) {
double effnent = sows[i]->effNumEntries();
_xs += (equiv? effnent: 1.0)*xsecs[i];
_xserr += (equiv? sqr(effnent): 1.0)*xsecerrs[i];
}
vector<double> scales(sows.size(), 1.0);
if ( equiv ) {
_xs /= _eventcounter.effNumEntries();
_xserr = sqrt(_xserr)/_eventcounter.effNumEntries();
} else {
_xserr = sqrt(_xserr);
for ( int i = 0, N = sows.size(); i < N; ++i )
scales[i] = (_eventcounter.sumW()/sows[i]->sumW())*(xsecs[i]/_xs);
}
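// In formula form (restating the code above): with per-file cross-sections
// xs_i, squared errors e2_i, and effective entries n_i = effNumEntries(i),
// the "equiv" (equivalent-runs) mode takes the effN-weighted average
//   _xs = sum_i(n_i*xs_i)/n_tot,   _xserr = sqrt(sum_i(n_i^2*e2_i))/n_tot
// with unit per-file scales, while the inequivalent mode takes plain sums
//   _xs = sum_i(xs_i),   _xserr = sqrt(sum_i(e2_i))
// and scales each file's fills by (sumW_tot/sumW_i) * (xs_i/_xs).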
// Initialise the analyses, allowing them to book analysis objects.
for (AnaHandle a : analyses()) {
MSG_DEBUG("Initialising analysis: " << a->name());
if ( !a->info().reentrant() )
MSG_WARNING("Analysis " << a->name() << " has not been validated to have "
<< "a reentrant finalize method. The result is unpredictable.");
try {
// Allow projection registration in the init phase onwards
a->_allowProjReg = true;
cerr << "sqrtS " << sqrtS() << endl;
a->init();
//MSG_DEBUG("Checking consistency of analysis: " << a->name());
//a->checkConsistency();
} catch (const Error& err) {
cerr << "Error in " << a->name() << "::init method: " << err.what() << endl;
exit(1);
}
MSG_DEBUG("Done initialising analysis: " << a->name());
}
_initialised = true;
// Get a list of all analysis objects to handle.
map<string,AnalysisObjectPtr> current;
for ( auto ao : getData(false, true, false) ) current[ao->path()] = ao;
// Go through all objects to be merged and add them to current
// after appropriate scaling.
for ( int i = 0, N = aosv.size(); i < N; ++i)
for ( auto ao : aosv[i] ) {
if ( ao->path() == "/_XSEC" || ao->path() == "/_EVTCOUNT" ) continue;
auto aoit = current.find(ao->path());
if ( aoit == current.end() ) {
MSG_WARNING("" << ao->path() << " was not properly booked.");
continue;
}
if ( !addaos(aoit->second, ao, scales[i]) )
MSG_WARNING("Cannot merge objects with path " << ao->path()
<<" of type " << ao->annotation("Type") );
}
// Now we can simply finalize() the analyses, leaving the
// controlling program to write the results out to a YODA file.
finalize();
}
void AnalysisHandler::readData(const string& filename) {
vector<AnalysisObjectPtr> aos;
try {
/// @todo Use new YODA SFINAE to fill the smart ptr vector directly
vector<YODA::AnalysisObject*> aos_raw;
YODA::read(filename, aos_raw);
for (AnalysisObject* aor : aos_raw) aos.push_back(AnalysisObjectPtr(aor));
} catch (...) { //< YODA::ReadError&
throw UserError("Unexpected error in reading file: " + filename);
}
if (!aos.empty()) addData(aos);
}
vector<AnalysisObjectPtr> AnalysisHandler::
getData(bool includeorphans, bool includetmps, bool usefinalized) const {
vector<AnalysisObjectPtr> rtn;
// Event counter
rtn.push_back( make_shared<Counter>(_eventcounter) );
// Cross-section + err as scatter
YODA::Scatter1D::Points pts; pts.insert(YODA::Point1D(_xs, _xserr));
rtn.push_back( make_shared<Scatter1D>(pts, "/_XSEC") );
// Analysis histograms
vector<AnalysisObjectPtr> aos;
if (usefinalized)
aos = _finalizedAOs;
else {
for (const AnaHandle a : analyses()) {
// MSG_WARNING(a->name() << " " << aos.size());
for (const AnalysisObjectPtr ao : a->analysisObjects()) {
aos.push_back(ao);
}
}
}
for (const AnalysisObjectPtr ao : aos) {
// Exclude paths from final write-out if they contain a "TMP" layer (i.e. matching "/TMP/")
/// @todo This needs to be much more nuanced for re-entrant histogramming
if ( !includetmps && ao->path().find("/TMP/" ) != string::npos) continue;
rtn.push_back(ao);
}
// Sort histograms alphanumerically by path before write-out
sort(rtn.begin(), rtn.end(), [](AnalysisObjectPtr a, AnalysisObjectPtr b) {return a->path() < b->path();});
if ( includeorphans )
rtn.insert(rtn.end(), _orphanedPreloads.begin(), _orphanedPreloads.end());
return rtn;
}
void AnalysisHandler::writeData(const string& filename) const {
vector<AnalysisObjectPtr> out = _finalizedAOs;
+ set<string> finalana;
+ for ( auto ao : out) finalana.insert(ao->path());
out.reserve(2*out.size());
vector<AnalysisObjectPtr> aos = getData(false, true, false);
+
+ if ( _dumping ) {
+ for ( auto ao : aos ) {
+ if ( finalana.find(ao->path()) == finalana.end() )
+ out.push_back(AnalysisObjectPtr(ao->newclone()));
+ }
+ }
+
for ( auto ao : aos ) {
ao = AnalysisObjectPtr(ao->newclone());
ao->setPath("/RAW" + ao->path());
out.push_back(ao);
}
try {
YODA::write(filename, out.begin(), out.end());
} catch (...) { //< YODA::WriteError&
throw UserError("Unexpected error in writing file: " + filename);
}
}
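// Resulting file layout (sketch): finalized objects are written under their
// plain paths (e.g. "/MY_ANALYSIS/hist"), and the current unfinalized fill
// state of each object is written again under a "/RAW" prefix (e.g.
// "/RAW/MY_ANALYSIS/hist"), which is what addData() and mergeYodas() look for
// when reloading or merging runs.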
std::vector<std::string> AnalysisHandler::analysisNames() const {
std::vector<std::string> rtn;
for (AnaHandle a : analyses()) {
rtn.push_back(a->name());
}
return rtn;
}
AnalysisHandler& AnalysisHandler::addAnalyses(const std::vector<std::string>& analysisnames) {
for (const string& aname : analysisnames) {
//MSG_DEBUG("Adding analysis '" << aname << "'");
addAnalysis(aname);
}
return *this;
}
AnalysisHandler& AnalysisHandler::removeAnalyses(const std::vector<std::string>& analysisnames) {
for (const string& aname : analysisnames) {
removeAnalysis(aname);
}
return *this;
}
bool AnalysisHandler::needCrossSection() const {
for (const AnaHandle a : analyses()) {
if (a->needsCrossSection()) return true;
}
return false;
}
AnalysisHandler& AnalysisHandler::setCrossSection(double xs, double xserr) {
_xs = xs;
_xserr = xserr;
return *this;
}
bool AnalysisHandler::hasCrossSection() const {
return (!std::isnan(crossSection()));
}
AnalysisHandler& AnalysisHandler::addAnalysis(Analysis* analysis) {
analysis->_analysishandler = this;
// _analyses.insert(AnaHandle(analysis));
_analyses[analysis->name()] = AnaHandle(analysis);
return *this;
}
PdgIdPair AnalysisHandler::beamIds() const {
return Rivet::beamIds(beams());
}
double AnalysisHandler::sqrtS() const {
return Rivet::sqrtS(beams());
}
void AnalysisHandler::setIgnoreBeams(bool ignore) {
_ignoreBeams = ignore;
}
}
