diff --git a/analyses/pluginATLAS/ATLAS_2013_I1230812.cc b/analyses/pluginATLAS/ATLAS_2013_I1230812.cc --- a/analyses/pluginATLAS/ATLAS_2013_I1230812.cc +++ b/analyses/pluginATLAS/ATLAS_2013_I1230812.cc @@ -1,312 +1,317 @@ // -*- C++ -*- #include "Rivet/Analysis.hh" #include "Rivet/Projections/ZFinder.hh" #include "Rivet/Projections/FastJets.hh" #include "Rivet/Projections/VetoedFinalState.hh" #include "Rivet/Projections/PromptFinalState.hh" namespace Rivet { /// Z + jets in pp at 7 TeV (combined channel / base class) /// @note This base class contains a "mode" variable for combined, e, and mu channel derived classes class ATLAS_2013_I1230812 : public Analysis { public: /// @name Constructors etc. //@{ /// Constructor ATLAS_2013_I1230812(string name="ATLAS_2013_I1230812") : Analysis(name) { // This class uses the combined e+mu mode _mode = 1; } //@} /// Book histograms and initialise projections before the run void init() { + + // Get options from the new option system + if ( getOption("ZMODE") == "EL" ) _mode = 2; + if ( getOption("ZMODE") == "MU" ) _mode = 3; + // Determine the e/mu decay channels used (NB Prompt leptons only). /// @todo Note that Zs are accepted with any rapidity: the cuts are on the e/mu: is this correct? Cut pt20 = Cuts::pT >= 20.0*GeV; if (_mode == 1) { // Combined ZFinder zfinder(FinalState(Cuts::abseta < 2.5), pt20, PID::ELECTRON, 66*GeV, 116*GeV); declare(zfinder, "zfinder"); } else if (_mode == 2) { // Electron const Cut eta_e = Cuts::abseta < 1.37 || Cuts::absetaIn(1.52, 2.47); ZFinder zfinder(FinalState(eta_e), pt20, PID::ELECTRON, 66*GeV, 116*GeV); declare(zfinder, "zfinder"); } else if (_mode == 3) { // Muon ZFinder zfinder(FinalState(Cuts::abseta < 2.4), pt20, PID::MUON, 66*GeV, 116*GeV); declare(zfinder, "zfinder"); } else { MSG_ERROR("Unknown decay channel mode!!!"); } // Define veto FS in order to prevent Z-decay products entering the jet algorithm VetoedFinalState had_fs; had_fs.addVetoOnThisFinalState(getProjection("zfinder")); FastJets jets(had_fs, FastJets::ANTIKT, 0.4); jets.useInvisibles(true); declare(jets, "jets"); _h_njet_incl = bookHisto1D( 1, 1, _mode); _h_njet_incl_ratio = bookScatter2D(2, 1, _mode, true); _h_njet_excl = bookHisto1D( 3, 1, _mode); _h_njet_excl_ratio = bookScatter2D(4, 1, _mode, true); _h_njet_excl_pt150 = bookHisto1D( 5, 1, _mode); _h_njet_excl_pt150_ratio = bookScatter2D(6, 1, _mode, true); _h_njet_excl_vbf = bookHisto1D ( 7, 1, _mode); _h_njet_excl_vbf_ratio = bookScatter2D(8, 1, _mode, true); _h_ptlead = bookHisto1D( 9, 1, _mode); _h_ptseclead = bookHisto1D( 10, 1, _mode); _h_ptthirdlead = bookHisto1D( 11, 1, _mode); _h_ptfourthlead = bookHisto1D( 12, 1, _mode); _h_ptlead_excl = bookHisto1D( 13, 1, _mode); _h_pt_ratio = bookHisto1D( 14, 1, _mode); _h_pt_z = bookHisto1D( 15, 1, _mode); _h_pt_z_excl = bookHisto1D( 16, 1, _mode); _h_ylead = bookHisto1D( 17, 1, _mode); _h_yseclead = bookHisto1D( 18, 1, _mode); _h_ythirdlead = bookHisto1D( 19, 1, _mode); _h_yfourthlead = bookHisto1D( 20, 1, _mode); _h_deltay = bookHisto1D( 21, 1, _mode); _h_mass = bookHisto1D( 22, 1, _mode); _h_deltaphi = bookHisto1D( 23, 1, _mode); _h_deltaR = bookHisto1D( 24, 1, _mode); _h_ptthirdlead_vbf = bookHisto1D( 25, 1, _mode); _h_ythirdlead_vbf = bookHisto1D( 26, 1, _mode); _h_ht = bookHisto1D( 27, 1, _mode); _h_st = bookHisto1D( 28, 1, _mode); } /// Perform the per-event analysis void analyze(const Event& event) { const ZFinder& zfinder = apply(event, "zfinder"); MSG_DEBUG("# Z constituents = " << zfinder.constituents().size()); if 
(zfinder.constituents().size() != 2) vetoEvent; FourMomentum z = zfinder.boson().momentum(); FourMomentum lp = zfinder.constituents()[0].momentum(); FourMomentum lm = zfinder.constituents()[1].momentum(); if (deltaR(lp, lm) < 0.2) vetoEvent; Jets jets = apply(event, "jets").jetsByPt(Cuts::pT > 30*GeV && Cuts::absrap < 4.4); ifilter_discard(jets, deltaRLess(lp, 0.5)); ifilter_discard(jets, deltaRLess(lm, 0.5)); const double weight = event.weight(); // Fill jet multiplicities for (size_t ijet = 0; ijet <= jets.size(); ++ijet) { _h_njet_incl->fill(ijet, weight); } _h_njet_excl->fill(jets.size(), weight); // Require at least one jet if (jets.size() >= 1) { // Leading jet histos const double ptlead = jets[0].pT()/GeV; const double yabslead = fabs(jets[0].rapidity()); const double ptz = z.pT()/GeV; _h_ptlead->fill(ptlead, weight); _h_ylead ->fill(yabslead, weight); _h_pt_z ->fill(ptz, weight); // Fill jet multiplicities if (ptlead > 150) _h_njet_excl_pt150->fill(jets.size(), weight); // Loop over selected jets, fill inclusive distributions double st = 0; double ht = lp.pT()/GeV + lm.pT()/GeV; for (size_t ijet = 0; ijet < jets.size(); ++ijet) { ht += jets[ijet].pT()/GeV; st += jets[ijet].pT()/GeV; } _h_ht->fill(ht, weight); _h_st->fill(st, weight); // Require exactly one jet if (jets.size() == 1) { _h_ptlead_excl->fill(ptlead, weight); _h_pt_z_excl ->fill(ptz, weight); } } // Require at least two jets if (jets.size() >= 2) { // Second jet histos const double ptlead = jets[0].pT()/GeV; const double pt2ndlead = jets[1].pT()/GeV; const double ptratio = pt2ndlead/ptlead; const double yabs2ndlead = fabs(jets[1].rapidity()); _h_ptseclead->fill(pt2ndlead, weight); _h_yseclead->fill( yabs2ndlead, weight); _h_pt_ratio->fill( ptratio, weight); // Dijet histos const double deltaphi = fabs(deltaPhi(jets[1], jets[0])); const double deltarap = fabs(jets[0].rapidity() - jets[1].rapidity()) ; const double deltar = fabs(deltaR(jets[0], jets[1], RAPIDITY)); const double mass = (jets[0].momentum() + jets[1].momentum()).mass()/GeV; _h_mass->fill( mass, weight); _h_deltay->fill( deltarap, weight); _h_deltaphi->fill(deltaphi, weight); _h_deltaR->fill( deltar, weight); if (mass > 350 && deltarap > 3) _h_njet_excl_vbf->fill(jets.size(), weight); } // Require at least three jets if (jets.size() >= 3) { // Third jet histos const double pt3rdlead = jets[2].pT()/GeV; const double yabs3rdlead = fabs(jets[2].rapidity()); _h_ptthirdlead->fill(pt3rdlead, weight); _h_ythirdlead->fill( yabs3rdlead, weight); //Histos after VBF preselection const double deltarap = fabs(jets[0].rapidity() - jets[1].rapidity()) ; const double mass = (jets[0].momentum() + jets[1].momentum()).mass(); if (mass > 350 && deltarap > 3) { _h_ptthirdlead_vbf->fill(pt3rdlead, weight); _h_ythirdlead_vbf->fill( yabs3rdlead, weight); } } // Require at least four jets if (jets.size() >= 4) { // Fourth jet histos const double pt4thlead = jets[3].pT()/GeV; const double yabs4thlead = fabs(jets[3].rapidity()); _h_ptfourthlead->fill(pt4thlead, weight); _h_yfourthlead->fill( yabs4thlead, weight); } } /// @name Ratio calculator util functions //@{ /// Calculate the efficiency error, being careful about div-by-zero double err_incl(const HistoBin1D &M, const HistoBin1D &N, bool hasWeights) { double r = safediv(M.sumW(), N.sumW()); if (hasWeights) { // use F. 
James's approximation for weighted events return sqrt( safediv((1 - 2 * r) * M.sumW2() + r * r * N.sumW2(), N.sumW() * N.sumW()) ); } return sqrt( safediv(r * (1 - r), N.sumW()) ); } /// Calculate the ratio error, being careful about div-by-zero double err_excl(const HistoBin1D &A, const HistoBin1D &B) { double r = safediv(A.sumW(), B.sumW()); double dAsquared = safediv(A.sumW2(), A.sumW() * A.sumW()); // squared relative error of A double dBsquared = safediv(B.sumW2(), B.sumW() * B.sumW()); // squared relative error of B return r * sqrt(dAsquared + dBsquared); } //@} void finalize() { bool hasWeights = _h_njet_incl->effNumEntries() != _h_njet_incl->numEntries(); for (size_t i = 0; i < 6; ++i) { _h_njet_incl_ratio->point(i).setY(safediv(_h_njet_incl->bin(i + 1).sumW(), _h_njet_incl->bin(i).sumW()), err_incl(_h_njet_incl->bin(i + 1), _h_njet_incl->bin(i), hasWeights)); _h_njet_excl_ratio->point(i).setY(safediv(_h_njet_excl->bin(i + 1).sumW(), _h_njet_excl->bin(i).sumW()), err_excl(_h_njet_excl->bin(i + 1), _h_njet_excl->bin(i))); if (i >= 1) { _h_njet_excl_pt150_ratio->point(i - 1).setY(safediv(_h_njet_excl_pt150->bin(i).sumW(), _h_njet_excl_pt150->bin(i - 1).sumW()), err_excl(_h_njet_excl_pt150->bin(i), _h_njet_excl_pt150->bin(i - 1))); if (i >= 2) { _h_njet_excl_vbf_ratio->point(i - 2).setY(safediv(_h_njet_excl_vbf->bin(i).sumW(), _h_njet_excl_vbf->bin(i - 1).sumW()), err_excl(_h_njet_excl_vbf->bin(i), _h_njet_excl_vbf->bin(i - 1))); } } } const double xs = crossSectionPerEvent()/picobarn; scale({_h_njet_incl, _h_njet_excl, _h_njet_excl_pt150, _h_njet_excl_vbf}, xs); scale({_h_ptlead, _h_ptseclead, _h_ptthirdlead, _h_ptfourthlead, _h_ptlead_excl}, xs); scale({_h_pt_ratio, _h_pt_z, _h_pt_z_excl}, xs); scale({_h_ylead, _h_yseclead, _h_ythirdlead, _h_yfourthlead}, xs); scale({_h_deltay, _h_mass, _h_deltaphi, _h_deltaR}, xs); scale({_h_ptthirdlead_vbf, _h_ythirdlead_vbf}, xs); scale({_h_ht, _h_st}, xs); } //@} protected: size_t _mode; private: Scatter2DPtr _h_njet_incl_ratio; Scatter2DPtr _h_njet_excl_ratio; Scatter2DPtr _h_njet_excl_pt150_ratio; Scatter2DPtr _h_njet_excl_vbf_ratio; Histo1DPtr _h_njet_incl; Histo1DPtr _h_njet_excl; Histo1DPtr _h_njet_excl_pt150; Histo1DPtr _h_njet_excl_vbf; Histo1DPtr _h_ptlead; Histo1DPtr _h_ptseclead; Histo1DPtr _h_ptthirdlead; Histo1DPtr _h_ptfourthlead; Histo1DPtr _h_ptlead_excl; Histo1DPtr _h_pt_ratio; Histo1DPtr _h_pt_z; Histo1DPtr _h_pt_z_excl; Histo1DPtr _h_ylead; Histo1DPtr _h_yseclead; Histo1DPtr _h_ythirdlead; Histo1DPtr _h_yfourthlead; Histo1DPtr _h_deltay; Histo1DPtr _h_mass; Histo1DPtr _h_deltaphi; Histo1DPtr _h_deltaR; Histo1DPtr _h_ptthirdlead_vbf; Histo1DPtr _h_ythirdlead_vbf; Histo1DPtr _h_ht; Histo1DPtr _h_st; }; class ATLAS_2013_I1230812_EL : public ATLAS_2013_I1230812 { public: ATLAS_2013_I1230812_EL() : ATLAS_2013_I1230812("ATLAS_2013_I1230812_EL") { _mode = 2; } }; class ATLAS_2013_I1230812_MU : public ATLAS_2013_I1230812 { public: ATLAS_2013_I1230812_MU() : ATLAS_2013_I1230812("ATLAS_2013_I1230812_MU") { _mode = 3; } }; DECLARE_RIVET_PLUGIN(ATLAS_2013_I1230812); DECLARE_RIVET_PLUGIN(ATLAS_2013_I1230812_EL); DECLARE_RIVET_PLUGIN(ATLAS_2013_I1230812_MU); } diff --git a/analyses/pluginATLAS/ATLAS_2013_I1230812.info b/analyses/pluginATLAS/ATLAS_2013_I1230812.info --- a/analyses/pluginATLAS/ATLAS_2013_I1230812.info +++ b/analyses/pluginATLAS/ATLAS_2013_I1230812.info @@ -1,52 +1,54 @@ Name: ATLAS_2013_I1230812 Year: 2013 Summary: $Z$ + jets in $pp$ at 7 TeV Experiment: ATLAS Collider: LHC InspireID: 1230812 Status: VALIDATED Authors: - 
Katharina Bierwagen - Frank Siegert References: - arXiv:1304.7098 [hep-ex] - J. High Energy Phys. 07 (2013) 032 RunInfo: Z+jets, electronic Z-decays (data are a weighted combination of electron/muon). NumEvents: 1000000 Beams: [p+, p+] Energies: [7000] NeedCrossSection: True +Options: + - ZMODE=EL,MU Description: 'Measurements of the production of jets of particles in association with a $Z$ boson in $pp$ collisions at $\sqrt{s}$ = 7 TeV are presented, using data corresponding to an integrated luminosity of 4.6/fb collected by the ATLAS experiment at the Large Hadron Collider. Inclusive and differential jet cross sections in $Z$ events, with the $Z$ decaying into electron or muon pairs, are measured for jets with transverse momentum $p_T > 30$ GeV and rapidity $|y| < 4.4$. This Rivet module implements the event selection for the weighted combination of both decay channels and uses the data from that combination (as in the paper plots). For simplicity of usage, however, it only requires events with the electronic final state (the muonic final state is ignored). This allows it to be used with either pure electron samples or with mixed electron/muon events. If you want to use it with a pure muon sample, please refer to ATLAS\_2013\_I1230812\_MU.' BibKey: Aad:2013ysa BibTeX: '@article{Aad:2013ysa, author = "Aad, Georges and others", title = "{Measurement of the production cross section of jets in association with a Z boson in pp collisions at $\sqrt{s}$ = 7 TeV with the ATLAS detector}", collaboration = "ATLAS Collaboration", journal = "JHEP", volume = "1307", pages = "032", doi = "10.1007/JHEP07(2013)032", year = "2013", eprint = "1304.7098", archivePrefix = "arXiv", primaryClass = "hep-ex", reportNumber = "CERN-PH-EP-2013-023", SLACcitation = "%%CITATION = ARXIV:1304.7098;%%", }' diff --git a/bin/rivet-cmphistos b/bin/rivet-cmphistos --- a/bin/rivet-cmphistos +++ b/bin/rivet-cmphistos @@ -1,483 +1,490 @@ #! /usr/bin/env python """\ %prog - generate histogram comparison plots USAGE: %prog [options] yodafile1[:'PlotOption1=Value':'PlotOption2=Value':...] [path/to/yodafile2 ...] [PLOT:Key1=Val1:...] where the plot options are described in the make-plots manual in the HISTOGRAM section.
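EXAMPLE (a sketch of the syntax above; the file names, titles and option values are purely illustrative): %prog mc1.yoda:'Title=Generator A':'LineColor=red' mc2.yoda:'Title=Generator B' PLOT:LogY=0 would write out comparison .dat files for all histograms found in the two (assumed existing) YODA files, with per-file legend titles, a red line for the first file, and linear y-axes forced via the global PLOT block.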
ENVIRONMENT: * RIVET_ANALYSIS_PATH: list of paths to be searched for plugin analysis libraries at runtime * RIVET_DATA_PATH: list of paths to be searched for data files """ from __future__ import print_function import rivet, yoda, sys, os rivet.util.check_python_version() rivet.util.set_process_name(os.path.basename(__file__)) class Plot(dict): "A tiny Plot object to help writing out the head in the .dat file" def __repr__(self): return "# BEGIN PLOT\n" + "\n".join("%s=%s" % (k,v) for k,v in self.items()) + "\n# END PLOT\n\n" def sanitiseString(s): #s = s.replace('_','\\_') #s = s.replace('^','\\^{}') #s = s.replace('$','\\$') s = s.replace('#','\\#') s = s.replace('%','\\%') return s def getCommandLineOptions(): "Parse command line options" from optparse import OptionParser, OptionGroup parser = OptionParser(usage=__doc__) parser.add_option('-o', '--outdir', dest='OUTDIR', default='.', help='write data files into this directory') parser.add_option("--hier-out", action="store_true", dest="HIER_OUTPUT", default=False, help="write output dat files into a directory hierarchy which matches the analysis paths") parser.add_option('--plotinfodir', dest='PLOTINFODIRS', action='append', default=['.'], help='directory which may contain plot header information (in addition ' 'to standard Rivet search paths)') parser.add_option("--no-rivet-refs", dest="RIVETREFS", action="store_false", default=True, help="don't use Rivet reference data files") # parser.add_option("--refid", dest="REF_ID", # default="REF", help="ID of reference data set (file path for non-REF data)") parser.add_option("--reftitle", dest="REFTITLE", default='Data', help="Reference data legend entry") parser.add_option("--pwd", dest="PATH_PWD", action="store_true", default=False, help="append the current directory (pwd) to the analysis/data search paths (cf. $RIVET_ANALYSIS/DATA_PATH)") parser.add_option("-v", "--verbose", dest="VERBOSE", action="store_true", default=False, help="produce debug output to the terminal") stygroup = OptionGroup(parser, "Plot style") stygroup.add_option("--linear", action="store_true", dest="LINEAR", default=False, help="plot with linear scale") stygroup.add_option("--errs", "--mc-errs", action="store_true", dest="MC_ERRS", default=False, help="show vertical error bars on the MC lines") stygroup.add_option("--no-ratio", action="store_false", dest="RATIO", default=True, help="disable the ratio plot") stygroup.add_option("--rel-ratio", action="store_true", dest="RATIO_DEVIATION", default=False, help="show the ratio plots scaled to the ref error") stygroup.add_option("--no-plottitle", action="store_true", dest="NOPLOTTITLE", default=False, help="don't show the plot title on the plot " "(useful when the plot description should only be given in a caption)") stygroup.add_option("--style", dest="STYLE", default="default", help="change plotting style: default|bw|talk") stygroup.add_option("-c", "--config", dest="CONFIGFILES", action="append", default=["~/.make-plots"], help="additional plot config file(s). Settings will be included in the output configuration.") parser.add_option_group(stygroup) selgroup = OptionGroup(parser, "Selective plotting") # selgroup.add_option("--show-single", dest="SHOW_SINGLE", choices=("no", "ref", "mc", "all"), # default="mc", help="control if a plot file is made if there is only one dataset to be plotted " # "[default=%default]. 
If the value is 'no', single plots are always skipped, for 'ref' and 'mc', " # "the plot will be written only if the single plot is a reference plot or an MC " # "plot respectively, and 'all' will always create single plot files.\n The 'ref' and 'all' values " # "should be used with great care, as they will also write out plot files for all reference " # "histograms without MC traces: combined with the -R/--rivet-refs flag, this is a great way to " # "write out several thousand irrelevant reference data histograms!") # selgroup.add_option("--show-mc-only", "--all", action="store_true", dest="SHOW_IF_MC_ONLY", # default=False, help="make a plot file even if there is only one dataset to be plotted and " # "it is an MC one. Deprecated and will be removed: use --show-single instead, which overrides this.") # # selgroup.add_option("-l", "--histogram-list", dest="HISTOGRAMLIST", # # default=None, help="specify a file containing a list of histograms to plot, in the format " # # "/ANALYSIS_ID/histoname, one per line, e.g. '/DELPHI_1996_S3430090/d01-x01-y01'.") selgroup.add_option("-m", "--match", action="append", help="only write out histograms whose $path/$name string matches these regexes. The argument " "may also be a text file.", dest="PATHPATTERNS") selgroup.add_option("-M", "--unmatch", action="append", help="exclude histograms whose $path/$name string matches these regexes", dest="PATHUNPATTERNS") parser.add_option_group(selgroup) return parser def getHistos(filelist): """Loop over all input files. Only use the first occurrence of any REF-histogram and the first occurrence in each MC file for every MC-histogram.""" refhistos, mchistos = {}, {} for infile in filelist: mchistos.setdefault(infile, {}) analysisobjects = yoda.read(infile, patterns=opts.PATHPATTERNS, unpatterns=opts.PATHUNPATTERNS) #print(analysisobjects) for path, ao in analysisobjects.items(): ## We can't plot non-histograms yet # TODO: support counter plotting with a faked x (or y) position and forced plot width/height if ao.type not in ("Counter", "Histo1D", "Histo2D", "Profile1D", "Profile2D", "Scatter1D", "Scatter2D", "Scatter3D"): continue ## Make a path object and ensure the path is in standard form try: aop = rivet.AOPath(path) except Exception as e: #print(e) print("Found analysis object with non-standard path structure:", path, "... skipping") continue ## We don't plot data objects with path components hidden by an underscore prefix if aop.istmp(): continue ## Add it to the ref or mc paths, if this path isn't already known basepath = aop.basepath(keepref=False) if aop.isref() and basepath not in refhistos: ao.path = aop.varpath(keepref=False, defaultvarid=0) refhistos[basepath] = ao else: #if basepath not in mchistos[infile]: mchistos[infile].setdefault(basepath, {})[aop.varid(0)] = ao return refhistos, mchistos def getRivetRefData(anas=None): "Find all Rivet reference data files" refhistos = {} rivet_data_dirs = rivet.getAnalysisRefPaths() dirlist = [] for d in rivet_data_dirs: if anas is None: import glob dirlist.append(glob.glob(os.path.join(d, '*.yoda'))) else: dirlist.append([os.path.join(d, a+'.yoda') for a in anas]) for filelist in dirlist: # TODO: delegate to getHistos? 
for infile in filelist: analysisobjects = yoda.read(infile, patterns=opts.PATHPATTERNS, unpatterns=opts.PATHUNPATTERNS) for path, ao in analysisobjects.items(): aop = rivet.AOPath(ao.path) if aop.isref(): ao.path = aop.basepath(keepref=False) refhistos[ao.path] = ao return refhistos def parseArgs(args): """Look at the argument list and split it at colons, in order to separate the file names from the plotting options. Store the file names and file specific plotting options.""" filelist = [] plotoptions = {} for a in args: asplit = a.split(':') path = asplit[0] filelist.append(path) plotoptions[path] = [] has_title = False for i in range(1, len(asplit)): ## Add 'Title' if there is no = sign before math mode if '=' not in asplit[i] or ('$' in asplit[i] and asplit[i].index('$') < asplit[i].index('=')): asplit[i] = 'Title=%s' % asplit[i] if asplit[i].startswith('Title='): has_title = True plotoptions[path].append(asplit[i]) if path != "PLOT" and not has_title: plotoptions[path].append('Title=%s' % sanitiseString(os.path.basename( os.path.splitext(path)[0] )) ) return filelist, plotoptions def setStyle(ao, istyle, variation=False): """Set default plot styles (color and line width) colors borrowed from Google Ngrams""" # LINECOLORS = ['{[HTML]{EE3311}}', # red (Google uses 'DC3912') # '{[HTML]{3366FF}}', # blue # '{[HTML]{109618}}', # green # '{[HTML]{FF9900}}', # orange # '{[HTML]{990099}}'] # lilac LINECOLORS = ['red', 'blue', 'green', 'orange', 'lilac'] LINESTYLES = ['solid', 'dashed', 'dashdotted', 'dotted'] if opts.STYLE == 'talk': ao.setAnnotation('LineWidth', '1pt') if opts.STYLE == 'bw': LINECOLORS = ['black!90', 'black!50', 'black!30'] jc = istyle % len(LINECOLORS) c = LINECOLORS[jc] js = (istyle // len(LINECOLORS)) % len(LINESTYLES) s = LINESTYLES[js] ## If plotting a variation (i.e. band), fade the colour if variation: c += "!30" ao.setAnnotation('LineStyle', '%s' % s) ao.setAnnotation('LineColor', '%s' % c) def setOptions(ao, options): "Set arbitrary annotations" for opt in options: key, val = opt.split('=', 1) ao.setAnnotation(key, val) # TODO: move to rivet.utils def mkoutdir(outdir): "Function to make output directories" if not os.path.exists(outdir): try: os.makedirs(outdir) except: msg = "Can't make output directory '%s'" % outdir raise Exception(msg) if not os.access(outdir, os.W_OK): msg = "Can't write to output directory '%s'" % outdir raise Exception(msg) def mkOutput(hpath, aos, plot=None, special=None): """ Make the .dat file string. We can't use "yoda.writeFLAT(anaobjects, 'foobar.dat')" because the PLOT and SPECIAL blocks don't have a corresponding analysis object. 
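The output therefore starts with the '# BEGIN PLOT ... # END PLOT' header block (and, if given, a '# BEGIN SPECIAL ... # END SPECIAL' block), followed by the FLAT-format dump of the analysis objects.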
""" output = '' if plot is not None: output += str(plot) if special is not None: output += "\n" output += "# BEGIN SPECIAL %s\n" % hpath output += special output += "# END SPECIAL\n\n" from io import StringIO sio = StringIO() yoda.writeFLAT(aos, sio) output += sio.getvalue() return output def writeOutput(output, h): "Choose output file name and dir" if opts.HIER_OUTPUT: hparts = h.strip("/").split("/", 1) ana = "_".join(hparts[:-1]) if len(hparts) > 1 else "ANALYSIS" outdir = os.path.join(opts.OUTDIR, ana) outfile = '%s.dat' % hparts[-1].replace("/", "_") else: hparts = h.strip("/").split("/") outdir = opts.OUTDIR outfile = '%s.dat' % "_".join(hparts) mkoutdir(outdir) outfilepath = os.path.join(outdir, outfile) f = open(outfilepath, 'w') f.write(output) f.close() #-------------------------------------------------------------------------------------------- if __name__ == '__main__': ## Command line parsing parser = getCommandLineOptions() opts, args = parser.parse_args() ## Add pwd to search paths if opts.PATH_PWD: rivet.addAnalysisLibPath(os.path.abspath(".")) rivet.addAnalysisDataPath(os.path.abspath(".")) ## Split the input file names and the associated plotting options ## given on the command line into two separate lists filelist, plotoptions = parseArgs(args) ## Remove the PLOT dummy file from the file list if "PLOT" in filelist: filelist.remove("PLOT") ## Check that the files exist for f in filelist: if not os.access(f, os.R_OK): print("Error: cannot read from %s" % f) sys.exit(1) ## Read the .plot files plotdirs = opts.PLOTINFODIRS + [os.path.abspath(os.path.dirname(f)) for f in filelist] plotparser = rivet.mkStdPlotParser(plotdirs, opts.CONFIGFILES) ## Create a list of all histograms to be plotted, and identify if they are 2D histos (which need special plotting) try: refhistos, mchistos = getHistos(filelist) except IOError as e: print("File reading error: ", e.strerror) exit(1) hpaths, h2ds = [], [] for aos in mchistos.values(): for p in aos.keys(): - if p and p not in hpaths: - hpaths.append(p) + ps = rivet.stripOptions(p) + if ps and ps not in hpaths: + hpaths.append(ps) firstaop = aos[p][sorted(aos[p].keys())[0]] # TODO: Would be nicer to test via isHisto and dim or similar, or yoda.Scatter/Histo/Profile base classes - if type(firstaop) in (yoda.Histo2D, yoda.Profile2D) and p not in h2ds: - h2ds.append(p) + if type(firstaop) in (yoda.Histo2D, yoda.Profile2D) and ps not in h2ds: + h2ds.append(ps) ## Take reference data from the Rivet search paths, if there is not already if opts.RIVETREFS: try: refhistos2 = getRivetRefData() except IOError as e: print("File reading error: ", e.strerror) exit(1) refhistos2.update(refhistos) refhistos = refhistos2 ## Purge unmatched ref data entries to save memory keylist = list(refhistos.keys()) # can't modify for-loop target for refhpath in keylist: if refhpath not in hpaths: del refhistos[refhpath] ## Now loop over all MC histograms and plot them # TODO: factorize much of this into a rivet.utils mkplotfile(mchists, refhist, kwargs, is2d=False) function for hpath in hpaths: #print('Currently looking at', h) ## The analysis objects to be plotted anaobjects = [] ## List of histos to be drawn, to sync the legend and plotted lines mainlines = [] varlines = [] ## Is this a 2D histo? is2d = (hpath in h2ds) ## Will we be drawing a ratio plot? 
showratio = opts.RATIO and not is2d ## A Plot object to represent the PLOT section in the .dat file plot = Plot() if not is2d: plot['Legend'] = '1' plot['LogY'] = '1' headers = plotparser.getHeaders(hpath) if headers: plot.update(headers) # for key, val in headers.items(): # plot[key] = val if "PLOT" in plotoptions: for key_val in plotoptions["PLOT"]: key, val = [s.strip() for s in key_val.split("=",1)] plot[key] = val if opts.LINEAR: plot['LogY'] = '0' if opts.NOPLOTTITLE: plot['Title'] = '' if showratio and opts.RATIO_DEVIATION: plot['RatioPlotMode'] = 'deviation' if opts.STYLE == 'talk': plot['PlotSize'] = '8,6' elif opts.STYLE == 'bw' and showratio: plot['RatioPlotErrorBandColor'] = 'black!10' ## Get a special object, if there is one for this path special = plotparser.getSpecial(hpath) ## Handle reference data histogram, if there is one ratioreference, hasdataref = None, False if hpath in refhistos: hasdataref = True refdata = refhistos[hpath] refdata.setAnnotation('Title', opts.REFTITLE) if not is2d: refdata.setAnnotation('ErrorBars', '1') refdata.setAnnotation('PolyMarker', '*') refdata.setAnnotation('ConnectBins', '0') if showratio: ratioreference = hpath ## For 1D anaobjects.append(refdata) mainlines.append(hpath) ## For 2D if is2d: s = mkOutput(hpath, [refdata], plot, special) writeOutput(s, hpath) ## Loop over the MC files to plot all instances of the histogram styleidx = 0 for infile in filelist: - if infile in mchistos and hpath in mchistos[infile]: - hmcs = mchistos[infile][hpath] - ## For now, just plot all the different variation histograms (reversed, so [0] is on top) - # TODO: calculate and plot an appropriate error band, somehow... - for i in sorted(hmcs.keys(), reverse=True): - iscanonical = (str(i) == "0") - hmc = hmcs[i] - ## Default linecolor, linestyle - if not is2d: - setStyle(hmc, styleidx, not iscanonical) - if opts.MC_ERRS: - hmc.setAnnotation('ErrorBars', '1') - ## Plot defaults from .plot files - histopts = plotparser.getHistogramOptions(hpath) - if histopts: - for key, val in histopts.items(): - hmc.setAnnotation(key, val) - ## Command line plot options - setOptions(hmc, plotoptions[infile]) - ## Set path attribute - fullpath = "/"+infile+hpath - if not iscanonical: - fullpath += "["+str(i)+"]" - hmc.setAnnotation('Path', fullpath) - ## Add object / path to appropriate lists - anaobjects.append(hmc) - if iscanonical: - mainlines.append(fullpath) - else: - varlines.append(fullpath) - if showratio and ratioreference is None and iscanonical: - ratioreference = fullpath - ## For 2D, plot each histo now (since overlay makes no sense) - if is2d: - s = mkOutput(hpath, [hmc], plot, special) - writeOutput(s, fullpath) - styleidx += 1 + if infile in mchistos: + for xpath in mchistos[infile]: + if rivet.stripOptions(xpath) != hpath: + continue + hmcs = mchistos[infile][xpath] + ## For now, just plot all the different variation histograms (reversed, so [0] is on top) + # TODO: calculate and plot an appropriate error band, somehow... 
+ for i in sorted(hmcs.keys(), reverse=True): + iscanonical = (str(i) == "0") + hmc = hmcs[i] + ## Default linecolor, linestyle + if not is2d: + setStyle(hmc, styleidx, not iscanonical) + if opts.MC_ERRS: + hmc.setAnnotation('ErrorBars', '1') + ## Plot defaults from .plot files + histopts = plotparser.getHistogramOptions(hpath) + if histopts: + for key, val in histopts.items(): + hmc.setAnnotation(key, val) + ## Command line plot options + setOptions(hmc, plotoptions[infile]) + ## Set path attribute + fullpath = "/"+infile+xpath + if not iscanonical: + fullpath += "["+str(i)+"]" + hmc.setAnnotation('Path', fullpath) + ## Add object / path to appropriate lists + if hmc.hasAnnotation("Title"): + hmc.setAnnotation("Title", hmc.annotation("Title") + + rivet.extractOptionString(xpath)) + anaobjects.append(hmc) + if iscanonical: + mainlines.append(fullpath) + else: + varlines.append(fullpath) + if showratio and ratioreference is None and iscanonical: + ratioreference = fullpath + ## For 2D, plot each histo now (since overlay makes no sense) + if is2d: + s = mkOutput(hpath, [hmc], plot, special) + writeOutput(s, fullpath) + styleidx += 1 ## Finally render the combined plots; only show the first one if it's 2D # TODO: Only show the first *MC* one if 2D? if is2d: anaobjects = anaobjects[:1] ## Add final attrs to Plot plot['DrawOnly'] = ' '.join(varlines + mainlines).strip() plot['LegendOnly'] = ' '.join(mainlines).strip() if showratio and len(varlines + mainlines) > 1: plot['RatioPlot'] = '1' plot['RatioPlotReference'] = ratioreference if not hasdataref and "RatioPlotYLabel" not in plot: if plot.get('RatioPlotMode', '') == 'deviation': plot['RatioPlotYLabel'] = 'Deviation' #r'$\text{MC}-\text{MC}_\text{ref}$' else: plot['RatioPlotYLabel'] = 'Ratio' #r'$\text{MC}/\text{MC}_\text{ref}$' ## Make the output and write to file o = mkOutput(hpath, anaobjects, plot, special) writeOutput(o, hpath) diff --git a/bin/rivet-mkhtml b/bin/rivet-mkhtml --- a/bin/rivet-mkhtml +++ b/bin/rivet-mkhtml @@ -1,507 +1,508 @@ #! /usr/bin/env python """\ %prog [options] [ ...] [PLOT:Key1=Val1:...] Make web pages from histogram files written out by Rivet. You can specify multiple Monte Carlo YODA files to be compared in the same syntax as for rivet-cmphistos, i.e. including plotting options. Reference data, analysis metadata, and plot style information should be found automatically (if not, set the RIVET_ANALYSIS_PATH or similar variables appropriately). Any existing output directory will be overwritten. 
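EXAMPLE (illustrative file names and options only): %prog run1.yoda:'Title=Tune A' run2.yoda:'Title=Tune B' -o my-plots would build the comparison web pages for the two (assumed existing) YODA files under ./my-plots, with the given legend titles.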
ENVIRONMENT: * RIVET_ANALYSIS_PATH: list of paths to be searched for plugin analysis libraries at runtime * RIVET_DATA_PATH: list of paths to be searched for data files """ from __future__ import print_function import rivet, sys, os rivet.util.check_python_version() rivet.util.set_process_name(os.path.basename(__file__)) import glob, shutil from subprocess import Popen,PIPE from optparse import OptionParser, OptionGroup parser = OptionParser(usage=__doc__) parser.add_option("-o", "--outputdir", dest="OUTPUTDIR", default="./rivet-plots", help="directory for Web page output") parser.add_option("-c", "--config", dest="CONFIGFILES", action="append", default=["~/.make-plots"], help="plot config file(s) to be used with rivet-cmphistos") parser.add_option("-n", "--num-threads", metavar="NUMTHREADS", dest="NUMTHREADS", type=int, default=None, help="request make-plots to use a specific number of threads") parser.add_option("--ignore-missing", dest="IGNORE_MISSING", action="store_true", default=False, help="ignore missing YODA files") parser.add_option("-i", "--ignore-unvalidated", dest="IGNORE_UNVALIDATED", action="store_true", default=False, help="ignore unvalidated analyses") # parser.add_option("--ref", "--refid", dest="REF_ID", # default=None, help="ID of reference data set (file path for non-REF data)") parser.add_option("--dry-run", help="don't actually do any plotting or HTML building", dest="DRY_RUN", action="store_true", default=False) parser.add_option("--no-cleanup", dest="NO_CLEANUP", action="store_true", default=False, help="keep plotting temporary directory") parser.add_option("--no-subproc", dest="NO_SUBPROC", action="store_true", default=False, help="don't use subprocesses to render the plots in parallel -- useful for debugging") parser.add_option("--pwd", dest="PATH_PWD", action="store_true", default=False, help="append the current directory (pwd) to the analysis/data search paths (cf. $RIVET_ANALYSIS_PATH)") stygroup = OptionGroup(parser, "Style options") stygroup.add_option("-t", "--title", dest="TITLE", default="Plots from Rivet analyses", help="title to be displayed on the main web page") stygroup.add_option("--reftitle", dest="REFTITLE", default="Data", help="legend entry for reference data") stygroup.add_option("--no-plottitle", dest="NOPLOTTITLE", action="store_true", default=False, help="don't show the plot title on the plot " "(useful when the plot description should only be given in a caption)") stygroup.add_option("-s", "--single", dest="SINGLE", action="store_true", default=False, help="display plots on single webpage.") stygroup.add_option("--no-ratio", dest="SHOW_RATIO", action="store_false", default=True, help="don't draw a ratio plot under each main plot.") stygroup.add_option("--errs", "--mc-errs", dest="MC_ERRS", action="store_true", default=False, help="plot error bars.") stygroup.add_option("--offline", dest="OFFLINE", action="store_true", default=False, help="generate HTML that does not use external URLs.") stygroup.add_option("--pdf", dest="VECTORFORMAT", action="store_const", const="PDF", default="PDF", help="use PDF as the vector plot format.") stygroup.add_option("--ps", dest="VECTORFORMAT", action="store_const", const="PS", default="PDF", help="use PostScript as the vector plot format. 
DEPRECATED") stygroup.add_option("--booklet", dest="BOOKLET", action="store_true", default=False, help="create booklet (currently only available for PDF with pdftk or pdfmerge).") stygroup.add_option("--font", dest="OUTPUT_FONT", choices="palatino,cm,times,helvetica,minion".split(","), default="palatino", help="choose the font to be used in the plots") stygroup.add_option("--palatino", dest="OUTPUT_FONT", action="store_const", const="palatino", default="palatino", help="use Palatino as font (default). DEPRECATED: Use --font") stygroup.add_option("--cm", dest="OUTPUT_FONT", action="store_const", const="cm", default="palatino", help="use Computer Modern as font. DEPRECATED: Use --font") stygroup.add_option("--times", dest="OUTPUT_FONT", action="store_const", const="times", default="palatino", help="use Times as font. DEPRECATED: Use --font") stygroup.add_option("--helvetica", dest="OUTPUT_FONT", action="store_const", const="helvetica", default="palatino", help="use Helvetica as font. DEPRECATED: Use --font") stygroup.add_option("--minion", dest="OUTPUT_FONT", action="store_const", const="minion", default="palatino", help="use Adobe Minion Pro as font. DEPRECATED: Use --font") parser.add_option_group(stygroup) selgroup = OptionGroup(parser, "Selective plotting") selgroup.add_option("-m", "--match", action="append", dest="PATHPATTERNS", default=[], help="only write out histograms whose $path/$name string matches any of these regexes") selgroup.add_option("-M", "--unmatch", action="append", dest="PATHUNPATTERNS", default=[], help="exclude histograms whose $path/$name string matches any of these regexes") selgroup.add_option(#"-a", #< these were confusing, and -m should be enough "--ana-match", action="append", dest="ANAPATTERNS", default=[], help="only write out histograms from analyses whose name matches any of these regexes") selgroup.add_option(#"-A", #< these were confusing, and -M should be enough "--ana-unmatch", action="append", dest="ANAUNPATTERNS", default=[], help="exclude histograms from analyses whose name matches any of these regexes") parser.add_option_group(selgroup) vrbgroup = OptionGroup(parser, "Verbosity control") vrbgroup.add_option("-v", "--verbose", help="add extra debug messages", dest="VERBOSE", action="store_true", default=False) parser.add_option_group(vrbgroup) opts, yodafiles = parser.parse_args() ## Add pwd to search paths if opts.PATH_PWD: rivet.addAnalysisLibPath(os.path.abspath(".")) rivet.addAnalysisDataPath(os.path.abspath(".")) ## Check that there are some arguments! 
if not yodafiles: print("Error: You need to specify some YODA files to be plotted!") sys.exit(1) ## Make output directory if not opts.DRY_RUN: if os.path.exists(opts.OUTPUTDIR) and not os.path.realpath(opts.OUTPUTDIR)==os.getcwd(): import shutil shutil.rmtree(opts.OUTPUTDIR) try: os.makedirs(opts.OUTPUTDIR) except: print("Error: failed to make new directory '%s'" % opts.OUTPUTDIR) sys.exit(1) ## Get set of analyses involved in the runs plotarg = None analyses = set() blocked_analyses = set() import yoda for yodafile in yodafiles: if yodafile.startswith("PLOT:"): plotarg = yodafile continue yodafilepath = os.path.abspath(yodafile.split(":")[0]) if not os.access(yodafilepath, os.R_OK): print("Error: cannot read from %s" % yodafilepath) if opts.IGNORE_MISSING: continue else: sys.exit(2) try: ## Note: we use -m/-M flags here as well as when calling rivet-cmphistos, to potentially speed this initial loading analysisobjects = yoda.read(yodafilepath, patterns=opts.PATHPATTERNS, unpatterns=opts.PATHUNPATTERNS) except IOError as e: print("File reading error: ", e.strerror) sys.exit(1) for path, ao in analysisobjects.items(): ## Make a path object and ensure the path is in standard form try: aop = rivet.AOPath(path) except Exception as e: #print(e) print("Found analysis object with non-standard path structure:", path, "... skipping") continue ## We don't plot data objects with path components hidden by an underscore prefix if aop.istmp(): continue ## Identify analysis/histo name parts analysis = "ANALYSIS" if aop.basepathparts(keepref=False): - analysis = aop.basepathparts(keepref=False)[0] #< TODO: for compatibility with rivet-cmphistos... generalise? + analysis = rivet.stripOptions(aop.basepathparts(keepref=False)[0]) + #< TODO: for compatibility with rivet-cmphistos... generalise? #analysis = "_".join(aop.dirnameparts(keepref=False)[:-1]) #< TODO: would this be nicer? Currently incompatible with rivet-cmphistos ## Optionally veto on analysis name pattern matching if analysis in analyses.union(blocked_analyses): continue import re matched = True if opts.ANAPATTERNS: matched = False for patt in opts.ANAPATTERNS: if re.match(patt, analysis) is not None: matched = True break if matched and opts.ANAUNPATTERNS: for patt in opts.ANAUNPATTERNS: if re.match(patt, analysis) is not None: matched = False break if matched: analyses.add(analysis) else: blocked_analyses.add(analysis) ## Sort analyses: group ascending by analysis name (could specialise grouping by collider), then ## descending by year, and finally descending by bibliographic archive ID code (INSPIRE first). 
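## For instance (names purely illustrative): for the same experiment a 2015 analysis is listed before a 2013 one, an "_I" (Inspire) ID sorts before an "_S" (SPIRES) ID of the same year, and "MC_*" analyses always go to the end of the index.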
def anasort(name): rtn = (1, name) if name.startswith("MC"): rtn = (99999999, name) else: stdparts = name.split("_") try: year = int(stdparts[1]) rtn = (0, stdparts[0], -year, 0) idcode = (0 if stdparts[2][0] == "I" else 1e10) - int(stdparts[2][1:]) rtn = (0, stdparts[0], -year, idcode) if len(stdparts) > 3: rtn += stdparts[3:] except: pass return rtn analyses = sorted(analyses, key=anasort) ## Uncomment to test analysis ordering on index page # print(analyses) # sys.exit(0) ## Run rivet-cmphistos to get plain .dat files from .yoda ## We do this here since it also makes the necessary directories ch_cmd = ["rivet-cmphistos"] if opts.MC_ERRS: ch_cmd.append("--mc-errs") if not opts.SHOW_RATIO: ch_cmd.append("--no-ratio") if opts.NOPLOTTITLE: ch_cmd.append("--no-plottitle") # if opts.REF_ID is not None: # ch_cmd.append("--refid=%s" % os.path.abspath(opts.REF_ID)) if opts.REFTITLE: ch_cmd.append("--reftitle=%s" % opts.REFTITLE ) if opts.PATHPATTERNS: for patt in opts.PATHPATTERNS: ch_cmd += ["-m", patt] #"'"+patt+"'"] if opts.PATHUNPATTERNS: for patt in opts.PATHUNPATTERNS: ch_cmd += ["-M", patt] #"'"+patt+"'"] ch_cmd.append("--hier-out") # TODO: Need to be able to override this: provide a --plotinfodir cmd line option? ch_cmd.append("--plotinfodir=%s" % os.path.abspath("../")) for af in yodafiles: yodafilepath = os.path.abspath(af.split(":")[0]) if af.startswith("PLOT:"): yodafilepath = "PLOT" elif not os.access(yodafilepath, os.R_OK): continue newarg = yodafilepath if ":" in af: newarg += ":" + af.split(":", 1)[1] # print(newarg) ch_cmd.append(newarg) ## Pass rivet-mkhtml -c args to rivet-cmphistos for configfile in opts.CONFIGFILES: configfile = os.path.abspath(os.path.expanduser(configfile)) if os.access(configfile, os.R_OK): ch_cmd += ["-c", configfile] if opts.VERBOSE: ch_cmd.append("--verbose") print("Calling rivet-cmphistos with the following command:") print(" ".join(ch_cmd)) ## Run rivet-cmphistos in a subdir, after fixing any relative paths in Rivet env vars if not opts.DRY_RUN: for var in ("RIVET_ANALYSIS_PATH", "RIVET_DATA_PATH", "RIVET_REF_PATH", "RIVET_INFO_PATH", "RIVET_PLOT_PATH"): if var in os.environ: abspaths = [os.path.abspath(p) for p in os.environ[var].split(":")] os.environ[var] = ":".join(abspaths) subproc = Popen(ch_cmd, cwd=opts.OUTPUTDIR, stdout=PIPE, stderr=PIPE) out, err = subproc.communicate() retcode = subproc.returncode if opts.VERBOSE or retcode != 0: print('Output from rivet-cmphistos:\n', out) if err : print('Errors from rivet-cmphistos:\n', err) if retcode != 0: print('Crash in rivet-cmphistos code = ', retcode, ' exiting') exit(retcode) ## Write web page containing all (matched) plots ## Make web pages first so that we can load it locally in ## a browser to view the output before all plots are made if not opts.DRY_RUN: style = ''' ''' ## Include MathJax configuration script = '' if not opts.OFFLINE: script = '''\ ''' ## A helper function for metadata LaTeX -> HTML conversion from rivet.util import htmlify ## A timestamp HTML fragment to be used on each page: import datetime timestamp = '

Generated at %s

\n' % datetime.datetime.now().strftime("%A, %d. %B %Y %I:%M%p") index = open(os.path.join(opts.OUTPUTDIR, "index.html"), "w") index.write('\n\n%s\n%s\n' % (opts.TITLE, style + script)) if opts.BOOKLET and opts.VECTORFORMAT == "PDF": index.write('

%s

\n\n' % opts.TITLE) else: index.write('

%s

\n\n' % opts.TITLE) if opts.SINGLE: ## Write table of contents index.write('
    \n') for analysis in analyses: summary = analysis ana = rivet.AnalysisLoader.getAnalysis(analysis) if ana: summary = "%s (%s)" % (ana.summary(), analysis) if opts.IGNORE_UNVALIDATED and ana.status() != "VALIDATED": continue index.write('
  • %s\n' % (analysis, htmlify(summary)) ) index.write('
\n') for analysis in analyses: references = [] summary = htmlify(analysis) description, inspireid, spiresid = None, None, None if analysis.find("_I") > 0: inspireid = analysis[analysis.rfind('_I')+2:len(analysis)] elif analysis.find("_S") > 0: spiresid = analysis[analysis.rfind('_S')+2:len(analysis)] ana = rivet.AnalysisLoader.getAnalysis(analysis) if ana: if ana.summary(): summary = htmlify("%s (%s)" % (ana.summary(), analysis)) references = ana.references() description = htmlify(ana.description()) spiresid = ana.spiresId() if opts.IGNORE_UNVALIDATED and ana.status().upper() != "VALIDATED": continue if opts.SINGLE: index.write('\n

%s

\n' % (analysis, summary)) else: index.write('\n

%s

\n' % (analysis, summary)) reflist = [] if inspireid: reflist.append('Inspire' % inspireid) reflist.append('HepData' % inspireid) elif spiresid: # elif ana.spiresId(): reflist.append('Inspire' % spiresid) reflist.append('HepData' % spiresid) reflist += references index.write('

%s

\n' % " | ".join(reflist)) if description: try: index.write('

%s

\n' % description) except UnicodeEncodeError as ue: print("Unicode error in analysis description for " + analysis + ": " + str(ue)) anapath = os.path.join(opts.OUTPUTDIR, analysis) if not opts.SINGLE: if not os.path.exists(anapath): os.makedirs(anapath) anaindex = open(os.path.join(anapath, "index.html"), 'w') anaindex.write('\n\n%s – %s\n%s\n\n' % (htmlify(opts.TITLE), analysis, style + script)) anaindex.write('

%s

\n' % htmlify(analysis)) anaindex.write('

Back to index

\n') if description: try: anaindex.write('

\n %s\n

\n' % description) except UnicodeEncodeError as ue: print("Unicode error in analysis description for " + analysis + ": " + str(ue)) else: anaindex = index datfiles = glob.glob("%s/*.dat" % anapath) #print(datfiles) anaindex.write('
\n') for datfile in sorted(datfiles): obsname = os.path.basename(datfile).replace(".dat", "") pngfile = obsname+".png" vecfile = obsname+"."+opts.VECTORFORMAT.lower() srcfile = obsname+".dat" if opts.SINGLE: pngfile = os.path.join(analysis, pngfile) vecfile = os.path.join(analysis, vecfile) srcfile = os.path.join(analysis, srcfile) anaindex.write('
\n') anaindex.write(' %s:
\n' % (analysis, obsname, srcfile, os.path.splitext(vecfile)[0]) ) anaindex.write(' \n' % (analysis, obsname, vecfile) ) anaindex.write(' \n' % pngfile ) anaindex.write(' \n') anaindex.write('
\n') anaindex.write('
\n') if not opts.SINGLE: anaindex.write('
%s\n
\n' % timestamp) anaindex.close() index.write('
%s\n' % timestamp) index.close() # http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python def which(program): import os def is_exe(fpath): return os.path.isfile(fpath) and os.access(fpath, os.X_OK) fpath, fname = os.path.split(program) if fpath: if is_exe(program): return program else: for path in os.environ["PATH"].split(os.pathsep): path = path.strip('"') exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file return None ## Run make-plots on all generated .dat files # sys.exit(0) mp_cmd = ["make-plots"] if opts.NUMTHREADS: mp_cmd.append("--num-threads=%d" % opts.NUMTHREADS) if opts.NO_CLEANUP: mp_cmd.append("--no-cleanup") if opts.NO_SUBPROC: mp_cmd.append("--no-subproc") if opts.VECTORFORMAT == "PDF": mp_cmd.append("--pdfpng") elif opts.VECTORFORMAT == "PS": mp_cmd.append("--pspng") if opts.OUTPUT_FONT: mp_cmd.append("--font=%s" % opts.OUTPUT_FONT) # if opts.OUTPUT_FONT.upper() == "PALATINO": # mp_cmd.append("--palatino") # if opts.OUTPUT_FONT.upper() == "CM": # mp_cmd.append("--cm") # elif opts.OUTPUT_FONT.upper() == "TIMES": # mp_cmd.append("--times") # elif opts.OUTPUT_FONT.upper() == "HELVETICA": # mp_cmd.append("--helvetica") # elif opts.OUTPUT_FONT.upper() == "MINION": # mp_cmd.append("--minion") datfiles = [] for analysis in analyses: anapath = os.path.join(opts.OUTPUTDIR, analysis) #print(anapath) anadatfiles = glob.glob("%s/*.dat" % anapath) datfiles += sorted(anadatfiles) if datfiles: mp_cmd += datfiles if opts.VERBOSE: mp_cmd.append("--verbose") print("Calling make-plots with the following options:") print(" ".join(mp_cmd)) if not opts.DRY_RUN: Popen(mp_cmd).wait() if opts.BOOKLET and opts.VECTORFORMAT=="PDF": if which("pdftk") is not None: bookletcmd = ["pdftk"] for analysis in analyses: anapath = os.path.join(opts.OUTPUTDIR, analysis) bookletcmd += sorted(glob.glob("%s/*.pdf" % anapath)) bookletcmd += ["cat", "output", "%s/booklet.pdf" % opts.OUTPUTDIR] print(bookletcmd) Popen(bookletcmd).wait() elif which("pdfmerge") is not None: bookletcmd = ["pdfmerge"] for analysis in analyses: anapath = os.path.join(opts.OUTPUTDIR, analysis) bookletcmd += sorted(glob.glob("%s/*.pdf" % anapath)) bookletcmd += ["%s/booklet.pdf" % opts.OUTPUTDIR] print(bookletcmd) Popen(bookletcmd).wait() else: print("Neither pdftk nor pdfmerge available --- not booklet output possible") diff --git a/include/Rivet/Analysis.hh b/include/Rivet/Analysis.hh --- a/include/Rivet/Analysis.hh +++ b/include/Rivet/Analysis.hh @@ -1,1072 +1,1102 @@ // -*- C++ -*- #ifndef RIVET_Analysis_HH #define RIVET_Analysis_HH #include "Rivet/Config/RivetCommon.hh" #include "Rivet/AnalysisInfo.hh" #include "Rivet/Event.hh" #include "Rivet/Projection.hh" #include "Rivet/ProjectionApplier.hh" #include "Rivet/ProjectionHandler.hh" #include "Rivet/AnalysisLoader.hh" #include "Rivet/Tools/Cuts.hh" #include "Rivet/Tools/Logging.hh" #include "Rivet/Tools/ParticleUtils.hh" #include "Rivet/Tools/BinnedHistogram.hh" #include "Rivet/Tools/RivetMT2.hh" #include "Rivet/Tools/RivetYODA.hh" /// @def vetoEvent /// Preprocessor define for vetoing events, including the log message and return. #define vetoEvent \ do { MSG_DEBUG("Vetoing event on line " << __LINE__ << " of " << __FILE__); return; } while(0) namespace Rivet { // Forward declaration class AnalysisHandler; /// @brief This is the base class of all analysis classes in Rivet. /// /// There are /// three virtual functions which should be implemented in base classes: /// /// void init() is called by Rivet before a run is started. 
Here the /// analysis class should book necessary histograms. The needed /// projections should probably rather be constructed in the /// constructor. /// /// void analyze(const Event&) is called once for each event. Here the /// analysis class should apply the necessary Projections and fill the /// histograms. /// /// void finalize() is called after a run is finished. Here the analysis /// class should do whatever manipulations are necessary on the /// histograms. Writing the histograms to a file is, however, done by /// the Rivet class. class Analysis : public ProjectionApplier { /// The AnalysisHandler is a friend. friend class AnalysisHandler; public: /// @name Standard constructors and destructors. //@{ // /// The default constructor. // Analysis(); /// Constructor Analysis(const std::string& name); /// The destructor. virtual ~Analysis() {} //@} public: /// @name Main analysis methods //@{ /// Initialize this analysis object. A concrete class should here /// book all necessary histograms. An overridden function must make /// sure it first calls the base class function. virtual void init() { } /// Analyze one event. A concrete class should here apply the /// necessary projections on the \a event and fill the relevant /// histograms. An overridden function must make sure it first calls /// the base class function. virtual void analyze(const Event& event) = 0; /// Finalize this analysis object. A concrete class should here make /// all necessary operations on the histograms. Writing the /// histograms to a file is, however, done by the Rivet class. An /// overridden function must make sure it first calls the base class /// function. virtual void finalize() { } //@} public: /// @name Metadata /// Metadata is used for querying from the command line and also for /// building web pages and the analysis pages in the Rivet manual. //@{ /// Get the actual AnalysisInfo object in which all this metadata is stored. const AnalysisInfo& info() const { assert(_info && "No AnalysisInfo object :O"); return *_info; } /// @brief Get the name of the analysis. /// - /// By default this is computed by combining the results of the experiment, - /// year and Spires ID metadata methods and you should only override it if - /// there's a good reason why those won't work. + /// By default this is computed by combining the results of the + /// experiment, year and Spires ID metadata methods and you should + /// only override it if there's a good reason why those won't + /// work. If options has been set for this instance, a + /// corresponding string is appended at the end. virtual std::string name() const { - return (info().name().empty()) ? _defaultname : info().name(); + return ( (info().name().empty()) ? _defaultname : info().name() ) + _optstring; } /// Get the Inspire ID code for this analysis. virtual std::string inspireId() const { return info().inspireId(); } /// Get the SPIRES ID code for this analysis (~deprecated). virtual std::string spiresId() const { return info().spiresId(); } /// @brief Names & emails of paper/analysis authors. /// /// Names and email of authors in 'NAME \' format. The first /// name in the list should be the primary contact person. virtual std::vector authors() const { return info().authors(); } /// @brief Get a short description of the analysis. /// /// Short (one sentence) description used as an index entry. /// Use @a description() to provide full descriptive paragraphs /// of analysis details. 
virtual std::string summary() const { return info().summary(); } /// @brief Get a full description of the analysis. /// /// Full textual description of this analysis, what it is useful for, /// what experimental techniques are applied, etc. Should be treated /// as a chunk of restructuredText (http://docutils.sourceforge.net/rst.html), /// with equations to be rendered as LaTeX with amsmath operators. virtual std::string description() const { return info().description(); } /// @brief Information about the events needed as input for this analysis. /// /// Event types, energies, kinematic cuts, particles to be considered /// stable, etc. etc. Should be treated as a restructuredText bullet list /// (http://docutils.sourceforge.net/rst.html) virtual std::string runInfo() const { return info().runInfo(); } /// Experiment which performed and published this analysis. virtual std::string experiment() const { return info().experiment(); } /// Collider on which the experiment ran. virtual std::string collider() const { return info().collider(); } /// When the original experimental analysis was published. virtual std::string year() const { return info().year(); } /// The luminosity in inverse femtobarn virtual std::string luminosityfb() const { return info().luminosityfb(); } /// Journal, and preprint references. virtual std::vector references() const { return info().references(); } /// BibTeX citation key for this article. virtual std::string bibKey() const { return info().bibKey(); } /// BibTeX citation entry for this article. virtual std::string bibTeX() const { return info().bibTeX(); } /// Whether this analysis is trusted (in any way!) virtual std::string status() const { return (info().status().empty()) ? "UNVALIDATED" : info().status(); } /// Any work to be done on this analysis. virtual std::vector todos() const { return info().todos(); } /// Return the allowed pairs of incoming beams required by this analysis. virtual const std::vector& requiredBeams() const { return info().beams(); } /// Declare the allowed pairs of incoming beams required by this analysis. virtual Analysis& setRequiredBeams(const std::vector& requiredBeams) { info().setBeams(requiredBeams); return *this; } /// Sets of valid beam energy pairs, in GeV virtual const std::vector >& requiredEnergies() const { return info().energies(); } /// Get vector of analysis keywords virtual const std::vector & keywords() const { return info().keywords(); } /// Declare the list of valid beam energy pairs, in GeV virtual Analysis& setRequiredEnergies(const std::vector >& requiredEnergies) { info().setEnergies(requiredEnergies); return *this; } /// Return true if this analysis needs to know the process cross-section. /// @todo Remove this and require HepMC >= 2.06 bool needsCrossSection() const { return info().needsCrossSection(); } /// Declare whether this analysis needs to know the process cross-section from the generator. /// @todo Remove this and require HepMC >= 2.06 Analysis& setNeedsCrossSection(bool needed=true) { info().setNeedsCrossSection(needed); return *this; } //@} /// @name Internal metadata modifying methods //@{ /// Get the actual AnalysisInfo object in which all this metadata is stored (non-const). 
AnalysisInfo& info() { assert(_info && "No AnalysisInfo object :O"); return *_info; } //@} /// @name Run conditions //@{ /// Incoming beams for this run const ParticlePair& beams() const; /// Incoming beam IDs for this run const PdgIdPair beamIds() const; /// Centre of mass energy for this run double sqrtS() const; //@} /// @name Analysis / beam compatibility testing //@{ /// Check if analysis is compatible with the provided beam particle IDs and energies bool isCompatible(const ParticlePair& beams) const; /// Check if analysis is compatible with the provided beam particle IDs and energies bool isCompatible(PdgId beam1, PdgId beam2, double e1, double e2) const; /// Check if analysis is compatible with the provided beam particle IDs and energies bool isCompatible(const PdgIdPair& beams, const std::pair& energies) const; //@} /// Set the cross section from the generator Analysis& setCrossSection(double xs); /// Access the controlling AnalysisHandler object. AnalysisHandler& handler() const { return *_analysishandler; } protected: /// Get a Log object based on the name() property of the calling analysis object. Log& getLog() const; /// Get the process cross-section in pb. Throws if this hasn't been set. double crossSection() const; /// Get the process cross-section per generated event in pb. Throws if this /// hasn't been set. double crossSectionPerEvent() const; /// @brief Get the number of events seen (via the analysis handler). /// /// @note Use in the finalize phase only. size_t numEvents() const; /// @brief Get the sum of event weights seen (via the analysis handler). /// /// @note Use in the finalize phase only. double sumW() const; /// Alias double sumOfWeights() const { return sumW(); } /// @brief Get the sum of squared event weights seen (via the analysis handler). /// /// @note Use in the finalize phase only. double sumW2() const; protected: /// @name Histogram paths //@{ /// Get the canonical histogram "directory" path for this analysis. const std::string histoDir() const; /// Get the canonical histogram path for the named histogram in this analysis. const std::string histoPath(const std::string& hname) const; /// Get the canonical histogram path for the numbered histogram in this analysis. const std::string histoPath(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const; /// Get the internal histogram name for given d, x and y (cf. HepData) const std::string mkAxisCode(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const; /// Alias /// @deprecated Prefer the "mk" form, consistent with other "making function" names const std::string makeAxisCode(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const { return mkAxisCode(datasetId, xAxisId, yAxisId); } //@} /// @name Histogram reference data //@{ /// Get reference data for a named histo /// @todo SFINAE to ensure that the type inherits from YODA::AnalysisObject? template const T& refData(const string& hname) const { _cacheRefData(); MSG_TRACE("Using histo bin edges for " << name() << ":" << hname); if (!_refdata[hname]) { MSG_ERROR("Can't find reference histogram " << hname); throw Exception("Reference data " + hname + " not found."); } return dynamic_cast(*_refdata[hname]); } /// Get reference data for a numbered histo /// @todo SFINAE to ensure that the type inherits from YODA::AnalysisObject? 
template const T& refData(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const { const string hname = makeAxisCode(datasetId, xAxisId, yAxisId); return refData(hname); } //@} /// @name Counter booking //@{ /// Book a counter. CounterPtr bookCounter(const std::string& name, const std::string& title=""); // const std::string& valtitle="" /// Book a counter, using a path generated from the dataset and axis ID codes /// /// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way. CounterPtr bookCounter(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, const std::string& title=""); // const std::string& valtitle="" //@} /// @name 1D histogram booking //@{ /// Book a 1D histogram with @a nbins uniformly distributed across the range @a lower - @a upper . Histo1DPtr bookHisto1D(const std::string& name, size_t nbins, double lower, double upper, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// Book a 1D histogram with non-uniform bins defined by the vector of bin edges @a binedges . Histo1DPtr bookHisto1D(const std::string& name, const std::vector& binedges, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// Book a 1D histogram with non-uniform bins defined by the vector of bin edges @a binedges . Histo1DPtr bookHisto1D(const std::string& name, const std::initializer_list& binedges, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// Book a 1D histogram with binning from a reference scatter. Histo1DPtr bookHisto1D(const std::string& name, const Scatter2D& refscatter, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// Book a 1D histogram, using the binnings in the reference data histogram. Histo1DPtr bookHisto1D(const std::string& name, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// Book a 1D histogram, using the binnings in the reference data histogram. /// /// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way. Histo1DPtr bookHisto1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); //@} /// @name 2D histogram booking //@{ /// Book a 2D histogram with @a nxbins and @a nybins uniformly /// distributed across the ranges @a xlower - @a xupper and @a /// ylower - @a yupper respectively along the x- and y-axis. Histo2DPtr bookHisto2D(const std::string& name, size_t nxbins, double xlower, double xupper, size_t nybins, double ylower, double yupper, const std::string& title="", const std::string& xtitle="", const std::string& ytitle="", const std::string& ztitle=""); /// Book a 2D histogram with non-uniform bins defined by the /// vectors of bin edges @a xbinedges and @a ybinedges. Histo2DPtr bookHisto2D(const std::string& name, const std::vector& xbinedges, const std::vector& ybinedges, const std::string& title="", const std::string& xtitle="", const std::string& ytitle="", const std::string& ztitle=""); /// Book a 2D histogram with non-uniform bins defined by the /// vectors of bin edges @a xbinedges and @a ybinedges. 
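// As a quick illustration of the 1D booking overloads declared above (the names "mult",
// "mjj" and the _h_* members are illustrative, not from this patch), init() can book with
// uniform bins, explicit edges, or binning taken from the reference data:
//
//   _h_mult = bookHisto1D("mult", 10, -0.5, 9.5);                 // 10 uniform bins
//   _h_mjj  = bookHisto1D("mjj", {0., 100., 250., 500., 1000.});  // explicit bin edges
//   _h_ref  = bookHisto1D(1, 1, 1);                               // binning from the d01-x01-y01 reference entry
//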
Histo2DPtr bookHisto2D(const std::string& name, const std::initializer_list& xbinedges, const std::initializer_list& ybinedges, const std::string& title="", const std::string& xtitle="", const std::string& ytitle="", const std::string& ztitle=""); /// Book a 2D histogram with binning from a reference scatter. Histo2DPtr bookHisto2D(const std::string& name, const Scatter3D& refscatter, const std::string& title="", const std::string& xtitle="", const std::string& ytitle="", const std::string& ztitle=""); /// Book a 2D histogram, using the binnings in the reference data histogram. Histo2DPtr bookHisto2D(const std::string& name, const std::string& title="", const std::string& xtitle="", const std::string& ytitle="", const std::string& ztitle=""); /// Book a 2D histogram, using the binnings in the reference data histogram. /// /// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way. Histo2DPtr bookHisto2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, const std::string& title="", const std::string& xtitle="", const std::string& ytitle="", const std::string& ztitle=""); //@} /// @name 1D profile histogram booking //@{ /// Book a 1D profile histogram with @a nbins uniformly distributed across the range @a lower - @a upper . Profile1DPtr bookProfile1D(const std::string& name, size_t nbins, double lower, double upper, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// Book a 1D profile histogram with non-uniform bins defined by the vector of bin edges @a binedges . Profile1DPtr bookProfile1D(const std::string& name, const std::vector& binedges, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// Book a 1D profile histogram with non-uniform bins defined by the vector of bin edges @a binedges . Profile1DPtr bookProfile1D(const std::string& name, const std::initializer_list& binedges, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// Book a 1D profile histogram with binning from a reference scatter. Profile1DPtr bookProfile1D(const std::string& name, const Scatter2D& refscatter, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// Book a 1D profile histogram, using the binnings in the reference data histogram. Profile1DPtr bookProfile1D(const std::string& name, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// Book a 1D profile histogram, using the binnings in the reference data histogram. /// /// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way. Profile1DPtr bookProfile1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); //@} /// @name 2D profile histogram booking //@{ /// Book a 2D profile histogram with @a nxbins and @a nybins uniformly /// distributed across the ranges @a xlower - @a xupper and @a ylower - @a /// yupper respectively along the x- and y-axis. Profile2DPtr bookProfile2D(const std::string& name, size_t nxbins, double xlower, double xupper, size_t nybins, double ylower, double yupper, const std::string& title="", const std::string& xtitle="", const std::string& ytitle="", const std::string& ztitle=""); /// Book a 2D profile histogram with non-uniform bins defined by the vectorx /// of bin edges @a xbinedges and @a ybinedges. 
Profile2DPtr bookProfile2D(const std::string& name, const std::vector& xbinedges, const std::vector& ybinedges, const std::string& title="", const std::string& xtitle="", const std::string& ytitle="", const std::string& ztitle=""); /// Book a 2D profile histogram with non-uniform bins defined by the vectorx /// of bin edges @a xbinedges and @a ybinedges. Profile2DPtr bookProfile2D(const std::string& name, const std::initializer_list& xbinedges, const std::initializer_list& ybinedges, const std::string& title="", const std::string& xtitle="", const std::string& ytitle="", const std::string& ztitle=""); /// Book a 2D profile histogram with binning from a reference scatter. Profile2DPtr bookProfile2D(const std::string& name, const Scatter3D& refscatter, const std::string& title="", const std::string& xtitle="", const std::string& ytitle="", const std::string& ztitle=""); /// Book a 2D profile histogram, using the binnings in the reference data histogram. Profile2DPtr bookProfile2D(const std::string& name, const std::string& title="", const std::string& xtitle="", const std::string& ytitle="", const std::string& ztitle=""); /// Book a 2D profile histogram, using the binnings in the reference data histogram. /// /// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way. Profile2DPtr bookProfile2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, const std::string& title="", const std::string& xtitle="", const std::string& ytitle="", const std::string& ztitle=""); //@} /// @name 2D scatter booking //@{ /// @brief Book a 2-dimensional data point set with the given name. /// /// @note Unlike histogram booking, scatter booking by default makes no /// attempt to use reference data to pre-fill the data object. If you want /// this, which is sometimes useful e.g. when the x-position is not really /// meaningful and can't be extracted from the data, then set the @a /// copy_pts parameter to true. This creates points to match the reference /// data's x values and errors, but with the y values and errors zeroed... /// assuming that there is a reference histo with the same name: if there /// isn't, an exception will be thrown. Scatter2DPtr bookScatter2D(const std::string& name, bool copy_pts=false, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// @brief Book a 2-dimensional data point set, using the binnings in the reference data histogram. /// /// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way. /// /// @note Unlike histogram booking, scatter booking by default makes no /// attempt to use reference data to pre-fill the data object. If you want /// this, which is sometimes useful e.g. when the x-position is not really /// meaningful and can't be extracted from the data, then set the @a /// copy_pts parameter to true. This creates points to match the reference /// data's x values and errors, but with the y values and errors zeroed. Scatter2DPtr bookScatter2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, bool copy_pts=false, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// @brief Book a 2-dimensional data point set with equally spaced x-points in a range. /// /// The y values and errors will be set to 0. 
Scatter2DPtr bookScatter2D(const std::string& name, size_t npts, double lower, double upper, const std::string& title="", const std::string& xtitle="", const std::string& ytitle=""); /// @brief Book a 2-dimensional data point set based on provided contiguous "bin edges". /// /// The y values and errors will be set to 0. Scatter2DPtr bookScatter2D(const std::string& hname, const std::vector& binedges, const std::string& title, const std::string& xtitle, const std::string& ytitle); //@} public: + /// @name Accessing options for this Analysis instance. + //@{ + + /// Get an option for this analysis instance as a string. + std::string getOption(std::string optname) { + if ( _options.find(optname) != _options.end() ) + return _options.find(optname)->second; + return ""; + } + + /// Get an option for this analysis instance converted to a + /// specific type (given by the specified @a def value). + template + T getOption(std::string optname, T def) { + if (_options.find(optname) == _options.end()) return def; + std::stringstream ss; + ss << _options.find(optname)->second; + T ret; + ss >> ret; + return ret; + } + + //@} /// @name Analysis object manipulation /// @todo Should really be protected: only public to keep BinnedHistogram happy for now... //@{ /// Multiplicatively scale the given counter, @a cnt, by factor @s factor. void scale(CounterPtr cnt, double factor); /// Multiplicatively scale the given counters, @a cnts, by factor @s factor. /// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh) /// @todo Use SFINAE for a generic iterable of CounterPtrs void scale(const std::vector& cnts, double factor) { for (auto& c : cnts) scale(c, factor); } /// @todo YUCK! template void scale(const CounterPtr (&cnts)[array_size], double factor) { // for (size_t i = 0; i < std::extent::value; ++i) scale(cnts[i], factor); for (auto& c : cnts) scale(c, factor); } /// Normalize the given histogram, @a histo, to area = @a norm. void normalize(Histo1DPtr histo, double norm=1.0, bool includeoverflows=true); /// Normalize the given histograms, @a histos, to area = @a norm. /// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh) /// @todo Use SFINAE for a generic iterable of Histo1DPtrs void normalize(const std::vector& histos, double norm=1.0, bool includeoverflows=true) { for (auto& h : histos) normalize(h, norm, includeoverflows); } /// @todo YUCK! template void normalize(const Histo1DPtr (&histos)[array_size], double norm=1.0, bool includeoverflows=true) { for (auto& h : histos) normalize(h, norm, includeoverflows); } /// Multiplicatively scale the given histogram, @a histo, by factor @s factor. void scale(Histo1DPtr histo, double factor); /// Multiplicatively scale the given histograms, @a histos, by factor @s factor. /// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh) /// @todo Use SFINAE for a generic iterable of Histo1DPtrs void scale(const std::vector& histos, double factor) { for (auto& h : histos) scale(h, factor); } /// @todo YUCK! template void scale(const Histo1DPtr (&histos)[array_size], double factor) { for (auto& h : histos) scale(h, factor); } /// Normalize the given histogram, @a histo, to area = @a norm. void normalize(Histo2DPtr histo, double norm=1.0, bool includeoverflows=true); /// Normalize the given histograms, @a histos, to area = @a norm.
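// A minimal usage sketch for the option accessors added above. The option keys "MODE" and
// "PTMIN" and the init() context are illustrative assumptions, not part of this patch:
//
//   void init() {
//     const std::string mode = getOption("MODE");     // "" if the option was not given
//     const double ptmin = getOption("PTMIN", 20.0);  // typed form, with 20.0 as the default
//     ...
//   }
//
// The string form returns an empty string for unset keys, while the templated form falls
// back to the supplied default, so an analysis still runs when no options are passed.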
/// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh) /// @todo Use SFINAE for a generic iterable of Histo2DPtrs void normalize(const std::vector& histos, double norm=1.0, bool includeoverflows=true) { for (auto& h : histos) normalize(h, norm, includeoverflows); } /// @todo YUCK! template void normalize(const Histo2DPtr (&histos)[array_size], double norm=1.0, bool includeoverflows=true) { for (auto& h : histos) normalize(h, norm, includeoverflows); } /// Multiplicatively scale the given histogram, @a histo, by factor @s factor. void scale(Histo2DPtr histo, double factor); /// Multiplicatively scale the given histograms, @a histos, by factor @s factor. /// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh) /// @todo Use SFINAE for a generic iterable of Histo2DPtrs void scale(const std::vector& histos, double factor) { for (auto& h : histos) scale(h, factor); } /// @todo YUCK! template void scale(const Histo2DPtr (&histos)[array_size], double factor) { for (auto& h : histos) scale(h, factor); } /// Helper for counter division. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void divide(CounterPtr c1, CounterPtr c2, Scatter1DPtr s) const; /// Helper for histogram division with raw YODA objects. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void divide(const YODA::Counter& c1, const YODA::Counter& c2, Scatter1DPtr s) const; /// Helper for histogram division. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void divide(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const; /// Helper for histogram division with raw YODA objects. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void divide(const YODA::Histo1D& h1, const YODA::Histo1D& h2, Scatter2DPtr s) const; /// Helper for profile histogram division. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void divide(Profile1DPtr p1, Profile1DPtr p2, Scatter2DPtr s) const; /// Helper for profile histogram division with raw YODA objects. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void divide(const YODA::Profile1D& p1, const YODA::Profile1D& p2, Scatter2DPtr s) const; /// Helper for 2D histogram division. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void divide(Histo2DPtr h1, Histo2DPtr h2, Scatter3DPtr s) const; /// Helper for 2D histogram division with raw YODA objects. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void divide(const YODA::Histo2D& h1, const YODA::Histo2D& h2, Scatter3DPtr s) const; /// Helper for 2D profile histogram division. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void divide(Profile2DPtr p1, Profile2DPtr p2, Scatter3DPtr s) const; /// Helper for 2D profile histogram division with raw YODA objects /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. 
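// A sketch of how the helpers above are typically combined in an analysis' finalize()
// (the members _h_pass, _h_all, _h_shape and _s_frac are illustrative, not from this patch):
//
//   void finalize() {
//     scale(_h_pass, crossSectionPerEvent());   // weighted counts -> cross-section in pb
//     scale(_h_all,  crossSectionPerEvent());
//     normalize(_h_shape, 1.0);                 // unit-area shape comparison
//     divide(_h_pass, _h_all, _s_frac);         // ratio of two booked Histo1Ds into a pre-booked Scatter2D
//   }
//
// divide() (and likewise efficiency() and asymm()) assigns into an already-registered
// scatter, so the output keeps the path under which it was booked.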
void divide(const YODA::Profile2D& p1, const YODA::Profile2D& p2, Scatter3DPtr s) const; /// Helper for histogram efficiency calculation. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void efficiency(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const; /// Helper for histogram efficiency calculation. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void efficiency(const YODA::Histo1D& h1, const YODA::Histo1D& h2, Scatter2DPtr s) const; /// Helper for histogram asymmetry calculation. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void asymm(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const; /// Helper for histogram asymmetry calculation. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void asymm(const YODA::Histo1D& h1, const YODA::Histo1D& h2, Scatter2DPtr s) const; /// Helper for converting a differential histo to an integral one. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void integrate(Histo1DPtr h, Scatter2DPtr s) const; /// Helper for converting a differential histo to an integral one. /// /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target. void integrate(const Histo1D& h, Scatter2DPtr s) const; //@} public: /// List of registered analysis data objects const vector& analysisObjects() const { return _analysisobjects; } protected: /// @name Data object registration, retrieval, and removal //@{ /// Register a data object in the histogram system void addAnalysisObject(AnalysisObjectPtr ao); /// Get a data object from the histogram system template const std::shared_ptr getAnalysisObject(const std::string& name) const { foreach (const AnalysisObjectPtr& ao, analysisObjects()) { if (ao->path() == histoPath(name)) return dynamic_pointer_cast(ao); } throw Exception("Data object " + histoPath(name) + " not found"); } /// Get a data object from the histogram system (non-const) template std::shared_ptr getAnalysisObject(const std::string& name) { foreach (const AnalysisObjectPtr& ao, analysisObjects()) { if (ao->path() == histoPath(name)) return dynamic_pointer_cast(ao); } throw Exception("Data object " + histoPath(name) + " not found"); } /// Unregister a data object from the histogram system (by name) void removeAnalysisObject(const std::string& path); /// Unregister a data object from the histogram system (by pointer) void removeAnalysisObject(AnalysisObjectPtr ao); /// Get a named Histo1D object from the histogram system const Histo1DPtr getHisto1D(const std::string& name) const { return getAnalysisObject(name); } /// Get a named Histo1D object from the histogram system (non-const) Histo1DPtr getHisto1D(const std::string& name) { return getAnalysisObject(name); } /// Get a Histo1D object from the histogram system by axis ID codes (non-const) const Histo1DPtr getHisto1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const { return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); } /// Get a Histo1D object from the histogram system by axis ID codes (non-const) Histo1DPtr getHisto1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) { return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); } /// Get a named Histo2D 
object from the histogram system const Histo2DPtr getHisto2D(const std::string& name) const { return getAnalysisObject(name); } /// Get a named Histo2D object from the histogram system (non-const) Histo2DPtr getHisto2D(const std::string& name) { return getAnalysisObject(name); } /// Get a Histo2D object from the histogram system by axis ID codes (non-const) const Histo2DPtr getHisto2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const { return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); } /// Get a Histo2D object from the histogram system by axis ID codes (non-const) Histo2DPtr getHisto2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) { return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); } /// Get a named Profile1D object from the histogram system const Profile1DPtr getProfile1D(const std::string& name) const { return getAnalysisObject(name); } /// Get a named Profile1D object from the histogram system (non-const) Profile1DPtr getProfile1D(const std::string& name) { return getAnalysisObject(name); } /// Get a Profile1D object from the histogram system by axis ID codes (non-const) const Profile1DPtr getProfile1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const { return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); } /// Get a Profile1D object from the histogram system by axis ID codes (non-const) Profile1DPtr getProfile1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) { return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); } /// Get a named Profile2D object from the histogram system const Profile2DPtr getProfile2D(const std::string& name) const { return getAnalysisObject(name); } /// Get a named Profile2D object from the histogram system (non-const) Profile2DPtr getProfile2D(const std::string& name) { return getAnalysisObject(name); } /// Get a Profile2D object from the histogram system by axis ID codes (non-const) const Profile2DPtr getProfile2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const { return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); } /// Get a Profile2D object from the histogram system by axis ID codes (non-const) Profile2DPtr getProfile2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) { return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); } /// Get a named Scatter2D object from the histogram system const Scatter2DPtr getScatter2D(const std::string& name) const { return getAnalysisObject(name); } /// Get a named Scatter2D object from the histogram system (non-const) Scatter2DPtr getScatter2D(const std::string& name) { return getAnalysisObject(name); } /// Get a Scatter2D object from the histogram system by axis ID codes (non-const) const Scatter2DPtr getScatter2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const { return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); } /// Get a Scatter2D object from the histogram system by axis ID codes (non-const) Scatter2DPtr getScatter2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) { return getAnalysisObject(makeAxisCode(datasetId, xAxisId, yAxisId)); } //@} private: /// Name passed to constructor (used to find .info analysis data file, and as a fallback) string _defaultname; /// Pointer to analysis metadata object unique_ptr _info; /// Storage of all plot objects /// @todo Make this a map for fast lookup by path? 
vector _analysisobjects; /// @name Cross-section variables //@{ double _crossSection; bool _gotCrossSection; //@} /// The controlling AnalysisHandler object. AnalysisHandler* _analysishandler; /// Collection of cached refdata to speed up many autobookings: the /// reference data file should only be read once. mutable std::map _refdata; + /// Options for (this instance of) the analysis + map _options; + + /// The string of options. + string _optstring; private: /// @name Utility functions //@{ /// Get the reference data for this paper and cache it. void _cacheRefData() const; //@} /// The assignment operator is private and must never be called. /// In fact, it should not even be implemented. Analysis& operator=(const Analysis&); }; } // Include definition of analysis plugin system so that analyses automatically see it when including Analysis.hh #include "Rivet/AnalysisBuilder.hh" /// @def DECLARE_RIVET_PLUGIN /// Preprocessor define to prettify the global-object plugin hook mechanism. #define DECLARE_RIVET_PLUGIN(clsname) Rivet::AnalysisBuilder plugin_ ## clsname /// @def DECLARE_ALIASED_RIVET_PLUGIN /// Preprocessor define to prettify the global-object plugin hook mechanism, with an extra alias name for this analysis. // #define DECLARE_ALIASED_RIVET_PLUGIN(clsname, alias) Rivet::AnalysisBuilder plugin_ ## clsname ## ( ## #alias ## ) #define DECLARE_ALIASED_RIVET_PLUGIN(clsname, alias) DECLARE_RIVET_PLUGIN(clsname)( #alias ) /// @def DEFAULT_RIVET_ANALYSIS_CONSTRUCTOR /// Preprocessor define to prettify the manky constructor with name string argument #define DEFAULT_RIVET_ANALYSIS_CONSTRUCTOR(clsname) clsname() : Analysis(# clsname) {} /// @def DEFAULT_RIVET_ANALYSIS_CTOR /// Slight abbreviation for DEFAULT_RIVET_ANALYSIS_CONSTRUCTOR #define DEFAULT_RIVET_ANALYSIS_CTOR(clsname) DEFAULT_RIVET_ANALYSIS_CONSTRUCTOR(clsname) #endif diff --git a/include/Rivet/AnalysisInfo.hh b/include/Rivet/AnalysisInfo.hh --- a/include/Rivet/AnalysisInfo.hh +++ b/include/Rivet/AnalysisInfo.hh @@ -1,258 +1,279 @@ // -*- C++ -*- #ifndef RIVET_AnalysisInfo_HH #define RIVET_AnalysisInfo_HH #include "Rivet/Config/RivetCommon.hh" #include namespace Rivet { class AnalysisInfo { public: /// Static factory method: returns null pointer if no metadata found static unique_ptr make(const std::string& name); /// @name Standard constructors and destructors. //@{ /// The default constructor. AnalysisInfo() { clear(); } /// The destructor. ~AnalysisInfo() { } //@} public: /// @name Metadata /// Metadata is used for querying from the command line and also for /// building web pages and the analysis pages in the Rivet manual. //@{ /// Get the name of the analysis. By default this is computed using the /// experiment, year and Inspire/Spires ID metadata methods. std::string name() const { if (!_name.empty()) return _name; if (!experiment().empty() && !year().empty()) { if (!inspireId().empty()) { return experiment() + "_" + year() + "_I" + inspireId(); } else if (!spiresId().empty()) { return experiment() + "_" + year() + "_S" + spiresId(); } } return ""; } /// Set the name of the analysis. void setName(const std::string& name) { _name = name; } /// Get the Inspire (SPIRES replacement) ID code for this analysis. const std::string& inspireId() const { return _inspireId; } /// Set the Inspire (SPIRES replacement) ID code for this analysis. void setInspireId(const std::string& inspireId) { _inspireId = inspireId; } /// Get the SPIRES ID code for this analysis.
const std::string& spiresId() const { return _spiresId; } /// Set the SPIRES ID code for this analysis. void setSpiresId(const std::string& spiresId) { _spiresId = spiresId; } /// @brief Names & emails of paper/analysis authors. /// Names and email of authors in 'NAME \' format. The first /// name in the list should be the primary contact person. const std::vector& authors() const { return _authors; } /// Set the author list. void setAuthors(const std::vector& authors) { _authors = authors; } /// @brief Get a short description of the analysis. /// Short (one sentence) description used as an index entry. /// Use @a description() to provide full descriptive paragraphs /// of analysis details. const std::string& summary() const { return _summary; } /// Set the short description for this analysis. void setSummary(const std::string& summary) { _summary = summary; } /// @brief Get a full description of the analysis. /// Full textual description of this analysis, what it is useful for, /// what experimental techniques are applied, etc. Should be treated /// as a chunk of restructuredText (http://docutils.sourceforge.net/rst.html), /// with equations to be rendered as LaTeX with amsmath operators. const std::string& description() const { return _description; } /// Set the full description for this analysis. void setDescription(const std::string& description) { _description = description; } /// @brief Information about the events needed as input for this analysis. /// Event types, energies, kinematic cuts, particles to be considered /// stable, etc. etc. Should be treated as a restructuredText bullet list /// (http://docutils.sourceforge.net/rst.html) const std::string& runInfo() const { return _runInfo; } /// Set the full description for this analysis. void setRunInfo(const std::string& runInfo) { _runInfo = runInfo; } /// Beam particle types const std::vector& beams() const { return _beams; } /// Set beam particle types void setBeams(const std::vector& beams) { _beams = beams; } /// Sets of valid beam energies const std::vector >& energies() const { return _energies; } /// Set the valid beam energies void setEnergies(const std::vector >& energies) { _energies = energies; } /// Experiment which performed and published this analysis. const std::string& experiment() const { return _experiment; } /// Set the experiment which performed and published this analysis. void setExperiment(const std::string& experiment) { _experiment = experiment; } /// Collider on which the experiment ran. const std::string& collider() const { return _collider; } /// Set the collider on which the experiment ran. void setCollider(const std::string& collider) { _collider = collider; } /// @brief When the original experimental analysis was published. /// When the refereed paper on which this is based was published, /// according to SPIRES. const std::string& year() const { return _year; } /// Set the year in which the original experimental analysis was published. void setYear(const std::string& year) { _year = year; } /// The integrated data luminosity of the data set const std::string& luminosityfb() const { return _luminosityfb; } /// Set the integrated data luminosity of the data set void setLuminosityfb(const std::string& luminosityfb) { _luminosityfb = luminosityfb; } /// Journal and preprint references. const std::vector& references() const { return _references; } /// Set the journal and preprint reference list. 
void setReferences(const std::vector& references) { _references = references; } /// Analysis Keywords for grouping etc const std::vector& keywords() const { return _keywords; } /// BibTeX citation key for this article. const std::string& bibKey() const { return _bibKey;} /// Set the BibTeX citation key for this article. void setBibKey(const std::string& bibKey) { _bibKey = bibKey; } /// BibTeX citation entry for this article. const std::string& bibTeX() const { return _bibTeX; } /// Set the BibTeX citation entry for this article. void setBibTeX(const std::string& bibTeX) { _bibTeX = bibTeX; } /// Whether this analysis is trusted (in any way!) const std::string& status() const { return _status; } /// Set the analysis code status. void setStatus(const std::string& status) { _status = status; } /// Any work to be done on this analysis. const std::vector& todos() const { return _todos; } /// Set the to-do list. void setTodos(const std::vector& todos) { _todos = todos; } + /// Get the option list. + const std::vector& options() const { return _options; } + + /// Check if the given option is valid. + bool validOption(std::string key, std::string val) const; + + /// Set the option list. + void setOptions(const std::vector& opts) { + _options = opts; + buildOptionMap(); + } + + /// Build a map of options to facilitate checking. + void buildOptionMap(); + + /// Return true if this analysis needs to know the process cross-section. bool needsCrossSection() const { return _needsCrossSection; } /// Return true if this analysis needs to know the process cross-section. void setNeedsCrossSection(bool needXsec) { _needsCrossSection = needXsec; } //@} private: std::string _name; std::string _spiresId, _inspireId; std::vector _authors; std::string _summary; std::string _description; std::string _runInfo; std::string _experiment; std::string _collider; std::vector > _beams; std::vector > _energies; std::string _year; std::string _luminosityfb; std::vector _references; std::vector _keywords; std::string _bibKey; std::string _bibTeX; //std::string _bibTeXBody; ///< Was thinking of avoiding duplication of BibKey... 
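// The definitions of buildOptionMap() and validOption() live in AnalysisInfo.cc, which this
// patch excerpt does not show. A plausible sketch only, assuming each entry of _options has
// the form "KEY=VAL1,VAL2,...":
//
//   void AnalysisInfo::buildOptionMap() {
//     _optionmap.clear();
//     for (const std::string& opt : _options) {
//       const size_t eq = opt.find('=');
//       if (eq == std::string::npos) continue;
//       const std::string key = opt.substr(0, eq);
//       std::stringstream vals(opt.substr(eq + 1));   // needs <sstream>
//       std::string val;
//       while (std::getline(vals, val, ',')) _optionmap[key].insert(val);
//     }
//   }
//
//   bool AnalysisInfo::validOption(std::string key, std::string val) const {
//     const auto it = _optionmap.find(key);
//     return it != _optionmap.end() && it->second.count(val) > 0;
//   }
//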
std::string _status; std::vector _todos; bool _needsCrossSection; + std::vector _options; + std::map< std::string, std::set > _optionmap; + void clear() { _name = ""; _spiresId = ""; _inspireId = ""; _authors.clear(); _summary = ""; _description = ""; _runInfo = ""; _experiment = ""; _collider = ""; _beams.clear(); _energies.clear(); _year = ""; _luminosityfb = ""; _references.clear(); _keywords.clear(); _bibKey = ""; _bibTeX = ""; //_bibTeXBody = ""; _status = ""; _todos.clear(); _needsCrossSection = false; + _options.clear(); + _optionmap.clear(); } }; /// String representation std::string toString(const AnalysisInfo& ai); /// Stream an AnalysisInfo as a text description inline std::ostream& operator<<(std::ostream& os, const AnalysisInfo& ai) { os << toString(ai); return os; } } #endif diff --git a/pyext/rivet/aopaths.py b/pyext/rivet/aopaths.py --- a/pyext/rivet/aopaths.py +++ b/pyext/rivet/aopaths.py @@ -1,87 +1,102 @@ def isRefPath(path): return path.startswith("/REF") +def stripOptions(path): + import re + return re.sub(r':\w+=[^:/]+', "", path) + +def extractOptionString(path): + import re + re_opts = re.compile(r"^.*(:\w+=[^:/]+)+") + m = re_opts.match(path) + if not m: + return "" + opts = list(m.groups()) + for i in range(len(opts)): + opts[i] = opts[i].strip(':') + return " [" + ",".join(opts) + "]" + def isRefAO(ao): return int(ao.annotation("IsRef")) == 1 or isRefPath(ao.path) def isTmpPath(path): return "/_" in path #< match *any* underscore-prefixed path component def isTmpAO(ao): return isTmpPath(ao.path) class AOPath(object): """ Object representation of analysis object path structures. TODO: move to YODA? """ import re re_aopath = re.compile(r"^(/[^\[\]\@\#]+)(\[[A-Za-z\d\._]+\])?(#\d+|@[\d\.]+)?$") def __init__(self, path): import os self.origpath = path m = self.re_aopath.match(path) if not m: raise Exception("Supplied path '%s' does not meet required structure" % path) self._basepath = m.group(1) self._varid = m.group(2).lstrip("[").rstrip("]") if m.group(2) else None self._binid = int(m.group(3).lstrip("#")) if m.group(3) else None self._isref = isRefPath(self._basepath) def basepath(self, keepref=False): "Main 'Unix-like' part of the AO path, optionally including a /REF prefix" p = self._basepath.rstrip("/") if not keepref and p.startswith("/REF"): p = p[4:] return p def varpath(self, keepref=False, defaultvarid=None): "The basepath, plus any bracketed variation identifier" p = self.basepath(keepref) if self.varid(defaultvarid) is not None: p += "[%s]" % str(self.varid(defaultvarid)) return p def binpath(self, keepref=False, defaultbinid=None, defaultvarid=None): "The varpath, plus any #-prefixed bin number identifier" p = self.varpath(keepref, defaultvarid) if self.binid(defaultbinid) is not None: p += "#%d" % self.binid(defaultbinid) return p def basepathparts(self, keepref=False): "List of basepath components, split by forward slashes" return self.basepath(keepref).strip("/").split("/") # TODO: basepathhead, basepathtail def dirname(self, keepref=False): "The non-final (i.e. dir-like) part of the basepath" return os.path.dirname(self.basepath(keepref)) def dirnameparts(self, keepref=False): "List of dirname components, split by forward slashes" return self.dirname(keepref).strip("/").split("/") def basename(self): "The final (i.e. 
file-like) part of the basepath" return os.path.basename(self._basepath) def varid(self, default=None): "The variation identifier (without brackets) if there is one, otherwise None" return self._varid if self._varid is not None else default def binid(self, default=None): "The bin identifier (without #) if there is one, otherwise None" return self._binid if self._binid is not None else default def isref(self): "Is there a /REF prefix in the original path?" return self._isref def istmp(self): "Do any basepath components start with an underscore, used to hide them from plotting?" return isTmpPath(self.basepath()) diff --git a/src/Core/Analysis.cc b/src/Core/Analysis.cc --- a/src/Core/Analysis.cc +++ b/src/Core/Analysis.cc @@ -1,895 +1,895 @@ // -*- C++ -*- #include "Rivet/Config/RivetCommon.hh" #include "Rivet/Analysis.hh" #include "Rivet/AnalysisHandler.hh" #include "Rivet/AnalysisInfo.hh" #include "Rivet/Tools/BeamConstraint.hh" namespace Rivet { Analysis::Analysis(const string& name) : _crossSection(-1.0), _gotCrossSection(false), _analysishandler(NULL) { ProjectionApplier::_allowProjReg = false; _defaultname = name; unique_ptr ai = AnalysisInfo::make(name); assert(ai); _info = move(ai); assert(_info); } double Analysis::sqrtS() const { return handler().sqrtS(); } const ParticlePair& Analysis::beams() const { return handler().beams(); } const PdgIdPair Analysis::beamIds() const { return handler().beamIds(); } const string Analysis::histoDir() const { /// @todo Cache in a member variable string _histoDir; if (_histoDir.empty()) { _histoDir = "/" + name(); if (handler().runName().length() > 0) { _histoDir = "/" + handler().runName() + _histoDir; } replace_all(_histoDir, "//", "/"); //< iterates until none } return _histoDir; } const string Analysis::histoPath(const string& hname) const { const string path = histoDir() + "/" + hname; return path; } const string Analysis::histoPath(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const { return histoDir() + "/" + mkAxisCode(datasetId, xAxisId, yAxisId); } const string Analysis::mkAxisCode(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const { stringstream axisCode; axisCode << "d"; if (datasetId < 10) axisCode << 0; axisCode << datasetId; axisCode << "-x"; if (xAxisId < 10) axisCode << 0; axisCode << xAxisId; axisCode << "-y"; if (yAxisId < 10) axisCode << 0; axisCode << yAxisId; return axisCode.str(); } Log& Analysis::getLog() const { string logname = "Rivet.Analysis." 
+ name(); return Log::getLog(logname); } /////////////////////////////////////////// size_t Analysis::numEvents() const { return handler().numEvents(); } double Analysis::sumW() const { return handler().sumW(); } double Analysis::sumW2() const { return handler().sumW(); } /////////////////////////////////////////// bool Analysis::isCompatible(const ParticlePair& beams) const { return isCompatible(beams.first.pid(), beams.second.pid(), beams.first.energy(), beams.second.energy()); } bool Analysis::isCompatible(PdgId beam1, PdgId beam2, double e1, double e2) const { PdgIdPair beams(beam1, beam2); pair energies(e1, e2); return isCompatible(beams, energies); } bool Analysis::isCompatible(const PdgIdPair& beams, const pair& energies) const { // First check the beam IDs bool beamIdsOk = false; foreach (const PdgIdPair& bp, requiredBeams()) { if (compatible(beams, bp)) { beamIdsOk = true; break; } } if (!beamIdsOk) return false; // Next check that the energies are compatible (within 1% or 1 GeV, whichever is larger, for a bit of UI forgiveness) /// @todo Use some sort of standard ordering to improve comparisons, esp. when the two beams are different particles bool beamEnergiesOk = requiredEnergies().size() > 0 ? false : true; typedef pair DoublePair; foreach (const DoublePair& ep, requiredEnergies()) { if ((fuzzyEquals(ep.first, energies.first, 0.01) && fuzzyEquals(ep.second, energies.second, 0.01)) || (fuzzyEquals(ep.first, energies.second, 0.01) && fuzzyEquals(ep.second, energies.first, 0.01)) || (abs(ep.first - energies.first) < 1*GeV && abs(ep.second - energies.second) < 1*GeV) || (abs(ep.first - energies.second) < 1*GeV && abs(ep.second - energies.first) < 1*GeV)) { beamEnergiesOk = true; break; } } return beamEnergiesOk; /// @todo Need to also check internal consistency of the analysis' /// beam requirements with those of the projections it uses. } /////////////////////////////////////////// Analysis& Analysis::setCrossSection(double xs) { _crossSection = xs; _gotCrossSection = true; return *this; } double Analysis::crossSection() const { if (!_gotCrossSection || std::isnan(_crossSection)) { string errMsg = "You did not set the cross section for the analysis " + name(); throw Error(errMsg); } return _crossSection; } double Analysis::crossSectionPerEvent() const { const double sumW = sumOfWeights(); assert(sumW != 0.0); return _crossSection / sumW; } //////////////////////////////////////////////////////////// // Histogramming void Analysis::_cacheRefData() const { if (_refdata.empty()) { MSG_TRACE("Getting refdata cache for paper " << name()); - _refdata = getRefData(name()); + _refdata = getRefData(( (info().name().empty()) ? 
_defaultname : info().name() )); } } CounterPtr Analysis::bookCounter(const string& cname, const string& title) { // const string& xtitle, // const string& ytitle) { const string path = histoPath(cname); CounterPtr ctr = make_shared(path, title); addAnalysisObject(ctr); MSG_TRACE("Made counter " << cname << " for " << name()); // hist->setAnnotation("XLabel", xtitle); // hist->setAnnotation("YLabel", ytitle); return ctr; } CounterPtr Analysis::bookCounter(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, const string& title) { // const string& xtitle, // const string& ytitle) { const string axisCode = mkAxisCode(datasetId, xAxisId, yAxisId); return bookCounter(axisCode, title); } Histo1DPtr Analysis::bookHisto1D(const string& hname, size_t nbins, double lower, double upper, const string& title, const string& xtitle, const string& ytitle) { Histo1DPtr hist; try { // try to bind to pre-existing // AnalysisObjectPtr ao = getAnalysisObject(path); // hist = dynamic_pointer_cast(ao); hist = getHisto1D(hname); /// @todo Test that cast worked /// @todo Also test that binning is as expected? MSG_TRACE("Bound pre-existing histogram " << hname << " for " << name()); } catch (...) { // binding failed; make it from scratch hist = make_shared(nbins, lower, upper, histoPath(hname), title); addAnalysisObject(hist); MSG_TRACE("Made histogram " << hname << " for " << name()); } hist->setTitle(title); hist->setAnnotation("XLabel", xtitle); hist->setAnnotation("YLabel", ytitle); return hist; } Histo1DPtr Analysis::bookHisto1D(const string& hname, const vector& binedges, const string& title, const string& xtitle, const string& ytitle) { Histo1DPtr hist; try { // try to bind to pre-existing // AnalysisObjectPtr ao = getAnalysisObject(path); // hist = dynamic_pointer_cast(ao); hist = getHisto1D(hname); /// @todo Test that cast worked /// @todo Also test that binning is as expected? MSG_TRACE("Bound pre-existing histogram " << hname << " for " << name()); } catch (...) { // binding failed; make it from scratch hist = make_shared(binedges, histoPath(hname), title); addAnalysisObject(hist); MSG_TRACE("Made histogram " << hname << " for " << name()); } hist->setTitle(title); hist->setAnnotation("XLabel", xtitle); hist->setAnnotation("YLabel", ytitle); return hist; } Histo1DPtr Analysis::bookHisto1D(const string& hname, const initializer_list& binedges, const string& title, const string& xtitle, const string& ytitle) { return bookHisto1D(hname, vector{binedges}, title, xtitle, ytitle); } Histo1DPtr Analysis::bookHisto1D(const string& hname, const Scatter2D& refscatter, const string& title, const string& xtitle, const string& ytitle) { Histo1DPtr hist; try { // try to bind to pre-existing // AnalysisObjectPtr ao = getAnalysisObject(path); // hist = dynamic_pointer_cast(ao); hist = getHisto1D(hname); /// @todo Test that cast worked /// @todo Also test that binning is as expected? MSG_TRACE("Bound pre-existing histogram " << hname << " for " << name()); } catch (...) 
{ // binding failed; make it from scratch hist = make_shared(refscatter, histoPath(hname)); if (hist->hasAnnotation("IsRef")) hist->rmAnnotation("IsRef"); addAnalysisObject(hist); MSG_TRACE("Made histogram " << hname << " for " << name()); } hist->setTitle(title); hist->setAnnotation("XLabel", xtitle); hist->setAnnotation("YLabel", ytitle); return hist; } Histo1DPtr Analysis::bookHisto1D(const string& hname, const string& title, const string& xtitle, const string& ytitle) { const Scatter2D& refdata = refData(hname); return bookHisto1D(hname, refdata, title, xtitle, ytitle); } Histo1DPtr Analysis::bookHisto1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, const string& title, const string& xtitle, const string& ytitle) { const string axisCode = mkAxisCode(datasetId, xAxisId, yAxisId); return bookHisto1D(axisCode, title, xtitle, ytitle); } /// @todo Add booking methods which take a path, titles and *a reference Scatter from which to book* ///////////////// Histo2DPtr Analysis::bookHisto2D(const string& hname, size_t nxbins, double xlower, double xupper, size_t nybins, double ylower, double yupper, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { const string path = histoPath(hname); Histo2DPtr hist = make_shared(nxbins, xlower, xupper, nybins, ylower, yupper, path, title); addAnalysisObject(hist); MSG_TRACE("Made 2D histogram " << hname << " for " << name()); hist->setAnnotation("XLabel", xtitle); hist->setAnnotation("YLabel", ytitle); hist->setAnnotation("ZLabel", ztitle); return hist; } Histo2DPtr Analysis::bookHisto2D(const string& hname, const vector& xbinedges, const vector& ybinedges, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { const string path = histoPath(hname); Histo2DPtr hist = make_shared(xbinedges, ybinedges, path, title); addAnalysisObject(hist); MSG_TRACE("Made 2D histogram " << hname << " for " << name()); hist->setAnnotation("XLabel", xtitle); hist->setAnnotation("YLabel", ytitle); hist->setAnnotation("ZLabel", ztitle); return hist; } Histo2DPtr Analysis::bookHisto2D(const string& hname, const initializer_list& xbinedges, const initializer_list& ybinedges, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { return bookHisto2D(hname, vector{xbinedges}, vector{ybinedges}, title, xtitle, ytitle, ztitle); } Histo2DPtr Analysis::bookHisto2D(const string& hname, const Scatter3D& refscatter, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { const string path = histoPath(hname); Histo2DPtr hist( new Histo2D(refscatter, path) ); addAnalysisObject(hist); MSG_TRACE("Made 2D histogram " << hname << " for " << name()); if (hist->hasAnnotation("IsRef")) hist->rmAnnotation("IsRef"); hist->setTitle(title); hist->setAnnotation("XLabel", xtitle); hist->setAnnotation("YLabel", ytitle); hist->setAnnotation("ZLabel", ztitle); return hist; } Histo2DPtr Analysis::bookHisto2D(const string& hname, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { const Scatter3D& refdata = refData(hname); return bookHisto2D(hname, refdata, title, xtitle, ytitle, ztitle); } Histo2DPtr Analysis::bookHisto2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { const string axisCode = mkAxisCode(datasetId, xAxisId, yAxisId); return bookHisto2D(axisCode, title, xtitle, ytitle, ztitle); } 
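// To make the autobooking-by-ID overloads concrete: mkAxisCode(2, 1, 3) produces
// "d02-x01-y03" (each ID zero-padded to two digits, per the implementation earlier in this
// file), and histoPath() prefixes the analysis name, so an analysis called "MY_ANALYSIS"
// (an illustrative name) with no run name set books that object at "/MY_ANALYSIS/d02-x01-y03",
// matching the corresponding HepData reference entry.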
///////////////// Profile1DPtr Analysis::bookProfile1D(const string& hname, size_t nbins, double lower, double upper, const string& title, const string& xtitle, const string& ytitle) { const string path = histoPath(hname); Profile1DPtr prof = make_shared(nbins, lower, upper, path, title); addAnalysisObject(prof); MSG_TRACE("Made profile histogram " << hname << " for " << name()); prof->setAnnotation("XLabel", xtitle); prof->setAnnotation("YLabel", ytitle); return prof; } Profile1DPtr Analysis::bookProfile1D(const string& hname, const vector& binedges, const string& title, const string& xtitle, const string& ytitle) { const string path = histoPath(hname); Profile1DPtr prof = make_shared(binedges, path, title); addAnalysisObject(prof); MSG_TRACE("Made profile histogram " << hname << " for " << name()); prof->setAnnotation("XLabel", xtitle); prof->setAnnotation("YLabel", ytitle); return prof; } Profile1DPtr Analysis::bookProfile1D(const string& hname, const initializer_list& binedges, const string& title, const string& xtitle, const string& ytitle) { return bookProfile1D(hname, vector{binedges}, title, xtitle, ytitle); } Profile1DPtr Analysis::bookProfile1D(const string& hname, const Scatter2D& refscatter, const string& title, const string& xtitle, const string& ytitle) { const string path = histoPath(hname); Profile1DPtr prof = make_shared(refscatter, path); addAnalysisObject(prof); MSG_TRACE("Made profile histogram " << hname << " for " << name()); if (prof->hasAnnotation("IsRef")) prof->rmAnnotation("IsRef"); prof->setTitle(title); prof->setAnnotation("XLabel", xtitle); prof->setAnnotation("YLabel", ytitle); return prof; } Profile1DPtr Analysis::bookProfile1D(const string& hname, const string& title, const string& xtitle, const string& ytitle) { const Scatter2D& refdata = refData(hname); return bookProfile1D(hname, refdata, title, xtitle, ytitle); } Profile1DPtr Analysis::bookProfile1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, const string& title, const string& xtitle, const string& ytitle) { const string axisCode = mkAxisCode(datasetId, xAxisId, yAxisId); return bookProfile1D(axisCode, title, xtitle, ytitle); } /////////////////// Profile2DPtr Analysis::bookProfile2D(const string& hname, size_t nxbins, double xlower, double xupper, size_t nybins, double ylower, double yupper, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { const string path = histoPath(hname); Profile2DPtr prof = make_shared(nxbins, xlower, xupper, nybins, ylower, yupper, path, title); addAnalysisObject(prof); MSG_TRACE("Made 2D profile histogram " << hname << " for " << name()); prof->setAnnotation("XLabel", xtitle); prof->setAnnotation("YLabel", ytitle); prof->setAnnotation("ZLabel", ztitle); return prof; } Profile2DPtr Analysis::bookProfile2D(const string& hname, const vector& xbinedges, const vector& ybinedges, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { const string path = histoPath(hname); Profile2DPtr prof = make_shared(xbinedges, ybinedges, path, title); addAnalysisObject(prof); MSG_TRACE("Made 2D profile histogram " << hname << " for " << name()); prof->setAnnotation("XLabel", xtitle); prof->setAnnotation("YLabel", ytitle); prof->setAnnotation("ZLabel", ztitle); return prof; } Profile2DPtr Analysis::bookProfile2D(const string& hname, const initializer_list& xbinedges, const initializer_list& ybinedges, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { return 
bookProfile2D(hname, vector{xbinedges}, vector{ybinedges}, title, xtitle, ytitle, ztitle); } Profile2DPtr Analysis::bookProfile2D(const string& hname, const Scatter3D& refscatter, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { const string path = histoPath(hname); Profile2DPtr prof( new Profile2D(refscatter, path) ); addAnalysisObject(prof); MSG_TRACE("Made 2D profile histogram " << hname << " for " << name()); if (prof->hasAnnotation("IsRef")) prof->rmAnnotation("IsRef"); prof->setTitle(title); prof->setAnnotation("XLabel", xtitle); prof->setAnnotation("YLabel", ytitle); prof->setAnnotation("ZLabel", ztitle); return prof; } Profile2DPtr Analysis::bookProfile2D(const string& hname, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { const Scatter3D& refdata = refData(hname); return bookProfile2D(hname, refdata, title, xtitle, ytitle, ztitle); } Profile2DPtr Analysis::bookProfile2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, const string& title, const string& xtitle, const string& ytitle, const string& ztitle) { const string axisCode = mkAxisCode(datasetId, xAxisId, yAxisId); return bookProfile2D(axisCode, title, xtitle, ytitle, ztitle); } ///////////////// Scatter2DPtr Analysis::bookScatter2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId, bool copy_pts, const string& title, const string& xtitle, const string& ytitle) { const string axisCode = mkAxisCode(datasetId, xAxisId, yAxisId); return bookScatter2D(axisCode, copy_pts, title, xtitle, ytitle); } Scatter2DPtr Analysis::bookScatter2D(const string& hname, bool copy_pts, const string& title, const string& xtitle, const string& ytitle) { Scatter2DPtr s; const string path = histoPath(hname); if (copy_pts) { const Scatter2D& refdata = refData(hname); s = make_shared(refdata, path); for (Point2D& p : s->points()) p.setY(0, 0); } else { s = make_shared(path); } addAnalysisObject(s); MSG_TRACE("Made scatter " << hname << " for " << name()); if (s->hasAnnotation("IsRef")) s->rmAnnotation("IsRef"); s->setTitle(title); s->setAnnotation("XLabel", xtitle); s->setAnnotation("YLabel", ytitle); return s; } Scatter2DPtr Analysis::bookScatter2D(const string& hname, size_t npts, double lower, double upper, const string& title, const string& xtitle, const string& ytitle) { Scatter2DPtr s; const string path = histoPath(hname); try { // try to bind to pre-existing s = getAnalysisObject(hname); /// @todo Also test that binning is as expected? MSG_TRACE("Bound pre-existing scatter " << path << " for " << name()); } catch (...) 
    { // binding failed; make it from scratch
      s = make_shared<Scatter2D>(path);
      const double binwidth = (upper-lower)/npts;
      for (size_t pt = 0; pt < npts; ++pt) {
        const double bincentre = lower + (pt + 0.5) * binwidth;
        s->addPoint(bincentre, 0, binwidth/2.0, 0);
      }
      addAnalysisObject(s);
      MSG_TRACE("Made scatter " << hname << " for " << name());
    }
    s->setTitle(title);
    s->setAnnotation("XLabel", xtitle);
    s->setAnnotation("YLabel", ytitle);
    return s;
  }


  Scatter2DPtr Analysis::bookScatter2D(const string& hname,
                                       const vector<double>& binedges,
                                       const string& title,
                                       const string& xtitle,
                                       const string& ytitle) {
    const string path = histoPath(hname);
    Scatter2DPtr s = make_shared<Scatter2D>(path);
    for (size_t pt = 0; pt < binedges.size()-1; ++pt) {
      const double bincentre = (binedges[pt] + binedges[pt+1]) / 2.0;
      const double binwidth = binedges[pt+1] - binedges[pt];
      s->addPoint(bincentre, 0, binwidth/2.0, 0);
    }
    addAnalysisObject(s);
    MSG_TRACE("Made scatter " << hname << " for " << name());
    s->setTitle(title);
    s->setAnnotation("XLabel", xtitle);
    s->setAnnotation("YLabel", ytitle);
    return s;
  }


  /////////////////////


  void Analysis::divide(CounterPtr c1, CounterPtr c2, Scatter1DPtr s) const {
    const string path = s->path();
    *s = *c1 / *c2;
    s->setPath(path);
  }

  void Analysis::divide(const Counter& c1, const Counter& c2, Scatter1DPtr s) const {
    const string path = s->path();
    *s = c1 / c2;
    s->setPath(path);
  }

  void Analysis::divide(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const {
    const string path = s->path();
    *s = *h1 / *h2;
    s->setPath(path);
  }

  void Analysis::divide(const Histo1D& h1, const Histo1D& h2, Scatter2DPtr s) const {
    const string path = s->path();
    *s = h1 / h2;
    s->setPath(path);
  }

  void Analysis::divide(Profile1DPtr p1, Profile1DPtr p2, Scatter2DPtr s) const {
    const string path = s->path();
    *s = *p1 / *p2;
    s->setPath(path);
  }

  void Analysis::divide(const Profile1D& p1, const Profile1D& p2, Scatter2DPtr s) const {
    const string path = s->path();
    *s = p1 / p2;
    s->setPath(path);
  }

  void Analysis::divide(Histo2DPtr h1, Histo2DPtr h2, Scatter3DPtr s) const {
    const string path = s->path();
    *s = *h1 / *h2;
    s->setPath(path);
  }

  void Analysis::divide(const Histo2D& h1, const Histo2D& h2, Scatter3DPtr s) const {
    const string path = s->path();
    *s = h1 / h2;
    s->setPath(path);
  }

  void Analysis::divide(Profile2DPtr p1, Profile2DPtr p2, Scatter3DPtr s) const {
    const string path = s->path();
    *s = *p1 / *p2;
    s->setPath(path);
  }

  void Analysis::divide(const Profile2D& p1, const Profile2D& p2, Scatter3DPtr s) const {
    const string path = s->path();
    *s = p1 / p2;
    s->setPath(path);
  }


  /// @todo Counter and Histo2D efficiencies and asymms

  void Analysis::efficiency(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const {
    const string path = s->path();
    *s = YODA::efficiency(*h1, *h2);
    s->setPath(path);
  }

  void Analysis::efficiency(const Histo1D& h1, const Histo1D& h2, Scatter2DPtr s) const {
    const string path = s->path();
    *s = YODA::efficiency(h1, h2);
    s->setPath(path);
  }

  void Analysis::asymm(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const {
    const string path = s->path();
    *s = YODA::asymm(*h1, *h2);
    s->setPath(path);
  }

  void Analysis::asymm(const Histo1D& h1, const Histo1D& h2, Scatter2DPtr s) const {
    const string path = s->path();
    *s = YODA::asymm(h1, h2);
    s->setPath(path);
  }


  void Analysis::scale(CounterPtr cnt, double factor) {
    if (!cnt) {
      MSG_WARNING("Failed to scale counter=NULL in analysis " << name() << " (scale=" << factor << ")");
      return;
    }
    if (std::isnan(factor) || std::isinf(factor)) {
      MSG_WARNING("Failed to scale counter=" << cnt->path() << " in analysis: " << name() << " (invalid scale factor = " << factor << ")");
      factor = 0;
    }
    MSG_TRACE("Scaling counter " << cnt->path() << " by factor " << factor);
    try {
      cnt->scaleW(factor);
    } catch (YODA::Exception& we) {
      MSG_WARNING("Could not scale counter " << cnt->path());
      return;
    }
  }

  void Analysis::normalize(Histo1DPtr histo, double norm, bool includeoverflows) {
    if (!histo) {
      MSG_WARNING("Failed to normalize histo=NULL in analysis " << name() << " (norm=" << norm << ")");
      return;
    }
    MSG_TRACE("Normalizing histo " << histo->path() << " to " << norm);
    try {
      histo->normalize(norm, includeoverflows);
    } catch (YODA::Exception& we) {
      MSG_WARNING("Could not normalize histo " << histo->path());
      return;
    }
  }

  void Analysis::scale(Histo1DPtr histo, double factor) {
    if (!histo) {
      MSG_WARNING("Failed to scale histo=NULL in analysis " << name() << " (scale=" << factor << ")");
      return;
    }
    if (std::isnan(factor) || std::isinf(factor)) {
      MSG_WARNING("Failed to scale histo=" << histo->path() << " in analysis: " << name() << " (invalid scale factor = " << factor << ")");
      factor = 0;
    }
    MSG_TRACE("Scaling histo " << histo->path() << " by factor " << factor);
    try {
      histo->scaleW(factor);
    } catch (YODA::Exception& we) {
      MSG_WARNING("Could not scale histo " << histo->path());
      return;
    }
  }

  void Analysis::normalize(Histo2DPtr histo, double norm, bool includeoverflows) {
    if (!histo) {
      MSG_ERROR("Failed to normalize histo=NULL in analysis " << name() << " (norm=" << norm << ")");
      return;
    }
    MSG_TRACE("Normalizing histo " << histo->path() << " to " << norm);
    try {
      histo->normalize(norm, includeoverflows);
    } catch (YODA::Exception& we) {
      MSG_WARNING("Could not normalize histo " << histo->path());
      return;
    }
  }

  void Analysis::scale(Histo2DPtr histo, double factor) {
    if (!histo) {
      MSG_ERROR("Failed to scale histo=NULL in analysis " << name() << " (scale=" << factor << ")");
      return;
    }
    if (std::isnan(factor) || std::isinf(factor)) {
      MSG_ERROR("Failed to scale histo=" << histo->path() << " in analysis: " << name() << " (invalid scale factor = " << factor << ")");
      factor = 0;
    }
    MSG_TRACE("Scaling histo " << histo->path() << " by factor " << factor);
    try {
      histo->scaleW(factor);
    } catch (YODA::Exception& we) {
      MSG_WARNING("Could not scale histo " << histo->path());
      return;
    }
  }

  void Analysis::integrate(Histo1DPtr h, Scatter2DPtr s) const {
    // preserve the path info
    const string path = s->path();
    *s = toIntegralHisto(*h);
    s->setPath(path);
  }

  void Analysis::integrate(const Histo1D& h, Scatter2DPtr s) const {
    // preserve the path info
    const string path = s->path();
    *s = toIntegralHisto(h);
    s->setPath(path);
  }

  /// @todo 2D versions of integrate... defined how, exactly?!?
  //////////////////////////////////


  void Analysis::addAnalysisObject(AnalysisObjectPtr ao) {
    _analysisobjects.push_back(ao);
  }

  void Analysis::removeAnalysisObject(const string& path) {
    for (vector<AnalysisObjectPtr>::iterator it = _analysisobjects.begin(); it != _analysisobjects.end(); ++it) {
      if ((*it)->path() == path) {
        _analysisobjects.erase(it);
        break;
      }
    }
  }

  void Analysis::removeAnalysisObject(AnalysisObjectPtr ao) {
    for (vector<AnalysisObjectPtr>::iterator it = _analysisobjects.begin(); it != _analysisobjects.end(); ++it) {
      if (*it == ao) {
        _analysisobjects.erase(it);
        break;
      }
    }
  }

}
diff --git a/src/Core/AnalysisHandler.cc b/src/Core/AnalysisHandler.cc
--- a/src/Core/AnalysisHandler.cc
+++ b/src/Core/AnalysisHandler.cc
@@ -1,357 +1,375 @@
// -*- C++ -*-
#include "Rivet/Config/RivetCommon.hh"
#include "Rivet/AnalysisHandler.hh"
#include "Rivet/Analysis.hh"
#include "Rivet/Tools/ParticleName.hh"
#include "Rivet/Tools/BeamConstraint.hh"
#include "Rivet/Tools/Logging.hh"
#include "Rivet/Projections/Beam.hh"
#include "YODA/IO.h"

namespace Rivet {


  AnalysisHandler::AnalysisHandler(const string& runname)
    : _runname(runname),
      _eventcounter("/_EVTCOUNT"),
      _xs(NAN), _xserr(NAN),
      _initialised(false), _ignoreBeams(false)
  { }

  AnalysisHandler::~AnalysisHandler() { }

  Log& AnalysisHandler::getLog() const {
    return Log::getLog("Rivet.Analysis.Handler");
  }


  void AnalysisHandler::init(const GenEvent& ge) {
    if (_initialised)
      throw UserError("AnalysisHandler::init has already been called: cannot re-initialize!");
    setRunBeams(Rivet::beams(ge));
    MSG_DEBUG("Initialising the analysis handler");
    _eventcounter.reset();

    // Check that analyses are beam-compatible, and remove those that aren't
    const size_t num_anas_requested = analysisNames().size();
    vector<string> anamestodelete;
    for (const AnaHandle a : _analyses) {
      if (!_ignoreBeams && !a->isCompatible(beams())) {
        //MSG_DEBUG(a->name() << " requires beams " << a->requiredBeams() << " @ " << a->requiredEnergies() << " GeV");
        anamestodelete.push_back(a->name());
      }
    }
    for (const string& aname : anamestodelete) {
      MSG_WARNING("Analysis '" << aname << "' is incompatible with the provided beams: removing");
      removeAnalysis(aname);
    }
    if (num_anas_requested > 0 && analysisNames().empty()) {
      cerr << "All analyses were incompatible with the first event's beams\n"
           << "Exiting, since this probably wasn't intentional!" << endl;
      exit(1);
    }

    // Warn if any analysis' status is not unblemished
    for (const AnaHandle a : analyses()) {
      if (toUpper(a->status()) == "PRELIMINARY") {
        MSG_WARNING("Analysis '" << a->name() << "' is preliminary: be careful, it may change and/or be renamed!");
      } else if (toUpper(a->status()) == "OBSOLETE") {
        MSG_WARNING("Analysis '" << a->name() << "' is obsolete: please update!");
      } else if (toUpper(a->status()).find("UNVALIDATED") != string::npos) {
        MSG_WARNING("Analysis '" << a->name() << "' is unvalidated: be careful, it may be broken!");
      }
    }

    // Initialize the remaining analyses
    for (AnaHandle a : _analyses) {
      MSG_DEBUG("Initialising analysis: " << a->name());
      try {
        // Allow projection registration in the init phase onwards
        a->_allowProjReg = true;
        a->init();
        //MSG_DEBUG("Checking consistency of analysis: " << a->name());
        //a->checkConsistency();
      } catch (const Error& err) {
        cerr << "Error in " << a->name() << "::init method: " << err.what() << endl;
        exit(1);
      }
      MSG_DEBUG("Done initialising analysis: " << a->name());
    }

    _initialised = true;
    MSG_DEBUG("Analysis handler initialised");
  }


  void AnalysisHandler::analyze(const GenEvent& ge) {
    // Call init with event as template if not already initialised
    if (!_initialised) init(ge);
    assert(_initialised);

    // Ensure that beam details match those from the first event (if we're checking beams)
    if ( !_ignoreBeams ) {
      const PdgIdPair beams = Rivet::beamIds(ge);
      const double sqrts = Rivet::sqrtS(ge);
      if (!compatible(beams, _beams) || !fuzzyEquals(sqrts, sqrtS())) {
        cerr << "Event beams mismatch: " << PID::toBeamsString(beams) << " @ " << sqrts/GeV << " GeV"
             << " vs. first beams " << this->beams() << " @ " << this->sqrtS()/GeV << " GeV" << endl;
        exit(1);
      }
    }

    // Create the Rivet event wrapper
    /// @todo Filter/normalize the event here
    Event event(ge);

    // Weights
    /// @todo Drop this / just report first weight when we support multiweight events
    _eventcounter.fill(event.weight());
    MSG_DEBUG("Event #" << _eventcounter.numEntries() << " weight = " << event.weight());

    // Cross-section
    #ifdef HEPMC_HAS_CROSS_SECTION
    if (ge.cross_section()) {
      _xs = ge.cross_section()->cross_section();
      _xserr = ge.cross_section()->cross_section_error();
    }
    #endif

    // Run the analyses
    for (AnaHandle a : _analyses) {
      MSG_TRACE("About to run analysis " << a->name());
      try {
        a->analyze(event);
      } catch (const Error& err) {
        cerr << "Error in " << a->name() << "::analyze method: " << err.what() << endl;
        exit(1);
      }
      MSG_TRACE("Finished running analysis " << a->name());
    }
  }


  void AnalysisHandler::analyze(const GenEvent* ge) {
    if (ge == nullptr) {
      MSG_ERROR("AnalysisHandler received null pointer to GenEvent");
      //throw Error("AnalysisHandler received null pointer to GenEvent");
    }
    analyze(*ge);
  }


  void AnalysisHandler::finalize() {
    if (!_initialised) return;
    MSG_INFO("Finalising analyses");
    for (AnaHandle a : _analyses) {
      a->setCrossSection(_xs);
      try {
        a->finalize();
      } catch (const Error& err) {
        cerr << "Error in " << a->name() << "::finalize method: " << err.what() << endl;
        exit(1);
      }
    }

    // Print out number of events processed
    const int nevts = _eventcounter.numEntries();
    MSG_INFO("Processed " << nevts << " event" << (nevts != 1 ? "s" : ""));

    // // Delete analyses
    // MSG_DEBUG("Deleting analyses");
    // _analyses.clear();

    // Print out MCnet boilerplate
    cout << endl;
    cout << "The MCnet usage guidelines apply to Rivet: see http://www.montecarlonet.org/GUIDELINES" << endl;
    cout << "Please acknowledge plots made with Rivet analyses, and cite arXiv:1003.0694 (http://arxiv.org/abs/1003.0694)" << endl;
  }


  AnalysisHandler& AnalysisHandler::addAnalysis(const string& analysisname) {
    // Check for a duplicate analysis
    /// @todo Might we want to be able to run an analysis twice, with different params?
    /// Requires avoiding histo tree clashes, i.e. storing the histos on the analysis objects.
    for (const AnaHandle& a : _analyses) {
      if (a->name() == analysisname) {
        MSG_WARNING("Analysis '" << analysisname << "' already registered: skipping duplicate");
        return *this;
      }
    }
-    AnaHandle analysis( AnalysisLoader::getAnalysis(analysisname) );
+    string ananame = analysisname;
+    vector<string> anaopt = split(analysisname, ":");
+    if ( anaopt.size() > 1 ) ananame = anaopt[0];
+    AnaHandle analysis( AnalysisLoader::getAnalysis(ananame) );
    if (analysis.get() != 0) { // < Check for null analysis.
      MSG_DEBUG("Adding analysis '" << analysisname << "'");
+      for ( int i = 1, N = anaopt.size(); i < N; ++i ) {
+        vector<string> opt = split(anaopt[i], "=");
+        if ( opt.size() != 2 ) {
+          MSG_WARNING("Error in option specification. Skipping analysis "
+                      << analysisname);
+          return *this;
+        }
+        if ( !analysis->info().validOption(opt[0], opt[1]) ) {
+          MSG_WARNING("Cannot set option '" << opt[0] << "' to '" << opt[1]
+                      << "'. Skipping analysis " << analysisname);
+          return *this;
+        }
+        analysis->_options[opt[0]] = opt[1];
+        analysis->_optstring += ":" + anaopt[i];
+      }
      analysis->_analysishandler = this;
      _analyses.insert(analysis);
    } else {
      MSG_WARNING("Analysis '" << analysisname << "' not found.");
    }
    // MSG_WARNING(_analyses.size());
    // for (const AnaHandle& a : _analyses) MSG_WARNING(a->name());
    return *this;
  }


  AnalysisHandler& AnalysisHandler::removeAnalysis(const string& analysisname) {
    std::shared_ptr<Analysis> toremove;
    for (const AnaHandle a : _analyses) {
      if (a->name() == analysisname) {
        toremove = a;
        break;
      }
    }
    if (toremove.get() != 0) {
      MSG_DEBUG("Removing analysis '" << analysisname << "'");
      _analyses.erase(toremove);
    }
    return *this;
  }


  /////////////////////////////


  void AnalysisHandler::addData(const std::vector<AnalysisObjectPtr>& aos) {
    for (const AnalysisObjectPtr ao : aos) {
      const string path = ao->path();
      if (path.size() > 1) { // path > "/"
        try {
          const string ananame = split(path, "/")[0];
          AnaHandle a = analysis(ananame);
          a->addAnalysisObject(ao); /// @todo Need to statistically merge...
        } catch (const Error& e) {
          MSG_WARNING(e.what());
        }
      }
    }
  }


  void AnalysisHandler::readData(const string& filename) {
    vector<AnalysisObjectPtr> aos;
    try {
      /// @todo Use new YODA SFINAE to fill the smart ptr vector directly
      vector<AnalysisObject*> aos_raw;
      YODA::read(filename, aos_raw);
      for (AnalysisObject* aor : aos_raw) aos.push_back(AnalysisObjectPtr(aor));
    } catch (...) { //< YODA::ReadError&
      throw UserError("Unexpected error in reading file: " + filename);
    }
    if (!aos.empty()) addData(aos);
  }


  vector<AnalysisObjectPtr> AnalysisHandler::getData() const {
    vector<AnalysisObjectPtr> rtn;
    // Event counter
    rtn.push_back( make_shared<Counter>(_eventcounter) );
    // Cross-section + err as scatter
    YODA::Scatter1D::Points pts;
    pts.insert(YODA::Point1D(_xs, _xserr));
    rtn.push_back( make_shared<Scatter1D>(pts, "/_XSEC") );
    // Analysis histograms
    for (const AnaHandle a : analyses()) {
      vector<AnalysisObjectPtr> aos = a->analysisObjects();
      // MSG_WARNING(a->name() << " " << aos.size());
      for (const AnalysisObjectPtr ao : aos) {
        // Exclude paths from final write-out if they contain a "TMP" layer (i.e. matching "/TMP/")
        /// @todo This needs to be much more nuanced for re-entrant histogramming
        if (ao->path().find("/TMP/") != string::npos) continue;
        rtn.push_back(ao);
      }
    }
    // Sort histograms alphanumerically by path before write-out
    sort(rtn.begin(), rtn.end(), [](AnalysisObjectPtr a, AnalysisObjectPtr b) {return a->path() < b->path();});
    return rtn;
  }


  void AnalysisHandler::writeData(const string& filename) const {
    const vector<AnalysisObjectPtr> aos = getData();
    try {
      YODA::write(filename, aos.begin(), aos.end());
    } catch (...) { //< YODA::WriteError&
      throw UserError("Unexpected error in writing file: " + filename);
    }
  }


  std::vector<std::string> AnalysisHandler::analysisNames() const {
    std::vector<std::string> rtn;
    for (AnaHandle a : _analyses) {
      rtn.push_back(a->name());
    }
    return rtn;
  }


  const AnaHandle AnalysisHandler::analysis(const std::string& analysisname) const {
    for (const AnaHandle a : analyses())
      if (a->name() == analysisname) return a;
    throw Error("No analysis named '" + analysisname + "' registered in AnalysisHandler");
  }


  AnalysisHandler& AnalysisHandler::addAnalyses(const std::vector<std::string>& analysisnames) {
    for (const string& aname : analysisnames) {
      //MSG_DEBUG("Adding analysis '" << aname << "'");
      addAnalysis(aname);
    }
    return *this;
  }


  AnalysisHandler& AnalysisHandler::removeAnalyses(const std::vector<std::string>& analysisnames) {
    for (const string& aname : analysisnames) {
      removeAnalysis(aname);
    }
    return *this;
  }


  bool AnalysisHandler::needCrossSection() const {
    bool rtn = false;
    for (const AnaHandle a : _analyses) {
      if (!rtn) rtn = a->needsCrossSection();
      if (rtn) break;
    }
    return rtn;
  }

  AnalysisHandler& AnalysisHandler::setCrossSection(double xs) {
    _xs = xs;
    return *this;
  }

  bool AnalysisHandler::hasCrossSection() const {
    return (!std::isnan(crossSection()));
  }

  AnalysisHandler& AnalysisHandler::addAnalysis(Analysis* analysis) {
    analysis->_analysishandler = this;
    _analyses.insert(AnaHandle(analysis));
    return *this;
  }

  PdgIdPair AnalysisHandler::beamIds() const {
    return Rivet::beamIds(beams());
  }

  double AnalysisHandler::sqrtS() const {
    return Rivet::sqrtS(beams());
  }

  void AnalysisHandler::setIgnoreBeams(bool ignore) {
    _ignoreBeams = ignore;
  }

}
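A minimal driver sketch for the AnalysisHandler API patched above (illustrative only: the analysis name MY_ANALYSIS, the run name, the output file name, and the event source are placeholders, and HepMC event I/O is elided). Analyses are registered by name with addAnalysis(), each GenEvent is passed to analyze() (which calls init() on the first event), and finalize()/writeData() close the run:

    #include "Rivet/AnalysisHandler.hh"
    #include "HepMC/GenEvent.h"
    #include <vector>

    // Sketch only: `events` stands in for events obtained from a generator or file.
    void runRivet(const std::vector<HepMC::GenEvent*>& events) {
      Rivet::AnalysisHandler ah("example-run");   // the run name here is arbitrary
      ah.addAnalysis("MY_ANALYSIS");              // register an analysis by name
      for (const HepMC::GenEvent* ev : events)
        ah.analyze(*ev);                          // init() runs on the first event
      ah.finalize();
      ah.writeData("example-output.yoda");        // writes everything from getData()
    }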
diff --git a/src/Core/AnalysisInfo.cc b/src/Core/AnalysisInfo.cc
--- a/src/Core/AnalysisInfo.cc
+++ b/src/Core/AnalysisInfo.cc
@@ -1,248 +1,272 @@
#include "Rivet/Config/RivetCommon.hh"
#include "Rivet/AnalysisInfo.hh"
#include "Rivet/Tools/RivetPaths.hh"
#include "Rivet/Tools/Utils.hh"
#include "Rivet/Tools/Logging.hh"
#include "yaml-cpp/yaml.h"
#include <iostream>
#include <fstream>
#include <sstream>

#ifdef YAML_NAMESPACE
#define YAML YAML_NAMESPACE
#endif

namespace Rivet {

  namespace {
    Log& getLog() {
      return Log::getLog("Rivet.AnalysisInfo");
    }
  }


  /// Static factory method
  unique_ptr<AnalysisInfo> AnalysisInfo::make(const std::string& ananame) {
    // Returned AI, in semi-null state
    unique_ptr<AnalysisInfo> ai( new AnalysisInfo );
    ai->_beams += make_pair(PID::ANY, PID::ANY);
    ai->_name = ananame;

    /// If no ana data file found, return null AI
    const string datapath = findAnalysisInfoFile(ananame + ".info");
    if (datapath.empty()) {
      MSG_DEBUG("No datafile " << ananame + ".info found");
      return ai;
    }

    // Read data from YAML document
    MSG_DEBUG("Reading analysis data from " << datapath);
    YAML::Node doc;
    try {
      #if YAMLCPP_API == 3
      std::ifstream file(datapath.c_str());
      YAML::Parser parser(file);
      parser.GetNextDocument(doc);
      #elif YAMLCPP_API == 5
      doc = YAML::LoadFile(datapath);
      #endif
    } catch (const YAML::ParserException& ex) {
      MSG_ERROR("Parse error when reading analysis data from " << datapath << " (" << ex.what() << ")");
      return ai;
    }

    #define THROW_INFOERR(KEY) throw InfoError("Problem in info parsing while accessing key " + string(KEY) + " in file " + datapath)

    // Simple scalars (test for nullness before casting)
    #if YAMLCPP_API == 3
    #define TRY_GETINFO(KEY, VAR) try { if (doc.FindValue(KEY)) { string val; doc[KEY] >> val; ai->_ ## VAR = val; } } catch (...) { THROW_INFOERR(KEY); }
    #elif YAMLCPP_API == 5
    #define TRY_GETINFO(KEY, VAR) try { if (doc[KEY] && !doc[KEY].IsNull()) ai->_ ## VAR = doc[KEY].as<string>(); } catch (...) { THROW_INFOERR(KEY); }
    #endif
    TRY_GETINFO("Name", name);
    TRY_GETINFO("Summary", summary);
    TRY_GETINFO("Status", status);
    TRY_GETINFO("RunInfo", runInfo);
    TRY_GETINFO("Description", description);
    TRY_GETINFO("Experiment", experiment);
    TRY_GETINFO("Collider", collider);
    TRY_GETINFO("Year", year);
    TRY_GETINFO("Luminosity_fb", luminosityfb);
    TRY_GETINFO("SpiresID", spiresId);
    TRY_GETINFO("InspireID", inspireId);
    TRY_GETINFO("BibKey", bibKey);
    TRY_GETINFO("BibTeX", bibTeX);
    #undef TRY_GETINFO

    // Sequences (test the seq *and* each entry for nullness before casting)
    #if YAMLCPP_API == 3
    #define TRY_GETINFO_SEQ(KEY, VAR) try { \
        if (const YAML::Node* VAR = doc.FindValue(KEY)) { \
          for (size_t i = 0; i < VAR->size(); ++i) { \
            string val; (*VAR)[i] >> val; ai->_ ## VAR += val; \
          } } } catch (...) { THROW_INFOERR(KEY); }
    #elif YAMLCPP_API == 5
    #define TRY_GETINFO_SEQ(KEY, VAR) try { \
        if (doc[KEY] && !doc[KEY].IsNull()) { \
          const YAML::Node& VAR = doc[KEY]; \
          for (size_t i = 0; i < VAR.size(); ++i) \
            if (!VAR[i].IsNull()) ai->_ ## VAR += VAR[i].as<string>(); \
        } } catch (...) { THROW_INFOERR(KEY); }
    #endif
    TRY_GETINFO_SEQ("Authors", authors);
    TRY_GETINFO_SEQ("References", references);
    TRY_GETINFO_SEQ("ToDo", todos);
    TRY_GETINFO_SEQ("Keywords", keywords);
+    TRY_GETINFO_SEQ("Options", options);
    #undef TRY_GETINFO_SEQ

+    // Build the option map
+    ai->buildOptionMap();

    // A boolean with some name flexibility
    try {
      #if YAMLCPP_API == 3
      bool val;
      if (const YAML::Node* n = doc.FindValue("NeedsCrossSection")) { *n >> val; ai->_needsCrossSection = val; }
      if (const YAML::Node* n = doc.FindValue("NeedCrossSection")) { *n >> val; ai->_needsCrossSection = val; }
      #elif YAMLCPP_API == 5
      if (doc["NeedsCrossSection"]) ai->_needsCrossSection = doc["NeedsCrossSection"].as<bool>();
      else if (doc["NeedCrossSection"]) ai->_needsCrossSection = doc["NeedCrossSection"].as<bool>();
      #endif
    } catch (...) { THROW_INFOERR("NeedsCrossSection|NeedCrossSection"); }

    // Beam particle identities
    try {
      #if YAMLCPP_API == 3
      if (const YAML::Node* pbeampairs = doc.FindValue("Beams")) {
        const YAML::Node& beampairs = *pbeampairs;
        vector<PdgIdPair> beam_pairs;
        if (beampairs.size() == 2 &&
            beampairs[0].Type() == YAML::NodeType::Scalar &&
            beampairs[1].Type() == YAML::NodeType::Scalar) {
          string bstr0, bstr1;
          beampairs[0] >> bstr0;
          beampairs[1] >> bstr1;
          beam_pairs += PID::make_pdgid_pair(bstr0, bstr1);
        } else {
          for (YAML::Iterator bpi = beampairs.begin(); bpi != beampairs.end(); ++bpi) {
            const YAML::Node& bp = *bpi;
            if (bp.size() == 2 &&
                bp[0].Type() == YAML::NodeType::Scalar &&
                bp[1].Type() == YAML::NodeType::Scalar) {
              string bstr0, bstr1;
              bp[0] >> bstr0;
              bp[1] >> bstr1;
              beam_pairs += PID::make_pdgid_pair(bstr0, bstr1);
            } else {
              throw InfoError("Beam ID pairs have to be either a 2-tuple or a list of 2-tuples of particle names");
            }
          }
        }
        ai->_beams = beam_pairs;
      }
      #elif YAMLCPP_API == 5
      if (doc["Beams"]) {
        const YAML::Node& beams = doc["Beams"];
        vector<PdgIdPair> beam_pairs;
        if (beams.size() == 2 && beams[0].IsScalar() && beams[1].IsScalar()) {
          beam_pairs += PID::make_pdgid_pair(beams[0].as<string>(), beams[1].as<string>());
        } else {
          for (size_t i = 0; i < beams.size(); ++i) {
            const YAML::Node& bp = beams[i];
            if (bp.size() != 2 || !bp[0].IsScalar() || !bp[1].IsScalar())
              throw InfoError("Beam ID pairs have to be either a 2-tuple or a list of 2-tuples of particle names");
            beam_pairs += PID::make_pdgid_pair(bp[0].as<string>(), bp[1].as<string>());
          }
        }
        ai->_beams = beam_pairs;
      }
      #endif
    } catch (...) { THROW_INFOERR("Beams"); }

    // Beam energies
    try {
      #if YAMLCPP_API == 3
      if (const YAML::Node* penergies = doc.FindValue("Energies")) {
        const YAML::Node& energies = *penergies;
        vector<pair<double,double> > beam_energy_pairs;
        for (YAML::Iterator be = energies.begin(); be != energies.end(); ++be) {
          if (be->Type() == YAML::NodeType::Scalar) {
            // If beam energy is a scalar, then assume symmetric beams each with half that energy
            double sqrts;
            *be >> sqrts;
            beam_energy_pairs += make_pair(sqrts/2.0, sqrts/2.0);
          } else if (be->Type() == YAML::NodeType::Sequence) {
            const YAML::Node& beseq = *be;
            // If the sub-sequence is of length 1, then it's another scalar sqrt(s)!
            if (beseq.size() == 1) {
              double sqrts;
              (*be)[0] >> sqrts;
              beam_energy_pairs += make_pair(sqrts/2.0, sqrts/2.0);
            } else if (beseq.size() == 2) {
              vector<double> beamenergies;
              double beamenergy0, beamenergy1;
              beseq[0] >> beamenergy0;
              beseq[1] >> beamenergy1;
              beam_energy_pairs += make_pair(beamenergy0, beamenergy1);
            } else {
              throw InfoError("Beam energies have to be a list of either numbers or pairs of numbers");
            }
          } else {
            throw InfoError("Beam energies have to be a list of either numbers or pairs of numbers");
          }
        }
        ai->_energies = beam_energy_pairs;
      }
      #elif YAMLCPP_API == 5
      if (doc["Energies"]) {
        vector< pair<double,double> > beam_energy_pairs;
        for (size_t i = 0; i < doc["Energies"].size(); ++i) {
          const YAML::Node& be = doc["Energies"][i];
          if (be.IsScalar()) {
            // If beam energy is a scalar, then assume symmetric beams each with half that energy
            beam_energy_pairs += make_pair(be.as<double>()/2.0, be.as<double>()/2.0);
          } else if (be.IsSequence()) {
            if (be.size() != 2)
              throw InfoError("Beam energies have to be a list of either numbers or pairs of numbers");
            beam_energy_pairs += make_pair(be[0].as<double>(), be[1].as<double>());
          } else {
            throw InfoError("Beam energies have to be a list of either numbers or pairs of numbers");
          }
        }
        ai->_energies = beam_energy_pairs;
      }
      #endif
    } catch (...) { THROW_INFOERR("Energies"); }

    #undef THROW_INFOERR

    MSG_TRACE("AnalysisInfo pointer = " << ai.get());
    return ai;
  }


  string toString(const AnalysisInfo& ai) {
    stringstream ss;
    ss << ai.name();
    ss << " - " << ai.summary();
    // ss << " - " << ai.beams();
    // ss << " - " << ai.energies();
    ss << " (" << ai.status() << ")";
    return ss.str();
  }


+  void AnalysisInfo::buildOptionMap() {
+    _optionmap.clear();
+    // Each option tag has the form "NAME=val1,val2,...": collect the allowed
+    // values for each option name into a set for later validation.
+    for ( auto opttag : _options ) {
+      std::vector<std::string> optv = split(opttag, "=");
+      std::string optname = optv[0];
+      for ( auto opt : split(optv[1], ",") )
+        _optionmap[optname].insert(opt);
+    }
+  }
+
+  bool AnalysisInfo::validOption(std::string key, std::string val) const {
+    auto opt = _optionmap.find(key);
+    if ( opt == _optionmap.end() ) return false;
+    if ( opt->second.find(val) != opt->second.end() ) return true;
+    // A single "#" entry means any numeric value is accepted.
+    if ( opt->second.size() == 1 && *opt->second.begin() == "#" ) {
+      std::istringstream ss(val);
+      double test;
+      if ( ss >> test ) return true;
+    }
+    return false;
+  }

}
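A usage sketch for the per-analysis option mechanism added above (illustrative only: the analysis name MY_TEST_ANA and the option names MODE and PTMIN are placeholders, not part of this patch). An analysis lists its legal options in its .info file, one "NAME=val1,val2,..." string per entry, with a lone "#" value meaning any numeric value is accepted (see AnalysisInfo::buildOptionMap() and validOption()); a user then selects options by appending ":OPT=VAL" terms to the name passed to AnalysisHandler::addAnalysis():

    #include "Rivet/AnalysisHandler.hh"

    int main() {
      // MY_TEST_ANA.info would declare, e.g.:
      //   Options:
      //    - MODE=INCL,EXCL
      //    - PTMIN=#
      Rivet::AnalysisHandler ah("example-run");           // run name is arbitrary here
      ah.addAnalysis("MY_TEST_ANA:MODE=EXCL:PTMIN=25");   // name[:opt=val[:opt=val...]]
      // addAnalysis() splits the string on ':' and '=', checks each pair with
      // AnalysisInfo::validOption(), and stores accepted pairs in the analysis'
      // _options map (and _optstring) for the analysis to read back later.
      return 0;
    }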