diff --git a/analyses/pluginALICE/ALICE_2016_I1507157.cc b/analyses/pluginALICE/ALICE_2016_I1507157.cc
--- a/analyses/pluginALICE/ALICE_2016_I1507157.cc
+++ b/analyses/pluginALICE/ALICE_2016_I1507157.cc
@@ -1,187 +1,187 @@
 // -*- C++ -*-
 #include "Rivet/Analysis.hh"
-#include "Rivet/Tools/AliceCommon.hh"
+#include "Rivet/Projections/AliceCommon.hh"
 #include "Rivet/Projections/PrimaryParticles.hh"
 #include "Rivet/Projections/ChargedFinalState.hh"
 #include "Rivet/Projections/EventMixingFinalState.hh"

 namespace Rivet {

   /// @brief Correlations of identified particles in pp.
   /// Also showcasing use of EventMixingFinalState.
   class ALICE_2016_I1507157 : public Analysis {
   public:

     /// Constructor
     DEFAULT_RIVET_ANALYSIS_CTOR(ALICE_2016_I1507157);

     /// @name Analysis methods
     //@{

     /// @brief Calculate angular distance between particles.
     double phaseDif(double a1, double a2){
       double dif = a1 - a2;
       while (dif < -M_PI/2)
         dif += 2*M_PI;
       while (dif > 3*M_PI/2)
         dif -= 2*M_PI;
       return dif;
     }

     /// Book histograms and initialise projections before the run
     void init() {
       double etamax = 0.8;
       double pTmin = 0.5; // GeV

       // Trigger
       declare(ALICE::V0AndTrigger(), "V0-AND");

       // Charged tracks used to manage the mixing observable.
       ChargedFinalState cfsMult(Cuts::abseta < etamax);
       addProjection(cfsMult, "CFSMult");

       // Primary particles.
       PrimaryParticles pp({Rivet::PID::PIPLUS, Rivet::PID::KPLUS,
         Rivet::PID::K0S, Rivet::PID::K0L, Rivet::PID::PROTON,
         Rivet::PID::NEUTRON, Rivet::PID::LAMBDA, Rivet::PID::SIGMAMINUS,
         Rivet::PID::SIGMAPLUS, Rivet::PID::XIMINUS, Rivet::PID::XI0,
         Rivet::PID::OMEGAMINUS},Cuts::abseta < etamax && Cuts::pT > pTmin*GeV);
       addProjection(pp,"APRIM");

       // The event mixing projection
       declare(EventMixingFinalState(&cfsMult, pp, 5, 0, 100, 10),"EVM");

       // The particle pairs.
       pid = {{211, -211}, {321, -321}, {2212, -2212}, {3122, -3122},
              {211, 211}, {321, 321}, {2212, 2212}, {3122, 3122},
              {2212, 3122}, {2212, -3122}};
       // The associated histograms in the data file.
       vector<string> refdata = {"d04-x01-y01","d04-x01-y02","d04-x01-y03",
         "d06-x01-y02","d05-x01-y01","d05-x01-y02","d05-x01-y03","d06-x01-y01",
         "d01-x01-y02","d02-x01-y02"};
       for (int i = 0, N = refdata.size(); i < N; ++i) {
         // The ratio plots.
         ratio.push_back(bookScatter2D(refdata[i], true));
         // Signal and mixed background.
         signal.push_back(bookHisto1D("/TMP/" + refdata[i] + "-s", *ratio[i], refdata[i] + "-s"));
         background.push_back(bookHisto1D("/TMP/" + refdata[i] + "-b", *ratio[i], refdata[i] + "-b"));
         // Number of signal and mixed pairs.
         nsp.push_back(0.);
         nmp.push_back(0.);
       }
     }

     /// Perform the per-event analysis
     void analyze(const Event& event) {
       const double weight = event.weight();

       // Triggering
       if (!apply<ALICE::V0AndTrigger>(event, "V0-AND")()) return;

       // The projections
       const PrimaryParticles& pp = applyProjection<PrimaryParticles>(event,"APRIM");
       const EventMixingFinalState& evm = applyProjection<EventMixingFinalState>(event, "EVM");

       // Test if we have enough mixing events available to continue.
       if (!evm.hasMixingEvents()) return;

       for(const Particle& p1 : pp.particles()) {
         // Start by doing the signal distributions
         for(const Particle& p2 : pp.particles()) {
           if(isSame(p1,p2))
             continue;
           double dEta = abs(p1.eta() - p2.eta());
           double dPhi = phaseDif(p1.phi(), p2.phi());
           if(dEta < 1.3) {
             for (int i = 0, N = pid.size(); i < N; ++i) {
               int pid1 = pid[i].first;
               int pid2 = pid[i].second;
               bool samesign = (pid1 * pid2 > 0);
               if (samesign && ((pid1 == p1.pid() && pid2 == p2.pid()) ||
                   (pid1 == -p1.pid() && pid2 == -p2.pid()))) {
                 signal[i]->fill(dPhi, weight);
                 nsp[i] += 1.0;
               }
               if (!samesign && abs(pid1) == abs(pid2) &&
                   pid1 == p1.pid() && pid2 == p2.pid()) {
                 signal[i]->fill(dPhi, weight);
                 nsp[i] += 1.0;
               }
               if (!samesign && abs(pid1) != abs(pid2) &&
                   ( (pid1 == p1.pid() && pid2 == p2.pid()) ||
                     (pid2 == p1.pid() && pid1 == p2.pid()) ) ) {
                 signal[i]->fill(dPhi, weight);
                 nsp[i] += 1.0;
               }
             }
           }
         }
         // Then do the background distribution
         for(const Particle& pMix : evm.particles()){
           double dEta = abs(p1.eta() - pMix.eta());
           double dPhi = phaseDif(p1.phi(), pMix.phi());
           if(dEta < 1.3) {
             for (int i = 0, N = pid.size(); i < N; ++i) {
               int pid1 = pid[i].first;
               int pid2 = pid[i].second;
               bool samesign = (pid1 * pid2 > 0);
               if (samesign && ((pid1 == p1.pid() && pid2 == pMix.pid()) ||
                   (pid1 == -p1.pid() && pid2 == -pMix.pid()))) {
                 background[i]->fill(dPhi, weight);
                 nmp[i] += 1.0;
               }
               if (!samesign && abs(pid1) == abs(pid2) &&
                   pid1 == p1.pid() && pid2 == pMix.pid()) {
                 background[i]->fill(dPhi, weight);
                 nmp[i] += 1.0;
               }
               if (!samesign && abs(pid1) != abs(pid2) &&
                   ( (pid1 == p1.pid() && pid2 == pMix.pid()) ||
                     (pid2 == p1.pid() && pid1 == pMix.pid()) ) ) {
                 background[i]->fill(dPhi, weight);
                 nmp[i] += 1.0;
               }
             }
           }
         }
       }
     }

     /// Normalise histograms etc., after the run
     void finalize() {
       for (int i = 0, N = pid.size(); i < N; ++i) {
         double sc = nmp[i] / nsp[i];
         signal[i]->scaleW(sc);
         divide(signal[i],background[i],ratio[i]);
       }
     }

     //@}

     /// @name Histograms
     //@{
     vector<std::pair<int, int> > pid;
     vector<Histo1DPtr> signal;
     vector<Histo1DPtr> background;
     vector<Scatter2DPtr> ratio;
     vector<double> nsp;
     vector<double> nmp;
     //@}

   };

   // The hook for the plugin system
   DECLARE_RIVET_PLUGIN(ALICE_2016_I1507157);

 }
diff --git a/analyses/pluginMC/MC_Cent_pPb_Eta.info b/analyses/pluginMC/MC_Cent_pPb_Eta.info
--- a/analyses/pluginMC/MC_Cent_pPb_Eta.info
+++ b/analyses/pluginMC/MC_Cent_pPb_Eta.info
@@ -1,42 +1,47 @@
 Name: MC_Cent_pPb_Eta
 Summary: Template analysis for obtaining eta distributions binned in centrality
 Status: UNVALIDATED REENTRANT
 Authors:
  - Leif Lönnblad
 Options:
  - cent=REF,GEN,IMP
 NumEvents: 50000
 References:
 - arXiv:1508.00848 [hep-ex], Eur.Phys.J. C76 (2016) no.4, 199
 RunInfo: Any!
 Description:
   Template analysis for obtaining eta distributions binned in centrality
   using the CentralityProjection and Percentile<> classes. The example is
   pPb collisions at 5 TeV and is based on the ATLAS analysis
   arXiv:1508.00848 [hep-ex]. The reference YODA file contains the
   corresponding plots from HepData. The generator should be run in
   minimum-bias mode with a cut on the transverse momentum of charged
   particles of 0.1 GeV, and setting particles with tcau>10 fm stable.
   Note that a calibration histogram for the generated centrality may be
   preloaded with the output of a corresponding MC_Cent_pPb_Calib analysis.
 BibKey: Aad:2015zza
 BibTeX: '@article{Aad:2015zza,
       author         = "Aad, Georges and others",
       title          = "{Measurement of the centrality dependence of the
                         charged-particle pseudorapidity distribution in
                         proton–lead collisions at $\sqrt{s_{_\text {NN}}} = 5.02$ TeV
                         with the ATLAS detector}",
       collaboration  = "ATLAS",
       journal        = "Eur. Phys. J.",
       volume         = "C76",
       year           = "2016",
       number         = "4",
       pages          = "199",
       doi            = "10.1140/epjc/s10052-016-4002-3",
       eprint         = "1508.00848",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CERN-PH-EP-2015-160",
       SLACcitation   = "%%CITATION = ARXIV:1508.00848;%%"
 }'
+
+Validation:
+ - $A-GEN LHC-pPb-5-minbias MC_Cent_pPb_Calib.yoda;
+   rivet LHC-pPb-5-minbias -p MC_Cent_pPb_Calib.yoda -a $A:cent=GEN -o $A.yoda
+
diff --git a/bin/rivet-mkvaldir b/bin/rivet-mkvaldir
--- a/bin/rivet-mkvaldir
+++ b/bin/rivet-mkvaldir
@@ -1,68 +1,78 @@
 #! /usr/bin/env python

 """\
 Build a directory with a Makefile for running a validation suite.

 Examples:

   %(prog)s

 ENVIRONMENT:
  * RIVET_ANALYSIS_PATH: list of paths to be searched for analysis plugin libraries
  * RIVET_DATA_PATH: list of paths to be searched for data files
 """

 import os, sys

 ## Load the rivet module
 try:
     import rivet
 except:
     ## If rivet loading failed, try to bootstrap the Python path!
     try:
         # TODO: Is this a good idea? Maybe just notify the user that their PYTHONPATH is wrong?
         import commands
         modname = sys.modules[__name__].__file__
         binpath = os.path.dirname(modname)
         rivetconfigpath = os.path.join(binpath, "rivet-config")
         rivetpypath = commands.getoutput(rivetconfigpath + " --pythonpath")
         sys.path.append(rivetpypath)
         import rivet
     except:
         sys.stderr.write("The rivet Python module could not be loaded: is your PYTHONPATH set correctly?\n")
         sys.exit(5)

 rivet.util.check_python_version()
 rivet.util.set_process_name("rivet-merge")

 import time, datetime, logging, signal

 ## Parse command line options
 import argparse
 parser = argparse.ArgumentParser(description=__doc__)

 extragroup = parser.add_argument_group("Run settings")
 extragroup.add_argument("YODAFILES", nargs="+", help="data files to merge")
 extragroup.add_argument("-o", "--output-file", dest="OUTPUTFILE", default="Rivet.yoda",
                         help="specify the output histo file path (default = %(default)s)")
 extragroup.add_argument("-e", "--equiv", dest="EQUIV", action="store_true", default=False,
                         help="assume that the yoda files are equivalent but statistically independent (default = assume that different files contain different processes)")
 extragroup.add_argument("-O", "--merge-option", dest="MERGEOPTIONS", action="append", default=[],
                         help="specify an analysis option name where different options should be merged into the default analysis.")

 args = parser.parse_args()

 ############################
 ## Actual analysis runs

 ## Get all analysis names
-all_analyses = rivet.AnalysisLoader.analysisNames()
-for aname in all_analyses:
-    ana = rivet.AnalysisLoader.getAnalysis(aname)
-    if ana.validation():
-        print(ana.validation())
+#all_analyses = rivet.AnalysisLoader.analysisNames()
+#for aname in all_analyses:
+ananame = "MC_Cent_pPb_Eta"
+ana = rivet.AnalysisLoader.getAnalysis(ananame)
+for line in ana.validation():
+    line = line.replace("$A", ananame)
+    sublines = line.split(";")
+    targets = sublines[0].split(" ")
+    targetname = targets[0]
+    hepmcname = targets[1]
+    if len(sublines) == 1:
+        print(targetname + ".yoda: " + hepmcname + ".yoda " + " ".join(targets[2:]))
+        print("\tyoda2yoda -m " + targetname + " " + hepmcname + ".yoda -o " + targetname + ".yoda")
+    else:
+        print(targetname + ".yoda: "
+              + hepmcname + " " + " ".join(targets[2:]))
+        for subline in sublines[1:]:
+            print("\t" + subline.strip())
-
-
diff --git a/include/Rivet/AnalysisInfo.hh b/include/Rivet/AnalysisInfo.hh
--- a/include/Rivet/AnalysisInfo.hh
+++ b/include/Rivet/AnalysisInfo.hh
@@ -1,355 +1,355 @@
 // -*- C++ -*-
 #ifndef RIVET_AnalysisInfo_HH
 #define RIVET_AnalysisInfo_HH

 #include "Rivet/Config/RivetCommon.hh"
 #include <map>

 namespace Rivet {

   class AnalysisInfo {
   public:

     /// Static factory method: returns null pointer if no metadata found
     static unique_ptr<AnalysisInfo> make(const std::string& name);

     /// @name Standard constructors and destructors.
     //@{

     /// The default constructor.
     AnalysisInfo() { clear(); }

     /// The destructor.
     ~AnalysisInfo() { }

     //@}

   public:

     /// @name Metadata
     /// Metadata is used for querying from the command line and also for
     /// building web pages and the analysis pages in the Rivet manual.
     //@{

     /// Get the name of the analysis. By default this is computed using the
     /// experiment, year and Inspire/Spires ID metadata methods.
     std::string name() const {
       if (!_name.empty()) return _name;
       if (!experiment().empty() && !year().empty()) {
         if (!inspireId().empty()) {
           return experiment() + "_" + year() + "_I" + inspireId();
         } else if (!spiresId().empty()) {
           return experiment() + "_" + year() + "_S" + spiresId();
         }
       }
       return "";
     }

     /// Set the name of the analysis.
     void setName(const std::string& name) { _name = name; }

     /// Get the reference data name of the analysis (if different from plugin name).
     std::string getRefDataName() const {
       if (!_refDataName.empty()) return _refDataName;
       return name();
     }

     /// Set the reference data name of the analysis (if different from plugin name).
     void setRefDataName(const std::string& name) { _refDataName = name; }

     /// Get the Inspire (SPIRES replacement) ID code for this analysis.
     const std::string& inspireId() const { return _inspireId; }

     /// Set the Inspire (SPIRES replacement) ID code for this analysis.
     void setInspireId(const std::string& inspireId) { _inspireId = inspireId; }

     /// Get the SPIRES ID code for this analysis.
     const std::string& spiresId() const { return _spiresId; }

     /// Set the SPIRES ID code for this analysis.
     void setSpiresId(const std::string& spiresId) { _spiresId = spiresId; }

     /// @brief Names & emails of paper/analysis authors.
     /// Names and email of authors in 'NAME \<EMAIL\>' format. The first
     /// name in the list should be the primary contact person.
     const std::vector<std::string>& authors() const { return _authors; }

     /// Set the author list.
     void setAuthors(const std::vector<std::string>& authors) { _authors = authors; }

     /// @brief Get a short description of the analysis.
     /// Short (one sentence) description used as an index entry.
     /// Use @a description() to provide full descriptive paragraphs
     /// of analysis details.
     const std::string& summary() const { return _summary; }

     /// Set the short description for this analysis.
     void setSummary(const std::string& summary) { _summary = summary; }

     /// @brief Get a full description of the analysis.
     /// Full textual description of this analysis, what it is useful for,
     /// what experimental techniques are applied, etc. Should be treated
     /// as a chunk of restructuredText (http://docutils.sourceforge.net/rst.html),
     /// with equations to be rendered as LaTeX with amsmath operators.
     const std::string& description() const { return _description; }

     /// Set the full description for this analysis.
     void setDescription(const std::string& description) { _description = description; }

     /// @brief Information about the events needed as input for this analysis.
     /// Event types, energies, kinematic cuts, particles to be considered
     /// stable, etc. etc. Should be treated as a restructuredText bullet list
     /// (http://docutils.sourceforge.net/rst.html)
     const std::string& runInfo() const { return _runInfo; }

     /// Set the full description for this analysis.
     void setRunInfo(const std::string& runInfo) { _runInfo = runInfo; }

     /// Beam particle types
     const std::vector<PdgIdPair>& beams() const { return _beams; }

     /// Set beam particle types
     void setBeams(const std::vector<PdgIdPair>& beams) { _beams = beams; }

     /// Sets of valid beam energies
     const std::vector<std::pair<double,double> >& energies() const { return _energies; }

     /// Set the valid beam energies
     void setEnergies(const std::vector<std::pair<double,double> >& energies) { _energies = energies; }

     /// Experiment which performed and published this analysis.
     const std::string& experiment() const { return _experiment; }

     /// Set the experiment which performed and published this analysis.
     void setExperiment(const std::string& experiment) { _experiment = experiment; }

     /// Collider on which the experiment ran.
     const std::string& collider() const { return _collider; }

     /// Set the collider on which the experiment ran.
     void setCollider(const std::string& collider) { _collider = collider; }

     /// @brief When the original experimental analysis was published.
     /// When the refereed paper on which this is based was published,
     /// according to SPIRES.
     const std::string& year() const { return _year; }

     /// Set the year in which the original experimental analysis was published.
     void setYear(const std::string& year) { _year = year; }

     /// The integrated data luminosity of the data set
     const std::string& luminosityfb() const { return _luminosityfb; }

     /// Set the integrated data luminosity of the data set
     void setLuminosityfb(const std::string& luminosityfb) { _luminosityfb = luminosityfb; }

     /// Journal and preprint references.
     const std::vector<std::string>& references() const { return _references; }

     /// Set the journal and preprint reference list.
     void setReferences(const std::vector<std::string>& references) { _references = references; }

     /// Analysis Keywords for grouping etc
     const std::vector<std::string>& keywords() const { return _keywords; }

     /// BibTeX citation key for this article.
     const std::string& bibKey() const { return _bibKey;}

     /// Set the BibTeX citation key for this article.
     void setBibKey(const std::string& bibKey) { _bibKey = bibKey; }

     /// BibTeX citation entry for this article.
     const std::string& bibTeX() const { return _bibTeX; }

     /// Set the BibTeX citation entry for this article.
     void setBibTeX(const std::string& bibTeX) { _bibTeX = bibTeX; }

     /// Whether this analysis is trusted (in any way!)
     const std::string& status() const { return _status; }

     /// Set the analysis code status.
     void setStatus(const std::string& status) { _status = status; }

     /// Any work to be done on this analysis.
     const std::vector<std::string>& todos() const { return _todos; }

     /// Set the to-do list.
     void setTodos(const std::vector<std::string>& todos) { _todos = todos; }

     /// Get the option list.
     const std::vector<std::string>& options() const { return _options; }

     /// Check if the given option is valid.
     bool validOption(std::string key, std::string val) const;

     /// Set the option list.
     void setOptions(const std::vector<std::string>& opts) {
       _options = opts;
       buildOptionMap();
     }

     /// Build a map of options to facilitate checking.
     void buildOptionMap();

     /// List a series of command lines to be used for validation
-    const vector<string> & validation() const {
+    const std::vector<std::string> & validation() const {
       return _validation;
     }

     /// Return true if this analysis needs to know the process cross-section.
     bool needsCrossSection() const { return _needsCrossSection; }

     /// Set whether this analysis needs to know the process cross-section.
     void setNeedsCrossSection(bool needXsec) { _needsCrossSection = needXsec; }

     /// Return true if finalize() can be run multiple times for this analysis.
     bool reentrant() const { return _reentrant; }

     /// Set whether finalize() can be run multiple times for this analysis.
     void setReentrant(bool ree = true) { _reentrant = ree; }

     /// Return true if validated
     bool validated() const { return statuscheck("VALIDATED"); }

     /// Return true if preliminary
     bool preliminary() const { return statuscheck("PRELIMINARY"); }

     /// Return true if obsolete
     bool obsolete() const { return statuscheck("OBSOLETE"); }

     /// Return true if unvalidated
     bool unvalidated() const { return statuscheck("UNVALIDATED"); }

     /// Return true if includes random variations
     bool random() const { return statuscheck("RANDOM"); }

     /// Return true if the analysis uses generator-dependent
     /// information.
     bool unphysical() const { return statuscheck("UNPHYSICAL"); }

     /// Check if refdata comes automatically from HepData.
     bool hepdata() const { return !statuscheck("NOHEPDATA"); }

     /// Check if this analysis can handle multiple weights.
     bool multiweight() const { return !statuscheck("SINGLEWEIGHT"); }

     bool statuscheck(string word) const {
       auto pos = _status.find(word);
       if ( pos == string::npos ) return false;
       if ( pos > 0 && isalnum(_status[pos - 1]) ) return false;
       if ( pos + word.length() < _status.length() &&
            isalnum(_status[pos + word.length()]) ) return false;
       return true;
     }

     //@}

   private:

     std::string _name;
     std::string _refDataName;
     std::string _spiresId, _inspireId;
     std::vector<std::string> _authors;
     std::string _summary;
     std::string _description;
     std::string _runInfo;
     std::string _experiment;
     std::string _collider;
     std::vector<std::pair<PdgId,PdgId> > _beams;
     std::vector<std::pair<double,double> > _energies;
     std::string _year;
     std::string _luminosityfb;
     std::vector<std::string> _references;
     std::vector<std::string> _keywords;
     std::string _bibKey;
     std::string _bibTeX;
     //std::string _bibTeXBody; ///< Was thinking of avoiding duplication of BibKey...
     std::string _status;
     std::vector<std::string> _todos;
     bool _needsCrossSection;

     std::vector<std::string> _options;
     std::map< std::string, std::set<std::string> > _optionmap;

     std::vector<std::string> _validation;

     bool _reentrant;

     void clear() {
       _name = "";
       _refDataName = "";
       _spiresId = "";
       _inspireId = "";
       _authors.clear();
       _summary = "";
       _description = "";
       _runInfo = "";
       _experiment = "";
       _collider = "";
       _beams.clear();
       _energies.clear();
       _year = "";
       _luminosityfb = "";
       _references.clear();
       _keywords.clear();
       _bibKey = "";
       _bibTeX = "";
       //_bibTeXBody = "";
       _status = "";
       _todos.clear();
       _needsCrossSection = false;
       _options.clear();
       _optionmap.clear();
       _validation.clear();
       _reentrant = false;
     }

   };

   /// String representation
   std::string toString(const AnalysisInfo& ai);

   /// Stream an AnalysisInfo as a text description
   inline std::ostream& operator<<(std::ostream& os, const AnalysisInfo& ai) {
     os << toString(ai);
     return os;
   }

 }

 #endif
diff --git a/pyext/rivet/core.pyx b/pyext/rivet/core.pyx
--- a/pyext/rivet/core.pyx
+++ b/pyext/rivet/core.pyx
@@ -1,246 +1,250 @@
 # distutils: language = c++

 cimport rivet as c
 from cython.operator cimport dereference as deref

 # Need to be careful with memory management -- perhaps use the base object that
 # we used in YODA?
 cdef extern from "<utility>" namespace "std" nogil:
     cdef c.unique_ptr[c.Analysis] move(c.unique_ptr[c.Analysis])

 cdef class AnalysisHandler:
     cdef c.AnalysisHandler *_ptr

     def __cinit__(self):
         self._ptr = new c.AnalysisHandler()

     def __del__(self):
         del self._ptr

     def setIgnoreBeams(self, ignore=True):
         self._ptr.setIgnoreBeams(ignore)

     def addAnalysis(self, name):
         self._ptr.addAnalysis(name.encode('utf-8'))
         return self

     def analysisNames(self):
         anames = self._ptr.analysisNames()
         return [ a.decode('utf-8') for a in anames ]

     # def analysis(self, aname):
     #     cdef c.Analysis* ptr = self._ptr.analysis(aname)
     #     cdef Analysis pyobj = Analysis.__new__(Analysis)
     #     if not ptr:
     #         return None
     #     pyobj._ptr = ptr
     #     return pyobj

     def readData(self, name):
         self._ptr.readData(name.encode('utf-8'))

     def writeData(self, name):
         self._ptr.writeData(name.encode('utf-8'))

     def crossSection(self):
         return self._ptr.crossSection()

     def finalize(self):
         self._ptr.finalize()

     def dump(self, file, period):
         self._ptr.dump(file, period)

     def mergeYodas(self, filelist, delopts, equiv):
         self._ptr.mergeYodas(filelist, delopts, equiv)


 cdef class Run:
     cdef c.Run *_ptr

     def __cinit__(self, AnalysisHandler h):
         self._ptr = new c.Run(h._ptr[0])

     def __del__(self):
         del self._ptr

     def setCrossSection(self, double x):
         self._ptr.setCrossSection(x)
         return self

     def setListAnalyses(self, choice):
         self._ptr.setListAnalyses(choice)
         return self

     def init(self, name, weight=1.0):
         return self._ptr.init(name.encode('utf-8'), weight)

     def openFile(self, name, weight=1.0):
         return self._ptr.openFile(name.encode('utf-8'), weight)

     def readEvent(self):
         return self._ptr.readEvent()

     # def skipEvent(self):
     #     return self._ptr.skipEvent()

     def processEvent(self):
         return self._ptr.processEvent()

     def finalize(self):
         return self._ptr.finalize()


 cdef class Analysis:
     cdef c.unique_ptr[c.Analysis] _ptr

     def __init__(self):
         raise RuntimeError('This class cannot be instantiated')

     def requiredBeams(self):
         return deref(self._ptr).requiredBeams()

     def requiredEnergies(self):
         return deref(self._ptr).requiredEnergies()

     def keywords(self):
         kws = deref(self._ptr).keywords()
         return [ k.decode('utf-8') for k in kws ]

+    def validation(self):
+        vld = deref(self._ptr).validation()
+        return [ k.decode('utf-8') for k in vld ]
+
     def authors(self):
         auths = deref(self._ptr).authors()
         return [ a.decode('utf-8') for a in auths ]

     def bibKey(self):
         return deref(self._ptr).bibKey().decode('utf-8')

     def name(self):
         return deref(self._ptr).name().decode('utf-8')

     def bibTeX(self):
         return deref(self._ptr).bibTeX().decode('utf-8')

     def references(self):
         refs = deref(self._ptr).references()
         return [ r.decode('utf-8') for r in refs ]

     def collider(self):
         return deref(self._ptr).collider().decode('utf-8')

     def description(self):
         return deref(self._ptr).description().decode('utf-8')

     def experiment(self):
         return deref(self._ptr).experiment().decode('utf-8')

     def inspireId(self):
         return deref(self._ptr).inspireId().decode('utf-8')

     def spiresId(self):
         return deref(self._ptr).spiresId().decode('utf-8')

     def runInfo(self):
         return deref(self._ptr).runInfo().decode('utf-8')

     def status(self):
         return deref(self._ptr).status().decode('utf-8')

     def summary(self):
         return deref(self._ptr).summary().decode('utf-8')

     def year(self):
         return deref(self._ptr).year().decode('utf-8')

     def luminosityfb(self):
         return deref(self._ptr).luminosityfb().decode('utf-8')

 #cdef object LEVELS = dict(TRACE = 0, DEBUG = 10, INFO = 20, WARN = 30, WARNING = 30, ERROR = 40, CRITICAL = 50, ALWAYS = 50)


 cdef class AnalysisLoader:

     @staticmethod
     def analysisNames():
         names = c.AnalysisLoader_analysisNames()
         return [ n.decode('utf-8') for n in names ]

     @staticmethod
     def getAnalysis(name):
         name = name.encode('utf-8')
         cdef c.unique_ptr[c.Analysis] ptr = c.AnalysisLoader_getAnalysis(name)
         cdef Analysis pyobj = Analysis.__new__(Analysis)
         if not ptr:
             return None
         pyobj._ptr = move(ptr)
         # Create python object
         return pyobj


 ## Convenience versions in main rivet namespace
 def analysisNames():
     return AnalysisLoader.analysisNames()

 def getAnalysis(name):
     return AnalysisLoader.getAnalysis(name)


 ## Path functions
 def getAnalysisLibPaths():
     ps = c.getAnalysisLibPaths()
     return [ p.decode('utf-8') for p in ps ]

 def setAnalysisLibPaths(xs):
     bs = [ x.encode('utf-8') for x in xs ]
     c.setAnalysisLibPaths(bs)

 def addAnalysisLibPath(path):
     c.addAnalysisLibPath(path.encode('utf-8'))

 def setAnalysisDataPaths(xs):
     bs = [ x.encode('utf-8') for x in xs ]
     c.setAnalysisDataPaths(bs)

 def addAnalysisDataPath(path):
     c.addAnalysisDataPath(path.encode('utf-8'))

 def getAnalysisDataPaths():
     ps = c.getAnalysisDataPaths()
     return [ p.decode('utf-8') for p in ps ]

 def findAnalysisDataFile(q):
     f = c.findAnalysisDataFile(q.encode('utf-8'))
     return f.decode('utf-8')

 def getAnalysisRefPaths():
     ps = c.getAnalysisRefPaths()
     return [ p.decode('utf-8') for p in ps ]

 def findAnalysisRefFile(q):
     f = c.findAnalysisRefFile(q.encode('utf-8'))
     return f.decode('utf-8')

 def getAnalysisInfoPaths():
     ps = c.getAnalysisInfoPaths()
     return [ p.decode('utf-8') for p in ps ]

 def findAnalysisInfoFile(q):
     f = c.findAnalysisInfoFile(q.encode('utf-8'))
     return f.decode('utf-8')

 def getAnalysisPlotPaths():
     ps = c.getAnalysisPlotPaths()
     return [ p.decode('utf-8') for p in ps ]

 def findAnalysisPlotFile(q):
     f = c.findAnalysisPlotFile(q.encode('utf-8'))
     return f.decode('utf-8')


 def version():
     return c.version().decode('utf-8')


 def setLogLevel(name, level):
     c.setLogLevel(name.encode('utf-8'), level)
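
Editor's note, not part of the patch: the sketch below mirrors the rule-generation logic added to bin/rivet-mkvaldir above, to illustrate how a Validation: entry such as the one added to MC_Cent_pPb_Eta.info expands into Makefile rules. The standalone function name validation_rules and its packaging are hypothetical; only the parsing convention ($A substitution, first field = target, second field = HepMC sample, remaining fields = extra prerequisites, commands after the first ';') is taken from the patch.

    # Editorial sketch (hypothetical helper): expand raw "Validation:" lines,
    # as returned by Analysis.validation(), into Makefile rule strings.
    def validation_rules(ananame, lines):
        rules = []
        for line in lines:
            line = line.replace("$A", ananame)   # substitute the analysis name
            sublines = line.split(";")           # explicit commands follow the first ';'
            targets = sublines[0].split(" ")
            targetname, hepmcname = targets[0], targets[1]
            if len(sublines) == 1:
                # No explicit command: filter the named analysis out of the sample yoda.
                rules.append(targetname + ".yoda: " + hepmcname + ".yoda " + " ".join(targets[2:]))
                rules.append("\tyoda2yoda -m " + targetname + " " + hepmcname + ".yoda -o " + targetname + ".yoda")
            else:
                rules.append(targetname + ".yoda: " + hepmcname + " " + " ".join(targets[2:]))
                for subline in sublines[1:]:
                    rules.append("\t" + subline.strip())
        return rules

    # For the MC_Cent_pPb_Eta entry above this yields a rule for the target
    # MC_Cent_pPb_Eta-GEN.yoda, depending on the LHC-pPb-5-minbias sample and
    # MC_Cent_pPb_Calib.yoda, whose command runs
    # "rivet ... -a MC_Cent_pPb_Eta:cent=GEN -o MC_Cent_pPb_Eta.yoda".
    print("\n".join(validation_rules("MC_Cent_pPb_Eta",
        ["$A-GEN LHC-pPb-5-minbias MC_Cent_pPb_Calib.yoda;"
         " rivet LHC-pPb-5-minbias -p MC_Cent_pPb_Calib.yoda -a $A:cent=GEN -o $A.yoda"])))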