diff --git a/.hgignore b/.hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -1,70 +1,72 @@
 ~$
 ^run.*$
 \.orig$
 \.(o|Po|lo|Plo|la|a|so|dylib|pyc|tar\.bz2|tar\.gz|fifo|hepmc)$
 ^config\.guess$
 ^config\.status$
 ^config\.sub$
 ^config\.log$
 ^configure$
 ^depcomp$
 ^compile$
 ^ar-lib$
 ^install-sh$
 ^INSTALL$
 ^libtool$
 ^test-driver$
 ^ltmain\.sh$
 ^m4/ltoptions\.m4$
 ^m4/ltsugar\.m4$
 ^m4/ltversion\.m4$
 ^m4/lt~obsolete\.m4$
 ^missing$
 ^autom4te\.cache$
 ^include/Rivet/Config/stamp-h.$
 ^tmp$
 ^rivet.pc$
 Makefile$
 Makefile\.in$
 \.(pdf|toc|bbl|blg|sty|bib|html|tex)$
 Doxyfile$
 \.libs$
 \.deps$
 #Rivet.yoda
 #.*/Rivet\.yoda
 ^bin/rivet-buildplugin$
 ^bin/rivet-config$
 ^bin/rivet-nopy$
 ^aclocal\.m4$
 ^pyext/build$
 ^pyext/rivet/core.cpp$
 ^pyext/rivet/rivetwrap\.py$
 ^pyext/rivet/rivetwrap_wrap\.cc$
 ^pyext/rivet/fix-out-of-source$
 ^pyext/setup\.py$
 ^rivetenv\.(sh|csh)$
 ^test/test(Api|Cmp|NaN)$
 ^include/Rivet/Config/DummyConfig\.hh\.in$
 ^include/Rivet/Config/BuildOptions\.hh$
 ^include/Rivet/Config/DummyConfig\.hh$
 ^include/Rivet/Config/RivetConfig\.hh$
 ^doc/.*\.(log|aux|out)$
 ^doc/diffanas$
 ^test/log$
 ^test/.*\.log$
 ^test/out\.yoda$
 ^test/NaN.aida$
 ^test/Rivet.yoda$
 ^test/testBoost$
 ^test/.*\.trs$
 ^test/testMatVec$
 ^test/testMath$
+^test/testBeams$
 ^dev$
 ^devval$
 ^newanalyses$
 ^.*plots$
 ^a\.out$
 ^a\.out\.dSYM$
 ^Rivet-.\..\..$
 ^local/.*$
 ^(for|analyses)\d\d\d$
+^src/Analyses/.*__all\.cc$
diff --git a/ChangeLog b/ChangeLog
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5717 +1,5813 @@
+2016-12-01  Andy Buckley  <andy.buckley@cern.ch>
+
+	* Adding ALICE_2012_I1116147 (eta and pi0 pTs and ratio) and
+	ATLAS_2011_I929691 (7 TeV jet frag)
+
+2016-11-30  Andy Buckley  <andy.buckley@cern.ch>
+
+	* Fix bash bugs in rivet-buildplugin, including fixing the --cmd mode.
+
+2016-11-28  Andy Buckley  <andy.buckley@cern.ch>
+
+	* Add LHC Run 2 BSM analyses ATLAS_2016_CONF_2016_037 (3-lepton
+	and same-sign 2-lepton), ATLAS_2016_CONF_2016_054 (1-lepton +
+	jets), ATLAS_2016_CONF_2016_078 (ICHEP jets + MET),
+	ATLAS_2016_CONF_2016_094 (1-lepton + many jets), CMS_2013_I1223519
+	(alphaT + b-jets), and CMS_2016_PAS_SUS_16_14 (jets + MET).
+
+	* Provide convenience reversed-argument versions of apply and
+	declare methods, to allow presentational choice of declare syntax
+	in situations where the projection argument is very long, and
+	reduce requirements on the user's memory since this is one
+	situation in Rivet where there is no 'most natural' ordering
+	choice.
+
+2016-11-24  Andy Buckley  <andy.buckley@cern.ch>
+
+	* Adding pTvec() function to 4-vectors and ParticleBase.
+
+	* Fix --pwd option of the rivet script
+
+2016-11-21  Andy Buckley  <andy.buckley@cern.ch>
+
+	* Add weights and scaling to Cutflow/s.
+
+2016-11-19  Andy Buckley  <andy.buckley@cern.ch>
+
+	* Add Et(const ParticleBase&) unbound function.
+
+2016-11-18  Andy Buckley  <andy.buckley@cern.ch>
+
+	* Fix missing YAML quote mark in rivet-mkanalysis.
+
+2016-11-15  Andy Buckley  <andy.buckley@cern.ch>
+
+	* Fix constness requirements on ifilter_select() and Particle/JetEffFilter::operator().
+
+	* src/Analyses/ATLAS_2016_I1458270.cc: Fix inverted particle efficiency filtering.
+
+2016-10-24  Andy Buckley  <andy.buckley@cern.ch>
+
+	* Add rough ATLAS and CMS photon reco efficiency functions from
+	Delphes (ATLAS and CMS versions are identical, hmmm)
+
+2016-10-12  Andy Buckley  <andy.buckley@cern.ch>
+
+	* Tidying/fixing make-plots custom z-ticks code. Thanks to Dmitry Kalinkin.
+
+2016-10-03  Holger Schulz  <holger.schulz@cern.ch>
+
+	* Fix SpiresID -> InspireID in some analyses (show-analysis pointed to
+	non-existing web page)
+
+2016-09-29  Holger Schulz  <holger.schulz@cern.ch>
+
+	* Add Luminosity_fb to AnalysisInfo
+
+	* Added some keywords and Lumi to ATLAS_2016_I1458270
+
+2016-09-28  Andy Buckley  <andy.buckley@cern.ch>
+
+	* Merge the ATLAS and CMS from-Delphes electron and muon tracking
+	efficiency functions into generic trkeff functions -- this is how
+	it should be.
+
+	* Fix return type typo in Jet::bTagged(FN) templated method.
+
+	* Add eta and pT cuts to ATLAS truth b-jet definition.
+
+	* Use rounding rather than truncation in Cutflow percentage efficiency printing.
+
+2016-09-28  Frank Siegert  <frank.siegert@cern.ch>
+
+       * make-plots bugfix in y-axis labels for RatioPlotMode=deviation
+
+2016-09-27  Andy Buckley  <andy.buckley@cern.ch>
+
+       * Add vector and scalar pT (rather than Et) to MissingMomentum.
+
+2016-09-27  Holger Schulz  <holger.schulz@cern.ch>
+
+	* Analysis keyword machinery
+
+	* rivet -a @semileptonic
+
+	* rivet -a @semileptonic@^bdecays -a @semileptonic@^ddecays
+
 2016-09-22  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Release version 2.5.2
 
 2016-09-21  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add a requirement to DressedLeptons that the FinalState passed
 	as 'bareleptons' will be filtered to only contain charged leptons,
 	if that is not already the case. Thanks to Markus Seidel for the
 	suggestion.
 
 2016-09-21  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Add Simone Amoroso's plugin for hadron spectra (ALEPH_1995_I382179)
 
 	* Add Simone Amoroso's plugin for hadron spectra (OPAL_1993_I342766)
 
 2016-09-20  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Add CMS ttbar analysis from contrib, mark validated (CMS_2016_I1473674)
 
 	* Extend rivet-mkhtml --booklet to also work with pdfmerge
 
 2016-09-20  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Fix make-plots automatic YMax calculation, which had a typo from code cleaning (mea culpa!).
 
 	* Fix ChargedLeptons projection, which failed to exclude neutrinos!!! Thanks to Markus Seidel.
 
 	* Add templated FN filtering arg versions of the Jet::*Tags() and
 	Jet::*Tagged() functions.
 
 2016-09-18  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add CMS partonic top analysis (CMS_2015_I1397174)
 
 2016-09-18  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Add L3 xp analysis of eta mesons, thanks Simone (L3_1992_I336180)
 
 	* Add D0 1.8 TeV jet shapes analysis, thanks Simone (D0_1995_I398175)
 
 2016-09-17  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add has{Ancestor,Parent,Child,Descendant}With functions and
 	HasParticle{Ancestor,Parent,Child,Descendant}With functors.
 
 2016-09-16  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Add ATLAS 8TeV ttbar analysis from contrib (ATLAS_2015_I1404878)
 
 2016-09-16  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add particles(GenParticlePtr) to RivetHepMC.hh
 
 	* Add hasParent, hasParentWith, and hasAncestorWith to Particle.
 
 2016-09-15  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Add ATLAS 8TeV dijet analysis from contrib (ATLAS_2015_I1393758)
 
 	* Add ATLAS 8TeV 'number of tracks in jets' analysis from contrib (ATLAS_2016_I1419070)
 
 	* Add ATLAS 8TeV g->H->WW->enumunu analysis from contrib (ATLAS_2016_I1444991)
 
 2016-09-14  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Explicit std::toupper and std::tolower to make clang happy
 
 2016-09-14  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add ATLAS Run 2 0-lepton SUSY and monojet search papers (ATLAS_2016_I1452559, ATLAS_2016_I1458270)
 
 2016-09-13  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add experimental Cutflow and Cutflows objects for BSM cut tracking.
 
 	* Add 'direct' versions of any, all, none to Utils.hh, with an
 	implicit bool() transforming function.
 
 2016-09-13  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Add and mark validated B+ to omega analysis (BABAR_2013_I1116411)
 
 	* Add and mark validated D0 to pi- analysis (BABAR_2015_I1334693)
 
 	* Add a few more particle names and use PID names in recently added
 	  analyses
 
 	* Add Simone's OPAL b-frag analysis (OPAL_2003_I599181) after some
 	cleanup and heavy usage of new features
 
 	* Restructured DELPHI_2011_I890503 in the same manner --- picks up
 	a few more B-hadrons now (e.g. 20523 and such)
 
 	* Clean up and add ATLAS 8TeV MinBias (from contrib ATLAS_2016_I1426695)
 
 2016-09-12  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add a static constexpr DBL_NAN to Utils.hh for convenience, and
 	move some utils stuff out of MathHeader.hh
 
 2016-09-12  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Add count function to Tools/Utils.h
 
 	* Add and mark validated B0bar and Bminus-decay to pi analysis (BELLE_2013_I1238273)
 
 	* Add and mark validated B0-decay analysis (BELLE_2011_I878990)
 
 	* Add and mark validated B to D decay analysis (BELLE_2011_I878990)
 
 2016-09-08  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add C-array version of multi-target Analysis::scale() and normalize(), and fix (semantic) constness.
 
 	* Add == and != operators for cuts applied to integers.
 
 	* Add missing delta{Phi,Eta,Rap}{Gtr,Less} functors to ParticleBaseUtils.hh
 
 2016-09-07  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add templated functor filtering args to the Particle parent/child/descendent methods.
 
 2016-09-06  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add ATLAS Run 1 medium and tight electron ID efficiency functions.
 
 	* Update configure scripts to use newer (Py3-safe) Python testing macros.
 
 2016-09-02  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add isFirstWith(out), isLastWith(out) functions, and functor
 	wrappers, using Cut and templated function/functor args.
 
 	* Add Particle::parent() method.
 
 	* Add using import/typedef of HepMC *Ptr types (useful step for HepMC 2.07 and 3.00).
 
 	* Various typo fixes (and canonical renaming) in ParticleBaseUtils functor collection.
 
 	* Add ATLAS MV2c10 and MV2c20 b-tagging effs to SmearingFunctions.hh collection.
 
 2016-09-01  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add a PartonicTops projection.
 
 	* Add overloaded versions of the Event::allParticles() method with
 	selection Cut or templated selection function arguments.
 
 2016-08-25  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add rapidity scheme arg to DeltaR functor constructors.
 
 2016-08-23  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Provide an Analysis::bookCounter(d,x,y, title) function, for
 	convenience and making the mkanalysis template valid.
 
 	* Improve container utils functions, and provide combined
 	remove_if+erase filter_* functions for both select- and
 	discard-type selector functions.
 
 2016-08-22  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Bugfix in rivet-mkhtml (NoneType: ana.spiresID() --> spiresid)
 
 	* Added <numeric> include to Rivet/Tools/Utils.h to make gcc6 happy
 
 2016-08-22  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add efffilt() functions and Particle/JetEffFilt functors to SmearingFunctions.hh
 
 2016-08-20  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding filterBy methods for Particle and Jet which accept
 	generic boolean functions as well as the Cut specialisation.
 
 2016-08-18  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add a Jet::particles(Cut&) method, for inline filtering of jet constituents.
 
 	* Add 'conjugate' behaviours to container head and tail functions
 	via negative length arg values.
 
 2016-08-15  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add convenience headers for including all final-state and
 	smearing projections, to save user typing.
 
 2016-08-12  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add standard MET functions for ATLAS R1 (and currently copies
 	for R2 and CMS).
 
 	* Add lots of vector/container helpers for e.g. container slicing,
 	summing, and min/max calculation.
 
 	* Adapt SmearedMET to take *two* arguments, since SET is typically
 	used to calculate MET resolution.
 
 	* Adding functors for computing vector & ParticleBase differences
 	w.r.t. another vector.
 
 2016-08-12  Holger Schulz  <holger.schulz@cern.ch>
 
         * Implemented a few more cuts in prompt photon analysis
 	(CDF_1993_S2742446) but to no avail, the rise of the data towards
 	larger costheta values cannot be reproduced --- maybe this is a
 	candidate for more scrutiny and using the boosting machinery such that
 	the c.m. cuts can be done in a non-approximate way
 
 2016-08-11  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Rename CDF_2009_S8383952 to CDF_2009_I856131 due to invalid Spires
 	entry.
 
 	* Add InspireID to all analysis known by their Spires key
 
 2016-08-09  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Release 2.5.1
 
 2016-08-08  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add a simple MC_MET analysis for out-of-the-box MET distribution testing.
 
 2016-08-08  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Add DELPHI_2011_I890503 b-quark fragmentation function measurement,
 	which superseded DELPHI_2002_069_CONF_603. The latter is marked
 	OBSOLETE.
 
 2016-08-05  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Use Jet mass and energy smearing in CDF_1997_... six-jet analysis,
 	mark validated.
 
 	* Mark CDF_2001_S4563131 validated
 
 	* D0_1996_S3214044 --- cut on jet Et rather than pT, fix filling of
 	costheta and theta plots, mark validated. Concerning the jet
 	algorithm, I tried with the implementation of fastjet
 	fastjet/D0RunIConePlugin.hh but that really does not help.
 
 	* D0_1996_S3324664 --- fix normalisations, sorting jets properly now,
 	cleanup and mark validated.
 
 
 2016-08-04  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Use Jet mass and energy smearing in CDF_1996_S310 ... jet properties
 	analysis. Cleanup analysis and mark validated. Added some more run info.
 	The same for CDF_1996_S334... (pretty much the same cuts, different
 	observables).
 
 	* Minor fixes in SmearedJets projection
 
 2016-08-03  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Protect SmearedJets against loss of tagging information if a
 	momentum smearing function is used (rather than a dedicated Jet
 	smearing fn) via implicit casts.
 
 2016-08-02  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add SmearedMET projection, wrapping MissingMomentum.
 
 	* Include base truth-level projections in SmearedParticles/Jets compare() methods.
 
 2016-07-29  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Rename TOTEM_2012_002 to proper TOTEM_2012_I1220862 name.
 
 	* Remove conditional building of obsolete, preliminary and
 	unvalidated analyses. Now always built, since there are sufficient
 	warnings.
 
 2016-07-28  Holger Schulz  <holger.schulz@cern.ch>
 
         * Mark D0_2000... W pT analysis validated
 
 	* Mark LHCB_2011_S919... phi meson analysis validated
 
 2016-07-25  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add unbound accessors for momentum properties of ParticleBase objects.
 
 	* Add Rivet/Tools/ParticleBaseUtils.hh to collect tools like functors for particle & jet filtering.
 
 	* Add vector<ptr> versions of Analysis::scale() and ::normalize(), for batched scaling.
 
 	* Add Analysis::scale() and Analysis::divide() methods for Counter types.
 
 	* Utils.hh: add a generic sum() function for containers, and use auto in loop to support arrays.
 
 	* Set data path as well as lib path in scripts with --pwd option, and use abs path to $PWD.
 
 	* Add setAnalysisDataPaths and addAnalysisDataPath to RivetPaths.hh/cc and Python.
 
 	* Pass absolutized RIVET_DATA_PATH from rivet-mkhtml to rivet-cmphistos.
 
 2016-07-24  Holger Schulz  <holger.schulz@cern.ch>
 
         * Mark CDF_2008_S77... b jet shapes validated
 
 	* Added protection against low stats yoda exception
 	  in finalize for that analysis
 
 2016-07-22  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Fix newly introduced bug in make-plots which led to data point
 	markers being skipped for all but the last bin.
 
 2016-07-21  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add pid, abspid, charge, abscharge, charge3, and abscharge3 Cut
 	enums, handled by Particle cut targets.
 
 	* Add abscharge() and abscharge3() methods to Particle.
 
 	* Add optional Cut and duplicate-removal flags to Particle children & descendants methods.
 
 	* Add unbound versions of Particle is* and from* methods, for easier functor use.
 
 	* Add Particle::isPrompt() as a member rather than unbound function.
 
 	* Add protections against -ve mass from numerical precision errors in smearing functions.
 
 2016-07-20  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Move several internal system headers into the include/Rivet/Tools directory.
 
 	* Fix median-computing safety logic in ATLAS_2010_S8914702 and
 	tidy this and @todo markers in several similar analyses.
 
 	* Add to_str/toString and stream functions for Particle, and a bit
 	of Particle util function reorganisation.
 
 	* Add isStrange/Charm/Bottom PID and Particle functions.
 
 	* Add RangeError exception throwing from MathUtils.hh stats
 	functions if given empty/mismatched datasets.
 
 	* Add Rivet/Tools/PrettyPrint.hh, based on https://louisdx.github.io/cxx-prettyprint/
 
 	* Allow use of path regex group references in .plot file keyed values.
 
 2016-07-20  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Fix the --nskip behaviour on the main rivet script.
 
 2016-07-07  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Release version 2.5.0
 
 2016-07-01  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Fix pandoc interface flag version detection.
 
 2016-06-28  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Release version 2.4.3
 
 	* Add ATLAS_2016_I1468168 early ttbar fully leptonic fiducial
 	cross-section analysis at 13 TeV.
 
 2016-06-21  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add ATLAS_2016_I1457605 inclusive photon analysis at 8 TeV.
 
 2016-06-15  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add a --show-bibtex option to the rivet script, for convenient
 	outputting of a BibTeX db for the used analyses.
 
 2016-06-14  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add and rename 4-vector boost calculation methods: new methods
 	beta, betaVec, gamma & gammaVec are now preferred to the
 	deprecated boostVector method.
 
 2016-06-13  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add and use projection handling methods declare(proj, pname) and
 	apply<PROJ>(evt, pname) rather than the longer and explicitly
 	'projectiony' addProjection & applyProjection.
 
 	* Start using the DEFAULT_RIVET_ANALYSIS_CTOR macro (newly created
 	preferred alias to long-present DEFAULT_RIVET_ANA_CONSTRUCTOR)
 
 	* Add a DEFAULT_RIVET_PROJ_CLONE macro for implementing the
 	clone() method boiler-plate code in projections.
 
 2016-06-10  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add a NonPromptFinalState projection, and tweak the
 	PromptFinalState and unbound Particle functions a little in
 	response. May need some more finessing.
 
 	* Add user-facing aliases to ProjectionApplier add, get, and apply
 	methods... the templated versions of which can now be called
 	without using the word 'projection', which makes the function
 	names a bit shorter and pithier, and reduces semantic repetition.
 
 2016-06-10  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding ATLAS_2015_I1397635 Wt at 8 TeV analysis.
 
 	* Adding ATLAS_2015_I1390114 tt+b(b) at 8 TeV analysis.
 
 2016-06-09  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Downgrade some non-fatal error messages from ERROR to WARNING
 	status, because *sigh* ATLAS's software treats any appearance of
 	the word 'ERROR' in its log file as a reason to report the job as
 	failed (facepalm).
 
 2016-06-07  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding ATLAS 13 TeV minimum bias analysis, ATLAS_2016_I1419652.
 
 2016-05-30  Andy Buckley  <andy.buckley@cern.ch>
 
 	* pyext/rivet/util.py: Add pandoc --wrap/--no-wrap CLI detection
 	and batch conversion.
 
 	* bin/rivet: add -o as a more standard 'output' option flag alias to -H.
 
 2016-05-23  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Remove the last ref-data bin from table 16 of
 	ATLAS_2010_S8918562, due to data corruption. The corresponding
 	HepData record will be amended by ATLAS.
 
 2016-05-12  Holger Schulz  <holger.schulz@durham.ac.uk>
 
         * Mark ATLAS_2012_I1082009 as validated after exhaustive tests with
 	Pythia8 and Sherpa in inclusive QCD mode.
 
 2016-05-11  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Specialise return error codes from the rivet script.
 
 2016-05-11  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add Event::allParticles() to provide neater (but not *helpful*)
 	access to Rivet-wrapped versions of the raw particles in the
 	Event::genEvent() record, and hence reduce HepMC digging.
 
 2016-05-05  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Version 2.4.2 release!
 
 	* Update SLD_2002_S4869273 ref data to match publication erratum,
 	now updated in HepData. Thanks to Peter Skands for the report and
 	Mike Whalley / Graeme Watt for the quick fix and heads-up.
 
 2016-04-27  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add CMS_2014_I1305624 event shapes analysis, with standalone
 	variable calculation struct embedded in an unnamed namespace.
 
 2016-04-19  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Various clean-ups and fixes in ATLAS analyses using isolated
 	photons with median pT density correction.
 
 2016-04-18  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add transformBy(LT) methods to Particle and Jet.
 
 	* Add mkObjectTransform and mkFrameTransform factory methods to LorentzTransform.
 
 2016-04-17  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add null GenVertex protection in Particle children & descendants methods.
 
 2016-04-15  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add ATLAS_2015_I1397637, ATLAS 8 TeV boosted top cross-section vs. pT
 
 2016-04-14  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add a --no-histos argument to the rivet script.
 
 2016-04-13  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add ATLAS_2015_I1351916 (8 TeV Z FB asymmetry) and
 	ATLAS_2015_I1408516 (8 TeV Z phi* and pT) analyses, and their _EL,
 	_MU variants.
 
 2016-04-12  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Patch PID utils for ordering issues in baryon decoding.
 
 2016-04-11  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Actually implement ZEUS_2001_S4815815... only 10 years late!
 
 2016-04-08  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add a --guess-prefix flag to rivet-config, cf. fastjet-config.
 
 	* Add RIVET_DATA_PATH variable and related functions in C++ and
 	Python as a common first-fallback for RIVET_REF_PATH,
 	RIVET_INFO_PATH, and RIVET_PLOT_PATH.
 
 	* Add --pwd options to rivet-mkhtml and rivet-cmphistos
 
 2016-04-07  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Remove implicit conventional event rotation for HERA -- this
 	needs to be done explicitly from now.
 
 	* Add comBoost functions and methods to Beam.hh, and tidy
 	LorentzTransformation.
 
 	* Restructure Beam projection functions for beam particle and
 	sqrtS extraction, and add asqrtS functions.
 
 	* Rename and improve PID and Particle Z,A,lambda functions ->
 	nuclZ,nuclA,nuclNlambda.
 
 2016-04-05  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Improve binIndex function, with an optional argument to allow
 	overflow lookup, and add it to testMath.
 
 	* Adding setPE, setPM, setPtEtaPhiM, etc. methods and
 	corresponding mk* static methods to FourMomentum, as well as
 	adding more convenience aliases and vector attributes for
 	completeness. Coordinate conversion functions taken from
 	HEPUtils::P4. New attrs also mapped to ParticleBase.
 
 2016-03-29  Andy Buckley  <andy.buckley@cern.ch>
 
 	* ALEPH_1996_S3196992.cc, ATLAS_2010_S8914702.cc,
 	ATLAS_2011_I921594.cc, ATLAS_2011_S9120807.cc,
 	ATLAS_2012_I1093738.cc, ATLAS_2012_I1199269.cc,
 	ATLAS_2013_I1217867.cc, ATLAS_2013_I1244522.cc,
 	ATLAS_2013_I1263495.cc, ATLAS_2014_I1307756.cc,
 	ATLAS_2015_I1364361.cc, CDF_2008_S7540469.cc,
 	CMS_2015_I1370682.cc, MC_JetSplittings.cc, STAR_2006_S6870392.cc:
 	Updates for new FastJets interface, and other cleaning.
 
 	* Deprecate 'standalone' FastJets constructors -- they are
 	misleading.
 
 	* More improvements around jets, including unbound conversion and
 	filtering routines between collections of Particles, Jets, and
 	PseudoJets.
 
 	* Place 'Cut' forward declaration in a new Cuts.fhh header.
 
 	* Adding a Cuts::OPEN extern const (a bit more standard- and
 	constant-looking than Cuts::open())
 
 2016-03-28  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Improvements to FastJets constructors, including specification
 	of optional AreaDefinition as a constructor arg, disabling dodgy
 	no-FS constructors which I suspect don't work properly in the
 	brave new world of automatic ghost tagging, using a bit of
 	judicious constructor delegation, and completing/exposing use of
 	shared_ptr for internal memory management.
 
 2016-03-26  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Remove Rivet/Tools/RivetBoost.hh and Boost references from
 	rivet-config, rivet-buildplugin, and configure.ac. It's gone ;-)
 
 	* Replace Boost assign usage with C++11 brace initialisers. All
 	Boost use is gone from Rivet!
 
 	* Replace Boost lexical_cast and string algorithms.
 
 2016-03-25  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Bug-fix in semi-leptonic top selection of CMS_2015_I1370682.
 
 2016-03-12  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Allow multi-line major tick labels on make-plots linear x and y
 	axes. Linebreaks are indicated by \n in the .dat file.
 
 2016-03-09  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Release 2.4.1
 
 2016-03-03  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add a --nskip flag to the rivet command-line tool, to allow
 	processing to begin in the middle of an event file (useful for
 	batched processing of large files, in combination with --nevts)
 
 2016-03-03  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Add ATLAS 7 TeV event shapes in Z+jets analysis (ATLAS_2016_I1424838)
 
 2016-02-29  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Update make-plots to use multiprocessing rather than threading.
 
 	* Add FastJets::trimJet method, thanks to James Monk for the
 	suggestion and patch.
 
 	* Add new preferred name PID::charge3 in place of
 	PID::threeCharge, and also convenience PID::abscharge and
 	PID::abscharge3 functions -- all derived from changes in external
 	HEPUtils.
 
 	* Add analyze(const GenEvent*) and analysis(string&) methods to
 	AnalysisHandler, plus some docstring improvements.
 
 2016-02-23  Andy Buckley  <andy.buckley@cern.ch>
 
 	* New ATLAS_2015_I1394679 analysis.
 
 	* New MC_HHJETS analysis from Andreas Papaefstathiou.
 
 	* Ref data updates for ATLAS_2013_I1219109, ATLAS_2014_I1312627,
 	and ATLAS_2014_I1319490.
 
 	* Add automatic output paging to 'rivet --show-analyses'
 
 2016-02-16  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Apply cross-section unit fixes and plot styling improvements to
 	ATLAS_2013_I1217863 analyses, thanks to Christian Gutschow.
 
 	* Fix to rivet-cmphistos to avoid overwriting RatioPlotYLabel if
 	already set via e.g. the PLOT pseudo-file. Thanks to Johann Felix
 	v. Soden-Fraunhofen.
 
 2016-02-15  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add Analysis::bookCounter and some machinery in rivet-cmphistos
 	to avoid getting tripped up by unplottable (for now) data types.
 
 	* Add --font and --format options to rivet-mkhtml and make-plots,
 	to replace the individual flags used for that purpose. Not fully
 	cleaned up, but a necessary step.
 
 	* Add new plot styling options to rivet-cmphistos and
 	rivet-mkhtml. Thanks to Gavin Hesketh.
 
 	* Modify rivet-cmphistos and rivet-mkhtml to apply plot hiding if
 	*any* path component is hidden by an underscore prefix, as
 	implemented in AOPath, plus other tidying using new AOPath
 	methods.
 
 	* Add pyext/rivet/aopaths.py, containing AOPath object for central
 	& standard decoding of Rivet-standard analysis object path
 	structures.
 
 2016-02-12  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Update ParticleIdUtils.hh (i.e. PID:: functions) to use the
 	functions from the latest version of MCUtils' PIDUtils.h.
 
 2016-01-15  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Change rivet-cmphistos path matching logic from match to search
 	(user can add explicit ^ marker if they want match semantics).
 
 2015-12-20  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Improve linspace (and hence also logspace) precision errors by
 	using multiplication rather than repeated addition to build edge
 	list (thanks to Holger Schulz for the suggestion).
 
 2015-12-15  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add cmphistos and make-plots machinery for handling 'suffix'
 	variations on plot paths, currently just by plotting every line,
 	with the variations in a 70% faded tint.
 
 	* Add Beam::pv() method for finding the beam interaction primary
 	vertex 4-position.
 
 	* Add a new Particle::setMomentum(E,x,y,z) method, and an origin
 	position member which is automatically populated from the
 	GenParticle, with access methods corresponding to the momentum
 	ones.
 
 2015-12-10  Andy Buckley  <andy.buckley@cern.ch>
 
 	* make-plots: improve custom tick attribute handling, allowing
 	empty lists. Also, any whitespace now counts as a tick separator
 	-- explicit whitespace in labels should be done via ~ or similar
 	LaTeX markup.
 
 2015-12-04  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Pro-actively use -m/-M arguments when initially loading
 	histograms in mkhtml, *before* passing them to cmphistos.
 
 2015-12-03  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Move contains() and has_key() functions on STL containers from std to Rivet namespaces.
 
 	* Adding IsRef attributes to all YODA refdata files; this will be
 	used to replace the /REF prefix in Rivet v3 onwards. The migration
 	has also removed leading # characters from BEGIN/END blocks, as
 	per YODA format evolution: new YODA versions as required by
 	current Rivet releases are able to read both the old and new
 	formats.
 
 2015-12-02  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add handling of a command-line PLOT 'file' argument to rivet-mkhtml, cf. rivet-cmphistos.
 
 	* Improvements to rivet-mkhtml behaviour re. consistency with
 	rivet-cmphistos in how multi-part histo paths are decomposed into
 	analysis-name + histo name, and removal of 'NONE' strings.
 
 2015-11-30  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Relax rivet/plotinfo.py pattern matching on .plot file
 	components, to allow leading whitespace and around = signs, and to
 	make the leading # optional on BEGIN/END blocks.
 
 2015-11-26  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Write out intermediate histogram files by default, with event interval of 10k.
 
 2015-11-25  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Protect make-plots against lock-up due to partial pstricks command when there are no data points.
 
 2015-11-17  Andy Buckley  <andy.buckley@cern.ch>
 
 	* rivet-cmphistos: Use a ratio label that doesn't mention 'data' when plotting MC vs. MC.
 
 2015-11-12  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Tweak plot and subplot sizing defaults in make-plots so the
 	total canvas is always the same size by default.
 
 2015-11-10  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Handle 2D histograms better in rivet-cmphistos (since they can't be overlaid)
 
 2015-11-05  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Allow comma-separated analysis name lists to be passed to a single -a/--analysis/--analyses option.
 
 	* Convert namespace-global const variables to be static, to suppress compiler warnings.
 
 	* Use standard MAX_DBL and MAX_INT macros as a source for MAXDOUBLE and MAXINT, to suppress GCC5 warnings.
 
 2015-11-04  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Adding LHCB inelastic xsection measurement (LHCB_2015_I1333223)
 
 	* Adding ATLAS colour flow in ttbar->semileptonic measurement (ATLAS_2015_I1376945)
 
 2015-10-07  Chris Pollard  <cpollard@cern.ch>
 
 	* Release 2.4.0
 
 2015-10-06  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Adding CMS_2015_I1327224 dijet analysis (Mjj>2 TeV)
 
 2015-10-03  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Adding CMS_2015_I1346843 Z+gamma
 
 2015-09-30  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Important improvement in FourVector & FourMomentum: new reverse()
 	method to return a 4-vector in which only the spatial component
 	has been inverted cf. operator- which flips the t/E component as
 	well.
 
 2015-09-28  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Adding D0_2000_I503361 ZPT at 1800 GeV
 
 2015-09-29  Chris Pollard  <cpollard@cern.ch>
 
 	* Adding ATLAS_2015_CONF_2015_041
 
 2015-09-29  Chris Pollard  <cpollard@cern.ch>
 
 	* Adding ATLAS_2015_I1387176
 
 2015-09-29  Chris Pollard  <cpollard@cern.ch>
 
 	* Adding ATLAS_2014_I1327229
 
 2015-09-28  Chris Pollard  <cpollard@cern.ch>
 
 	* Adding ATLAS_2014_I1326641
 
 2015-09-28  Holger Schulz  <holger.schulz@durham.ac.uk>
 
	* Adding CMS_2013_I1122847 FB asymmetry in DY analysis
 
 2015-09-28  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding CMS_2015_I1385107 LHA pp 2.76 TeV track-jet underlying event.
 
 2015-09-27  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding CMS_2015_I1384119 LHC Run 2 minimum bias dN/deta with no B field.
 
 2015-09-25  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding TOTEM_2014_I1328627 forward charged density in eta analysis.
 
 2015-09-23  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add CMS_2015_I1310737 Z+jets analysis.
 
 	* Allow running MC_{W,Z}INC, MC_{W,Z}JETS as separate bare lepton
 	analyses.
 
 2015-09-23  Andy Buckley  <andy.buckley@cern.ch>
 
 	* FastJets now allows use of FastJet pure ghosts, by excluding
 	them from the constituents of Rivet Jet objects. Thanks to James
 	Monk for raising the issue and providing a patch.
 
 2015-09-15  Andy Buckley  <andy.buckley@cern.ch>
 
 	* More MissingMomentum changes: add optional 'mass target'
 	argument when retrieving the vector sum as a 4-momentum, with the
 	mass defaulting to 0 rather than sqrt(sum(E)^2 - sum(p)^2).
 
 	* Require Boost 1.55 for robust compilation, as pointed out by
 	Andrii Verbytskyi.
 
 2015-09-10  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Allow access to MissingMomentum projection via WFinder.
 
 	* Adding extra methods to MissingMomentum, to make it more user-friendly.
 
 2015-09-09  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Fix factor of 2 in LHCB_2013_I1218996 normalisation, thanks to
 	Felix Riehn for the report.
 
 2015-08-20  Frank Siegert  <frank.siegert@cern.ch>
 
 	* Add function to ZFinder to retrieve all fiducial dressed
 	leptons, e.g. to allow vetoing on a third one (proposed by
 	Christian Gutschow).
 
 2015-08-18  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Rename xs and counter AOs to start with underscores, and modify
 	rivet-cmphistos to skip AOs whose basenames start with _.
 
 2015-08-17  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add writing out of cross-section and total event counter by
 	default. Need to add some name protection to avoid them being
 	plotted.
 
 2015-08-16  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add templated versions of Analysis::refData() to use data types
 	other than Scatter2DPtr, and convert the cached ref data store to
 	generic AnalysisObjectPtrs to make it possible.
 
 2015-07-29  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add optional Cut arguments to all the Jet tag methods.
 
 	* Add exception handling and pre-emptive testing for a
 	non-writeable output directory (based on patch from Lukas
 	Heinrich).
 
 2015-07-24  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Version 2.3.0 release.
 
 2015-07-02  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Tidy up ATLAS higgs combination analysis.
 	* Add ALICE kaon, pion analysis (ALICE_2015_I1357424)
 	* Add ALICE strange baryon analysis (ALICE_2014_I1300380)
 	* Add CDF ZpT measurement in Z->ee events analysis (CDF_2012_I1124333)
 	* Add validated ATLAS W+charm measurement (ATLAS_2014_I1282447)
 	* Add validated CMS jet and dijet analysis (CMS_2013_I1208923)
 
 2015-07-01  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Define a private virtual operator= on Projection, to block
 	'sliced' accidental value copies of derived class instances.
 
 	* Add new muon-in-jet options to FastJet constructors, pass that
 	and invisibles enums correctly to JetAlg, tweak the default
 	strategies, and add a FastJets constructor from a
 	fastjet::JetDefinition (while deprecating the plugin-by-reference
 	constructor).
 
 2015-07-01  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Add D0 phi* measurement (D0_2015_I1324946).
 
 	* Remove WUD and MC_PHOTONJETUE analyses
 
	* Don't abort ATLAS_2015_I1364361 if there is no stable Higgs;
	  print a warning instead and veto the event
 
 2015-07-01  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add all, none, from-decay muon filtering options to JetAlg and
 	FastJets.
 
 	* Rename NONPROMPT_INVISIBLES to DECAY_INVISIBLES for clarity &
 	extensibility.
 
 	* Remove FastJets::ySubJet, splitJet, and filterJet methods --
 	they're BDRS-paper-specific and you can now use the FastJet
 	objects directly to do this and much more.
 
 	* Adding InvisiblesStrategy to JetAlg, using it rather than a bool
 	in the useInvisibles method, and updating FastJets to use this
 	approach for its particle filtering and to optionally use the enum
 	in the constructor arguments. The new default invisibles-using
 	behaviour is to still exclude _prompt_ invisibles, and the default
 	is still to exclude them all. Only one analysis
 	(src/Analyses/STAR_2006_S6870392.cc) required updating, since it
 	was the only one to be using the FastJets legacy seed_threshold
 	constructor argument.
 
 	* Adding isVisible method to Particle, taken from
 	VisibleFinalState (which now uses this).
 
 2015-06-30  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Marking many old & superseded ATLAS analyses as obsolete.
 
 	* Adding cmpMomByMass and cmpMomByAscMass sorting functors.
 
 	* Bump version to 2.3.0 and require YODA > 1.4.0 (current head at
 	time of development).
 2015-06-08  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add handling of -m/-M flags on rivet-cmphistos and rivet-mkhtml,
 	moving current rivet-mkhtml -m/-M to -a/-A (for analysis name
 	pattern matching). Requires YODA head (will be YODA 1.3.2 of
 	1.4.0).
 
 	* src/Analyses/ATLAS_2015_I1364361.cc: Now use the built-in prompt
 	photon selecting functions.
 
 	* Tweak legend positions in MC_JETS .plot file.
 
 	* Add a bit more debug output from ZFinder and WFinder.
 
 2015-05-24  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Normalisation discussion concerning ATLAS_2014_I1325553
 	is resolved. Changed YLabel accordingly.
 
 2015-05-19  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Add (preliminary) ATLAS combined Higgs analysis
 	(ATLAS_2015_I1364361). Data will be updated and more
 	histos added as soon as paper is published in journal.
	For now using data taken from public resource
 	https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PAPERS/HIGG-2014-11/
 
 2015-05-19  Peter Richardson <peter.richardson@durham.ac.uk>
 
 	* Fix ATLAS_2014_I1325553 normalisation of histograms was wrong by
 	factor of two |y| vs y problem
 
 2015-05-01  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Fix MC_HJETS/HINC/HKTSPLITTINGS analyses to (ab)use the ZFinder
 	with a mass range of 115-135 GeV and a mass target of 125 GeV (was
 	previously 115-125 and mass target of mZ)
 
 2015-04-30  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Removing uses of boost::assign::list_of, preferring the existing
 	comma-based assign override for now, for C++11 compatibility.
 
 	* Convert MC_Z* analysis finalize methods to use scale() rather than normalize().
 
 2015-04-01  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Add CMS 7 TeV rapidity gap analysis (CMS_2015_I1356998).
 
 	* Remove FinalState Projection.
 
 2015-03-30  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Add ATLAS 7 TeV photon + jets analysis (ATLAS_2013_I1244522).
 
 2015-03-26  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Updates for HepMC 2.07 interface constness improvements.
 
 2015-03-25  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Add ATLAS double parton scattering in W+2j analysis (ATLAS_2013_I1216670).
 
 2015-03-24  Andy Buckley  <andy.buckley@cern.ch>
 
 	* 2.2.1 release!
 
 2015-03-23  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Add ATLAS differential Higgs analysis (ATLAS_2014_I1306615).
 
 2015-03-19  Chris Pollard <cpollard@cern.ch>
 
 	* Add ATLAS V+gamma analyses (ATLAS_2013_I1217863)
 
 2015-03-20  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding ATLAS R-jets analysis i.e. ratios of W+jets and Z+jets
 	observables (ATLAS_2014_I1312627 and _EL, _MU variants)
 
 	* include/Rivet/Tools/ParticleUtils.hh: Adding same/oppSign and
 	same/opp/diffCharge functions, operating on two Particles.
 
 	* include/Rivet/Tools/ParticleUtils.hh: Adding HasAbsPID functor
 	and removing optional abs arg from HasPID.
 
 2015-03-19  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Mark ATLAS_2012_I1083318 as VALIDATED and fix d25-x01-y02 ref data.
 
 2015-03-19  Chris Pollard <cpollard@cern.ch>
 
 	* Add ATLAS W and Z angular analyses (ATLAS_2011_I928289)
 
 2015-03-19  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add LHCb charged particle multiplicities and densities analysis (LHCB_2014_I1281685)
 
 	* Add LHCb Z y and phi* analysis (LHCB_2012_I1208102)
 
 2015-03-19  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Add ATLAS dijet analysis (ATLAS_2014_I1325553).
 
 	* Add ATLAS Z pT analysis (ATLAS_2014_I1300647).
 
 	* Add ATLAS low-mass Drell-Yan analysis (ATLAS_2014_I1288706).
 
 	* Add ATLAS gap fractions analysis (ATLAS_2014_I1307243).
 
 2015-03-18  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding CMS_2014_I1298810 and CMS_2014_I1303894 analyses.
 
 2015-03-18  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Add PDG_TAUS analysis which makes use of the TauFinder.
 
 	* Add ATLAS 'traditional' Underlying Event in Z->mumu analysis (ATLAS_2014_I1315949).
 
 2015-03-18  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Change UnstableFinalState duplicate resolution to use the last
 	in a chain rather than the first.
 
 2015-03-17  Holger Schulz  <holger.schulz@durham.ac.uk>
 
	* Update TauFinder to use decaytype (can be HADRONIC, LEPTONIC or
	ANY), in FastJet.cc --- set TauFinder mode to hadronic for tau-tagging
 
 2015-03-16  Chris Pollard  <cpollard@cern.ch>
 
 	* Removed fuzzyEquals() from Vector3::angle()
 
 2015-03-16  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding Cuts-based constructor to PrimaryHadrons.
 
 	* Adding missing compare() method to HeavyHadrons projection.
 
 2015-03-15  Chris Pollard  <cpollard@cern.ch>
 
 	* Adding FinalPartons projection which selects the quarks and
 	gluons immediately before hadronization
 
 2015-03-05  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding Cuts-based constructors and other tidying in UnstableFinalState and HeavyHadrons
 
 2015-03-03  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add support for a PLOT meta-file argument to rivet-cmphistos.
 
 2015-02-27  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Improved time reporting.
 
 2015-02-24  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add Particle::fromHadron and Particle::fromPromptTau, and add a
 	boolean 'prompt' argument to Particle::fromTau.
 
 	* Fix WFinder use-transverse-mass property setting. Thanks to Christian Gutschow.
 
 2015-02-04  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add more protection against math domain errors with log axes.
 
 	* Add some protection against nan-valued points and error bars in make-plots.
 
 2015-02-03  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Converting 'bitwise' to 'logical' Cuts combinations in all analyses.
 
 2015-02-02  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Use vector MET rather than scalar VET (doh...) in WFinder
 	cut. Thanks to Ines Ochoa for the bug report.
 
 	* Updating and tidying analyses with deprecation warnings.
 
 	* Adding more Cuts/FS constructors for Charged,Neutral,UnstableFinalState.
 
 	* Add &&, || and ! operators for without-parens-warnings Cut
 	combining. Note these don't short-circuit, but this is ok since
 	Cut comparisons don't have side-effects.
 
 	* Add absetaIn, absrapIn Cut range definitions.
 
 	* Updating use of sorted particle/jet access methods and cmp
 	functors in projections and analyses.
 
 2014-12-09  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding a --cmd arg to rivet-buildplugin to allow the output
 	paths to be sed'ed (to help deal with naive Grid
 	distribution). For example BUILDROOT=`rivet-config --prefix`;
 	rivet-buildplugin PHOTONS.cc --cmd | sed -e
 	"s:$BUILDROOT:$SITEROOT:g"
 
 2014-11-26  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Interface improvements in DressedLeptons constructor.
 
 	* Adding DEPRECATED macro to throw compiler deprecation warnings when using deprecated features.
 
 2014-11-25  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding Cut-based constructors, and various constructors with
 	lists of PDG codes to IdentifiedFinalState.
 
 2014-11-20  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Analysis updates (ATLAS, CMS, CDF, D0) to apply the changes below.
 
 	* Adding JetAlg jets(Cut, Sorter) methods, and other interface
 	improvements for cut and sorted ParticleBase retrieval from JetAlg
 	and ParticleFinder projections. Some old many-doubles versions
 	removed, syntactic sugar sorting methods deprecated.
 
 	* Adding Cuts::Et and Cuts::ptIn, Cuts::etIn, Cuts::massIn.
 
 	* Moving FastJet includes, conversions, uses etc. into Tools/RivetFastJet.hh
 
 2014-10-07  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Fix a bug in the isCharmHadron(pid) function and remove isStrange* functions.
 
 2014-09-30  Andy Buckley  <andy.buckley@cern.ch>
 
 	* 2.2.0 release!
 
 	* Mark Jet::containsBottom and Jet::containsCharm as deprecated
 	methods: use the new methods. Analyses updated.
 
 	* Add Jet::bTagged(), Jet::cTagged() and Jet::tauTagged() as
 	ghost-assoc-based replacements for the 'contains' tagging methods.
 
 2014-09-17  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding support for 1D and 3D YODA scatters, and helper methods
 	for calling the efficiency, asymm and 2D histo divide functions.
 
 2014-09-12  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding 5 new ATLAS analyses:
 	ATLAS_2011_I921594: Inclusive isolated prompt photon analysis with full 2010 LHC data
 	ATLAS_2013_I1263495: Inclusive isolated prompt photon analysis with 2011 LHC data
 	ATLAS_2014_I1279489: Measurements of electroweak production of dijets + $Z$ boson, and distributions sensitive to vector boson fusion
 	ATLAS_2014_I1282441: The differential production cross section of the $\phi(1020)$ meson in $\sqrt{s}=7$ TeV $pp$ collisions measured with the ATLAS detector
 	ATLAS_2014_I1298811: Leading jet underlying event at 7 TeV in ATLAS
 
 	* Adding a median(vector<NUM>) function and fixing the other stats
 	functions to operate on vector<NUM> rather than vector<int>.
 
 2014-09-03  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Fix wrong behaviour of LorentzTransform with a null boost vector
 	-- thanks to Michael Grosse.
 
 2014-08-26  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add calc() methods to Hemispheres as requested, to allow it to
 	be used with Jet or FourMomentum inputs outside the normal
 	projection system.
 
 2014-08-17  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Improvements to the particles methods on
 	ParticleFinder/FinalState, in particular adding the range of cuts
 	arguments cf. JetAlg (and tweaking the sorted jets equivalent) and
 	returning as a copy rather than a reference if cut/sorted to avoid
 	accidentally messing up the cached copy.
 
 	* Creating ParticleFinder projection base class, and moving
 	Particles-accessing methods from FinalState into it.
 
 	* Adding basic forms of MC_ELECTRONS, MC_MUONS, and MC_TAUS
 	analyses.
 
 2014-08-15  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Version bump to 2.2.0beta1 for use at BOOST and MCnet school.
 
 2014-08-13  Andy Buckley  <andy.buckley@cern.ch>
 
 	* New analyses:
 	ATLAS_2014_I1268975 (high mass dijet cross-section at 7 TeV)
 	ATLAS_2014_I1304688 (jet multiplicity and pT at 7 TeV)
 	ATLAS_2014_I1307756 (scalar diphoton resonance search at 8 TeV -- no histograms!)
 	CMSTOTEM_2014_I1294140 (charged particle pseudorapidity at 8 TeV)
 
 2014-08-09  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding PromptFinalState, based on code submitted by Alex
 	Grohsjean and Will Bell. Thanks!
 
 2014-08-06  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding MC_HFJETS and MC_JETTAGS analyses.
 
 2014-08-05  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Update all analyses to use the xMin/Max/Mid, xMean, xWidth,
 	etc. methods on YODA classes rather than the deprecated lowEdge
 	etc.
 
 	* Merge new HasPID functor from Holger Schulz into
 	Rivet/Tools/ParticleUtils.hh, mainly for use with the any()
 	function in Rivet/Tools/Utils.hh
 
 2014-08-04  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add ghost tagging of charms, bottoms and taus to FastJets, and
 	tag info accessors to Jet.
 
 	* Add constructors from and cast operators to FastJet's PseudoJet
 	object from Particle and Jet.
 
 	* Convert inRange to not use fuzzy comparisons on closed
 	intervals, providing old version as fuzzyInRange.
 
 2014-07-30  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Remove classifier functions accepting a Particle from the PID
 	inner namespace.
 
 2014-07-29  Andy Buckley  <andy.buckley@cern.ch>
 
 	* MC_JetAnalysis.cc: re-enable +- ratios for eta and y, now that
 	YODA divide doesn't throw an exception.
 
 	* ATLAS_2012_I1093734: fix a loop index error which led to the
 	first bin value being unfilled for half the dphi plots.
 
 	* Fix accidental passing of a GenParticle pointer as a PID code
 	int in HeavyHadrons.cc. Effect limited to incorrect deductions
 	about excited HF decay chains and should be small. Thanks to
 	Tomasz Przedzinski for finding and reporting the issue during
 	HepMC3 design work!
 
 2014-07-23  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Fix to logspace: make sure that start and end values are exact,
 	not the result of exp(log(x)).
 
 2014-07-16  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Fix setting of library paths for doc building: Python can't
 	influence the dynamic loader in its own process by setting an
 	environment variable because the loader only looks at the variable
 	once, when it starts.
 
 2014-07-02  Andy Buckley  <andy.buckley@cern.ch>
 
 	* rivet-cmphistos now uses the generic yoda.read() function rather
 	than readYODA() -- AIDA files can also be compared and plotted
 	directly now.
 
 2014-06-24  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add stupid missing <string> include and std:: prefix in Rivet.hh
 
 2014-06-20  Holger Schulz  <hschulz@physik.hu-berlin.de>
 
 	* bin/make-plots: Automatic generation of minor xtick labels if LogX is requested
 	but data resides e.g. in [200, 700]. Fixes m_12 plots of, e.g.
 	ATLAS_2010_S8817804
 
 2014-06-17  David Grellscheid  <David.Grellscheid@durham.ac.uk>
 
 	* pyext/rivet/Makefile.am: 'make distcheck' and out-of-source
 	builds should work now.
 
 2014-06-10  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Fix use of the install command for bash completion installation on Macs.
 
 2014-06-07  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Removing direct includes of MathUtils.hh and others from analysis code files.
 
 2014-06-02  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Rivet 2.1.2 release!
 
 2014-05-30  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Using Particle absrap(), abseta() and abspid() where automatic conversion was feasible.
 
 	* Adding a few extra kinematics mappings to ParticleBase.
 
 	* Adding p3() accessors to the 3-momentum on FourMomentum, Particle, and Jet.
 
 	* Using Jet and Particle kinematics methods directly (without momentum()) where possible.
 
 	* More tweaks to make-plots 2D histo parsing behaviour.
 
 2014-05-30  Holger Schulz  <hschulz@physik.hu-berlin.de>
 
 	* Actually fill the XQ 2D histo, .plot decorations.
 
 	* Have make-plots produce colourmaps using YODA_3D_SCATTER
 	objects. Remove the grid in colourmaps.
 
 	* Some tweaks for the SFM analysis, trying to contact Martin Wunsch
 	who did the unfolding back then.
 
 2014-05-29  Holger Schulz  <hschulz@physik.hu-berlin.de>
 
 	* Re-enable 2D histo in MC_PDFS
 
 2014-05-28  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Updating analysis and project routines to use Particle::pid() by
 	preference to Particle::pdgId(), and Particle::abspid() by
 	preference to abs(Particle::pdgId()), etc.
 
 	* Adding interfacing of smart pointer types and booking etc. for
 	YODA 2D histograms and profiles.
 
 	* Improving ParticleIdUtils and ParticleUtils functions based on
 	merging of improved function collections from MCUtils, and
 	dropping the compiled ParticleIdUtils.cc file.
 
 2014-05-27  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding CMS_2012_I1090423 (dijet angular distributions),
 	CMS_2013_I1256943 (Zbb xsec and angular correlations),
 	CMS_2013_I1261026 (jet and UE properties vs. Nch) and
 	D0_2000_I499943 (bbbar production xsec and angular correlations).
 
 2014-05-26  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Fixing a bug in plot file handling, and adding a texpand()
 	routine to rivet.util, to be used to expand some 'standard'
 	physics TeX macros.
 
 	* Adding ATLAS_2012_I1124167 (min bias event shapes),
 	ATLAS_2012_I1203852 (ZZ cross-section), and ATLAS_2013_I1190187
 	(WW cross-section) analyses.
 
 2014-05-16  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding any(iterable, fn) and all(iterable, fn) template functions for convenience.
 
 2014-05-15  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Fix some bugs in identified hadron PIDs in OPAL_1998_S3749908.
 
 2014-05-13  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Writing out [UNVALIDATED], [PRELIMINARY], etc. in the
 	--list-analyses output if analysis is not VALIDATED.
 
 2014-05-12  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding CMS_2013_I1265659 colour coherence analysis.
 
 2014-05-07  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Bug fixes in CMS_2013_I1209721 from Giulio Lenzi.
 
 	* Fixing compiler warnings from clang, including one which
 	indicated a misapplied cut bug in CDF_2006_S6653332.
 
 2014-05-05  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Fix missing abs() in Particle::abspid()!!!!
 
 2014-04-14  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding the namespace protection workaround for Boost described
 	at http://www.boost.org/doc/libs/1_55_0/doc/html/foreach.html
 
 2014-04-13  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding a rivet.pc template file and installation rule for pkg-config to use.
 
 	* Updating data/refdata/ALEPH_2001_S4656318.yoda to corrected version in HepData.
 
 2014-03-27  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Flattening PNG output of make-plots (i.e. no transparency) and other tweaks.
 
 2014-03-23  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Renaming the internal meta-particle class in DressedLeptons (and
 	exposed in the W/ZFinders) from ClusteredLepton to DressedLepton
 	for consistency with the change in name of its containing class.
 
 	* Removing need for cmake and unportable yaml-cpp trickery by
 	using libtool to build an embedded symbol-mangled copy of yaml-cpp
 	rather than trying to mangle and build direct from the tarball.
 
 2014-03-10  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Rivet 2.1.1 release.
 
 2014-03-07  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding ATLAS multilepton search (no ref data file), ATLAS_2012_I1204447.
 
 2014-03-05  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Also renaming Breit-Wigner functions to cdfBW, invcdfBW and bwspace.
 
 	* Renaming index_between() to the more Rivety binIndex(), since that's the only real use of such a function... plus a bit of SFINAE type relaxation trickery.
 
 2014-03-04  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding programmatic access to final histograms via AnalysisHandler::getData().
 
 	* Adding CMS 4 jet correlations analysis, CMS_2013_I1273574.
 
 	* Adding CMS W + 2 jet double parton scattering analysis, CMS_2013_I1272853.
 
 	* Adding ATLAS isolated diphoton measurement, ATLAS_2012_I1199269.
 
 	* Improving the index_between function so the numeric types don't
 	have to exactly match.
 
 	* Adding better momentum comparison functors and sortBy, sortByX
 	functions to use them easily on containers of Particle, Jet, and
 	FourMomentum.
 
 2014-02-10  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Removing duplicate and unused ParticleBase sorting functors.
 
 	* Removing unused HT increment and units in ATLAS_2012_I1180197 (unvalidated SUSY).
 
 	* Fixing photon isolation logic bug in CMS_2013_I1258128 (Z rapidity).
 
 	* Replacing internal uses of #include Rivet/Rivet.hh with
 	Rivet/Config/RivetCommon.hh, removing the MAXRAPIDITY const, and
 	repurposing Rivet/Rivet.hh as a convenience include for external
 	API users.
 
 	* Adding isStable, children, allDescendants, stableDescendants,
 	and flightLength functions to Particle.
 
 	* Replacing Particle and Jet deltaX functions with generic ones on
 	ParticleBase, and adding deltaRap variants.
 
 	* Adding a Jet.fhh forward declaration header, including fastjet::PseudoJet.
 
 	* Adding a RivetCommon.hh header to allow Rivet.hh to be used externally.
 
 	* Fixing HeavyHadrons to apply pT cuts if specified.
 
 2014-02-06  Andy Buckley  <andy.buckley@cern.ch>
 
 	* 2.1.0 release!
 
 2014-02-05  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Protect against invalid prefix value if the --prefix configure option is unused.
 
 2014-02-03  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding the ATLAS_2012_I1093734 fwd-bwd / azimuthal minbias correlations analysis.
 
 	* Adding the LHCB_2013_I1208105 forward energy flow analysis.
 
 2014-01-31  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Checking the YODA minimum version in the configure script.
 
 	* Fixing the JADE_OPAL analysis ycut values to the midpoints,
 	thanks to information from Christoph Pahl / Stefan Kluth.
 
 2014-01-29  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Removing unused/overrestrictive Isolation* headers.
 
 2014-01-27  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Re-bundling yaml-cpp, now built as a mangled static lib based on
 	the LHAPDF6 experience.
 
 	* Throw a UserError rather than an assert if AnalysisHandler::init
 	is called more than once.
 
 2014-01-25  David Grellscheid <david.grellscheid@durham.ac.uk>
 
 	* src/Core/Cuts.cc: New Cuts machinery, already used in FinalState.
 	Old-style "mineta, maxeta, minpt" constructors kept around for ease of
 	transition. Minimal set of convenience functions available, like EtaIn(),
 	should be expanded as needed.
 
 2014-01-22  Andy Buckley  <andy.buckley@cern.ch>
 
 	* configure.ac: Remove opportunistic C++11 build, until this
 	becomes mandatory (in version 2.2.0?). Anyone who wants C++11 can
 	explicitly set the CXXFLAGS (and DYLDFLAGS for pre-Mavericks Macs)
 
 2014-01-21  Leif Lonnblad  <Leif.Lonnblad@thep.lu.se>
 
	* src/Core/Analysis.cc: Fixed bug in Analysis::isCompatible where
	an 'abs' was left out when checking that beam energies do not
	differ by more than 1GeV.
 
 	* src/Analyses/CMS_2011_S8978280.cc: Fixed checking of beam energy
 	and booking corresponding histograms.
 
 2013-12-19  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding pid() and abspid() methods to Particle.
 
 	* Adding hasCharm and hasBottom methods to Particle.
 
 	* Adding a sorting functor arg version of the ZFinder::constituents() method.
 
 	* Adding pTmin cut accessors to HeavyHadrons.
 
 	* Tweak to the WFinder constructor to place the target W (trans) mass argument last.
 
 2013-12-18  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding a GenParticle* cast operator to Particle, removing the
 	Particle and Jet copies of the momentum cmp functors, and general
 	tidying/improvement/unification of the momentum properties of jets
 	and particles.
 
 2013-12-17  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Using SFINAE techniques to improve the math util functions.
 
 	* Adding isNeutrino to ParticleIdUtils, and
 	isHadron/isMeson/isBaryon/isLepton/isNeutrino methods to Particle.
 
 	* Adding a FourMomentum cast operator to ParticleBase, so that
 	Particle and Jet objects can be used directly as FourMomentums.
 
 2013-12-16  Andy Buckley  <andy@duality>
 
 	* LeptonClusters renamed to DressedLeptons.
 
 	* Adding singular particle accessor functions to WFinder and ZFinder.
 
 	* Removing ClusteredPhotons and converting ATLAS_2010_S8919674.
 
 2013-12-12  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Fixing a problem with --disable-analyses (thanks to David Hall)
 
 	* Require FastJet version 3.
 
 	* Bumped version to 2.1.0a0
 
 	* Adding -DNDEBUG to the default build flags, unless in --enable-debug mode.
 
 	* Adding a special treatment of RIVET_*_PATH variables: if they
 	end in :: the default search paths will not be appended. Used
 	primarily to restrict the doc builds to look only inside the build
 	dirs, but potentially also useful in other special circumstances.
 
 	* Adding a definition of exec_prefix to rivet-buildplugin.
 
 	* Adding -DNDEBUG to the default non-debug build flags.
 
 2013-11-27  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Removing accidentally still-present no-as-needed linker flag from rivet-config.
 
 	* Lots of analysis clean-up and migration to use new features and W/Z finder APIs.
 
 	* More momentum method forwarding on ParticleBase and adding
 	abseta(), absrap() etc. functions.
 
 	* Adding the DEFAULT_RIVET_ANA_CONSTRUCTOR cosmetic macro.
 
 	* Adding deltaRap() etc. function variations
 
 	* Adding no-decay photon clustering option to WFinder and ZFinder,
 	and replacing opaque bool args with enums.
 
 	* Adding an option for ignoring photons from hadron/tau decays in LeptonClusters.
 
 2013-11-22  Andy Buckley  <andy.buckley@cern.ch>
 
	* Adding Particle::fromBottom/Charm/Tau() members. LHCb were
	already mocking this up, so it seemed sensible to add it to the
	interface as a more popular (and even less dangerous) version of
	hasAncestor().
 
 	* Adding an empty() member to the JetAlg interface.
 
 2013-11-07  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding the GSL lib path to the library path in the env scripts
 	and the rivet-config --ldflags output.
 
 2013-10-25  Andy Buckley  <andy.buckley@cern.ch>
 
 	* 2.0.0 release!!!!!!
 
 2013-10-24  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Supporting zsh completion via bash completion compatibility.
 
 2013-10-22  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Updating the manual to describe YODA rather than AIDA and the new rivet-cmphistos script.
 
 	* bin/make-plots: Adding paths to error messages in histogram combination.
 
 	* CDF_2005_S6217184: fixes to low stats errors and final scatter plot binning.
 
 2013-10-21  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Several small fixes in jet shape analyses, SFM_1984, etc. found
 	in the last H++ validation run.
 
 2013-10-18  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Updates to configure and the rivetenv scripts to try harder to discover YODA.
 
 2013-09-26  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Now bundling Cython-generated files in the tarballs, so Cython
 	is not a build requirement for non-developers.
 
 2013-09-24  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Removing unnecessary uses of a momentum() indirection when
 	accessing kinematic variables.
 
 	* Clean-up in Jet, Particle, and ParticleBase, in particular
 	splitting PID functions on Particle from those on PID codes, and
 	adding convenience kinematic functions to ParticleBase.
 
 2013-09-23  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add the -avoid-version flag to libtool.
 
 	* Final analysis histogramming issues resolved.
 
 2013-08-16  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding a ConnectBins flag in make-plots, to decide whether to
 	connect adjacent, gapless bins with a vertical line. Enabled by
 	default (good for the step-histo default look of MC lines), but
 	now rivet-cmphistos disables it for the reference data.
 
 2013-08-14  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Making 2.0.0beta3 -- just a few remaining analysis migration
 	issues remaining but it's worth making another beta since there
 	were lots of framework fixes/improvements.
 
 2013-08-11  Andy Buckley  <andy.buckley@cern.ch>
 
 	* ARGUS_1993_S2669951 also fixed using scatter autobooking.
 
 	* Fixing remaining issues with booking in BABAR_2007_S7266081
 	using the feature below (far nicer than hard-coding).
 
 	* Adding a copy_pts param to some Analysis::bookScatter2D methods:
 	pre-setting the points with x values is sometimes genuinely
 	useful.
 
 2013-07-26  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Removed the (officially) obsolete CDF 2008 LEADINGJETS and
 	NOTE_9351 underlying event analyses -- superseded by the proper
 	versions of these analyses based on the final combined paper.
 
 	* Removed the semi-demo Multiplicity projection -- only the
 	EXAMPLE analysis and the trivial ALEPH_1991_S2435284 needed
 	adaptation.
 
 2013-07-24  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding a rejection of histo paths containing /TMP/ from the
 	writeData function. Use this to handle booked temporary
 	histograms... for now.
 
 2013-07-23  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Make rivet-cmphistos _not_ draw a ratio plot if there is only one line.
 
 	* Improvements and fixes to HepData lookup with rivet-mkanalysis.
 
 2013-07-22  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add -std=c++11 or -std=c++0x to the Rivet compiler flags if supported.
 
 	* Various fixes to analyses with non-zero numerical diffs.
 
 2013-06-12  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding a new HeavyHadrons projection.
 
 	* Adding optional extra include_end args to logspace() and linspace().
 
 2013-06-11  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Moving Rivet/RivetXXX.hh tools headers into Rivet/Tools/.
 
 	* Adding PrimaryHadrons projection.
 
 	* Adding particles_in/out functions on GenParticle to RivetHepMC.
 
 	* Moved STL extensions from Utils.hh to RivetSTL.hh and tidying.
 
 	* Tidying, improving, extending, and documenting in RivetSTL.hh.
 
 	* Adding a #include of Logging.hh into Projection.hh, and removing
 	unnecessary #includes from all Projection headers.
 
 2013-06-10  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Moving htmlify() and detex() Python functions into rivet.util.
 
 	* Add HepData URL for Inspire ID lookup to the rivet script.
 
 	* Fix analyses' info files which accidentally listed the Inspire
 	ID under the SpiresID metadata key.
 
 2013-06-07  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Updating mk-analysis-html script to produce MathJax output
 
 	* Adding a version of Analysis::removeAnalysisObject() which takes
 	an AO pointer as its argument.
 
 	* bin/rivet: Adding pandoc-based conversion of TeX summary and
 	description strings to plain text on the terminal output.
 
 	* Add MathJax to rivet-mkhtml output, set up so the .info entries should render ok.
 
 	* Mark the OPAL 1993 analysis as UNVALIDATED: from the H++
 	benchmark runs it looks nothing like the data, and there are some
 	outstanding ambiguities.
 
 2013-06-06  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Releasing 2.0.0b2 beta version.
 
 2013-06-05  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Renaming Analysis::add() etc. to very explicit
 	addAnalysisObject(), sorting out shared_pointer polymorphism
 	issues via the Boost dynamic_pointer_cast, and adding a full set
 	of getHisto1D(), etc. explicitly named and typed accessors,
 	including ones with HepData dataset/axis ID signatures.
 
 	* Adding histo booking from an explicit reference Scatter2D (and
 	more placeholders for 2D histos / 3D scatters) and rewriting
 	existing autobooking to use this.
 
 	* Converting inappropriate uses of size_t to unsigned int in Analysis.
 
 	* Moving Analysis::addPlot to Analysis::add() (or reg()?) and
 	adding get() and remove() (or unreg()?)
 
 	* Fixing attempted abstraction of import fallbacks in rivet.util.import_ET().
 
 	* Removing broken attempt at histoDir() caching which led to all
 	histograms being registered under the same analysis name.
 
 2013-06-04  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Updating the Cython version requirement to 0.18
 
 	* Adding Analysis::integrate() functions and tidying the Analysis.hh file a bit.
 
 2013-06-03  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding explicit protection against using inf/nan scalefactors in
 	ATLAS_2011_S9131140 and H1_2000_S4129130.
 
 	* Making Analysis::scale noisly complain about invalid
 	scalefactors.
 
 2013-05-31  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Reducing the TeX main memory to ~500MB. Turns out that it *can*
 	be too large with new versions of TeXLive!
 
 2013-05-30  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Reverting bookScatter2D behaviour to never look at ref data, and
 	updating a few affected analyses. This should fix some bugs with
 	doubled datapoints introduced by the previous behaviour+addPoint.
 
 	* Adding a couple of minor Utils.hh and MathUtils.hh features.
 
 2013-05-29  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Removing Constraints.hh header.
 
 	* Minor bugfixes and improvements in Scatter2D booking and MC_JetAnalysis.
 
 2013-05-28  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Removing defunct HistoFormat.hh and HistoHandler.{hh,cc}
 
 2013-05-27  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Removing includes of Logging.hh, RivetYODA.hh, and
 	ParticleIdUtils.hh from analyses (and adding an include of
 	ParticleIdUtils.hh to Analysis.hh)
 
 	* Removing now-unused .fhh files.
 
 	* Removing lots of unnecessary .fhh includes from core classes:
 	everything still compiling ok. A good opportunity to tidy this up
 	before the release.
 
 	* Moving the rivet-completion script from the data dir to bin (the
 	completion is for scripts in bin, and this makes development
 	easier).
 
 	* Updating bash completion scripts for YODA format and
 	compare-histos -> rivet-cmphistos.
 
 2013-05-23  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding Doxy comments and a couple of useful convenience functions to Utils.hh.
 
 	* Final tweaks to ATLAS ttbar jet veto analysis (checked logic with Kiran Joshi).
 
 2013-05-15  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Many 1.0 -> weight bugfixes in ATLAS_2011_I945498.
 
 	* yaml-cpp v3 support re-introduced in .info parsing.
 
 	* Lots of analysis clean-ups for YODA TODO issues.
 
 2013-05-13  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Analysis histo booking improvements for Scatter2D, placeholders
 	for 2D histos, and general tidying.
 
 2013-05-12  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding configure-time differentiation between yaml-cpp API versions 3 and 5.
 
 2013-05-07  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Converting info file reading to use the yaml-cpp 0.5.x API.
 
 2013-04-12  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Tagging as 2.0.0b1
 
 2013-04-04  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Removing bundling of yaml-cpp: it needs to be installed by the
 	user / bootstrap script from now on.
 
 2013-04-03  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Removing svn:external m4 directory, and converting Boost
 	detection to use better boost.m4 macros.
 
 2013-03-22  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Moving PID consts to the PID namespace and corresponding code
 	updates and opportunistic clean-ups.
 
 	* Adding Particle::fromDecay() method.
 
 2013-03-09  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Version bump to 2.0.0b1 in anticipation of first beta release.
 
 	* Adding many more 'popular' particle ID code named-consts and
 	aliases, and updating the RapScheme enum with ETA -> ETARAP, and
 	fixing affected analyses (plus other opportunistic tidying / minor
 	bug-fixing).
 
 	* Fixing a symbol misnaming in ATLAS_2012_I1119557.
 
 2013-03-07  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Renaming existing uses of ParticleVector to the new 'Particles' type.
 
 	* Updating util classes, projections, and analyses to deal with
 	the HepMC return value changes.
 
 	* Adding new Particle(const GenParticle*) constructor.
 
 	* Converting Particle::genParticle() to return a const pointer
 	rather than a reference, for the same reason as below (+
 	consistency within Rivet and with the HepMC pointer-centric coding
 	design).
 
 	* Converting Event to use a different implementation of original
 	and modified GenParticles, and to manage the memory in a more
 	future-proof way. Event::genParticle() now returns a const pointer
 	rather than a reference, to signal that the user is leaving the
 	happy pastures of 'normal' Rivet behind.
 
 	* Adding a Particles typedef by analogy to Jets, and in preference
 	to the cumbersome ParticleVector.
 
 	* bin/: Lots of tidying/pruning of messy/defunct scripts.
 
 	* Creating spiresbib, util, and plotinfo rivet python module
 	submodules: this eliminates lighthisto and the standalone
 	spiresbib modules. Util contains convenience functions for Python
 	version testing, clean ElementTree import, and process renaming,
 	for primary use by the rivet-* scripts.
 
 	* Removing defunct scripts that have been replaced/obsoleted by YODA.
 
 2013-03-06  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Fixing doc build so that the reference histos and titles are
 	~correctly documented. We may want to truncate some of the lists!
 
 2013-03-06  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Added ATLAS_2012_I1125575 analysis
 
 	* Converted rivet-mkhtml to yoda
 
 	* Introduced rivet-cmphistos as yoda based replacement for compare-histos
 
 2013-03-05  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Replacing all AIDA ref data with YODA versions.
 
 	* Fixing the histograms entries in the documentation to be
 	tolerant to plotinfo loading failures.
 
 	* Making the findDatafile() function primarily find YODA data
 	files, then fall back to AIDA. The ref data loader will use the
 	appropriate YODA format reader.
 
 2013-02-05  David Grellscheid  <David.Grellscheid@durham.ac.uk>
 
 	* include/Rivet/Math/MathUtils.hh: added BWspace bin edge method
 	to give equal-area Breit-Wigner bins
 
 2013-02-01  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding an element to the PhiMapping enum and a new mapAngle(angle, mapping) function.
 
 	* Fixes to Vector3::azimuthalAngle and Vector3::polarAngle calculation (using the mapAngle functions).
 
 2013-01-25  Frank Siegert  <frank.siegert@cern.ch>
 
 	* Split MC_*JETS analyses into three separate bits:
 	MC_*INC (inclusive properties)
 	MC_*JETS (jet properties)
 	MC_*KTSPLITTINGS (kT splitting scales).
 
 2013-01-22  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Fix TeX variable in the rivetenv scripts, especially for csh
 
 2012-12-21  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Version 1.8.2 release!
 
 2012-12-20  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding ATLAS_2012_I1119557 analysis (from Roman Lysak and Lily Asquith).
 
 2012-12-18  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding TOTEM_2012_002 analysis, from Sercan Sen.
 
 2012-12-18  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Added CMS_2011_I954992 analysis
 
 2012-12-17  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Added CMS_2012_I1193338 analysis
 
 	* Fixed xi cut in ATLAS_2011_I894867
 
 2012-12-17  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding analysis descriptions to the HTML analysis page ToC.
 
 2012-12-14  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Added CMS_2012_PAS_FWD_11_003 analysis
 
 	* Added LHCB_2012_I1119400 analysis
 
 2012-12-12  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Correction to jet acceptance in CMS_2011_S9120041, from Sercan Sen: thanks!
 
 2012-12-12  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Added CMS_2012_PAS_QCD_11_010 analysis
 
 2012-12-07  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Version number bump to 1.8.2 -- release approaching.
 
 	* Rewrite of ALICE_2012_I1181770 analysis to make it a bit more sane and acceptable.
 
 	* Adding a note on FourVector and FourMomentum that operator- and
 	operator-= invert both the space and time components: use of -=
 	can result in a vector with negative energy.
 
 	* Adding particlesByRapidity and particlesByAbsRapidity to FinalState.
 
 2012-12-07  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Added ALICE_2012_I1181770 analysis
 
 	* Bump version to 1.8.2
 
 2012-12-06  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Added ATLAS_2012_I1188891 analysis
 
 	* Added ATLAS_2012_I1118269 analysis
 
 	* Added CMS_2012_I1184941 analysis
 
 	* Added LHCB_2010_I867355 analysis
 
 	* Added TGraphErrors support to root2flat
 
 2012-11-27  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Converting CMS_2012_I1102908 analysis to use YODA.
 
 	* Adding XLabel and YLabel setting in histo/profile/scatter booking.
 
 2012-11-27  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Fix make-plots png creation for SL5
 
 2012-11-23  Peter Richardson <peter.richardson@durham.ac.uk>
 
 	* Added ATLAS_2012_CONF_2012_153 4-lepton SUSY search
 
 2012-11-17  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding MC_PHOTONS by Steve Lloyd and AB, for testing general
 	unisolated photon properties, especially those associated with
 	charged leptons (e and mu).
 
 2012-11-16  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding MC_PRINTEVENT, a convenient (but verbose!) analysis for
 	printing out event details to stdout.
 
 2012-11-15  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Removing the long-unused/defunct autopackage system.
 
 2012-11-15  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Added LHCF_2012_I1115479 analysis
 	* Added ATLAS_2011_I894867 analysis
 
 2012-11-14  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Added CMS_2012_I1102908 analysis
 
 2012-11-14  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Converting the argument order of logspace, clarifying the
 	arguments, updating affected code, and removing Analysis::logBinEdges.
 
 	* Merging updates from the AIDA maintenance branch up to r4002
 	(latest revision for next merges is r4009).
 
 2012-11-11  Andy Buckley  <andy.buckley@cern.ch>
 
 	* include/Math/: Various numerical fixes to Vector3::angle and
 	changing the 4 vector mass treatment to permit spacelike
 	virtualities (in some cases even the fuzzy isZero assert check was
 	being violated). The angle check allows a clean-up of some
 	workaround code in MC_VH2BB.
 
 2012-10-15  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Added CMS_2012_I1107658 analysis
 
 2012-10-11  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Added CDF_2012_NOTE10874 analysis
 
 2012-10-04  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Added ATLAS_2012_I1183818 analysis
 
 2012-07-17  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Cleanup and multiple fixes in CMS_2011_S9120041
 
	* Bugfixes in ALEPH_2004_S5765862 and ATLAS_2010_CONF_2010_049
	(thanks to Anil Pratap)
 
 2012-08-09  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Fixing aida2root command-line help message and converting to TH*
 	rather than TGraph by default.
 
 2012-07-24  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Improvements/migrations to rivet-mkhtml, rivet-mkanalysis, and
 	rivet-buildplugin.
 
 2012-07-17  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Add CMS_2012_I1087342
 
 2012-07-12  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Fix rivet-mkanalysis a bit for YODA compatibility.
 
 2012-07-05  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Version 1.8.1!
 
 2012-07-05  Holger Schulz  <holger.schulz@physik.hu-berlin.de>
 
 	* Add ATLAS_2011_I945498
 
 2012-07-03  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Bugfix for transverse mass (thanks to Gavin Hesketh)
 
 2012-06-29  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Merge YODA branch into trunk. YODA is alive!!!!!!
 
 2012-06-26  Holger Schulz  <holger.schulz@physik.hu-berlin.de>
 
 	* Add ATLAS_2012_I1091481
 
 2012-06-20  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Added D0_2011_I895662: 3-jet mass
 
 2012-04-24  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* fixed a few bugs in rivet-rmgaps
 
 	* Added new TOTEM dN/deta analysis
 
 2012-03-19  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Version 1.8.0!
 
 	* src/Projections/UnstableFinalState.cc: Fix compiler warning.
 
 	* Version bump for testing: 1.8.0beta1.
 
 	* src/Core/AnalysisInfo.cc: Add printout of YAML parser exception error messages to aid debugging.
 
 	* bin/Makefile.am: Attempt to fix rivet-nopy build on SLC5.
 
 	* src/Analyses/LHCB_2010_S8758301.cc: Add two missing entries to the PDGID -> lifetime map.
 
 	* src/Projections/UnstableFinalState.cc: Extend list of vetoed particles to include reggeons.
 
 2012-03-16  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Version change to 1.8.0beta0 -- nearly ready for long-awaited release!
 
 	* pyext/setup.py.in: Adding handling for the YAML library: fix for
 	Genser build from Anton Karneyeu.
 
 	* src/Analyses/LHCB_2011_I917009.cc: Hiding lifetime-lookup error
 	message if the offending particle is not a hadron.
 
 	* include/Rivet/Math/MathHeader.hh: Using unnamespaced std::isnan
 	and std::isinf as standard.
 
 2012-03-16  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Improve default plot behaviour for 2D histograms
 
 2012-03-15  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Make ATLAS_2012_I1084540 less verbose, and general code
 	cleanup of that analysis.
 
 	* New-style plugin hook in ATLAS_2011_I926145,
 	ATLAS_2011_I944826 and ATLAS_2012_I1084540
 
 	* Fix compiler warnings in ATLAS_2011_I944826 and CMS_2011_S8973270
 
 	* CMS_2011_S8941262: Weights are double, not int.
 
 	* disable inRange() tests in test/testMath.cc until we have a proper
 	fix for the compiler warnings we see on SL5.
 
 2012-03-07  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Marking ATLAS_2011_I919017 as VALIDATED (this should have
 	happened a long time ago) and adding more references.
 
 2012-02-28  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* lighthisto.py: Caching for re.compile(). This speeds up aida2flat
 	and flat2aida by more than an order of magnitude.
 
 2012-02-27  Andy Buckley  <andy.buckley@cern.ch>
 
 	* doc/mk-analysis-html: Adding more LaTeX/text -> HTML conversion
 	replacements, including better <,> handling.
 
 2012-02-26  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding CMS_2011_S8973270, CMS_2011_S8941262, CMS_2011_S9215166,
 	CMS_QCD_10_024, from CMS.
 
 	* Adding LHCB_2011_I917009 analysis, from Alex Grecu.
 
 	* src/Core/Analysis.cc, include/Rivet/Analysis.hh: Add a numeric-arg version of histoPath().
 
 2012-02-24  Holger Schulz  <holger.schulz@physik.hu-berlin.de>
 
 	* Adding ATLAS Ks/Lambda analysis.
 
 2012-02-20  Andy Buckley  <andy.buckley@cern.ch>
 
 	* src/Analyses/ATLAS_2011_I925932.cc: Using new overflow-aware
 	normalize() in place of counters and scale(..., 1/count)
 
 2012-02-14  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Splitting MC_GENERIC to put the PDF and PID plotting into
 	MC_PDFS and MC_IDENTIFIED respectively.
 
 	* Renaming MC_LEADINGJETS to MC_LEADJETUE.
 
 2012-02-14  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* DELPHI_1996_S3430090 and ALEPH_1996_S3486095:
 	fix rapidity vs {Thrust,Sphericity}-axis.
 
 2012-02-14  Andy Buckley  <andy.buckley@cern.ch>
 
 	* bin/compare-histos: Don't attempt to remove bins from MC histos
 	where they aren't found in the ref file, if the ref file is not
 	expt data, or if the new --no-rmgapbins arg is given.
 
 	* bin/rivet: Remove the conversion of requested analysis names to
 	upper-case: mixed-case analysis names will now work.
 
 2012-02-14  Frank Siegert  <frank.siegert@cern.ch>
 
 	* Bugfixes and improvements for MC_TTBAR:
 	  - Avoid assert failure with logspace starting at 0.0
 	  - Ignore charged lepton in jet finding (otherwise jet multi is always
 	    +1).
 	  - Add some dR/deta/dphi distributions as noted in TODO
 	  - Change pT plots to logspace as well (to avoid low-stat high pT bins)
 
 2012-02-10  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* rivet-mkhtml -c option now has the semantics of a .plot
 	file. The contents are appended to the dat output by
 	compare-histos.
 
 2012-02-09  David Grellscheid <david.grellscheid@durham.ac.uk>
 
 	* Fixed broken UnstableFS behaviour
 
 2012-01-25 Frank Siegert  <frank.siegert@cern.ch>
 
 	* Improvements in make-plots:
 	  - Add PlotTickLabels and RatioPlotTickLabels options (cf.
 	    make-plots.txt)
 	  - Make ErrorBars and ErrorBands non-exclusive (and change
 	    their order, such that Bars are on top of Bands)
 
 2012-01-25 Holger Schulz  <holger.schulz@physik.hu-berlin.de>
 
 	* Add ATLAS diffractive gap analysis
 
 2012-01-23  Andy Buckley  <andy.buckley@cern.ch>
 
 	* bin/rivet: When using --list-analyses, the analysis summary is
 	now printed out when log level is <= INFO, rather than < INFO.
 	The effect on command line behaviour is that useful identifying
 	info is now printed by default when using --list-analyses, rather
 	than requiring --list-analyses -v. To get the old behaviour,
 	e.g. if using the output of rivet --list-analyses for scripting,
 	now use --list-analyses -q.
 
 2012-01-22  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Tidying lighthisto, including fixing the order in which +- error
 	values are passed to the Bin constructor in fromFlatHisto.
 
 2012-01-16 Frank Siegert  <frank.siegert@cern.ch>
 
 	* Bugfix in ATLAS_2012_I1083318: Include non-signal neutrinos in
 	jet clustering.
 
 	* Add first version of ATLAS_2012_I1083318 (W+jets). Still
 	UNVALIDATED until final happiness with validation plots arises and
 	data is in Hepdata.
 
 	* Bugfix in ATLAS_2010_S8919674: Really use neutrino with highest
 	pT for Etmiss. Doesn't seem to make very much difference, but is
 	more correct in principle.
 
 2012-01-16 Peter Richardson  <peter.richardson@durham.ac.uk>
 
	* Fixes to ATLAS_2011_S9225137 to include reference data
 
 2012-01-13 Holger Schulz  <holger.schulz@physik.hu-berlin.de>
 
 	* Add ATLAS inclusive lepton analysis
 
 2012-01-12 Hendrik Hoeth  <hendrik.hoeth@durham.ac.uk>
 
 	* Font selection support in rivet-mkhtml
 
 2012-01-11 Peter Richardson  <peter.richardson@durham.ac.uk>
 
 	* Added pi0 to list of particles.
 
 2012-01-11  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Removing references to Boost random numbers.
 
 2011-12-30  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding a placeholder rivet-which script (not currently
 	installed).
 
 	* Tweaking to avoid a very time-consuming debug printout in
 	compare-histos with the -v flag, and modifying the Rivet env vars
 	in rivet-mkhtml before calling compare-histos to eliminate
 	problems induced by relative paths (i.e. "." does not mean the
 	same thing when the directory is changed within the script).
 
 2011-12-12  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding a command line completion function for rivet-mkhtml.
 
 2011-12-12  Frank Siegert  <frank.siegert@cern.ch>
 
 	* Fix for factor of 2.0 in normalisation of CMS_2011_S9086218
 
 	* Add --ignore-missing option to rivet-mkhtml to ignore non-existing
 	AIDA files.
 
 2011-12-06  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Include underflow and overflow bins in the normalisation when
 	calling Analysis::normalise(h)
 
 2011-11-23  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Bumping version to 1.8.0alpha0 since the Jet interface changes
 	are quite a major break with backward compatibility (although the
 	vast majority of analyses should be unaffected).
 
 	* Removing crufty legacy stuff from the Jet class -- there is
 	never any ambiguity between whether Particle or FourMomentum
 	objects are the constituents now, and the jet 4-momentum is set
 	explicitly by the jet alg so that e.g. there is no mismatch if the
 	FastJet pt recombination scheme is used.
 
 	* Adding default do-nothing implementations of Analysis::init()
 	and Analysis::finalize(), since it is possible for analysis
 	implementations to not need to do anything in these methods, and
 	forcing analysis authors to write do-nothing boilerplate code is
 	not "the Rivet way"!
 
 2011-11-19  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding variant constructors to FastJets with a more natural
 	Plugin* argument, and decrufting the constructor implementations a
 	bit.
 
 	* bin/rivet: Adding a more helpful error message if the rivet
 	module can't be loaded, grouping the option parser options,
 	removing the -A option (this just doesn't seem useful anymore),
 	and providing a --pwd option as a shortcut to append "." to the
 	search path.
 
 2011-11-18  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding a guide to compiling a new analysis template to the
 	output message of rivet-mkanalysis.
 
 2011-11-11  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Version 1.7.0 release!
 
 	* Protecting the OPAL 2004 analysis against NaNs in the
 	hemispheres projection -- I can't track the origin of these and
 	suspect some occasional memory corruption.
 
 2011-11-09  Andy Buckley  <andy@insectnation.org>
 
 	* Renaming source files for EXAMPLE and
 	PDG_HADRON_MULTIPLICITIES(_RATIOS) analyses to match the analysis
 	names.
 
 	* Cosmetic fixes in ATLAS_2011_S9212183 SUSY analysis.
 
 	* Adding new ATLAS W pT analysis from Elena Yatsenko (slightly adapted).
 
 2011-10-20  Frank Siegert  <frank.siegert@cern.ch>
 
 	* Extend API of W/ZFinder to allow for specification of input final
 	state in which to search for leptons/photons.
 
 2011-10-19  Andy Buckley  <andy@insectnation.org>
 
 	* Adding new version of LHCB_2010_S8758301, based on submission
 	from Alex Grecu. There is some slightly dodgy-looking GenParticle*
 	fiddling going on, but apparently it's necessary (and hopefully robust).
 
 2011-10-17  Andy Buckley  <andy@insectnation.org>
 
 	* bin/rivet-nopy linker line tweak to make compilation work with
 	GCC 4.6 (-lHepMC has to be explicitly added for some reason).
 
 2011-10-13  Frank Siegert  <frank.siegert@cern.ch>
 
 	* Add four CMS QCD analyses kindly provided by CMS.
 
 2011-10-12  Andy Buckley  <andy@insectnation.org>
 
 	* Adding a separate test program for non-matrix/vector math
 	functions, and adding a new set of int/float literal arg tests for
 	the inRange functions in it.
 
 	* Adding a jet multiplicity plot for jets with pT > 30 GeV to
 	MC_TTBAR.
 
 2011-10-11  Andy Buckley  <andy@insectnation.org>
 
 	* Removing SVertex.
 
 2011-10-11  James Monk    <jmonk@cern.ch>
 
 	* root2flat was missing the first bin (plus spurious last bin)
 
 	* My version of bash does not understand the pipe syntax |& in rivet-buildplugin
 
 2011-09-30  James Monk    <jmonk@cern.ch>
 
 	* Fix bug in ATLAS_2010_S8817804 that misidentified the akt4 jets
 	as akt6
 
 2011-09-29  Andy Buckley  <andy@insectnation.org>
 
 	* Converting FinalStateHCM to a slightly more general
 	DISFinalState.
 
 2011-09-26  Andy Buckley  <andy@insectnation.org>
 
 	* Adding a default libname argument to rivet-buildplugin. If the
 	first argument doesn't have a .so library suffix, then use
 	RivetAnalysis.so as the default.
 
 2011-09-19  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* make-plots: Fixing regex for \physicscoor. Adding "FrameColor"
 	option.
 
 2011-09-17  Andy Buckley  <andy@insectnation.org>
 
 	* Improving interactive metadata printout, by not printing
 	headings for missing info.
 
 	* Bumping the release number to 1.7.0alpha0, since with these
 	SPIRES/Inspire changes and the MissingMomentum API change we need
 	more than a minor release.
 
 	* Updating the mkanalysis, BibTeX-grabbing and other places that
 	care about analysis SPIRES IDs to also be able to handle the new
 	Inspire system record IDs. The missing link is getting to HepData
 	from an Inspire code...
 
 	* Using the .info file rather than an in-code declaration to
 	specify that an analysis needs cross-section information.
 
 	* Adding Inspire support to the AnalysisInfo and Analysis
 	interfaces. Maybe we can find a way to combine the two,
 	e.g. return the SPIRES code prefixed with an "S" if no Inspire ID
 	is available...
 
 2011-09-17  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Added ALICE_2011_S8909580 (strange particle production at 900 GeV)
 
 	* Feed-down correction in ALICE_2011_S8945144
 
 2011-09-16  Andy Buckley  <andy@insectnation.org>
 
 	* Adding ATLAS track jet analysis, modified from the version
 	provided by Seth Zenz: ATLAS_2011_I919017. Note that this analysis
 	is currently using the Inspire ID rather than the Spires one:
 	we're clearly going to have to update the API to handle Inspire
 	codes, so might as well start now...
 
 2011-09-14  Andy Buckley  <andy@insectnation.org>
 
 	* Adding the ATLAS Z pT measurement at 7 TeV (ATLAS_2011_S9131140)
 	and an MC analysis for VH->bb events (MC_VH2BB).
 
 2011-09-12  Andy Buckley  <andy@insectnation.org>
 
 	* Removing uses of getLog, cout, cerr, and endl from all standard
 	analyses and projections, except in a very few special cases.
 
 2011-09-10  Andy Buckley  <andy@insectnation.org>
 
 	* Changing the behaviour and interface of the MissingMomentum
 	projection to calculate vector ET correctly. This was previously
 	calculated according to the common definition of -E*sin(theta) of
 	the summed visible 4-momentum in the event, but that is incorrect
 	because the timelike term grows monotonically. Instead, transverse
 	2-vectors of size ET need to be constructed for each visible
 	particle, and vector-summed in the transverse plane.
 
 	The rewrite of this behaviour made it opportune to make an API
 	improvement: the previous method names scalarET/vectorET() have
 	been renamed to scalar/vectorEt() to better match the Rivet
 	FourMomentum::Et() method, and MissingMomentum::vectorEt() now
 	returns a Vector3 rather than a double so that the transverse
 	missing Et direction is also available.
 
 	Only one data analysis has been affected by this change in
 	behaviour: the D0_2004_S5992206 dijet delta(phi) analysis. It's
 	expected that this change will not be very significant, as it is
 	a *veto* on significant missing ET to reduce non-QCD
 	contributions. MC studies using this analysis ~always run with QCD
 	events only, so these contributions should be small. The analysis
 	efficiency may have been greatly improved, as fewer events will
 	now fail the missing ET veto cut.
 
 	* Add sorting of the ParticleVector returned by the ChargedLeptons
 	projection.
 
 	* configure.ac: Adding a check to make sure that no-one tries to
 	install into --prefix=$PWD.
 
 2011-09-04  Andy Buckley  <andy@insectnation.org>
 
 	* lighthisto fixes from Christian Roehr.
 
 2011-08-26  Andy Buckley  <andy@insectnation.org>
 
 	* Removing deprecated features: the setBeams(...) method on
 	Analysis, the MaxRapidity constant, the split(...) function, the
 	default init() method from AnalysisHandler and its test, and the
 	deprecated TotalVisibleMomentum and PVertex projections.
 
 2011-08-23  Andy Buckley  <andy@insectnation.org>
 
 	* Adding a new DECLARE_RIVET_PLUGIN wrapper macro to hide the
 	details of the plugin hook system from analysis authors. Migration
 	of all analyses and the rivet-mkanalysis script to use this as the
 	standard plugin hook syntax.
 
 	* Also call the --cflags option on root-config when using the
	--root option with rivet-buildplugin (thanks to Richard Corke for
 	the report)
 
 2011-08-23  Frank Siegert <frank.siegert@cern.ch>
 
 	* Added ATLAS_2011_S9126244
 	* Added ATLAS_2011_S9128077
 
 2011-08-23  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Added ALICE_2011_S8945144
 
 	* Remove obsolete setBeams() from the analyses
 
 	* Update CMS_2011_S8957746 reference data to the official numbers
 
 	* Use Inspire rather than Spires.
 
 2011-08-19  Frank Siegert <frank.siegert@cern.ch>
 
 	* More NLO parton level generator friendliness: Don't crash or fail when
 	there are no beam particles.
 
 	* Add --ignore-beams option to skip compatibility check.
 
 2011-08-09  David Mallows <dave.mallows@gmail.com>
 
 	* Fix aida2flat to ignore empty dataPointSet
 
 2011-08-07  Andy Buckley  <andy@insectnation.org>
 
 	* Adding TEXINPUTS and LATEXINPUTS prepend definitions to the
 	variables provided by rivetenv.(c)sh. A manual setting of these
 	variables that didn't include the Rivet TEXMFHOME path was
 	breaking make-plots on lxplus, presumably since the system LaTeX
 	packages are so old there.
 
 2011-08-02  Frank Siegert  <frank.siegert@cern.ch>
 
 	Version 1.6.0 release!
 
 2011-08-01  Frank Siegert  <frank.siegert@cern.ch>
 
 	* Overhaul of the WFinder and ZFinder projections, including a change
 	of interface. This solves potential problems with leptons which are not
 	W/Z constituents being excluded from the RemainingFinalState.
 
 2011-07-29  Andy Buckley  <andy@insectnation.org>
 
 	* Version 1.5.2 release!
 
 	* New version of aida2root from James Monk.
 
 2011-07-29  Frank Siegert  <frank.siegert@cern.ch>
 
 	* Fix implementation of --config file option in make-plots.
 
 2011-07-27  David Mallows <dave.mallows@gmail.com>
 
 	* Updated MC_TTBAR.plot to reflect updated analysis.
 
 2011-07-25  Andy Buckley  <andy@insectnation.org>
 
 	* Adding a useTransverseMass flag method and implementation to
 	InvMassFinalState, and using it in the WFinder, after feedback
 	from Gavin Hesketh. This was the neatest way I could do it :S Some
 	other tidying up happened along the way.
 
 	* Adding transverse mass massT and massT2 methods and functions
 	for FourMomentum.
 
 2011-07-22  Frank Siegert  <frank.siegert@cern.ch>
 
 	* Added ATLAS_2011_S9120807
 
 	* Add two more observables to MC_DIPHOTON and make its isolation cut
 	more LHC-like
 
 	* Add linear photon pT histo to MC_PHOTONJETS
 
 2011-07-20  Andy Buckley  <andy@insectnation.org>
 
 	* Making MC_TTBAR work with semileptonic ttbar events and generally
 	tidying the code.
 
 2011-07-19  Andy Buckley  <andy@insectnation.org>
 
 	* Version bump to 1.5.2.b01 in preparation for a release in the
 	very near future.
 
 2011-07-18  David Mallows <dave.mallows@gmail.com>
 
 	* Replaced MC_TTBAR: Added t,tbar reconstruction. Not yet working.
 
 2011-07-18  Andy Buckley  <andy@insectnation.org>
 
 	* bin/rivet-buildplugin.in: Pass the AM_CXXFLAGS
 	variable (including the warning flags) to the C++ compiler when
 	building user analysis plugins.
 
 	* include/LWH/DataPointSet.h: Fix accidental setting of errorMinus
 	= scalefactor * error_Plus_. Thanks to Anton Karneyeu for the bug
 	report!
 
 2011-07-18  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Added CMS_2011_S8884919 (charged hadron multiplicity in NSD
 	events corrected to pT>=0).
 
 	* Added CMS_2010_S8656010 and CMS_2010_S8547297 (charged
 	hadron pT and eta in NSD events)
 
 	* Added CMS_2011_S8968497 (chi_dijet)
 
 	* Added CMS_2011_S8978280 (strangeness)
 
 2011-07-13  Andy Buckley  <andy@insectnation.org>
 
 	* Rivet PDF manual updates, to not spread disinformation about
 	bootstrapping a Genser repo.
 
 2011-07-12  Andy Buckley  <andy@insectnation.org>
 
 	* bin/make-plots: Protect property reading against unstripped \r
 	characters from DOS newlines.
 
 	* bin/rivet-mkhtml: Add a -M unmatch regex flag (note that these
 	are matching the analysis path rather than individual histos on
 	this script), and speed up the initial analysis identification and
 	selection by avoiding loops of regex comparisons for repeats of
 	strings which have already been analysed.
 
 	* bin/compare-histos: remove the completely (?) unused histogram
 	list, and add -m and -M regex flags, as for aida2flat and
 	flat2aida.
 
 2011-06-30  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
	* fix fromFlat() in lighthisto: It would ignore histogram paths
 	before.
 
 	* flat2aida: preserve histogram order from .dat files
 
 2011-06-27  Andy Buckley  <andy@insectnation.org>
 
 	* pyext/setup.py.in: Use CXXFLAGS and LDFLAGS safely in the Python
 	extension build, and improve the use of build/src directory
 	arguments.
 
 2011-06-23  Andy Buckley  <andy@insectnation.org>
 
 	* Adding a tentative rivet-updateanalyses script, based on
 	lhapdf-getdata, which will download new analyses as requested. We
 	could change our analysis-providing behaviour a bit to allow this
 	sort of delivery mechanism to be used as the normal way of getting
 	analysis updates without us having to make a whole new Rivet
 	release. It is nice to be able to identify analyses with releases,
 	though, for tracking whether bugs have been addressed.
 
 2011-06-10  Frank Siegert  <frank.siegert@cern.ch>
 
 	* Bugfixes in WFinder.
 
 2011-06-10  Andy Buckley  <andy@insectnation.org>
 
 	* Adding \physicsxcoor and \physicsycoor treatment to make-plots.
 
 2011-06-06  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Allow for negative cross-sections. NLO tools need this.
 
 	* make-plots: For RatioPlotMode=deviation also consider the MC
 	uncertainties, not just data.
 
 2011-06-04  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Add support for goodness-of-fit calculations to make-plots.
 	The results are shown in the legend, and one histogram can
 	be selected to determine the color of the plot margin. See
 	the documentation for more details.
 
 2011-06-04  Andy Buckley  <andy@insectnation.org>
 
 	* Adding auto conversion of Histogram2D to DataPointSets in the
 	AnalysisHandler _normalizeTree method.
 
 2011-06-03  Andy Buckley  <andy@insectnation.org>
 
 	* Adding a file-weight feature to the Run object, which will
 	optionally rescale the weights in the provided HepMC files. This
 	should be useful for e.g. running on multiple differently-weighted
 	AlpGen HepMC files/streams. The new functionality is used by the
 	rivet command via an optional weight appended to the filename with
 	a colon delimiter, e.g. "rivet fifo1.hepmc fifo2.hepmc:2.31"
 
 2011-06-01  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Add BeamThrust projection
 
 2011-05-31  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Fix LIBS for fastjet-3.0
 
 	* Add basic infrastructure for Taylor plots in make-plots
 
 	* Fix OPAL_2004_S6132243: They are using charged+neutral.
 
 	* Release 1.5.1
 
 2011-05-22  Andy Buckley  <andy@insectnation.org>
 
 	* Adding plots of stable and decayed PID multiplicities to
 	MC_GENERIC (useful for sanity-checking generator setups).
 
 	* Removing actually-unused ProjectionApplier.fhh forward
 	declaration header.
 
 2011-05-20  Andy Buckley  <andy@insectnation.org>
 
 	* Removing import of ipython shell from rivet-rescale, having just
 	seen it throw a multi-coloured warning message on a student's
 	lxplus Rivet session!
 
 	* Adding support for the compare-histos --no-ratio flag when using
 	rivet-mkhtml. Adding --rel-ratio, --linear, etc. is an exercise
 	for the enthusiast ;-)
 
 2011-05-10  Andy Buckley  <andy@insectnation.org>
 
 	* Internal minor changes to the ProjectionHandler and
 	ProjectionApplier interfaces, in particular changing the
 	ProjectionHandler::create() function to be called getInstance and
 	to return a reference rather than a pointer. The reference change
 	is to make way for an improved singleton implementation, which
 	cannot yet be used due to a bug in projection memory
 	management. The code of the improved singleton is available, but
 	commented out, in ProjectionManager.hh to allow for easier
 	migration and to avoid branching.
 
 2011-05-08  Andy Buckley  <andy@insectnation.org>
 
 	* Extending flat2aida to be able to read from and write to
 	stdin/out as for aida2flat, and also eliminating the internal
 	histo parsing representation in favour of the one in
 	lighthisto. lighthisto's fromFlat also needed a bit of an
 	overhaul: it has been extended to parse each histo's chunk of
 	text (including BEGIN and END lines) in fromFlatHisto, and for
 	fromFlat to parse a collection of histos from a file, in keeping
 	with the behaviour of fromDPS/fromAIDA. Merging into Professor is
 	now needed.
 
 	* Extending aida2flat to have a better usage message, to accept
 	input from stdin for command chaining via pipes, and to be a bit
 	more sensibly internally structured (although it also now has to
 	hold all histos in memory before writing out -- that shouldn't be
 	a problem for anything other than truly huge histo files).
 
 2011-05-04  Andy Buckley  <andy@insectnation.org>
 
 	* compare-histos: If using --mc-errs style, prefer dotted and
 	dashdotted line styles to dashed, since dashes are often too long
 	to be distinguishable from solid lines. Even better might be to
 	always use a solid line for MC errs style, and to add more colours.
 
 	* rivet-mkhtml: use a no-mc-errors drawing style by default,
 	to match the behaviour of compare-histos, which it calls. The
 	--no-mc-errs option has been replaced with an --mc-errs option.
 
 2011-05-04  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Ignore duplicate files in compare-histos.
 
 2011-04-25  Andy Buckley  <andy@insectnation.org>
 
 	* Adding some hadron-specific N and sumET vs. |eta| plots to MC_GENERIC.
 
 	* Re-adding an explicit attempt to get the beam particles, since
 	HepMC's IO_HERWIG seems to not always set them even though it's
 	meant to.
 
 2011-04-19  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Added ATLAS_2011_S9002537 W asymmetry analysis
 
 2011-04-14  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* deltaR, deltaPhi, deltaEta now available in all combinations of
 	FourVector, FourMomentum, Vector3, doubles. They also accept jets
 	and particles as arguments now.
 
 2011-04-13  David Grellscheid <david.grellscheid@durham.ac.uk>
 
 	* added ATLAS 8983313: 0-lepton BSM
 
 2011-04-01  Andy Buckley  <andy@insectnation.org>
 
 	* bin/rivet-mkanalysis: Don't try to download SPIRES or HepData
 	info if it's not a standard analysis (i.e. if the SPIRES ID is not
 	known), and make the default .info file validly parseable by YAML,
 	which was an unfortunate gotcha for anyone writing a first
 	analysis.
 
 2011-03-31  Andy Buckley  <andy@insectnation.org>
 
 	* bin/compare-histos: Write more appropriate ratio plot labels
 	when not comparing to data, and use the default make-plots labels
 	when comparing to data.
 
 	* bin/rivet-mkhtml: Adding a timestamp to the generated pages, and
 	a -t/--title option to allow setting the main HTML page title on
 	the command line: otherwise it becomes impossible to tell these
 	pages apart when you have a lot of them, except by URL!
 
 2011-03-24  Andy Buckley  <andy@insectnation.org>
 
 	* bin/aida2flat: Adding a -M option to *exclude* histograms whose
 	paths match a regex. Writing a negative lookahead regex with
 	positive matching was far too awkward!
 
 2011-03-18  Leif Lonnblad  <Leif.Lonnblad@thep.lu.se>
 
 	* src/Core/AnalysisHandler.cc (AnalysisHandler::removeAnalysis):
 	Fixed strange shared pointer assignment that caused seg-fault.
 
 2011-03-13  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* filling of functions works now in a more intuitive way (I hope).
 
 2011-03-09  Andy Buckley  <andy@insectnation.org>
 
 	* Version 1.5.0 release!
 
 2011-03-08  Andy Buckley  <andy@insectnation.org>
 
 	* Adding some extra checks for external packages in make-plots.
 
 2011-03-07  Andy Buckley  <andy@insectnation.org>
 
 	* Changing the accuracy of the beam energy checking to 1%, to make
 	the UI a bit more forgiving. It's still best to specify exactly the right
 	energy of course!
 
 2011-03-01  Andy Buckley  <andy@insectnation.org>
 
 	* Adding --no-plottitle to compare-histos (+ completion).
 
 	* Fixing segfaults in UA1_1990_S2044935 and UA5_1982_S875503.
 
 	* Bump ABI version numbers for 1.5.0 release.
 
 	* Use AnalysisInfo for storage of the NeedsCrossSection analysis flag.
 
 	* Allow field setting in AnalysisInfo.
 
 2011-02-27  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Support LineStyle=dashdotted in make-plots
 
 	* New command line option --style for compare-histos. Options are
 	"default", "bw" and "talk".
 
 	* cleaner uninstall
 
 2011-02-26  Andy Buckley  <andy@insectnation.org>
 
 	* Changing internal storage and return type of Particle::pdgId()
 	to PdgId, and adding Particle::energy().
 
 	* Renaming Analysis::energies() as Analysis::requiredEnergies().
 
 	* Adding beam energies into beam consistency checking:
 	Analysis::isCompatible methods now also require the beam energies
 	to be provided.
 
 	* Removing long-deprecated AnalysisHandler::init() constructor and
 	AnalysisHandler::removeIncompatibleAnalyses() methods.
 
 2011-02-25  Andy Buckley  <andy@insectnation.org>
 
 	* Adding --disable-obsolete, which takes its value from the value
 	of --disable-preliminary by default.
 
 	* Replacing RivetUnvalidated and RivetPreliminary plugin libraries
 	with optionally-configured analysis contents in the
 	experiment-specific plugin libraries. This avoids issues with
 	making libraries rebuild consistently when sources were reassigned
 	between libraries.
 
 2011-02-24  Andy Buckley  <andy@insectnation.org>
 
 	* Changing analysis plugin registration to fall back through
 	available paths rather than have RIVET_ANALYSIS_PATH totally
 	override the built-in paths. The first analysis hook of a given
 	name to be found is now the one that's used: any duplicates found
 	will be warned about but unused. getAnalysisLibPaths() now returns
 	*all* the search paths, in keeping with the new search behaviour.
 
 2011-02-22  Andy Buckley  <andy@insectnation.org>
 
 	* Moving the definition of the MSG_* macros into the Logging.hh
 	header. They can't be used everywhere, though, as they depend on
 	the existence of a this->getLog() method in the call scope. This
 	move makes them available in e.g. AnalysisHandler and other bits
 	of framework other than projections and analyses.
 
 	* Adding a gentle print-out from the Rivet AnalysisHandler if
 	preliminary analyses are being used, and strengthening the current
 	warning if unvalidated analyses are used.
 
 	* Adding documentation about the validation "process" and
 	the (un)validated and preliminary analysis statuses.
 
 	* Adding the new RivetPreliminary analysis library, and the
 	corresponding --disable-preliminary configure flag. Analyses in
 	this library are subject to change names, histograms, reference
 	data values, etc. between releases: make sure you check any
 	dependences on these analyses when upgrading Rivet.
 
 	* Change the Python script ref data search behaviours to use Rivet
 	ref data by default where available, rather than requiring a -R
 	option. Where relevant, -R is still a valid option, to avoid
 	breaking legacy scripts, and there is a new --no-rivet-refs option
 	to turn the default searching *off*.
 
 	* Add the prepending and appending optional arguments to the path
 	searching functions. This will make it easier to combine the
 	search functions with user-supplied paths in Python scripts.
 
 	* Make make-plots killable!
 
 	* Adding Rivet version to top of run printout.
 
 	* Adding Run::crossSection() and printing out the cross-section in
 	pb at the end of a Rivet run.
 
 2011-02-22  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Make lighthisto.py aware of 2D histograms
 
 	* Adding published versions of the CDF_2008 leading jets and DY
 	analyses, and marking the preliminary ones as "OBSOLETE".
 
 2011-02-21  Andy Buckley  <andy@insectnation.org>
 
 	* Adding PDF documentation for path searching and .info/.plot
 	files, and tidying overfull lines.
 
 	* Removing unneeded const declarations from various return by
 	value path and internal binning functions. Should not affect ABI
 	compatibility but will force recompilation of external packages
 	using the RivetPaths.hh and Utils.hh headers.
 
 	* Adding findAnalysis*File(fname) functions, to be used by Rivet
 	scripts and external programs to find files known to Rivet
 	according to Rivet's (newly standard) lookup rule.
 
 	* Changing search path function behaviour to always return *all*
 	search directories rather than overriding the built-in locations
 	if the environment variables are set.
 
 2011-02-20  Andy Buckley  <andy@insectnation.org>
 
 	* Adding the ATLAS 2011 transverse jet shapes analysis.
 
 2011-02-18  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Support for transparency in make-plots
 
 2011-02-18  Frank Siegert <frank.siegert@cern.ch>
 
 	* Added ATLAS prompt photon analysis ATLAS_2010_S8914702
 
 2011-02-10  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Simple NOOP constructor for Thrust projection
 
 	* Add CMS event shape analysis. Data read off the plots. We
 	will get the final numbers when the paper is accepted by
 	the journal.
 
 2011-02-10  Frank Siegert <frank.siegert@cern.ch>
 
 	* Add final version of ATLAS dijet azimuthal decorrelation
 
 2011-02-10  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* remove ATLAS conf note analyses for which we have final data
 	* reshuffle histograms in ATLAS minbias analysis to match Hepdata
 	* small pT-cut fix in ATLAS track based UE analysis
 
 2011-01-31  Andy Buckley  <andy@insectnation.org>
 
 	* Doc tweaks and adding cmp-by-|p| functions for Jets, to match
 	those added by Hendrik for Particles.
 
 	* Don't sum photons around muons in the D0 2010 Z pT analysis.
 
 2011-01-27  Andy Buckley  <andy@insectnation.org>
 
 	* Adding ATLAS 2010 min bias and underlying event analyses and data.
 
 2011-01-23  Andy Buckley  <andy@insectnation.org>
 
 	* Make make-plots write out PDF rather than PS by default.
 
 2011-01-12  Andy Buckley  <andy@insectnation.org>
 
 	* Fix several rendering and comparison bugs in rivet-mkhtml.
 
 	* Allow make-plots to write into an existing directory, at the
 	user's own risk.
 
 	* Make rivet-mkhtml produce PDF-based output rather than PS by
 	default (most people want PDF these days). Can we do the same
 	change of default for make-plots?
 
 	* Add getAnalysisPlotPaths() function, and use it in compare-histos
 
 	* Use proper .info file search path function internally in AnalysisInfo::make.
 
 2011-01-11  Andy Buckley  <andy@insectnation.org>
 
 	* Clean up ATLAS dijet analysis.
 
 2010-12-30  Andy Buckley  <andy@insectnation.org>
 
 	* Adding a run timeout option, and small bug-fixes to the event
 	timeout handling, and making first event timeout work nicely with
 	the run timeout. Run timeout is intended to be used in conjunction
 	with timed batch token expiry, of the type that likes to make 0
 	byte AIDA files on LCG when Grid proxies time out.
 
 2010-12-21  Andy Buckley  <andy@insectnation.org>
 
 	* Fix the cuts in the CDF 1994 colour coherence analysis.
 
 2010-12-19  Andy Buckley  <andy@insectnation.org>
 
 	* Fixing CDF midpoint cone jet algorithm default construction to
 	have an overlap threshold of 0.5 rather than 0.75. This was
 	recommended by the FastJet manual, and noticed while adding the
 	ATLAS and CMS cones.
 
 	* Adding ATLAS and CMS old iterative cones as "official" FastJets
 	constructor options (they could always have been used by explicit
 	instantiation and attachment of a Fastjet plugin object).
 
 	* Removing defunct and unused ClosestJetShape projection.
 
 2010-12-16  Andy Buckley  <andy@insectnation.org>
 
 	* bin/compare-histos, pyext/lighthisto.py: Take ref paths from
 	rivet module API rather than getting the environment by hand.
 
 	* pyext/lighthisto.py: Only read .plot info from the first
 	matching file (speed-up compare-histos).
 
 2010-12-14  Andy Buckley  <andy@insectnation.org>
 
 	* Augmenting the physics vector functionality to make FourMomentum
 	support maths operators with the correct return type (FourMomentum
 	rather than FourVector).
 
 2010-12-11  Andy Buckley  <andy@insectnation.org>
 
 	* Adding a --event-timeout option to control the event timeout,
 	adding it to the completion script, and making sure that the init
 	time check is turned OFF once successful!
 
 	* Adding an 3600 second timeout for initialising an event file. If
 	it takes longer than (or anywhere close to) this long, chances are
 	that the event source is inactive for some reason (perhaps
 	accidentally unspecified and stdin is not active, or the event
	generator has died at the other end of the pipe). The reason for
 	not making it something shorter is that e.g. Herwig++ or Sherpa
 	can have long initialisation times to set up the MPI handler or to
	run the matrix element integration. A timeout after an hour is
 	still better than a batch job which runs for two days before you
 	realise that you forgot to generate any events!
 
 2010-12-10  Andy Buckley  <andy@insectnation.org>
 
 	* Fixing unbooked-histo segfault in UA1_1990_S2044935 at 63 GeV.
 
 2010-12-08  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Fixes in ATLAS_2010_CONF_083, declaring it validated
 
 	* Added ATLAS_2010_CONF_046, only two plots are implemented.
 	The paper will be out soon, and we don't need the other plots
 	right now. Data is read off the plots in the note.
 
 	* New option "SmoothLine" for HISTOGRAM sections in make-plots
 
 	* Changed CustomTicks to CustomMajorTicks and added CustomMinorTicks
 	in make-plots.
 
 2010-12-07  Andy Buckley  <andy@insectnation.org>
 
 	* Update the documentation to explain this latest bump to path
 	lookup behaviours.
 
 	* Various improvements to existing path lookups. In particular,
 	the analysis lib path locations are added to the info and ref
 	paths to avoid having to set three variables when you have all
 	three file types in the same personal plugin directory.
 
 	* Adding setAnalysisLibPaths and addAnalysisLibPath
 	functions. rivet --analysis-path{,-append} now use these and work
 	correctly. Hurrah!
 
 	* Add --show-analyses as an alias for --show-analysis, following a
 	comment at the ATLAS tutorial.
 
 2010-12-07  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Change LegendXPos behaviour in make-plots. Now the top left
 	corner of the legend is used as anchor point.
 
 2010-12-03  Andy Buckley  <andy@insectnation.org>
 
 	* 1.4.0 release.
 
 	* Add bin-skipping to compare-histos to avoid one use of
 	rivet-rmgaps (it's still needed for non-plotting post-processing
 	like Professor).
 
 2010-12-03  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Fix normalisation issues in UA5 and ALEPH analyses
 
 2010-11-27  Andy Buckley  <andy@insectnation.org>
 
 	* MathUtils.hh: Adding fuzzyGtrEquals and fuzzyLessEquals, and
 	tidying up the math utils collection a bit.
 
 	* CDF 1994 colour coherence analysis overhauled and
 	correction/norm factors fixed. Moved to VALIDATED status.
 
 	* Adding programmable completion for aida2flat and flat2aida.
 
 	* Improvements to programmable completion using the neat _filedir
 	completion shell function which I just discovered.
 
 2010-11-26  Andy Buckley  <andy@insectnation.org>
 
 	* Tweak to floating point inRange to use fuzzyEquals for CLOSED
 	interval equality comparisons.
 
 	* Some BibTeX generation improvements, and fixing the ATLAS dijet
 	BibTeX key.
 
 	* Resolution upgrade in PNG make-plots output.
 
 	* CDF_2005_S6217184.cc, CDF_2008_S7782535.cc: Updates to use the
 	new per-jet JetAlg interface (and some other fixes).
 
 	* JetAlg.cc: Changed the interface on request to return per-jet
 	rather than per-event jet shapes, with an extra jet index argument.
 
 	* MathUtils.hh: Adding index_between(...) function, which is handy
 	for working out which bin a value falls in, given a set of bin edges.
 
 2010-11-25  Andy Buckley  <andy@insectnation.org>
 
 	* Cmp.hh: Adding ASC/DESC (and ANTISORTED) as preferred
 	non-EQUIVALENT enum value synonyms over misleading
 	SORTED/UNSORTED.
 
 	* Change of rapidity scheme enum name to RapScheme
 
 	* Reworking JetShape a bit further: constructor args now avoid
 	inconsistencies (it was previously possible to define incompatible
 	range-ends and interval). Internal binning implementation also
 	reworked to use a vector of bin edges: the bin details are
 	available via the interface. The general jet pT cuts can be
 	applied via the JetShape constructor.
 
 	* MathUtils.hh: Adding linspace and logspace utility
 	functions. Useful for defining binnings.
 
 	* Adding more general cuts on jet pT and (pseudo)rapidity.
 
 2010-11-11  Andy Buckley  <andy@insectnation.org>
 
 	* Adding special handling of FourMomentum::mass() for computed
 	zero-mass vectors for which mass2 can go (very slightly) negative
 	due to numerical precision.
 
 2010-11-10  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Adding ATLAS-CONF-2010-083 conference note. Data is read from plots.
 	When I run Pythia 6 the bins close to pi/2 are higher than in the note,
 	so I call this "unvalidated". But then ... the note doesn't specify
 	a tune or even just a version for the generators in the plots. Not even
 	if they used Pythia 6 or Pythia 8. Probably 6, since they mention AGILe.
 
 2010-11-10  Andy Buckley  <andy@insectnation.org>
 
 	* Adding a JetAlg::useInvisibles(bool) mechanism to allow ATLAS
 	jet studies to include neutrinos. Anyone who chooses to use this
 	mechanism had better be careful to remove hard neutrinos manually
 	in the provided FinalState object.
 
 2010-11-09  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Adding ATLAS-CONF-2010-049 conference note. Data is read from plots.
 	Fragmentation functions look good, but I can't reproduce the MC lines
 	(or even the relative differences between them) in the jet cross-section
 	plots. So consider those unvalidated for now. Oh, and it seems ATLAS
 	screwed up the error bands in their ratio plots, too. They are
 	upside-down.
 
 2010-11-07  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Adding ATLAS-CONF-2010-081 conference note. Data is read from plots.
 
 2010-11-06  Andy Buckley  <andy@insectnation.org>
 
 	* Deprecating the old JetShape projection and renaming to
 	ClosestJetShape: the algorithm has a tenuous relationship with
 	that actually used in the CDF (and ATLAS) jet shape analyses. CDF
 	analyses to be migrated to the new JetShape projection... and some
 	of that projection's features, design elements, etc. to be
 	finished off: we may as well take this opportunity to clear up
 	what was one of our nastiest pieces of code.
 
 2010-11-05  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Adding ATLAS-CONF-2010-031 conference note. Data is read from plots.
 
 2010-10-29  Andy Buckley  <andy@insectnation.org>
 
 	* Making rivet-buildplugin use the same C++ compiler and CXXFLAGS
 	variable as used for the Rivet system build.
 
 	* Fixing NeutralFinalState projection to, erm, actually select
 	neutral particles (by Hendrik).
 
 	* Allow passing a general FinalState reference to the JetShape
 	projection, rather than requiring a VetoedFS.
 
 2010-10-07  Andy Buckley  <andy@insectnation.org>
 
 	* Adding a --with-root flag to rivet-buildplugin to add
 	root-config --libs flags to the plugin build command.
 
 2010-09-24  Andy Buckley  <andy@insectnation.org>
 
 	* Releasing as Rivet 1.3.0.
 
 	* Bundling underscore.sty to fix problems with running make-plots
 	on dat files generated by compare-histos from AIDA files with
 	underscores in their names.
 
 2010-09-16  Andy Buckley  <andy@insectnation.org>
 
 	* Fix error in N_effective definition for weighted profile errors.
 
 2010-08-18  Andy Buckley  <andy@insectnation.org>
 
 	* Adding MC_GENERIC analysis. NB. Frank Siegert also added MC_HJETS.
 
 2010-08-03  Andy Buckley  <andy@insectnation.org>
 
 	* Fixing compare-histos treatment of what is now a ref file, and
 	speeding things up... again. What a mess!
 
 2010-08-02  Andy Buckley  <andy@insectnation.org>
 
 	* Adding rivet-nopy: a super-simple Rivet C++ command line
 	interface which avoids Python to make profiling and debugging
 	easier.
 
 	* Adding graceful exception handling to the AnalysisHandler event
 	loop methods.
 
 	* Changing compare-histos behaviour to always show plots for which
 	there is at least one MC histo. The default behaviour should now
 	be the correct one in 99% of use cases.
 
 2010-07-30  Andy Buckley  <andy@insectnation.org>
 
 	* Merging in a fix for shared_ptrs not being compared for
 	insertion into a set based on raw pointer value.
 
 2010-07-16  Andy Buckley  <andy@insectnation.org>
 
 	* Adding an explicit library dependency declaration on libHepMC,
 	and hence removing the -lHepMC from the rivet-config --libs
 	output.
 
 2010-07-14  Andy Buckley  <andy@insectnation.org>
 
 	* Adding a manual section on use of Rivet (and AGILe) as
 	libraries, and how to use the -config scripts to aid compilation.
 
 	* FastJets projection now allows setting of a jet area definition,
 	plus a hacky mapping for getting the area-enabled cluster
 	sequence. Requested by Pavel Starovoitov & Paolo Francavilla.
 
 	* Lots of script updates in last two weeks!
 
 2010-06-30  Andy Buckley  <andy@insectnation.org>
 
 	* Minimising amount of Log class mapped into SWIG.
 
 	* Making Python ext build checks fail with error rather than
 	warning if it has been requested (or, rather, not explicitly
 	disabled).
 
 2010-06-28  Andy Buckley  <andy@insectnation.org>
 
 	* Converting rivet Python module to be a package, with the dlopen
 	flag setting etc. done around the SWIG generated core wrapper
 	module (rivet.rivetwrap).
 
 	* Requiring Python >= 2.4.0 in rivet scripts (and adding a Python
 	version checker function to rivet module)
 
 	* Adding --epspng option to make-plots (and converting to use subprocess.Popen).
 
 2010-06-27  Andy Buckley  <andy@insectnation.org>
 
 	* Converting JADE_OPAL analysis to use the fastjet
 	exclusive_ymerge_*max* function, rather than just
 	exclusive_ymerge: everything looks good now. It seems that fastjet
 	>= 2.4.2 is needed for this to work properly.
 
 2010-06-24  Andy Buckley  <andy@insectnation.org>
 
 	* Making rivet-buildplugin look in its own bin directory when
 	trying to find rivet-config.
 
 2010-06-23  Andy Buckley  <andy@insectnation.org>
 
 	* Adding protection and warning about numerical precision issues
 	in jet mass calculation/histogramming to the MC_JetAnalysis
 	analysis.
 
 	* Numerical precision improvement in calculation of
 	Vector4::mass2.
 
 	* Adding relative scale ratio plot flag to compare-histos
 
 	* Extended command completion to rivet-config, compare-histos, and
 	make-plots.
 
 	* Providing protected log messaging macros,
 	MSG_{TRACE,DEBUG,INFO,WARNING,ERROR} cf. Athena.
 
 	* Adding environment-aware functions for Rivet search path list access.
 
 2010-06-21  Andy Buckley  <andy@insectnation.org>
 
 	* Using .info file beam ID and energy info in HTML and LaTeX documentation.
 
 	* Using .info file beam ID and energy info in command-line printout.
 
 	* Fixing a couple of references to temporary variables in the
 	analysis beam info, which had been introduced during refactoring:
 	have reinstated reference-type returns as the more efficient
 	solution. This should not affect API compatibility.
 
 	* Making SWIG configure-time check include testing for
 	incompatibilities with the C++ compiler (re. the recurring _const_
 	char* literals issue).
 
 	* Various tweaks to scripts: make-plots and compare-histos
 	processes are now renamed (on Linux), rivet-config is avoided when
	computing the Rivet version, and RIVET_REF_PATH is also set using
 	the rivet --analysis-path* flags. compare-histos now uses multiple
 	ref data paths for .aida file globbing.
 
 	* Hendrik changed VetoedFinalState comparison to always return
 	UNDEFINED if vetoing on the results of other FS projections is
 	being used. This is the only simple way to avoid problems
 	emanating from the remainingFinalState thing.
 
 2010-06-19  Andy Buckley  <andy@insectnation.org>
 
 	* Adding --analysis-path and --analysis-path-append command-line
 	flags to the rivet script, as a "persistent" way to set or extend
 	the RIVET_ANALYSIS_PATH variable.
 
 	* Changing -Q/-V script verbosity arguments to more standard
 	-q/-v, after Hendrik moaned about it ;)
 
	* Small fix to TinyXML operator precedence: removes a warning,
 	and I think fixes a small bug.
 
 	* Adding plotinfo entries for new jet rapidity and jet mass plots
 	in MC_JetAnalysis derivatives.
 
 	* Moving MC_JetAnalysis base class into a new
 	libRivetAnalysisTools library, with analysis base class and helper
 	headers to be stored in the reinstated Rivet/Analyses include
 	directory.
 
 2010-06-08  Andy Buckley  <andy@insectnation.org>
 
 	* Removing check for CEDARSTD #define guard, since we no longer
 	compile against AGILe and don't have to be careful about
 	duplication.
 
 	* Moving crappy closest approach and decay significance functions
 	from Utils into SVertex, which is the only place they have ever
 	been used (and is itself almost entirely pointless).
 
 	* Overhauling particle ID <-> name system to clear up ambiguities
 	between enums, ints, particles and beams. There are no more enums,
 	although the names are still available as const static ints, and
 	names are now obtained via a singleton class which wraps an STL
 	map for name/ID lookups in both directions.
 
 2010-05-18  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Fixing factor-of-2 bug in the error calculation when scaling
 	histograms.
 
 	* Fixing D0_2001_S4674421 analysis.
 
 2010-05-11  Andy Buckley  <andy@insectnation.org>
 
 	* Replacing TotalVisibleMomentum with MissingMomentum in analyses
 	and WFinder. Using vector ET rather than scalar ET in some places.
 
 2010-05-07  Andy Buckley  <andy@insectnation.org>
 
 	* Revamping the AnalysisHandler constructor and data writing, with
 	some LWH/AIDA mangling to bypass the stupid AIDA idea of having to
 	specify the sole output file and format when making the data
 	tree. Preferred AnalysisHandler constructor now takes only one arg
 	-- the runname -- and there is a new AH.writeData(outfile) method
 	to replace AH.commitData(). Doing this now to begin migration to
 	more flexible histogramming in the long term.
 
 2010-04-21  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Fixing LaTeX problems (make-plots) on ancient machines, like lxplus.
 
 2010-04-29  Andy Buckley  <andy@insectnation.org>
 
 	* Fixing (I hope!) the treatment of weighted profile bin errors in LWH.
 
 2010-04-21  Andy Buckley  <andy@insectnation.org>
 
 	* Removing defunct and unused KtJets and Configuration classes.
 
 	* Hiding some internal details from Doxygen.
 
 	* Add @brief Doxygen comments to all analyses, projections and
 	core classes which were missing them.
 
 2010-04-21  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* remove obsolete reference to InitialQuarks from DELPHI_2002
 	* fix normalisation in CDF_2000_S4155203
 
 2010-04-20  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* bin/make-plots: real support for 2-dim histograms plotted as
 	colormaps, updated the documentation accordingly.
 	* fix misleading help comment in configure.ac
 
 2010-04-08  Andy Buckley  <andy@insectnation.org>
 
 	* bin/root2flat: Adding this little helper script, minimally
 	modified from one which Holger Schulz made for internal use in
 	ATLAS.
 
 2010-04-05  Andy Buckley  <andy@insectnation.org>
 
 	* Using spiresbib in rivet-mkanalysis: analysis templates made
 	with rivet-mkanalysis will now contain a SPIRES-dumped BibTeX key
 	and entry if possible!
 
 	* Adding BibKey and BibTeX entries to analysis metadata files, and
 	updating doc build to use them rather than the time-consuming
 	SPIRES screen-scraping. Added SPIRES BibTeX dumps to all analysis
 	metadata using new (uninstalled & unpackaged) doc/get-spires-data
 	script hack.
 
 	* Updating metadata files to add Energies, Beams and PtCuts
 	entries to all of them.
 
 	* Adding ToDo, NeedsCrossSection, and better treatment of Beams
 	and Energies entries in metadata files and in AnalysisInfo and
 	Analysis interfaces.
 
 2010-04-03  Andy Buckley  <andy@insectnation.org>
 
 	* Frank Siegert: Update of rivet-mkhtml to conform to improved
 	compare-histos.
 
	* Frank Siegert: LWH output in precision-8 scientific notation, to
	solve a binning precision problem... the first time we've noticed a
	problem!
 
 	* Improved treatment of data/reference datasets and labels in
 	compare-histos.
 
 	* Rewrite of rivet-mkanalysis in Python to make way for neat
 	additions.
 
	* Improving SWIG tests, since once again the user's build system
	must include SWIG (no test to check that it's a 'good SWIG', since
	the meaning of that depends on which compiler is being used and we
	hope that the user system is consistent... evidence from Finkified
	Macs and bloody SLC5 notwithstanding).
 
 2010-03-23  Andy Buckley  <andy@insectnation.org>
 
 	* Tag as patch release 1.2.1.
 
 2010-03-22  Andy Buckley  <andy@insectnation.org>
 
 	* General tidying of return arguments and intentionally unused
 	parameters to keep -Wextra happy (some complaints remain from
 	TinyXML, FastJet, and HepMC).
 
 	* Some extra bug fixes: in FastJets projection with explicit
 	plugin argument, removing muon veto cut on FoxWolframMoments.
 
 	* Adding UNUSED macro to help with places where compiler warnings
 	can't be helped.
 
 	* Turning on -Wextra warnings, and fixing some violations.
 
 2010-03-21  Andy Buckley  <andy@insectnation.org>
 
 	* Adding MissingMomentum projection, as replacement for ~all uses
 	of now-deprecated TotalVisibleMomentum projection.
 
 	* Fixing bug with TotalVisibleMomentum projection usage in MC_SUSY
 	analysis.
 
 	* Frank Siegert fixed major bug in pTmin param passing to FastJets
 	projection. D'oh: requires patch release.
 
 2010-03-02  Andy Buckley  <andy@insectnation.org>
 
 	* Tagging for 1.2.0 release... at last!
 
 2010-03-01  Andy Buckley  <andy@insectnation.org>
 
 	* Updates to manual, manual generation scripts, analysis info etc.
 
 	* Add HepData URL to metadata print-out with rivet --show-analysis
 
 	* Fix average Et plot in UA1 analysis to only apply to the tracker
 	acceptance (but to include neutral particle contributions,
 	i.e. the region of the calorimeter in the tracker acceptance).
 
 	* Use Et rather than pT in filling the scalar Et measure in
 	TotalVisibleMomentum.
 
 	* Fixes to UA1 normalisation (which is rather funny in the paper).
 
 2010-02-26  Andy Buckley  <andy@insectnation.org>
 
 	* Update WFinder to not place cuts and other restrictions on the
 	neutrino.
 
 2010-02-11  Andy Buckley  <andy@insectnation.org>
 
 	* Change analysis loader behaviour to use ONLY RIVET_ANALYSIS_PATH
 	locations if set, otherwise use ONLY the standard Rivet analysis
 	install path. Should only impact users of personal plugin
 	analyses, who should now explicitly set RIVET_ANALYSIS_PATH to
 	load their analysis... and who can now create personal versions of
 	standard analyses without getting an error message about duplicate
 	loading.
 
 2010-01-15  Andy Buckley  <andy@insectnation.org>
 
 	* Add tests for "stable" heavy flavour hadrons in jets (rather
 	than just testing for c/b hadrons in the ancestor lists of stable
 	jet constituents)
 
 2009-12-23  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* New option "RatioPlotMode=deviation" in make-plots.
 
 2009-12-14  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* New option "MainPlot" in make-plots. For people who only want
 	the ratio plot and nothing else.
 
 	* New option "ConnectGaps" in make-plots. Set to 1 if you
 	want to connect gaps in histograms with a line when ErrorBars=0.
 	Works both in PLOT and in HISTOGRAM sections.
 
 	* Eliminated global variables for coordinates in make-plots and
 	enabled multithreading.
 
 2009-12-14  Andy Buckley  <andy@insectnation.org>
 
 	* AnalysisHandler::execute now calls AnalysisHandler::init(event)
 	if it has not yet been initialised.
 
 	* Adding more beam configuration features to Beam and
 	AnalysisHandler: the setRunBeams(...) methods on the latter now
 	allows a beam configuration for the run to be specified without
 	using the Run class.
 
 2009-12-11  Andy Buckley  <andy@insectnation.org>
 
 	* Removing use of PVertex from few remaining analyses. Still used
 	by SVertex, which is itself hardly used and could maybe be
 	removed...
 
 2009-12-10  Andy Buckley  <andy@insectnation.org>
 
 	* Updating JADE_OPAL to do the histo booking in init(), since
 	sqrtS() is now available at that stage.
 
 	* Renaming and slightly re-engineering all MC_*_* analyses to not
 	be collider-specific (now the Analysis::sqrtS/beams()) methods
 	mean that histograms can be dynamically binned.
 
 	* Creating RivetUnvalidated.so plugin library for unvalidated
 	analyses. Unvalidated analyses now need to be explicitly enabled
 	with a --enable-unvalidated flag on the configure script.
 
 	* Various min bias analyses updated and validated.
 
 2009-12-10  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Propagate SPECIAL and HISTOGRAM sections from .plot files
 	through compare-histos
 
 	* STAR_2006_S6860818: <pT> vs particle mass, validate analysis
 
 2009-12-04  Andy Buckley  <andy@insectnation.org>
 
 	* Use scaling rather than normalising in DELPHI_1996: this is
 	generally desirable, since normalizing to 1 for 1/sig dsig/dx
 	observables isn't correct if any events fall outside the histo
 	bounds.
 
 	* Many fixes to OPAL_2004.
 
 	* Improved Hemispheres interface to remove unnecessary consts on
 	returned doubles, and to also return non-squared versions
 	of (scaled) hemisphere masses.
 
 	* Add "make pyclean" make target at the top level to make it
 	easier for developers to clean their Python module build when the
 	API is extended.
 
 	* Identify use of unvalidated analyses with a warning message at
 	runtime.
 
 	* Providing Analysis::sqrtS() and Analysis::beams(), and making
 	sure they're available by the time the init methods are called.
 
 2009-12-02  Andy Buckley  <andy@insectnation.org>
 
 	* Adding passing of first event sqrt(s) and beams to analysis handler.
 
 	* Restructuring running to only use one HepMC input file (no-one
 	was using multiple ones, right?) and to break down the Run class
 	to cleanly separate the init and event loop phases. End of file is
 	now neater.
 
 2009-12-01  Andy Buckley  <andy@insectnation.org>
 
 	* Adding parsing of beam types and pairs of energies from YAML.
 
 2009-12-01  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Fixing trigger efficiency in CDF_2009_S8233977
 
 2009-11-30  Andy Buckley  <andy@insectnation.org>
 
 	* Using shared pointers to make I/O object memory management
 	neater and less error-prone.
 
 	* Adding crossSectionPerEvent() method [==
 	crossSection()/sumOfWeights()] to Analysis. Useful for histogram
 	scaling since numerator of sumW_passed/sumW_total (to calculate
 	pass-cuts xsec) is cancelled by dividing histo by sumW_passed.
 
 	* Clean-up of Particle class and provision of inline PID::
 	functions which take a Particle as an argument to avoid having to
 	explicitly call the Particle::pdgId() method.
 
 2009-11-30  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Fixing division by zero in Profile1D bin errors for
 	bins with just a single entry.
 
 2009-11-24  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* First working version of STAR_2006_S6860818
 
 2009-11-23  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Adding missing CDF_2001_S4751469 plots to uemerge
 	* New "ShowZero" option in make-plots
 	* Improving lots of plot defaults
 	* Fixing typos / non-existing bins in CDF_2001_S4751469 and
 	CDF_2004_S5839831 reference data
 
 2009-11-19  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Fixing our compare() for doubles.
 
 2009-11-17  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Zeroth version of STAR_2006_S6860818 analysis (identified
 	strange particles). Not working yet for unstable particles.
 
 2009-11-11  Andy Buckley  <andy@insectnation.org>
 
 	* Adding separate jet-oriented and photon-oriented observables to
 	MC PHOTONJETUE analysis.
 
 	* Bug fix in MC leading jets analysis, and general tidying of
 	leading jet analyses to insert units, etc. (should not affect any
 	current results)
 
 2009-11-10  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Fixing last issues in STAR_2006_S6500200 and setting it to
 	VALIDATED.
	* Normalise STAR_2006_S6870392 to cross-section
 
 2009-11-09  Andy Buckley  <andy@insectnation.org>
 
 	* Overhaul of jet caching and ParticleBase interface.
 
 	* Adding lists of analyses' histograms (obtained by scanning the
 	plot info files) to the LaTeX documentation.
 
 2009-11-07  Andy Buckley  <andy@insectnation.org>
 
 	* Adding checking system to ensure that Projections aren't
 	registered before the init phase of analyses.
 
 	* Now that the ProjHandler isn't full of defunct pointers (which
 	tend to coincidentally point to *new* Projection pointers rather
 	than undefined memory, hence it wasn't noticed until recently!),
 	use of a duplicate projection name is now banned with a helpful
 	message at runtime.
 
 	* (Huge) overhaul of ProjectionHandler system to use shared_ptr:
 	projections are now deleted much more efficiently, naturally
 	cleaning themselves out of the central repository as they go out
 	of scope.
 
 2009-11-06  Andy Buckley  <andy@insectnation.org>
 
 	* Adding Cmp<double> specialisation, using fuzzyEquals().
 
 2009-11-05  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Fixing histogram division code.
 
 2009-11-04  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* New analysis STAR_2006_S6500200 (pion and proton pT spectra in
 	pp collisions at 200 GeV). It is still unclear if they used a cut
 	in rapidity or pseudorapidity, thus the analysis is declared
 	"UNDER DEVELOPMENT" and "DO NOT USE".
 	* Fixing compare() in NeutralFinalState and MergedFinalState
 
 2009-11-04  Andy Buckley  <andy@insectnation.org>
 
 	* Adding consistence checking on beam ID and sqrt(s) vs. those
 	from first event.
 
 2009-11-03  Andy Buckley  <andy@insectnation.org>
 
 	* Adding more assertion checks to linear algebra testing.
 
 2009-11-02  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Fixing normalisation issue with stacked histograms in
 	make-plots.
 
 2009-10-30  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* CDF_2009_S8233977: Updating data and axes labels to match final
 	paper. Normalise to cross-section instead of data.
 
 2009-10-23  Andy Buckley  <andy@insectnation.org>
 
 	* Fixing Cheese-3 plot in CDF 2004... at last!
 
 2009-10-23  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Fix muon veto in CDF_1994_S2952106, CDF_2005_S6217184,
 	CDF_2008_S7782535, and D0_2004_S5992206
 
 2009-10-19  Andy Buckley  <andy@insectnation.org>
 
 	* Adding analysis info files for MC SUSY and PHOTONJETUE analyses.
 
 	* Adding MC UE analysis in photon+jet events.
 
 2009-10-19  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Adding new NeutralFinalState projection. Note that this final
 	state takes E_T instead of p_T as argument (makes more sense for
 	neutral particles). The compare() method does not yet work as
 	expected (E_T comparison still missing).
 
 	* Adding new MergedFinalState projection. This merges two final
 	states, removing duplicate particles. Duplicates are identified by
 	looking at the genParticle(), so users need to take care of any
 	manually added particles themselves.
 
 	* Fixing most open issues with the STAR_2009_UE_HELEN analysis.
 	There is only one question left, regarding the away region.
 
 	* Set the default split-merge value for SISCone in our FastJets
 	projection to the recommended (but not Fastjet-default!) value of
 	0.75.
 
 2009-10-17  Andy Buckley  <andy@insectnation.org>
 
 	* Adding parsing of units in cross-sections passed to the "-x"
 	flag, i.e. "-x 101 mb" is parsed internally into 1.01e11 pb.
 
 2009-10-16  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Disabling DELPHI_2003_WUD_03_11 in the Makefiles, since I don't
 	trust the data.
 
 	* Getting STAR_2009_UE_HELEN to work.
 
 2009-10-04  Andy Buckley  <andy@insectnation.org>
 
 	* Adding triggers and other tidying to (still unvalidated)
 	UA1_1990 analysis.
 
	* Fixing definition of UA5 trigger to not be intrinsically
	different for pp and ppbar: this is corrected for (although it
	takes some reading to work this out) in the 1982 paper, which I
	think is the only one to compare the two modes.
 
 	* Moving projection setup and registration into init() method for
 	remaining analyses.
 
 	* Adding trigger implementations as projections for CDF Runs 0 &
 	1, and for UA5.
 
 2009-10-01  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Moving projection setup and registration into init() method for
 	analyses from ALEPH, CDF and the MC_ group.
 
 	* Adding generic SUSY validation analysis, based on plots used in
 	ATLAS Herwig++ validation.
 
 	* Adding sorted particle accessors to FinalState (cf. JetAlg).
 
 2009-09-29  Andy Buckley  <andy@insectnation.org>
 
 	* Adding optional use of args as regex match expressions with
 	-l/--list-analyses.
 
 2009-09-03  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Passing GSL include path to compiler, since its absence was
 	breaking builds on systems with no GSL installation in a standard
 	location (such as SLC5, for some mysterious reason!)
 
 	* Removing lib extension passing to compiler from the configure
 	script, because Macs and Linux now both use .so extensions for the
 	plugin analysis modules.
 
 2009-09-02  Andy Buckley  <andy@insectnation.org>
 
 	* Adding analysis info file path search with RIVET_DATA_PATH
 	variable (and using this to fix doc build.)
 
 	* Improvements to AnalysisLoader path search.
 
 	* Moving analysis sources back into single directory, after a
 	proletarian uprising ;)
 
 2009-09-01  Andy Buckley  <andy@insectnation.org>
 
 	* Adding WFinder and WAnalysis, based on Z proj and analysis, with
 	some tidying of the Z code.
 
 	* ClusteredPhotons now uses an IdentifiedFS to pick the photons to
 	be looped over, and only clusters photons around *charged* signal
 	particles.
 
 2009-08-31  Andy Buckley  <andy@insectnation.org>
 
 	* Splitting analyses by directory, to make it easier to disable
 	building of particular analysis group plugin libs.
 
 	* Removing/merging headers for all analyses except for the special
 	MC_JetAnalysis base class.
 
 	* Exit with an error message if addProjection is used twice from
 	the same parent with distinct projections.
 
 2009-08-28  Andy Buckley  <andy@insectnation.org>
 
 	* Changed naming convention for analysis plugin libraries, since
 	the loader has changed so much: they must now *start* with the
 	word "Rivet" (i.e. no lib prefix).
 
 	* Split standard plugin analyses into several plugin libraries:
 	these will eventually move into separate subdirs for extra build
 	convenience.
 
 	* Started merging analysis headers into the source files, now that
 	we can (the plugin hooks previously forbade this).
 
 	* Replacement of analysis loader system with a new one based on
 	ideas from ThePEG, which uses dlopen-time instantiation of
 	templated global variables to reduce boilerplate plugin hooks to
 	one line in analyses.
 
 2009-07-14  Frank Siegert  <frank.siegert@durham.ac.uk>
 
 	* Replacing in-source histo-booking metadata with .plot files.
 
 2009-07-14  Andy Buckley  <andy@insectnation.org>
 
 	* Making Python wrapper files copy into place based on bundled
 	versions for each active HepMC interface (2.3, 2.4 & 2.5), using a
 	new HepMC version detector test in configure.
 
 	* Adding YAML metadata files and parser, removing same metadata
 	from the analysis classes' source headers.
 
 2009-07-07  Andy Buckley  <andy@insectnation.org>
 
 	* Adding Jet::hadronicEnergy()
 
 	* Adding VisibleFinalState and automatically using it in JetAlg
 	projections.
 
 	* Adding YAML parser for new metadata (and eventually ref data)
 	files.
 
 2009-07-02  Andy Buckley  <andy@insectnation.org>
 
 	* Adding Jet::neutralEnergy() (and Jet::totalEnergy() for
 	convenience/symmetry).
 
 2009-06-25  Andy Buckley  <andy@insectnation.org>
 
 	* Tidying and small efficiency improvements in CDF_2008_S7541902
 	W+jets analysis (remove unneeded second stage of jet storing,
 	sorting the jets twice, using foreach, etc.).
 
 2009-06-24  Andy Buckley  <andy@insectnation.org>
 
 	* Fixing Jet's containsBottom and containsCharm methods, since B
 	hadrons are not necessarily to be found in the final
 	state. Discovered at the same time that HepMC::GenParticle defines
 	a massively unhelpful copy constructor that actually loses the
 	tree information; it would be better to hide it entirely!
 
 	* Adding RivetHepMC.hh, which defines container-type accessors to
 	HepMC particles and vertices, making it possible to use Boost
 	foreach and hence avoiding the usual huge boilerplate for-loops.
 
 2009-06-11  Andy Buckley  <andy@insectnation.org>
 
 	* Adding --disable-pdfmanual option, to make the bootstrap a bit
 	more robust.
 
 	* Re-enabling D0IL in FastJets: adding 10^-10 to the pTmin removes
 	the numerical instability!
 
 	* Fixing CDF_2004 min/max cone analysis to use calo jets for the
 	leading jet Et binning. Thanks to Markus Warsinsky
 	for (re)discovering this bug: I was sure it had been fixed. I'm
 	optimistic that this will fix the main distributions, although
 	Swiss Cheese "minus 3" is still likely to be broken. Early tests
 	look okay, but it'll take more stats before we can remove the "do
 	not trust" sign.
 
 2009-06-10  Andy Buckley  <andy@insectnation.org>
 
 	* Providing "calc" methods so that Thrust and Sphericity
 	projections can be used as calculators without having to use the
 	projecting/caching system.
 
 2009-06-09  Andy Buckley  <andy@insectnation.org>
 
 	* 1.1.3 release!
 
 	* More doc building and SWIG robustness tweaks.
 
 2009-06-07  Andy Buckley  <andy@insectnation.org>
 
 	* Make doc build from metadata work even before the library is
 	installed.
 
 2009-06-07  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Fix phi rotation in CDF_2008_LEADINGJETS.
 
 2009-06-07  Andy Buckley  <andy@insectnation.org>
 
	* Disabling D0 IL midpoint cone (using CDF midpoint instead),
	since there seems to be a crashing bug in FastJet's
	implementation: we can't release that way, since ~no D0 analyses
	will run.
 
 2009-06-03  Andy Buckley  <andy@insectnation.org>
 
	* Putting SWIG-generated source files under SVN control to make
	life easier for people who we advise to check out the SVN head
	version, but who don't have a sufficiently modern copy of SWIG to
	generate the interface code themselves.
 
 	* Adding the --disable-analyses option, for people who just want
 	to use Rivet as a framework for their own analyses.
 
 	* Enabling HepMC cross-section reading, now that HepMC 2.5.0 has
 	been released.
 
 2009-05-23  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Using gsl-config to locate libgsl
 
 	* Fix the paths for linking such that our own libraries are found
 	before any system libraries, e.g. for the case that there is an
 	outdated fastjet version installed on the system while we want to
 	use our own up-to-date version.
 
 	* Change dmerge to ymerge in the e+e- analyses using JADE or
 	DURHAM from fastjet. That's what it is called in fastjet-2.4 now.
 
 2009-05-18  Andy Buckley  <andy@insectnation.org>
 
 	* Adding use of gsl-config in configure script.
 
 2009-05-16  Andy Buckley  <andy@insectnation.org>
 
 	* Removing argument to vetoEvent macro, since no weight
 	subtraction is now needed. It's now just an annotated return, with
 	built-in debug log message.
 
	* Adding an "open" FinalState, which is only calculated once per
	event, then used by all other FSes, avoiding the loop over
	non-status 1 particles.
 
 2009-05-15  Andy Buckley  <andy@insectnation.org>
 
 	* Removing incorrect setting of DPS x-errs in CDF_2008 jet shape
 	analysis: the DPS autobooking already gets this bit right.
 
 	* Using Jet rather than FastJet::PseudoJet where possible, as it
 	means that the phi ranges match up nicely between Particle and the
 	Jet object. The FastJet objects are only really needed if you want
 	to do detailed things like look at split/merge scales for
 	e.g. diff jet rates or "y-split" analyses.
 
 	* Tidying and debugging CDF jet shape analyses and jet shape
 	plugin... ongoing, but I think I've found at least one real bug,
 	plus a lot of stuff that can be done a lot more nicely.
 
 	* Fully removing deprecated math functions and updating affected
 	analyses.
 
 2009-05-14  Andy Buckley  <andy@insectnation.org>
 
 	* Removing redundant rotation in DISKinematics... this was a
 	legacy of Peter using theta rather than pi-theta in his rotation.
 
 	* Adding convenience phi, rho, eta, theta, and perp,perp2 methods
 	to the 3 and 4 vector classes.
 
 2009-05-12  Andy Buckley  <andy@insectnation.org>
 
 	* Adding event auto-rotation for events with one proton... more
 	complete approach?
 
 2009-05-09  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Renaming CDF_2008_NOTE_9337 to CDF_2009_S8233977.
 
 	* Numerous small bug fixes in ALEPH_1996_S3486095.
 
 	* Adding data for one of the Rick-Field-style STAR UE analyses.
 
 2009-05-08  Andy Buckley  <andy@insectnation.org>
 
 	* Adding rivet-mkanalysis script, to make generating new analysis
 	source templates easier.
 
 2009-05-07  Andy Buckley  <andy@insectnation.org>
 
 	* Adding null vector check to Vector3::azimuthalAngle().
 
 	* Fixing definition of HCM/Breit frames in DISKinematics, and
 	adding asserts to check that the transformation is doing what it
 	should.
 
 2009-05-05  Andy Buckley  <andy@insectnation.org>
 
 	* Removing eta and Et cuts from CDF 2000 Z pT analysis, based on
 	our reading of the paper, and converting most of the analysis to a
 	call of the ZFinder projection.
 
 2009-05-05  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Support non-default seed_threshold in CDF cone jet algorithms.
 
 	* New analyses STAR_2006_S6870392 and STAR_2008_S7993412. In
 	STAR_2008_S7993412 only the first distribution is filled at the
 	moment. STAR_2006_S6870392 is normalised to data instead of the
 	Monte Carlo cross-section, since we don't have that available in
 	the HepMC stream yet.
 
 2009-05-05  Andy Buckley  <andy@insectnation.org>
 
 	* Changing Event wrapper to copy whole GenEvents rather than
 	pointers, use std units if supported in HepMC, and run a
 	placeholder function for event auto-orientation.
 
 2009-04-28  Andy Buckley  <andy@insectnation.org>
 
 	* Removing inclusion of IsolationTools header by analyses that
 	aren't actually using the isolation tools... which is all of
 	them. Leaving the isolation tools in place for now, as there might
 	still be use cases for them and there's quite a lot of code there
 	that deserves a second chance to be used!
 
 2009-04-24  Andy Buckley  <andy@insectnation.org>
 
 	* Deleting Rivet implementations of TrackJet and D0ILConeJets: the
 	code from these has now been incorporated into FastJet 2.4.0.
 
 	* Removed all mentions of the FastJet JADE patch and the HAVE_JADE
 	preprocessor macro.
 
 	* Bug fix to D0_2008_S6879055 to ensure that cuts compare to both
 	electron and positron momenta (was just comparing against
 	electrons, twice, probably thanks to the miracle of cut and
 	paste).
 
 	* Converting all D0 IL Cone jets to use FastJets. Involved tidying
 	D0_2004 jet azimuthal decorrelation analysis and D0_2008_S6879055
 	as part of migration away from using the getLorentzJets method,
 	and removing the D0ILConeJets header from quite a few analyses
 	that weren't using it at all.
 
 	* Updating CDF 2001 to use FastJets in place of TrackJet, and
 	adding axis labels to its plots.
 
	* Note that ZEUS_2001_S4815815 uses the wrong jet definition: it
	should be a cone but currently uses kT.
 
 	* Fixing CDF_2005_S6217184 to use correct (midpoint, R=0.7) jet
 	definition. That this was using a kT definition with R=1.0 was
 	only made obvious when the default FastJets constructor was
 	removed.
 
 	* Removing FastJets default constructor: since there are now
 	several good (IRC safe) jet definitions available, there is no
 	obvious safe default and analyses should have to specify which
 	they use.
 
 	* Moving FastJets constructors into implementation file to reduce
 	recompilation dependencies, and adding new plugins.
 
 	* Ensuring that axis labels actually get written to the output
 	data file.
 
 2009-04-22  Andy Buckley  <andy@insectnation.org>
 
 	* Adding explicit FastJet CDF jet alg overlap_threshold
 	constructor param values, since the default value from 2.3.x is
 	now removed in version 2.4.0.
 
 	* Removing use of HepMC ThreeVector::mag method (in one place
 	only) since this has been removed in version 2.5.0b.
 
 	* Fix to hepmc.i (included by rivet.i) to ignore new HepMC 2.5.0b
 	GenEvent stream operator.
 
 2009-04-21  Andy Buckley  <andy@insectnation.org>
 
 	* Dependency on FastJet now requires version 2.4.0 or later. Jade
 	algorithm is now native.
 
 	* Moving all analysis constructors and Projection headers from the
 	analysis header files into their .cc implementation files, cutting
 	header dependencies.
 
 	* Removing AIDA headers: now using LWH headers only, with
 	enhancement to use axis labels. This facility is now used by the
 	histo booking routines, and calling the booking function versions
 	which don't specify axis labels will result in a runtime warning.
 
 2009-04-07  Andy Buckley  <andy@insectnation.org>
 
 	* Adding $(DESTDIR) prefix to call to Python module "setup.py
 	install"
 
 	* Moving HepMC SWIG mappings into Python Rivet module for now:
 	seems to work-around the SL type-mapping bug.
 
 2009-04-03  Andy Buckley  <andy@insectnation.org>
 
 	* Adding MC analysis for LHC UE: higher-pT replica of Tevatron
 	2008 leading jets study.
 
 	* Adding CDF_1990 pseudorapidity analysis.
 
 	* Moving CDF_2001 constructor into implementation file.
 
 	* Cleaning up CDF_2008_LEADINGJETS a bit, e.g. using foreach
 	loops.
 
 	* Adding function interface for specifying axis labels in histo
 	bookings. Currently has no effect, since AIDA doesn't seem to have
 	a mechanism for axis labels. It really is a piece of crap.
 
 2009-03-18  Andy Buckley  <andy@insectnation.org>
 
 	* Adding docs "make upload" and other tweaks to make the doc files
 	fit in with the Rivet website.
 
 	* Improving LaTex docs to show email addresses in printable form
 	and to group analyses by collider or other metadata.
 
 	* Adding doc script to include run info in LaTeX docs, and to make
 	HTML docs.
 
 	* Removing WZandh projection, which wasn't generator independent
 	and whose sole usage was already replaced by ZFinder.
 
 	* Improvements to constructors of ZFinder and InvMassFS.
 
 	* Changing ExampleTree to use real FS-based Z finding.
 
 2009-03-16  Andy Buckley  <andy@insectnation.org>
 
 	* Allow the -H histo file spec to give a full name if wanted. If
 	it doesn't end in the desired extension, it will be added.
 
 	* Adding --runname option (and API elements) to choose a run name
 	to be prepended as a "top level directory" in histo paths. An
 	empty value results in no extra TLD.
 
 2009-03-06  Andy Buckley  <andy@insectnation.org>
 
 	* Adding R=0.2 photon clustering to the electrons in the CDF 2000
 	Z pT analysis.
 
 2009-03-04  Andy Buckley  <andy@insectnation.org>
 
 	* Fixing use of fastjet-config to not use the user's PATH
 	variable.
 
 	* Fixing SWIG type table for HepMC object interchange.
 
 2009-02-20  Andy Buckley  <andy@insectnation.org>
 
 	* Adding use of new metadata in command line analysis querying
 	with the rivet command, and in building the PDF Rivet manual.
 
 	* Adding extended metadata methods to the Analysis interface and
 	the Python wrapper. All standard analyses comply with this new
 	interface.
 
 2009-02-19  Andy Buckley  <andy@insectnation.org>
 
 	* Adding usefully-scoped config headers, a Rivet::version()
 	function which uses them, and installing the generated headers to
 	fix "external" builds against an installed copy of Rivet. The
 	version() function has been added to the Python wrapper.
 
 2009-02-05  Andy Buckley  <andy@insectnation.org>
 
 	* Removing ROOT dependency and linking. Woo! There's no need for
 	this now, because the front-end accepts no histo format switch and
 	we'll just use aida2root for output conversions. Simpler this way,
 	and it avoids about half of our compilation bug reports from 32/64
 	bit ROOT build confusions.
 
 2009-02-04  Andy Buckley  <andy@insectnation.org>
 
 	* Adding automatic generation of LaTeX manual entries for the
 	standard analyses.
 
 2009-01-20  Andy Buckley  <andy@insectnation.org>
 
 	* Removing RivetGun and TCLAP source files!
 
 2009-01-19  Andy Buckley  <andy@insectnation.org>
 
 	* Added psyco Python optimiser to rivet, make-plots and
 	compare-histos.
 
 	* bin/aida2root: Added "-" -> "_" mangling, following requests.
 
 2009-01-17  Andy Buckley  <andy@insectnation.org>
 
 	* 1.1.2 release.
 
 2009-01-15  Andy Buckley  <andy@insectnation.org>
 
 	* Converting Python build system to bundle SWIG output in tarball.
 
 2009-01-14  Andy Buckley  <andy@insectnation.org>
 
 	* Converting AIDA/LWH divide function to return a DPS so that bin
 	width factors don't get all screwed up. Analyses adapted to use
 	the new division operation (a DPS/DPS divide would also be
 	nice... but can wait for YODA).
 
 2009-01-06  Andy Buckley  <andy@insectnation.org>
 
 	* bin/make-plots: Added --png option for making PNG output files,
 	using 'convert' (after making a PDF --- it's a bit messy)
 
 	* bin/make-plots: Added --eps option for output filtering through
 	ps2eps.
 
 2009-01-05  Andy Buckley  <andy@insectnation.org>
 
 	* Python: reworking Python extension build to use distutils and
 	newer m4 Python macros. Probably breaks distcheck but is otherwise
 	more robust and platform independent (i.e. it should now work on
 	Macs).
 
 2008-12-19  Andy Buckley  <andy@insectnation.org>
 
 	* make-plots: Multi-threaded make-plots and cleaned up the LaTeX
 	building a bit (necessary to remove the implicit single global
 	state).
 
 2008-12-18  Andy Buckley  <andy@insectnation.org>
 
 	* make-plots: Made LaTeX run in no-stop mode.
 
 	* compare-histos: Updated to use a nicer labelling syntax on the
 	command line and to successfully build MC-MC plots.
 
 2008-12-16  Andy Buckley  <andy@insectnation.org>
 
 	* Made LWH bin edge comparisons safe against numerical errors.
 
 	* Added Particle comparison functions for sorting.
 
 	* Removing most bad things from ExampleTree and tidying up. Marked
 	WZandh projection for removal.
 
 2008-12-03  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Added the two missing observables to the CDF_2008_NOTE_9337 analysis,
 	i.e. track pT and sum(ET). There is a small difference between our MC
 	output and the MC plots of the analysis' author, we're still waiting
 	for the author's comments.
 
 2008-12-02  Andy Buckley  <andy@insectnation.org>
 
 	* Overloading use of a std::set in the interface, since the
 	version of SWIG on Sci Linux doesn't have a predefined mapping for
 	STL sets.
 
 2008-12-02  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Fixed uemerge. The output was seriously broken by a single line
 	of debug information in fillAbove(). Also changed uemerge output
 	to exponential notation.
 
 	* Unified ref and mc histos in compare-histos. Histos with one
 	bin are plotted linear. Option for disabling the ratio plot.
 	Several fixes for labels, legends, output directories, ...
 
 	* Changed rivetgun's fallback directory for parameter files to
 	$PREFIX/share/AGILe, since that's where the steering files now are.
 
 	* Running aida2flat in split mode now produces make-plots compatible
 	dat-files for direct plotting.
 
 2008-11-28  Andy Buckley  <andy@insectnation.org>
 
 	* Replaced binreloc with an upgraded and symbol-independent copy.
 
 2008-11-25  Andy Buckley  <andy@insectnation.org>
 
 	* Added searching of $RIVET_REF_PATH for AIDA reference data
 	files.
 
 2008-11-24  Andy Buckley  <andy@insectnation.org>
 
	* Removing "get"s and other obfuscated syntax from
	ProjectionApplier (Projection and Analysis) interfaces.
 
 2008-11-21  Andy Buckley  <andy@insectnation.org>
 
 	* Using new "global" Jet and V4 sorting functors in
 	TrackJet. Looks like there was a sorting direction problem before...
 
 	* Verbose mode with --list-analyses now shows descriptions as well
 	as analysis names.
 
 	* Moved data/Rivet to data/refdata and moved data/RivetGun
 	contents to AGILe (since generator steering is no longer a Rivet
 	thing)
 
 	* Added unchecked ratio plots to D0 Run II jet + photon analysis.
 
 	* Added D0 inclusive photon analysis.
 
 	* Added D0 Z rapidity analysis.
 
 	* Tidied up constructor interface and projection chain
 	implementation of InvMassFinalState.
 
 	* Added ~complete set of Jet and FourMomentum sorting functors.
 
 2008-11-20  Andy Buckley  <andy@insectnation.org>
 
 	* Added IdentifiedFinalState.
 
 	* Moved a lot of TrackJet and Jet code into .cc files.
 
 	* Fixed a caching bug in Jet: cache flag resets should never be
 	conditional, since they are then sensitive to initialisation
 	errors.
 
 	* Added quark enum values to ParticleName.
 
 	* Rationalised JetAlg interfaces somewhat, with "size()" and
 	"jets()" methods in the interface.
 
 	* Added D0 W charge asymmetry and D0 inclusive jets analyses.
 
 2008-11-18  Andy Buckley  <andy@insectnation.org>
 
 	* Adding D0 inclusive Z pT shape analysis.
 
 	* Added D0 Z + jet pT and photon + jet pT spectrum analyses.
 
 	* Lots of tidying up of particle, event, particle name etc.
 
 	* Now the first event is used to detect the beam type and remove
 	incompatible analyses.
 
 2008-11-17  Andy Buckley  <andy@insectnation.org>
 
 	* Added bash completion for rivetgun.
 
 	* Starting to provide stand-alone call methods on projections so
 	they can be used without the caching infrastructure. This could
 	also be handy for unit testing.
 
 	* Adding functionality (sorting function and built-in sorting
 	schemes) to the JetAlg interface.
 
 2008-11-10  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Fix floating point number output format in aida2flat and flat2aida
 
 	* Added CDF_2002_S4796047: CDF Run-I charged multiplicity distribution
 
 	* Renamed CDF_2008_MINBIAS to CDF_2008_NOTE_9337, since the
 	note is publicly available now.
 
 2008-11-10  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Added DELPHI_2003_WUD_03_11: Delphi 4-jet angular distributions.
 	There is still a problem with the analysis, so don't use it yet.
 	But I need to commit the code because my laptop is broken ...
 
 2008-11-06  Andy Buckley  <andy@insectnation.org>
 
 	* Code review: lots of tidying up of projections and analyses.
 
 	* Fixes for compatibility with the LLVM C & C++ compiler.
 
 	* Change of Particle interface to remove "get"-prefixed method
 	names.
 
 2008-11-05  Andy Buckley  <andy@insectnation.org>
 
 	* Adding ability to query analysis metadata from the command line.
 
	* Example of a plugin analysis now in plugindemo, with a make check
	test to make sure that the plugin analysis is recognised by the
	command line "rivet" tool.
 
 	* GCC 4.3 fix to mat-vec tests.
 
 2008-11-04  Andy Buckley  <andy@insectnation.org>
 
 	* Adding native logger control from Python interface.
 
 2008-11-03  Andy Buckley  <andy@insectnation.org>
 
 	* Adding bash_completion for rivet executable.
 
 2008-10-31  Andy Buckley  <andy@insectnation.org>
 
 	* Clean-up of histo titles and analysis code review.
 
 	* Added momentum construction functions from FastJet PseudoJets.
 
 2008-10-28  Andy Buckley  <andy@insectnation.org>
 
 	* Auto-booking of histograms with a name, rather than the HepData
 	ID 3-tuple is now possible.
 
 	* Fix in CDF 2001 pT spectra to get the normalisations to depend
 	on the pT_lead cutoff.
 
 2008-10-23  Andy Buckley  <andy@insectnation.org>
 
 	* rivet handles signals neatly, as for rivetgun, so that premature
 	killing of the analysis process will still result in an analysis
 	file.
 
 	* rivet now accepts cross-section as a command line argument and,
 	if it is missing and is required, will prompt the user for it
 	interactively.
 
 2008-10-22  Andy Buckley  <andy@insectnation.org>
 
 	* rivet (Python interface) now can list analyses, check when
 	adding analyses that the given names are valid, specify histo file
 	name, and provide sensibly graded event number logging.
 
 2008-10-20  Andy Buckley  <andy@insectnation.org>
 
 	* Corrections to CDF 2004 analysis based on correspondance with
 	Joey Huston. M bias dbns now use whole event within |eta| < 0.7,
 	and Cheese plots aren't filled at all if there are insufficient
 	jets (and the correct ETlead is used).
 
 2008-10-08  Andy Buckley  <andy@insectnation.org>
 
 	* Added AnalysisHandler::commitData() method, to allow the Python
 	interface to write out a histo file without having to know
 	anything about the histogramming API.
 
 	* Reduced SWIG interface file to just map a subset of Analysis and
 	AnalysisHandler functionality. This will be the basis for a new
 	command line interface.
 
 2008-10-06  Andy Buckley  <andy@insectnation.org>
 
 	* Converted FastJets plugin to use a Boost shared_pointer to the
 	cached ClusterSequence. The nullness of the pointer is now used to
 	indicate an empty tracks (and hence jets) set. Once FastJet
 	natively support empty CSeqs, we can rewrite this a bit more
 	neatly and ditch the shared_ptr.
 
 2008-10-02  Andy Buckley  <andy@insectnation.org>
 
 	* The CDF_2004 (Acosta) data file now includes the full range of
 	pT for the min bias data at both 630 and 1800 GeV. Previously,
 	only the small low-pT insert plot had been entered into HepData.
 
 2008-09-30  Andy Buckley  <andy@insectnation.org>
 
 	* Lots of updates to CDF_2004 (Acosta) UE analysis, including
 	sorting jets by E rather than Et, and factorising transverse cone
 	code into a function so that it can be called with a random
 	"leading jet" in min bias mode. Min bias histos are now being
 	trial-filled just with tracks in the transverse cones, since the
 	paper is very unclear on this.
 
 	* Discovered a serious caching problem in FastJets projection when
 	an empty tracks vector is passed from the
 	FinalState. Unfortunately, FastJet provides no API way to solve
 	the problem, so we'll have to report this upstream. For now, we're
 	solving this for CDF_2004 by explicitly vetoing events with no
 	tracks.
 
 	* Added Doxygen to the build with target "dox"
 
 	* Moved detection of whether cross-section information is needed
 	into the AnalysisHandler, with dynamic computation by scanning
 	contained analyses.
 
 	* Improved robustness of event reading to detect properly when the
 	input file is smaller than expected.
 
 2008-09-29  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* New analysis: CDF_2000_S4155203
 
 2008-09-23  Andy Buckley  <andy@insectnation.org>
 
 	* rivetgun can now be built and run without AGILe. Based on a
 	patch by Frank Siegert.
 
 2008-09-23  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Some preliminary numbers for the CDF_2008_LEADINGJETS analysis
 	(only transverse region and not all observables. But all we have now.)
 
 2008-09-17  Andy Buckley  <andy@insectnation.org>
 
 	* Breaking up the mammoth "generate" function, to make Python
 	mapping easier, among other reasons.
 
 	* Added if-zero-return-zero checking to angle mapping functions,
 	to avoid problems where 1e-19 gets mapped on to 2 pi and then
 	fails boundary asserts.
 
 	* Added HistoHandler singleton class, which will be a central
 	repository for holding analyses' histogram objects to be accessed
 	via a user-chosen name.
 
 2008-08-26  Andy Buckley  <andy@insectnation.org>
 
 	* Allowing rivet-config to return combined flags.
 
 2008-08-14  Andy Buckley  <andy@insectnation.org>
 
 	* Fixed some g++ 4.3 compilation bugs, including "vector" not
 	being a valid name for a method which returns a physics vector,
 	since it clashes with std::vector (which we globally import). Took
 	the opportunity to rationalise the Jet interface a bit, since
 	"particle" was used to mean "FourMomentum", and "Particle" types
 	required a call to "getFullParticle". I removed the "gets" at the
 	same time, as part of our gradual migration to a coherent naming
 	policy.
 
 2008-08-11  Andy Buckley  <andy@insectnation.org>
 
 	* Tidying of FastJets and added new data files from HepData.
 
 2008-08-10  James Monk  <jmonk@hep.ucl.ac.uk>
 
 	* FastJets now uses user_index property of fastjet::PseudoJet to
 	reconstruct PID information in jet contents.
 
 2008-08-07  Andy Buckley  <andy@insectnation.org>
 
 	* Reworking of param file and command line parsing. Tab characters
 	are now handled by the parser, in a way equivalent to spaces.
 
 2008-08-06  Andy Buckley  <andy@insectnation.org>
 
 	* Added extra histos and filling to Acosta analysis - all HepData
 	histos should now be filled, depending on sqrt{s}. Also trialling
 	use of LaTeX math mode in titles.
 
 2008-08-05  Andy Buckley  <andy@insectnation.org>
 
 	* More data files for CDF analyses (2 x 2008, 1 x 1994), and moved
 	the RivetGun AtlasPythia6.params file to more standard
 	fpythia-atlas.params (and added to the install list).
 
 2008-08-04  Andy Buckley  <andy@insectnation.org>
 
 	* Reduced size of available options blocks in RivetGun help text
 	by removing "~" negating variants (which are hardly ever used in
 	practice) and restricting beam particles to
 	PROTON, ANTIPROTON,ELECTRON and POSITRON.
 
 	* Fixed Et sorting in Acosta analysis.
 
 2008-08-01  Andy Buckley  <andy@insectnation.org>
 
 	* Added AIDA headers to the install list, since
 	external (plugin-type) analyses need them to be present for
 	compilation to succeed.
 
 2008-07-29  Andy Buckley  <andy@insectnation.org>
 
 	* Fixed missing ROOT compile flags for libRivet.
 
 	* Added command line repetition to logging.
 
 2008-07-29  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Included the missing numbers and three more observables
 	in the CDF_2008_NOTE_9351 analysis.
 
 2008-07-29  Andy Buckley  <andy@insectnation.org>
 
 	* Fixed wrong flags on rivet-config
 
 2008-07-28  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Renamed CDF_2008_DRELLYAN to CDF_2008_NOTE_9351. Updated
 	numbers and cuts to the final version of this CDF note.
 
2008-07-28  Andy Buckley  <andy@insectnation.org>
 
	* Fixed polar angle calculation to use atan2.
 
 	* Added "mk" prefixes and x/setX convention to math classes.
 
 2008-07-28  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Fixed definition of FourMomentum::pT (it had been returning pT2)
 
 2008-07-27  Andy Buckley  <andy@insectnation.org>
 
 	* Added better tests for Boost headers.
 
 	* Added testing for -ansi, -pedantic and -Wall compiler flags.
 
 2008-07-25  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* updated DELPHI_2002_069_CONF_603 according to information
 	from the author
 
 2008-07-17  Andy Buckley  <andy@insectnation.org>
 
 	* Improvements to aida2flat: now can produce one output file per
 	histo, and there is a -g "gnuplot mode" which comments out the
 	YODA/make_plot headers to make the format readable by gnuplot.
 
 	* Import boost::assign namespace contents into the Rivet namespace
 	--- provides very useful intuitive collection initialising
 	functions.
 
 2008-07-15  Andy Buckley  <andy.buckley@dur.ac.uk>
 
 	* Fixed missing namespace in vector/matrix testing.
 
 	* Removed Boost headers: now a system dependency.
 
 	* Fixed polarRadius infinite loop.
 
 2008-07-09  Andy Buckley  <andy@insectnation.org>
 
 	* Fixed definitions of mapAngleMPiToPi, etc. and used them to fix
 	the Jet::getPhi method.
 
 	* Trialling use of "foreach" loop in CDF_2004: it works! Very nice.
 
 2008-07-08  Andy Buckley  <andy@insectnation.org>
 
 	* Removed accidental reference to an "FS" projection in
 	FinalStateHCM's compare method. rivetgun -A now works again.
 
 	* Added TASSO, SLD and D0_2008 reference data. The TASSO and SLD
 	papers aren't installed or included in the tarball since there are
 	currently no plans to implement these analyses.
 
 	* Added Rivet namespacing to vector, matrix etc. classes. This
 	required some re-writing and the opportunity was taken to move
 	some canonical function definitions inside the classes and to
 	improve the header structure of the Math area.
 
 2008-07-07  Andy Buckley  <andy@insectnation.org>
 
 	* Added Rivet namespace to Units.hh and Constants.hh.
 
 	* Added Doxygen "@brief" flags to analyses.
 
 	* Added "RIVET_" namespacing to all header guards.
 
 	* Merged Giulio Lenzi's isolation/vetoing/invmass projections and
 	D0 2008 analysis.
 
 2008-06-23  Jon Butterworth  <J.Butterworth@ucl.ac.uk>
 
 	* Modified FastJet to fix ysplit and split and filter.
 
 	* Modified ExampleTree to show how to call them.
 
 2008-06-19  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Added first version of the CDF_2008_DRELLYAN analysis described on
 	http://www-cdf.fnal.gov/physics/new/qcd/abstracts/UEinDY_08.html
 	There is a small difference between the analysis and this
 	implementation, but it's too small to be visible.
 	The fpythia-cdfdrellyan.params parameter file is for this analysis.
 
 	* Added first version of the CDF_2008_MINBIAS analysis described on
 	http://www-cdf.fnal.gov/physics/new/qcd/abstracts/minbias_08.html
 	The .aida file is read from the plots on the web and will change.
 	I'm still discussing some open questions about the analysis with
 	the author.
 
 2008-06-18  Jon Butterworth  <J.Butterworth@ucl.ac.uk>
 
 	* Added First versions of splitJet and filterJet methods to
 	fastjet.cc. Not yet tested, buyer beware.
 
 2008-06-18  Andy Buckley  <andy@insectnation.org>
 
 	* Added extra sorted Jets and Pseudojets methods to FastJets, and
 	added ptmin argument to the JetAlg getJets() method, requiring a
 	change to TrackJet.
 
 2008-06-13  Andy Buckley  <andy@insectnation.org>
 
 	* Fixed processing of "RG" parameters to ensure that invalid
 	iterators are never used.
 
 2008-06-10  Andy Buckley  <andy@insectnation.org>
 
 	* Updated AIDA reference files, changing "/HepData" root path to
 	"/REF". Still missing a couple of reference files due to upstream
 	problems with the HepData records.
 
 2008-06-09  Andy Buckley  <andy@insectnation.org>
 
 	* rivetgun now handles termination signals (SIGTERM, SIGINT and
 	SIGHUP) gracefully, finishing the event loop and finalising
 	histograms. This means that histograms will always get written
 	out, even if not all the requested events have been generated.
 
 2008-06-04  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Added DELPHI_2002_069_CONF_603 analysis
 
 2008-05-30  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* Added InitialQuarks projection
 	* Added OPAL_1998_S3780481 analysis
 
 2008-05-29  Andy Buckley  <andy@insectnation.org>
 
 	* distcheck compatibility fixes and autotools tweaks.
 
 2008-05-28  Andy Buckley  <andy@insectnation.org>
 
 	* Converted FastJet to use Boost smart_ptr for its plugin
 	handling, to solve double-delete errors stemming from the heap
 	cloning of projections.
 
 	* Added (a subset of) Boost headers, particularly the smart
 	pointers.
 
 2008-05-24  Andy Buckley  <andy@insectnation.org>
 
 	* Added autopackage spec files.
 
 	* Merged these changes into the trunk.
 
 	* Added a registerClonedProjection(...) method to
 	ProjectionHandler: this is needed so that cloned projections will
 	have valid pointer entries in the ProjectHandler repository.
 
	* Added clone() methods to all projections (need to use this,
	since the templated "new PROJ(proj)" approach to cloning can't
	handle object polymorphism).
 
 2008-05-19  Andy Buckley  <andy@insectnation.org>
 
 	* Moved projection-applying functions into ProjectionApplier base
 	class (from which Projection and Analysis both derive).
 
 	* Added Rivet-specific exceptions in place of std::runtime_error.
 
 	* Removed unused HepML reference files.
 
 	* Added error handling for requested analyses with wrong case
 	convention / missing name.
 
 2008-05-15  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* New analysis PDG_Hadron_Multiplicities
 
 	* flat2aida converter
 
 2008-05-15  Andy Buckley  <andy@insectnation.org>
 
 	* Removed unused mysterious Perl scripts!
 
 	* Added RivetGun.HepMC logging of HepMC event details.
 
 2008-05-14  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* New analysis DELPHI_1995_S3137023. This analysis contains
 	the xp spectra of Xi+- and Sigma(1385)+-.
 
 2008-05-13  Andy Buckley  <andy@insectnation.org>
 
	* Improved logging interface: log levels are now integers (for
	cross-library compatibility) and level setting also applies to
	existing loggers.
 
 2008-05-09  Andy Buckley  <andy@insectnation.org>
 
 	* Improvements to robustness of ROOT checks.
 
 	* Added --version flag on config scripts and rivetgun.
 
 2008-05-06  Hendrik Hoeth <hendrik.hoeth@cern.ch>
 
 	* New UnstableFinalState projection which selects all hadrons,
 	leptons and real photons including unstable particles.
 
	* In the DELPHI_1996_S3430090 analysis the multiplicities for
	pi+/pi- and pi0 are filled, using the UnstableFinalState projection.
 
 2008-05-06  Andy Buckley  <andy@insectnation.org>
 
 	* FastJets projection now protects against the case where no
 	particles exist in the final state (where FastJet throws an
 	exception).
 
 	* AIDA file writing is now separated from the
 	AnalysisHandler::finalize method... API users can choose what to
 	do with the histo objects, be that writing out or further
 	processing.
 
 2008-04-29  Andy Buckley  <andy@insectnation.org>
 
 	* Increased default tolerances in floating point comparisons as
 	they were overly stringent and valid f.p. precision errors were
 	being treated as significant.
 
 	* Implemented remainder of Acosta UE analysis.
 
 	* Added proper getEtSum() to Jet.
 
 	* Added Et2() member and function to FourMomentum.
 
 	* Added aida2flat conversion script.
 
 	* Fixed ambiguity in TrackJet algorithm as to how the iteration
 	continues when tracks are merged into jets in the inner loop.
 
 2008-04-28  Andy Buckley  <andy@insectnation.org>
 
 	* Merged in major "ProjectionHandler" branch. Projections are now
 	all stored centrally in the singleton ProjectionHandler container,
 	rather than as member pointers in projections and analyses. This
 	also affects the applyProjection mechanism, which is now available
 	as a templated method on Analysis and Projection. Still a few
 	wrinkles need to be worked out.
 
 	* The branch changes required a comprehensive review of all
 	existing projections and analyses: lots of tidying up of these
 	classes, as well as the auxiliary code like math utils, has taken
 	place. Too much to list and track, unfortunately!
 
 2008-03-28  Andy Buckley  <buckley@pc54.hep.ucl.ac.uk>
 
 	* Started second CDF UE analysis ("Acosta"): histograms defined.
 
 	* Fixed anomalous factor of 2 in LWH conversion from Profile1D
 	to DataPointSet.
 
 	* Added pT distribution histos to CDF 2001 UE analysis.
 
 2008-03-26  Andy Buckley  <andy@insectnation.org>
 
 	* Removed charged+neutral versions of histograms and projections
 	from DELPHI analysis since they just duplicate the more robust
 	charged-only measurements and aren't really of interest for
 	tuning.
 
 2008-03-10  Andy Buckley  <andy@insectnation.org>
 
 	* Profile histograms now use error computation with proper
 	weighting, as described here:
 	http://en.wikipedia.org/wiki/Weighted_average
 
 2008-02-28  Andy Buckley  <andy@insectnation.org>
 
 	* Added --enable-jade flag for Professor studies with patched
 	FastJet.
 
 	* Minor fixes to LCG tag generator and gfilt m4 macros.
 
 	* Fixed projection slicing issues with Field UE analysis.
 
 	* Added Analysis::vetoEvent(e) function, which keeps track of the
 	correction to the sum of weights due to event vetoing in analysis
 	classes.
 
 2008-02-26  Andy Buckley  <andy@insectnation.org>
 
 	* Vector<N> and derived classes now initialise to have zeroed
 	components when the no-arg constructor is used.
 
 	* Added Analysis::scale() function to scale 1D
 	histograms. Analysis::normalize() uses it internally, and the
 	DELPHI (A)EEC, whose histo weights are not pure event weights, and
 	normalised using scale(h, 1/sumEventWeights).
 
 2008-02-21  Hendrik Hoeth  <hendrik.hoeth@cern.ch>
 
 	* Added EEC and AEEC to the DELPHI_1996_S3430090 analysis. The
 	normalisation of these histograms is still broken (ticket #163).
 
 2008-02-19  Hendrik Hoeth  <hendrik.hoeth@cern.ch>
 
	* Many fixes to the DELPHI_1996_S3430090 analysis: bugfix in the
	calculation of eigenvalues/eigenvectors in MatrixDiag.hh for the
 	sphericity, rewrite of Thrust/Major/Minor, fixed scaled momentum,
 	hemisphere masses, normalisation in single particle events,
 	final state slicing problems in the projections for Thrust,
 	Sphericity and Hemispheres.
 
 2008-02-08  Andy Buckley  <andy@insectnation.org>
 
 	* Applied fixes and extensions to DIS classes, based on
 	submissions by Dan Traynor.
 
 2008-02-06  Andy Buckley  <andy@insectnation.org>
 
 	* Made projection pointers used for cut combining into const
 	pointers. Required some redefinition of the Projection* comparison
 	operator.
 
 	* Temporarily added FinalState member to ChargedFinalState to stop
 	projection lifetime crash.
 
 2008-02-01  Andy Buckley  <andy@insectnation.org>
 
 	* Fixed another misplaced factor of bin width in the
 	Analysis::normalize() method.
 
 2008-01-30  Andy Buckley  <andy@insectnation.org>
 
 	* Fixed the conversion of IHistogram1D to DPS, both via the
 	explicit Analysis::normalize() method and the implicit
 	AnalysisHandler::treeNormalize() route. The root of the problem is
 	the AIDA choice of the word "height" to represent the sum of
 	weights in a bin: i.e. the bin width is not taken into account
 	either in computing bin height or error.
 
 2008-01-22  Andy Buckley  <andy@insectnation.org>
 
 	* Beam projection now uses HepMC GenEvent::beam_particles() method
 	to get the beam particles. This is more portable and robust for
 	C++ generators, and equivalent to the existing "first two" method
 	for Fortran generators.
 
 2008-01-17  Andy Buckley  <andy@insectnation.org>
 
 	* Added angle range fix to pseudorapidity function (thanks to
 	Piergiulio Lenzi).
 
 2008-01-10  Andy Buckley  <andy@insectnation.org>
 
 	* Changed autobooking plot codes to use zero-padding (gets the
 	order right in JAS, file browser, ROOT etc.). Also changed the
 	'ds' part to 'd' for consistency. HepData's AIDA output has been
 	correspondingly updated, as have the bundled data files.
 
 2008-01-04  Andy Buckley  <andy@insectnation.org>
 
 	* Tidied up JetShape projection a bit, including making the
 	constructor params const references. This seems to have sorted the
 	runtime segfault in the CDF_2005 analysis.
 
 	* Added caching of the analysis bin edges from the AIDA file -
 	each analysis object will now only read its reference file once,
	which massively speeds up the rivetgun startup time for analyses
	with large numbers of autobooked histos (e.g. the
 	DELPHI_1996_S3430090 analysis).
 
 2008-01-02  Andy Buckley  <andy@insectnation.org>
 
 	* CDF_2001_S4751469 now uses the LossyFinalState projection, with
 	an 8% loss rate.
 
 	* Added LossyFinalState and HadronicFinalState, and fixed a
 	"polarity" bug in the charged final state projection (it was
 	keeping only the *uncharged* particles).
 
 	* Now using isatty(1) to determine whether or not color escapes
 	can be used. Also removed --color argument, since it can't have an
 	effect (TCLAP doesn't do position-based flag toggling).
 
 	* Made Python extension build optional (and disabled by default).
 
 2008-01-01  Andy Buckley  <andy@insectnation.org>
 
 	* Removed some unwanted DEBUG statements, and lowered the level of
 	some infrastructure DEBUGs to TRACE level.
 
 	* Added bash color escapes to the logger system.
 
 2007-12-21  Leif Lonnblad  <Leif.Lonnblad@thep.lu.se>
 
 	* include/LWH/ManagedObject.h: Fixed infinite loop in
 	encodeForXML cf. ticket #135.
 
 2007-12-20  Andy Buckley  <andy@insectnation.org>
 
 	* Removed HepPID, HepPDT and Boost dependencies.
 
 	* Fixed XML entity encoding in LWH. Updated CDF_2007_S7057202
 	analysis to not do its own XML encoding of titles.
 
 2007-12-19  Andy Buckley  <andy@insectnation.org>
 
 	* Changed units header to set GeV = 1 (HepMC convention) and using
 	units in CDF UE analysis.
 
 2007-12-15  Andy Buckley  <andy@insectnation.org>
 
 	* Introduced analysis metadata methods for all analyses (and made
 	them part of the Analysis interface).
 
 2007-12-11  Andy Buckley  <andy@insectnation.org>
 
 	* Added JetAlg base projection for TrackJet, FastJet etc.
 
 2007-12-06  Andy Buckley  <andy@insectnation.org>
 
 	* Added checking for Boost library, and the standard Boost test
 	program for shared_ptr.
 
 	* Got basic Python interface running - required some tweaking
 	since Python and Rivet's uses of dlopen collide (another
 	RTLD_GLOBAL issue - see
 	http://muttley.hates-software.com/2006/01/25/c37456e6.html )
 
 2007-12-05  Andy Buckley  <andy@insectnation.org>
 
 	* Replaced all use of KtJets projection with FastJets
 	projection. KtJets projection disabled but left undeleted for
 	now. CLHEP and KtJet libraries removed from configure searches and
 	Makefile flags.
 
 2007-12-04  Andy Buckley  <andy@insectnation.org>
 
 	* Param file loading now falls back to the share/RivetGun
 	directory if a local file can't be found and the provided name has
 	no directory separators in it.
 
 	* Converted TrackJet projection to update the jet centroid with
 	each particle added, using pT weighting in the eta and phi
 	averaging.
 
 2007-12-03  Andy Buckley  <andy@insectnation.org>
 
 	* Merged all command line handling functions into one large parse
 	function, since only one executable now needs them. This removes a
 	few awkward memory leaks.
 
 	* Removed rivet executable - HepMC file reading functionality will
 	move into rivetgun.
 
 	* Now using HepMC IO_GenEvent format (IO_Ascii and
 	IO_ExtendedAscii are deprecated). Now requires HepMC >= 2.3.0.
 
 	* Added forward declarations of GSL diagonalisation routines,
 	eliminating need for GSL headers to be installed on build machine.
 
 2007-11-27  Andy Buckley  <andy@insectnation.org>
 
 	* Removed charge differentiation from Multiplicity projection (use
 	CFS proj) and updated ExampleAnalysis to produce more useful numbers.
 
 	* Introduced binreloc for runtime path determination.
 
 	* Fixed several bugs in FinalState, ChargedFinalState, TrackJet
 	and Field analysis.
 
 	* Completed move to new analysis naming scheme.
 
 2007-11-26  Andy Buckley  <andy@insectnation.org>
 
 	* Removed conditional HAVE_FASTJET bits: FastJet is now compulsory.
 
 	* Merging appropriate RivetGun parts into Rivet. RivetGun currently broken.
 
 2007-11-23  Andy Buckley  <andy@insectnation.org>
 
 	* Renaming analyses to Spires-ID scheme: currently of form
 	S<SpiresID>, to become <Expt>_<YYYY>_<SpiresID>.
 
 2007-11-20  Andy Buckley  <andy@insectnation.org>
 
 	* Merged replacement vectors, matrices and boosts into trunk.
 
 2007-11-15  Leif Lonnblad  <Leif.Lonnblad@thep.lu.se>
 
 	* src/Analysis.cc, include/Rivet/Analysis.hh: Introduced normalize
 	function. See ticket #126.
 
 2007-10-31  Andy Buckley  <andy@insectnation.org>
 
  	* Tagging as 1.0b2 for HERA-LHC meeting.
 
 
 2007-10-25  Andy Buckley  <andy@insectnation.org>
 
 	* Added AxesDefinition base interface to Sphericity and Thrust,
 	used by Hemispheres.
 
 	* Exposed BinaryCut class, improved its interface and fixed a few
 	bugs. It's now used by VetoedFinalState for momentum cuts.
 
 	* Removed extra output from autobooking AIDA reader.
 
 	* Added automatic DPS booking.
 
 2007-10-12  Andy Buckley  <andy@insectnation.org>
 
 	* Improved a few features of the build system
 
 2007-10-09  James Monk
 
 	* Fixed dylib dlopen on Mac OS X.
 
 2007-10-05  Andy Buckley  <andy@insectnation.org>
 
 	* Added new reference files.
 
 
 2007-10-03  Andy Buckley  <andy@insectnation.org>
 
 	* Fixed bug in configure.ac which led to explicit CXX setting
 	being ignored.
 
 	* Including Logging.hh in Projection.hh, hence new transitive
 	dependency on Logging.hh being installed. Since this is the normal
 	behaviour, I don't think this is a problem.
 
 	* Fixed segfaulting bug due to use of addProjection() in
 	locally-scoped contained projections. This isn't a proper fix,
 	since the whole framework should be designed to avoid the
 	possibility of bugs like this.
 
 	* Added newly built HepML and AIDA reference files for current
 	analyses.
 
 2007-10-02  Andy Buckley  <andy@insectnation.org>
 
 	* Fixed possible null-pointer dereference in Particle copy
 	constructor and copy assignment: this removes one of two blocker
 	segfaults, the other of which is related to the copy-assignment of
 	the TotalVisMomentum projection in the ExampleTree analysis.
 
 2007-10-01  Andy Buckley  <andy@insectnation.org>
 
 	* Fixed portable path to Rivet share directory.
 
 2007-09-28  Andy Buckley  <andy@insectnation.org>
 
 	* Added more functionality to the rivet-config script: now has
 	libdir, includedir, cppflags, ldflags and ldlibs options.
 
 2007-09-26  Andy Buckley  <andy@insectnation.org>
 
 	* Added the analysis library closer function to the
 	AnalysisHandler finalize() method, and also moved the analysis
 	delete loop into AnalysisHandler::finalize() so as not to try
 	deleting objects whose libraries have already closed.
 
 	* Replaced the RivetPaths.cc.in method for portable paths with
 	something using -D defines - much simpler!
 
 2007-09-21  Lars Sonnenschein  <sonne@mail.cern.ch>
 
 	* Added HepEx0505013 analysis and JetShape projection (some fixes
 	by AB.)
 
 	* Added GetLorentzJets member function to D0 RunII cone jet projection
 
 2007-09-21  Andy Buckley  <andy@insectnation.org>
 
 	* Fixed lots of bugs and bad practice in HepEx0505013 (to make it
 	compile-able!)
 
 	* Downclassed the log messages from the Test analysis to DEBUG
 	level.
 
 	* Added isEmpty() method to final state projection.
 
 	* Added testing for empty final state and useful debug log
 	messages to sphericity projection.
 
 2007-09-20  Andy Buckley  <andy@insectnation.org>
 
 	* Added Hemispheres projection, which calculates event hemisphere
 	masses and broadenings.
 
 2007-09-19  Andy Buckley  <andy@insectnation.org>
 
 	* Added an explicit copy assignment operator to Particle: the
 	absence of one of these was responsible for the double-delete
 	error.
 
 	* Added a "fuzzy equals" utility function for float/double types
 	to Utils.hh (which already contains a variety of handy little
 	functions).
 
 	* Removed deprecated Beam::operator().
 
 	* Added ChargedFinalState projection and de-pointered the
 	contained FinalState projection in VetoedFinalState.
 
 2007-09-18  Andy Buckley  <andy@insectnation.org>
 
 	* Major bug fixes to the regularised version of the sphericity
 	projection (and hence the Parisi tensor projection). Don't trust
 	C & D param results from any previous version!
 
 	* Added extra methods to thrust and sphericity projections to get
 	the oblateness and the sphericity basis (currently returns dummy
 	axes since I can't yet work out how to get the similarity
 	transform eigenvectors from CLHEP)
 
 2007-09-14  Andy Buckley  <andy@insectnation.org>
 
 	* Merged in a branch of pluggable analysis mechanisms.
 
 2007-06-25  Jon Butterworth  <jmb@hep.ucl.ac.uk>
 
 	* Fixed some bugs in the root output for DataPoint.h
 
 2007-06-25  Andy Buckley  <andy@insectnation.org>
 
 	* include/Rivet/**/Makefile.am: No longer installing headers for
 	"internal" functionality.
 
 	* include/Rivet/Projections/*.hh: Removed the private restrictions
 	on copy-assignment operators.
 
 2007-06-18  Leif Lonnblad  <Leif.Lonnblad@thep.lu.se>
 
 	* include/LWH/Tree.h: Fixed minor bug in listObjectNames.
 
 	* include/LWH/DataPointSet.h: Fixed setCoordinate functions so
 	that they resize the vector of DataPoints if it initially was
 	empty.
 
 	* include/LWH/DataPoint.h: Added constructor taking a vector of
 	measurements.
 
 2007-06-16  Leif Lonnblad  <Leif.Lonnblad@thep.lu.se>
 
 	* include/LWH/Tree.h: Implemented the listObjectNames and ls
 	functions.
 
 	* include/Rivet/Projections/FinalStateHCM.hh,
 	include/Rivet/Projections/VetoedFinalState.hh: removed
 	_theParticles and corresponding access function. Use base class
 	variable instead.
 
 	* include/Rivet/Projections/FinalState.hh: Made _theParticles
 	protected.
 
 2007-06-13  Leif Lonnblad  <Leif.Lonnblad@thep.lu.se>
 
 	* src/Projections/FinalStateHCM.cc,
 	src/Projections/DISKinematics.cc: Equality checks using
 	GenParticle::operator== changed to check for pointer equality.
 
 	* include/Rivet/Analysis/HepEx9506012.hh: Uses modified DISLepton
 	projection.
 
 	* include/Rivet/Particle.hh: Added member function to check if a
 	GenParticle is associated.
 
 	* include/Rivet/Projections/DISLepton.hh,
 	src/Projections/DISLepton.cc: Fixed bug in projection. Introduced
 	final state projection to limit searching for scattered
 	lepton. Still not properly tested.
 
 2007-06-08  Leif Lonnblad  <Leif.Lonnblad@thep.lu.se>
 
 	* include/Rivet/Projections/PVertex.hh,
 	src/Projections/PVertex.cc: Fixed the projection to simply get the
 	signal_process_vertex from the GenEvent. This is the way it should
 	work. If the GenEvent does not have a signal_process_vertex
 	properly set up in this way, the problem is with the class that
 	fills the GenEvent.
 
 2007-06-06  Jon Butterworth  <jmb@hep.ucl.ac.uk>
 
 	* Merged TotalVisibleMomentum and CalMET
 	* Added pT ranges to Vetoed final state projection
 
 2007-05-27  Jon Butterworth  <jmb@hep.ucl.ac.uk>
 
 	* Fixed initialization of VetoedFinalStateProjection in ExampleTree
 
 2007-05-27  Leif Lonnblad  <Leif.Lonnblad@thep.lu.se>
 
 	* include/Rivet/Projections/KtJets.*: Make sure the KtEvent is
 	deleted properly.
 
 2007-05-26  Jon Butterworth  <jmb@hep.ucl.ac.uk>
 
 	* Added leptons to the ExampleTree.
 	* Added TotalVisibleEnergy projection, and added output to ExampleTree.
 
 2007-05-25  Jon Butterworth  <jmb@hep.ucl.ac.uk>
 
 	* Added a charged lepton projection
 
 2007-05-23  Andy Buckley  <andy@insectnation.org>
 
 	* src/Analysis/HepEx0409040.cc: Changed range of the histograms to
 	the "pi" range rather than the "128" range.
 
 	* src/Analysis/Analysis.cc: Fixed a bug in the AIDA path building.
 	Histogram auto-booking now works.
 
 2007-05-23  Leif Lonnblad  <Leif.Lonnblad@thep.lu.se>
 
 	* src/Analysis/HepEx9506012.cc: Now uses the histogram booking
 	function in the Analysis class.
 
 2007-05-23  Jon Butterworth  <jmb@hep.ucl.ac.uk>
 
 	* Fixed bug in PRD65092002 (was failing on zero jets)
 
 2007-05-23  Andy Buckley  <andy@insectnation.org>
 
 	* Added (but haven't properly tested) a VetoedFinalState projection.
 
 	* Added normalize() method for AIDA 1D histograms.
 
 	* Added configure checking for Mac OS X version, and setting the
 	development target flag accordingly.
 
 2007-05-22  Andy Buckley  <andy@insectnation.org>
 
 	* Added an ostream method for AnalysisName enums.
 
 	* Converted Analyses and Projections to use projection lists, cuts
 	and beam constraints.
 
 	* Added beam pair combining to the BeamPair sets of Projections
 	by finding set meta-intersections.
 
 	* Added methods to Cuts, Analysis and Projection to make Cut
 	definition easier.
 
 	* Fixed default fall-through in cut handling switch statement and
 	now using -numeric_limits<double>::max() rather than min()
 
 	* Added more control of logging presentation via static flag
 	methods on Log.
 
 2007-05-13  Andy Buckley  <andy@insectnation.org>
 
 	* Added self-consistency checking mechanisms for Cuts and Beam
 
 	* Re-implemented the cut-handling part of RivetInfo as a Cuts class.
 
 	* Changed names of Analysis and Projection name() and handler()
 	methods to getName() and getHandler() to be more consistent with
 	the rest of the public method names in those classes.
 
 2007-05-02  Andy Buckley  <andy@insectnation.org>
 
 	* Added auto-booking of histogram bins from AIDA XML files. The
 	AIDA files are located via a C++ function which is generated from
 	RivetPaths.cc.in by running configure.
 
 
 2007-04-18  Andy Buckley  <andy@insectnation.org>
 
 	* Added a preliminary version of the Rick Field UE analysis, under
 	the name PRD65092002.
 
 2007-04-19  Leif Lonnblad  <Leif.Lonnblad@thep.lu.se>
 
 	* src/Analysis/HepEx0409040.cc: The reason this did not compile
 	under gcc-4 is that some iterators into a vector were wrongly
 	assumed to be pointers and were initialized to 0 and later compared
 	to 0. I've changed this to initialize to end() of the
 	corresponding vector and to compare with the same end() later.
 
 2007-04-05  Andy Buckley  <andy@insectnation.org>
 
 	* Lots of name changes in anticipation of the MCNet
 	school. RivetHandler is now AnalysisHandler (since that's what it
 	does!), BeamParticle has become ParticleName, and RivetInfo has
 	been split into Cut and BeamConstraint portions.
 
 	* Added BeamConstraint mechanism, which can be used to determine
 	if an analysis is compatible with the beams being used in the
 	generator. The ParticleName includes an "ANY" wildcard for this
 	purpose.
 
 2006-03-19  Andy Buckley  <andy@insectnation.org>
 
 	* Added "rivet" executable which can read in HepMC ASCII dump
 	files and apply Rivet analyses on the events.
 
 2007-02-24  Leif Lonnblad  <Leif.Lonnblad@thep.lu.se>
 
 	* src/Projections/KtJets.cc: Added comparison of member variables
 	in compare() function
 
 	* all: Merged changes from polymorphic-projections branch into
 	trunk
 
 2007-02-17  Leif Lonnblad  <Leif.Lonnblad@thep.lu.se>
 
 	* all: projections and analysis handlers: All projections which
 	use other projections now have a pointer rather than a copy of
 	those projections to allow for polymorphism. The constructors have
 	also been changed to require the used projections themselves,
 	rather than the arguments needed to construct them.
 
 2007-02-17  Leif Lonnblad  <Leif.Lonnblad@thep.lu.se>
 
 	* src/Projections/FinalState.cc,
 	include/Rivet/Projections/FinalState.icc (Rivet),
 	include/Rivet/Projections/FinalState.hh: Added cut in transverse
 	momentum on the particles to be included in the final state.
 
 2007-02-06  Leif Lonnblad  <Leif.Lonnblad@thep.lu.se>
 
 	* include/LWH/HistogramFactory.h: Fixed divide-by-zero in divide
 	function. Also fixed bug in error calculation in divide
 	function. Introduced checkBin function to make sure two histograms
 	are equal even if they have variable bin widths.
 
 	* include/LWH/Histogram1D.h: In normalize(double), do not do anything
 	if the sum of the bins are zero to avoid dividing by zero.
 
 2007-01-20  Leif Lonnblad  <Leif.Lonnblad@thep.lu.se>
 
 	* src/Test/testLWH.cc: Modified to output files using the Tree.
 
 	* configure.ac: Removed AC_CONFIG_AUX_DIR([include/Rivet/Config])
 	since the directory does not exist anymore.
 
 2006-12-21  Andy Buckley  <andy@insectnation.org>
 
 	* Rivet will now conditionally install the AIDA and LWH headers if
 	it can't find them when configure'ing.
 
 	* Started integrating Leif's LWH package to fulfill the AIDA
 	duties.
 
 	* Replaced multitude of CLHEP wrapper headers with a single
 	RivetCLHEP.h header.
 
 2006-11-20  Andy Buckley  <andy@insectnation.org>
 
 	* Introduced log4cpp logging.
 
 	* Added analysis enum, which can be used as input to an analysis
 	factory by Rivet users.
 
 2006-11-02  Andy Buckley  <andy@insectnation.org>
 
 	* Yet more, almost pointless, administrative moving around of
 	things with the intention of making the structure a bit
 	better-defined:
 
 	* The RivetInfo and RivetHandler classes have been
 	moved from src/Analysis into src as they are really the main Rivet
 	interface classes. The Rivet.h header has also been moved into the
 	"header root".
 
 	* The build of a single shared library in lib has been disabled,
 	with the library being built instead in src.
 
 2006-10-14  Andy Buckley  <andy@insectnation.org>
 
 	* Introduced a minimal subset of the Sherpa math tools, such as
 	Vector{3,4}D, Matrix, etc. The intention is to eventually cut the
 	dependency on CLHEP.
 
 2006-07-28  Andy Buckley  <andy@insectnation.org>
 
 	* Moving things around: all sources now in directories under src
 
 2006-06-04  Leif Lonnblad  <Leif.Lonnblad@thep.lu.se>
 
 	* Analysis/Examples/HZ95108.*: Now uses CentralEtHCM. Also set GeV
 	units on the relevant histograms.
 
 	* Projections/CentralEtHCM.*: Making a special class just to get
 	out one number - the summed Et in the central rapidity bin - may
 	seem like overkill. But in case someone else might need it...
 
 2006-06-03  Leif Lonnblad  <Leif.Lonnblad@thep.lu.se>
 
 	* Analysis/Examples/HZ95108.*: Added the hz95108 energy flow
 	analysis from HZtool.
 
 	* Projections/DISLepton.*: Since many HERA measurements do not
 	care if we have electron or positron beam, it is now possible to
 	specify lepton or anti-lepton.
 
 	* Projections/Event.*: Added member and access function for the
 	weight of an event (taken from the GenEvent object.weights()[0].
 
 	* Analysis/RivetHandler.*: Now depends explicitly on the AIDA
 	interface. An AIDA analysis factory must be specified in the
 	constructor, where a tree and histogram factory is automatically
 	created. Added access functions to the relevant AIDA objects.
 
 	* Analysis/AnalysisBase.*: Added access to the RivetHandler and
 	its AIDA factories.
 
 2005-12-27  Leif Lonnblad  <Leif.Lonnblad@thep.lu.se>
 
 	* configure.ac: Added -I$THEPEGPATH/include to AM_CPPFLAGS.
 
 	* Config/Rivet.h: Added some std includes and using std::
 	declaration.
 
 	* Analysis/RivetInfo.*: Fixed some bugs. The RivetInfo facility
 	now works, although it has not been thoroughly tested.
 
 	* Analysis/Examples/TestMultiplicity.*: Re-introduced
 	FinalStateHCM for testing purposes but commented it away again.
 
 	* .: Made a number of changes to implement handling of RivetInfo
 	objects.
diff --git a/bin/make-plots b/bin/make-plots
--- a/bin/make-plots
+++ b/bin/make-plots
@@ -1,2743 +1,2743 @@
 #! /usr/bin/env python
 
 """\
 Usage: %prog [options] file.dat [file2.dat ...]
 
 TODO
  * Optimise output for e.g. lots of same-height bins in a row
  * Add a RatioFullRange directive to show the full range of error bars + MC envelope in the ratio
  * Tidy LaTeX-writing code -- faster to compile one doc only, then split it?
  * Handle boolean values flexibly (yes, no, true, false, etc. as well as 1, 0)
 """
 
 ##
 ## This program is copyright by Hendrik Hoeth <hoeth@linta.de> and
 ## the Rivet team https://rivet.hepforge.org. It may be used
 ## for scientific and private purposes. Patches are welcome, but please don't
 ## redistribute changed versions yourself.
 ##
 
 ## Check the Python version
 import sys
 if sys.version_info[:3] < (2,6,0):
     print "make-plots requires Python version >= 2.6.0... exiting"
     sys.exit(1)
 
 ## Try to rename the process on Linux
 try:
     import ctypes
     libc = ctypes.cdll.LoadLibrary('libc.so.6')
     libc.prctl(15, 'make-plots', 0, 0, 0)
 except Exception, e:
     pass
 
 
 import os, logging, re
 import tempfile
 import getopt
 import string
 from math import *
 
 
 ## Regex patterns
 pat_begin_block = re.compile(r'^#+\s*BEGIN ([A-Z0-9_]+) ?(\S+)?')
 pat_end_block =   re.compile('^#+\s*END ([A-Z0-9_]+)')
 pat_comment = re.compile('^#|^\s*$')
 pat_property = re.compile('^(\w+?)=(.*)$')
 pat_path_property  = re.compile('^(\S+?)::(\w+?)=(.*)$')
 
 
 def fuzzyeq(a, b, tolerance=1e-6):
     "Fuzzy equality comparison function for floats, with given fractional tolerance"
     # if type(a) is not float or type(a) is not float:
     #     print a, b
     if (a == 0 and abs(b) < 1e-12) or (b == 0 and abs(a) < 1e-12):
         return True
     return 2.0*abs(a-b)/abs(a+b) < tolerance
 
 def inrange(x, a, b):
     return x >= a and x < b
 
 def floatify(x):
     if type(x) is str:
         x = x.split()
     if not hasattr(x, "__len__"):
         x = [x]
     x = [float(a) for a in x]
     return x[0] if len(x) == 1 else x
 
 def floatpair(x):
     if type(x) is str:
         x = x.split()
     if hasattr(x, "__len__"):
         assert len(x) == 2
         return [float(a) for a in x]
     return [float(x), float(x)]
 
 
 def is_end_marker(line, blockname):
     m = pat_end_block.match(line)
     return m and m.group(1) == blockname
 
 def is_comment(line):
     return pat_comment.match(line) is not None
 
 
 
 class Described(object):
     "Inherited functionality for objects holding a 'description' dictionary"
 
     def __init__(self):
         pass
 
     def has_attr(self, key):
         return self.description.has_key(key)
 
     def set_attr(self, key, val):
         self.description[key] = val
 
     def attr(self, key, default=None):
         return self.description.get(key, default)
 
     def attr_bool(self, key, default=None):
         x = self.attr(key, default)
         if x is None: return None
         if str(x).lower() in ["1", "true", "yes", "on"]: return True
         if str(x).lower() in ["0", "false", "no", "off"]: return False
         return None
 
     def attr_int(self, key, default=None):
         x = self.attr(key, default)
         try:
             x = int(x)
         except:
             x = None
         return x
 
     def attr_float(self, key, default=None):
         x = self.attr(key, default)
         try:
             x = float(x)
         except:
             x = None
         return x
 
 
 
 class InputData(Described):
 
     def __init__(self, filename):
         self.filename = filename+".dat"
         self.histos = {}
         self.special = {}
         self.functions = {}
 
         self.description = {}
         self.pathdescriptions = []
 
         self.is2dim = False
         f = open(self.filename)
         for line in f:
             m = pat_begin_block.match(line)
             if m:
                 name, path = m.group(1,2)
                 if path is None and name != 'PLOT':
                     raise Exception('BEGIN sections need a path name.')
 
                 ## Pass the reading of the block to separate functions
                 if name == 'PLOT':
                     self.read_input(f);
                 elif name == 'SPECIAL':
                     self.special[path] = Special(f)
                 elif name == 'HISTOGRAM' or name == 'HISTOGRAM2D':
                     self.histos[path] = Histogram(f, p=path)
                     # self.histos[path].path = path
                     self.description['is2dim'] = self.histos[path].is2dim
                 elif name == 'HISTO1D':
                     self.histos[path] = Histo1D(f, p=path)
                 elif name == 'HISTO2D':
                     self.histos[path] = Histo2D(f, p=path)
                     self.description['is2dim'] = True
                 elif name == 'FUNCTION':
                     self.functions[path] = Function(f)
 #            elif is_comment(line):
 #                continue
 #            else:
 #                self.read_path_based_input(line)
         f.close()
 
         self.apply_config_files(opts.CONFIGFILES)
 
         ## Plot (and subplot) sizing
         # TODO: Use attr functions and bools properly
         self.description.setdefault('PlotSizeX', 10.)
         if self.description['is2dim']:
             self.description['PlotSizeX'] -= 1.7
             self.description['MainPlot'] = '1'
             self.description['RatioPlot'] = '0'
 
         if self.description.has_key('PlotSize') and self.description['PlotSize']!='':
             plotsizes = self.description['PlotSize'].split(',')
             self.description['PlotSizeX'] = float(plotsizes[0])
             self.description['PlotSizeY'] = float(plotsizes[1])
             if len(plotsizes) == 3:
                 self.description['RatioPlotSizeY'] = float(plotsizes[2])
             del self.description['PlotSize']
 
         if self.description.get('MainPlot', '1') == '0':
             ## Ratio, no main
             self.description['RatioPlot'] = '1' #< don't allow both to be zero!
             self.description['PlotSizeY'] = 0.
             self.description.setdefault('RatioPlotSizeY', 9.)
         else:
             if self.description.get('RatioPlot', '0') == '1':
                 ## Main and ratio
                 self.description.setdefault('PlotSizeY', 6.)
                 self.description.setdefault('RatioPlotSizeY', self.description.get('RatioPlotYSize', 3.))
             else:
                 ## Main, no ratio
                 self.description.setdefault('PlotSizeY', self.description.get('PlotYSize', 9.))
                 self.description['RatioPlotSizeY'] = 0.
 
         ## Ensure numbers, not strings
         self.description['PlotSizeX'] = float(self.description['PlotSizeX'])
         self.description['PlotSizeY'] = float(self.description['PlotSizeY'])
         self.description['RatioPlotSizeY'] = float(self.description['RatioPlotSizeY'])
         # self.description['TopMargin'] = float(self.description['TopMargin'])
         # self.description['BottomMargin'] = float(self.description['BottomMargin'])
 
         self.description['LogX'] = self.description.has_key('LogX') and self.description['LogX']=='1'
         self.description['LogY'] = self.description.has_key('LogY') and self.description['LogY']=='1'
         self.description['LogZ'] = self.description.has_key('LogZ') and self.description['LogZ']=='1'
         if self.description.has_key('Rebin'):
             for i in self.histos:
                 self.histos[i].description['Rebin'] = self.description['Rebin']
 
         histoordermap = {}
         histolist = self.histos.keys()
         if self.description.has_key('DrawOnly'):
             histolist = filter(self.histos.keys().count, self.description['DrawOnly'].strip().split())
         for histo in histolist:
             order = 0
             if self.histos[histo].description.has_key('PlotOrder'):
                 order = int(self.histos[histo].description['PlotOrder'])
             if not order in histoordermap:
                 histoordermap[order] = []
             histoordermap[order].append(histo)
         sortedhistolist = []
         for i in sorted(histoordermap.keys()):
             sortedhistolist.extend(histoordermap[i])
         self.description['DrawOnly'] = sortedhistolist
 
 
         ## Inherit various values from histograms if not explicitly set
         for k in ['LogX', 'LogY', 'LogZ',
                   'XLabel', 'YLabel', 'ZLabel',
                   'XCustomMajorTicks', 'YCustomMajorTicks', 'ZCustomMajorTicks']:
             self.inherit_from_histos(k)
 
         return
 
 
     @property
     def is2dim(self):
         return self.attr_bool("is2dim", False)
     @is2dim.setter
     def is2dim(self, val):
         self.set_attr("is2dim", val)
 
 
     @property
     def drawonly(self):
         x = self.attr("DrawOnly")
         if type(x) is str:
             self.drawonly = x #< use setter to listify
         return x if x else []
     @drawonly.setter
     def drawonly(self, val):
         if type(val) is str:
             val = val.strip().split()
         self.set_attr("DrawOnly", val)
 
 
     @property
     def stacklist(self):
         x = self.attr("Stack")
         if type(x) is str:
             self.stacklist = x #< use setter to listify
         return x if x else []
     @stacklist.setter
     def stacklist(self, val):
         if type(val) is str:
             val = val.strip().split()
         self.set_attr("Stack", val)
 
 
     @property
     def plotorder(self):
         x = self.attr("PlotOrder")
         if type(x) is str:
             self.plotorder = x #< use setter to listify
         return x if x else []
     @plotorder.setter
     def plotorder(self, val):
         if type(val) is str:
             val = val.strip().split()
         self.set_attr("PlotOrder", val)
 
 
     @property
     def plotsizex(self):
         return self.attr_float("PlotSizeX")
     @plotsizex.setter
     def plotsizex(self, val):
         self.set_attr("PlotSizeX", val)
 
     @property
     def plotsizey(self):
         return self.attr_float("PlotSizeY")
     @plotsizey.setter
     def plotsizey(self, val):
         self.set_attr("PlotSizeY", val)
 
     @property
     def plotsize(self):
         return [self.plotsizex, self.plotsizey]
     @plotsize.setter
     def plotsize(self, val):
         if type(val) is str:
             val = [float(x) for x in val.split(",")]
         assert len(val) == 2
         self.plotsizex = val[0]
         self.plotsizey = val[1]
 
     @property
     def ratiosizey(self):
         return self.attr_float("RatioPlotSizeY")
     @ratiosizey.setter
     def ratiosizey(self, val):
         self.set_attr("RatioPlotSizeY", val)
 
 
     @property
     def scale(self):
         return self.attr_float("Scale")
     @scale.setter
     def scale(self, val):
         self.set_attr("Scale", val)
 
 
     @property
     def xmin(self):
         return self.attr_float("XMin")
     @xmin.setter
     def xmin(self, val):
         self.set_attr("XMin", val)
 
     @property
     def xmax(self):
         return self.attr_float("XMax")
     @xmax.setter
     def xmax(self, val):
         self.set_attr("XMax", val)
 
     @property
     def xrange(self):
         return [self.xmin, self.xmax]
     @xrange.setter
     def xrange(self, val):
         if type(val) is str:
             val = [float(x) for x in val.split(",")]
         assert len(val) == 2
         self.xmin = val[0]
         self.xmax = val[1]
 
 
     @property
     def ymin(self):
         return self.attr_float("YMin")
     @ymin.setter
     def ymin(self, val):
         self.set_attr("YMin", val)
 
     @property
     def ymax(self):
         return self.attr_float("YMax")
     @ymax.setter
     def ymax(self, val):
         self.set_attr("YMax", val)
 
     @property
     def yrange(self):
         return [self.ymin, self.ymax]
     @yrange.setter
     def yrange(self, val):
         if type(val) is str:
             val = [float(y) for y in val.split(",")]
         assert len(val) == 2
         self.ymin = val[0]
         self.ymax = val[1]
 
 
     # TODO: add more rw properties for plotsize(x,y), ratiosize(y),
     #   show_mainplot, show_ratioplot, show_legend, log(x,y,z), rebin,
     #   drawonly, legendonly, plotorder, stack,
     #   label(x,y,z), majorticks(x,y,z), minorticks(x,y,z),
     #   min(x,y,z), max(x,y,z), range(x,y,z)
 
 
     def inherit_from_histos(self, k):
         """Note: this will inherit the key from a random histogram:
         only use if you're sure all histograms have this key!"""
         if not self.description.has_key(k):
             h = list(self.histos.itervalues())[0]
             if h.description.has_key(k):
                 self.description[k] = h.description[k]
 
 
     def read_input(self, f):
         for line in f:
             if is_end_marker(line, 'PLOT'):
                 break
             elif is_comment(line):
                 continue
             m = pat_property.match(line)
             if m:
                 prop, value = m.group(1,2)
                 if prop in self.description:
                     logging.debug("Overwriting property %s = %s -> %s" % (prop, self.description[prop], value))
                 ## Use strip here to deal with DOS newlines containing \r
                 self.description[prop.strip()] = value.strip()
 
 
     def apply_config_files(self, conffiles):
         if conffiles is not None:
             for filename in conffiles:
                 cf = open(filename,'r')
                 lines = cf.readlines()
                 for i in range(0, len(lines)):
                     ## First evaluate PLOT sections
                     m = pat_begin_block.match(lines[i])
                     if m and m.group(1) == 'PLOT' and re.match(m.group(2),self.filename):
                         while i<len(lines)-1:
                             i = i+1
                             if is_end_marker(lines[i], 'PLOT'):
                                 break
                             elif is_comment(lines[i]):
                                 continue
                             m = pat_property.match(lines[i])
                             if m:
                                 prop, value = m.group(1,2)
                                 if prop in self.description:
                                     logging.debug("Overwriting from conffile property %s = %s -> %s" % (prop, self.description[prop], value))
                                 ## Use strip here to deal with DOS newlines containing \r
                                 self.description[prop.strip()] = value.strip()
                     elif is_comment(lines[i]):
                         continue
                     else:
                         ## Then evaluate path-based settings, e.g. for HISTOGRAMs
                         m = pat_path_property.match(lines[i])
                         if m:
                             regex, prop, value = m.group(1,2,3)
                             for obj_dict in [self.special, self.histos, self.functions]:
                                 for path, obj in obj_dict.iteritems():
                                     if re.match(regex, path):
                                         ## Use strip here to deal with DOS newlines containing \r
                                         obj.description.update({prop.strip() : value.strip()})
                 cf.close()
 
 
 
 class Plot(object):
 
     def __init__(self, inputdata):
         pass
 
     def set_normalization(self,inputdata):
         for method in ['NormalizeToIntegral', 'NormalizeToSum']:
             if inputdata.description.has_key(method):
                 for i in inputdata.drawonly:
                     if not inputdata.histos[i].has_attr(method):
                         inputdata.histos[i].set_attr(method, inputdata.attr(method))
         if inputdata.scale:
             for i in inputdata.drawonly:
                 inputdata.histos[i].scale = inputdata.scale
         for i in inputdata.drawonly:
             inputdata.histos[i].mangle_input()
 
     def stack_histograms(self,inputdata):
         if inputdata.description.has_key('Stack'):
             stackhists = [h for h in inputdata.attr('Stack').strip().split() if h in inputdata.histos]
             previous = ''
             for i in stackhists:
                 if previous != '':
                     inputdata.histos[i].add(inputdata.histos[previous])
                 previous = i
 
     def set_histo_options(self,inputdata):
         if inputdata.description.has_key('ConnectGaps'):
             for i in inputdata.histos.keys():
                 if not inputdata.histos[i].description.has_key('ConnectGaps'):
                     inputdata.histos[i].description['ConnectGaps'] = inputdata.description['ConnectGaps']
 
     def set_borders(self, inputdata):
         self.set_xmax(inputdata)
         self.set_xmin(inputdata)
         self.set_ymax(inputdata)
         self.set_ymin(inputdata)
         self.set_zmax(inputdata)
         self.set_zmin(inputdata)
         inputdata.description['Borders'] = (self.xmin, self.xmax, self.ymin, self.ymax, self.zmin, self.zmax)
 
     def set_xmin(self, inputdata):
         self.xmin = inputdata.xmin
         if self.xmin is None:
             self.xmin = min(inputdata.histos[h].getXMin() for h in inputdata.description['DrawOnly'])
 
     def set_xmax(self,inputdata):
         self.xmax = inputdata.xmax
         if self.xmax is None:
             self.xmax = min(inputdata.histos[h].getXMax() for h in inputdata.description['DrawOnly'])
 
    def set_ymin(self,inputdata):
        # Lower y-axis bound.  An explicit inputdata.ymin wins; otherwise it
        # is derived from the histogram minima within the x-window.
        # Relies on self.ymax already being set (set_borders() calls
        # set_ymax() first) and on the module-level 'opts' for FULL_RANGE.
        if inputdata.ymin is not None:
            self.ymin = inputdata.ymin
        else:
            ymins = [inputdata.histos[i].getYMin(self.xmin, self.xmax, inputdata.description['LogY']) for i in inputdata.attr('DrawOnly')]
            minymin = min(ymins)
            if inputdata.description['is2dim']:
                self.ymin = minymin
            else:
                showzero = inputdata.attr_bool("ShowZero", True)
                if showzero:
                    # Anchor at zero unless the data go significantly negative.
                    self.ymin = 0. if minymin > -1e-4 else 1.1*minymin
                else:
                    # Pad away from the data: 10% below negatives, 10% under
                    # positives, exactly 0 for near-zero minima.
                    self.ymin = 1.1*minymin if minymin < -1e-4 else 0 if minymin < 1e-4 else 0.9*minymin
                if inputdata.description['LogY']:
                    # Only positive values are usable on a log axis; if none
                    # exist, fabricate a small floor relative to ymax.
                    ymins = [ymin for ymin in ymins if ymin > 0.0]
                    if not ymins:
                        if self.ymax == 0:
                            self.ymax = 1
                        ymins.append(2e-7*self.ymax)
                    minymin = min(ymins)
                    fullrange = opts.FULL_RANGE
                    if inputdata.has_attr('FullRange'):
                        fullrange = inputdata.attr_bool('FullRange')
                    # FullRange shows everything; otherwise clamp the span to
                    # ~6-7 decades below ymax.
                    self.ymin = minymin/1.7 if fullrange else max(minymin/1.7, 2e-7*self.ymax)

                if self.ymin == self.ymax:
                    # Degenerate range: open it up so the frame is drawable.
                    self.ymin -= 1
                    self.ymax += 1
 
     def set_ymax(self,inputdata):
         if inputdata.has_attr('YMax'):
             self.ymax = inputdata.attr_float('YMax')
         else:
             self.ymax = max(inputdata.histos[h].getYMax(self.xmin, self.xmax) for h in inputdata.attr('DrawOnly'))
             if not inputdata.is2dim:
                 self.ymax *= (1.7 if inputdata.attr_bool('LogY') else 1.1)
 
     def set_zmin(self,inputdata):
         if inputdata.has_attr('ZMin'):
             self.zmin = inputdata.attr_float('ZMin')
         else:
             zmins = [inputdata.histos[i].getZMin(self.xmin, self.xmax, self.ymin, self.ymax) for i in inputdata.attr('DrawOnly')]
             minzmin = min(zmins)
             self.zmin = minzmin
             if zmins:
                 showzero = inputdata.attr_bool('ShowZero', True)
                 if showzero:
                     self.zmin = 0 if minzmin > -1e-4 else 1.1*minzmin
                 else:
                     self.zmin = 1.1*minzmin if minzmin < -1e-4 else 0. if minzmin < 1e-4 else 0.9*minzmin
                 if inputdata.attr_bool('LogZ', False):
                     zmins = [zmin for zmin in zmins if zmin > 0]
                     if not zmins:
                         if self.zmax == 0:
                             self.zmax = 1
                         zmins.append(2e-7*self.zmax)
                     minzmin = min(zmins)
                     fullrange = inputdata.attr_bool("FullRange", opts.FULL_RANGE)
                     self.zmin = minzmin/1.7 if fullrange else max(minzmin/1.7, 2e-7*self.zmax)
 
                 if self.zmin == self.zmax:
                     self.zmin -= 1
                     self.zmax += 1
 
     def set_zmax(self,inputdata):
         self.zmax = inputdata.attr_float('ZMax')
         if self.zmax is None:
             zmaxs = [inputdata.histos[h].getZMax(self.xmin, self.xmax, self.ymin, self.ymax) for h in inputdata.attr('DrawOnly')]
             self.zmax = max(zmaxs) if zmaxs else 1
 
 
    def draw(self):
        # No-op placeholder: concrete subclasses (MainPlot, RatioPlot)
        # override draw() with their own rendering.
        pass
 
 
     def write_header(self,inputdata):
         if inputdata.description.has_key('LeftMargin') and inputdata.description['LeftMargin']!='':
             inputdata.description['LeftMargin'] = float(inputdata.description['LeftMargin'])
         else:
             inputdata.description['LeftMargin'] = 1.4
         if inputdata.description.has_key('RightMargin') and inputdata.description['RightMargin']!='':
             inputdata.description['RightMargin'] = float(inputdata.description['RightMargin'])
         else:
             inputdata.description['RightMargin'] = 0.35
         if inputdata.description.has_key('TopMargin') and inputdata.description['TopMargin']!='':
             inputdata.description['TopMargin'] = float(inputdata.description['TopMargin'])
         else:
             inputdata.description['TopMargin'] = 0.65
         if inputdata.description.has_key('BottomMargin') and inputdata.description['BottomMargin']!='':
             inputdata.description['BottomMargin'] = float(inputdata.description['BottomMargin'])
         else:
             inputdata.description['BottomMargin'] = 0.95
         if inputdata.description['is2dim']:
             inputdata.description['RightMargin'] += 1.7
         papersizex = inputdata.description['PlotSizeX'] + 0.1 + \
                      inputdata.description['LeftMargin'] + inputdata.description['RightMargin']
         papersizey = inputdata.description['PlotSizeY'] + inputdata.description['RatioPlotSizeY'] + 0.1 + \
                      inputdata.description['TopMargin'] + inputdata.description['BottomMargin']
         #
         out = ""
         out += '\\documentclass{article}\n'
         if opts.OUTPUT_FONT == "MINION":
             out += ('\\usepackage{minion}\n')
         elif opts.OUTPUT_FONT == "PALATINO_OSF":
             out += ('\\usepackage[osf,sc]{mathpazo}\n')
         elif opts.OUTPUT_FONT == "PALATINO":
             out += ('\\usepackage{mathpazo}\n')
         elif opts.OUTPUT_FONT == "TIMES":
             out += ('\\usepackage{mathptmx}\n')
         elif opts.OUTPUT_FONT == "HELVETICA":
             out += ('\\renewcommand{\\familydefault}{\\sfdefault}\n')
             out += ('\\usepackage{sfmath}\n')
             out += ('\\usepackage{helvet}\n')
             out += ('\\usepackage[symbolgreek]{mathastext}\n')
         for pkg in opts.LATEXPKGS:
             out += ('\\usepackage{%s}\n' % pkg)
         out += ('\\usepackage{pst-all}\n')
         out += ('\\usepackage{xcolor}\n')
         out += ('\\selectcolormodel{rgb}\n')
         out += ('\\definecolor{red}{HTML}{EE3311}\n') # (Google uses 'DC3912')
         out += ('\\definecolor{blue}{HTML}{3366FF}')
         out += ('\\definecolor{green}{HTML}{109618}')
         out += ('\\definecolor{orange}{HTML}{FF9900}')
         out += ('\\definecolor{lilac}{HTML}{990099}')
         out += ('\\usepackage{amsmath}\n')
         out += ('\\usepackage{amssymb}\n')
         out += ('\\usepackage{relsize}\n')
         out += ('\\usepackage[dvips,\n')
         out += ('  left=%4.3fcm, right=0cm,\n' % (inputdata.description['LeftMargin']-0.45,))
         out += ('  top=%4.3fcm,  bottom=0cm,\n' % (inputdata.description['TopMargin']-0.30,))
         out += ('  paperwidth=%scm,paperheight=%scm\n' % (papersizex,papersizey))
         out += (']{geometry}\n')
         out += ('\\begin{document}\n')
         out += ('\\pagestyle{empty}\n')
         out += ('\\SpecialCoor\n')
         out += ('\\begin{pspicture}(0,0)(0,0)\n')
         out += ('\\psset{xunit=%scm}\n' %(inputdata.description['PlotSizeX']))
         if inputdata.description['is2dim']:
             if inputdata.description.has_key('ColorSeries') and inputdata.description['ColorSeries']!='':
                 colorseries = inputdata.description['ColorSeries']
             else:
                 colorseries = '{hsb}{grad}[rgb]{0,0,1}{-.700,0,0}'
             out += ('\\definecolorseries{gradientcolors}%s\n' % colorseries)
             out += ('\\resetcolorseries[130]{gradientcolors}\n')
         return out
 
     def write_footer(self):
         out = ""
         out += ('\\end{pspicture}\n')
         out += ('\\end{document}\n')
         return out
 
 
 
 class MainPlot(Plot):
 
     def __init__(self, inputdata):
         self.set_normalization(inputdata)
         self.stack_histograms(inputdata)
         if (inputdata.description.has_key('GofLegend')  and inputdata.description['GofLegend']=='1') or \
            (inputdata.description.has_key('GofFrame')   and inputdata.description['GofFrame']!='') and not \
            (inputdata.description.has_key('TaylorPlot') and inputdata.description['TaylorPlot']=='1'):
             self.calculate_gof(inputdata)
         self.set_histo_options(inputdata)
         self.set_borders(inputdata)
         self.yoffset = inputdata.description['PlotSizeY']
         self.coors = Coordinates(inputdata)
 
     def draw(self, inputdata):
         out = ""
         out += ('\n%\n% MainPlot\n%\n')
         out += ('\\psset{yunit=%scm}\n' %(self.yoffset))
         out += ('\\rput(0,-1){%\n')
         out += ('\\psset{yunit=%scm}\n' %(inputdata.description['PlotSizeY']))
         out += self._draw(inputdata)
         out += ('}\n')
         return out
 
     def _draw(self, inputdata):
         out = ""
 
         # TODO: do this more compactly, e.g. by assigning sorting keys!
         if inputdata.attr_bool('DrawSpecialFirst', False):
             for s in inputdata.special:
                 out += s.draw(self.coors)
             if inputdata.attr_bool('DrawFunctionFirst', False):
                 for f in inputdata.functions:
                     out += f.draw(self.coors)
                 for i in inputdata.description['DrawOnly']:
                     out += inputdata.histos[i].draw(self.coors)
             else:
                 for i in inputdata.description['DrawOnly']:
                     out += inputdata.histos[i].draw(self.coors)
                 for f in inputdata.functions:
                     out += f.draw(self.coors)
         else:
             if inputdata.attr_bool('DrawFunctionFirst', False):
                 for f in inputdata.functions:
                     out += f.draw(self.coors)
                 for i in inputdata.description['DrawOnly']:
                     out += inputdata.histos[i].draw(self.coors)
             else:
                 for i in inputdata.description['DrawOnly']:
                     out += inputdata.histos[i].draw(self.coors)
                 for f in inputdata.functions:
                     out += f.draw(self.coors)
             for i in inputdata.special.keys():
                 out += inputdata.special[i].draw(self.coors)
 
         if inputdata.attr_bool('Legend', False):
             legend = Legend(inputdata.description,inputdata.histos,inputdata.functions)
             out += legend.draw()
         if inputdata.description['is2dim']:
             colorscale = ColorScale(inputdata.description, self.coors)
             out += colorscale.draw()
         frame = Frame()
         out += frame.draw(inputdata)
 
         xcustommajortickmarks = inputdata.attr_int('XMajorTickMarks', -1)
         xcustomminortickmarks = inputdata.attr_int('XMinorTickMarks', -1)
 
         xcustommajorticks = xcustomminorticks = None
-        if inputdata.has_attr('XCustomMajorTicks'):
+        if inputdata.attr('XCustomMajorTicks'):
             xcustommajorticks = []
-            x_label_pairs = inputdata.description['XCustomMajorTicks'].strip().split() #'\t')
+            x_label_pairs = inputdata.attr('XCustomMajorTicks').strip().split() #'\t')
             if len(x_label_pairs) % 2 == 0:
                 for i in range(0, len(x_label_pairs), 2):
                     xcustommajorticks.append({'Value': float(x_label_pairs[i]), 'Label': x_label_pairs[i+1]})
             else:
                 print "Warning: XCustomMajorTicks requires an even number of alternating pos/label entries"
 
-        if inputdata.has_attr('XCustomMinorTicks'):
+        if inputdata.attr('XCustomMinorTicks'):
             xs = inputdata.attr('XCustomMinorTicks').strip().split() #'\t')
             xcustomminorticks = [{'Value': float(x)} for x in xs]
 
         xticks = XTicks(inputdata.description, self.coors)
         drawxlabels = inputdata.attr_bool('PlotXTickLabels', True) and not inputdata.attr_bool('RatioPlot', False)
 
         out += xticks.draw(custommajortickmarks=xcustommajortickmarks,
                            customminortickmarks=xcustomminortickmarks,
                            custommajorticks=xcustommajorticks,
                            customminorticks=xcustomminorticks,
                            drawlabels=drawxlabels)
 
         ycustommajortickmarks = inputdata.attr_int('YMajorTickMarks', -1)
         ycustomminortickmarks = inputdata.attr_int('YMinorTickMarks', -1)
 
         ycustommajorticks = ycustomminorticks = None
         if inputdata.description.has_key('YCustomMajorTicks'):
             ycustommajorticks = []
             y_label_pairs = inputdata.description['YCustomMajorTicks'].strip().split() #'\t')
             if len(y_label_pairs) % 2 == 0:
                 for i in range(0, len(y_label_pairs), 2):
                     ycustommajorticks.append({'Value': float(y_label_pairs[i]), 'Label': y_label_pairs[i+1]})
             else:
                 print "Warning: YCustomMajorTicks requires an even number of alternating pos/label entries"
 
         if inputdata.has_attr('YCustomMinorTicks'):
             ys = inputdata.attr('YCustomMinorTicks').strip().split() #'\t')
             ycustomminorticks = [{'Value': float(y)} for y in ys]
 
         yticks = YTicks(inputdata.description, self.coors)
         drawylabels = inputdata.attr_bool('PlotYTickLabels', True)
 
         out += yticks.draw(custommajortickmarks=ycustommajortickmarks,
                            customminortickmarks=ycustomminortickmarks,
                            custommajorticks=ycustommajorticks,
                            customminorticks=ycustomminorticks,
                            drawlabels=drawylabels)
 
         labels = Labels(inputdata.description)
         if inputdata.attr_bool('RatioPlot', False):
             olab = labels.draw(['Title','YLabel'])
         else:
             if not inputdata.description['is2dim']:
                 olab = labels.draw(['Title','XLabel','YLabel'])
             else:
                 olab = labels.draw(['Title','XLabel','YLabel','ZLabel'])
         out += olab
         return out
 
 
     def calculate_gof(self, inputdata):
         refdata = inputdata.description.get('GofReference')
         if refdata is None:
             refdata = inputdata.description.get('RatioPlotReference')
 
         if refdata is None:
             inputdata.description['GofLegend'] = '0'
             inputdata.description['GofFrame'] = ''
             return
 
         def pickcolor(gof):
             color = None
             colordefs = {}
             for i in inputdata.description.setdefault('GofFrameColor', '0:green 3:yellow 6:red!70').strip().split():
                 foo = i.split(':')
                 if len(foo) != 2:
                     continue
                 colordefs[float(foo[0])] = foo[1]
             for i in sorted(colordefs.keys()):
                 if gof>=i:
                     color=colordefs[i]
             return color
 
         inputdata.description.setdefault('GofLegend', '0')
         inputdata.description.setdefault('GofFrame', '')
         inputdata.description.setdefault('FrameColor', None)
 
         for i in inputdata.description['DrawOnly']:
             if i == refdata:
                 continue
             if inputdata.description['GofLegend']!='1' and i!=inputdata.description['GofFrame']:
                 continue
 
             if inputdata.description.has_key('GofType') and inputdata.description['GofType']!='chi2':
                 return
             gof = inputdata.histos[i].getChi2(inputdata.histos[refdata])
             if i == inputdata.description['GofFrame'] and inputdata.description['FrameColor'] is None:
                 inputdata.description['FrameColor'] = pickcolor(gof)
             if inputdata.histos[i].description.setdefault('Title', '') != '':
                 inputdata.histos[i].description['Title'] += ', '
             inputdata.histos[i].description['Title'] += '$\\chi^2/n={}$%1.2f' %gof
 
 
 
class TaylorPlot(Plot):
    # Diagnostic-only "plot": prints per-curve statistics (mean/sigma of bin
    # values, chi2/nbins, correlation and RMS distance w.r.t. the reference
    # curve) to stdout.  Nothing is rendered.

    def __init__(self, inputdata):
        # The curve named by TaylorPlotReference is the comparison baseline.
        self.refdata = inputdata.description['TaylorPlotReference']
        self.calculate_taylorcoordinates(inputdata)

    def calculate_taylorcoordinates(self,inputdata):
        # Move the reference curve to the end of the draw list, then print
        # the Taylor-diagram quantities for every curve (reference included).
        foo = inputdata.description['DrawOnly'].pop(inputdata.description['DrawOnly'].index(self.refdata))
        inputdata.description['DrawOnly'].append(foo)
        for i in inputdata.description['DrawOnly']:
            print i
            print 'meanbinval  = ', inputdata.histos[i].getMeanBinValue()
            print 'sigmabinval = ', inputdata.histos[i].getSigmaBinValue()
            print 'chi2/nbins  = ', inputdata.histos[i].getChi2(inputdata.histos[self.refdata])
            print 'correlation = ', inputdata.histos[i].getCorrelation(inputdata.histos[self.refdata])
            print 'distance    = ', inputdata.histos[i].getRMSdistance(inputdata.histos[self.refdata])
 
 
 
 class RatioPlot(Plot):
 
     def __init__(self, inputdata):
 
         self.refdata = inputdata.description['RatioPlotReference']
         self.yoffset = inputdata.description['PlotSizeY'] + inputdata.description['RatioPlotSizeY']
 
         inputdata.description['RatioPlotStage'] = True
         inputdata.description['PlotSizeY'] = inputdata.description['RatioPlotSizeY']
         inputdata.description['LogY'] = False
 
         # TODO: It'd be nice it this wasn't so MC-specific
         if inputdata.description.has_key('RatioPlotMode') and inputdata.description['RatioPlotMode']=='deviation':
             inputdata.description['YLabel'] = '$(\\text{MC}-\\text{data})$'
             inputdata.description['YMin'] = -3.5
             inputdata.description['YMax'] = 3.5
         elif inputdata.description.has_key('RatioPlotMode') and inputdata.description['RatioPlotMode']=='datamc':
             inputdata.description['YLabel'] = 'Data/MC'
             inputdata.description['YMin'] = 0.5
             inputdata.description['YMax'] = 1.5
         else:
             inputdata.description['YLabel'] = 'MC/Data'
             inputdata.description['YMin'] = 0.5
             inputdata.description['YMax'] = 1.5
 
         if inputdata.description.has_key('RatioPlotYLabel'):
             inputdata.description['YLabel'] = inputdata.description['RatioPlotYLabel']
         inputdata.description['YLabel']='\\rput(-%s,0){%s}'%(0.5*inputdata.description['PlotSizeY']/inputdata.description['PlotSizeX'],inputdata.description['YLabel'])
 
         if inputdata.description.has_key('RatioPlotYMin'):
             inputdata.description['YMin'] = inputdata.description['RatioPlotYMin']
         if inputdata.description.has_key('RatioPlotYMax'):
             inputdata.description['YMax'] = inputdata.description['RatioPlotYMax']
 
         if not inputdata.description.has_key('RatioPlotErrorBandColor'):
             inputdata.description['RatioPlotErrorBandColor'] = 'yellow'
         if not inputdata.description.has_key('RatioPlotSameStyle') or inputdata.description['RatioPlotSameStyle'] == '0':
             inputdata.histos[self.refdata].description['ErrorBandColor'] = inputdata.description['RatioPlotErrorBandColor']
             inputdata.histos[self.refdata].description['ErrorBands'] = '1'
             inputdata.histos[self.refdata].description['ErrorBars'] = '0'
             inputdata.histos[self.refdata].description['LineStyle'] = 'solid'
             inputdata.histos[self.refdata].description['LineColor'] = 'black'
             inputdata.histos[self.refdata].description['LineWidth'] = '0.3pt'
             inputdata.histos[self.refdata].description['PolyMarker'] = ''
             inputdata.histos[self.refdata].description['ConnectGaps'] = '1'
 
         self.calculate_ratios(inputdata)
         self.set_borders(inputdata)
         self.coors = Coordinates(inputdata)
 
     def draw(self, inputdata):
         out = ""
         out += ('\n%\n% RatioPlot\n%\n')
         out += ('\\psset{yunit=%scm}\n' %(self.yoffset))
         out += ('\\rput(0,-1){%\n')
         out += ('\\psset{yunit=%scm}\n' %(inputdata.description['PlotSizeY']))
         out += self._draw(inputdata)
         out += ('}\n')
         return out
 
     def calculate_ratios(self, inputdata):
         foo = inputdata.description['DrawOnly'].pop(inputdata.description['DrawOnly'].index(self.refdata))
         if inputdata.histos[self.refdata].description.has_key('ErrorBands') and inputdata.histos[self.refdata].description['ErrorBands']=='1':
             inputdata.description['DrawOnly'].insert(0,foo)
         else:
             inputdata.description['DrawOnly'].append(foo)
         for i in inputdata.description['DrawOnly']:
             if i != self.refdata:
                 if inputdata.description.has_key('RatioPlotMode') and inputdata.description['RatioPlotMode'] == 'deviation':
                     inputdata.histos[i].deviation(inputdata.histos[self.refdata])
                 elif inputdata.description.has_key('RatioPlotMode') and inputdata.description['RatioPlotMode'] == 'datamc':
                     inputdata.histos[i].dividereverse(inputdata.histos[self.refdata])
                     inputdata.histos[i].description['ErrorBars'] = '1'
                 else:
                     inputdata.histos[i].divide(inputdata.histos[self.refdata])
         if inputdata.description.has_key('RatioPlotMode') and inputdata.description['RatioPlotMode'] == 'deviation':
             inputdata.histos[self.refdata].deviation(inputdata.histos[self.refdata])
         elif inputdata.description.has_key('RatioPlotMode') and inputdata.description['RatioPlotMode'] == 'datamc':
             inputdata.histos[self.refdata].dividereverse(inputdata.histos[self.refdata])
         else:
             inputdata.histos[self.refdata].divide(inputdata.histos[self.refdata])
 
     def _draw(self, inputdata):
         out = ""
         for i in inputdata.description['DrawOnly']:
             if inputdata.description.has_key('RatioPlotMode') and inputdata.description['RatioPlotMode'] == 'datamc':
                 if i != self.refdata:
                     out += inputdata.histos[i].draw(self.coors)
             else:
                 out += inputdata.histos[i].draw(self.coors)
 
         frame = Frame()
         out += frame.draw(inputdata)
 
         # TODO: so much duplication with MainPlot... yuck!
         if inputdata.description.has_key('XMajorTickMarks') and inputdata.description['XMajorTickMarks'] != '':
             xcustommajortickmarks = int(inputdata.description['XMajorTickMarks'])
         else:
             xcustommajortickmarks = -1
         if inputdata.description.has_key('XMinorTickMarks') and inputdata.description['XMinorTickMarks'] != '':
             xcustomminortickmarks = int(inputdata.description['XMinorTickMarks'])
         else:
             xcustomminortickmarks =- 1
 
         xcustommajorticks = None
         if inputdata.description.has_key('XCustomMajorTicks'): # and inputdata.description['XCustomMajorTicks']!='':
             xcustommajorticks = []
             tickstr = inputdata.description['XCustomMajorTicks'].strip().split() #'\t')
             if not len(tickstr) % 2:
                 for i in range(0, len(tickstr), 2):
                     xcustommajorticks.append({'Value': float(tickstr[i]), 'Label': tickstr[i+1]})
 
         xcustomminorticks = None
         if inputdata.description.has_key('XCustomMinorTicks'): # and inputdata.description['XCustomMinorTicks']!='':
             xcustomminorticks = []
             tickstr = inputdata.description['XCustomMinorTicks'].strip().split() #'\t')
             for i in range(len(tickstr)):
                 xcustomminorticks.append({'Value': float(tickstr[i])})
 
         xticks = XTicks(inputdata.description, self.coors)
         drawlabels = not (inputdata.description.has_key('RatioPlotTickLabels') and inputdata.description['RatioPlotTickLabels']=='0')
         out += xticks.draw(custommajortickmarks=xcustommajortickmarks,
                            customminortickmarks=xcustomminortickmarks,
                            custommajorticks=xcustommajorticks,
                            customminorticks=xcustomminorticks,
                            drawlabels=drawlabels)
 
 
         ycustommajortickmarks = inputdata.attr('YMajorTickMarks', '')
         ycustommajortickmarks = int(ycustommajortickmarks) if ycustommajortickmarks else -1
 
         ycustomminortickmarks = inputdata.attr('YMinorTickMarks', '')
         ycustomminortickmarks = int(ycustomminortickmarks) if ycustomminortickmarks else -1
 
         ycustommajorticks = None
         if inputdata.description.has_key('YCustomMajorTicks'):
             ycustommajorticks = []
             tickstr = inputdata.description['YCustomMajorTicks'].strip().split() #'\t')
             if not len(tickstr) % 2:
                 for i in range(0, len(tickstr), 2):
                     ycustommajorticks.append({'Value': float(tickstr[i]), 'Label': tickstr[i+1]})
 
         ycustomminorticks = None
         if inputdata.description.has_key('YCustomMinorTicks'):
             ycustomminorticks = []
             tickstr = inputdata.description['YCustomMinorTicks'].strip().split() #'\t')
             for i in range(len(tickstr)):
                 ycustomminorticks.append({'Value': float(tickstr[i])})
 
         yticks = YTicks(inputdata.description, self.coors)
         out += yticks.draw(custommajortickmarks=ycustommajortickmarks,
                            customminortickmarks=ycustomminortickmarks,
                            custommajorticks=ycustommajorticks,
                            customminorticks=ycustomminorticks)
 
         if not inputdata.attr_bool('MainPlot', True) and not inputdata.attr_bool('Legend', False):
             legend = Legend(inputdata.description, inputdata.histos, inputdata.functions)
             out += legend.draw()
 
         labels = Labels(inputdata.description)
         lnames = ['XLabel','YLabel']
         if not inputdata.attr_bool('MainPlot', True):
             lnames.append("Title")
         out += labels.draw(lnames)
         return out
 
 
 
class Legend(Described):
    # Renders the plot legend as a PSTricks \rput block: one row per drawn
    # histogram/function (error band, sample line, optional marker, title),
    # followed by any free-form CustomLegend lines.

    def __init__(self, description, histos, functions):
        self.histos = histos
        self.functions = functions
        self.description = description

    def draw(self):
        out = ""
        out += '\n%\n% Legend\n%\n'
        out += '\\rput[tr](%s,%s){%%\n' % (self.getLegendXPos(), self.getLegendYPos())
        # Row pitch scales with the plot height so rows stay readable.
        ypos = -0.05*6/self.description['PlotSizeY']

        # Collect the entries to show (LegendOnly overrides DrawOnly) and
        # bucket them by their optional integer LegendOrder.
        legendordermap = {}
        legendlist = self.description['DrawOnly']+self.functions.keys()
        if self.description.has_key('LegendOnly'):
            legendlist = []
            for legend in self.description['LegendOnly'].strip().split():
                if legend in self.histos.keys() or legend in self.functions.keys():
                    legendlist.append(legend)
        for legend in legendlist:
            order = 0
            if self.histos.has_key(legend) and self.histos[legend].description.has_key('LegendOrder'):
                order = int(self.histos[legend].description['LegendOrder'])
            if self.functions.has_key(legend) and self.functions[legend].description.has_key('LegendOrder'):
                order = int(self.functions[legend].description['LegendOrder'])
            if not order in legendordermap:
                legendordermap[order] = []
            legendordermap[order].append(legend)
        # Flatten the buckets in ascending LegendOrder.
        foo=[]
        for i in sorted(legendordermap.keys()):
            foo.extend(legendordermap[i])

        # Right-aligned legends mirror the sample line to the other side of
        # the text.
        rel_xpos_sign = 1.0
        if self.getLegendAlign()=='r':
            rel_xpos_sign = -1.0
        xpos1 = -0.10*rel_xpos_sign
        xpos2 = -0.02*rel_xpos_sign

        for i in foo:
            # Look up the drawable; entries with empty titles are skipped.
            if self.histos.has_key(i):
                drawobject=self.histos[i]
            elif self.functions.has_key(i):
                drawobject=self.functions[i]
            else:
                continue
            title = drawobject.getTitle()
            if title == '':
                continue
            else:
                out += ('\\rput[B%s](%s,%s){%s}\n' %(self.getLegendAlign(),rel_xpos_sign*0.1,ypos,title))
                out += ('\\rput[B%s](%s,%s){%s\n' %(self.getLegendAlign(),rel_xpos_sign*0.1,ypos,'%'))
                # Optional error-band swatch behind the sample line.
                if drawobject.getErrorBands():
                    out += ('\\psframe[linewidth=0pt,linestyle=none,fillstyle=solid,fillcolor=%s,opacity=%s]' %(drawobject.getErrorBandColor(),drawobject.getErrorBandOpacity()))
                    out += ('(%s, 0.033)(%s, 0.001)\n' %(xpos1, xpos2))
                # Sample line in the entry's own style.
                out += ('\\psline[linestyle=' + drawobject.getLineStyle() \
                            + ', linecolor=' + drawobject.getLineColor() \
                            + ', linewidth=' + drawobject.getLineWidth() \
                            + ', strokeopacity=' + drawobject.getLineOpacity() \
                            + ', opacity=' + drawobject.getFillOpacity())
                if drawobject.getLineDash() != '':
                    out += (', dash=' + drawobject.getLineDash())
                if drawobject.getFillStyle()!='none':
                    out += (', fillstyle=' + drawobject.getFillStyle() \
                                + ', fillcolor='  + drawobject.getFillColor() \
                                + ', hatchcolor=' + drawobject.getHatchColor() \
                                + ']{C-C}(%s, 0.030)(%s, 0.030)(%s, 0.004)(%s, 0.004)(%s, 0.030)\n' \
                                %(xpos1, xpos2, xpos2, xpos1, xpos1))
                else:
                    out += ('](%s, 0.016)(%s, 0.016)\n' %(xpos1, xpos2))
                # Optional marker dot on top of the sample line.
                if drawobject.getPolyMarker() != '':
                    out += ('  \\psdot[dotstyle=' + drawobject.getPolyMarker() \
                                + ', dotsize='    + drawobject.getDotSize()   \
                                + ', dotscale='   + drawobject.getDotScale()  \
                                + ', linecolor='  + drawobject.getLineColor() \
                                + ', linewidth='  + drawobject.getLineWidth() \
                                + ', linestyle='  + drawobject.getLineStyle() \
                                + ', fillstyle='  + drawobject.getFillStyle() \
                                + ', fillcolor='  + drawobject.getFillColor() \
                                + ', strokeopacity=' + drawobject.getLineOpacity() \
                                + ', opacity=' + drawobject.getFillOpacity() \
                                + ', hatchcolor=' + drawobject.getHatchColor())
                    if drawobject.getFillStyle()!='none':
                        out += ('](%s, 0.028)\n' % (rel_xpos_sign*-0.06))
                    else:
                        out += ('](%s, 0.016)\n' % (rel_xpos_sign*-0.06))
                out += ('}\n')
                ypos -= 0.075*6/self.description['PlotSizeY']
        # Free-form extra rows, '\\'-separated in the CustomLegend option.
        if self.description.has_key('CustomLegend'):
            for i in self.description['CustomLegend'].strip().split('\\\\'):
                out += ('\\rput[B%s](%s,%s){%s}\n' %(self.getLegendAlign(),rel_xpos_sign*0.1,ypos,i))
                ypos -= 0.075*6/self.description['PlotSizeY']
        out += ('}\n')
        return out

    def getLegendXPos(self):
        # Legend anchor x in plot-fraction units; defaults depend on the
        # alignment so the box stays inside the frame.
        if self.description.has_key('LegendXPos'):
            return self.description['LegendXPos']
        else:
            if self.getLegendAlign()=='r':
                return '0.95'
            else:
                return '0.53'

    def getLegendYPos(self):
        # Legend anchor y in plot-fraction units (default near the top).
        if self.description.has_key('LegendYPos'):
            return self.description['LegendYPos']
        else:
            return '0.93'

    def getLegendAlign(self):
        # 'l' (default) or 'r' text alignment for the legend rows.
        if self.description.has_key('LegendAlign'):
            return self.description['LegendAlign']
        else:
            return 'l'
 
 
 class ColorScale(Described):
 
    def __init__(self, description, coors):
        # Keep references to the plot's description dict and coordinate
        # transform; both are read later when drawing the colour-scale bar.
        self.description = description
        self.coors = coors
 
     def draw(self):
         out = ''
         out += '\n%\n% ColorScale\n%\n'
         out += '\\rput(1,0){\n'
         out += '  \\psset{xunit=4mm}\n'
         out += '  \\rput(0.5,0){\n'
         out += '    \\psset{yunit=0.0076923, linestyle=none, fillstyle=solid}\n'
         out += '    \\multido{\\ic=0+1,\\id=1+1}{130}{\n'
         out += '      \\psframe[fillcolor={gradientcolors!![\\ic]},dimen=inner,linewidth=0.1pt](0, \\ic)(1, \\id)\n'
         out += '    }\n'
         out += '  }\n'
         out += '  \\rput(0.5,0){\n'
         out += '    \\psframe[linewidth=0.3pt,dimen=middle](0,0)(1,1)\n'
 
-        zcustommajortickmarks = int(self.description.get('ZMajorTickMarks', -1))
-        zcustomminortickmarks = int(self.description.get('ZMinorTickMarks', -1))
-        zcustommajorticks=[]
-        zcustomminorticks=[]
-        if self.description.has_key('ZCustomMajorTicks') and self.description['ZCustomMajorTicks']!='':
-            # TODO: Would be nice to have less invisible separation of the custom ticks than split on tabs
-            ticks = self.description['ZCustomMajorTicks'].strip().split('\t')
-            if not len(ticks)%2:
-                for i in range(0,len(ticks),2):
-                    zcustommajorticks.append({'Value': float(ticks[i]), 'Label': ticks[i+1]})
-        if self.description.has_key('ZCustomMinorTicks') and self.description['ZCustomMinorTicks']!='':
-            # TODO: Would be nice to have less invisible separation of the custom ticks than split on tabs
-            ticks = self.description['ZCustomMinorTicks'].strip().split('\t')
-            for i in range(len(ticks)):
-                zcustomminorticks.append({'Value': float(ticks[i])})
-        if (self.description.has_key('PlotZTickLabels') and self.description['PlotZTickLabels']=='0'):
-            drawzlabels=False
-        else:
-            drawzlabels=True
+        zcustommajortickmarks = self.attr_int('ZMajorTickMarks', -1)
+        zcustomminortickmarks = self.attr_int('ZMinorTickMarks', -1)
+
+        zcustommajorticks = zcustomminorticks = None
+        if self.attr('ZCustomMajorTicks'):
+            zcustommajorticks = []
+            z_label_pairs = self.attr('ZCustomMajorTicks').strip().split() #'\t')
+            if len(z_label_pairs) % 2 == 0:
+                for i in range(0, len(z_label_pairs), 2):
+                    zcustommajorticks.append({'Value': float(x_label_pairs[i]), 'Label': x_label_pairs[i+1]})
+            else:
+                print "Warning: ZCustomMajorTicks requires an even number of alternating pos/label entries"
+
+        if self.attr('ZCustomMinorTicks'):
+            zs = self.attr('ZCustomMinorTicks').strip().split() #'\t')
+            zcustomminorticks = [{'Value': float(x)} for x in xs]
+
+        drawzlabels = self.attr_bool('PlotZTickLabels', True)
+
         zticks = ZTicks(self.description, self.coors)
         out += zticks.draw(custommajortickmarks=zcustommajortickmarks,\
                            customminortickmarks=zcustomminortickmarks,\
                            custommajorticks=zcustommajorticks,\
                            customminorticks=zcustomminorticks,
                            drawlabels=drawzlabels)
         out += '  }\n'
         out += '}\n'
         return out
 
 
 
 class Labels(Described):
 
     def __init__(self, description):
         self.description = description
 
     def draw(self, axis=[]):
         out = ""
         out += ('\n%\n% Labels\n%\n')
         if self.description.has_key('Title') and (axis.count('Title') or axis==[]):
             out += ('\\rput(0,1){\\rput[lB](0, 1.7\\labelsep){\\normalsize '+self.description['Title']+'}}\n')
         if self.description.has_key('XLabel') and (axis.count('XLabel') or axis==[]):
             xlabelsep=4.7
             if self.description.has_key('XLabelSep'):
                 xlabelsep=float(self.description['XLabelSep'])
             out += ('\\rput(1,0){\\rput[rB](0,-%4.3f\\labelsep){\\normalsize '%(xlabelsep) +self.description['XLabel']+'}}\n')
         if self.description.has_key('YLabel') and (axis.count('YLabel') or axis==[]):
             ylabelsep=6.5
             if self.description.has_key('YLabelSep'):
                 ylabelsep=float(self.description['YLabelSep'])
             out += ('\\rput(0,1){\\rput[rB]{90}(-%4.3f\\labelsep,0){\\normalsize '%(ylabelsep) +self.description['YLabel']+'}}\n')
         if self.description.has_key('ZLabel') and (axis.count('ZLabel') or axis==[]):
             zlabelsep=5.3
             if self.description.has_key('ZLabelSep'):
                 zlabelsep=float(self.description['ZLabelSep'])
             out += ('\\rput(1,1){\\rput(%4.3f\\labelsep,0){\\psset{xunit=4mm}\\rput[lB]{270}(1.5,0){\\normalsize '%(zlabelsep) +self.description['ZLabel']+'}}}\n')
         return out
 
 
 
 class Special(Described):
 
     def __init__(self, f):
         self.description = {}
         self.data = []
         self.read_input(f)
 
     def read_input(self, f):
         for line in f:
             if is_end_marker(line, 'SPECIAL'):
                 break
             elif is_comment(line):
                 continue
             else:
                 self.data.append(line)
 
     def draw(self, coors):
         out = ""
         out += ('\n%\n% Special\n%\n')
         import re
         regex = re.compile(r'^(.*?)(\\physics[xy]?coor)\(\s?([0-9\.eE+-]+)\s?,\s?([0-9\.eE+-]+)\s?\)(.*)')
         # TODO: More precise number string matching, something like this:
         # num = r"-?[0-9]*(?:\.[0-9]*)(?:[eE][+-]?\d+]"
         # regex = re.compile(r'^(.*?)(\\physics[xy]?coor)\(\s?(' + num + ')\s?,\s?(' + num + ')\s?\)(.*)')
         for l in self.data:
             while regex.search(l):
                 match = regex.search(l)
                 xcoor, ycoor = float(match.group(3)), float(match.group(4))
                 if match.group(2)[1:] in ["physicscoor", "physicsxcoor"]:
                     xcoor = coors.phys2frameX(xcoor)
                 if match.group(2)[1:] in ["physicscoor", "physicsycoor"]:
                     ycoor = coors.phys2frameY(ycoor)
                 line = "%s(%f, %f)%s" % (match.group(1), xcoor, ycoor, match.group(5))
                 l = line
             out += l + "\n"
         return out
 
 
 
 class DrawableObject(Described):
 
     def __init__(self, f):
         pass
 
     def getTitle(self):
         return self.description.get("Title", "")
 
     def getLineStyle(self):
         if self.description.has_key('LineStyle'):
             ## I normally like there to be "only one way to do it", but providing
             ## this dashdotted/dotdashed synonym just seems humane ;-)
             if self.description['LineStyle'] in ('dashdotted', 'dotdashed'):
                 self.description['LineStyle']='dashed'
                 self.description['LineDash']='3pt 3pt .8pt 3pt'
             return self.description['LineStyle']
         else:
             return 'solid'
 
     def getLineDash(self):
         if self.description.has_key('LineDash'):
             # Check if LineStyle=='dashdotted' before returning something
             self.getLineStyle()
             return self.description['LineDash']
         else:
             return ''
 
     def getLineWidth(self):
         return self.description.get("LineWidth", "0.8pt")
 
     def getLineColor(self):
         return self.description.get("LineColor", "black")
 
     def getLineOpacity(self):
         return self.description.get("LineOpacity", "1.0")
 
     def getFillColor(self):
         return self.description.get("FillColor", "white")
 
     def getFillOpacity(self):
         return self.description.get("FillOpacity", "1.0")
 
     def getHatchColor(self):
         return self.description.get("HatchColor", "black")
 
     def getFillStyle(self):
         return self.description.get("FillStyle", "none")
 
     def getPolyMarker(self):
         return self.description.get("PolyMarker", "")
 
     def getDotSize(self):
         return self.description.get("DotSize", "2pt 2")
 
     def getDotScale(self):
         return self.description.get("DotScale", "1")
 
     def getErrorBars(self):
         return bool(int(self.description.get("ErrorBars", "0")))
 
     def getErrorBands(self):
         return bool(int(self.description.get("ErrorBands", "0")))
 
     def getErrorBandColor(self):
         return self.description.get("ErrorBandColor", "yellow")
 
     def getErrorBandOpacity(self):
         return self.description.get("ErrorBandOpacity", "1.0")
 
     def getSmoothLine(self):
         return bool(int(self.description.get("SmoothLine", "0")))
 
     def startclip(self):
         return '\\psclip{\\psframe[linewidth=0, linestyle=none](0,0)(1,1)}\n'
 
     def stopclip(self):
         return '\\endpsclip\n'
 
     def startpsset(self):
         out = ""
         out += ('\\psset{linecolor='+self.getLineColor()+'}\n')
         out += ('\\psset{linewidth='+self.getLineWidth()+'}\n')
         out += ('\\psset{linestyle='+self.getLineStyle()+'}\n')
         out += ('\\psset{fillstyle='+self.getFillStyle()+'}\n')
         out += ('\\psset{fillcolor='+self.getFillColor()+'}\n')
         out += ('\\psset{hatchcolor='+self.getHatchColor()+'}\n')
         out += ('\\psset{strokeopacity='+self.getLineOpacity()+'}\n')
         out += ('\\psset{opacity='+self.getFillOpacity()+'}\n')
         if self.getLineDash()!='':
             out += ('\\psset{dash='+self.getLineDash()+'}\n')
         return out
 
     def stoppsset(self):
         out = ""
         out += ('\\psset{linecolor=black}\n')
         out += ('\\psset{linewidth=0.8pt}\n')
         out += ('\\psset{linestyle=solid}\n')
         out += ('\\psset{fillstyle=none}\n')
         out += ('\\psset{fillcolor=white}\n')
         out += ('\\psset{hatchcolor=black}\n')
         out += ('\\psset{strokeopacity=1.0}\n')
         out += ('\\psset{opacity=1.0}\n')
         return out
 
 
 
 class Function(DrawableObject, Described):
 
     def __init__(self, f):
         self.description = {}
         self.read_input(f)
 
     def read_input(self, f):
         self.code='def plotfunction(x):\n'
         iscode=False
         for line in f:
             if is_end_marker(line, 'FUNCTION'):
                 break
             elif is_comment(line):
                 continue
             else:
                 m = pat_property.match(line)
                 if iscode:
                     self.code+='    '+line
                 elif m:
                     prop, value = m.group(1,2)
                     if prop=='Code':
                         iscode=True
                     else:
                         self.description[prop] = value
         if not iscode:
             print '++++++++++ ERROR: No code in function'
         else:
             foo = compile(self.code, '<string>', 'exec')
             exec(foo)
             self.plotfunction = plotfunction
 
 
     def draw(self,coors):
         out = ""
         out += self.startclip()
         out += self.startpsset()
         xmin = coors.xmin()
         if self.description.has_key('XMin') and self.description['XMin']:
             xmin = float(self.description['XMin'])
         xmax=coors.xmax()
         if self.description.has_key('XMax') and self.description['XMax']:
             xmax=float(self.description['XMax'])
         # TODO: Space sample points logarithmically if LogX=1
         dx = (xmax-xmin)/500.
         x = xmin-dx
         out += '\\pscurve'
         if self.description.has_key('FillStyle') and self.description['FillStyle']!='none':
             out += '(%s,%s)\n' % (coors.strphys2frameX(xmin),coors.strphys2frameY(coors.ymin()))
         while x < (xmax+2*dx):
             y = self.plotfunction(x)
             out += ('(%s,%s)\n' % (coors.strphys2frameX(x), coors.strphys2frameY(y)))
             x += dx
         if self.description.has_key('FillStyle') and self.description['FillStyle']!='none':
             out += '(%s,%s)\n' % (coors.strphys2frameX(xmax),coors.strphys2frameY(coors.ymin()))
         out += self.stoppsset()
         out += self.stopclip()
         return out
 
 
 class BinData(object):
     """\
     Store bin edge and value+error(s) data for a 1D or 2D bin.
 
     TODO: generalise/alias the attr names to avoid mention of x and y
     """
 
     def __init__(self, low, high, val, err):
         #print "@", low, high, val, err
         self.low = floatify(low)
         self.high = floatify(high)
         self.val = float(val)
         self.err = floatpair(err)
 
     @property
     def is2D(self):
         return hasattr(self.low, "__len__") and hasattr(self.high, "__len__")
 
     @property
     def isValid(self):
         invalid_val = (isnan(self.val) or isnan(self.err[0]) or isnan(self.err[1]))
         if invalid_val:
             return False
         if self.is2D:
             invalid_low = any(isnan(x) for x in self.low)
             invalid_high = any(isnan(x) for x in self.high)
         else:
             invalid_low, invalid_high = isnan(self.low), isnan(self.high)
         return not (invalid_low or invalid_high)
 
     @property
     def xmin(self):
         return self.low
     @xmin.setter
     def xmin(self,x):
         self.low = x
 
     @property
     def xmax(self):
         return self.high
     @xmax.setter
     def xmax(self,x):
         self.high = x
 
     @property
     def xmid(self):
         # TODO: Generalise to 2D
         return (self.xmin + self.xmax) / 2.0
 
     @property
     def xwidth(self):
         # TODO: Generalise to 2D
         assert self.xmin <= self.xmax
         return self.xmax - self.xmin
 
     @property
     def y(self):
         return self.val
     @y.setter
     def y(self, x):
         self.val = x
 
     @property
     def ey(self):
         return self.err
     @ey.setter
     def ey(self, x):
         self.err = x
 
     @property
     def ymin(self):
         return self.y - self.ey[0]
 
     @property
     def ymax(self):
         return self.y + self.ey[1]
 
     def __getitem__(self, key):
         "dict-like access for backward compatibility"
         if key in ("LowEdge"):
             return self.xmin
         elif key == ("UpEdge", "HighEdge"):
             return self.xmax
         elif key == "Content":
             return self.y
         elif key == "Errors":
             return self.ey
 
 
 class Histogram(DrawableObject, Described):
 
     def __init__(self, f, p=None):
         self.description = {}
         self.is2dim = False
         self.data = []
         self.read_input_data(f)
         self.sigmabinvalue = None
         self.meanbinvalue = None
         self.path = p
 
     def read_input_data(self, f):
         for line in f:
             if is_end_marker(line, 'HISTOGRAM'):
                 break
             elif is_comment(line):
                 continue
             else:
                 line = line.rstrip()
                 m = pat_property.match(line)
                 if m:
                     prop, value = m.group(1,2)
                     self.description[prop] = value
                 else:
                     ## Detect symm errs
                     linearray = line.split()
                     if len(linearray) == 4:
                         self.data.append(BinData(*linearray))
                     ## Detect asymm errs
                     elif len(linearray) == 5:
                         self.data.append(BinData(linearray[0], linearray[1], linearray[2], [linearray[3],linearray[4]]))
                     ## Detect two-dimensionality
                     elif len(linearray) in [6,7]:
                         self.is2dim = True
                         # If asymm z error, use the max or average of +- error
                         err = float(linearray[5])
                         if len(linearray) == 7:
                             if self.description.get("ShowMaxZErr", 1):
                                 err = max(err, float(linearray[6]))
                             else:
                                 err = 0.5 * (err + float(linearray[6]))
                         self.data.append(BinData([linearray[0], linearray[2]], [linearray[1], linearray[3]], linearray[4], err))
                     ## Unknown histo format
                     else:
                         raise RuntimeError("Unknown HISTOGRAM data line format with %d entries" % len(linearray))
 
 
     def mangle_input(self):
         norm2int = self.attr_bool("NormalizeToIntegral", False)
         norm2sum = self.attr_bool("NormalizeToSum", False)
         if norm2int or norm2sum:
             if norm2int and norm2sum:
                 print "Can't normalize to Integral and to Sum at the same time. Will normalize to the Sum."
             foo = 0
             # TODO: change to "in self.data"?
             for i in range(len(self.data)):
                 if norm2sum:
                     foo += self.data[i].val
                 else:
                     foo += self.data[i].val*(self.data[i].xmax-self.data[i].xmin)
+
             # TODO: change to "in self.data"?
-            for i in range(len(self.data)):
-                self.data[i].val /= foo
-                self.data[i].err[0] /= foo
-                self.data[i].err[1] /= foo
+            if foo != 0:
+                for i in range(len(self.data)):
+                    self.data[i].val /= foo
+                    self.data[i].err[0] /= foo
+                    self.data[i].err[1] /= foo
         scale = self.attr_float('Scale', 1.0)
         if scale != 1.0:
             # TODO: change to "in self.data"?
             for i in range(len(self.data)):
                 self.data[i].val *= scale
                 self.data[i].err[0] *= scale
                 self.data[i].err[1] *= scale
         if self.attr_int("Rebin", 1) > 1:
             rebin = self.attr_int("Rebin", 1)
             errortype = self.attr("ErrorType", "stat")
             newdata = []
             for i in range(0, (len(self.data)//rebin)*rebin, rebin):
                 foo = 0.
                 barl = 0.
                 baru = 0.
                 for j in range(rebin):
                     binwidth = self.data[i+j].xwidth
                     foo += self.data[i+j].val * binwidth
                     if errortype == "stat":
                         barl += (binwidth * self.data[i+j].err[0])**2
                         baru += (binwidth * self.data[i+j].err[1])**2
                     elif errortype == "env":
                         barl += self.data[i+j].ymin * binwidth
                         baru += self.data[i+j].ymax * binwidth
                     else:
                         logging.error("Rebinning for ErrorType not implemented.")
                         sys.exit(1)
                 newbinwidth = self.data[i+rebin-1].xmax - self.data[i].xmin
                 newcentral = foo/newbinwidth
                 if errortype == "stat":
                     newerror = [sqrt(barl)/newbinwidth, sqrt(baru)/newbinwidth]
                 elif errortype == "env":
                     newerror = [(foo-barl)/newbinwidth, (baru-foo)/newbinwidth]
                 newdata.append(BinData(self.data[i].xmin, self.data[i+rebin-1].xmax, newcentral, newerror))
             self.data = newdata
 
     def add(self, name):
         if len(self.data) != len(name.data):
             print '+++ Error in Histogram.add() for %s: different numbers of bins' % self.path
         for i in range(len(self.data)):
             if fuzzyeq(self.data[i].xmin, name.data[i].xmin) and \
                fuzzyeq(self.data[i].xmax, name.data[i].xmax):
                 self.data[i].val += name.data[i].val
                 self.data[i].err[0] = sqrt(self.data[i].err[0]**2 + name.data[i].err[0]**2)
                 self.data[i].err[1] = sqrt(self.data[i].err[1]**2 + name.data[i].err[1]**2)
             else:
                 print '+++ Error in Histogram.add() for %s: binning of histograms differs' % self.path
 
     def divide(self, name):
         #print name.path, self.path
         if len(self.data) != len(name.data):
             print '+++ Error in Histogram.divide() for %s: different numbers of bins' % self.path
         for i in range(len(self.data)):
             if fuzzyeq(self.data[i].xmin, name.data[i].xmin) and \
                fuzzyeq(self.data[i].xmax, name.data[i].xmax):
                 try:
                     self.data[i].err[0] /= name.data[i].val
                 except ZeroDivisionError:
                     self.data[i].err[0]=0.
                 try:
                     self.data[i].err[1] /= name.data[i].val
                 except ZeroDivisionError:
                     self.data[i].err[1]=0.
                 try:
                     self.data[i].val /= name.data[i].val
                 except ZeroDivisionError:
                     self.data[i].val=1.
 #                self.data[i].err[0] = sqrt(self.data[i].err[0]**2 + name.data[i].err[0]**2)
 #                self.data[i].err[1] = sqrt(self.data[i].err[1]**2 + name.data[i].err[1]**2)
             else:
                 print '+++ Error in Histogram.divide() for %s: binning of histograms differs' % self.path
 
     def dividereverse(self, name):
         if len(self.data) != len(name.data):
             print '+++ Error in Histogram.dividereverse() for %s: different numbers of bins' % self.path
         for i in range(len(self.data)):
             if fuzzyeq(self.data[i].xmin, name.data[i].xmin) and \
                fuzzyeq(self.data[i].xmax, name.data[i].xmax):
                 try:
                     self.data[i].err[0] = name.data[i].err[0]/self.data[i].val
                 except ZeroDivisionError:
                     self.data[i].err[0]=0.
                 try:
                     self.data[i].err[1] = name.data[i].err[1]/self.data[i].val
                 except ZeroDivisionError:
                     self.data[i].err[1]=0.
                 try:
                     self.data[i].val = name.data[i].val/self.data[i].val
                 except ZeroDivisionError:
                     self.data[i].val=1.
             else:
                 print '+++ Error in Histogram.dividereverse(): binning of histograms differs'
 
     def deviation(self, name):
         if len(self.data) != len(name.data):
             print '+++ Error in Histogram.deviation() for %s: different numbers of bins' % self.path
         for i in range(len(self.data)):
             if fuzzyeq(self.data[i].xmin, name.data[i].xmin) and \
                fuzzyeq(self.data[i].xmax, name.data[i].xmax):
                 self.data[i].val -= name.data[i].val
                 try:
                     self.data[i].val /= 0.5*sqrt((name.data[i].err[0] + name.data[i].err[1])**2 + \
                                                         (self.data[i].err[0] + self.data[i].err[1])**2)
                 except ZeroDivisionError:
                     self.data[i].val = 0.0
                 try:
                     self.data[i].err[0] /= name.data[i].err[0]
                 except ZeroDivisionError:
                     self.data[i].err[0] = 0.0
                 try:
                     self.data[i].err[1] /= name.data[i].err[1]
                 except ZeroDivisionError:
                     self.data[i].err[1] = 0.0
             else:
                 print '+++ Error in Histogram.deviation() for %s: binning of histograms differs' % self.path
 
     def getChi2(self, name):
         chi2 = 0.
         for i in range(len(self.data)):
             if fuzzyeq(self.data[i].xmin, name.data[i].xmin) and \
                fuzzyeq(self.data[i].xmax, name.data[i].xmax):
                 try:
                     chi2 += (self.data[i].val-name.data[i].val)**2/((0.5*self.data[i].err[0]+0.5*self.data[i].err[1])**2 + (0.5*name.data[i].err[0]+0.5*name.data[i].err[1])**2)
                 except ZeroDivisionError:
                     pass
             else:
                 print '+++ Error in Histogram.getChi2() for %s: binning of histograms differs' % self.path
         return chi2/len(self.data)
 
     def getSigmaBinValue(self):
         if self.sigmabinvalue==None:
             self.sigmabinvalue = 0.
             sumofweights = 0.
             for i in range(len(self.data)):
                 if self.is2dim:
                     binwidth = abs( (self.data[i].xmax[0] - self.data[i].xmin[0])
                                    *(self.data[i].xmax[1] - self.data[i].xmin[1]))
                 else:
                     binwidth = abs(self.data[i].xmax - self.data[i].xmin)
                 self.sigmabinvalue += binwidth*(self.data[i].val-self.getMeanBinValue())**2
                 sumofweights += binwidth
             self.sigmabinvalue = sqrt(self.sigmabinvalue/sumofweights)
         return self.sigmabinvalue
 
     def getMeanBinValue(self):
         if self.meanbinvalue==None:
             self.meanbinvalue = 0.
             sumofweights = 0.
             for i in range(len(self.data)):
                 if self.is2dim:
                     binwidth = abs( (self.data[i].xmax[0] - self.data[i].xmin[0])
                                    *(self.data[i].xmax[1] - self.data[i].xmin[1]))
                 else:
                     binwidth = abs(self.data[i].xmax - self.data[i].xmin)
                 self.meanbinvalue += binwidth*self.data[i].val
                 sumofweights += binwidth
             self.meanbinvalue /= sumofweights
         return self.meanbinvalue
 
     def getCorrelation(self, name):
         correlation = 0.
         sumofweights = 0.
         for i in range(len(self.data)):
             if fuzzyeq(self.data[i].xmin, name.data[i].xmin) and \
                fuzzyeq(self.data[i].xmax, name.data[i].xmax):
                 if self.is2dim:
                     binwidth = abs( (self.data[i].xmax[0] - self.data[i].xmin[0])
                                   * (self.data[i].xmax[1] - self.data[i].xmin[1]) )
                 else:
                     binwidth = abs(self.data[i].xmax - self.data[i].xmin)
                 correlation += binwidth * ( self.data[i].val - self.getMeanBinValue() ) \
                                         * ( name.data[i].val - name.getMeanBinValue() )
                 sumofweights += binwidth
             else:
                 print '+++ Error in Histogram.getCorrelation(): binning of histograms differs' % self.path
         correlation /= sumofweights
         try:
             correlation /= self.getSigmaBinValue()*name.getSigmaBinValue()
         except ZeroDivisionError:
             correlation = 0
         return correlation
 
     def getRMSdistance(self,name):
         distance = 0.
         sumofweights = 0.
         for i in range(len(self.data)):
             if fuzzyeq(self.data[i].xmin, name.data[i].xmin) and \
                fuzzyeq(self.data[i].xmax, name.data[i].xmax):
                 if self.is2dim:
                     binwidth = abs( (self.data[i].xmax[0] - self.data[i].xmin[0])
                                   * (self.data[i].xmax[1] - self.data[i].xmin[1]) )
                 else:
                     binwidth = abs(self.data[i].xmax - self.data[i].xmin)
                 distance += binwidth * ( (self.data[i].val - self.getMeanBinValue())
                                         -(name.data[i].val - name.getMeanBinValue()))**2
                 sumofweights += binwidth
             else:
                 print '+++ Error in Histogram.getRMSdistance() for %s: binning of histograms differs' % self.path
         distance = sqrt(distance/sumofweights)
         return distance
 
     def draw(self,coors):
         seen_nan = False
         out = ""
         out += self.startclip()
         out += self.startpsset()
         if any(b.isValid for b in self.data):
             out += "% START DATA\n"
             if self.is2dim:
                 for b in self.data:
                     out += ('\\psframe')
                     color = int(129*coors.phys2frameZ(b.val))
                     if b.val > coors.zmax():
                         color = 129
                     if b.val < coors.zmin():
                         color = 0
                     if b.val <= coors.zmin():
                         out += ('[linewidth=0pt, linestyle=none, fillstyle=solid, fillcolor=white]')
                     else:
                         out += ('[linewidth=0pt, linestyle=none, fillstyle=solid, fillcolor={gradientcolors!!['+str(color)+']}]')
                     out += ('(' + coors.strphys2frameX(b.low[0]) + ', ' \
                                 + coors.strphys2frameY(b.low[1]) + ')(' \
                                 + coors.strphys2frameX(b.high[0])  + ', ' \
                                 + coors.strphys2frameY(b.high[1])  + ')\n')
             else:
                 if self.getErrorBands():
                     self.description['SmoothLine'] = 0
                     for b in self.data:
                         out += ('\\psframe[dimen=inner,linewidth=0pt,linestyle=none,fillstyle=solid,fillcolor=%s,opacity=%s]' % (self.getErrorBandColor(),self.getErrorBandOpacity()))
                         out += ('(' + coors.strphys2frameX(b.xmin) + ', ' \
                                     + coors.strphys2frameY(b.val - b.err[0]) + ')(' \
                                     + coors.strphys2frameX(b.xmax)  + ', ' \
                                     + coors.strphys2frameY(b.val + b.err[1]) + ')\n')
                 if self.getErrorBars():
                     for b in self.data:
                         if isnan(b.val) or isnan(b.err[0]) or isnan(b.err[1]):
                             seen_nan = True
                             continue
                         if b.val == 0. and b.err == [0.,0.]:
                             continue
                         out += ('\\psline')
                         out += ('(' + coors.strphys2frameX(b.xmin) + ', ' \
                                     + coors.strphys2frameY(b.val) + ')(' \
                                     + coors.strphys2frameX(b.xmax)  + ', ' \
                                     + coors.strphys2frameY(b.val) + ')\n')
                         out += ('\\psline')
                         bincenter = coors.strphys2frameX(.5*(b.xmin+b.xmax))
                         out += ('(' + bincenter + ', ' \
                                     + coors.strphys2frameY(b.val-b.err[0]) + ')(' \
                                     + bincenter + ', ' \
                                     + coors.strphys2frameY(b.val+b.err[1]) + ')\n')
                 if self.getSmoothLine():
                     out += '\\psbezier'
                 else:
                     out += '\\psline'
                 if self.getFillStyle() != 'none':   # make sure that filled areas go all the way down to the x-axis
                     if coors.phys2frameX(self.data[0].xmin) > 1e-4:
                         out += '(' + coors.strphys2frameX(self.data[0].xmin) + ', -0.1)\n'
                     else:
                         out += '(-0.1, -0.1)\n'
                 for i, b in enumerate(self.data):
                     if isnan(b.val):
                         seen_nan = True
                         continue
                     if self.getSmoothLine():
                         out += ('(' + coors.strphys2frameX(0.5*(b.xmin+b.xmax)) + ', ' \
                                     + coors.strphys2frameY(b.val) + ')\n')
                     else:
                         out += ('(' + coors.strphys2frameX(b.xmin) + ', ' \
                                     + coors.strphys2frameY(b.val) + ')(' \
                                     + coors.strphys2frameX(b.xmax)  + ', ' \
                                     + coors.strphys2frameY(b.val) + ')\n')
                         ## Join/separate data points, with vertical/diagonal lines
                         if i+1 < len(self.data): #< If this is not the last point
                             if self.description.get('ConnectBins', '1') != '1':
                                 out += ('\\psline')
                             else:
                                 ## If bins are joined, but there is a gap in binning, choose whether to fill the gap
                                 if (abs(coors.phys2frameX(b.xmax) - coors.phys2frameX(self.data[i+1].xmin)) > 1e-4):
                                     if self.description.get('ConnectGaps', '0') != '1':
                                         out += ('\\psline')
                                         # TODO: Perhaps use a new dashed line to fill the gap?
-                    if self.getFillStyle() != 'none':  # make sure that filled areas go all the way down to the x-axis
-                        if (coors.phys2frameX(self.data[-1].xmax) < 1-1e-4):
-                            out += '(' + coors.strphys2frameX(self.data[-1].xmax) + ', -0.1)\n'
-                        else:
-                            out += '(1.1, -0.1)\n'
+                if self.getFillStyle() != 'none':  # make sure that filled areas go all the way down to the x-axis
+                    if (coors.phys2frameX(self.data[-1].xmax) < 1-1e-4):
+                        out += '(' + coors.strphys2frameX(self.data[-1].xmax) + ', -0.1)\n'
+                    else:
+                        out += '(1.1, -0.1)\n'
             #
             if self.getPolyMarker() != '':
                 for b in self.data:
                     if isnan(b.val):
                         seen_nan = True
                         continue
                     if b.val == 0. and b.err == [0.,0.]:
                         continue
                     out += ('\\psdot[dotstyle=%s,dotsize=%s,dotscale=%s](' % (self.getPolyMarker(),self.getDotSize(),self.getDotScale()) \
                                 + coors.strphys2frameX(.5*(b.xmin+b.xmax)) + ', ' \
                                 + coors.strphys2frameY(b.val) + ')\n')
 
             out += "% END DATA\n"
         else:
             print "WARNING: No valid bin value/errors/edges to plot!"
             out += "% NO DATA!\n"
 
         out += self.stoppsset()
         out += self.stopclip()
         if seen_nan:
             print "WARNING: NaN-valued value or error bar!"
         return out
 
     # def is2dimensional(self):
     #     return self.is2dim
 
     def getXMin(self):
         "Return the lowest x edge over all bins, or 0 if there are no bins."
         if not self.data:
             return 0
         lo = (lambda b: b.low[0]) if self.is2dim else (lambda b: b.xmin)
         return min(lo(b) for b in self.data)
 
     def getXMax(self):
         "Return the highest x edge over all bins, or 1 if there are no bins."
         if not self.data:
             return 1
         hi = (lambda b: b.high[0]) if self.is2dim else (lambda b: b.xmax)
         return max(hi(b) for b in self.data)
 
     def getYMin(self, xmin, xmax, logy):
         """Smallest plottable y value within the x window [xmin, xmax].

         The lower error is subtracted if error bars/bands are shown; NaNs are
         skipped, and non-positive values are skipped on log axes.  Falls back
         to the first bin's value if nothing in the window qualifies.
         """
         if not self.data:
             return 0
         if self.is2dim:
             return min(b.low[1] for b in self.data)
         # NB: assumed side-effect-free getters, hoisted out of the loop
         with_err = self.getErrorBars() or self.getErrorBands()
         candidates = []
         for b in self.data:
             if not ((b.xmax > xmin or b.xmin >= xmin) and (b.xmin < xmax or b.xmax <= xmax)):
                 continue
             y = b.val
             if with_err:
                 y -= b.err[0]
             if isnan(y):
                 continue
             if logy and y <= 0:
                 continue
             candidates.append(y)
         return min(candidates) if candidates else self.data[0].val
 
     def getYMax(self, xmin, xmax):
         """Largest plottable y value within the x window [xmin, xmax].

         The upper error is added if error bars/bands are shown; NaNs are
         skipped.  Falls back to the first bin's value if nothing in the
         window qualifies.
         """
         if not self.data:
             return 1
         if self.is2dim:
             return max(b.high[1] for b in self.data)
         # NB: assumed side-effect-free getters, hoisted out of the loop
         with_err = self.getErrorBars() or self.getErrorBands()
         tops = []
         for b in self.data:
             # De Morgan of the window-inclusion test used in getYMin
             if (b.xmax <= xmin and b.xmin < xmin) or (b.xmin >= xmax and b.xmax > xmax):
                 continue
             y = b.val
             if with_err:
                 y += b.err[1]
             if not isnan(y): # and (not logy or y > 0):
                 tops.append(y)
         return max(tops) if tops else self.data[0].val
 
     def getZMin(self, xmin, xmax, ymin, ymax):
         """Smallest bin value within the 2D window [xmin,xmax]x[ymin,ymax].

         Returns 0 for non-2D histograms.  Previously `min()` raised a
         ValueError when the window selected no bins; now falls back to the
         global minimum (or 0 with no data), mirroring getYMin's fallback.
         """
         if not self.is2dim:
             return 0
         zvalues = []
         for b in self.data:
             if (b.xmax[0] > xmin and b.xmin[0] < xmax) and (b.xmax[1] > ymin and b.xmin[1] < ymax):
                 zvalues.append(b.val)
         if zvalues:
             return min(zvalues)
         # Empty selection: window misses every bin (or there are no bins)
         return min(b.val for b in self.data) if self.data else 0
 
     def getZMax(self, xmin, xmax, ymin, ymax):
         """Largest bin value within the 2D window [xmin,xmax]x[ymin,ymax].

         Returns 0 for non-2D histograms.  Previously `max()` raised a
         ValueError when the window selected no bins; now falls back to the
         global maximum (or 0 with no data), mirroring getYMax's fallback.
         """
         if not self.is2dim:
             return 0
         zvalues = []
         for b in self.data:
             if (b.xmax[0] > xmin and b.xmin[0] < xmax) and (b.xmax[1] > ymin and b.xmin[1] < ymax):
                 zvalues.append(b.val)
         if zvalues:
             return max(zvalues)
         # Empty selection: window misses every bin (or there are no bins)
         return max(b.val for b in self.data) if self.data else 0
 
 
 
 class Histo1D(Histogram):

     def read_input_data(self, f):
         """Parse a HISTO1D section from file object f.

         Key=value lines populate self.description; numeric rows become
         BinData entries (4/8 columns: symmetric errors; 5 columns:
         asymmetric errors).
         """
         for line in f:
             if is_end_marker(line, 'HISTO1D'):
                 break
             if is_comment(line):
                 continue
             line = line.rstrip()
             m = pat_property.match(line)
             if m:
                 prop, value = m.group(1,2)
                 self.description[prop] = value
                 continue
             cols = line.split()
             if len(cols) in (4, 8):
                 ## Symmetric errors
                 # TODO: Not sure what the 8-param version is for... auto-compatibility with YODA format?
                 self.data.append(BinData(cols[0], cols[1], cols[2], cols[3]))
             elif len(cols) == 5:
                 ## Asymmetric errors
                 self.data.append(BinData(cols[0], cols[1], cols[2], [cols[3], cols[4]]))
             else:
                 raise Exception('Histo1D does not have the expected number of columns. ' + line)

     # TODO: specialise draw() here
 
 
 class Histo2D(Histogram):

     def read_input_data(self, f):
         """Parse a HISTO2D section from file object f.

         Key=value lines populate self.description; 6/7-column numeric rows
         become BinData entries with paired x/y edges.  For 7-column input
         the asymmetric z errors are collapsed to max or mean, depending on
         the ShowMaxZErr description flag.
         """
         self.is2dim = True #< Should really be done in a constructor, but this is easier for now...

         for line in f:
             if is_end_marker(line, 'HISTO2D'):
                 break
             if is_comment(line):
                 continue
             line = line.rstrip()
             m = pat_property.match(line)
             if m:
                 prop, value = m.group(1,2)
                 self.description[prop] = value
                 continue
             cols = line.split()
             if len(cols) not in (6, 7):
                 raise Exception('Histo2D does not have the expected number of columns. '+line)
             ## If asymm z error, use the max or average of +- error
             zerr = float(cols[5])
             if len(cols) == 7:
                 if self.description.get("ShowMaxZErr", 1):
                     zerr = max(zerr, float(cols[6]))
                 else:
                     zerr = 0.5 * (zerr + float(cols[6]))
             self.data.append(BinData([cols[0], cols[2]], [cols[1], cols[3]], float(cols[4]), zerr))

     # TODO: specialise draw() here
 
 
 
 #############################
 
 
 
 class Frame(object):
     """Draws the thin rectangular frame around the plot panel(s), plus an
     optional opaque coloured surround covering the margins (FrameColor)."""

     def __init__(self):
         self.framelinewidth = '0.3pt'

     def draw(self,inputdata):
         ## NOTE(review): the surround is drawn as four \psline strokes whose
         ## *linewidth* equals the margin width, so each stroke paints over a
         ## whole margin strip.
         out = ('\n%\n% Frame\n%\n')
         if inputdata.description.has_key('FrameColor') and inputdata.description['FrameColor']!=None:
             color = inputdata.description['FrameColor']
             # We want to draw this frame only once, so set it to False for next time:
             inputdata.description['FrameColor']=None

             # Calculate how high and wide the overall plot is
             # height[0] = top extent, height[1] = bottom extent (<= 0, frame units)
             height = [0,0]
             width  = inputdata.attr('PlotSizeX')
             if inputdata.attr_bool('RatioPlot', False):
                 height[1] = -inputdata.description['RatioPlotSizeY']
             if not inputdata.attr_bool('MainPlot', True):
                 # Ratio-only layout: the ratio panel occupies the full height
                 height[0] = inputdata.description['PlotSizeY']
             else:
                 height[0] = -height[1]
                 height[1] = 0

             # Get the margin widths
             # (+0.1 gives a little overlap so no gaps show at the edges)
             left = inputdata.description['LeftMargin']+0.1
             right = inputdata.description['RightMargin']+0.1
             top = inputdata.description['TopMargin']+0.1
             bottom = inputdata.description['BottomMargin']+0.1

             # Four margin-covering strokes: top, bottom, left, right
             #
             out += ('\\rput(0,1){\\psline[linewidth=%scm,linecolor=%s](%scm,%scm)(%scm,%scm)}\n' %(top, color, -left, top/2, width+right, top/2))
             out += ('\\rput(0,%scm){\\psline[linewidth=%scm,linecolor=%s](%scm,%scm)(%scm,%scm)}\n' %(height[1], bottom, color, -left, -bottom/2, width+right, -bottom/2))
             out += ('\\rput(0,0){\\psline[linewidth=%scm,linecolor=%s](%scm,%scm)(%scm,%scm)}\n' %(left, color, -left/2, height[1]-0.05, -left/2, height[0]+0.05))
             out += ('\\rput(1,0){\\psline[linewidth=%scm,linecolor=%s](%scm,%scm)(%scm,%scm)}\n' %(right, color, right/2, height[1]-0.05, right/2, height[0]+0.05))


         ## The visible thin frame around the unit square
         out += ('\\psframe[linewidth='+self.framelinewidth+',dimen=middle](0,0)(1,1)\n')
         return out
 
 
 
 class Ticks(object):
 
     def __init__(self, description, coors):
         self.majorticklinewidth = '0.3pt'
         self.minorticklinewidth = '0.3pt'
         self.majorticklength    = '9pt'
         self.minorticklength    = '4pt'
         self.description = description
         self.coors = coors
 
     def draw_ticks(self, vmin, vmax, plotlog=False, custommajorticks=None, customminorticks=None, custommajortickmarks=-1, customminortickmarks=-1, drawlabels=True, twosided=False):
         out = ""
         if plotlog:
             if vmin <= 0 or vmax <= 0:
                 raise Exception("Cannot place log axis min or max tick <= 0")
             if custommajorticks is None:
                 x = int(log10(vmin))
                 n_labels = 0
                 while x < log10(vmax) + 1:
                     if 10**x >= vmin:
                         ticklabel = 10**x
                         if ticklabel > vmin and ticklabel < vmax:
                             out += self.draw_majortick(ticklabel,twosided)
                             if drawlabels:
                                 out += self.draw_majorticklabel(ticklabel)
                                 n_labels += 1
                         if ticklabel == vmin or ticklabel == vmax:
                             if drawlabels:
                                 out += self.draw_majorticklabel(ticklabel)
                                 n_labels+=1
                         for i in range(2,10):
                             ticklabel = i*10**(x-1)
                             if ticklabel > vmin and ticklabel < vmax:
                                 out += self.draw_minortick(ticklabel,twosided)
                                 if drawlabels and n_labels == 0:
                                     if (i+1)*10**(x-1) < vmax: # some special care for the last minor tick
                                         out += self.draw_minorticklabel(ticklabel)
                                     else:
                                         out += self.draw_minorticklabel(ticklabel, last=True)
                     x += 1
             else:
                 print "Warning: custom major ticks not currently supported on log axes -- please contact the developers to request!"
         elif custommajorticks is not None or customminorticks is not None:
             if custommajorticks:
                 for i in range(len(custommajorticks)):
                     value = custommajorticks[i]['Value']
                     label = custommajorticks[i]['Label']
                     if value >= vmin and value <= vmax:
                         out += self.draw_majortick(value,twosided)
                     if drawlabels:
                         out += self.draw_majorticklabel(value, label=label)
             if customminorticks:
                 for i in range(len(customminorticks)):
                     value = customminorticks[i]['Value']
                     if value >= vmin and value <= vmax:
                         out += self.draw_minortick(value,twosided)
         else:
             vrange = vmax - vmin
             if isnan(vrange):
                 vrange, vmin, vmax = 1, 1, 2
             digits = int(log10(vrange))+1
             if vrange <= 1:
                 digits -= 1
             foo = int(vrange/(10**(digits-1)))
             if foo/9. > 0.5:
                 tickmarks = 10
             elif foo/9. > 0.2:
                 tickmarks = 5
             elif foo/9. > 0.1:
                 tickmarks = 2
 
             if custommajortickmarks > -1:
                 if custommajortickmarks not in [1, 2, 5, 10, 20]:
                     print '+++ Error in Ticks.draw_ticks(): MajorTickMarks must be in [1, 2, 5, 10, 20]'
                 else:
                     tickmarks = custommajortickmarks
 
             if tickmarks == 2 or tickmarks == 20:
                 minortickmarks = 3
             else:
                 minortickmarks = 4
             if customminortickmarks > -1:
                 minortickmarks = customminortickmarks
             #
             x = 0
             while x > vmin*10**digits:
                 x -= tickmarks*100**(digits-1)
             while x <= vmax*10**digits:
                 if x >= vmin*10**digits - tickmarks*100**(digits-1):
                     ticklabel = 1.*x/10**digits
                     if int(ticklabel) == ticklabel:
                         ticklabel = int(ticklabel)
                     if float(ticklabel-vmin)/vrange >= -1e-5:
                         if abs(ticklabel-vmin)/vrange > 1e-5 and abs(ticklabel-vmax)/vrange > 1e-5:
                             out += self.draw_majortick(ticklabel,twosided)
                         if drawlabels:
                             out += self.draw_majorticklabel(ticklabel)
 
                     xminor = x
                     for i in range(minortickmarks):
                         xminor += 1.*tickmarks*100**(digits-1)/(minortickmarks+1)
                         ticklabel = 1.*xminor/10**digits
                         if ticklabel > vmin and ticklabel < vmax:
                             if abs(ticklabel-vmin)/vrange > 1e-5 and abs(ticklabel-vmax)/vrange > 1e-5:
                                 out += self.draw_minortick(ticklabel,twosided)
                 x += tickmarks*100**(digits-1)
         return out
 
     def draw(self):
         pass
 
     def draw_minortick(self, ticklabel, twosided):
         pass
 
     def draw_majortick(self, ticklabel, twosided):
         pass
 
     def draw_majorticklabel(self, ticklabel):
         pass
 
     def draw_minorticklabel(self, value, label='', last=False):
         return ''
 
     def get_ticklabel(self, value, plotlog=False, minor=False, lastminor=False):
         label=''
         prefix = ''
         if plotlog:
             bar = int(log10(value))
 	    if bar < 0:
 	        sign='-'
 	    else:
 	        sign='\\,'
             if minor: # The power of ten is only to be added to the last minor tick label
                 if lastminor:
                     label = str(int(value/(10**bar))) + "\\cdot" + '10$^{'+sign+'\\text{'+str(abs(bar))+'}}$'
                 else:
                     label = str(int(value/(10**bar))) # The naked prefactor
             else:
                 if bar==0:
                     label = '1'
                 else:
                     label = '10$^{'+sign+'\\text{'+str(abs(bar))+'}}$'
         else:
             if fabs(value) < 1e-10:
                 value = 0
             label = str(value)
             if "e" in label:
                 a, b = label.split("e")
                 astr = "%2.1f" % float(a)
                 bstr = str(int(b))
                 label = "\\smaller{%s $\\!\\cdot 10^{%s} $}" % (astr, bstr)
         return label
 
 
 
 class XTicks(Ticks):
     ## Ticks along the x axis (bottom edge; mirrored to the top if two-sided).

-    def draw(self, custommajorticks=[], customminorticks=[], custommajortickmarks=-1, customminortickmarks=-1,drawlabels=True):
+    def draw(self, custommajorticks=None, customminorticks=None, custommajortickmarks=-1, customminortickmarks=-1,drawlabels=True):
         ## Define the x tick-mark macros, then delegate placement to
         ## Ticks.draw_ticks.  Log mode is only used for a strictly positive
         ## x range.
         twosided = bool(int(self.description.get('XTwosidedTicks', '0')))
         out = ""
         out += ('\n%\n% X-Ticks\n%\n')
         out += ('\\def\\majortickmarkx{\\psline[linewidth='+self.majorticklinewidth+'](0,0)(0,'+self.majorticklength+')}%\n')
         out += ('\\def\\minortickmarkx{\\psline[linewidth='+self.minorticklinewidth+'](0,0)(0,'+self.minorticklength+')}%\n')
         uselog = self.description['LogX'] and (self.coors.xmin() > 0 and self.coors.xmax() > 0)
         out += self.draw_ticks(self.coors.xmin(), self.coors.xmax(),\
                                    plotlog=uselog,\
                                    custommajorticks=custommajorticks,\
                                    customminorticks=customminorticks,\
                                    custommajortickmarks=custommajortickmarks,\
                                    customminortickmarks=customminortickmarks,\
                                    drawlabels=drawlabels,\
                                    twosided=twosided)
         return out

     def draw_minortick(self, ticklabel, twosided):
         ## One minor tick on the bottom edge (and rotated copy on top if two-sided)
         out = ''
         out += '\\rput('+self.coors.strphys2frameX(ticklabel)+', 0){\\minortickmarkx}\n'
         if twosided:
             out += '\\rput{180}('+self.coors.strphys2frameX(ticklabel)+', 1){\\minortickmarkx}\n'
         return out

     def draw_minorticklabel(self, value, label='', last=False):
         ## Label below a minor tick; only used on log axes (prefactor labels)
         if not label:
             label=self.get_ticklabel(value, int(self.description['LogX']), minor=True, lastminor=last)
         if last: # Some more indentation for the last minor label
             return ('\\rput('+self.coors.strphys2frameX(value)+', 0){\\rput[B](1.9\\labelsep,-2.3\\labelsep){\\strut{}'+label+'}}\n')
         else:
             return ('\\rput('+self.coors.strphys2frameX(value)+', 0){\\rput[B](0,-2.3\\labelsep){\\strut{}'+label+'}}\n')

     def draw_majortick(self, ticklabel, twosided):
         ## One major tick on the bottom edge (and rotated copy on top if two-sided)
         out = ''
         out += '\\rput('+self.coors.strphys2frameX(ticklabel)+', 0){\\majortickmarkx}\n'
         if twosided:
             out += '\\rput{180}('+self.coors.strphys2frameX(ticklabel)+', 1){\\majortickmarkx}\n'
         return out

     def draw_majorticklabel(self, value, label=''):
         ## Label below a major tick; "\n" in the label produces stacked lines
         if not label:
             label = self.get_ticklabel(value, int(self.description['LogX']) and self.coors.xmin() > 0 and self.coors.xmax() > 0)
         labelparts = label.split("\\n")
         labelcode = label if len(labelparts) == 1 else ("\\shortstack{" + "\\\\ ".join(labelparts) +  "}")
         rtn = "\\rput(" + self.coors.strphys2frameX(value) + ", 0){\\rput[t](0,-\\labelsep){" + labelcode + "}}\n"
         return rtn
 
 
 
 class YTicks(Ticks):
     ## Ticks along the y axis (left edge; mirrored to the right if two-sided).

-    def draw(self, custommajorticks=[], customminorticks=[], custommajortickmarks=-1, customminortickmarks=-1, drawlabels=True):
+    def draw(self, custommajorticks=None, customminorticks=None, custommajortickmarks=-1, customminortickmarks=-1, drawlabels=True):
         ## Define the y tick-mark macros, then delegate placement to
         ## Ticks.draw_ticks.  Log mode is only used for a strictly positive
         ## y range.
         twosided = bool(int(self.description.get('YTwosidedTicks', '0')))
         out = ""
         out += ('\n%\n% Y-Ticks\n%\n')
         out += ('\\def\\majortickmarky{\\psline[linewidth=%s](0,0)(%s,0)}%%\n' % (self.majorticklinewidth, self.majorticklength))
         out += ('\\def\\minortickmarky{\\psline[linewidth=%s](0,0)(%s,0)}%%\n' % (self.minorticklinewidth, self.minorticklength))
         uselog = self.description['LogY'] and self.coors.ymin() > 0 and self.coors.ymax() > 0
         out += self.draw_ticks(self.coors.ymin(), self.coors.ymax(),
                                plotlog=uselog,
                                custommajorticks=custommajorticks,
                                customminorticks=customminorticks,
                                custommajortickmarks=custommajortickmarks,
                                customminortickmarks=customminortickmarks,
                                twosided=twosided,
                                drawlabels=drawlabels)
         return out

     def draw_minortick(self, ticklabel, twosided):
         ## One minor tick on the left edge (and rotated copy on right if two-sided)
         out = ''
         out += '\\rput(0, '+self.coors.strphys2frameY(ticklabel)+'){\\minortickmarky}\n'
         if twosided:
             out += '\\rput{180}(1, '+self.coors.strphys2frameY(ticklabel)+'){\\minortickmarky}\n'
         return out

     def draw_majortick(self, ticklabel, twosided):
         ## One major tick on the left edge (and rotated copy on right if two-sided)
         out = ''
         out += '\\rput(0, '+self.coors.strphys2frameY(ticklabel)+'){\\majortickmarky}\n'
         if twosided:
             out += '\\rput{180}(1, '+self.coors.strphys2frameY(ticklabel)+'){\\majortickmarky}\n'
         return out

     def draw_majorticklabel(self, value, label=''):
         ## Label left of a major tick; deviation-mode ratio plots get a
         ## sigma suffix instead of the plain label
         if not label:
             label = self.get_ticklabel(value, int(self.description['LogY']) and self.coors.ymin() > 0 and self.coors.ymax() > 0)
         if self.description.has_key('RatioPlotMode') and self.description['RatioPlotMode'] == 'deviation' and \
-           self.description.has_key('RatioPlotStage') and self.description['RatioPlotStage'] == '1':
+           self.description.has_key('RatioPlotStage') and self.description['RatioPlotStage']:
             rtn = '\\uput[180]{0}(0, '+self.coors.strphys2frameY(value)+'){\\strut{}'+label+'\\,$\\sigma$}\n'
         else:
             labelparts = label.split("\\n")
             labelcode = label if len(labelparts) == 1 else ("\\shortstack{" + "\\\\ ".join(labelparts) +  "}")
             rtn = "\\rput(0, " + self.coors.strphys2frameY(value) + "){\\rput[r](-\\labelsep,0){" + labelcode + "}}\n"
         return rtn
 
 
 
 class ZTicks(Ticks):
     ## Ticks for the z (colour-bar) axis; drawn on the bar's right edge only.

     def __init__(self, description, coors):
         ## NB: shorter tick lengths than the base Ticks defaults
         self.majorticklinewidth = '0.3pt'
         self.minorticklinewidth = '0.3pt'
         self.majorticklength    = '6pt'
         self.minorticklength    = '2.6pt'
         self.description = description
         self.coors = coors

-    def draw(self, custommajorticks=[], customminorticks=[],
-            custommajortickmarks=-1, customminortickmarks=-1,
-            drawlabels=True):
+    def draw(self, custommajorticks=None, customminorticks=None, custommajortickmarks=-1, customminortickmarks=-1, drawlabels=True):
         ## Define the z tick-mark macros, then delegate placement to
         ## Ticks.draw_ticks (never two-sided)
         out = ""
         out += ('\n%\n% Z-Ticks\n%\n')
         out += ('\\def\\majortickmarkz{\\psline[linewidth='+self.majorticklinewidth+'](0,0)('+self.majorticklength+',0)}%\n')
         out += ('\\def\\minortickmarkz{\\psline[linewidth='+self.minorticklinewidth+'](0,0)('+self.minorticklength+',0)}%\n')
         out += self.draw_ticks(self.coors.zmin(), self.coors.zmax(),\
                                    plotlog=self.description['LogZ'],\
                                    custommajorticks=custommajorticks,\
                                    customminorticks=customminorticks,\
                                    custommajortickmarks=custommajortickmarks,\
                                    customminortickmarks=customminortickmarks,\
                                    twosided=False,\
                                    drawlabels=drawlabels)
         return out

     def draw_minortick(self, ticklabel, twosided):
         ## One minor tick on the colour-bar edge (twosided is ignored here)
         return '\\rput{180}(1, '+self.coors.strphys2frameZ(ticklabel)+'){\\minortickmarkz}\n'

     def draw_majortick(self, ticklabel, twosided):
         ## One major tick on the colour-bar edge (twosided is ignored here)
         return '\\rput{180}(1, '+self.coors.strphys2frameZ(ticklabel)+'){\\majortickmarkz}\n'

     def draw_majorticklabel(self, value, label=''):
         ## Label right of a major tick; deviation-mode ratio plots get a
         ## sigma suffix instead of the plain label
         if label=='':
             label = self.get_ticklabel(value, int(self.description['LogZ']))
         if self.description.has_key('RatioPlotMode') and self.description['RatioPlotMode']=='deviation' \
                 and self.description.has_key('RatioPlotStage') and self.description['RatioPlotStage']:
             return ('\\uput[0]{0}(1, '+self.coors.strphys2frameZ(value)+'){\\strut{}'+label+'\\,$\\sigma$}\n')
         else:
             return ('\\uput[0]{0}(1, '+self.coors.strphys2frameZ(value)+'){\\strut{}'+label+'}\n')
 
 
 
 class Coordinates(object):
     """Maps physical data coordinates onto the unit plot frame, honouring
     the Log{X,Y,Z} flags and the precomputed 'Borders' axis ranges."""

     def __init__(self, inputdata):
         self.description = inputdata.description

     def _clip(self, frac):
         ## Snap near-zero fractions to exactly 0, clamp runaways to [-10, 10]
         if fabs(frac) < 1e-4:
             return 0
         return min(max(frac, -10), 10)

     def phys2frameX(self, x):
         "Fractional x position of physical value x in the frame."
         if self.description['LogX']:
             if x <= 0:
                 return -10
             lo, hi = log10(self.xmin()), log10(self.xmax())
             return self._clip((log10(x) - lo) / (hi - lo))
         return self._clip(1.*(x - self.xmin()) / (self.xmax() - self.xmin()))

     def phys2frameY(self, y):
         "Fractional y position; log mode requires positive y and borders."
         if self.description['LogY']:
             if y <= 0 or self.ymin() <= 0 or self.ymax() <= 0:
                 return -10
             lo, hi = log10(self.ymin()), log10(self.ymax())
             return self._clip((log10(y) - lo) / (hi - lo))
         return self._clip(1.*(y - self.ymin()) / (self.ymax() - self.ymin()))

     def phys2frameZ(self, z):
         "Fractional z position of physical value z in the frame."
         if self.description['LogZ']:
             if z <= 0:
                 return -10
             lo, hi = log10(self.zmin()), log10(self.zmax())
             return self._clip((log10(z) - lo) / (hi - lo))
         return self._clip(1.*(z - self.zmin()) / (self.zmax() - self.zmin()))

     # TODO: Add frame2phys functions (to allow linear function sampling in the frame space rather than the physical space)

     def strphys2frameX(self, x):
         "String form of phys2frameX, for splicing into TeX output."
         return str(self.phys2frameX(x))

     def strphys2frameY(self, y):
         "String form of phys2frameY, for splicing into TeX output."
         return str(self.phys2frameY(y))

     def strphys2frameZ(self, z):
         "String form of phys2frameZ, for splicing into TeX output."
         return str(self.phys2frameZ(z))

     ## Physical axis borders, as computed upstream into description['Borders']
     def xmin(self):
         return self.description['Borders'][0]

     def xmax(self):
         return self.description['Borders'][1]

     def ymin(self):
         return self.description['Borders'][2]

     def ymax(self):
         return self.description['Borders'][3]

     def zmin(self):
         return self.description['Borders'][4]

     def zmax(self):
         return self.description['Borders'][5]
 
 
 ####################
 
 
 def try_cmd(args):
     "Run the given command + args and return True/False if it succeeds or not"
     import subprocess
     try:
         subprocess.check_output(args, stderr=subprocess.STDOUT)
         return True
     except Exception:
         ## Catch Exception rather than a bare `except:` so that
         ## KeyboardInterrupt/SystemExit still propagate.  Command failure
         ## (CalledProcessError) and a missing executable (OSError) both
         ## report False as before.
         return False
 
 def have_cmd(cmd):
     "Return True if the executable `cmd` is found on the PATH (via `which`)."
     return try_cmd(["which", cmd])
 
 
 import shutil, subprocess
 def process_datfile(datfile):
     """Render one .dat plot-description file into the formats requested in
     opts.OUTPUT_FORMAT (TEX/PS/PDF/EPS/PNG).

     Works in a private temp directory (TeX is messy), then copies the
     products back next to the input file.  Raises if datfile is unreadable;
     missing outputs are logged as errors rather than raised.
     """
     global opts
     if not os.access(datfile, os.R_OK):
         raise Exception("Could not read data file '%s'" % datfile)

     dirname = os.path.dirname(datfile)
     datfile = os.path.basename(datfile)
     filename = datfile.replace('.dat','')

     ## Create a temporary directory
     cwd = os.getcwd()
     datpath = os.path.join(cwd, dirname, datfile)
     tempdir = tempfile.mkdtemp('.make-plots')
     tempdatpath = os.path.join(tempdir, datfile)
     shutil.copy(datpath, tempdir)
     if opts.NO_CLEANUP:
         logging.info('Keeping temp-files in %s' % tempdir)

     ## Make TeX file
     ## (header + main plot + optional ratio plot + footer, via the plot classes)
     inputdata = InputData(os.path.join(dirname,filename))
     texpath = os.path.join(tempdir, '%s.tex' % filename)
     texfile = open(texpath, 'w')
     p = Plot(inputdata)
     texfile.write(p.write_header(inputdata))
     if inputdata.attr_bool("MainPlot", True):
         mp = MainPlot(inputdata)
         texfile.write(mp.draw(inputdata))
     if not inputdata.attr_bool("is2dim", False) and inputdata.attr_bool("RatioPlot", True) and inputdata.attr("RatioPlotReference"): # is not None:
         rp = RatioPlot(inputdata)
         texfile.write(rp.draw(inputdata))
     texfile.write(p.write_footer())
     texfile.close()

     if opts.OUTPUT_FORMAT != ["TEX"]:

         ## Check for the required programs
         latexavailable = have_cmd("latex")
         dvipsavailable = have_cmd("dvips")
         convertavailable = have_cmd("convert")
         ps2pnmavailable = have_cmd("ps2pnm")
         pnm2pngavailable = have_cmd("pnm2png")

         # TODO: It'd be nice to be able to control the size of the PNG between thumb and full-size...
         #   currently defaults (and is used below) to a size suitable for thumbnails
         def mkpng(infile, outfile, density=100):
             ## Rasterise via ImageMagick's `convert`; the only supported path
             if convertavailable:
                 pngcmd = ["convert", "-flatten", "-density", str(density), infile, "-quality", "100", "-sharpen", "0x1.0", outfile]
                 logging.debug(" ".join(pngcmd))
                 pngproc = subprocess.Popen(pngcmd, stdout=subprocess.PIPE, cwd=tempdir)
                 pngproc.wait()
             else:
                 raise Exception("Required PNG maker program (convert) not found")
             # elif ps2pnmavailable and pnm2pngavailable:
             #     pstopnm = "pstopnm -stdout -xsize=461 -ysize=422 -xborder=0.01 -yborder=0.01 -portrait " + infile
             #     p1 = subprocess.Popen(pstopnm.split(), stdout=subprocess.PIPE, stderr=open("/dev/null", "w"), cwd=tempdir)
             #     p2 = subprocess.Popen(["pnmtopng"], stdin=p1.stdout, stdout=open("%s/%s.png" % (tempdir, outfile), "w"), stderr=open("/dev/null", "w"), cwd=tempdir)
             #     p2.wait()
             # else:
             #     raise Exception("Required PNG maker programs (convert, or ps2pnm and pnm2png) not found")

         ## Run LaTeX (in no-stop mode)
         logging.debug(os.listdir(tempdir))
         texcmd = ["latex", "\scrollmode\input", texpath]
         logging.debug("TeX command: " + " ".join(texcmd))
         texproc = subprocess.Popen(texcmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=tempdir)
         logging.debug(texproc.communicate()[0])
         logging.debug(os.listdir(tempdir))

         ## Run dvips
         dvcmd = ["dvips", filename]
         if not logging.getLogger().isEnabledFor(logging.DEBUG):
             dvcmd.append("-q")
         ## Handle Minion Font
         if opts.OUTPUT_FONT == "MINION":
             dvcmd.append('-Pminion')

         ## Choose format
         ## NB: "-f" makes dvips write to stdout, which feeds the converter pipes below
         # TODO: Rationalise... this is a mess! Maybe we can use tex2pix?
         if "PS" in opts.OUTPUT_FORMAT:
             dvcmd += ["-o", "%s.ps" % filename]
             logging.debug(" ".join(dvcmd))
             dvproc = subprocess.Popen(dvcmd, stdout=subprocess.PIPE, cwd=tempdir)
             dvproc.wait()
         if "PDF" in opts.OUTPUT_FORMAT:
             dvcmd.append("-f")
             logging.debug(" ".join(dvcmd))
             dvproc = subprocess.Popen(dvcmd, stdout=subprocess.PIPE, cwd=tempdir)
             cnvproc = subprocess.Popen(["ps2pdf", "-"], stdin=dvproc.stdout, stdout=subprocess.PIPE, cwd=tempdir)
             f = open(os.path.join(tempdir, "%s.pdf" % filename), "w")
             f.write(cnvproc.communicate()[0])
             f.close()
         if "EPS" in opts.OUTPUT_FORMAT:
             dvcmd.append("-f")
             logging.debug(" ".join(dvcmd))
             dvproc = subprocess.Popen(dvcmd, stdout=subprocess.PIPE, cwd=tempdir)
             cnvproc = subprocess.Popen(["ps2eps"], stdin=dvproc.stdout, stderr=subprocess.PIPE, stdout=subprocess.PIPE, cwd=tempdir)
             f = open(os.path.join(tempdir, "%s.eps" % filename), "w")
             f.write(cnvproc.communicate()[0])
             f.close()
         if "PNG" in opts.OUTPUT_FORMAT:
             dvcmd.append("-f")
             logging.debug(" ".join(dvcmd))
             dvproc = subprocess.Popen(dvcmd, stdout=subprocess.PIPE, cwd=tempdir)
             pngcmd = ["convert", "-flatten", "-density", "100", "-", "-quality", "100", "-sharpen", "0x1.0", "%s.png" % filename]
             logging.debug(" ".join(pngcmd))
             pngproc = subprocess.Popen(pngcmd, stdin=dvproc.stdout, stdout=subprocess.PIPE, cwd=tempdir)
             pngproc.wait()
         logging.debug(os.listdir(tempdir))

     ## Copy results back to main dir
     for fmt in opts.OUTPUT_FORMAT:
         outname = "%s.%s" % (filename, fmt.lower())
         outpath = os.path.join(tempdir, outname)
         if os.path.exists(outpath):
             shutil.copy(outpath, os.path.join(cwd,dirname))
         else:
             logging.error("No output file '%s' from processing %s" % (outname, datfile))

     ## Clean up
     if not opts.NO_CLEANUP:
         shutil.rmtree(tempdir, ignore_errors=True)
 
 
 ####################
 
 
 if __name__ == '__main__':
 
     ## Try to rename the process on Linux
     try:
         import ctypes
         libc = ctypes.cdll.LoadLibrary('libc.so.6')
         libc.prctl(15, 'make-plots', 0, 0, 0)
     except Exception:
         pass
 
     ## Try to use Psyco optimiser
     try:
         import psyco
         psyco.full()
     except ImportError:
         pass
 
     ## Find number of (virtual) processing units
     import multiprocessing
     try:
         numcores = multiprocessing.cpu_count()
     except:
         numcores = 1
 
     ## Parse command line options
     from optparse import OptionParser, OptionGroup
     parser = OptionParser(usage=__doc__)
     parser.add_option("-n", "-j", "--num-threads", dest="NUM_THREADS", type="int",
                       default=numcores, help="max number of threads to be used [%s]" % numcores)
     parser.add_option("--font", dest="OUTPUT_FONT", choices="palatino,cm,times,helvetica,minion".split(","),
                       default="palatino", help="choose the font to be used in the plots")
     parser.add_option("--palatino", dest="OUTPUT_FONT", action="store_const", const="palatino", default="palatino",
                       help="use Palatino as font (default). DEPRECATED: Use --font")
     parser.add_option("--cm", dest="OUTPUT_FONT", action="store_const", const="cm", default="palatino",
                       help="use Computer Modern as font. DEPRECATED: Use --font")
     parser.add_option("--times", dest="OUTPUT_FONT", action="store_const", const="times", default="palatino",
                       help="use Times as font. DEPRECATED: Use --font")
     parser.add_option("--minion", dest="OUTPUT_FONT", action="store_const", const="minion", default="palatino",
                       help="use Adobe Minion Pro as font. Note: You need to set TEXMFHOME first. DEPRECATED: Use --font")
     parser.add_option("--helvetica", dest="OUTPUT_FONT", action="store_const", const="helvetica", default="palatino",
                       help="use Helvetica as font. DEPRECATED: Use --font")
     parser.add_option("--format", dest="OUTPUT_FORMAT", default="PDF",
                       help="choose plot format, perhaps multiple comma-separated formats e.g. 'pdf' or 'tex,pdf,png' (default = PDF).")
     parser.add_option("--ps", dest="OUTPUT_FORMAT", action="store_const", const="PS", default="PDF",
                       help="create PostScript output (default). DEPRECATED")
     parser.add_option("--pdf", dest="OUTPUT_FORMAT", action="store_const", const="PDF", default="PDF",
                       help="create PDF output. DEPRECATED")
     parser.add_option("--eps", dest="OUTPUT_FORMAT", action="store_const", const="EPS", default="PDF",
                       help="create Encapsulated PostScript output. DEPRECATED")
     parser.add_option("--png", dest="OUTPUT_FORMAT", action="store_const", const="PNG", default="PDF",
                      help="create PNG output. DEPRECATED")
     parser.add_option("--pspng", dest="OUTPUT_FORMAT", action="store_const", const="PS,PNG", default="PDF",
                      help="create PS and PNG output. DEPRECATED")
     parser.add_option("--pdfpng", dest="OUTPUT_FORMAT", action="store_const", const="PDF,PNG", default="PDF",
                      help="create PDF and PNG output. DEPRECATED")
     parser.add_option("--epspng", dest="OUTPUT_FORMAT", action="store_const", const="EPS,PNG", default="PDF",
                      help="create EPS and PNG output. DEPRECATED")
     parser.add_option("--tex", dest="OUTPUT_FORMAT", action="store_const", const="TEX", default="PDF",
                       help="create TeX/LaTeX output.")
     parser.add_option("--no-cleanup", dest="NO_CLEANUP", action="store_true", default=False,
                       help="keep temporary directory and print its filename.")
     parser.add_option("--no-subproc", dest="NO_SUBPROC", action="store_true", default=False,
                       help="don't use subprocesses to render the plots in parallel -- useful for debugging.")
     parser.add_option("--full-range", dest="FULL_RANGE", action="store_true", default=False,
                       help="plot full y range in LogY plots.")
     parser.add_option("-c", "--config", dest="CONFIGFILES", action="append", default=None,
                       help="plot config file to be used. Overrides internal config blocks.")
     verbgroup = OptionGroup(parser, "Verbosity control")
     verbgroup.add_option("-v", "--verbose", action="store_const", const=logging.DEBUG, dest="LOGLEVEL",
                          default=logging.INFO, help="print debug (very verbose) messages")
     verbgroup.add_option("-q", "--quiet", action="store_const", const=logging.WARNING, dest="LOGLEVEL",
                          default=logging.INFO, help="be very quiet")
     parser.add_option_group(verbgroup)
 
     opts, args = parser.parse_args()
     logging.basicConfig(level=opts.LOGLEVEL, format="%(message)s")
     opts.OUTPUT_FONT = opts.OUTPUT_FONT.upper()
     opts.OUTPUT_FORMAT = opts.OUTPUT_FORMAT.upper().split(",")
 
     ## Check for no args
     if len(args) == 0:
         logging.error(parser.get_usage())
         sys.exit(2)
 
     ## Check that the files exist
     for f in args:
         if not os.access(f, os.R_OK):
             print "Error: cannot read from %s" % f
             sys.exit(1)
 
     ## Test for external programs (kpsewhich, latex, dvips, ps2pdf/ps2eps, and convert)
     opts.LATEXPKGS = []
     if opts.OUTPUT_FORMAT != ["TEX"]:
         try:
             ## latex
             if not have_cmd("latex"):
                 logging.error("ERROR: required program 'latex' could not be found. Exiting...")
                 sys.exit(1)
             ## dvips
             if not have_cmd("dvips"):
                 logging.error("ERROR: required program 'dvips' could not be found. Exiting...")
                 sys.exit(1)
 
             ## ps2pdf / ps2eps
             if "PDF" in opts.OUTPUT_FORMAT:
                 if not have_cmd("ps2pdf"):
                     logging.error("ERROR: required program 'ps2pdf' (for PDF output) could not be found. Exiting...")
                     sys.exit(1)
             elif "EPS" in opts.OUTPUT_FORMAT:
                 if not have_cmd("ps2eps"):
                     logging.error("ERROR: required program 'ps2eps' (for EPS output) could not be found. Exiting...")
                     sys.exit(1)
             ## PNG output converter
             if "PNG" in opts.OUTPUT_FORMAT:
                 if not have_cmd("convert"):
                     logging.error("ERROR: required program 'convert' (for PNG output) could not be found. Exiting...")
                     sys.exit(1)
 
             ## kpsewhich: required for LaTeX package testing
             if not have_cmd("kpsewhich"):
                 logging.warning("WARNING: required program 'kpsewhich' (for LaTeX package checks) could not be found")
             else:
                 ## Check minion font
                 if opts.OUTPUT_FONT == "MINION":
                     p = subprocess.Popen(["kpsewhich", "minion.sty"], stdout=subprocess.PIPE)
                     p.wait()
                     if p.returncode != 0:
                         logging.warning('Warning: Using "--minion" requires minion.sty to be installed. Ignoring it.')
                         opts.OUTPUT_FONT = "PALATINO"
 
                 ## Check for HEP LaTeX packages
                 # TODO: remove HEP-specifics/non-standards?
                 for pkg in ["hepnames", "hepunits", "underscore"]:
                     p = subprocess.Popen(["kpsewhich", "%s.sty" % pkg], stdout=subprocess.PIPE)
                     p.wait()
                     if p.returncode == 0:
                         opts.LATEXPKGS.append(pkg)
 
                 ## Check for Palatino old style figures and small caps
                 if opts.OUTPUT_FONT == "PALATINO":
                     p = subprocess.Popen(["kpsewhich", "ot1pplx.fd"], stdout=subprocess.PIPE)
                     p.wait()
                     if p.returncode == 0:
                         opts.OUTPUT_FONT = "PALATINO_OSF"
         except Exception, e:
             logging.warning("Problem while testing for external packages. I'm going to try and continue without testing, but don't hold your breath...")
 
     def init_worker():
         import signal
         signal.signal(signal.SIGINT, signal.SIG_IGN)
 
     ## Run rendering jobs
     datfiles = args
     plotword = "plots" if len(datfiles) > 1 else "plot"
     logging.info("Making %d %s" % (len(datfiles), plotword))
     if opts.NO_SUBPROC:
         init_worker()
         for i, df in enumerate(datfiles):
             logging.info("Plotting %s (%d/%d remaining)" % (df, len(datfiles)-i, len(datfiles)))
             process_datfile(df)
     else:
         pool = multiprocessing.Pool(opts.NUM_THREADS, init_worker)
         try:
             for i, _ in enumerate(pool.imap(process_datfile, datfiles)):
                 logging.info("Plotting %s (%d/%d remaining)" % (datfiles[i], len(datfiles)-i, len(datfiles)))
             pool.close()
         except KeyboardInterrupt:
             print "Caught KeyboardInterrupt, terminating workers"
             pool.terminate()
         pool.join()
diff --git a/bin/make-plots-fast b/bin/make-plots-fast
new file mode 100755
--- /dev/null
+++ b/bin/make-plots-fast
@@ -0,0 +1,2762 @@
+#! /usr/bin/env python
+
+"""\
+Usage: %prog [options] file.dat [file2.dat ...]
+
+TODO
+ * Optimise output for e.g. lots of same-height bins in a row
+ * Add a RatioFullRange directive to show the full range of error bars + MC envelope in the ratio
+ * Tidy LaTeX-writing code -- faster to compile one doc only, then split it?
+ * Handle boolean values flexibly (yes, no, true, false, etc. as well as 1, 0)
+"""
+
+##
+## This program is copyright by Hendrik Hoeth <hoeth@linta.de> and
+## the Rivet team https://rivet.hepforge.org. It may be used
+## for scientific and private purposes. Patches are welcome, but please don't
+## redistribute changed versions yourself.
+##
+
+## Check the Python version
+import sys
+if sys.version_info[:3] < (2,6,0):
+    print "make-plots requires Python version >= 2.6.0... exiting"
+    sys.exit(1)
+
+## Try to rename the process on Linux
+try:
+    import ctypes
+    libc = ctypes.cdll.LoadLibrary('libc.so.6')
+    libc.prctl(15, 'make-plots', 0, 0, 0)
+except Exception, e:
+    pass
+
+
import os, logging, re
import tempfile
import getopt
import string
from math import *


## Regex patterns for the .dat plot-file format.
## All patterns use raw strings: the originals mixed raw and non-raw literals,
## and '\s'/'\w'/'\S' in non-raw strings are invalid escape sequences that
## raise warnings on modern Python (and only work by accident).
pat_begin_block = re.compile(r'^#+\s*BEGIN ([A-Z0-9_]+) ?(\S+)?')
pat_end_block =   re.compile(r'^#+\s*END ([A-Z0-9_]+)')
pat_comment = re.compile(r'^#|^\s*$')
pat_property = re.compile(r'^(\w+?)=(.*)$')
pat_path_property  = re.compile(r'^(\S+?)::(\w+?)=(.*)$')
+
+
def fuzzyeq(a, b, tolerance=1e-6):
    """Fuzzy equality comparison function for floats, with given fractional tolerance.

    Values within 1e-12 of an exact zero are treated as equal to it.
    """
    if (a == 0 and abs(b) < 1e-12) or (b == 0 and abs(a) < 1e-12):
        return True
    ## BUGFIX: guard the division — for non-zero a == -b (e.g. fuzzyeq(1.0, -1.0))
    ## the denominator abs(a+b) was zero and this raised ZeroDivisionError.
    ## Two non-zero numbers summing to exactly zero can never be fuzzily equal.
    if a + b == 0:
        return False
    return 2.0*abs(a-b)/abs(a+b) < tolerance
+
def inrange(x, a, b):
    "Return True if x lies in the half-open interval [a, b)."
    return a <= x < b
+
def floatify(x):
    """Coerce a string, scalar, or sequence to float(s).

    Strings are whitespace-split first; a single value comes back as a bare
    float, multiple values as a list of floats.
    """
    if type(x) is str:
        x = x.split()
    if not hasattr(x, "__len__"):
        x = [x]
    vals = [float(v) for v in x]
    return vals[0] if len(vals) == 1 else vals
+
def floatpair(x):
    """Coerce a string, scalar, or 2-sequence to a pair of floats.

    A scalar is duplicated into both slots; a string is whitespace-split and
    must then contain exactly two values.
    """
    if type(x) is str:
        x = x.split()
    if not hasattr(x, "__len__"):
        return [float(x), float(x)]
    assert len(x) == 2
    return [float(v) for v in x]
+
+
def is_end_marker(line, blockname):
    "Return whether 'line' is an END marker closing a 'blockname' block."
    m = pat_end_block.match(line)
    if not m:
        return m  #< None, falsy — same as the original 'm and ...' short-circuit
    return m.group(1) == blockname
+
def is_comment(line):
    "Return True if 'line' is a comment or blank/whitespace-only line."
    m = pat_comment.match(line)
    return m is not None
+
+
+
class Described(object):
    """Inherited functionality for objects holding a 'description' dictionary.

    Subclasses are expected to provide a 'description' dict attribute; the
    typed accessors below return None when a key is missing or its value
    cannot be interpreted as the requested type.
    """

    def __init__(self):
        pass

    def has_attr(self, key):
        "Return True if the description contains 'key'."
        return self.description.has_key(key)

    def set_attr(self, key, val):
        "Set description entry 'key' to 'val'."
        self.description[key] = val

    def attr(self, key, default=None):
        "Return the raw description entry for 'key', or 'default' if absent."
        return self.description.get(key, default)

    def attr_bool(self, key, default=None):
        """Return the entry for 'key' interpreted as a bool.

        Accepts 1/0, true/false, yes/no, on/off (case-insensitive); returns
        None for a missing or unrecognised value.
        """
        x = self.attr(key, default)
        if x is None: return None
        if str(x).lower() in ["1", "true", "yes", "on"]: return True
        if str(x).lower() in ["0", "false", "no", "off"]: return False
        return None

    def attr_int(self, key, default=None):
        "Return the entry for 'key' as an int, or None if absent/unparseable."
        x = self.attr(key, default)
        try:
            x = int(x)
        except (TypeError, ValueError):
            ## Narrowed from a bare 'except:': only conversion failures mean
            ## "no value" — a bare except also swallowed KeyboardInterrupt etc.
            x = None
        return x

    def attr_float(self, key, default=None):
        "Return the entry for 'key' as a float, or None if absent/unparseable."
        x = self.attr(key, default)
        try:
            x = float(x)
        except (TypeError, ValueError):
            x = None
        return x
+
+
+
class InputData(Described):
    ## Parses a make-plots .dat file into histogram/special/function objects
    ## plus a plot-level 'description' dict, then normalises the settings
    ## (plot sizes, log flags, draw order) ready for rendering.
    ## NOTE(review): relies on module-level globals 'opts' (command-line
    ## options) and the Special/Histogram/Histo1D/Histo2D/Function classes
    ## defined elsewhere in this file.

    def __init__(self, filename):
        ## 'filename' is expected WITHOUT the .dat extension
        self.filename = filename+".dat"
        self.histos = {}
        self.special = {}
        self.functions = {}

        self.description = {}
        self.pathdescriptions = []

        self.is2dim = False
        f = open(self.filename)
        ## Scan for BEGIN markers; each block type is consumed from the same
        ## open file handle 'f' by the relevant reader, which stops at END.
        for line in f:
            m = pat_begin_block.match(line)
            if m:
                name, path = m.group(1,2)
                if path is None and name != 'PLOT':
                    raise Exception('BEGIN sections need a path name.')

                ## Pass the reading of the block to separate functions
                if name == 'PLOT':
                    self.read_input(f);
                elif name == 'SPECIAL':
                    self.special[path] = Special(f)
                elif name == 'HISTOGRAM' or name == 'HISTOGRAM2D':
                    self.histos[path] = Histogram(f, p=path)
                    # self.histos[path].path = path
                    self.description['is2dim'] = self.histos[path].is2dim
                elif name == 'HISTO1D':
                    self.histos[path] = Histo1D(f, p=path)
                elif name == 'HISTO2D':
                    self.histos[path] = Histo2D(f, p=path)
                    self.description['is2dim'] = True
                elif name == 'FUNCTION':
                    self.functions[path] = Function(f)
#            elif is_comment(line):
#                continue
#            else:
#                self.read_path_based_input(line)
        f.close()

        ## External -c/--config files override settings read from the .dat file
        self.apply_config_files(opts.CONFIGFILES)

        ## Plot (and subplot) sizing
        # TODO: Use attr functions and bools properly
        self.description.setdefault('PlotSizeX', 10.)
        if self.description['is2dim']:
            ## 2D plots reserve horizontal space for the colour scale and
            ## never get a ratio subplot
            self.description['PlotSizeX'] -= 1.7
            self.description['MainPlot'] = '1'
            self.description['RatioPlot'] = '0'

        ## Explicit 'PlotSize' (x,y[,ratio-y]) overrides the individual sizes
        if self.description.has_key('PlotSize') and self.description['PlotSize']!='':
            plotsizes = self.description['PlotSize'].split(',')
            self.description['PlotSizeX'] = float(plotsizes[0])
            self.description['PlotSizeY'] = float(plotsizes[1])
            if len(plotsizes) == 3:
                self.description['RatioPlotSizeY'] = float(plotsizes[2])
            del self.description['PlotSize']

        if self.description.get('MainPlot', '1') == '0':
            ## Ratio, no main
            self.description['RatioPlot'] = '1' #< don't allow both to be zero!
            self.description['PlotSizeY'] = 0.
            self.description.setdefault('RatioPlotSizeY', 9.)
        else:
            if self.description.get('RatioPlot', '0') == '1':
                ## Main and ratio
                self.description.setdefault('PlotSizeY', 6.)
                self.description.setdefault('RatioPlotSizeY', self.description.get('RatioPlotYSize', 3.))
            else:
                ## Main, no ratio
                self.description.setdefault('PlotSizeY', self.description.get('PlotYSize', 9.))
                self.description['RatioPlotSizeY'] = 0.

        ## Ensure numbers, not strings
        self.description['PlotSizeX'] = float(self.description['PlotSizeX'])
        self.description['PlotSizeY'] = float(self.description['PlotSizeY'])
        self.description['RatioPlotSizeY'] = float(self.description['RatioPlotSizeY'])
        # self.description['TopMargin'] = float(self.description['TopMargin'])
        # self.description['BottomMargin'] = float(self.description['BottomMargin'])

        ## Convert '1'/'0' string flags to real booleans
        self.description['LogX'] = self.description.has_key('LogX') and self.description['LogX']=='1'
        self.description['LogY'] = self.description.has_key('LogY') and self.description['LogY']=='1'
        self.description['LogZ'] = self.description.has_key('LogZ') and self.description['LogZ']=='1'
        if self.description.has_key('Rebin'):
            ## A plot-level Rebin is pushed down onto every histogram
            for i in self.histos:
                self.histos[i].description['Rebin'] = self.description['Rebin']

        ## Build the draw order: restrict to DrawOnly if given, then sort by
        ## each histogram's integer PlotOrder (default 0), stable within a group
        histoordermap = {}
        histolist = self.histos.keys()
        if self.description.has_key('DrawOnly'):
            histolist = filter(self.histos.keys().count, self.description['DrawOnly'].strip().split())
        for histo in histolist:
            order = 0
            if self.histos[histo].description.has_key('PlotOrder'):
                order = int(self.histos[histo].description['PlotOrder'])
            if not order in histoordermap:
                histoordermap[order] = []
            histoordermap[order].append(histo)
        sortedhistolist = []
        for i in sorted(histoordermap.keys()):
            sortedhistolist.extend(histoordermap[i])
        self.description['DrawOnly'] = sortedhistolist


        ## Inherit various values from histograms if not explicitly set
        for k in ['LogX', 'LogY', 'LogZ',
                  'XLabel', 'YLabel', 'ZLabel',
                  'XCustomMajorTicks', 'YCustomMajorTicks', 'ZCustomMajorTicks']:
            self.inherit_from_histos(k)

        return


    ## Typed read/write accessors over the 'description' dict (see Described)

    @property
    def is2dim(self):
        return self.attr_bool("is2dim", False)
    @is2dim.setter
    def is2dim(self, val):
        self.set_attr("is2dim", val)


    @property
    def drawonly(self):
        x = self.attr("DrawOnly")
        if type(x) is str:
            self.drawonly = x #< use setter to listify
        return x if x else []
    @drawonly.setter
    def drawonly(self, val):
        if type(val) is str:
            val = val.strip().split()
        self.set_attr("DrawOnly", val)


    @property
    def stacklist(self):
        x = self.attr("Stack")
        if type(x) is str:
            self.stacklist = x #< use setter to listify
        return x if x else []
    @stacklist.setter
    def stacklist(self, val):
        if type(val) is str:
            val = val.strip().split()
        self.set_attr("Stack", val)


    @property
    def plotorder(self):
        x = self.attr("PlotOrder")
        if type(x) is str:
            self.plotorder = x #< use setter to listify
        return x if x else []
    @plotorder.setter
    def plotorder(self, val):
        if type(val) is str:
            val = val.strip().split()
        self.set_attr("PlotOrder", val)


    @property
    def plotsizex(self):
        return self.attr_float("PlotSizeX")
    @plotsizex.setter
    def plotsizex(self, val):
        self.set_attr("PlotSizeX", val)

    @property
    def plotsizey(self):
        return self.attr_float("PlotSizeY")
    @plotsizey.setter
    def plotsizey(self, val):
        self.set_attr("PlotSizeY", val)

    @property
    def plotsize(self):
        return [self.plotsizex, self.plotsizey]
    @plotsize.setter
    def plotsize(self, val):
        if type(val) is str:
            val = [float(x) for x in val.split(",")]
        assert len(val) == 2
        self.plotsizex = val[0]
        self.plotsizey = val[1]

    @property
    def ratiosizey(self):
        return self.attr_float("RatioPlotSizeY")
    @ratiosizey.setter
    def ratiosizey(self, val):
        self.set_attr("RatioPlotSizeY", val)


    @property
    def scale(self):
        return self.attr_float("Scale")
    @scale.setter
    def scale(self, val):
        self.set_attr("Scale", val)


    @property
    def xmin(self):
        return self.attr_float("XMin")
    @xmin.setter
    def xmin(self, val):
        self.set_attr("XMin", val)

    @property
    def xmax(self):
        return self.attr_float("XMax")
    @xmax.setter
    def xmax(self, val):
        self.set_attr("XMax", val)

    @property
    def xrange(self):
        return [self.xmin, self.xmax]
    @xrange.setter
    def xrange(self, val):
        if type(val) is str:
            val = [float(x) for x in val.split(",")]
        assert len(val) == 2
        self.xmin = val[0]
        self.xmax = val[1]


    @property
    def ymin(self):
        return self.attr_float("YMin")
    @ymin.setter
    def ymin(self, val):
        self.set_attr("YMin", val)

    @property
    def ymax(self):
        return self.attr_float("YMax")
    @ymax.setter
    def ymax(self, val):
        self.set_attr("YMax", val)

    @property
    def yrange(self):
        return [self.ymin, self.ymax]
    @yrange.setter
    def yrange(self, val):
        if type(val) is str:
            val = [float(y) for y in val.split(",")]
        assert len(val) == 2
        self.ymin = val[0]
        self.ymax = val[1]


    # TODO: add more rw properties for plotsize(x,y), ratiosize(y),
    #   show_mainplot, show_ratioplot, show_legend, log(x,y,z), rebin,
    #   drawonly, legendonly, plotorder, stack,
    #   label(x,y,z), majorticks(x,y,z), minorticks(x,y,z),
    #   min(x,y,z), max(x,y,z), range(x,y,z)


    def inherit_from_histos(self, k):
        """Note: this will inherit the key from a random histogram:
        only use if you're sure all histograms have this key!"""
        if not self.description.has_key(k):
            h = list(self.histos.itervalues())[0]
            if h.description.has_key(k):
                self.description[k] = h.description[k]


    def read_input(self, f):
        ## Consume key=value lines from 'f' until the END PLOT marker,
        ## storing them into self.description (later lines win).
        for line in f:
            if is_end_marker(line, 'PLOT'):
                break
            elif is_comment(line):
                continue
            m = pat_property.match(line)
            if m:
                prop, value = m.group(1,2)
                if prop in self.description:
                    logging.debug("Overwriting property %s = %s -> %s" % (prop, self.description[prop], value))
                ## Use strip here to deal with DOS newlines containing \r
                self.description[prop.strip()] = value.strip()


    def apply_config_files(self, conffiles):
        ## Apply -c/--config overrides: PLOT sections whose path regex matches
        ## this filename update self.description; 'regex::key=value' lines
        ## update matching histogram/special/function objects.
        ## NOTE(review): the inner 'i = i+1' walk does not advance the outer
        ## range() loop, so lines inside a PLOT section are visited again by
        ## the outer loop — benign today (they match neither branch usefully)
        ## but worth confirming before restructuring.
        if conffiles is not None:
            for filename in conffiles:
                cf = open(filename,'r')
                lines = cf.readlines()
                for i in range(0, len(lines)):
                    ## First evaluate PLOT sections
                    m = pat_begin_block.match(lines[i])
                    if m and m.group(1) == 'PLOT' and re.match(m.group(2),self.filename):
                        while i<len(lines)-1:
                            i = i+1
                            if is_end_marker(lines[i], 'PLOT'):
                                break
                            elif is_comment(lines[i]):
                                continue
                            m = pat_property.match(lines[i])
                            if m:
                                prop, value = m.group(1,2)
                                if prop in self.description:
                                    logging.debug("Overwriting from conffile property %s = %s -> %s" % (prop, self.description[prop], value))
                                ## Use strip here to deal with DOS newlines containing \r
                                self.description[prop.strip()] = value.strip()
                    elif is_comment(lines[i]):
                        continue
                    else:
                        ## Then evaluate path-based settings, e.g. for HISTOGRAMs
                        m = pat_path_property.match(lines[i])
                        if m:
                            regex, prop, value = m.group(1,2,3)
                            for obj_dict in [self.special, self.histos, self.functions]:
                                for path, obj in obj_dict.iteritems():
                                    if re.match(regex, path):
                                        ## Use strip here to deal with DOS newlines containing \r
                                        obj.description.update({prop.strip() : value.strip()})
                cf.close()
+
+
+
class Plot(object):
    ## Base class for plot renderers (MainPlot, ratio plots, ...): applies
    ## normalisation/stacking to an InputData's histograms and computes the
    ## x/y/z axis borders before LaTeX/PSTricks output is written.

    def __init__(self, inputdata):
        pass

    def set_normalization(self,inputdata):
        ## Push plot-level normalisation and Scale settings down onto each
        ## drawn histogram, then let each histogram apply them (mangle_input)
        for method in ['NormalizeToIntegral', 'NormalizeToSum']:
            if inputdata.description.has_key(method):
                for i in inputdata.drawonly:
                    if not inputdata.histos[i].has_attr(method):
                        inputdata.histos[i].set_attr(method, inputdata.attr(method))
        if inputdata.scale:
            for i in inputdata.drawonly:
                inputdata.histos[i].scale = inputdata.scale
        for i in inputdata.drawonly:
            inputdata.histos[i].mangle_input()

    def stack_histograms(self,inputdata):
        ## Cumulatively add each histogram in the Stack list onto the previous
        ## one, so later entries contain the running total
        if inputdata.description.has_key('Stack'):
            stackhists = [h for h in inputdata.attr('Stack').strip().split() if h in inputdata.histos]
            previous = ''
            for i in stackhists:
                if previous != '':
                    inputdata.histos[i].add(inputdata.histos[previous])
                previous = i

    def set_histo_options(self,inputdata):
        ## Propagate the plot-level ConnectGaps flag to histograms lacking it
        if inputdata.description.has_key('ConnectGaps'):
            for i in inputdata.histos.keys():
                if not inputdata.histos[i].description.has_key('ConnectGaps'):
                    inputdata.histos[i].description['ConnectGaps'] = inputdata.description['ConnectGaps']

    def set_borders(self, inputdata):
        ## Order matters: the y/z minimum calculations read self.ymax/self.zmax
        self.set_xmax(inputdata)
        self.set_xmin(inputdata)
        self.set_ymax(inputdata)
        self.set_ymin(inputdata)
        self.set_zmax(inputdata)
        self.set_zmin(inputdata)
        inputdata.description['Borders'] = (self.xmin, self.xmax, self.ymin, self.ymax, self.zmin, self.zmax)

    def set_xmin(self, inputdata):
        ## Explicit XMin wins; otherwise the smallest x over all drawn histos
        self.xmin = inputdata.xmin
        if self.xmin is None:
            self.xmin = min(inputdata.histos[h].getXMin() for h in inputdata.description['DrawOnly'])

    def set_xmax(self,inputdata):
        self.xmax = inputdata.xmax
        if self.xmax is None:
            ## NOTE(review): this takes min() of the histograms' x-maxima,
            ## i.e. the upper edge of the *narrowest* histogram — set_xmin
            ## symmetrically uses min() of the minima. Suspected copy-paste
            ## slip for max(); confirm intent before changing.
            self.xmax = min(inputdata.histos[h].getXMax() for h in inputdata.description['DrawOnly'])

    def set_ymin(self,inputdata):
        ## Explicit YMin wins; otherwise derive from the drawn histograms,
        ## with special handling for ShowZero, LogY and FullRange
        if inputdata.ymin is not None:
            self.ymin = inputdata.ymin
        else:
            ymins = [inputdata.histos[i].getYMin(self.xmin, self.xmax, inputdata.description['LogY']) for i in inputdata.attr('DrawOnly')]
            minymin = min(ymins)
            if inputdata.description['is2dim']:
                self.ymin = minymin
            else:
                showzero = inputdata.attr_bool("ShowZero", True)
                if showzero:
                    self.ymin = 0. if minymin > -1e-4 else 1.1*minymin
                else:
                    self.ymin = 1.1*minymin if minymin < -1e-4 else 0 if minymin < 1e-4 else 0.9*minymin
                if inputdata.description['LogY']:
                    ## Only positive values are usable on a log axis; if none,
                    ## synthesise a floor from the y-maximum
                    ymins = [ymin for ymin in ymins if ymin > 0.0]
                    if not ymins:
                        if self.ymax == 0:
                            self.ymax = 1
                        ymins.append(2e-7*self.ymax)
                    minymin = min(ymins)
                    fullrange = opts.FULL_RANGE
                    if inputdata.has_attr('FullRange'):
                        fullrange = inputdata.attr_bool('FullRange')
                    self.ymin = minymin/1.7 if fullrange else max(minymin/1.7, 2e-7*self.ymax)

                if self.ymin == self.ymax:
                    ## Degenerate range: pad symmetrically
                    self.ymin -= 1
                    self.ymax += 1

    def set_ymax(self,inputdata):
        if inputdata.has_attr('YMax'):
            self.ymax = inputdata.attr_float('YMax')
        else:
            self.ymax = max(inputdata.histos[h].getYMax(self.xmin, self.xmax) for h in inputdata.attr('DrawOnly'))
            if not inputdata.is2dim:
                ## Headroom factor: larger on log axes
                self.ymax *= (1.7 if inputdata.attr_bool('LogY') else 1.1)

    def set_zmin(self,inputdata):
        ## Mirror of set_ymin for the z (colour) axis of 2D plots
        if inputdata.has_attr('ZMin'):
            self.zmin = inputdata.attr_float('ZMin')
        else:
            zmins = [inputdata.histos[i].getZMin(self.xmin, self.xmax, self.ymin, self.ymax) for i in inputdata.attr('DrawOnly')]
            ## NOTE(review): min(zmins) is evaluated before the 'if zmins:'
            ## guard below — an empty DrawOnly list would raise ValueError
            ## here first, making the guard unreachable-as-False. Compare
            ## set_zmax, which guards correctly.
            minzmin = min(zmins)
            self.zmin = minzmin
            if zmins:
                showzero = inputdata.attr_bool('ShowZero', True)
                if showzero:
                    self.zmin = 0 if minzmin > -1e-4 else 1.1*minzmin
                else:
                    self.zmin = 1.1*minzmin if minzmin < -1e-4 else 0. if minzmin < 1e-4 else 0.9*minzmin
                if inputdata.attr_bool('LogZ', False):
                    zmins = [zmin for zmin in zmins if zmin > 0]
                    if not zmins:
                        if self.zmax == 0:
                            self.zmax = 1
                        zmins.append(2e-7*self.zmax)
                    minzmin = min(zmins)
                    fullrange = inputdata.attr_bool("FullRange", opts.FULL_RANGE)
                    self.zmin = minzmin/1.7 if fullrange else max(minzmin/1.7, 2e-7*self.zmax)

                if self.zmin == self.zmax:
                    self.zmin -= 1
                    self.zmax += 1

    def set_zmax(self,inputdata):
        self.zmax = inputdata.attr_float('ZMax')
        if self.zmax is None:
            zmaxs = [inputdata.histos[h].getZMax(self.xmin, self.xmax, self.ymin, self.ymax) for h in inputdata.attr('DrawOnly')]
            self.zmax = max(zmaxs) if zmaxs else 1


    def draw(self):
        ## Overridden by subclasses (e.g. MainPlot)
        pass


    def write_header(self,inputdata):
        ## Open the PSTricks picture environment; 2D plots also set up the
        ## colour-gradient series used by the colour scale
        out = '\\begin{multipage}\n'
        out += '\\begin{pspicture}(0,0)(0,0)\n'
        out += '\\psset{xunit=%scm}\n' %(inputdata.description['PlotSizeX'])
        if inputdata.description['is2dim']:
            colorseries = '{hsb}{grad}[rgb]{0,0,1}{-.700,0,0}'
            if inputdata.description.has_key('ColorSeries') and inputdata.description['ColorSeries']!='':
                colorseries = inputdata.description['ColorSeries']
            out += '\\definecolorseries{gradientcolors}%s\n' % colorseries
            out += '\\resetcolorseries[130]{gradientcolors}\n'
        return out

    def write_footer(self):
        ## Close the PSTricks picture environment opened by write_header
        out = '\\end{pspicture}\n'
        out += '\\end{multipage}\n'
        out += '%\n%\n'
        return out
+
+
+
+class MainPlot(Plot):
+
+    def __init__(self, inputdata):
+        self.set_normalization(inputdata)
+        self.stack_histograms(inputdata)
+        if (inputdata.description.has_key('GofLegend')  and inputdata.description['GofLegend']=='1') or \
+           (inputdata.description.has_key('GofFrame')   and inputdata.description['GofFrame']!='') and not \
+           (inputdata.description.has_key('TaylorPlot') and inputdata.description['TaylorPlot']=='1'):
+            self.calculate_gof(inputdata)
+        self.set_histo_options(inputdata)
+        self.set_borders(inputdata)
+        self.yoffset = inputdata.description['PlotSizeY']
+        self.coors = Coordinates(inputdata)
+
+    def draw(self, inputdata):
+        out = ""
+        out += ('\n%\n% MainPlot\n%\n')
+        out += ('\\psset{yunit=%scm}\n' %(self.yoffset))
+        out += ('\\rput(0,-1){%\n')
+        out += ('\\psset{yunit=%scm}\n' %(inputdata.description['PlotSizeY']))
+        out += self._draw(inputdata)
+        out += ('}\n')
+        return out
+
+    def _draw(self, inputdata):
+        out = ""
+
+        # TODO: do this more compactly, e.g. by assigning sorting keys!
+        if inputdata.attr_bool('DrawSpecialFirst', False):
+            for s in inputdata.special:
+                out += s.draw(self.coors)
+            if inputdata.attr_bool('DrawFunctionFirst', False):
+                for f in inputdata.functions:
+                    out += f.draw(self.coors)
+                for i in inputdata.description['DrawOnly']:
+                    out += inputdata.histos[i].draw(self.coors)
+            else:
+                for i in inputdata.description['DrawOnly']:
+                    out += inputdata.histos[i].draw(self.coors)
+                for f in inputdata.functions:
+                    out += f.draw(self.coors)
+        else:
+            if inputdata.attr_bool('DrawFunctionFirst', False):
+                for f in inputdata.functions:
+                    out += f.draw(self.coors)
+                for i in inputdata.description['DrawOnly']:
+                    out += inputdata.histos[i].draw(self.coors)
+            else:
+                for i in inputdata.description['DrawOnly']:
+                    out += inputdata.histos[i].draw(self.coors)
+                for f in inputdata.functions:
+                    out += f.draw(self.coors)
+            for i in inputdata.special.keys():
+                out += inputdata.special[i].draw(self.coors)
+
+        if inputdata.attr_bool('Legend', False):
+            legend = Legend(inputdata.description,inputdata.histos,inputdata.functions)
+            out += legend.draw()
+        if inputdata.description['is2dim']:
+            colorscale = ColorScale(inputdata.description, self.coors)
+            out += colorscale.draw()
+        frame = Frame()
+        out += frame.draw(inputdata)
+
+        xcustommajortickmarks = inputdata.attr_int('XMajorTickMarks', -1)
+        xcustomminortickmarks = inputdata.attr_int('XMinorTickMarks', -1)
+
+        xcustommajorticks = xcustomminorticks = None
+        if inputdata.attr('XCustomMajorTicks'):
+            xcustommajorticks = []
+            x_label_pairs = inputdata.attr('XCustomMajorTicks').strip().split() #'\t')
+            if len(x_label_pairs) % 2 == 0:
+                for i in range(0, len(x_label_pairs), 2):
+                    xcustommajorticks.append({'Value': float(x_label_pairs[i]), 'Label': x_label_pairs[i+1]})
+            else:
+                print "Warning: XCustomMajorTicks requires an even number of alternating pos/label entries"
+
+        if inputdata.attr('XCustomMinorTicks'):
+            xs = inputdata.attr('XCustomMinorTicks').strip().split() #'\t')
+            xcustomminorticks = [{'Value': float(x)} for x in xs]
+
+        xticks = XTicks(inputdata.description, self.coors)
+        drawxlabels = inputdata.attr_bool('PlotXTickLabels', True) and not inputdata.attr_bool('RatioPlot', False)
+
+        out += xticks.draw(custommajortickmarks=xcustommajortickmarks,
+                           customminortickmarks=xcustomminortickmarks,
+                           custommajorticks=xcustommajorticks,
+                           customminorticks=xcustomminorticks,
+                           drawlabels=drawxlabels)
+
+        ycustommajortickmarks = inputdata.attr_int('YMajorTickMarks', -1)
+        ycustomminortickmarks = inputdata.attr_int('YMinorTickMarks', -1)
+
+        ycustommajorticks = ycustomminorticks = None
+        if inputdata.description.has_key('YCustomMajorTicks'):
+            ycustommajorticks = []
+            y_label_pairs = inputdata.description['YCustomMajorTicks'].strip().split() #'\t')
+            if len(y_label_pairs) % 2 == 0:
+                for i in range(0, len(y_label_pairs), 2):
+                    ycustommajorticks.append({'Value': float(y_label_pairs[i]), 'Label': y_label_pairs[i+1]})
+            else:
+                print "Warning: YCustomMajorTicks requires an even number of alternating pos/label entries"
+
+        if inputdata.has_attr('YCustomMinorTicks'):
+            ys = inputdata.attr('YCustomMinorTicks').strip().split() #'\t')
+            ycustomminorticks = [{'Value': float(y)} for y in ys]
+
+        yticks = YTicks(inputdata.description, self.coors)
+        drawylabels = inputdata.attr_bool('PlotYTickLabels', True)
+
+        out += yticks.draw(custommajortickmarks=ycustommajortickmarks,
+                           customminortickmarks=ycustomminortickmarks,
+                           custommajorticks=ycustommajorticks,
+                           customminorticks=ycustomminorticks,
+                           drawlabels=drawylabels)
+
+        labels = Labels(inputdata.description)
+        if inputdata.attr_bool('RatioPlot', False):
+            olab = labels.draw(['Title','YLabel'])
+        else:
+            if not inputdata.description['is2dim']:
+                olab = labels.draw(['Title','XLabel','YLabel'])
+            else:
+                olab = labels.draw(['Title','XLabel','YLabel','ZLabel'])
+        out += olab
+        return out
+
+
+    def calculate_gof(self, inputdata):
+        refdata = inputdata.description.get('GofReference')
+        if refdata is None:
+            refdata = inputdata.description.get('RatioPlotReference')
+
+        if refdata is None:
+            inputdata.description['GofLegend'] = '0'
+            inputdata.description['GofFrame'] = ''
+            return
+
+        def pickcolor(gof):
+            color = None
+            colordefs = {}
+            for i in inputdata.description.setdefault('GofFrameColor', '0:green 3:yellow 6:red!70').strip().split():
+                foo = i.split(':')
+                if len(foo) != 2:
+                    continue
+                colordefs[float(foo[0])] = foo[1]
+            for i in sorted(colordefs.keys()):
+                if gof>=i:
+                    color=colordefs[i]
+            return color
+
+        inputdata.description.setdefault('GofLegend', '0')
+        inputdata.description.setdefault('GofFrame', '')
+        inputdata.description.setdefault('FrameColor', None)
+
+        for i in inputdata.description['DrawOnly']:
+            if i == refdata:
+                continue
+            if inputdata.description['GofLegend']!='1' and i!=inputdata.description['GofFrame']:
+                continue
+
+            if inputdata.description.has_key('GofType') and inputdata.description['GofType']!='chi2':
+                return
+            gof = inputdata.histos[i].getChi2(inputdata.histos[refdata])
+            if i == inputdata.description['GofFrame'] and inputdata.description['FrameColor'] is None:
+                inputdata.description['FrameColor'] = pickcolor(gof)
+            if inputdata.histos[i].description.setdefault('Title', '') != '':
+                inputdata.histos[i].description['Title'] += ', '
+            inputdata.histos[i].description['Title'] += '$\\chi^2/n={}$%1.2f' %gof
+
+
+
+class TaylorPlot(Plot):
+
+    def __init__(self, inputdata):
+        self.refdata = inputdata.description['TaylorPlotReference']
+        self.calculate_taylorcoordinates(inputdata)
+
+    def calculate_taylorcoordinates(self,inputdata):
+        foo = inputdata.description['DrawOnly'].pop(inputdata.description['DrawOnly'].index(self.refdata))
+        inputdata.description['DrawOnly'].append(foo)
+        for i in inputdata.description['DrawOnly']:
+            print i
+            print 'meanbinval  = ', inputdata.histos[i].getMeanBinValue()
+            print 'sigmabinval = ', inputdata.histos[i].getSigmaBinValue()
+            print 'chi2/nbins  = ', inputdata.histos[i].getChi2(inputdata.histos[self.refdata])
+            print 'correlation = ', inputdata.histos[i].getCorrelation(inputdata.histos[self.refdata])
+            print 'distance    = ', inputdata.histos[i].getRMSdistance(inputdata.histos[self.refdata])
+
+
+
class RatioPlot(Plot):
    """The lower ratio panel: shows each histogram divided by (or, in
    'deviation' mode, measured in sigmas from) the RatioPlotReference
    histogram, with its own frame, ticks and labels."""

    def __init__(self, inputdata):

        self.refdata = inputdata.description['RatioPlotReference']
        # Total vertical offset: main panel height plus this panel's height.
        self.yoffset = inputdata.description['PlotSizeY'] + inputdata.description['RatioPlotSizeY']

        inputdata.description['RatioPlotStage'] = True
        inputdata.description['PlotSizeY'] = inputdata.description['RatioPlotSizeY']
        inputdata.description['LogY'] = False

        # TODO: It'd be nice it this wasn't so MC-specific
        # Default Y label/range depend on the ratio mode.
        if inputdata.description.has_key('RatioPlotMode') and inputdata.description['RatioPlotMode']=='deviation':
            inputdata.description['YLabel'] = '$(\\text{MC}-\\text{data})$'
            inputdata.description['YMin'] = -3.5
            inputdata.description['YMax'] = 3.5
        elif inputdata.description.has_key('RatioPlotMode') and inputdata.description['RatioPlotMode']=='datamc':
            inputdata.description['YLabel'] = 'Data/MC'
            inputdata.description['YMin'] = 0.5
            inputdata.description['YMax'] = 1.5
        else:
            inputdata.description['YLabel'] = 'MC/Data'
            inputdata.description['YMin'] = 0.5
            inputdata.description['YMax'] = 1.5

        if inputdata.description.has_key('RatioPlotYLabel'):
            inputdata.description['YLabel'] = inputdata.description['RatioPlotYLabel']
        # Shift the Y label to compensate for the panel's different aspect ratio.
        inputdata.description['YLabel']='\\rput(-%s,0){%s}'%(0.5*inputdata.description['PlotSizeY']/inputdata.description['PlotSizeX'],inputdata.description['YLabel'])

        if inputdata.description.has_key('RatioPlotYMin'):
            inputdata.description['YMin'] = inputdata.description['RatioPlotYMin']
        if inputdata.description.has_key('RatioPlotYMax'):
            inputdata.description['YMax'] = inputdata.description['RatioPlotYMax']

        if not inputdata.description.has_key('RatioPlotErrorBandColor'):
            inputdata.description['RatioPlotErrorBandColor'] = 'yellow'
        # Unless the user asked to keep the main-plot style, restyle the
        # reference histogram as a flat error band with a thin black line.
        if not inputdata.description.has_key('RatioPlotSameStyle') or inputdata.description['RatioPlotSameStyle'] == '0':
            inputdata.histos[self.refdata].description['ErrorBandColor'] = inputdata.description['RatioPlotErrorBandColor']
            inputdata.histos[self.refdata].description['ErrorBands'] = '1'
            inputdata.histos[self.refdata].description['ErrorBars'] = '0'
            inputdata.histos[self.refdata].description['LineStyle'] = 'solid'
            inputdata.histos[self.refdata].description['LineColor'] = 'black'
            inputdata.histos[self.refdata].description['LineWidth'] = '0.3pt'
            inputdata.histos[self.refdata].description['PolyMarker'] = ''
            inputdata.histos[self.refdata].description['ConnectGaps'] = '1'

        self.calculate_ratios(inputdata)
        self.set_borders(inputdata)
        self.coors = Coordinates(inputdata)

    def draw(self, inputdata):
        """Return the LaTeX source for the ratio panel, shifted into place."""
        out = ""
        out += ('\n%\n% RatioPlot\n%\n')
        out += ('\\psset{yunit=%scm}\n' %(self.yoffset))
        out += ('\\rput(0,-1){%\n')
        out += ('\\psset{yunit=%scm}\n' %(inputdata.description['PlotSizeY']))
        out += self._draw(inputdata)
        out += ('}\n')
        return out

    def calculate_ratios(self, inputdata):
        """Divide (or deviate) every histogram by the reference, in place.
        The reference is moved to the front of the draw order when it is
        drawn as an error band, so the band sits behind the other curves."""
        foo = inputdata.description['DrawOnly'].pop(inputdata.description['DrawOnly'].index(self.refdata))
        if inputdata.histos[self.refdata].description.has_key('ErrorBands') and inputdata.histos[self.refdata].description['ErrorBands']=='1':
            inputdata.description['DrawOnly'].insert(0,foo)
        else:
            inputdata.description['DrawOnly'].append(foo)
        for i in inputdata.description['DrawOnly']:
            if i != self.refdata:
                if inputdata.description.has_key('RatioPlotMode') and inputdata.description['RatioPlotMode'] == 'deviation':
                    inputdata.histos[i].deviation(inputdata.histos[self.refdata])
                elif inputdata.description.has_key('RatioPlotMode') and inputdata.description['RatioPlotMode'] == 'datamc':
                    inputdata.histos[i].dividereverse(inputdata.histos[self.refdata])
                    inputdata.histos[i].description['ErrorBars'] = '1'
                else:
                    inputdata.histos[i].divide(inputdata.histos[self.refdata])
        # Finally normalise the reference to itself (flat line / zero band).
        if inputdata.description.has_key('RatioPlotMode') and inputdata.description['RatioPlotMode'] == 'deviation':
            inputdata.histos[self.refdata].deviation(inputdata.histos[self.refdata])
        elif inputdata.description.has_key('RatioPlotMode') and inputdata.description['RatioPlotMode'] == 'datamc':
            inputdata.histos[self.refdata].dividereverse(inputdata.histos[self.refdata])
        else:
            inputdata.histos[self.refdata].divide(inputdata.histos[self.refdata])

    def _draw(self, inputdata):
        """Assemble the panel contents: ratio curves, frame, ticks, labels."""
        out = ""
        for i in inputdata.description['DrawOnly']:
            if inputdata.description.has_key('RatioPlotMode') and inputdata.description['RatioPlotMode'] == 'datamc':
                # In data/MC mode the reference itself is not drawn.
                if i != self.refdata:
                    out += inputdata.histos[i].draw(self.coors)
            else:
                out += inputdata.histos[i].draw(self.coors)

        frame = Frame()
        out += frame.draw(inputdata)

        # TODO: so much duplication with MainPlot... yuck!
        if inputdata.description.has_key('XMajorTickMarks') and inputdata.description['XMajorTickMarks'] != '':
            xcustommajortickmarks = int(inputdata.description['XMajorTickMarks'])
        else:
            xcustommajortickmarks = -1
        if inputdata.description.has_key('XMinorTickMarks') and inputdata.description['XMinorTickMarks'] != '':
            xcustomminortickmarks = int(inputdata.description['XMinorTickMarks'])
        else:
            # Fixed: was written '=- 1', which reads like a decrement but
            # just assigns -1; spelled out for clarity.
            xcustomminortickmarks = -1

        xcustommajorticks = None
        if inputdata.description.has_key('XCustomMajorTicks'): # and inputdata.description['XCustomMajorTicks']!='':
            xcustommajorticks = []
            tickstr = inputdata.description['XCustomMajorTicks'].strip().split() #'\t')
            if not len(tickstr) % 2:
                for i in range(0, len(tickstr), 2):
                    xcustommajorticks.append({'Value': float(tickstr[i]), 'Label': tickstr[i+1]})

        xcustomminorticks = None
        if inputdata.description.has_key('XCustomMinorTicks'): # and inputdata.description['XCustomMinorTicks']!='':
            xcustomminorticks = []
            tickstr = inputdata.description['XCustomMinorTicks'].strip().split() #'\t')
            for i in range(len(tickstr)):
                xcustomminorticks.append({'Value': float(tickstr[i])})

        xticks = XTicks(inputdata.description, self.coors)
        drawlabels = not (inputdata.description.has_key('RatioPlotTickLabels') and inputdata.description['RatioPlotTickLabels']=='0')
        out += xticks.draw(custommajortickmarks=xcustommajortickmarks,
                           customminortickmarks=xcustomminortickmarks,
                           custommajorticks=xcustommajorticks,
                           customminorticks=xcustomminorticks,
                           drawlabels=drawlabels)


        ycustommajortickmarks = inputdata.attr('YMajorTickMarks', '')
        ycustommajortickmarks = int(ycustommajortickmarks) if ycustommajortickmarks else -1

        ycustomminortickmarks = inputdata.attr('YMinorTickMarks', '')
        ycustomminortickmarks = int(ycustomminortickmarks) if ycustomminortickmarks else -1

        ycustommajorticks = None
        if inputdata.description.has_key('YCustomMajorTicks'):
            ycustommajorticks = []
            tickstr = inputdata.description['YCustomMajorTicks'].strip().split() #'\t')
            if not len(tickstr) % 2:
                for i in range(0, len(tickstr), 2):
                    ycustommajorticks.append({'Value': float(tickstr[i]), 'Label': tickstr[i+1]})

        ycustomminorticks = None
        if inputdata.description.has_key('YCustomMinorTicks'):
            ycustomminorticks = []
            tickstr = inputdata.description['YCustomMinorTicks'].strip().split() #'\t')
            for i in range(len(tickstr)):
                ycustomminorticks.append({'Value': float(tickstr[i])})

        yticks = YTicks(inputdata.description, self.coors)
        out += yticks.draw(custommajortickmarks=ycustommajortickmarks,
                           customminortickmarks=ycustomminortickmarks,
                           custommajorticks=ycustommajorticks,
                           customminorticks=ycustomminorticks)

        # NOTE(review): this draws the legend when there is no main plot AND
        # Legend is *disabled* -- looks like the second 'not' may be
        # unintended; confirm against the option documentation.
        if not inputdata.attr_bool('MainPlot', True) and not inputdata.attr_bool('Legend', False):
            legend = Legend(inputdata.description, inputdata.histos, inputdata.functions)
            out += legend.draw()

        labels = Labels(inputdata.description)
        lnames = ['XLabel','YLabel']
        if not inputdata.attr_bool('MainPlot', True):
            # Standalone ratio panel: it also carries the title.
            lnames.append("Title")
        out += labels.draw(lnames)
        return out
+
+
+
class Legend(Described):
    """Renders the plot legend: one row per titled histogram/function,
    showing its title next to a small sample of its line/band/marker style,
    plus any free-form 'CustomLegend' lines."""

    def __init__(self, description, histos, functions):
        # Plot-level description dict plus the drawable objects whose
        # titles and styles appear in the legend.
        self.histos = histos
        self.functions = functions
        self.description = description

    def draw(self):
        """Return the LaTeX/PSTricks source for the legend box."""
        out = ""
        out += '\n%\n% Legend\n%\n'
        out += '\\rput[tr](%s,%s){%%\n' % (self.getLegendXPos(), self.getLegendYPos())
        # Vertical position of the first legend row, in frame units.
        ypos = -0.05*6/self.description['PlotSizeY']

        # Build the list of legend entries, then sort it by the optional
        # per-object integer 'LegendOrder' key (default order 0).
        legendordermap = {}
        legendlist = self.description['DrawOnly']+self.functions.keys()
        if self.description.has_key('LegendOnly'):
            # 'LegendOnly' restricts the legend to an explicit subset.
            legendlist = []
            for legend in self.description['LegendOnly'].strip().split():
                if legend in self.histos.keys() or legend in self.functions.keys():
                    legendlist.append(legend)
        for legend in legendlist:
            order = 0
            if self.histos.has_key(legend) and self.histos[legend].description.has_key('LegendOrder'):
                order = int(self.histos[legend].description['LegendOrder'])
            if self.functions.has_key(legend) and self.functions[legend].description.has_key('LegendOrder'):
                order = int(self.functions[legend].description['LegendOrder'])
            if not order in legendordermap:
                legendordermap[order] = []
            legendordermap[order].append(legend)
        foo=[]
        for i in sorted(legendordermap.keys()):
            foo.extend(legendordermap[i])

        # For right-aligned legends the style sample sits on the other side
        # of the text, so all x offsets flip sign.
        rel_xpos_sign = 1.0
        if self.getLegendAlign()=='r':
            rel_xpos_sign = -1.0
        xpos1 = -0.10*rel_xpos_sign
        xpos2 = -0.02*rel_xpos_sign

        for i in foo:
            if self.histos.has_key(i):
                drawobject=self.histos[i]
            elif self.functions.has_key(i):
                drawobject=self.functions[i]
            else:
                continue
            title = drawobject.getTitle()
            if title == '':
                # Untitled objects get no legend entry.
                continue
            else:
                # Emit the title, then (inside a second rput) the style
                # sample: optional error band, a line, optional marker.
                out += ('\\rput[B%s](%s,%s){%s}\n' %(self.getLegendAlign(),rel_xpos_sign*0.1,ypos,title))
                out += ('\\rput[B%s](%s,%s){%s\n' %(self.getLegendAlign(),rel_xpos_sign*0.1,ypos,'%'))
                if drawobject.getErrorBands():
                    out += ('\\psframe[linewidth=0pt,linestyle=none,fillstyle=solid,fillcolor=%s,opacity=%s]' %(drawobject.getErrorBandColor(),drawobject.getErrorBandOpacity()))
                    out += ('(%s, 0.033)(%s, 0.001)\n' %(xpos1, xpos2))
                out += ('\\psline[linestyle=' + drawobject.getLineStyle() \
                            + ', linecolor=' + drawobject.getLineColor() \
                            + ', linewidth=' + drawobject.getLineWidth() \
                            + ', strokeopacity=' + drawobject.getLineOpacity() \
                            + ', opacity=' + drawobject.getFillOpacity())
                if drawobject.getLineDash() != '':
                    out += (', dash=' + drawobject.getLineDash())
                if drawobject.getFillStyle()!='none':
                    # Filled style: draw a small closed frame as the sample.
                    out += (', fillstyle=' + drawobject.getFillStyle() \
                                + ', fillcolor='  + drawobject.getFillColor() \
                                + ', hatchcolor=' + drawobject.getHatchColor() \
                                + ']{C-C}(%s, 0.030)(%s, 0.030)(%s, 0.004)(%s, 0.004)(%s, 0.030)\n' \
                                %(xpos1, xpos2, xpos2, xpos1, xpos1))
                else:
                    out += ('](%s, 0.016)(%s, 0.016)\n' %(xpos1, xpos2))
                if drawobject.getPolyMarker() != '':
                    out += ('  \\psdot[dotstyle=' + drawobject.getPolyMarker() \
                                + ', dotsize='    + drawobject.getDotSize()   \
                                + ', dotscale='   + drawobject.getDotScale()  \
                                + ', linecolor='  + drawobject.getLineColor() \
                                + ', linewidth='  + drawobject.getLineWidth() \
                                + ', linestyle='  + drawobject.getLineStyle() \
                                + ', fillstyle='  + drawobject.getFillStyle() \
                                + ', fillcolor='  + drawobject.getFillColor() \
                                + ', strokeopacity=' + drawobject.getLineOpacity() \
                                + ', opacity=' + drawobject.getFillOpacity() \
                                + ', hatchcolor=' + drawobject.getHatchColor())
                    if drawobject.getFillStyle()!='none':
                        out += ('](%s, 0.028)\n' % (rel_xpos_sign*-0.06))
                    else:
                        out += ('](%s, 0.016)\n' % (rel_xpos_sign*-0.06))
                out += ('}\n')
                # Advance to the next legend row.
                ypos -= 0.075*6/self.description['PlotSizeY']
        if self.description.has_key('CustomLegend'):
            # Extra free-form legend lines, separated by '\\' in the input.
            for i in self.description['CustomLegend'].strip().split('\\\\'):
                out += ('\\rput[B%s](%s,%s){%s}\n' %(self.getLegendAlign(),rel_xpos_sign*0.1,ypos,i))
                ypos -= 0.075*6/self.description['PlotSizeY']
        out += ('}\n')
        return out

    def getLegendXPos(self):
        # Default x position depends on alignment: right-aligned legends
        # sit near the right frame edge.
        if self.description.has_key('LegendXPos'):
            return self.description['LegendXPos']
        else:
            if self.getLegendAlign()=='r':
                return '0.95'
            else:
                return '0.53'

    def getLegendYPos(self):
        if self.description.has_key('LegendYPos'):
            return self.description['LegendYPos']
        else:
            return '0.93'

    def getLegendAlign(self):
        # 'l' (default) or 'r' text alignment for legend entries.
        if self.description.has_key('LegendAlign'):
            return self.description['LegendAlign']
        else:
            return 'l'
+
+
+class ColorScale(Described):
+
+    def __init__(self, description, coors):
+        self.description = description
+        self.coors = coors
+
+    def draw(self):
+        out = ''
+        out += '\n%\n% ColorScale\n%\n'
+        out += '\\rput(1,0){\n'
+        out += '  \\psset{xunit=4mm}\n'
+        out += '  \\rput(0.5,0){\n'
+        out += '    \\psset{yunit=0.0076923, linestyle=none, fillstyle=solid}\n'
+        out += '    \\multido{\\ic=0+1,\\id=1+1}{130}{\n'
+        out += '      \\psframe[fillcolor={gradientcolors!![\\ic]},dimen=inner,linewidth=0.1pt](0, \\ic)(1, \\id)\n'
+        out += '    }\n'
+        out += '  }\n'
+        out += '  \\rput(0.5,0){\n'
+        out += '    \\psframe[linewidth=0.3pt,dimen=middle](0,0)(1,1)\n'
+
+        zcustommajortickmarks = self.attr_int('ZMajorTickMarks', -1)
+        zcustomminortickmarks = self.attr_int('ZMinorTickMarks', -1)
+
+        zcustommajorticks = zcustomminorticks = None
+        if self.attr('ZCustomMajorTicks'):
+            zcustommajorticks = []
+            z_label_pairs = inputdata.attr('ZCustomMajorTicks').strip().split() #'\t')
+            if len(z_label_pairs) % 2 == 0:
+                for i in range(0, len(z_label_pairs), 2):
+                    zcustommajorticks.append({'Value': float(x_label_pairs[i]), 'Label': x_label_pairs[i+1]})
+            else:
+                print "Warning: ZCustomMajorTicks requires an even number of alternating pos/label entries"
+
+        if inputdata.attr('ZCustomMinorTicks'):
+            zs = inputdata.attr('ZCustomMinorTicks').strip().split() #'\t')
+            zcustomminorticks = [{'Value': float(x)} for x in xs]
+
+        drawzlabels = self.description.attr_bool('PlotZTickLabels', True)
+
+        zticks = ZTicks(self.description, self.coors)
+        out += zticks.draw(custommajortickmarks=zcustommajortickmarks,\
+                           customminortickmarks=zcustomminortickmarks,\
+                           custommajorticks=zcustommajorticks,\
+                           customminorticks=zcustomminorticks,
+                           drawlabels=drawzlabels)
+        out += '  }\n'
+        out += '}\n'
+        return out
+
+
+
class Labels(Described):
    """Draws the plot title and the X/Y/Z axis labels."""

    def __init__(self, description):
        self.description = description

    def draw(self, axis=[]):
        """Return LaTeX for the labels named in 'axis' (all labels if empty)."""
        def wanted(name):
            return bool(axis.count(name)) or axis == []
        pieces = ['\n%\n% Labels\n%\n']
        if 'Title' in self.description and wanted('Title'):
            pieces.append('\\rput(0,1){\\rput[lB](0, 1.7\\labelsep){\\normalsize '+self.description['Title']+'}}\n')
        if 'XLabel' in self.description and wanted('XLabel'):
            # Separation from the frame, in units of \labelsep (overridable).
            xsep = float(self.description['XLabelSep']) if 'XLabelSep' in self.description else 4.7
            pieces.append('\\rput(1,0){\\rput[rB](0,-%4.3f\\labelsep){\\normalsize '%(xsep) +self.description['XLabel']+'}}\n')
        if 'YLabel' in self.description and wanted('YLabel'):
            ysep = float(self.description['YLabelSep']) if 'YLabelSep' in self.description else 6.5
            pieces.append('\\rput(0,1){\\rput[rB]{90}(-%4.3f\\labelsep,0){\\normalsize '%(ysep) +self.description['YLabel']+'}}\n')
        if 'ZLabel' in self.description and wanted('ZLabel'):
            zsep = float(self.description['ZLabelSep']) if 'ZLabelSep' in self.description else 5.3
            pieces.append('\\rput(1,1){\\rput(%4.3f\\labelsep,0){\\psset{xunit=4mm}\\rput[lB]{270}(1.5,0){\\normalsize '%(zsep) +self.description['ZLabel']+'}}}\n')
        return ''.join(pieces)
+
+
+
class Special(Described):
    """A verbatim LaTeX block from the input file's SPECIAL section; any
    \\physicscoor / \\physicsxcoor / \\physicsycoor coordinates are mapped
    from physics units into frame units before output."""

    def __init__(self, f):
        self.description = {}
        self.data = []
        self.read_input(f)

    def read_input(self, f):
        # Collect raw lines until the SPECIAL end marker, skipping comments.
        for rawline in f:
            if is_end_marker(rawline, 'SPECIAL'):
                break
            if is_comment(rawline):
                continue
            self.data.append(rawline)

    def draw(self, coors):
        """Return the stored lines with physics coordinates translated."""
        import re
        coor_re = re.compile(r'^(.*?)(\\physics[xy]?coor)\(\s?([0-9\.eE+-]+)\s?,\s?([0-9\.eE+-]+)\s?\)(.*)')
        # TODO: More precise number string matching, something like this:
        # num = r"-?[0-9]*(?:\.[0-9]*)(?:[eE][+-]?\d+]"
        # coor_re = re.compile(r'^(.*?)(\\physics[xy]?coor)\(\s?(' + num + ')\s?,\s?(' + num + ')\s?\)(.*)')
        out = '\n%\n% Special\n%\n'
        for rawline in self.data:
            match = coor_re.search(rawline)
            while match:
                xcoor, ycoor = float(match.group(3)), float(match.group(4))
                cmd = match.group(2)[1:]
                # \physicscoor converts both axes; the x/y variants one each.
                if cmd in ("physicscoor", "physicsxcoor"):
                    xcoor = coors.phys2frameX(xcoor)
                if cmd in ("physicscoor", "physicsycoor"):
                    ycoor = coors.phys2frameY(ycoor)
                rawline = "%s(%f, %f)%s" % (match.group(1), xcoor, ycoor, match.group(5))
                match = coor_re.search(rawline)
            out += rawline + "\n"
        return out
+
+
+
class DrawableObject(Described):
    """Shared style-option accessors for anything drawable (histograms,
    functions): each getter returns the entry from the object's description
    dict, or a default when the option is unset."""

    def __init__(self, f):
        pass

    def getTitle(self):
        return self.description.get("Title", "")

    def getLineStyle(self):
        if 'LineStyle' not in self.description:
            return 'solid'
        ## I normally like there to be "only one way to do it", but providing
        ## this dashdotted/dotdashed synonym just seems humane ;-)
        if self.description['LineStyle'] in ('dashdotted', 'dotdashed'):
            self.description['LineStyle'] = 'dashed'
            self.description['LineDash'] = '3pt 3pt .8pt 3pt'
        return self.description['LineStyle']

    def getLineDash(self):
        if 'LineDash' not in self.description:
            return ''
        # Normalize a dashdotted/dotdashed LineStyle first, since that
        # rewrites LineDash as a side effect.
        self.getLineStyle()
        return self.description['LineDash']

    def getLineWidth(self):
        return self.description.get("LineWidth", "0.8pt")

    def getLineColor(self):
        return self.description.get("LineColor", "black")

    def getLineOpacity(self):
        return self.description.get("LineOpacity", "1.0")

    def getFillColor(self):
        return self.description.get("FillColor", "white")

    def getFillOpacity(self):
        return self.description.get("FillOpacity", "1.0")

    def getHatchColor(self):
        return self.description.get("HatchColor", "black")

    def getFillStyle(self):
        return self.description.get("FillStyle", "none")

    def getPolyMarker(self):
        return self.description.get("PolyMarker", "")

    def getDotSize(self):
        return self.description.get("DotSize", "2pt 2")

    def getDotScale(self):
        return self.description.get("DotScale", "1")

    def getErrorBars(self):
        return int(self.description.get("ErrorBars", "0")) != 0

    def getErrorBands(self):
        return int(self.description.get("ErrorBands", "0")) != 0

    def getErrorBandColor(self):
        return self.description.get("ErrorBandColor", "yellow")

    def getErrorBandOpacity(self):
        return self.description.get("ErrorBandOpacity", "1.0")

    def getSmoothLine(self):
        return int(self.description.get("SmoothLine", "0")) != 0

    def startclip(self):
        # Clip subsequent drawing to the unit frame.
        return '\\psclip{\\psframe[linewidth=0, linestyle=none](0,0)(1,1)}\n'

    def stopclip(self):
        return '\\endpsclip\n'

    def startpsset(self):
        # Push this object's line/fill style as global PSTricks settings.
        settings = [
            'linecolor=' + self.getLineColor(),
            'linewidth=' + self.getLineWidth(),
            'linestyle=' + self.getLineStyle(),
            'fillstyle=' + self.getFillStyle(),
            'fillcolor=' + self.getFillColor(),
            'hatchcolor=' + self.getHatchColor(),
            'strokeopacity=' + self.getLineOpacity(),
            'opacity=' + self.getFillOpacity(),
        ]
        if self.getLineDash() != '':
            settings.append('dash=' + self.getLineDash())
        return ''.join('\\psset{%s}\n' % s for s in settings)

    def stoppsset(self):
        # Restore the default PSTricks drawing style.
        defaults = [
            'linecolor=black',
            'linewidth=0.8pt',
            'linestyle=solid',
            'fillstyle=none',
            'fillcolor=white',
            'hatchcolor=black',
            'strokeopacity=1.0',
            'opacity=1.0',
        ]
        return ''.join('\\psset{%s}\n' % d for d in defaults)
+
+
+
+class Function(DrawableObject, Described):
+
    def __init__(self, f):
        # Parse a FUNCTION section from the input file stream f.
        self.description = {}
        self.read_input(f)
+
+    def read_input(self, f):
+        self.code='def plotfunction(x):\n'
+        iscode=False
+        for line in f:
+            if is_end_marker(line, 'FUNCTION'):
+                break
+            elif is_comment(line):
+                continue
+            else:
+                m = pat_property.match(line)
+                if iscode:
+                    self.code+='    '+line
+                elif m:
+                    prop, value = m.group(1,2)
+                    if prop=='Code':
+                        iscode=True
+                    else:
+                        self.description[prop] = value
+        if not iscode:
+            print '++++++++++ ERROR: No code in function'
+        else:
+            foo = compile(self.code, '<string>', 'exec')
+            exec(foo)
+            self.plotfunction = plotfunction
+
+
+    def draw(self,coors):
+        out = ""
+        out += self.startclip()
+        out += self.startpsset()
+        xmin = coors.xmin()
+        if self.description.has_key('XMin') and self.description['XMin']:
+            xmin = float(self.description['XMin'])
+        xmax=coors.xmax()
+        if self.description.has_key('XMax') and self.description['XMax']:
+            xmax=float(self.description['XMax'])
+        # TODO: Space sample points logarithmically if LogX=1
+        dx = (xmax-xmin)/500.
+        x = xmin-dx
+        out += '\\pscurve'
+        if self.description.has_key('FillStyle') and self.description['FillStyle']!='none':
+            out += '(%s,%s)\n' % (coors.strphys2frameX(xmin),coors.strphys2frameY(coors.ymin()))
+        while x < (xmax+2*dx):
+            y = self.plotfunction(x)
+            out += ('(%s,%s)\n' % (coors.strphys2frameX(x), coors.strphys2frameY(y)))
+            x += dx
+        if self.description.has_key('FillStyle') and self.description['FillStyle']!='none':
+            out += '(%s,%s)\n' % (coors.strphys2frameX(xmax),coors.strphys2frameY(coors.ymin()))
+        out += self.stoppsset()
+        out += self.stopclip()
+        return out
+
+
+class BinData(object):
+    """\
+    Store bin edge and value+error(s) data for a 1D or 2D bin.
+
+    TODO: generalise/alias the attr names to avoid mention of x and y
+    """
+
+    def __init__(self, low, high, val, err):
+        #print "@", low, high, val, err
+        self.low = floatify(low)
+        self.high = floatify(high)
+        self.val = float(val)
+        self.err = floatpair(err)
+
+    @property
+    def is2D(self):
+        return hasattr(self.low, "__len__") and hasattr(self.high, "__len__")
+
+    @property
+    def isValid(self):
+        invalid_val = (isnan(self.val) or isnan(self.err[0]) or isnan(self.err[1]))
+        if invalid_val:
+            return False
+        if self.is2D:
+            invalid_low = any(isnan(x) for x in self.low)
+            invalid_high = any(isnan(x) for x in self.high)
+        else:
+            invalid_low, invalid_high = isnan(self.low), isnan(self.high)
+        return not (invalid_low or invalid_high)
+
+    @property
+    def xmin(self):
+        return self.low
+    @xmin.setter
+    def xmin(self,x):
+        self.low = x
+
+    @property
+    def xmax(self):
+        return self.high
+    @xmax.setter
+    def xmax(self,x):
+        self.high = x
+
+    @property
+    def xmid(self):
+        # TODO: Generalise to 2D
+        return (self.xmin + self.xmax) / 2.0
+
+    @property
+    def xwidth(self):
+        # TODO: Generalise to 2D
+        assert self.xmin <= self.xmax
+        return self.xmax - self.xmin
+
+    @property
+    def y(self):
+        return self.val
+    @y.setter
+    def y(self, x):
+        self.val = x
+
+    @property
+    def ey(self):
+        return self.err
+    @ey.setter
+    def ey(self, x):
+        self.err = x
+
+    @property
+    def ymin(self):
+        return self.y - self.ey[0]
+
+    @property
+    def ymax(self):
+        return self.y + self.ey[1]
+
+    def __getitem__(self, key):
+        "dict-like access for backward compatibility"
+        if key in ("LowEdge"):
+            return self.xmin
+        elif key == ("UpEdge", "HighEdge"):
+            return self.xmax
+        elif key == "Content":
+            return self.y
+        elif key == "Errors":
+            return self.ey
+
+
+class Histogram(DrawableObject, Described):
+
+    def __init__(self, f, p=None):
+        self.description = {}
+        self.is2dim = False
+        self.data = []
+        self.read_input_data(f)
+        self.sigmabinvalue = None
+        self.meanbinvalue = None
+        self.path = p
+
+    def read_input_data(self, f):
+        for line in f:
+            if is_end_marker(line, 'HISTOGRAM'):
+                break
+            elif is_comment(line):
+                continue
+            else:
+                line = line.rstrip()
+                m = pat_property.match(line)
+                if m:
+                    prop, value = m.group(1,2)
+                    self.description[prop] = value
+                else:
+                    ## Detect symm errs
+                    linearray = line.split()
+                    if len(linearray) == 4:
+                        self.data.append(BinData(*linearray))
+                    ## Detect asymm errs
+                    elif len(linearray) == 5:
+                        self.data.append(BinData(linearray[0], linearray[1], linearray[2], [linearray[3],linearray[4]]))
+                    ## Detect two-dimensionality
+                    elif len(linearray) in [6,7]:
+                        self.is2dim = True
+                        # If asymm z error, use the max or average of +- error
+                        err = float(linearray[5])
+                        if len(linearray) == 7:
+                            if self.description.get("ShowMaxZErr", 1):
+                                err = max(err, float(linearray[6]))
+                            else:
+                                err = 0.5 * (err + float(linearray[6]))
+                        self.data.append(BinData([linearray[0], linearray[2]], [linearray[1], linearray[3]], linearray[4], err))
+                    ## Unknown histo format
+                    else:
+                        raise RuntimeError("Unknown HISTOGRAM data line format with %d entries" % len(linearray))
+
+
+    def mangle_input(self):
+        norm2int = self.attr_bool("NormalizeToIntegral", False)
+        norm2sum = self.attr_bool("NormalizeToSum", False)
+        if norm2int or norm2sum:
+            if norm2int and norm2sum:
+                print "Can't normalize to Integral and to Sum at the same time. Will normalize to the Sum."
+            foo = 0
+            # TODO: change to "in self.data"?
+            for i in range(len(self.data)):
+                if norm2sum:
+                    foo += self.data[i].val
+                else:
+                    foo += self.data[i].val*(self.data[i].xmax-self.data[i].xmin)
+
+            # TODO: change to "in self.data"?
+            if foo != 0:
+                for i in range(len(self.data)):
+                    self.data[i].val /= foo
+                    self.data[i].err[0] /= foo
+                    self.data[i].err[1] /= foo
+        scale = self.attr_float('Scale', 1.0)
+        if scale != 1.0:
+            # TODO: change to "in self.data"?
+            for i in range(len(self.data)):
+                self.data[i].val *= scale
+                self.data[i].err[0] *= scale
+                self.data[i].err[1] *= scale
+        if self.attr_int("Rebin", 1) > 1:
+            rebin = self.attr_int("Rebin", 1)
+            errortype = self.attr("ErrorType", "stat")
+            newdata = []
+            for i in range(0, (len(self.data)//rebin)*rebin, rebin):
+                foo = 0.
+                barl = 0.
+                baru = 0.
+                for j in range(rebin):
+                    binwidth = self.data[i+j].xwidth
+                    foo += self.data[i+j].val * binwidth
+                    if errortype == "stat":
+                        barl += (binwidth * self.data[i+j].err[0])**2
+                        baru += (binwidth * self.data[i+j].err[1])**2
+                    elif errortype == "env":
+                        barl += self.data[i+j].ymin * binwidth
+                        baru += self.data[i+j].ymax * binwidth
+                    else:
+                        logging.error("Rebinning for ErrorType not implemented.")
+                        sys.exit(1)
+                newbinwidth = self.data[i+rebin-1].xmax - self.data[i].xmin
+                newcentral = foo/newbinwidth
+                if errortype == "stat":
+                    newerror = [sqrt(barl)/newbinwidth, sqrt(baru)/newbinwidth]
+                elif errortype == "env":
+                    newerror = [(foo-barl)/newbinwidth, (baru-foo)/newbinwidth]
+                newdata.append(BinData(self.data[i].xmin, self.data[i+rebin-1].xmax, newcentral, newerror))
+            self.data = newdata
+
+    def add(self, name):
+        if len(self.data) != len(name.data):
+            print '+++ Error in Histogram.add() for %s: different numbers of bins' % self.path
+        for i in range(len(self.data)):
+            if fuzzyeq(self.data[i].xmin, name.data[i].xmin) and \
+               fuzzyeq(self.data[i].xmax, name.data[i].xmax):
+                self.data[i].val += name.data[i].val
+                self.data[i].err[0] = sqrt(self.data[i].err[0]**2 + name.data[i].err[0]**2)
+                self.data[i].err[1] = sqrt(self.data[i].err[1]**2 + name.data[i].err[1]**2)
+            else:
+                print '+++ Error in Histogram.add() for %s: binning of histograms differs' % self.path
+
+    def divide(self, name):
+        #print name.path, self.path
+        if len(self.data) != len(name.data):
+            print '+++ Error in Histogram.divide() for %s: different numbers of bins' % self.path
+        for i in range(len(self.data)):
+            if fuzzyeq(self.data[i].xmin, name.data[i].xmin) and \
+               fuzzyeq(self.data[i].xmax, name.data[i].xmax):
+                try:
+                    self.data[i].err[0] /= name.data[i].val
+                except ZeroDivisionError:
+                    self.data[i].err[0]=0.
+                try:
+                    self.data[i].err[1] /= name.data[i].val
+                except ZeroDivisionError:
+                    self.data[i].err[1]=0.
+                try:
+                    self.data[i].val /= name.data[i].val
+                except ZeroDivisionError:
+                    self.data[i].val=1.
+#                self.data[i].err[0] = sqrt(self.data[i].err[0]**2 + name.data[i].err[0]**2)
+#                self.data[i].err[1] = sqrt(self.data[i].err[1]**2 + name.data[i].err[1]**2)
+            else:
+                print '+++ Error in Histogram.divide() for %s: binning of histograms differs' % self.path
+
+    def dividereverse(self, name):
+        if len(self.data) != len(name.data):
+            print '+++ Error in Histogram.dividereverse() for %s: different numbers of bins' % self.path
+        for i in range(len(self.data)):
+            if fuzzyeq(self.data[i].xmin, name.data[i].xmin) and \
+               fuzzyeq(self.data[i].xmax, name.data[i].xmax):
+                try:
+                    self.data[i].err[0] = name.data[i].err[0]/self.data[i].val
+                except ZeroDivisionError:
+                    self.data[i].err[0]=0.
+                try:
+                    self.data[i].err[1] = name.data[i].err[1]/self.data[i].val
+                except ZeroDivisionError:
+                    self.data[i].err[1]=0.
+                try:
+                    self.data[i].val = name.data[i].val/self.data[i].val
+                except ZeroDivisionError:
+                    self.data[i].val=1.
+            else:
+                print '+++ Error in Histogram.dividereverse(): binning of histograms differs'
+
+    def deviation(self, name):
+        if len(self.data) != len(name.data):
+            print '+++ Error in Histogram.deviation() for %s: different numbers of bins' % self.path
+        for i in range(len(self.data)):
+            if fuzzyeq(self.data[i].xmin, name.data[i].xmin) and \
+               fuzzyeq(self.data[i].xmax, name.data[i].xmax):
+                self.data[i].val -= name.data[i].val
+                try:
+                    self.data[i].val /= 0.5*sqrt((name.data[i].err[0] + name.data[i].err[1])**2 + \
+                                                        (self.data[i].err[0] + self.data[i].err[1])**2)
+                except ZeroDivisionError:
+                    self.data[i].val = 0.0
+                try:
+                    self.data[i].err[0] /= name.data[i].err[0]
+                except ZeroDivisionError:
+                    self.data[i].err[0] = 0.0
+                try:
+                    self.data[i].err[1] /= name.data[i].err[1]
+                except ZeroDivisionError:
+                    self.data[i].err[1] = 0.0
+            else:
+                print '+++ Error in Histogram.deviation() for %s: binning of histograms differs' % self.path
+
+    def getChi2(self, name):
+        chi2 = 0.
+        for i in range(len(self.data)):
+            if fuzzyeq(self.data[i].xmin, name.data[i].xmin) and \
+               fuzzyeq(self.data[i].xmax, name.data[i].xmax):
+                try:
+                    chi2 += (self.data[i].val-name.data[i].val)**2/((0.5*self.data[i].err[0]+0.5*self.data[i].err[1])**2 + (0.5*name.data[i].err[0]+0.5*name.data[i].err[1])**2)
+                except ZeroDivisionError:
+                    pass
+            else:
+                print '+++ Error in Histogram.getChi2() for %s: binning of histograms differs' % self.path
+        return chi2/len(self.data)
+
+    def getSigmaBinValue(self):
+        if self.sigmabinvalue==None:
+            self.sigmabinvalue = 0.
+            sumofweights = 0.
+            for i in range(len(self.data)):
+                if self.is2dim:
+                    binwidth = abs( (self.data[i].xmax[0] - self.data[i].xmin[0])
+                                   *(self.data[i].xmax[1] - self.data[i].xmin[1]))
+                else:
+                    binwidth = abs(self.data[i].xmax - self.data[i].xmin)
+                self.sigmabinvalue += binwidth*(self.data[i].val-self.getMeanBinValue())**2
+                sumofweights += binwidth
+            self.sigmabinvalue = sqrt(self.sigmabinvalue/sumofweights)
+        return self.sigmabinvalue
+
+    def getMeanBinValue(self):
+        if self.meanbinvalue==None:
+            self.meanbinvalue = 0.
+            sumofweights = 0.
+            for i in range(len(self.data)):
+                if self.is2dim:
+                    binwidth = abs( (self.data[i].xmax[0] - self.data[i].xmin[0])
+                                   *(self.data[i].xmax[1] - self.data[i].xmin[1]))
+                else:
+                    binwidth = abs(self.data[i].xmax - self.data[i].xmin)
+                self.meanbinvalue += binwidth*self.data[i].val
+                sumofweights += binwidth
+            self.meanbinvalue /= sumofweights
+        return self.meanbinvalue
+
+    def getCorrelation(self, name):
+        correlation = 0.
+        sumofweights = 0.
+        for i in range(len(self.data)):
+            if fuzzyeq(self.data[i].xmin, name.data[i].xmin) and \
+               fuzzyeq(self.data[i].xmax, name.data[i].xmax):
+                if self.is2dim:
+                    binwidth = abs( (self.data[i].xmax[0] - self.data[i].xmin[0])
+                                  * (self.data[i].xmax[1] - self.data[i].xmin[1]) )
+                else:
+                    binwidth = abs(self.data[i].xmax - self.data[i].xmin)
+                correlation += binwidth * ( self.data[i].val - self.getMeanBinValue() ) \
+                                        * ( name.data[i].val - name.getMeanBinValue() )
+                sumofweights += binwidth
+            else:
+                print '+++ Error in Histogram.getCorrelation(): binning of histograms differs' % self.path
+        correlation /= sumofweights
+        try:
+            correlation /= self.getSigmaBinValue()*name.getSigmaBinValue()
+        except ZeroDivisionError:
+            correlation = 0
+        return correlation
+
+    def getRMSdistance(self,name):
+        distance = 0.
+        sumofweights = 0.
+        for i in range(len(self.data)):
+            if fuzzyeq(self.data[i].xmin, name.data[i].xmin) and \
+               fuzzyeq(self.data[i].xmax, name.data[i].xmax):
+                if self.is2dim:
+                    binwidth = abs( (self.data[i].xmax[0] - self.data[i].xmin[0])
+                                  * (self.data[i].xmax[1] - self.data[i].xmin[1]) )
+                else:
+                    binwidth = abs(self.data[i].xmax - self.data[i].xmin)
+                distance += binwidth * ( (self.data[i].val - self.getMeanBinValue())
+                                        -(name.data[i].val - name.getMeanBinValue()))**2
+                sumofweights += binwidth
+            else:
+                print '+++ Error in Histogram.getRMSdistance() for %s: binning of histograms differs' % self.path
+        distance = sqrt(distance/sumofweights)
+        return distance
+
+    def draw(self,coors):
+        seen_nan = False
+        out = ""
+        out += self.startclip()
+        out += self.startpsset()
+        if any(b.isValid for b in self.data):
+            out += "% START DATA\n"
+            if self.is2dim:
+                for b in self.data:
+                    out += ('\\psframe')
+                    color = int(129*coors.phys2frameZ(b.val))
+                    if b.val > coors.zmax():
+                        color = 129
+                    if b.val < coors.zmin():
+                        color = 0
+                    if b.val <= coors.zmin():
+                        out += ('[linewidth=0pt, linestyle=none, fillstyle=solid, fillcolor=white]')
+                    else:
+                        out += ('[linewidth=0pt, linestyle=none, fillstyle=solid, fillcolor={gradientcolors!!['+str(color)+']}]')
+                    out += ('(' + coors.strphys2frameX(b.low[0]) + ', ' \
+                                + coors.strphys2frameY(b.low[1]) + ')(' \
+                                + coors.strphys2frameX(b.high[0])  + ', ' \
+                                + coors.strphys2frameY(b.high[1])  + ')\n')
+            else:
+                if self.getErrorBands():
+                    self.description['SmoothLine'] = 0
+                    for b in self.data:
+                        out += ('\\psframe[dimen=inner,linewidth=0pt,linestyle=none,fillstyle=solid,fillcolor=%s,opacity=%s]' % (self.getErrorBandColor(),self.getErrorBandOpacity()))
+                        out += ('(' + coors.strphys2frameX(b.xmin) + ', ' \
+                                    + coors.strphys2frameY(b.val - b.err[0]) + ')(' \
+                                    + coors.strphys2frameX(b.xmax)  + ', ' \
+                                    + coors.strphys2frameY(b.val + b.err[1]) + ')\n')
+                if self.getErrorBars():
+                    for b in self.data:
+                        if isnan(b.val) or isnan(b.err[0]) or isnan(b.err[1]):
+                            seen_nan = True
+                            continue
+                        if b.val == 0. and b.err == [0.,0.]:
+                            continue
+                        out += ('\\psline')
+                        out += ('(' + coors.strphys2frameX(b.xmin) + ', ' \
+                                    + coors.strphys2frameY(b.val) + ')(' \
+                                    + coors.strphys2frameX(b.xmax)  + ', ' \
+                                    + coors.strphys2frameY(b.val) + ')\n')
+                        out += ('\\psline')
+                        bincenter = coors.strphys2frameX(.5*(b.xmin+b.xmax))
+                        out += ('(' + bincenter + ', ' \
+                                    + coors.strphys2frameY(b.val-b.err[0]) + ')(' \
+                                    + bincenter + ', ' \
+                                    + coors.strphys2frameY(b.val+b.err[1]) + ')\n')
+                if self.getSmoothLine():
+                    out += '\\psbezier'
+                else:
+                    out += '\\psline'
+                if self.getFillStyle() != 'none':   # make sure that filled areas go all the way down to the x-axis
+                    if coors.phys2frameX(self.data[0].xmin) > 1e-4:
+                        out += '(' + coors.strphys2frameX(self.data[0].xmin) + ', -0.1)\n'
+                    else:
+                        out += '(-0.1, -0.1)\n'
+                for i, b in enumerate(self.data):
+                    if isnan(b.val):
+                        seen_nan = True
+                        continue
+                    if self.getSmoothLine():
+                        out += ('(' + coors.strphys2frameX(0.5*(b.xmin+b.xmax)) + ', ' \
+                                    + coors.strphys2frameY(b.val) + ')\n')
+                    else:
+                        out += ('(' + coors.strphys2frameX(b.xmin) + ', ' \
+                                    + coors.strphys2frameY(b.val) + ')(' \
+                                    + coors.strphys2frameX(b.xmax)  + ', ' \
+                                    + coors.strphys2frameY(b.val) + ')\n')
+                        ## Join/separate data points, with vertical/diagonal lines
+                        if i+1 < len(self.data): #< If this is not the last point
+                            if self.description.get('ConnectBins', '1') != '1':
+                                out += ('\\psline')
+                            else:
+                                ## If bins are joined, but there is a gap in binning, choose whether to fill the gap
+                                if (abs(coors.phys2frameX(b.xmax) - coors.phys2frameX(self.data[i+1].xmin)) > 1e-4):
+                                    if self.description.get('ConnectGaps', '0') != '1':
+                                        out += ('\\psline')
+                                        # TODO: Perhaps use a new dashed line to fill the gap?
+                    if self.getFillStyle() != 'none':  # make sure that filled areas go all the way down to the x-axis
+                        if (coors.phys2frameX(self.data[-1].xmax) < 1-1e-4):
+                            out += '(' + coors.strphys2frameX(self.data[-1].xmax) + ', -0.1)\n'
+                        else:
+                            out += '(1.1, -0.1)\n'
+            #
+            if self.getPolyMarker() != '':
+                for b in self.data:
+                    if isnan(b.val):
+                        seen_nan = True
+                        continue
+                    if b.val == 0. and b.err == [0.,0.]:
+                        continue
+                    out += ('\\psdot[dotstyle=%s,dotsize=%s,dotscale=%s](' % (self.getPolyMarker(),self.getDotSize(),self.getDotScale()) \
+                                + coors.strphys2frameX(.5*(b.xmin+b.xmax)) + ', ' \
+                                + coors.strphys2frameY(b.val) + ')\n')
+
+            out += "% END DATA\n"
+        else:
+            print "WARNING: No valid bin value/errors/edges to plot!"
+            out += "% NO DATA!\n"
+
+        out += self.stoppsset()
+        out += self.stopclip()
+        if seen_nan:
+            print "WARNING: NaN-valued value or error bar!"
+        return out
+
+    # def is2dimensional(self):
+    #     return self.is2dim
+
+    def getXMin(self):
+        if not self.data:
+            return 0
+        elif self.is2dim:
+            return min(b.low[0] for b in self.data)
+        else:
+            return min(b.xmin for b in self.data)
+
+    def getXMax(self):
+        if not self.data:
+            return 1
+        elif self.is2dim:
+            return max(b.high[0] for b in self.data)
+        else:
+            return max(b.xmax for b in self.data)
+
+    def getYMin(self, xmin, xmax, logy):
+        if not self.data:
+            return 0
+        elif self.is2dim:
+            return min(b.low[1] for b in self.data)
+        else:
+            yvalues = []
+            for b in self.data:
+                if (b.xmax > xmin or b.xmin >= xmin) and (b.xmin < xmax or b.xmax <= xmax):
+                    foo = b.val
+                    if self.getErrorBars() or self.getErrorBands():
+                        foo -= b.err[0]
+                    if not isnan(foo) and (not logy or foo > 0):
+                        yvalues.append(foo)
+            return min(yvalues) if yvalues else self.data[0].val
+
+    def getYMax(self, xmin, xmax):
+        if not self.data:
+            return 1
+        elif self.is2dim:
+            return max(b.high[1] for b in self.data)
+        else:
+            yvalues = []
+            for b in self.data:
+                if (b.xmax > xmin or b.xmin >= xmin) and (b.xmin < xmax or b.xmax <= xmax):
+                    foo = b.val
+                    if self.getErrorBars() or self.getErrorBands():
+                        foo += b.err[1]
+                    if not isnan(foo): # and (not logy or foo > 0):
+                        yvalues.append(foo)
+            return max(yvalues) if yvalues else self.data[0].val
+
+    def getZMin(self, xmin, xmax, ymin, ymax):
+        if not self.is2dim:
+            return 0
+        zvalues = []
+        for b in self.data:
+            if (b.xmax[0] > xmin and b.xmin[0] < xmax) and (b.xmax[1] > ymin and b.xmin[1] < ymax):
+                zvalues.append(b.val)
+        return min(zvalues)
+
+    def getZMax(self, xmin, xmax, ymin, ymax):
+        if not self.is2dim:
+            return 0
+        zvalues = []
+        for b in self.data:
+            if (b.xmax[0] > xmin and b.xmin[0] < xmax) and (b.xmax[1] > ymin and b.xmin[1] < ymax):
+                zvalues.append(b.val)
+        return max(zvalues)
+
+
+
+class Histo1D(Histogram):
+    ## Histogram specialisation for explicit HISTO1D sections; only the
+    ## parser differs (end marker and accepted column counts).
+
+    def read_input_data(self, f):
+        ## Parse key=value properties and bin rows until 'END HISTO1D'.
+        for line in f:
+            if is_end_marker(line, 'HISTO1D'):
+                break
+            elif is_comment(line):
+                continue
+            else:
+                line = line.rstrip()
+                m = pat_property.match(line)
+                if m:
+                    prop, value = m.group(1,2)
+                    self.description[prop] = value
+                else:
+                    linearray = line.split()
+                    ## Detect symm errs
+                    # TODO: Not sure what the 8-param version is for... auto-compatibility with YODA format?
+                    if len(linearray) in [4,8]:
+                        self.data.append(BinData(linearray[0], linearray[1], linearray[2], linearray[3]))
+                    ## Detect asymm errs
+                    elif len(linearray) == 5:
+                        self.data.append(BinData(linearray[0], linearray[1], linearray[2], [linearray[3],linearray[4]]))
+                    else:
+                        raise Exception('Histo1D does not have the expected number of columns. ' + line)
+
+    # TODO: specialise draw() here
+
+    # TODO: specialise draw() here
+
+
+class Histo2D(Histogram):
+    ## Histogram specialisation for explicit HISTO2D sections; always 2D.
+
+    def read_input_data(self, f):
+        self.is2dim = True #< Should really be done in a constructor, but this is easier for now...
+
+        ## Parse key=value properties and 6/7-column 2D bin rows until
+        ## 'END HISTO2D'.
+        for line in f:
+            if is_end_marker(line, 'HISTO2D'):
+                break
+            elif is_comment(line):
+                continue
+            else:
+                line = line.rstrip()
+                m = pat_property.match(line)
+                if m:
+                    prop, value = m.group(1,2)
+                    self.description[prop] = value
+                else:
+                    linearray = line.split()
+                    if len(linearray) in [6,7]:
+                        # If asymm z error, use the max or average of +- error
+                        ## NOTE(review): a ShowMaxZErr value read from the file
+                        ## is a string, so "0" is truthy here -- confirm intended
+                        err = float(linearray[5])
+                        if len(linearray) == 7:
+                            if self.description.get("ShowMaxZErr", 1):
+                                err = max(err, float(linearray[6]))
+                            else:
+                                err = 0.5 * (err + float(linearray[6]))
+                        self.data.append(BinData([linearray[0], linearray[2]], [linearray[1], linearray[3]], float(linearray[4]), err))
+                    else:
+                        raise Exception('Histo2D does not have the expected number of columns. '+line)
+
+    # TODO: specialise draw() here
+
+    # TODO: specialise draw() here
+
+
+
+#############################
+
+
+
+class Frame(object):
+    ## Draws the rectangular plot frame, optionally surrounded by coloured
+    ## margin bars (FrameColor) covering the label areas.
+
+    def __init__(self):
+        self.framelinewidth = '0.3pt'
+
+    def draw(self,inputdata):
+        out = ('\n%\n% Frame\n%\n')
+        if inputdata.description.has_key('FrameColor') and inputdata.description['FrameColor']!=None:
+            color = inputdata.description['FrameColor']
+            # We want to draw this frame only once, so set it to False for next time:
+            inputdata.description['FrameColor']=None
+
+            # Calculate how high and wide the overall plot is
+            ## height = [top extent, bottom extent (negative)] in cm
+            height = [0,0]
+            width  = inputdata.attr('PlotSizeX')
+            if inputdata.attr_bool('RatioPlot', False):
+                height[1] = -inputdata.description['RatioPlotSizeY']
+            if not inputdata.attr_bool('MainPlot', True):
+                height[0] = inputdata.description['PlotSizeY']
+            else:
+                height[0] = -height[1]
+                height[1] = 0
+
+            # Get the margin widths
+            ## NOTE(review): these description values are used arithmetically,
+            ## so they must be numeric (floats) by this point -- confirm the
+            ## upstream parsing converts them.
+            left = inputdata.description['LeftMargin']+0.1
+            right = inputdata.description['RightMargin']+0.1
+            top = inputdata.description['TopMargin']+0.1
+            bottom = inputdata.description['BottomMargin']+0.1
+
+            #
+            ## Four thick psline strokes forming the coloured surround
+            out += ('\\rput(0,1){\\psline[linewidth=%scm,linecolor=%s](%scm,%scm)(%scm,%scm)}\n' %(top, color, -left, top/2, width+right, top/2))
+            out += ('\\rput(0,%scm){\\psline[linewidth=%scm,linecolor=%s](%scm,%scm)(%scm,%scm)}\n' %(height[1], bottom, color, -left, -bottom/2, width+right, -bottom/2))
+            out += ('\\rput(0,0){\\psline[linewidth=%scm,linecolor=%s](%scm,%scm)(%scm,%scm)}\n' %(left, color, -left/2, height[1]-0.05, -left/2, height[0]+0.05))
+            out += ('\\rput(1,0){\\psline[linewidth=%scm,linecolor=%s](%scm,%scm)(%scm,%scm)}\n' %(right, color, right/2, height[1]-0.05, right/2, height[0]+0.05))
+
+
+        ## The frame rectangle itself, around the unit plot area
+        out += ('\\psframe[linewidth='+self.framelinewidth+',dimen=middle](0,0)(1,1)\n')
+        return out
+
+
+
+class Ticks(object):
+
+    def __init__(self, description, coors):
+        ## Line widths and lengths for major/minor tick marks (pstricks units)
+        self.majorticklinewidth = '0.3pt'
+        self.minorticklinewidth = '0.3pt'
+        self.majorticklength    = '9pt'
+        self.minorticklength    = '4pt'
+        ## Plot property dict and physical->frame coordinate transformer
+        self.description = description
+        self.coors = coors
+
+    def draw_ticks(self, vmin, vmax, plotlog=False, custommajorticks=None, customminorticks=None, custommajortickmarks=-1, customminortickmarks=-1, drawlabels=True, twosided=False):
+        out = ""
+        if plotlog:
+            if vmin <= 0 or vmax <= 0:
+                raise Exception("Cannot place log axis min or max tick <= 0")
+            if custommajorticks is None:
+                x = int(log10(vmin))
+                n_labels = 0
+                while x < log10(vmax) + 1:
+                    if 10**x >= vmin:
+                        ticklabel = 10**x
+                        if ticklabel > vmin and ticklabel < vmax:
+                            out += self.draw_majortick(ticklabel,twosided)
+                            if drawlabels:
+                                out += self.draw_majorticklabel(ticklabel)
+                                n_labels += 1
+                        if ticklabel == vmin or ticklabel == vmax:
+                            if drawlabels:
+                                out += self.draw_majorticklabel(ticklabel)
+                                n_labels+=1
+                        for i in range(2,10):
+                            ticklabel = i*10**(x-1)
+                            if ticklabel > vmin and ticklabel < vmax:
+                                out += self.draw_minortick(ticklabel,twosided)
+                                if drawlabels and n_labels == 0:
+                                    if (i+1)*10**(x-1) < vmax: # some special care for the last minor tick
+                                        out += self.draw_minorticklabel(ticklabel)
+                                    else:
+                                        out += self.draw_minorticklabel(ticklabel, last=True)
+                    x += 1
+            else:
+                print "Warning: custom major ticks not currently supported on log axes -- please contact the developers to request!"
+        elif custommajorticks is not None or customminorticks is not None:
+            if custommajorticks:
+                for i in range(len(custommajorticks)):
+                    value = custommajorticks[i]['Value']
+                    label = custommajorticks[i]['Label']
+                    if value >= vmin and value <= vmax:
+                        out += self.draw_majortick(value,twosided)
+                    if drawlabels:
+                        out += self.draw_majorticklabel(value, label=label)
+            if customminorticks:
+                for i in range(len(customminorticks)):
+                    value = customminorticks[i]['Value']
+                    if value >= vmin and value <= vmax:
+                        out += self.draw_minortick(value,twosided)
+        else:
+            vrange = vmax - vmin
+            if isnan(vrange):
+                vrange, vmin, vmax = 1, 1, 2
+            digits = int(log10(vrange))+1
+            if vrange <= 1:
+                digits -= 1
+            foo = int(vrange/(10**(digits-1)))
+            if foo/9. > 0.5:
+                tickmarks = 10
+            elif foo/9. > 0.2:
+                tickmarks = 5
+            else: # foo >= 1 whenever this chain is reached, so foo/9. > 0.1 always held; 'else' avoids leaving 'tickmarks' unbound
+                tickmarks = 2
+
+            if custommajortickmarks > -1:
+                if custommajortickmarks not in [1, 2, 5, 10, 20]:
+                    print '+++ Error in Ticks.draw_ticks(): MajorTickMarks must be in [1, 2, 5, 10, 20]'
+                else:
+                    tickmarks = custommajortickmarks
+
+            if tickmarks == 2 or tickmarks == 20:
+                minortickmarks = 3
+            else:
+                minortickmarks = 4
+            if customminortickmarks > -1:
+                minortickmarks = customminortickmarks
+            #
+            x = 0
+            while x > vmin*10**digits:
+                x -= tickmarks*100**(digits-1)
+            while x <= vmax*10**digits:
+                if x >= vmin*10**digits - tickmarks*100**(digits-1):
+                    ticklabel = 1.*x/10**digits
+                    if int(ticklabel) == ticklabel:
+                        ticklabel = int(ticklabel)
+                    if float(ticklabel-vmin)/vrange >= -1e-5:
+                        if abs(ticklabel-vmin)/vrange > 1e-5 and abs(ticklabel-vmax)/vrange > 1e-5:
+                            out += self.draw_majortick(ticklabel,twosided)
+                        if drawlabels:
+                            out += self.draw_majorticklabel(ticklabel)
+
+                    xminor = x
+                    for i in range(minortickmarks):
+                        xminor += 1.*tickmarks*100**(digits-1)/(minortickmarks+1)
+                        ticklabel = 1.*xminor/10**digits
+                        if ticklabel > vmin and ticklabel < vmax:
+                            if abs(ticklabel-vmin)/vrange > 1e-5 and abs(ticklabel-vmax)/vrange > 1e-5:
+                                out += self.draw_minortick(ticklabel,twosided)
+                x += tickmarks*100**(digits-1)
+        return out
+
+    def draw(self):
+        pass
+
+    def draw_minortick(self, ticklabel, twosided):
+        pass
+
+    def draw_majortick(self, ticklabel, twosided):
+        pass
+
+    def draw_majorticklabel(self, ticklabel):
+        pass
+
+    def draw_minorticklabel(self, value, label='', last=False):
+        return ''
+
+    def get_ticklabel(self, value, plotlog=False, minor=False, lastminor=False):
+        label=''
+        prefix = ''
+        if plotlog:
+            bar = int(log10(value))
+            if bar < 0:
+                sign='-'
+            else:
+                sign='\\,'
+            if minor: # The power of ten is only to be added to the last minor tick label
+                if lastminor:
+                    label = str(int(value/(10**bar))) + "\\cdot" + '10$^{'+sign+'\\text{'+str(abs(bar))+'}}$'
+                else:
+                    label = str(int(value/(10**bar))) # The naked prefactor
+            else:
+                if bar==0:
+                    label = '1'
+                else:
+                    label = '10$^{'+sign+'\\text{'+str(abs(bar))+'}}$'
+        else:
+            if fabs(value) < 1e-10:
+                value = 0
+            label = str(value)
+            if "e" in label:
+                a, b = label.split("e")
+                astr = "%2.1f" % float(a)
+                bstr = str(int(b))
+                label = "\\smaller{%s $\\!\\cdot 10^{%s} $}" % (astr, bstr)
+        return label
+
+
+
+class XTicks(Ticks):
+
+    def draw(self, custommajorticks=None, customminorticks=None, custommajortickmarks=-1, customminortickmarks=-1,drawlabels=True):
+        twosided = bool(int(self.description.get('XTwosidedTicks', '0')))
+        out = ""
+        out += ('\n%\n% X-Ticks\n%\n')
+        out += ('\\def\\majortickmarkx{\\psline[linewidth='+self.majorticklinewidth+'](0,0)(0,'+self.majorticklength+')}%\n')
+        out += ('\\def\\minortickmarkx{\\psline[linewidth='+self.minorticklinewidth+'](0,0)(0,'+self.minorticklength+')}%\n')
+        uselog = self.description['LogX'] and (self.coors.xmin() > 0 and self.coors.xmax() > 0)
+        out += self.draw_ticks(self.coors.xmin(), self.coors.xmax(),\
+                                   plotlog=uselog,\
+                                   custommajorticks=custommajorticks,\
+                                   customminorticks=customminorticks,\
+                                   custommajortickmarks=custommajortickmarks,\
+                                   customminortickmarks=customminortickmarks,\
+                                   drawlabels=drawlabels,\
+                                   twosided=twosided)
+        return out
+
+    def draw_minortick(self, ticklabel, twosided):
+        out = ''
+        out += '\\rput('+self.coors.strphys2frameX(ticklabel)+', 0){\\minortickmarkx}\n'
+        if twosided:
+            out += '\\rput{180}('+self.coors.strphys2frameX(ticklabel)+', 1){\\minortickmarkx}\n'
+        return out
+
+    def draw_minorticklabel(self, value, label='', last=False):
+        if not label:
+            label=self.get_ticklabel(value, int(self.description['LogX']), minor=True, lastminor=last)
+        if last: # Some more indentation for the last minor label
+            return ('\\rput('+self.coors.strphys2frameX(value)+', 0){\\rput[B](1.9\\labelsep,-2.3\\labelsep){\\strut{}'+label+'}}\n')
+        else:
+            return ('\\rput('+self.coors.strphys2frameX(value)+', 0){\\rput[B](0,-2.3\\labelsep){\\strut{}'+label+'}}\n')
+
+    def draw_majortick(self, ticklabel, twosided):
+        out = ''
+        out += '\\rput('+self.coors.strphys2frameX(ticklabel)+', 0){\\majortickmarkx}\n'
+        if twosided:
+            out += '\\rput{180}('+self.coors.strphys2frameX(ticklabel)+', 1){\\majortickmarkx}\n'
+        return out
+
+    def draw_majorticklabel(self, value, label=''):
+        if not label:
+            label = self.get_ticklabel(value, int(self.description['LogX']) and self.coors.xmin() > 0 and self.coors.xmax() > 0)
+        labelparts = label.split("\\n")
+        labelcode = label if len(labelparts) == 1 else ("\\shortstack{" + "\\\\ ".join(labelparts) +  "}")
+        rtn = "\\rput(" + self.coors.strphys2frameX(value) + ", 0){\\rput[t](0,-\\labelsep){" + labelcode + "}}\n"
+        return rtn
+
+
+
+class YTicks(Ticks):
+
+    def draw(self, custommajorticks=None, customminorticks=None, custommajortickmarks=-1, customminortickmarks=-1, drawlabels=True):
+        twosided = bool(int(self.description.get('YTwosidedTicks', '0')))
+        out = ""
+        out += ('\n%\n% Y-Ticks\n%\n')
+        out += ('\\def\\majortickmarky{\\psline[linewidth=%s](0,0)(%s,0)}%%\n' % (self.majorticklinewidth, self.majorticklength))
+        out += ('\\def\\minortickmarky{\\psline[linewidth=%s](0,0)(%s,0)}%%\n' % (self.minorticklinewidth, self.minorticklength))
+        uselog = self.description['LogY'] and self.coors.ymin() > 0 and self.coors.ymax() > 0
+        out += self.draw_ticks(self.coors.ymin(), self.coors.ymax(),
+                               plotlog=uselog,
+                               custommajorticks=custommajorticks,
+                               customminorticks=customminorticks,
+                               custommajortickmarks=custommajortickmarks,
+                               customminortickmarks=customminortickmarks,
+                               twosided=twosided,
+                               drawlabels=drawlabels)
+        return out
+
+    def draw_minortick(self, ticklabel, twosided):
+        out = ''
+        out += '\\rput(0, '+self.coors.strphys2frameY(ticklabel)+'){\\minortickmarky}\n'
+        if twosided:
+            out += '\\rput{180}(1, '+self.coors.strphys2frameY(ticklabel)+'){\\minortickmarky}\n'
+        return out
+
+    def draw_majortick(self, ticklabel, twosided):
+        out = ''
+        out += '\\rput(0, '+self.coors.strphys2frameY(ticklabel)+'){\\majortickmarky}\n'
+        if twosided:
+            out += '\\rput{180}(1, '+self.coors.strphys2frameY(ticklabel)+'){\\majortickmarky}\n'
+        return out
+
+    def draw_majorticklabel(self, value, label=''):
+        if not label:
+            label = self.get_ticklabel(value, int(self.description['LogY']) and self.coors.ymin() > 0 and self.coors.ymax() > 0)
+        if self.description.has_key('RatioPlotMode') and self.description['RatioPlotMode'] == 'deviation' and \
+           self.description.has_key('RatioPlotStage') and self.description['RatioPlotStage']:
+            rtn = '\\uput[180]{0}(0, '+self.coors.strphys2frameY(value)+'){\\strut{}'+label+'\\,$\\sigma$}\n'
+        else:
+            labelparts = label.split("\\n")
+            labelcode = label if len(labelparts) == 1 else ("\\shortstack{" + "\\\\ ".join(labelparts) +  "}")
+            rtn = "\\rput(0, " + self.coors.strphys2frameY(value) + "){\\rput[r](-\\labelsep,0){" + labelcode + "}}\n"
+        return rtn
+
+
+
+class ZTicks(Ticks):
+
+    def __init__(self, description, coors):
+        self.majorticklinewidth = '0.3pt'
+        self.minorticklinewidth = '0.3pt'
+        self.majorticklength    = '6pt'
+        self.minorticklength    = '2.6pt'
+        self.description = description
+        self.coors = coors
+
+    def draw(self, custommajorticks=None, customminorticks=None, custommajortickmarks=-1, customminortickmarks=-1, drawlabels=True):
+        out = ""
+        out += ('\n%\n% Z-Ticks\n%\n')
+        out += ('\\def\\majortickmarkz{\\psline[linewidth='+self.majorticklinewidth+'](0,0)('+self.majorticklength+',0)}%\n')
+        out += ('\\def\\minortickmarkz{\\psline[linewidth='+self.minorticklinewidth+'](0,0)('+self.minorticklength+',0)}%\n')
+        out += self.draw_ticks(self.coors.zmin(), self.coors.zmax(),\
+                                   plotlog=self.description['LogZ'],\
+                                   custommajorticks=custommajorticks,\
+                                   customminorticks=customminorticks,\
+                                   custommajortickmarks=custommajortickmarks,\
+                                   customminortickmarks=customminortickmarks,\
+                                   twosided=False,\
+                                   drawlabels=drawlabels)
+        return out
+
+    def draw_minortick(self, ticklabel, twosided):
+        return '\\rput{180}(1, '+self.coors.strphys2frameZ(ticklabel)+'){\\minortickmarkz}\n'
+
+    def draw_majortick(self, ticklabel, twosided):
+        return '\\rput{180}(1, '+self.coors.strphys2frameZ(ticklabel)+'){\\majortickmarkz}\n'
+
+    def draw_majorticklabel(self, value, label=''):
+        if label=='':
+            label = self.get_ticklabel(value, int(self.description['LogZ']))
+        if self.description.has_key('RatioPlotMode') and self.description['RatioPlotMode']=='deviation' \
+                and self.description.has_key('RatioPlotStage') and self.description['RatioPlotStage']:
+            return ('\\uput[0]{0}(1, '+self.coors.strphys2frameZ(value)+'){\\strut{}'+label+'\\,$\\sigma$}\n')
+        else:
+            return ('\\uput[0]{0}(1, '+self.coors.strphys2frameZ(value)+'){\\strut{}'+label+'}\n')
+
+
+
+class Coordinates(object):
+
+    def __init__(self, inputdata):
+        self.description = inputdata.description
+
+    def phys2frameX(self, x):
+        if self.description['LogX']:
+            if x>0:
+                result = 1.*(log10(x)-log10(self.xmin()))/(log10(self.xmax())-log10(self.xmin()))
+            else:
+                return -10
+        else:
+            result = 1.*(x-self.xmin())/(self.xmax()-self.xmin())
+        if (fabs(result) < 1e-4):
+            return 0
+        else:
+            return min(max(result,-10),10)
+
+    def phys2frameY(self, y):
+        if self.description['LogY']:
+            if y > 0 and self.ymin() > 0 and self.ymax() > 0:
+                result = 1.*(log10(y)-log10(self.ymin()))/(log10(self.ymax())-log10(self.ymin()))
+            else:
+                return -10
+        else:
+            result = 1.*(y-self.ymin())/(self.ymax()-self.ymin())
+        if (fabs(result) < 1e-4):
+            return 0
+        else:
+            return min(max(result,-10),10)
+
+    def phys2frameZ(self, z):
+        if self.description['LogZ']:
+            if z>0:
+                result = 1.*(log10(z)-log10(self.zmin()))/(log10(self.zmax())-log10(self.zmin()))
+            else:
+                return -10
+        else:
+            result = 1.*(z-self.zmin())/(self.zmax()-self.zmin())
+        if (fabs(result) < 1e-4):
+            return 0
+        else:
+            return min(max(result,-10),10)
+
+    # TODO: Add frame2phys functions (to allow linear function sampling in the frame space rather than the physical space)
+
+    def strphys2frameX(self, x):
+        return str(self.phys2frameX(x))
+
+    def strphys2frameY(self, y):
+        return str(self.phys2frameY(y))
+
+    def strphys2frameZ(self, z):
+        return str(self.phys2frameZ(z))
+
+    def xmin(self):
+        return self.description['Borders'][0]
+
+    def xmax(self):
+        return self.description['Borders'][1]
+
+    def ymin(self):
+        return self.description['Borders'][2]
+
+    def ymax(self):
+        return self.description['Borders'][3]
+
+    def zmin(self):
+        return self.description['Borders'][4]
+
+    def zmax(self):
+        return self.description['Borders'][5]
+
+
+####################
+
+import shutil, subprocess
+
+def try_cmd(args):
+    "Run the given command + args and return True/False if it succeeds or not"
+    try:
+        subprocess.check_output(args, stderr=subprocess.STDOUT)
+        return True
+    except:
+        return False
+
+def have_cmd(cmd):
+    return try_cmd(["which", cmd])
+
+
+
+####################
+
+
+
+if __name__ == '__main__':
+
+    ## Try to rename the process on Linux
+    try:
+        import ctypes
+        libc = ctypes.cdll.LoadLibrary('libc.so.6')
+        libc.prctl(15, 'make-plots', 0, 0, 0)
+    except Exception:
+        pass
+
+    ## Try to use Psyco optimiser
+    try:
+        import psyco
+        psyco.full()
+    except ImportError:
+        pass
+
+    ## Find number of (virtual) processing units
+    import multiprocessing
+    try:
+        numcores = multiprocessing.cpu_count()
+    except:
+        numcores = 1
+
+    ## Parse command line options
+    from optparse import OptionParser, OptionGroup
+    parser = OptionParser(usage=__doc__)
+    parser.add_option("-n", "-j", "--num-threads", dest="NUM_THREADS", type="int",
+                      default=numcores, help="max number of threads to be used [%s]" % numcores)
+    parser.add_option("--font", dest="OUTPUT_FONT", choices="palatino,cm,times,helvetica,minion".split(","),
+                      default="palatino", help="choose the font to be used in the plots")
+    parser.add_option("--palatino", dest="OUTPUT_FONT", action="store_const", const="palatino", default="palatino",
+                      help="use Palatino as font (default). DEPRECATED: Use --font")
+    parser.add_option("--cm", dest="OUTPUT_FONT", action="store_const", const="cm", default="palatino",
+                      help="use Computer Modern as font. DEPRECATED: Use --font")
+    parser.add_option("--times", dest="OUTPUT_FONT", action="store_const", const="times", default="palatino",
+                      help="use Times as font. DEPRECATED: Use --font")
+    parser.add_option("--minion", dest="OUTPUT_FONT", action="store_const", const="minion", default="palatino",
+                      help="use Adobe Minion Pro as font. Note: You need to set TEXMFHOME first. DEPRECATED: Use --font")
+    parser.add_option("--helvetica", dest="OUTPUT_FONT", action="store_const", const="helvetica", default="palatino",
+                      help="use Helvetica as font. DEPRECATED: Use --font")
+    parser.add_option("--format", dest="OUTPUT_FORMAT", default="PDF",
+                      help="choose plot format, perhaps multiple comma-separated formats e.g. 'pdf' or 'tex,pdf,png' (default = PDF).")
+    parser.add_option("--ps", dest="OUTPUT_FORMAT", action="store_const", const="PS", default="PDF",
+                      help="create PostScript output (default). DEPRECATED")
+    parser.add_option("--pdf", dest="OUTPUT_FORMAT", action="store_const", const="PDF", default="PDF",
+                      help="create PDF output. DEPRECATED")
+    parser.add_option("--eps", dest="OUTPUT_FORMAT", action="store_const", const="EPS", default="PDF",
+                      help="create Encapsulated PostScript output. DEPRECATED")
+    parser.add_option("--png", dest="OUTPUT_FORMAT", action="store_const", const="PNG", default="PDF",
+                     help="create PNG output. DEPRECATED")
+    parser.add_option("--pspng", dest="OUTPUT_FORMAT", action="store_const", const="PS,PNG", default="PDF",
+                     help="create PS and PNG output. DEPRECATED")
+    parser.add_option("--pdfpng", dest="OUTPUT_FORMAT", action="store_const", const="PDF,PNG", default="PDF",
+                     help="create PDF and PNG output. DEPRECATED")
+    parser.add_option("--epspng", dest="OUTPUT_FORMAT", action="store_const", const="EPS,PNG", default="PDF",
+                     help="create EPS and PNG output. DEPRECATED")
+    parser.add_option("--tex", dest="OUTPUT_FORMAT", action="store_const", const="TEX", default="PDF",
+                      help="create TeX/LaTeX output.")
+    parser.add_option("--no-cleanup", dest="NO_CLEANUP", action="store_true", default=False,
+                      help="keep temporary directory and print its filename.")
+    parser.add_option("--no-subproc", dest="NO_SUBPROC", action="store_true", default=False,
+                      help="don't use subprocesses to render the plots in parallel -- useful for debugging.")
+    parser.add_option("--full-range", dest="FULL_RANGE", action="store_true", default=False,
+                      help="plot full y range in LogY plots.")
+    parser.add_option("-c", "--config", dest="CONFIGFILES", action="append", default=None,
+                      help="plot config file to be used. Overrides internal config blocks.")
+    verbgroup = OptionGroup(parser, "Verbosity control")
+    verbgroup.add_option("-v", "--verbose", action="store_const", const=logging.DEBUG, dest="LOGLEVEL",
+                         default=logging.INFO, help="print debug (very verbose) messages")
+    verbgroup.add_option("-q", "--quiet", action="store_const", const=logging.WARNING, dest="LOGLEVEL",
+                         default=logging.INFO, help="be very quiet")
+    parser.add_option_group(verbgroup)
+
+    opts, args = parser.parse_args()
+    logging.basicConfig(level=opts.LOGLEVEL, format="%(message)s")
+    opts.OUTPUT_FONT = opts.OUTPUT_FONT.upper()
+    opts.OUTPUT_FORMAT = opts.OUTPUT_FORMAT.upper().split(",")
+
+    ## Check for no args
+    if len(args) == 0:
+        logging.error(parser.get_usage())
+        sys.exit(2)
+
+    ## Check that the files exist
+    for f in args:
+        if not os.access(f, os.R_OK):
+            print "Error: cannot read from %s" % f
+            sys.exit(1)
+
+    ## Test for external programs (kpsewhich, latex, dvips, ps2pdf/ps2eps, and convert)
+    opts.LATEXPKGS = []
+    if opts.OUTPUT_FORMAT != ["TEX"]:
+        try:
+            ## latex
+            if not have_cmd("pdflatex"):
+                logging.error("ERROR: required program 'pdflatex' could not be found. Exiting...")
+                sys.exit(1)
+            # ## dvips
+            # if not have_cmd("dvips"):
+            #     logging.error("ERROR: required program 'dvips' could not be found. Exiting...")
+            #     sys.exit(1)
+
+            # ## ps2pdf / ps2eps
+            # if "PDF" in opts.OUTPUT_FORMAT:
+            #     if not have_cmd("ps2pdf"):
+            #         logging.error("ERROR: required program 'ps2pdf' (for PDF output) could not be found. Exiting...")
+            #         sys.exit(1)
+            # elif "EPS" in opts.OUTPUT_FORMAT:
+            #     if not have_cmd("ps2eps"):
+            #         logging.error("ERROR: required program 'ps2eps' (for EPS output) could not be found. Exiting...")
+            #         sys.exit(1)
+
+            ## PNG output converter
+            if "PNG" in opts.OUTPUT_FORMAT:
+                if not have_cmd("convert"):
+                    logging.error("ERROR: required program 'convert' (for PNG output) could not be found. Exiting...")
+                    sys.exit(1)
+
+            ## kpsewhich: required for LaTeX package testing
+            if not have_cmd("kpsewhich"):
+                logging.warning("WARNING: required program 'kpsewhich' (for LaTeX package checks) could not be found")
+            else:
+                ## Check minion font
+                if opts.OUTPUT_FONT == "MINION":
+                    p = subprocess.Popen(["kpsewhich", "minion.sty"], stdout=subprocess.PIPE)
+                    p.wait()
+                    if p.returncode != 0:
+                        logging.warning('Warning: Using "--minion" requires minion.sty to be installed. Ignoring it.')
+                        opts.OUTPUT_FONT = "PALATINO"
+
+                ## Check for HEP LaTeX packages
+                # TODO: remove HEP-specifics/non-standards?
+                for pkg in ["hepnames", "hepunits", "underscore"]:
+                    p = subprocess.Popen(["kpsewhich", "%s.sty" % pkg], stdout=subprocess.PIPE)
+                    p.wait()
+                    if p.returncode == 0:
+                        opts.LATEXPKGS.append(pkg)
+
+                ## Check for Palatino old style figures and small caps
+                if opts.OUTPUT_FONT == "PALATINO":
+                    p = subprocess.Popen(["kpsewhich", "ot1pplx.fd"], stdout=subprocess.PIPE)
+                    p.wait()
+                    if p.returncode == 0:
+                        opts.OUTPUT_FONT = "PALATINO_OSF"
+        except Exception, e:
+            logging.warning("Problem while testing for external packages. I'm going to try and continue without testing, but don't hold your breath...")
+
+    # def init_worker():
+    #     import signal
+    #     signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+    ## Run rendering jobs
+    datfiles = args
+    plotword = "plots" if len(datfiles) > 1 else "plot"
+    logging.info("Making %d %s" % (len(datfiles), plotword))
+
+    ## Create a temporary directory
+    tempdir = tempfile.mkdtemp('.make-plots')
+    if opts.NO_CLEANUP:
+        logging.info('Keeping temp-files in %s' % tempdir)
+
+    ## Create TeX file
+    texpath = os.path.join(tempdir, 'plots.tex')
+    texfile = open(texpath, 'w')
+    # if inputdata.description.has_key('LeftMargin') and inputdata.description['LeftMargin']!='':
+    #     inputdata.description['LeftMargin'] = float(inputdata.description['LeftMargin'])
+    # else:
+    #     inputdata.description['LeftMargin'] = 1.4
+    # if inputdata.description.has_key('RightMargin') and inputdata.description['RightMargin']!='':
+    #     inputdata.description['RightMargin'] = float(inputdata.description['RightMargin'])
+    # else:
+    #     inputdata.description['RightMargin'] = 0.35
+    # if inputdata.description.has_key('TopMargin') and inputdata.description['TopMargin']!='':
+    #     inputdata.description['TopMargin'] = float(inputdata.description['TopMargin'])
+    # else:
+    #     inputdata.description['TopMargin'] = 0.65
+    # if inputdata.description.has_key('BottomMargin') and inputdata.description['BottomMargin']!='':
+    #     inputdata.description['BottomMargin'] = float(inputdata.description['BottomMargin'])
+    # else:
+    #     inputdata.description['BottomMargin'] = 0.95
+    # if inputdata.description['is2dim']:
+    #     inputdata.description['RightMargin'] += 1.7
+    # papersizex = inputdata.description['PlotSizeX'] + 0.1 + \
+    #              inputdata.description['LeftMargin'] + inputdata.description['RightMargin']
+    # papersizey = inputdata.description['PlotSizeY'] + inputdata.description['RatioPlotSizeY'] + 0.1 + \
+    #              inputdata.description['TopMargin'] + inputdata.description['BottomMargin']
+    #
+    out = ""
+    # out += '\\documentclass{article}\n'
+    # out += '\\documentclass[pstricks,multi]{standalone}\n'
+    out += '\\documentclass[multi=multipage]{standalone}\n'
+    if opts.OUTPUT_FONT == "MINION":
+        out += ('\\usepackage{minion}\n')
+    elif opts.OUTPUT_FONT == "PALATINO_OSF":
+        out += ('\\usepackage[osf,sc]{mathpazo}\n')
+    elif opts.OUTPUT_FONT == "PALATINO":
+        out += ('\\usepackage{mathpazo}\n')
+    elif opts.OUTPUT_FONT == "TIMES":
+        out += ('\\usepackage{mathptmx}\n')
+    elif opts.OUTPUT_FONT == "HELVETICA":
+        out += ('\\renewcommand{\\familydefault}{\\sfdefault}\n')
+        out += ('\\usepackage{sfmath}\n')
+        out += ('\\usepackage{helvet}\n')
+        out += ('\\usepackage[symbolgreek]{mathastext}\n')
+    for pkg in opts.LATEXPKGS:
+        out += ('\\usepackage{%s}\n' % pkg)
+    out += ('\\usepackage{pst-all}\n')
+    out += ('\\usepackage{xcolor}\n')
+    out += ('\\selectcolormodel{rgb}\n')
+    out += ('\\definecolor{red}{HTML}{EE3311}\n') # (Google uses 'DC3912')
+    out += ('\\definecolor{blue}{HTML}{3366FF}')
+    out += ('\\definecolor{green}{HTML}{109618}')
+    out += ('\\definecolor{orange}{HTML}{FF9900}')
+    out += ('\\definecolor{lilac}{HTML}{990099}')
+    out += ('\\usepackage{amsmath}\n')
+    out += ('\\usepackage{amssymb}\n')
+    out += ('\\usepackage{relsize}\n')
+    # out += ('\\usepackage[dvips,\n')
+    # out += ('  left=%4.3fcm, right=0cm,\n' % (inputdata.description['LeftMargin']-0.45,))
+    # out += ('  top=%4.3fcm,  bottom=0cm,\n' % (inputdata.description['TopMargin']-0.30,))
+    # out += ('  paperwidth=%scm,paperheight=%scm\n' % (papersizex,papersizey))
+    # out += (']{geometry}\n')
+    # out += ('\\usepackage[pdfcrop={--margins 10}]{auto-pst-pdf}\n')
+    out += ('\\usepackage{auto-pst-pdf}\n')
+    out += '\n'
+    out += ('\\begin{document}\n')
+    #out += ('\\pagestyle{empty}\n')
+    out += ('\\SpecialCoor\n')
+    texfile.write(out)
+
+    ## Process each datfile into the TeX doc
+    for i, datfile in enumerate(datfiles):
+        if not os.access(datfile, os.R_OK):
+            raise Exception("Could not read data file '%s'" % datfile)
+
+        ## Get std paths and copy datfile into tempdir
+        dirname = os.path.dirname(datfile)
+        datfile = os.path.basename(datfile)
+        filename = datfile.replace('.dat','')
+        cwd = os.getcwd()
+        datpath = os.path.join(cwd, dirname, datfile)
+        tempdatpath = os.path.join(tempdir, datfile)
+        shutil.copy(datpath, tempdir)
+
+        ## Append TeX to file
+        inputdata = InputData(os.path.join(dirname,filename))
+        p = Plot(inputdata)
+        texfile.write("\n\n")
+        texfile.write(p.write_header(inputdata))
+        if inputdata.attr_bool("MainPlot", True):
+            mp = MainPlot(inputdata)
+            texfile.write(mp.draw(inputdata))
+        if not inputdata.attr_bool("is2dim", False) and inputdata.attr_bool("RatioPlot", True) and inputdata.attr("RatioPlotReference"): # is not None:
+            rp = RatioPlot(inputdata)
+            texfile.write(rp.draw(inputdata))
+        texfile.write(p.write_footer())
+
+    texfile.write('\\end{document}\n')
+    texfile.close()
+
+    filename = "plots" #< TODO: unhack
+
+    if opts.OUTPUT_FORMAT != ["TEX"]:
+
+        ## Check for the required programs
+        latexavailable = have_cmd("latex")
+        dvipsavailable = have_cmd("dvips")
+        convertavailable = have_cmd("convert")
+        ps2pnmavailable = have_cmd("ps2pnm")
+        pnm2pngavailable = have_cmd("pnm2png")
+
+        # TODO: It'd be nice to be able to control the size of the PNG between thumb and full-size...
+        #   currently defaults (and is used below) to a size suitable for thumbnails
+        def mkpng(infile, outfile, density=100):
+            if convertavailable:
+                pngcmd = ["convert", "-flatten", "-density", str(density), infile, "-quality", "100", "-sharpen", "0x1.0", outfile]
+                logging.debug(" ".join(pngcmd))
+                pngproc = subprocess.Popen(pngcmd, stdout=subprocess.PIPE, cwd=tempdir)
+                pngproc.wait()
+            else:
+                raise Exception("Required PNG maker program (convert) not found")
+            # elif ps2pnmavailable and pnm2pngavailable:
+            #     pstopnm = "pstopnm -stdout -xsize=461 -ysize=422 -xborder=0.01 -yborder=0.01 -portrait " + infile
+            #     p1 = subprocess.Popen(pstopnm.split(), stdout=subprocess.PIPE, stderr=open("/dev/null", "w"), cwd=tempdir)
+            #     p2 = subprocess.Popen(["pnmtopng"], stdin=p1.stdout, stdout=open("%s/%s.png" % (tempdir, outfile), "w"), stderr=open("/dev/null", "w"), cwd=tempdir)
+            #     p2.wait()
+            # else:
+            #     raise Exception("Required PNG maker programs (convert, or ps2pnm and pnm2png) not found")
+
+        ## Run LaTeX (in no-stop mode)
+        logging.debug(os.listdir(tempdir))
+        texcmd = ["pdflatex", "-shell-escape", "\scrollmode\input", texpath]
+        logging.debug("TeX command: " + " ".join(texcmd))
+        texproc = subprocess.Popen(texcmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=tempdir)
+        logging.debug(texproc.communicate()[0])
+        logging.debug(os.listdir(tempdir))
+
+        # ## Run dvips
+        # dvcmd = ["dvips", filename]
+        # if not logging.getLogger().isEnabledFor(logging.DEBUG):
+        #     dvcmd.append("-q")
+        # ## Handle Minion Font
+        # if opts.OUTPUT_FONT == "MINION":
+        #     dvcmd.append('-Pminion')
+
+        ## Choose format
+        # TODO: Rationalise... this is a mess! Maybe we can use tex2pix?
+        # if "PS" in opts.OUTPUT_FORMAT:
+        #     dvcmd += ["-o", "%s.ps" % filename]
+        #     logging.debug(" ".join(dvcmd))
+        #     dvproc = subprocess.Popen(dvcmd, stdout=subprocess.PIPE, cwd=tempdir)
+        #     dvproc.wait()
+        # if "PDF" in opts.OUTPUT_FORMAT:
+        #     dvcmd.append("-f")
+        #     logging.debug(" ".join(dvcmd))
+        #     dvproc = subprocess.Popen(dvcmd, stdout=subprocess.PIPE, cwd=tempdir)
+        #     cnvproc = subprocess.Popen(["ps2pdf", "-"], stdin=dvproc.stdout, stdout=subprocess.PIPE, cwd=tempdir)
+        #     f = open(os.path.join(tempdir, "%s.pdf" % filename), "w")
+        #     f.write(cnvproc.communicate()[0])
+        #     f.close()
+        # if "EPS" in opts.OUTPUT_FORMAT:
+        #     dvcmd.append("-f")
+        #     logging.debug(" ".join(dvcmd))
+        #     dvproc = subprocess.Popen(dvcmd, stdout=subprocess.PIPE, cwd=tempdir)
+        #     cnvproc = subprocess.Popen(["ps2eps"], stdin=dvproc.stdout, stderr=subprocess.PIPE, stdout=subprocess.PIPE, cwd=tempdir)
+        #     f = open(os.path.join(tempdir, "%s.eps" % filename), "w")
+        #     f.write(cnvproc.communicate()[0])
+        #     f.close()
+        # if "PNG" in opts.OUTPUT_FORMAT:
+        #     dvcmd.append("-f")
+        #     logging.debug(" ".join(dvcmd))
+        #     dvproc = subprocess.Popen(dvcmd, stdout=subprocess.PIPE, cwd=tempdir)
+        #     pngcmd = ["convert", "-flatten", "-density", "100", "-", "-quality", "100", "-sharpen", "0x1.0", "%s.png" % filename]
+        #     logging.debug(" ".join(pngcmd))
+        #     pngproc = subprocess.Popen(pngcmd, stdin=dvproc.stdout, stdout=subprocess.PIPE, cwd=tempdir)
+        #     pngproc.wait()
+        logging.debug(os.listdir(tempdir))
+
+    ## Copy results back to main dir
+    # shutil.copy(os.path.join(tempdir,"plots-pics.pdf"), os.path.join(cwd,dirname))
+    for fmt in opts.OUTPUT_FORMAT:
+        outname = "%s.%s" % (filename, fmt.lower())
+        outpath = os.path.join(tempdir, outname)
+        if os.path.exists(outpath):
+            shutil.copy(outpath, os.path.join(cwd,dirname))
+        else:
+            logging.error("No output file '%s' from processing %s" % (outname, datfile))
+
+    ## Clean up
+    if not opts.NO_CLEANUP:
+        shutil.rmtree(tempdir, ignore_errors=True)
+
+
+
+    # if opts.NO_SUBPROC:
+    #     init_worker()
+    #     for i, df in enumerate(datfiles):
+    #         logging.info("Plotting %s (%d/%d remaining)" % (df, len(datfiles)-i, len(datfiles)))
+    #         process_datfile(df)
+    # else:
+    #     pool = multiprocessing.Pool(opts.NUM_THREADS, init_worker)
+    #     try:
+    #         for i, _ in enumerate(pool.imap(process_datfile, datfiles)):
+    #             logging.info("Plotting %s (%d/%d remaining)" % (datfiles[i], len(datfiles)-i, len(datfiles)))
+    #         pool.close()
+    #     except KeyboardInterrupt:
+    #         print "Caught KeyboardInterrupt, terminating workers"
+    #         pool.terminate()
+    #     pool.join()
diff --git a/bin/rivet b/bin/rivet
--- a/bin/rivet
+++ b/bin/rivet
@@ -1,588 +1,652 @@
 #! /usr/bin/env python
 
 """\
 Run Rivet analyses on inputted events from file or Unix pipe
 
 Examples:
   %prog [options] <hepmcfile> [<hepmcfile2> ...]
   my_generator -o myfifo & \ %prog [options] myfifo
   agile-runmc <genname> -n 100k -o- | %prog [options]
 
 ENVIRONMENT:
  * RIVET_ANALYSIS_PATH: list of paths to be searched for plugin
      analysis libraries at runtime
  * RIVET_DATA_PATH: list of paths to be searched for data files
 """
 
 import os, sys
 
 ## Load the rivet module
 try:
     import rivet
 except:
     ## If rivet loading failed, try to bootstrap the Python path!
     try:
         # TODO: Is this a good idea? Maybe just notify the user that their PYTHONPATH is wrong?
         import commands
         modname = sys.modules[__name__].__file__
         binpath = os.path.dirname(modname)
         rivetconfigpath = os.path.join(binpath, "rivet-config")
         rivetpypath = commands.getoutput(rivetconfigpath + " --pythonpath")
         sys.path.append(rivetpypath)
         import rivet
     except:
         sys.stderr.write("The rivet Python module could not be loaded: is your PYTHONPATH set correctly?\n")
         sys.exit(5)
 
 rivet.util.check_python_version()
 rivet.util.set_process_name("rivet")
 
 import time, datetime, logging, signal
 
 ## Parse command line options
 from optparse import OptionParser, OptionGroup
 parser = OptionParser(usage=__doc__, version="rivet v%s" % rivet.version())
 
 anagroup = OptionGroup(parser, "Analysis handling")
 anagroup.add_option("-a", "--analysis", "--analyses", dest="ANALYSES", action="append",
                     default=[], metavar="ANA",
                     help="add an analysis (or comma-separated list of analyses) to the processing list.")
 anagroup.add_option("--list-analyses", "--list", dest="LIST_ANALYSES", action="store_true",
                     default=False, help="show the list of available analyses' names. With -v, it shows the descriptions, too")
+anagroup.add_option("--list-keywords", "--keywords", dest="LIST_KEYWORDS", action="store_true",
+                    default=False, help="show the list of available keywords.")
 anagroup.add_option("--list-used-analyses", action="store_true", dest="LIST_USED_ANALYSES",
                     default=False, help="list the analyses used by this command (after subtraction of inappropriate ones)")
 anagroup.add_option("--show-analysis", "--show-analyses", "--show", dest="SHOW_ANALYSES", action="append",
                     default=[], help="show the details of an analysis")
 anagroup.add_option("--show-bibtex", dest="SHOW_BIBTEX", action="store_true",
                     default=False, help="show BibTeX entries for all used analyses")
 anagroup.add_option("--analysis-path", dest="ANALYSIS_PATH", metavar="PATH", default=None,
                     help="specify the analysis search path (cf. $RIVET_ANALYSIS_PATH).")
 # TODO: remove/deprecate the append?
 anagroup.add_option("--analysis-path-append", dest="ANALYSIS_PATH_APPEND", metavar="PATH", default=None,
                     help="append to the analysis search path (cf. $RIVET_ANALYSIS_PATH).")
 anagroup.add_option("--pwd", dest="ANALYSIS_PATH_PWD", action="store_true", default=False,
                     help="append the current directory (pwd) to the analysis/data search paths (cf. $RIVET_ANALYSIS_PATH).")
 # TODO: add control for more paths?
 parser.add_option_group(anagroup)
 
 
 extragroup = OptionGroup(parser, "Extra run settings")
 extragroup.add_option("-o", "-H", "--histo-file", dest="HISTOFILE",
                       default="Rivet.yoda", help="specify the output histo file path (default = %default)")
 extragroup.add_option("--no-histo-file", dest="WRITE_DATA", action="store_false", default=True,
                       help="don't write out any histogram file at the end of the run (default = write)")
 extragroup.add_option("-x", "--cross-section", dest="CROSS_SECTION",
                       default=None, metavar="XS",
                       help="specify the signal process cross-section in pb")
 extragroup.add_option("-n", "--nevts", dest="MAXEVTNUM", type="int",
                       default=None, metavar="NUM",
                       help="restrict the max number of events to process")
 extragroup.add_option("--nskip", dest="EVTSKIPNUM", type="int",
                       default=0, metavar="NUM",
                       help="skip NUM events read from input before beginning processing")
 extragroup.add_option("--runname", dest="RUN_NAME", default=None, metavar="NAME",
                       help="give an optional run name, to be prepended as a 'top level directory' in histo paths")
 extragroup.add_option("--ignore-beams", dest="IGNORE_BEAMS", action="store_true", default=False,
                       help="ignore input event beams when checking analysis compatibility. "
                       "WARNING: analyses may not work correctly, or at all, with inappropriate beams")
 parser.add_option_group(extragroup)
 
 
 timinggroup = OptionGroup(parser, "Timeouts and periodic operations")
 timinggroup.add_option("--event-timeout", dest="EVENT_TIMEOUT", type="int",
                        default=21600, metavar="NSECS",
                        help="max time in whole seconds to wait for an event to be generated from the specified source (default = %default)")
 timinggroup.add_option("--run-timeout", dest="RUN_TIMEOUT", type="int",
                        default=None, metavar="NSECS",
                        help="max time in whole seconds to wait for the run to finish. This can be useful on batch systems such "
                        "as the LCG Grid where tokens expire on a fixed wall-clock and can render long Rivet runs unable to write "
                        "out the final histogram file (default = unlimited)")
 timinggroup.add_option("--histo-interval", dest="HISTO_WRITE_INTERVAL", type=int,
                        default=1000, help="specify the number of events between histogram file updates, default = %default. "
                        "Set to 0 to only write out at the end of the run. Note that intermediate histograms will be those "
                        "from the analyze step only: analysis finalizing is currently not executed until the end of the run.")
 parser.add_option_group(timinggroup)
 
 
 verbgroup = OptionGroup(parser, "Verbosity control")
 parser.add_option("-l", dest="NATIVE_LOG_STRS", action="append",
                   default=[], help="set a log level in the Rivet library")
 verbgroup.add_option("-v", "--verbose", action="store_const", const=logging.DEBUG, dest="LOGLEVEL",
                      default=logging.INFO, help="print debug (very verbose) messages")
 verbgroup.add_option("-q", "--quiet", action="store_const", const=logging.WARNING, dest="LOGLEVEL",
                      default=logging.INFO, help="be very quiet")
 parser.add_option_group(verbgroup)
 opts, args = parser.parse_args()
 
 
+## Override/modify analysis search path
+if opts.ANALYSIS_PATH:
+    rivet.setAnalysisLibPaths(opts.ANALYSIS_PATH.split(":"))
+    rivet.setAnalysisDataPaths(opts.ANALYSIS_PATH.split(":"))
+if opts.ANALYSIS_PATH_APPEND:
+    for ap in opts.ANALYSIS_PATH_APPEND.split(":"):
+        rivet.addAnalysisLibPath(ap)
+        rivet.addAnalysisDataPath(ap)
+if opts.ANALYSIS_PATH_PWD:
+    rivet.addAnalysisLibPath(os.path.abspath("."))
+    rivet.addAnalysisDataPath(os.path.abspath("."))
+
+
 ## Configure logging
 logging.basicConfig(level=opts.LOGLEVEL, format="%(message)s")
-
-## Control native Rivet library logger
 for l in opts.NATIVE_LOG_STRS:
     name, level = None, None
     try:
         name, level = l.split("=")
     except:
         name = "Rivet"
         level = l
     ## Fix name
     if name != "Rivet" and not name.startswith("Rivet."):
         name = "Rivet." + name
     try:
         ## Get right error type
         level = rivet.LEVELS.get(level.upper(), None)
         logging.debug("Setting log level: %s %d" % (name, level))
         rivet.setLogLevel(name, level)
     except:
         logging.warning("Couldn't process logging string '%s'" % l)
 
 
-## We allow comma-separated lists of analysis names -- normalise the list here
-newanas = []
-for a in opts.ANALYSES:
-    if "," in a:
-        newanas += a.split(",")
-    else:
-        newanas.append(a)
-opts.ANALYSES = newanas
 
+############################
+## Listing available analyses/keywords
 
-## Parse supplied cross-section
-if opts.CROSS_SECTION is not None:
-    xsstr = opts.CROSS_SECTION
-    try:
-        opts.CROSS_SECTION = float(xsstr)
-    except:
-        import re
-        suffmatch = re.search(r"[^\d.]", xsstr)
-        if not suffmatch:
-            raise ValueError("Bad cross-section string: %s" % xsstr)
-        factor = base = None
-        suffstart = suffmatch.start()
-        if suffstart != -1:
-            base = xsstr[:suffstart]
-            suffix = xsstr[suffstart:].lower()
-            if suffix == "mb":
-                factor = 1e+9
-            elif suffix == "mub":
-                factor = 1e+6
-            elif suffix == "nb":
-                factor = 1e+3
-            elif suffix == "pb":
-                factor = 1
-            elif suffix == "fb":
-                factor = 1e-3
-            elif suffix == "ab":
-                factor = 1e-6
-        if factor is None or base is None:
-            raise ValueError("Bad cross-section string: %s" % xsstr)
-        xs = float(base) * factor
-        opts.CROSS_SECTION = xs
 
+def getAnalysesByKeyword(alist, kstring):
+    add, veto, ret = [], [], []
+    bits = [i for i in kstring.replace("^@", "@^").split("@") if len(i) > 0]
+    for b in bits:
+        if b.startswith("^"):
+            veto.append(b.strip("^"))
+        else:
+            add.append(b)
 
-## Print the available CLI options!
-#if opts.LIST_OPTIONS:
-#    for o in parser.option_list:
-#        print o.get_opt_string()
-#    sys.exit(0)
+    add = set(add)
+    veto = set(veto)
 
-
-## Set up signal handling
-RECVD_KILL_SIGNAL = None
-def handleKillSignal(signum, frame):
-    "Declare us as having been signalled, and return to default handling behaviour"
-    global RECVD_KILL_SIGNAL
-    logging.critical("Signal handler called with signal " + str(signum))
-    RECVD_KILL_SIGNAL = signum
-    signal.signal(signum, signal.SIG_DFL)
-## Signals to handle
-signal.signal(signal.SIGTERM, handleKillSignal);
-signal.signal(signal.SIGHUP,  handleKillSignal);
-signal.signal(signal.SIGINT,  handleKillSignal);
-signal.signal(signal.SIGUSR1, handleKillSignal);
-signal.signal(signal.SIGUSR2, handleKillSignal);
-try:
-    signal.signal(signal.SIGXCPU, handleKillSignal);
-except:
-    pass
-
-
-## Override/modify analysis search path
-if opts.ANALYSIS_PATH:
-    rivet.setAnalysisLibPaths(opts.ANALYSIS_PATH.split(":"))
-    rivet.setAnalysisDataPaths(opts.ANALYSIS_PATH.split(":"))
-if opts.ANALYSIS_PATH_APPEND:
-    for ap in opts.ANALYSIS_PATH_APPEND.split(":"):
-        rivet.addAnalysisLibPath(ap)
-        rivet.addAnalysisDataPath(ap)
-if opts.ANALYSIS_PATH_PWD:
-    rivet.addAnalysisLibPath(os.path.abspath("."))
-    rivet.addAnalysisDataPath(os.path.abspath("."))
+    for a in alist:
+        kwds = set([i.lower() for i in rivet.AnalysisLoader.getAnalysis(a).keywords()])
+        if kwds.intersection(veto) and len(kwds.intersection(add)) == len(list(add)):
+            ret.append(a)
+    return ret
 
 
 ## List of analyses
 all_analyses = rivet.AnalysisLoader.analysisNames()
 if opts.LIST_ANALYSES:
     ## Treat args as case-insensitive regexes if present
     regexes = None
     if args:
         import re
         regexes = [re.compile(arg, re.I) for arg in args]
     try:
         # import tempfile, subprocess
         # tf, tfpath = tempfile.mkstemp(prefix="rivet-list.")
         for aname in all_analyses:
             if not regexes:
                 toshow = True
             else:
                 toshow = False
                 for regex in regexes:
                     if regex.search(aname):
                         toshow = True
                         break
             if toshow:
                 msg = aname
                 if opts.LOGLEVEL <= logging.INFO:
                     a = rivet.AnalysisLoader.getAnalysis(aname)
                     st = "" if a.status() == "VALIDATED" else ("[" + a.status() + "] ")
                     msg = "%-25s   %s" % (aname, st + rivet.util.detex(a.summary()))
+                    if opts.LOGLEVEL < logging.INFO:
+                        if a.keywords():
+                            msg += "  [" + " ".join(a.keywords()) + "]"
+                        if a.luminosityfb():
+                            msg += "  [ \int L = %s fb^{-1} ]"%a.luminosityfb()
                 print msg
                 #os.write(tf, msg + "\n")
         # if os.path.getsize(tfpath) > 0:
         #     pager = subprocess.Popen(["less", "-FX", tfpath]) #, stdin=subprocess.PIPE)
         #     pager.communicate()
     finally:
         # os.unlink(tfpath) #< always clean up
         pass
     sys.exit(0)
 
 
+def getKeywords(alist):
+    all_keywords = []
+    for a in alist:
+        all_keywords.extend(rivet.AnalysisLoader.getAnalysis(a).keywords())
+    all_keywords = [i.lower() for i in all_keywords]
+    return sorted(list(set(all_keywords)))
+
+
+## List keywords
+if opts.LIST_KEYWORDS:
+    # a = rivet.AnalysisLoader.getAnalysis(aname)
+    for k in getKeywords(all_analyses):
+        print k
+    sys.exit(0)
+
+
 ## Show analyses' details
 if len(opts.SHOW_ANALYSES) > 0:
     toshow = []
     for i, a in enumerate(opts.SHOW_ANALYSES):
         a_up = a.upper()
         if a_up in all_analyses and a_up not in toshow:
             toshow.append(a_up)
         else:
             ## Treat as a case-insensitive regex
             import re
             regex = re.compile(a, re.I)
             for ana in all_analyses:
                 if regex.search(ana) and a_up not in toshow:
                     toshow.append(ana)
 
     msgs = []
     for i, name in enumerate(sorted(toshow)):
         import textwrap
         ana = rivet.AnalysisLoader.getAnalysis(name)
 
         msg = ""
         msg += name + "\n"
         msg += (len(name) * "=") + "\n\n"
         msg += rivet.util.detex(ana.summary()) + "\n\n"
         msg += "Status: " + ana.status() + "\n\n"
 
         # TODO: reduce to only show Inspire in v3
         if ana.inspireId():
             msg += "Inspire ID: " + ana.inspireId() + "\n"
             msg += "Inspire URL: http://inspire-hep.net/record/" + ana.inspireId() + "\n"
             msg += "HepData URL: http://hepdata.cedar.ac.uk/view/ins" + ana.inspireId() + "\n"
         elif ana.spiresId():
             msg += "Spires ID: " + ana.spiresId() + "\n"
             msg += "Inspire URL: http://inspire-hep.net/search?p=find+key+" + ana.spiresId() + "\n"
             msg += "HepData URL: http://hepdata.cedar.ac.uk/view/irn" + ana.spiresId() + "\n"
 
         if ana.experiment():
             msg += "Experiment: " + ana.experiment()
             if ana.collider():
                 msg += "(%s)" % ana.collider()
             msg += "\n"
 
         if ana.year():
             msg += "Year of publication: " + ana.year() + "\n"
 
         msg += "Authors:\n"
         for a in ana.authors():
             msg += "  " + a + "\n"
         msg += "\n"
 
         msg += "Description:\n"
         twrap = textwrap.TextWrapper(width=75, initial_indent=2*" ", subsequent_indent=2*" ")
         msg += twrap.fill(rivet.util.detex(ana.description())) + "\n\n"
 
         # TODO: move this formatting into Analysis or a helper function?
         if ana.requiredBeams():
             def pid_to_str(pid):
                 if pid == 11:
                     return "e-"
                 elif pid == -11:
                     return "e+"
                 elif pid == 2212:
                     return "p+"
                 elif pid == -2212:
                     return "p-"
                 elif pid == 10000:
                     return "*"
                 else:
                     return str(pid)
             beamstrs = []
             for bp in ana.requiredBeams():
                 beamstrs.append(pid_to_str(bp[0]) + " " + pid_to_str(bp[1]))
             msg += "Beams:" + ", ".join(beamstrs) + "\n"
 
         if ana.requiredEnergies():
             msg += "Beam energies:" + "; ".join(["(%0.1f, %0.1f) GeV\n" % (epair[0], epair[1]) for epair in ana.requiredEnergies()])
         else:
             msg += "Beam energies: ANY\n"
 
         if ana.runInfo():
             msg += "Run details:\n"
             twrap = textwrap.TextWrapper(width=75, initial_indent=2*" ", subsequent_indent=4*" ")
             for l in ana.runInfo().split("\n"):
                 msg += twrap.fill(l) + "\n"
 
+        if ana.luminosityfb():
+            msg+= "\nIntegrated data luminosity = %s inverse fb.\n"%ana.luminosityfb()
+
+        if ana.keywords():
+            msg += "\nAnalysis keywords:"
+            for k in ana.keywords():
+                msg += " %s"%k
+            msg+= "\n\n"
+
         if ana.references():
             msg += "\n" + "References:"
             for r in ana.references():
                 url = None
                 if r.startswith("arXiv:"):
                     code = r.split()[0].replace("arXiv:", "")
                     url = "http://arxiv.org/abs/" + code
                 elif r.startswith("doi:"):
                     code = r.replace("doi:", "")
                     url = "http://dx.doi.org/" + code
                 if url is not None:
                     r += " - " + url
                 msg += "  " + r + "\n"
 
         ## Add to the output
         msgs.append(msg)
 
-
     ## Write the combined messages to a temporary file and page it
     if msgs:
         try:
             import tempfile, subprocess
             tffd, tfpath = tempfile.mkstemp(prefix="rivet-show.")
             os.write(tffd, "\n\n".join(msgs))
             if sys.stdout.isatty():
                 pager = subprocess.Popen(["less", "-FX", tfpath]) #, stdin=subprocess.PIPE)
                 pager.communicate()
             else:
                 f = open(tfpath)
                 print f.read()
                 f.close()
         finally:
             os.unlink(tfpath) #< always clean up
     sys.exit(0)
 
 
+
+############################
+## Actual analysis runs
+
+
+
+## We allow comma-separated lists of analysis names -- normalise the list here
+newanas = []
+for a in opts.ANALYSES:
+    if "," in a:
+        newanas += a.split(",")
+    elif "@" in a: #< NB. this bans combination of ana lists and keywords in a single arg
+        temp = getAnalysesByKeyword(all_analyses, a)
+        for i in temp:
+            newanas.append(i)
+    else:
+        newanas.append(a)
+opts.ANALYSES = newanas
+
+
+## Parse supplied cross-section
+if opts.CROSS_SECTION is not None:
+    xsstr = opts.CROSS_SECTION
+    try:
+        opts.CROSS_SECTION = float(xsstr)
+    except:
+        import re
+        suffmatch = re.search(r"[^\d.]", xsstr)
+        if not suffmatch:
+            raise ValueError("Bad cross-section string: %s" % xsstr)
+        factor = base = None
+        suffstart = suffmatch.start()
+        if suffstart != -1:
+            base = xsstr[:suffstart]
+            suffix = xsstr[suffstart:].lower()
+            if suffix == "mb":
+                factor = 1e+9
+            elif suffix == "mub":
+                factor = 1e+6
+            elif suffix == "nb":
+                factor = 1e+3
+            elif suffix == "pb":
+                factor = 1
+            elif suffix == "fb":
+                factor = 1e-3
+            elif suffix == "ab":
+                factor = 1e-6
+        if factor is None or base is None:
+            raise ValueError("Bad cross-section string: %s" % xsstr)
+        xs = float(base) * factor
+        opts.CROSS_SECTION = xs
+
+
+## Print the available CLI options!
+#if opts.LIST_OPTIONS:
+#    for o in parser.option_list:
+#        print o.get_opt_string()
+#    sys.exit(0)
+
+
+## Set up signal handling
+RECVD_KILL_SIGNAL = None
+def handleKillSignal(signum, frame):
+    "Declare us as having been signalled, and return to default handling behaviour"
+    global RECVD_KILL_SIGNAL
+    logging.critical("Signal handler called with signal " + str(signum))
+    RECVD_KILL_SIGNAL = signum
+    signal.signal(signum, signal.SIG_DFL)
+## Signals to handle
+signal.signal(signal.SIGTERM, handleKillSignal);
+signal.signal(signal.SIGHUP,  handleKillSignal);
+signal.signal(signal.SIGINT,  handleKillSignal);
+signal.signal(signal.SIGUSR1, handleKillSignal);
+signal.signal(signal.SIGUSR2, handleKillSignal);
+try:
+    signal.signal(signal.SIGXCPU, handleKillSignal);
+except:
+    pass
+
+
+
 ## Identify HepMC files/streams
 ## TODO: check readability, deal with stdin
 if len(args) > 0:
     HEPMCFILES = args
 else:
     HEPMCFILES = ["-"]
 
 
 ## Event number logging
 def logNEvt(n, starttime, maxevtnum):
     if n % 10000 == 0:
         nevtloglevel = logging.CRITICAL
     elif n % 1000 == 0:
         nevtloglevel = logging.WARNING
     elif n % 100 == 0:
         nevtloglevel = logging.INFO
     else:
         nevtloglevel = logging.DEBUG
     currenttime = datetime.datetime.now().replace(microsecond=0)
     elapsedtime = currenttime - starttime
     logging.log(nevtloglevel, "Event %d (%s elapsed)" % (n, str(elapsedtime)))
     # if maxevtnum is None:
     #     logging.log(nevtloglevel, "Event %d (%s elapsed)" % (n, str(elapsedtime)))
     # else:
     #     remainingtime = (maxevtnum-n) * elapsedtime.total_seconds() / float(n)
     #     eta = time.strftime("%a %b %d %H:%M", datetime.localtime(currenttime + remainingtime))
     #     logging.log(nevtloglevel, "Event %d (%d s elapsed / %d s left) -> ETA: %s" %
     #                 (n, elapsedtime, remainingtime, eta))
 
 
 ## Do some checks on output histo file, before we stat the event loop
 histo_parentdir = os.path.dirname(os.path.abspath(opts.HISTOFILE))
 if not os.path.exists(histo_parentdir):
   logging.error('Parent path of output histogram file does not exist: %s\nExiting.' % histo_parentdir)
   sys.exit(4)
 if not os.access(histo_parentdir,os.W_OK):
   logging.error('Insufficient permissions to write output histogram file to directory %s\nExiting.' % histo_parentdir)
   sys.exit(4)
 
 
 ## Set up analysis handler
 RUNNAME = opts.RUN_NAME or ""
 ah = rivet.AnalysisHandler(RUNNAME)
 ah.setIgnoreBeams(opts.IGNORE_BEAMS)
 for a in opts.ANALYSES:
     ## Print warning message and exit if not a valid analysis name
     if not a in all_analyses:
         logging.warning("'%s' is not a known Rivet analysis! Do you need to set RIVET_ANALYSIS_PATH or use the --pwd switch?\n" % a)
         # TODO: lay out more neatly, or even try for a "did you mean XXXX?" heuristic?
         logging.warning("There are %d currently available analyses:\n" % len(all_analyses) + ", ".join(all_analyses))
         sys.exit(1)
     logging.debug("Adding analysis '%s'" % a)
     ah.addAnalysis(a)
 
 
 if opts.SHOW_BIBTEX:
     bibs = []
     for aname in sorted(ah.analysisNames()):
         ana = rivet.AnalysisLoader.getAnalysis(aname)
         bibs.append("% " + aname + "\n" + ana.bibTeX())
     if bibs:
         print "\nBibTeX for used Rivet analyses:\n"
         print "% --------------------------\n"
         print "\n\n".join(bibs) + "\n"
         print "% --------------------------\n"
 
 
 ## Read and process events
 run = rivet.Run(ah)
 if opts.CROSS_SECTION is not None:
     logging.info("User-supplied cross-section = %e pb" % opts.CROSS_SECTION)
     run.setCrossSection(opts.CROSS_SECTION)
 if opts.LIST_USED_ANALYSES is not None:
     run.setListAnalyses(opts.LIST_USED_ANALYSES)
 
 ## Print platform type
 import platform
 starttime = datetime.datetime.now().replace(microsecond=0)
 logging.info("Rivet %s running on machine %s (%s) at %s" % \
              (rivet.version(), platform.node(), platform.machine(), str(starttime)))
 
 
 def min_nonnull(a, b):
     "A version of min which considers None to always be greater than a real number"
     rtn = min(a, b)
     if rtn is not None:
         return rtn
     if a is not None:
         return a
     return b
 
 
 ## Set up an event timeout handler
 class TimeoutException(Exception):
     pass
 if opts.EVENT_TIMEOUT or opts.RUN_TIMEOUT:
     def evttimeouthandler(signum, frame):
         logging.warn("It has taken more than %d secs to get an event! Is the input event stream working?" %
                      min_nonnull(opts.EVENT_TIMEOUT, opts.RUN_TIMEOUT))
         raise TimeoutException("Event timeout")
     signal.signal(signal.SIGALRM, evttimeouthandler)
 
 
 ## Init run based on one event
 hepmcfile = HEPMCFILES[0]
 ## Apply a file-level weight derived from the filename
 hepmcfileweight = 1.0
 if ":" in hepmcfile:
     hepmcfile, hepmcfileweight = hepmcfile.rsplit(":", 1)
     hepmcfileweight = float(hepmcfileweight)
 try:
     if opts.EVENT_TIMEOUT or opts.RUN_TIMEOUT:
         signal.alarm(min_nonnull(opts.EVENT_TIMEOUT, opts.RUN_TIMEOUT))
     init_ok = run.init(hepmcfile, hepmcfileweight)
     signal.alarm(0)
     if not init_ok:
         logging.error("Failed to initialise using event file '%s'... exiting" % hepmcfile)
         sys.exit(2)
 except TimeoutException, te:
     logging.error("Timeout in initialisation from event file '%s'... exiting" % hepmcfile)
     sys.exit(3)
 
 
 ## Event loop
 evtnum = 0
 for fileidx, hepmcfile in enumerate(HEPMCFILES):
     ## Apply a file-level weight derived from the filename
     hepmcfileweight = 1.0
     if ":" in hepmcfile:
         hepmcfile, hepmcfileweight = hepmcfile.rsplit(":", 1)
         hepmcfileweight = float(hepmcfileweight)
 
     ## Open next HepMC file (NB. this doesn't apply to the first file: it was already used for the run init)
     if fileidx > 0:
         run.openFile(hepmcfile, hepmcfileweight)
         if not run.readEvent():
             logging.warning("Could not read events from '%s'" % hepmcfile)
             continue
 
     ## Announce new file
     msg = "Reading events from '%s'" % hepmcfile
     if hepmcfileweight != 1.0:
         msg += " (file weight = %e)" % hepmcfileweight
     logging.info(msg)
 
     ## The event loop
     while opts.MAXEVTNUM is None or evtnum-opts.EVTSKIPNUM < opts.MAXEVTNUM:
         evtnum += 1
 
         ## Optional event skipping
         if evtnum <= opts.EVTSKIPNUM:
             logging.debug("Skipping event #%i" % evtnum)
             run.skipEvent();
             continue
 
         ## Only log the event number once we're actually processing
         logNEvt(evtnum, starttime, opts.MAXEVTNUM)
 
         ## Process this event
         processed_ok = run.processEvent()
         if not processed_ok:
             logging.warn("Event processing failed for evt #%i!" % evtnum)
             break
 
         ## Set flag to exit event loop if run timeout exceeded
         if opts.RUN_TIMEOUT and (time.time() - starttime) > opts.RUN_TIMEOUT:
             logging.warning("Run timeout of %d secs exceeded... exiting gracefully" % opts.RUN_TIMEOUT)
             RECVD_KILL_SIGNAL = True
 
         ## Exit the loop if signalled
         if RECVD_KILL_SIGNAL is not None:
             break
 
         ## Read next event (with timeout handling if requested)
         try:
             if opts.EVENT_TIMEOUT:
                 signal.alarm(opts.EVENT_TIMEOUT)
             read_ok = run.readEvent()
             signal.alarm(0)
             if not read_ok:
                 break
         except TimeoutException, te:
             logging.error("Timeout in reading event from '%s'... exiting" % hepmcfile)
             sys.exit(3)
 
         ## Write a histo file snapshot if appropriate
         if opts.HISTO_WRITE_INTERVAL is not None and opts.HISTO_WRITE_INTERVAL > 0:
             if evtnum % opts.HISTO_WRITE_INTERVAL == 0:
                 ah.writeData(opts.HISTOFILE)
 
 
 ## Print end-of-loop messages
 loopendtime = datetime.datetime.now().replace(microsecond=0)
 logging.info("Finished event loop at %s" % str(loopendtime))
 logging.info("Cross-section = %e pb" % ah.crossSection())
 print
 
 
 ## Finalize and write out data file
 run.finalize()
 if opts.WRITE_DATA:
     ah.writeData(opts.HISTOFILE)
 print
 endtime = datetime.datetime.now().replace(microsecond=0)
 logging.info("Rivet run completed at %s, time elapsed = %s" % (str(endtime), str(endtime-starttime)))
 print
 logging.info("Histograms written to %s" % os.path.abspath(opts.HISTOFILE))
diff --git a/bin/rivet-buildplugin.in b/bin/rivet-buildplugin.in
--- a/bin/rivet-buildplugin.in
+++ b/bin/rivet-buildplugin.in
@@ -1,149 +1,150 @@
 #!/usr/bin/env bash
 ## -*- sh -*-
 ## @configure_input@
 
-## Print help
+## Get program name
 PROG=$(basename $0)
-tmp=$(echo $* | egrep -- '--\<help\>|-\<h\>')
-if test $# -lt 1 || test -n "$tmp"; then
+
+## Print help message
+tmp=$(echo "$*" | egrep -- '--\<help\>|-\<h\>')
+if test -n "$tmp"; then # || test $# -lt 1
     echo "$PROG: compilation helper for Rivet analysis plugins"
     echo
     echo "Usage: $PROG [<libname>] <source1> [<source2> [compiler_flags] ...]"
     echo
     echo "<libname> can be a path, provided the filename is of the form 'Rivet*.so'"
     echo "If <libname> is not specified, the default name is 'RivetAnalysis.so'."
     echo
     echo "To make special build variations you can add appropriate compiler flags"
     echo "to the arguments and these will be passed directly to the compiler. For"
     echo "example, for a debug build of your plugin library, add '-g', and for a"
     echo "32 bit build on a 64 bit system add '-m32'."
     echo
     echo "Options:"
     echo "  -h | --help: display this help message"
     echo "  --with-root: add ROOT link options (requires root-config on system)"
     echo "  --cmd|--dry-run: just print the generated compiler command, do not execute"
     echo
     echo "TODO:"
     echo "  * is there a GCC option to parallelise the single-command compilation?"
     test -n "$tmp"
     exit $?
 fi
 
 ## These variables need to exist
 ## Note no use of $DESTDIR... we ignore it so that destdir can be used
 ## for temp installs later copied to /
 prefix=@prefix@
 exec_prefix=@exec_prefix@
 datarootdir=@datarootdir@
 
 ## Work out shared library build flags by platform
 shared_flags=
 SWVERS=$(which sw_vers 2> /dev/null)
 if test "$SWVERS" && test -x "$SWVERS"; then
   ## Mac OS X
   shared_flags="-undefined dynamic_lookup -bundle"
 else
   ## Unix
   shared_flags="-shared -fPIC"
 fi
 
 ## Get Rivet system C++ compiler (fall back to $CXX and then g++ if needed)
 mycxx=g++
 rivetcxx=$(which $(echo "@RIVETCXX@" | awk '{print $1}') 2> /dev/null)
 abscxx=$(which "$CXX" 2> /dev/null)
 if [[ -x "$rivetcxx" ]]; then
     mycxx="@CXX@"
 elif [[ -x "$abscxx" ]]; then
     mycxx=$CXX
 fi
 
 ## Get Rivet system C++ compiler flags
 mycxxflags=""
 if [[ -n "@AM_CXXFLAGS@" ]]; then
     mycxxflags="@AM_CXXFLAGS@"
 fi
 if [[ -n "@RIVETCXXFLAGS@" ]]; then
     mycxxflags="$mycxxflags @RIVETCXXFLAGS@"
 fi
 
 ## Get Rivet system C preprocessor flags (duplicating that in rivet-config.in)
 mycppflags=""
 prefix="@prefix@"
 exec_prefix="@exec_prefix@"
 irivet="@includedir@"
 test -n "$irivet" && mycppflags="$mycppflags -I${irivet}"
 ihepmc="@HEPMCINCPATH@"
 test -n "$ihepmc" && mycppflags="$mycppflags -I${ihepmc}"
 iyoda="@YODAINCPATH@"
 test -n "$iyoda" && mycppflags="$mycppflags -I${iyoda}"
 ifastjet="@FASTJETINCPATH@"
 test -n "$ifastjet" && mycppflags="$mycppflags -I${ifastjet}"
 igsl="@GSLINCPATH@"
 test -n "$igsl" && mycppflags="$mycppflags -I${igsl}"
 # iboost="@BOOST_CPPFLAGS@"
 # test -n "$iboost" && mycppflags="$mycppflags ${iboost}"
 
 
 ## Get Rivet system linker flags (duplicating that in rivet-config.in)
 myldflags=""
 lrivet="@libdir@"
 test -n "$lrivet" && myldflags="$myldflags -L${lrivet}"
 lhepmc="@HEPMCLIBPATH@"
 test -n "$lhepmc" && myldflags="$myldflags -L${lhepmc}"
 lyoda="@YODALIBPATH@"
 test -n "$lyoda" && myldflags="$myldflags -L${lyoda}"
 lfastjet="@FASTJETCONFIGLIBADD@"
 test -n "$lfastjet" && myldflags="$myldflags ${lfastjet}"
 ## Detect whether the linker accepts the --no-as-needed flag and prepend the linker flag with it if possible
 if (cd /tmp && echo -e 'int main() { return 0; }' > $$.cc; $mycxx -Wl,--no-as-needed $$.cc -o $$ 2> /dev/null); then
   myldflags="-Wl,--no-as-needed $myldflags"
 fi
 
 
 ## Link against ROOT if requested
-with_root=$(echo $* | egrep -- '--\<with-root\>')
+with_root=$(echo "$*" | egrep -- '--\<with-root\>')
 # echo $with_root
-tmp=${@//--with-root/}
-set $tmp #< Set positional params
-
+tmp="${*//--with-root/}"
 
 ## Just show the compiler command rather than execute it, if requested
-only_show=$(echo $* | egrep -- '--\<cmd\>|--\<dry-run\>')
+only_show=$(echo "$tmp" | egrep -- '--\<cmd\>|--\<dry-run\>')
 # echo $only_show
-tmp=$(echo $@ | sed -e 's/--cmd//g' -e 's/--dry-run//g')
-set $tmp #< Set positional params
+tmp=$(echo "$tmp" | sed -e 's/--cmd//g' -e 's/--dry-run//g')
 
+## Reset positional params now that flags have been removed
+set -- $tmp #< deliberately unquoted: re-split cleaned args into positional params; '--' guards against a leading '-'
 
 ## Get and check the library name
 libname=$1
 match1=$(basename "$libname" | egrep '^.*\.so')
 match2=$(basename "$libname" | egrep '^Rivet.*\.so')
 if test -n "$match1"; then
     if test -z "$match2"; then
         echo "Library name '$libname' does not have the required 'Rivet*.so' name pattern" 1>&2
         exit 1
     fi
     ## If we're using the first arg as the library name, shift it off the positional list
     shift
 else
     if [[ -z $only_show ]]; then
         echo "Using default library name 'RivetAnalysis.so'"
     fi
     libname="RivetAnalysis.so"
 fi
 
 
 ## Get the source files (and more flags)
-sources_and_flags="$@ -lRivet"
+sources_and_flags="$* -lRivet"
 if [[ -n $with_root ]]; then
     root_flags=$(root-config --libs --cflags 2> /dev/null)
     # echo $root_flags
     sources_and_flags="$root_flags $sources_and_flags"
 fi
 
 ## Build
 cmd="$mycxx -o \"$libname\" $shared_flags $mycppflags $mycxxflags $myldflags $sources_and_flags"
 echo $cmd
 if [[ -z $only_show ]]; then
     eval $cmd
 fi
diff --git a/bin/rivet-cmphistos b/bin/rivet-cmphistos
--- a/bin/rivet-cmphistos
+++ b/bin/rivet-cmphistos
@@ -1,474 +1,474 @@
 #! /usr/bin/env python
 
 """\
 %prog - generate histogram comparison plots
 
 USAGE:
  %prog [options] yodafile1[:'PlotOption1=Value':'PlotOption2=Value':...] [path/to/yodafile2 ...] [PLOT:Key1=Val1:...]
 
 where the plot options are described in the make-plots manual in the HISTOGRAM
 section.
 
 ENVIRONMENT:
  * RIVET_ANALYSIS_PATH: list of paths to be searched for plugin
      analysis libraries at runtime
  * RIVET_DATA_PATH: list of paths to be searched for data files
 """
 
 import rivet, yoda, sys, os
 rivet.util.check_python_version()
 rivet.util.set_process_name(os.path.basename(__file__))
 
 
 class Plot(dict):
     "A tiny Plot object to help writing out the head in the .dat file"
     def __repr__(self):
         return "# BEGIN PLOT\n" + "\n".join("%s=%s" % (k,v) for k,v in self.iteritems()) + "\n# END PLOT\n\n"
 
 
 def sanitiseString(s):
     #s = s.replace('_','\\_')
     #s = s.replace('^','\\^{}')
     #s = s.replace('$','\\$')
     s = s.replace('#','\\#')
     s = s.replace('%','\\%')
     return s
 
 
 def getCommandLineOptions():
     "Parse command line options"
     from optparse import OptionParser, OptionGroup
     parser = OptionParser(usage=__doc__)
 
     parser.add_option('-o', '--outdir', dest='OUTDIR',
                       default='.', help='write data files into this directory')
     parser.add_option("--hier-out", action="store_true", dest="HIER_OUTPUT", default=False,
                       help="write output dat files into a directory hierarchy which matches the analysis paths")
     parser.add_option('--plotinfodir', dest='PLOTINFODIRS', action='append',
                       default=['.'], help='directory which may contain plot header information (in addition '
                       'to standard Rivet search paths)')
     parser.add_option("--no-rivet-refs", dest="RIVETREFS", action="store_false",
                       default=True, help="don't use Rivet reference data files")
     # parser.add_option("--refid", dest="REF_ID",
     #                   default="REF", help="ID of reference data set (file path for non-REF data)")
     parser.add_option("--reftitle", dest="REFTITLE",
                         default='Data', help="Reference data legend entry")
     parser.add_option("--pwd", dest="PATH_PWD", action="store_true", default=False,
                       help="append the current directory (pwd) to the analysis/data search paths (cf. $RIVET_ANALYSIS/DATA_PATH)")
     parser.add_option("-v", "--verbose", dest="VERBOSE", action="store_true", default=False,
                       help="produce debug output to the terminal")
 
     stygroup = OptionGroup(parser, "Plot style")
     stygroup.add_option("--linear", action="store_true", dest="LINEAR",
                         default=False, help="plot with linear scale")
     stygroup.add_option("--mc-errs", action="store_true", dest="MC_ERRS",
                         default=False, help="show vertical error bars on the MC lines")
     stygroup.add_option("--no-ratio", action="store_false", dest="RATIO",
                         default=True, help="disable the ratio plot")
     stygroup.add_option("--rel-ratio", action="store_true", dest="RATIO_DEVIATION",
                         default=False, help="show the ratio plots scaled to the ref error")
     stygroup.add_option("--no-plottitle", action="store_true", dest="NOPLOTTITLE",
                         default=False, help="don't show the plot title on the plot "
                         "(useful when the plot description should only be given in a caption)")
     stygroup.add_option("--style", dest="STYLE", default="default",
                         help="change plotting style: default|bw|talk")
     stygroup.add_option("-c", "--config", dest="CONFIGFILES", action="append", default=["~/.make-plots"],
                         help="additional plot config file(s). Settings will be included in the output configuration.")
     parser.add_option_group(stygroup)
 
     selgroup = OptionGroup(parser, "Selective plotting")
     # selgroup.add_option("--show-single", dest="SHOW_SINGLE", choices=("no", "ref", "mc", "all"),
     #                     default="mc", help="control if a plot file is made if there is only one dataset to be plotted "
     #                     "[default=%default]. If the value is 'no', single plots are always skipped, for 'ref' and 'mc', "
     #                     "the plot will be written only if the single plot is a reference plot or an MC "
     #                     "plot respectively, and 'all' will always create single plot files.\n The 'ref' and 'all' values "
     #                     "should be used with great care, as they will also write out plot files for all reference "
     #                     "histograms without MC traces: combined with the -R/--rivet-refs flag, this is a great way to "
     #                     "write out several thousand irrelevant reference data histograms!")
     # selgroup.add_option("--show-mc-only", "--all", action="store_true", dest="SHOW_IF_MC_ONLY",
     #                     default=False, help="make a plot file even if there is only one dataset to be plotted and "
     #                     "it is an MC one. Deprecated and will be removed: use --show-single instead, which overrides this.")
     # # selgroup.add_option("-l", "--histogram-list", dest="HISTOGRAMLIST",
     # #                     default=None, help="specify a file containing a list of histograms to plot, in the format "
     # #                     "/ANALYSIS_ID/histoname, one per line, e.g. '/DELPHI_1996_S3430090/d01-x01-y01'.")
     selgroup.add_option("-m", "--match", action="append",
                         help="only write out histograms whose $path/$name string matches these regexes. The argument "
                         "may also be a text file.",
                         dest="PATHPATTERNS")
     selgroup.add_option("-M", "--unmatch", action="append",
                         help="exclude histograms whose $path/$name string matches these regexes",
                         dest="PATHUNPATTERNS")
     parser.add_option_group(selgroup)
 
     return parser
 
 
 def getHistos(filelist):
     """Loop over all input files. Only use the first occurrence of any REF-histogram
     and the first occurrence in each MC file for every MC-histogram."""
     refhistos, mchistos = {}, {}
     for infile in filelist:
         mchistos.setdefault(infile, {})
         analysisobjects = yoda.read(infile, patterns=opts.PATHPATTERNS, unpatterns=opts.PATHUNPATTERNS)
         #print analysisobjects
         for path, ao in analysisobjects.iteritems():
             ## We can't plot non-histograms yet
             # TODO: support counter plotting with a faked x (or y) position and forced plot width/height
             if ao.type not in ("Histo1D", "Histo2D", "Profile1D", "Profile2D", "Scatter2D", "Scatter3D"):
                 continue
 
             ## Make a path object and ensure the path is in standard form
             try:
                 aop = rivet.AOPath(path)
             except Exception, e:
                 #print e
                 print "Found analysis object with non-standard path structure:", path, "... skipping"
                 continue
 
             ## We don't plot data objects with path components hidden by an underscore prefix
             if aop.istmp():
                 continue
 
             ## Add it to the ref or mc paths, if this path isn't already known
             basepath = aop.basepath(keepref=False)
             if aop.isref() and not refhistos.has_key(basepath):
                 ao.path = aop.varpath(keepref=False, defaultvarid=0)
                 refhistos[basepath] = ao
             else: #if not mchistos[infile].has_key(basepath):
                 mchistos[infile].setdefault(basepath, {})[aop.varid(0)] = ao
 
     return refhistos, mchistos
 
 
 def getRivetRefData(anas=None):
     "Find all Rivet reference data files"
     refhistos = {}
     rivet_data_dirs = rivet.getAnalysisRefPaths()
     dirlist = []
     for d in rivet_data_dirs:
         if anas is None:
             import glob
             dirlist.append(glob.glob(os.path.join(d, '*.yoda')))
         else:
             dirlist.append([os.path.join(d, a+'.yoda') for a in anas])
     for filelist in dirlist:
         # TODO: delegate to getHistos?
         for infile in filelist:
             analysisobjects = yoda.read(infile, patterns=opts.PATHPATTERNS, unpatterns=opts.PATHUNPATTERNS)
             for path, ao in analysisobjects.iteritems():
                 aop = rivet.AOPath(ao.path)
                 if aop.isref():
                     ao.path = aop.basepath(keepref=False)
                     refhistos[ao.path] = ao
     return refhistos
 
 
 def parseArgs(args):
     """Look at the argument list and split it at colons, in order to separate
     the file names from the plotting options. Store the file names and
     file specific plotting options."""
     filelist = []
     plotoptions = {}
     for a in args:
         asplit = a.split(':')
         path = asplit[0]
         filelist.append(path)
         plotoptions[path] = []
         has_title = False
         for i in xrange(1, len(asplit)):
             ## Add 'Title' if there is no = sign before math mode
             if '=' not in asplit[i] or ('$' in asplit[i] and asplit[i].index('$') < asplit[i].index('=')):
                 asplit[i] = 'Title=%s' % asplit[i]
             if asplit[i].startswith('Title='):
                 has_title = True
             plotoptions[path].append(asplit[i])
         if path != "PLOT" and not has_title:
             plotoptions[path].append('Title=%s' % sanitiseString(os.path.basename( os.path.splitext(path)[0] )) )
     return filelist, plotoptions
 
 
 def setStyle(ao, istyle, variation=False):
     """Set default plot styles (color and line width) colors borrowed from Google Ngrams"""
     # LINECOLORS = ['{[HTML]{EE3311}}',  # red (Google uses 'DC3912')
     #               '{[HTML]{3366FF}}',  # blue
     #               '{[HTML]{109618}}',  # green
     #               '{[HTML]{FF9900}}',  # orange
     #               '{[HTML]{990099}}']  # lilac
     LINECOLORS = ['red', 'blue', 'green', 'orange', 'lilac']
     LINESTYLES = ['solid', 'dashed', 'dashdotted', 'dotted']
 
     if opts.STYLE == 'talk':
         ao.setAnnotation('LineWidth', '1pt')
     if opts.STYLE == 'bw':
         LINECOLORS = ['black!90',
                       'black!50',
                       'black!30']
 
     jc = istyle % len(LINECOLORS)
     c = LINECOLORS[jc]
     js = (istyle / len(LINECOLORS)) % len(LINESTYLES)
     s = LINESTYLES[js]
 
     ## If plotting a variation (i.e. band), fade the colour
     if variation:
         c += "!30"
 
     ao.setAnnotation('LineStyle', '%s' % s)
     ao.setAnnotation('LineColor', '%s' % c)
 
 
 def setOptions(ao, options):
     "Set arbitrary annotations"
     for opt in options:
         key, val = opt.split('=', 1)
         ao.setAnnotation(key, val)
 
 
 # TODO: move to rivet.utils
 def mkoutdir(outdir):
     "Function to make output directories"
     if not os.path.exists(outdir):
         try:
             os.makedirs(outdir)
         except:
             msg = "Can't make output directory '%s'" % outdir
             raise Exception(msg)
     if not os.access(outdir, os.W_OK):
         msg = "Can't write to output directory '%s'" % outdir
         raise Exception(msg)
 
 
 def mkOutput(hpath, aos, plot=None, special=None):
     """
     Make the .dat file string. We can't use "yoda.writeFLAT(anaobjects, 'foobar.dat')"
     because the PLOT and SPECIAL blocks don't have a corresponding analysis object.
     """
     output = ''
 
     if plot is not None:
         output += str(plot)
 
     if special is not None:
         output += "\n"
         output += "# BEGIN SPECIAL %s\n" % hpath
         output += special
         output += "# END SPECIAL\n\n"
 
     from cStringIO import StringIO
     sio = StringIO()
     yoda.writeFLAT(aos, sio)
     output += sio.getvalue()
 
     return output
 
 
 def writeOutput(output, h):
     "Choose output file name and dir"
     if opts.HIER_OUTPUT:
         hparts = h.strip("/").split("/", 1)
         ana = "_".join(hparts[:-1]) if len(hparts) > 1 else "ANALYSIS"
         outdir = os.path.join(opts.OUTDIR, ana)
         outfile = '%s.dat' % hparts[-1].replace("/", "_")
     else:
         hparts = h.strip("/").split("/")
         outdir = opts.OUTDIR
         outfile = '%s.dat' % "_".join(hparts)
     mkoutdir(outdir)
     outfilepath = os.path.join(outdir, outfile)
     f = open(outfilepath, 'w')
     f.write(output)
     f.close()
 
 
 #--------------------------------------------------------------------------------------------
 
 
 if __name__ == '__main__':
 
     ## Command line parsing
     parser = getCommandLineOptions()
     opts, args = parser.parse_args()
 
     ## Add pwd to search paths
     if opts.PATH_PWD:
         rivet.addAnalysisLibPath(os.path.abspath("."))
         rivet.addAnalysisDataPath(os.path.abspath("."))
 
     ## Split the input file names and the associated plotting options
     ## given on the command line into two separate lists
     filelist, plotoptions = parseArgs(args)
     ## Remove the PLOT dummy file from the file list
     if "PLOT" in filelist:
         filelist.remove("PLOT")
 
     ## Check that the files exist
     for f in filelist:
         if not os.access(f, os.R_OK):
             print "Error: cannot read from %s" % f
             sys.exit(1)
 
     ## Read the .plot files
     plotdirs = opts.PLOTINFODIRS + [os.path.abspath(os.path.dirname(f)) for f in filelist]
     plotparser = rivet.mkStdPlotParser(plotdirs, opts.CONFIGFILES)
 
     ## Create a list of all histograms to be plotted, and identify if they are 2D histos (which need special plotting)
     try:
         refhistos, mchistos = getHistos(filelist)
     except IOError, e:
         print "File reading error: ", e.strerror
         exit(1)
     hpaths, h2ds = [], []
     for aos in mchistos.values():
         for p in aos.keys():
             if p and p not in hpaths:
                 hpaths.append(p)
             firstaop = aos[p][sorted(aos[p].keys())[0]]
             # TODO: Would be nicer to test via isHisto and dim or similar, or yoda.Scatter/Histo/Profile base classes
             if type(firstaop) in (yoda.Histo2D, yoda.Profile2D) and p not in h2ds:
                 h2ds.append(p)
 
     ## Take reference data from the Rivet search paths, if there is not already
     if opts.RIVETREFS:
         try:
             refhistos2 = getRivetRefData()
         except IOError, e:
             print "File reading error: ", e.strerror
             exit(1)
         refhistos2.update(refhistos)
         refhistos = refhistos2
 
     ## Purge unmatched ref data entries to save memory
     for refhpath in refhistos.keys():
         if refhpath not in hpaths:
             del refhistos[refhpath]
 
 
     ## Now loop over all MC histograms and plot them
     # TODO: factorize much of this into a rivet.utils mkplotfile(mchists, refhist, kwargs, is2d=False) function
     for hpath in hpaths:
         #print 'Currently looking at', h
 
         ## The analysis objects to be plotted
         anaobjects = []
         ## List of histos to be drawn, to sync the legend and plotted lines
         mainlines = []
         varlines = []
         ## Is this a 2D histo?
         is2d = (hpath in h2ds)
         ## Will we be drawing a ratio plot?
         showratio = opts.RATIO and not is2d
 
 
         ## A Plot object to represent the PLOT section in the .dat file
         plot = Plot()
         if not is2d:
             plot['Legend'] = '1'
             plot['LogY'] = '1'
         headers = plotparser.getHeaders(hpath)
         if headers:
             plot.update(headers)
         # for key, val in headers.iteritems():
         #     plot[key] = val
         if plotoptions.has_key("PLOT"):
             for key_val in plotoptions["PLOT"]:
-                key, val = [s.strip() for s in key_val.split("=")]
+                key, val = [s.strip() for s in key_val.split("=",1)]
                 plot[key] = val
         if opts.LINEAR:
             plot['LogY'] = '0'
         if opts.NOPLOTTITLE:
             plot['Title'] = ''
         if showratio and opts.RATIO_DEVIATION:
             plot['RatioPlotMode'] = 'deviation'
         if opts.STYLE == 'talk':
             plot['PlotSize'] = '8,6'
         elif opts.STYLE == 'bw' and showratio:
             plot['RatioPlotErrorBandColor'] = 'black!10'
 
 
         ## Get a special object, if there is one for this path
         special = plotparser.getSpecial(hpath)
 
 
         ## Handle reference data histogram, if there is one
         ratioreference, hasdataref = None, False
         if refhistos.has_key(hpath):
             hasdataref = True
             refdata = refhistos[hpath]
             refdata.setAnnotation('Title', opts.REFTITLE)
             if not is2d:
                 refdata.setAnnotation('ErrorBars', '1')
                 refdata.setAnnotation('PolyMarker', '*')
                 refdata.setAnnotation('ConnectBins', '0')
                 if showratio:
                     ratioreference = hpath
             ## For 1D
             anaobjects.append(refdata)
             mainlines.append(hpath)
             ## For 2D
             if is2d:
                 s = mkOutput(hpath, [refdata], plot, special)
                 writeOutput(s, hpath)
 
 
         ## Loop over the MC files to plot all instances of the histogram
         styleidx = 0
         for infile in filelist:
             if mchistos.has_key(infile) and mchistos[infile].has_key(hpath):
                 hmcs = mchistos[infile][hpath]
                 ## For now, just plot all the different variation histograms (reversed, so [0] is on top)
                 # TODO: calculate and plot an appropriate error band, somehow...
                 for i in sorted(hmcs.keys(), reverse=True):
                     iscanonical = (str(i) == "0")
                     hmc = hmcs[i]
                     ## Default linecolor, linestyle
                     if not is2d:
                         setStyle(hmc, styleidx, not iscanonical)
                         if opts.MC_ERRS:
                             hmc.setAnnotation('ErrorBars', '1')
                     ## Plot defaults from .plot files
                     histopts = plotparser.getHistogramOptions(hpath)
                     if histopts:
                         for key, val in histopts.iteritems():
                             hmc.setAnnotation(key, val)
                     ## Command line plot options
                     setOptions(hmc, plotoptions[infile])
                     ## Set path attribute
                     fullpath = "/"+infile+hpath
                     if not iscanonical:
                         fullpath += "["+str(i)+"]"
                     hmc.setAnnotation('Path', fullpath)
                     ## Add object / path to appropriate lists
                     anaobjects.append(hmc)
                     if iscanonical:
                         mainlines.append(fullpath)
                     else:
                         varlines.append(fullpath)
                     if showratio and ratioreference is None and iscanonical:
                         ratioreference = fullpath
                     ## For 2D, plot each histo now (since overlay makes no sense)
                     if is2d:
                         s = mkOutput(hpath, [hmc], plot, special)
                         writeOutput(s, fullpath)
                 styleidx += 1
 
 
         ## Finally render the combined plots; only show the first one if it's 2D
         # TODO: Only show the first *MC* one if 2D?
         if is2d:
             anaobjects = anaobjects[:1]
         ## Add final attrs to Plot
         plot['DrawOnly'] = ' '.join(varlines + mainlines).strip()
         plot['LegendOnly'] = ' '.join(mainlines).strip()
         if showratio and len(varlines + mainlines) > 1:
             plot['RatioPlot'] = '1'
             plot['RatioPlotReference'] = ratioreference
             if not hasdataref and not plot.has_key("RatioPlotYLabel"):
                 if plot.get('RatioPlotMode', '') == 'deviation':
                     plot['RatioPlotYLabel'] = 'Deviation' #r'$\text{MC}-\text{MC}_\text{ref}$'
                 else:
                     plot['RatioPlotYLabel'] = 'Ratio' #r'$\text{MC}/\text{MC}_\text{ref}$'
 
 
         ## Make the output and write to file
         o = mkOutput(hpath, anaobjects, plot, special)
         writeOutput(o, hpath)
diff --git a/bin/rivet-mkanalysis b/bin/rivet-mkanalysis
--- a/bin/rivet-mkanalysis
+++ b/bin/rivet-mkanalysis
@@ -1,310 +1,310 @@
 #! /usr/bin/env python
 
 """\
 %prog: make templates of analysis source files for Rivet
 
 Usage: %prog [--help|-h] [--srcroot=<srcrootdir>] <analysisname>
 
 Without the --srcroot flag, the analysis files will be created in the current
 directory.
 """
 
 import rivet, sys, os
 rivet.util.check_python_version()
 rivet.util.set_process_name(os.path.basename(__file__))
 import logging
 
 
 ## Handle command line
 from optparse import OptionParser
 parser = OptionParser(usage=__doc__)
 parser.add_option("--srcroot", metavar="DIR", dest="SRCROOT", default=None,
                   help="install the templates into the Rivet source tree (rooted " +
                   "at directory DIR) rather than just creating all in the current dir")
 parser.add_option("-q", "--quiet", dest="LOGLEVEL", default=logging.INFO,
                   action="store_const", const=logging.WARNING, help="only write out warning and error messages")
 parser.add_option("-v", "--verbose", dest="LOGLEVEL", default=logging.INFO,
                   action="store_const", const=logging.DEBUG, help="provide extra debugging messages")
 parser.add_option("-i", "--inline-info", dest="INLINE", action="store_true",
                   default=False, help="Put analysis info into source file instead of separate data file.")
 opts, args = parser.parse_args()
 logging.basicConfig(format="%(msg)s", level=opts.LOGLEVEL)
 ANANAMES = args
 
 
 ## Work out installation paths
 ANAROOT = os.path.abspath(opts.SRCROOT or os.getcwd())
 if not os.access(ANAROOT, os.W_OK):
     logging.error("Can't write to source root directory %s" % ANAROOT)
     sys.exit(1)
 ANASRCDIR = os.getcwd()
 ANAINFODIR = os.getcwd()
 ANAPLOTDIR = os.getcwd()
 if opts.SRCROOT:
     ANASRCDIR = os.path.join(ANAROOT, "src/Analyses")
     ANAINFODIR = os.path.join(ANAROOT, "data/anainfo")
     ANAPLOTDIR = os.path.join(ANAROOT, "data/plotinfo")
     if not (os.path.exists(ANASRCDIR) and os.path.exists(ANAINFODIR) and os.path.exists(ANAPLOTDIR)):
         logging.error("Rivet analysis dirs do not exist under %s" % ANAROOT)
         sys.exit(1)
 if not (os.access(ANASRCDIR, os.W_OK) and os.access(ANAINFODIR, os.W_OK) and os.access(ANAPLOTDIR, os.W_OK)):
     logging.error("Can't write to Rivet analysis dirs under %s" % ANAROOT)
     sys.exit(1)
 
 
 ## Check for disallowed characters in analysis names
 import string
 allowedchars = string.letters + string.digits + "_"
 all_ok = True
 for ananame in ANANAMES:
     for c in ananame:
         if c not in allowedchars:
             logging.error("Analysis name '%s' contains disallowed character '%s'!" % (ananame, c))
             all_ok = False
             break
 if not all_ok:
     logging.error("Exiting... please ensure that all analysis names are valid")
     sys.exit(1)
 
 
 ## Now make each analysis
 for ANANAME in ANANAMES:
     logging.info("Writing templates for %s to %s" % (ANANAME, ANAROOT))
 
     ## Extract some metadata from the name if it matches the standard pattern
     import re
     re_stdana = re.compile(r"^(\w+)_(\d{4})_(I|S)(\d+)$")
     match = re_stdana.match(ANANAME)
     STDANA = False
     ANAEXPT = "<Insert the experiment name>"
     ANACOLLIDER = "<Insert the collider name>"
     ANAYEAR = "<Insert year of publication>"
     INSPIRE_SPIRES = None
     ANAINSPIREID = "<Insert the Inspire ID>"
     if match:
         STDANA = True
         ANAEXPT = match.group(1)
         if ANAEXPT.upper() in ("ALICE", "ATLAS", "CMS", "LHCB"):
             ANACOLLIDER = "LHC"
         elif ANAEXPT.upper() in ("CDF", "D0"):
             ANACOLLIDER = "Tevatron"
         elif ANAEXPT.upper() == "BABAR":
             ANACOLLIDER = "PEP-II"
         elif ANAEXPT.upper() == "BELLE":
             ANACOLLIDER = "KEKB"
         ANAYEAR = match.group(2)
         INSPIRE_SPIRES = match.group(3)
         ANAINSPIREID = match.group(4)
     if INSPIRE_SPIRES == "I":
         ANAREFREPO = "Inspire"
     else:
         ANAREFREPO = "Spires"
     KEYWORDS = {
         "ANANAME" : ANANAME,
         "ANAEXPT" : ANAEXPT,
         "ANACOLLIDER" : ANACOLLIDER,
         "ANAYEAR" : ANAYEAR,
         "ANAREFREPO" : ANAREFREPO,
         "ANAINSPIREID" : ANAINSPIREID
         }
 
     ## Try to get bib info from SPIRES
     ANABIBKEY = ""
     ANABIBTEX = ""
     bibkey, bibtex = None, None
     if STDANA:
         try:
             logging.debug("Getting Inspire/SPIRES biblio data for '%s'" % ANANAME)
             bibkey, bibtex = rivet.spiresbib.get_bibtex_from_repo(INSPIRE_SPIRES, ANAINSPIREID)
         except Exception, e:
             logging.error("Inspire/SPIRES oops: %s" % e)
         if bibkey and bibtex:
             ANABIBKEY = bibkey
             ANABIBTEX = bibtex
     KEYWORDS["ANABIBKEY"] = ANABIBKEY
     KEYWORDS["ANABIBTEX"] = ANABIBTEX
 
 
     ## Try to download YODA data file from HepData
     if STDANA:
         try:
             import urllib
             hdurl = None
             if INSPIRE_SPIRES == "I":
                 hdurl = "http://hepdata.cedar.ac.uk/view/ins%s/yoda" % ANAINSPIREID
             elif INSPIRE_SPIRES == "S":
                 hdurl = "http://hepdata.cedar.ac.uk/view/irn%s/yoda" % ANAINSPIREID
             if hdurl:
                 logging.debug("Getting data file from HepData at %s" % hdurl)
                 httpstream = urllib.urlopen(hdurl)
                 yodastr = httpstream.read()
                 if not yodastr or "<html>" in yodastr:
                     logging.warning("Problem encountered when getting data from HepData (%s). No reference data file written." % hdurl)
                 else:
                     f = open("%s.yoda" % ANANAME, "w")
                     f.write(yodastr)
                     f.close()
                 httpstream.close()
             else:
                 logging.warning("Could not identify a URL for getting reference data from HepData. No reference data file written.")
         except Exception, e:
             logging.error("HepData oops: %s" % e)
 
 
     if opts.INLINE:
         KEYWORDS["ANAREFREPO_LOWER"] = KEYWORDS["ANAREFREPO"].lower()
         INLINEMETHODS="""
   public:
     string experiment()         const { return "%(ANAEXPT)s"; }
     string year()               const { return "%(ANAYEAR)s"; }
     string %(ANAREFREPO_LOWER)sId()          const { return "%(ANAINSPIREID)s"; }
     string collider()           const { return ""; }
     string summary()            const { return ""; }
     string description()        const { return ""; }
     string runInfo()            const { return ""; }
     string bibKey()             const { return "%(ANABIBKEY)s"; }
     string bibTeX()             const { return "%(ANABIBTEX)s"; }
     string status()             const { return "UNVALIDATED"; }
     vector<string> authors()    const { return vector<string>(); }
     vector<string> references() const { return vector<string>(); }
     vector<std::string> todos() const { return vector<string>(); }
     """ % KEYWORDS
         del KEYWORDS["ANAREFREPO_LOWER"]
     else:
         INLINEMETHODS=""
 
     KEYWORDS["INLINEMETHODS"] = INLINEMETHODS
 
 
     ANASRCFILE = os.path.join(ANASRCDIR, ANANAME+".cc")
     logging.debug("Writing implementation template to %s" % ANASRCFILE)
     f = open(ANASRCFILE, "w")
     src = '''\
 // -*- C++ -*-
 #include "Rivet/Analysis.hh"
 #include "Rivet/Projections/FinalState.hh"
 
 namespace Rivet {
 
 
   /// @brief Add a short analysis description here
   class %(ANANAME)s : public Analysis {
   public:
 
     /// Constructor
     DEFAULT_RIVET_ANALYSIS_CTOR(%(ANANAME)s);
 
 
     /// @name Analysis methods
     //@{
 
     /// Book histograms and initialise projections before the run
     void init() {
 
       // Initialise and register projections
       declare(FinalState(Cuts::abseta < 5 && Cuts::pT > 100*MeV), "FS");
 
       // Book histograms
       _h_XXXX = bookProfile1D(1, 1, 1);
       _h_YYYY = bookHisto1D(2, 1, 1);
       _h_ZZZZ = bookCounter(3, 1, 1);
 
     }
 
 
     /// Perform the per-event analysis
     void analyze(const Event& event) {
 
       /// @todo Do the event by event analysis here
 
     }
 
 
     /// Normalise histograms etc., after the run
     void finalize() {
 
       normalize(_h_YYYY); // normalize to unity
       scale(_h_ZZZZ, crossSection()/picobarn/sumOfWeights()); // norm to cross section
 
     }
 
     //@}
 
 
   private:
 
 
     /// @name Histograms
     //@{
     Profile1DPtr _h_XXXX;
     Histo1DPtr _h_YYYY;
     CounterPtr _h_ZZZZ;
     //@}
 %(INLINEMETHODS)s
 
   };
 
 
   // The hook for the plugin system
   DECLARE_RIVET_PLUGIN(%(ANANAME)s);
 
 
 }
 ''' % KEYWORDS
     f.write(src)
     f.close()
 
     ANAPLOTFILE = os.path.join(ANAPLOTDIR, ANANAME+".plot")
     logging.debug("Writing plot template to %s" % ANAPLOTFILE)
     f = open(ANAPLOTFILE, "w")
     src = '''\
 # BEGIN PLOT /%(ANANAME)s/d01-x01-y01
 #Title=[Uncomment and insert title for histogram d01-x01-y01 here]
 #XLabel=[Uncomment and insert x-axis label for histogram d01-x01-y01 here]
 #YLabel=[Uncomment and insert y-axis label for histogram d01-x01-y01 here]
 # + any additional plot settings you might like, see make-plots documentation
 # END PLOT
 
 # ... add more histograms as you need them ...
 ''' % KEYWORDS
     f.write(src)
     f.close()
 
     if opts.INLINE:
         sys.exit(0)
     ANAINFOFILE = os.path.join(ANAINFODIR, ANANAME+".info")
     logging.debug("Writing info template to %s" % ANAINFOFILE)
     f = open(ANAINFOFILE, "w")
     src = """\
 Name: %(ANANAME)s
 Year: %(ANAYEAR)s
 Summary: <Insert short %(ANANAME)s description>
 Experiment: %(ANAEXPT)s
 Collider: %(ANACOLLIDER)s
 %(ANAREFREPO)sID: %(ANAINSPIREID)s
 Status: UNVALIDATED
 Authors:
  - Your Name <your@email.address>
 #References:
 #- '<Example: Eur.Phys.J. C76 (2016) no.7, 392>'
 #- '<Example: DOI:10.1140/epjc/s10052-016-4184-8>'
 #- '<Example: arXiv:1605.03814>'
 RunInfo: <Describe event types, cuts, and other general generator config tips.>
 NumEvents: 1000000
 NeedCrossSection: no
 #Beams: <Insert beam pair(s), e.g. [p+, p+] or [[p-, e-], [p-, e+]]>
 #Energies: <Run energies or beam energy pairs in GeV, e.g. [1960] or [[8.0, 3.5]] or [630, 1800]. Order pairs to match "Beams">
 Description:
   '<A fairly long description, including what is measured
   and if possible what it is useful for in terms of MC validation
-  and tuning. Use LaTeX for maths like $\pT > 50\;\GeV$.>
+  and tuning. Use LaTeX for maths like $\pT > 50\;\GeV$.>'
 BibKey: %(ANABIBKEY)s
 BibTeX: '%(ANABIBTEX)s'
 ToDo:
  - Implement the analysis, test it, remove this ToDo, and mark as VALIDATED :-)
 
 """ % KEYWORDS
     f.write(src)
     f.close()
 
     logging.info("Use e.g. 'rivet-buildplugin Rivet%s.so %s.cc' to compile the plugin" % (ANANAME, ANANAME))
diff --git a/configure.ac b/configure.ac
--- a/configure.ac
+++ b/configure.ac
@@ -1,364 +1,313 @@
 ## Process this file with autoconf to produce a configure script.
 
 AC_PREREQ(2.59)
 AC_INIT([Rivet],[trunk],[rivet@projects.hepforge.org],[Rivet])
 
 ## Check and block installation into the src/build dir
 if test "$prefix" = "$PWD"; then
   AC_MSG_ERROR([Installation into the build directory is not supported: use a different --prefix argument])
 fi
 ## Force default prefix to have a path value rather than NONE
 if test "$prefix" = "NONE"; then
    prefix=/usr/local
 fi
 
 AC_CONFIG_SRCDIR([src/Core/Analysis.cc])
 AC_CONFIG_HEADERS([include/Rivet/Config/DummyConfig.hh include/Rivet/Config/RivetConfig.hh include/Rivet/Config/BuildOptions.hh])
 AM_INIT_AUTOMAKE([dist-bzip2 -Wall])
 m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
 m4_ifdef([AM_PROG_AR], [AM_PROG_AR])
 AC_CONFIG_MACRO_DIR([m4])
 AC_SUBST(LT_OBJDIR)
 
 ## Package-specific #defines
 AC_DEFINE_UNQUOTED(RIVET_VERSION, "$PACKAGE_VERSION", "Rivet version string")
 AC_DEFINE_UNQUOTED(RIVET_NAME, "$PACKAGE_NAME", "Rivet name string")
 AC_DEFINE_UNQUOTED(RIVET_STRING, "$PACKAGE_STRING", "Rivet name and version string")
 AC_DEFINE_UNQUOTED(RIVET_TARNAME, "$PACKAGE_TARNAME", "Rivet short name string")
 AC_DEFINE_UNQUOTED(RIVET_BUGREPORT, "$PACKAGE_BUGREPORT", "Rivet contact email address")
 
 ## OS X
 AC_CEDAR_OSX
 
 ## Work out the LCG platform tag
 AC_LCG_TAG
 
 ## Set default compiler flags
 if test "x$CXXFLAGS" == "x"; then CXXFLAGS="-O2"; fi
 
 ## Compiler setup
 AC_LANG(C++)
 AC_PROG_CXX
 AX_CXX_COMPILE_STDCXX([11], [noext], [mandatory])
 
 ## Store and propagate the compiler identity and flags
 RIVETCXX="$CXX"
 AC_SUBST(RIVETCXX)
 RIVETCXXFLAGS="$CXXFLAGS"
 AC_SUBST(RIVETCXXFLAGS)
 
 ## Checks for programs.
 AC_PROG_INSTALL
 AC_PROG_LN_S
 AC_DISABLE_STATIC
 AC_LIBTOOL_DLOPEN
 AC_PROG_LIBTOOL
 
 
 ## YODA histogramming library
 # TODO: we could download, configure, and install YODA automatically... but that is NASTY
 AC_CEDAR_LIBRARYANDHEADERS([YODA], , , [AC_MSG_ERROR([YODA is required])])
 YODABINPATH=$YODALIBPATH/../bin
 AC_SUBST(YODABINPATH)
 AC_PATH_PROG(YODACONFIG, yoda-config, [], [$YODALIBPATH/../bin:$PATH])
 YODA_PYTHONPATH=""
 if test -f "$YODACONFIG"; then
   AC_MSG_CHECKING([YODA version using yoda-config])
   YODA_VERSION=`$YODACONFIG --version`
   AC_MSG_RESULT([$YODA_VERSION])
   YODA_VERSION1=[`echo $YODA_VERSION | cut -d. -f1 | sed -e 's/\([0-9]*\).*/\1/g'`]
   YODA_VERSION2=[`echo $YODA_VERSION | cut -d. -f2 | sed -e 's/\([0-9]*\).*/\1/g'`]
   YODA_VERSION3=[`echo $YODA_VERSION | cut -d. -f3 | sed -e 's/\([0-9]*\).*/\1/g'`]
   let YODA_VERSION_INT=YODA_VERSION1*10000+YODA_VERSION2*100+YODA_VERSION3
   if test $YODA_VERSION_INT -lt 10500; then
     AC_MSG_ERROR([YODA version isn't sufficient: at least version 1.5.0 required])
   fi
   AC_MSG_CHECKING([YODA Python path using yoda-config])
   YODA_PYTHONPATH=`$YODACONFIG --pythonpath`
   AC_MSG_RESULT([$YODA_PYTHONPATH])
 fi
 AC_SUBST(YODA_PYTHONPATH)
 
 
 ## HepMC event record library
 AC_CEDAR_LIBRARYANDHEADERS([HepMC], , , [AC_MSG_ERROR([HepMC is required])])
 oldCPPFLAGS=$CPPFLAGS
 CPPFLAGS="$CPPFLAGS -I$HEPMCINCPATH"
 if test -e "$HEPMCINCPATH/HepMC/HepMCDefs.h"; then
   AC_LANG_CONFTEST([AC_LANG_SOURCE([#include <iostream>
 #include "HepMC/HepMCDefs.h"
 int main() { std::cout << HEPMC_VERSION << std::endl; return 0; }])])
 else
   AC_LANG_CONFTEST([AC_LANG_SOURCE([#include <iostream>
 #include "HepMC/defs.h"
 int main() { std::cout << VERSION << std::endl; return 0; }])])
 fi
 if test -f conftest.cc; then
   $CXX $CPPFLAGS conftest.cc -o conftest 2>&1 1>&5
 elif test -f conftest.C; then
   $CXX $CPPFLAGS conftest.C -o conftest 2>&1 1>&5
 else
   $CXX $CPPFLAGS conftest.cpp -o conftest 2>&1 1>&5
 fi
 hepmc_version=`./conftest`
 if test x$hepmc_version != x; then
   let hepmc_major=`echo "$hepmc_version" | cut -d. -f1`
   let hepmc_minor=`echo "$hepmc_version" | cut -d. -f2`
 fi
 rm -f conftest conftest.cpp conftest.cc conftest.C
 HEPMC_VERSION=$hepmc_major$hepmc_minor
 AC_MSG_NOTICE([HepMC version is $hepmc_version -> $HEPMC_VERSION])
 AC_SUBST(HEPMC_VERSION)
 CPPFLAGS=$oldCPPFLAGS
 
 
 ## FastJet clustering library
 AC_CEDAR_LIBRARYANDHEADERS([fastjet], , , [AC_MSG_ERROR([FastJet is required])])
 AC_PATH_PROG(FJCONFIG, fastjet-config, [], $FASTJETPATH/bin:$PATH)
 if test -f "$FJCONFIG"; then
   AC_MSG_CHECKING([FastJet version using fastjet-config])
   fjversion=`$FJCONFIG --version`
   AC_MSG_RESULT([$fjversion])
   fjmajor=$(echo $fjversion | cut -f1 -d.)
   fjminor=$(echo $fjversion | cut -f2 -d.)
   fjmicro=$(echo $fjversion | cut -f3 -d.)
   if test "$fjmajor" -lt 3; then
     AC_MSG_ERROR([FastJet version 3.0.0 or later is required])
   fi
   FASTJETCONFIGLIBADD="$($FJCONFIG --plugins --shared --libs)"
 else
   FASTJETCONFIGLIBADD="-L$FASTJETLIBPATH -l$FASTJETLIBNAME"
   FASTJETCONFIGLIBADD="$FASTJETCONFIGLIBADD -lSISConePlugin -lsiscone -lsiscone_spherical"
   FASTJETCONFIGLIBADD="$FASTJETCONFIGLIBADD -lCDFConesPlugin -lD0RunIIConePlugin -lNestedDefsPlugin"
   FASTJETCONFIGLIBADD="$FASTJETCONFIGLIBADD -lTrackJetPlugin -lATLASConePlugin -lCMSIterativeConePlugin"
   FASTJETCONFIGLIBADD="$FASTJETCONFIGLIBADD -lEECambridgePlugin -lJadePlugin"
 fi;
 AC_MSG_NOTICE([FastJet LIBADD = $FASTJETCONFIGLIBADD])
 AC_SUBST(FASTJETCONFIGLIBADD)
 # Check for FastJet headers that require the --enable-all(cxx)plugins option
 FASTJET_ERRMSG="Required FastJet plugin headers were not found: did you build FastJet with the --enable-allcxxplugins option?"
 oldCPPFLAGS=$CPPFLAGS
 CPPFLAGS="$CPPFLAGS -I$FASTJETINCPATH"
 AC_CHECK_HEADER([fastjet/D0RunIIConePlugin.hh], [], [AC_MSG_ERROR([$FASTJET_ERRMSG])])
 AC_CHECK_HEADER([fastjet/TrackJetPlugin.hh], [], [AC_MSG_ERROR([$FASTJET_ERRMSG])])
 CPPFLAGS=$oldCPPFLAGS
 
 
 ## GNU Scientific Library
 AC_SEARCH_GSL
 AC_CEDAR_HEADERS([gsl], , , [AC_MSG_ERROR([GSL (GNU Scientific Library) is required])])
 oldCPPFLAGS=$CPPFLAGS
 CPPFLAGS="$CPPFLAGS -I$GSLINCPATH"
 AC_CHECK_HEADER([gsl/gsl_vector.h], [], [AC_MSG_ERROR([GSL vectors not found.])])
 CPPFLAGS=$oldCPPFLAGS
 
 
-dnl ## Boost utility library
-dnl BOOST_REQUIRE([1.55.0])
-dnl BOOST_FOREACH
-dnl BOOST_SMART_PTR
-dnl BOOST_FIND_HEADER([boost/lexical_cast.hpp])
-dnl BOOST_FIND_HEADER([boost/range.hpp])
-dnl BOOST_FIND_HEADER([boost/assign.hpp])
-dnl BOOST_BUG_IN_FOREACH
-
-
 ## Disable build/install of standard analyses
 AC_ARG_ENABLE([analyses],
   [AC_HELP_STRING(--disable-analyses, [don't try to build or install standard analyses])],
   [], [enable_analyses=yes])
 if test x$enable_analyses != xyes; then
    AC_MSG_WARN([Not building standard Rivet analyses, by request])
 fi
 AM_CONDITIONAL(ENABLE_ANALYSES, [test x$enable_analyses = xyes])
 
 
-## Enable build/install of unvalidated analyses
-AC_ARG_ENABLE([unvalidated],
-  [AC_HELP_STRING(--enable-unvalidated, [does nothing, for backward compatibility])],
-  [], [enable_unvalidated=yes])
-dnl AC_ARG_ENABLE([unvalidated],
-dnl   [AC_HELP_STRING(--enable-unvalidated, [build and install unvalidated analyses])],
-dnl   [], [enable_unvalidated=yes])
-dnl if test x$enable_unvalidated = xyes; then
-dnl    AC_MSG_WARN([Building unvalidated Rivet analyses, by request])
-dnl else
-dnl    AC_MSG_NOTICE([Not building unvalidated Rivet analyses])
-dnl fi
-dnl AM_CONDITIONAL(ENABLE_UNVALIDATED, [test x$enable_unvalidated = xyes])
-
-
-## Disable build/install of validated-but-preliminary analyses
-AC_ARG_ENABLE([preliminary],
-  [AC_HELP_STRING(--disable-preliminary, [does nothing, for backward compatibility])],
-  [], [enable_preliminary=yes])
-dnl AC_ARG_ENABLE([preliminary],
-dnl   [AC_HELP_STRING(--disable-preliminary, [build and install validated-but-preliminary analyses])],
-dnl   [], [enable_preliminary=yes])
-dnl if test x$enable_preliminary = xyes; then
-dnl    AC_MSG_NOTICE([Building preliminary Rivet analyses])
-dnl else
-dnl    AC_MSG_NOTICE([Not building preliminary Rivet analyses, by request])
-dnl fi
-dnl AM_CONDITIONAL(ENABLE_PRELIMINARY, [test x$enable_preliminary = xyes])
-
-
-## Disable build/install of now-obsolete preliminary analyses
-AC_ARG_ENABLE([obsolete],
-  [AC_HELP_STRING(--disable-obsolete, [does nothing, for backward compatibility])],
-  [], [enable_obsolete=yes])
-dnl AC_ARG_ENABLE([obsolete],
-dnl   [AC_HELP_STRING(--disable-obsolete, [build and install now-obsolete analyses])],
-dnl   [], [enable_obsolete=$enable_preliminary])
-dnl if test x$enable_obsolete = xyes; then
-dnl    AC_MSG_NOTICE([Building obsolete Rivet analyses])
-dnl else
-dnl    AC_MSG_NOTICE([Not building obsolete Rivet analyses, by request])
-dnl fi
-dnl AM_CONDITIONAL(ENABLE_OBSOLETE, [test x$enable_obsolete = xyes])
-
-
 ## Build LaTeX docs if possible...
 AC_PATH_PROG(PDFLATEX, pdflatex)
 AM_CONDITIONAL(WITH_PDFLATEX, [test x$PDFLATEX != x])
 ## ... unless told otherwise!
 AC_ARG_ENABLE([pdfmanual],
   [AC_HELP_STRING(--enable-pdfmanual, [build and install the manual])],
   [], [enable_pdfmanual=no])
 if test x$enable_pdfmanual = xyes; then
    AC_MSG_WARN([Building Rivet manual, by request])
 fi
 AM_CONDITIONAL(ENABLE_PDFMANUAL, [test x$enable_pdfmanual = xyes])
 
 ## Build Doxygen documentation if possible
 AC_ARG_ENABLE([doxygen],
   [AC_HELP_STRING(--disable-doxygen, [don't try to make Doxygen documentation])],
   [], [enable_doxygen=yes])
 if test x$enable_doxygen = xyes; then
    AC_PATH_PROG(DOXYGEN, doxygen)
 fi
 AM_CONDITIONAL(WITH_DOXYGEN, [test x$DOXYGEN != x])
 
 ## Build asciidoc docs if possible
 AC_PATH_PROG(ASCIIDOC, asciidoc)
 AM_CONDITIONAL(WITH_ASCIIDOC, [test x$ASCIIDOC != x])
 
 
 ## Python extension
 AC_ARG_ENABLE(pyext, [AC_HELP_STRING(--disable-pyext,
   [don't build Python module (default=build)])],
   [], [enable_pyext=yes])
 ## Basic Python checks
 if test x$enable_pyext == xyes; then
   AX_PYTHON_DEVEL([>= '2.6'])
   AC_SUBST(PYTHON_VERSION)
   RIVET_PYTHONPATH=`$PYTHON -c "import distutils.sysconfig; print distutils.sysconfig.get_python_lib(prefix='$prefix', plat_specific=True);"`
   AC_SUBST(RIVET_PYTHONPATH)
   if test -z "$PYTHON"; then
     AC_MSG_ERROR([Can't build Python extension since python can't be found])
     enable_pyext=no
   fi
   if test -z "$PYTHON_CPPFLAGS"; then
     AC_MSG_ERROR([Can't build Python extension since Python.h header file cannot be found])
     enable_pyext=no
   fi
 fi
-dnl if test x$enable_pyext == xyes; then
-dnl   AC_MSG_NOTICE([All Python build checks successful: 'yoda' Python extension will be built])
-dnl fi
 AM_CONDITIONAL(ENABLE_PYEXT, [test x$enable_pyext == xyes])
 
 
 ## Cython checks
 if test x$enable_pyext == xyes; then
-  AM_CHECK_CYTHON([0.18], [:], [:])
+  AM_CHECK_CYTHON([0.23.5], [:], [:])
   if test x$CYTHON_FOUND = xyes; then
-    AC_MSG_NOTICE([Cython >= 0.18 found: Python extension source can be rebuilt (for developers)])
-    cython_compiler=$CXX
+    AC_MSG_NOTICE([Cython >= 0.23.5 found: Python extension source can be rebuilt (for developers)])
   fi
+
+  AC_CHECK_FILE([pyext/rivet/core.cpp],
+                [],
+                [if test "x$CYTHON_FOUND" != "xyes"; then
+                  AC_MSG_ERROR([Cython is required for --enable-pyext, no pre-built core.cpp was found.])
+                fi])
+
+  cython_compiler=$CXX
   ## Set extra Python extension build flags (to cope with Cython output code oddities)
   PYEXT_CXXFLAGS="$CXXFLAGS"
   AC_CEDAR_CHECKCXXFLAG([-Wno-unused-but-set-variable], [PYEXT_CXXFLAGS="$PYEXT_CXXFLAGS -Wno-unused-but-set-variable"])
   AC_CEDAR_CHECKCXXFLAG([-Wno-sign-compare], [PYEXT_CXXFLAGS="$PYEXT_CXXFLAGS -Wno-sign-compare"])
   AC_SUBST(PYEXT_CXXFLAGS)
   AC_MSG_NOTICE([All Python build checks successful: 'rivet' Python extension will be built])
 fi
 AM_CONDITIONAL(WITH_CYTHON, [test x$CYTHON_FOUND = xyes])
 
 
 ## Set default build flags
 AM_CPPFLAGS="-I\$(top_srcdir)/include -I\$(top_builddir)/include"
 #AM_CPPFLAGS="$AM_CPPFLAGS -I\$(top_srcdir)/include/eigen3"
 AM_CPPFLAGS="$AM_CPPFLAGS \$(GSL_CPPFLAGS)"
 dnl AM_CPPFLAGS="$AM_CPPFLAGS \$(BOOST_CPPFLAGS)"
 AM_CPPFLAGS="$AM_CPPFLAGS -I\$(YODAINCPATH)"
 AM_CPPFLAGS="$AM_CPPFLAGS -I\$(HEPMCINCPATH)"
 AM_CPPFLAGS="$AM_CPPFLAGS -I\$(FASTJETINCPATH)"
 AC_CEDAR_CHECKCXXFLAG([-pedantic], [AM_CXXFLAGS="$AM_CXXFLAGS -pedantic"])
 AC_CEDAR_CHECKCXXFLAG([-Wall], [AM_CXXFLAGS="$AM_CXXFLAGS -Wall"])
 AC_CEDAR_CHECKCXXFLAG([-Wno-long-long], [AM_CXXFLAGS="$AM_CXXFLAGS -Wno-long-long"])
 AC_CEDAR_CHECKCXXFLAG([-Wno-format], [AM_CXXFLAGS="$AM_CXXFLAGS -Wno-format"])
 dnl AC_CEDAR_CHECKCXXFLAG([-Wno-unused-variable], [AM_CXXFLAGS="$AM_CXXFLAGS -Wno-unused-variable"])
 AC_CEDAR_CHECKCXXFLAG([-Werror=uninitialized], [AM_CXXFLAGS="$AM_CXXFLAGS -Werror=uninitialized"])
 AC_CEDAR_CHECKCXXFLAG([-Werror=delete-non-virtual-dtor], [AM_CXXFLAGS="$AM_CXXFLAGS -Werror=delete-non-virtual-dtor"])
 
 
 ## Debug flag (default=-DNDEBUG, enabled=-g)
 AC_ARG_ENABLE([debug], [AC_HELP_STRING(--enable-debug,
   [build with debugging symbols  @<:@default=no@:>@])], [], [enable_debug=no])
 if test x$enable_debug == xyes; then
   AM_CXXFLAGS="$AM_CXXFLAGS -g"
 fi
 
 
 ## Extra warnings flag (default=none)
 AC_ARG_ENABLE([extra-warnings], [AC_HELP_STRING(--enable-extra-warnings,
   [build with extra compiler warnings (recommended for developers)  @<:@default=no@:>@])], [], [enable_extra_warnings=no])
 if test x$enable_extra_warnings == xyes; then
    AC_CEDAR_CHECKCXXFLAG([-Wextra], [AM_CXXFLAGS="$AM_CXXFLAGS -Wextra "])
 fi
 
 
 AC_SUBST(AM_CPPFLAGS)
 AC_SUBST(AM_CXXFLAGS)
 
 AC_EMPTY_SUBST
 AC_CONFIG_FILES(include/Makefile include/Rivet/Makefile)
 AC_CONFIG_FILES(src/Makefile)
 AC_CONFIG_FILES(src/Core/Makefile src/Core/yamlcpp/Makefile)
 AC_CONFIG_FILES(src/Tools/Makefile)
 AC_CONFIG_FILES(src/Projections/Makefile)
 AC_CONFIG_FILES(src/Analyses/Makefile)
 AC_CONFIG_FILES(test/Makefile)
 AC_CONFIG_FILES(pyext/Makefile pyext/rivet/Makefile pyext/setup.py)
 AC_CONFIG_FILES(data/Makefile data/refdata/Makefile data/anainfo/Makefile data/plotinfo/Makefile data/texmf/Makefile)
 AC_CONFIG_FILES(doc/Makefile)
 AC_CONFIG_FILES(doc/rivetversion.sty doc/diffanas)
 AC_CONFIG_FILES(bin/Makefile bin/rivet-config bin/rivet-buildplugin)
 AC_CONFIG_FILES(Makefile Doxyfile)
 AC_CONFIG_FILES(rivetenv.sh rivetenv.csh rivet.pc)
 
 AC_OUTPUT
 
 if test x$enable_pyrivet == xyes; then
    cat <<EOF
 
 ************************************************************
 RIVET CONFIGURED!
 
 Now build and install (to the $prefix tree) with e.g.
 make -j2 && make -j2 install
 
 To use Rivet, we recommend reading HepMC files from a file
 or pipe (the latter may be made with mkfifo) using the
 'rivet' executable.
 
 For a more pleasant command line experience, you can include
 the data/rivet-completion file into your .bashrc file,
 or your bash_completion.d directory if you have one.
 
 The rivetenv.*sh files will not be installed, but can help you
 to set up a Rivet runtime environment in future.
 ************************************************************
 EOF
 fi
diff --git a/data/anainfo/ALICE_2010_S8624100.info b/data/anainfo/ALICE_2010_S8624100.info
--- a/data/anainfo/ALICE_2010_S8624100.info
+++ b/data/anainfo/ALICE_2010_S8624100.info
@@ -1,43 +1,43 @@
 Name: ALICE_2010_S8624100
 Year: 2010
-Summary: Charged particle multiplicities at 0.9 and 2.36\;TeV in three different pseudorapidity intervals.
+Summary: Charged particle multiplicities at 0.9 and 2.36\;TeV in three different pseudorapidity intervals
 Experiment: ALICE
 Collider: LHC
 SpiresID: 8624100
 InspireID: 852450
 Status: VALIDATED
 Authors:
  - Holger Schulz <holger.schulz@physik.hu-berlin.de>
  - Jan Fiete Grosse-Oetringhaus@cern.ch <Jan.Fiete.Grosse-Oetringhaus@cern.ch>
 References:
  - Eur.Phys.J.C68:89-108,2010
  - arXiv:1004.3034 [hep-ex]
 RunInfo:
   QCD and diffractive events at $\sqrt{s} = 0.9\;\TeV$ and $\sqrt{s} = 2.36\;\TeV$
 NumEvents: 1000000
 Beams: [p+, p+]
 Energies: [900, 2360]
 Description:
   'This is an ALICE analysis where charged particle multiplicities (including
    the zero bin) have been measured in three different pseudorapidity intervals
    ($|\eta|<0.5; |\eta|<1.0; |\eta|<1.3$. Only the INEL distributions have
    been considered here, i.e. this analysis can only be meaningfully compared to
    PYTHIA 6 with diffractive processes disabled. The data were taken at 900 and
    2360\;GeV.'
 BibKey: Aamodt:2010ft
 BibTeX: '@Article{Aamodt:2010ft,
      author    = "Aamodt, K. and others",
      collaboration = "ALICE",
      title     = "{Charged-particle multiplicity measurement in proton-proton
                   collisions at $\sqrt{s} = 0.9$ and 2.36 TeV with ALICE at
                   LHC}",
      journal   = "Eur. Phys. J.",
      volume    = "C68",
      year      = "2010",
      pages     = "89-108",
      eprint    = "1004.3034",
      archivePrefix = "arXiv",
      primaryClass  =  "hep-ex",
      doi       = "10.1140/epjc/s10052-010-1339-x",
      SLACcitation  = "%%CITATION = 1004.3034;%%"
 }'
diff --git a/data/anainfo/ALICE_2010_S8625980.info b/data/anainfo/ALICE_2010_S8625980.info
--- a/data/anainfo/ALICE_2010_S8625980.info
+++ b/data/anainfo/ALICE_2010_S8625980.info
@@ -1,38 +1,38 @@
 Name: ALICE_2010_S8625980
 Year: 2010
-Summary: Pseudorapidities at three energies, charged multiplicity at 7 TeV.
+Summary: Pseudorapidities at three energies, charged multiplicity at 7 TeV
 Experiment: ALICE
 Collider: LHC
 SpiresID: 8625980
 InspireID: 852264
 Status: VALIDATED
 Authors:
  - Holger Schulz <holger.schulz@physik.hu-berlin.de>
  - Jan Fiete Grosse-Oetringhaus@cern.ch <Jan.Fiete.Grosse-Oetringhaus@cern.ch>
 References:
  - Eur.Phys.J. C68 (2010) 345-354
  - arXiv:1004.3514 [hep-ex]
 RunInfo:
   Diffractive events need to be enabled.
 NumEvents: 1000000
 Beams: [p+, p+]
 Energies: [900, 2360, 7000]
 Description:
   'This is an ALICE publication with pseudorapities for 0.9, 2.36 and $7\;\TeV$
   and the charged multiplicity at $7\;\TeV$. The analysis requires at least on
   charged particle in the event. Only the INEL distributions are considered here'
 BibKey: Aamodt:2010pp
 BibTeX: '@article{Aamodt:2010pp,
       author         = "Aamodt, K. and others",
       title          = "{Charged-particle multiplicity measurement in
                         proton-proton collisions at $\sqrt{s} = 7$ TeV with ALICE at LHC}",
       collaboration  = "ALICE",
       journal        = "Eur.Phys.J.",
       volume         = "C68",
       pages          = "345-354",
       doi            = "10.1140/epjc/s10052-010-1350-2",
       year           = "2010",
       eprint         = "1004.3514",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
 }'
diff --git a/data/anainfo/ALICE_2011_S8945144.info b/data/anainfo/ALICE_2011_S8945144.info
--- a/data/anainfo/ALICE_2011_S8945144.info
+++ b/data/anainfo/ALICE_2011_S8945144.info
@@ -1,41 +1,41 @@
 Name: ALICE_2011_S8945144
 Year: 2011
-Summary: Tranverse momentum spectra of pions, kaons and protons in pp collisions at 0.9 TeV
+Summary: Transverse momentum spectra of pions, kaons and protons in pp collisions at 0.9 TeV
 Experiment: ALICE
 Collider: LHC
 SpiresID: 8945144
 InspireID: 885104
 Status: VALIDATED
 Authors:
  - Pablo Bueno Gomez <UO189399@uniovi.es>
  - Eva Sicking <esicking@cern.ch>
 References:
  - Eur.Phys.J.C71:1655,2011.
  - arXiv:1101.4110 [hep-ex]
 RunInfo:
   Diffractive events need to be enabled.
 NumEvents: 1000000
 Beams: [p+, p+]
 Energies: [900]
 PtCuts:
 Description:
   'Obtaining the transverse momentum spectra of pions, kaons and protons in $pp$ collisions at
    $\sqrt{s} = 0.9$ TeV with ALICE at the LHC. Mean transverse momentum as a function of the
    mass of the emitted particle is also included.'
 BibKey: Aamodt:2011zj
 BibTeX: '@article{Aamodt:2011zj,
       author         = "Aamodt, K. and others",
       title          = "{Production of pions, kaons and protons in $pp$
                         collisions at $\sqrt{s}= 900$ GeV with ALICE at the LHC}",
       collaboration  = "ALICE",
       journal        = "Eur.Phys.J.",
       volume         = "C71",
       pages          = "1655",
       doi            = "10.1140/epjc/s10052-011-1655-9",
       year           = "2011",
       eprint         = "1101.4110",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CERN-PH-EP-2010-085",
       SLACcitation   = "%%CITATION = ARXIV:1101.4110;%%",
 }'
diff --git a/data/anainfo/ALICE_2012_I1116147.info b/data/anainfo/ALICE_2012_I1116147.info
new file mode 100644
--- /dev/null
+++ b/data/anainfo/ALICE_2012_I1116147.info
@@ -0,0 +1,38 @@
+Name: ALICE_2012_I1116147
+Year: 2012
+Summary: pT of neutral pions and $\eta$ mesons in $pp$ collisions at $7\,$TeV and $0.9\,$TeV
+Experiment: ALICE
+Collider: LHC
+InspireID: 1116147
+Status: VALIDATED
+Authors:
+  - Hendrik Poppenborg <hendrik.poppenborg@cern.ch>
+References:
+  - "Phys.Lett. B717 (2012) 162-172"
+  - "doi:10.1016/j.physletb.2012.09.015"
+  - "arXiv:hep-ex/1205.5724"
+RunInfo: Generic $pp$ collision events.
+Beams: [p+, p+]
+Energies: [900, 7000]
+Description:
+  Transverse momentum spectra of neutral pions and $\eta$ mesons and the ratio $\pi^0/\eta$,
+  obtained at mid-rapidity in pp collisions at $\sqrt{s} = 7\,$TeV with ALICE at the LHC.
+  The transverse momentum spectrum of neutral pions is also given for $\sqrt{s} = 0.9\,$TeV.
+BibKey: Abelev:2012cn
+BibTeX: '@article{Abelev:2012cn,
+      author         = "Abelev, B. and others",
+      title          = "{Neutral pion and $\eta$ meson production in
+                        proton-proton collisions at $\sqrt{s}=0.9$ TeV and
+                        $\sqrt{s}=7$ TeV}",
+      collaboration  = "ALICE",
+      journal        = "Phys. Lett.",
+      volume         = "B717",
+      year           = "2012",
+      pages          = "162-172",
+      doi            = "10.1016/j.physletb.2012.09.015",
+      eprint         = "1205.5724",
+      archivePrefix  = "arXiv",
+      primaryClass   = "hep-ex",
+      reportNumber   = "CERN-PH-EP-2012-001",
+      SLACcitation   = "%%CITATION = ARXIV:1205.5724;%%"
+}'
diff --git a/data/anainfo/ALICE_2015_I1357424.info b/data/anainfo/ALICE_2015_I1357424.info
--- a/data/anainfo/ALICE_2015_I1357424.info
+++ b/data/anainfo/ALICE_2015_I1357424.info
@@ -1,39 +1,37 @@
 Name: ALICE_2015_I1357424
 Year: 2015
-Summary: Tranverse momentum spectra of pions, kaons and protons in pp collisions at 7 TeV
+Summary: Transverse momentum spectra of pions, kaons and protons in pp collisions at 7 TeV
 Experiment: ALICE
 Collider: LHC
 InspireID: 1357424
 Status: VALIDATED
 Authors:
  - Andreas Morsch andreas.morsch@cern.ch
 #References:
 # - <Example: Phys.Lett.B639:151-158,2006, Erratum-ibid.B658:285-289,2008>
 # - <Example: doi:10.1016/j.physletb.2006.04.048>
 # - <Example: arXiv:hep-ex/0511054 (plus erratum)>
 RunInfo: Inelastic pp collisions at sqrt(s) = 7 TeV
 NumEvents: 1000000
 NeedCrossSection: no
 Beams: [p+, p+]
 Energies: [7000]
 #PtCuts: <Insert list of kinematic pT cuts in GeV, e.g. [0, 20]>
 #NeedCrossSection: True
 Description:
   'Obtaining the transverse momentum spectra of primary pions, kaons and protons in $pp$ collisions at
    $\sqrt{s} = 7$ TeV with ALICE at the LHC. K/pi and p/pi ratios are also included.'
 BibKey: Adam:2015qaa
 BibTeX: '@article{Adam:2015qaa,
       author         = "Adam, Jaroslav and others",
       title          = "{Measurement of pion, kaon and proton production in
                         proton-proton collisions at $\sqrt{s}=7$ TeV}",
       collaboration  = "ALICE",
       year           = "2015",
       eprint         = "1504.00024",
       archivePrefix  = "arXiv",
       primaryClass   = "nucl-ex",
       reportNumber   = "CERN-PH-EP-2015-068",
       SLACcitation   = "%%CITATION = ARXIV:1504.00024;%%",
 }'
 ToDo:
-
-
diff --git a/data/anainfo/ATLAS_2011_I929691.info b/data/anainfo/ATLAS_2011_I929691.info
new file mode 100644
--- /dev/null
+++ b/data/anainfo/ATLAS_2011_I929691.info
@@ -0,0 +1,45 @@
+Name: ATLAS_2011_I929691
+Year: 2011
+Summary: Jet fragmentation at 7 TeV
+Experiment: ATLAS
+Collider: LHC
+InspireID: 929691
+Status: VALIDATED
+Authors:
+ - Stefan von Buddenbrock <stef.von.b@cern.ch>
+References:
+ - "Eur.Phys.J.C 71 (2011) 1795"
+ - "doi:10.1140/epjc/s10052-011-1795-y"
+ - "arXiv:1109.5816 [hep-ex]"
+RunInfo:
+  Inclusive jet production
+Beams: [p+, p+]
+Energies: [7000]
+PtCuts: [0.5]
+Description:
+   The jet fragmentation function and transverse profile for jets with 25 GeV $< p_\text{T~jet} < 500$ GeV and
+   $|\eta_\text{jet}| < 1.2$ produced in proton-proton collisions with a center-of-mass energy of 7 TeV are presented.
+   The measurement is performed using data with an integrated luminosity of 36 pb${}^{-1}$. Jets are reconstructed and
+   their momentum measured using calorimetric information. The momenta of the charged particle constituents are measured
+   using the tracking system. The distributions corrected for detector effects are compared with various Monte Carlo event
+   generators and generator tunes. Several of these choices show good agreement with the measured fragmentation function.
+   None of these choices reproduce both the transverse profile and fragmentation function over the full kinematic range of
+   the measurement.
+BibKey: Aad:2011sc
+BibTeX: '@article{Aad:2011sc,
+      author         = "Aad, Georges and others",
+      title          = "{Measurement of the jet fragmentation function and
+                        transverse profile in proton-proton collisions at a
+                        center-of-mass energy of 7 TeV with the ATLAS detector}",
+      collaboration  = "ATLAS",
+      journal        = "Eur. Phys. J.",
+      volume         = "C71",
+      year           = "2011",
+      pages          = "1795",
+      doi            = "10.1140/epjc/s10052-011-1795-y",
+      eprint         = "1109.5816",
+      archivePrefix  = "arXiv",
+      primaryClass   = "hep-ex",
+      reportNumber   = "CERN-PH-EP-2011-148",
+      SLACcitation   = "%%CITATION = ARXIV:1109.5816;%%"
+}'
diff --git a/data/anainfo/ATLAS_2012_I1203852.info b/data/anainfo/ATLAS_2012_I1203852.info
--- a/data/anainfo/ATLAS_2012_I1203852.info
+++ b/data/anainfo/ATLAS_2012_I1203852.info
@@ -1,47 +1,47 @@
 Name: ATLAS_2012_I1203852
 Year: 2013
 Summary: Measurement of the $ZZ(*)$ production cross-section in $pp$ collisions at 7 TeV with ATLAS
 Experiment: ATLAS
 Collider: LHC
-SpiresID: 1203852
+InspireID: 1203852
 Status: VALIDATED
 Authors:
  - Oldrich Kepka <oldrich.kepka@cern.ch>
  - Katerina Moudra <katerina.moudra@cern.ch>
 References:
  - arXiv:1211.6096 [hep-ex]
 RunInfo:
   Run with inclusive $Z$ events, with $Z$ decays to 4 leptons or 2 leptons + MET.
 NumEvents: 100k
 Beams: [p+, p+]
 Energies: [7000]
 PtCuts: [20]
 NeedsCrossSection: yes
 Description:
   'Measurement of the fiducial cross section for $ZZ(*)$ production in proton proton collisions
    at a centre-of mass energy of 7 TeV, is presented, using data corresponding to an integrated
    luminosity of 4.6/fb collected by the ATLAS experiment at the Large Hadron Collider. The
    cross-section is measured using processes with two $Z$ bosons decaying to electrons or muons
    or with one $Z$ boson decaying to electrons or muons and a second $Z$ boson decaying to neutrinos.
    The fiducial region contains dressed leptons in restricted $p_T$ and $\eta$ ranges. The
    selection has specific requirements for both production processes. A measurement of the
    normalized fiducial cross-section as a function of $ZZ$ invariant mass, leading $Z$ $p_T$ and
    angle of two leptons coming from the leading $Z$ is also presented for both signal processes.'
 BibKey: Aad:2012awa
 BibTeX: '@article{Aad:2012awa,
       author         = "Aad, Georges and others",
       title          = "{Measurement of $ZZ$ production in $pp$ collisions at
                         $\sqrt{s}=7$ TeV and limits on anomalous $ZZZ$ and
                         $ZZ\gamma$ couplings with the ATLAS detector}",
       collaboration  = "ATLAS Collaboration",
       journal        = "JHEP",
       volume         = "1303",
       pages          = "128",
       doi            = "10.1007/JHEP03(2013)128",
       year           = "2013",
       eprint         = "1211.6096",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CERN-PH-EP-2012-318",
       SLACcitation   = "%%CITATION = ARXIV:1211.6096;%%",
 }'
diff --git a/data/anainfo/ATLAS_2013_I1190187.info b/data/anainfo/ATLAS_2013_I1190187.info
--- a/data/anainfo/ATLAS_2013_I1190187.info
+++ b/data/anainfo/ATLAS_2013_I1190187.info
@@ -1,49 +1,49 @@
 Name: ATLAS_2013_I1190187
 Year: 2013
 Summary: Measurement of the $W^+ W^-$ production cross-section at 7 TeV
 Experiment: ATLAS
 Collider: LHC
-SpiresID: 1190187
+InspireID: 1190187
 Status: VALIDATED
 Authors:
  - Oldrich Kepka <oldrich.kepka@cern.ch>
  - Katerina Moudra <katerina.moudra@cern.ch>
 References:
  - arXiv:1210.2979 [hep-ex]
 RunInfo:
   Run with inclusive $W^+ W^-$ events, with $W$ decays to electron + MET,
   muon + MET, or tau + MET.
 NumEvents: 100k
 Beams: [p+, p+]
 Energies: [7000]
 PtCuts: [20]
 NeedsCrossSection: yes
 Description:
   'Measurement of the fiducial cross section for $W^+ W^-$ production in proton proton
    collisions at a centre-of mass energy of 7 TeV, is presented, using data corresponding
    to an integrated luminosity of 4.6/fb collected by the ATLAS experiment at the Large
    Hadron Collider. The cross section is measured in the leptonic decay channels, using
    electron+MET and muon+MET $W$ decays. $W \to \tau$ processes with the tau decaying
    into electron + MET or muon + MET are also included in the measurement. The fiducial
    region contains dressed leptons in restricted $p_T$ and $\eta$ ranges. The selection
    has specific requirements for each production channel. A measurement of the normalized
    fiducial cross section as a function of the leading lepton transverse momentum is also presented.'
 BibKey: ATLAS:2012mec
 BibTeX: '@article{ATLAS:2012mec,
       author         = "Aad, Georges and others",
       title          = "{Measurement of $W^+W^-$ production in $pp$ collisions at
                         $\sqrt{s}$=7  TeV with the ATLAS detector and limits
                         on anomalous $WWZ$ and $WW\gamma$ couplings}",
       collaboration  = "ATLAS Collaboration",
       journal        = "Phys.Rev.",
       number         = "11",
       volume         = "D87",
       pages          = "112001",
       doi            = "10.1103/PhysRevD.87.112001",
       year           = "2013",
       eprint         = "1210.2979",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CERN-PH-EP-2012-242",
       SLACcitation   = "%%CITATION = ARXIV:1210.2979;%%",
 }'
diff --git a/data/anainfo/ATLAS_2013_I1217863_W.info b/data/anainfo/ATLAS_2013_I1217863_W.info
--- a/data/anainfo/ATLAS_2013_I1217863_W.info
+++ b/data/anainfo/ATLAS_2013_I1217863_W.info
@@ -1,45 +1,45 @@
 Name: ATLAS_2013_I1217863_W
 Year: 2013
 Summary: W + gamma production at 7 TeV
 Experiment: ATLAS
 Collider: LHC
-InspireID: 121786
+InspireID: 1217863
 Status: VALIDATED
 Authors:
  - Chritian Gutschow <chris.g@cern.ch>
 References:
  - Phys.Rev. D87 (2013) 112003
  - doi:10.1103/PhysRevD.87.112003
  - arXiv:1302.1283 [hep-ex]
 RunInfo:
   W+gamma in the electron channel
 NumEvents: 1000000
 Beams: [p+, p+]
 Energies: [7000]
 PtCuts: [25,15]
 NeedCrossSection: True
 Description:
   Measurements of the differential fiducial cross sections for the production of a W boson in association with a high-energy
   photon are measured using pp collisions at \sqrt{s}=7 TeV. The analysis uses a data sample with an integrated luminosity of 
   4.6 fb-1 collected by the ATLAS detector during the 2011 LHC data-taking period. Events are selected using leptonic decays 
   of the W bosons with the requirement of an associated isolated photon. The default routine will consider the electron decay
   channel of the W boson. Use ATLAS_2013_I1217863_W_EL and ATLAS_2013_I1217863_W_MU to specify the decay channel directly.
 BibKey: Aad:2013iz
 BibTeX: '@article{Aad:2013izg,
       author         = "Aad, Georges and others",
       title          = "{Measurements of Wγ and Zγ production in pp collisions
                         at $\sqrt{s}$=7  TeV with the ATLAS detector at the
                         LHC}",
       collaboration  = "ATLAS Collaboration",
       journal        = "Phys.Rev.",
       number         = "11",
       volume         = "D87",
       pages          = "112003",
       doi            = "10.1103/PhysRevD.87.112003",
       year           = "2013",
       eprint         = "1302.1283",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CERN-PH-EP-2012-345",
       SLACcitation   = "%%CITATION = ARXIV:1302.1283;%%",
 }'
diff --git a/data/anainfo/ATLAS_2013_I1217863_W_EL.info b/data/anainfo/ATLAS_2013_I1217863_W_EL.info
--- a/data/anainfo/ATLAS_2013_I1217863_W_EL.info
+++ b/data/anainfo/ATLAS_2013_I1217863_W_EL.info
@@ -1,45 +1,45 @@
 Name: ATLAS_2013_I1217863_W_EL
 Year: 2013
 Summary: W + gamma production at 7 TeV
 Experiment: ATLAS
 Collider: LHC
-InspireID: 121786
+InspireID: 1217863
 Status: VALIDATED
 Authors:
  - Chritian Gutschow <chris.g@cern.ch>
 References:
  - Phys.Rev. D87 (2013) 112003
  - doi:10.1103/PhysRevD.87.112003
  - arXiv:1302.1283 [hep-ex]
 RunInfo:
   W+gamma in the electron channel
 NumEvents: 1000000
 Beams: [p+, p+]
 Energies: [7000]
 PtCuts: [25,15]
 NeedCrossSection: True
 Description:
   Measurements of the differential fiducial cross sections for the production of a W boson in association with a high-energy
   photon are measured using pp collisions at \sqrt{s}=7 TeV. The analysis uses a data sample with an integrated luminosity of 
   4.6 fb-1 collected by the ATLAS detector during the 2011 LHC data-taking period. Events are selected using leptonic decays 
   of the W bosons with the requirement of an associated isolated photon. The default routine will consider the electron decay
   channel of the W boson. Use ATLAS_2013_I1217863_W_EL and ATLAS_2013_I1217863_W_MU to specify the decay channel directly.
 BibKey: Aad:2013iz
 BibTeX: '@article{Aad:2013izg,
       author         = "Aad, Georges and others",
       title          = "{Measurements of Wγ and Zγ production in pp collisions
                         at $\sqrt{s}$=7  TeV with the ATLAS detector at the
                         LHC}",
       collaboration  = "ATLAS Collaboration",
       journal        = "Phys.Rev.",
       number         = "11",
       volume         = "D87",
       pages          = "112003",
       doi            = "10.1103/PhysRevD.87.112003",
       year           = "2013",
       eprint         = "1302.1283",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CERN-PH-EP-2012-345",
       SLACcitation   = "%%CITATION = ARXIV:1302.1283;%%",
 }'
diff --git a/data/anainfo/ATLAS_2013_I1217863_W_MU.info b/data/anainfo/ATLAS_2013_I1217863_W_MU.info
--- a/data/anainfo/ATLAS_2013_I1217863_W_MU.info
+++ b/data/anainfo/ATLAS_2013_I1217863_W_MU.info
@@ -1,45 +1,45 @@
 Name: ATLAS_2013_I1217863_W_MU
 Year: 2013
 Summary: W + gamma production at 7 TeV
 Experiment: ATLAS
 Collider: LHC
-InspireID: 121786
+InspireID: 1217863
 Status: VALIDATED
 Authors:
  - Chritian Gutschow <chris.g@cern.ch>
 References:
  - Phys.Rev. D87 (2013) 112003
  - doi:10.1103/PhysRevD.87.112003
  - arXiv:1302.1283 [hep-ex]
 RunInfo:
   W+gamma in the muon channel
 NumEvents: 1000000
 Beams: [p+, p+]
 Energies: [7000]
 PtCuts: [25,15]
 NeedCrossSection: True
 Description:
   Measurements of the differential fiducial cross sections for the production of a W boson in association with a high-energy
   photon are measured using pp collisions at \sqrt{s}=7 TeV. The analysis uses a data sample with an integrated luminosity of 
   4.6 fb-1 collected by the ATLAS detector during the 2011 LHC data-taking period. Events are selected using leptonic decays 
   of the W bosons with the requirement of an associated isolated photon. The default routine will consider the electron decay
   channel of the W boson. Use ATLAS_2013_I1217863_W_EL and ATLAS_2013_I1217863_W_MU to specify the decay channel directly.
 BibKey: Aad:2013iz
 BibTeX: '@article{Aad:2013izg,
       author         = "Aad, Georges and others",
       title          = "{Measurements of Wγ and Zγ production in pp collisions
                         at $\sqrt{s}$=7  TeV with the ATLAS detector at the
                         LHC}",
       collaboration  = "ATLAS Collaboration",
       journal        = "Phys.Rev.",
       number         = "11",
       volume         = "D87",
       pages          = "112003",
       doi            = "10.1103/PhysRevD.87.112003",
       year           = "2013",
       eprint         = "1302.1283",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CERN-PH-EP-2012-345",
       SLACcitation   = "%%CITATION = ARXIV:1302.1283;%%",
 }'
diff --git a/data/anainfo/ATLAS_2013_I1217863_Z.info b/data/anainfo/ATLAS_2013_I1217863_Z.info
--- a/data/anainfo/ATLAS_2013_I1217863_Z.info
+++ b/data/anainfo/ATLAS_2013_I1217863_Z.info
@@ -1,45 +1,45 @@
 Name: ATLAS_2013_I1217863_Z
 Year: 2013
 Summary: Z + gamma production at 7 TeV
 Experiment: ATLAS
 Collider: LHC
-InspireID: 121786
+InspireID: 1217863
 Status: VALIDATED
 Authors:
  - Chritian Gutschow <chris.g@cern.ch>
 References:
  - Phys.Rev. D87 (2013) 112003
  - doi:10.1103/PhysRevD.87.112003
  - arXiv:1302.1283 [hep-ex]
 RunInfo:
   Z+gamma in the electron channel
 NumEvents: 1000000
 Beams: [p+, p+]
 Energies: [7000]
 PtCuts: [25,15]
 NeedCrossSection: True
 Description:
   Measurements of the differential fiducial cross sections for the production of a Z boson in association with a high-energy
   photon are measured using pp collisions at \sqrt{s}=7 TeV. The analysis uses a data sample with an integrated luminosity of 
   4.6 fb-1 collected by the ATLAS detector during the 2011 LHC data-taking period. Events are selected using leptonic decays 
   of the Z bosons with the requirement of an associated isolated photon. The default routine will consider the electron decay
   channel of the Z boson. Use ATLAS_2013_I1217863_Z_EL and ATLAS_2013_I1217863_Z_MU to specify the decay channel directly.
 BibKey: Aad:2013iz
 BibTeX: '@article{Aad:2013izg,
       author         = "Aad, Georges and others",
       title          = "{Measurements of Wγ and Zγ production in pp collisions
                         at $\sqrt{s}$=7  TeV with the ATLAS detector at the
                         LHC}",
       collaboration  = "ATLAS Collaboration",
       journal        = "Phys.Rev.",
       number         = "11",
       volume         = "D87",
       pages          = "112003",
       doi            = "10.1103/PhysRevD.87.112003",
       year           = "2013",
       eprint         = "1302.1283",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CERN-PH-EP-2012-345",
       SLACcitation   = "%%CITATION = ARXIV:1302.1283;%%",
 }'
diff --git a/data/anainfo/ATLAS_2013_I1217863_Z_EL.info b/data/anainfo/ATLAS_2013_I1217863_Z_EL.info
--- a/data/anainfo/ATLAS_2013_I1217863_Z_EL.info
+++ b/data/anainfo/ATLAS_2013_I1217863_Z_EL.info
@@ -1,45 +1,45 @@
 Name: ATLAS_2013_I1217863_Z_EL
 Year: 2013
 Summary: Z + gamma production at 7 TeV
 Experiment: ATLAS
 Collider: LHC
-InspireID: 121786
+InspireID: 1217863
 Status: VALIDATED
 Authors:
  - Chritian Gutschow <chris.g@cern.ch>
 References:
  - Phys.Rev. D87 (2013) 112003
  - doi:10.1103/PhysRevD.87.112003
  - arXiv:1302.1283 [hep-ex]
 RunInfo:
   Z+gamma in the electron channel
 NumEvents: 1000000
 Beams: [p+, p+]
 Energies: [7000]
 PtCuts: [25,15]
 NeedCrossSection: True
 Description:
   Measurements of the differential fiducial cross sections for the production of a Z boson in association with a high-energy
   photon are measured using pp collisions at \sqrt{s}=7 TeV. The analysis uses a data sample with an integrated luminosity of 
   4.6 fb-1 collected by the ATLAS detector during the 2011 LHC data-taking period. Events are selected using leptonic decays 
   of the Z bosons with the requirement of an associated isolated photon. The default routine will consider the electron decay
   channel of the Z boson. Use ATLAS_2013_I1217863_Z_EL and ATLAS_2013_I1217863_Z_MU to specify the decay channel directly.
 BibKey: Aad:2013iz
 BibTeX: '@article{Aad:2013izg,
       author         = "Aad, Georges and others",
       title          = "{Measurements of Wγ and Zγ production in pp collisions
                         at $\sqrt{s}$=7  TeV with the ATLAS detector at the
                         LHC}",
       collaboration  = "ATLAS Collaboration",
       journal        = "Phys.Rev.",
       number         = "11",
       volume         = "D87",
       pages          = "112003",
       doi            = "10.1103/PhysRevD.87.112003",
       year           = "2013",
       eprint         = "1302.1283",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CERN-PH-EP-2012-345",
       SLACcitation   = "%%CITATION = ARXIV:1302.1283;%%",
 }'
diff --git a/data/anainfo/ATLAS_2013_I1217863_Z_MU.info b/data/anainfo/ATLAS_2013_I1217863_Z_MU.info
--- a/data/anainfo/ATLAS_2013_I1217863_Z_MU.info
+++ b/data/anainfo/ATLAS_2013_I1217863_Z_MU.info
@@ -1,45 +1,45 @@
 Name: ATLAS_2013_I1217863_Z_MU
 Year: 2013
 Summary: Z + gamma production at 7 TeV
 Experiment: ATLAS
 Collider: LHC
-InspireID: 121786
+InspireID: 1217863
 Status: VALIDATED
 Authors:
  - Chritian Gutschow <chris.g@cern.ch>
 References:
  - Phys.Rev. D87 (2013) 112003
  - doi:10.1103/PhysRevD.87.112003
  - arXiv:1302.1283 [hep-ex]
 RunInfo:
   Z+gamma in the muon channel
 NumEvents: 1000000
 Beams: [p+, p+]
 Energies: [7000]
 PtCuts: [25,15]
 NeedCrossSection: True
 Description:
   Measurements of the differential fiducial cross sections for the production of a Z boson in association with a high-energy
   photon are measured using pp collisions at \sqrt{s}=7 TeV. The analysis uses a data sample with an integrated luminosity of 
   4.6 fb-1 collected by the ATLAS detector during the 2011 LHC data-taking period. Events are selected using leptonic decays 
   of the Z bosons with the requirement of an associated isolated photon. The default routine will consider the electron decay
   channel of the Z boson. Use ATLAS_2013_I1217863_Z_EL and ATLAS_2013_I1217863_Z_MU to specify the decay channel directly.
 BibKey: Aad:2013iz
 BibTeX: '@article{Aad:2013izg,
       author         = "Aad, Georges and others",
       title          = "{Measurements of Wγ and Zγ production in pp collisions
                         at $\sqrt{s}$=7  TeV with the ATLAS detector at the
                         LHC}",
       collaboration  = "ATLAS Collaboration",
       journal        = "Phys.Rev.",
       number         = "11",
       volume         = "D87",
       pages          = "112003",
       doi            = "10.1103/PhysRevD.87.112003",
       year           = "2013",
       eprint         = "1302.1283",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CERN-PH-EP-2012-345",
       SLACcitation   = "%%CITATION = ARXIV:1302.1283;%%",
 }'
diff --git a/data/anainfo/ATLAS_2013_I1243871.info b/data/anainfo/ATLAS_2013_I1243871.info
--- a/data/anainfo/ATLAS_2013_I1243871.info
+++ b/data/anainfo/ATLAS_2013_I1243871.info
@@ -1,32 +1,32 @@
 Name: ATLAS_2013_I1243871
 Year: 2013
 Summary: Measurement of jet shapes in top quark pair events at $\sqrt{s} = 7$ TeV with ATLAS
 Experiment: ATLAS
 Collider: LHC
-SpiresID: 1243871
+InspireID: 1243871
 Status: VALIDATED
 Authors:
  - Javier Llorente <javier.llorente.merino@cern.ch>
 References:
  - arXiv:1307.5749 [hep-ex]
 RunInfo: Top quark pair production in $pp$ collisions at $\sqrt{s} = 7$ TeV
 NumEvents: 1000000
 NeedCrossSection: no
 Beams: [p+, p+]
 Energies: [7000]
 PtCuts: pT(jets) > 30 GeV
 Description:
   'Measurement of jet shapes in top pair events in the ATLAS 7 TeV run. b-jets are shown to
    have a wider energy density distribution than light-quark induced jets.'
 BibKey: Aad:2013fba
 BibTeX: '@article{Aad:2013fba,
       author         = "Aad, Georges and others",
       title          = "{Measurement of jet shapes in top pair events at $\sqrt{s} = 7$ TeV using the ATLAS detector}",
       collaboration  = "ATLAS Collaboration",
       year           = "2013",
       eprint         = "1307.5749",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CERN-PH-EP-2013-067",
       SLACcitation   = "%%CITATION = ARXIV:1307.5749;%%",
 }'
diff --git a/data/anainfo/ATLAS_2014_I1268975.info b/data/anainfo/ATLAS_2014_I1268975.info
--- a/data/anainfo/ATLAS_2014_I1268975.info
+++ b/data/anainfo/ATLAS_2014_I1268975.info
@@ -1,59 +1,59 @@
 Name: ATLAS_2014_I1268975
 Year: 2014
 Summary: High-mass dijet cross section
 Experiment: ATLAS
 Collider: LHC
-SpiresID: 1268975
+InspireID: 1268975
 Status: VALIDATED
 Authors:
  - Christopher Meyer <chris.meyer@cern.ch>
 References:
  - arXiv:1312.3524 [hep-ex]
  - JHEP 1405 (2014) 059
 RunInfo:
   QCD jet production with a minimum leading jet pT of 100 GeV and minimum second jet pT of 50 GeV at 7 TeV.
 NeedCrossSection: True
 NumEvents: 10000000
 Beams: [p+, p+]
 Energies: [7000]
 PtCuts: [100,50]
 Description:
   'Double-differential dijet cross sections measured in pp collisions
    at the LHC with a 7 TeV centre-of-mass energy are presented as
    functions of dijet mass and rapidity separation of the two highest-pT
    jets. These measurements are obtained using data corresponding to an
    integrated luminosity of 4.5/fb, recorded by the ATLAS detector in
    2011. The data are corrected for detector effects so that cross
    sections are presented at the particle level. Cross sections are
    measured up to 5 TeV dijet mass using jets reconstructed with the
    anti-kt algorithm for values of the jet radius parameter of 0.4 and
    0.6. The cross sections are compared with next-to-leading-order
    perturbative QCD calculations by NLOJET++ corrected to account for
    non-perturbative effects. Comparisons with POWHEG predictions, using a
    next-to-leading-order matrix element calculation interfaced to a
    parton-shower Monte Carlo simulation, are also shown. Electroweak
    effects are accounted for in both cases. The quantitative comparison
    of data and theoretical predictions obtained using various
    parameterizations of the parton distribution functions is performed
    using a frequentist method. An example setting a lower limit on the
    compositeness scale for a model of contact interactions is presented,
    showing that the unfolded results can be used to constrain
    contributions to dijet production beyond that predicted by the
    Standard Model.'
 BibKey: Aad:2013tea
 BibTeX: '@article{Aad:2013tea,
       author         = "Aad, Georges and others",
       title          = "{Measurement of dijet cross sections in $pp$ collisions
                         at 7 TeV centre-of-mass energy using the ATLAS detector}",
       collaboration  = "ATLAS Collaboration",
       journal        = "JHEP",
       volume         = "1405",
       pages          = "059",
       doi            = "10.1007/JHEP05(2014)059",
       year           = "2014",
       eprint         = "1312.3524",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CERN-PH-EP-2013-192",
       SLACcitation   = "%%CITATION = ARXIV:1312.3524;%%",
 }'
diff --git a/data/anainfo/ATLAS_2014_I1282441.info b/data/anainfo/ATLAS_2014_I1282441.info
--- a/data/anainfo/ATLAS_2014_I1282441.info
+++ b/data/anainfo/ATLAS_2014_I1282441.info
@@ -1,40 +1,40 @@
 Name: ATLAS_2014_I1282441
 Year: 2014
 Summary: The differential production cross section of the $\phi(1020)$ meson in $\sqrt{s}=7$ TeV $pp$ collisions measured with the ATLAS detector
 Experiment: ATLAS
 Collider: LHC
-SpiresID: 1282441
+InspireID: 1282441
 Status: VALIDATED
 Authors:
  - Tim Martin <tim.martin@cern.ch>
  - Kiran Joshi <kiran.joshi@cern.ch>
 References:
  - arXiv:1402.6162 [hep-ex]
 RunInfo:
   Run minimum bias events
 NumEvents: 1000k
 Beams: [p+, p+]
 Energies: [7000]
 PtCuts: [0.5, 1.2]
 NeedsCrossSection: yes
 Description:
   'A measurement is presented of the $\phi \rightarrow K^+K^-$ production cross section at $\sqrt{s}$ = 7 TeV using pp collision data
   corresponding to an integrated luminosity of 383 $\mu$b$^{-1}$ collected with the ATLAS experiment at the LHC. Selection of $\phi(1020)$
   mesons is based on the identification of charged kaons by their energy loss in the pixel detector. The differential cross section is
   measured as a function of the transverse momentum, $pT,\phi$, and rapidity, $y_\phi$, of the $\phi(1020)$ meson in the fiducial region
   500 < $pT,\phi$ < 1200 MeV, |$y_\phi$| < 0.8, kaon $pT,K$ > 230 MeV and kaon momentum $p_K$ < 800 MeV. The integrated $\phi(1020)$
   production cross section in this fiducial range is measured to be $\sigma_{\phi \rightarrow K^+K^-}$ = 570 \pm 8 (stat) \pm 66 (syst) \pm 20 (lumi) $\mu$b.'
 BibKey: Aad:2014rca
 BibTeX: '@article{Aad:2014rca,
       author         = "Aad, Georges and others",
       title          = "{The differential production cross section of the
                         $\phi(1020)$ meson in $\sqrt{s}$ = 7 TeV $pp$ collisions
                         measured with the ATLAS detector}",
       collaboration  = "ATLAS Collaboration",
       year           = "2014",
       eprint         = "1402.6162",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CERN-PH-EP-2012-269",
       SLACcitation   = "%%CITATION = ARXIV:1402.6162;%%",
 }'
diff --git a/data/anainfo/ATLAS_2014_I1327229.info b/data/anainfo/ATLAS_2014_I1327229.info
--- a/data/anainfo/ATLAS_2014_I1327229.info
+++ b/data/anainfo/ATLAS_2014_I1327229.info
@@ -1,44 +1,44 @@
 Name: ATLAS_2014_I1327229
 Year: 2014
 Summary: Inclusive multilepton search at 8 TeV
 Experiment: ATLAS
 Collider: LHC
 InspireID: 1327229
 Status: VALIDATED
 Authors:
  - Joern Mahlstedt <joern.mahlstedt@cern.ch>
 References:
  - arXiv:1411.2921 [hep-ex]
 RunInfo:
-  any process producing at least 3 leptons (e.g. pair production of doubly-charged Higgs or excited leptons)
+  Any process producing at least 3 leptons (e.g. pair production of doubly-charged Higgs or excited leptons)
 Beams: [p+, p+]
 Energies: [8000]
 PtCuts: [1,5,10,10]
 NeedCrossSection: True
 Description:
   A generic search for anomalous production of events with at least three charged leptons is presented. The data sample consists of
-  $pp$ collisions at $\sqrt{s} = 8$\,TeV collected in 2012 by the ATLAS experiment at the CERN Large Hadron Collider, and corresponds 
-  to an integratedi luminosity of 20.3\,$\text{fb}^{−1}$. Events are required to have at least three selected lepton candidates, at 
-  least two of which must be electrons or muons, while the third may be a hadronically decaying tau. Selected events are categorized 
-  based on their lepton flavour content and signal regions are constructed using several kinematic variables of interest. No significant 
-  deviations from Standard Model predictions are observed. Model-independent upper limits on contributions from beyond the Standard Model 
-  phenomena are provided for each signal region, along with prescription to re-interpret the limits for any model. Constraints are also 
-  placed on models predicting doubly charged Higgs bosons and excited leptons. For doubly charged Higgs bosons decaying to $e\tau$ or 
-  $\muon\tau$, lower limits on the mass are set at 400\,GeV at 95\,\% confidence level. For excited leptons, constraints are provided as 
-  functions of both the mass of the excited state and the compositeness scale $\Lambda$, with the strongest mass constraints arising in 
+  $pp$ collisions at $\sqrt{s} = 8$\,TeV collected in 2012 by the ATLAS experiment at the CERN Large Hadron Collider, and corresponds
+  to an integrated luminosity of 20.3\,$\text{fb}^{−1}$. Events are required to have at least three selected lepton candidates, at
+  least two of which must be electrons or muons, while the third may be a hadronically decaying tau. Selected events are categorized
+  based on their lepton flavour content and signal regions are constructed using several kinematic variables of interest. No significant
+  deviations from Standard Model predictions are observed. Model-independent upper limits on contributions from beyond the Standard Model
+  phenomena are provided for each signal region, along with a prescription to re-interpret the limits for any model. Constraints are also
+  placed on models predicting doubly charged Higgs bosons and excited leptons. For doubly charged Higgs bosons decaying to $e\tau$ or
+  $\mu\tau$, lower limits on the mass are set at 400\,GeV at 95\,\% confidence level. For excited leptons, constraints are provided as
+  functions of both the mass of the excited state and the compositeness scale $\Lambda$, with the strongest mass constraints arising in
   regions where the mass equals $\Lambda$. In such scenarios, lower mass limits are set at 3.0\,TeV for excited electrons and muons,
   2.5\,TeV for excited taus, and 1.6\,TeV for every excited-neutrino flavour.
 BibKey: Aad:2014hja
 BibTeX: '@article{Aad:2014hja,
       author         = "Aad, Georges and others",
       title          = "{Search for new phenomena in events with three or more
                         charged leptons in $pp$ collisions at $\sqrt{s}=8$ TeV
                         with the ATLAS detector}",
       collaboration  = "ATLAS",
       year           = "2014",
       eprint         = "1411.2921",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CERN-PH-EP-2014-255",
       SLACcitation   = "%%CITATION = ARXIV:1411.2921;%%"
 }'
diff --git a/data/anainfo/ATLAS_2016_CONF_2016_037.info b/data/anainfo/ATLAS_2016_CONF_2016_037.info
new file mode 100644
--- /dev/null
+++ b/data/anainfo/ATLAS_2016_CONF_2016_037.info
@@ -0,0 +1,23 @@
+Name: ATLAS_2016_CONF_2016_037
+Year: 2016
+Summary: Search for SUSY in events with 2 same-sign leptons or 3 leptons, at 13~\TeV
+Experiment: ATLAS
+Collider: LHC
+Status: UNVALIDATED
+Authors:
+ - Andy Buckley <andy.buckley@cern.ch>
+RunInfo: BSM signal events
+NumEvents: 100000
+NeedCrossSection: no
+Beams: [p+, p+]
+Energies: [13000]
+Description:
+  'A search for strongly produced supersymmetric particles using signatures
+   involving multiple energetic jets and either two isolated same-sign leptons
+   ($e$ or $\mu$) or at least three isolated leptons. The analysis also utilises
+   other observables, such as $b$-tagged jets or missing transverse momentum,
+   to extend its sensitivity. A data sample of proton--proton collisions at
+   $\sqrt{s} = 13~\TeV$ recorded with the ATLAS detector at the Large Hadron Collider
+   in 2015 and 2016, corresponding to a total integrated luminosity of 13.2/fb,
+   is used for the search. No significant excess over the Standard Model
+   expectation is observed.'
diff --git a/data/anainfo/ATLAS_2016_CONF_2016_054.info b/data/anainfo/ATLAS_2016_CONF_2016_054.info
new file mode 100644
--- /dev/null
+++ b/data/anainfo/ATLAS_2016_CONF_2016_054.info
@@ -0,0 +1,19 @@
+Name: ATLAS_2016_CONF_2016_054
+Year: 2016
+Summary: ATLAS 2016 1-lepton SUSY search at 13~\TeV, from 14.8/fb CONF note
+Experiment: ATLAS
+Collider: LHC
+Status: UNVALIDATED
+Authors:
+ - Andy Buckley <andy.buckley@cern.ch>
+RunInfo: BSM signal events
+NumEvents: 100000
+NeedCrossSection: no
+Beams: [p+, p+]
+Energies: [13000]
+Description:
+  'A search for squarks and gluinos in final states with an isolated electron or
+  muon, multiple jets and large missing transverse momentum using proton--proton
+  collision data at a centre-of-mass energy of $\sqrt{s} = 13~\TeV$. The dataset
+  corresponds to an integrated luminosity of 14.8/fb, recorded in 2015 and 2016
+  by the ATLAS experiment at the Large Hadron Collider.'
diff --git a/data/anainfo/ATLAS_2016_CONF_2016_078.info b/data/anainfo/ATLAS_2016_CONF_2016_078.info
new file mode 100644
--- /dev/null
+++ b/data/anainfo/ATLAS_2016_CONF_2016_078.info
@@ -0,0 +1,17 @@
+Name: ATLAS_2016_CONF_2016_078
+Year: 2016
+Summary: ATLAS ICHEP16 0-lepton SUSY search at 13~\TeV with 13.2/fb
+Experiment: ATLAS
+Collider: LHC
+Status: UNVALIDATED
+Authors:
+ - Andy Buckley <andy.buckley@cern.ch>
+RunInfo: BSM signal events.
+NumEvents: 10000
+NeedCrossSection: yes
+Beams: [p+, p+]
+Energies: [13000]
+Description:
+  'ATLAS search for SUSY in 13~TeV $pp$ collisions at LHC Run~2, using 13.2/fb of
+  integrated luminosity and events containing missing transverse momentum and
+  no isolated high-energy leptons.'
diff --git a/data/anainfo/ATLAS_2016_CONF_2016_094.info b/data/anainfo/ATLAS_2016_CONF_2016_094.info
new file mode 100644
--- /dev/null
+++ b/data/anainfo/ATLAS_2016_CONF_2016_094.info
@@ -0,0 +1,18 @@
+Name: ATLAS_2016_CONF_2016_094
+Year: 2016
+Summary: ATLAS 2016 1-lepton + many jets SUSY search at 13~\TeV, from 14.8/fb CONF note
+Experiment: ATLAS
+Collider: LHC
+Status: UNVALIDATED
+Authors:
+ - Andy Buckley <andy.buckley@cern.ch>
+RunInfo: BSM signal events
+NumEvents: 100000
+NeedCrossSection: no
+Beams: [p+, p+]
+Energies: [13000]
+Description:
+  'A search for new phenomena in final states characterized by high jet multiplicity,
+  an isolated lepton (electron or muon), and either zero or at least three $b$-tagged
+  jets. The search uses 14.8/fb of $\sqrt{s} = 13~\TeV$ proton--proton collision
+  data collected by the ATLAS experiment at the Large Hadron Collider in 2015 and 2016.'
diff --git a/data/anainfo/ATLAS_2016_I1419652.info b/data/anainfo/ATLAS_2016_I1419652.info
--- a/data/anainfo/ATLAS_2016_I1419652.info
+++ b/data/anainfo/ATLAS_2016_I1419652.info
@@ -1,42 +1,42 @@
 Name: ATLAS_2016_I1419652
 Year: 2016
 Summary: Track-based minimum bias at 13 TeV in ATLAS
 Experiment: ATLAS
 Collider: LHC
-SpiresID: I1419652
+InspireID: 1419652
 Status: VALIDATED
 Authors:
  - Roman Lysak <roman.lysak@cern.ch>
 References:
  - arXiv:1602.01633
  - doi:10.1016/j.physletb.2016.04.050
  - Physics Letters B (2016), Vol. 758, pp. 67-88
 RunInfo:
   $pp$ QCD interactions at 13 TeV. Diffractive events should
   be included. Multiple kinematic cuts should not be required.
 Beams: [p+, p+]
 Energies: [13000]
 PtCuts: [0.5]
 Description:
   Measurements from proton-proton collisions at centre-of-mass energy
   of $\sqrt{s} = 13$ TeV recorded with the ATLAS detector at
   the LHC. Events were collected using a single-arm minimum-bias
   trigger. The charged-particle multiplicity, its dependence on
   transverse momentum and pseudorapidity and the relationship between
   the mean transverse momentum and charged-particle multiplicity are
   measured. The observed distributions are corrected to well-defined
   phase-space regions ($p_\text{T} > 500$ MeV and $|\eta| < 2.5$ of the particles),
   using model-independent corrections.
 BibKey: Aad:2016mok
 BibTeX: '@article{Aad:2016mok,
       author         = "Aad, Georges and others",
       title          = "{Charged-particle distributions in $\sqrt{s}=13$ TeV $pp$
                         interactions measured with the ATLAS detector at the LHC}",
       collaboration  = "ATLAS",
       year           = "2016",
       eprint         = "1602.01633",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CERN-EP-2016-014",
       SLACcitation   = "%%CITATION = ARXIV:1602.01633;%%"
 }'
diff --git a/data/anainfo/ATLAS_2016_I1426695.info b/data/anainfo/ATLAS_2016_I1426695.info
--- a/data/anainfo/ATLAS_2016_I1426695.info
+++ b/data/anainfo/ATLAS_2016_I1426695.info
@@ -1,46 +1,46 @@
 Name: ATLAS_2016_I1426695
 Year: 2016
 Summary: Track-based minimum bias at 8 TeV
 Experiment: ATLAS
 Collider: LHC
-SpiresID: 1426695
+InspireID: 1426695
 Status: VALIDATED
 Authors:
  - Roman Lysak <roman.lysak@cern.ch>
 References:
  - Eur.Phys.J. C76 (2016) no.7, 403
  - doi:10.1140/epjc/s10052-016-4203-9
  - arXiv:1603.02439 [hep-ex]
 RunInfo:
   $pp$ QCD interactions at 8 TeV. Diffractive events should
   be included. Multiple kinematic cuts should not be required.
 Beams: [p+, p+]
 Energies: [8000]
 PtCuts: [0.1, 0.5] 
 Description:
     This paper presents measurements of distributions of charged particles which are produced in proton-proton collisions at a
     centre-of-mass energy of $\sqrt{s} = 8$ TeV and recorded by the ATLAS detector at the LHC. A special dataset recorded in 2012 
     with a small number of interactions per beam crossing (below 0.004) and corresponding to an integrated luminosity of 
     160 $\mu\text{b}^{-1}$ was used. A minimum-bias trigger was utilised to select a data sample of more than 9 million collision 
     events. The multiplicity, pseudorapidity, and transverse momentum distributions of charged particles are shown in different 
     regions of kinematics and charged-particle multiplicity, including measurements of final states at high multiplicity. The results are
     corrected for detector effects and are compared to the predictions of various Monte Carlo event generator models which simulate
     the full hadronic final state.
 BibKey: Aad:2016xww
 BibTeX: '@article{Aad:2016xww,
       author         = "Aad, Georges and others",
       title          = "{Charged-particle distributions in $pp$ interactions at
                         $\sqrt{s}=$ 8 TeV measured with the ATLAS detector}",
       collaboration  = "ATLAS",
       journal        = "Eur. Phys. J.",
       volume         = "C76",
       year           = "2016",
       number         = "7",
       pages          = "403",
       doi            = "10.1140/epjc/s10052-016-4203-9",
       eprint         = "1603.02439",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CERN-EP-2016-020",
       SLACcitation   = "%%CITATION = ARXIV:1603.02439;%%"
 }'
diff --git a/data/anainfo/ATLAS_2016_I1444991.info b/data/anainfo/ATLAS_2016_I1444991.info
--- a/data/anainfo/ATLAS_2016_I1444991.info
+++ b/data/anainfo/ATLAS_2016_I1444991.info
@@ -1,48 +1,48 @@
 Name: ATLAS_2016_I1444991
 Year: 2016
 Summary: Higgs-to-WW differential cross sections at 8 TeV
 Experiment: ATLAS
 Collider: LHC
-SpiresID: 1444991
+InspireID: 1444991
 Status: VALIDATED
 Authors:
  - Kathrin Becker <kathrin.becker@cern.ch>
 References: 
  - JHEP 1608 (2016) 104
  - doi:10.1007/JHEP08(2016)104
  - arXiv:1604.02997 [hep-ex]
 RunInfo:
   gg -> H -> W W* -> enu munu production at 8 TeV
 Beams: [p+, p+]
 Energies: [8000]
 PtCuts: [25]
 NeedCrossSection: yes
 Description:
   This paper describes a measurement of fiducial and differential cross sections of gluon-fusion Higgs boson production in the 
   $H\rightarrow W W^\ast \rightarrow e\nu\mu\nu$ channel, using 20.3 fb$^{-1}$ of proton-proton collision data. The data were 
   produced at a centre-of-mass energy of $\sqrt{s} = 8$ TeV at the CERN Large Hadron Collider and recorded by the ATLAS detector 
   in 2012. Cross sections are measured from the observed $H\rightarrow W W^\ast \rightarrow e\nu\mu\nu$ signal yield in categories 
   distinguished by the number of associated jets. The total cross section is measured in a fiducial region defined by the kinematic 
   properties of the charged leptons and neutrinos. Differential cross sections are reported as a function of the number of jets, 
   the Higgs boson transverse momentum, the dilepton rapidity, and the transverse momentum of the leading jet. The jet-veto efficiency, 
   or fraction of events with no jets above a given transverse momentum threshold, is also reported. All measurements are compared to 
   QCD predictions from Monte Carlo generators and fixed-order calculations, and are in agreement with the Standard Model predictions.
 BibKey: Aad:2016lvc
 BibTeX: '@article{Aad:2016lvc,
       author         = "Aad, Georges and others",
       title          = "{Measurement of fiducial differential cross sections of
                         gluon-fusion production of Higgs bosons decaying to
                         $WW^{*} \rightarrow e\nu\mu\nu$ with the ATLAS detector at $
                         \sqrt{s}=8 $ TeV}",
       collaboration  = "ATLAS",
       journal        = "JHEP",
       volume         = "08",
       year           = "2016",
       pages          = "104",
       doi            = "10.1007/JHEP08(2016)104",
       eprint         = "1604.02997",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CERN-EP-2016-019",
       SLACcitation   = "%%CITATION = ARXIV:1604.02997;%%"
 }'
diff --git a/data/anainfo/ATLAS_2016_I1452559.info b/data/anainfo/ATLAS_2016_I1452559.info
--- a/data/anainfo/ATLAS_2016_I1452559.info
+++ b/data/anainfo/ATLAS_2016_I1452559.info
@@ -1,49 +1,50 @@
 Name: ATLAS_2016_I1452559
 Year: 2016
 Summary: Monojet + missing energy search with 3.2/fb of 13 TeV $pp$ data
 Experiment: ATLAS
 Collider: LHC
 InspireID: 1452559
 Status: UNVALIDATED
 Authors:
  - Andy Buckley <andy.buckley@cern.ch>
+ - Shehu AbdusSalam <shehu@cantab.net>
 References:
  - 'Phys.Rev. D94 (2016) no.3, 032005'
  - 'DOI: 10.1103/PhysRevD.94.032005'
  - 'CERN-EP-2016-075'
  - 'arXiv:1604.07773'
 RunInfo: 'BSM signal events'
 NumEvents: 10000
 NeedCrossSection: no
 Beams: [p+, p+]
 Energies: [13000]
 Description:
   'A search for new phenomena in final states with an energetic jet and large missing
   transverse momentum. The search uses proton-proton collision data corresponding to
   an integrated luminosity of 3.2/fb of $pp$ collisions at 13\;\TeV, collected in 2015
   with the ATLAS detector. Events are required to have at least one jet with a
   transverse momentum above 250 GeV and no leptons. Several signal regions are considered
   with increasing missing-transverse-momentum requirements between $E_T^\mathrm{miss} >
   250\;\GeV$ and 700\;\GeV. Good agreement is observed between the number of events
   in data and Standard Model predictions.'
 
 BibKey: Aaboud:2016tnv
 BibTeX: '@article{Aaboud:2016tnv,
       author         = "Aaboud, Morad and others",
       title          = "{Search for new phenomena in final states with an
                         energetic jet and large missing transverse momentum in
                         $pp$ collisions at $\sqrt{s}=13\;\TeV$ using the ATLAS
                         detector}",
       collaboration  = "ATLAS",
       journal        = "Phys. Rev.",
       volume         = "D94",
       year           = "2016",
       number         = "3",
       pages          = "032005",
       doi            = "10.1103/PhysRevD.94.032005",
       eprint         = "1604.07773",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CERN-EP-2016-075",
       SLACcitation   = "%%CITATION = ARXIV:1604.07773;%%"
 }'
diff --git a/data/anainfo/ATLAS_2016_I1458270.info b/data/anainfo/ATLAS_2016_I1458270.info
--- a/data/anainfo/ATLAS_2016_I1458270.info
+++ b/data/anainfo/ATLAS_2016_I1458270.info
@@ -1,40 +1,45 @@
 Name: ATLAS_2016_I1458270
 Year: 2016
 Summary: 0-lepton SUSY search with 3.2/fb of 13 TeV $pp$ data
 Experiment: ATLAS
 Collider: LHC
 InspireID: 1458270
 Status: VALIDATED
 Authors:
  - Andy Buckley <andy.buckley@cern.ch>
 References:
 - 'Eur.Phys.J. C76 (2016) no.7, 392'
 - 'DOI:10.1140/epjc/s10052-016-4184-8'
 - 'arXiv:1605.03814'
+Keywords:
+ - susy
+ - 0lepton
+ - met
 RunInfo: 'BSM signal events'
 NumEvents: 10000
+Luminosity_fb: 3.2
 NeedCrossSection: no
 Beams: [p+, p+]
 Energies: [13000]
 Description:
   'ATLAS 0-lepton SUSY search using 3.2/fb of LHC $pp$ data at 13\;\TeV, recorded in
    2015. The event selection is via signal regions requiring from 2-6 high-energy jets,
    and significant missing transverse energy.'
 BibKey: Aaboud:2016zdn
 BibTeX: '@article{Aaboud:2016zdn,
       author         = "Aaboud, Morad and others",
       title          = "{Search for squarks and gluinos in final states with jets
                         and missing transverse momentum at $\sqrt{s} = 13\;\TeV$ with the ATLAS detector}",
       collaboration  = "ATLAS",
       journal        = "Eur. Phys. J.",
       volume         = "C76",
       year           = "2016",
       number         = "7",
       pages          = "392",
       doi            = "10.1140/epjc/s10052-016-4184-8",
       eprint         = "1605.03814",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CERN-EP-2016-080",
       SLACcitation   = "%%CITATION = ARXIV:1605.03814;%%"
 }'
diff --git a/data/anainfo/BABAR_2013_I1116411.info b/data/anainfo/BABAR_2013_I1116411.info
--- a/data/anainfo/BABAR_2013_I1116411.info
+++ b/data/anainfo/BABAR_2013_I1116411.info
@@ -1,34 +1,37 @@
 Name: BABAR_2013_I1116411
 Year: 2013
 Summary: Exclusive semileptonic Bplus to omega decays.
 Experiment: BABAR
 Collider: PEP-II
 InspireID: 1116411
 Status: VALIDATED
+Keywords:
+ - bdecays
+ - semileptonic
 Authors:
  - Holger Schulz <holger.schulz@cern.ch>
 RunInfo:
   Events with B-decays, either particle guns or collisions.
 NumEvents: 10000
 NeedCrossSection: no
 Description:
   'Implementation of Lorentz invariant $q^2$ distributions ("form factor") for semileptonic Bplus decays'
 BibKey: Lees:2012mq
 BibTeX: '@article{Lees:2012mq,
       author         = "Lees, J. P. and others",
       title          = "{Branching fraction measurement of $B^+ \to \omega \ell^+
                         \nu$ decays}",
       collaboration  = "BaBar",
       journal        = "Phys. Rev.",
       volume         = "D87",
       year           = "2013",
       number         = "3",
       pages          = "032004",
       doi            = "10.1103/PhysRevD.87.099904, 10.1103/PhysRevD.87.032004",
       note           = "[Erratum: Phys. Rev.D87,no.9,099904(2013)]",
       eprint         = "1205.6245",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "SLAC-PUB-15029, BABAR-PUB-12-005",
       SLACcitation   = "%%CITATION = ARXIV:1205.6245;%%"
 }'
diff --git a/data/anainfo/BABAR_2015_I1334693.info b/data/anainfo/BABAR_2015_I1334693.info
--- a/data/anainfo/BABAR_2015_I1334693.info
+++ b/data/anainfo/BABAR_2015_I1334693.info
@@ -1,34 +1,37 @@
 Name: BABAR_2015_I1334693
 Year: 2015
 Summary: Exclusive semileptonic D0 to piminus decays.
 Experiment: BABAR
 Collider: PEP-II
 InspireID: 1334693
 Status: VALIDATED
+Keywords:
+ - ddecays
+ - semileptonic
 Authors:
  - Holger Schulz <holger.schulz@cern.ch>
 RunInfo:
   Events with D-decays, either particle guns or collisions.
 NumEvents: 10000
 NeedCrossSection: no
 Description:
   'Implementation of Lorentz invariant $q^2$ distributions ("form factor") for semileptonic D0 decays'
 BibKey: Lees:2014ihu
 BibTeX: '@article{Lees:2014ihu,
       author         = "Lees, J. P. and others",
       title          = "{Measurement of the $D^0 \to \pi^- e^+ \nu_e$
                         differential decay branching fraction as a function of
                         $q^2$ and study of form factor parameterizations}",
       collaboration  = "BaBar",
       journal        = "Phys. Rev.",
       volume         = "D91",
       year           = "2015",
       number         = "5",
       pages          = "052022",
       doi            = "10.1103/PhysRevD.91.052022",
       eprint         = "1412.5502",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "BABAR-PUB-14-014, SLAC-PUB-16172",
       SLACcitation   = "%%CITATION = ARXIV:1412.5502;%%"
 }'
diff --git a/data/anainfo/BELLE_2011_I878990.info b/data/anainfo/BELLE_2011_I878990.info
--- a/data/anainfo/BELLE_2011_I878990.info
+++ b/data/anainfo/BELLE_2011_I878990.info
@@ -1,32 +1,35 @@
 Name: BELLE_2011_I878990
 Year: 2011
 Summary: Exclusive semileptonic B0 to pi minus decays.
 Experiment: BELLE
 Collider: KEKB
 InspireID: 878990
 Status: VALIDATED
+Keywords:
+ - bdecays
+ - semileptonic
 Authors:
  - Holger Schulz <holger.schulz@cern.ch>
 RunInfo:
   Events with B-decays, either particle guns or collisions.
 NumEvents: 10000
 NeedCrossSection: no
 Description:
   'Implementation of Lorentz invariant $q^2$ distributions ("form factor") for semileptonic B0 decays'
 BibKey: Ha:2010rf
 BibTeX: '@article{Ha:2010rf,
       author         = "Ha, H. and others",
       title          = "{Measurement of the decay $B^0\to\pi^-\ell^+\nu$ and
                         determination of $|V_{ub}|$}",
       collaboration  = "Belle",
       journal        = "Phys. Rev.",
       volume         = "D83",
       year           = "2011",
       pages          = "071101",
       doi            = "10.1103/PhysRevD.83.071101",
       eprint         = "1012.0090",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "BELLE-PREPRINT-2010-22, KEK-PREPRINT-2010-37",
       SLACcitation   = "%%CITATION = ARXIV:1012.0090;%%"
 }'
diff --git a/data/anainfo/BELLE_2013_I1216515.info b/data/anainfo/BELLE_2013_I1216515.info
--- a/data/anainfo/BELLE_2013_I1216515.info
+++ b/data/anainfo/BELLE_2013_I1216515.info
@@ -1,41 +1,44 @@
 Name: BELLE_2013_I1216515
 Summary: Pion and kaon identified particle spectra at $\sqrt{s}=10.52$ GeV
 Status: VALIDATED
 Experiment: Belle
 Collider: KEKB
 Authors:
  - Peter Richardson
 References:
  - Phys.Rev.Lett. 111 (2013) 6, 062002
  - arXiv:1301.6183
  - doi:10.1103/PhysRevLett.111.062002
 
+Keywords:
+ - bdecays
+ - semileptonic
 RunInfo:
   $e^+ e^-$ analysis at 10.52 GeV
 NumEvents: 1000000
 InspireID: 1216515
 Beams: [e+, e-]
 Energies: [[3.5,7.91]]
 PtCuts: [0]
 Description:
   Analysis of the identified particle spectra for charged pions and kaons at 10.52 GeV. This
   is continuum data below the $\Upsilon(4S)$ resonance.
 BibKey: Leitgab:2013qh
 BibTeX: '@article{Leitgab:2013qh,
       author         = "Leitgab, M. and others",
       title          = "{Precision Measurement of Charged Pion and Kaon
                         Differential Cross Sections in e+e- Annihilation at
                         s=10.52  GeV}",
       collaboration  = "Belle Collaboration",
       journal        = "Phys.Rev.Lett.",
       number         = "6",
       volume         = "111",
       pages          = "062002",
       doi            = "10.1103/PhysRevLett.111.062002",
       year           = "2013",
       eprint         = "1301.6183",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "BELLE-PREPRINT-2013-2, KEK-PREPRINT-2012-38",
       SLACcitation   = "%%CITATION = ARXIV:1301.6183;%%",
 }'
diff --git a/data/anainfo/BELLE_2015_I1397632.info b/data/anainfo/BELLE_2015_I1397632.info
--- a/data/anainfo/BELLE_2015_I1397632.info
+++ b/data/anainfo/BELLE_2015_I1397632.info
@@ -1,34 +1,37 @@
 Name: BELLE_2015_I1397632
 Year: 2015
 Summary: Exclusive semileptonic B to D decays.
 Experiment: BELLE
 Collider: KEKB
 InspireID: 1397632
 Status: VALIDATED
 Authors:
  - Holger Schulz <holger.schulz@cern.ch>
 RunInfo:
   Events with B-decays, either particle guns or collisions.
+Keywords:
+ - bdecays
+ - semileptonic
 NumEvents: 10000
 NeedCrossSection: no
 Description:
   'Implementation of Lorentz invariant recoil w distributions for semileptonic B decays'
 BibKey: Glattauer:2015teq
 BibTeX: '@article{Glattauer:2015teq,
       author         = "Glattauer, R. and others",
       title          = "{Measurement of the decay $B\to D\ell\nu_\ell$ in fully
                         reconstructed events and determination of the
                         Cabibbo-Kobayashi-Maskawa matrix element $|V_{cb}|$}",
       collaboration  = "Belle",
       journal        = "Phys. Rev.",
       volume         = "D93",
       year           = "2016",
       number         = "3",
       pages          = "032006",
       doi            = "10.1103/PhysRevD.93.032006",
       eprint         = "1510.03657",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "BELLE-PREPRINT-2015-16, KEK-PREPRINT-2015-43",
       SLACcitation   = "%%CITATION = ARXIV:1510.03657;%%"
 }'
diff --git a/data/anainfo/CMS_2012_I1090423.info b/data/anainfo/CMS_2012_I1090423.info
--- a/data/anainfo/CMS_2012_I1090423.info
+++ b/data/anainfo/CMS_2012_I1090423.info
@@ -1,38 +1,38 @@
 Name: CMS_2012_I1090423
 Year: 2012
 Summary: Dijet angular distributions in $pp$ collisions at $\sqrt{s} = 7$ TeV
 Experiment: CMS
 Collider: LHC
-SpiresID: 1090423
+InspireID: 1090423
 Status: VALIDATED
 Authors:
  - A. Hinzmann
 References:
  - JHEP 05 (2012) 055
  - doi:10.1007/JHEP05(2012)055
  - arXiv:hep-ex/1202.5535
 Beams: [p+, p+]
 Energies: [7000]
 Description:
    'Measurement of dijet angular distributions in proton-proton collisions at a
    center-of-mass energy of 7 TeV. The data sample has a total integrated
    luminosity of 2.2 inverse femtobarns, recorded by the CMS experiment at the LHC.
    Normalized dijet angular distributions have been measured for dijet invariant
    masses from 0.4 TeV to above 3 TeV.'
 BibKey: Chatrchyan:2012bf
 BibTeX: '@article{Chatrchyan:2012bf,
       author         = "Chatrchyan, Serguei and others",
       title          = "{Search for quark compositeness in dijet angular
                         distributions from $pp$ collisions at $\sqrt{s}=7$ TeV}",
       collaboration  = "CMS Collaboration",
       journal        = "JHEP",
       volume         = "1205",
       pages          = "055",
       doi            = "10.1007/JHEP05(2012)055",
       year           = "2012",
       eprint         = "1202.5535",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CMS-EXO-11-017, CERN-PH-EP-2012-044",
       SLACcitation   = "%%CITATION = ARXIV:1202.5535;%%",
 }'
diff --git a/data/anainfo/CMS_2012_I941555.info b/data/anainfo/CMS_2012_I941555.info
--- a/data/anainfo/CMS_2012_I941555.info
+++ b/data/anainfo/CMS_2012_I941555.info
@@ -1,50 +1,50 @@
 Name: CMS_2012_I941555
 Year: 2010
 Summary: Measurement of differential $Z/\gamma^*$ $p_T$ and y
 Experiment: CMS
 Collider: LHC
-SpiresID: 941555
+InspireID: 941555
 Status: VALIDATED
 Authors:
  - Luca Perrozzi <luca.perrozzi@cern.ch>
  - Justin Hugon <justin.hugon@cern.ch>
 References:
  - Phys.Rev. D85 (2012) 032002
  - arXiv:1110.4973
  - CMS-EWK-10-010
  - CERN-PH-EP-2011-169
 RunInfo:
   $p p \to \mu^+ \mu^-$+X 7 TeV.
   Needs mass cut on lepton pair to avoid photon singularity,
   restrict $Z/\gamma^*$ mass range to roughly $50~\text{GeV}/c^2 < m_{\mu\mu}
     < 130~\text{GeV}/c^2$ for efficiency.
   Result is corrected for QED FSR (i.e. leptons are dressed), so turn off in generator.
 NumEvents: 1000000
 Beams: [p+, p+]
 Energies: [7000]
 PtCuts: [0]
 Description:
   Cross section as a function of $p_T$ and y of the Z boson decaying into muons
   in p p collisions at $\sqrt{s}$ = 7 TeV.  $p_T$ and y cross sections are measured for 
   $60 < m_{\mu\mu} < 120$ GeV.  The $p_T$ cross section is measured for lepton $p_T > 20$ GeV
   and $\eta < 2.1$, while the y cross section is extrapolated to all lepton $p_T$ and $\eta$.
   This measurement was performed using 36 pb$^{-1}$ of data collected during 2010 with 
   the CMS detector at the LHC.
 BibKey: 'Chatrchyan:2011wt'
 BibTeX: '@article{Chatrchyan:2011wt,
       author         = "Chatrchyan, Serguei and others",
       title          = "{Measurement of the Rapidity and Transverse Momentum
                         Distributions of $Z$ Bosons in $pp$ Collisions at
                         $\sqrt{s}=7$ TeV}",
       collaboration  = "CMS Collaboration",
       journal        = "Phys.Rev.",
       volume         = "D85",
       pages          = "032002",
       doi            = "10.1103/PhysRevD.85.032002",
       year           = "2012",
       eprint         = "1110.4973",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CMS-EWK-10-010, CERN-PH-EP-2011-169",
       SLACcitation   = "%%CITATION = ARXIV:1110.4973;%%",
 }'
diff --git a/data/anainfo/CMS_2013_I1209721.info b/data/anainfo/CMS_2013_I1209721.info
--- a/data/anainfo/CMS_2013_I1209721.info
+++ b/data/anainfo/CMS_2013_I1209721.info
@@ -1,51 +1,51 @@
 Name: CMS_2013_I1209721
 Year: 2011
 Summary: 'Azimuthal correlations and event shapes in $Z$ + jets in $pp$ collisions at 7 TeV'
 Experiment: CMS
 Collider: LHC
-SpiresID: 1209721
+InspireID: 1209721
 Status: VALIDATED
 Authors:
  - Io Odderskov <io.odderskov@gmail.com>
 References:
  - http://cms.cern.ch/iCMS/analysisadmin/cadi?ancode=EWK-11-021
  - https://cds.cern.ch/record/1503578
  - http://inspirehep.net/record/1209721
  - arXiv:1301.1646 [hep-ex] (http://arxiv.org/abs/arXiv:1301.1646)
  - Submitted to Phys. Lett. B
 RunInfo:
    Run MC generators with Z decaying to leptonic modes at 7TeV comEnergy
 NumEvents: 100k
 Beams: [p+, p+]
 Energies: [7000]
 PtCuts:
   $pT_\ell > 20$\;GeV and $|\eta_\ell| < 2.4$, jet $pT > 50$\;GeV
 Description:
   'Measurements are presented of event shapes and azimuthal correlations in the
   inclusive production of a Z boson in association with jets in proton-proton collisions. The
   data correspond to an integrated luminosity of 5.0/fb, collected with the CMS detector
   at the CERN LHC at $\sqrt{s} = 7$\;TeV. These data are used to test perturbative QCD predictions
   and evaluate a substantial background to most physics channels. Studies performed
   as a function of jet multiplicity for inclusive $Z$ boson production and for $Z$ bosons with
   transverse-momenta greater than 150\;GeV, are compared to predictions from Monte
   Carlo event generators that include leading-order multiparton matrix-element (with
   up to four hard partons in the final state) and next-to-leading-order simulations of
   Z + 1-jet events. The results are corrected for detector effects, and can therefore be
   used as input to improve models for describing these processes.'
 BibKey: 'Chatrchyan:2013tna'
 BibTeX: '@article{Chatrchyan:2013tna,
       author         = "Chatrchyan, Serguei and others",
       title          = "{Event shapes and azimuthal correlations in $Z$ + jets
                         events in $pp$ collisions at $\sqrt{s}=7$ TeV}",
       collaboration  = "CMS Collaboration",
       journal        = "Phys.Lett.",
       volume         = "B722",
       pages          = "238-261",
       doi            = "10.1016/j.physletb.2013.04.025",
       year           = "2013",
       eprint         = "1301.1646",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CMS-EWK-11-021, CERN-PH-EP-2013-001",
       SLACcitation   = "%%CITATION = ARXIV:1301.1646;%%",
 }'
diff --git a/data/anainfo/CMS_2013_I1218372.info b/data/anainfo/CMS_2013_I1218372.info
--- a/data/anainfo/CMS_2013_I1218372.info
+++ b/data/anainfo/CMS_2013_I1218372.info
@@ -1,42 +1,42 @@
 Name: CMS_2013_I1218372
 Year: 2013
 Summary: Study of the underlying event at forward rapidity in proton--proton collisions at the LHC
 Experiment: CMS
 Collider: LHC
-SpiresID: 1218372
+InspireID: 1218372
 Status: VALIDATED
 Authors:
  - Samantha Dooling <samantha.dooling@desy.de>
 References:
  - JHEP 1304 (2013) 072
  - 10.1007/JHEP04(2013)072
  - CMS-FWD-11-003
  - arXiv:1302.2394
 RunInfo:
    Inelastic events (non-diffractive and diffractive) at $\sqrt{s}$ = 0.9, 2.76 and 7 TeV.
 NumEvents: 1 Mio
 Beams:  [p+, p+]
 Energies: [900, 2760, 7000]
 Description:
   'Ratio of the energy deposited in the  pseudorapidity range $-6.6 < \eta < -5.2$
    for events with a charged particle jet with $|\eta|<2$ with respect to the
    energy in  inclusive events, as a function of charged particle jet transverse
    momentum for $\sqrt{s}=$0.9, 2.76 and 7 TeV.'
 
 BibKey: 'Chatrchyan:2013gfi'
 BibTeX: '@article{Chatrchyan:2013gfi,
       author         = "Chatrchyan, Serguei and others",
       title          = "{Study of the underlying event at forward rapidity in pp
                         collisions at $\sqrt{s}$ = 0.9, 2.76, and 7 TeV}",
       collaboration  = "CMS Collaboration",
       journal        = "JHEP",
       volume         = "1304",
       pages          = "072",
       doi            = "10.1007/JHEP04(2013)072",
       year           = "2013",
       eprint         = "1302.2394",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CMS-FWD-11-003, CERN-PH-EP-2013-012",
       SLACcitation   = "%%CITATION = ARXIV:1302.2394;%%",
 }'
diff --git a/data/anainfo/CMS_2013_I1223519.info b/data/anainfo/CMS_2013_I1223519.info
new file mode 100644
--- /dev/null
+++ b/data/anainfo/CMS_2013_I1223519.info
@@ -0,0 +1,46 @@
+Name: CMS_2013_I1223519
+Year: 2013
+Summary: Searches for SUSY using $\alpha_T$ and $b$-quark multiplicity at 8~\TeV
+Experiment: CMS
+Collider: LHC
+InspireID: 1223519
+Status: UNVALIDATED
+Authors:
+ - Andy Buckley <andy.buckley@cern.ch>
+References:
+- 'arXiv:1303.2985'
+RunInfo: SM background or BSM physics model, depending on interpretation usage
+NumEvents: 50000
+NeedCrossSection: yes
+Beams: [p+, p+]
+Energies: [8000]
+Description:
+  'An inclusive search for supersymmetric processes that produce final states with jets
+   and missing transverse energy in $pp$ collisions at 8\;\TeV. The data sample corresponds
+   to an integrated luminosity of 11.7/fb collected by the CMS experiment. In this search,
+   a dimensionless kinematic variable, $\alpha_T$, is used to discriminate between events
+   with genuine and misreconstructed missing transverse energy. The search was based on
+   an examination of the number of reconstructed jets per event, the scalar sum of
+   transverse energies of these jets, and the number of these jets identified as originating
+   from bottom quarks. No significant excess of events over the standard model expectation
+   was found.'
+BibKey: Chatrchyan:2013mys
+BibTeX: '@article{Chatrchyan:2013mys,
+      author         = "Chatrchyan, Serguei and others",
+      title          = "{Search for supersymmetry in hadronic final states with
+                        missing transverse energy using the variables $\alpha_T$
+                        and b-quark multiplicity in pp collisions at $\sqrt s=8$
+                        TeV}",
+      collaboration  = "CMS",
+      journal        = "Eur. Phys. J.",
+      volume         = "C73",
+      year           = "2013",
+      number         = "9",
+      pages          = "2568",
+      doi            = "10.1140/epjc/s10052-013-2568-6",
+      eprint         = "1303.2985",
+      archivePrefix  = "arXiv",
+      primaryClass   = "hep-ex",
+      reportNumber   = "CMS-SUS-12-028, CERN-PH-EP-2013-037",
+      SLACcitation   = "%%CITATION = ARXIV:1303.2985;%%"
+}'
diff --git a/data/anainfo/CMS_2013_I1258128.info b/data/anainfo/CMS_2013_I1258128.info
--- a/data/anainfo/CMS_2013_I1258128.info
+++ b/data/anainfo/CMS_2013_I1258128.info
@@ -1,65 +1,65 @@
 Name: CMS_2013_I1258128
 Year: 2013
 Summary: Rapidity distributions in exclusive $Z$ + jet and $\gamma$ + jet events in $pp$ collisions at $\sqrt{s} = 7$ TeV
 Experiment: CMS
 Collider: LHC
-InspireID: I1258128
+InspireID: 1258128
 Status: VALIDATED
 Authors:
  - Steve Linn <linn@cern.ch>,
  - Shin-Shan Eiko Yu <syu@cern.ch>,
  - Anil Sing Pratap <singh.ap79@gmail.com>,
  - Lovedeep Kaur Saini <lvdeep9@gmail.com>,
  - Kittikul Kovitanggoon <kovitang.cern@gmail.com>,
  - Luis Lebolo <luis.lebolo@cern.ch>,
  - Vanessa Gaultney Werner <vgaul001@fiu.edu>,
  - Yun-Ju Lu <Yun-Ju.Lu@cern.ch>,
  - Syue-Wei Li <Syue-Wei.Li@cern.ch>,
  - Yu-Hsiang Chang <index0192@yahoo.com.tw>,
  - Sung-Won Lee <Sungwon.Lee@ttu.edu>,
  - Pete E.C. Markowitz <markowit@fiu.edu>,
  - Darko Mekterovic <Darko.Mekterovic@cern.ch>,
  - Jorge Rodriguez <jrodriguez@cern.ch>,
  - Bhawan Uppal
 References:
  - arXiv:1310.3082
  - https://twiki.cern.ch/twiki/bin/view/CMSPublic/PhysicsResultsSMP12004
  - Submitted to Phys. Rev. Lett
 RunInfo:
    Run MC generators with $Z$ decaying to leptonic modes + jets
    and photon + jets at 7 TeV centre-of-mass energy.
 NumEvents: 2500000 for each process
 Beams: [p+, p+]
 Energies: [7000]
 PtCuts:
   '$Z$+jet: lepton $pT > 20$ GeV and lepton $|\eta| < 2.1$;
             jet $pT > 30$ GeV, jet $|\eta| < 2.1$, exclusive 1 jet;
             $Z pT > 40$ GeV, $76 < M_Z < 106$ GeV
    $\gamma$+jet: jet $pT > 30$ GeV, jet $|\eta| < 2.1$, exclusive 1 jet;
             photon $pT > 40$ GeV, photon $|\eta| < 1.4442$'
 Description:
   'Rapidity distributions are presented for events containing either a
   $Z$ boson or a photon in association with a single jet in proton-proton
   collisions produced at the CERN LHC. The data, collected with the CMS
   detector at $\sqrt{s} = 7$ TeV, correspond to an integrated luminosity of 5.0/fb.
   The individual rapidity distributions of the boson and the jet are
   consistent within 5\% with expectations from perturbative QCD. However,
   QCD predictions for the sum and the difference in rapidities of the two
   final-state objects show significant discrepancies with CMS data. In
   particular, next-to-leading-order QCD calculations, and two Monte Carlo
   event generators using different methods to merge matrix-element
   partons with evolved parton showers, appear inconsistent with the data
   as well as with each other.'
 BibKey: Chatrchyan:2013oda
 BibTeX: '@article{Chatrchyan:2013oda,
       author         = "Chatrchyan, Serguei and others",
       title          = "{Rapidity distributions in exclusive $Z$+jet and photon+jet events in $pp$ collisions at $\sqrt{s}=7$ TeV}",
       collaboration  = "CMS Collaboration",
       year           = "2013",
       eprint         = "1310.3082",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CMS-SMP-12-004, CERN-PH-EP-2013-169",
       SLACcitation   = "%%CITATION = ARXIV:1310.3082;%%",}'
 ToDo:
  - Update reference when paper published
diff --git a/data/anainfo/CMS_2013_I1261026.info b/data/anainfo/CMS_2013_I1261026.info
--- a/data/anainfo/CMS_2013_I1261026.info
+++ b/data/anainfo/CMS_2013_I1261026.info
@@ -1,47 +1,47 @@
 Name: CMS_2013_I1261026
 Year: 2013
 Summary: Jet and underlying event properties as a function of particle multiplicity
 Experiment: CMS
 Collider: LHC
-SpiresID: 1261026
+InspireID: 1261026
 Status: VALIDATED
 Authors:
  - Maxim Azarkin <Maksim.Azarkin@cern.ch>
 References:
  - Eur.Phys.J. C73 (2013) 2674
  - arXiv:1310.4554
  - CMS-FSQ-12-022,
  - CERN-PH-EP-2013-195
 RunInfo: QCD MB
 Beams: [p+, p+]
 Energies: [7000]
 NumEvents: 10000000
 NeedCrossSection: no
 Description:
   'Characteristics of multi-particle production in proton-proton
   collisions at $\sqrt{s} = 7$ TeV are studied as a function of the
   charged-particle multiplicity ($N_{ch}$). The produced particles are separated
   into two classes: those belonging to jets and those belonging to the
   underlying event. Charged particles are measured with pseudorapidity
   $|\eta| < 2.4$ and transverse momentum $p_T > 0.25$ GeV. Jets are
   reconstructed from charged-particles only and required to have
   $\pt > 5$ GeV. The distributions of jet \pt, average \pt of charged particles
   belonging to the underlying event or to jets, jet rates, and jet shapes
   are presented as functions of $N_{ch}$.'
 BibKey: Chatrchyan:2013ala
 BibTeX: '@article{Chatrchyan:2013ala,
       author         = "Chatrchyan, Serguei and others",
       title          = "{Jet and underlying event properties as a function of
                         particle multiplicity in proton-proton collisions at
                         $\sqrt{s} = 7$ TeV}",
       collaboration  = "CMS Collaboration",
       journal        = "Eur.Phys.J.",
       volume         = "C73",
       pages          = "2674",
       doi            = "10.1140/epjc/s10052-013-2674-5",
       year           = "2013",
       eprint         = "1310.4554",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CMS-FSQ-12-022, CERN-PH-EP-2013-195",
       SLACcitation   = "%%CITATION = ARXIV:1310.4554;%%",}'
diff --git a/data/anainfo/CMS_2013_I1272853.info b/data/anainfo/CMS_2013_I1272853.info
--- a/data/anainfo/CMS_2013_I1272853.info
+++ b/data/anainfo/CMS_2013_I1272853.info
@@ -1,38 +1,38 @@
 Name: CMS_2013_I1272853
 Year: 2013
 Summary: Study of observables sensitive to double parton scattering in $W + 2$ jets process in $pp$ collisions at $\sqrt{s} = 7$~TeV
 Experiment: CMS
 Collider: LHC
-SpiresID: 1272853
+InspireID: 1272853
 Status: VALIDATED
 Authors:
  - Sunil Bansal (sunil.bansal@cern.ch)
 References:
   - CMS-FSQ-12-028
   - CERN-PH-EP-2013-224
   - arXiv:1312.5729
   - Submitted to JHEP
 RunInfo: Only muonic decay of W boson
 Beams: [p+, p+]
 Energies: [7000]
 NeedCrossSection: yes
 PtCuts: muon with pT > 35 GeV, jets with pT > 20 GeV
 Description:
   Double parton scattering is investigated in proton-proton collisions
   at $\sqrt{s} = 7$~TeV where the final state includes a $W$ boson, which
   decays into a muon and a neutrino, and two jets. The data sample
   corresponds to an integrated luminosity of 5 inverse femtobarns,
   collected with the CMS detector at the LHC.
 BibKey: Chatrchyan:2013xxa
 BibTeX: '@article{Chatrchyan:2013xxa,
       author         = "Chatrchyan, Serguei and others",
       title          = "{Study of double parton scattering using $W + 2$-jet events
                         in proton-proton collisions at $\sqrt{s} = 7$~TeV}",
       collaboration  = " CMS Collaboration",
       year           = "2013",
       eprint         = "1312.5729",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CMS-FSQ-12-028, CERN-PH-EP-2013-224",
       SLACcitation   = "%%CITATION = ARXIV:1312.5729;%%",
 }'
diff --git a/data/anainfo/CMS_2013_I1273574.info b/data/anainfo/CMS_2013_I1273574.info
--- a/data/anainfo/CMS_2013_I1273574.info
+++ b/data/anainfo/CMS_2013_I1273574.info
@@ -1,44 +1,44 @@
 Name: CMS_2013_I1273574
 Year: 2013
 Summary: Studies of 4-jet production in proton-proton collisions at $\sqrt{s} = 7$ TeV
 Experiment: CMS
 Collider: LHC
-SpiresID: 1273574
+InspireID: 1273574
 Status: VALIDATED
 Authors:
  - P. Gunnellini
  - A. Buckley
 References:
  - CMS-FSQ-12-013
  - CERN-PH-EP-2013-229
  - arXiv:1312.6440
  - Submitted to Phys. Rev. D
 RunInfo:
    Hard QCD events with \pT cut at generator level of 45 GeV
 NumEvents: 500000
 NeedCrossSection: yes
 Beams: [p+, p+]
 Energies: [7000]
 PtCuts: 2 jets with $\pT > 50$ GeV; 2 jets with $\pT > 20$ GeV
 Description:
   Measurements are presented of exclusive 4-jet production cross
   sections as a function of the transverse momentum $p_T$,
   pseudorapidity $\eta$, as well as of correlations in azimuthal
   angle and $p_T$ balance among the jets. The data sample was
   collected at a centre-of-mass energy of 7 TeV with the CMS
   detector at the LHC, corresponding to an integrated luminosity
   of 36 pb$^{-1}$. The jets are reconstructed with the
   anti-$k_T$ jet algorithm in a range of $|\eta|<4.7$.
 BibKey: Chatrchyan:2013qza
 BibTeX: '@article{Chatrchyan:2013qza,
       author         = "Chatrchyan, Serguei and others",
       title          = "{Measurement of four-jet production in proton-proton
                         collisions at sqrt(s)=7 TeV}",
       collaboration  = " CMS Collaboration",
       year           = "2013",
       eprint         = "1312.6440",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CMS-FSQ-12-013, CERN-PH-EP-2013-229",
       SLACcitation   = "%%CITATION = ARXIV:1312.6440;%%",
 }'
diff --git a/data/anainfo/CMS_2014_I1303894.info b/data/anainfo/CMS_2014_I1303894.info
--- a/data/anainfo/CMS_2014_I1303894.info
+++ b/data/anainfo/CMS_2014_I1303894.info
@@ -1,47 +1,47 @@
 Name: CMS_2014_I1303894
 Year: 2011
 Summary: Differential cross-section of $W$ bosons + jets in $pp$ collisions at $\sqrt{s}=7$ TeV
 Experiment: CMS
 Collider: LHC
-InspireID: I1303894
+InspireID: 1303894
 Status: VALIDATED
 Authors:
  - Darin Baumgartel (darinb@cern.ch)
  - Emanuela Barberis (barberis@fnal.gov)
 References:
  - Phys. Lett. B741 (2014) 12-37
  - https://inspirehep.net/record/1303894
  - http://arxiv.org/abs/1406.7533
 RunInfo:
    Run MC generators with $W$ decaying leptonically at 7 TeV CoM energy. A large number of
     events are required to populate the high jet multiplicity region. Suitable results can
     be achieved with 85M events.
 Beams: [p+, p+]
 Energies: [7000]
 PtCuts:
   Leading muon $p_T > 25$ GeV and leading muon $|\eta| < 2.1$
   Jet $p_T > 30$ GeV and $|\eta| < 2.4$
 Description:
   A study of jet production in association with $W$ bosons has been performed, in
   events with the $W$ decaying to a muon. Jets are required to have $pT > 30$ GeV and $|\eta| < 2.4$.
   Muons are required to have $pT > 25$ GeV and $|\eta| < 2.1$. Jets are only considered if they are
   separated from the muon by $\Delta{R} > 0.5$. Muons are dressed with photons in a cone of $0.1$
   around the muon.
 BibKey: Khachatryan:2014uva
 BibTeX: '@article{Khachatryan:2014uva,
       author         = "Khachatryan, Vardan and others",
       title          = "{Differential cross section measurements for the
                         production of a $W$ boson in association with jets in
                         proton–proton collisions at $\sqrt{s} = 7$ TeV}",
       collaboration  = "CMS Collaboration",
       journal        = "Phys.Lett.",
       volume         = "B741",
       pages          = "12-37",
       doi            = "10.1016/j.physletb.2014.12.003",
       year           = "2014",
       eprint         = "1406.7533",
       archivePrefix  = "arXiv",
       primaryClass   = "hep-ex",
       reportNumber   = "CMS-SMP-12-023, CERN-PH-EP-2014-134",
       SLACcitation   = "%%CITATION = ARXIV:1406.7533;%%",
 }'
diff --git a/data/anainfo/CMS_2015_I1310737.info b/data/anainfo/CMS_2015_I1310737.info
--- a/data/anainfo/CMS_2015_I1310737.info
+++ b/data/anainfo/CMS_2015_I1310737.info
@@ -1,60 +1,60 @@
 Name: CMS_2015_I1310737
 Year: 2015
 Summary: Jet multiplicity and differential cross-sections of $Z$+jets events in $pp$ at $\sqrt{s} = 7$ TeV
 Experiment: CMS
 Collider: LHC
-InspireID: I1310737
+InspireID: 1310737
 Status: VALIDATED
 Authors:
 - Fabio Cossutti (fabio.cossutti@ts.infn.it)
 - Chiara La Licata (chiara.lalicata@ts.infn.it)
 References:
 - Phys.Rev. D91 (2015) 052008
 - http://dx.doi.org/10.1103/PhysRevD.91.052008
 - http://arxiv.org/abs/arXiv:1408.3104
 - http://inspirehep.net/record/1310737
 RunInfo:
   'Run MC generators with Z decaying leptonically into both electrons and muons at 7 TeV CoM energy.
    Order of 5 million unweighted events can give a reasonable global comparison, but precision in the
    high jet multiplicity region/high jet pt may require substantially larger samples or statistical
    enhancement of high jet multiplicities.'
 NeedCrossSection: yes
 Beams: [p+, p+]
 Energies: [7000]
 Description:
   'Measurements of differential cross sections are presented for
    the production of a Z boson and at least one hadronic jet in
    proton-proton collisions at $\sqrt{s}=7$~TeV, recorded by
    the CMS detector, using a data sample corresponding to an integrated
    luminosity of 4.9 $\text{fb}^{-1}$. The jet multiplicity distribution
    is measured for up to six jets. The differential cross sections are
    measured as a function of jet transverse momentum and pseudorapidity
    for the four highest transverse momentum jets. The distribution of
    the scalar sum of jet transverse momenta is also measured as a function
    of the jet multiplicity. The measurements are compared with theoretical
    predictions at leading and next-to-leading order in perturbative QCD.
 
    Cuts:
      First two leading electrons or muons with $p_T > 20$ GeV and $|\eta| < 2.4$
      Dilepton invariant mass in the [71,111] GeV range
      Jets $p_T > 30$ GeV and $|\eta| < 2.4$
      $\Delta{R}($lepton,jet$) > 0.5$'
 BibKey: Khachatryan:2014zya
 BibTeX: '@article{Khachatryan:2014zya,
       author = "Khachatryan, V. and others",
       title = "{Measurements of jet multiplicity and differential
                production cross sections of $Z +$ jets events in
                 proton-proton collisions at $\sqrt{s}=7$ TeV}",
       collaboration = "CMS",
       journal = "Phys.Rev.",
       number = "5",
       volume = "D91",
       pages = "052008",
       doi = "10.1103/PhysRevD.91.052008",
       year = "2015",
       eprint = "1408.3104",
       archivePrefix = "arXiv",
       primaryClass = "hep-ex",
       reportNumber = "CMS-SMP-12-017, CERN-PH-EP-2014-205",
       SLACcitation = "%%CITATION = ARXIV:1408.3104;%%",
 }'
diff --git a/data/anainfo/CMS_2016_PAS_SUS_16_14.info b/data/anainfo/CMS_2016_PAS_SUS_16_14.info
new file mode 100644
--- /dev/null
+++ b/data/anainfo/CMS_2016_PAS_SUS_16_14.info
@@ -0,0 +1,22 @@
+Name: CMS_2016_PAS_SUS_16_14
+Year: 2016
+Summary: Search for supersymmetry in events with jets and missing transverse momentum at 13~\TeV
+Experiment: CMS
+Collider: LHC
+Status: UNVALIDATED
+Authors:
+ - Andy Buckley <andy.buckley@cern.ch>
+RunInfo: BSM physics signal events
+NumEvents: 50000
+NeedCrossSection: yes
+Beams: [p+, p+]
+Energies: [13000]
+Description:
+  'A search for supersymmetry in all-hadronic events with large missing transverse
+  momentum, produced in proton--proton collisions at $\sqrt{s} = 13~\TeV$. The data
+  sample, corresponding to an integrated luminosity of 12.9/fb, was collected with
+  the CMS detector at the CERN LHC in 2016. The data are examined in search regions
+  of jet multiplicity, tagged bottom quark jet multiplicity, missing transverse momentum,
+  and the scalar sum of jet transverse momenta. The observed numbers of events in all
+  search regions are found to be consistent with the expectations from standard model
+  processes.'
diff --git a/data/anainfo/Makefile.am b/data/anainfo/Makefile.am
--- a/data/anainfo/Makefile.am
+++ b/data/anainfo/Makefile.am
@@ -1,409 +1,417 @@
 dist_pkgdata_DATA = \
   ALEPH_1991_S2435284.info \
   ALEPH_1995_I382179.info \
   ALEPH_1996_S3486095.info \
   ALEPH_1996_S3196992.info \
   ALEPH_1999_S4193598.info \
   ALEPH_2001_S4656318.info \
   ALEPH_2002_S4823664.info \
   ALEPH_2004_S5765862.info \
   ALICE_2010_S8624100.info \
   ALICE_2010_S8625980.info \
   ALICE_2010_S8706239.info \
   ALICE_2011_S8909580.info \
   ALICE_2011_S8945144.info \
+  ALICE_2012_I1116147.info \
   ALICE_2012_I1181770.info \
   ALICE_2015_I1357424.info \
   ALICE_2014_I1300380.info \
   ARGUS_1993_S2653028.info \
   ARGUS_1993_S2669951.info \
   ARGUS_1993_S2789213.info \
   ATLAS_2010_S8591806.info \
   ATLAS_2010_S8817804.info \
   ATLAS_2010_S8894728.info \
   ATLAS_2010_S8914702.info \
   ATLAS_2010_S8918562.info \
   ATLAS_2010_S8919674.info \
   ATLAS_2010_CONF_2010_049.info \
   ATLAS_2011_S8924791.info \
   ATLAS_2011_S8971293.info \
   ATLAS_2011_S8983313.info \
   ATLAS_2011_S8994773.info \
   ATLAS_2011_S9002537.info \
   ATLAS_2011_S9019561.info \
   ATLAS_2011_S9041966.info \
   ATLAS_2011_S9120807.info \
   ATLAS_2011_S9126244.info \
   ATLAS_2011_S9128077.info \
   ATLAS_2011_S9131140.info \
   ATLAS_2011_S9108483.info \
   ATLAS_2011_S9212183.info \
   ATLAS_2011_I894867.info \
   ATLAS_2011_I921594.info \
   ATLAS_2011_I928289_W.info \
   ATLAS_2011_I928289_Z.info \
   ATLAS_2011_I930220.info \
   ATLAS_2011_S9035664.info \
   ATLAS_2011_I919017.info \
   ATLAS_2011_I925932.info \
   ATLAS_2011_I926145.info \
+  ATLAS_2011_I929691.info \
   ATLAS_2011_I944826.info \
   ATLAS_2011_I945498.info \
   ATLAS_2011_I954993.info \
   ATLAS_2011_S9225137.info \
   ATLAS_2011_S9212353.info \
   ATLAS_2011_CONF_2011_090.info \
   ATLAS_2011_CONF_2011_098.info \
   ATLAS_2012_I943401.info \
   ATLAS_2012_I946427.info \
   ATLAS_2012_I1083318.info \
   ATLAS_2012_I1082936.info \
   ATLAS_2012_I1084540.info \
   ATLAS_2012_I1093734.info \
   ATLAS_2012_I1093738.info \
   ATLAS_2012_I1094061.info \
   ATLAS_2012_I1094564.info \
   ATLAS_2012_I1094568.info \
   ATLAS_2012_I1095236.info \
   ATLAS_2012_I1082009.info \
   ATLAS_2012_I1091481.info \
   ATLAS_2012_I1119557.info \
   ATLAS_2012_I1124167.info \
   ATLAS_2012_I1125575.info \
   ATLAS_2012_I1183818.info \
   ATLAS_2012_I1188891.info \
   ATLAS_2012_I1112263.info \
   ATLAS_2012_I1125961.info \
   ATLAS_2012_I1126136.info \
   ATLAS_2012_I1117704.info \
   ATLAS_2012_I1118269.info \
   ATLAS_2012_I1180197.info \
   ATLAS_2012_I1186556.info \
   ATLAS_2012_I1190891.info \
   ATLAS_2012_I1199269.info \
   ATLAS_2012_I1203852.info \
   ATLAS_2012_I1204447.info \
   ATLAS_2012_I1204784.info \
   ATLAS_2012_CONF_2012_001.info \
   ATLAS_2012_CONF_2012_103.info \
   ATLAS_2012_CONF_2012_104.info \
   ATLAS_2012_CONF_2012_105.info \
   ATLAS_2012_CONF_2012_109.info \
   ATLAS_2012_CONF_2012_153.info \
   ATLAS_2013_I1190187.info \
   ATLAS_2013_I1217863_W.info \
   ATLAS_2013_I1217863_W_EL.info \
   ATLAS_2013_I1217863_W_MU.info \
   ATLAS_2013_I1217863_Z.info \
   ATLAS_2013_I1217863_Z_EL.info \
   ATLAS_2013_I1217863_Z_MU.info \
   ATLAS_2013_I1217867.info \
   ATLAS_2013_I1219109.info \
   ATLAS_2013_I1219109_EL.info \
   ATLAS_2013_I1219109_MU.info \
   ATLAS_2013_I1230812.info \
   ATLAS_2013_I1230812_EL.info \
   ATLAS_2013_I1230812_MU.info \
   ATLAS_2013_I1243871.info \
   ATLAS_2013_I1263495.info \
   ATLAS_2014_I1268975.info \
   ATLAS_2014_I1279489.info \
   ATLAS_2014_I1282441.info \
   ATLAS_2014_I1298811.info \
   ATLAS_2014_I1304688.info \
   ATLAS_2014_I1307756.info \
   ATLAS_2014_I1306294.info \
   ATLAS_2014_I1306294_EL.info \
   ATLAS_2014_I1306294_MU.info \
   ATLAS_2014_I1315949.info \
   ATLAS_2014_I1325553.info \
   ATLAS_2014_I1300647.info \
   ATLAS_2014_I1288706.info \
   ATLAS_2014_I1307243.info \
   ATLAS_2014_I1312627.info \
   ATLAS_2014_I1312627_EL.info \
   ATLAS_2014_I1312627_MU.info \
   ATLAS_2014_I1306615.info \
   ATLAS_2015_I1393758.info \
   ATLAS_2015_I1364361.info \
   ATLAS_2015_I1345452.info \
   ATLAS_2015_I1351916.info \
   ATLAS_2015_I1351916_EL.info \
   ATLAS_2015_I1351916_MU.info \
   ATLAS_2013_I1216670.info \
   ATLAS_2013_I1244522.info \
   ATLAS_2014_I1282447.info \
   ATLAS_2014_I1298023.info \
   ATLAS_2014_I1319490.info \
   ATLAS_2014_I1319490_EL.info \
   ATLAS_2014_I1319490_MU.info \
   ATLAS_2014_I1326641.info \
   ATLAS_2014_I1327229.info \
   ATLAS_2015_I1387176.info \
   ATLAS_2015_CONF_2015_041.info \
   ATLAS_2015_CONF_2015_041_EL.info \
   ATLAS_2015_CONF_2015_041_MU.info \
   ATLAS_2015_I1376945.info \
   ATLAS_2015_I1390114.info \
   ATLAS_2015_I1394679.info \
   ATLAS_2015_I1397635.info \
   ATLAS_2015_I1397637.info \
   ATLAS_2015_I1408516.info \
   ATLAS_2015_I1408516_EL.info \
   ATLAS_2015_I1408516_MU.info \
   ATLAS_2015_I1404878.info \
   ATLAS_2016_I1419070.info \
   ATLAS_2016_I1419652.info \
   ATLAS_2016_I1424838.info \
   ATLAS_2016_I1426695.info \
   ATLAS_2016_I1444991.info \
   ATLAS_2016_I1452559.info \
   ATLAS_2016_I1457605.info \
   ATLAS_2016_I1458270.info \
   ATLAS_2016_I1468168.info \
+  ATLAS_2016_CONF_2016_037.info \
+  ATLAS_2016_CONF_2016_054.info \
+  ATLAS_2016_CONF_2016_078.info \
+  ATLAS_2016_CONF_2016_094.info \
   BABAR_2003_I593379.info \
   BABAR_2005_S6181155.info \
   BABAR_2007_S6895344.info \
   BABAR_2007_S7266081.info \
   BABAR_2013_I1116411.info \
   BABAR_2013_I1238276.info \
   BABAR_2015_I1334693.info \
   BELLE_2001_S4598261.info \
   BELLE_2008_I786560.info \
   BELLE_2011_I878990.info \
   BELLE_2013_I1216515.info \
   BELLE_2013_I1238273.info \
   BELLE_2015_I1397632.info \
   CDF_1988_S1865951.info \
   CDF_1990_S2089246.info \
   CDF_1993_S2742446.info \
   CDF_1994_S2952106.info \
   CDF_1996_S3108457.info \
   CDF_1996_S3349578.info \
   CDF_1996_S3418421.info \
   CDF_1997_S3541940.info \
   CDF_1998_S3618439.info \
   CDF_2000_S4155203.info \
   CDF_2000_S4266730.info \
   CDF_2001_S4517016.info \
   CDF_2001_S4563131.info \
   CDF_2001_S4751469.info \
   CDF_2002_S4796047.info \
   CDF_2004_S5839831.info \
   CDF_2005_S6080774.info \
   CDF_2005_S6217184.info \
   CDF_2006_S6450792.info \
   CDF_2006_S6653332.info \
   CDF_2007_S7057202.info \
   CDF_2008_S7540469.info \
   CDF_2008_S7541902.info \
   CDF_2008_S7782535.info \
   CDF_2008_S7828950.info \
   CDF_2008_S8093652.info \
   CDF_2008_S8095620.info \
   CDF_2009_S8233977.info \
   CDF_2009_NOTE_9936.info \
   CDF_2009_I856131.info \
   CDF_2009_S8436959.info \
   CDF_2010_S8591881_DY.info \
   CDF_2010_S8591881_QCD.info \
   CDF_2012_NOTE10874.info \
   CDF_2012_I1124333.info \
   CLEO_2004_S5809304.info\
   CMS_2010_S8547297.info \
   CMS_2010_S8656010.info \
   CMS_2011_S8884919.info \
   CMS_2011_S9215166.info \
   CMS_2012_I941555.info \
   CMS_2011_I954992.info \
   CMS_2011_S8941262.info \
   CMS_2011_S8950903.info \
   CMS_2011_S8957746.info \
   CMS_2011_S8968497.info \
   CMS_2011_S8973270.info \
   CMS_2011_S8978280.info \
   CMS_2011_S9086218.info \
   CMS_2011_S9088458.info \
   CMS_2011_S9120041.info \
   CMS_2012_I1087342.info \
   CMS_2012_I1090423.info \
   CMS_2012_I1102908.info \
   CMS_2012_I1107658.info \
   CMS_2012_I1184941.info \
   CMS_2012_I1193338.info \
   CMS_2013_I1122847.info \
   CMS_2013_I1208923.info \
   CMS_2013_I1209721.info \
   CMS_2013_I1218372.info \
+  CMS_2013_I1223519.info \
   CMS_2013_I1224539_DIJET.info \
   CMS_2013_I1224539_WJET.info \
   CMS_2013_I1224539_ZJET.info \
   CMS_2013_I1256943.info \
   CMS_2013_I1258128.info \
   CMS_2013_I1261026.info \
   CMS_2013_I1265659.info \
   CMS_2013_I1272853.info \
   CMS_2013_I1273574.info \
   CMS_2014_I1298810.info \
   CMS_2014_I1303894.info \
   CMS_2014_I1305624.info \
   CMS_2015_I1310737.info \
   CMS_2015_I1327224.info \
   CMS_2015_I1346843.info \
   CMS_2015_I1356998.info \
   CMS_2015_I1370682.info \
   CMS_2015_I1384119.info \
   CMS_2015_I1385107.info \
   CMS_2015_I1397174.info \
   CMS_2016_I1473674.info \
   CMSTOTEM_2014_I1294140.info \
   CMS_2012_PAS_QCD_11_010.info \
+  CMS_2016_PAS_SUS_16_14.info \
   CMS_QCD_10_024.info \
   D0_1995_I398175.info \
   D0_1996_S3214044.info \
   D0_1996_S3324664.info \
   D0_2000_S4480767.info \
   D0_2000_I499943.info  \
   D0_2001_S4674421.info \
   D0_2004_S5992206.info \
   D0_2006_S6438750.info \
   D0_2007_S7075677.info \
   D0_2008_S6879055.info \
   D0_2008_S7554427.info \
   D0_2008_S7662670.info \
   D0_2008_S7719523.info \
   D0_2008_S7837160.info \
   D0_2008_S7863608.info \
   D0_2009_S8202443.info \
   D0_2009_S8320160.info \
   D0_2009_S8349509.info \
   D0_2010_S8566488.info \
   D0_2010_S8570965.info \
   D0_2010_S8671338.info \
   D0_2010_S8821313.info \
   D0_2011_I895662.info \
   D0_2015_I1324946.info \
   D0_2000_I503361.info \
   E735_1998_S3905616.info \
   DELPHI_1995_S3137023.info \
   DELPHI_1996_S3430090.info \
   DELPHI_1999_S3960137.info \
   DELPHI_2000_S4328825.info \
   DELPHI_2002_069_CONF_603.info \
   DELPHI_2011_I890503.info \
   EXAMPLE.info \
   EXAMPLE_CUTS.info \
   EXAMPLE_SMEAR.info \
   H1_1994_S2919893.info \
   H1_1995_S3167097.info \
   H1_2000_S4129130.info \
   JADE_OPAL_2000_S4300807.info \
   JADE_1998_S3612880.info \
   LHCB_2010_S8758301.info \
   LHCB_2010_I867355.info \
   LHCB_2011_I917009.info \
   LHCB_2011_I919315.info \
   LHCB_2012_I1119400.info \
   LHCB_2012_I1208102.info \
   LHCB_2013_I1208105.info \
   LHCB_2013_I1218996.info \
   LHCB_2014_I1281685.info \
   LHCB_2015_I1333223.info \
   LHCF_2012_I1115479.info \
   MC_DIPHOTON.info \
   MC_ELECTRONS.info \
   MC_GENERIC.info \
   MC_HFJETS.info \
   MC_HINC.info \
   MC_HJETS.info \
   MC_HHJETS.info \
   MC_HKTSPLITTINGS.info \
   MC_IDENTIFIED.info \
   MC_JETS.info \
   MC_JETTAGS.info \
   MC_KTSPLITTINGS.info \
   MC_LEADJETUE.info \
   MC_MET.info \
   MC_MUONS.info \
   MC_PDFS.info \
   MC_PHOTONINC.info \
   MC_PHOTONJETS.info \
   MC_PHOTONKTSPLITTINGS.info \
   MC_PHOTONS.info \
   MC_PRINTEVENT.info \
   MC_QCD_PARTONS.info \
   MC_SUSY.info \
   MC_TAUS.info \
   MC_TTBAR.info \
   MC_VH2BB.info \
   MC_WINC.info \
   MC_WINC_EL.info \
   MC_WINC_MU.info \
   MC_WINC_EL_BARE.info \
   MC_WINC_MU_BARE.info \
   MC_WJETS.info \
   MC_WJETS_EL.info \
   MC_WJETS_MU.info \
   MC_WJETS_EL_BARE.info \
   MC_WJETS_MU_BARE.info \
   MC_WKTSPLITTINGS.info \
   MC_WPOL.info \
   MC_WWINC.info \
   MC_WWJETS.info \
   MC_WWKTSPLITTINGS.info \
   MC_XS.info \
   MC_ZINC.info \
   MC_ZINC_EL.info \
   MC_ZINC_MU.info \
   MC_ZINC_EL_BARE.info \
   MC_ZINC_MU_BARE.info \
   MC_ZJETS.info \
   MC_ZJETS_EL.info \
   MC_ZJETS_MU.info \
   MC_ZJETS_EL_BARE.info \
   MC_ZJETS_MU_BARE.info \
   MC_ZKTSPLITTINGS.info \
   MC_ZZINC.info \
   MC_ZZJETS.info \
   MC_ZZKTSPLITTINGS.info \
   L3_1992_I336180.info \
   OPAL_1993_S2692198.info \
   OPAL_1993_I342766.info \
   OPAL_1994_S2927284.info \
   OPAL_1995_S3198391.info \
   OPAL_1996_S3257789.info \
   OPAL_1997_S3396100.info \
   OPAL_1997_S3608263.info \
   OPAL_1998_S3702294.info \
   OPAL_1998_S3780481.info \
   OPAL_1998_S3749908.info \
   OPAL_2000_S4418603.info \
   OPAL_2001_S4553896.info \
   OPAL_2002_S5361494.info \
   OPAL_2003_I599181.info \
   OPAL_2004_S6132243.info \
   PDG_HADRON_MULTIPLICITIES.info \
   PDG_HADRON_MULTIPLICITIES_RATIOS.info \
   PDG_TAUS.info \
   SFM_1984_S1178091.info \
   SLD_1996_S3398250.info \
   SLD_1999_S3743934.info \
   SLD_2002_S4869273.info \
   SLD_2004_S5693039.info \
   STAR_2006_S6500200.info \
   STAR_2006_S6860818.info \
   STAR_2006_S6870392.info \
   STAR_2008_S7869363.info \
   STAR_2008_S7993412.info \
   STAR_2009_UE_HELEN.info \
   TASSO_1990_S2148048.info \
   TOTEM_2012_I1115294.info \
   TOTEM_2012_I1220862.info \
   TOTEM_2014_I1328627.info \
   ZEUS_2001_S4815815.info \
   UA1_1990_S2044935.info \
   UA5_1982_S875503.info \
   UA5_1986_S1583476.info \
   UA5_1987_S1640666.info \
   UA5_1988_S1867512.info \
   UA5_1989_S1926373.info
diff --git a/data/plotinfo/ALICE_2012_I1116147.plot b/data/plotinfo/ALICE_2012_I1116147.plot
new file mode 100644
--- /dev/null
+++ b/data/plotinfo/ALICE_2012_I1116147.plot
@@ -0,0 +1,38 @@
+# BEGIN PLOT /ALICE_2012_I1116147/d01-x01-y01
+Title=$\pi^0$ invariant cross section at mid-rapidity at $\sqrt{s}=7$\,TeV
+XLabel=$p_\perp$ (GeV/$c$)
+YLabel=$E\frac{d^3\sigma}{dp^3}$ ($\mu b/$(GeV$^2c^3$))
+YMin=1e-4
+YMax=1e6
+# END PLOT
+
+
+
+# BEGIN PLOT /ALICE_2012_I1116147/d02-x01-y01
+Title=$\pi^0$ invariant cross section at mid-rapidity at $\sqrt{s}=0.9$\,TeV
+XLabel=$p_\perp$ (GeV/$c$)
+YLabel=$E\frac{d^3\sigma}{dp^3}$ ($\mu b/$(GeV$^2c^3$))
+YMin=1e-2
+YMax=1e5
+# END PLOT
+
+
+
+# BEGIN PLOT /ALICE_2012_I1116147/d03-x01-y01
+Title=$\eta$ invariant cross section at mid-rapidity at $\sqrt{s}=7$\,TeV
+XLabel=$p_\perp$ (GeV/$c$)
+YLabel=$E\frac{d^3\sigma}{dp^3}$ ($\mu b/$(GeV$^2c^3$))
+YMin=1e-3
+YMax=1e4
+# END PLOT
+
+
+
+# BEGIN PLOT /ALICE_2012_I1116147/d04-x01-y01
+Title=$\eta/\pi^0$ ratio at $\sqrt{s}=7$\,TeV
+XLabel=$p_\perp$ (GeV/$c$)
+YLabel=$\eta/\pi^0$
+LogY=0
+YMin=5e-2
+YMax=1.1
+# END PLOT
\ No newline at end of file
diff --git a/data/plotinfo/ATLAS_2011_I929691.plot b/data/plotinfo/ATLAS_2011_I929691.plot
new file mode 100644
--- /dev/null
+++ b/data/plotinfo/ATLAS_2011_I929691.plot
@@ -0,0 +1,238 @@
+# BEGIN PLOT /ATLAS_2011_I929691/*
+LogY=1
+LogX=0
+XTwosidedTicks=1
+YTwosidedTicks=1
+LegendAlign=r
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d01-x01-y01
+Title=$F(z)$ for 25 GeV $<p_\text{T~jet}<$ 40 GeV
+XLabel=$z$
+YLabel=$F(z)$
+XMax=0.8
+XMin=0.0121
+LogX=1
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d02-x01-y01
+Title=$F(z)$ for 40 GeV $<p_\text{T~jet}<$ 60 GeV
+XLabel=$z$
+YLabel=$F(z)$
+XMax=0.8
+XMin=0.0069
+LogX=1
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d03-x01-y01
+Title=$F(z)$ for 60 GeV $<p_\text{T~jet}<$ 80 GeV
+XLabel=$z$
+YLabel=$F(z)$
+XMax=0.8
+XMin=0.0052
+LogX=1
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d04-x01-y01
+Title=$F(z)$ for 80 GeV $<p_\text{T~jet}<$ 110 GeV
+XLabel=$z$
+YLabel=$F(z)$
+XMax=0.8
+XMin=0.004
+LogX=1
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d05-x01-y01
+Title=$F(z)$ for 110 GeV $<p_\text{T~jet}<$ 160 GeV
+XLabel=$z$
+YLabel=$F(z)$
+XMax=0.8
+XMin=0.003
+LogX=1
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d06-x01-y01
+Title=$F(z)$ for 160 GeV $<p_\text{T~jet}<$ 210 GeV
+XLabel=$z$
+YLabel=$F(z)$
+XMax=0.8
+XMin=0.003
+LogX=1
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d07-x01-y01
+Title=$F(z)$ for 210 GeV $<p_\text{T~jet}<$ 260 GeV
+XLabel=$z$
+YLabel=$F(z)$
+XMax=0.8
+XMin=0.003
+LogX=1
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d08-x01-y01
+Title=$F(z)$ for 260 GeV $<p_\text{T~jet}<$ 310 GeV
+XLabel=$z$
+YLabel=$F(z)$
+XMax=0.8
+XMin=0.003
+LogX=1
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d09-x01-y01
+Title=$F(z)$ for 310 GeV $<p_\text{T~jet}<$ 400 GeV
+XLabel=$z$
+YLabel=$F(z)$
+XMax=0.8
+XMin=0.003
+LogX=1
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d10-x01-y01
+Title=$F(z)$ for 400 GeV $<p_\text{T~jet}<$ 500 GeV
+XLabel=$z$
+YLabel=$F(z)$
+XMax=0.8
+XMin=0.003
+LogX=1
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d11-x01-y01
+Title=Density of charged particles for 25 GeV $<p_\text{T~jet}<$ 40 GeV
+XLabel=$r$
+YLabel=$\rho_\text{ch}(r)$
+XMax=0.6
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d12-x01-y01
+Title=Density of charged particles for 40 GeV $<p_\text{T~jet}<$ 60 GeV
+XLabel=$r$
+YLabel=$\rho_\text{ch}(r)$
+XMax=0.6
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d13-x01-y01
+Title=Density of charged particles for 60 GeV $<p_\text{T~jet}<$ 80 GeV
+XLabel=$r$
+YLabel=$\rho_\text{ch}(r)$
+XMax=0.6
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d14-x01-y01
+Title=Density of charged particles for 80 GeV $<p_\text{T~jet}<$ 110 GeV
+XLabel=$r$
+YLabel=$\rho_\text{ch}(r)$
+XMax=0.6
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d15-x01-y01
+Title=Density of charged particles for 110 GeV $<p_\text{T~jet}<$ 160 GeV
+XLabel=$r$
+YLabel=$\rho_\text{ch}(r)$
+XMax=0.6
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d16-x01-y01
+Title=Density of charged particles for 160 GeV $<p_\text{T~jet}<$ 210 GeV
+XLabel=$r$
+YLabel=$\rho_\text{ch}(r)$
+XMax=0.6
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d17-x01-y01
+Title=Density of charged particles for 210 GeV $<p_\text{T~jet}<$ 260 GeV
+XLabel=$r$
+YLabel=$\rho_\text{ch}(r)$
+XMax=0.6
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d18-x01-y01
+Title=Density of charged particles for 260 GeV $<p_\text{T~jet}<$ 310 GeV
+XLabel=$r$
+YLabel=$\rho_\text{ch}(r)$
+XMax=0.6
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d19-x01-y01
+Title=Density of charged particles for 310 GeV $<p_\text{T~jet}<$ 400 GeV
+XLabel=$r$
+YLabel=$\rho_\text{ch}(r)$
+XMax=0.6
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d20-x01-y01
+Title=Density of charged particles for 400 GeV $<p_\text{T~jet}<$ 500 GeV
+XLabel=$r$
+YLabel=$\rho_\text{ch}(r)$
+XMax=0.6
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d21-x01-y01
+Title=$f(p_\text{T}^\text{rel})$ for 25 GeV $<p_\text{T~jet}<$ 40 GeV
+XLabel=$p_\text{T}^\text{rel}$ [GeV]
+YLabel=$f(p_\text{T}^\text{rel})$
+LogY=0
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d22-x01-y01
+Title=$f(p_\text{T}^\text{rel})$ for 40 GeV $<p_\text{T~jet}<$ 60 GeV
+XLabel=$p_\text{T}^\text{rel}$ [GeV]
+YLabel=$f(p_\text{T}^\text{rel})$
+LogY=0
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d23-x01-y01
+Title=$f(p_\text{T}^\text{rel})$ for 60 GeV $<p_\text{T~jet}<$ 80 GeV
+XLabel=$p_\text{T}^\text{rel}$ [GeV]
+YLabel=$f(p_\text{T}^\text{rel})$
+LogY=0
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d24-x01-y01
+Title=$f(p_\text{T}^\text{rel})$ for 80 GeV $<p_\text{T~jet}<$ 110 GeV
+XLabel=$p_\text{T}^\text{rel}$ [GeV]
+YLabel=$f(p_\text{T}^\text{rel})$
+LogY=0
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d25-x01-y01
+Title=$f(p_\text{T}^\text{rel})$ for 110 GeV $<p_\text{T~jet}<$ 160 GeV
+XLabel=$p_\text{T}^\text{rel}$ [GeV]
+YLabel=$f(p_\text{T}^\text{rel})$
+LogY=0
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d26-x01-y01
+Title=$f(p_\text{T}^\text{rel})$ for 160 GeV $<p_\text{T~jet}<$ 210 GeV
+XLabel=$p_\text{T}^\text{rel}$ [GeV]
+YLabel=$f(p_\text{T}^\text{rel})$
+LogY=0
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d27-x01-y01
+Title=$f(p_\text{T}^\text{rel})$ for 210 GeV $<p_\text{T~jet}<$ 260 GeV
+XLabel=$p_\text{T}^\text{rel}$ [GeV]
+YLabel=$f(p_\text{T}^\text{rel})$
+LogY=0
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d28-x01-y01
+Title=$f(p_\text{T}^\text{rel})$ for 260 GeV $<p_\text{T~jet}<$ 310 GeV
+XLabel=$p_\text{T}^\text{rel}$ [GeV]
+YLabel=$f(p_\text{T}^\text{rel})$
+LogY=0
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d29-x01-y01
+Title=$f(p_\text{T}^\text{rel})$ for 310 GeV $<p_\text{T~jet}<$ 400 GeV
+XLabel=$p_\text{T}^\text{rel}$ [GeV]
+YLabel=$f(p_\text{T}^\text{rel})$
+LogY=0
+# END PLOT
+
+# BEGIN PLOT /ATLAS_2011_I929691/d30-x01-y01
+Title=$f(p_\text{T}^\text{rel})$ for 400 GeV $<p_\text{T~jet}<$ 500 GeV
+XLabel=$p_\text{T}^\text{rel}$ [GeV]
+YLabel=$f(p_\text{T}^\text{rel})$
+LogY=0
+# END PLOT
+
diff --git a/data/plotinfo/Makefile.am b/data/plotinfo/Makefile.am
--- a/data/plotinfo/Makefile.am
+++ b/data/plotinfo/Makefile.am
@@ -1,401 +1,403 @@
 dist_pkgdata_DATA = \
   ALEPH_1991_S2435284.plot \
   ALEPH_1995_I382179.plot \
   ALEPH_1996_S3486095.plot \
   ALEPH_1996_S3196992.plot \
   ALEPH_1999_S4193598.plot \
   ALEPH_2001_S4656318.plot \
   ALEPH_2002_S4823664.plot \
   ALEPH_2004_S5765862.plot \
   ALICE_2010_S8624100.plot \
   ALICE_2010_S8625980.plot \
   ALICE_2010_S8706239.plot \
   ALICE_2011_S8909580.plot \
   ALICE_2011_S8945144.plot \
+  ALICE_2012_I1116147.plot \
   ALICE_2012_I1181770.plot \
   ALICE_2014_I1300380.plot \
   ALICE_2015_I1357424.plot \
   ARGUS_1993_S2653028.plot \
   ARGUS_1993_S2669951.plot \
   ARGUS_1993_S2789213.plot \
   ATLAS_2010_S8591806.plot \
   ATLAS_2010_S8817804.plot \
   ATLAS_2010_S8894728.plot \
   ATLAS_2010_S8914702.plot \
   ATLAS_2010_S8918562.plot \
   ATLAS_2010_S8919674.plot \
   ATLAS_2010_CONF_2010_049.plot \
   ATLAS_2011_S8924791.plot \
   ATLAS_2011_S8971293.plot \
   ATLAS_2011_S8994773.plot \
   ATLAS_2011_S9002537.plot \
   ATLAS_2011_S9035664.plot \
   ATLAS_2011_S9120807.plot \
   ATLAS_2011_S9126244.plot \
   ATLAS_2011_S9128077.plot \
   ATLAS_2011_S9131140.plot \
   ATLAS_2011_I894867.plot  \
   ATLAS_2011_I919017.plot  \
   ATLAS_2011_I921594.plot  \
   ATLAS_2011_I928289_W.plot \
   ATLAS_2011_I928289_Z.plot \
   ATLAS_2011_I925932.plot  \
   ATLAS_2011_I926145.plot  \
+  ATLAS_2011_I929691.plot  \
   ATLAS_2011_I930220.plot  \
   ATLAS_2011_I944826.plot  \
   ATLAS_2011_I945498.plot  \
   ATLAS_2011_I954993.plot  \
   ATLAS_2011_S9225137.plot \
   ATLAS_2011_S9212183.plot \
   ATLAS_2011_S8983313.plot \
   ATLAS_2011_S9212353.plot \
   ATLAS_2011_CONF_2011_090.plot \
   ATLAS_2011_CONF_2011_098.plot \
   ATLAS_2012_I1082936.plot \
   ATLAS_2012_I1083318.plot \
   ATLAS_2012_I1084540.plot \
   ATLAS_2012_I1091481.plot \
   ATLAS_2012_I1093734.plot \
   ATLAS_2012_I1093738.plot \
   ATLAS_2012_I1094061.plot \
   ATLAS_2012_I1094564.plot \
   ATLAS_2012_I1094568.plot \
   ATLAS_2012_I1095236.plot \
   ATLAS_2012_I943401.plot \
   ATLAS_2012_I946427.plot \
   ATLAS_2012_I1119557.plot \
   ATLAS_2012_I1124167.plot \
   ATLAS_2012_I1125575.plot \
   ATLAS_2012_I1112263.plot \
   ATLAS_2012_I1125961.plot \
   ATLAS_2012_I1126136.plot \
   ATLAS_2012_I1117704.plot \
   ATLAS_2012_I1118269.plot \
   ATLAS_2012_I1180197.plot \
   ATLAS_2012_I1082009.plot \
   ATLAS_2012_I1183818.plot \
   ATLAS_2012_I1188891.plot \
   ATLAS_2012_I1186556.plot \
   ATLAS_2012_I1190891.plot \
   ATLAS_2012_I1199269.plot \
   ATLAS_2012_I1203852.plot \
   ATLAS_2012_I1204447.plot \
   ATLAS_2012_I1204784.plot \
   ATLAS_2012_CONF_2012_001.plot \
   ATLAS_2012_CONF_2012_103.plot \
   ATLAS_2012_CONF_2012_104.plot \
   ATLAS_2012_CONF_2012_105.plot \
   ATLAS_2012_CONF_2012_109.plot \
   ATLAS_2012_CONF_2012_153.plot \
   ATLAS_2013_I1190187.plot \
   ATLAS_2013_I1219109.plot \
   ATLAS_2013_I1219109_EL.plot \
   ATLAS_2013_I1219109_MU.plot \
   ATLAS_2013_I1217863_W.plot \
   ATLAS_2013_I1217863_W_EL.plot \
   ATLAS_2013_I1217863_W_MU.plot \
   ATLAS_2013_I1217863_Z.plot \
   ATLAS_2013_I1217863_Z_EL.plot \
   ATLAS_2013_I1217863_Z_MU.plot \
   ATLAS_2013_I1217867.plot \
   ATLAS_2013_I1216670.plot \
   ATLAS_2013_I1230812.plot \
   ATLAS_2013_I1230812_EL.plot \
   ATLAS_2013_I1230812_MU.plot \
   ATLAS_2013_I1243871.plot \
   ATLAS_2013_I1244522.plot \
   ATLAS_2013_I1263495.plot \
   ATLAS_2014_I1268975.plot \
   ATLAS_2014_I1279489.plot \
   ATLAS_2014_I1282441.plot \
   ATLAS_2014_I1298811.plot \
   ATLAS_2014_I1304688.plot \
   ATLAS_2014_I1307756.plot \
   ATLAS_2014_I1306294.plot \
   ATLAS_2014_I1306294_EL.plot \
   ATLAS_2014_I1306294_MU.plot \
   ATLAS_2014_I1315949.plot \
   ATLAS_2014_I1325553.plot \
   ATLAS_2014_I1300647.plot \
   ATLAS_2014_I1288706.plot \
   ATLAS_2014_I1307243.plot \
   ATLAS_2014_I1312627.plot \
   ATLAS_2014_I1312627_EL.plot \
   ATLAS_2014_I1312627_MU.plot \
   ATLAS_2014_I1306615.plot \
   ATLAS_2014_I1282447.plot \
   ATLAS_2014_I1298023.plot \
   ATLAS_2014_I1319490.plot \
   ATLAS_2014_I1319490_EL.plot \
   ATLAS_2014_I1319490_MU.plot \
   ATLAS_2014_I1326641.plot \
   ATLAS_2014_I1327229.plot \
   ATLAS_2015_I1393758.plot \
   ATLAS_2015_I1387176.plot \
   ATLAS_2015_I1364361.plot \
   ATLAS_2015_I1351916.plot \
   ATLAS_2015_I1351916_EL.plot \
   ATLAS_2015_I1351916_MU.plot \
   ATLAS_2015_I1345452.plot \
   ATLAS_2015_I1376945.plot \
   ATLAS_2015_I1390114.plot \
   ATLAS_2015_I1394679.plot \
   ATLAS_2015_I1397637.plot \
   ATLAS_2015_I1397635.plot \
   ATLAS_2015_CONF_2015_041.plot \
   ATLAS_2015_CONF_2015_041_EL.plot \
   ATLAS_2015_CONF_2015_041_MU.plot \
   ATLAS_2015_I1408516.plot \
   ATLAS_2015_I1408516_EL.plot \
   ATLAS_2015_I1408516_MU.plot \
   ATLAS_2015_I1404878.plot \
   ATLAS_2016_I1419070.plot \
   ATLAS_2016_I1419652.plot \
   ATLAS_2016_I1424838.plot \
   ATLAS_2016_I1426695.plot \
   ATLAS_2016_I1444991.plot \
   ATLAS_2016_I1457605.plot \
   ATLAS_2016_I1468168.plot \
   BABAR_2003_I593379.plot \
   BABAR_2005_S6181155.plot \
   BABAR_2007_S6895344.plot \
   BABAR_2007_S7266081.plot \
   BABAR_2013_I1116411.plot \
   BABAR_2013_I1238276.plot \
   BABAR_2015_I1334693.plot \
   BELLE_2001_S4598261.plot \
   BELLE_2008_I786560.plot \
   BELLE_2011_I878990.plot \
   BELLE_2013_I1216515.plot \
   BELLE_2013_I1238273.plot \
   BELLE_2015_I1397632.plot \
   CDF_1988_S1865951.plot \
   CDF_1990_S2089246.plot \
   CDF_1993_S2742446.plot \
   CDF_1994_S2952106.plot \
   CDF_1996_S3108457.plot \
   CDF_1996_S3349578.plot \
   CDF_1996_S3418421.plot \
   CDF_1997_S3541940.plot \
   CDF_1998_S3618439.plot \
   CDF_2000_S4155203.plot \
   CDF_2000_S4266730.plot \
   CDF_2001_S4517016.plot \
   CDF_2001_S4563131.plot \
   CDF_2001_S4751469.plot \
   CDF_2002_S4796047.plot \
   CDF_2004_S5839831.plot \
   CDF_2005_S6080774.plot \
   CDF_2005_S6217184.plot \
   CDF_2006_S6450792.plot \
   CDF_2006_S6653332.plot \
   CDF_2007_S7057202.plot \
   CDF_2008_S7540469.plot \
   CDF_2008_S7541902.plot \
   CDF_2008_S7782535.plot \
   CDF_2008_S7828950.plot \
   CDF_2008_S8093652.plot \
   CDF_2008_S8095620.plot \
   CDF_2009_S8233977.plot \
   CDF_2009_NOTE_9936.plot \
   CDF_2009_I856131.plot \
   CDF_2009_S8436959.plot \
   CDF_2010_S8591881_DY.plot \
   CDF_2010_S8591881_QCD.plot \
   CDF_2012_NOTE10874.plot \
   CDF_2012_I1124333.plot \
   CLEO_2004_S5809304.plot \
   CMS_2010_S8547297.plot \
   CMS_2010_S8656010.plot \
   CMS_2011_S8884919.plot \
   CMS_2011_S8941262.plot \
   CMS_2011_S8950903.plot \
   CMS_2011_S8957746.plot \
   CMS_2011_S8968497.plot \
   CMS_2011_S8973270.plot \
   CMS_2011_S8978280.plot \
   CMS_2011_S9086218.plot \
   CMS_2011_S9088458.plot \
   CMS_2011_S9120041.plot \
   CMS_2011_S9215166.plot \
   CMS_2012_I941555.plot \
   CMS_2011_I954992.plot \
   CMS_2012_I1087342.plot \
   CMS_2012_I1090423.plot \
   CMS_2012_I1102908.plot \
   CMS_2012_I1107658.plot \
   CMS_2012_I1184941.plot \
   CMS_2012_I1193338.plot \
   CMS_2013_I1122847.plot \
   CMS_2013_I1208923.plot \
   CMS_2013_I1209721.plot \
   CMS_2013_I1218372.plot \
   CMS_2013_I1224539_DIJET.plot \
   CMS_2013_I1224539_WJET.plot \
   CMS_2013_I1224539_ZJET.plot \
   CMS_2013_I1256943.plot \
   CMS_2013_I1258128.plot \
   CMS_2013_I1261026.plot \
   CMS_2013_I1265659.plot \
   CMS_2013_I1272853.plot \
   CMS_2013_I1273574.plot \
   CMS_2014_I1298810.plot \
   CMS_2014_I1303894.plot \
   CMS_2014_I1305624.plot \
   CMS_2015_I1310737.plot \
   CMS_2015_I1327224.plot \
   CMS_2015_I1346843.plot \
   CMS_2015_I1356998.plot \
   CMS_2015_I1370682.plot \
   CMS_2015_I1384119.plot \
   CMS_2015_I1385107.plot \
   CMS_2015_I1397174.plot \
   CMS_2016_I1473674.plot \
   CMS_2012_PAS_QCD_11_010.plot \
   CMS_QCD_10_024.plot \
   CMSTOTEM_2014_I1294140.plot \
   D0_1995_I398175.plot \
   D0_1996_S3214044.plot \
   D0_1996_S3324664.plot \
   D0_2000_S4480767.plot \
   D0_2000_I499943.plot \
   D0_2001_S4674421.plot \
   D0_2004_S5992206.plot \
   D0_2006_S6438750.plot \
   D0_2007_S7075677.plot \
   D0_2008_S6879055.plot \
   D0_2008_S7554427.plot \
   D0_2008_S7662670.plot \
   D0_2008_S7719523.plot \
   D0_2008_S7837160.plot \
   D0_2008_S7863608.plot \
   D0_2009_S8202443.plot \
   D0_2009_S8320160.plot \
   D0_2009_S8349509.plot \
   D0_2010_S8566488.plot \
   D0_2010_S8570965.plot \
   D0_2010_S8671338.plot \
   D0_2010_S8821313.plot \
   D0_2011_I895662.plot \
   D0_2015_I1324946.plot \
   D0_2000_I503361.plot \
   E735_1998_S3905616.plot \
   DELPHI_1995_S3137023.plot \
   DELPHI_1996_S3430090.plot \
   DELPHI_1999_S3960137.plot \
   DELPHI_2000_S4328825.plot \
   DELPHI_2002_069_CONF_603.plot \
   DELPHI_2011_I890503.plot \
   EXAMPLE.plot \
   H1_1994_S2919893.plot \
   H1_1995_S3167097.plot \
   H1_2000_S4129130.plot \
   JADE_OPAL_2000_S4300807.plot \
   JADE_1998_S3612880.plot \
   LHCB_2010_S8758301.plot \
   LHCB_2010_I867355.plot \
   LHCB_2011_I917009.plot \
   LHCB_2011_I919315.plot \
   LHCB_2012_I1119400.plot \
   LHCB_2012_I1208102.plot \
   LHCB_2013_I1208105.plot \
   LHCB_2013_I1218996.plot \
   LHCB_2014_I1281685.plot \
   LHCB_2015_I1333223.plot \
   LHCF_2012_I1115479.plot \
   MC_DIPHOTON.plot \
   MC_ELECTRONS.plot \
   MC_GENERIC.plot \
   MC_HFJETS.plot \
   MC_HINC.plot \
   MC_HJETS.plot \
   MC_HHJETS.plot \
   MC_HKTSPLITTINGS.plot \
   MC_IDENTIFIED.plot \
   MC_JETS.plot \
   MC_JETTAGS.plot \
   MC_KTSPLITTINGS.plot \
   MC_LEADJETUE.plot \
   MC_MET.plot \
   MC_MUONS.plot \
   MC_PDFS.plot \
   MC_PHOTONINC.plot \
   MC_PHOTONJETS.plot \
   MC_PHOTONKTSPLITTINGS.plot \
   MC_PHOTONS.plot \
   MC_QCD_PARTONS.plot \
   MC_SUSY.plot \
   MC_TAUS.plot \
   MC_TTBAR.plot \
   MC_VH2BB.plot \
   MC_WINC.plot \
   MC_WINC_EL.plot \
   MC_WINC_MU.plot \
   MC_WINC_EL_BARE.plot \
   MC_WINC_MU_BARE.plot \
   MC_WJETS.plot \
   MC_WJETS_EL.plot \
   MC_WJETS_MU.plot \
   MC_WJETS_EL_BARE.plot \
   MC_WJETS_MU_BARE.plot \
   MC_WKTSPLITTINGS.plot \
   MC_WPOL.plot \
   MC_WWINC.plot \
   MC_WWJETS.plot \
   MC_WWKTSPLITTINGS.plot \
   MC_XS.plot \
   MC_ZINC.plot \
   MC_ZINC_EL.plot \
   MC_ZINC_MU.plot \
   MC_ZINC_EL_BARE.plot \
   MC_ZINC_MU_BARE.plot \
   MC_ZJETS.plot \
   MC_ZJETS_EL.plot \
   MC_ZJETS_MU.plot \
   MC_ZJETS_EL_BARE.plot \
   MC_ZJETS_MU_BARE.plot \
   MC_ZKTSPLITTINGS.plot \
   MC_ZZINC.plot \
   MC_ZZJETS.plot \
   MC_ZZKTSPLITTINGS.plot \
   L3_1992_I336180.plot \
   OPAL_1993_S2692198.plot \
   OPAL_1993_I342766.plot \
   OPAL_1994_S2927284.plot \
   OPAL_1995_S3198391.plot \
   OPAL_1996_S3257789.plot \
   OPAL_1997_S3396100.plot \
   OPAL_1997_S3608263.plot \
   OPAL_1998_S3702294.plot \
   OPAL_1998_S3749908.plot \
   OPAL_1998_S3780481.plot \
   OPAL_2000_S4418603.plot \
   OPAL_2001_S4553896.plot \
   OPAL_2002_S5361494.plot \
   OPAL_2003_I599181.plot \
   OPAL_2004_S6132243.plot \
   PDG_HADRON_MULTIPLICITIES.plot \
   PDG_HADRON_MULTIPLICITIES_RATIOS.plot \
   PDG_TAUS.plot \
   SFM_1984_S1178091.plot \
   SLD_1996_S3398250.plot \
   SLD_1999_S3743934.plot \
   SLD_2002_S4869273.plot \
   SLD_2004_S5693039.plot \
   STAR_2006_S6500200.plot \
   STAR_2006_S6860818.plot \
   STAR_2006_S6870392.plot \
   STAR_2008_S7869363.plot \
   STAR_2008_S7993412.plot \
   STAR_2009_UE_HELEN.plot \
   TASSO_1990_S2148048.plot \
   TOTEM_2012_I1115294.plot \
   TOTEM_2012_I1220862.plot \
   TOTEM_2014_I1328627.plot \
   ZEUS_2001_S4815815.plot \
   UA1_1990_S2044935.plot \
   UA5_1982_S875503.plot \
   UA5_1986_S1583476.plot \
   UA5_1987_S1640666.plot \
   UA5_1988_S1867512.plot \
   UA5_1989_S1926373.plot
diff --git a/data/refdata/ALICE_2012_I1116147.yoda b/data/refdata/ALICE_2012_I1116147.yoda
new file mode 100644
--- /dev/null
+++ b/data/refdata/ALICE_2012_I1116147.yoda
@@ -0,0 +1,126 @@
+# BEGIN YODA_SCATTER2D /REF/ALICE_2012_I1116147/d01-x01-y01
+Path=/REF/ALICE_2012_I1116147/d01-x01-y01
+Type=Scatter2D
+# xval   xerr-   xerr+   yval   yerr-   yerr+
+0.35	0.04999999999999999	0.050000000000000044			88580.0	23448.660942578364	23448.660942578364
+0.449	0.04899999999999999	0.05099999999999999			67370.0	10971.313731727847	10971.313731727847
+0.549	0.049000000000000044	0.050999999999999934			38660.0	5669.586316478479	5669.586316478479
+0.694	0.09399999999999997	0.1060000000000001			20560.0	1974.526272299257	1974.526272299257
+0.894	0.09399999999999997	0.10599999999999998			8903.0	802.9701115234614	802.9701115234614
+1.095	0.09499999999999997	0.10499999999999998			4165.0	310.4528949776439	310.4528949776439
+1.295	0.09499999999999997	0.10499999999999998			2152.0	138.81282361511128	138.81282361511128
+1.495	0.0950000000000002	0.10499999999999998			1187.0	60.530983801686226	60.530983801686226
+1.696	0.09599999999999986	0.10400000000000009			677.7	31.76476034853718	31.76476034853718
+1.896	0.09599999999999986	0.10400000000000009			393.8	18.36355085488643	18.36355085488643
+2.096	0.09600000000000009	0.10400000000000009			241.2	11.002272492535349	11.002272492535349
+2.296	0.09599999999999964	0.10400000000000009			150.2	6.7468511173731995	6.7468511173731995
+2.497	0.09699999999999998	0.1030000000000002			99.72	4.615733961137709	4.615733961137709
+2.697	0.09699999999999998	0.10299999999999976			67.19	3.1531730050855122	3.1531730050855122
+2.897	0.09699999999999998	0.1030000000000002			44.36	2.1676023620581337	2.1676023620581337
+3.097	0.09699999999999998	0.1030000000000002			32.46	1.53687995627505	1.53687995627505
+3.297	0.09699999999999998	0.10299999999999976			21.98	1.0568822072492279	1.0568822072492279
+3.497	0.09699999999999998	0.1030000000000002			15.97	0.8228000972289684	0.8228000972289684
+3.697	0.09699999999999998	0.10299999999999976			11.69	0.5818075283115542	0.5818075283115542
+3.898	0.09800000000000031	0.10199999999999987			8.544	0.4482064256567503	0.4482064256567503
+4.236	0.23599999999999977	0.26400000000000023			5.376	0.253298243183801	0.253298243183801
+4.737	0.2370000000000001	0.2629999999999999			2.68	0.1341640786499874	0.1341640786499874
+5.238	0.23800000000000043	0.26199999999999957			1.475	0.07940403012442127	0.07940403012442127
+5.739	0.23899999999999988	0.2610000000000001			0.9079	0.05289357238833468	0.05289357238833468
+6.461	0.4610000000000003	0.5389999999999997			0.42	0.024935316320431947	0.024935316320431947
+7.465	0.46499999999999986	0.5350000000000001			0.2057	0.01422005625867915	0.01422005625867915
+8.883	0.8829999999999991	1.1170000000000009			0.06003	0.004400693127224392	0.004400693127224392
+10.902	0.9019999999999992	1.0980000000000008			0.01891	0.0017425555945220228	0.0017425555945220228
+12.916	0.9160000000000004	1.0839999999999996			0.006357	8.910454533860772E-4	8.910454533860772E-4
+14.927	0.9269999999999996	1.0730000000000004			0.002958	9.746902071940601E-4	9.746902071940601E-4
+16.935	0.9349999999999987	1.0650000000000013			9.478E-4	3.746797165580224E-4	3.746797165580224E-4
+18.941	0.940999999999999	1.059000000000001			3.942E-4	1.910556201738122E-4	1.910556201738122E-4
+22.197	2.196999999999999	2.803000000000001			2.052E-4	7.44452819190041E-5	7.44452819190041E-5
+# END YODA_SCATTER2D 
+
+
+# BEGIN YODA_SCATTER2D /REF/ALICE_2012_I1116147/d02-x01-y01
+Path=/REF/ALICE_2012_I1116147/d02-x01-y01
+Type=Scatter2D
+# xval   xerr-   xerr+   yval   yerr-   yerr+
+0.495	0.09499999999999997	0.10499999999999998			27760.0	7468.831568592239	7468.831568592239
+0.694	0.09399999999999997	0.1060000000000001			8458.0	1232.0551935688595	1232.0551935688595
+0.894	0.09399999999999997	0.10599999999999998			3404.0	395.82824558133797	395.82824558133797
+1.095	0.09499999999999997	0.10499999999999998			1474.0	155.03870484495155	155.03870484495155
+1.295	0.09499999999999997	0.10499999999999998			801.4	84.76803642883324	84.76803642883324
+1.495	0.0950000000000002	0.10499999999999998			380.0	40.98499725509323	40.98499725509323
+1.783	0.18299999999999983	0.21700000000000008			143.4	14.5	14.5
+2.228	0.2280000000000002	0.2719999999999998			41.24	4.951464429842953	4.951464429842953
+2.73	0.22999999999999998	0.27			18.55	2.6664770765937593	2.6664770765937593
+3.233	0.2330000000000001	0.2669999999999999			5.385	0.8742413854308202	0.8742413854308202
+3.735	0.23499999999999988	0.2650000000000001			2.531	0.47773737555271933	0.47773737555271933
+4.447	0.44700000000000006	0.5529999999999999			0.7415	0.15614957572788982	0.15614957572788982
+5.818	0.8179999999999996	1.1820000000000004			0.09131	0.026135173617177293	0.026135173617177293
+# END YODA_SCATTER2D 
+
+
+# BEGIN YODA_SCATTER2D /REF/ALICE_2012_I1116147/d03-x01-y01
+Path=/REF/ALICE_2012_I1116147/d03-x01-y01
+Type=Scatter2D
+# xval   xerr-   xerr+   yval   yerr-   yerr+
+0.548	0.14800000000000002	0.1519999999999999			4216.0	1363.117016253557	1363.117016253557
+0.839	0.139	0.16100000000000003			1797.0	317.2002522067093	317.2002522067093
+1.179	0.17900000000000005	0.22099999999999986			741.4	98.7656822990658	98.7656822990658
+1.581	0.18100000000000005	0.21900000000000008			261.2	29.47032405658275	29.47032405658275
+1.983	0.18300000000000005	0.21700000000000008			107.6	13.124404748406688	13.124404748406688
+2.385	0.1849999999999996	0.2150000000000003			45.85	4.964483860382669	4.964483860382669
+2.786	0.18599999999999994	0.21399999999999997			21.99	2.7572631357924475	2.7572631357924475
+3.231	0.23099999999999987	0.26900000000000013			9.723	1.1775623125762815	1.1775623125762815
+3.733	0.2330000000000001	0.2669999999999999			5.203	0.612474489264655	0.612474489264655
+4.792	0.7919999999999998	1.2080000000000002			1.134	0.12625371281669304	0.12625371281669304
+6.846	0.8460000000000001	1.154			0.1672	0.02846085030353099	0.02846085030353099
+8.877	0.8770000000000007	1.1229999999999993			0.03576	0.016479881674332498	0.016479881674332498
+11.98	1.9800000000000004	3.0199999999999996			0.004347	0.0025059852353914616	0.0025059852353914616
+# END YODA_SCATTER2D 
+
+#------------------------------------------------------------
+# HERE IS THE YODA SCATTER2D PLOT THAT HAS NO x-ERRORS
+#------------------------------------------------------------
+# BEGIN YODA_SCATTER2D /REF/ALICE_2012_I1116147/d04-x01-y01
+#Path=/REF/ALICE_2012_I1116147/d04-x01-y01
+#Type=Scatter2D
+# xval   xerr-   xerr+   yval   yerr-   yerr+
+#0.55	0.0	0.0			0.1067	0.03347013594235912	0.03347013594235912
+#0.85	0.0	0.0			0.1655	0.02529268669002959	0.02529268669002959
+#1.2	0.0	0.0			0.2501	0.02603113520382851	0.02603113520382851
+#1.6	0.0	0.0			0.2866	0.026801865606707307	0.026801865606707307
+#2.0	0.0	0.0			0.3514	0.030696090956341654	0.030696090956341654
+#2.4	0.0	0.0			0.3547	0.02972574641619618	0.02972574641619618
+#2.8	0.0	0.0			0.3757	0.035979021665409415	0.035979021665409415
+#3.25	0.0	0.0			0.3987	0.03927250946909301	0.03927250946909301
+#3.75	0.0	0.0			0.4655	0.04585193561890272	0.04585193561890272
+#5.0	0.0	0.0			0.4686	0.04081041533726409	0.04081041533726409
+#7.0	0.0	0.0			0.5014	0.08140718641495971	0.08140718641495971
+#9.0	0.0	0.0			0.7082	0.18510386273657284	0.18510386273657284
+#12.5	0.0	0.0			0.5205	0.27690873947927325	0.27690873947927325
+# END YODA_SCATTER2D 
+
+
+#------------------------------------------------------------
+# THIS IS TAKEN FROM THE AIDA FILE FROM HEPDATA,
+# THEN CONVERTED TO YODA WITH AIDA2YODA.
+# IT HAS THE RIGHT "x-ERRORS" AND THUS THE RIGHT BINNING
+#------------------------------------------------------------
+BEGIN YODA_SCATTER2D /REF/ALICE_2012_I1116147/d04-x01-y01
+Path=/REF/ALICE_2012_I1116147/d04-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+5.500000e-01	1.500000e-01	1.500000e-01	1.067000e-01	3.347014e-02	3.347014e-02
+8.500000e-01	1.500000e-01	1.750000e-01	1.655000e-01	2.529269e-02	2.529269e-02
+1.200000e+00	1.750000e-01	2.000000e-01	2.501000e-01	2.603114e-02	2.603114e-02
+1.600000e+00	2.000000e-01	2.000000e-01	2.866000e-01	2.680187e-02	2.680187e-02
+2.000000e+00	2.000000e-01	2.000000e-01	3.514000e-01	3.069609e-02	3.069609e-02
+2.400000e+00	2.000000e-01	2.000000e-01	3.547000e-01	2.972575e-02	2.972575e-02
+2.800000e+00	2.000000e-01	2.250000e-01	3.757000e-01	3.597902e-02	3.597902e-02
+3.250000e+00	2.250000e-01	2.500000e-01	3.987000e-01	3.927251e-02	3.927251e-02
+3.750000e+00	2.500000e-01	6.250000e-01	4.655000e-01	4.585194e-02	4.585194e-02
+5.000000e+00	6.250000e-01	1.000000e+00	4.686000e-01	4.081042e-02	4.081042e-02
+7.000000e+00	1.000000e+00	1.000000e+00	5.014000e-01	8.140719e-02	8.140719e-02
+9.000000e+00	1.000000e+00	1.750000e+00	7.082000e-01	1.851039e-01	1.851039e-01
+1.250000e+01	1.750000e+00	1.750000e+00	5.205000e-01	2.769087e-01	2.769087e-01
+END YODA_SCATTER2D
diff --git a/data/refdata/ATLAS_2011_I929691.yoda b/data/refdata/ATLAS_2011_I929691.yoda
new file mode 100644
--- /dev/null
+++ b/data/refdata/ATLAS_2011_I929691.yoda
@@ -0,0 +1,619 @@
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d01-x01-y01
+Path=/REF/ATLAS_2011_I929691/d01-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+1.405000e-02	1.950000e-03	1.950000e-03	4.304000e+01	2.020742e+00	2.020742e+00
+1.860000e-02	2.600000e-03	2.600000e-03	1.007000e+02	3.178050e+00	3.178050e+00
+2.460000e-02	3.400000e-03	3.400000e-03	1.132000e+02	3.436568e+00	3.436568e+00
+3.255000e-02	4.550000e-03	4.550000e-03	9.569000e+01	3.317891e+00	3.317891e+00
+4.305000e-02	5.950000e-03	5.950000e-03	7.317000e+01	2.618416e+00	2.618416e+00
+5.690000e-02	7.900000e-03	7.900000e-03	5.370000e+01	2.206581e+00	2.206581e+00
+7.520000e-02	1.040000e-02	1.040000e-02	3.680000e+01	1.631104e+00	1.631104e+00
+9.940000e-02	1.380000e-02	1.380000e-02	2.358000e+01	1.176138e+00	1.176138e+00
+1.314500e-01	1.825000e-02	1.825000e-02	1.397000e+01	8.438009e-01	8.438009e-01
+1.738500e-01	2.415000e-02	2.415000e-02	7.916000e+00	5.265453e-01	5.265453e-01
+2.299000e-01	3.190000e-02	3.190000e-02	3.977000e+00	3.416958e-01	3.416958e-01
+3.039500e-01	4.215000e-02	4.215000e-02	1.734000e+00	1.997248e-01	1.997248e-01
+4.018500e-01	5.575000e-02	5.575000e-02	6.573000e-01	9.450608e-02	9.450608e-02
+5.313000e-01	7.370000e-02	7.370000e-02	2.055000e-01	4.754293e-02	4.754293e-02
+7.025000e-01	9.750000e-02	9.750000e-02	5.147000e-02	2.436682e-02	2.436682e-02
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d02-x01-y01
+Path=/REF/ATLAS_2011_I929691/d02-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+8.050000e-03	1.150000e-03	1.150000e-03	3.209000e+01	2.107724e+00	2.107724e+00
+1.065000e-02	1.450000e-03	1.450000e-03	1.252000e+02	6.082763e+00	6.082763e+00
+1.405000e-02	1.950000e-03	1.950000e-03	1.819000e+02	7.539231e+00	7.539231e+00
+1.860000e-02	2.600000e-03	2.600000e-03	1.644000e+02	6.356886e+00	6.356886e+00
+2.460000e-02	3.400000e-03	3.400000e-03	1.335000e+02	5.124451e+00	5.124451e+00
+3.255000e-02	4.550000e-03	4.550000e-03	1.012000e+02	3.956008e+00	3.956008e+00
+4.305000e-02	5.950000e-03	5.950000e-03	7.575000e+01	2.993226e+00	2.993226e+00
+5.690000e-02	7.900000e-03	7.900000e-03	5.291000e+01	2.206015e+00	2.206015e+00
+7.520000e-02	1.040000e-02	1.040000e-02	3.577000e+01	1.579652e+00	1.579652e+00
+9.940000e-02	1.380000e-02	1.380000e-02	2.257000e+01	1.079120e+00	1.079120e+00
+1.314500e-01	1.825000e-02	1.825000e-02	1.328000e+01	6.926038e-01	6.926038e-01
+1.738500e-01	2.415000e-02	2.415000e-02	7.312000e+00	4.415699e-01	4.415699e-01
+2.299000e-01	3.190000e-02	3.190000e-02	3.811000e+00	2.581550e-01	2.581550e-01
+3.039500e-01	4.215000e-02	4.215000e-02	1.666000e+00	1.508410e-01	1.508410e-01
+4.018500e-01	5.575000e-02	5.575000e-02	6.541000e-01	7.918188e-02	7.918188e-02
+5.313000e-01	7.370000e-02	7.370000e-02	2.253000e-01	4.623127e-02	4.623127e-02
+7.025000e-01	9.750000e-02	9.750000e-02	6.794000e-02	3.910165e-02	3.910165e-02
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d03-x01-y01
+Path=/REF/ATLAS_2011_I929691/d03-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+6.050000e-03	8.500000e-04	8.500000e-04	8.191000e+01	7.030512e+00	7.030512e+00
+8.050000e-03	1.150000e-03	1.150000e-03	2.413000e+02	1.274402e+01	1.274402e+01
+1.065000e-02	1.450000e-03	1.450000e-03	2.635000e+02	1.202414e+01	1.202414e+01
+1.405000e-02	1.950000e-03	1.950000e-03	2.262000e+02	1.061037e+01	1.061037e+01
+1.860000e-02	2.600000e-03	2.600000e-03	1.907000e+02	8.769265e+00	8.769265e+00
+2.460000e-02	3.400000e-03	3.400000e-03	1.427000e+02	6.161980e+00	6.161980e+00
+3.255000e-02	4.550000e-03	4.550000e-03	1.071000e+02	5.093133e+00	5.093133e+00
+4.305000e-02	5.950000e-03	5.950000e-03	8.187000e+01	3.628361e+00	3.628361e+00
+5.690000e-02	7.900000e-03	7.900000e-03	5.370000e+01	2.304821e+00	2.304821e+00
+7.520000e-02	1.040000e-02	1.040000e-02	3.399000e+01	1.711198e+00	1.711198e+00
+9.940000e-02	1.380000e-02	1.380000e-02	2.303000e+01	1.323820e+00	1.323820e+00
+1.314500e-01	1.825000e-02	1.825000e-02	1.328000e+01	8.139410e-01	8.139410e-01
+1.738500e-01	2.415000e-02	2.415000e-02	7.043000e+00	4.432212e-01	4.432212e-01
+2.299000e-01	3.190000e-02	3.190000e-02	3.829000e+00	3.334382e-01	3.334382e-01
+3.039500e-01	4.215000e-02	4.215000e-02	1.600000e+00	1.618394e-01	1.618394e-01
+4.018500e-01	5.575000e-02	5.575000e-02	6.431000e-01	1.263450e-01	1.263450e-01
+5.313000e-01	7.370000e-02	7.370000e-02	2.448000e-01	6.046263e-02	6.046263e-02
+7.025000e-01	9.750000e-02	9.750000e-02	3.722000e-02	2.154877e-02	2.154877e-02
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d04-x01-y01
+Path=/REF/ATLAS_2011_I929691/d04-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+4.600000e-03	6.000000e-04	6.000000e-04	1.028000e+02	1.460137e+01	1.460137e+01
+6.050000e-03	8.500000e-04	8.500000e-04	2.926000e+02	2.479234e+01	2.479234e+01
+8.050000e-03	1.150000e-03	1.150000e-03	3.196000e+02	2.438237e+01	2.438237e+01
+1.065000e-02	1.450000e-03	1.450000e-03	2.833000e+02	1.889153e+01	1.889153e+01
+1.405000e-02	1.950000e-03	1.950000e-03	2.411000e+02	1.332216e+01	1.332216e+01
+1.860000e-02	2.600000e-03	2.600000e-03	1.934000e+02	1.170470e+01	1.170470e+01
+2.460000e-02	3.400000e-03	3.400000e-03	1.571000e+02	9.763708e+00	9.763708e+00
+3.255000e-02	4.550000e-03	4.550000e-03	1.153000e+02	7.357309e+00	7.357309e+00
+4.305000e-02	5.950000e-03	5.950000e-03	8.260000e+01	5.217394e+00	5.217394e+00
+5.690000e-02	7.900000e-03	7.900000e-03	5.368000e+01	4.025916e+00	4.025916e+00
+7.520000e-02	1.040000e-02	1.040000e-02	3.439000e+01	2.252110e+00	2.252110e+00
+9.940000e-02	1.380000e-02	1.380000e-02	2.177000e+01	1.900789e+00	1.900789e+00
+1.314500e-01	1.825000e-02	1.825000e-02	1.208000e+01	1.033538e+00	1.033538e+00
+1.738500e-01	2.415000e-02	2.415000e-02	6.905000e+00	6.200556e-01	6.200556e-01
+2.299000e-01	3.190000e-02	3.190000e-02	4.126000e+00	4.463272e-01	4.463272e-01
+3.039500e-01	4.215000e-02	4.215000e-02	1.358000e+00	1.995420e-01	1.995420e-01
+4.018500e-01	5.575000e-02	5.575000e-02	6.220000e-01	2.079259e-01	2.079259e-01
+5.313000e-01	7.370000e-02	7.370000e-02	2.026000e-01	9.305622e-02	9.305622e-02
+7.025000e-01	9.750000e-02	9.750000e-02	2.063000e-02	1.336075e-02	1.336075e-02
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d05-x01-y01
+Path=/REF/ATLAS_2011_I929691/d05-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+3.500000e-03	5.000000e-04	5.000000e-04	2.354000e+02	8.732125e+00	8.732125e+00
+4.600000e-03	6.000000e-04	6.000000e-04	5.092000e+02	1.645843e+01	1.645843e+01
+6.050000e-03	8.500000e-04	8.500000e-04	5.273000e+02	1.793683e+01	1.793683e+01
+8.050000e-03	1.150000e-03	1.150000e-03	4.335000e+02	1.418238e+01	1.418238e+01
+1.065000e-02	1.450000e-03	1.450000e-03	3.508000e+02	1.101681e+01	1.101681e+01
+1.405000e-02	1.950000e-03	1.950000e-03	2.772000e+02	8.415462e+00	8.415462e+00
+1.860000e-02	2.600000e-03	2.600000e-03	2.149000e+02	6.894926e+00	6.894926e+00
+2.460000e-02	3.400000e-03	3.400000e-03	1.607000e+02	5.162364e+00	5.162364e+00
+3.255000e-02	4.550000e-03	4.550000e-03	1.166000e+02	3.889730e+00	3.889730e+00
+4.305000e-02	5.950000e-03	5.950000e-03	7.998000e+01	2.631368e+00	2.631368e+00
+5.690000e-02	7.900000e-03	7.900000e-03	5.248000e+01	1.794269e+00	1.794269e+00
+7.520000e-02	1.040000e-02	1.040000e-02	3.409000e+01	1.296919e+00	1.296919e+00
+9.940000e-02	1.380000e-02	1.380000e-02	2.112000e+01	8.100617e-01	8.100617e-01
+1.314500e-01	1.825000e-02	1.825000e-02	1.263000e+01	5.412024e-01	5.412024e-01
+1.738500e-01	2.415000e-02	2.415000e-02	6.728000e+00	2.986135e-01	2.986135e-01
+2.299000e-01	3.190000e-02	3.190000e-02	3.257000e+00	1.748885e-01	1.748885e-01
+3.039500e-01	4.215000e-02	4.215000e-02	1.432000e+00	9.976472e-02	9.976472e-02
+4.018500e-01	5.575000e-02	5.575000e-02	5.189000e-01	5.507032e-02	5.507032e-02
+5.313000e-01	7.370000e-02	7.370000e-02	1.989000e-01	2.903188e-02	2.903188e-02
+7.025000e-01	9.750000e-02	9.750000e-02	6.337000e-02	1.976788e-02	1.976788e-02
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d06-x01-y01
+Path=/REF/ATLAS_2011_I929691/d06-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+3.500000e-03	5.000000e-04	5.000000e-04	7.801000e+02	2.959899e+01	2.959899e+01
+4.600000e-03	6.000000e-04	6.000000e-04	7.278000e+02	2.921660e+01	2.921660e+01
+6.050000e-03	8.500000e-04	8.500000e-04	6.065000e+02	2.317952e+01	2.317952e+01
+8.050000e-03	1.150000e-03	1.150000e-03	4.859000e+02	1.816618e+01	1.816618e+01
+1.065000e-02	1.450000e-03	1.450000e-03	3.902000e+02	1.475263e+01	1.475263e+01
+1.405000e-02	1.950000e-03	1.950000e-03	3.004000e+02	1.140175e+01	1.140175e+01
+1.860000e-02	2.600000e-03	2.600000e-03	2.253000e+02	8.634813e+00	8.634813e+00
+2.460000e-02	3.400000e-03	3.400000e-03	1.656000e+02	6.403124e+00	6.403124e+00
+3.255000e-02	4.550000e-03	4.550000e-03	1.197000e+02	4.707441e+00	4.707441e+00
+4.305000e-02	5.950000e-03	5.950000e-03	8.348000e+01	3.253014e+00	3.253014e+00
+5.690000e-02	7.900000e-03	7.900000e-03	5.343000e+01	2.127769e+00	2.127769e+00
+7.520000e-02	1.040000e-02	1.040000e-02	3.423000e+01	1.411559e+00	1.411559e+00
+9.940000e-02	1.380000e-02	1.380000e-02	2.060000e+01	9.176056e-01	9.176056e-01
+1.314500e-01	1.825000e-02	1.825000e-02	1.185000e+01	5.664804e-01	5.664804e-01
+1.738500e-01	2.415000e-02	2.415000e-02	6.592000e+00	3.236495e-01	3.236495e-01
+2.299000e-01	3.190000e-02	3.190000e-02	3.112000e+00	1.684340e-01	1.684340e-01
+3.039500e-01	4.215000e-02	4.215000e-02	1.402000e+00	9.493682e-02	9.493682e-02
+4.018500e-01	5.575000e-02	5.575000e-02	5.375000e-01	4.603586e-02	4.603586e-02
+5.313000e-01	7.370000e-02	7.370000e-02	2.011000e-01	2.548490e-02	2.548490e-02
+7.025000e-01	9.750000e-02	9.750000e-02	5.159000e-02	1.534719e-02	1.534719e-02
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d07-x01-y01
+Path=/REF/ATLAS_2011_I929691/d07-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+3.500000e-03	5.000000e-04	5.000000e-04	9.845000e+02	3.360268e+01	3.360268e+01
+4.600000e-03	6.000000e-04	6.000000e-04	8.136000e+02	3.010548e+01	3.010548e+01
+6.050000e-03	8.500000e-04	8.500000e-04	6.818000e+02	2.298043e+01	2.298043e+01
+8.050000e-03	1.150000e-03	1.150000e-03	5.419000e+02	1.814635e+01	1.814635e+01
+1.065000e-02	1.450000e-03	1.450000e-03	4.150000e+02	1.414814e+01	1.414814e+01
+1.405000e-02	1.950000e-03	1.950000e-03	3.189000e+02	1.100636e+01	1.100636e+01
+1.860000e-02	2.600000e-03	2.600000e-03	2.364000e+02	7.981228e+00	7.981228e+00
+2.460000e-02	3.400000e-03	3.400000e-03	1.681000e+02	5.772348e+00	5.772348e+00
+3.255000e-02	4.550000e-03	4.550000e-03	1.211000e+02	4.684015e+00	4.684015e+00
+4.305000e-02	5.950000e-03	5.950000e-03	8.364000e+01	3.089547e+00	3.089547e+00
+5.690000e-02	7.900000e-03	7.900000e-03	5.341000e+01	2.017573e+00	2.017573e+00
+7.520000e-02	1.040000e-02	1.040000e-02	3.392000e+01	1.311030e+00	1.311030e+00
+9.940000e-02	1.380000e-02	1.380000e-02	2.012000e+01	8.860023e-01	8.860023e-01
+1.314500e-01	1.825000e-02	1.825000e-02	1.162000e+01	5.126402e-01	5.126402e-01
+1.738500e-01	2.415000e-02	2.415000e-02	6.470000e+00	2.935864e-01	2.935864e-01
+2.299000e-01	3.190000e-02	3.190000e-02	3.054000e+00	1.733263e-01	1.733263e-01
+3.039500e-01	4.215000e-02	4.215000e-02	1.412000e+00	8.969392e-02	8.969392e-02
+4.018500e-01	5.575000e-02	5.575000e-02	5.705000e-01	6.463412e-02	6.463412e-02
+5.313000e-01	7.370000e-02	7.370000e-02	1.782000e-01	1.947434e-02	1.947434e-02
+7.025000e-01	9.750000e-02	9.750000e-02	4.339000e-02	1.113677e-02	1.113677e-02
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d08-x01-y01
+Path=/REF/ATLAS_2011_I929691/d08-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+3.500000e-03	5.000000e-04	5.000000e-04	1.098500e+03	3.567464e+01	3.567464e+01
+4.600000e-03	6.000000e-04	6.000000e-04	8.695000e+02	2.968451e+01	2.968451e+01
+6.050000e-03	8.500000e-04	8.500000e-04	7.096000e+02	2.298369e+01	2.298369e+01
+8.050000e-03	1.150000e-03	1.150000e-03	5.587000e+02	1.814635e+01	1.814635e+01
+1.065000e-02	1.450000e-03	1.450000e-03	4.285000e+02	1.401606e+01	1.401606e+01
+1.405000e-02	1.950000e-03	1.950000e-03	3.239000e+02	1.077868e+01	1.077868e+01
+1.860000e-02	2.600000e-03	2.600000e-03	2.389000e+02	7.851751e+00	7.851751e+00
+2.460000e-02	3.400000e-03	3.400000e-03	1.716000e+02	5.755867e+00	5.755867e+00
+3.255000e-02	4.550000e-03	4.550000e-03	1.202000e+02	4.130375e+00	4.130375e+00
+4.305000e-02	5.950000e-03	5.950000e-03	8.144000e+01	2.782104e+00	2.782104e+00
+5.690000e-02	7.900000e-03	7.900000e-03	5.326000e+01	1.910628e+00	1.910628e+00
+7.520000e-02	1.040000e-02	1.040000e-02	3.344000e+01	1.244588e+00	1.244588e+00
+9.940000e-02	1.380000e-02	1.380000e-02	2.019000e+01	1.224949e+00	1.224949e+00
+1.314500e-01	1.825000e-02	1.825000e-02	1.138000e+01	5.246904e-01	5.246904e-01
+1.738500e-01	2.415000e-02	2.415000e-02	6.128000e+00	2.753779e-01	2.753779e-01
+2.299000e-01	3.190000e-02	3.190000e-02	3.075000e+00	2.186733e-01	2.186733e-01
+3.039500e-01	4.215000e-02	4.215000e-02	1.418000e+00	1.675052e-01	1.675052e-01
+4.018500e-01	5.575000e-02	5.575000e-02	5.518000e-01	4.964071e-02	4.964071e-02
+5.313000e-01	7.370000e-02	7.370000e-02	1.893000e-01	2.046778e-02	2.046778e-02
+7.025000e-01	9.750000e-02	9.750000e-02	5.641000e-02	1.494045e-02	1.494045e-02
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d09-x01-y01
+Path=/REF/ATLAS_2011_I929691/d09-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+3.500000e-03	5.000000e-04	5.000000e-04	1.172000e+03	3.788681e+01	3.788681e+01
+4.600000e-03	6.000000e-04	6.000000e-04	9.072000e+02	3.098596e+01	3.098596e+01
+6.050000e-03	8.500000e-04	8.500000e-04	7.296000e+02	2.361779e+01	2.361779e+01
+8.050000e-03	1.150000e-03	1.150000e-03	5.652000e+02	1.871069e+01	1.871069e+01
+1.065000e-02	1.450000e-03	1.450000e-03	4.352000e+02	1.432655e+01	1.432655e+01
+1.405000e-02	1.950000e-03	1.950000e-03	3.297000e+02	1.079120e+01	1.079120e+01
+1.860000e-02	2.600000e-03	2.600000e-03	2.419000e+02	8.448077e+00	8.448077e+00
+2.460000e-02	3.400000e-03	3.400000e-03	1.720000e+02	5.656854e+00	5.656854e+00
+3.255000e-02	4.550000e-03	4.550000e-03	1.196000e+02	3.945884e+00	3.945884e+00
+4.305000e-02	5.950000e-03	5.950000e-03	8.088000e+01	2.766839e+00	2.766839e+00
+5.690000e-02	7.900000e-03	7.900000e-03	5.260000e+01	1.851486e+00	1.851486e+00
+7.520000e-02	1.040000e-02	1.040000e-02	3.255000e+01	1.228088e+00	1.228088e+00
+9.940000e-02	1.380000e-02	1.380000e-02	1.972000e+01	8.174350e-01	8.174350e-01
+1.314500e-01	1.825000e-02	1.825000e-02	1.121000e+01	4.903060e-01	4.903060e-01
+1.738500e-01	2.415000e-02	2.415000e-02	6.053000e+00	2.814267e-01	2.814267e-01
+2.299000e-01	3.190000e-02	3.190000e-02	3.066000e+00	1.653874e-01	1.653874e-01
+3.039500e-01	4.215000e-02	4.215000e-02	1.417000e+00	9.178235e-02	9.178235e-02
+4.018500e-01	5.575000e-02	5.575000e-02	5.857000e-01	5.409510e-02	5.409510e-02
+5.313000e-01	7.370000e-02	7.370000e-02	2.107000e-01	2.151953e-02	2.151953e-02
+7.025000e-01	9.750000e-02	9.750000e-02	5.553000e-02	1.250496e-02	1.250496e-02
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d10-x01-y01
+Path=/REF/ATLAS_2011_I929691/d10-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+3.500000e-03	5.000000e-04	5.000000e-04	1.243200e+03	3.752386e+01	3.752386e+01
+4.600000e-03	6.000000e-04	6.000000e-04	9.537000e+02	2.988394e+01	2.988394e+01
+6.050000e-03	8.500000e-04	8.500000e-04	7.693000e+02	2.272004e+01	2.272004e+01
+8.050000e-03	1.150000e-03	1.150000e-03	5.916000e+02	2.139252e+01	2.139252e+01
+1.065000e-02	1.450000e-03	1.450000e-03	4.430000e+02	1.338096e+01	1.338096e+01
+1.405000e-02	1.950000e-03	1.950000e-03	3.353000e+02	1.002846e+01	1.002846e+01
+1.860000e-02	2.600000e-03	2.600000e-03	2.442000e+02	7.665507e+00	7.665507e+00
+2.460000e-02	3.400000e-03	3.400000e-03	1.740000e+02	5.375872e+00	5.375872e+00
+3.255000e-02	4.550000e-03	4.550000e-03	1.191000e+02	3.700000e+00	3.700000e+00
+4.305000e-02	5.950000e-03	5.950000e-03	8.005000e+01	2.656501e+00	2.656501e+00
+5.690000e-02	7.900000e-03	7.900000e-03	5.216000e+01	1.711286e+00	1.711286e+00
+7.520000e-02	1.040000e-02	1.040000e-02	3.204000e+01	1.141622e+00	1.141622e+00
+9.940000e-02	1.380000e-02	1.380000e-02	1.931000e+01	7.912016e-01	7.912016e-01
+1.314500e-01	1.825000e-02	1.825000e-02	1.107000e+01	4.272002e-01	4.272002e-01
+1.738500e-01	2.415000e-02	2.415000e-02	6.044000e+00	2.561816e-01	2.561816e-01
+2.299000e-01	3.190000e-02	3.190000e-02	3.123000e+00	1.623392e-01	1.623392e-01
+3.039500e-01	4.215000e-02	4.215000e-02	1.473000e+00	9.638465e-02	9.638465e-02
+4.018500e-01	5.575000e-02	5.575000e-02	6.179000e-01	6.379216e-02	6.379216e-02
+5.313000e-01	7.370000e-02	7.370000e-02	2.268000e-01	2.802231e-02	2.802231e-02
+7.025000e-01	9.750000e-02	9.750000e-02	5.536000e-02	1.063328e-02	1.063328e-02
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d11-x01-y01
+Path=/REF/ATLAS_2011_I929691/d11-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+3.000000e-02	3.000000e-02	3.000000e-02	4.777000e+01	3.308716e+00	3.308716e+00
+9.000000e-02	3.000000e-02	3.000000e-02	2.959000e+01	1.772823e+00	1.772823e+00
+1.500000e-01	3.000000e-02	3.000000e-02	1.779000e+01	8.814193e-01	8.814193e-01
+2.100000e-01	3.000000e-02	3.000000e-02	1.136000e+01	4.909175e-01	4.909175e-01
+2.700000e-01	3.000000e-02	3.000000e-02	7.727000e+00	3.422309e-01	3.422309e-01
+3.300000e-01	3.000000e-02	3.000000e-02	5.623000e+00	2.310498e-01	2.310498e-01
+3.900000e-01	3.000000e-02	3.000000e-02	4.230000e+00	1.739339e-01	1.739339e-01
+4.500000e-01	3.000000e-02	3.000000e-02	3.304000e+00	1.338432e-01	1.338432e-01
+5.100000e-01	3.000000e-02	3.000000e-02	2.584000e+00	8.287340e-02	8.287340e-02
+5.700000e-01	3.000000e-02	3.000000e-02	1.178000e+00	3.962323e-02	3.962323e-02
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d12-x01-y01
+Path=/REF/ATLAS_2011_I929691/d12-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+3.000000e-02	3.000000e-02	3.000000e-02	8.430000e+01	5.604543e+00	5.604543e+00
+9.000000e-02	3.000000e-02	3.000000e-02	4.239000e+01	2.414063e+00	2.414063e+00
+1.500000e-01	3.000000e-02	3.000000e-02	2.218000e+01	1.039423e+00	1.039423e+00
+2.100000e-01	3.000000e-02	3.000000e-02	1.304000e+01	5.295281e-01	5.295281e-01
+2.700000e-01	3.000000e-02	3.000000e-02	8.485000e+00	3.490172e-01	3.490172e-01
+3.300000e-01	3.000000e-02	3.000000e-02	5.920000e+00	2.408423e-01	2.408423e-01
+3.900000e-01	3.000000e-02	3.000000e-02	4.261000e+00	1.572800e-01	1.572800e-01
+4.500000e-01	3.000000e-02	3.000000e-02	3.250000e+00	1.110000e-01	1.110000e-01
+5.100000e-01	3.000000e-02	3.000000e-02	2.588000e+00	9.392018e-02	9.392018e-02
+5.700000e-01	3.000000e-02	3.000000e-02	1.470000e+00	6.184658e-02	6.184658e-02
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d13-x01-y01
+Path=/REF/ATLAS_2011_I929691/d13-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+3.000000e-02	3.000000e-02	3.000000e-02	1.251000e+02	7.564390e+00	7.564390e+00
+9.000000e-02	3.000000e-02	3.000000e-02	5.452000e+01	2.653338e+00	2.653338e+00
+1.500000e-01	3.000000e-02	3.000000e-02	2.588000e+01	1.106797e+00	1.106797e+00
+2.100000e-01	3.000000e-02	3.000000e-02	1.492000e+01	5.981639e-01	5.981639e-01
+2.700000e-01	3.000000e-02	3.000000e-02	9.514000e+00	3.687818e-01	3.687818e-01
+3.300000e-01	3.000000e-02	3.000000e-02	6.189000e+00	2.468299e-01	2.468299e-01
+3.900000e-01	3.000000e-02	3.000000e-02	4.445000e+00	1.875447e-01	1.875447e-01
+4.500000e-01	3.000000e-02	3.000000e-02	3.358000e+00	1.503330e-01	1.503330e-01
+5.100000e-01	3.000000e-02	3.000000e-02	2.742000e+00	1.429475e-01	1.429475e-01
+5.700000e-01	3.000000e-02	3.000000e-02	1.666000e+00	1.067755e-01	1.067755e-01
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d14-x01-y01
+Path=/REF/ATLAS_2011_I929691/d14-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+3.000000e-02	3.000000e-02	3.000000e-02	1.655000e+02	9.479451e+00	9.479451e+00
+9.000000e-02	3.000000e-02	3.000000e-02	5.860000e+01	2.430823e+00	2.430823e+00
+1.500000e-01	3.000000e-02	3.000000e-02	2.725000e+01	1.202539e+00	1.202539e+00
+2.100000e-01	3.000000e-02	3.000000e-02	1.682000e+01	8.523497e-01	8.523497e-01
+2.700000e-01	3.000000e-02	3.000000e-02	1.058000e+01	6.216912e-01	6.216912e-01
+3.300000e-01	3.000000e-02	3.000000e-02	6.065000e+00	3.856488e-01	3.856488e-01
+3.900000e-01	3.000000e-02	3.000000e-02	4.713000e+00	3.140143e-01	3.140143e-01
+4.500000e-01	3.000000e-02	3.000000e-02	3.389000e+00	2.236180e-01	2.236180e-01
+5.100000e-01	3.000000e-02	3.000000e-02	2.530000e+00	1.892749e-01	1.892749e-01
+5.700000e-01	3.000000e-02	3.000000e-02	1.551000e+00	1.560801e-01	1.560801e-01
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d15-x01-y01
+Path=/REF/ATLAS_2011_I929691/d15-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+3.000000e-02	3.000000e-02	3.000000e-02	2.199000e+02	7.156815e+00	7.156815e+00
+9.000000e-02	3.000000e-02	3.000000e-02	7.111000e+01	1.904127e+00	1.904127e+00
+1.500000e-01	3.000000e-02	3.000000e-02	3.099000e+01	7.783315e-01	7.783315e-01
+2.100000e-01	3.000000e-02	3.000000e-02	1.707000e+01	4.308132e-01	4.308132e-01
+2.700000e-01	3.000000e-02	3.000000e-02	1.060000e+01	2.773085e-01	2.773085e-01
+3.300000e-01	3.000000e-02	3.000000e-02	7.209000e+00	1.953586e-01	1.953586e-01
+3.900000e-01	3.000000e-02	3.000000e-02	5.187000e+00	1.456606e-01	1.456606e-01
+4.500000e-01	3.000000e-02	3.000000e-02	3.942000e+00	1.123610e-01	1.123610e-01
+5.100000e-01	3.000000e-02	3.000000e-02	3.040000e+00	9.170605e-02	9.170605e-02
+5.700000e-01	3.000000e-02	3.000000e-02	2.070000e+00	9.042677e-02	9.042677e-02
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d16-x01-y01
+Path=/REF/ATLAS_2011_I929691/d16-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+3.000000e-02	3.000000e-02	3.000000e-02	2.850000e+02	8.434453e+00	8.434453e+00
+9.000000e-02	3.000000e-02	3.000000e-02	7.993000e+01	2.006614e+00	2.006614e+00
+1.500000e-01	3.000000e-02	3.000000e-02	3.445000e+01	9.774457e-01	9.774457e-01
+2.100000e-01	3.000000e-02	3.000000e-02	1.901000e+01	4.838388e-01	4.838388e-01
+2.700000e-01	3.000000e-02	3.000000e-02	1.150000e+01	2.846050e-01	2.846050e-01
+3.300000e-01	3.000000e-02	3.000000e-02	7.683000e+00	1.913766e-01	1.913766e-01
+3.900000e-01	3.000000e-02	3.000000e-02	5.479000e+00	1.369708e-01	1.369708e-01
+4.500000e-01	3.000000e-02	3.000000e-02	4.082000e+00	1.068925e-01	1.068925e-01
+5.100000e-01	3.000000e-02	3.000000e-02	3.148000e+00	9.230926e-02	9.230926e-02
+5.700000e-01	3.000000e-02	3.000000e-02	2.197000e+00	8.134494e-02	8.134494e-02
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d17-x01-y01
+Path=/REF/ATLAS_2011_I929691/d17-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+3.000000e-02	3.000000e-02	3.000000e-02	3.345000e+02	1.329662e+01	1.329662e+01
+9.000000e-02	3.000000e-02	3.000000e-02	8.887000e+01	3.327897e+00	3.327897e+00
+1.500000e-01	3.000000e-02	3.000000e-02	3.798000e+01	1.479595e+00	1.479595e+00
+2.100000e-01	3.000000e-02	3.000000e-02	2.040000e+01	7.690254e-01	7.690254e-01
+2.700000e-01	3.000000e-02	3.000000e-02	1.213000e+01	4.657252e-01	4.657252e-01
+3.300000e-01	3.000000e-02	3.000000e-02	8.094000e+00	3.070016e-01	3.070016e-01
+3.900000e-01	3.000000e-02	3.000000e-02	5.604000e+00	2.127205e-01	2.127205e-01
+4.500000e-01	3.000000e-02	3.000000e-02	4.195000e+00	1.619197e-01	1.619197e-01
+5.100000e-01	3.000000e-02	3.000000e-02	3.284000e+00	1.354179e-01	1.354179e-01
+5.700000e-01	3.000000e-02	3.000000e-02	2.356000e+00	1.230163e-01	1.230163e-01
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d18-x01-y01
+Path=/REF/ATLAS_2011_I929691/d18-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+3.000000e-02	3.000000e-02	3.000000e-02	3.800000e+02	1.492146e+01	1.492146e+01
+9.000000e-02	3.000000e-02	3.000000e-02	9.253000e+01	3.407110e+00	3.407110e+00
+1.500000e-01	3.000000e-02	3.000000e-02	3.891000e+01	1.494824e+00	1.494824e+00
+2.100000e-01	3.000000e-02	3.000000e-02	2.099000e+01	7.741447e-01	7.741447e-01
+2.700000e-01	3.000000e-02	3.000000e-02	1.271000e+01	4.738143e-01	4.738143e-01
+3.300000e-01	3.000000e-02	3.000000e-02	8.367000e+00	3.146824e-01	3.146824e-01
+3.900000e-01	3.000000e-02	3.000000e-02	5.912000e+00	2.219031e-01	2.219031e-01
+4.500000e-01	3.000000e-02	3.000000e-02	4.351000e+00	1.619413e-01	1.619413e-01
+5.100000e-01	3.000000e-02	3.000000e-02	3.386000e+00	1.308625e-01	1.308625e-01
+5.700000e-01	3.000000e-02	3.000000e-02	2.416000e+00	1.269212e-01	1.269212e-01
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d19-x01-y01
+Path=/REF/ATLAS_2011_I929691/d19-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+3.000000e-02	3.000000e-02	3.000000e-02	4.252000e+02	1.714205e+01	1.714205e+01
+9.000000e-02	3.000000e-02	3.000000e-02	9.675000e+01	3.571792e+00	3.571792e+00
+1.500000e-01	3.000000e-02	3.000000e-02	3.976000e+01	1.475737e+00	1.475737e+00
+2.100000e-01	3.000000e-02	3.000000e-02	2.129000e+01	7.951101e-01	7.951101e-01
+2.700000e-01	3.000000e-02	3.000000e-02	1.290000e+01	4.738143e-01	4.738143e-01
+3.300000e-01	3.000000e-02	3.000000e-02	8.627000e+00	3.258497e-01	3.258497e-01
+3.900000e-01	3.000000e-02	3.000000e-02	6.129000e+00	2.267179e-01	2.267179e-01
+4.500000e-01	3.000000e-02	3.000000e-02	4.443000e+00	1.630859e-01	1.630859e-01
+5.100000e-01	3.000000e-02	3.000000e-02	3.407000e+00	1.298769e-01	1.298769e-01
+5.700000e-01	3.000000e-02	3.000000e-02	2.423000e+00	1.132828e-01	1.132828e-01
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d20-x01-y01
+Path=/REF/ATLAS_2011_I929691/d20-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+3.000000e-02	3.000000e-02	3.000000e-02	4.807000e+02	1.514629e+01	1.514629e+01
+9.000000e-02	3.000000e-02	3.000000e-02	1.012000e+02	3.059412e+00	3.059412e+00
+1.500000e-01	3.000000e-02	3.000000e-02	4.168000e+01	1.229715e+00	1.229715e+00
+2.100000e-01	3.000000e-02	3.000000e-02	2.223000e+01	6.580274e-01	6.580274e-01
+2.700000e-01	3.000000e-02	3.000000e-02	1.365000e+01	4.052160e-01	4.052160e-01
+3.300000e-01	3.000000e-02	3.000000e-02	8.903000e+00	2.767400e-01	2.767400e-01
+3.900000e-01	3.000000e-02	3.000000e-02	6.374000e+00	1.998499e-01	1.998499e-01
+4.500000e-01	3.000000e-02	3.000000e-02	4.614000e+00	1.449966e-01	1.449966e-01
+5.100000e-01	3.000000e-02	3.000000e-02	3.523000e+00	1.173882e-01	1.173882e-01
+5.700000e-01	3.000000e-02	3.000000e-02	2.531000e+00	1.128938e-01	1.128938e-01
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d21-x01-y01
+Path=/REF/ATLAS_2011_I929691/d21-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+3.000000e-02	3.000000e-02	3.000000e-02	2.129000e+00	9.472592e-02	9.472592e-02
+9.000000e-02	3.000000e-02	3.000000e-02	6.019000e+00	2.558847e-01	2.558847e-01
+1.642000e-01	4.420000e-02	4.420000e-02	1.017000e+01	4.119466e-01	4.119466e-01
+2.443000e-01	3.590000e-02	3.590000e-02	1.287000e+01	5.124451e-01	5.124451e-01
+3.285500e-01	4.835000e-02	4.835000e-02	1.207000e+01	4.517743e-01	4.517743e-01
+4.419000e-01	6.500000e-02	6.500000e-02	8.783000e+00	3.381612e-01	3.381612e-01
+5.943000e-01	8.740000e-02	8.740000e-02	5.503000e+00	2.179197e-01	2.179197e-01
+7.993000e-01	1.176000e-01	1.176000e-01	2.880000e+00	1.255747e-01	1.255747e-01
+1.075000e+00	1.581000e-01	1.581000e-01	1.225000e+00	5.842089e-02	5.842089e-02
+1.445800e+00	2.127000e-01	2.127000e-01	4.136000e-01	2.797320e-02	2.797320e-02
+1.944550e+00	2.860500e-01	2.860500e-01	9.954000e-02	1.668764e-02	1.668764e-02
+2.615300e+00	3.847000e-01	3.847000e-01	1.755000e-02	4.048827e-03	4.048827e-03
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d22-x01-y01
+Path=/REF/ATLAS_2011_I929691/d22-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+3.000000e-02	3.000000e-02	3.000000e-02	2.576000e+00	1.034650e-01	1.034650e-01
+9.000000e-02	3.000000e-02	3.000000e-02	7.248000e+00	2.626176e-01	2.626176e-01
+1.642000e-01	4.420000e-02	4.420000e-02	1.193000e+01	3.860052e-01	3.860052e-01
+2.443000e-01	3.590000e-02	3.590000e-02	1.462000e+01	4.560702e-01	4.560702e-01
+3.285500e-01	4.835000e-02	4.835000e-02	1.380000e+01	4.301163e-01	4.301163e-01
+4.419000e-01	6.500000e-02	6.500000e-02	1.000000e+01	3.036445e-01	3.036445e-01
+5.943000e-01	8.740000e-02	8.740000e-02	6.388000e+00	2.061892e-01	2.061892e-01
+7.993000e-01	1.176000e-01	1.176000e-01	3.584000e+00	1.205529e-01	1.205529e-01
+1.075000e+00	1.581000e-01	1.581000e-01	1.670000e+00	6.356099e-02	6.356099e-02
+1.445800e+00	2.127000e-01	2.127000e-01	6.821000e-01	3.046309e-02	3.046309e-02
+1.944550e+00	2.860500e-01	2.860500e-01	2.325000e-01	1.683449e-02	1.683449e-02
+2.615300e+00	3.847000e-01	3.847000e-01	6.582000e-02	8.445425e-03	8.445425e-03
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d23-x01-y01
+Path=/REF/ATLAS_2011_I929691/d23-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+3.000000e-02	3.000000e-02	3.000000e-02	3.047000e+00	1.680774e-01	1.680774e-01
+9.000000e-02	3.000000e-02	3.000000e-02	8.804000e+00	3.749827e-01	3.749827e-01
+1.642000e-01	4.420000e-02	4.420000e-02	1.328000e+01	4.701064e-01	4.701064e-01
+2.443000e-01	3.590000e-02	3.590000e-02	1.618000e+01	5.403702e-01	5.403702e-01
+3.285500e-01	4.835000e-02	4.835000e-02	1.529000e+01	4.940648e-01	4.940648e-01
+4.419000e-01	6.500000e-02	6.500000e-02	1.127000e+01	3.661967e-01	3.661967e-01
+5.943000e-01	8.740000e-02	8.740000e-02	7.218000e+00	2.570992e-01	2.570992e-01
+7.993000e-01	1.176000e-01	1.176000e-01	4.333000e+00	1.589025e-01	1.589025e-01
+1.075000e+00	1.581000e-01	1.581000e-01	2.224000e+00	9.265528e-02	9.265528e-02
+1.445800e+00	2.127000e-01	2.127000e-01	1.020000e+00	4.957822e-02	4.957822e-02
+1.944550e+00	2.860500e-01	2.860500e-01	4.032000e-01	2.736512e-02	2.736512e-02
+2.615300e+00	3.847000e-01	3.847000e-01	1.361000e-01	1.994242e-02	1.994242e-02
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d24-x01-y01
+Path=/REF/ATLAS_2011_I929691/d24-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+3.000000e-02	3.000000e-02	3.000000e-02	3.167000e+00	3.333482e-01	3.333482e-01
+9.000000e-02	3.000000e-02	3.000000e-02	9.188000e+00	6.606981e-01	6.606981e-01
+1.642000e-01	4.420000e-02	4.420000e-02	1.353000e+01	7.582216e-01	7.582216e-01
+2.443000e-01	3.590000e-02	3.590000e-02	1.728000e+01	8.839118e-01	8.839118e-01
+3.285500e-01	4.835000e-02	4.835000e-02	1.511000e+01	7.574299e-01	7.574299e-01
+4.419000e-01	6.500000e-02	6.500000e-02	1.207000e+01	6.585590e-01	6.585590e-01
+5.943000e-01	8.740000e-02	8.740000e-02	7.960000e+00	4.419683e-01	4.419683e-01
+7.993000e-01	1.176000e-01	1.176000e-01	4.815000e+00	2.566807e-01	2.566807e-01
+1.075000e+00	1.581000e-01	1.581000e-01	2.565000e+00	1.547966e-01	1.547966e-01
+1.445800e+00	2.127000e-01	2.127000e-01	1.246000e+00	9.319335e-02	9.319335e-02
+1.944550e+00	2.860500e-01	2.860500e-01	6.009000e-01	5.791459e-02	5.791459e-02
+2.615300e+00	3.847000e-01	3.847000e-01	2.550000e-01	5.053197e-02	5.053197e-02
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d25-x01-y01
+Path=/REF/ATLAS_2011_I929691/d25-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+3.000000e-02	3.000000e-02	3.000000e-02	3.775000e+00	1.560833e-01	1.560833e-01
+9.000000e-02	3.000000e-02	3.000000e-02	1.048000e+01	4.178516e-01	4.178516e-01
+1.642000e-01	4.420000e-02	4.420000e-02	1.589000e+01	5.966574e-01	5.966574e-01
+2.443000e-01	3.590000e-02	3.590000e-02	1.858000e+01	6.888396e-01	6.888396e-01
+3.285500e-01	4.835000e-02	4.835000e-02	1.733000e+01	6.596969e-01	6.596969e-01
+4.419000e-01	6.500000e-02	6.500000e-02	1.342000e+01	5.166237e-01	5.166237e-01
+5.943000e-01	8.740000e-02	8.740000e-02	9.006000e+00	3.473327e-01	3.473327e-01
+7.993000e-01	1.176000e-01	1.176000e-01	5.536000e+00	2.175891e-01	2.175891e-01
+1.075000e+00	1.581000e-01	1.581000e-01	3.142000e+00	1.259603e-01	1.259603e-01
+1.445800e+00	2.127000e-01	2.127000e-01	1.614000e+00	7.116881e-02	7.116881e-02
+1.944550e+00	2.860500e-01	2.860500e-01	7.834000e-01	4.758203e-02	4.758203e-02
+2.615300e+00	3.847000e-01	3.847000e-01	3.631000e-01	7.884802e-02	7.884802e-02
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d26-x01-y01
+Path=/REF/ATLAS_2011_I929691/d26-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+3.000000e-02	3.000000e-02	3.000000e-02	4.221000e+00	2.001799e-01	2.001799e-01
+9.000000e-02	3.000000e-02	3.000000e-02	1.130000e+01	5.239275e-01	5.239275e-01
+1.642000e-01	4.420000e-02	4.420000e-02	1.725000e+01	7.863841e-01	7.863841e-01
+2.443000e-01	3.590000e-02	3.590000e-02	1.995000e+01	9.093404e-01	9.093404e-01
+3.285500e-01	4.835000e-02	4.835000e-02	1.866000e+01	8.500000e-01	8.500000e-01
+4.419000e-01	6.500000e-02	6.500000e-02	1.457000e+01	6.661081e-01	6.661081e-01
+5.943000e-01	8.740000e-02	8.740000e-02	9.954000e+00	4.603531e-01	4.603531e-01
+7.993000e-01	1.176000e-01	1.176000e-01	6.256000e+00	2.954353e-01	2.954353e-01
+1.075000e+00	1.581000e-01	1.581000e-01	3.670000e+00	1.742642e-01	1.742642e-01
+1.445800e+00	2.127000e-01	2.127000e-01	1.987000e+00	9.982485e-02	9.982485e-02
+1.944550e+00	2.860500e-01	2.860500e-01	1.039000e+00	9.503683e-02	9.503683e-02
+2.615300e+00	3.847000e-01	3.847000e-01	5.205000e-01	1.181674e-01	1.181674e-01
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d27-x01-y01
+Path=/REF/ATLAS_2011_I929691/d27-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+3.000000e-02	3.000000e-02	3.000000e-02	4.428000e+00	1.796274e-01	1.796274e-01
+9.000000e-02	3.000000e-02	3.000000e-02	1.211000e+01	4.535416e-01	4.535416e-01
+1.642000e-01	4.420000e-02	4.420000e-02	1.851000e+01	6.844706e-01	6.844706e-01
+2.443000e-01	3.590000e-02	3.590000e-02	2.098000e+01	7.727872e-01	7.727872e-01
+3.285500e-01	4.835000e-02	4.835000e-02	1.960000e+01	7.218033e-01	7.218033e-01
+4.419000e-01	6.500000e-02	6.500000e-02	1.539000e+01	5.688585e-01	5.688585e-01
+5.943000e-01	8.740000e-02	8.740000e-02	1.062000e+01	4.044750e-01	4.044750e-01
+7.993000e-01	1.176000e-01	1.176000e-01	6.887000e+00	2.643936e-01	2.643936e-01
+1.075000e+00	1.581000e-01	1.581000e-01	4.047000e+00	1.639573e-01	1.639573e-01
+1.445800e+00	2.127000e-01	2.127000e-01	2.297000e+00	9.362158e-02	9.362158e-02
+1.944550e+00	2.860500e-01	2.860500e-01	1.262000e+00	7.009280e-02	7.009280e-02
+2.615300e+00	3.847000e-01	3.847000e-01	6.720000e-01	1.364770e-01	1.364770e-01
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d28-x01-y01
+Path=/REF/ATLAS_2011_I929691/d28-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+3.000000e-02	3.000000e-02	3.000000e-02	4.576000e+00	1.686891e-01	1.686891e-01
+9.000000e-02	3.000000e-02	3.000000e-02	1.270000e+01	4.627094e-01	4.627094e-01
+1.642000e-01	4.420000e-02	4.420000e-02	1.891000e+01	6.926038e-01	6.926038e-01
+2.443000e-01	3.590000e-02	3.590000e-02	2.170000e+01	8.022468e-01	8.022468e-01
+3.285500e-01	4.835000e-02	4.835000e-02	1.998000e+01	7.224957e-01	7.224957e-01
+4.419000e-01	6.500000e-02	6.500000e-02	1.582000e+01	5.821512e-01	5.821512e-01
+5.943000e-01	8.740000e-02	8.740000e-02	1.113000e+01	4.509989e-01	4.509989e-01
+7.993000e-01	1.176000e-01	1.176000e-01	7.245000e+00	2.760453e-01	2.760453e-01
+1.075000e+00	1.581000e-01	1.581000e-01	4.383000e+00	1.697557e-01	1.697557e-01
+1.445800e+00	2.127000e-01	2.127000e-01	2.512000e+00	9.751410e-02	9.751410e-02
+1.944550e+00	2.860500e-01	2.860500e-01	1.399000e+00	7.355270e-02	7.355270e-02
+2.615300e+00	3.847000e-01	3.847000e-01	7.769000e-01	1.609611e-01	1.609611e-01
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d29-x01-y01
+Path=/REF/ATLAS_2011_I929691/d29-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+3.000000e-02	3.000000e-02	3.000000e-02	4.719000e+00	1.747484e-01	1.747484e-01
+9.000000e-02	3.000000e-02	3.000000e-02	1.272000e+01	4.539824e-01	4.539824e-01
+1.642000e-01	4.420000e-02	4.420000e-02	1.925000e+01	6.935416e-01	6.935416e-01
+2.443000e-01	3.590000e-02	3.590000e-02	2.199000e+01	8.022468e-01	8.022468e-01
+3.285500e-01	4.835000e-02	4.835000e-02	2.045000e+01	7.416873e-01	7.416873e-01
+4.419000e-01	6.500000e-02	6.500000e-02	1.628000e+01	6.020797e-01	6.020797e-01
+5.943000e-01	8.740000e-02	8.740000e-02	1.154000e+01	4.418144e-01	4.418144e-01
+7.993000e-01	1.176000e-01	1.176000e-01	7.590000e+00	2.794065e-01	2.794065e-01
+1.075000e+00	1.581000e-01	1.581000e-01	4.658000e+00	1.708479e-01	1.708479e-01
+1.445800e+00	2.127000e-01	2.127000e-01	2.719000e+00	1.006032e-01	1.006032e-01
+1.944550e+00	2.860500e-01	2.860500e-01	1.520000e+00	7.632169e-02	7.632169e-02
+2.615300e+00	3.847000e-01	3.847000e-01	8.805000e-01	1.860967e-01	1.860967e-01
+END YODA_SCATTER2D
+
+BEGIN YODA_SCATTER2D /REF/ATLAS_2011_I929691/d30-x01-y01
+Path=/REF/ATLAS_2011_I929691/d30-x01-y01
+Title=
+Type=Scatter2D
+# xval	 xerr-	 xerr+	 yval	 yerr-	 yerr+
+3.000000e-02	3.000000e-02	3.000000e-02	4.911000e+00	1.859946e-01	1.859946e-01
+9.000000e-02	3.000000e-02	3.000000e-02	1.321000e+01	4.876474e-01	4.876474e-01
+1.642000e-01	4.420000e-02	4.420000e-02	1.968000e+01	7.200694e-01	7.200694e-01
+2.443000e-01	3.590000e-02	3.590000e-02	2.232000e+01	8.354639e-01	8.354639e-01
+3.285500e-01	4.835000e-02	4.835000e-02	2.076000e+01	8.318654e-01	8.318654e-01
+4.419000e-01	6.500000e-02	6.500000e-02	1.674000e+01	6.198387e-01	6.198387e-01
+5.943000e-01	8.740000e-02	8.740000e-02	1.193000e+01	4.472136e-01	4.472136e-01
+7.993000e-01	1.176000e-01	1.176000e-01	7.980000e+00	2.935864e-01	2.935864e-01
+1.075000e+00	1.581000e-01	1.581000e-01	5.017000e+00	1.859247e-01	1.859247e-01
+1.445800e+00	2.127000e-01	2.127000e-01	2.992000e+00	1.132652e-01	1.132652e-01
+1.944550e+00	2.860500e-01	2.860500e-01	1.777000e+00	9.317188e-02	9.317188e-02
+2.615300e+00	3.847000e-01	3.847000e-01	1.035000e+00	2.283703e-01	2.283703e-01
+END YODA_SCATTER2D
+
diff --git a/data/refdata/Makefile.am b/data/refdata/Makefile.am
--- a/data/refdata/Makefile.am
+++ b/data/refdata/Makefile.am
@@ -1,334 +1,336 @@
 dist_pkgdata_DATA = \
   ALEPH_1991_S2435284.yoda \
   ALEPH_1995_I382179.yoda \
   ALEPH_1996_S3486095.yoda \
   ALEPH_1996_S3196992.yoda \
   ALEPH_1999_S4193598.yoda \
   ALEPH_2001_S4656318.yoda \
   ALEPH_2002_S4823664.yoda \
   ALEPH_2004_S5765862.yoda \
   ALICE_2010_S8624100.yoda \
   ALICE_2010_S8625980.yoda \
   ALICE_2010_S8706239.yoda \
   ALICE_2011_S8909580.yoda \
   ALICE_2011_S8945144.yoda \
+  ALICE_2012_I1116147.yoda \
   ALICE_2012_I1181770.yoda \
   ALICE_2014_I1300380.yoda \
   ALICE_2015_I1357424.yoda \
   ARGUS_1993_S2653028.yoda \
   ARGUS_1993_S2669951.yoda \
   ARGUS_1993_S2789213.yoda \
   ATLAS_2010_S8591806.yoda \
   ATLAS_2010_S8817804.yoda \
   ATLAS_2010_S8894728.yoda \
   ATLAS_2010_S8914702.yoda \
   ATLAS_2010_S8918562.yoda \
   ATLAS_2010_S8919674.yoda \
   ATLAS_2011_S8924791.yoda \
   ATLAS_2011_S8971293.yoda \
   ATLAS_2011_S8994773.yoda \
   ATLAS_2011_S9002537.yoda \
   ATLAS_2010_CONF_2010_049.yoda \
   ATLAS_2011_S9120807.yoda \
   ATLAS_2011_S9126244.yoda \
   ATLAS_2011_S9128077.yoda \
   ATLAS_2011_S9131140.yoda \
   ATLAS_2011_S9035664.yoda \
   ATLAS_2011_I894867.yoda \
   ATLAS_2011_I928289_W.yoda \
   ATLAS_2011_I928289_Z.yoda \
   ATLAS_2011_I919017.yoda \
   ATLAS_2011_I921594.yoda \
   ATLAS_2011_I925932.yoda \
   ATLAS_2011_I926145.yoda \
+  ATLAS_2011_I929691.yoda \
   ATLAS_2011_I930220.yoda \
   ATLAS_2011_I944826.yoda \
   ATLAS_2011_I945498.yoda \
   ATLAS_2011_I954993.yoda \
   ATLAS_2011_S9225137.yoda \
   ATLAS_2011_S9212183.yoda \
   ATLAS_2012_I1082936.yoda \
   ATLAS_2012_I1083318.yoda \
   ATLAS_2012_I1084540.yoda \
   ATLAS_2012_I1091481.yoda \
   ATLAS_2012_I1093734.yoda \
   ATLAS_2012_I1093738.yoda \
   ATLAS_2012_I1094061.yoda \
   ATLAS_2012_I1094564.yoda \
   ATLAS_2012_I1094568.yoda \
   ATLAS_2012_I943401.yoda \
   ATLAS_2012_I1082009.yoda \
   ATLAS_2012_I1118269.yoda \
   ATLAS_2012_I1119557.yoda \
   ATLAS_2012_I1124167.yoda \
   ATLAS_2012_I1125575.yoda \
   ATLAS_2012_I1183818.yoda \
   ATLAS_2012_I1188891.yoda \
   ATLAS_2012_I1199269.yoda \
   ATLAS_2012_CONF_2012_001.yoda \
   ATLAS_2012_I1203852.yoda \
   ATLAS_2012_I1204784.yoda \
   ATLAS_2013_I1190187.yoda \
   ATLAS_2013_I1217863_W.yoda \
   ATLAS_2013_I1217863_W_EL.yoda \
   ATLAS_2013_I1217863_W_MU.yoda \
   ATLAS_2013_I1217863_Z.yoda \
   ATLAS_2013_I1217863_Z_EL.yoda \
   ATLAS_2013_I1217863_Z_MU.yoda \
   ATLAS_2013_I1217867.yoda \
   ATLAS_2013_I1219109.yoda \
   ATLAS_2013_I1219109_EL.yoda \
   ATLAS_2013_I1219109_MU.yoda \
   ATLAS_2013_I1230812.yoda \
   ATLAS_2013_I1230812_EL.yoda \
   ATLAS_2013_I1230812_MU.yoda \
   ATLAS_2013_I1243871.yoda \
   ATLAS_2013_I1263495.yoda \
   ATLAS_2013_I1216670.yoda \
   ATLAS_2013_I1244522.yoda \
   ATLAS_2014_I1282447.yoda \
   ATLAS_2014_I1298023.yoda \
   ATLAS_2014_I1319490.yoda \
   ATLAS_2014_I1319490_EL.yoda \
   ATLAS_2014_I1319490_MU.yoda \
   ATLAS_2014_I1326641.yoda \
   ATLAS_2014_I1268975.yoda \
   ATLAS_2014_I1279489.yoda \
   ATLAS_2014_I1282441.yoda \
   ATLAS_2014_I1298811.yoda \
   ATLAS_2014_I1304688.yoda \
   ATLAS_2014_I1307756.yoda \
   ATLAS_2014_I1306294.yoda \
   ATLAS_2014_I1306294_EL.yoda \
   ATLAS_2014_I1306294_MU.yoda \
   ATLAS_2014_I1315949.yoda \
   ATLAS_2014_I1325553.yoda \
   ATLAS_2014_I1300647.yoda \
   ATLAS_2014_I1288706.yoda \
   ATLAS_2014_I1307243.yoda \
   ATLAS_2014_I1312627.yoda \
   ATLAS_2014_I1312627_EL.yoda \
   ATLAS_2014_I1312627_MU.yoda \
   ATLAS_2014_I1306615.yoda \
   ATLAS_2015_I1393758.yoda \
   ATLAS_2015_I1364361.yoda \
   ATLAS_2015_I1351916.yoda \
   ATLAS_2015_I1351916_EL.yoda \
   ATLAS_2015_I1351916_MU.yoda \
   ATLAS_2015_I1345452.yoda \
   ATLAS_2015_I1387176.yoda \
   ATLAS_2015_I1376945.yoda \
   ATLAS_2015_I1390114.yoda \
   ATLAS_2015_I1394679.yoda \
   ATLAS_2015_I1397635.yoda \
   ATLAS_2015_I1397637.yoda \
   ATLAS_2015_CONF_2015_041.yoda \
   ATLAS_2015_CONF_2015_041_EL.yoda \
   ATLAS_2015_CONF_2015_041_MU.yoda \
   ATLAS_2015_I1408516.yoda \
   ATLAS_2015_I1408516_EL.yoda \
   ATLAS_2015_I1408516_MU.yoda \
   ATLAS_2015_I1404878.yoda \
   ATLAS_2016_I1419070.yoda \
   ATLAS_2016_I1419652.yoda \
   ATLAS_2016_I1424838.yoda \
   ATLAS_2016_I1426695.yoda \
   ATLAS_2016_I1444991.yoda \
   ATLAS_2016_I1457605.yoda \
   ATLAS_2016_I1468168.yoda \
   BABAR_2003_I593379.yoda \
   BABAR_2005_S6181155.yoda \
   BABAR_2006_S6511112.yoda \
   BABAR_2007_S6895344.yoda \
   BABAR_2007_S7266081.yoda \
   BABAR_2013_I1116411.yoda \
   BABAR_2013_I1238276.yoda \
   BABAR_2015_I1334693.yoda \
   BELLE_2001_S4598261.yoda \
   BELLE_2008_I786560.yoda \
   BELLE_2011_I878990.yoda \
   BELLE_2013_I1216515.yoda \
   BELLE_2013_I1238273.yoda \
   BELLE_2015_I1397632.yoda \
   CLEO_2001_S4557530.yoda \
   CLEO_2004_S5809304.yoda \
   CMS_2010_S8547297.yoda \
   CMS_2010_S8656010.yoda \
   CMS_2011_S8884919.yoda \
   CMS_2011_S8941262.yoda \
   CMS_2011_S8950903.yoda \
   CMS_2011_S8957746.yoda \
   CMS_2011_S8968497.yoda \
   CMS_2011_S8973270.yoda \
   CMS_2011_S8978280.yoda \
   CMS_2011_S9086218.yoda \
   CMS_2011_S9088458.yoda \
   CMS_2011_S9120041.yoda \
   CMS_2011_S9215166.yoda \
   CMS_2012_I941555.yoda \
   CMS_2011_I954992.yoda \
   CMS_2012_I1087342.yoda \
   CMS_2012_I1090423.yoda \
   CMS_2012_I1102908.yoda \
   CMS_2012_I1107658.yoda \
   CMS_2012_I1184941.yoda \
   CMS_2012_I1193338.yoda \
   CMS_2013_I1122847.yoda \
   CMS_2013_I1208923.yoda \
   CMS_2013_I1209721.yoda \
   CMS_2013_I1218372.yoda \
   CMS_2013_I1224539_DIJET.yoda \
   CMS_2013_I1224539_WJET.yoda \
   CMS_2013_I1224539_ZJET.yoda \
   CMS_2013_I1256943.yoda \
   CMS_2013_I1258128.yoda \
   CMS_2013_I1261026.yoda \
   CMS_2013_I1265659.yoda \
   CMS_2013_I1272853.yoda \
   CMS_2013_I1273574.yoda \
   CMSTOTEM_2014_I1294140.yoda \
   CMS_2014_I1298810.yoda \
   CMS_2014_I1303894.yoda \
   CMS_2014_I1305624.yoda \
   CMS_2015_I1310737.yoda \
   CMS_2015_I1327224.yoda \
   CMS_2015_I1346843.yoda \
   CMS_2015_I1356998.yoda \
   CMS_2015_I1370682.yoda \
   CMS_2015_I1384119.yoda \
   CMS_2015_I1385107.yoda \
   CMS_2015_I1397174.yoda \
   CMS_2016_I1473674.yoda \
   CMS_2012_PAS_QCD_11_010.yoda \
   CMS_QCD_10_024.yoda \
   LHCB_2010_S8758301.yoda \
   LHCB_2010_I867355.yoda \
   LHCB_2011_I917009.yoda \
   LHCB_2011_I919315.yoda \
   LHCB_2012_I1119400.yoda \
   LHCB_2012_I1208102.yoda \
   LHCB_2013_I1208105.yoda \
   LHCB_2013_I1218996.yoda \
   LHCB_2014_I1281685.yoda \
   LHCB_2015_I1333223.yoda \
   LHCF_2012_I1115479.yoda \
   DELPHI_1994_S3021912.yoda \
   DELPHI_1995_S3137023.yoda \
   DELPHI_1996_S3430090.yoda \
   DELPHI_1999_S3960137.yoda \
   DELPHI_2000_S4328825.yoda \
   DELPHI_2002_069_CONF_603.yoda \
   DELPHI_2011_I890503.yoda \
   L3_1992_I336180.yoda \
   OPAL_1993_S2692198.yoda \
   OPAL_1993_I342766.yoda \
   OPAL_1994_S2927284.yoda \
   OPAL_1995_S3198391.yoda \
   OPAL_1996_S3257789.yoda \
   OPAL_1997_S3396100.yoda \
   OPAL_1997_S3608263.yoda \
   OPAL_1998_S3702294.yoda \
   OPAL_1998_S3780481.yoda \
   OPAL_1998_S3749908.yoda \
   OPAL_2000_S4418603.yoda \
   OPAL_2001_S4553896.yoda \
   OPAL_2002_S5361494.yoda \
   OPAL_2003_I599181.yoda \
   OPAL_2004_S6132243.yoda \
   JADE_OPAL_2000_S4300807.yoda \
   JADE_1998_S3612880.yoda \
   TASSO_1990_S2148048.yoda \
   H1_1994_S2919893.yoda \
   H1_1995_S3167097.yoda \
   H1_2000_S4129130.yoda \
   H1_2007_S7223935.yoda \
   ZEUS_2001_S4815815.yoda \
   PHENIX_2003_S5538505.yoda \
   STAR_2006_S6500200.yoda \
   STAR_2006_S6860818.yoda \
   STAR_2006_S6870392.yoda \
   STAR_2008_S7993412.yoda \
   STAR_2009_UE_HELEN.yoda \
   BRAHMS_2007_S7052448.yoda \
   UA1_1990_S2044935.yoda \
   UA5_1982_S875503.yoda \
   UA5_1986_S1583476.yoda \
   UA5_1989_S1926373.yoda \
   UA5_1988_S1867512.yoda \
   UA5_1987_S1640666.yoda \
   CDF_1988_S1865951.yoda \
   CDF_1990_S2089246.yoda \
   CDF_1993_S2742446.yoda \
   CDF_1994_S2952106.yoda \
   CDF_1996_S3108457.yoda \
   CDF_1996_S3349578.yoda \
   CDF_1996_S3418421.yoda \
   CDF_1997_S3541940.yoda \
   CDF_1998_S3618439.yoda \
   CDF_2000_S4155203.yoda \
   CDF_2000_S4266730.yoda \
   CDF_2001_S4517016.yoda \
   CDF_2001_S4563131.yoda \
   CDF_2001_S4751469.yoda \
   CDF_2002_S4796047.yoda \
   CDF_2004_S5839831.yoda \
   CDF_2005_S6080774.yoda \
   CDF_2005_S6217184.yoda \
   CDF_2006_S6450792.yoda \
   CDF_2006_S6653332.yoda \
   CDF_2007_S7057202.yoda \
   CDF_2008_S7541902.yoda \
   CDF_2008_S7554427.yoda \
   CDF_2008_S7540469.yoda \
   CDF_2008_S7782535.yoda \
   CDF_2008_S7828950.yoda \
   CDF_2008_S8093652.yoda \
   CDF_2008_S8095620.yoda \
   CDF_2009_S8233977.yoda \
   CDF_2009_NOTE_9936.yoda \
   CDF_2009_I856131.yoda \
   CDF_2009_S8436959.yoda \
   CDF_2010_S8591881_DY.yoda \
   CDF_2010_S8591881_QCD.yoda \
   CDF_2012_NOTE10874.yoda \
   CDF_2012_I1124333.yoda \
   D0_1995_I398175.yoda \
   D0_1996_S3214044.yoda \
   D0_1996_S3324664.yoda \
   D0_2000_S4480767.yoda \
   D0_2000_I499943.yoda \
   D0_2001_S4674421.yoda \
   D0_2004_S5992206.yoda \
   D0_2006_S6438750.yoda \
   D0_2007_S7075677.yoda \
   D0_2008_S6879055.yoda \
   D0_2008_S7554427.yoda \
   D0_2008_S7662670.yoda \
   D0_2008_S7719523.yoda \
   D0_2008_S7837160.yoda \
   D0_2008_S7863608.yoda \
   D0_2009_S8202443.yoda \
   D0_2009_S8320160.yoda \
   D0_2009_S8349509.yoda \
   D0_2010_S8566488.yoda \
   D0_2010_S8570965.yoda \
   D0_2010_S8671338.yoda \
   D0_2010_S8821313.yoda \
   D0_2011_I895662.yoda \
   D0_2015_I1324946.yoda \
   D0_2000_I503361.yoda \
   E735_1992_S2485869.yoda \
   E735_1993_S2896508.yoda \
   E735_1998_S3905616.yoda \
   SFM_1984_S1178091.yoda \
   SLD_1996_S3398250.yoda \
   SLD_1999_S3743934.yoda \
   SLD_2002_S4869273.yoda \
   SLD_2004_S5693039.yoda \
   STAR_2008_S7869363.yoda \
   TOTEM_2012_I1115294.yoda \
   TOTEM_2012_I1220862.yoda \
   TOTEM_2014_I1328627.yoda \
   PDG_HADRON_MULTIPLICITIES.yoda \
   PDG_HADRON_MULTIPLICITIES_RATIOS.yoda \
   PDG_TAUS.yoda
diff --git a/doc/Makefile.am b/doc/Makefile.am
--- a/doc/Makefile.am
+++ b/doc/Makefile.am
@@ -1,106 +1,105 @@
 dist_noinst_SCRIPTS = mk-analysis-html mk-analysis-latex
 
 LATEXSUBSOURCES = preamble.tex intro.tex gettingstarted.tex writinganalyses.tex agilerunmc.tex acknowledgements.tex
 DOCSOURCES = compare-histos.txt slip.eps thinker.eps cone.eps bend.eps \
   h-physrev3.bst hepnames.sty JHEP3.cls JHEP.bst make-plots.txt rivet-manual.tex \
   $(LATEXSUBSOURCES) heppennames.sty hepnicenames.sty hepparticles.sty maybemath.sty \
   hepunits.sty underscore.sty microtype.sty
 
-EXTRA_DIST = $(DOCSOURCES)
+EXTRA_DIST = $(DOCSOURCES) refs.bib
 DOCS =
 
 if ENABLE_PDFMANUAL
 
 if ENABLE_PYEXT
 DOCS += analyses.html
 analyses.html: $(top_srcdir)/src/Analyses mk-analysis-html
 	LD_LIBRARY_PATH=$(top_builddir)/src/.libs:$(YAML_CPPLIBPATH):$(FASTJETLIBPATH):$(HEPMCLIBPATH):$(YODALIBPATH):$(LD_LIBRARY_PATH) \
     DYLD_LIBRARY_PATH=$(top_builddir)/src/.libs:$(YAML_CPPLIBPATH):$(FASTJETLIBPATH):$(HEPMCLIBPATH):$(YODALIBPATH):$(DYLD_LIBRARY_PATH) \
     PYTHONPATH=$(YODA_PYTHONPATH):$(RIVET_PYTHONPATH):$(PYTHONPATH) \
     $(PYTHON) mk-analysis-html
 endif
 
 
 if WITH_PDFLATEX
 
 DOCS += rivet-manual.pdf
-EXTRA_DIST += refs.bib
 
 if ENABLE_PYEXT
 analyses.bib analyses.tex: $(top_srcdir)/src/Analyses $(top_srcdir)/data/plotinfo $(top_srcdir)/data/anainfo mk-analysis-latex
 	LD_LIBRARY_PATH=$(top_builddir)/src/.libs:$(YAML_CPPLIBPATH):$(FASTJETLIBPATH):$(HEPMCLIBPATH):$(YODALIBPATH):$(LD_LIBRARY_PATH) \
     DYLD_LIBRARY_PATH=$(top_builddir)/src/.libs:$(YAML_CPPLIBPATH):$(FASTJETLIBPATH):$(HEPMCLIBPATH):$(YODALIBPATH):$(DYLD_LIBRARY_PATH) \
     PYTHONPATH=$(YODA_PYTHONPATH):$(RIVET_PYTHONPATH):$(PYTHONPATH) \
     $(PYTHON) mk-analysis-latex
 else
 analyses.bib analyses.tex:
 	> analyses.tex
 	> analyses.bib
 endif
 
 LATEX = pdflatex --interaction=batchmode
 RERUN = "(There were undefined references|Rerun to get (cross-references|the bars) right)"
 RERUNBIB = "No file.*\.bbl|Citation.*undefined"
 MAKEIDX = "^[^%]*\\makeindex"
 
 rivet-manual.pdf : rivet-manual.tex $(LATEXSUBSOURCES) analyses.tex refs.bib analyses.bib
 	$(LATEX) $<; true
 	egrep -c $(RERUNBIB) rivet-manual.log && (bibtex rivet-manual && cp rivet-manual.toc rivet-manual.toc.bak && $(LATEX) $<); true
 	for i in `seq 5`; do if egrep $(RERUN) rivet-manual.log; then echo "LaTeX re-run $i"; cp rivet-manual.toc rivet-manual.toc.bak; $(LATEX) $<; else break; fi; done; true
 	if cmp -s rivet-manual.toc rivet-manual.toc.bak; then true; else $(LATEX) $<; true; fi
 	rm -f rivet-manual.toc.bak; true
 
 paper.pdf : paper.tex $(LATEXSUBSOURCES) analyses.tex refs.bib analyses.bib
 	$(LATEX) $<; true
 	egrep -c $(RERUNBIB) paper.log && (bibtex paper && cp paper.toc paper.toc.bak && $(LATEX) $<); true
 	for i in `seq 5`; do if egrep $(RERUN) paper.log; then echo "LaTeX re-run $i"; cp paper.toc paper.toc.bak; $(LATEX) $<; else break; fi; done; true
 	if cmp -s paper.toc paper.toc.bak; then true; else $(LATEX) $<; true; fi
 	rm -f paper.toc.bak; true
 
 endif
 endif
 
 
 if WITH_ASCIIDOC
 
 DOCS += compare-histos.html make-plots.html
 EXTRA_DIST += compare-histos.html make-plots.html
 
 compare-histos.html: compare-histos.txt
 	asciidoc -a toc compare-histos.txt
 
 make-plots.html: make-plots.txt
 	asciidoc -a toc make-plots.txt
 
 endif
 
 
 ################
 
 
 .PHONY = all doc upload arxivtar
 
 doc: $(DOCS)
 	@true
 
 RSH=rsync
 DEST=login.hepforge.org:rivet/public_html/
 upload: $(DOCS)
 	$(RSH) $? $(DEST)
 
 arxivtar: $(DOCSOURCES) rivet-manual.bbl
 	for i in *.png; do convert $$i $${i/.png/.eps}; done
 	tar czf rivet-manual.tar.gz \
 rivet-manual.tex $(LATEXSUBSOURCES) analyses.tex refs.bib analyses.bib rivetversion.sty rivet-manual.bbl \
 hepnames.sty hepnicenames.sty hepparticles.sty heppennames.sty hepunits.sty maybemath.sty microtype.sty underscore.sty \
 bend.eps cone.eps thinker.eps slip.eps \
 h-physrev3.bst JHEP3.cls JHEP.bst
 
 mostlyclean-local:
 	rm -rf *.aux *.log *.toc
 
 clean-local:
 	rm -rf $(DOCS)
 
 ## Install!
 pkgdata_DATA = $(DOCS)
diff --git a/doc/make-plots.txt b/doc/make-plots.txt
--- a/doc/make-plots.txt
+++ b/doc/make-plots.txt
@@ -1,688 +1,688 @@
 make-plots
 ============
 
 About
 -----
 
 `make-plots` reads histogram files in a simple text format and converts
 them into PostScript or PDF files. This is done by creating a LaTeX file
 and running `latex`, `dvips`, and maybe `ps2pdf`.
 
 Usage
 -----
 
 To run `make-plots` call
 
 --------------------
  make-plots [options] file.dat [file2.dat ...]
 --------------------
 
 All available options can be listed by running
 
 --------------------
  make-plots --help
 --------------------
 
 Configuration files
 ~~~~~~~~~~~~~~~~~~~
 
 `make-plots` typically takes the plotting instructions and settings from the
 input ascii files as described in the "Input Format" chapter.
 It is also possible though to pass a global configuration file to `make-plots`
 (cf. `--help`) which allows you to specify/overwrite settings for certain plots or
 histograms in a plot on top of what the input files specify. This could be
 useful if the ascii files are generated automatically (e.g. with `rivet-mkhtml`
 or `compare-histos`) and you still want to apply custom plotting options.
 
 An example for this looks like:
 
 --------------------
 # BEGIN PLOT figures/MC_WJETS/W_mass.dat
 XMin=60.0
 XMax=100.0
 LegendXPos=0.65
 # END PLOT
 
 .*myLOrun.aida/D0_2008_S7554427/d01-x01-y01::Scale=1.0
 --------------------
 
 Here first the options in the `PLOT` section of a specific ascii file are
 being amended/overwritten. The second part shows how to overwrite the `Scale`
 property of one specific histogram line using the ID of the histogram.
 
 
 Input Format
 ------------
 
 The ascii files which can be read by `make-plots` are divided into sections.
 There are four types of sections which are called `PLOT`, `HISTOGRAM`,
 `FUNCTION`, and `SPECIAL`. Every file must contain exactly one `PLOT` section
 and at least one section of the other three types. There may be multiple
 `HISTOGRAM`, `FUNCTION`, and `SPECIAL` sections.
 
 Empty lines and lines starting with `#` are ignored, except for the section
 delimiters described below.
 
 PLOT
 ~~~~
 
 The `PLOT` section starts with
 
 --------------------
 # BEGIN PLOT
 --------------------
 
 and ends with
 
 --------------------
 # END PLOT
 --------------------
 
 Every file must have exactly one `PLOT` section. In this section global
 parameters are specified, like the axis labels, the plot title, size, ...
 An empty `PLOT` section is perfectly legal, though.
 
 In this section the following parameters can be set:
 
 Titles, Labels
 ^^^^^^^^^^^^^^
 
 --------------------
 Title=<title>
 --------------------
 The title of the plot.
 
 --------------------
 XLabel=<label>
 YLabel=<label>
 ZLabel=<label>
 --------------------
 Axis labels for the x-, y-, and z-axis.
 
 --------------------
 XLabelSep=<distance>
 YLabelSep=<distance>
 ZLabelSep=<distance>
 --------------------
 Distance between the axis label and the plot in units of `\labelsep`.
 
 --------------------
 XMajorTickMarks=<last_digit>
 YMajorTickMarks=<last_digit>
 ZMajorTickMarks=<last_digit>
 XMinorTickMarks=<nticks>
 YMinorTickMarks=<nticks>
 ZMinorTickMarks=<nticks>
 --------------------
 `make-plots` tries to guess the distance between tickmarks automatically.
 If you are not satisfied with its result, you can override this by setting
 `<last_digit>` to 1, 2, 5, or 10, and `<nticks>` to the number of minor ticks
 you like.
 _Note_: These options are not available for logarithmic axes.
 
 --------------------
 XTwosidedTicks=<0|1>
 YTwosidedTicks=<0|1>
 --------------------
 Draw tickmarks also on the upper and/or right side of the plot.
 
 --------------------
 XCustomMajorTicks=<list>
 YCustomMajorTicks=<list>
 ZCustomMajorTicks=<list>
 --------------------
 To specify major ticks at arbitrary positions and/or with arbitrary labels.
 `<list>` is a whitespace-separated list of format `value1 <spaces_or_tabs> label1 <spaces_or_tabs> value2
 <spaces_or_tabs> label2 ...`.
 
 TODO: allow use of YAML-style list syntax to clarify delimiters?
 
 --------------------
 XCustomMinorTicks=<list>
 YCustomMinorTicks=<list>
 ZCustomMinorTicks=<list>
 --------------------
 To specify minor ticks at arbitrary positions. `<list>` is a tab separated
 list of format `value1 <tab> value2 <tab> value3 ...`.
 
 --------------------
-PlotTickLabels=<0|1>
+PlotXTickLabels=<0|1>
 RatioPlotTickLabels=<0|1>
 --------------------
 Disable/enable plotting of the tick labels in the plot and ratio plot (useful
 if multiple plots are to be combined manually later).
 
 Axes
 ^^^^
 
 --------------------
 LogX=<0|1>
 LogY=<0|1>
 LogZ=<0|1>
 --------------------
 Use a logarithmic x-, y-, or z-axis. Default is linear.
 
 --------------------
 XMin=<value>
 XMax=<value>
 YMin=<value>
 YMax=<value>
 ZMin=<value>
 ZMax=<value>
 FullRange=<0|1>
 ShowZero=<0|1>
 --------------------
 Specify the plot range. By default the range is chosen such that all data
 is visible in linear plots, and the zero is visible. `ShowZero=0` suppresses
 plotting the zero in linear plots and thus zooms into the actual y-value range
 of the distribution. In logarithmic plots the automatic choice of `YMin`
 is limited to be not smaller than 2e-4*`YMax`, but manually you can specify
 any value. `FullRange=1` also overrides the 2e-4*`YMax` limit and plots the
 full range in y.
 
 
 Normalization, Rebinning
 ^^^^^^^^^^^^^^^^^^^^^^^^
 
 --------------------
 NormalizeToIntegral=<1|0>
 NormalizeToSum=<1|0>
 Scale=<factor>
 --------------------
 Normalize all histograms to their integral, to their sum of entries, or scale
 them by some arbitrary factor. Normalization and scale options in the `PLOT`
 section override the corresponding option in the `HISTOGRAM` section.
 The scale factor is applied after normalization.
 
 --------------------
 Rebin=<nbins>
 --------------------
 Rebin all histograms in this plot. Syntax and functionality is the same as for
 the Rebin option in the `HISTOGRAM` section.
 
 
 Sizes and Margins
 ^^^^^^^^^^^^^^^^^
 
 --------------------
 PlotSize=<xsize,ysize>
 --------------------
 Size in x and y direction of the plot. This can be specified in any unit
 LaTeX understands.
 
 --------------------
 LeftMargin=<size>
 RightMargin=<size>
 TopMargin=<size>
 BottomMargin=<size>
 --------------------
 Distance between the plot and the paper edge.
 
 --------------------
 FrameColor=<color>
 --------------------
 Background color for the margin around the plot.
 
 
 Legends
 ^^^^^^^
 
 --------------------
 Legend=<0|1>
 --------------------
 Display a legend in the plot.
 
 --------------------
 CustomLegend=<text>
 --------------------
 Custom text that is added to the legend.
 
 --------------------
 LegendXPos=<pos>
 LegendYPos=<pos>
 --------------------
 Position of the legend within the plot. Anchor point is the top left corner of
 the legend, so units typically range between 0.0 and 1.0.
 
 --------------------
 LegendAlign=<align>
 --------------------
 Horizontal alignment of the legend: `LegendAlign=l` is the default and will
 create a left-aligned legend, while `LegendAlign=r` is right-aligned with
 the keys on the right hand side.
 
 --------------------
 LegendOnly=<list>
 --------------------
 Whitespace separated list of IDs. These can be histograms or functions. The
 legend is only shown for the listed objects. Without this option, all plotted
 objects which have a title enter the legend. The legend titles
 are plotted in the given order, so there are cases in which it makes
 sense to use `LegendOnly` together with all histogram IDs.
 It is also possible to specify the legend order on an entry-by-entry basis
 using the `LegendOrder=<int>` setting for each histogram or function.
 
 
 Plotting Options
 ^^^^^^^^^^^^^^^^
 
 --------------------
 DrawOnly=<list>
 --------------------
 Whitespace separated list of histogram IDs. Only the histograms in this list
 are plotted, even if there are more histograms defined in the file. The
 histograms are plotted in the given order, so there are cases in which it makes
 sense to use `DrawOnly` together with all histogram IDs. This is especially
 useful for the `Stack` option. It is also possible to specify the plotting order
 on a histogram-by-histogram basis using the `PlotOrder=<int>` setting for each
 histogram.
 
 --------------------
 Stack=<list>
 --------------------
 Whitespace separated list of histogram IDs. The histograms will be added on top
 of each other. This is useful for example to compare data with background if
 the background has contributions from several histograms.
 
 --------------------
 DrawSpecialFirst=<0|1>
 DrawFunctionFirst=<0|1>
 --------------------
 By default the `SPECIAL` and `FUNCTION` sections are plotted after the
 histograms. With these options you can override that behaviour.
 
 --------------------
 ConnectGaps=<0|1>
 --------------------
 If error bars are disabled and you want to bridge gaps in a histogram, you
 can set this parameter. By default it is off. Setting it in the `PLOT` section
 affects all histograms, but you can also set it in the `HISTOGRAM` section for
 individual histograms. The local setting overrides the global setting.
 
 
 Comparison Plots
 ^^^^^^^^^^^^^^^^
 
 With the
 
 --------------------
 RatioPlot=1
 RatioPlotReference=<histogram_ID>
 --------------------
 options you can create ratio plots for two or more histograms. Note that you
 must specify your reference data ID. This option is used by the
 link:compare-histos.html[`compare-histos`] script.
 
 --------------------
 RatioPlotMode=<default|deviation|datamc>
 --------------------
 By default, the ratio plot displays MC/Data. You can switch to
 (MC-data)/uncertainty (`deviation`) or Data/MC (`datamc`) with this
 option.
 
 In ratio plots the following additional options are available and work in
 a similar way as their regular counterparts:
 
 --------------------
 RatioPlotYLabel=<label>
 RatioPlotYMin=<value>
 RatioPlotYMax=<value>
 RatioPlotYSize=<size>
 RatioPlotErrorBandColor=<color>
 --------------------
 
 By default, the reference data is plotted using a yellow error band around the
 central value of the ratio plot. If you would rather have it plotted in the
 same style as in the main plot (e.g. with black errorbars), you can specify:
 
 --------------------
 RatioPlotSameStyle=1
 --------------------
 
 If you only want the ratio plot without showing the actual data distribution,
 you can switch off the main plot. This option implies `RatioPlot=1`:
 
 --------------------
 MainPlot=0
 --------------------
 
 
 Goodness of Fit
 ^^^^^^^^^^^^^^^
 
 `make-plots` can calculate the goodness of fit between histograms and display the
 result in the legend. It is also possible to change the color of the margin around
 the plot depending on the GoF. This is useful to provide a quick overview when
 looking at many plots.
 
 --------------------
 GofType=chi2
 --------------------
 
 The type of GoF. The default is `chi2` and currently that's the only option.
 
 --------------------
 GofReference=<histogram_ID>
 --------------------
 
 specifies the reference histogram to be used for the GoF calculation. If this
 option is omitted, the fallback is `RatioPlotReference`.
 
 The GoF calculation is activated by two options:
 
 --------------------
 GofLegend=<0|1>
 GofFrame=<histogram_ID>
 --------------------
 
 `GofLegend` calculates the GoF for all histograms and displays the results in
 the legend. With `GofFrame` you can specify a single histogram for which the
 GoF result will be shown in the legend and used to assign a color to the plot
 margins. Note that `FrameColor` overrides the color choice for the margin.
 You can use
 
 --------------------
 GofFrameColor=<colorthresholds>
 --------------------
 
 to specify the thresholds for the frame color. This option takes a list of
 `<threshold>:<color>` pairs, separated by whitespace. The default is
 `GofFrameColor=0:green 3:yellow 6:red!70`. Again, if you use `FrameColor`,
 this option is disabled.
 
 
 Color Palettes for 2-dim Plots
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 With the option `ColorSeries` you can define a custom color palette for
 2-dimensional plots. The syntax is the same as for the `\definecolorseries`
 command in the `xcolor` LaTeX package after the color series name, i.e.
 `{core-model}{method}[begin-model]{begin-spec}[end-model]{end-spec}`. For more
 information you can consult the
 http://www.ctan.org/tex-archive/macros/latex/contrib/xcolor/xcolor.pdf[xcolor documentation].
 Here is an example:
 
 --------------------
 ColorSeries={rgb}{last}[rgb]{1,0.97,0.94}[rgb]{0.6,0.0,0.05}
 --------------------
 
 
 HISTOGRAM
 ~~~~~~~~~
 
 The `HISTOGRAM` section starts with
 
 --------------------
 # BEGIN HISTOGRAM <ID>
 --------------------
 
 and ends with
 
 --------------------
 # END HISTOGRAM
 --------------------
 
 There can be more than one `HISTOGRAM` section in a file. Histograms are
 identified by `<ID>` which can be any string _not_ containing whitespace.
 
 
 Data Format
 ^^^^^^^^^^^
 
 Lines starting with a number (positive or negative) are interpreted as data.
 Each line specifies one bin. The fields in each line must be separated by tabs,
 not spaces (this needs to be fixed some day). For 1-dimensional histograms the
 format can be
 
 --------------------
 <lowerbinedge>  <upperbinedge>  <value>  <error>
 <lowerbinedge>  <upperbinedge>  <value>  <minuserror>  <pluserror>
 --------------------
 
 2-dimensional histograms are supported, too. They are plotted as colormap
 (errors are ignored) and specified as
 
 --------------------
 <lowerxbinedge>  <upperxbinedge>  <lowerybinedge>  <upperybinedge>  <value>  <error>
 --------------------
 
 
 Titles
 ^^^^^^
 
 --------------------
 Title=<title>
 --------------------
 Title of the histogram. This is used for the legend.
 
 
 Linestyles
 ^^^^^^^^^^
 
 --------------------
 LineStyle=<style>
 --------------------
 Any linestyle that is understood by the LaTeX pstricks package, e.g. `solid`,
 `dotted`, `dashed`, `none`, as well as a special `dashdotted` (or `dotdashed`)
 linestyle which does what you might expect.
 
 --------------------
 LineColor=<color>
 --------------------
 Color of the line. Default is black, but any color that pstricks understands
 can be used, including constructions like `red!70!blue!20` (for mixing colors),
 `{[rgb]{0.8,0,0.7}}` (for RGB-colors), `{[wave]{580}}` (for wavelengths in
 nm), `LineColor={[cmyk]{1,1,0,0}}` for CMYK-colors, or `[hsb]{0.5,1,1}` for
 HSB-colors.
 
 --------------------
 LineOpacity=<opacity>
 --------------------
 Set the opacity of the line. Default is 1.0. This might not work for ps output.
 
 --------------------
 LineWidth=<width>
 --------------------
 Width of the line.
 
 --------------------
 LineDash=<dashstyle>
 --------------------
 If `LineStyle` is set to `dashed`, you can specify the dash style with this
 option. Anything that is understood by pstrick's `dash=...` option is valid.
 An example for a dash-dotted line is `LineDash=3pt 3pt .8pt 3pt`. You can use
 `LineStyle=dashdotted` or `LineStyle=dotdashed` as an abbreviation for
 `LineStyle=dashed` with `LineDash=3pt 3pt .8pt 3pt`.
 
 --------------------
 ConnectBins=<0|1>
 --------------------
 Choose whether to connect adjacent bins' horizontal lines together by a vertical
 line on the bin edge. This is enabled by default, but you may wish to disable
 it when plotting reference data with error bars and point markers.
 
 --------------------
 ConnectGaps=<0|1>
 --------------------
 If ConnectBins is enabled and you want to bridge gaps in a histogram, you
 can set this parameter. By default it is off. Setting it in the `PLOT` section
 affects all histograms, but you can also set it in the `HISTOGRAM` section for
 individual histograms. The local setting overrides the global setting.
 
 --------------------
 SmoothLine=<0|1>
 --------------------
 Draw a smooth curve rather than a histogram.
 
 
 Fillstyles
 ^^^^^^^^^^
 
 --------------------
 FillStyle=<style>
 FillColor=<color>
 --------------------
 To fill the area below a histogram, set `FillStyle` and `FillColor` to
 something pstricks understands. Examples for the style are `solid` or `vlines`.
 See `LineColor` for examples of color definitions.
 
 --------------------
 FillOpacity=<opacity>
 --------------------
 Set the opacity of the solid fillcolor. Default is 1.0. This might not work for
 ps output.
 
 --------------------
 HatchColor=<color>
 --------------------
 The color of a hatch pattern used for filling the area below a histogram. This
 is used for example when you use `vlines` as style.
 
 
 Data Points
 ^^^^^^^^^^^
 
 --------------------
 ErrorBars=<0|1>
 --------------------
 Turn on error bars.
 
 --------------------
 ErrorBands=<0|1>
 ErrorBandColor=<color>
 --------------------
 Turn on error bands and set their color (see `LineColor` for a description
 of color definitions).
 
 --------------------
 ErrorBandOpacity=<opacity>
 --------------------
 Set the opacity of the error band. Default is 1.0. This might not work for ps
 output.
 
 --------------------
 PolyMarker=<dotstyle>
 --------------------
 The marker style of the points. Any dot style which is understood by pstricks
 is valid, e.g. `*`, `o`, `triangle`, `diamond`, ...
 
 --------------------
 DotSize=<size>
 DotScale=<factor>
 --------------------
 The size of the markers. With `DotSize` you can specify the absolute size, e.g.
 in units of `pt`, while `DotScale` is a relative measure with respect to the
 default size.
 
 
 Normalization, Rebinning
 ^^^^^^^^^^^^^^^^^^^^^^^^
 
 --------------------
 NormalizeToIntegral=<1|0>
 NormalizeToSum=<1|0>
 Scale=<factor>
 --------------------
 Normalize the histogram to the integral, to the sum of entries, or scale it by
 some arbitrary factor. If normalization and a scale factor are given, the scale
 factor is applied after normalization. This is useful for stacking histograms
 when the ratios are known.
 
 --------------------
 Rebin=<nbins>
 ErrorType=<stat|env>
 --------------------
 Rebin the histogram. Starting with the lowest bin <nbins> bins are combined
 into a new bin. If the number of bins in the histogram is not a multiple of
 <nbins>, the remaining bins at the upper histogram end are silently ignored
 (i.e. if the original histogram has 10 bins and <nbins> is 3, the plotted
 histogram shows three bins combining the bins 1--9 of the original histogram).
 The treatment of the errors is determined by the given ErrorType:
 `stat` (default) assumes the errors are of statistical nature and combines
 them in quadrature, while `env` allows errors to be treated as an envelope of
 various uncertainty runs which are combined linearly.
 
 
 FUNCTION
 ~~~~~~~~
 
 `make-plots` can draw arbitrary functions. These functions are defined as
 Python code snippets which are evaluated by `make-plots`. The code snippet
 must come after all other options in a `FUNCTION` section and is preceded by
 `Code=` on a single line. An example `FUNCTION` section might look like this:
 
 --------------------
 # BEGIN FUNCTION f_cc
 LineColor=red
 Code=
 p0=16.4
 p1=1.25
 p2=0.9832
 from scipy.special import erf
 x-=0.5
 if x<=0:
     return 0
 else:
     return .5*p2*(1.+erf( (x-p0)/sqrt(x*p1) ))
 # END FUNCTION
 --------------------
 
 Common Options with HISTOGRAM
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 The following options have the same meaning as in the `HISTOGRAM` section:
 
 --------------------
 Title=<title>
 LineStyle=<style>
 LineColor=<color>
 LineWidth=<width>
 LineDash=<dashstyle>
 FillStyle=<style>
 FillColor=<color>
 HatchColor=<color>
 --------------------
 
 Function Range
 ^^^^^^^^^^^^^^
 
 You can limit the plot range of functions by specifying
 
 --------------------
 XMin=<value>
 XMax=<value>
 --------------------
 
 
 SPECIAL
 ~~~~~~~
 
 The `SPECIAL` sections are used to include any custom pstricks code. This is
 useful for drawing arrows and lines, put text at any position into the plot,
 etc. The default coordinate system is defined to be `(0,0)` at the lower left
 and `(1,1)` at the upper right corner of the plot. By putting the
 `\physicscoor` command in front of a coordinate pair, these coordinates are
 interpreted not in the pstricks coordinate system, but in the physics
 coordinate system of the plot, which is useful e.g. for marking cut values in a
 plot. Similar `\physicsxcoor` and `\physicsycoor` commands exist which will only
 treat the x or y coordinate respectively as being in physics units.
 
 Hint: If you want to clip your `SPECIAL` code to the plot area, you can use
 --------------------
 \psclip{\psframe[linewidth=0, linestyle=none](0,0)(1,1)}
    ...
 \endpsclip
 --------------------
 
 An example of a `SPECIAL` section might look like this:
 
 --------------------
 # BEGIN SPECIAL
 \psclip{\psframe[linewidth=0, linestyle=none](0,0)(1,1)}
 \psline[linewidth=1.2pt,linecolor=red]{<-}\physicscoor(2.83,2)\physicscoor(2.83,18)
 \uput{4pt}[180]{0}\physicscoor(2.83,12){observed}
 \psline[linewidth=0.8pt,linecolor=red,linestyle=dashed]\physicscoor( 3.17,0)\physicscoor( 3.17,28.14)
 \psline[linewidth=0.8pt,linecolor=red,linestyle=dashed]\physicscoor(-3.59,0)\physicscoor(-3.59,28.14)
 \endpsclip
 # END SPECIAL
 --------------------
diff --git a/include/Rivet/Analysis.hh b/include/Rivet/Analysis.hh
--- a/include/Rivet/Analysis.hh
+++ b/include/Rivet/Analysis.hh
@@ -1,1038 +1,1049 @@
 // -*- C++ -*-
 #ifndef RIVET_Analysis_HH
 #define RIVET_Analysis_HH
 
 #include "Rivet/Config/RivetCommon.hh"
 #include "Rivet/AnalysisInfo.hh"
 #include "Rivet/Event.hh"
 #include "Rivet/Projection.hh"
 #include "Rivet/ProjectionApplier.hh"
 #include "Rivet/ProjectionHandler.hh"
 #include "Rivet/AnalysisLoader.hh"
 #include "Rivet/Tools/RivetYODA.hh"
 #include "Rivet/Tools/Logging.hh"
 #include "Rivet/Tools/ParticleUtils.hh"
 #include "Rivet/Tools/Cuts.hh"
 
 
 /// @def vetoEvent
 /// Preprocessor define for vetoing events, including the log message and return.
 #define vetoEvent                                                       \
   do { MSG_DEBUG("Vetoing event on line " << __LINE__ << " of " << __FILE__); return; } while(0)
 
 
 namespace Rivet {
 
 
   // Forward declaration
   class AnalysisHandler;
 
   /// @brief This is the base class of all analysis classes in Rivet.
   ///
   /// There are
   /// three virtual functions which should be implemented in base classes:
   ///
   /// void init() is called by Rivet before a run is started. Here the
   /// analysis class should book necessary histograms. The needed
   /// projections should probably rather be constructed in the
   /// constructor.
   ///
   /// void analyze(const Event&) is called once for each event. Here the
   /// analysis class should apply the necessary Projections and fill the
   /// histograms.
   ///
   /// void finalize() is called after a run is finished. Here the analysis
   /// class should do whatever manipulations are necessary on the
   /// histograms. Writing the histograms to a file is, however, done by
   /// the Rivet class.
   class Analysis : public ProjectionApplier {
 
     /// The AnalysisHandler is a friend.
     friend class AnalysisHandler;
 
 
   public:
 
     /// @name Standard constructors and destructors.
     //@{
 
     // /// The default constructor.
     // Analysis();
 
     /// Constructor
     Analysis(const std::string& name);
 
     /// The destructor.
     virtual ~Analysis() {}
 
     //@}
 
 
   public:
 
     /// @name Main analysis methods
     //@{
 
     /// Initialize this analysis object. A concrete class should here
     /// book all necessary histograms. An overridden function must make
     /// sure it first calls the base class function.
     virtual void init() { }
 
     /// Analyze one event. A concrete class should here apply the
     /// necessary projections on the \a event and fill the relevant
     /// histograms. An overridden function must make sure it first calls
     /// the base class function.
     virtual void analyze(const Event& event) = 0;
 
     /// Finalize this analysis object. A concrete class should here make
     /// all necessary operations on the histograms. Writing the
     /// histograms to a file is, however, done by the Rivet class. An
     /// overridden function must make sure it first calls the base class
     /// function.
     virtual void finalize() { }
 
     //@}
 
 
   public:
 
     /// @name Metadata
     /// Metadata is used for querying from the command line and also for
     /// building web pages and the analysis pages in the Rivet manual.
     //@{
 
     /// Get the actual AnalysisInfo object in which all this metadata is stored.
     const AnalysisInfo& info() const {
       assert(_info && "No AnalysisInfo object :O");
       return *_info;
     }
 
     /// @brief Get the name of the analysis.
     ///
     /// By default this is computed by combining the results of the experiment,
     /// year and Spires ID metadata methods and you should only override it if
     /// there's a good reason why those won't work.
     virtual std::string name() const {
       return (info().name().empty()) ? _defaultname : info().name();
     }
 
     /// Get the Inspire ID code for this analysis.
     virtual std::string inspireId() const {
       return info().inspireId();
     }
 
     /// Get the SPIRES ID code for this analysis (~deprecated).
     virtual std::string spiresId() const {
       return info().spiresId();
     }
 
     /// @brief Names & emails of paper/analysis authors.
     ///
     /// Names and email of authors in 'NAME \<EMAIL\>' format. The first
     /// name in the list should be the primary contact person.
     virtual std::vector<std::string> authors() const {
       return info().authors();
     }
 
     /// @brief Get a short description of the analysis.
     ///
     /// Short (one sentence) description used as an index entry.
     /// Use @a description() to provide full descriptive paragraphs
     /// of analysis details.
     virtual std::string summary() const {
       return info().summary();
     }
 
     /// @brief Get a full description of the analysis.
     ///
     /// Full textual description of this analysis, what it is useful for,
     /// what experimental techniques are applied, etc. Should be treated
     /// as a chunk of restructuredText (http://docutils.sourceforge.net/rst.html),
     /// with equations to be rendered as LaTeX with amsmath operators.
     virtual std::string description() const {
       return info().description();
     }
 
     /// @brief Information about the events needed as input for this analysis.
     ///
     /// Event types, energies, kinematic cuts, particles to be considered
     /// stable, etc. etc. Should be treated as a restructuredText bullet list
     /// (http://docutils.sourceforge.net/rst.html)
     virtual std::string runInfo() const {
       return info().runInfo();
     }
 
     /// Experiment which performed and published this analysis.
     virtual std::string experiment() const {
       return info().experiment();
     }
 
     /// Collider on which the experiment ran.
     virtual std::string collider() const {
       return info().collider();
     }
 
     /// When the original experimental analysis was published.
     virtual std::string year() const {
       return info().year();
     }
 
+    /// The luminosity in inverse femtobarn
+    virtual std::string luminosityfb() const {
+      return info().luminosityfb();
+    }
+
     /// Journal, and preprint references.
     virtual std::vector<std::string> references() const {
       return info().references();
     }
 
     /// BibTeX citation key for this article.
     virtual std::string bibKey() const {
       return info().bibKey();
     }
 
     /// BibTeX citation entry for this article.
     virtual std::string bibTeX() const {
       return info().bibTeX();
     }
 
     /// Whether this analysis is trusted (in any way!)
     virtual std::string status() const {
       return (info().status().empty()) ? "UNVALIDATED" : info().status();
     }
 
     /// Any work to be done on this analysis.
     virtual std::vector<std::string> todos() const {
       return info().todos();
     }
 
 
     /// Return the allowed pairs of incoming beams required by this analysis.
     virtual const std::vector<PdgIdPair>& requiredBeams() const {
       return info().beams();
     }
     /// Declare the allowed pairs of incoming beams required by this analysis.
     virtual Analysis& setRequiredBeams(const std::vector<PdgIdPair>& requiredBeams) {
       info().setBeams(requiredBeams);
       return *this;
     }
 
 
     /// Sets of valid beam energy pairs, in GeV
     virtual const std::vector<std::pair<double, double> >& requiredEnergies() const {
       return info().energies();
     }
+
+    /// Get vector of analysis keywords
+    virtual const std::vector<std::string> & keywords() const {
+      return info().keywords();
+    }
+
     /// Declare the list of valid beam energy pairs, in GeV
     virtual Analysis& setRequiredEnergies(const std::vector<std::pair<double, double> >& requiredEnergies) {
       info().setEnergies(requiredEnergies);
       return *this;
     }
 
 
     /// Return true if this analysis needs to know the process cross-section.
     /// @todo Remove this and require HepMC >= 2.06
     bool needsCrossSection() const {
       return info().needsCrossSection();
     }
     /// Declare whether this analysis needs to know the process cross-section from the generator.
     /// @todo Remove this and require HepMC >= 2.06
     Analysis& setNeedsCrossSection(bool needed=true) {
       info().setNeedsCrossSection(needed);
       return *this;
     }
 
     //@}
 
 
     /// @name Internal metadata modifying methods
     //@{
 
     /// Get the actual AnalysisInfo object in which all this metadata is stored (non-const).
     AnalysisInfo& info() {
       assert(_info && "No AnalysisInfo object :O");
       return *_info;
     }
 
     //@}
 
 
     /// @name Run conditions
     //@{
 
     /// Incoming beams for this run
     const ParticlePair& beams() const;
 
     /// Incoming beam IDs for this run
     const PdgIdPair beamIds() const;
 
     /// Centre of mass energy for this run
     double sqrtS() const;
 
     //@}
 
 
     /// @name Analysis / beam compatibility testing
     //@{
 
     /// Check if analysis is compatible with the provided beam particle IDs and energies
     bool isCompatible(const ParticlePair& beams) const;
 
     /// Check if analysis is compatible with the provided beam particle IDs and energies
     bool isCompatible(PdgId beam1, PdgId beam2, double e1, double e2) const;
 
     /// Check if analysis is compatible with the provided beam particle IDs and energies
     bool isCompatible(const PdgIdPair& beams, const std::pair<double,double>& energies) const;
 
     //@}
 
 
     /// Set the cross section from the generator
     Analysis& setCrossSection(double xs);
 
     /// Access the controlling AnalysisHandler object.
     AnalysisHandler& handler() const { return *_analysishandler; }
 
 
   protected:
 
     /// Get a Log object based on the name() property of the calling analysis object.
     Log& getLog() const;
 
     /// Get the process cross-section in pb. Throws if this hasn't been set.
     double crossSection() const;
 
     /// Get the process cross-section per generated event in pb. Throws if this
     /// hasn't been set.
     double crossSectionPerEvent() const;
 
     /// Get the number of events seen (via the analysis handler). Use in the
     /// finalize phase only.
     size_t numEvents() const;
 
     /// Get the sum of event weights seen (via the analysis handler). Use in the
     /// finalize phase only.
     double sumOfWeights() const;
 
 
   protected:
 
     /// @name Histogram paths
     //@{
 
     /// Get the canonical histogram "directory" path for this analysis.
     const std::string histoDir() const;
 
     /// Get the canonical histogram path for the named histogram in this analysis.
     const std::string histoPath(const std::string& hname) const;
 
     /// Get the canonical histogram path for the numbered histogram in this analysis.
     const std::string histoPath(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const;
 
     /// Get the internal histogram name for given d, x and y (cf. HepData)
     const std::string makeAxisCode(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const;
 
     //@}
 
 
     /// @name Histogram reference data
     //@{
 
     /// Get reference data for a named histo
     /// @todo Move to the templated version when we have C++11 and can have a default fn template type
     const YODA::Scatter2D& refData(const string& hname) const;
 
     /// Get reference data for a numbered histo
     /// @todo Move to the templated version when we have C++11 and can have a default fn template type
     const YODA::Scatter2D& refData(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const;
 
     /// Get reference data for a named histo
     /// @todo Would be nice to just use these and ditch the S2D no-template version,
     ///   but we need C++11 for default args in function templates
     // template <typename T=Scatter2D>
     /// @todo SFINAE to ensure that the type inherits from YODA::AnalysisObject?
     template <typename T>
     const T& refData(const string& hname) const {
       _cacheRefData();
       MSG_TRACE("Using histo bin edges for " << name() << ":" << hname);
       if (!_refdata[hname]) {
         MSG_ERROR("Can't find reference histogram " << hname);
         throw Exception("Reference data " + hname + " not found.");
       }
       return dynamic_cast<T&>(*_refdata[hname]);
     }
 
     /// Get reference data for a numbered histo
     /// @todo Would be nice to just use these and ditch the S2D no-template version,
     ///   but we need C++11 for default args in function templates
     // template <typename T=Scatter2D>
     /// @todo SFINAE to ensure that the type inherits from YODA::AnalysisObject?
     template <typename T>
     const T& refData(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
       const string hname = makeAxisCode(datasetId, xAxisId, yAxisId);
       return refData(hname);
     }
 
     //@}
 
 
     /// @name Counter booking
     //@{
 
     /// Book a counter.
     void book(CounterPtr &, const std::string& name,
                            const std::string& title="");
                            // const std::string& valtitle=""
 
     /// Book a counter, using a path generated from the dataset and axis ID codes
     ///
     /// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
     void book(CounterPtr &, unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
                            const std::string& title="");
                            // const std::string& valtitle=""
 
     //@}
 
 
     /// @name 1D histogram booking
     //@{
 
     /// Book a 1D histogram with @a nbins uniformly distributed across the range @a lower - @a upper .
     void book(Histo1DPtr &,const std::string& name,
                            size_t nbins, double lower, double upper,
                            const std::string& title="",
                            const std::string& xtitle="",
                            const std::string& ytitle="");
 
     /// Book a 1D histogram with non-uniform bins defined by the vector of bin edges @a binedges .
     void book(Histo1DPtr &,const std::string& name,
                            const std::vector<double>& binedges,
                            const std::string& title="",
                            const std::string& xtitle="",
                            const std::string& ytitle="");
 
     /// Book a 1D histogram with binning from a reference scatter.
     void book(Histo1DPtr &,const std::string& name,
                            const Scatter2D& refscatter,
                            const std::string& title="",
                            const std::string& xtitle="",
                            const std::string& ytitle="");
 
     /// Book a 1D histogram, using the binnings in the reference data histogram.
     void book(Histo1DPtr &,const std::string& name,
                            const std::string& title="",
                            const std::string& xtitle="",
                            const std::string& ytitle="");
 
     /// Book a 1D histogram, using the binnings in the reference data histogram.
     ///
     /// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
     void book(Histo1DPtr &,unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
                            const std::string& title="",
                            const std::string& xtitle="",
                            const std::string& ytitle="");
     //@}
 
 
     /// @name 2D histogram booking
     //@{
 
     /// Book a 2D histogram with @a nxbins and @a nybins uniformly
     /// distributed across the ranges @a xlower - @a xupper and @a
     /// ylower - @a yupper respectively along the x- and y-axis.
     void book(Histo2DPtr &,const std::string& name,
                            size_t nxbins, double xlower, double xupper,
                            size_t nybins, double ylower, double yupper,
                            const std::string& title="",
                            const std::string& xtitle="",
                            const std::string& ytitle="",
                            const std::string& ztitle="");
 
     /// Book a 2D histogram with non-uniform bins defined by the
    /// vectors of bin edges @a xbinedges and @a ybinedges.
     void book(Histo2DPtr &,const std::string& name,
                            const std::vector<double>& xbinedges,
                            const std::vector<double>& ybinedges,
                            const std::string& title="",
                            const std::string& xtitle="",
                            const std::string& ytitle="",
                            const std::string& ztitle="");
 
     // /// Book a 2D histogram with binning from a reference scatter.
     // Histo2DPtr bookHisto2D(const std::string& name,
     //                        const Scatter3D& refscatter,
     //                        const std::string& title="",
     //                        const std::string& xtitle="",
     //                        const std::string& ytitle="",
     //                        const std::string& ztitle="");
 
     // /// Book a 2D histogram, using the binnings in the reference data histogram.
     // Histo2DPtr bookHisto2D(const std::string& name,
     //                        const std::string& title="",
     //                        const std::string& xtitle="",
     //                        const std::string& ytitle="",
     //                        const std::string& ztitle="");
 
     // /// Book a 2D histogram, using the binnings in the reference data histogram.
     // ///
     // /// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
     // Histo2DPtr bookHisto2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
     //                        const std::string& title="",
     //                        const std::string& xtitle="",
     //                        const std::string& ytitle="",
     //                        const std::string& ztitle="");
 
     //@}
 
 
     /// @name 1D profile histogram booking
     //@{
 
     /// Book a 1D profile histogram with @a nbins uniformly distributed across the range @a lower - @a upper .
     void book(Profile1DPtr &,  const std::string& name,
                                size_t nbins, double lower, double upper,
                                const std::string& title="",
                                const std::string& xtitle="",
                                const std::string& ytitle="");
 
     /// Book a 1D profile histogram with non-uniform bins defined by the vector of bin edges @a binedges .
     void book(Profile1DPtr &,  const std::string& name,
                                const std::vector<double>& binedges,
                                const std::string& title="",
                                const std::string& xtitle="",
                                const std::string& ytitle="");
 
     /// Book a 1D profile histogram with binning from a reference scatter.
     void book(Profile1DPtr &,  const std::string& name,
                                const Scatter2D& refscatter,
                                const std::string& title="",
                                const std::string& xtitle="",
                                const std::string& ytitle="");
 
     /// Book a 1D profile histogram, using the binnings in the reference data histogram.
     void book(Profile1DPtr &,  const std::string& name,
                                const std::string& title="",
                                const std::string& xtitle="",
                                const std::string& ytitle="");
 
     /// Book a 1D profile histogram, using the binnings in the reference data histogram.
     ///
     /// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
     void book(Profile1DPtr &,  unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
                                const std::string& title="",
                                const std::string& xtitle="",
                                const std::string& ytitle="");
 
     //@}
 
 
     /// @name 2D profile histogram booking
     //@{
 
     /// Book a 2D profile histogram with @a nxbins and @a nybins uniformly
     /// distributed across the ranges @a xlower - @a xupper and @a ylower - @a
     /// yupper respectively along the x- and y-axis.
     void book(Profile2DPtr &,  const std::string& name,
                                size_t nxbins, double xlower, double xupper,
                                size_t nybins, double ylower, double yupper,
                                const std::string& title="",
                                const std::string& xtitle="",
                                const std::string& ytitle="",
                                const std::string& ztitle="");
 
    /// Book a 2D profile histogram with non-uniform bins defined by the vectors
     /// of bin edges @a xbinedges and @a ybinedges.
     void book(Profile2DPtr &,  const std::string& name,
                                const std::vector<double>& xbinedges,
                                const std::vector<double>& ybinedges,
                                const std::string& title="",
                                const std::string& xtitle="",
                                const std::string& ytitle="",
                                const std::string& ztitle="");
 
     /// Book a 2D profile histogram with binning from a reference scatter.
     // Profile2DPtr bookProfile2D(const std::string& name,
     //                            const Scatter3D& refscatter,
     //                            const std::string& title="",
     //                            const std::string& xtitle="",
     //                            const std::string& ytitle="",
     //                            const std::string& ztitle="");
 
     // /// Book a 2D profile histogram, using the binnings in the reference data histogram.
     // Profile2DPtr bookProfile2D(const std::string& name,
     //                            const std::string& title="",
     //                            const std::string& xtitle="",
     //                            const std::string& ytitle="",
     //                            const std::string& ztitle="");
 
     // /// Book a 2D profile histogram, using the binnings in the reference data histogram.
     // ///
     // /// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
     // Profile2DPtr bookProfile2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
     //                            const std::string& title="",
     //                            const std::string& xtitle="",
     //                            const std::string& ytitle="",
     //                            const std::string& ztitle="");
 
     //@}
 
 
     /// @name 2D scatter booking
     //@{
 
     /// @brief Book a 2-dimensional data point set with the given name.
     ///
     /// @note Unlike histogram booking, scatter booking by default makes no
     /// attempt to use reference data to pre-fill the data object. If you want
     /// this, which is sometimes useful e.g. when the x-position is not really
     /// meaningful and can't be extracted from the data, then set the @a
     /// copy_pts parameter to true. This creates points to match the reference
     /// data's x values and errors, but with the y values and errors zeroed...
     /// assuming that there is a reference histo with the same name: if there
     /// isn't, an exception will be thrown.
 
     Scatter2DPtr& bookScatter2D(const std::string& name,
                                bool copy_pts=false,
                                const std::string& title="",
                                const std::string& xtitle="",
                                const std::string& ytitle="");
 
     /// @brief Book a 2-dimensional data point set, using the binnings in the reference data histogram.
     ///
     /// The paper, dataset and x/y-axis IDs will be used to build the histo name in the HepData standard way.
     ///
     /// @note Unlike histogram booking, scatter booking by default makes no
     /// attempt to use reference data to pre-fill the data object. If you want
     /// this, which is sometimes useful e.g. when the x-position is not really
     /// meaningful and can't be extracted from the data, then set the @a
     /// copy_pts parameter to true. This creates points to match the reference
     /// data's x values and errors, but with the y values and errors zeroed.
     Scatter2DPtr& bookScatter2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId,
                                bool copy_pts=false,
                                const std::string& title="",
                                const std::string& xtitle="",
                                const std::string& ytitle="");
 
     /// @brief Book a 2-dimensional data point set with equally spaced x-points in a range.
     ///
     /// The y values and errors will be set to 0.
     Scatter2DPtr& bookScatter2D(const std::string& name,
                                size_t npts, double lower, double upper,
                                const std::string& title="",
                                const std::string& xtitle="",
                                const std::string& ytitle="");
 
     /// @brief Book a 2-dimensional data point set based on provided contiguous "bin edges".
     ///
     /// The y values and errors will be set to 0.
     Scatter2DPtr& bookScatter2D(const std::string& hname,
                                const std::vector<double>& binedges,
                                const std::string& title,
                                const std::string& xtitle,
                                const std::string& ytitle);
 
     //@}
 
 
   public:
 
 
     /// @name Analysis object manipulation
     /// @todo Should really be protected: only public to keep BinnedHistogram happy for now...
     //@{
 
    /// Multiplicatively scale the given counter, @a cnt, by factor @a factor.
     void scale(CounterPtr cnt, double factor);
 
    /// Multiplicatively scale the given counters, @a cnts, by factor @a factor.
     /// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh)
     /// @todo Use SFINAE for a generic iterable of CounterPtrs
     void scale(const std::vector<CounterPtr>& cnts, double factor) {
       for (auto& c : cnts) scale(c, factor);
     }
     /// @todo YUCK!
     template <std::size_t array_size>
     void scale(const CounterPtr (&cnts)[array_size], double factor) {
       // for (size_t i = 0; i < std::extent<decltype(cnts)>::value; ++i) scale(cnts[i], factor);
       for (auto& c : cnts) scale(c, factor);
     }
 
 
     /// Normalize the given histogram, @a histo, to area = @a norm.
     void normalize(Histo1DPtr histo, double norm=1.0, bool includeoverflows=true);
 
     /// Normalize the given histograms, @a histos, to area = @a norm.
     /// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh)
     /// @todo Use SFINAE for a generic iterable of Histo1DPtrs
     void normalize(const std::vector<Histo1DPtr>& histos, double norm=1.0, bool includeoverflows=true) {
       for (auto& h : histos) normalize(h, norm, includeoverflows);
     }
     /// @todo YUCK!
     template <std::size_t array_size>
     void normalize(const Histo1DPtr (&histos)[array_size], double norm=1.0, bool includeoverflows=true) {
       for (auto& h : histos) normalize(h, norm, includeoverflows);
     }
 
    /// Multiplicatively scale the given histogram, @a histo, by factor @a factor.
     void scale(Histo1DPtr histo, double factor);
 
    /// Multiplicatively scale the given histograms, @a histos, by factor @a factor.
     /// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh)
     /// @todo Use SFINAE for a generic iterable of Histo1DPtrs
     void scale(const std::vector<Histo1DPtr>& histos, double factor) {
       for (auto& h : histos) scale(h, factor);
     }
     /// @todo YUCK!
     template <std::size_t array_size>
     void scale(const Histo1DPtr (&histos)[array_size], double factor) {
       for (auto& h : histos) scale(h, factor);
     }
 
 
     /// Normalize the given histogram, @a histo, to area = @a norm.
     void normalize(Histo2DPtr histo, double norm=1.0, bool includeoverflows=true);
 
     /// Normalize the given histograms, @a histos, to area = @a norm.
     /// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh)
     /// @todo Use SFINAE for a generic iterable of Histo2DPtrs
     void normalize(const std::vector<Histo2DPtr>& histos, double norm=1.0, bool includeoverflows=true) {
       for (auto& h : histos) normalize(h, norm, includeoverflows);
     }
     /// @todo YUCK!
     template <std::size_t array_size>
     void normalize(const Histo2DPtr (&histos)[array_size], double norm=1.0, bool includeoverflows=true) {
       for (auto& h : histos) normalize(h, norm, includeoverflows);
     }
 
    /// Multiplicatively scale the given histogram, @a histo, by factor @a factor.
     void scale(Histo2DPtr histo, double factor);
 
    /// Multiplicatively scale the given histograms, @a histos, by factor @a factor.
     /// @note Constness intentional, if weird, to allow passing rvalue refs of smart ptrs (argh)
     /// @todo Use SFINAE for a generic iterable of Histo2DPtrs
     void scale(const std::vector<Histo2DPtr>& histos, double factor) {
       for (auto& h : histos) scale(h, factor);
     }
     /// @todo YUCK!
     template <std::size_t array_size>
     void scale(const Histo2DPtr (&histos)[array_size], double factor) {
       for (auto& h : histos) scale(h, factor);
     }
 
 
     /// Helper for counter division.
     ///
     /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
     void divide(CounterPtr c1, CounterPtr c2, Scatter1DPtr s) const;
 
     /// Helper for histogram division with raw YODA objects.
     ///
     /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
     void divide(const YODA::Counter& c1, const YODA::Counter& c2, Scatter1DPtr s) const;
 
 
     /// Helper for histogram division.
     ///
     /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
     void divide(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const;
 
     /// Helper for histogram division with raw YODA objects.
     ///
     /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
     void divide(const YODA::Histo1D& h1, const YODA::Histo1D& h2, Scatter2DPtr s) const;
 
 
     /// Helper for profile histogram division.
     ///
     /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
     void divide(Profile1DPtr p1, Profile1DPtr p2, Scatter2DPtr s) const;
 
     /// Helper for profile histogram division with raw YODA objects.
     ///
     /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
     void divide(const YODA::Profile1D& p1, const YODA::Profile1D& p2, Scatter2DPtr s) const;
 
 
     /// Helper for 2D histogram division.
     ///
     /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
     void divide(Histo2DPtr h1, Histo2DPtr h2, Scatter3DPtr s) const;
 
     /// Helper for 2D histogram division with raw YODA objects.
     ///
     /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
     void divide(const YODA::Histo2D& h1, const YODA::Histo2D& h2, Scatter3DPtr s) const;
 
 
     /// Helper for 2D profile histogram division.
     ///
     /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
     void divide(Profile2DPtr p1, Profile2DPtr p2, Scatter3DPtr s) const;
 
     /// Helper for 2D profile histogram division with raw YODA objects
     ///
     /// @note Assigns to the (already registered) output scatter, @a s.  Preserves the path information of the target.
     void divide(const YODA::Profile2D& p1, const YODA::Profile2D& p2, Scatter3DPtr s) const;
 
 
     /// Helper for histogram efficiency calculation.
     ///
     /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
     void efficiency(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const;
 
     /// Helper for histogram efficiency calculation.
     ///
     /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
     void efficiency(const YODA::Histo1D& h1, const YODA::Histo1D& h2, Scatter2DPtr s) const;
 
 
     /// Helper for histogram asymmetry calculation.
     ///
     /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
     void asymm(Histo1DPtr h1, Histo1DPtr h2, Scatter2DPtr s) const;
 
     /// Helper for histogram asymmetry calculation.
     ///
     /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
     void asymm(const YODA::Histo1D& h1, const YODA::Histo1D& h2, Scatter2DPtr s) const;
 
 
     /// Helper for converting a differential histo to an integral one.
     ///
     /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
     void integrate(Histo1DPtr h, Scatter2DPtr s) const;
 
     /// Helper for converting a differential histo to an integral one.
     ///
     /// @note Assigns to the (already registered) output scatter, @a s. Preserves the path information of the target.
     void integrate(const Histo1D& h, Scatter2DPtr s) const;
 
     //@}
 
 
   public:
 
     /// List of registered analysis data objects
    const vector<reference_wrapper<MultiweightAOPtr>>& analysisObjects() const {
      // Non-owning view of internal storage: the reference_wrappers refer to
      // AO pointers registered via addAnalysisObject().
      return _analysisobjects;
    }
 
 
   protected:
 
     /// @name Data object registration, retrieval, and removal
     //@{
 
     /// Register a data object in the histogram system
     void addAnalysisObject(MultiweightAOPtr & ao);
 
     /// @todo we need these separately since we *only* want to call this for scatters?
     void addAnalysisObject(const shared_ptr<Scatter1DPtr>& ao);
     void addAnalysisObject(const shared_ptr<Scatter2DPtr>& ao);
     void addAnalysisObject(const shared_ptr<Scatter3DPtr>& ao);
 
     /// Get a data object from the histogram system
     /// @todo Use this default function template arg in C++11
     // template <typename AO=AnalysisObjectPtr>
     template <typename AOPtr>
     const AOPtr& getAnalysisObject(const std::string& name) const {
       for (const auto & ao : analysisObjects()) {
         if (ao.get()->path() == histoPath(name)) return dynamic_cast<const AOPtr&>(ao.get());
       }
       throw Exception("Data object " + histoPath(name) + " not found");
     }
 
     /// Get a data object from the histogram system (non-const)
     /// @todo Use this default function template arg in C++11
     // template <typename AO=AnalysisObjectPtr>
     template <typename AOPtr>
     AOPtr& getAnalysisObject(const std::string& name) {
       for (const auto & ao : analysisObjects()) {
         if (ao.get()->path() == histoPath(name)) return dynamic_cast<AOPtr&>(ao.get());
       }
 
       throw Exception("Data object " + histoPath(name) + " not found");
     }
 
     /// Unregister a data object from the histogram system (by name)
     void removeAnalysisObject(const std::string& path);
 
     /// Unregister a data object from the histogram system (by pointer)
     void removeAnalysisObject(const MultiweightAOPtr& ao);
 
     void removeAnalysisObject(const Scatter1DPtr& ao);
     void removeAnalysisObject(const Scatter2DPtr& ao);
     void removeAnalysisObject(const Scatter3DPtr& ao);
 
 
     /// Get a named Histo1D object from the histogram system
    const Histo1DPtr getHisto1D(const std::string& name) const {
      // Thin typed wrapper over the generic lookup; throws (via
      // getAnalysisObject) if no object with this name is registered.
      return getAnalysisObject<Histo1DPtr>(name);
    }
 
     /// Get a named Histo1D object from the histogram system (non-const)
    Histo1DPtr getHisto1D(const std::string& name) {
      // Thin typed wrapper over the generic lookup; throws (via
      // getAnalysisObject) if no object with this name is registered.
      return getAnalysisObject<Histo1DPtr>(name);
    }
 
    /// Get a Histo1D object from the histogram system by axis ID codes
     const Histo1DPtr getHisto1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
       return getAnalysisObject<Histo1DPtr>(makeAxisCode(datasetId, xAxisId, yAxisId));
     }
 
     /// Get a Histo1D object from the histogram system by axis ID codes (non-const)
     Histo1DPtr getHisto1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) {
       return getAnalysisObject<Histo1DPtr>(makeAxisCode(datasetId, xAxisId, yAxisId));
     }
 
 
     // /// Get a named Histo2D object from the histogram system
     // const Histo2DPtr getHisto2D(const std::string& name) const {
     //   return getAnalysisObject<Histo2D>(name);
     // }
 
     // /// Get a named Histo2D object from the histogram system (non-const)
     // Histo2DPtr getHisto2D(const std::string& name) {
     //   return getAnalysisObject<Histo2D>(name);
     // }
 
     // /// Get a Histo2D object from the histogram system by axis ID codes (non-const)
     // const Histo2DPtr getHisto2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
     //   return getAnalysisObject<Histo2D>(makeAxisCode(datasetId, xAxisId, yAxisId));
     // }
 
     // /// Get a Histo2D object from the histogram system by axis ID codes (non-const)
     // Histo2DPtr getHisto2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) {
     //   return getAnalysisObject<Histo2D>(makeAxisCode(datasetId, xAxisId, yAxisId));
     // }
 
 
     /// Get a named Profile1D object from the histogram system
    const Profile1DPtr getProfile1D(const std::string& name) const {
      // Thin typed wrapper over the generic lookup; throws (via
      // getAnalysisObject) if no object with this name is registered.
      return getAnalysisObject<Profile1DPtr>(name);
    }
 
     /// Get a named Profile1D object from the histogram system (non-const)
    Profile1DPtr getProfile1D(const std::string& name) {
      // Thin typed wrapper over the generic lookup; throws (via
      // getAnalysisObject) if no object with this name is registered.
      return getAnalysisObject<Profile1DPtr>(name);
    }
 
    /// Get a Profile1D object from the histogram system by axis ID codes
     const Profile1DPtr getProfile1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
       return getAnalysisObject<Profile1DPtr>(makeAxisCode(datasetId, xAxisId, yAxisId));
     }
 
     /// Get a Profile1D object from the histogram system by axis ID codes (non-const)
     Profile1DPtr getProfile1D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) {
       return getAnalysisObject<Profile1DPtr>(makeAxisCode(datasetId, xAxisId, yAxisId));
     }
 
 
     // /// Get a named Profile2D object from the histogram system
     // const Profile2DPtr getProfile2D(const std::string& name) const {
     //   return getAnalysisObject<Profile2D>(name);
     // }
 
     // /// Get a named Profile2D object from the histogram system (non-const)
     // Profile2DPtr getProfile2D(const std::string& name) {
     //   return getAnalysisObject<Profile2D>(name);
     // }
 
     // /// Get a Profile2D object from the histogram system by axis ID codes (non-const)
     // const Profile2DPtr getProfile2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
     //   return getAnalysisObject<Profile2D>(makeAxisCode(datasetId, xAxisId, yAxisId));
     // }
 
     // /// Get a Profile2D object from the histogram system by axis ID codes (non-const)
     // Profile2DPtr getProfile2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) {
     //   return getAnalysisObject<Profile2D>(makeAxisCode(datasetId, xAxisId, yAxisId));
     // }
 
 
     /// Get a named Scatter2D object from the histogram system
    const Scatter2DPtr getScatter2D(const std::string& name) const {
      // Thin typed wrapper over the generic lookup; throws (via
      // getAnalysisObject) if no object with this name is registered.
      return getAnalysisObject<Scatter2DPtr>(name);
    }
 
     /// Get a named Scatter2D object from the histogram system (non-const)
    Scatter2DPtr getScatter2D(const std::string& name) {
      // Thin typed wrapper over the generic lookup; throws (via
      // getAnalysisObject) if no object with this name is registered.
      return getAnalysisObject<Scatter2DPtr>(name);
    }
 
    /// Get a Scatter2D object from the histogram system by axis ID codes
     const Scatter2DPtr getScatter2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) const {
       return getAnalysisObject<Scatter2DPtr>(makeAxisCode(datasetId, xAxisId, yAxisId));
     }
 
     /// Get a Scatter2D object from the histogram system by axis ID codes (non-const)
     Scatter2DPtr getScatter2D(unsigned int datasetId, unsigned int xAxisId, unsigned int yAxisId) {
       return getAnalysisObject<Scatter2DPtr>(makeAxisCode(datasetId, xAxisId, yAxisId));
     }
 
     //@}
 
 
   private:
 
     /// Name passed to constructor (used to find .info analysis data file, and as a fallback)
     string _defaultname;
 
     /// Pointer to analysis metadata object
     unique_ptr<AnalysisInfo> _info;
 
     /// Storage of all plot objects
     /// @todo Make this a map for fast lookup by path?
     vector<reference_wrapper<MultiweightAOPtr>> _analysisobjects;
     vector<shared_ptr<AnalysisObjectPtr> > _scatters;
 
     /// @name Cross-section variables
     //@{
     double _crossSection;
     bool _gotCrossSection;
     //@}
 
     /// The controlling AnalysisHandler object.
     AnalysisHandler* _analysishandler;
 
     /// Collection of cached refdata to speed up many autobookings: the
     /// reference data file should only be read once.
     mutable std::map<std::string, YODA::AnalysisObjectPtr> _refdata;
 
 
   private:
 
     /// @name Utility functions
     //@{
 
     /// Get the reference data for this paper and cache it.
     void _cacheRefData() const;
 
     //@}
 
 
     /// The assignment operator is private and must never be called.
     /// In fact, it should not even be implemented.
     Analysis& operator=(const Analysis&);
 
   };
 
 
 }
 
 
 // Include definition of analysis plugin system so that analyses automatically see it when including Analysis.hh
 #include "Rivet/AnalysisBuilder.hh"
 
 /// @def DECLARE_RIVET_PLUGIN
 /// Preprocessor define to prettify the global-object plugin hook mechanism.
 #define DECLARE_RIVET_PLUGIN(clsname) Rivet::AnalysisBuilder<clsname> plugin_ ## clsname
 
 /// @def DECLARE_ALIASED_RIVET_PLUGIN
 /// Preprocessor define to prettify the global-object plugin hook mechanism, with an extra alias name for this analysis.
 // #define DECLARE_ALIASED_RIVET_PLUGIN(clsname, alias) Rivet::AnalysisBuilder<clsname> plugin_ ## clsname ## ( ## #alias ## )
 #define DECLARE_ALIASED_RIVET_PLUGIN(clsname, alias) DECLARE_RIVET_PLUGIN(clsname)( #alias )
 
 /// @def DEFAULT_RIVET_ANA_CONSTRUCTOR
 /// Preprocessor define to prettify the manky constructor with name string argument
 #define DEFAULT_RIVET_ANALYSIS_CTOR(clsname) clsname() : Analysis(# clsname) {}
 
 // DEPRECATED ALIAS
 #define DEFAULT_RIVET_ANA_CONSTRUCTOR(clsname) DEFAULT_RIVET_ANALYSIS_CTOR(clsname)
 
 
 #endif
diff --git a/include/Rivet/AnalysisInfo.hh b/include/Rivet/AnalysisInfo.hh
--- a/include/Rivet/AnalysisInfo.hh
+++ b/include/Rivet/AnalysisInfo.hh
@@ -1,247 +1,258 @@
 // -*- C++ -*-
 #ifndef RIVET_AnalysisInfo_HH
 #define RIVET_AnalysisInfo_HH
 
 #include "Rivet/Config/RivetCommon.hh"
 #include <ostream>
 
 namespace Rivet {
 
 
   class AnalysisInfo {
   public:
 
     /// Static factory method: returns null pointer if no metadata found
     static unique_ptr<AnalysisInfo> make(const std::string& name);
 
     /// @name Standard constructors and destructors.
     //@{
 
     /// The default constructor.
     AnalysisInfo() { clear(); }
 
     /// The destructor.
     ~AnalysisInfo() { }
 
     //@}
 
 
   public:
 
     /// @name Metadata
     /// Metadata is used for querying from the command line and also for
     /// building web pages and the analysis pages in the Rivet manual.
     //@{
 
     /// Get the name of the analysis. By default this is computed using the
     /// experiment, year and Inspire/Spires ID metadata methods.
     std::string name() const {
       if (!_name.empty()) return _name;
       if (!experiment().empty() && !year().empty()) {
         if (!inspireId().empty()) {
           return experiment() + "_" + year() + "_I" + inspireId();
         } else if (!spiresId().empty()) {
           return experiment() + "_" + year() + "_S" + spiresId();
         }
       }
       return "";
     }
 
     /// Set the name of the analysis.
     void setName(const std::string& name) { _name = name; }
 
 
     /// Get the Inspire (SPIRES replacement) ID code for this analysis.
     const std::string& inspireId() const { return _inspireId; }
 
     /// Set the Inspire (SPIRES replacement) ID code for this analysis.
     void setInspireId(const std::string& inspireId) { _inspireId = inspireId; }
 
 
     /// Get the SPIRES ID code for this analysis.
     const std::string& spiresId() const { return _spiresId; }
 
     /// Set the SPIRES ID code for this analysis.
     void setSpiresId(const std::string& spiresId) { _spiresId = spiresId; }
 
 
     /// @brief Names & emails of paper/analysis authors.
     /// Names and email of authors in 'NAME \<EMAIL\>' format. The first
     /// name in the list should be the primary contact person.
     const std::vector<std::string>& authors() const { return _authors; }
 
     /// Set the author list.
     void setAuthors(const std::vector<std::string>& authors) { _authors = authors; }
 
 
     /// @brief Get a short description of the analysis.
     /// Short (one sentence) description used as an index entry.
     /// Use @a description() to provide full descriptive paragraphs
     /// of analysis details.
     const std::string& summary() const { return _summary; }
 
     /// Set the short description for this analysis.
     void setSummary(const std::string& summary) { _summary = summary; }
 
 
     /// @brief Get a full description of the analysis.
     /// Full textual description of this analysis, what it is useful for,
     /// what experimental techniques are applied, etc. Should be treated
     /// as a chunk of restructuredText (http://docutils.sourceforge.net/rst.html),
     /// with equations to be rendered as LaTeX with amsmath operators.
     const std::string& description() const { return _description; }
 
     /// Set the full description for this analysis.
     void setDescription(const std::string& description) { _description = description; }
 
 
     /// @brief Information about the events needed as input for this analysis.
     /// Event types, energies, kinematic cuts, particles to be considered
     /// stable, etc. etc. Should be treated as a restructuredText bullet list
     /// (http://docutils.sourceforge.net/rst.html)
     const std::string& runInfo() const { return _runInfo; }
 
     /// Set the full description for this analysis.
     void setRunInfo(const std::string& runInfo) { _runInfo = runInfo; }
 
 
     /// Beam particle types
     const std::vector<PdgIdPair>& beams() const { return _beams; }
 
     /// Set beam particle types
     void setBeams(const std::vector<PdgIdPair>& beams) { _beams = beams; }
 
 
     /// Sets of valid beam energies
     const std::vector<std::pair<double,double> >& energies() const { return _energies; }
 
     /// Set the valid beam energies
     void setEnergies(const std::vector<std::pair<double, double> >& energies) { _energies = energies; }
 
 
     /// Experiment which performed and published this analysis.
     const std::string& experiment() const { return _experiment; }
 
     /// Set the experiment which performed and published this analysis.
     void setExperiment(const std::string& experiment) { _experiment = experiment; }
 
 
     /// Collider on which the experiment ran.
     const std::string& collider() const { return _collider; }
 
     /// Set the collider on which the experiment ran.
     void setCollider(const std::string& collider) { _collider = collider; }
 
 
     /// @brief When the original experimental analysis was published.
     /// When the refereed paper on which this is based was published,
     /// according to SPIRES.
     const std::string& year() const { return _year; }
 
     /// Set the year in which the original experimental analysis was published.
     void setYear(const std::string& year) { _year = year; }
 
+    /// The integrated luminosity of the analysed data set, in inverse femtobarns (fb^-1)
+    const std::string& luminosityfb() const { return _luminosityfb; }
+
+    /// Set the integrated luminosity of the analysed data set, in inverse femtobarns (fb^-1)
+    void setLuminosityfb(const std::string& luminosityfb) { _luminosityfb = luminosityfb; }
 
     /// Journal and preprint references.
     const std::vector<std::string>& references() const { return _references; }
 
     /// Set the journal and preprint reference list.
     void setReferences(const std::vector<std::string>& references) { _references = references; }
 
+    /// Keywords describing this analysis, for grouping, searching, etc.
+    const std::vector<std::string>& keywords() const { return _keywords; }
 
     /// BibTeX citation key for this article.
     const std::string& bibKey() const { return _bibKey;}
 
     /// Set the BibTeX citation key for this article.
     void setBibKey(const std::string& bibKey) { _bibKey = bibKey; }
 
 
     /// BibTeX citation entry for this article.
     const std::string& bibTeX() const { return _bibTeX; }
 
     /// Set the BibTeX citation entry for this article.
     void setBibTeX(const std::string& bibTeX) { _bibTeX = bibTeX; }
 
 
     /// Whether this analysis is trusted (in any way!)
     const std::string& status() const { return _status; }
 
     /// Set the analysis code status.
     void setStatus(const std::string& status) { _status = status; }
 
 
     /// Any work to be done on this analysis.
     const std::vector<std::string>& todos() const { return _todos; }
 
     /// Set the to-do list.
     void setTodos(const std::vector<std::string>& todos) { _todos = todos; }
 
 
     /// Return true if this analysis needs to know the process cross-section.
     bool needsCrossSection() const { return _needsCrossSection; }
 
     /// Return true if this analysis needs to know the process cross-section.
     void setNeedsCrossSection(bool needXsec) { _needsCrossSection = needXsec; }
 
     //@}
 
 
   private:
 
     std::string _name;
     std::string _spiresId, _inspireId;
     std::vector<std::string> _authors;
     std::string _summary;
     std::string _description;
     std::string _runInfo;
     std::string _experiment;
     std::string _collider;
     std::vector<std::pair<PdgId, PdgId> > _beams;
     std::vector<std::pair<double, double> > _energies;
     std::string _year;
+    std::string _luminosityfb;
     std::vector<std::string> _references;
+    std::vector<std::string> _keywords;
     std::string _bibKey;
     std::string _bibTeX;
     //std::string _bibTeXBody; ///< Was thinking of avoiding duplication of BibKey...
     std::string _status;
     std::vector<std::string> _todos;
     bool _needsCrossSection;
 
     void clear() {
       _name = "";
       _spiresId = "";
       _inspireId = "";
       _authors.clear();
       _summary = "";
       _description = "";
       _runInfo = "";
       _experiment = "";
       _collider = "";
       _beams.clear();
       _energies.clear();
       _year = "";
+      _luminosityfb = "";
       _references.clear();
+      _keywords.clear();
       _bibKey = "";
       _bibTeX = "";
       //_bibTeXBody = "";
       _status = "";
       _todos.clear();
       _needsCrossSection = false;
     }
 
   };
 
 
   /// String representation
   std::string toString(const AnalysisInfo& ai);
 
   /// Stream an AnalysisInfo as a text description
   inline std::ostream& operator<<(std::ostream& os, const AnalysisInfo& ai) {
     os << toString(ai);
     return os;
   }
 
 
 }
 
 #endif
diff --git a/include/Rivet/Jet.hh b/include/Rivet/Jet.hh
--- a/include/Rivet/Jet.hh
+++ b/include/Rivet/Jet.hh
@@ -1,285 +1,285 @@
 // -*- C++ -*-
 #ifndef RIVET_Jet_HH
 #define RIVET_Jet_HH
 
 #include "Rivet/Config/RivetCommon.hh"
 #include "Rivet/Jet.fhh"
 #include "Rivet/Particle.hh"
 #include "Rivet/Tools/Cuts.hh"
 #include "Rivet/Tools/Utils.hh"
 #include "Rivet/Tools/RivetFastJet.hh"
 #include "Rivet/Math/LorentzTrans.hh"
 #include <numeric>
 
 namespace Rivet {
 
 
   /// @brief Representation of a clustered jet of particles.
   class Jet : public ParticleBase {
   public:
 
     /// @name Constructors
     //@{
 
     /// Constructor from a FastJet PseudoJet, with optional full particle constituents information.
     Jet(const fastjet::PseudoJet& pj, const Particles& particles=Particles(), const Particles& tags=Particles()) {
       setState(pj, particles, tags);
     }
 
     /// Set the jet data, with optional full particle information.
     Jet(const FourMomentum& pjet, const Particles& particles=Particles(), const Particles& tags=Particles()) {
       setState(pjet, particles, tags);
     }
 
     /// Set all the jet data, with full particle information.
     /// @deprecated Prefer the form where the 4-vec comes first and the particles list is optional.
     DEPRECATED("Prefer the form where the 4-vec comes first and the particles list is optional.")
     Jet(const Particles& particles, const FourMomentum& pjet) {
       setState(pjet, particles);
     }
 
     /// Default constructor -- only for STL storability
     Jet() { clear(); }
 
     //@}
 
 
     /// @name Access jet constituents
     //@{
 
     /// Number of particles in this jet.
     size_t size() const { return _particles.size(); }
 
     /// Get the particles in this jet.
     Particles& particles() { return _particles; }
     /// Get the particles in this jet (const version)
     const Particles& particles() const { return _particles; }
     /// Get the particles in this jet which pass a cut (const)
     const Particles particles(const Cut& c) const { return filterBy(_particles, c); }
 
     /// Get the particles in this jet (FastJet-like alias)
     Particles& constituents() { return particles(); }
     /// Get the particles in this jet (FastJet-like alias, const version)
     const Particles& constituents() const { return particles(); }
     /// Get the particles in this jet which pass a cut (FastJet-like alias, const)
     const Particles constituents(const Cut& c) const { return particles(c); }
 
     /// Check whether this jet contains a particular particle.
     bool containsParticle(const Particle& particle) const;
     /// Nicer alias for containsParticleId
     bool containsPID(const Particle& particle) const { return containsParticle(particle); }
 
     /// Check whether this jet contains a certain particle type.
     bool containsParticleId(PdgId pid) const;
     /// Nicer alias for containsParticleId
     bool containsPID(PdgId pid) const { return containsParticleId(pid); }
 
     /// Check whether this jet contains at least one of certain particle types.
     bool containsParticleId(const vector<PdgId>& pids) const;
     /// Nicer alias for containsParticleId
     bool containsPID(const vector<PdgId>& pids) const { return containsParticleId(pids); }
 
     //@}
 
 
     /// @name Tagging
     ///
     /// @note General sources of tag particles are planned. The default jet finding
     /// adds b-hadron, c-hadron, and tau tags by ghost association.
     //@{
 
     /// @brief Particles which have been tag-matched to this jet
     Particles& tags() { return _tags; }
     /// @brief Particles which have been tag-matched to this jet (const version)
     const Particles& tags() const { return _tags; }
     /// @brief Particles which have been tag-matched to this jet _and_ pass a selector function
     ///
     /// @note Note the less efficient return by value, due to the filtering.
     template <typename FN>
     Particles tags(const FN& f) const { return filter_select(tags(), f); }
     /// @brief Particles which have been tag-matched to this jet _and_ pass a Cut
     ///
     /// @note Note the less efficient return by value, due to the cut-pass filtering.
     Particles tags(const Cut& c) const;
 
 
     /// @brief b particles which have been tag-matched to this jet (and pass an optional Cut)
     ///
     /// The default jet finding adds b-hadron tags by ghost association.
     Particles bTags(const Cut& c=Cuts::open()) const;
     /// @brief b particles which have been tag-matched to this jet _and_ pass a selector function
     template <typename FN>
     Particles bTags(const FN& f) const { return filter_select(bTags(), f); }
 
     /// Does this jet have at least one b-tag (that passes an optional Cut)?
     bool bTagged(const Cut& c=Cuts::open()) const { return !bTags(c).empty(); }
     /// Does this jet have at least one b-tag (that passes the supplied selector function)?
     template <typename FN>
-    Particles bTagged(const FN& f) const { return !bTags(f).empty(); }
+    bool bTagged(const FN& f) const { return !bTags(f).empty(); }
 
 
     /// @brief c (and not b) particles which have been tag-matched to this jet (and pass an optional Cut)
     ///
     /// The default jet finding adds c-hadron tags by ghost association.
     Particles cTags(const Cut& c=Cuts::open()) const;
     /// @brief c (and not b) particles which have been tag-matched to this jet and pass a selector function
     template <typename FN>
     Particles cTags(const FN& f) const { return filter_select(cTags(), f); }
 
     /// Does this jet have at least one c-tag (that passes an optional Cut)?
     bool cTagged(const Cut& c=Cuts::open()) const { return !cTags(c).empty(); }
     /// Does this jet have at least one c-tag (that passes the supplied selector function)?
     template <typename FN>
     bool cTagged(const FN& f) const { return !cTags(f).empty(); }
 
 
     /// @brief Tau particles which have been tag-matched to this jet (and pass an optional Cut)
     ///
     /// The default jet finding adds tau tags by ghost association.
     Particles tauTags(const Cut& c=Cuts::open()) const;
     /// @brief Tau particles which have been tag-matched to this jet and pass a selector function
     template <typename FN>
     Particles tauTags(const FN& f) const { return filter_select(tauTags(), f); }
 
     /// Does this jet have at least one tau-tag (that passes an optional Cut)?
     bool tauTagged(const Cut& c=Cuts::open()) const { return !tauTags(c).empty(); }
     /// Does this jet have at least one tau-tag (that passes the supplied selector function)?
     template <typename FN>
     bool tauTagged(const FN& f) const { return !tauTags(f).empty(); }
 
 
     /// @brief Check whether this jet contains a bottom-flavoured hadron.
     ///
     /// @deprecated The bTags() or bTagged() function is probably what you want
     /// for tagging. This one ignores the tags() list and draws conclusions
     /// based directly on the jet constituents; the other gives a much better match
     /// to typical experimental methods.
     ///
     /// @note The decision is made by first trying to find a bottom-flavoured particle
     /// in the particles list. Most likely this will fail unless bottom hadrons
     /// are set stable. If @a include_decay_products is true (the default), a
     /// fallback is attempted, using the post-hadronization ancestor history of
     /// all constituents.
     //DEPRECATED("Prefer the bTags() or bTagged() function")
     bool containsBottom(bool include_decay_products=true) const;
 
     /// @brief Check whether this jet contains a charm-flavoured hadron.
     ///
     /// @deprecated The cTags() or cTagged() function is probably what you want
     /// for tagging. This one ignores the tags() list and draws conclusions
     /// based directly on the jet constituents; the other gives a much better match
     /// to typical experimental methods.
     ///
     /// @note The decision is made by first trying to find a charm-flavoured particle
     /// in the particles list. Most likely this will fail unless charmed hadrons
     /// are set stable. If @a include_decay_products is true (the default), a
     /// fallback is attempted, using the post-hadronization ancestor history of
     /// all constituents.
     //DEPRECATED("Prefer the cTags() or cTagged() function")
     bool containsCharm(bool include_decay_products=true) const;
 
     //@}
 
 
     /// @name Effective jet 4-vector properties
     //@{
 
     /// Get equivalent single momentum four-vector.
     const FourMomentum& momentum() const { return _momentum; }
 
     /// Apply an active Lorentz transform to this jet
     /// @note The Rivet jet momentum, constituent particles, and tag particles will be modified.
     /// @warning The FastJet cluster sequence and pseudojets will not be modified: don't use them after transformation!
     Jet& transformBy(const LorentzTransform& lt);
 
     /// Get the total energy of this jet.
     double totalEnergy() const { return momentum().E(); }
 
     /// Get the energy carried in this jet by neutral particles.
     double neutralEnergy() const;
 
     /// Get the energy carried in this jet by hadrons.
     double hadronicEnergy() const;
 
     //@}
 
 
     /// @name Interaction with FastJet
     //@{
 
     /// Access the internal FastJet3 PseudoJet (as a const reference)
     const fastjet::PseudoJet& pseudojet() const { return _pseudojet; }
 
     /// Cast operator to FastJet3 PseudoJet (as a const reference)
     operator const fastjet::PseudoJet& () const { return pseudojet(); }
 
     //@}
 
 
     /// @name Set the jet constituents and properties
     //@{
 
     /// @brief Set the jet data from a FastJet PseudoJet, with optional particle constituents and tags lists.
     ///
     /// @note The particles() list will be extracted from PseudoJet constituents
     /// by default, making use of an attached user info if one is found.
     Jet& setState(const fastjet::PseudoJet& pj, const Particles& particles=Particles(), const Particles& tags=Particles());
 
     /// Set all the jet data, with optional full particle constituent and tag information.
     Jet& setState(const FourMomentum& mom, const Particles& particles, const Particles& tags=Particles());
 
     /// @deprecated Prefer the 4-mom first-arg versions. Remove in Rivet v3
     DEPRECATED("Prefer the 4-mom first-arg versions")
     Jet& setState(const Particles& particles, const FourMomentum& mom) { return setState(mom, particles); }
 
     /// @brief Set the particles collection with full particle information.
     ///
     /// If set, this overrides particle info extracted from the PseudoJet
     Jet& setParticles(const Particles& particles);
     Jet& setConstituents(const Particles& particles) { return setParticles(particles); }
 
     /// Reset this jet as empty.
     Jet& clear();
 
     //@}
 
 
   private:
 
     /// FJ3 PseudoJet member to unify PseudoJet and Jet
     fastjet::PseudoJet _pseudojet;
 
     /// Full constituent particle information. (Filled from PseudoJet if possible.)
     /// @todo Make these mutable or similar? Add a flag to force a cache rebuild?
     Particles _particles;
 
     /// Particles used to tag this jet (can be anything, but c and b hadrons are the most common)
     Particles _tags;
 
     /// Effective jet 4-vector (just for caching)
     mutable FourMomentum _momentum;
 
   };
 
 
   /// @name String representation and streaming support
   //@{
 
   /// Represent a Jet as a string.
   std::string to_str(const Jet& j);
 
   /// Allow a Jet to be passed to an ostream.
   inline std::ostream& operator<<(std::ostream& os, const Jet& j) {
     os << to_str(j);
     return os;
   }
 
   //@}
 
 
 }
 
 
 #include "Rivet/Tools/JetUtils.hh"
 
 #endif
diff --git a/include/Rivet/Math/MathUtils.hh b/include/Rivet/Math/MathUtils.hh
--- a/include/Rivet/Math/MathUtils.hh
+++ b/include/Rivet/Math/MathUtils.hh
@@ -1,593 +1,597 @@
 // -*- C++ -*-
 #ifndef RIVET_MathUtils_HH
 #define RIVET_MathUtils_HH
 
 #include "Rivet/Math/MathHeader.hh"
 #include <type_traits>
 #include <cassert>
 
 namespace Rivet {
 
 
   /// @name Comparison functions for safe (floating point) equality tests
   //@{
 
   /// @brief Compare a number to zero
   ///
   /// This version for floating point types has a degree of fuzziness expressed
   /// by the absolute @a tolerance parameter, for floating point safety.
   template <typename NUM>
   inline typename std::enable_if<std::is_floating_point<NUM>::value, bool>::type
   isZero(NUM val, double tolerance=1e-8) {
     return fabs(val) < tolerance;
   }
 
   /// @brief Compare a number to zero
   ///
   /// SFINAE template specialisation for integers, since there is no FP
   /// precision issue.
   template <typename NUM>
   inline typename std::enable_if<std::is_integral<NUM>::value, bool>::type
     isZero(NUM val, double UNUSED(tolerance)=1e-8) {
     return val == 0;
   }
 
 
   /// @brief Compare two numbers for equality with a degree of fuzziness
   ///
   /// This version for floating point types (if any argument is FP) has a degree
   /// of fuzziness expressed by the fractional @a tolerance parameter, for
   /// floating point safety.
   template <typename N1, typename N2>
   inline typename std::enable_if<
     std::is_arithmetic<N1>::value && std::is_arithmetic<N2>::value &&
    (std::is_floating_point<N1>::value || std::is_floating_point<N2>::value), bool>::type
   fuzzyEquals(N1 a, N2 b, double tolerance=1e-5) {
     const double absavg = (std::abs(a) + std::abs(b))/2.0;
     const double absdiff = std::abs(a - b);
     const bool rtn = (isZero(a) && isZero(b)) || absdiff < tolerance*absavg;
     return rtn;
   }
 
   /// @brief Compare two numbers for equality with a degree of fuzziness
   ///
   /// Simpler SFINAE template specialisation for integers, since there is no FP
   /// precision issue.
   template <typename N1, typename N2>
   inline typename std::enable_if<
     std::is_integral<N1>::value && std::is_integral<N2>::value, bool>::type
   fuzzyEquals(N1 a, N2 b, double UNUSED(tolerance)=1e-5) {
     return a == b;
   }
 
 
   /// @brief Compare two numbers for >= with a degree of fuzziness
   ///
   /// The @a tolerance parameter on the equality test is as for @c fuzzyEquals.
   template <typename N1, typename N2>
   inline typename std::enable_if<
     std::is_arithmetic<N1>::value && std::is_arithmetic<N2>::value, bool>::type
   fuzzyGtrEquals(N1 a, N2 b, double tolerance=1e-5) {
     return a > b || fuzzyEquals(a, b, tolerance);
   }
 
 
   /// @brief Compare two floating point numbers for <= with a degree of fuzziness
   ///
   /// The @a tolerance parameter on the equality test is as for @c fuzzyEquals.
   template <typename N1, typename N2>
   inline typename std::enable_if<
     std::is_arithmetic<N1>::value && std::is_arithmetic<N2>::value, bool>::type
   fuzzyLessEquals(N1 a, N2 b, double tolerance=1e-5) {
     return a < b || fuzzyEquals(a, b, tolerance);
   }
 
   //@}
 
 
   /// @name Ranges and intervals
   //@{
 
   /// Represents whether an interval is open (non-inclusive) or closed (inclusive).
   ///
   /// For example, the interval \f$ [0, \pi) \f$ is closed (an inclusive
   /// boundary) at 0, and open (a non-inclusive boundary) at \f$ \pi \f$.
   enum RangeBoundary { OPEN=0, SOFT=0, CLOSED=1, HARD=1 };
 
   /// @brief Determine if @a value is in the range @a low to @a high, for floating point numbers
   ///
   /// Interval boundary types are defined by @a lowbound and @a highbound.
   template <typename N1, typename N2, typename N3>
   inline typename std::enable_if<
     std::is_arithmetic<N1>::value && std::is_arithmetic<N2>::value && std::is_arithmetic<N3>::value, bool>::type
   inRange(N1 value, N2 low, N3 high,
           RangeBoundary lowbound=CLOSED, RangeBoundary highbound=OPEN) {
     if (lowbound == OPEN && highbound == OPEN) {
       return (value > low && value < high);
     } else if (lowbound == OPEN && highbound == CLOSED) {
       return (value > low && value <= high);
     } else if (lowbound == CLOSED && highbound == OPEN) {
       return (value >= low && value < high);
     } else { // if (lowbound == CLOSED && highbound == CLOSED) {
       return (value >= low && value <= high);
     }
   }
 
   /// @brief Determine if @a value is in the range @a low to @a high, for floating point numbers
   ///
   /// Interval boundary types are defined by @a lowbound and @a highbound.
   /// Closed intervals are compared fuzzily.
   template <typename N1, typename N2, typename N3>
   inline typename std::enable_if<
     std::is_arithmetic<N1>::value && std::is_arithmetic<N2>::value && std::is_arithmetic<N3>::value, bool>::type
   fuzzyInRange(N1 value, N2 low, N3 high,
                RangeBoundary lowbound=CLOSED, RangeBoundary highbound=OPEN) {
     if (lowbound == OPEN && highbound == OPEN) {
       return (value > low && value < high);
     } else if (lowbound == OPEN && highbound == CLOSED) {
       return (value > low && fuzzyLessEquals(value, high));
     } else if (lowbound == CLOSED && highbound == OPEN) {
       return (fuzzyGtrEquals(value, low) && value < high);
     } else { // if (lowbound == CLOSED && highbound == CLOSED) {
       return (fuzzyGtrEquals(value, low) && fuzzyLessEquals(value, high));
     }
   }
 
   /// Alternative version of inRange which accepts a pair for the range arguments.
   template <typename N1, typename N2, typename N3>
   inline typename std::enable_if<
     std::is_arithmetic<N1>::value && std::is_arithmetic<N2>::value && std::is_arithmetic<N3>::value, bool>::type
   inRange(N1 value, pair<N2, N3> lowhigh,
           RangeBoundary lowbound=CLOSED, RangeBoundary highbound=OPEN) {
     return inRange(value, lowhigh.first, lowhigh.second, lowbound, highbound);
   }
 
 
   // Alternative forms, with snake_case names and boundary types in names rather than as args -- from MCUtils
 
   /// @brief Boolean function to determine if @a value is within the given range
   ///
   /// @note The interval is closed (inclusive) at the low end, and open (exclusive) at the high end.
   template <typename N1, typename N2, typename N3>
   inline typename std::enable_if<
     std::is_arithmetic<N1>::value && std::is_arithmetic<N2>::value && std::is_arithmetic<N3>::value, bool>::type
   in_range(N1 val, N2 low, N3 high) {
     return inRange(val, low, high, CLOSED, OPEN);
   }
 
   /// @brief Boolean function to determine if @a value is within the given range
   ///
   /// @note The interval is closed at both ends.
   template <typename N1, typename N2, typename N3>
   inline typename std::enable_if<
     std::is_arithmetic<N1>::value && std::is_arithmetic<N2>::value && std::is_arithmetic<N3>::value, bool>::type
   in_closed_range(N1 val, N2 low, N3 high) {
     return inRange(val, low, high, CLOSED, CLOSED);
   }
 
   /// @brief Boolean function to determine if @a value is within the given range
   ///
   /// @note The interval is open at both ends.
   template <typename N1, typename N2, typename N3>
   inline typename std::enable_if<
     std::is_arithmetic<N1>::value && std::is_arithmetic<N2>::value && std::is_arithmetic<N3>::value, bool>::type
   in_open_range(N1 val, N2 low, N3 high) {
     return inRange(val, low, high, OPEN, OPEN);
   }
 
   /// @todo Add pair-based versions of the named range-boundary functions
 
   //@}
 
 
   /// @name Miscellaneous numerical helpers
   //@{
 
   /// Named number-type squaring operation.
   template <typename NUM>
   inline typename std::enable_if<std::is_arithmetic<NUM>::value, NUM>::type
   sqr(NUM a) {
     return a*a;
   }
 
   /// @brief Named number-type addition in quadrature operation.
   ///
   /// @note Result has the sqrt operation applied.
   /// @todo When std::common_type can be used, generalise to multiple numeric types with appropriate return type.
   // template <typename N1, typename N2>
   template <typename NUM>
   inline typename std::enable_if<std::is_arithmetic<NUM>::value, NUM>::type
   //std::common_type<N1, N2>::type
   add_quad(NUM a, NUM b) {
     return sqrt(a*a + b*b);
   }
 
   /// Named number-type addition in quadrature operation.
   ///
   /// @note Result has the sqrt operation applied.
   /// @todo When std::common_type can be used, generalise to multiple numeric types with appropriate return type.
   // template <typename N1, typename N2>
   template <typename NUM>
   inline typename std::enable_if<std::is_arithmetic<NUM>::value, NUM>::type
   //std::common_type<N1, N2, N3>::type
   add_quad(NUM a, NUM b, NUM c) {
     return sqrt(a*a + b*b + c*c);
   }
 
   /// Return a/b, or @a fail if b = 0
   /// @todo When std::common_type can be used, generalise to multiple numeric types with appropriate return type.
   inline double safediv(double num, double den, double fail=0.0) {
     return (!isZero(den)) ? num/den : fail;
   }
 
   /// A more efficient version of pow for raising numbers to integer powers.
   template <typename NUM>
   inline typename std::enable_if<std::is_arithmetic<NUM>::value, NUM>::type
   intpow(NUM val, unsigned int exp) {
     assert(exp >= 0);
     if (exp == 0) return (NUM) 1;
     else if (exp == 1) return val;
     return val * intpow(val, exp-1);
   }
 
   /// Find the sign of a number
   template <typename NUM>
   inline typename std::enable_if<std::is_arithmetic<NUM>::value, int>::type
   sign(NUM val) {
     if (isZero(val)) return ZERO;
     const int valsign = (val > 0) ? PLUS : MINUS;
     return valsign;
   }
 
   //@}
 
 
   /// @name Physics statistical distributions
   //@{
 
   /// @brief CDF for the Breit-Wigner distribution
   inline double cdfBW(double x, double mu, double gamma) {
     // normalize to (0;1) distribution
     const double xn = (x - mu)/gamma;
     return std::atan(xn)/M_PI + 0.5;
   }
 
   /// @brief Inverse CDF for the Breit-Wigner distribution
   inline double invcdfBW(double p, double mu, double gamma) {
     const double xn = std::tan(M_PI*(p-0.5));
     return gamma*xn + mu;
   }
 
   //@}
 
 
   /// @name Binning helper functions
   //@{
 
   /// @brief Make a list of @a nbins + 1 values equally spaced between @a start and @a end inclusive.
   ///
   /// NB. The arg ordering and the meaning of the nbins variable is "histogram-like",
   /// as opposed to the Numpy/Matlab version.
   inline vector<double> linspace(size_t nbins, double start, double end, bool include_end=true) {
     assert(end >= start);
     assert(nbins > 0);
     vector<double> rtn;
     const double interval = (end-start)/static_cast<double>(nbins);
     for (size_t i = 0; i < nbins; ++i) {
       rtn.push_back(start + i*interval);
     }
     assert(rtn.size() == nbins);
     if (include_end) rtn.push_back(end); //< exact end, not result of n * interval
     return rtn;
   }
 
 
   /// @brief Make a list of @a nbins + 1 values exponentially spaced between @a start and @a end inclusive.
   ///
   /// NB. The arg ordering and the meaning of the nbins variable is "histogram-like",
   /// as opposed to the Numpy/Matlab version, and the start and end arguments are expressed
   /// in "normal" space, rather than as the logarithms of the start/end values as in Numpy/Matlab.
   inline vector<double> logspace(size_t nbins, double start, double end, bool include_end=true) {
     assert(end >= start);
     assert(start > 0);
     assert(nbins > 0);
     const double logstart = std::log(start);
     const double logend = std::log(end);
     const vector<double> logvals = linspace(nbins, logstart, logend, false);
     assert(logvals.size() == nbins);
     vector<double> rtn; rtn.reserve(nbins+1);
     rtn.push_back(start); //< exact start, not exp(log(start))
     for (size_t i = 1; i < logvals.size(); ++i) {
       rtn.push_back(std::exp(logvals[i]));
     }
     assert(rtn.size() == nbins);
     if (include_end) rtn.push_back(end); //< exact end, not exp(n * loginterval)
     return rtn;
   }
 
 
   /// @brief Make a list of @a nbins + 1 values spaced for equal area
   /// Breit-Wigner binning between @a start and @a end inclusive. @a
   /// mu and @a gamma are the Breit-Wigner parameters.
   ///
   /// @note The arg ordering and the meaning of the nbins variable is "histogram-like",
   /// as opposed to the Numpy/Matlab version, and the start and end arguments are expressed
   /// in "normal" space.
   inline vector<double> bwspace(size_t nbins, double start, double end, double mu, double gamma) {
     assert(end >= start);
     assert(nbins > 0);
     const double pmin = cdfBW(start, mu, gamma);
     const double pmax = cdfBW(end,   mu, gamma);
     const vector<double> edges = linspace(nbins, pmin, pmax);
     assert(edges.size() == nbins+1);
     vector<double> rtn;
     for (double edge : edges) {
       rtn.push_back(invcdfBW(edge, mu, gamma));
     }
     assert(rtn.size() == nbins+1);
     return rtn;
   }
 
 
   /// @brief Return the bin index of the given value, @a val, given a vector of bin edges
   ///
+  /// An underflow always returns -1. If allow_overflow is false (default) an overflow
+  /// also returns -1; otherwise it returns Nedge-1, the index of an inclusive bin
+  /// starting at the last edge.
+  ///
   /// @note The @a binedges vector must be sorted
   /// @todo Use std::common_type<NUM1, NUM2>::type x = val; ?
   template <typename NUM1, typename NUM2>
-  inline typename std::enable_if<std::is_arithmetic<NUM1>::value && std::is_floating_point<NUM2>::value, int>::type
-    binIndex(NUM1 val, const vector<NUM2>& binedges, bool allow_overflow=false) {
+  inline typename std::enable_if<std::is_arithmetic<NUM1>::value && std::is_arithmetic<NUM2>::value, int>::type
+  binIndex(NUM1 val, const vector<NUM2>& binedges, bool allow_overflow=false) {
     if (val < binedges.front()) return -1; ///< Below/out of histo range
     if (val >= binedges.back()) return allow_overflow ? int(binedges.size())-1 : -1; ///< Above/out of histo range
     return std::distance(binedges.begin(), --std::upper_bound(binedges.begin(), binedges.end(), val));
     //
     // int index = -1;
     // for (size_t i = 1; i < binedges.size(); ++i) {
     //   if (val < binedges[i]) {
     //     index = i-1;
     //     break;
     //   }
     // }
     // assert(inRange(index, -1, int(binedges.size())-1));
     // return index;
   }
 
   //@}
 
 
   /// @name Discrete statistics functions
   //@{
 
   /// Calculate the median of a sample
   /// @todo Support multiple container types via SFINAE
   template <typename NUM>
   inline typename std::enable_if<std::is_arithmetic<NUM>::value, NUM>::type
   median(const vector<NUM>& sample) {
     if (sample.empty()) throw RangeError("Can't compute median of an empty set");
     vector<NUM> tmp = sample;
     std::sort(tmp.begin(), tmp.end());
     const size_t imid = tmp.size()/2; // len1->idx0, len2->idx1, len3->idx1, len4->idx2, ...
     if (sample.size() % 2 == 0) return (tmp.at(imid-1) + tmp.at(imid)) / 2.0;
     else return tmp.at(imid);
   }
 
 
   /// Calculate the mean of a sample
   /// @todo Support multiple container types via SFINAE
   template <typename NUM>
   inline typename std::enable_if<std::is_arithmetic<NUM>::value, double>::type
   mean(const vector<NUM>& sample) {
     if (sample.empty()) throw RangeError("Can't compute mean of an empty set");
     double mean = 0.0;
     for (size_t i = 0; i < sample.size(); ++i) {
       mean += sample[i];
     }
     return mean/sample.size();
   }
 
   // Calculate the error on the mean, assuming Poissonian errors
   /// @todo Support multiple container types via SFINAE
   template <typename NUM>
   inline typename std::enable_if<std::is_arithmetic<NUM>::value, double>::type
   mean_err(const vector<NUM>& sample) {
     if (sample.empty()) throw RangeError("Can't compute mean_err of an empty set");
     double mean_e = 0.0;
     for (size_t i = 0; i < sample.size(); ++i) {
       mean_e += sqrt(sample[i]);
     }
     return mean_e/sample.size();
   }
 
 
   /// Calculate the covariance (variance) between two samples
   /// @todo Support multiple container types via SFINAE
   template <typename NUM>
   inline typename std::enable_if<std::is_arithmetic<NUM>::value, double>::type
   covariance(const vector<NUM>& sample1, const vector<NUM>& sample2) {
     if (sample1.empty() || sample2.empty()) throw RangeError("Can't compute covariance of an empty set");
     if (sample1.size() != sample2.size()) throw RangeError("Sizes of samples must be equal for covariance calculation");
     const double mean1 = mean(sample1);
     const double mean2 = mean(sample2);
     const size_t N = sample1.size();
     double cov = 0.0;
     for (size_t i = 0; i < N; i++) {
       const double cov_i = (sample1[i] - mean1)*(sample2[i] - mean2);
       cov += cov_i;
     }
     if (N > 1) return cov/(N-1);
     else return 0.0;
   }
 
    /// Calculate the error on the covariance (variance) of two samples, assuming poissonian errors
    ///
    /// Appears to propagate per-entry Poisson uncertainties (sqrt(x_i), offset by the
    /// mean error) through the sample-covariance sum via a product-rule expansion —
    /// NOTE(review): formula assumed from structure, confirm against the intended statistics.
    /// @throws RangeError if either sample is empty or the sizes differ
    /// @todo Support multiple container types via SFINAE
    template <typename NUM>
    inline typename std::enable_if<std::is_arithmetic<NUM>::value, double>::type
    covariance_err(const vector<NUM>& sample1, const vector<NUM>& sample2) {
      if (sample1.empty() || sample2.empty()) throw RangeError("Can't compute covariance_err of an empty set");
      if (sample1.size() != sample2.size()) throw RangeError("Sizes of samples must be equal for covariance_err calculation");
      const double mean1 = mean(sample1);
      const double mean2 = mean(sample2);
      const double mean1_e = mean_err(sample1);
      const double mean2_e = mean_err(sample2);
      const size_t N = sample1.size();
      double cov_e = 0.0;
      for (size_t i = 0; i < N; i++) {
        // Product-rule style term: d(x1)*dev2 + dev1*d(x2)
        const double cov_i = (sqrt(sample1[i]) - mean1_e)*(sample2[i] - mean2) +
          (sample1[i] - mean1)*(sqrt(sample2[i]) - mean2_e);
        cov_e += cov_i;
      }
      // Same 1/(N-1) normalisation as covariance(); zero for a single entry
      if (N > 1) return cov_e/(N-1);
      else return 0.0;
    }
 
 
   /// Calculate the correlation strength between two samples
   /// @todo Support multiple container types via SFINAE
   template <typename NUM>
   inline typename std::enable_if<std::is_arithmetic<NUM>::value, double>::type
   correlation(const vector<NUM>& sample1, const vector<NUM>& sample2) {
     const double cov = covariance(sample1, sample2);
     const double var1 = covariance(sample1, sample1);
     const double var2 = covariance(sample2, sample2);
     const double correlation = cov/sqrt(var1*var2);
     const double corr_strength = correlation*sqrt(var2/var1);
     return corr_strength;
   }
 
   /// Calculate the error of the correlation strength between two samples assuming Poissonian errors
   /// @todo Support multiple container types via SFINAE
   template <typename NUM>
   inline typename std::enable_if<std::is_arithmetic<NUM>::value, double>::type
   correlation_err(const vector<NUM>& sample1, const vector<NUM>& sample2) {
     const double cov = covariance(sample1, sample2);
     const double var1 = covariance(sample1, sample1);
     const double var2 = covariance(sample2, sample2);
     const double cov_e = covariance_err(sample1, sample2);
     const double var1_e = covariance_err(sample1, sample1);
     const double var2_e = covariance_err(sample2, sample2);
 
     // Calculate the correlation
     const double correlation = cov/sqrt(var1*var2);
     // Calculate the error on the correlation
     const double correlation_err = cov_e/sqrt(var1*var2) -
       cov/(2*pow(3./2., var1*var2)) * (var1_e * var2 + var1 * var2_e);
 
     // Calculate the error on the correlation strength
     const double corr_strength_err = correlation_err*sqrt(var2/var1) +
       correlation/(2*sqrt(var2/var1)) * (var2_e/var1 - var2*var1_e/pow(2, var2));
 
     return corr_strength_err;
   }
 
   //@}
 
 
   /// @name Angle range mappings
   //@{
 
    /// @brief Reduce any number to the range [-2PI, 2PI]
    ///
    /// Achieved by repeated addition or subtraction of 2PI as required. Used to
    /// normalise angular measures.
    ///
    /// @note Internal helper (leading underscore): fmod preserves the sign of the
    /// input, hence the symmetric [-2PI, 2PI] output range.
    inline double _mapAngleM2PITo2Pi(double angle) {
      double rtn = fmod(angle, TWOPI);
      // Snap numerically-tiny results to exactly zero
      if (isZero(rtn)) return 0;
      assert(rtn >= -TWOPI && rtn <= TWOPI);
      return rtn;
    }
 
   /// Map an angle into the range (-PI, PI].
   inline double mapAngleMPiToPi(double angle) {
     double rtn = _mapAngleM2PITo2Pi(angle);
     if (isZero(rtn)) return 0;
     if (rtn > PI) rtn -= TWOPI;
     if (rtn <= -PI) rtn += TWOPI;
     assert(rtn > -PI && rtn <= PI);
     return rtn;
   }
 
   /// Map an angle into the range [0, 2PI).
   inline double mapAngle0To2Pi(double angle) {
     double rtn = _mapAngleM2PITo2Pi(angle);
     if (isZero(rtn)) return 0;
     if (rtn < 0) rtn += TWOPI;
     if (rtn == TWOPI) rtn = 0;
     assert(rtn >= 0 && rtn < TWOPI);
     return rtn;
   }
 
   /// Map an angle into the range [0, PI].
   inline double mapAngle0ToPi(double angle) {
     double rtn = fabs(mapAngleMPiToPi(angle));
     if (isZero(rtn)) return 0;
     assert(rtn > 0 && rtn <= PI);
     return rtn;
   }
 
   /// Map an angle into the enum-specified range.
   inline double mapAngle(double angle, PhiMapping mapping) {
     switch (mapping) {
     case MINUSPI_PLUSPI:
       return mapAngleMPiToPi(angle);
     case ZERO_2PI:
       return mapAngle0To2Pi(angle);
     case ZERO_PI:
       return mapAngle0To2Pi(angle);
     default:
       throw Rivet::UserError("The specified phi mapping scheme is not implemented");
     }
   }
 
   //@}
 
 
   /// @name Phase space measure helpers
   //@{
 
    /// @brief Calculate the difference between two angles in radians
    ///
    /// Returns in the range [0, PI], i.e. the absolute separation
    /// after wrapping around the circle.
    inline double deltaPhi(double phi1, double phi2) {
      return mapAngle0ToPi(phi1 - phi2);
    }
 
    /// Calculate the abs difference between two pseudorapidities
    ///
    /// @note Just a cosmetic name for analysis code clarity; no angular
    /// wrapping is needed since eta is unbounded.
    inline double deltaEta(double eta1, double eta2) {
      return fabs(eta1 - eta2);
    }
 
    /// Calculate the abs difference between two rapidities
    ///
    /// @note Just a cosmetic name for analysis code clarity.
    inline double deltaRap(double y1, double y2) {
      return fabs(y1 - y2);
    }
 
   /// Calculate the distance between two points in 2D rapidity-azimuthal
   /// ("\f$ \eta-\phi \f$") space. The phi values are given in radians.
   inline double deltaR(double rap1, double phi1, double rap2, double phi2) {
     const double dphi = deltaPhi(phi1, phi2);
     return sqrt( sqr(rap1-rap2) + sqr(dphi) );
   }
 
   /// Calculate a rapidity value from the supplied energy @a E and longitudinal momentum @a pz.
   inline double rapidity(double E, double pz) {
     if (isZero(E - pz)) {
       throw std::runtime_error("Divergent positive rapidity");
       return MAXDOUBLE;
     }
     if (isZero(E + pz)) {
       throw std::runtime_error("Divergent negative rapidity");
       return -MAXDOUBLE;
     }
     return 0.5*log((E+pz)/(E-pz));
   }
 
   //@}
 
 
 }
 
 
 #endif
diff --git a/include/Rivet/Math/Vector3.hh b/include/Rivet/Math/Vector3.hh
--- a/include/Rivet/Math/Vector3.hh
+++ b/include/Rivet/Math/Vector3.hh
@@ -1,330 +1,358 @@
 #ifndef RIVET_MATH_VECTOR3
 #define RIVET_MATH_VECTOR3
 
 #include "Rivet/Math/MathHeader.hh"
 #include "Rivet/Math/MathUtils.hh"
 #include "Rivet/Math/VectorN.hh"
 
 namespace Rivet {
 
 
   class Vector3;
   typedef Vector3 ThreeVector;
   class Matrix3;
 
   Vector3 multiply(const double, const Vector3&);
   Vector3 multiply(const Vector3&, const double);
   Vector3 add(const Vector3&, const Vector3&);
   Vector3 operator*(const double, const Vector3&);
   Vector3 operator*(const Vector3&, const double);
   Vector3 operator/(const Vector3&, const double);
   Vector3 operator+(const Vector3&, const Vector3&);
   Vector3 operator-(const Vector3&, const Vector3&);
 
 
   /// @brief Three-dimensional specialisation of Vector.
   class Vector3 : public Vector<3> {
 
     friend class Matrix3;
     friend Vector3 multiply(const double, const Vector3&);
     friend Vector3 multiply(const Vector3&, const double);
     friend Vector3 add(const Vector3&, const Vector3&);
     friend Vector3 subtract(const Vector3&, const Vector3&);
 
   public:
     Vector3() : Vector<3>() { }
 
     template<typename V3>
     Vector3(const V3& other) {
       this->setX(other.x());
       this->setY(other.y());
       this->setZ(other.z());
     }
 
     Vector3(const Vector<3>& other) {
       this->setX(other.get(0));
       this->setY(other.get(1));
       this->setZ(other.get(2));
     }
 
     Vector3(double x, double y, double z) {
       this->setX(x);
       this->setY(y);
       this->setZ(z);
     }
 
     ~Vector3() { }
 
+
   public:
+
     static Vector3 mkX() { return Vector3(1,0,0); }
     static Vector3 mkY() { return Vector3(0,1,0); }
     static Vector3 mkZ() { return Vector3(0,0,1); }
 
+
   public:
+
     double x() const { return get(0); }
     double y() const { return get(1); }
     double z() const { return get(2); }
     Vector3& setX(double x) { set(0, x); return *this; }
     Vector3& setY(double y) { set(1, y); return *this; }
     Vector3& setZ(double z) { set(2, z); return *this; }
 
+
+    /// Dot-product with another vector
     double dot(const Vector3& v) const {
       return _vec.dot(v._vec);
     }
 
+    /// Cross-product with another vector
     Vector3 cross(const Vector3& v) const {
       Vector3 result;
       result._vec = _vec.cross(v._vec);
       return result;
     }
 
+    /// Angle in radians to another vector
     double angle(const Vector3& v) const {
       const double localDotOther = unit().dot(v.unit());
       if (localDotOther > 1.0) return 0.0;
       if (localDotOther < -1.0) return M_PI;
       return acos(localDotOther);
     }
 
-    Vector3 unit() const {
+
+    /// Unit-normalized version of this vector
+    Vector3 unitVec() const {
       /// @todo What to do in this situation?
       if (isZero()) return *this;
       else return *this * 1.0/this->mod();
     }
 
+    /// Synonym for unitVec
+    Vector3 unit() const {
+      return unitVec();
+    }
+
+
+    /// Polar projection of this vector into the x-y plane
+    Vector3 polarVec() const {
+      Vector3 rtn = *this;
+      rtn.setZ(0.);
+      return rtn;
+    }
+    /// Synonym for polarVec
+    Vector3 perpVec() const {
+      return polarVec();
+    }
+    /// Synonym for polarVec
+    Vector3 rhoVec() const {
+      return polarVec();
+    }
+
+    /// Square of the polar radius (perpendicular distance from the z-axis), i.e. x^2 + y^2
     double polarRadius2() const {
       return x()*x() + y()*y();
     }
-
     /// Synonym for polarRadius2
     double perp2() const {
       return polarRadius2();
     }
-
     /// Synonym for polarRadius2
     double rho2() const {
       return polarRadius2();
     }
 
+    /// Polar radius
     double polarRadius() const {
       return sqrt(polarRadius2());
     }
-
     /// Synonym for polarRadius
     double perp() const {
       return polarRadius();
     }
-
     /// Synonym for polarRadius
     double rho() const {
       return polarRadius();
     }
 
     /// Angle subtended by the vector's projection in x-y and the x-axis.
     double azimuthalAngle(const PhiMapping mapping = ZERO_2PI) const {
       // If this is a null vector, return zero rather than let atan2 set an error state
       if (Rivet::isZero(mod2())) return 0.0;
 
       // Calculate the arctan and return in the requested range
       const double value = atan2( y(), x() );
       return mapAngle(value, mapping);
     }
-
     /// Synonym for azimuthalAngle.
     double phi(const PhiMapping mapping = ZERO_2PI) const {
       return azimuthalAngle(mapping);
     }
 
     /// Angle subtended by the vector and the z-axis.
     double polarAngle() const {
       // Get number beween [0,PI]
       const double polarangle = atan2(polarRadius(), z());
       return mapAngle0ToPi(polarangle);
     }
 
     /// Synonym for polarAngle
     double theta() const {
       return polarAngle();
     }
 
     /// Purely geometric approximation to rapidity; exact for massless particles
     /// and in the central region.
     // cut-off such that |eta| < log(2/DBL_EPSILON)
     double pseudorapidity() const {
       const double epsilon = DBL_EPSILON;
       double m = mod();
       if ( m == 0.0 ) return  0.0;
       double pt = max(epsilon*m, perp());
       double rap = std::log((m + fabs(z()))/pt);
       return z() > 0.0 ? rap: -rap;
     }
 
     /// Synonym for pseudorapidity.
     double eta() const {
       return pseudorapidity();
     }
 
   public:
     Vector3& operator*=(const double a) {
       _vec = multiply(a, *this)._vec;
       return *this;
     }
 
     Vector3& operator/=(const double a) {
       _vec = multiply(1.0/a, *this)._vec;
       return *this;
     }
 
     Vector3& operator+=(const Vector3& v) {
       _vec = add(*this, v)._vec;
       return *this;
     }
 
     Vector3& operator-=(const Vector3& v) {
       _vec = subtract(*this, v)._vec;
       return *this;
     }
 
     Vector3 operator-() const {
       Vector3 rtn;
       rtn._vec = -_vec;
       return rtn;
     }
 
   };
 
 
 
   inline double dot(const Vector3& a, const Vector3& b) {
     return a.dot(b);
   }
 
   inline Vector3 cross(const Vector3& a, const Vector3& b) {
     return a.cross(b);
   }
 
   inline Vector3 multiply(const double a, const Vector3& v) {
     Vector3 result;
     result._vec = a * v._vec;
     return result;
   }
 
   inline Vector3 multiply(const Vector3& v, const double a) {
     return multiply(a, v);
   }
 
   inline Vector3 operator*(const double a, const Vector3& v) {
     return multiply(a, v);
   }
 
   inline Vector3 operator*(const Vector3& v, const double a) {
     return multiply(a, v);
   }
 
   inline Vector3 operator/(const Vector3& v, const double a) {
     return multiply(1.0/a, v);
   }
 
   inline Vector3 add(const Vector3& a, const Vector3& b) {
     Vector3 result;
     result._vec = a._vec + b._vec;
     return result;
   }
 
   inline Vector3 subtract(const Vector3& a, const Vector3& b) {
     Vector3 result;
     result._vec = a._vec - b._vec;
     return result;
   }
 
   inline Vector3 operator+(const Vector3& a, const Vector3& b) {
     return add(a, b);
   }
 
   inline Vector3 operator-(const Vector3& a, const Vector3& b) {
     return subtract(a, b);
   }
 
   // More physicsy coordinates etc.
 
   /// Angle (in radians) between two 3-vectors.
   inline double angle(const Vector3& a, const Vector3& b) {
     return a.angle(b);
   }
 
   /////////////////////////////////////////////////////
 
   /// @name \f$ |\Delta eta| \f$ calculations from 3-vectors
   //@{
 
    /// Calculate the difference in pseudorapidity between two spatial vectors.
    ///
    /// Delegates to the scalar deltaEta on each vector's pseudorapidity.
    inline double deltaEta(const Vector3& a, const Vector3& b) {
      return deltaEta(a.pseudorapidity(), b.pseudorapidity());
    }
 
   /// Calculate the difference in pseudorapidity between two spatial vectors.
   inline double deltaEta(const Vector3& v, double eta2) {
     return deltaEta(v.pseudorapidity(), eta2);
   }
 
   /// Calculate the difference in pseudorapidity between two spatial vectors.
   inline double deltaEta(double eta1, const Vector3& v) {
     return deltaEta(eta1, v.pseudorapidity());
   }
 
   //@}
 
 
   /// @name \f$ \Delta phi \f$ calculations from 3-vectors
   //@{
 
    /// Calculate the difference in azimuthal angle between two spatial vectors.
    ///
    /// Delegates to the scalar deltaPhi on each vector's azimuthal angle,
    /// so the result is wrapped into [0, PI].
    inline double deltaPhi(const Vector3& a, const Vector3& b) {
      return deltaPhi(a.azimuthalAngle(), b.azimuthalAngle());
    }
 
   /// Calculate the difference in azimuthal angle between two spatial vectors.
   inline double deltaPhi(const Vector3& v, double phi2) {
     return deltaPhi(v.azimuthalAngle(), phi2);
   }
 
   /// Calculate the difference in azimuthal angle between two spatial vectors.
   inline double deltaPhi(double phi1, const Vector3& v) {
     return deltaPhi(phi1, v.azimuthalAngle());
   }
 
   //@}
 
 
   /// @name \f$ \Delta R \f$ calculations from 3-vectors
   //@{
 
    /// Calculate the 2D rapidity-azimuthal ("eta-phi") distance between two spatial vectors.
    ///
    /// Delegates to the scalar deltaR using each vector's pseudorapidity
    /// and azimuthal angle.
    inline double deltaR(const Vector3& a, const Vector3& b) {
      return deltaR(a.pseudorapidity(), a.azimuthalAngle(),
                    b.pseudorapidity(), b.azimuthalAngle());
    }
 
   /// Calculate the 2D rapidity-azimuthal ("eta-phi") distance between two spatial vectors.
   inline double deltaR(const Vector3& v, double eta2, double phi2) {
     return deltaR(v.pseudorapidity(), v.azimuthalAngle(), eta2, phi2);
   }
 
   /// Calculate the 2D rapidity-azimuthal ("eta-phi") distance between two spatial vectors.
   inline double deltaR(double eta1, double phi1, const Vector3& v) {
     return deltaR(eta1, phi1, v.pseudorapidity(), v.azimuthalAngle());
   }
 
   //@}
 
 
   /// @name Typedefs of vector types to short names
   /// @todo Switch canonical and alias names
   //@{
   //typedef Vector3 V3; //< generic
   typedef Vector3 X3; //< spatial
   //@}
 
 
 }
 
 #endif
diff --git a/include/Rivet/Math/Vector4.hh b/include/Rivet/Math/Vector4.hh
--- a/include/Rivet/Math/Vector4.hh
+++ b/include/Rivet/Math/Vector4.hh
@@ -1,1416 +1,1438 @@
 #ifndef RIVET_MATH_VECTOR4
 #define RIVET_MATH_VECTOR4
 
 #include "Rivet/Math/MathHeader.hh"
 #include "Rivet/Math/MathUtils.hh"
 #include "Rivet/Math/VectorN.hh"
 #include "Rivet/Math/Vector3.hh"
 
 namespace Rivet {
 
 
   class FourVector;
   class FourMomentum;
   class LorentzTransform;
   typedef FourVector Vector4;
   FourVector transform(const LorentzTransform& lt, const FourVector& v4);
 
 
   /// @brief Specialisation of VectorN to a general (non-momentum) Lorentz 4-vector.
   ///
   /// @todo Add composite set/mk methods from different coord systems
   class FourVector : public Vector<4> {
     friend FourVector multiply(const double a, const FourVector& v);
     friend FourVector multiply(const FourVector& v, const double a);
     friend FourVector add(const FourVector& a, const FourVector& b);
     friend FourVector transform(const LorentzTransform& lt, const FourVector& v4);
 
   public:
 
     FourVector() : Vector<4>() { }
 
     template<typename V4>
     FourVector(const V4& other) {
       this->setT(other.t());
       this->setX(other.x());
       this->setY(other.y());
       this->setZ(other.z());
     }
 
     FourVector(const Vector<4>& other)
       : Vector<4>(other) { }
 
     FourVector(const double t, const double x, const double y, const double z) {
       this->setT(t);
       this->setX(x);
       this->setY(y);
       this->setZ(z);
     }
 
     virtual ~FourVector() { }
 
   public:
 
     double t() const { return get(0); }
     double t2() const { return sqr(t()); }
     FourVector& setT(const double t) { set(0, t); return *this; }
 
     double x() const { return get(1); }
     double x2() const { return sqr(x()); }
     FourVector& setX(const double x) { set(1, x); return *this; }
 
     double y() const { return get(2); }
     double y2() const { return sqr(y()); }
     FourVector& setY(const double y) { set(2, y); return *this; }
 
     double z() const { return get(3); }
     double z2() const { return sqr(z()); }
     FourVector& setZ(const double z) { set(3, z); return *this; }
 
     double invariant() const {
       // Done this way for numerical precision
       return (t() + z())*(t() - z()) - x()*x() - y()*y();
     }
 
     bool isNull() const {
       return Rivet::isZero(invariant());
     }
 
     /// Angle between this vector and another
     double angle(const FourVector& v) const {
       return vector3().angle( v.vector3() );
     }
     /// Angle between this vector and another (3-vector)
     double angle(const Vector3& v3) const {
       return vector3().angle(v3);
     }
 
-    /// @brief Square of the projection of the 3-vector on to the \f$ x-y \f$ plane
+    /// @brief Mod-square of the projection of the 3-vector on to the \f$ x-y \f$ plane
     /// This is a more efficient function than @c polarRadius, as it avoids the square root.
     /// Use it if you only need the squared value, or e.g. an ordering by magnitude.
     double polarRadius2() const {
       return vector3().polarRadius2();
     }
     /// Synonym for polarRadius2
     double perp2() const {
       return vector3().perp2();
     }
     /// Synonym for polarRadius2
     double rho2() const {
       return vector3().rho2();
     }
 
-    /// Projection of 3-vector on to the \f$ x-y \f$ plane
+    /// Magnitude of projection of 3-vector on to the \f$ x-y \f$ plane
     double polarRadius() const {
       return vector3().polarRadius();
     }
     /// Synonym for polarRadius
     double perp() const {
       return vector3().perp();
     }
     /// Synonym for polarRadius
     double rho() const {
       return vector3().rho();
     }
 
+    /// Projection of 3-vector on to the \f$ x-y \f$ plane
+    Vector3 polarVec() const {
+      return vector3().polarVec();
+    }
+    /// Synonym for polarVec
+    Vector3 perpVec() const {
+      return vector3().perpVec();
+    }
+    /// Synonym for polarVec
+    Vector3 rhoVec() const {
+      return vector3().rhoVec();
+    }
+
     /// Angle subtended by the 3-vector's projection in x-y and the x-axis.
     double azimuthalAngle(const PhiMapping mapping=ZERO_2PI) const {
       return vector3().azimuthalAngle(mapping);
     }
     /// Synonym for azimuthalAngle.
     double phi(const PhiMapping mapping=ZERO_2PI) const {
       return vector3().phi(mapping);
     }
 
     /// Angle subtended by the 3-vector and the z-axis.
     double polarAngle() const {
       return vector3().polarAngle();
     }
     /// Synonym for polarAngle.
     double theta() const {
       return vector3().theta();
     }
 
     /// Pseudorapidity (defined purely by the 3-vector components)
     double pseudorapidity() const {
       return vector3().pseudorapidity();
     }
     /// Synonym for pseudorapidity.
     double eta() const {
       return vector3().eta();
     }
 
     /// Get the \f$ |\eta| \f$ directly.
     double abspseudorapidity() const { return fabs(eta()); }
     /// Get the \f$ |\eta| \f$ directly (alias).
     double abseta() const { return fabs(eta()); }
 
     /// Get the spatial part of the 4-vector as a 3-vector.
     Vector3 vector3() const {
       return Vector3(get(1), get(2), get(3));
     }
 
 
   public:
 
     /// Contract two 4-vectors, with metric signature (+ - - -).
     double contract(const FourVector& v) const {
       const double result = t()*v.t() - x()*v.x() - y()*v.y() - z()*v.z();
       return result;
     }
 
     /// Contract two 4-vectors, with metric signature (+ - - -).
     double dot(const FourVector& v) const {
       return contract(v);
     }
 
     /// Contract two 4-vectors, with metric signature (+ - - -).
     double operator*(const FourVector& v) const {
       return contract(v);
     }
 
     /// Multiply by a scalar.
     FourVector& operator*=(double a) {
       _vec = multiply(a, *this)._vec;
       return *this;
     }
 
     /// Divide by a scalar.
     FourVector& operator/=(double a) {
       _vec = multiply(1.0/a, *this)._vec;
       return *this;
     }
 
     /// Add to this 4-vector.
     FourVector& operator+=(const FourVector& v) {
       _vec = add(*this, v)._vec;
       return *this;
     }
 
     /// Subtract from this 4-vector. NB time as well as space components are subtracted.
     FourVector& operator-=(const FourVector& v) {
       _vec = add(*this, -v)._vec;
       return *this;
     }
 
     /// Multiply all components (space and time) by -1.
     FourVector operator-() const {
       FourVector result;
       result._vec = -_vec;
       return result;
     }
 
     /// Multiply space components only by -1.
     FourVector reverse() const {
       FourVector result = -*this;
       result.setT(-result.t());
       return result;
     }
 
   };
 
 
   /// Contract two 4-vectors, with metric signature (+ - - -).
   inline double contract(const FourVector& a, const FourVector& b) {
     return a.contract(b);
   }
 
   /// Contract two 4-vectors, with metric signature (+ - - -).
   inline double dot(const FourVector& a, const FourVector& b) {
     return contract(a, b);
   }
 
   inline FourVector multiply(const double a, const FourVector& v) {
     FourVector result;
     result._vec = a * v._vec;
     return result;
   }
 
   inline FourVector multiply(const FourVector& v, const double a) {
     return multiply(a, v);
   }
 
   inline FourVector operator*(const double a, const FourVector& v) {
     return multiply(a, v);
   }
 
   inline FourVector operator*(const FourVector& v, const double a) {
     return multiply(a, v);
   }
 
   inline FourVector operator/(const FourVector& v, const double a) {
     return multiply(1.0/a, v);
   }
 
   inline FourVector add(const FourVector& a, const FourVector& b) {
     FourVector result;
     result._vec = a._vec + b._vec;
     return result;
   }
 
   inline FourVector operator+(const FourVector& a, const FourVector& b) {
     return add(a, b);
   }
 
   inline FourVector operator-(const FourVector& a, const FourVector& b) {
     return add(a, -b);
   }
 
    /// Calculate the Lorentz self-invariant of a 4-vector.
    /// \f$ v_\mu v^\mu = g_{\mu\nu} x^\mu x^\nu \f$.
    ///
    /// Free-function form of FourVector::invariant(), using
    /// the (+ - - -) metric signature.
    inline double invariant(const FourVector& lv) {
      return lv.invariant();
    }
 
   /// Angle (in radians) between spatial parts of two Lorentz vectors.
   inline double angle(const FourVector& a, const FourVector& b) {
     return a.angle(b);
   }
 
   /// Angle (in radians) between spatial parts of two Lorentz vectors.
   inline double angle(const Vector3& a, const FourVector& b) {
     return angle( a, b.vector3() );
   }
 
   /// Angle (in radians) between spatial parts of two Lorentz vectors.
   inline double angle(const FourVector& a, const Vector3& b) {
     return a.angle(b);
   }
 
 
   ////////////////////////////////////////////////
 
 
   /// Specialized version of the FourVector with momentum/energy functionality.
   class FourMomentum : public FourVector {
     friend FourMomentum multiply(const double a, const FourMomentum& v);
     friend FourMomentum multiply(const FourMomentum& v, const double a);
     friend FourMomentum add(const FourMomentum& a, const FourMomentum& b);
     friend FourMomentum transform(const LorentzTransform& lt, const FourMomentum& v4);
 
   public:
     FourMomentum() { }
 
     template<typename V4>
     FourMomentum(const V4& other) {
       this->setE(other.t());
       this->setPx(other.x());
       this->setPy(other.y());
       this->setPz(other.z());
     }
 
     FourMomentum(const Vector<4>& other)
       : FourVector(other) { }
 
     FourMomentum(const double E, const double px, const double py, const double pz) {
       this->setE(E);
       this->setPx(px);
       this->setPy(py);
       this->setPz(pz);
     }
 
     ~FourMomentum() {}
 
   public:
 
 
     /// @name Coordinate setters
     //@{
 
     /// Set energy \f$ E \f$ (time component of momentum).
     FourMomentum& setE(double E) {
       setT(E);
       return *this;
     }
 
     /// Set x-component of momentum \f$ p_x \f$.
     FourMomentum& setPx(double px) {
       setX(px);
       return *this;
     }
 
     /// Set y-component of momentum \f$ p_y \f$.
     FourMomentum& setPy(double py) {
       setY(py);
       return *this;
     }
 
     /// Set z-component of momentum \f$ p_z \f$.
     FourMomentum& setPz(double pz) {
       setZ(pz);
       return *this;
     }
 
 
     /// Set the p coordinates and energy simultaneously
     FourMomentum& setPE(double px, double py, double pz, double E) {
       if (E < 0)
         throw std::invalid_argument("Negative energy given as argument: " + to_str(E));
       setPx(px); setPy(py); setPz(pz); setE(E);
       return *this;
     }
     /// Alias for setPE
     FourMomentum& setXYZE(double px, double py, double pz, double E) {
       return setPE(px, py, pz, E);
     }
     // /// Near-alias with switched arg order
     // FourMomentum& setEP(double E, double px, double py, double pz) {
     //   return setPE(px, py, pz, E);
     // }
     // /// Alias for setEP
     // FourMomentum& setEXYZ(double E, double px, double py, double pz) {
     //   return setEP(E, px, py, pz);
     // }
 
 
     /// Set the p coordinates and mass simultaneously
     FourMomentum& setPM(double px, double py, double pz, double mass) {
       if (mass < 0)
         throw std::invalid_argument("Negative mass given as argument: " + to_str(mass));
       const double E = sqrt( sqr(mass) + sqr(px) + sqr(py) + sqr(pz) );
       // setPx(px); setPy(py); setPz(pz); setE(E);
       return setPE(px, py, pz, E);
     }
     /// Alias for setPM
     FourMomentum& setXYZM(double px, double py, double pz, double mass) {
       return setPM(px, py, pz, mass);
     }
 
 
     /// Set the vector state from (eta,phi,energy) coordinates and the mass
     ///
     /// eta = -ln(tan(theta/2))
     /// -> theta = 2 atan(exp(-eta))
     FourMomentum& setEtaPhiME(double eta, double phi, double mass, double E) {
       if (mass < 0)
         throw std::invalid_argument("Negative mass given as argument");
       if (E < 0)
         throw std::invalid_argument("Negative energy given as argument");
       const double theta = 2 * atan(exp(-eta));
       if (theta < 0 || theta > M_PI)
         throw std::domain_error("Polar angle outside 0..pi in calculation");
       setThetaPhiME(theta, phi, mass, E);
       return *this;
     }
 
     /// Set the vector state from (eta,phi,pT) coordinates and the mass
     ///
     /// eta = -ln(tan(theta/2))
     /// -> theta = 2 atan(exp(-eta))
     FourMomentum& setEtaPhiMPt(double eta, double phi, double mass, double pt) {
       if (mass < 0)
         throw std::invalid_argument("Negative mass given as argument");
       if (pt < 0)
         throw std::invalid_argument("Negative transverse momentum given as argument");
       const double theta = 2 * atan(exp(-eta));
       if (theta < 0 || theta > M_PI)
         throw std::domain_error("Polar angle outside 0..pi in calculation");
       const double p = pt / sin(theta);
       const double E = sqrt( sqr(p) + sqr(mass) );
       setThetaPhiME(theta, phi, mass, E);
       return *this;
     }
 
     /// Set the vector state from (y,phi,energy) coordinates and the mass
     ///
     /// y = 0.5 * ln((E+pz)/(E-pz))
     /// -> (E^2 - pz^2) exp(2y) = (E+pz)^2
     ///  & (E^2 - pz^2) exp(-2y) = (E-pz)^2
     /// -> E = sqrt(pt^2 + m^2) cosh(y)
     /// -> pz = sqrt(pt^2 + m^2) sinh(y)
     /// -> sqrt(pt^2 + m^2) = E / cosh(y)
     FourMomentum& setRapPhiME(double y, double phi, double mass, double E) {
       if (mass < 0)
         throw std::invalid_argument("Negative mass given as argument");
       if (E < 0)
         throw std::invalid_argument("Negative energy given as argument");
       const double sqrt_pt2_m2 = E / cosh(y);
       const double pt = sqrt( sqr(sqrt_pt2_m2) - sqr(mass) );
       if (pt < 0)
         throw std::domain_error("Negative transverse momentum in calculation");
       const double pz = sqrt_pt2_m2 * sinh(y);
       const double px = pt * cos(phi);
       const double py = pt * sin(phi);
       setPE(px, py, pz, E);
       return *this;
     }
 
     /// Set the vector state from (y,phi,pT) coordinates and the mass
     ///
     /// y = 0.5 * ln((E+pz)/(E-pz))
     /// -> E = sqrt(pt^2 + m^2) cosh(y)  [see above]
     FourMomentum& setRapPhiMPt(double y, double phi, double mass, double pt) {
       if (mass < 0)
         throw std::invalid_argument("Negative mass given as argument");
       if (pt < 0)
         throw std::invalid_argument("Negative transverse mass given as argument");
       const double E = sqrt( sqr(pt) + sqr(mass) ) * cosh(y);
       if (E < 0)
         throw std::domain_error("Negative energy in calculation");
       setRapPhiME(y, phi, mass, E);
       return *this;
     }
 
     /// Set the vector state from (theta,phi,energy) coordinates and the mass
     ///
     /// p = sqrt(E^2 - mass^2)
     /// pz = p cos(theta)
     /// pt = p sin(theta)
     FourMomentum& setThetaPhiME(double theta, double phi, double mass, double E) {
       if (theta < 0 || theta > M_PI)
         throw std::invalid_argument("Polar angle outside 0..pi given as argument");
       if (mass < 0)
         throw std::invalid_argument("Negative mass given as argument");
       if (E < 0)
         throw std::invalid_argument("Negative energy given as argument");
       const double p = sqrt( sqr(E) - sqr(mass) );
       const double pz = p * cos(theta);
       const double pt = p * sin(theta);
       if (pt < 0)
         throw std::invalid_argument("Negative transverse momentum in calculation");
       const double px = pt * cos(phi);
       const double py = pt * sin(phi);
       setPE(px, py, pz, E);
       return *this;
     }
 
     /// Set the vector state from (theta,phi,pT) coordinates and the mass
     ///
     /// p = pt / sin(theta)
     /// pz = p cos(theta)
     /// E = sqrt(p^2 + mass^2)
     FourMomentum& setThetaPhiMPt(double theta, double phi, double mass, double pt) {
       if (theta < 0 || theta > M_PI)
         throw std::invalid_argument("Polar angle outside 0..pi given as argument");
       if (mass < 0)
         throw std::invalid_argument("Negative mass given as argument");
       if (pt < 0)
         throw std::invalid_argument("Negative transverse momentum given as argument");
       const double p = pt / sin(theta);
       const double px = pt * cos(phi);
       const double py = pt * sin(phi);
       const double pz = p * cos(theta);
       const double E = sqrt( sqr(p) + sqr(mass) );
       setPE(px, py, pz, E);
       return *this;
     }
 
     /// Set the vector state from (pT,phi,energy) coordinates and the mass
     ///
     /// pz = sqrt(E^2 - mass^2 - pt^2)
     FourMomentum& setPtPhiME(double pt, double phi, double mass, double E) {
       if (pt < 0)
         throw std::invalid_argument("Negative transverse momentum given as argument");
       if (mass < 0)
         throw std::invalid_argument("Negative mass given as argument");
       if (E < 0)
         throw std::invalid_argument("Negative energy given as argument");
       const double px = pt * cos(phi);
       const double py = pt * sin(phi);
       const double pz = sqrt(sqr(E) - sqr(mass) - sqr(pt));
       setPE(px, py, pz, E);
       return *this;
     }
 
     //@}
 
 
     /// @name Accessors
     //@{
 
     /// Get energy \f$ E \f$ (time component of momentum).
     double E() const { return t(); }
     /// Get energy-squared \f$ E^2 \f$.
     double E2() const { return t2(); }
 
     /// Get x-component of momentum \f$ p_x \f$.
     double px() const { return x(); }
     /// Get x-squared \f$ p_x^2 \f$.
     double px2() const { return x2(); }
 
     /// Get y-component of momentum \f$ p_y \f$.
     double py() const { return y(); }
     /// Get y-squared \f$ p_y^2 \f$.
     double py2() const { return y2(); }
 
     /// Get z-component of momentum \f$ p_z \f$.
     double pz() const { return z(); }
     /// Get z-squared \f$ p_z^2 \f$.
     double pz2() const { return z2(); }
 
 
     /// @brief Get the mass \f$ m = \sqrt{E^2 - p^2} \f$ (the Lorentz self-invariant).
     ///
     /// For spacelike momenta, the mass will be -sqrt(|mass2|).
     double mass() const {
       // assert(Rivet::isZero(mass2()) || mass2() > 0);
       // if (Rivet::isZero(mass2())) {
       //   return 0.0;
       // } else {
       //   return sqrt(mass2());
       // }
       return sign(mass2()) * sqrt(fabs(mass2()));
     }
 
     /// Get the squared mass \f$ m^2 = E^2 - p^2 \f$ (the Lorentz self-invariant).
     double mass2() const {
       return invariant();
     }
 
 
     /// Get 3-momentum part, \f$ p \f$.
     Vector3 p3() const { return vector3(); }
 
     /// Get the modulus of the 3-momentum
     double p() const {
       return p3().mod();
     }
 
     /// Get the modulus-squared of the 3-momentum
     double p2() const {
       return p3().mod2();
     }
 
 
     /// Calculate the rapidity.
     double rapidity() const {
       return 0.5 * std::log( (E() + pz()) / (E() - pz()) );
     }
     /// Alias for rapidity.
     double rap() const {
       return rapidity();
     }
 
     /// Absolute rapidity.
     double absrapidity() const {
       return fabs(rapidity());
     }
     /// Absolute rapidity.
     double absrap() const {
       return fabs(rap());
     }
 
+    /// Calculate the transverse momentum vector \f$ \vec{p}_T \f$.
+    Vector3 pTvec() const {
+      return p3().polarVec();
+    }
+    /// Synonym for pTvec
+    Vector3 ptvec() const {
+      return pTvec();
+    }
+
     /// Calculate the squared transverse momentum \f$ p_T^2 \f$.
     double pT2() const {
       return vector3().polarRadius2();
     }
     /// Calculate the squared transverse momentum \f$ p_T^2 \f$.
     double pt2() const {
       return vector3().polarRadius2();
     }
 
     /// Calculate the transverse momentum \f$ p_T \f$.
     double pT() const {
       return sqrt(pT2());
     }
     /// Calculate the transverse momentum \f$ p_T \f$.
     double pt() const {
       return sqrt(pT2());
     }
 
     /// Calculate the transverse energy \f$ E_T^2 = E^2 \sin^2{\theta} \f$.
     double Et2() const {
       return Et() * Et();
     }
     /// Calculate the transverse energy \f$ E_T = E \sin{\theta} \f$.
     double Et() const {
       return E() * sin(polarAngle());
     }
 
     //@}
 
 
     /// @name Lorentz boost factors and vectors
     //@{
 
     /// Calculate the boost factor \f$ \gamma \f$.
     /// @note \f$ \gamma = E/mc^2 \f$ so we rely on the c=1 convention
     double gamma() const {
       return sqrt(E2()/mass2());
     }
 
     /// Calculate the boost vector \f$ \vec{\gamma} \f$.
     /// @note \f$ \gamma = E/mc^2 \f$ so we rely on the c=1 convention
     Vector3 gammaVec() const {
       return gamma() * p3().unit();
     }
 
     /// Calculate the boost factor \f$ \beta \f$.
     /// @note \f$ \beta = pc/E \f$ so we rely on the c=1 convention
     double beta() const {
       return p()/E();
     }
 
     /// Calculate the boost vector \f$ \vec{\beta} \f$.
     /// @note \f$ \beta = pc/E \f$ so we rely on the c=1 convention
     Vector3 betaVec() const {
       // return Vector3(px()/E(), py()/E(), pz()/E());
       return p3()/E();
     }
 
     /// @brief Deprecated alias for betaVec
     /// @deprecated This will be removed; use betaVec() instead
     Vector3 boostVector() const { return betaVec(); }
 
     //@}
 
 
     ////////////////////////////////////////
 
 
     /// @name Sorting helpers
     //@{
 
     /// Struct for sorting by increasing energy
     struct byEAscending {
       bool operator()(const FourMomentum& left, const FourMomentum& right) const{
         const double pt2left = left.E();
         const double pt2right = right.E();
         return pt2left < pt2right;
       }
 
       bool operator()(const FourMomentum* left, const FourMomentum* right) const{
         return (*this)(*left, *right);
       }
     };
 
 
     /// Struct for sorting by decreasing energy
     struct byEDescending {
       bool operator()(const FourMomentum& left, const FourMomentum& right) const{
         return byEAscending()(right, left);
       }
 
       bool operator()(const FourMomentum* left, const FourVector* right) const{
         return (*this)(*left, *right);
       }
     };
 
     //@}
 
 
     ////////////////////////////////////////
 
 
     /// @name Arithmetic operators
     //@{
 
     /// Multiply by a scalar
     FourMomentum& operator*=(double a) {
       _vec = multiply(a, *this)._vec;
       return *this;
     }
 
     /// Divide by a scalar
     FourMomentum& operator/=(double a) {
       _vec = multiply(1.0/a, *this)._vec;
       return *this;
     }
 
     /// Add to this 4-vector. NB time as well as space components are added.
     FourMomentum& operator+=(const FourMomentum& v) {
       _vec = add(*this, v)._vec;
       return *this;
     }
 
     /// Subtract from this 4-vector. NB time as well as space components are subtracted.
     FourMomentum& operator-=(const FourMomentum& v) {
       _vec = add(*this, -v)._vec;
       return *this;
     }
 
     /// Multiply all components (time and space) by -1.
     FourMomentum operator-() const {
       FourMomentum result;
       result._vec = -_vec;
       return result;
     }
 
     /// Multiply space components only by -1.
     FourMomentum reverse() const {
       FourMomentum result = -*this;
       result.setE(-result.E());
       return result;
     }
 
     //@}
 
 
     ////////////////////////////////////////
 
 
     /// @name Factory functions
     //@{
 
     /// Make a vector from (px,py,pz,E) coordinates
     static FourMomentum mkXYZE(double px, double py, double pz, double E) {
       return FourMomentum().setPE(px, py, pz, E);
     }
 
     /// Make a vector from (px,py,pz) coordinates and the mass
     static FourMomentum mkXYZM(double px, double py, double pz, double mass) {
       return FourMomentum().setPM(px, py, pz, mass);
     }
 
     /// Make a vector from (eta,phi,energy) coordinates and the mass
     static FourMomentum mkEtaPhiME(double eta, double phi, double mass, double E) {
       return FourMomentum().setEtaPhiME(eta, phi, mass, E);
     }
 
     /// Make a vector from (eta,phi,pT) coordinates and the mass
     static FourMomentum mkEtaPhiMPt(double eta, double phi, double mass, double pt) {
       return FourMomentum().setEtaPhiMPt(eta, phi, mass, pt);
     }
 
     /// Make a vector from (y,phi,energy) coordinates and the mass
     static FourMomentum mkRapPhiME(double y, double phi, double mass, double E) {
       return FourMomentum().setRapPhiME(y, phi, mass, E);
     }
 
     /// Make a vector from (y,phi,pT) coordinates and the mass
     static FourMomentum mkRapPhiMPt(double y, double phi, double mass, double pt) {
       return FourMomentum().setRapPhiMPt(y, phi, mass, pt);
     }
 
     /// Make a vector from (theta,phi,energy) coordinates and the mass
     static FourMomentum mkThetaPhiME(double theta, double phi, double mass, double E) {
       return FourMomentum().setThetaPhiME(theta, phi, mass, E);
     }
 
     /// Make a vector from (theta,phi,pT) coordinates and the mass
     static FourMomentum mkThetaPhiMPt(double theta, double phi, double mass, double pt) {
       return FourMomentum().setThetaPhiMPt(theta, phi, mass, pt);
     }
 
     /// Make a vector from (pT,phi,energy) coordinates and the mass
     static FourMomentum mkPtPhiME(double pt, double phi, double mass, double E) {
       return FourMomentum().setPtPhiME(pt, phi, mass, E);
     }
 
     //@}
 
 
   };
 
 
 
  /// Multiply a 4-momentum by a scalar (friend: writes the scaled state
  /// directly into the result's base vector).
  inline FourMomentum multiply(const double a, const FourMomentum& v) {
    FourMomentum result;
    result._vec = a * v._vec;
    return result;
  }

  /// Multiply a 4-momentum by a scalar (argument-order convenience overload).
  inline FourMomentum multiply(const FourMomentum& v, const double a) {
    return multiply(a, v);
  }

  /// Left-multiplication of a 4-momentum by a scalar.
  inline FourMomentum operator*(const double a, const FourMomentum& v) {
    return multiply(a, v);
  }

  /// Right-multiplication of a 4-momentum by a scalar.
  inline FourMomentum operator*(const FourMomentum& v, const double a) {
    return multiply(a, v);
  }

  /// Division of a 4-momentum by a scalar (implemented as multiplication by 1/a).
  inline FourMomentum operator/(const FourMomentum& v, const double a) {
    return multiply(1.0/a, v);
  }

  /// Componentwise sum of two 4-momenta (time as well as space components).
  inline FourMomentum add(const FourMomentum& a, const FourMomentum& b) {
    FourMomentum result;
    result._vec = a._vec + b._vec;
    return result;
  }

  /// Componentwise sum of two 4-momenta.
  inline FourMomentum operator+(const FourMomentum& a, const FourMomentum& b) {
    return add(a, b);
  }

  /// Componentwise difference of two 4-momenta.
  inline FourMomentum operator-(const FourMomentum& a, const FourMomentum& b) {
    return add(a, -b);
  }
 
 
   //////////////////////////////////////////////////////
 
 
   /// @name \f$ \Delta R \f$ calculations from 4-vectors
   //@{
 
  /// @brief Calculate the 2D rapidity-azimuthal ("eta-phi") distance between two four-vectors.
  /// There is a scheme ambiguity for momentum-type four vectors as to whether
  /// the pseudorapidity (a purely geometric concept) or the rapidity (a
  /// relativistic energy-momentum quantity) is to be used: this can be chosen
  /// via the optional scheme parameter. Use of this scheme option is
  /// discouraged in this case since @c RAPIDITY is only a valid option for
  /// vectors whose type is really the FourMomentum derived class.
  ///
  /// @throws std::runtime_error if @c RAPIDITY is requested for arguments
  /// that are not actually FourMomentum instances (checked via dynamic_cast).
  inline double deltaR(const FourVector& a, const FourVector& b,
                       RapScheme scheme = PSEUDORAPIDITY) {
    switch (scheme) {
    case PSEUDORAPIDITY :
      return deltaR(a.vector3(), b.vector3());
    case RAPIDITY:
      {
        // Rapidity needs E/pz, i.e. true momentum vectors: probe the dynamic type.
        const FourMomentum* ma = dynamic_cast<const FourMomentum*>(&a);
        const FourMomentum* mb = dynamic_cast<const FourMomentum*>(&b);
        if (!ma || !mb) {
          string err = "deltaR with scheme RAPIDITY can only be called with FourMomentum objects, not FourVectors";
          throw std::runtime_error(err);
        }
        return deltaR(*ma, *mb, scheme);
      }
    default:
      throw std::runtime_error("The specified deltaR scheme is not yet implemented");
    }
  }


  /// @brief Calculate the 2D rapidity-azimuthal ("eta-phi") distance between a
  /// four-vector and an (eta2, phi2) coordinate pair.
  /// There is a scheme ambiguity for momentum-type four vectors
  /// as to whether the pseudorapidity (a purely geometric concept) or the
  /// rapidity (a relativistic energy-momentum quantity) is to be used: this can
  /// be chosen via the optional scheme parameter.
  inline double deltaR(const FourVector& v,
                       double eta2, double phi2,
                       RapScheme scheme = PSEUDORAPIDITY) {
    switch (scheme) {
    case PSEUDORAPIDITY :
      return deltaR(v.vector3(), eta2, phi2);
    case RAPIDITY:
      {
        // As above: RAPIDITY is only meaningful for genuine FourMomentum objects.
        const FourMomentum* mv = dynamic_cast<const FourMomentum*>(&v);
        if (!mv) {
          string err = "deltaR with scheme RAPIDITY can only be called with FourMomentum objects, not FourVectors";
          throw std::runtime_error(err);
        }
        return deltaR(*mv, eta2, phi2, scheme);
      }
    default:
      throw std::runtime_error("The specified deltaR scheme is not yet implemented");
    }
  }


  /// @brief Calculate the 2D rapidity-azimuthal ("eta-phi") distance between an
  /// (eta1, phi1) coordinate pair and a four-vector.
  /// There is a scheme ambiguity for momentum-type four vectors
  /// as to whether the pseudorapidity (a purely geometric concept) or the
  /// rapidity (a relativistic energy-momentum quantity) is to be used: this can
  /// be chosen via the optional scheme parameter.
  inline double deltaR(double eta1, double phi1,
                       const FourVector& v,
                       RapScheme scheme = PSEUDORAPIDITY) {
    switch (scheme) {
    case PSEUDORAPIDITY :
      return deltaR(eta1, phi1, v.vector3());
    case RAPIDITY:
      {
        const FourMomentum* mv = dynamic_cast<const FourMomentum*>(&v);
        if (!mv) {
          string err = "deltaR with scheme RAPIDITY can only be called with FourMomentum objects, not FourVectors";
          throw std::runtime_error(err);
        }
        return deltaR(eta1, phi1, *mv, scheme);
      }
    default:
      throw std::runtime_error("The specified deltaR scheme is not yet implemented");
    }
  }


  /// @brief Calculate the 2D rapidity-azimuthal ("eta-phi") distance between two four-momenta.
  /// There is a scheme ambiguity for momentum-type four vectors
  /// as to whether the pseudorapidity (a purely geometric concept) or the
  /// rapidity (a relativistic energy-momentum quantity) is to be used: this can
  /// be chosen via the optional scheme parameter.
  inline double deltaR(const FourMomentum& a, const FourMomentum& b,
                       RapScheme scheme = PSEUDORAPIDITY) {
    switch (scheme) {
    case PSEUDORAPIDITY:
      return deltaR(a.vector3(), b.vector3());
    case RAPIDITY:
      return deltaR(a.rapidity(), a.azimuthalAngle(), b.rapidity(), b.azimuthalAngle());
    default:
      throw std::runtime_error("The specified deltaR scheme is not yet implemented");
    }
  }

  /// @brief Calculate the 2D rapidity-azimuthal ("eta-phi") distance between a
  /// four-momentum and an (eta2, phi2) coordinate pair.
  /// There is a scheme ambiguity for momentum-type four vectors
  /// as to whether the pseudorapidity (a purely geometric concept) or the
  /// rapidity (a relativistic energy-momentum quantity) is to be used: this can
  /// be chosen via the optional scheme parameter.
  inline double deltaR(const FourMomentum& v,
                       double eta2, double phi2,
                       RapScheme scheme = PSEUDORAPIDITY) {
    switch (scheme) {
    case PSEUDORAPIDITY:
      return deltaR(v.vector3(), eta2, phi2);
    case RAPIDITY:
      return deltaR(v.rapidity(), v.azimuthalAngle(), eta2, phi2);
    default:
      throw std::runtime_error("The specified deltaR scheme is not yet implemented");
    }
  }


  /// @brief Calculate the 2D rapidity-azimuthal ("eta-phi") distance between an
  /// (eta1, phi1) coordinate pair and a four-momentum.
  /// There is a scheme ambiguity for momentum-type four vectors
  /// as to whether the pseudorapidity (a purely geometric concept) or the
  /// rapidity (a relativistic energy-momentum quantity) is to be used: this can
  /// be chosen via the optional scheme parameter.
  inline double deltaR(double eta1, double phi1,
                       const FourMomentum& v,
                       RapScheme scheme = PSEUDORAPIDITY) {
    switch (scheme) {
    case PSEUDORAPIDITY:
      return deltaR(eta1, phi1, v.vector3());
    case RAPIDITY:
      return deltaR(eta1, phi1, v.rapidity(), v.azimuthalAngle());
    default:
      throw std::runtime_error("The specified deltaR scheme is not yet implemented");
    }
  }

  /// @brief Calculate the 2D rapidity-azimuthal ("eta-phi") distance between a
  /// four-momentum and a four-vector.
  /// There is a scheme ambiguity for momentum-type four vectors
  /// as to whether the pseudorapidity (a purely geometric concept) or the
  /// rapidity (a relativistic energy-momentum quantity) is to be used: this can
  /// be chosen via the optional scheme parameter.
  inline double deltaR(const FourMomentum& a, const FourVector& b,
                       RapScheme scheme = PSEUDORAPIDITY) {
    switch (scheme) {
    case PSEUDORAPIDITY:
      return deltaR(a.vector3(), b.vector3());
    case RAPIDITY:
      // b is promoted to a FourMomentum (implicit conversion ctor) so its
      // rapidity can be computed.
      return deltaR(a.rapidity(), a.azimuthalAngle(), FourMomentum(b).rapidity(), b.azimuthalAngle());
    default:
      throw std::runtime_error("The specified deltaR scheme is not yet implemented");
    }
  }

  /// @brief Calculate the 2D rapidity-azimuthal ("eta-phi") distance between a
  /// four-vector and a four-momentum (delegates to the mirror overload).
  /// There is a scheme ambiguity for momentum-type four vectors
  /// as to whether the pseudorapidity (a purely geometric concept) or the
  /// rapidity (a relativistic energy-momentum quantity) is to be used: this can
  /// be chosen via the optional scheme parameter.
  inline double deltaR(const FourVector& a, const FourMomentum& b,
                       RapScheme scheme = PSEUDORAPIDITY) {
    return deltaR(b, a, scheme);
  }

  /// @brief Calculate the 2D rapidity-azimuthal ("eta-phi") distance between a
  /// three-vector and a four-vector.
  inline double deltaR(const FourMomentum& a, const Vector3& b) {
    return deltaR(a.vector3(), b);
  }

  /// @brief Calculate the 2D rapidity-azimuthal ("eta-phi") distance between a
  /// three-vector and a four-vector.
  inline double deltaR(const Vector3& a, const FourMomentum& b) {
    return deltaR(a, b.vector3());
  }

  /// @brief Calculate the 2D rapidity-azimuthal ("eta-phi") distance between a
  /// three-vector and a four-vector.
  inline double deltaR(const FourVector& a, const Vector3& b) {
    return deltaR(a.vector3(), b);
  }

  /// @brief Calculate the 2D rapidity-azimuthal ("eta-phi") distance between a
  /// three-vector and a four-vector.
  inline double deltaR(const Vector3& a, const FourVector& b) {
    return deltaR(a, b.vector3());
  }
 
   //@}
 
 
   //////////////////////////////////////////////////////
 
 
   /// @name \f$ \Delta phi \f$ calculations from 4-vectors
   //@{
 
   /// Calculate the difference in azimuthal angle between two vectors.
   inline double deltaPhi(const FourMomentum& a, const FourMomentum& b) {
     return deltaPhi(a.vector3(), b.vector3());
   }
 
   /// Calculate the difference in azimuthal angle between two vectors.
   inline double deltaPhi(const FourMomentum& v, double phi2) {
     return deltaPhi(v.vector3(), phi2);
   }
 
   /// Calculate the difference in azimuthal angle between two vectors.
   inline double deltaPhi(double phi1, const FourMomentum& v) {
     return deltaPhi(phi1, v.vector3());
   }
 
   /// Calculate the difference in azimuthal angle between two vectors.
   inline double deltaPhi(const FourVector& a, const FourVector& b) {
     return deltaPhi(a.vector3(), b.vector3());
   }
 
   /// Calculate the difference in azimuthal angle between two vectors.
   inline double deltaPhi(const FourVector& v, double phi2) {
     return deltaPhi(v.vector3(), phi2);
   }
 
   /// Calculate the difference in azimuthal angle between two vectors.
   inline double deltaPhi(double phi1, const FourVector& v) {
     return deltaPhi(phi1, v.vector3());
   }
 
   /// Calculate the difference in azimuthal angle between two vectors.
   inline double deltaPhi(const FourVector& a, const FourMomentum& b) {
     return deltaPhi(a.vector3(), b.vector3());
   }
 
   /// Calculate the difference in azimuthal angle between two vectors.
   inline double deltaPhi(const FourMomentum& a, const FourVector& b) {
     return deltaPhi(a.vector3(), b.vector3());
   }
 
   /// Calculate the difference in azimuthal angle between two vectors.
   inline double deltaPhi(const FourVector& a, const Vector3& b) {
     return deltaPhi(a.vector3(), b);
   }
 
   /// Calculate the difference in azimuthal angle between two vectors.
   inline double deltaPhi(const Vector3& a, const FourVector& b) {
     return deltaPhi(a, b.vector3());
   }
 
   /// Calculate the difference in azimuthal angle between two vectors.
   inline double deltaPhi(const FourMomentum& a, const Vector3& b) {
     return deltaPhi(a.vector3(), b);
   }
 
   /// Calculate the difference in azimuthal angle between two vectors.
   inline double deltaPhi(const Vector3& a, const FourMomentum& b) {
     return deltaPhi(a, b.vector3());
   }
 
   //@}
 
 
   //////////////////////////////////////////////////////
 
 
   /// @name \f$ |\Delta eta| \f$ calculations from 4-vectors
   //@{
 
  /// Calculate the difference in pseudorapidity between two 4-momenta.
  inline double deltaEta(const FourMomentum& a, const FourMomentum& b) {
    return deltaEta(a.vector3(), b.vector3());
  }

  /// Calculate the difference in pseudorapidity between a 4-momentum and an eta value.
  inline double deltaEta(const FourMomentum& v, double eta2) {
    return deltaEta(v.vector3(), eta2);
  }

  /// Calculate the difference in pseudorapidity between an eta value and a 4-momentum.
  inline double deltaEta(double eta1, const FourMomentum& v) {
    return deltaEta(eta1, v.vector3());
  }

  /// Calculate the difference in pseudorapidity between two 4-vectors.
  inline double deltaEta(const FourVector& a, const FourVector& b) {
    return deltaEta(a.vector3(), b.vector3());
  }

  /// Calculate the difference in pseudorapidity between a 4-vector and an eta value.
  inline double deltaEta(const FourVector& v, double eta2) {
    return deltaEta(v.vector3(), eta2);
  }

  /// Calculate the difference in pseudorapidity between an eta value and a 4-vector.
  inline double deltaEta(double eta1, const FourVector& v) {
    return deltaEta(eta1, v.vector3());
  }

  /// Calculate the difference in pseudorapidity between a 4-vector and a 4-momentum.
  inline double deltaEta(const FourVector& a, const FourMomentum& b) {
    return deltaEta(a.vector3(), b.vector3());
  }

  /// Calculate the difference in pseudorapidity between a 4-momentum and a 4-vector.
  inline double deltaEta(const FourMomentum& a, const FourVector& b) {
    return deltaEta(a.vector3(), b.vector3());
  }

  /// Calculate the difference in pseudorapidity between a 4-vector and a 3-vector.
  inline double deltaEta(const FourVector& a, const Vector3& b) {
    return deltaEta(a.vector3(), b);
  }

  /// Calculate the difference in pseudorapidity between a 3-vector and a 4-vector.
  inline double deltaEta(const Vector3& a, const FourVector& b) {
    return deltaEta(a, b.vector3());
  }

  /// Calculate the difference in pseudorapidity between a 4-momentum and a 3-vector.
  inline double deltaEta(const FourMomentum& a, const Vector3& b) {
    return deltaEta(a.vector3(), b);
  }

  /// Calculate the difference in pseudorapidity between a 3-vector and a 4-momentum.
  inline double deltaEta(const Vector3& a, const FourMomentum& b) {
    return deltaEta(a, b.vector3());
  }
 
   //@}
 
 
   /// @name \f$ |\Delta y| \f$ calculations from 4-momentum vectors
   //@{
 
   /// Calculate the difference in rapidity between two 4-momentum vectors.
   inline double deltaRap(const FourMomentum& a, const FourMomentum& b) {
     return deltaRap(a.rapidity(), b.rapidity());
   }
 
   /// Calculate the difference in rapidity between two 4-momentum vectors.
   inline double deltaRap(const FourMomentum& v, double y2) {
     return deltaRap(v.rapidity(), y2);
   }
 
   /// Calculate the difference in rapidity between two 4-momentum vectors.
   inline double deltaRap(double y1, const FourMomentum& v) {
     return deltaRap(y1, v.rapidity());
   }
 
   //@}
 
 
   //////////////////////////////////////////////////////
 
 
   /// @name 4-vector comparison functions (for sorting)
   //@{
 
   /// Comparison to give a sorting by decreasing pT
   inline bool cmpMomByPt(const FourMomentum& a, const FourMomentum& b) {
     return a.pt() > b.pt();
   }
   /// Comparison to give a sorting by increasing pT
   inline bool cmpMomByAscPt(const FourMomentum& a, const FourMomentum& b) {
     return a.pt() < b.pt();
   }
 
   /// Comparison to give a sorting by decreasing 3-momentum magnitude |p|
   inline bool cmpMomByP(const FourMomentum& a, const FourMomentum& b) {
     return a.vector3().mod() > b.vector3().mod();
   }
   /// Comparison to give a sorting by increasing 3-momentum magnitude |p|
   inline bool cmpMomByAscP(const FourMomentum& a, const FourMomentum& b) {
     return a.vector3().mod() < b.vector3().mod();
   }
 
   /// Comparison to give a sorting by decreasing transverse energy
   inline bool cmpMomByEt(const FourMomentum& a, const FourMomentum& b) {
     return a.Et() > b.Et();
   }
   /// Comparison to give a sorting by increasing transverse energy
   inline bool cmpMomByAscEt(const FourMomentum& a, const FourMomentum& b) {
     return a.Et() < b.Et();
   }
 
   /// Comparison to give a sorting by decreasing energy
   inline bool cmpMomByE(const FourMomentum& a, const FourMomentum& b) {
     return a.E() > b.E();
   }
   /// Comparison to give a sorting by increasing energy
   inline bool cmpMomByAscE(const FourMomentum& a, const FourMomentum& b) {
     return a.E() < b.E();
   }
 
   /// Comparison to give a sorting by decreasing mass
   inline bool cmpMomByMass(const FourMomentum& a, const FourMomentum& b) {
     return a.mass() > b.mass();
   }
   /// Comparison to give a sorting by increasing mass
   inline bool cmpMomByAscMass(const FourMomentum& a, const FourMomentum& b) {
     return a.mass() < b.mass();
   }
 
   /// Comparison to give a sorting by increasing eta (pseudorapidity)
   inline bool cmpMomByEta(const FourMomentum& a, const FourMomentum& b) {
     return a.eta() < b.eta();
   }
   /// Comparison to give a sorting by increasing eta (pseudorapidity)
   /// @deprecated Use cmpMomByEta
   DEPRECATED("Use cmpMomByEta")
   inline bool cmpMomByAscPseudorapidity(const FourMomentum& a, const FourMomentum& b) {
     return cmpMomByEta(a,b);
   }
 
   /// Comparison to give a sorting by decreasing eta (pseudorapidity)
   inline bool cmpMomByDescEta(const FourMomentum& a, const FourMomentum& b) {
     return a.pseudorapidity() > b.pseudorapidity();
   }
   /// Comparison to give a sorting by decreasing eta (pseudorapidity)
   /// @deprecated Use cmpMomByDescEta
   DEPRECATED("Use cmpMomByDescEta")
   inline bool cmpMomByDescPseudorapidity(const FourMomentum& a, const FourMomentum& b) {
     return cmpMomByDescEta(a,b);
   }
 
   /// Comparison to give a sorting by increasing absolute eta (pseudorapidity)
   inline bool cmpMomByAbsEta(const FourMomentum& a, const FourMomentum& b) {
     return fabs(a.eta()) < fabs(b.eta());
   }
   /// Comparison to give a sorting by increasing absolute eta (pseudorapidity)
   /// @deprecated Use cmpMomByAbsEta
   DEPRECATED("Use cmpMomByAbsEta")
   inline bool cmpMomByAscAbsPseudorapidity(const FourMomentum& a, const FourMomentum& b) {
     return cmpMomByAbsEta(a,b);
   }
 
   /// Comparison to give a sorting by decreasing absolute eta (pseudorapidity)
   inline bool cmpMomByDescAbsEta(const FourMomentum& a, const FourMomentum& b) {
     return fabs(a.eta()) > fabs(b.eta());
   }
   /// Comparison to give a sorting by decreasing absolute eta (pseudorapidity)
   /// @deprecated Use cmpMomByDescAbsEta
   DEPRECATED("Use cmpMomByDescAbsEta")
   inline bool cmpMomByDescAbsPseudorapidity(const FourMomentum& a, const FourMomentum& b) {
     return cmpMomByDescAbsEta(a,b);
   }
 
   /// Comparison to give a sorting by increasing rapidity
   inline bool cmpMomByRap(const FourMomentum& a, const FourMomentum& b) {
     return a.rapidity() < b.rapidity();
   }
   /// Comparison to give a sorting by increasing rapidity
   /// @deprecated Use cmpMomByRap
   DEPRECATED("Use cmpMomByRap")
   inline bool cmpMomByAscRapidity(const FourMomentum& a, const FourMomentum& b) {
     return cmpMomByRap(a,b);
   }
 
   /// Comparison to give a sorting by decreasing rapidity
   inline bool cmpMomByDescRap(const FourMomentum& a, const FourMomentum& b) {
     return a.rapidity() > b.rapidity();
   }
   /// Comparison to give a sorting by decreasing rapidity
   /// @deprecated Use cmpMomByDescRap
   DEPRECATED("Use cmpMomByDescRap")
   inline bool cmpMomByDescRapidity(const FourMomentum& a, const FourMomentum& b) {
     return cmpMomByDescRap(a,b);
   }
 
   /// Comparison to give a sorting by increasing absolute rapidity
   inline bool cmpMomByAbsRap(const FourMomentum& a, const FourMomentum& b) {
     return fabs(a.rapidity()) < fabs(b.rapidity());
   }
   /// Comparison to give a sorting by increasing absolute rapidity
   /// @deprecated Use cmpMomByAbsRap
   DEPRECATED("Use cmpMomByAbsRap")
   inline bool cmpMomByAscAbsRapidity(const FourMomentum& a, const FourMomentum& b) {
     return cmpMomByAbsRap(a,b);
   }
 
   /// Comparison to give a sorting by decreasing absolute rapidity
   inline bool cmpMomByDescAbsRap(const FourMomentum& a, const FourMomentum& b) {
     return fabs(a.rapidity()) > fabs(b.rapidity());
   }
   /// Comparison to give a sorting by decreasing absolute rapidity
   /// @deprecated Use cmpMomByDescAbsRap
   DEPRECATED("Use cmpMomByDescAbsRap")
   inline bool cmpMomByDescAbsRapidity(const FourMomentum& a, const FourMomentum& b) {
     return cmpMomByDescAbsRap(a,b);
   }
 
   /// @todo Add sorting by phi [0..2PI]
 
 
   /// Sort a container of momenta by cmp and return by reference for non-const inputs
   template<typename MOMS, typename CMP>
   inline MOMS& sortBy(MOMS& pbs, const CMP& cmp) {
     std::sort(pbs.begin(), pbs.end(), cmp);
     return pbs;
   }
   /// Sort a container of momenta by cmp and return by value for const inputs
   template<typename MOMS, typename CMP>
   inline MOMS sortBy(const MOMS& pbs, const CMP& cmp) {
     MOMS rtn = pbs;
     std::sort(rtn.begin(), rtn.end(), cmp);
     return rtn;
   }
 
   /// Sort a container of momenta by pT (decreasing) and return by reference for non-const inputs
   template<typename MOMS>
   inline MOMS& sortByPt(MOMS& pbs) {
     return sortBy(pbs, cmpMomByPt);
   }
   /// Sort a container of momenta by pT (decreasing) and return by value for const inputs
   template<typename MOMS>
   inline MOMS sortByPt(const MOMS& pbs) {
     return sortBy(pbs, cmpMomByPt);
   }
 
   /// Sort a container of momenta by E (decreasing) and return by reference for non-const inputs
   template<typename MOMS>
   inline MOMS& sortByE(MOMS& pbs) {
     return sortBy(pbs, cmpMomByE);
   }
   /// Sort a container of momenta by E (decreasing) and return by value for const inputs
   template<typename MOMS>
   inline MOMS sortByE(const MOMS& pbs) {
     return sortBy(pbs, cmpMomByE);
   }
 
   /// Sort a container of momenta by Et (decreasing) and return by reference for non-const inputs
   template<typename MOMS>
   inline MOMS& sortByEt(MOMS& pbs) {
     return sortBy(pbs, cmpMomByEt);
   }
   /// Sort a container of momenta by Et (decreasing) and return by value for const inputs
   template<typename MOMS>
   inline MOMS sortByEt(const MOMS& pbs) {
     return sortBy(pbs, cmpMomByEt);
   }
 
   //@}
 
 
   //////////////////////////////////////////////////////
 
 
   /// @name 4-vector string representations
   //@{
 
   /// Render a 4-vector as a string.
   inline std::string toString(const FourVector& lv) {
     ostringstream out;
     out << "("  << (fabs(lv.t()) < 1E-30 ? 0.0 : lv.t())
         << "; " << (fabs(lv.x()) < 1E-30 ? 0.0 : lv.x())
         << ", " << (fabs(lv.y()) < 1E-30 ? 0.0 : lv.y())
         << ", " << (fabs(lv.z()) < 1E-30 ? 0.0 : lv.z())
         << ")";
     return out.str();
   }
 
   /// Write a 4-vector to an ostream.
   inline std::ostream& operator<<(std::ostream& out, const FourVector& lv) {
     out << toString(lv);
     return out;
   }
 
   //@}
 
 
   /// @name Typedefs of vector types to short names
   /// @todo Switch canonical and alias names
   //@{
   //typedef FourVector V4; //< generic
   typedef FourVector X4; //< spatial
   typedef FourMomentum P4; //< momentum
   //@}
 
 
 }
 
 #endif
diff --git a/include/Rivet/ParticleBase.hh b/include/Rivet/ParticleBase.hh
--- a/include/Rivet/ParticleBase.hh
+++ b/include/Rivet/ParticleBase.hh
@@ -1,280 +1,284 @@
 #ifndef RIVET_ParticleBase_HH
 #define RIVET_ParticleBase_HH
 
 #include "Rivet/Config/RivetCommon.hh"
 #include "Rivet/Jet.fhh"
 #include "Rivet/Tools/Cuts.fhh"
 #include "Rivet/Math/Vectors.hh"
 
 namespace Rivet {
 
 
   /// @brief Base class for particle-like things like Particle and Jet
   class ParticleBase {
   public:
 
     /// Default constructor
     ParticleBase() { }
 
     /// Virtual destructor
     virtual ~ParticleBase() { }
 
 
     /// @name Effective momentum accessors
     //@{
 
     /// Get equivalent single momentum four-vector (const).
     virtual const FourMomentum& momentum() const = 0;
     /// Get equivalent single momentum four-vector (const) (alias).
     const FourMomentum& mom() const { return momentum(); };
 
     /// Cast operator for conversion to FourMomentum
     operator const FourMomentum& () const { return momentum(); }
 
     //@}
 
 
     /// @name Convenience access to the effective 4-vector properties
     //@{
 
     /// Get the energy directly.
     double E() const { return momentum().E(); }
     /// Get the energy directly (alias).
     double energy() const { return momentum().E(); }
 
     /// Get the energy-squared.
     double E2() const { return momentum().E2(); }
     /// Get the energy-squared (alias).
     double energy2() const { return momentum().E2(); }
 
     /// Get the \f$ p_T \f$ directly.
     double pt() const { return momentum().pt(); }
     /// Get the \f$ p_T \f$ directly (alias).
     double pT() const { return pt(); }
     /// Get the \f$ p_T \f$ directly (alias).
     double perp() const { return pt(); }
 
     /// Get the \f$ p_T^2 \f$ directly.
     double pt2() const { return momentum().pt2(); }
     /// Get the \f$ p_T^2 \f$ directly (alias).
     double pT2() const { return pt2(); }
     /// Get the \f$ p_T^2 \f$ directly (alias).
     double perp2() const { return pt2(); }
 
     /// Get the \f$ E_T \f$ directly.
     double Et() const { return momentum().Et(); }
     /// Get the \f$ E_T^2 \f$ directly.
     double Et2() const { return momentum().Et2(); }
 
     /// Get the mass directly.
     double mass() const { return momentum().mass(); }
     /// Get the mass**2 directly.
     double mass2() const { return momentum().mass2(); }
 
     /// Get the \f$ \eta \f$ directly.
     double pseudorapidity() const { return momentum().eta(); }
     /// Get the \f$ \eta \f$ directly (alias).
     double eta() const { return momentum().eta(); }
     /// Get the \f$ |\eta| \f$ directly.
     double abspseudorapidity() const { return momentum().abspseudorapidity(); }
     /// Get the \f$ |\eta| \f$ directly (alias).
     double abseta() const { return momentum().abseta(); }
 
     /// Get the \f$ y \f$ directly.
     double rapidity() const { return momentum().rapidity(); }
     /// Get the \f$ y \f$ directly (alias).
     double rap() const { return momentum().rapidity(); }
     /// Get the \f$ |y| \f$ directly.
     double absrapidity() const { return momentum().absrapidity(); }
     /// Get the \f$ |y| \f$ directly (alias).
     double absrap() const { return momentum().absrap(); }
 
     /// Azimuthal angle \f$ \phi \f$.
     double azimuthalAngle(const PhiMapping mapping=ZERO_2PI) const { return momentum().azimuthalAngle(mapping); }
     /// Get the \f$ \phi \f$ directly.
     double phi(const PhiMapping mapping=ZERO_2PI) const { return momentum().phi(mapping); }
 
-
     /// Get the 3-momentum directly.
     Vector3 p3() const { return momentum().vector3(); }
     /// Get the 3-momentum magnitude directly.
     double p() const { return momentum().p(); }
     /// Get the 3-momentum magnitude-squared directly.
     double p2() const { return momentum().p2(); }
 
+    /// Get the transverse 3-momentum directly.
+    Vector3 ptvec() const { return momentum().ptvec(); }
+    /// Get the transverse 3-momentum directly.
+    Vector3 pTvec() const { return momentum().pTvec(); }
+
     /// x component of momentum.
     double px() const { return momentum().x(); }
     /// y component of momentum.
     double py() const { return momentum().y(); }
     /// z component of momentum.
     double pz() const { return momentum().z(); }
 
     /// x component of momentum, squared.
     double px2() const { return momentum().x2(); }
     /// y component of momentum, squared.
     double py2() const { return momentum().y2(); }
     /// z component of momentum, squared.
     double pz2() const { return momentum().z2(); }
 
     /// Angle subtended by the 3-vector and the z-axis.
     double polarAngle() const { return momentum().polarAngle(); }
     /// Synonym for polarAngle.
     double theta() const { return momentum().theta(); }
 
     /// Angle between this vector and another
     double angle(const ParticleBase& v) const { return momentum().angle(v.momentum()); }
     /// Angle between this vector and another
     double angle(const FourVector& v) const { return momentum().angle(v); }
     /// Angle between this vector and another (3-vector)
     double angle(const Vector3& v3) const { return momentum().angle(v3); }
 
     //@}
 
   };
 
 
   /// @name deltaR, deltaEta, deltaPhi functions specifically for Particle/Jet arguments
   //@{
 
   inline double deltaR(const ParticleBase& p1, const ParticleBase& p2,
                        RapScheme scheme = PSEUDORAPIDITY) {
     return deltaR(p1.momentum(), p2.momentum(), scheme);
   }
 
   inline double deltaR(const ParticleBase& p, const FourMomentum& v,
                        RapScheme scheme = PSEUDORAPIDITY) {
     return deltaR(p.momentum(), v, scheme);
   }
 
   inline double deltaR(const ParticleBase& p, const FourVector& v,
                        RapScheme scheme = PSEUDORAPIDITY) {
     return deltaR(p.momentum(), v, scheme);
   }
 
   inline double deltaR(const ParticleBase& p, const Vector3& v) {
     return deltaR(p.momentum(), v);
   }
 
   inline double deltaR(const ParticleBase& p, double eta, double phi) {
     return deltaR(p.momentum(), eta, phi);
   }
 
   inline double deltaR(const FourMomentum& v, const ParticleBase& p,
                        RapScheme scheme = PSEUDORAPIDITY) {
     return deltaR(v, p.momentum(), scheme);
   }
 
   inline double deltaR(const FourVector& v, const ParticleBase& p,
                        RapScheme scheme = PSEUDORAPIDITY) {
     return deltaR(v, p.momentum(), scheme);
   }
 
   inline double deltaR(const Vector3& v, const ParticleBase& p) {
     return deltaR(v, p.momentum());
   }
 
   inline double deltaR(double eta, double phi, const ParticleBase& p) {
     return deltaR(eta, phi, p.momentum());
   }
 
 
   inline double deltaPhi(const ParticleBase& p1, const ParticleBase& p2) {
     return deltaPhi(p1.momentum(), p2.momentum());
   }
 
   inline double deltaPhi(const ParticleBase& p, const FourMomentum& v) {
     return deltaPhi(p.momentum(), v);
   }
 
   inline double deltaPhi(const ParticleBase& p, const FourVector& v) {
     return deltaPhi(p.momentum(), v);
   }
 
   inline double deltaPhi(const ParticleBase& p, const Vector3& v) {
     return deltaPhi(p.momentum(), v);
   }
 
   inline double deltaPhi(const ParticleBase& p, double phi) {
     return deltaPhi(p.momentum(), phi);
   }
 
   inline double deltaPhi(const FourMomentum& v, const ParticleBase& p) {
     return deltaPhi(v, p.momentum());
   }
 
   inline double deltaPhi(const FourVector& v, const ParticleBase& p) {
     return deltaPhi(v, p.momentum());
   }
 
   inline double deltaPhi(const Vector3& v, const ParticleBase& p) {
     return deltaPhi(v, p.momentum());
   }
 
   inline double deltaPhi(double phi, const ParticleBase& p) {
     return deltaPhi(phi, p.momentum());
   }
 
 
   inline double deltaEta(const ParticleBase& p1, const ParticleBase& p2) {
     return deltaEta(p1.momentum(), p2.momentum());
   }
 
   inline double deltaEta(const ParticleBase& p, const FourMomentum& v) {
     return deltaEta(p.momentum(), v);
   }
 
   inline double deltaEta(const ParticleBase& p, const FourVector& v) {
     return deltaEta(p.momentum(), v);
   }
 
   inline double deltaEta(const ParticleBase& p, const Vector3& v) {
     return deltaEta(p.momentum(), v);
   }
 
   inline double deltaEta(const ParticleBase& p, double eta) {
     return deltaEta(p.momentum(), eta);
   }
 
   inline double deltaEta(const FourMomentum& v, const ParticleBase& p) {
     return deltaEta(v, p.momentum());
   }
 
   inline double deltaEta(const FourVector& v, const ParticleBase& p) {
     return deltaEta(v, p.momentum());
   }
 
   inline double deltaEta(const Vector3& v, const ParticleBase& p) {
     return deltaEta(v, p.momentum());
   }
 
   inline double deltaEta(double eta, const ParticleBase& p) {
     return deltaEta(eta, p.momentum());
   }
 
 
   inline double deltaRap(const ParticleBase& p1, const ParticleBase& p2) {
     return deltaRap(p1.momentum(), p2.momentum());
   }
 
   inline double deltaRap(const ParticleBase& p, const FourMomentum& v) {
     return deltaRap(p.momentum(), v);
   }
 
   inline double deltaRap(const ParticleBase& p, double y) {
     return deltaRap(p.momentum(), y);
   }
 
   inline double deltaRap(const FourMomentum& v, const ParticleBase& p) {
     return deltaRap(v, p.momentum());
   }
 
   inline double deltaRap(double y, const ParticleBase& p) {
     return deltaRap(y, p.momentum());
   }
 
   //@}
 
 
 }
 
 #endif
diff --git a/include/Rivet/ProjectionApplier.hh b/include/Rivet/ProjectionApplier.hh
--- a/include/Rivet/ProjectionApplier.hh
+++ b/include/Rivet/ProjectionApplier.hh
@@ -1,192 +1,200 @@
 // -*- C++ -*-
 #ifndef RIVET_ProjectionApplier_HH
 #define RIVET_ProjectionApplier_HH
 
 #include "Rivet/Config/RivetCommon.hh"
 #include "Rivet/Projection.fhh"
 #include "Rivet/ProjectionHandler.hh"
 #include "Rivet/Tools/Logging.hh"
 
 namespace Rivet {
 
 
   // Forward declarations
   class Event;
 
 
   /// @brief Common base class for Projection and Analysis, used for internal polymorphism
   ///
   /// Empty interface used for storing Projection and Analysis pointers in the
   /// same container (used by the ProjectionHandler)
   class ProjectionApplier {
   public:
 
     // The proj handler needs access to reset the _allowProjReg flag before calling a.init()
     // friend class ProjectionHandler;
 
     /// Constructor
     ProjectionApplier();
 
     // Virtual destructor: ensure that inheritance is possible.
     virtual ~ProjectionApplier();
 
 
     /// @name Metadata functions
     //@{
     /// Get the name of this Projection or Analysis class
     virtual std::string name() const = 0;
     //@}
 
     /// @name Projection "getting" functions
     //@{
     /// Get the contained projections, including recursion.
     std::set<ConstProjectionPtr> getProjections() const {
       return getProjHandler().getChildProjections(*this, ProjectionHandler::DEEP);
     }
 
     /// Get the named projection, specifying return type via a template argument.
     /// @todo Add SFINAE to require that PROJ inherit from Projection
     template <typename PROJ>
     const PROJ& getProjection(const std::string& name) const {
       const Projection& p = getProjHandler().getProjection(*this, name);
       return pcast<PROJ>(p);
     }
     /// Get the named projection, specifying return type via a template argument (user-facing alias).
     /// @todo Add SFINAE to require that PROJ inherit from Projection
     template <typename PROJ>
     const PROJ& get(const std::string& name) const { return getProjection<PROJ>(name); }
 
     /// Get the named projection (non-templated, so returns as a reference to a
     /// Projection base class).
     const Projection& getProjection(const std::string& name) const {
       return getProjHandler().getProjection(*this, name);
     }
 
     //@}
 
 
     /// @name Projection applying functions
     //@{
 
     /// Apply the supplied projection on event @a evt.
     template <typename PROJ>
     /// @todo Add SFINAE to require that PROJ inherit from Projection
     const PROJ& applyProjection(const Event& evt, const Projection& proj) const {
       return pcast<PROJ>(_applyProjection(evt, proj));
     }
     /// Apply the supplied projection on event @a evt (user-facing alias).
     /// @todo Add SFINAE to require that PROJ inherit from Projection
     template <typename PROJ>
     const PROJ& apply(const Event& evt, const Projection& proj) const { return applyProjection<PROJ>(evt, proj); }
 
 
     /// Apply the supplied projection on event @a evt.
     template <typename PROJ>
     /// @todo Add SFINAE to require that PROJ inherit from Projection
     const PROJ& applyProjection(const Event& evt, const PROJ& proj) const {
       return pcast<PROJ>(_applyProjection(evt, proj));
     }
     /// Apply the supplied projection on event @a evt (user-facing alias).
     /// @todo Add SFINAE to require that PROJ inherit from Projection
     template <typename PROJ>
     const PROJ& apply(const Event& evt, const PROJ& proj) const { return applyProjection<PROJ>(evt, proj); }
 
 
     /// Apply the named projection on event @a evt.
     /// @todo Add SFINAE to require that PROJ inherit from Projection
     template <typename PROJ>
     const PROJ& applyProjection(const Event& evt, const std::string& name) const {
       return pcast<PROJ>(_applyProjection(evt, name));
     }
     /// Apply the supplied projection on event @a evt (user-facing alias).
     /// @todo Add SFINAE to require that PROJ inherit from Projection
     template <typename PROJ>
     const PROJ& apply(const Event& evt, const std::string& name) const { return applyProjection<PROJ>(evt, name); }
+    /// Apply the supplied projection on event @a evt (convenience arg-reordering alias).
+    /// @todo Add SFINAE to require that PROJ inherit from Projection
+    template <typename PROJ>
+    const PROJ& apply(const std::string& name, const Event& evt) const { return applyProjection<PROJ>(evt, name); }
 
     //@}
 
 
     /// Mark object as owned by the _projhandler
     /// @todo Huh? What's this for?
     void markAsOwned() const { _owned = true; }
 
 
   protected:
 
     Log& getLog() const {
       return Log::getLog("Rivet.ProjectionHandler");
     }
 
 
     /// Get a reference to the ProjectionHandler for this thread.
     ProjectionHandler& getProjHandler() const {
       return _projhandler;
     }
 
 
     /// @name Projection registration functions
     //@{
 
     /// @brief Register a contained projection
     ///
     /// The type of the argument is used to instantiate a new projection
     /// internally: this new object is applied to events rather than the
     /// argument object. Hence you are advised to only use locally-scoped
     /// Projection objects in your Projection and Analysis constructors, and to
     /// avoid polymorphism (e.g. handling @c ConcreteProjection via a pointer or
     /// reference to type @c Projection) since this will screw up the internal
     /// type management.
     ///
     /// @todo Add SFINAE to require that PROJ inherit from Projection
     template <typename PROJ>
     const PROJ& declareProjection(const PROJ& proj, const std::string& name) {
       const Projection& reg = _declareProjection(proj, name);
       const PROJ& rtn = dynamic_cast<const PROJ&>(reg);
       return rtn;
     }
 
     /// @brief Register a contained projection (user-facing version)
     /// @todo Add SFINAE to require that PROJ inherit from Projection
     template <typename PROJ>
     const PROJ& declare(const PROJ& proj, const std::string& name) { return declareProjection(proj, name); }
+    /// @brief Register a contained projection (user-facing, arg-reordered version)
+    /// @todo Add SFINAE to require that PROJ inherit from Projection
+    template <typename PROJ>
+    const PROJ& declare(const std::string& name, const PROJ& proj) { return declareProjection(proj, name); }
 
     /// @brief Register a contained projection (user-facing version)
     /// @deprecated Use declareProjection() or declare()
     /// @todo Add SFINAE to require that PROJ inherit from Projection
     template <typename PROJ>
     const PROJ& addProjection(const PROJ& proj, const std::string& name) { return declareProjection(proj, name); }
 
 
     /// Untemplated function to do the work...
     const Projection& _declareProjection(const Projection& proj, const std::string& name);
 
     //@}
 
 
     /// Non-templated version of string-based applyProjection, to work around
     /// header dependency issue.
     const Projection& _applyProjection(const Event& evt, const std::string& name) const;
 
     /// Non-templated version of proj-based applyProjection, to work around
     /// header dependency issue.
     const Projection& _applyProjection(const Event& evt, const Projection& proj) const;
 
 
     /// Flag to forbid projection registration in analyses until the init phase
     bool _allowProjReg;
 
 
   private:
 
     /// Mark object as owned by the _projhandler
     mutable bool _owned;
 
     /// Pointer to projection handler.
     ProjectionHandler& _projhandler;
 
   };
 
 
 }
 
 #endif
diff --git a/include/Rivet/Projections/MissingMomentum.hh b/include/Rivet/Projections/MissingMomentum.hh
--- a/include/Rivet/Projections/MissingMomentum.hh
+++ b/include/Rivet/Projections/MissingMomentum.hh
@@ -1,109 +1,142 @@
 // -*- C++ -*-
 #ifndef RIVET_MissingMomentum_HH
 #define RIVET_MissingMomentum_HH
 
 #include "Rivet/Config/RivetCommon.hh"
 #include "Rivet/Projection.hh"
 #include "Rivet/Projections/VisibleFinalState.hh"
 #include "Rivet/Particle.hh"
 #include "Rivet/Event.hh"
 
 namespace Rivet {
 
 
   /// @brief Calculate missing \f$ E \f$, \f$ E_\perp \f$ etc.
   ///
   /// Project out the total visible energy vector, allowing missing
   /// \f$ E \f$, \f$ E_\perp \f$ etc. to be calculated. Final state
   /// visibility restrictions are automatic.
   class MissingMomentum : public Projection {
   public:
 
     /// Default constructor with optional cut.
     MissingMomentum(const Cut& c=Cuts::open()) {
       setName("MissingMomentum");
       FinalState fs(c);
       addProjection(fs, "FS");
       addProjection(VisibleFinalState(fs), "VisibleFS");
     }
 
 
     /// Constructor.
     MissingMomentum(const FinalState& fs) {
       setName("MissingMomentum");
       addProjection(fs, "FS");
       addProjection(VisibleFinalState(fs), "VisibleFS");
     }
 
 
     /// Clone on the heap.
     DEFAULT_RIVET_PROJ_CLONE(MissingMomentum);
 
 
+    /// @name Visible/missing four-momentum functions
+    //@{
+
     /// The vector-summed visible four-momentum in the event.
     ///
     /// @note Reverse this vector with .reverse() to get the missing momentum vector.
     ///
     /// @note The optional @a mass argument is used to set a mass on the 4-vector. By
     ///   default it is zero (since missing momentum is really a 3-momentum quantity:
     ///   adding the E components of visible momenta just gives a huge mass)
     const FourMomentum visibleMomentum(double mass=0*GeV) const;
     /// Alias for visibleMomentum
     const FourMomentum visibleMom(double mass=0*GeV) const { return visibleMomentum(mass); }
 
     /// The missing four-momentum in the event, required to balance the final state.
     ///
     /// @note The optional @a mass argument is used to set a mass on the 4-vector. By
     ///   default it is zero (since missing momentum is really a 3-momentum quantity:
     ///   adding the E components of visible momenta just gives a huge mass)
     const FourMomentum missingMomentum(double mass=0*GeV) const { return visibleMomentum(mass).reverse(); }
     /// Alias for missingMomentum
     const FourMomentum missingMom(double mass=0*GeV) const { return missingMomentum(mass); }
 
+    //@}
+
+
+    /// @name Transverse momentum functions
+    /// @note This may be what you want, even if the paper calls it "missing Et"!
+    //@{
+
+    /// The vector-summed visible transverse momentum in the event, as a 3-vector with z=0
+    /// @note Reverse this vector with operator- to get the missing pT vector.
+    const Vector3& vectorPt() const { return _vpt; }
+
+    /// The vector-summed missing transverse momentum in the event.
+    double missingPt() const { return vectorPt().mod(); }
+    // /// Alias for missingPt
+    // double mpt() const { return missingPt(); }
+
+    /// The scalar-summed visible transverse momentum in the event.
+    double scalarPt() const { return _spt; }
+    // /// Alias for scalarPt
+    // double spt() const { return scalarPt(); }
+
+    //@}
+
+
+    /// @name Transverse energy functions
+    /// @warning Despite the common names "MET" and "SET", what's often meant is the pT functions above!
+    //@{
+
     /// The vector-summed visible transverse energy in the event, as a 3-vector with z=0
     /// @note Reverse this vector with operator- to get the missing ET vector.
     const Vector3& vectorEt() const { return _vet; }
 
     /// The vector-summed missing transverse energy in the event.
     double missingEt() const { return vectorEt().mod(); }
     /// Alias for missingEt
     double met() const { return missingEt(); }
 
     /// The scalar-summed visible transverse energy in the event.
     double scalarEt() const { return _set; }
     /// Alias for scalarEt
     double set() const { return scalarEt(); }
 
+    //@}
+
+
+  public:
+
+    /// Clear the projection results.
+    void clear();
+
 
   protected:
 
     /// Apply the projection to the event.
     void project(const Event& e);
 
     /// Compare projections.
     int compare(const Projection& p) const;
 
 
-  public:
-
-    /// Clear the projection results.
-    void clear();
-
-
   private:
 
     /// The total visible momentum
     FourMomentum _momentum;
 
     /// Scalar transverse energy
-    double _set;
+    double _set, _spt;
 
     /// Vector transverse energy
-    Vector3 _vet;
+    Vector3 _vet, _vpt;
 
   };
 
 
 }
 
 #endif
diff --git a/include/Rivet/Tools/Cutflow.hh b/include/Rivet/Tools/Cutflow.hh
--- a/include/Rivet/Tools/Cutflow.hh
+++ b/include/Rivet/Tools/Cutflow.hh
@@ -1,174 +1,210 @@
 #ifndef RIVET_Cutflow_HH
 #define RIVET_Cutflow_HH
 
 #include "Rivet/Tools/Utils.hh"
 
 namespace Rivet {
 
 
   /// A tracker of numbers & fractions of events passing sequential cuts
   struct Cutflow {
 
     /// @brief Default constructor
     ///
     /// Does nothing! Just to allow storage in STL containers and use as a member variable without using the init list
     Cutflow() {}
 
     /// Proper constructor
     Cutflow(const string& cfname, const vector<string>& cutnames)
       : name(cfname), ncuts(cutnames.size()), cuts(cutnames), counts(ncuts+1, 0)
     {  }
 
     /// @brief Fill the pre-cut counter
-    void fillinit() {
-      counts[0] += 1;
+    void fillinit(double weight=1.) {
+      counts[0] += weight;
     }
 
     /// @brief Fill the @a {icut}'th post-cut counter
     ///
     /// @note Returns the cut result to allow 'side-effect' cut-flow filling in an if-statement
-    bool fill(size_t icut, bool cutresult) {
-      if (cutresult) counts[icut+1] += 1;
+    bool fill(size_t icut, bool cutresult, double weight=1.) {
+      if (cutresult) counts[icut+1] += weight;
       return cutresult;
     }
 
     /// @brief Fill all cut-state counters from an Ncut-element results vector
     ///
     /// This function is to be used to fill all of an event's pre- and post-cut
     /// state counters at once, including the incoming event counter. It must not be
     /// mixed with calls to the @c fill(size_t, bool) and @c fillinit() methods,
     /// or double-counting will occur.
     ///
     /// @note Returns the overall cut result to allow 'side-effect' cut-flow filling in an if-statement
-    bool fill(const vector<bool>& cutresults) {
+    bool fill(const vector<bool>& cutresults, double weight=1.) {
       if (cutresults.size() != ncuts)
         throw RangeError("Number of filled cut results needs to match the Cutflow construction");
-      counts[0] += 1;
+      counts[0] += weight;
       for (size_t i = 0; i < ncuts; ++i) {
-        if (cutresults[i]) counts[i+1] += 1; else break;
+        if (cutresults[i]) counts[i+1] += weight; else break;
       }
       return all(cutresults);
     }
 
+    /// @todo Add a fillnext(), keeping track of current ifill
+
+    /// @todo Add a fillhead() (or vector fillnext()?)
+
     /// @brief Fill the N trailing post-cut counters, when supplied with an N-element results vector
     ///
     /// The @a cutresults vector represents the boolean results of the last N cuts. This function
     /// allows mixing of cut-flow filling with higher-level analyze() function escapes such as
     /// the vetoEvent directive. The initial state (state 0) is not incremented.
     ///
     /// @note Returns the overall cut result to allow 'side-effect' cut-flow filling in an if-statement
-    bool filltail(const vector<bool>& cutresults) {
+    bool filltail(const vector<bool>& cutresults, double weight=1.) {
       if (cutresults.size() > ncuts)
         throw RangeError("Number of filled cut results needs to match the Cutflow construction");
       const size_t offset = counts.size() - cutresults.size();
       for (size_t i = 0; i < cutresults.size(); ++i) {
-        if (cutresults[i]) counts[offset+i] += 1; else break;
+        if (cutresults[i]) counts[offset+i] += weight; else break;
       }
       return all(cutresults);
     }
 
+    /// Scale the cutflow weights by the given factor
+    void scale(double factor) {
+      for (double& x : counts) x *= factor;
+    }
+
     /// Create a string representation
     string str() const {
       stringstream ss;
+      ss << fixed << setprecision(1) << counts[0];
+      const size_t count0len = ss.str().length();
+      ss.str("");
       ss << name << " cut-flow:";
-      size_t maxlen = 0;
-      for (const string& t : cuts) maxlen = max(t.length(), maxlen);
+      size_t maxnamelen = 0;
+      for (const string& t : cuts)
+        maxnamelen = max(t.length(), maxnamelen);
       for (size_t i = 0; i <= ncuts; ++i) {
-        const int pcttot = (counts[0] == 0) ? -1 : int(100*counts[i]/double(counts[0]));
-        const int pctinc = (i == 0 || counts[i-1] == 0) ? -1 : int(100*counts[i]/double(counts[i-1]));
-        ss << "\n" << setw(maxlen+5) << left
-           << (i == 0 ? "" : "Pass "+cuts[i-1]) << "   " << right
-           << setw(toString(counts[0]).length()) << toString(counts[i]) << "    "
-           << setw(4) << (pcttot < 0 ? "- " : toString(pcttot)+"%") << "    "
-           << setw(4) << (pctinc < 0 ? "- " : toString(pctinc)+"%");
+        const int pcttot = (counts[0] == 0) ? -1 : round(100*counts[i]/double(counts[0]));
+        const int pctinc = (i == 0 || counts[i-1] == 0) ? -1 : round(100*counts[i]/double(counts[i-1]));
+        stringstream ss2;
+        ss2 << fixed << setprecision(1) << counts[i];
+        const string countstr = ss2.str(); ss2.str("");
+        ss2 << fixed << setprecision(2) << pcttot << "%";
+        const string pcttotstr = ss2.str(); ss2.str("");
+        ss2 << fixed << setprecision(2) << pctinc << "%";
+        const string pctincstr = ss2.str();
+        ss << "\n"
+           << setw(maxnamelen+5) << left << (i == 0 ? "" : "Pass "+cuts[i-1]) << "   "
+           << setw(count0len) << right << countstr << "    "
+           << setw(4) << right << (pcttot < 0 ? "- " : pcttotstr) << "    "
+           << setw(4) << right << (pctinc < 0 ? "- " : pctincstr);
       }
       return ss.str();
     }
 
     /// Print string representation to a stream
     void print(ostream& os) const {
       os << str() << flush;
     }
 
     string name;
     size_t ncuts;
     vector<string> cuts;
-    vector<int> counts;
+    vector<double> counts;
 
   };
 
+
   /// Print a Cutflow to a stream
-  ostream& operator << (ostream& os, const Cutflow& cf) {
+  inline ostream& operator << (ostream& os, const Cutflow& cf) {
     return os << cf.str();
   }
 
 
 
   /// A container for several Cutflow objects, with some convenient batch access
   struct Cutflows {
 
     /// Do-nothing default constructor
     Cutflows() {  }
 
     /// Populating constructor
     Cutflows(const vector<Cutflow>& cutflows) : cfs(cutflows) {  }
 
     /// Append a provided Cutflow to the list
     void addCutflow(const Cutflow& cf) {
       cfs.push_back(cf);
     }
 
     /// Append a newly constructed Cutflow to the list
     void addCutflow(const string& cfname, const vector<string>& cutnames) {
       cfs.push_back(Cutflow(cfname, cutnames));
     }
 
     /// Access the @a i'th Cutflow
     Cutflow& operator [] (size_t i) { return cfs[i]; }
     /// Access the @a i'th Cutflow (const)
     const Cutflow& operator [] (size_t i) const { return cfs[i]; }
 
     /// Access the Cutflow whose name is @a name
     Cutflow& operator [] (const string& name) {
       for (Cutflow& cf : cfs)
         if (cf.name == name) return cf;
       throw UserError("Requested cut-flow name '" + name + "' does not exist");
     }
     /// Access the @a i'th Cutflow (const)
     const Cutflow& operator [] (const string& name) const {
       for (const Cutflow& cf : cfs)
         if (cf.name == name) return cf;
       throw UserError("Requested cut-flow name '" + name + "' does not exist");
     }
 
-    /// Fill the pre-cuts state counter for all contained Cutflows
-    void fillinit() {
-      for (Cutflow& cf : cfs) cf.fillinit();
+    /// Fill the pre-cuts state counter for all contained {Cutflow}s
+    void fillinit(double weight=1.) {
+      for (Cutflow& cf : cfs) cf.fillinit(weight);
+    }
+
+    /// @brief Fill the @a {icut}'th post-cut counter with the same result for all {Cutflow}s
+    bool fill(size_t icut, bool cutresult, double weight=1.) {
+      for (Cutflow& cf : cfs) cf.fill(icut, cutresult, weight);
+      return cutresult;
+    }
+
+    /// @todo Add a fillnext(), keeping track of current ifill
+
+    /// @todo Add a fillhead() (or vector fillnext()?)
+
+    /// Scale the contained {Cutflow}s by the given factor
+    void scale(double factor) {
+      for (Cutflow& cf : cfs) cf.scale(factor);
     }
 
     /// Create a string representation
     string str() const {
       stringstream ss;
       for (const Cutflow& cf : cfs)
         ss << cf << "\n\n";
       return ss.str();
     }
 
     /// Print string representation to a stream
     void print(ostream& os) const {
       os << str() << flush;
     }
 
     vector<Cutflow> cfs;
 
   };
 
   /// Print a Cutflows to a stream
-  ostream& operator << (ostream& os, const Cutflows& cfs) {
+  inline ostream& operator << (ostream& os, const Cutflows& cfs) {
     return os << cfs.str();
   }
 
 
 }
 
 #endif
diff --git a/include/Rivet/Tools/ParticleBaseUtils.hh b/include/Rivet/Tools/ParticleBaseUtils.hh
--- a/include/Rivet/Tools/ParticleBaseUtils.hh
+++ b/include/Rivet/Tools/ParticleBaseUtils.hh
@@ -1,341 +1,347 @@
 #ifndef RIVET_PARTICLEBASEUTILS_HH
 #define RIVET_PARTICLEBASEUTILS_HH
 
 #include "Rivet/ParticleBase.hh"
 
 namespace Rivet {
 
 
   /// @name ParticleBase classifier -> bool functors
   /// @todo Move to FourMomentum functions
   ///
   /// To be passed to any() or all() e.g. any(jets, DeltaRLess(electron, 0.4))
   //@{
 
   /// Base type for Particle -> bool functors
   struct BoolParticleBaseFunctor {
     virtual bool operator()(const ParticleBase& p) const = 0;
   };
 
   /// Transverse momentum greater-than functor
   struct PtGtr : public BoolParticleBaseFunctor {
     PtGtr(double pt) : ptcut(pt) { }
     bool operator()(const ParticleBase& p) const { return p.pT() > ptcut; }
     double ptcut;
   };
   using pTGtr = PtGtr;
   using ptGtr = PtGtr;
 
   /// Transverse momentum less-than functor
   struct PtLess : public BoolParticleBaseFunctor {
     PtLess(double pt) : ptcut(pt) { }
     bool operator()(const ParticleBase& p) const { return p.pT() < ptcut; }
     double ptcut;
   };
   using pTLess = PtLess;
   using ptLess = PtLess;
 
 
   /// Pseudorapidity greater-than functor
   struct EtaGtr : public BoolParticleBaseFunctor {
     EtaGtr(double eta) : etacut(eta) { }
     bool operator()(const ParticleBase& p) const { return p.eta() > etacut; }
     double etacut;
   };
   using etaGtr = EtaGtr;
 
   /// Pseudorapidity momentum less-than functor
   struct EtaLess : public BoolParticleBaseFunctor {
     EtaLess(double eta) : etacut(eta) { }
     bool operator()(const ParticleBase& p) const { return p.eta() < etacut; }
     double etacut;
   };
   using etaLess = EtaLess;
 
   /// Abs pseudorapidity greater-than functor
   struct AbsEtaGtr : public BoolParticleBaseFunctor {
     AbsEtaGtr(double abseta) : absetacut(abseta) { }
     bool operator()(const ParticleBase& p) const { return p.abseta() > absetacut; }
     double absetacut;
   };
   using absEtaGtr = AbsEtaGtr;
   using absetaGtr = AbsEtaGtr;
 
   /// Abs pseudorapidity momentum less-than functor
   struct AbsEtaLess : public BoolParticleBaseFunctor {
     AbsEtaLess(double abseta) : absetacut(abseta) { }
     bool operator()(const ParticleBase& p) const { return p.abseta() < absetacut; }
     double absetacut;
   };
   using absEtaLess = AbsEtaLess;
   using absetaLess = AbsEtaLess;
 
 
   /// Rapidity greater-than functor
   struct RapGtr : public BoolParticleBaseFunctor {
     RapGtr(double rap) : rapcut(rap) { }
     bool operator()(const ParticleBase& p) const { return p.rap() > rapcut; }
     double rapcut;
   };
   using rapGtr = RapGtr;
 
   /// Rapidity momentum less-than functor
   struct RapLess : public BoolParticleBaseFunctor {
     RapLess(double rap) : rapcut(rap) { }
     bool operator()(const ParticleBase& p) const { return p.rap() < rapcut; }
     double rapcut;
   };
   using rapLess = RapLess;
 
   /// Abs rapidity greater-than functor
   struct AbsRapGtr : public BoolParticleBaseFunctor {
     AbsRapGtr(double absrap) : absrapcut(absrap) { }
     bool operator()(const ParticleBase& p) const { return p.absrap() > absrapcut; }
     double absrapcut;
   };
   using absRapGtr = AbsRapGtr;
   using absrapGtr = AbsRapGtr;
 
   /// Abs rapidity momentum less-than functor
   struct AbsRapLess : public BoolParticleBaseFunctor {
     AbsRapLess(double absrap) : absrapcut(absrap) { }
     bool operator()(const ParticleBase& p) const { return p.absrap() < absrapcut; }
     double absrapcut;
   };
   using absRapLess = AbsRapLess;
   using absrapLess = AbsRapLess;
 
 
   /// @f$ \Delta R @f$ (with respect to another 4-momentum, @a vec) greater-than functor
   struct DeltaRGtr : public BoolParticleBaseFunctor {
     DeltaRGtr(const ParticleBase& vec, double dr, RapScheme scheme=PSEUDORAPIDITY)
       : refvec(vec.mom()), drcut(dr), rapscheme(scheme) { }
     DeltaRGtr(const FourMomentum& vec, double dr, RapScheme scheme=PSEUDORAPIDITY)
       : refvec(vec), drcut(dr), rapscheme(scheme) { }
     DeltaRGtr(const Vector3& vec, double dr)
       : drcut(dr), rapscheme(PSEUDORAPIDITY) { refvec.setPx(vec.x()); refvec.setPy(vec.y()); refvec.setPz(vec.z()); }
     bool operator()(const ParticleBase& p) const { return deltaR(p, refvec, rapscheme) > drcut; }
     FourMomentum refvec;
     double drcut;
     RapScheme rapscheme;
   };
   using deltaRGtr = DeltaRGtr;
 
   /// @f$ \Delta R @f$ (with respect to another 4-momentum, @a vec) less-than functor
   struct DeltaRLess : public BoolParticleBaseFunctor {
     DeltaRLess(const ParticleBase& vec, double dr, RapScheme scheme=PSEUDORAPIDITY)
       : refvec(vec.mom()), drcut(dr), rapscheme(scheme) { }
     DeltaRLess(const FourMomentum& vec, double dr, RapScheme scheme=PSEUDORAPIDITY)
       : refvec(vec), drcut(dr), rapscheme(scheme) { }
     DeltaRLess(const Vector3& vec, double dr)
       : drcut(dr), rapscheme(PSEUDORAPIDITY) { refvec.setPx(vec.x()); refvec.setPy(vec.y()); refvec.setPz(vec.z()); }
     bool operator()(const ParticleBase& p) const { return deltaR(p, refvec, rapscheme) < drcut; }
     FourMomentum refvec;
     double drcut;
     RapScheme rapscheme;
   };
   using deltaRLess = DeltaRLess;
 
 
   /// @f$ |\Delta \phi| @f$ (with respect to another momentum, @a vec) greater-than functor
   struct DeltaPhiGtr : public BoolParticleBaseFunctor {
     DeltaPhiGtr(const ParticleBase& vec, double dphi)
       : refvec(vec.p3()), dphicut(dphi) { }
     DeltaPhiGtr(const FourMomentum& vec, double dphi)
       : refvec(vec.p3()), dphicut(dphi) { }
     DeltaPhiGtr(const Vector3& vec, double dphi)
       : refvec(vec), dphicut(dphi) { }
     bool operator()(const ParticleBase& p) const { return deltaPhi(p, refvec) > dphicut; }
     Vector3 refvec;
     double dphicut;
   };
   using deltaPhiGtr = DeltaPhiGtr;
 
   /// @f$ |\Delta \phi| @f$ (with respect to another momentum, @a vec) less-than functor
   struct DeltaPhiLess : public BoolParticleBaseFunctor {
     DeltaPhiLess(const ParticleBase& vec, double dphi)
       : refvec(vec.p3()), dphicut(dphi) { }
     DeltaPhiLess(const FourMomentum& vec, double dphi)
       : refvec(vec.p3()), dphicut(dphi) { }
     DeltaPhiLess(const Vector3& vec, double dphi)
       : refvec(vec), dphicut(dphi) { }
     bool operator()(const ParticleBase& p) const { return deltaPhi(p, refvec) < dphicut; }
     Vector3 refvec;
     double dphicut;
   };
   using deltaPhiLess = DeltaPhiLess;
 
 
   /// @f$ |\Delta \eta| @f$ (with respect to another momentum, @a vec) greater-than functor
   struct DeltaEtaGtr : public BoolParticleBaseFunctor {
     DeltaEtaGtr(const ParticleBase& vec, double deta)
       : refvec(vec.p3()), detacut(deta) { }
     DeltaEtaGtr(const FourMomentum& vec, double deta)
       : refvec(vec.p3()), detacut(deta) { }
     DeltaEtaGtr(const Vector3& vec, double deta)
       : refvec(vec), detacut(deta) { }
     bool operator()(const ParticleBase& p) const { return std::abs(deltaEta(p, refvec)) > detacut; }
     Vector3 refvec;
     double detacut;
   };
   using deltaEtaGtr = DeltaEtaGtr;
 
   /// @f$ |\Delta \eta| @f$ (with respect to another momentum, @a vec) less-than functor
   struct DeltaEtaLess : public BoolParticleBaseFunctor {
     DeltaEtaLess(const ParticleBase& vec, double deta)
       : refvec(vec.p3()), detacut(deta) { }
     DeltaEtaLess(const FourMomentum& vec, double deta)
       : refvec(vec.p3()), detacut(deta) { }
     DeltaEtaLess(const Vector3& vec, double deta)
       : refvec(vec), detacut(deta) { }
     bool operator()(const ParticleBase& p) const { return std::abs(deltaEta(p, refvec)) < detacut; }
     Vector3 refvec;
     double detacut;
   };
   using deltaEtaLess = DeltaEtaLess;
 
 
   /// @f$ |\Delta y| @f$ (with respect to another momentum, @a vec) greater-than functor
   struct DeltaRapGtr : public BoolParticleBaseFunctor {
     DeltaRapGtr(const ParticleBase& vec, double drap)
       : refvec(vec.mom()), drapcut(drap) { }
     DeltaRapGtr(const FourMomentum& vec, double drap)
       : refvec(vec), drapcut(drap) { }
     bool operator()(const ParticleBase& p) const { return std::abs(deltaRap(p, refvec)) > drapcut; }
     FourMomentum refvec;
     double drapcut;
   };
   using deltaRapGtr = DeltaRapGtr;
 
   /// @f$ |\Delta y| @f$ (with respect to another momentum, @a vec) less-than functor
   struct DeltaRapLess : public BoolParticleBaseFunctor {
     DeltaRapLess(const ParticleBase& vec, double drap)
       : refvec(vec.mom()), drapcut(drap) { }
     DeltaRapLess(const FourMomentum& vec, double drap)
       : refvec(vec), drapcut(drap) { }
     bool operator()(const ParticleBase& p) const { return std::abs(deltaRap(p, refvec)) < drapcut; }
     FourMomentum refvec;
     double drapcut;
   };
   using deltaRapLess = DeltaRapLess;
 
   //@}
 
 
   /// @name ParticleBase comparison -> double functors
   /// @todo Move to FourMomentum functions
   ///
   /// To be passed to transform()any(jets, DeltaRLess(electron, 0.4))
   //@{
 
   /// Base type for Particle -> double functors
   struct DoubleParticleBaseFunctor {
     virtual double operator()(const ParticleBase& p) const = 0;
   };
 
   /// Calculator of @f$ \Delta R @f$ with respect to a given momentum
   struct DeltaRWRT : public DoubleParticleBaseFunctor {
-    DeltaRWRT(const ParticleBase& pb, RapScheme scheme=PSEUDORAPIDITY) : p(pb.mom()) {}
-    DeltaRWRT(const FourMomentum& p4, RapScheme scheme=PSEUDORAPIDITY) : p(p4) {}
+    DeltaRWRT(const ParticleBase& pb, RapScheme scheme=PSEUDORAPIDITY) : p(pb.mom()), rapscheme(scheme) {}
+    DeltaRWRT(const FourMomentum& p4, RapScheme scheme=PSEUDORAPIDITY) : p(p4), rapscheme(scheme) {}
     DeltaRWRT(const Vector3& p3) : p(p3.mod(), p3.x(), p3.y(), p3.z()), rapscheme(PSEUDORAPIDITY) {}
     double operator()(const ParticleBase& pb) const { return deltaR(p, pb, rapscheme); }
     double operator()(const FourMomentum& p4) const { return deltaR(p, p4, rapscheme); }
     double operator()(const Vector3& p3) const { return deltaR(p, p3); }
     const FourMomentum p;
     RapScheme rapscheme;
   };
   using deltaRWRT = DeltaRWRT;
 
   /// Calculator of @f$ \Delta \phi @f$ with respect to a given momentum
   struct DeltaPhiWRT : public DoubleParticleBaseFunctor {
     DeltaPhiWRT(const ParticleBase& pb) : p(pb.mom().vector3()) {}
     DeltaPhiWRT(const FourMomentum& p4) : p(p4.vector3()) {}
     DeltaPhiWRT(const Vector3& p3) : p(p3) {}
     double operator()(const ParticleBase& pb) const { return deltaPhi(p, pb); }
     double operator()(const FourMomentum& p4) const { return deltaPhi(p, p4); }
     double operator()(const Vector3& p3) const { return deltaPhi(p, p3); }
     const Vector3 p;
   };
   using deltaPhiWRT = DeltaPhiWRT;
 
   /// Calculator of @f$ \Delta \eta @f$ with respect to a given momentum
   struct DeltaEtaWRT : public DoubleParticleBaseFunctor {
     DeltaEtaWRT(const ParticleBase& pb) : p(pb.mom().vector3()) {}
     DeltaEtaWRT(const FourMomentum& p4) : p(p4.vector3()) {}
     DeltaEtaWRT(const Vector3& p3) : p(p3) {}
     double operator()(const ParticleBase& pb) const { return deltaEta(p, pb); }
     double operator()(const FourMomentum& p4) const { return deltaEta(p, p4); }
     double operator()(const Vector3& p3) const { return deltaEta(p, p3); }
     const Vector3 p;
   };
   using deltaEtaWRT = DeltaEtaWRT;
 
   /// Calculator of @f$ |\Delta \eta| @f$ with respect to a given momentum
   struct AbsDeltaEtaWRT : public DoubleParticleBaseFunctor {
     AbsDeltaEtaWRT(const ParticleBase& pb) : p(pb.mom().vector3()) {}
     AbsDeltaEtaWRT(const FourMomentum& p4) : p(p4.vector3()) {}
     AbsDeltaEtaWRT(const Vector3& p3) : p(p3) {}
     double operator()(const ParticleBase& pb) const { return fabs(deltaEta(p, pb)); }
     double operator()(const FourMomentum& p4) const { return fabs(deltaEta(p, p4)); }
     double operator()(const Vector3& p3) const { return fabs(deltaEta(p, p3)); }
     const Vector3 p;
   };
   using absDeltaEtaWRT = AbsDeltaEtaWRT;
 
   /// Calculator of @f$ \Delta y @f$ with respect to a given momentum
   struct DeltaRapWRT : public DoubleParticleBaseFunctor {
     DeltaRapWRT(const ParticleBase& pb) : p(pb.mom()) {}
     DeltaRapWRT(const FourMomentum& p4) : p(p4) {}
     double operator()(const ParticleBase& pb) const { return deltaRap(p, pb); }
     double operator()(const FourMomentum& p4) const { return deltaRap(p, p4); }
     const FourMomentum p;
   };
   using deltaRapWRT = DeltaRapWRT;
 
   /// Calculator of @f$ |\Delta y| @f$ with respect to a given momentum
   struct AbsDeltaRapWRT : public DoubleParticleBaseFunctor {
     AbsDeltaRapWRT(const ParticleBase& pb) : p(pb.mom()) {}
     AbsDeltaRapWRT(const FourMomentum& p4) : p(p4) {}
     double operator()(const ParticleBase& pb) const { return fabs(deltaRap(p, pb)); }
     double operator()(const FourMomentum& p4) const { return fabs(deltaRap(p, p4)); }
     const FourMomentum p;
   };
   using absDeltaRapWRT = AbsDeltaRapWRT;
 
   //@}
 
 
   /// @name Non-PID particle properties, via unbound functions
   /// @todo Mostly move to functions on FourMomentum
   //@{
 
   /// Unbound function access to momentum
   inline FourMomentum mom(const ParticleBase& p) { return p.mom(); }
 
   /// Unbound function access to p3
   inline Vector3 p3(const ParticleBase& p) { return p.p3(); }
 
+  /// Unbound function access to pTvec
+  inline Vector3 pTvec(const ParticleBase& p) { return p.pTvec(); }
+
   /// Unbound function access to p
   inline double p(const ParticleBase& p) { return p.p(); }
 
   /// Unbound function access to pT
   inline double pT(const ParticleBase& p) { return p.pT(); }
 
+  /// Unbound function access to ET
+  inline double Et(const ParticleBase& p) { return p.Et(); }
+
   /// Unbound function access to eta
   inline double eta(const ParticleBase& p) { return p.eta(); }
 
   /// Unbound function access to abseta
   inline double abseta(const ParticleBase& p) { return p.abseta(); }
 
   /// Unbound function access to rapidity
   inline double rap(const ParticleBase& p) { return p.rap(); }
 
   /// Unbound function access to abs rapidity
   inline double absrap(const ParticleBase& p) { return p.absrap(); }
 
   //@}
 
 
 }
 
 #endif
diff --git a/include/Rivet/Tools/SmearingFunctions.hh b/include/Rivet/Tools/SmearingFunctions.hh
--- a/include/Rivet/Tools/SmearingFunctions.hh
+++ b/include/Rivet/Tools/SmearingFunctions.hh
@@ -1,787 +1,805 @@
 // -*- C++ -*-
 #ifndef RIVET_SmearingFunctions_HH
 #define RIVET_SmearingFunctions_HH
 
 #include "Rivet/Particle.hh"
 #include "Rivet/Jet.hh"
 #include <random>
 
 namespace Rivet {
 
 
   /// @name Random number and filtering utils
   //@{
 
   /// Return a uniformly sampled random number between 0 and 1
+  /// @todo Where is it seeded?! Default = by timestamp?!
   /// @todo Move to (math?)utils
   /// @todo Need to isolate random generators to a single thread
   inline double rand01() {
     //return rand() / (double)RAND_MAX;
     static random_device rd;
     static mt19937 gen(rd());
     return generate_canonical<double, 10>(gen);
   }
 
+  /// Return true if Particle @a p is chosen to survive a random efficiency selection
   template <typename FN>
   inline bool efffilt(const Particle& p, FN& feff) {
     return rand01() < feff(p);
   }
+  /// Return true if Jet @a j is chosen to survive a random efficiency selection
   template <typename FN>
   inline bool efffilt(const Jet& j, FN& feff) {
     return rand01() < feff(j);
   }
 
+  /// A functor to return true if Particle @a p survives a random efficiency selection
   struct ParticleEffFilter {
     template <typename FN>
     ParticleEffFilter(const FN& feff) : _feff(feff) {}
-    bool operator () (const Particle& p) { return efffilt(p, _feff); }
+    bool operator () (const Particle& p)  const { return efffilt(p, _feff); }
   private:
-    const std::function<bool(const Particle&)> _feff;
+    const std::function<double(const Particle&)> _feff;
   };
   using particleEffFilter = ParticleEffFilter;
 
+  /// A functor to return true if Jet @a j survives a random efficiency selection
   struct JetEffFilter {
     template <typename FN>
     JetEffFilter(const FN& feff) : _feff(feff) {}
-    bool operator () (const Jet& j) { return efffilt(j, _feff); }
+    bool operator () (const Jet& j) const { return efffilt(j, _feff); }
   private:
-    const std::function<bool(const Jet&)> _feff;
+    const std::function<double(const Jet&)> _feff;
   };
   using jetEffFilter = JetEffFilter;
 
   //@}
 
 
   /// @name General particle & momentum efficiency and smearing functions
   //@{
 
   /// Take a Particle and return 0
   inline double PARTICLE_FN0(const Particle& p) { return 0; }
   /// Take a Particle and return 1
   inline double PARTICLE_FN1(const Particle& p) { return 1; }
   /// Take a Particle and return it unmodified
   inline Particle PARTICLE_SMEAR_IDENTITY(const Particle& p) { return p; }
 
 
   /// Take a FourMomentum and return 0
   inline double P4_FN0(const FourMomentum& p) { return 0; }
   /// Take a FourMomentum and return 1
   inline double P4_FN1(const FourMomentum& p) { return 1; }
   /// Take a FourMomentum and return it unmodified
   inline FourMomentum P4_SMEAR_IDENTITY(const FourMomentum& p) { return p; }
 
   /// Smear a FourMomentum's energy using a Gaussian of absolute width @a resolution
   /// @todo Also make jet versions that update/smear constituents?
   inline FourMomentum P4_SMEAR_E_GAUSS(const FourMomentum& p, double resolution) {
     /// @todo Need to isolate random generators to a single thread
     static random_device rd;
     static mt19937 gen(rd());
     normal_distribution<> d(p.E(), resolution);
     const double mass = p.mass2() > 0 ? p.mass() : 0; //< numerical carefulness...
     const double smeared_E = max(d(gen), mass); //< can't let the energy go below the mass!
     return FourMomentum::mkEtaPhiME(p.eta(), p.phi(), mass, smeared_E);
   }
 
   /// Smear a FourMomentum's transverse momentum using a Gaussian of absolute width @a resolution
   inline FourMomentum P4_SMEAR_PT_GAUSS(const FourMomentum& p, double resolution) {
     /// @todo Need to isolate random generators to a single thread
     static random_device rd;
     static mt19937 gen(rd());
     normal_distribution<> d(p.pT(), resolution);
     const double smeared_pt = max(d(gen), 0.);
     const double mass = p.mass2() > 0 ? p.mass() : 0; //< numerical carefulness...
     return FourMomentum::mkEtaPhiMPt(p.eta(), p.phi(), mass, smeared_pt);
   }
 
   /// Smear a FourMomentum's mass using a Gaussian of absolute width @a resolution
   inline FourMomentum P4_SMEAR_MASS_GAUSS(const FourMomentum& p, double resolution) {
     /// @todo Need to isolate random generators to a single thread
     static random_device rd;
     static mt19937 gen(rd());
     normal_distribution<> d(p.mass(), resolution);
     const double smeared_mass = max(d(gen), 0.);
     return FourMomentum::mkEtaPhiMPt(p.eta(), p.phi(), smeared_mass, p.pT());
   }
 
 
   /// Take a Vector3 and return 0
   inline double P3_FN0(const Vector3& p) { return 0; }
   /// Take a Vector3 and return 1
   inline double P3_FN1(const Vector3& p) { return 1; }
   /// Take a Vector3 and return it unmodified
   inline Vector3 P3_SMEAR_IDENTITY(const Vector3& p) { return p; }
 
   /// Smear a Vector3's length using a Gaussian of absolute width @a resolution
   inline Vector3 P3_SMEAR_LEN_GAUSS(const Vector3& p, double resolution) {
     /// @todo Need to isolate random generators to a single thread
     static random_device rd;
     static mt19937 gen(rd());
     normal_distribution<> d(p.mod(), resolution);
     const double smeared_mod = max(d(gen), 0.); //< can't let the energy go below the mass!
     return smeared_mod * p.unit();
   }
 
   //@}
 
 
   /// @name Electron efficiency and smearing functions
   //@{
 
-  /// ATLAS Run 1 electron tracking efficiency
-  /// @todo How to use this in combination with reco eff?
-  inline double ELECTRON_TRKEFF_ATLAS_RUN1(const Particle& e) {
-    if (e.abseta() > 2.5) return 0;
-    if (e.pT() < 0.1*GeV) return 0;
-    if (e.abseta() < 1.5) {
-      if (e.pT() < 1*GeV) return 0.73;
-      if (e.pT() < 100*GeV) return 0.95;
-      return 0.99;
-    } else {
-      if (e.pT() < 1*GeV) return 0.50;
-      if (e.pT() < 100*GeV) return 0.83;
-      else return 0.90;
-    }
-  }
-
-  /// ATLAS Run 2 electron tracking efficiency
-  /// @todo Currently just a copy of Run 1: fix!
-  inline double ELECTRON_TRKEFF_ATLAS_RUN2(const Particle& e) {
-    return ELECTRON_TRKEFF_ATLAS_RUN1(e);
-  }
-
-
   /// ATLAS Run 1 electron reconstruction efficiency
   /// @todo Include reco eff (but no e/y discrimination) in forward region
   /// @todo How to use this in combination with tracking eff?
   inline double ELECTRON_EFF_ATLAS_RUN1(const Particle& e) {
     if (e.abseta() > 2.5) return 0;
     if (e.pT() < 10*GeV) return 0;
     return (e.abseta() < 1.5) ? 0.95 : 0.85;
   }
 
   /// ATLAS Run 2 electron reco efficiency
   /// @todo Currently just a copy of Run 1: fix!
   inline double ELECTRON_EFF_ATLAS_RUN2(const Particle& e) {
     return ELECTRON_EFF_ATLAS_RUN1(e);
   }
 
 
   /// @brief ATLAS Run 2 'loose' electron identification/selection efficiency
   ///
   /// Values read from Fig 3 of ATL-PHYS-PUB-2015-041
   /// @todo What about faking by jets or non-electrons?
   inline double ELECTRON_IDEFF_ATLAS_RUN2_LOOSE(const Particle& e) {
 
     // Manually symmetrised eta eff histogram
     const static vector<double> edges_eta = { 0.0,   0.1,   0.8,   1.37,  1.52,  2.01,  2.37,  2.47 };
     const static vector<double> effs_eta  = { 0.950, 0.965, 0.955, 0.885, 0.950, 0.935, 0.90 };
     // Et eff histogram (10-20 is a guess)
     const static vector<double> edges_et = { 0,   10,   20,   25,   30,   35,   40,    45,    50,   60,  80 };
     const static vector<double> effs_et  = { 0.0, 0.90, 0.91, 0.92, 0.94, 0.95, 0.955, 0.965, 0.97, 0.98 };
 
     if (e.abseta() > 2.47) return 0.0; // no ID outside the tracker
 
     const int i_eta = binIndex(e.abseta(), edges_eta);
     const int i_et = binIndex(e.Et()/GeV, edges_et, true);
     const double eff = effs_et[i_et] * effs_eta[i_eta] / 0.95; //< norm factor as approximate double differential
     return min(eff, 1.0);
   }
 
 
   /// @brief ATLAS Run 1 'medium' electron identification/selection efficiency
   inline double ELECTRON_IDEFF_ATLAS_RUN1_MEDIUM(const Particle& e) {
 
     const static vector<double> eta_edges_10 = {0.000, 0.049, 0.454, 1.107, 1.46, 1.790, 2.277, 2.500};
     const static vector<double> eta_vals_10  = {0.730, 0.757, 0.780, 0.771, 0.77, 0.777, 0.778};
 
     const static vector<double> eta_edges_15 = {0.000, 0.053, 0.456, 1.102, 1.463, 1.783, 2.263, 2.500};
     const static vector<double> eta_vals_15  = {0.780, 0.800, 0.819, 0.759, 0.749, 0.813, 0.829};
 
     const static vector<double> eta_edges_20 = {0.000, 0.065, 0.362, 0.719, 0.980, 1.289, 1.455, 1.681, 1.942, 2.239, 2.452, 2.500};
     const static vector<double> eta_vals_20  = {0.794, 0.806, 0.816, 0.806, 0.797, 0.774, 0.764, 0.788, 0.793, 0.806, 0.825};
 
     const static vector<double> eta_edges_25 = {0.000, 0.077, 0.338, 0.742, 1.004, 1.265, 1.467, 1.692, 1.940, 2.227, 2.452, 2.500};
     const static vector<double> eta_vals_25  = {0.833, 0.843, 0.853, 0.845, 0.839, 0.804, 0.790, 0.825, 0.830, 0.833, 0.839};
 
     const static vector<double> eta_edges_30 = {0.000, 0.077, 0.350, 0.707, 0.980, 1.289, 1.479, 1.681, 1.942, 2.239, 2.441, 2.500};
     const static vector<double> eta_vals_30  = {0.863, 0.872, 0.881, 0.874, 0.870, 0.824, 0.808, 0.847, 0.845, 0.840, 0.842};
 
     const static vector<double> eta_edges_35 = {0.000, 0.058, 0.344, 0.700, 1.009, 1.270, 1.458, 1.685, 1.935, 2.231, 2.468, 2.500};
     const static vector<double> eta_vals_35  = {0.878, 0.889, 0.901, 0.895, 0.893, 0.849, 0.835, 0.868, 0.863, 0.845, 0.832};
 
     const static vector<double> eta_edges_40 = {0.000, 0.047, 0.355, 0.699, 0.983, 1.280, 1.446, 1.694, 1.943, 2.227, 2.441, 2.500};
     const static vector<double> eta_vals_40  = {0.894, 0.901, 0.909, 0.905, 0.904, 0.875, 0.868, 0.889, 0.876, 0.848, 0.827};
 
     const static vector<double> eta_edges_45 = {0.000, 0.058, 0.356, 0.712, 0.997, 1.282, 1.459, 1.686, 1.935, 2.220, 2.444, 2.500};
     const static vector<double> eta_vals_45  = {0.900, 0.911, 0.923, 0.918, 0.917, 0.897, 0.891, 0.904, 0.894, 0.843, 0.796};
 
     const static vector<double> eta_edges_50 = {0.000, 0.059, 0.355, 0.711, 0.983, 1.280, 1.469, 1.682, 1.919, 2.227, 2.441, 2.500};
     const static vector<double> eta_vals_50  = {0.903, 0.913, 0.923, 0.922, 0.923, 0.903, 0.898, 0.908, 0.895, 0.831, 0.774};
 
     const static vector<double> eta_edges_60 = {0.000, 0.053, 0.351, 0.720, 1.006, 1.291, 1.469, 1.696, 1.946, 2.243, 2.455, 2.500};
     const static vector<double> eta_vals_60  = {0.903, 0.917, 0.928, 0.924, 0.927, 0.915, 0.911, 0.915, 0.899, 0.827, 0.760};
 
     const static vector<double> eta_edges_80 = {0.000, 0.053, 0.351, 0.720, 0.994, 1.292, 1.482, 1.708, 1.934, 2.220, 2.458, 2.500};
     const static vector<double> eta_vals_80  = {0.936, 0.942, 0.952, 0.956, 0.956, 0.934, 0.931, 0.944, 0.933, 0.940, 0.948};
 
     const static vector<double> et_edges = { 10, 15, 20, 25, 30, 35, 40, 45, 50, 60, 80 };
     const static vector< vector<double> > et_eta_edges = { eta_edges_10, eta_edges_15, eta_edges_20, eta_edges_25, eta_edges_30, eta_edges_35, eta_edges_40, eta_edges_45, eta_edges_50, eta_edges_60, eta_edges_80 };
     const static vector< vector<double> > et_eta_vals  = { eta_vals_10, eta_vals_15, eta_vals_20, eta_vals_25, eta_vals_30, eta_vals_35, eta_vals_40, eta_vals_45, eta_vals_50, eta_vals_60, eta_vals_80 };
 
     if (e.abseta() > 2.5 || e.Et() < 10*GeV) return 0.0;
     const int i_et = binIndex(e.Et()/GeV, et_edges, true);
     const int i_eta = binIndex(e.abseta(), et_eta_edges[i_et]);
     return et_eta_vals[i_et][i_eta];
   }
 
   /// @brief ATLAS Run 2 'medium' electron identification/selection efficiency
   /// @todo Currently just a copy of Run 1: fix!
   inline double ELECTRON_IDEFF_ATLAS_RUN2_MEDIUM(const Particle& e) {
     return ELECTRON_IDEFF_ATLAS_RUN1_MEDIUM(e);
   }
 
 
   /// @brief ATLAS Run 1 'tight' electron identification/selection efficiency
   inline double ELECTRON_IDEFF_ATLAS_RUN1_TIGHT(const Particle& e) {
 
     const static vector<double> eta_edges_10 = {0.000, 0.049, 0.459, 1.100, 1.461, 1.789, 2.270, 2.500};
     const static vector<double> eta_vals_10  = {0.581, 0.632, 0.668, 0.558, 0.548, 0.662, 0.690};
 
     const static vector<double> eta_edges_15 = {0.000, 0.053, 0.450, 1.096, 1.463, 1.783, 2.269, 2.500};
     const static vector<double> eta_vals_15 =  {0.630, 0.678, 0.714, 0.633, 0.616, 0.700, 0.733};
 
     const static vector<double> eta_edges_20 = {0.000, 0.065, 0.362, 0.719, 0.992, 1.277, 1.479, 1.692, 1.930, 2.227, 2.464, 2.500};
     const static vector<double> eta_vals_20 =  {0.653, 0.695, 0.735, 0.714, 0.688, 0.635, 0.625, 0.655, 0.680, 0.691, 0.674};
 
     const static vector<double> eta_edges_25 = {0.000, 0.077, 0.362, 0.719, 0.992, 1.300, 1.479, 1.692, 1.942, 2.227, 2.464, 2.500};
     const static vector<double> eta_vals_25 =  {0.692, 0.732, 0.768, 0.750, 0.726, 0.677, 0.667, 0.692, 0.710, 0.706, 0.679};
 
     const static vector<double> eta_edges_30 = {0.000, 0.053, 0.362, 0.719, 1.004, 1.277, 1.467, 1.681, 1.954, 2.239, 2.452, 2.500};
     const static vector<double> eta_vals_30 =  {0.724, 0.763, 0.804, 0.789, 0.762, 0.702, 0.690, 0.720, 0.731, 0.714, 0.681};
 
     const static vector<double> eta_edges_35 = {0.000, 0.044, 0.342, 0.711, 0.971, 1.280, 1.456, 1.683, 1.944, 2.218, 2.442, 2.500};
     const static vector<double> eta_vals_35 =  {0.736, 0.778, 0.824, 0.811, 0.784, 0.730, 0.718, 0.739, 0.743, 0.718, 0.678};
 
     const static vector<double> eta_edges_40 = {0.000, 0.047, 0.355, 0.699, 0.983, 1.268, 1.457, 1.671, 1.931, 2.204, 2.453, 2.500};
     const static vector<double> eta_vals_40 =  {0.741, 0.774, 0.823, 0.823, 0.802, 0.764, 0.756, 0.771, 0.771, 0.734, 0.684};
 
     const static vector<double> eta_edges_45 = {0.000, 0.056, 0.354, 0.711, 0.984, 1.280, 1.458, 1.684, 1.945, 2.207, 2.442, 2.500};
     const static vector<double> eta_vals_45 =  {0.758, 0.792, 0.841, 0.841, 0.823, 0.792, 0.786, 0.796, 0.794, 0.734, 0.663};
 
     const static vector<double> eta_edges_50 = {0.000, 0.059, 0.355, 0.699, 0.983, 1.268, 1.446, 1.682, 1.943, 2.216, 2.453, 2.500};
     const static vector<double> eta_vals_50 =  {0.771, 0.806, 0.855, 0.858, 0.843, 0.810, 0.800, 0.808, 0.802, 0.730, 0.653};
 
     const static vector<double> eta_edges_60 = {0.000, 0.050, 0.350, 0.707, 0.981, 1.278, 1.468, 1.694, 1.944, 2.242, 2.453, 2.500};
     const static vector<double> eta_vals_60 =  {0.773, 0.816, 0.866, 0.865, 0.853, 0.820, 0.812, 0.817, 0.804, 0.726, 0.645};
 
     const static vector<double> eta_edges_80 = {0.000, 0.051, 0.374, 0.720, 0.981, 1.279, 1.468, 1.707, 1.945, 2.207, 2.457, 2.500};
     const static vector<double> eta_vals_80 =  {0.819, 0.855, 0.899, 0.906, 0.900, 0.869, 0.865, 0.873, 0.869, 0.868, 0.859};
 
     const static vector<double> et_edges = { 10, 15, 20, 25, 30, 35, 40, 45, 50, 60, 80 };
     const static vector< vector<double> > et_eta_edges = { eta_edges_10, eta_edges_15, eta_edges_20, eta_edges_25, eta_edges_30, eta_edges_35, eta_edges_40, eta_edges_45, eta_edges_50, eta_edges_60, eta_edges_80 };
     const static vector< vector<double> > et_eta_vals  = { eta_vals_10, eta_vals_15, eta_vals_20, eta_vals_25, eta_vals_30, eta_vals_35, eta_vals_40, eta_vals_45, eta_vals_50, eta_vals_60, eta_vals_80 };
 
     if (e.abseta() > 2.5 || e.Et() < 10*GeV) return 0.0;
     const int i_et = binIndex(e.Et()/GeV, et_edges, true);
     const int i_eta = binIndex(e.abseta(), et_eta_edges[i_et]);
     return et_eta_vals[i_et][i_eta];
   }
 
   /// @brief ATLAS Run 2 'tight' electron identification/selection efficiency
   /// @todo Currently just a copy of Run 1: fix!
   inline double ELECTRON_IDEFF_ATLAS_RUN2_TIGHT(const Particle& e) {
     return ELECTRON_IDEFF_ATLAS_RUN1_TIGHT(e);
   }
 
 
 
   /// ATLAS Run 1 electron reco smearing
   inline Particle ELECTRON_SMEAR_ATLAS_RUN1(const Particle& e) {
     static const vector<double> edges_eta = {0., 2.5, 3.};
     static const vector<double> edges_pt = {0., 0.1, 25.};
     static const vector<double> e2s = {0.000, 0.015, 0.005,
                                        0.005, 0.005, 0.005,
                                        0.107, 0.107, 0.107};
     static const vector<double> es = {0.00, 0.00, 0.05,
                                       0.05, 0.05, 0.05,
                                       2.08, 2.08, 2.08};
     static const vector<double> cs = {0.00, 0.00, 0.25,
                                       0.25, 0.25, 0.25,
                                       0.00, 0.00, 0.00};
 
     const int i_eta = binIndex(e.abseta(), edges_eta, true);
     const int i_pt = binIndex(e.pT()/GeV, edges_pt, true);
     const int i = i_eta*edges_pt.size() + i_pt;
 
     // Calculate absolute resolution in GeV
     const double c1 = sqr(e2s[i]), c2 = sqr(es[i]), c3 = sqr(cs[i]);
     const double resolution = sqrt(c1*e.E2() + c2*e.E() + c3) * GeV;
 
     // normal_distribution<> d(e.E(), resolution);
     // const double mass = e.mass2() > 0 ? e.mass() : 0; //< numerical carefulness...
     // const double smeared_E = max(d(gen), mass); //< can't let the energy go below the mass!
     // return Particle(e.pid(), FourMomentum::mkEtaPhiME(e.eta(), e.phi(), mass, smeared_E));
     return Particle(e.pid(), P4_SMEAR_E_GAUSS(e, resolution));
   }
 
 
   /// ATLAS Run 2 electron reco smearing
   /// @todo Currently just a copy of the Run 1 version: fix!
   inline Particle ELECTRON_SMEAR_ATLAS_RUN2(const Particle& e) {
     return ELECTRON_SMEAR_ATLAS_RUN1(e);
   }
 
 
   /// @todo Add charge flip efficiency?
 
 
-  /// CMS Run 1 electron tracking efficiency
-  /// @todo How to use this in combination with reco eff?
-  inline double ELECTRON_TRKEFF_CMS_RUN1(const Particle& e) {
-    if (e.abseta() > 2.5) return 0;
-    if (e.pT() < 0.1*GeV) return 0;
-    if (e.abseta() < 1.5) {
-      return (e.pT() < 1*GeV) ? 0.70 : 0.95;
-    } else {
-      return (e.pT() < 1*GeV) ? 0.60 : 0.85;
-    }
-  }
-
-
-  /// CMS Run 2 electron tracking efficiency
-  /// @todo Currently just a copy of Run 1: fix!
-  inline double ELECTRON_TRKEFF_CMS_RUN2(const Particle& e) {
-    return ELECTRON_TRKEFF_CMS_RUN1(e);
-  }
-
 
   /// CMS Run 1 electron reconstruction efficiency
   /// @todo How to use this in combination with tracking eff?
   inline double ELECTRON_EFF_CMS_RUN1(const Particle& e) {
     if (e.abseta() > 2.5) return 0;
     if (e.pT() < 10*GeV) return 0;
     return (e.abseta() < 1.5) ? 0.95 : 0.85;
   }
 
 
   /// CMS Run 2 electron reco efficiency
   /// @todo Currently just a copy of Run 1: fix!
   inline double ELECTRON_EFF_CMS_RUN2(const Particle& e) {
     return ELECTRON_EFF_CMS_RUN1(e);
   }
 
 
   /// @brief CMS electron energy smearing, preserving direction
   ///
   /// Calculate resolution
   /// for pT > 0.1 GeV, E resolution = |eta| < 0.5 -> sqrt(0.06^2 + pt^2 * 1.3e-3^2)
   ///                                  |eta| < 1.5 -> sqrt(0.10^2 + pt^2 * 1.7e-3^2)
   ///                                  |eta| < 2.5 -> sqrt(0.25^2 + pt^2 * 3.1e-3^2)
   inline Particle ELECTRON_SMEAR_CMS_RUN1(const Particle& e) {
     // Calculate absolute resolution in GeV from functional form
     double resolution = 0;
     const double abseta = e.abseta();
     if (e.pT() > 0.1*GeV && abseta < 2.5) { //< should be a given from efficiencies
       if (abseta < 0.5) {
         resolution = add_quad(0.06, 1.3e-3 * e.pT()/GeV) * GeV;
       } else if (abseta < 1.5) {
         resolution = add_quad(0.10, 1.7e-3 * e.pT()/GeV) * GeV;
       } else { // still |eta| < 2.5
         resolution = add_quad(0.25, 3.1e-3 * e.pT()/GeV) * GeV;
       }
     }
 
     // normal_distribution<> d(e.E(), resolution);
     // const double mass = e.mass2() > 0 ? e.mass() : 0; //< numerical carefulness...
     // const double smeared_E = max(d(gen), mass); //< can't let the energy go below the mass!
     // return Particle(e.pid(), FourMomentum::mkEtaPhiME(e.eta(), e.phi(), mass, smeared_E));
     return Particle(e.pid(), P4_SMEAR_E_GAUSS(e, resolution));
   }
 
 
   /// CMS Run 2 electron reco smearing
   /// @todo Currently just a copy of the Run 1 version: fix!
   inline Particle ELECTRON_SMEAR_CMS_RUN2(const Particle& e) {
     return ELECTRON_SMEAR_CMS_RUN1(e);
   }
 
   //@}
 
 
 
   /// @name Photon efficiency and smearing functions
   //@{
 
-  /// @todo Photon efficiency and smearing
+  /// ATLAS Run 1 photon reco efficiency
+  /// @todo Currently identical to CMS, cf. Delphes
+  inline double PHOTON_EFF_ATLAS_RUN1(const Particle& y) {
+    if (y.pT() < 10*GeV || y.abseta() > 2.5) return 0;
+    return (y.abseta() < 1.5) ? 0.95 : 0.85;
+  }
+
+  /// ATLAS Run 2 photon reco efficiency
+  /// @todo Currently just a copy of Run 1: fix!
+  inline double PHOTON_EFF_ATLAS_RUN2(const Particle& y) {
+    return PHOTON_EFF_ATLAS_RUN1(y);
+  }
+
+  /// CMS Run 1 photon reco efficiency
+  /// @todo Currently identical to ATLAS, cf. Delphes
+  inline double PHOTON_EFF_CMS_RUN1(const Particle& y) {
+    if (y.pT() < 10*GeV || y.abseta() > 2.5) return 0;
+    return (y.abseta() < 1.5) ? 0.95 : 0.85;
+  }
+
+  /// CMS Run 2 photon reco efficiency
+  /// @todo Currently just a copy of Run 1: fix!
+  inline double PHOTON_EFF_CMS_RUN2(const Particle& y) {
+    return PHOTON_EFF_CMS_RUN1(y);
+  }
 
   //@}
 
 
 
   /// @name Muon efficiency and smearing functions
   //@{
 
-  /// ATLAS Run 1 muon tracking efficiency
-  /// @todo How to use this in combination with reco eff?
-  inline double MUON_TRKEFF_ATLAS_RUN1(const Particle& m) {
-    if (m.abseta() > 2.5) return 0;
-    if (m.pT() < 0.1*GeV) return 0;
-    if (m.abseta() < 1.5) {
-      return (m.pT() < 1*GeV) ? 0.75 : 0.99;
-    } else {
-      return (m.pT() < 1*GeV) ? 0.70 : 0.98;
-    }
-  }
-
-  /// ATLAS Run 2 muon tracking efficiency
-  /// @todo Currently just a copy of Run 1: fix!
-  inline double MUON_TRKEFF_ATLAS_RUN2(const Particle& m) {
-    return MUON_TRKEFF_ATLAS_RUN1(m);
-  }
-
-
   /// ATLAS Run 1 muon reco efficiency
-  /// @todo How to use this in combination with tracking eff?
   inline double MUON_EFF_ATLAS_RUN1(const Particle& m) {
     if (m.abseta() > 2.7) return 0;
     if (m.pT() < 10*GeV) return 0;
     return (m.abseta() < 1.5) ? 0.95 : 0.85;
   }
 
   /// ATLAS Run 2 muon reco efficiency
   /// @todo Currently just a copy of Run 1: fix!
   inline double MUON_EFF_ATLAS_RUN2(const Particle& m) {
     return MUON_EFF_ATLAS_RUN1(m);
   }
 
 
   /// ATLAS Run 1 muon reco smearing
   inline Particle MUON_SMEAR_ATLAS_RUN1(const Particle& m) {
     static const vector<double> edges_eta = {0, 1.5, 2.5};
     static const vector<double> edges_pt = {0, 0.1, 1.0, 10., 200.};
     static const vector<double> res = {0., 0.03, 0.02, 0.03, 0.05,
                                        0., 0.04, 0.03, 0.04, 0.05};
 
     const int i_eta = binIndex(m.abseta(), edges_eta, true);
     const int i_pt = binIndex(m.pT()/GeV, edges_pt, true);
     const int i = i_eta*edges_pt.size() + i_pt;
 
     const double resolution = res[i];
 
     // Smear by a Gaussian centered on the current pT, with width given by the resolution
     // normal_distribution<> d(m.pT(), resolution*m.pT());
     // const double smeared_pt = max(d(gen), 0.);
     // const double mass = m.mass2() > 0 ? m.mass() : 0; //< numerical carefulness...
     // return Particle(m.pid(), FourMomentum::mkEtaPhiMPt(m.eta(), m.phi(), mass, smeared_pt));
     return Particle(m.pid(), P4_SMEAR_PT_GAUSS(m, resolution*m.pT()));
   }
 
   /// ATLAS Run 2 muon reco smearing
   /// @todo Currently just a copy of the Run 1 version: fix!
   inline Particle MUON_SMEAR_ATLAS_RUN2(const Particle& m) {
     return MUON_SMEAR_ATLAS_RUN1(m);
   }
 
 
-  /// CMS Run 1 muon tracking efficiency
-  /// @todo How to use this in combination with reco eff?
-  /// @note Eff values currently identical to those in ATLAS (AB, 2016-04-12)
-  inline double MUON_TRKEFF_CMS_RUN1(const Particle& m) {
-    if (m.abseta() > 2.5) return 0;
-    if (m.pT() < 0.1*GeV) return 0;
-    if (m.abseta() < 1.5) {
-      return (m.pT() < 1*GeV) ? 0.75 : 0.99;
-    } else {
-      return (m.pT() < 1*GeV) ? 0.70 : 0.98;
-    }
-  }
-
-  /// CMS Run 2 muon tracking efficiency
-  /// @todo Currently just a copy of Run 1: fix!
-  inline double MUON_TRKEFF_CMS_RUN2(const Particle& m) {
-    return MUON_TRKEFF_CMS_RUN1(m);
-  }
 
 
   /// CMS Run 1 muon reco efficiency
-  /// @todo How to use this in combination with tracking eff?
   inline double MUON_EFF_CMS_RUN1(const Particle& m) {
     if (m.abseta() > 2.4) return 0;
     if (m.pT() < 10*GeV) return 0;
     return 0.95 * (m.abseta() < 1.5 ? 1 : exp(0.5 - 5e-4*m.pT()/GeV));
   }
 
   /// CMS Run 2 muon reco efficiency
   /// @todo Currently just a copy of Run 1: fix!
   inline double MUON_EFF_CMS_RUN2(const Particle& m) {
     return MUON_EFF_CMS_RUN1(m);
   }
 
 
   /// CMS Run 1 muon reco smearing
   inline Particle MUON_SMEAR_CMS_RUN1(const Particle& m) {
     // Calculate fractional resolution
     // for pT > 0.1 GeV, mom resolution = |eta| < 0.5 -> sqrt(0.01^2 + pt^2 * 2.0e-4^2)
     //                                    |eta| < 1.5 -> sqrt(0.02^2 + pt^2 * 3.0e-4^2)
     //                                    |eta| < 2.5 -> sqrt(0.05^2 + pt^2 * 2.6e-4^2)
     double resolution = 0;
     const double abseta = m.abseta();
     if (m.pT() > 0.1*GeV && abseta < 2.5) {
       if (abseta < 0.5) {
         resolution = add_quad(0.01, 2.0e-4 * m.pT()/GeV);
       } else if (abseta < 1.5) {
         resolution = add_quad(0.02, 3.0e-4 * m.pT()/GeV);
       } else { // still |eta| < 2.5... but isn't CMS' mu acceptance < 2.4?
         resolution = add_quad(0.05, 2.6e-4 * m.pT()/GeV);
       }
     }
 
     // Smear by a Gaussian centered on the current pT, with width given by the resolution
     // normal_distribution<> d(m.pT(), resolution*m.pT());
     // const double smeared_pt = max(d(gen), 0.);
     // const double mass = m.mass2() > 0 ? m.mass() : 0; //< numerical carefulness...
     // return Particle(m.pid(), FourMomentum::mkEtaPhiMPt(m.eta(), m.phi(), mass, smeared_pt));
     return Particle(m.pid(), P4_SMEAR_PT_GAUSS(m, resolution*m.pT()));
   }
 
   /// CMS Run 2 muon reco smearing
   /// @todo Currently just a copy of the Run 1 version: fix!
   inline Particle MUON_SMEAR_CMS_RUN2(const Particle& m) {
     return MUON_SMEAR_CMS_RUN1(m);
   }
 
   //@}
 
 
 
   /// @name Tau efficiency and smearing functions
   //@{
 
   /// @brief ATLAS Run 1 8 TeV tau efficiencies (medium working point)
   ///
   /// Taken from http://arxiv.org/pdf/1412.7086.pdf
   ///   20-40 GeV 1-prong LMT eff|mis = 0.66|1/10, 0.56|1/20, 0.36|1/80
   ///   20-40 GeV 3-prong LMT eff|mis = 0.45|1/60, 0.38|1/100, 0.27|1/300
   ///   > 40 GeV 1-prong LMT eff|mis = 0.66|1/15, 0.56|1/25, 0.36|1/80
   ///   > 40 GeV 3-prong LMT eff|mis = 0.45|1/250, 0.38|1/400, 0.27|1/1300
   inline double TAU_EFF_ATLAS_RUN1(const Particle& t) {
     if (t.abseta() > 2.5) return 0; //< hmm... mostly
     double pThadvis = 0;
     Particles chargedhadrons;
     for (const Particle& p : t.children()) {
       if (p.isHadron()) {
         pThadvis += p.pT(); //< right definition? Paper is unclear
         if (p.charge3() != 0 && p.abseta() < 2.5 && p.pT() > 1*GeV) chargedhadrons += p;
       }
     }
     if (chargedhadrons.empty()) return 0; //< leptonic tau
     if (pThadvis < 20*GeV) return 0; //< below threshold
     if (pThadvis < 40*GeV) {
       if (chargedhadrons.size() == 1) return (t.abspid() == PID::TAU) ? 0.56 : 1/20.;
       if (chargedhadrons.size() == 3) return (t.abspid() == PID::TAU) ? 0.38 : 1/100.;
     } else {
       if (chargedhadrons.size() == 1) return (t.abspid() == PID::TAU) ? 0.56 : 1/25.;
       if (chargedhadrons.size() == 3) return (t.abspid() == PID::TAU) ? 0.38 : 1/400.;
     }
     return 0;
   }
 
 
   /// @brief ATLAS Run 2 13 TeV tau efficiencies (medium working point)
   ///
   /// From https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/PUBNOTES/ATL-PHYS-PUB-2015-045/ATL-PHYS-PUB-2015-045.pdf
   ///   LMT 1 prong efficiency/mistag = 0.6|1/30, 0.55|1/50, 0.45|1/120
   ///   LMT 3 prong efficiency/mistag = 0.5|1/30, 0.4|1/110, 0.3|1/300
   inline double TAU_EFF_ATLAS_RUN2(const Particle& t) {
     if (t.abseta() > 2.5) return 0; //< hmm... mostly
     double pThadvis = 0;
     Particles chargedhadrons;
     for (const Particle& p : t.children()) {
       if (p.isHadron()) {
         pThadvis += p.pT(); //< right definition? Paper is unclear
         if (p.charge3() != 0 && p.abseta() < 2.5 && p.pT() > 1*GeV) chargedhadrons += p;
       }
     }
     if (chargedhadrons.empty()) return 0; //< leptonic tau
     if (pThadvis < 20*GeV) return 0; //< below threshold
     if (chargedhadrons.size() == 1) return (t.abspid() == PID::TAU) ? 0.55 : 1/50.;
     if (chargedhadrons.size() == 3) return (t.abspid() == PID::TAU) ? 0.40 : 1/110.;
     return 0;
   }
 
 
   /// ATLAS Run 1 tau smearing
   /// @todo Currently a copy of the crappy jet smearing that is probably wrong...
   inline Particle TAU_SMEAR_ATLAS_RUN1(const Particle& t) {
     // Const fractional resolution for now
     static const double resolution = 0.03;
 
     // Smear by a Gaussian centered on 1 with width given by the (fractional) resolution
     /// @todo Is this the best way to smear? Should we preserve the energy, or pT, or direction?
     /// @todo Need to isolate random generators to a single thread
     static random_device rd;
     static mt19937 gen(rd());
     normal_distribution<> d(1., resolution);
     const double fsmear = max(d(gen), 0.);
     const double mass = t.mass2() > 0 ? t.mass() : 0; //< numerical carefulness...
     return Particle(t.pid(), FourMomentum::mkXYZM(t.px()*fsmear, t.py()*fsmear, t.pz()*fsmear, mass));
   }
 
 
   /// ATLAS Run 2 tau smearing
   /// @todo Currently a copy of the Run 1 version
   inline Particle TAU_SMEAR_ATLAS_RUN2(const Particle& t) {
     return TAU_SMEAR_ATLAS_RUN1(t);
   }
 
 
   /// CMS Run 2 tau efficiency
   ///
   /// @todo Needs work; this is the dumb version from Delphes 3.3.2
   inline double TAU_EFF_CMS_RUN2(const Particle& t) {
     return (t.abspid() == PID::TAU) ? 0.6 : 0;
   }
 
   /// CMS Run 1 tau efficiency
   ///
   /// @todo Needs work; this is just a copy of the Run 2 version in Delphes 3.3.2
   inline double TAU_EFF_CMS_RUN1(const Particle& t) {
     return TAU_EFF_CMS_RUN2(t);
   }
 
 
   /// CMS Run 1 tau smearing
   /// @todo Currently a copy of the crappy ATLAS one
   inline Particle TAU_SMEAR_CMS_RUN1(const Particle& t) {
     return TAU_SMEAR_ATLAS_RUN1(t);
   }
 
 
   /// CMS Run 2 tau smearing
   /// @todo Currently a copy of the Run 1 version
   inline Particle TAU_SMEAR_CMS_RUN2(const Particle& t) {
     return TAU_SMEAR_CMS_RUN1(t);
   }
 
   //@}
 
 
   /// @name Jet efficiency and smearing functions
   //@{
 
   /// Return a constant 0 given a Jet as argument
   inline double JET_EFF_ZERO(const Jet& p) { return 0; }
   /// Return a constant 1 given a Jet as argument
   inline double JET_EFF_ONE(const Jet& p) { return 1; }
 
   /// Return 1 if the given Jet contains a b, otherwise 0
   inline double JET_BTAG_PERFECT(const Jet& j) { return j.bTagged() ? 1 : 0; }
   /// Return the ATLAS Run 1 jet flavour tagging efficiency for the given Jet
   inline double JET_BTAG_ATLAS_RUN1(const Jet& j) {
-    if (j.bTagged()) return 0.80*tanh(0.003*j.pT()/GeV)*(30/(1+0.086*j.pT()/GeV));
-    if (j.cTagged()) return 0.20*tanh(0.02*j.pT()/GeV)*(1/(1+0.0034*j.pT()/GeV));
+    /// @todo This form drops past ~100 GeV, asymptotically to zero efficiency... really?!
+    if (j.abseta() > 2.5) return 0;
+    const auto ftagsel = [&](const Particle& p){ return p.pT() > 5*GeV && deltaR(p,j) < 0.3; };
+    if (j.bTagged(ftagsel)) return 0.80*tanh(0.003*j.pT()/GeV)*(30/(1+0.0860*j.pT()/GeV));
+    if (j.cTagged(ftagsel)) return 0.20*tanh(0.020*j.pT()/GeV)*( 1/(1+0.0034*j.pT()/GeV));
     return 0.002 + 7.3e-6*j.pT()/GeV;
   }
   /// Return the ATLAS Run 2 MC2c20 jet flavour tagging efficiency for the given Jet
   inline double JET_BTAG_ATLAS_RUN2_MV2C20(const Jet& j) {
-    if (j.bTagged()) return 0.77;
-    if (j.cTagged()) return 1/4.5;
+    if (j.abseta() > 2.5) return 0;
+    if (j.bTagged(Cuts::pT > 5*GeV)) return 0.77;
+    if (j.cTagged(Cuts::pT > 5*GeV)) return 1/4.5;
     return 1/140.;
   }
   /// Return the ATLAS Run 2 MC2c10 jet flavour tagging efficiency for the given Jet
   inline double JET_BTAG_ATLAS_RUN2_MV2C10(const Jet& j) {
-    if (j.bTagged()) return 0.77;
-    if (j.cTagged()) return 1/6.0;
+    if (j.abseta() > 2.5) return 0;
+    if (j.bTagged(Cuts::pT > 5*GeV)) return 0.77;
+    if (j.cTagged(Cuts::pT > 5*GeV)) return 1/6.0;
     return 1/134.;
   }
 
   /// Return 1 if the given Jet contains a c, otherwise 0
   inline double JET_CTAG_PERFECT(const Jet& j) { return j.cTagged() ? 1 : 0; }
 
   /// Take a jet and return an unmodified copy
   /// @todo Modify constituent particle vectors for consistency
   /// @todo Set a null PseudoJet if the Jet is smeared?
   inline Jet JET_SMEAR_IDENTITY(const Jet& j) { return j; }
 
   /// ATLAS Run 1 jet smearing
   /// @todo This is a cluster-level flat 3% resolution, I think, and smearing is suboptimal: improve!
   inline Jet JET_SMEAR_ATLAS_RUN1(const Jet& j) {
     // Const fractional resolution for now
     static const double resolution = 0.03;
 
     // Smear by a Gaussian centered on 1 with width given by the (fractional) resolution
     /// @todo Is this the best way to smear? Should we preserve the energy, or pT, or direction?
     /// @todo Need to isolate random generators to a single thread
     static random_device rd;
     static mt19937 gen(rd());
     normal_distribution<> d(1., resolution);
     const double fsmear = max(d(gen), 0.);
     const double mass = j.mass2() > 0 ? j.mass() : 0; //< numerical carefulness...
     return Jet(FourMomentum::mkXYZM(j.px()*fsmear, j.py()*fsmear, j.pz()*fsmear, mass));
   }
 
   /// ATLAS Run 2 jet smearing
   /// @todo Just a copy of the Run 1 one: improve!!
   inline Jet JET_SMEAR_ATLAS_RUN2(const Jet& j) {
     return JET_SMEAR_ATLAS_RUN1(j);
   }
 
   /// CMS Run 2 jet smearing
   /// @todo Just a copy of the suboptimal ATLAS one: improve!!
   inline Jet JET_SMEAR_CMS_RUN2(const Jet& j) {
     return JET_SMEAR_ATLAS_RUN1(j);
   }
 
   //@}
 
 
   /// @name ETmiss smearing functions
   //@{
 
   inline Vector3 MET_SMEAR_IDENTITY(const Vector3& met, double) { return met; }
 
   /// @brief ATLAS Run 1 ETmiss smearing
   ///
   /// Based on https://arxiv.org/pdf/1108.5602v2.pdf, Figs 14 and 15
   inline Vector3 MET_SMEAR_ATLAS_RUN1(const Vector3& met, double set) {
     // Linearity offset (Fig 14)
     Vector3 smeared_met = met;
     if (met.mod()/GeV < 25*GeV) smeared_met *= 1.05;
     else if (met.mod()/GeV < 40*GeV) smeared_met *= (1.05 - (0.04/15)*(met.mod()/GeV - 25)); //< linear decrease
     else smeared_met *= 1.01;
 
     // Smear by a Gaussian with width given by the resolution(sumEt) ~ 0.45 sqrt(sumEt) GeV
     const double resolution = 0.45 * sqrt(set/GeV) * GeV;
     static random_device rd;
     static mt19937 gen(rd());
     normal_distribution<> d(smeared_met.mod(), resolution);
     const double metsmear = max(d(gen), 0.);
     smeared_met = metsmear * smeared_met.unit();
 
     return smeared_met;
   }
 
   /// ATLAS Run 2 ETmiss smearing
   /// @todo Just a copy of the Run 1 one: improve!!
   inline Vector3 MET_SMEAR_ATLAS_RUN2(const Vector3& met, double set) {
     return MET_SMEAR_ATLAS_RUN1(met, set);
   }
 
   /// CMS Run 1 ETmiss smearing
   /// @todo Just a copy of the ATLAS one: improve!!
   inline Vector3 MET_SMEAR_CMS_RUN1(const Vector3& met, double set) {
     return MET_SMEAR_ATLAS_RUN1(met, set);
   }
 
   /// CMS Run 2 ETmiss smearing
   /// @todo Just a copy of the ATLAS one: improve!!
   inline Vector3 MET_SMEAR_CMS_RUN2(const Vector3& met, double set) {
     return MET_SMEAR_ATLAS_RUN2(met, set);
   }
 
   //@}
 
 
+  /// @name Tracking efficiency and smearing functions
+  //@{
+
+  /// ATLAS Run 1 tracking efficiency
+  inline double TRK_EFF_ATLAS_RUN1(const Particle& p) {
+    if (p.charge3() == 0) return 0;
+    if (p.abseta() > 2.5) return 0;
+    if (p.pT() < 0.1*GeV) return 0;
+
+    if (p.abspid() == PID::ELECTRON) {
+      if (p.abseta() < 1.5) {
+        if (p.pT() < 1*GeV) return 0.73;
+        if (p.pT() < 100*GeV) return 0.95;
+        return 0.99;
+      } else {
+        if (p.pT() < 1*GeV) return 0.50;
+        if (p.pT() < 100*GeV) return 0.83;
+        return 0.90;
+      }
+    } else { // muons and hadrons
+      if (p.abseta() < 1.5) {
+        return (p.pT() < 1*GeV) ? 0.75 : 0.99;
+      } else {
+        return (p.pT() < 1*GeV) ? 0.70 : 0.98;
+      }
+    }
+  }
+
+  /// ATLAS Run 2 tracking efficiency
+  /// @todo Currently just a copy of Run 1: fix!
+  inline double TRK_EFF_ATLAS_RUN2(const Particle& p) {
+    return TRK_EFF_ATLAS_RUN1(p);
+  }
+
+
+  /// CMS Run 1 tracking efficiency
+  inline double TRK_EFF_CMS_RUN1(const Particle& p) {
+    if (p.charge3() == 0) return 0;
+    if (p.abseta() > 2.5) return 0;
+    if (p.pT() < 0.1*GeV) return 0;
+
+    if (p.abspid() == PID::ELECTRON) {
+      if (p.abseta() < 1.5) {
+        return (p.pT() < 1*GeV) ? 0.70 : 0.95;
+      } else {
+        return (p.pT() < 1*GeV) ? 0.60 : 0.85;
+      }
+    } else { // muons and hadrons
+      if (p.abseta() < 1.5) {
+        return (p.pT() < 1*GeV) ? 0.75 : 0.99;
+      } else {
+        return (p.pT() < 1*GeV) ? 0.70 : 0.98;
+      }
+    }
+  }
+
+  /// CMS Run 2 tracking efficiency
+  /// @todo Currently just a copy of Run 1: fix!
+  inline double TRK_EFF_CMS_RUN2(const Particle& p) {
+    return TRK_EFF_CMS_RUN1(p);
+  }
+
+  //@}
+
+
 }
 
 #endif
diff --git a/include/Rivet/Tools/Utils.hh b/include/Rivet/Tools/Utils.hh
--- a/include/Rivet/Tools/Utils.hh
+++ b/include/Rivet/Tools/Utils.hh
@@ -1,467 +1,467 @@
 // -*- C++ -*-
 #ifndef RIVET_Utils_HH
 #define RIVET_Utils_HH
 
 #include "Rivet/Tools/RivetSTL.hh"
 #include "Rivet/Tools/PrettyPrint.hh"
 #include <ostream>
 #include <iostream>
 #include <cctype>
 #include <cerrno>
 #include <stdexcept>
 #include <numeric>
 #include <limits>
 #include <climits>
 #include <cfloat>
 #include <cmath>
 
 
 // Macro to help with overzealous compiler warnings
 /// @note It's easier and better to just not give an arg name to args which won't be used, when possible.
 #ifdef UNUSED
 #elif defined(__GNUC__)
 # define UNUSED(x) UNUSED_ ## x __attribute__((unused))
 #elif defined(__LCLINT__)
 # define UNUSED(x) /*@unused@*/ x
 #else
 # define UNUSED(x) x
 #endif
 
 
 /// Macro to help mark code as deprecated to produce compiler warnings
 #ifndef DEPRECATED
 #if __GNUC__ && __cplusplus && RIVET_NO_DEPRECATION_WARNINGS == 0
 #define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
 #if GCC_VERSION >= 40500
   #if __cplusplus > 201103L
   #define DEPRECATED(x) [[deprecated(x)]]
   #else
   #define DEPRECATED(x) __attribute__((deprecated(x)))
   #endif
 #else
   #define DEPRECATED(x) __attribute__((deprecated))
 #endif
 #else
   #define DEPRECATED(x)
 #endif
 #endif
 
 
 namespace Rivet {
 
 
   /// Convenient const for getting the double NaN value
   static constexpr double DBL_NAN = std::numeric_limits<double>::quiet_NaN();
 
 
   /// @name String utils
   //@{
 
   struct bad_lexical_cast : public std::runtime_error {
     bad_lexical_cast(const std::string& what) : std::runtime_error(what) {}
   };
 
   /// @brief Convert between any types via stringstream
   template<typename T, typename U>
   T lexical_cast(const U& in) {
     try {
       std::stringstream ss;
       ss << in;
       T out;
       ss >> out;
       return out;
     } catch (const std::exception& e) {
       throw bad_lexical_cast(e.what());
     }
   }
 
   /// @brief Convert any object to a string
   ///
   /// Just a convenience wrapper for the more general Boost lexical_cast
   template <typename T>
   inline string to_str(const T& x) {
     return lexical_cast<string>(x);
   }
 
   /// @brief Convert any object to a string
   ///
   /// An alias for to_str() with a more "Rivety" mixedCase name.
   template <typename T>
   inline string toString(const T& x) {
     return to_str(x);
   }
 
   /// Replace the first instance of patt with repl
   inline string& replace_first(string& str, const string& patt, const string& repl) {
     if (!contains(str, patt)) return str; //< contains from RivetSTL
     str.replace(str.find(patt), patt.size(), repl);
     return str;
   }
 
   /// @brief Replace all instances of patt with repl
   ///
   /// @note Finding is interleaved with replacement, so the second search happens after
   /// first replacement, etc. This could lead to infinite loops and other counterintuitive
   /// behaviours if not careful.
   inline string& replace_all(string& str, const string& patt, const string& repl) {
     if (!contains(str, patt)) return str; //< contains from RivetSTL
     while (true) {
       string::size_type it = str.find(patt);
       if (it == string::npos) break;
       str.replace(it, patt.size(), repl);
     }
     return str;
   }
 
 
   /// Case-insensitive string comparison function
   inline int nocase_cmp(const string& s1, const string& s2) {
     string::const_iterator it1 = s1.begin();
     string::const_iterator it2 = s2.begin();
     while ( (it1 != s1.end()) && (it2 != s2.end()) ) {
       if(::toupper(*it1) != ::toupper(*it2)) { // < Letters differ?
         // Return -1 to indicate smaller than, 1 otherwise
         return (::toupper(*it1) < ::toupper(*it2)) ? -1 : 1;
       }
       // Proceed to the next character in each string
       ++it1;
       ++it2;
     }
     size_t size1 = s1.size(), size2 = s2.size(); // Cache lengths
     // Return -1,0 or 1 according to strings' lengths
     if (size1 == size2) return 0;
     return (size1 < size2) ? -1 : 1;
   }
 
 
   /// Case-insensitive string equality function
   inline bool nocase_equals(const string& s1, const string& s2) {
     return nocase_cmp(s1, s2) == 0;
   }
 
 
   /// Convert a string to lower-case
   inline string toLower(const string& s) {
     string out = s;
     std::transform(out.begin(), out.end(), out.begin(), (int(*)(int)) std::tolower);
     return out;
   }
 
 
   /// Convert a string to upper-case
   inline string toUpper(const string& s) {
     string out = s;
     std::transform(out.begin(), out.end(), out.begin(), (int(*)(int)) std::toupper);
     return out;
   }
 
 
   /// Check whether a string @a start is found at the start of @a s
   inline bool startsWith(const string& s, const string& start) {
     if (s.length() < start.length()) return false;
     return s.substr(0, start.length()) == start;
   }
 
 
   /// Check whether a string @a end is found at the end of @a s
   inline bool endsWith(const string& s, const string& end) {
     if (s.length() < end.length()) return false;
     return s.substr(s.length() - end.length()) == end;
   }
 
 
   /// Make a string containing the string representations of each item in v, separated by sep
   template <typename T>
   inline string join(const vector<T>& v, const string& sep=" ") {
     string rtn;
     for (size_t i = 0; i < v.size(); ++i) {
       if (i != 0) rtn += sep;
       rtn += to_str(v[i]);
     }
     return rtn;
   }
 
   /// Make a string containing the string representations of each item in s, separated by sep
   template <typename T>
   inline string join(const set<T>& s, const string& sep=" ") {
     string rtn;
     for (const T& x : s) {
       if (rtn.size() > 0) rtn += sep;
       rtn += to_str(x);
     }
     return rtn;
   }
 
   //@}
 
 
   /// @name Path utils
   //@{
 
   /// @brief Split a path string with colon delimiters
   ///
   /// Ignores zero-length substrings. Designed for getting elements of filesystem paths, naturally.
   inline vector<string> pathsplit(const string& path) {
     const string delim = ":";
     vector<string> dirs;
     string tmppath = path;
     while (true) {
       const size_t delim_pos = tmppath.find(delim);
       if (delim_pos == string::npos) break;
       const string dir = tmppath.substr(0, delim_pos);
       if (dir.length()) dirs.push_back(dir); // Don't insert "empties"
       tmppath.replace(0, delim_pos+1, "");
     }
     if (tmppath.length()) dirs.push_back(tmppath); // Don't forget the trailing component!
     return dirs;
   }
 
 
   /// @brief Join several filesystem paths together with the standard ':' delimiter
   ///
   /// Note that this does NOT join path elements together with a platform-portable
   /// directory delimiter, cf. the Python @c {os.path.join}!
   inline string pathjoin(const vector<string>& paths) {
     return join(paths, ":");
   }
 
   //@}
 
 
   /// @name Container utils
   //@{
 
   /// Return number of elements in the container @a c for which @c f(x) is true.
   template <typename CONTAINER>
   inline unsigned int count(const CONTAINER& c) {
     // return std::count_if(std::begin(c), std::end(c), [](const typename CONTAINER::value_type& x){return bool(x);});
     unsigned int rtn = 0;
     for (const auto& x : c) if (bool(x)) rtn += 1;
     return rtn;
   }
 
   /// Return number of elements in the container @a c for which @c f(x) is true.
   template <typename CONTAINER, typename FN>
   inline unsigned int count(const CONTAINER& c, const FN& f) {
     return std::count_if(std::begin(c), std::end(c), f);
   }
 
   /// Return true if x is true for any x in container c, otherwise false.
   template <typename CONTAINER>
   inline bool any(const CONTAINER& c) {
     // return std::any_of(std::begin(c), std::end(c), [](const auto& x){return bool(x);});
     for (const auto& x : c) if (bool(x)) return true;
     return false;
   }
 
   /// Return true if f(x) is true for any x in container c, otherwise false.
   template <typename CONTAINER, typename FN>
   inline bool any(const CONTAINER& c, const FN& f) {
     return std::any_of(std::begin(c), std::end(c), f);
   }
 
   /// Return true if @a x is true for all @c x in container @a c, otherwise false.
   template <typename CONTAINER>
   inline bool all(const CONTAINER& c) {
     // return std::all_of(std::begin(c), std::end(c), [](const auto& x){return bool(x);});
     for (const auto& x : c) if (!bool(x)) return false;
     return true;
   }
 
   /// Return true if @a f(x) is true for all @c x in container @a c, otherwise false.
   template <typename CONTAINER, typename FN>
   inline bool all(const CONTAINER& c, const FN& f) {
     return std::all_of(std::begin(c), std::end(c), f);
   }
 
   /// Return true if @a x is false for all @c x in container @a c, otherwise false.
   template <typename CONTAINER>
   inline bool none(const CONTAINER& c) {
     // return std::none_of(std::begin(c), std::end(c), [](){});
     for (const auto& x : c) if (bool(x)) return false;
     return true;
   }
 
   /// Return true if @a f(x) is false for all @c x in container @a c, otherwise false.
   template <typename CONTAINER, typename FN>
   inline bool none(const CONTAINER& c, const FN& f) {
     return std::none_of(std::begin(c), std::end(c), f);
   }
 
 
   /// A single-container-arg version of std::transform, aka @c map
   template <typename C1, typename C2, typename FN>
   inline const C2& transform(const C1& in, C2& out, const FN& f) {
     out.clear(); out.resize(in.size());
     std::transform(in.begin(), in.end(), out.begin(), f);
     return out;
   }
 
   /// A single-container-arg version of std::accumulate, aka @c reduce
   template <typename C1, typename T, typename FN>
   inline T accumulate(const C1& in, const T& init, const FN& f) {
     const T rtn = std::accumulate(in.begin(), in.end(), init, f);
     return rtn;
   }
 
   /// Generic sum function, adding @c x for all @c x in container @a c, starting with @a start
   template <typename CONTAINER, typename T>
   inline T sum(const CONTAINER& c, const T& start=T()) {
     T rtn = start;
     for (const auto& x : c) rtn += x;
     return rtn;
   }
 
   /// Generic sum function, adding @a fn(@c x) for all @c x in container @a c, starting with @a start
   template <typename CONTAINER, typename FN, typename T>
   inline T sum(const CONTAINER& c, const FN& f, const T& start=T()) {
     T rtn = start;
     for (const auto& x : c) rtn += f(x);
     return rtn;
   }
 
 
   /// Filter a collection in-place, removing the subset that passes the supplied function
   template <typename CONTAINER, typename FN>
   inline CONTAINER& ifilter_discard(CONTAINER& c, const FN& f) {
     const auto newend = std::remove_if(std::begin(c), std::end(c), f);
     c.erase(newend, c.end());
     return c;
   }
 
   /// Filter a collection by copy, removing the subset that passes the supplied function
   template <typename CONTAINER, typename FN>
   inline CONTAINER filter_discard(const CONTAINER& c, const FN& f) {
     CONTAINER rtn = c;
     return ifilter_discard(rtn, f); ///< @todo More efficient would be copy_if with back_inserter...
   }
 
   /// Filter a collection by copy into a supplied container, removing the subset that passes the supplied function
   /// @note New container will be replaced, not appended to
   template <typename CONTAINER, typename FN>
   inline CONTAINER& filter_discard(const CONTAINER& c, const FN& f, CONTAINER& out) {
     out = filter_discard(c, f);
     return out;
   }
 
 
   /// Filter a collection in-place, keeping the subset that passes the supplied function
   template <typename CONTAINER, typename FN>
   inline CONTAINER& ifilter_select(CONTAINER& c, const FN& f) {
     //using value_type = typename std::remove_reference<decltype(*std::begin(std::declval<typename std::add_lvalue_reference<CONTAINER>::type>()))>::type;
-    const auto invf = [&](const typename CONTAINER::value_type& x){ return !f(x); };
+    auto invf = [&](const typename CONTAINER::value_type& x){ return !f(x); };
     return ifilter_discard(c, invf);
   }
 
   /// Filter a collection by copy, keeping the subset that passes the supplied function
   template <typename CONTAINER, typename FN>
   inline CONTAINER filter_select(const CONTAINER& c, const FN& f) {
     CONTAINER rtn = c;
     return ifilter_select(rtn, f); ///< @todo More efficient would be copy_if with back_inserter ... but is that equally container agnostic?
   }
 
   /// Filter a collection by copy into a supplied container, keeping the subset that passes the supplied function
   /// @note New container will be replaced, not appended to
   template <typename CONTAINER, typename FN>
   inline CONTAINER& filter_select(const CONTAINER& c, const FN& f, CONTAINER& out) {
     out = filter_select(c, f);
     return out;
   }
 
 
   /// @brief Slice of the container elements cf. Python's [i:j] syntax
   ///
   /// The element at the @j index is not included in the returned container.
   /// @a i and @a j can be negative, treated as backward offsets from the end of the container.
   template <typename CONTAINER>
   inline CONTAINER slice(const CONTAINER& c, int i, int j) {
     CONTAINER rtn;
     const size_t off1 = (i >= 0) ? i : c.size() + i;
     const size_t off2 = (j >= 0) ? j : c.size() + j;
     if (off1 > c.size() || off2 > c.size()) throw RangeError("Attempting to slice beyond requested offsets");
     if (off2 < off1) throw RangeError("Requested offsets in invalid order");
     rtn.resize(off2 - off1);
     std::copy(c.begin()+off1, c.begin()+off2, rtn.begin());
     return rtn;
   }
 
   /// @brief Tail slice of the container elements cf. Python's [i:] syntax
   ///
   /// Single-index specialisation of @c slice(c, i, j)
   template <typename CONTAINER>
   inline CONTAINER slice(const CONTAINER& c, int i) {
     return slice(c, i, c.size());
   }
 
   /// @brief Head slice of the @a n first container elements
   ///
   /// Negative @a n means to take the head excluding the @a{n}-element tail
   template <typename CONTAINER>
   inline CONTAINER head(const CONTAINER& c, int n) {
     // if (n > c.size()) throw RangeError("Requested head longer than container");
     if (n < 0) n = std::max(0, (int)c.size()+n);
     n = std::min(n, (int)c.size());
     return slice(c, 0, n);
   }
 
   /// @brief Tail slice of the @a n last container elements
   ///
   /// Negative @a n means to take the tail from after the @a{n}th element
   template <typename CONTAINER>
   inline CONTAINER tail(const CONTAINER& c, int n) {
     // if (n > c.size()) throw RangeError("Requested tail longer than container");
     if (n < 0) n = std::max(0, (int)c.size()+n);
     n = std::min(n, (int)c.size());
     return slice(c, c.size()-n);
   }
 
 
   using std::min;
   using std::max;
 
   /// Find the minimum value in the vector
   inline double min(const vector<double>& in, double errval=DBL_NAN) {
     return *std::min_element(in.begin(), in.end());
   }
 
   /// Find the maximum value in the vector
   inline double max(const vector<double>& in, double errval=DBL_NAN) {
     const auto e = std::max_element(in.begin(), in.end());
     return e != in.end() ? *e : errval;
   }
 
   /// Find the minimum and maximum values in the vector
   inline pair<double,double> minmax(const vector<double>& in, double errval=DBL_NAN) {
     const auto e = std::minmax_element(in.begin(), in.end());
     const double rtnmin = e.first != in.end() ? *e.first : errval;
     const double rtnmax = e.second != in.end() ? *e.first : errval;
     return std::make_pair(rtnmin, rtnmax);
   }
 
 
   /// Find the minimum value in the vector
   inline int min(const vector<int>& in, int errval=-1) {
     const auto e = std::min_element(in.begin(), in.end());
     return e != in.end() ? *e : errval;
   }
 
   /// Find the maximum value in the vector
   inline int max(const vector<int>& in, int errval=-1) {
     const auto e = std::max_element(in.begin(), in.end());
     return e != in.end() ? *e : errval;
   }
 
   /// Find the minimum and maximum values in the vector
   inline pair<int,int> minmax(const vector<int>& in, int errval=-1) {
     const auto e = std::minmax_element(in.begin(), in.end());
     const double rtnmin = e.first != in.end() ? *e.first : errval;
     const double rtnmax = e.second != in.end() ? *e.first : errval;
     return std::make_pair(rtnmin, rtnmax);
   }
 
   //@}
 
 
 }
 
 #endif
diff --git a/plotnanas b/plotnanas
--- a/plotnanas
+++ b/plotnanas
@@ -1,23 +1,27 @@
 #! /usr/bin/env python
 
+import argparse
+ap = argparse.ArgumentParser()
+ap.add_argument("DATFILE", metavar="file", default="nanas.dat", help="release/count data file to read")
+args = ap.parse_args()
+
 import datetime
 import numpy as np
 import matplotlib.pyplot as plt
 # import matplotlib.dates as mdates
 # import matplotlib.cbook as cbook
 
-datfile = "nanas.dat"
 tags, dates, nanas = [], [], []
-with open(datfile) as f:
+with open(args.DATFILE) as f:
     for line in f:
         items = line.split()
         tags.append(items[0])
         ts = float(items[1].replace("-3600", "").replace("-7200", ""))
         dates.append(datetime.date.fromtimestamp(ts))
         nanas.append(int(items[2]))
 
 plt.plot(dates, nanas)
 plt.xlabel("Year")
 plt.ylabel("# analyses")
-plt.savefig("nanas.pdf")
+plt.savefig(args.DATFILE.replace(".dat", ".pdf"))
 # plt.show()
diff --git a/pyext/rivet/core.pyx b/pyext/rivet/core.pyx
--- a/pyext/rivet/core.pyx
+++ b/pyext/rivet/core.pyx
@@ -1,210 +1,215 @@
 # distutils: language = c++
 
 cimport rivet as c
 from cython.operator cimport dereference as deref
 # Need to be careful with memory management -- perhaps use the base object that
 # we used in YODA?
 
 cdef extern from "<utility>" namespace "std" nogil:
     cdef c.unique_ptr[c.Analysis] move(c.unique_ptr[c.Analysis])
 
 cdef class AnalysisHandler:
     cdef c.AnalysisHandler *_ptr
 
     def __cinit__(self):
         self._ptr = new c.AnalysisHandler()
 
     def __del__(self):
         del self._ptr
 
     def setIgnoreBeams(self, ignore=True):
         self._ptr.setIgnoreBeams(ignore)
 
     def addAnalysis(self, name):
         self._ptr.addAnalysis(name)
         return self
 
     def analysisNames(self):
         anames = self._ptr.analysisNames()
         return [a for a in anames]
 
     # def analysis(self, aname):
     #     cdef c.Analysis* ptr = self._ptr.analysis(aname)
     #     cdef Analysis pyobj = Analysis.__new__(Analysis)
     #     if not ptr:
     #         return None
     #     pyobj._ptr = ptr
     #     return pyobj
 
     def writeData(self, name):
         self._ptr.writeData(name)
 
     def crossSection(self):
         return self._ptr.crossSection()
 
     def finalize(self):
         self._ptr.finalize()
 
 
 cdef class Run:
     cdef c.Run *_ptr
 
     def __cinit__(self, AnalysisHandler h):
         self._ptr = new c.Run(h._ptr[0])
 
     def __del__(self):
         del self._ptr
 
     def setCrossSection(self, double x):
         self._ptr.setCrossSection(x)
         return self
 
     def setListAnalyses(self, choice):
         self._ptr.setListAnalyses(choice)
         return self
 
     def init(self, name, weight=1.0):
         return self._ptr.init(name, weight)
 
     def openFile(self, name, weight=1.0):
         return self._ptr.openFile(name, weight)
 
     def readEvent(self):
         return self._ptr.readEvent()
 
     def skipEvent(self):
         return self._ptr.skipEvent()
 
     def processEvent(self):
         return self._ptr.processEvent()
 
     def finalize(self):
         return self._ptr.finalize()
 
 
 cdef class Analysis:
     cdef c.unique_ptr[c.Analysis] _ptr
 
     def __init__(self):
         raise RuntimeError('This class cannot be instantiated')
 
     def requiredBeams(self):
         return deref(self._ptr).requiredBeams()
 
     def requiredEnergies(self):
         return deref(self._ptr).requiredEnergies()
 
+    def keywords(self):
+        return deref(self._ptr).keywords()
+
     def authors(self):
         return deref(self._ptr).authors()
 
     def bibKey(self):
         return deref(self._ptr).bibKey()
 
     def name(self):
         return deref(self._ptr).name()
 
     def bibTeX(self):
         return deref(self._ptr).bibTeX()
 
     def references(self):
         return deref(self._ptr).references()
 
     def collider(self):
         return deref(self._ptr).collider()
 
     def description(self):
         return deref(self._ptr).description()
 
     def experiment(self):
         return deref(self._ptr).experiment()
 
     def inspireId(self):
         return deref(self._ptr).inspireId()
 
     def spiresId(self):
         return deref(self._ptr).spiresId()
 
     def runInfo(self):
         return deref(self._ptr).runInfo()
 
     def status(self):
         return deref(self._ptr).status()
 
     def summary(self):
         return deref(self._ptr).summary()
 
     def year(self):
         return deref(self._ptr).year()
 
+    def luminosityfb(self):
+        return deref(self._ptr).luminosityfb()
 
 #cdef object
 LEVELS = dict(TRACE = 0, DEBUG = 10, INFO = 20,
               WARN = 30, WARNING = 30, ERROR = 40,
               CRITICAL = 50, ALWAYS = 50)
 
 
 cdef class AnalysisLoader:
     @staticmethod
     def analysisNames():
         return c.AnalysisLoader_analysisNames()
 
     @staticmethod
     def getAnalysis(name):
         cdef c.unique_ptr[c.Analysis] ptr = c.AnalysisLoader_getAnalysis(name)
         cdef Analysis pyobj = Analysis.__new__(Analysis)
         if not ptr:
             return None
         pyobj._ptr = move(ptr)
         # Create python object
         return pyobj
 
 
 def getAnalysisLibPaths():
     return c.getAnalysisLibPaths()
 
 def setAnalysisLibPaths(xs):
     c.setAnalysisLibPaths(xs)
 
 def addAnalysisLibPath(path):
     c.addAnalysisLibPath(path)
 
 
 def setAnalysisDataPaths(xs):
     c.setAnalysisDataPaths(xs)
 
 def addAnalysisDataPath(path):
     c.addAnalysisDataPath(path)
 
 def getAnalysisDataPaths():
     return c.getAnalysisDataPaths()
 
 def findAnalysisDataFile(q):
     return c.findAnalysisDataFile(q)
 
 
 def getAnalysisRefPaths():
     return c.getAnalysisRefPaths()
 
 def findAnalysisRefFile(q):
     return c.findAnalysisRefFile(q)
 
 
 def getAnalysisInfoPaths():
     return c.getAnalysisInfoPaths()
 
 def findAnalysisInfoFile(q):
     return c.findAnalysisInfoFile(q)
 
 
 def getAnalysisPlotPaths():
     return c.getAnalysisPlotPaths()
 
 def findAnalysisPlotFile(q):
     return c.findAnalysisPlotFile(q)
 
 
 def version():
     return c.version()
 
 def setLogLevel(name, level):
     c.setLogLevel(name, level)
diff --git a/pyext/rivet/rivet.pxd b/pyext/rivet/rivet.pxd
--- a/pyext/rivet/rivet.pxd
+++ b/pyext/rivet/rivet.pxd
@@ -1,89 +1,91 @@
 from libcpp.map cimport map
 from libcpp.pair cimport pair
 from libcpp.vector cimport vector
 from libcpp cimport bool
 from libcpp.string cimport string
 from libcpp.memory cimport unique_ptr
 
 ctypedef int PdgId
 ctypedef pair[PdgId,PdgId] PdgIdPair
 
 cdef extern from "Rivet/AnalysisHandler.hh" namespace "Rivet":
     cdef cppclass AnalysisHandler:
         void setIgnoreBeams(bool)
         AnalysisHandler& addAnalysis(string)
         vector[string] analysisNames() const
         # Analysis* analysis(string)
         void writeData(string&)
         double crossSection()
         void finalize()
 
 cdef extern from "Rivet/Run.hh" namespace "Rivet":
     cdef cppclass Run:
         Run(AnalysisHandler)
         Run& setCrossSection(double) # For chaining?
         Run& setListAnalyses(bool)
         bool init(string, double) # $2=1.0
         bool openFile(string, double) # $2=1.0
         bool readEvent()
         bool skipEvent()
         bool processEvent()
         bool finalize()
 
 cdef extern from "Rivet/Analysis.hh" namespace "Rivet":
     cdef cppclass Analysis:
         vector[PdgIdPair]& requiredBeams()
         vector[pair[double, double]] requiredEnergies()
         vector[string] authors()
         vector[string] references()
+        vector[string] keywords()
         string name()
         string bibTeX()
         string bibKey()
         string collider()
         string description()
         string experiment()
         string inspireId()
         string spiresId()
         string runInfo()
         string status()
         string summary()
         string year()
+        string luminosityfb()
 
 # Might need to translate the following errors, although I believe 'what' is now
 # preserved. But often, we need the exception class name.
 #Error
 #RangeError
 #LogicError
 #PidError
 #InfoError
 #WeightError
 #UserError
 
 cdef extern from "Rivet/AnalysisLoader.hh":
     vector[string] AnalysisLoader_analysisNames "Rivet::AnalysisLoader::analysisNames" ()
     unique_ptr[Analysis] AnalysisLoader_getAnalysis "Rivet::AnalysisLoader::getAnalysis" (string)
 
 cdef extern from "Rivet/Tools/RivetPaths.hh" namespace "Rivet":
     vector[string] getAnalysisLibPaths()
     void setAnalysisLibPaths(vector[string])
     void addAnalysisLibPath(string)
 
     vector[string] getAnalysisDataPaths()
     void setAnalysisDataPaths(vector[string])
     void addAnalysisDataPath(string)
     string findAnalysisDataFile(string)
 
     vector[string] getAnalysisRefPaths()
     string findAnalysisRefFile(string)
 
     vector[string] getAnalysisInfoPaths()
     string findAnalysisInfoFile(string)
 
     vector[string] getAnalysisPlotPaths()
     string findAnalysisPlotFile(string)
 
 cdef extern from "Rivet/Rivet.hh" namespace "Rivet":
     string version()
 
 cdef extern from "Rivet/Tools/Logging.hh":
     void setLogLevel "Rivet::Log::setLevel" (string, int)
diff --git a/src/Analyses/ALICE_2012_I1116147.cc b/src/Analyses/ALICE_2012_I1116147.cc
new file mode 100644
--- /dev/null
+++ b/src/Analyses/ALICE_2012_I1116147.cc
@@ -0,0 +1,86 @@
+//-*- C++ -*-
+#include "Rivet/Analysis.hh"
+#include "Rivet/Projections/UnstableFinalState.hh"
+
+namespace Rivet {
+
+
+  class ALICE_2012_I1116147 : public Analysis {
+  public:
+
+    /// Constructor
+    DEFAULT_RIVET_ANALYSIS_CTOR(ALICE_2012_I1116147);
+
+
+    void init() {
+
+      const UnstableFinalState ufs(Cuts::absrap < RAPMAX);
+      addProjection(ufs, "UFS");
+
+      // Check if cm energy is 7 TeV or 0.9 TeV
+      if (fuzzyEquals(sqrtS()/GeV, 900, 1E-3))  _cm_energy_case = 1;
+      else if (fuzzyEquals(sqrtS()/GeV, 7000, 1E-3)) _cm_energy_case = 2;
+      if (_cm_energy_case == 0)
+        throw UserError("Center of mass energy of the given input is neither 900 nor 7000 GeV.");
+
+      // Book histos
+      if (_cm_energy_case == 1) {
+        _h_pi0 = bookHisto1D(2,1,1);
+      } else {
+        _h_pi0 = bookHisto1D(1,1,1);
+        _h_eta = bookHisto1D(3,1,1);
+        _h_etaToPion = bookScatter2D(4,1,1);
+      }
+
+      // Temporary plots with the binning of _h_etaToPion to construct the eta/pi0 ratio
+      _temp_h_pion = bookHisto1D("TMP/h_pion", refData(4,1,1));
+      _temp_h_eta = bookHisto1D("TMP/h_eta", refData(4,1,1));
+    }
+
+
+    void analyze(const Event& event) {
+
+      const FinalState& ufs = applyProjection<UnstableFinalState>(event, "UFS");
+      const double weight = event.weight();
+
+      for (const Particle& p : ufs.particles()) {
+        if (p.pid() == 111) {
+          // Neutral pion
+          _h_pi0->fill(p.pT()/GeV, weight /(TWOPI*p.pT()/GeV*2*RAPMAX));
+          _temp_h_pion->fill(p.pT()/GeV, weight);
+        } else if ((p.pid() == 221) && _cm_energy_case == 2) {
+          // Eta meson (only for 7 TeV)
+          _h_eta->fill(p.pT()/GeV, weight /(TWOPI*p.pT()/GeV*2*RAPMAX));
+          _temp_h_eta->fill(p.pT()/GeV, weight);
+        }
+      }
+
+    }
+
+
+    void finalize() {
+
+      scale(_h_pi0, crossSection()/microbarn/sumOfWeights());
+      scale(_h_eta, crossSection()/microbarn/sumOfWeights());
+
+      if (_cm_energy_case == 2)
+        divide(_temp_h_eta, _temp_h_pion, _h_etaToPion);
+
+    }
+
+
+  private:
+
+    const double RAPMAX = 0.8;
+    int _cm_energy_case = 0;
+
+    Histo1DPtr _h_pi0, _h_eta;
+    Histo1DPtr _temp_h_pion, _temp_h_eta;
+    Scatter2DPtr _h_etaToPion;
+
+  };
+
+
+  DECLARE_RIVET_PLUGIN(ALICE_2012_I1116147);
+
+}
diff --git a/src/Analyses/ATLAS_2011_I929691.cc b/src/Analyses/ATLAS_2011_I929691.cc
new file mode 100644
--- /dev/null
+++ b/src/Analyses/ATLAS_2011_I929691.cc
@@ -0,0 +1,103 @@
+// -*- C++ -*-
+#include "Rivet/Analysis.hh"
+#include "Rivet/Projections/FinalState.hh"
+#include "Rivet/Projections/FastJets.hh"
+#include "Rivet/Projections/ChargedFinalState.hh"
+
+namespace Rivet {
+
+
+  /// Jet fragmentation at 7 TeV
+  class ATLAS_2011_I929691 : public Analysis {
+  public:
+
+    /// Constructor
+    DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2011_I929691);
+
+
+    /// Initialisation
+    void init() {
+      const FinalState fs(Cuts::abseta < 2.0);
+
+      FastJets antikt_06_jets(fs, FastJets::ANTIKT, 0.6, JetAlg::NO_MUONS, JetAlg::NO_INVISIBLES);
+      declare(antikt_06_jets, "jets");
+
+      ChargedFinalState tracks(Cuts::pT > 0.5*GeV && Cuts::abseta < 2.0);
+      declare(tracks, "tracks");
+
+      // Set up the histograms (each element is a binning in jet pT)
+      for (size_t i = 0; i < 10; i++) {
+        _p_F_z[i]     = bookProfile1D(i+1, 1, 1);
+        _p_rho_r[i]   = bookProfile1D(i+11, 1, 1);
+        _p_f_pTrel[i] = bookProfile1D(i+21, 1, 1);
+      }
+
+    }
+
+
+    // Per-event analysis
+    void analyze(const Event& event) {
+
+      const Jets alljets = apply<FastJets>(event, "jets").jetsByPt(Cuts::absrap < 1.2);
+      const Particles& tracks = apply<ChargedFinalState>(event, "tracks").particlesByPt();
+
+      for (size_t i = 0; i < 10; ++i) {
+
+        const Jets jets = filter_select(alljets, Cuts::pT > bedges[i] && Cuts::pT < bedges[i+1]);
+        const int n_jets = jets.size();
+        if (n_jets == 0) continue;
+
+        // First... count the tracks
+        Histo1D h_ntracks_z(*_p_F_z[i]), h_ntracks_r(*_p_rho_r[i]), h_ntracks_pTrel(*_p_f_pTrel[i]);
+
+        for (const Jet& j : jets) {
+          for (const Particle& p : tracks) {
+            const double dr = deltaR(j, p, RAPIDITY);
+            if (dr > 0.6) continue; // The paper uses pseudorapidity, but this is a requirement for filling the histogram
+            h_ntracks_z.fill(z(j, p), 1.0/n_jets);
+            h_ntracks_r.fill(dr, 1.0/n_jets);
+            h_ntracks_pTrel.fill(pTrel(j, p), 1.0/n_jets);
+          }
+        }
+
+        // Then... calculate the observable and fill the profiles
+        const double weight = event.weight();
+        for (const HistoBin1D& b : h_ntracks_z.bins())
+          _p_F_z[i]->fill(b.xMid(), b.height(), weight);
+        for (const HistoBin1D& b : h_ntracks_r.bins())
+          _p_rho_r[i]->fill(b.xMid(), b.area()/annulus_area(b.xMin(), b.xMax()), weight);
+        for (const HistoBin1D& b : h_ntracks_pTrel.bins())
+          _p_f_pTrel[i]->fill(b.xMid(), b.height(), weight);
+
+      }
+
+    }
+
+
+    double z (const Jet& jet, const Particle& ch) {
+      return dot(jet.p3(), ch.p3()) / jet.p3().mod2();
+    }
+
+    double pTrel (const Jet& jet, const Particle& ch) {
+      return (ch.p3().cross(jet.p3())).mod()/(jet.p3().mod());
+    }
+
+    // To calculate the area of the annulus in an r bin
+    double annulus_area(double r1, double r2) {
+      return M_PI*(sqr(r2) - sqr(r1));
+    }
+
+
+  private:
+
+    Profile1DPtr _p_F_z[10], _p_rho_r[10], _p_f_pTrel[10];
+    const vector<double> bedges = { 25., 40., 60., 80., 110., 160., 210., 260., 310., 400., 500. };
+
+  };
+
+
+  // The hook for the plugin system
+  DECLARE_RIVET_PLUGIN(ATLAS_2011_I929691);
+
+
+}
diff --git a/src/Analyses/ATLAS_2014_I1327229.cc b/src/Analyses/ATLAS_2014_I1327229.cc
--- a/src/Analyses/ATLAS_2014_I1327229.cc
+++ b/src/Analyses/ATLAS_2014_I1327229.cc
@@ -1,1327 +1,1330 @@
 // -*- C++ -*-
 #include "Rivet/Analysis.hh"
 #include "Rivet/Projections/FinalState.hh"
 #include "Rivet/Projections/ChargedFinalState.hh"
 #include "Rivet/Projections/VisibleFinalState.hh"
 #include "Rivet/Projections/VetoedFinalState.hh"
 #include "Rivet/Projections/IdentifiedFinalState.hh"
 #include "Rivet/Projections/UnstableFinalState.hh"
 #include "Rivet/Projections/FastJets.hh"
 
 
 namespace Rivet {
 
 
   class ATLAS_2014_I1327229 : public Analysis {
   public:
 
     /// Constructor
     ATLAS_2014_I1327229()
       : Analysis("ATLAS_2014_I1327229") {    }
 
 
     /// Book histograms and initialise projections before the run
     void init() {
 
       // To calculate the acceptance without having the fiducial lepton efficiencies included, this part can be turned off
       _use_fiducial_lepton_efficiency = true;
 
       // Random numbers for simulation of ATLAS detector reconstruction efficiency
-      srand (160385);
+      /// @todo Replace with SmearedParticles etc.
+      srand(160385);
 
       // Read in all signal regions
       _signal_regions = getSignalRegions();
 
       // Set number of events per signal region to 0
       for (size_t i = 0; i < _signal_regions.size(); i++)
         _eventCountsPerSR[_signal_regions[i]] = 0.0;
 
       // Final state including all charged and neutral particles
       const FinalState fs(-5.0, 5.0, 1*GeV);
       declare(fs, "FS");
 
       // Final state including all charged particles
       declare(ChargedFinalState(-2.5, 2.5, 1*GeV), "CFS");
 
       // Final state including all visible particles (to calculate MET, Jets etc.)
       declare(VisibleFinalState(-5.0,5.0),"VFS");
 
       // Final state including all AntiKt 04 Jets
       VetoedFinalState vfs;
       vfs.addVetoPairId(PID::MUON);
       declare(FastJets(vfs, FastJets::ANTIKT, 0.4), "AntiKtJets04");
 
       // Final state including all unstable particles (including taus)
       declare(UnstableFinalState(Cuts::abseta < 5.0 && Cuts::pT > 5*GeV),"UFS");
 
       // Final state including all electrons
       IdentifiedFinalState elecs(Cuts::abseta < 2.47 && Cuts::pT > 10*GeV);
       elecs.acceptIdPair(PID::ELECTRON);
       declare(elecs, "elecs");
 
       // Final state including all muons
       IdentifiedFinalState muons(Cuts::abseta < 2.5 && Cuts::pT > 10*GeV);
       muons.acceptIdPair(PID::MUON);
       declare(muons, "muons");
 
 
 
       /// Book histograms:
       book(_h_HTlep_all ,"HTlep_all", 30,0,3000);
       book(_h_HTjets_all ,"HTjets_all", 30,0,3000);
       book(_h_MET_all ,"MET_all", 30,0,1500);
       book(_h_Meff_all ,"Meff_all", 50,0,5000);
       book(_h_min_pT_all ,"min_pT_all", 50, 0, 2000);
       book(_h_mT_all ,"mT_all", 50, 0, 2000);
 
       book(_h_e_n ,"e_n", 10, -0.5, 9.5);
       book(_h_mu_n ,"mu_n", 10, -0.5, 9.5);
       book(_h_tau_n ,"tau_n", 10, -0.5, 9.5);
 
       book(_h_pt_1_3l ,"pt_1_3l", 100, 0, 2000);
       book(_h_pt_2_3l ,"pt_2_3l", 100, 0, 2000);
       book(_h_pt_3_3l ,"pt_3_3l", 100, 0, 2000);
       book(_h_pt_1_2ltau ,"pt_1_2ltau", 100, 0, 2000);
       book(_h_pt_2_2ltau ,"pt_2_2ltau", 100, 0, 2000);
       book(_h_pt_3_2ltau ,"pt_3_2ltau", 100, 0, 2000);
 
       book(_h_excluded ,"excluded", 2, -0.5, 1.5);
     }
 
 
     /// Perform the per-event analysis
     void analyze(const Event& event) {
 
       // Muons
       Particles muon_candidates;
       const Particles charged_tracks = apply<ChargedFinalState>(event, "CFS").particles();
       const Particles visible_particles = apply<VisibleFinalState>(event, "VFS").particles();
-      foreach (const Particle& mu, apply<IdentifiedFinalState>(event, "muons").particlesByPt() ) {
+      for (const Particle& mu : apply<IdentifiedFinalState>(event, "muons").particlesByPt() ) {
 
         // Calculate pTCone30 variable (pT of all tracks within dR<0.3 - pT of muon itself)
         double pTinCone = -mu.pT();
-        foreach (const Particle& track, charged_tracks ) {
+        for (const Particle& track : charged_tracks ) {
           if (deltaR(mu.momentum(),track.momentum()) < 0.3 )
             pTinCone += track.pT();
         }
 
         // Calculate eTCone30 variable (pT of all visible particles, except muons, within 0.1 < dR < 0.3)
         double eTinCone = 0.;
-        foreach (const Particle& visible_particle, visible_particles) {
+        for (const Particle& visible_particle : visible_particles) {
           if (visible_particle.abspid() != PID::MUON && inRange(deltaR(mu.momentum(),visible_particle.momentum()), 0.1, 0.3))
             eTinCone += visible_particle.pT();
         }
 
         // Apply reconstruction efficiency and simulate reconstruction
         int muon_id = 13;
         if (mu.hasAncestor(PID::TAU) || mu.hasAncestor(-PID::TAU)) muon_id = 14;
         const double eff = (_use_fiducial_lepton_efficiency) ? apply_reco_eff(muon_id,mu) : 1.0;
         const bool keep_muon = rand()/static_cast<double>(RAND_MAX)<=eff;
 
         // Keep muon if pTCone30/pT <= 0.1 and eTCone30/pT < 0.1 and reconstructed
         if (keep_muon && pTinCone/mu.pT() <= 0.1 && eTinCone/mu.pT() < 0.1)
           muon_candidates.push_back(mu);
       }
 
       // Electrons
       Particles electron_candidates;
-      foreach (const Particle& e, apply<IdentifiedFinalState>(event, "elecs").particlesByPt() ) {
+      for (const Particle& e : apply<IdentifiedFinalState>(event, "elecs").particlesByPt() ) {
         // Neglect electrons in crack regions
         if (inRange(e.abseta(), 1.37, 1.52)) continue;
 
         // Calculate pTCone30 variable (pT of all tracks within dR<0.3 - pT of electron itself)
         double pTinCone = -e.pT();
-        foreach (const Particle& track, charged_tracks) {
+        for (const Particle& track : charged_tracks) {
           if (deltaR(e.momentum(), track.momentum()) < 0.3 ) pTinCone += track.pT();
         }
 
         // Calculate eTCone30 variable (pT of all visible particles (except muons) within 0.1 < dR < 0.3)
         double eTinCone = 0.;
-        foreach (const Particle& visible_particle, visible_particles) {
+        for (const Particle& visible_particle : visible_particles) {
           if (visible_particle.abspid() != PID::MUON && inRange(deltaR(e.momentum(),visible_particle.momentum()), 0.1, 0.3))
             eTinCone += visible_particle.pT();
         }
 
         // Apply reconstruction efficiency and simulate reconstruction
         int elec_id = 11;
         if (e.hasAncestor(15) || e.hasAncestor(-15)) elec_id = 12;
         const double eff = (_use_fiducial_lepton_efficiency) ? apply_reco_eff(elec_id,e) : 1.0;
         const bool keep_elec = rand()/static_cast<double>(RAND_MAX)<=eff;
 
         // Keep electron if pTCone30/pT <= 0.1 and eTCone30/pT < 0.1 and reconstructed
         if (keep_elec && pTinCone/e.pT() <= 0.1  && eTinCone/e.pT() < 0.1)
           electron_candidates.push_back(e);
       }
 
 
       // Taus
       Particles tau_candidates;
-      foreach (const Particle& tau, apply<UnstableFinalState>(event, "UFS").particles() ) {
+      for (const Particle& tau : apply<UnstableFinalState>(event, "UFS").particles() ) {
         // Only pick taus out of all unstable particles
         if ( tau.abspid() != PID::TAU) continue;
         // Check that tau has decayed into daughter particles
         if (tau.genParticle()->end_vertex() == 0) continue;
        // Calculate visible tau momentum using the tau neutrino momentum in the tau decay
         FourMomentum daughter_tau_neutrino_momentum = get_tau_neutrino_momentum(tau);
         Particle tau_vis = tau;
         tau_vis.setMomentum(tau.momentum()-daughter_tau_neutrino_momentum);
         // keep only taus in certain eta region and above 15 GeV of visible tau pT
         if ( tau_vis.pT()/GeV <= 15.0 || tau_vis.abseta() > 2.5) continue;
 
         // Get prong number (number of tracks) in tau decay and check if tau decays leptonically
         unsigned int nprong = 0;
         bool lep_decaying_tau = false;
         get_prong_number(tau.genParticle(),nprong,lep_decaying_tau);
 
         // Apply reconstruction efficiency and simulate reconstruction
         int tau_id = 15;
         if (nprong == 1) tau_id = 15;
         else if (nprong == 3) tau_id = 16;
 
 
         const double eff = (_use_fiducial_lepton_efficiency) ? apply_reco_eff(tau_id,tau_vis) : 1.0;
         const bool keep_tau = rand()/static_cast<double>(RAND_MAX)<=eff;
 
         // Keep tau if nprong = 1, it decays hadronically and it is reconstructed
         if ( !lep_decaying_tau && nprong == 1 && keep_tau) tau_candidates.push_back(tau_vis);
       }
 
       // Jets (all anti-kt R=0.4 jets with pT > 30 GeV and |eta| < 4.9)
       Jets jet_candidates;
-      foreach (const Jet& jet, apply<FastJets>(event, "AntiKtJets04").jetsByPt(30.0*GeV) ) {
+      for (const Jet& jet : apply<FastJets>(event, "AntiKtJets04").jetsByPt(30.0*GeV) ) {
         if (jet.abseta() < 4.9 ) jet_candidates.push_back(jet);
       }
 
       // ETmiss
       Particles vfs_particles = apply<VisibleFinalState>(event, "VFS").particles();
       FourMomentum pTmiss;
-      foreach (const Particle& p, vfs_particles ) pTmiss -= p.momentum();
+      for (const Particle& p : vfs_particles)
+        pTmiss -= p.momentum();
       double eTmiss = pTmiss.pT()/GeV;
 
 
       // -------------------------
       // Overlap removal
 
       // electron - electron
       Particles electron_candidates_2;
       for(size_t ie = 0; ie < electron_candidates.size(); ++ie) {
         const Particle& e = electron_candidates[ie];
         bool away = true;
         // If electron pair within dR < 0.1: remove electron with lower pT
         for(size_t ie2 = 0; ie2 < electron_candidates_2.size(); ++ie2) {
           if (deltaR(e.momentum(),electron_candidates_2[ie2].momentum()) < 0.1 ) {
             away = false;
             break;
           }
         }
         // If isolated keep it
         if ( away )
           electron_candidates_2.push_back( e );
       }
 
       // jet - electron
       Jets recon_jets;
-      foreach (const Jet& jet, jet_candidates) {
+      for (const Jet& jet : jet_candidates) {
         bool away = true;
         // If jet within dR < 0.2 of electron: remove jet
-        foreach (const Particle& e, electron_candidates_2) {
+        for (const Particle& e : electron_candidates_2) {
           if (deltaR(e.momentum(), jet.momentum()) < 0.2 ) {
             away = false;
             break;
           }
         }
 
         // jet - tau
         if ( away )  {
           // If jet within dR < 0.2 of tau: remove jet
-          foreach (const Particle& tau, tau_candidates) {
+          for (const Particle& tau : tau_candidates) {
             if (deltaR(tau.momentum(), jet.momentum()) < 0.2 ) {
               away = false;
               break;
             }
           }
         }
         // If isolated keep it
         if ( away )
           recon_jets.push_back( jet );
       }
 
       // electron - jet
       Particles recon_leptons, recon_e;
       for (size_t ie = 0; ie < electron_candidates_2.size(); ++ie) {
         const Particle& e = electron_candidates_2[ie];
         // If electron within 0.2 < dR < 0.4 from any jets: remove electron
         bool away = true;
-        foreach (const Jet& jet, recon_jets) {
+        for (const Jet& jet : recon_jets) {
           if (deltaR(e.momentum(), jet.momentum()) < 0.4 ) {
             away = false;
             break;
           }
         }
         // electron - muon
         // If electron within dR < 0.1 of a muon: remove electron
         if (away) {
-          foreach (const Particle& mu, muon_candidates) {
+          for (const Particle& mu : muon_candidates) {
             if (deltaR(mu.momentum(),e.momentum()) < 0.1) {
               away = false;
               break;
             }
           }
         }
         // If isolated keep it
         if ( away )  {
           recon_e.push_back( e );
           recon_leptons.push_back( e );
           }
       }
 
       // tau - electron
       Particles recon_tau;
-      foreach (const Particle& tau, tau_candidates) {
+      for (const Particle& tau : tau_candidates) {
         bool away = true;
         // If tau within dR < 0.2 of an electron: remove tau
-        foreach (const Particle & e, recon_e) {
+        for (const Particle & e : recon_e) {
           if (deltaR(tau.momentum(),e.momentum()) < 0.2 ) {
             away = false;
             break;
           }
         }
         // tau - muon
         // If tau within dR < 0.2 of a muon: remove tau
         if (away)  {
-          foreach (const Particle& mu, muon_candidates) {
+          for (const Particle& mu : muon_candidates) {
             if (deltaR(tau.momentum(), mu.momentum()) < 0.2 ) {
               away = false;
               break;
             }
           }
         }
         // If isolated keep it
         if (away) recon_tau.push_back( tau );
       }
 
       // muon - jet
       Particles recon_mu, trigger_mu;
       // If muon within dR < 0.4 of a jet: remove muon
-      foreach (const Particle& mu, muon_candidates ) {
+      for (const Particle& mu : muon_candidates ) {
         bool away = true;
-        foreach (const Jet& jet, recon_jets) {
+        for (const Jet& jet : recon_jets) {
           if (deltaR(mu.momentum(), jet.momentum()) < 0.4 ) {
             away = false;
             break;
           }
         }
         if (away)  {
           recon_mu.push_back( mu );
           recon_leptons.push_back( mu );
           if (mu.abseta() < 2.4) trigger_mu.push_back( mu );
         }
       }
 
       // End overlap removal
       // ---------------------
 
       // Jet cleaning
       if (rand()/static_cast<double>(RAND_MAX) <= 0.42) {
-        foreach (const Jet& jet, recon_jets ) {
+        for (const Jet& jet : recon_jets ) {
           const double eta = jet.rapidity();
           const double phi = jet.azimuthalAngle(MINUSPI_PLUSPI);
           if(jet.pT() > 25*GeV && inRange(eta,-0.1,1.5) && inRange(phi,-0.9,-0.5)) vetoEvent;
         }
       }
 
       // Event selection
       // Require at least 3 charged tracks in event
       if (charged_tracks.size() < 3) vetoEvent;
 
       // And at least one e/mu passing trigger
       if( !( !recon_e.empty() && recon_e[0].pT()>26.*GeV)  &&
           !( !trigger_mu.empty() && trigger_mu[0].pT()>26.*GeV) ) {
         MSG_DEBUG("Hardest lepton fails trigger");
         vetoEvent;
       }
 
       // And only accept events with at least 2 electrons and muons and at least 3 leptons in total
       if (recon_mu.size() + recon_e.size() + recon_tau.size() < 3 || recon_leptons.size() < 2) vetoEvent;
 
       // Getting the event weight
       const double weight = 1.0;
 
       // Sort leptons by decreasing pT
       sortByPt(recon_leptons);
       sortByPt(recon_tau);
 
       // Calculate HTlep, fill lepton pT histograms & store chosen combination of 3 leptons
       double HTlep = 0.;
       Particles chosen_leptons;
       if (recon_leptons.size() > 2) {
         _h_pt_1_3l->fill(recon_leptons[0].pT()/GeV, weight);
         _h_pt_2_3l->fill(recon_leptons[1].pT()/GeV, weight);
         _h_pt_3_3l->fill(recon_leptons[2].pT()/GeV, weight);
         HTlep = (recon_leptons[0].pT() + recon_leptons[1].pT() + recon_leptons[2].pT())/GeV;
         chosen_leptons.push_back( recon_leptons[0] );
         chosen_leptons.push_back( recon_leptons[1] );
         chosen_leptons.push_back( recon_leptons[2] );
       }
       else {
         _h_pt_1_2ltau->fill(recon_leptons[0].pT()/GeV, weight);
         _h_pt_2_2ltau->fill(recon_leptons[1].pT()/GeV, weight);
         _h_pt_3_2ltau->fill(recon_tau[0].pT()/GeV, weight);
         HTlep = recon_leptons[0].pT()/GeV + recon_leptons[1].pT()/GeV + recon_tau[0].pT()/GeV;
         chosen_leptons.push_back( recon_leptons[0] );
         chosen_leptons.push_back( recon_leptons[1] );
         chosen_leptons.push_back( recon_tau[0] );
       }
 
       // Calculate mT and mTW variable
       Particles mT_leptons;
       Particles mTW_leptons;
       for (size_t i1 = 0; i1 < 3; i1 ++)  {
         for (size_t i2 = i1+1; i2 < 3; i2 ++)  {
           double OSSF_inv_mass = isOSSF_mass(chosen_leptons[i1],chosen_leptons[i2]);
           if (OSSF_inv_mass != 0.)  {
             for (size_t i3 = 0; i3 < 3 ; i3 ++)  {
               if (i3 != i2 && i3 != i1)  {
                 mT_leptons.push_back(chosen_leptons[i3]);
                 if ( fabs(91.0 - OSSF_inv_mass) < 20. )
                   mTW_leptons.push_back(chosen_leptons[i3]);
               }
             }
           }
           else  {
             mT_leptons.push_back(chosen_leptons[0]);
             mTW_leptons.push_back(chosen_leptons[0]);
           }
         }
       }
 
       sortByPt(mT_leptons);
       sortByPt(mTW_leptons);
 
       double mT = sqrt(2*pTmiss.pT()/GeV*mT_leptons[0].pT()/GeV*(1-cos(pTmiss.phi()-mT_leptons[0].phi())));
       double mTW = sqrt(2*pTmiss.pT()/GeV*mTW_leptons[0].pT()/GeV*(1-cos(pTmiss.phi()-mTW_leptons[0].phi())));
 
       // Calculate Min pT variable
       double min_pT = chosen_leptons[2].pT()/GeV;
 
       // Number of prompt e/mu and had taus
       _h_e_n->fill(recon_e.size(),weight);
       _h_mu_n->fill(recon_mu.size(),weight);
       _h_tau_n->fill(recon_tau.size(),weight);
 
       // Calculate HTjets variable
       double HTjets = 0.;
-      foreach (const Jet& jet, recon_jets)
+      for (const Jet& jet : recon_jets)
         HTjets += jet.pT()/GeV;
 
       // Calculate meff variable
       double meff = eTmiss + HTjets;
       Particles all_leptons;
-      foreach (const Particle& e, recon_e ) {
+      for (const Particle& e : recon_e ) {
         meff += e.pT()/GeV;
         all_leptons.push_back( e );
       }
-      foreach (const Particle& mu, recon_mu) {
+      for (const Particle& mu : recon_mu) {
         meff += mu.pT()/GeV;
         all_leptons.push_back( mu );
       }
-      foreach (const Particle& tau, recon_tau) {
+      for (const Particle& tau : recon_tau) {
         meff += tau.pT()/GeV;
         all_leptons.push_back( tau );
       }
 
       // Fill histograms of kinematic variables
       _h_HTlep_all->fill(HTlep,weight);
       _h_HTjets_all->fill(HTjets,weight);
       _h_MET_all->fill(eTmiss,weight);
       _h_Meff_all->fill(meff,weight);
       _h_min_pT_all->fill(min_pT,weight);
       _h_mT_all->fill(mT,weight);
 
       // Determine signal region (3l / 2ltau , onZ / offZ OSSF / offZ no-OSSF)
       // 3l vs. 2ltau
       string basic_signal_region;
       if (recon_mu.size() + recon_e.size() > 2)
         basic_signal_region += "3l_";
       else if ( (recon_mu.size() + recon_e.size() == 2) && (recon_tau.size() > 0))
         basic_signal_region += "2ltau_";
 
       // Is there an OSSF pair or a three lepton combination with an invariant mass close to the Z mass
       int onZ = isonZ(chosen_leptons);
       if (onZ == 1) basic_signal_region += "onZ";
       else if (onZ == 0)  {
         bool OSSF = isOSSF(chosen_leptons);
         if (OSSF) basic_signal_region += "offZ_OSSF";
         else basic_signal_region += "offZ_noOSSF";
         }
 
       // Check in which signal regions this event falls and adjust event counters
       // INFO: The b-jet signal regions of the paper are not included in this Rivet implementation
       fillEventCountsPerSR(basic_signal_region,onZ,HTlep,eTmiss,HTjets,meff,min_pT,mTW,weight);
     }
 
 
     /// Normalise histograms etc., after the run
     void finalize() {
 
       // Normalize to an integrated luminosity of 1 fb-1
       double norm = crossSection()/femtobarn/sumOfWeights();
 
       string best_signal_region = "";
       double ratio_best_SR = 0.;
 
       // Loop over all signal regions and find signal region with best sensitivity (ratio signal events/visible cross-section)
       for (size_t i = 0; i < _signal_regions.size(); i++) {
         double signal_events = _eventCountsPerSR[_signal_regions[i]] * norm;
         // Use expected upper limits to find best signal region:
         double UL95 = getUpperLimit(_signal_regions[i],false);
         double ratio = signal_events / UL95;
         if (ratio > ratio_best_SR)  {
           best_signal_region = _signal_regions.at(i);
           ratio_best_SR = ratio;
         }
       }
 
       double signal_events_best_SR = _eventCountsPerSR[best_signal_region] * norm;
       double exp_UL_best_SR = getUpperLimit(best_signal_region, false);
       double obs_UL_best_SR = getUpperLimit(best_signal_region, true);
 
 
       // Print out result
       cout << "----------------------------------------------------------------------------------------" << endl;
       cout << "Number of total events: " << sumOfWeights() << endl;
       cout << "Best signal region: " << best_signal_region << endl;
       cout << "Normalized number of signal events in this best signal region (per fb-1): " << signal_events_best_SR << endl;
       cout << "Efficiency*Acceptance: " << _eventCountsPerSR[best_signal_region]/sumOfWeights() << endl;
       cout << "Cross-section [fb]: " << crossSection()/femtobarn << endl;
       cout << "Expected visible cross-section (per fb-1): " << exp_UL_best_SR << endl;
       cout << "Ratio (signal events / expected visible cross-section): " << ratio_best_SR << endl;
       cout << "Observed visible cross-section (per fb-1): " << obs_UL_best_SR << endl;
       cout << "Ratio (signal events / observed visible cross-section): " <<  signal_events_best_SR/obs_UL_best_SR << endl;
       cout << "----------------------------------------------------------------------------------------" << endl;
 
       cout << "Using the EXPECTED limits (visible cross-section) of the analysis: " << endl;
       if (signal_events_best_SR > exp_UL_best_SR)  {
         cout << "Since the number of signal events > the visible cross-section, this model/grid point is EXCLUDED with 95% C.L." << endl;
         _h_excluded->fill(1);
       }
       else  {
         cout << "Since the number of signal events < the visible cross-section, this model/grid point is NOT EXCLUDED." << endl;
         _h_excluded->fill(0);
       }
       cout << "----------------------------------------------------------------------------------------" << endl;
 
       cout << "Using the OBSERVED limits (visible cross-section) of the analysis: " << endl;
       if (signal_events_best_SR > obs_UL_best_SR)  {
         cout << "Since the number of signal events > the visible cross-section, this model/grid point is EXCLUDED with 95% C.L." << endl;
         _h_excluded->fill(1);
       }
       else  {
         cout << "Since the number of signal events < the visible cross-section, this model/grid point is NOT EXCLUDED." << endl;
         _h_excluded->fill(0);
       }
       cout << "----------------------------------------------------------------------------------------" << endl;
       cout << "INFO: The b-jet signal regions of the paper are not included in this Rivet implementation." << endl;
       cout << "----------------------------------------------------------------------------------------" << endl;
 
 
       /// Normalize to cross section
 
       if (norm != 0)  {
         scale(_h_HTlep_all, norm);
         scale(_h_HTjets_all, norm);
         scale(_h_MET_all, norm);
         scale(_h_Meff_all, norm);
         scale(_h_min_pT_all, norm);
         scale(_h_mT_all, norm);
 
         scale(_h_pt_1_3l, norm);
         scale(_h_pt_2_3l, norm);
         scale(_h_pt_3_3l, norm);
         scale(_h_pt_1_2ltau, norm);
         scale(_h_pt_2_2ltau, norm);
         scale(_h_pt_3_2ltau, norm);
 
         scale(_h_e_n, norm);
         scale(_h_mu_n, norm);
         scale(_h_tau_n, norm);
 
         scale(_h_excluded, norm);
       }
 
     }
 
 
     /// Helper functions
     //@{
     /// Function giving a list of all signal regions
     vector<string> getSignalRegions()  {
 
       // List of basic signal regions
       vector<string> basic_signal_regions;
       basic_signal_regions.push_back("3l_offZ_OSSF");
       basic_signal_regions.push_back("3l_offZ_noOSSF");
       basic_signal_regions.push_back("3l_onZ");
       basic_signal_regions.push_back("2ltau_offZ_OSSF");
       basic_signal_regions.push_back("2ltau_offZ_noOSSF");
       basic_signal_regions.push_back("2ltau_onZ");
 
       // List of kinematic variables
       vector<string> kinematic_variables;
       kinematic_variables.push_back("HTlep");
       kinematic_variables.push_back("METStrong");
       kinematic_variables.push_back("METWeak");
       kinematic_variables.push_back("Meff");
       kinematic_variables.push_back("MeffStrong");
       kinematic_variables.push_back("MeffMt");
       kinematic_variables.push_back("MinPt");
 
       vector<string> signal_regions;
       // Loop over all kinematic variables and basic signal regions
       for (size_t i0 = 0; i0 < kinematic_variables.size(); i0++)  {
         for (size_t i1 = 0; i1 < basic_signal_regions.size(); i1++)  {
           // Is signal region onZ?
           int onZ = (basic_signal_regions[i1].find("onZ") != string::npos) ? 1 : 0;
           // Get cut values for this kinematic variable
           vector<int> cut_values = getCutsPerSignalRegion(kinematic_variables[i0], onZ);
           // Loop over all cut values
           for (size_t i2 = 0; i2 < cut_values.size(); i2++)  {
             // Push signal region into vector
             signal_regions.push_back( kinematic_variables[i0] + "_" + basic_signal_regions[i1] + "_cut_" + toString(cut_values[i2]) );
           }
         }
       }
       return signal_regions;
     }
 
 
 
     /// Function giving all cut values per kinematic variable
     vector<int> getCutsPerSignalRegion(const string& signal_region, int onZ = 0)  {
       vector<int> cutValues;
 
       // Cut values for HTlep
       if (signal_region.compare("HTlep") == 0)  {
         cutValues.push_back(0);
         cutValues.push_back(200);
         cutValues.push_back(500);
         cutValues.push_back(800);
         }
       // Cut values for MinPt
       else if (signal_region.compare("MinPt") == 0)  {
         cutValues.push_back(0);
         cutValues.push_back(50);
         cutValues.push_back(100);
         cutValues.push_back(150);
         }
       // Cut values for METStrong (HTjets > 150 GeV) and METWeak (HTjets <= 150 GeV)
       else if (signal_region.compare("METStrong") == 0 || signal_region.compare("METWeak") == 0)  {
         cutValues.push_back(0);
         cutValues.push_back(100);
         cutValues.push_back(200);
         cutValues.push_back(300);
         }
       // Cut values for Meff
       if (signal_region.compare("Meff") == 0)  {
         cutValues.push_back(0);
         cutValues.push_back(600);
         cutValues.push_back(1000);
         cutValues.push_back(1500);
         }
       // Cut values for MeffStrong and MeffMt (eTmiss > 100 GeV, onZ signal regions only)
       if ((signal_region.compare("MeffStrong") == 0 || signal_region.compare("MeffMt") == 0) && onZ ==1)  {
         cutValues.push_back(0);
         cutValues.push_back(600);
         cutValues.push_back(1200);
         }
 
       return cutValues;
     }
 
     /// function fills map _eventCountsPerSR by looping over all signal regions
     /// and looking if the event falls into this signal region
     void fillEventCountsPerSR(const string& basic_signal_region, int onZ,
                               double HTlep, double eTmiss, double HTjets,
                               double meff, double min_pT, double mTW,
                               double weight)  {
 
       // Get cut values for HTlep, loop over them and add event if cut is passed
       vector<int> cut_values = getCutsPerSignalRegion("HTlep", onZ);
       for (size_t i = 0; i < cut_values.size(); i++)  {
         if (HTlep > cut_values[i])
           _eventCountsPerSR[("HTlep_" + basic_signal_region + "_cut_" + toString(cut_values[i]))] += weight;
       }
 
       // Get cut values for MinPt, loop over them and add event if cut is passed
       cut_values = getCutsPerSignalRegion("MinPt", onZ);
       for (size_t i = 0; i < cut_values.size(); i++)  {
         if (min_pT > cut_values[i])
           _eventCountsPerSR[("MinPt_" + basic_signal_region + "_cut_" + toString(cut_values[i]))] += weight;
       }
 
       // Get cut values for METStrong, loop over them and add event if cut is passed
       cut_values = getCutsPerSignalRegion("METStrong", onZ);
       for (size_t i = 0; i < cut_values.size(); i++)  {
         if (eTmiss > cut_values[i] && HTjets > 150.)
           _eventCountsPerSR[("METStrong_" + basic_signal_region + "_cut_" + toString(cut_values[i]))] += weight;
       }
 
       // Get cut values for METWeak, loop over them and add event if cut is passed
       cut_values = getCutsPerSignalRegion("METWeak", onZ);
       for (size_t i = 0; i < cut_values.size(); i++)  {
         if (eTmiss > cut_values[i] && HTjets <= 150.)
           _eventCountsPerSR[("METWeak_" + basic_signal_region + "_cut_" + toString(cut_values[i]))] += weight;
       }
 
       // Get cut values for Meff, loop over them and add event if cut is passed
       cut_values = getCutsPerSignalRegion("Meff", onZ);
       for (size_t i = 0; i < cut_values.size(); i++)  {
         if (meff > cut_values[i])
           _eventCountsPerSR[("Meff_" + basic_signal_region + "_cut_" + toString(cut_values[i]))] += weight;
       }
 
       // Get cut values for MeffStrong, loop over them and add event if cut is passed
       cut_values = getCutsPerSignalRegion("MeffStrong", onZ);
       for (size_t i = 0; i < cut_values.size(); i++)  {
         if (meff > cut_values[i] && eTmiss > 100.)
           _eventCountsPerSR[("MeffStrong_" + basic_signal_region + "_cut_" + toString(cut_values[i]))] += weight;
       }
 
       // Get cut values for MeffMt, loop over them and add event if cut is passed
       cut_values = getCutsPerSignalRegion("MeffMt", onZ);
       for (size_t i = 0; i < cut_values.size(); i++)  {
         if (meff > cut_values[i] && mTW > 100. && onZ == 1)
           _eventCountsPerSR[("MeffMt_" + basic_signal_region + "_cut_" + toString(cut_values[i]))] += weight;
       }
 
     }
 
     /// Function returning 4-momentum of daughter-particle if it is a tau neutrino
     FourMomentum get_tau_neutrino_momentum(const Particle& p)  {
       assert(p.abspid() == PID::TAU);
       const GenVertex* dv = p.genParticle()->end_vertex();
       assert(dv != NULL);
       // Loop over all daughter particles
       for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) {
         if (abs((*pp)->pdg_id()) == PID::NU_TAU) return FourMomentum((*pp)->momentum());
       }
       return FourMomentum();
     }
 
     /// Function calculating the prong number of taus
     void get_prong_number(const GenParticle* p, unsigned int& nprong, bool& lep_decaying_tau)  {
       assert(p != NULL);
       const GenVertex* dv = p->end_vertex();
       assert(dv != NULL);
       for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) {
         // If they have status 1 and are charged they will produce a track and the prong number is +1
         if ((*pp)->status() == 1 )  {
           const int id = (*pp)->pdg_id();
           if (Rivet::PID::charge(id) != 0 ) ++nprong;
           // Check if tau decays leptonically
           if (( abs(id) == PID::ELECTRON || abs(id) == PID::MUON || abs(id) == PID::TAU ) && abs(p->pdg_id()) == PID::TAU) lep_decaying_tau = true;
         }
         // If the status of the daughter particle is 2 it is unstable and the further decays are checked
         else if ((*pp)->status() == 2 )  {
           get_prong_number((*pp),nprong,lep_decaying_tau);
         }
       }
     }
 
     /// Function giving fiducial lepton efficiency
     double apply_reco_eff(int flavor, const Particle& p) {
-      float pt = p.pT()/GeV;
-      float eta = p.eta();
+      double pt = p.pT()/GeV;
+      double eta = p.eta();
 
       double eff = 0.;
 
       if (flavor == 11) { // weight prompt electron -- now including data/MC ID SF in eff.
         double avgrate = 0.685;
-        float wz_ele[] =  {0.0256,0.522,0.607,0.654,0.708,0.737,0.761,0.784,0.815,0.835,0.851,0.841,0.898};
-        // float ewz_ele[] = {0.000257,0.00492,0.00524,0.00519,0.00396,0.00449,0.00538,0.00513,0.00773,0.00753,0.0209,0.0964,0.259};
+        const static double wz_ele[] =  {0.0256,0.522,0.607,0.654,0.708,0.737,0.761,0.784,0.815,0.835,0.851,0.841,0.898};
+        // double ewz_ele[] = {0.000257,0.00492,0.00524,0.00519,0.00396,0.00449,0.00538,0.00513,0.00773,0.00753,0.0209,0.0964,0.259};
         int ibin = 0;
         if(pt > 10  && pt < 15) ibin = 0;
         if(pt > 15  && pt < 20) ibin = 1;
         if(pt > 20  && pt < 25) ibin = 2;
         if(pt > 25  && pt < 30) ibin = 3;
         if(pt > 30  && pt < 40) ibin = 4;
         if(pt > 40  && pt < 50) ibin = 5;
         if(pt > 50  && pt < 60) ibin = 6;
         if(pt > 60  && pt < 80) ibin = 7;
         if(pt > 80  && pt < 100) ibin = 8;
         if(pt > 100 && pt < 200) ibin = 9;
         if(pt > 200 && pt < 400) ibin = 10;
         if(pt > 400 && pt < 600) ibin = 11;
         if(pt > 600) ibin = 12;
         double eff_pt = 0.;
         eff_pt = wz_ele[ibin];
 
         eta = fabs(eta);
 
-        float wz_ele_eta[] =  {0.65,0.714,0.722,0.689,0.635,0.615};
-        // float ewz_ele_eta[] = {0.00642,0.00355,0.00335,0.004,0.00368,0.00422};
+        const static double wz_ele_eta[] =  {0.65,0.714,0.722,0.689,0.635,0.615};
+        // double ewz_ele_eta[] = {0.00642,0.00355,0.00335,0.004,0.00368,0.00422};
         ibin = 0;
         if(eta > 0 && eta < 0.1) ibin = 0;
         if(eta > 0.1 && eta < 0.5) ibin = 1;
         if(eta > 0.5 && eta < 1.0) ibin = 2;
         if(eta > 1.0 && eta < 1.5) ibin = 3;
         if(eta > 1.5 && eta < 2.0) ibin = 4;
         if(eta > 2.0 && eta < 2.5) ibin = 5;
         double eff_eta = 0.;
         eff_eta = wz_ele_eta[ibin];
 
         eff = (eff_pt * eff_eta) / avgrate;
       }
 
       if (flavor == 12) { // weight electron from tau
         double avgrate = 0.476;
-        float wz_ele[] =  {0.00855,0.409,0.442,0.55,0.632,0.616,0.615,0.642,0.72,0.617};
-        // float ewz_ele[] = {0.000573,0.0291,0.0366,0.0352,0.0363,0.0474,0.0628,0.0709,0.125,0.109};
+        const static double wz_ele[] =  {0.00855,0.409,0.442,0.55,0.632,0.616,0.615,0.642,0.72,0.617};
+        // double ewz_ele[] = {0.000573,0.0291,0.0366,0.0352,0.0363,0.0474,0.0628,0.0709,0.125,0.109};
         int ibin = 0;
         if(pt > 10  && pt < 15) ibin = 0;
         if(pt > 15  && pt < 20) ibin = 1;
         if(pt > 20  && pt < 25) ibin = 2;
         if(pt > 25  && pt < 30) ibin = 3;
         if(pt > 30  && pt < 40) ibin = 4;
         if(pt > 40  && pt < 50) ibin = 5;
         if(pt > 50  && pt < 60) ibin = 6;
         if(pt > 60  && pt < 80) ibin = 7;
         if(pt > 80  && pt < 100) ibin = 8;
         if(pt > 100)           ibin = 9;
         double eff_pt = 0.;
         eff_pt = wz_ele[ibin];
 
         eta = fabs(eta);
 
-        float wz_ele_eta[] =  {0.546,0.5,0.513,0.421,0.47,0.433};
-        //float ewz_ele_eta[] = {0.0566,0.0257,0.0263,0.0263,0.0303,0.0321};
+        const static double wz_ele_eta[] =  {0.546,0.5,0.513,0.421,0.47,0.433};
+        //double ewz_ele_eta[] = {0.0566,0.0257,0.0263,0.0263,0.0303,0.0321};
         ibin = 0;
         if(eta > 0 && eta < 0.1) ibin = 0;
         if(eta > 0.1 && eta < 0.5) ibin = 1;
         if(eta > 0.5 && eta < 1.0) ibin = 2;
         if(eta > 1.0 && eta < 1.5) ibin = 3;
         if(eta > 1.5 && eta < 2.0) ibin = 4;
         if(eta > 2.0 && eta < 2.5) ibin = 5;
         double eff_eta = 0.;
         eff_eta = wz_ele_eta[ibin];
 
         eff = (eff_pt * eff_eta) / avgrate;
       }
 
       if (flavor == 13) { // weight prompt muon
         int ibin = 0;
         if(pt > 10  && pt < 15) ibin = 0;
         if(pt > 15  && pt < 20) ibin = 1;
         if(pt > 20  && pt < 25) ibin = 2;
         if(pt > 25  && pt < 30) ibin = 3;
         if(pt > 30  && pt < 40) ibin = 4;
         if(pt > 40  && pt < 50) ibin = 5;
         if(pt > 50  && pt < 60) ibin = 6;
         if(pt > 60  && pt < 80) ibin = 7;
         if(pt > 80  && pt < 100) ibin = 8;
         if(pt > 100 && pt < 200) ibin = 9;
         if(pt > 200 && pt < 400) ibin = 10;
         if(pt > 400) ibin = 11;
         if(fabs(eta) < 0.1) {
-          float wz_mu[] =  {0.00705,0.402,0.478,0.49,0.492,0.499,0.527,0.512,0.53,0.528,0.465,0.465};
-          //float ewz_mu[] = {0.000298,0.0154,0.017,0.0158,0.0114,0.0123,0.0155,0.0133,0.0196,0.0182,0.0414,0.0414};
+          const static double wz_mu[] =  {0.00705,0.402,0.478,0.49,0.492,0.499,0.527,0.512,0.53,0.528,0.465,0.465};
+          //double ewz_mu[] = {0.000298,0.0154,0.017,0.0158,0.0114,0.0123,0.0155,0.0133,0.0196,0.0182,0.0414,0.0414};
           double eff_pt = 0.;
           eff_pt = wz_mu[ibin];
           eff = eff_pt;
         }
         if(fabs(eta) > 0.1) {
-          float wz_mu[] =  {0.0224,0.839,0.887,0.91,0.919,0.923,0.925,0.925,0.922,0.918,0.884,0.834};
-          //float ewz_mu[] = {0.000213,0.00753,0.0074,0.007,0.00496,0.00534,0.00632,0.00583,0.00849,0.00804,0.0224,0.0963};
+          const static double wz_mu[] =  {0.0224,0.839,0.887,0.91,0.919,0.923,0.925,0.925,0.922,0.918,0.884,0.834};
+          //double ewz_mu[] = {0.000213,0.00753,0.0074,0.007,0.00496,0.00534,0.00632,0.00583,0.00849,0.00804,0.0224,0.0963};
           double eff_pt = 0.;
           eff_pt = wz_mu[ibin];
           eff = eff_pt;
         }
       }
 
       if (flavor == 14) { // weight muon from tau
         int ibin = 0;
         if(pt > 10  && pt < 15) ibin = 0;
         if(pt > 15  && pt < 20) ibin = 1;
         if(pt > 20  && pt < 25) ibin = 2;
         if(pt > 25  && pt < 30) ibin = 3;
         if(pt > 30  && pt < 40) ibin = 4;
         if(pt > 40  && pt < 50) ibin = 5;
         if(pt > 50  && pt < 60) ibin = 6;
         if(pt > 60  && pt < 80) ibin = 7;
         if(pt > 80  && pt < 100) ibin = 8;
         if(pt > 100) ibin = 9;
 
         if(fabs(eta) < 0.1) {
-          float wz_mu[] =  {0.0,0.664,0.124,0.133,0.527,0.283,0.495,0.25,0.5,0.331};
-          //float ewz_mu[] = {0.0,0.192,0.0437,0.0343,0.128,0.107,0.202,0.125,0.25,0.191};
+          const static double wz_mu[] =  {0.0,0.664,0.124,0.133,0.527,0.283,0.495,0.25,0.5,0.331};
+          //double ewz_mu[] = {0.0,0.192,0.0437,0.0343,0.128,0.107,0.202,0.125,0.25,0.191};
           double eff_pt = 0.;
           eff_pt = wz_mu[ibin];
           eff = eff_pt;
         }
         if(fabs(eta) > 0.1) {
-          float wz_mu[] =  {0.0,0.617,0.655,0.676,0.705,0.738,0.712,0.783,0.646,0.745};
-          //float ewz_mu[] = {0.0,0.043,0.0564,0.0448,0.0405,0.0576,0.065,0.0825,0.102,0.132};
+          const static double wz_mu[] =  {0.0,0.617,0.655,0.676,0.705,0.738,0.712,0.783,0.646,0.745};
+          //double ewz_mu[] = {0.0,0.043,0.0564,0.0448,0.0405,0.0576,0.065,0.0825,0.102,0.132};
           double eff_pt = 0.;
           eff_pt = wz_mu[ibin];
           eff = eff_pt;
         }
       }
 
       if (flavor == 15) { // weight hadronic tau 1p
         double avgrate = 0.16;
-        float wz_tau1p[] =  {0.0,0.0311,0.148,0.229,0.217,0.292,0.245,0.307,0.227,0.277};
-        //float ewz_tau1p[] = {0.0,0.00211,0.0117,0.0179,0.0134,0.0248,0.0264,0.0322,0.0331,0.0427};
+        const static double wz_tau1p[] =  {0.0,0.0311,0.148,0.229,0.217,0.292,0.245,0.307,0.227,0.277};
+        //double ewz_tau1p[] = {0.0,0.00211,0.0117,0.0179,0.0134,0.0248,0.0264,0.0322,0.0331,0.0427};
         int ibin = 0;
         if(pt > 10  && pt < 15) ibin = 0;
         if(pt > 15  && pt < 20) ibin = 1;
         if(pt > 20  && pt < 25) ibin = 2;
         if(pt > 25  && pt < 30) ibin = 3;
         if(pt > 30  && pt < 40) ibin = 4;
         if(pt > 40  && pt < 50) ibin = 5;
         if(pt > 50  && pt < 60) ibin = 6;
         if(pt > 60  && pt < 80) ibin = 7;
         if(pt > 80  && pt < 100) ibin = 8;
         if(pt > 100) ibin = 9;
         double eff_pt = 0.;
         eff_pt = wz_tau1p[ibin];
 
-        float wz_tau1p_eta[] = {0.166,0.15,0.188,0.175,0.142,0.109};
-        //float ewz_tau1p_eta[] ={0.0166,0.00853,0.0097,0.00985,0.00949,0.00842};
+        const static double wz_tau1p_eta[] = {0.166,0.15,0.188,0.175,0.142,0.109};
+        //double ewz_tau1p_eta[] ={0.0166,0.00853,0.0097,0.00985,0.00949,0.00842};
         ibin = 0;
         if(eta > 0.0 && eta < 0.1) ibin = 0;
         if(eta > 0.1 && eta < 0.5) ibin = 1;
         if(eta > 0.5 && eta < 1.0) ibin = 2;
         if(eta > 1.0 && eta < 1.5) ibin = 3;
         if(eta > 1.5 && eta < 2.0) ibin = 4;
         if(eta > 2.0 && eta < 2.5) ibin = 5;
         double eff_eta = 0.;
         eff_eta = wz_tau1p_eta[ibin];
 
         eff = (eff_pt * eff_eta) / avgrate;
       }
 
       return eff;
     }
 
+
     /// Function giving observed and expected upper limits (on the visible cross-section)
     double getUpperLimit(const string& signal_region, bool observed)  {
 
       map<string,double> upperLimitsObserved;
       map<string,double> upperLimitsExpected;
 
       upperLimitsObserved["HTlep_3l_offZ_OSSF_cut_0"] = 2.435;
       upperLimitsObserved["HTlep_3l_offZ_OSSF_cut_200"] = 0.704;
       upperLimitsObserved["HTlep_3l_offZ_OSSF_cut_500"] = 0.182;
       upperLimitsObserved["HTlep_3l_offZ_OSSF_cut_800"] = 0.147;
       upperLimitsObserved["HTlep_2ltau_offZ_OSSF_cut_0"] = 13.901;
       upperLimitsObserved["HTlep_2ltau_offZ_OSSF_cut_200"] = 1.677;
       upperLimitsObserved["HTlep_2ltau_offZ_OSSF_cut_500"] = 0.141;
       upperLimitsObserved["HTlep_2ltau_offZ_OSSF_cut_800"] = 0.155;
       upperLimitsObserved["HTlep_3l_offZ_noOSSF_cut_0"] = 1.054;
       upperLimitsObserved["HTlep_3l_offZ_noOSSF_cut_200"] = 0.341;
       upperLimitsObserved["HTlep_3l_offZ_noOSSF_cut_500"] = 0.221;
       upperLimitsObserved["HTlep_3l_offZ_noOSSF_cut_800"] = 0.140;
       upperLimitsObserved["HTlep_2ltau_offZ_noOSSF_cut_0"] = 4.276;
       upperLimitsObserved["HTlep_2ltau_offZ_noOSSF_cut_200"] = 0.413;
       upperLimitsObserved["HTlep_2ltau_offZ_noOSSF_cut_500"] = 0.138;
       upperLimitsObserved["HTlep_2ltau_offZ_noOSSF_cut_800"] = 0.150;
       upperLimitsObserved["HTlep_3l_onZ_cut_0"] = 29.804;
       upperLimitsObserved["HTlep_3l_onZ_cut_200"] = 3.579;
       upperLimitsObserved["HTlep_3l_onZ_cut_500"] = 0.466;
       upperLimitsObserved["HTlep_3l_onZ_cut_800"] = 0.298;
       upperLimitsObserved["HTlep_2ltau_onZ_cut_0"] = 205.091;
       upperLimitsObserved["HTlep_2ltau_onZ_cut_200"] = 3.141;
       upperLimitsObserved["HTlep_2ltau_onZ_cut_500"] = 0.290;
       upperLimitsObserved["HTlep_2ltau_onZ_cut_800"] = 0.157;
       upperLimitsObserved["METStrong_3l_offZ_OSSF_cut_0"] = 1.111;
       upperLimitsObserved["METStrong_3l_offZ_OSSF_cut_100"] = 0.354;
       upperLimitsObserved["METStrong_3l_offZ_OSSF_cut_200"] = 0.236;
       upperLimitsObserved["METStrong_3l_offZ_OSSF_cut_300"] = 0.150;
       upperLimitsObserved["METStrong_2ltau_offZ_OSSF_cut_0"] = 1.881;
       upperLimitsObserved["METStrong_2ltau_offZ_OSSF_cut_100"] = 0.406;
       upperLimitsObserved["METStrong_2ltau_offZ_OSSF_cut_200"] = 0.194;
       upperLimitsObserved["METStrong_2ltau_offZ_OSSF_cut_300"] = 0.134;
       upperLimitsObserved["METStrong_3l_offZ_noOSSF_cut_0"] = 0.770;
       upperLimitsObserved["METStrong_3l_offZ_noOSSF_cut_100"] = 0.295;
       upperLimitsObserved["METStrong_3l_offZ_noOSSF_cut_200"] = 0.149;
       upperLimitsObserved["METStrong_3l_offZ_noOSSF_cut_300"] = 0.140;
       upperLimitsObserved["METStrong_2ltau_offZ_noOSSF_cut_0"] = 2.003;
       upperLimitsObserved["METStrong_2ltau_offZ_noOSSF_cut_100"] = 0.806;
       upperLimitsObserved["METStrong_2ltau_offZ_noOSSF_cut_200"] = 0.227;
       upperLimitsObserved["METStrong_2ltau_offZ_noOSSF_cut_300"] = 0.138;
       upperLimitsObserved["METStrong_3l_onZ_cut_0"] = 6.383;
       upperLimitsObserved["METStrong_3l_onZ_cut_100"] = 0.959;
       upperLimitsObserved["METStrong_3l_onZ_cut_200"] = 0.549;
       upperLimitsObserved["METStrong_3l_onZ_cut_300"] = 0.182;
       upperLimitsObserved["METStrong_2ltau_onZ_cut_0"] = 10.658;
       upperLimitsObserved["METStrong_2ltau_onZ_cut_100"] = 0.637;
       upperLimitsObserved["METStrong_2ltau_onZ_cut_200"] = 0.291;
       upperLimitsObserved["METStrong_2ltau_onZ_cut_300"] = 0.227;
       upperLimitsObserved["METWeak_3l_offZ_OSSF_cut_0"] = 1.802;
       upperLimitsObserved["METWeak_3l_offZ_OSSF_cut_100"] = 0.344;
       upperLimitsObserved["METWeak_3l_offZ_OSSF_cut_200"] = 0.189;
       upperLimitsObserved["METWeak_3l_offZ_OSSF_cut_300"] = 0.148;
       upperLimitsObserved["METWeak_2ltau_offZ_OSSF_cut_0"] = 12.321;
       upperLimitsObserved["METWeak_2ltau_offZ_OSSF_cut_100"] = 0.430;
       upperLimitsObserved["METWeak_2ltau_offZ_OSSF_cut_200"] = 0.137;
       upperLimitsObserved["METWeak_2ltau_offZ_OSSF_cut_300"] = 0.134;
       upperLimitsObserved["METWeak_3l_offZ_noOSSF_cut_0"] = 0.562;
       upperLimitsObserved["METWeak_3l_offZ_noOSSF_cut_100"] = 0.153;
       upperLimitsObserved["METWeak_3l_offZ_noOSSF_cut_200"] = 0.154;
       upperLimitsObserved["METWeak_3l_offZ_noOSSF_cut_300"] = 0.141;
       upperLimitsObserved["METWeak_2ltau_offZ_noOSSF_cut_0"] = 2.475;
       upperLimitsObserved["METWeak_2ltau_offZ_noOSSF_cut_100"] = 0.244;
       upperLimitsObserved["METWeak_2ltau_offZ_noOSSF_cut_200"] = 0.141;
       upperLimitsObserved["METWeak_2ltau_offZ_noOSSF_cut_300"] = 0.142;
       upperLimitsObserved["METWeak_3l_onZ_cut_0"] = 24.769;
       upperLimitsObserved["METWeak_3l_onZ_cut_100"] = 0.690;
       upperLimitsObserved["METWeak_3l_onZ_cut_200"] = 0.198;
       upperLimitsObserved["METWeak_3l_onZ_cut_300"] = 0.138;
       upperLimitsObserved["METWeak_2ltau_onZ_cut_0"] = 194.360;
       upperLimitsObserved["METWeak_2ltau_onZ_cut_100"] = 0.287;
       upperLimitsObserved["METWeak_2ltau_onZ_cut_200"] = 0.144;
       upperLimitsObserved["METWeak_2ltau_onZ_cut_300"] = 0.130;
       upperLimitsObserved["Meff_3l_offZ_OSSF_cut_0"] = 2.435;
       upperLimitsObserved["Meff_3l_offZ_OSSF_cut_600"] = 0.487;
       upperLimitsObserved["Meff_3l_offZ_OSSF_cut_1000"] = 0.156;
       upperLimitsObserved["Meff_3l_offZ_OSSF_cut_1500"] = 0.140;
       upperLimitsObserved["Meff_2ltau_offZ_OSSF_cut_0"] = 13.901;
       upperLimitsObserved["Meff_2ltau_offZ_OSSF_cut_600"] = 0.687;
       upperLimitsObserved["Meff_2ltau_offZ_OSSF_cut_1000"] = 0.224;
       upperLimitsObserved["Meff_2ltau_offZ_OSSF_cut_1500"] = 0.155;
       upperLimitsObserved["Meff_3l_offZ_noOSSF_cut_0"] = 1.054;
       upperLimitsObserved["Meff_3l_offZ_noOSSF_cut_600"] = 0.249;
       upperLimitsObserved["Meff_3l_offZ_noOSSF_cut_1000"] = 0.194;
       upperLimitsObserved["Meff_3l_offZ_noOSSF_cut_1500"] = 0.145;
       upperLimitsObserved["Meff_2ltau_offZ_noOSSF_cut_0"] = 4.276;
       upperLimitsObserved["Meff_2ltau_offZ_noOSSF_cut_600"] = 0.772;
       upperLimitsObserved["Meff_2ltau_offZ_noOSSF_cut_1000"] = 0.218;
       upperLimitsObserved["Meff_2ltau_offZ_noOSSF_cut_1500"] = 0.204;
       upperLimitsObserved["Meff_3l_onZ_cut_0"] = 29.804;
       upperLimitsObserved["Meff_3l_onZ_cut_600"] = 2.933;
       upperLimitsObserved["Meff_3l_onZ_cut_1000"] = 0.912;
       upperLimitsObserved["Meff_3l_onZ_cut_1500"] = 0.225;
       upperLimitsObserved["Meff_2ltau_onZ_cut_0"] = 205.091;
       upperLimitsObserved["Meff_2ltau_onZ_cut_600"] = 1.486;
       upperLimitsObserved["Meff_2ltau_onZ_cut_1000"] = 0.641;
       upperLimitsObserved["Meff_2ltau_onZ_cut_1500"] = 0.204;
       upperLimitsObserved["MeffStrong_3l_offZ_OSSF_cut_0"] = 0.479;
       upperLimitsObserved["MeffStrong_3l_offZ_OSSF_cut_600"] = 0.353;
       upperLimitsObserved["MeffStrong_3l_offZ_OSSF_cut_1200"] = 0.187;
       upperLimitsObserved["MeffStrong_2ltau_offZ_OSSF_cut_0"] = 0.617;
       upperLimitsObserved["MeffStrong_2ltau_offZ_OSSF_cut_600"] = 0.320;
       upperLimitsObserved["MeffStrong_2ltau_offZ_OSSF_cut_1200"] = 0.281;
       upperLimitsObserved["MeffStrong_3l_offZ_noOSSF_cut_0"] = 0.408;
       upperLimitsObserved["MeffStrong_3l_offZ_noOSSF_cut_600"] = 0.240;
       upperLimitsObserved["MeffStrong_3l_offZ_noOSSF_cut_1200"] = 0.150;
       upperLimitsObserved["MeffStrong_2ltau_offZ_noOSSF_cut_0"] = 0.774;
       upperLimitsObserved["MeffStrong_2ltau_offZ_noOSSF_cut_600"] = 0.417;
       upperLimitsObserved["MeffStrong_2ltau_offZ_noOSSF_cut_1200"] = 0.266;
       upperLimitsObserved["MeffStrong_3l_onZ_cut_0"] = 1.208;
       upperLimitsObserved["MeffStrong_3l_onZ_cut_600"] = 0.837;
       upperLimitsObserved["MeffStrong_3l_onZ_cut_1200"] = 0.269;
       upperLimitsObserved["MeffStrong_2ltau_onZ_cut_0"] = 0.605;
       upperLimitsObserved["MeffStrong_2ltau_onZ_cut_600"] = 0.420;
       upperLimitsObserved["MeffStrong_2ltau_onZ_cut_1200"] = 0.141;
       upperLimitsObserved["MeffMt_3l_onZ_cut_0"] = 1.832;
       upperLimitsObserved["MeffMt_3l_onZ_cut_600"] = 0.862;
       upperLimitsObserved["MeffMt_3l_onZ_cut_1200"] = 0.222;
       upperLimitsObserved["MeffMt_2ltau_onZ_cut_0"] = 1.309;
       upperLimitsObserved["MeffMt_2ltau_onZ_cut_600"] = 0.481;
       upperLimitsObserved["MeffMt_2ltau_onZ_cut_1200"] = 0.146;
       upperLimitsObserved["MinPt_3l_offZ_OSSF_cut_0"] = 2.435;
       upperLimitsObserved["MinPt_3l_offZ_OSSF_cut_50"] = 0.500;
       upperLimitsObserved["MinPt_3l_offZ_OSSF_cut_100"] = 0.203;
       upperLimitsObserved["MinPt_3l_offZ_OSSF_cut_150"] = 0.128;
       upperLimitsObserved["MinPt_2ltau_offZ_OSSF_cut_0"] = 13.901;
       upperLimitsObserved["MinPt_2ltau_offZ_OSSF_cut_50"] = 0.859;
       upperLimitsObserved["MinPt_2ltau_offZ_OSSF_cut_100"] = 0.158;
       upperLimitsObserved["MinPt_2ltau_offZ_OSSF_cut_150"] = 0.155;
       upperLimitsObserved["MinPt_3l_offZ_noOSSF_cut_0"] = 1.054;
       upperLimitsObserved["MinPt_3l_offZ_noOSSF_cut_50"] = 0.295;
       upperLimitsObserved["MinPt_3l_offZ_noOSSF_cut_100"] = 0.148;
       upperLimitsObserved["MinPt_3l_offZ_noOSSF_cut_150"] = 0.137;
       upperLimitsObserved["MinPt_2ltau_offZ_noOSSF_cut_0"] = 4.276;
       upperLimitsObserved["MinPt_2ltau_offZ_noOSSF_cut_50"] = 0.314;
       upperLimitsObserved["MinPt_2ltau_offZ_noOSSF_cut_100"] = 0.134;
       upperLimitsObserved["MinPt_2ltau_offZ_noOSSF_cut_150"] = 0.140;
       upperLimitsObserved["MinPt_3l_onZ_cut_0"] = 29.804;
       upperLimitsObserved["MinPt_3l_onZ_cut_50"] = 1.767;
       upperLimitsObserved["MinPt_3l_onZ_cut_100"] = 0.690;
       upperLimitsObserved["MinPt_3l_onZ_cut_150"] = 0.301;
       upperLimitsObserved["MinPt_2ltau_onZ_cut_0"] = 205.091;
       upperLimitsObserved["MinPt_2ltau_onZ_cut_50"] = 1.050;
       upperLimitsObserved["MinPt_2ltau_onZ_cut_100"] = 0.155;
       upperLimitsObserved["MinPt_2ltau_onZ_cut_150"] = 0.146;
       upperLimitsObserved["nbtag_3l_offZ_OSSF_cut_0"] = 2.435;
       upperLimitsObserved["nbtag_3l_offZ_OSSF_cut_1"] = 0.865;
       upperLimitsObserved["nbtag_3l_offZ_OSSF_cut_2"] = 0.474;
       upperLimitsObserved["nbtag_2ltau_offZ_OSSF_cut_0"] = 13.901;
       upperLimitsObserved["nbtag_2ltau_offZ_OSSF_cut_1"] = 1.566;
       upperLimitsObserved["nbtag_2ltau_offZ_OSSF_cut_2"] = 0.426;
       upperLimitsObserved["nbtag_3l_offZ_noOSSF_cut_0"] = 1.054;
       upperLimitsObserved["nbtag_3l_offZ_noOSSF_cut_1"] = 0.643;
       upperLimitsObserved["nbtag_3l_offZ_noOSSF_cut_2"] = 0.321;
       upperLimitsObserved["nbtag_2ltau_offZ_noOSSF_cut_0"] = 4.276;
       upperLimitsObserved["nbtag_2ltau_offZ_noOSSF_cut_1"] = 2.435;
       upperLimitsObserved["nbtag_2ltau_offZ_noOSSF_cut_2"] = 1.073;
       upperLimitsObserved["nbtag_3l_onZ_cut_0"] = 29.804;
       upperLimitsObserved["nbtag_3l_onZ_cut_1"] = 3.908;
       upperLimitsObserved["nbtag_3l_onZ_cut_2"] = 0.704;
       upperLimitsObserved["nbtag_2ltau_onZ_cut_0"] = 205.091;
       upperLimitsObserved["nbtag_2ltau_onZ_cut_1"] = 9.377;
       upperLimitsObserved["nbtag_2ltau_onZ_cut_2"] = 0.657;
       upperLimitsExpected["HTlep_3l_offZ_OSSF_cut_0"] = 2.893;
       upperLimitsExpected["HTlep_3l_offZ_OSSF_cut_200"] = 1.175;
       upperLimitsExpected["HTlep_3l_offZ_OSSF_cut_500"] = 0.265;
       upperLimitsExpected["HTlep_3l_offZ_OSSF_cut_800"] = 0.155;
       upperLimitsExpected["HTlep_2ltau_offZ_OSSF_cut_0"] = 14.293;
       upperLimitsExpected["HTlep_2ltau_offZ_OSSF_cut_200"] = 1.803;
       upperLimitsExpected["HTlep_2ltau_offZ_OSSF_cut_500"] = 0.159;
       upperLimitsExpected["HTlep_2ltau_offZ_OSSF_cut_800"] = 0.155;
       upperLimitsExpected["HTlep_3l_offZ_noOSSF_cut_0"] = 0.836;
       upperLimitsExpected["HTlep_3l_offZ_noOSSF_cut_200"] = 0.340;
       upperLimitsExpected["HTlep_3l_offZ_noOSSF_cut_500"] = 0.218;
       upperLimitsExpected["HTlep_3l_offZ_noOSSF_cut_800"] = 0.140;
       upperLimitsExpected["HTlep_2ltau_offZ_noOSSF_cut_0"] = 4.132;
       upperLimitsExpected["HTlep_2ltau_offZ_noOSSF_cut_200"] = 0.599;
       upperLimitsExpected["HTlep_2ltau_offZ_noOSSF_cut_500"] = 0.146;
       upperLimitsExpected["HTlep_2ltau_offZ_noOSSF_cut_800"] = 0.148;
       upperLimitsExpected["HTlep_3l_onZ_cut_0"] = 32.181;
       upperLimitsExpected["HTlep_3l_onZ_cut_200"] = 4.879;
       upperLimitsExpected["HTlep_3l_onZ_cut_500"] = 0.473;
       upperLimitsExpected["HTlep_3l_onZ_cut_800"] = 0.266;
       upperLimitsExpected["HTlep_2ltau_onZ_cut_0"] = 217.801;
       upperLimitsExpected["HTlep_2ltau_onZ_cut_200"] = 3.676;
       upperLimitsExpected["HTlep_2ltau_onZ_cut_500"] = 0.235;
       upperLimitsExpected["HTlep_2ltau_onZ_cut_800"] = 0.150;
       upperLimitsExpected["METStrong_3l_offZ_OSSF_cut_0"] = 1.196;
       upperLimitsExpected["METStrong_3l_offZ_OSSF_cut_100"] = 0.423;
       upperLimitsExpected["METStrong_3l_offZ_OSSF_cut_200"] = 0.208;
       upperLimitsExpected["METStrong_3l_offZ_OSSF_cut_300"] = 0.158;
       upperLimitsExpected["METStrong_2ltau_offZ_OSSF_cut_0"] = 2.158;
       upperLimitsExpected["METStrong_2ltau_offZ_OSSF_cut_100"] = 0.461;
       upperLimitsExpected["METStrong_2ltau_offZ_OSSF_cut_200"] = 0.186;
       upperLimitsExpected["METStrong_2ltau_offZ_OSSF_cut_300"] = 0.138;
       upperLimitsExpected["METStrong_3l_offZ_noOSSF_cut_0"] = 0.495;
       upperLimitsExpected["METStrong_3l_offZ_noOSSF_cut_100"] = 0.284;
       upperLimitsExpected["METStrong_3l_offZ_noOSSF_cut_200"] = 0.150;
       upperLimitsExpected["METStrong_3l_offZ_noOSSF_cut_300"] = 0.146;
       upperLimitsExpected["METStrong_2ltau_offZ_noOSSF_cut_0"] = 1.967;
       upperLimitsExpected["METStrong_2ltau_offZ_noOSSF_cut_100"] = 0.732;
       upperLimitsExpected["METStrong_2ltau_offZ_noOSSF_cut_200"] = 0.225;
       upperLimitsExpected["METStrong_2ltau_offZ_noOSSF_cut_300"] = 0.147;
       upperLimitsExpected["METStrong_3l_onZ_cut_0"] = 7.157;
       upperLimitsExpected["METStrong_3l_onZ_cut_100"] = 1.342;
       upperLimitsExpected["METStrong_3l_onZ_cut_200"] = 0.508;
       upperLimitsExpected["METStrong_3l_onZ_cut_300"] = 0.228;
       upperLimitsExpected["METStrong_2ltau_onZ_cut_0"] = 12.441;
       upperLimitsExpected["METStrong_2ltau_onZ_cut_100"] = 0.534;
       upperLimitsExpected["METStrong_2ltau_onZ_cut_200"] = 0.243;
       upperLimitsExpected["METStrong_2ltau_onZ_cut_300"] = 0.218;
       upperLimitsExpected["METWeak_3l_offZ_OSSF_cut_0"] = 2.199;
       upperLimitsExpected["METWeak_3l_offZ_OSSF_cut_100"] = 0.391;
       upperLimitsExpected["METWeak_3l_offZ_OSSF_cut_200"] = 0.177;
       upperLimitsExpected["METWeak_3l_offZ_OSSF_cut_300"] = 0.144;
       upperLimitsExpected["METWeak_2ltau_offZ_OSSF_cut_0"] = 12.431;
       upperLimitsExpected["METWeak_2ltau_offZ_OSSF_cut_100"] = 0.358;
       upperLimitsExpected["METWeak_2ltau_offZ_OSSF_cut_200"] = 0.150;
       upperLimitsExpected["METWeak_2ltau_offZ_OSSF_cut_300"] = 0.135;
       upperLimitsExpected["METWeak_3l_offZ_noOSSF_cut_0"] = 0.577;
       upperLimitsExpected["METWeak_3l_offZ_noOSSF_cut_100"] = 0.214;
       upperLimitsExpected["METWeak_3l_offZ_noOSSF_cut_200"] = 0.155;
       upperLimitsExpected["METWeak_3l_offZ_noOSSF_cut_300"] = 0.140;
       upperLimitsExpected["METWeak_2ltau_offZ_noOSSF_cut_0"] = 2.474;
       upperLimitsExpected["METWeak_2ltau_offZ_noOSSF_cut_100"] = 0.382;
       upperLimitsExpected["METWeak_2ltau_offZ_noOSSF_cut_200"] = 0.144;
       upperLimitsExpected["METWeak_2ltau_offZ_noOSSF_cut_300"] = 0.146;
       upperLimitsExpected["METWeak_3l_onZ_cut_0"] = 26.305;
       upperLimitsExpected["METWeak_3l_onZ_cut_100"] = 1.227;
       upperLimitsExpected["METWeak_3l_onZ_cut_200"] = 0.311;
       upperLimitsExpected["METWeak_3l_onZ_cut_300"] = 0.188;
       upperLimitsExpected["METWeak_2ltau_onZ_cut_0"] = 205.198;
       upperLimitsExpected["METWeak_2ltau_onZ_cut_100"] = 0.399;
       upperLimitsExpected["METWeak_2ltau_onZ_cut_200"] = 0.166;
       upperLimitsExpected["METWeak_2ltau_onZ_cut_300"] = 0.140;
       upperLimitsExpected["Meff_3l_offZ_OSSF_cut_0"] = 2.893;
       upperLimitsExpected["Meff_3l_offZ_OSSF_cut_600"] = 0.649;
       upperLimitsExpected["Meff_3l_offZ_OSSF_cut_1000"] = 0.252;
       upperLimitsExpected["Meff_3l_offZ_OSSF_cut_1500"] = 0.150;
       upperLimitsExpected["Meff_2ltau_offZ_OSSF_cut_0"] = 14.293;
       upperLimitsExpected["Meff_2ltau_offZ_OSSF_cut_600"] = 0.657;
       upperLimitsExpected["Meff_2ltau_offZ_OSSF_cut_1000"] = 0.226;
       upperLimitsExpected["Meff_2ltau_offZ_OSSF_cut_1500"] = 0.154;
       upperLimitsExpected["Meff_3l_offZ_noOSSF_cut_0"] = 0.836;
       upperLimitsExpected["Meff_3l_offZ_noOSSF_cut_600"] = 0.265;
       upperLimitsExpected["Meff_3l_offZ_noOSSF_cut_1000"] = 0.176;
       upperLimitsExpected["Meff_3l_offZ_noOSSF_cut_1500"] = 0.146;
       upperLimitsExpected["Meff_2ltau_offZ_noOSSF_cut_0"] = 4.132;
       upperLimitsExpected["Meff_2ltau_offZ_noOSSF_cut_600"] = 0.678;
       upperLimitsExpected["Meff_2ltau_offZ_noOSSF_cut_1000"] = 0.243;
       upperLimitsExpected["Meff_2ltau_offZ_noOSSF_cut_1500"] = 0.184;
       upperLimitsExpected["Meff_3l_onZ_cut_0"] = 32.181;
       upperLimitsExpected["Meff_3l_onZ_cut_600"] = 3.219;
       upperLimitsExpected["Meff_3l_onZ_cut_1000"] = 0.905;
       upperLimitsExpected["Meff_3l_onZ_cut_1500"] = 0.261;
       upperLimitsExpected["Meff_2ltau_onZ_cut_0"] = 217.801;
       upperLimitsExpected["Meff_2ltau_onZ_cut_600"] = 1.680;
       upperLimitsExpected["Meff_2ltau_onZ_cut_1000"] = 0.375;
       upperLimitsExpected["Meff_2ltau_onZ_cut_1500"] = 0.178;
       upperLimitsExpected["MeffStrong_3l_offZ_OSSF_cut_0"] = 0.571;
       upperLimitsExpected["MeffStrong_3l_offZ_OSSF_cut_600"] = 0.386;
       upperLimitsExpected["MeffStrong_3l_offZ_OSSF_cut_1200"] = 0.177;
       upperLimitsExpected["MeffStrong_2ltau_offZ_OSSF_cut_0"] = 0.605;
       upperLimitsExpected["MeffStrong_2ltau_offZ_OSSF_cut_600"] = 0.335;
       upperLimitsExpected["MeffStrong_2ltau_offZ_OSSF_cut_1200"] = 0.249;
       upperLimitsExpected["MeffStrong_3l_offZ_noOSSF_cut_0"] = 0.373;
       upperLimitsExpected["MeffStrong_3l_offZ_noOSSF_cut_600"] = 0.223;
       upperLimitsExpected["MeffStrong_3l_offZ_noOSSF_cut_1200"] = 0.150;
       upperLimitsExpected["MeffStrong_2ltau_offZ_noOSSF_cut_0"] = 0.873;
       upperLimitsExpected["MeffStrong_2ltau_offZ_noOSSF_cut_600"] = 0.428;
       upperLimitsExpected["MeffStrong_2ltau_offZ_noOSSF_cut_1200"] = 0.210;
       upperLimitsExpected["MeffStrong_3l_onZ_cut_0"] = 2.034;
       upperLimitsExpected["MeffStrong_3l_onZ_cut_600"] = 1.093;
       upperLimitsExpected["MeffStrong_3l_onZ_cut_1200"] = 0.293;
       upperLimitsExpected["MeffStrong_2ltau_onZ_cut_0"] = 0.690;
       upperLimitsExpected["MeffStrong_2ltau_onZ_cut_600"] = 0.392;
       upperLimitsExpected["MeffStrong_2ltau_onZ_cut_1200"] = 0.156;
       upperLimitsExpected["MeffMt_3l_onZ_cut_0"] = 2.483;
       upperLimitsExpected["MeffMt_3l_onZ_cut_600"] = 0.845;
       upperLimitsExpected["MeffMt_3l_onZ_cut_1200"] = 0.255;
       upperLimitsExpected["MeffMt_2ltau_onZ_cut_0"] = 1.448;
       upperLimitsExpected["MeffMt_2ltau_onZ_cut_600"] = 0.391;
       upperLimitsExpected["MeffMt_2ltau_onZ_cut_1200"] = 0.146;
       upperLimitsExpected["MinPt_3l_offZ_OSSF_cut_0"] = 2.893;
       upperLimitsExpected["MinPt_3l_offZ_OSSF_cut_50"] = 0.703;
       upperLimitsExpected["MinPt_3l_offZ_OSSF_cut_100"] = 0.207;
       upperLimitsExpected["MinPt_3l_offZ_OSSF_cut_150"] = 0.143;
       upperLimitsExpected["MinPt_2ltau_offZ_OSSF_cut_0"] = 14.293;
       upperLimitsExpected["MinPt_2ltau_offZ_OSSF_cut_50"] = 0.705;
       upperLimitsExpected["MinPt_2ltau_offZ_OSSF_cut_100"] = 0.149;
       upperLimitsExpected["MinPt_2ltau_offZ_OSSF_cut_150"] = 0.155;
       upperLimitsExpected["MinPt_3l_offZ_noOSSF_cut_0"] = 0.836;
       upperLimitsExpected["MinPt_3l_offZ_noOSSF_cut_50"] = 0.249;
       upperLimitsExpected["MinPt_3l_offZ_noOSSF_cut_100"] = 0.135;
       upperLimitsExpected["MinPt_3l_offZ_noOSSF_cut_150"] = 0.136;
       upperLimitsExpected["MinPt_2ltau_offZ_noOSSF_cut_0"] = 4.132;
       upperLimitsExpected["MinPt_2ltau_offZ_noOSSF_cut_50"] = 0.339;
       upperLimitsExpected["MinPt_2ltau_offZ_noOSSF_cut_100"] = 0.149;
       upperLimitsExpected["MinPt_2ltau_offZ_noOSSF_cut_150"] = 0.145;
       upperLimitsExpected["MinPt_3l_onZ_cut_0"] = 32.181;
       upperLimitsExpected["MinPt_3l_onZ_cut_50"] = 2.260;
       upperLimitsExpected["MinPt_3l_onZ_cut_100"] = 0.438;
       upperLimitsExpected["MinPt_3l_onZ_cut_150"] = 0.305;
       upperLimitsExpected["MinPt_2ltau_onZ_cut_0"] = 217.801;
       upperLimitsExpected["MinPt_2ltau_onZ_cut_50"] = 1.335;
       upperLimitsExpected["MinPt_2ltau_onZ_cut_100"] = 0.162;
       upperLimitsExpected["MinPt_2ltau_onZ_cut_150"] = 0.149;
       upperLimitsExpected["nbtag_3l_offZ_OSSF_cut_0"] = 2.893;
       upperLimitsExpected["nbtag_3l_offZ_OSSF_cut_1"] = 0.923;
       upperLimitsExpected["nbtag_3l_offZ_OSSF_cut_2"] = 0.452;
       upperLimitsExpected["nbtag_2ltau_offZ_OSSF_cut_0"] = 14.293;
       upperLimitsExpected["nbtag_2ltau_offZ_OSSF_cut_1"] = 1.774;
       upperLimitsExpected["nbtag_2ltau_offZ_OSSF_cut_2"] = 0.549;
       upperLimitsExpected["nbtag_3l_offZ_noOSSF_cut_0"] = 0.836;
       upperLimitsExpected["nbtag_3l_offZ_noOSSF_cut_1"] = 0.594;
       upperLimitsExpected["nbtag_3l_offZ_noOSSF_cut_2"] = 0.298;
       upperLimitsExpected["nbtag_2ltau_offZ_noOSSF_cut_0"] = 4.132;
       upperLimitsExpected["nbtag_2ltau_offZ_noOSSF_cut_1"] = 2.358;
       upperLimitsExpected["nbtag_2ltau_offZ_noOSSF_cut_2"] = 0.958;
       upperLimitsExpected["nbtag_3l_onZ_cut_0"] = 32.181;
       upperLimitsExpected["nbtag_3l_onZ_cut_1"] = 3.868;
       upperLimitsExpected["nbtag_3l_onZ_cut_2"] = 0.887;
       upperLimitsExpected["nbtag_2ltau_onZ_cut_0"] = 217.801;
       upperLimitsExpected["nbtag_2ltau_onZ_cut_1"] = 9.397;
       upperLimitsExpected["nbtag_2ltau_onZ_cut_2"] = 0.787;
 
 
 
       if (observed) return upperLimitsObserved[signal_region];
       else          return upperLimitsExpected[signal_region];
     }
 
 
     /// Function checking if there is an OSSF lepton pair or a combination of 3 leptons with an invariant mass close to the Z mass
     int isonZ (const Particles& particles) {
       int onZ = 0;
       double best_mass_2 = 999.;
       double best_mass_3 = 999.;
 
       // Loop over all 2 particle combinations to find invariant mass of OSSF pair closest to Z mass
-      foreach (const Particle& p1, particles)  {
-        foreach (const Particle& p2, particles)  {
+      for (const Particle& p1 : particles)  {
+        for (const Particle& p2 : particles)  {
           double mass_difference_2_old = fabs(91.0 - best_mass_2);
           double mass_difference_2_new = fabs(91.0 - (p1.momentum() + p2.momentum()).mass()/GeV);
 
           // If particle combination is OSSF pair calculate mass difference to Z mass
           if ((p1.pid()*p2.pid() == -121 || p1.pid()*p2.pid() == -169))  {
 
             // Get invariant mass closest to Z mass
             if (mass_difference_2_new < mass_difference_2_old)
               best_mass_2 = (p1.momentum() + p2.momentum()).mass()/GeV;
           // In case there is an OSSF pair take also 3rd lepton into account (e.g. from FSR and photon to electron conversion)
-          foreach (const Particle& p3 , particles  )  {
+            for (const Particle& p3 : particles  )  {
             double mass_difference_3_old = fabs(91.0 - best_mass_3);
             double mass_difference_3_new = fabs(91.0 - (p1.momentum() + p2.momentum() + p3.momentum()).mass()/GeV);
             if (mass_difference_3_new < mass_difference_3_old)
               best_mass_3 = (p1.momentum() + p2.momentum() + p3.momentum()).mass()/GeV;
             }
           }
         }
       }
 
       // Pick the minimum invariant mass of the best OSSF pair combination and the best 3 lepton combination
       double best_mass = min(best_mass_2,best_mass_3);
       // if this mass is in a 20 GeV window around the Z mass, the event is classified as onZ
       if ( fabs(91.0 - best_mass) < 20. ) onZ = 1;
       return onZ;
     }
 
     /// function checking if two leptons are an OSSF lepton pair and giving out the invariant mass (0 if no OSSF pair)
     double isOSSF_mass (const Particle& p1, const Particle& p2) {
       double inv_mass = 0.;
       // Is particle combination OSSF pair?
       if ((p1.pid()*p2.pid() == -121 || p1.pid()*p2.pid() == -169))  {
         // Get invariant mass
         inv_mass = (p1.momentum() + p2.momentum()).mass()/GeV;
       }
       return inv_mass;
     }
 
     /// Function checking if there is an OSSF lepton pair
     bool isOSSF (const Particles& particles)  {
       for (size_t i1=0 ; i1 < 3 ; i1 ++)  {
         for (size_t i2 = i1+1 ; i2 < 3 ; i2 ++)  {
           if ((particles[i1].pid()*particles[i2].pid() == -121 || particles[i1].pid()*particles[i2].pid() == -169))  {
             return true;
           }
         }
       }
       return false;
     }
 
     //@}
 
   private:
 
     /// Histograms
     //@{
     Histo1DPtr _h_HTlep_all, _h_HTjets_all, _h_MET_all, _h_Meff_all, _h_min_pT_all, _h_mT_all;
     Histo1DPtr _h_pt_1_3l, _h_pt_2_3l, _h_pt_3_3l, _h_pt_1_2ltau, _h_pt_2_2ltau, _h_pt_3_2ltau;
     Histo1DPtr _h_e_n, _h_mu_n, _h_tau_n;
     Histo1DPtr _h_excluded;
     //@}
 
     /// Fiducial efficiencies to model the effects of the ATLAS detector
     bool _use_fiducial_lepton_efficiency;
 
     /// List of signal regions and event counts per signal region
     vector<string> _signal_regions;
     map<string, double> _eventCountsPerSR;
 
   };
 
 
   DECLARE_RIVET_PLUGIN(ATLAS_2014_I1327229);
 
 }
diff --git a/src/Analyses/ATLAS_2016_CONF_2016_037.cc b/src/Analyses/ATLAS_2016_CONF_2016_037.cc
new file mode 100644
--- /dev/null
+++ b/src/Analyses/ATLAS_2016_CONF_2016_037.cc
@@ -0,0 +1,203 @@
+// -*- C++ -*-
+#include "Rivet/Analysis.hh"
+#include "Rivet/Projections/FinalState.hh"
+#include "Rivet/Projections/PromptFinalState.hh"
+#include "Rivet/Projections/FastJets.hh"
+#include "Rivet/Projections/Sphericity.hh"
+#include "Rivet/Projections/SmearedParticles.hh"
+#include "Rivet/Projections/SmearedJets.hh"
+#include "Rivet/Projections/SmearedMET.hh"
+#include "Rivet/Tools/Cutflow.hh"
+
+namespace Rivet {
+
+
+  /// @brief ATLAS 2016 2 -SS-lepton / 3-lepton SUSY search, from 13.2/fb CONF note
+  class ATLAS_2016_CONF_2016_037 : public Analysis {
+  public:
+
+    /// Constructor
+    DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2016_CONF_2016_037);
+
+
+    /// @name Analysis methods
+    //@{
+
+    /// Book histograms and initialise projections before the run
+    void init() {
+
+      // Initialise and register projections
+      FinalState calofs(Cuts::abseta < 4.9);
+      declare(calofs, "Clusters");
+      FastJets fj(calofs, FastJets::ANTIKT, 0.4);
+      declare(fj, "TruthJets");
+      declare(SmearedJets(fj, JET_SMEAR_ATLAS_RUN2, [](const Jet& j) {
+            if (j.abseta() > 2.5) return 0.;
+            return j.bTagged(Cuts::pT > 5*GeV) ? 0.70 :
+              j.cTagged(Cuts::pT > 5*GeV) ? 1/12. :
+              j.tauTagged(Cuts::pT > 5*GeV) ? 1/54. : 1/380.; }), "Jets");
+
+      MissingMomentum mm(calofs);
+      declare(mm, "TruthMET");
+      declare(SmearedMET(mm, MET_SMEAR_ATLAS_RUN2), "MET");
+
+      FinalState es(Cuts::abspid == PID::ELECTRON && Cuts::abseta < 2.47 && !Cuts::absetaIn(1.37, 1.52) && Cuts::pT > 10*GeV);
+      declare(es, "TruthElectrons");
+      declare(SmearedParticles(es, ELECTRON_EFF_ATLAS_RUN2, ELECTRON_SMEAR_ATLAS_RUN2), "Electrons");
+
+      FinalState mus(Cuts::abspid == PID::MUON && Cuts::abseta < 2.5 && Cuts::pT > 10*GeV);
+      declare(mus, "TruthMuons");
+      declare(SmearedParticles(mus, MUON_EFF_ATLAS_RUN2, MUON_SMEAR_ATLAS_RUN2), "Muons");
+
+      ChargedFinalState cfs(Cuts::abseta < 2.5);
+      declare(cfs, "TruthTracks");
+      declare(SmearedParticles(cfs, TRK_EFF_ATLAS_RUN2), "Tracks");
+
+
+      // Book histograms/counters
+      _h_3l1 = bookCounter("SR3l1");
+      _h_3l2 = bookCounter("SR3l2");
+      _h_0b1 = bookCounter("SR0b1");
+      _h_0b2 = bookCounter("SR0b2");
+      _h_1b = bookCounter("SR1b");
+      _h_3b = bookCounter("SR3b");
+      _h_1bDD = bookCounter("SR1bDD");
+      _h_3bDD = bookCounter("SR3bDD");
+      _h_1bGG = bookCounter("SR1bGG");
+
+    }
+
+
+    /// Perform the per-event analysis
+    void analyze(const Event& event) {
+
+      // Get baseline electrons, muons, and jets
+      Particles elecs = apply<ParticleFinder>(event, "Electrons").particlesByPt();
+      Particles muons = apply<ParticleFinder>(event, "Muons").particlesByPt();
+      Jets jets = apply<JetAlg>(event, "Jets").jetsByPt(Cuts::pT > 20*GeV && Cuts::abseta < 2.8);
+      const Jets bjets = filter_select(jets, [&](const Jet& j) { return j.bTagged(Cuts::pT > 5*GeV); });
+
+
+      // Jet/electron/muon overlap removal and selection
+      // Remove any electron or muon within dR = 0.2 of a b-tagged jet
+      for (const Jet& bj : bjets) {
+        ifilter_discard(elecs, deltaRLess(bj, 0.2, RAPIDITY));
+        ifilter_discard(muons, deltaRLess(bj, 0.2, RAPIDITY));
+      }
+      // Remove any untagged jet within dR = 0.2 of an electron or muon
+      for (const Particle& e : elecs)
+        ifilter_discard(jets, deltaRLess(e, 0.2, RAPIDITY));
+      for (const Particle& m : muons)
+        ifilter_discard(jets, deltaRLess(m, 0.2, RAPIDITY));
+      // Remove any untagged low-multiplicity/muon-dominated jet within dR = 0.4 of a muon
+      for (const Particle& m : muons)
+        ifilter_discard(jets, [&](const Jet& j) {
+            if (deltaR(m, j, RAPIDITY) > 0.4) return false;
+            const Particles trks = j.particles(Cuts::abscharge != 0);
+            if (trks.size() < 3) return true;
+            return m.pT()/j.pT() > 0.5 && m.pT()/sum(trks, pT, 0.0) > 0.7;
+          });
+      // Remove any electron or muon near a remaining jet, with a shrinking cone
+      const auto lcone_iso_fn = [&](const Particle& l) {
+        const double dr = min(0.4, 0.04 + 10*GeV/l.pT());
+        return any(jets, deltaRLess(l, dr, RAPIDITY));
+      };
+      ifilter_discard(elecs, lcone_iso_fn);
+      ifilter_discard(muons, lcone_iso_fn);
+      // Track-sharing e,mu also filtered, but that decision can't be made here
+      const Jets& sigjets = jets;
+      const Jets& sigbjets = bjets;
+
+
+      // Lepton isolation
+      Particles sigelecs = filter_select(elecs, Cuts::abseta < 2);
+      Particles sigmuons = muons;
+      ifilter_select(sigelecs, ParticleEffFilter(ELECTRON_IDEFF_ATLAS_RUN2_MEDIUM));
+      const Particles trks = apply<ParticleFinder>(event, "Tracks").particles();
+      const Particles clus = apply<ParticleFinder>(event, "Clusters").particles();
+      ifilter_discard(sigelecs, [&](const Particle& e) {
+          const double R = min(0.2, 10*GeV/e.pT());
+          double ptsum = -e.pT(), etsum = -e.Et();
+          for (const Particle& t : trks)
+            if (deltaR(t,e) < R) ptsum += t.pT();
+          for (const Particle& c : clus)
+            if (deltaR(c,e) < 0.2) etsum += c.pT(); ///< @todo Bit vague about "energy"
+          return ptsum / e.pT() > 0.06 || etsum / e.pT() > 0.06;
+        });
+      ifilter_discard(sigmuons, [&](const Particle& m) {
+          const double R = min(0.3, 10*GeV/m.pT());
+          double ptsum = -m.pT();
+          for (const Particle& t : trks)
+            if (deltaR(t,m) < R) ptsum += t.pT();
+          return ptsum / m.pT() > 0.06;
+        });
+      /// @todo Note is vague about whether "signal lepton" defn includes pT > 20? Must *keep* the hard leptons.
+      ifilter_select(sigelecs, Cuts::pT > 20*GeV);
+      ifilter_select(sigmuons, Cuts::pT > 20*GeV);
+
+
+      // MET calculation (NB. done generically, with smearing, rather than via explicit physics objects)
+      const Vector3 vmet = -apply<SmearedMET>(event, "MET").vectorEt();
+      const double etmiss = vmet.mod();
+
+
+      //////////////////
+
+
+      // Event selection cuts
+      const Particles sigleptons = sigelecs + sigmuons;
+      if (sigleptons.size() < 2) vetoEvent;
+      if (sigleptons.size() == 2 && sigleptons[0].charge() != sigleptons[1].charge()) vetoEvent;
+
+      // Jet sub-selections and meff calculation
+      const Jets sigjets25 = filter_select(sigjets, Cuts::pT > 25*GeV);
+      const Jets sigjets40 = filter_select(sigjets25, Cuts::pT > 40*GeV);
+      const Jets sigjets50 = filter_select(sigjets40, Cuts::pT > 50*GeV);
+      /// @todo Is meff specific to the jet pT cut?
+      const double meff = sum(sigjets, pT, 0.0) + sum(sigleptons, pT, 0.0);
+
+      // Fill counters
+      const double w = event.weight();
+      if (sigleptons.size() >= 3 && sigbjets.empty() && sigjets40.size() >= 4 && etmiss > 150*GeV) _h_3l1->fill(w);
+      if (sigleptons.size() >= 3 && sigbjets.empty() && sigjets40.size() >= 4 && etmiss > 200*GeV && meff > 1500*GeV) _h_3l2->fill(w);
+      if (sigleptons.size() >= 2 && sigbjets.empty() && sigjets25.size() >= 6 && etmiss > 150*GeV && meff > 500*GeV) _h_0b1->fill(w);
+      if (sigleptons.size() >= 2 && sigbjets.empty() && sigjets40.size() >= 6 && etmiss > 150*GeV && meff > 900*GeV) _h_0b2->fill(w);
+      if (sigleptons.size() >= 2 && sigbjets.size() >= 1 && sigjets25.size() >= 6 && etmiss > 200*GeV && meff > 650*GeV) _h_1b->fill(w);
+      if (sigleptons.size() >= 2 && sigbjets.size() >= 3 && sigjets25.size() >= 6 && etmiss > 150*GeV && meff > 600*GeV) _h_3b->fill(w);
+      if (filter_select(sigleptons, Cuts::charge < 0).size() >= 2) {
+        if (sigleptons.size() >= 2 && sigbjets.size() >= 1 && sigjets50.size() >= 6 && meff > 1200*GeV) _h_1bDD->fill(w);
+        if (sigleptons.size() >= 2 && sigbjets.size() >= 3 && sigjets50.size() >= 6 && meff > 1000*GeV) _h_3bDD->fill(w);
+        if (sigleptons.size() >= 2 && sigbjets.size() >= 1 && sigjets50.size() >= 6 && meff > 1800*GeV) _h_1bGG->fill(w);
+      }
+
+    }
+
+
+    /// Normalise counters after the run
+    void finalize() {
+
+      const double sf = 13.2*crossSection()/femtobarn/sumOfWeights();
+      scale({_h_3l1, _h_3l2, _h_0b1, _h_0b2, _h_1b, _h_3b, _h_1bDD, _h_3bDD, _h_1bGG}, sf);
+
+    }
+
+    //@}
+
+
+  private:
+
+    /// @name Histograms
+    //@{
+    CounterPtr _h_3l1, _h_3l2, _h_0b1, _h_0b2, _h_1b, _h_3b, _h_1bDD, _h_3bDD, _h_1bGG;
+    //@}
+
+
+  };
+
+
+
+  // The hook for the plugin system
+  DECLARE_RIVET_PLUGIN(ATLAS_2016_CONF_2016_037);
+
+
+}
diff --git a/src/Analyses/ATLAS_2016_CONF_2016_054.cc b/src/Analyses/ATLAS_2016_CONF_2016_054.cc
new file mode 100644
--- /dev/null
+++ b/src/Analyses/ATLAS_2016_CONF_2016_054.cc
@@ -0,0 +1,217 @@
+// -*- C++ -*-
+#include "Rivet/Analysis.hh"
+#include "Rivet/Projections/FinalState.hh"
+#include "Rivet/Projections/PromptFinalState.hh"
+#include "Rivet/Projections/FastJets.hh"
+#include "Rivet/Projections/Sphericity.hh"
+#include "Rivet/Projections/SmearedParticles.hh"
+#include "Rivet/Projections/SmearedJets.hh"
+#include "Rivet/Projections/SmearedMET.hh"
+#include "Rivet/Tools/Cutflow.hh"
+
+namespace Rivet {
+
+
+  /// @brief ATLAS 2016 1-lepton SUSY search, from 14.8/fb CONF note
+  class ATLAS_2016_CONF_2016_054 : public Analysis {
+  public:
+
+    /// Constructor
+    DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2016_CONF_2016_054);
+
+
+    /// @name Analysis methods
+    //@{
+
+    /// Book histograms and initialise projections before the run
+    void init() {
+
+      // Initialise and register projections
+      FinalState calofs(Cuts::abseta < 4.9);
+      FastJets fj(calofs, FastJets::ANTIKT, 0.4);
+      declare(fj, "TruthJets");
+      declare(SmearedJets(fj, JET_SMEAR_ATLAS_RUN2, [](const Jet& j) {
+            if (j.abseta() > 2.5) return 0.;
+            return j.bTagged(Cuts::pT > 5*GeV) ? 0.77 :
+              j.cTagged(Cuts::pT > 5*GeV) ? 1/6.2 : 1/134.; }), "Jets");
+
+      MissingMomentum mm(calofs);
+      declare(mm, "TruthMET");
+      declare(SmearedMET(mm, MET_SMEAR_ATLAS_RUN2), "MET");
+
+      FinalState es(Cuts::abseta < 2.47 && Cuts::pT > 7*GeV && Cuts::abspid == PID::ELECTRON);
+      declare(es, "TruthElectrons");
+      declare(SmearedParticles(es, ELECTRON_EFF_ATLAS_RUN2, ELECTRON_SMEAR_ATLAS_RUN2), "Electrons");
+
+      FinalState mus(Cuts::abseta < 2.5 && Cuts::pT > 6*GeV && Cuts::abspid == PID::MUON);
+      declare(mus, "TruthMuons");
+      declare(SmearedParticles(mus, MUON_EFF_ATLAS_RUN2, MUON_SMEAR_ATLAS_RUN2), "Muons");
+
+
+      // Book histograms/counters
+      _h_gg2j = bookCounter("GG-2j");
+      _h_gg6j0 = bookCounter("GG-6j-0bulk");
+      _h_gg6j1 = bookCounter("GG-6j-1highmass");
+      _h_gg4j0 = bookCounter("GG-4j-0lowx");
+      _h_gg4j1 = bookCounter("GG-4j-1lowxbveto");
+      _h_gg4j2 = bookCounter("GG-4j-2highx");
+      _h_ss4j0 = bookCounter("SS-4j-0x12");
+      _h_ss4j1 = bookCounter("SS-4j-1lowx");
+      _h_ss5j0 = bookCounter("SS-5j-0x12");
+      _h_ss5j1 = bookCounter("SS-5j-1highx");
+
+    }
+
+
+    /// Perform the per-event analysis
+    void analyze(const Event& event) {
+
+      // Get baseline electrons, muons, and jets
+      Particles elecs = apply<ParticleFinder>(event, "Electrons").particles();
+      Particles muons = apply<ParticleFinder>(event, "Muons").particles();
+      Jets jets = apply<JetAlg>(event, "Jets").jetsByPt(Cuts::pT > 20*GeV && Cuts::abseta < 4.5);
+
+      // Jet/electron/muons overlap removal and selection
+      // Remove any jet within dR = 0.2 of an electron
+      for (const Particle& e : elecs)
+        ifilter_discard(jets, deltaRLess(e, 0.2, RAPIDITY));
+      // Remove any electron within dR = 0.01 of a muon
+      for (const Particle& m : muons)
+        ifilter_discard(elecs, deltaRLess(m, 0.01, RAPIDITY));
+      // Assemble b-jets collection, and remove muons within dR = 0.2 of a b-tagged jet
+      Jets bjets;
+      for (const Jet& j : jets) {
+        if (j.abseta() < 2.5 && j.pT() > 30*GeV && j.bTagged(Cuts::pT > 5*GeV)) {
+          bjets += j;
+          ifilter_discard(muons, deltaRLess(j, 0.2, RAPIDITY));
+        }
+      }
+      // Remove any jet within dR = 0.2 of a muon if track conditions are met
+      for (const Particle& m : muons)
+        ifilter_discard(jets, [&](const Jet& j){
+            if (deltaR(j,m) > 0.2) return false;
+            /// @todo Add track efficiency random filtering
+            const Particles trks = j.particles(Cuts::abscharge > 0 && Cuts::pT > 0.5*GeV);
+            return trks.size() < 4 || m.pT()/j.pT() > 0.7;
+          });
+      // Remove any muon within dR = 0.2 of a remaining jet if the same track conditions are *not* met
+      /// @todo There must be nicer way to do complementary removal...
+      for (const Jet& j : jets) {
+        /// @todo Add track efficiency random filtering
+        const size_t ntrks = j.particles(Cuts::abscharge > 0 && Cuts::pT > 0.5*GeV).size();
+        ifilter_discard(muons, [&](const Particle& m){
+            if (deltaR(j,m) > 0.2) return false;
+            return ntrks > 3 && m.pT()/j.pT() < 0.7;
+          });
+      }
+      // Remove any muon with dR close to a remaining jet, via a functional form
+      for (const Jet& j : jets)
+        ifilter_discard(muons, [&](const Particle& m) { return deltaR(m,j, RAPIDITY) < min(0.4, 0.04 + 10*GeV/m.pT()); });
+
+
+      // Signal jet selection
+      const Jets sigjets = filter_select(jets, Cuts::pT > 30*GeV && Cuts::abseta < 2.8);
+      const Jets sigbjets = bjets;
+
+      // "Gradient-loose" signal lepton selection
+      const ParticleEffFilter grad_loose_filter([](const Particle& e) { return e.pT() > 60*GeV ? 0.98 : 0.95; });
+      Particles sigelecs = filter_select(elecs, grad_loose_filter);
+      Particles sigmuons = filter_select(muons, grad_loose_filter);
+      // Tight electron selection (NB. assuming independent eff to gradient-loose... hmm)
+      ifilter_select(sigelecs, ParticleEffFilter(ELECTRON_IDEFF_ATLAS_RUN2_TIGHT));
+
+
+      // MET calculation (NB. done generically, with smearing, rather than via explicit physics objects)
+      const Vector3 vmet = -apply<SmearedMET>(event, "MET").vectorEt();
+      const double etmiss = vmet.mod();
+
+
+      //////////////////
+
+
+      // Event selection cuts
+      if (sigelecs.size() + sigmuons.size() != 1) vetoEvent;
+      const Particle siglepton = sigelecs.empty() ? sigmuons.front() : sigelecs.front();
+
+      // mT and m_eff
+      const double mT = sqrt(2*siglepton.pT()*etmiss*(1-cos(deltaPhi(siglepton,vmet))));
+      const double meff = siglepton.pT() + sum(sigjets, pT, 0.0) + etmiss;
+
+      // Aplanarities
+      Sphericity sph;
+      vector<FourMomentum> vecs;
+      transform(sigjets, vecs, mom);
+      sph.calc(vecs);
+      const double jet_aplanarity = sph.aplanarity();
+      vecs += siglepton.mom();
+      sph.calc(vecs);
+      const double lepton_aplanarity = sph.aplanarity();
+
+
+      //////////////////
+
+
+      // Fill counters
+      const double w = event.weight();
+      // GG
+      if (siglepton.pT() < 35*GeV && sigjets.size() >= 2 &&
+          sigjets[0].pT() > 200*GeV && sigjets[1].pT() > 30*GeV &&
+          mT > 100*GeV && etmiss > 460*GeV && etmiss/meff > 0.35) _h_gg2j->fill(w);
+      if (siglepton.pT() > 35*GeV && sigjets.size() >= 6 &&
+          sigjets[0].pT() > 125*GeV && sigjets[5].pT() > 30*GeV &&
+          mT > 225*GeV && etmiss > 250*GeV && meff > 1000*GeV && etmiss/meff > 0.2 &&
+          jet_aplanarity > 0.04) _h_gg6j0->fill(w);
+      if (siglepton.pT() > 35*GeV && sigjets.size() >= 6 &&
+          sigjets[0].pT() > 125*GeV && sigjets[5].pT() > 30*GeV &&
+          mT > 225*GeV && etmiss > 250*GeV && meff > 2000*GeV && etmiss/meff > 0.1 &&
+          jet_aplanarity > 0.04) _h_gg6j1->fill(w);
+      if (sigjets.size() >= 4 && sigjets[3].pT() > 100*GeV &&
+          mT > 125*GeV && etmiss > 250*GeV && meff > 2000*GeV && jet_aplanarity > 0.06) _h_gg4j0->fill(w);
+      if (sigjets.size() >= 4 && sigjets[3].pT() > 100*GeV && sigbjets.empty() &&
+          mT > 125*GeV && etmiss > 250*GeV && meff > 2000*GeV && jet_aplanarity > 0.03) _h_gg4j1->fill(w);
+      if (siglepton.pT() > 35*GeV &&
+          sigjets.size() >= 4 && sigjets[0].pT() > 400*GeV && inRange(sigjets[3].pT(), 30*GeV, 100*GeV) &&
+          mT > 475*GeV && etmiss > 250*GeV && meff > 1600*GeV && etmiss/meff > 0.3) _h_gg4j2->fill(w);
+      // SS
+      if (siglepton.pT() > 35*GeV && sigjets.size() >= 4 && sigjets[3].pT() > 50*GeV &&
+          mT > 175*GeV && etmiss > 300*GeV && meff > 1200*GeV && lepton_aplanarity > 0.08) _h_ss4j0->fill(w);
+      if (siglepton.pT() > 35*GeV && sigjets.size() >= 5 && sigjets[4].pT() > 50*GeV && sigbjets.empty() &&
+          mT > 175*GeV && etmiss > 300*GeV && etmiss/meff > 0.2) _h_ss5j0->fill(w);
+      if (siglepton.pT() > 35*GeV && sigjets.size() >= 4 && sigjets[0].pT() > 250*GeV && sigjets[3].pT() > 30*GeV &&
+          inRange(mT, 150*GeV, 400*GeV) && etmiss > 250*GeV && lepton_aplanarity > 0.03) _h_ss4j1->fill(w);
+      if (siglepton.pT() > 35*GeV && sigjets.size() >= 5 && sigjets[4].pT() > 30*GeV &&
+          mT > 400*GeV && etmiss > 400*GeV && lepton_aplanarity > 0.03) _h_ss5j1->fill(w);
+
+    }
+
+
+    /// Normalise counters after the run
+    void finalize() {
+
+      const double sf = 14.8*crossSection()/femtobarn/sumOfWeights();
+      scale({_h_gg2j, _h_gg6j0, _h_gg6j1, _h_gg4j0, _h_gg4j1, _h_gg4j2}, sf);
+      scale({_h_ss4j0, _h_ss4j1, _h_ss5j0,_h_ss5j1}, sf);
+
+    }
+
+    //@}
+
+
+  private:
+
+    /// @name Histograms
+    //@{
+    CounterPtr _h_gg2j, _h_gg6j0, _h_gg6j1, _h_gg4j0, _h_gg4j1, _h_gg4j2;
+    CounterPtr _h_ss4j0, _h_ss4j1, _h_ss5j0,_h_ss5j1;
+    //@}
+
+
+  };
+
+
+
+  // The hook for the plugin system
+  DECLARE_RIVET_PLUGIN(ATLAS_2016_CONF_2016_054);
+
+
+}
diff --git a/src/Analyses/ATLAS_2016_CONF_2016_078.cc b/src/Analyses/ATLAS_2016_CONF_2016_078.cc
new file mode 100644
--- /dev/null
+++ b/src/Analyses/ATLAS_2016_CONF_2016_078.cc
@@ -0,0 +1,269 @@
+// -*- C++ -*-
+#include "Rivet/Analysis.hh"
+#include "Rivet/Projections/FinalState.hh"
+#include "Rivet/Projections/PromptFinalState.hh"
+#include "Rivet/Projections/FastJets.hh"
+#include "Rivet/Projections/Sphericity.hh"
+#include "Rivet/Projections/SmearedParticles.hh"
+#include "Rivet/Projections/SmearedJets.hh"
+#include "Rivet/Projections/SmearedMET.hh"
+#include "Rivet/Tools/Cutflow.hh"
+
+namespace Rivet {
+
+
+  /// @brief ATLAS 2016 0-lepton SUSY search, from 13/fb ICHEP'16 CONF note
+  class ATLAS_2016_CONF_2016_078 : public Analysis {
+  public:
+
+    /// Constructor
+    DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2016_CONF_2016_078);
+
+
+    /// @name Analysis methods
+    //@{
+
+    /// Book histograms and initialise projections before the run
+    void init() {
+
+      // Initialise and register projections
+      FinalState calofs(Cuts::abseta < 3.2);
+      FastJets fj(calofs, FastJets::ANTIKT, 0.4);
+      declare(fj, "TruthJets");
+      declare(SmearedJets(fj, JET_SMEAR_ATLAS_RUN2, JET_BTAG_ATLAS_RUN2_MV2C10), "RecoJets");
+
+      MissingMomentum mm(calofs);
+      declare(mm, "TruthMET");
+      declare(SmearedMET(mm, MET_SMEAR_ATLAS_RUN2), "RecoMET");
+
+      PromptFinalState es(Cuts::abseta < 2.47 && Cuts::abspid == PID::ELECTRON, true, true);
+      declare(es, "TruthElectrons");
+      declare(SmearedParticles(es, ELECTRON_EFF_ATLAS_RUN2, ELECTRON_SMEAR_ATLAS_RUN2), "RecoElectrons");
+
+      PromptFinalState mus(Cuts::abseta < 2.7 && Cuts::abspid == PID::MUON, true);
+      declare(mus, "TruthMuons");
+      declare(SmearedParticles(mus, MUON_EFF_ATLAS_RUN2, MUON_SMEAR_ATLAS_RUN2), "RecoMuons");
+
+
+      // Book histograms/counters
+      _h_2j_0800 = bookCounter("2j-0800");
+      _h_2j_1200 = bookCounter("2j-1200");
+      _h_2j_1600 = bookCounter("2j-1600");
+      _h_2j_2000 = bookCounter("2j-2000");
+      _h_3j_1200 = bookCounter("3j-1200"); //< was "2j-2000": copy-paste bug colliding with the 2j-2000 counter name
+      _h_4j_1000 = bookCounter("4j-1000");
+      _h_4j_1400 = bookCounter("4j-1400");
+      _h_4j_1800 = bookCounter("4j-1800");
+      _h_4j_2200 = bookCounter("4j-2200");
+      _h_4j_2600 = bookCounter("4j-2600");
+      _h_5j_1400 = bookCounter("5j-1400");
+      _h_6j_1800 = bookCounter("6j-1800");
+      _h_6j_2200 = bookCounter("6j-2200");
+
+
+      // Book cut-flows
+      const vector<string> cuts23j = {"Pre-sel+MET+pT1+meff", "Njet", "Dphi_min(j123,MET)", "Dphi_min(j4+,MET)", "pT2", "eta_j12", "MET/sqrtHT", "m_eff(incl)"};
+      _flows.addCutflow("2j-0800", cuts23j);
+      _flows.addCutflow("2j-1200", cuts23j);
+      _flows.addCutflow("2j-1600", cuts23j);
+      _flows.addCutflow("2j-2000", cuts23j);
+      _flows.addCutflow("3j-1200", cuts23j);
+      const vector<string> cuts456j = {"Pre-sel+MET+pT1+meff", "Njet", "Dphi_min(j123,MET)", "Dphi_min(j4+,MET)", "pT4", "eta_j1234", "Aplanarity", "MET/m_eff(Nj)", "m_eff(incl)"};
+      _flows.addCutflow("4j-1000", cuts456j);
+      _flows.addCutflow("4j-1400", cuts456j);
+      _flows.addCutflow("4j-1800", cuts456j);
+      _flows.addCutflow("4j-2200", cuts456j);
+      _flows.addCutflow("4j-2600", cuts456j);
+      _flows.addCutflow("5j-1400", cuts456j);
+      _flows.addCutflow("6j-1800", cuts456j);
+      _flows.addCutflow("6j-2200", cuts456j);
+
+    }
+
+
+    /// Perform the per-event analysis
+    void analyze(const Event& event) {
+
+      _flows.fillinit();
+
+      // Same MET cut for all signal regions
+      const Vector3 vmet = -apply<SmearedMET>(event, "RecoMET").vectorEt();
+      const double met = vmet.mod();
+      if (met < 250*GeV) vetoEvent;
+
+      // Get baseline electrons, muons, and jets
+      Particles elecs = apply<ParticleFinder>(event, "RecoElectrons").particles(Cuts::pT > 10*GeV);
+      Particles muons = apply<ParticleFinder>(event, "RecoMuons").particles(Cuts::pT > 10*GeV);
+      Jets jets = apply<JetAlg>(event, "RecoJets").jetsByPt(Cuts::pT > 20*GeV && Cuts::abseta < 2.8); ///< @todo Pile-up subtraction
+
+      // Jet/electron/muons overlap removal and selection
+      // Remove electrons within dR = 0.2 of a b-tagged jet
+      for (const Jet& j : jets)
+        if (j.abseta() < 2.5 && j.pT() > 50*GeV && j.bTagged(Cuts::pT > 5*GeV))
+          ifilter_discard(elecs, deltaRLess(j, 0.2, RAPIDITY));
+      // Remove any |eta| < 2.8 jet within dR = 0.2 of a remaining electron
+      for (const Particle& e : elecs)
+        ifilter_discard(jets, deltaRLess(e, 0.2, RAPIDITY));
+      // Remove any electron with dR in [0.2, 0.4] of a remaining jet
+      for (const Jet& j : jets)
+        ifilter_discard(elecs, [&](const Particle& e) { return inRange(deltaR(e,j, RAPIDITY), 0.2, 0.4); });
+      // Remove any muon with dR close to a remaining jet, via a functional form
+      for (const Jet& j : jets)
+        ifilter_discard(muons, [&](const Particle& m) { return deltaR(m,j, RAPIDITY) < min(0.4, 0.04 + 10*GeV/m.pT()); });
+      // Remove any |eta| < 2.8 jet within dR = 0.2 of a remaining muon if track conditions are met
+      for (const Particle& m : muons)
+        /// @todo Add track efficiency random filtering
+        ifilter_discard(jets, [&](const Jet& j) {
+            if (deltaR(j,m, RAPIDITY) > 0.2) return false;
+            const Particles trks = j.particles(Cuts::abscharge > 0 && Cuts::pT > 0.5*GeV);
+            return trks.size() < 3 || (m.pT() > 2*j.pT() && m.pT() > 0.7*sum(trks, pT, 0.0));
+          });
+      // Loose electron selection
+      ifilter_select(elecs, ParticleEffFilter(ELECTRON_IDEFF_ATLAS_RUN2_LOOSE));
+
+      // Veto the event if there are any remaining baseline leptons
+      if (!elecs.empty()) vetoEvent;
+      if (!muons.empty()) vetoEvent;
+
+      // Passed presel & MET
+      _flows.fill(0, true);
+
+      // Get jets and their pTs
+      const Jets jets20 = jets;
+      const Jets jets50 = filterBy(jets, Cuts::pT > 50*GeV);
+      const size_t njets50 = jets50.size(), njets20 = jets20.size();
+      if (jets50.size() < 2) vetoEvent;
+      vector<double> jetpts20, jetpts50;
+      transform(jets20, jetpts20, pT);
+      transform(jets50, jetpts50, pT);
+
+      // Construct multi-jet observables
+      const double ht = sum(jetpts20, 0.0);
+      const double met_sqrtHT = met / sqrt(ht);
+      const double meff_incl = sum(jetpts50, met);
+      const double meff_4 = (njets50 >= 4) ? sum(head(jetpts50, 4), met) : -1;
+      const double meff_5 = (njets50 >= 5) ? sum(head(jetpts50, 5), met) : -1;
+      const double meff_6 = (njets50 >= 6) ? sum(head(jetpts50, 6), met) : -1;
+      const double met_meff_4 = met / meff_4;
+      const double met_meff_5 = met / meff_5;
+      const double met_meff_6 = met / meff_6;
+
+      // Jet |eta|s
+      vector<double> jetetas20; transform(jets20, jetetas20, abseta);
+      const double etamax_2 = (njets20 >= 2) ? max(head(jetetas20, 2)) : -1;
+      const double etamax_4 = (njets20 >= 4) ? max(head(jetetas20, 4)) : -1;
+      const double etamax_6 = (njets20 >= 6) ? max(head(jetetas20, 6)) : -1;
+
+      // Get dphis between MET and jets
+      vector<double> dphimets50; transform(jets50, dphimets50, deltaPhiWRT(vmet));
+      const vector<double> dphimets50_123 = head(dphimets50, 3);
+      const vector<double> dphimets50_more = tail(dphimets50, -3);
+      const double dphimin_123 = !dphimets50_123.empty() ? min(dphimets50_123) : -1;
+      const double dphimin_more = !dphimets50_more.empty() ? min(dphimets50_more) : -1;
+
+      // Jet aplanarity
+      Sphericity sph; sph.calc(jets50);
+      const double aplanarity = sph.aplanarity();
+
+
+      //////////////////
+
+
+      const double w = event.weight();
+
+      // 2 jet regions
+      if (dphimin_123 > 0.8 && dphimin_more > 0.4) {
+        if (jetpts50[1] > 200*GeV && etamax_2 < 0.8) { //< implicit pT[0] cut
+          if (met_sqrtHT > 14*sqrt(GeV) && meff_incl > 800*GeV) _h_2j_0800->fill(w);
+        }
+        if (jetpts50[1] > 250*GeV && etamax_2 < 1.2) { //< implicit pT[0] cut
+          if (met_sqrtHT > 16*sqrt(GeV) && meff_incl > 1200*GeV) _h_2j_1200->fill(w);
+          if (met_sqrtHT > 18*sqrt(GeV) && meff_incl > 1600*GeV) _h_2j_1600->fill(w);
+          if (met_sqrtHT > 20*sqrt(GeV) && meff_incl > 2000*GeV) _h_2j_2000->fill(w);
+        }
+      }
+
+      // 3 jet region
+      if (njets50 >= 3 && dphimin_123 > 0.4 && dphimin_more > 0.2) {
+        if (jetpts50[0] > 600*GeV && jetpts50[2] > 50*GeV) { //< implicit pT[1] cut
+          if (met_sqrtHT > 16*sqrt(GeV) && meff_incl > 1200*GeV) _h_3j_1200->fill(w);
+        }
+      }
+
+      // 4 jet regions (note implicit pT[1,2] cuts)
+      if (njets50 >= 4 && dphimin_123 > 0.4 && dphimin_more > 0.4 && jetpts50[0] > 200*GeV && aplanarity > 0.04) {
+        if (jetpts50[3] > 100*GeV && etamax_4 < 1.2 && met_meff_4 > 0.25*sqrt(GeV) && meff_incl > 1000*GeV) _h_4j_1000->fill(w);
+        if (jetpts50[3] > 100*GeV && etamax_4 < 2.0 && met_meff_4 > 0.25*sqrt(GeV) && meff_incl > 1400*GeV) _h_4j_1400->fill(w);
+        if (jetpts50[3] > 100*GeV && etamax_4 < 2.0 && met_meff_4 > 0.20*sqrt(GeV) && meff_incl > 1800*GeV) _h_4j_1800->fill(w);
+        if (jetpts50[3] > 150*GeV && etamax_4 < 2.0 && met_meff_4 > 0.20*sqrt(GeV) && meff_incl > 2200*GeV) _h_4j_2200->fill(w);
+        if (jetpts50[3] > 150*GeV &&                   met_meff_4 > 0.20*sqrt(GeV) && meff_incl > 2600*GeV) _h_4j_2600->fill(w);
+      }
+
+      // 5 jet region (note implicit pT[1,2,3] cuts)
+      if (njets50 >= 5 && dphimin_123 > 0.4 && dphimin_more > 0.2 && jetpts50[0] > 500*GeV) {
+        if (jetpts50[4] > 50*GeV && met_meff_5 > 0.3*sqrt(GeV) && meff_incl > 1400*GeV) _h_5j_1400->fill(w);
+      }
+
+      // 6 jet regions (note implicit pT[1,2,3,4] cuts)
+      if (njets50 >= 6 && dphimin_123 > 0.4 && dphimin_more > 0.2 && jetpts50[0] > 200*GeV && aplanarity > 0.08) {
+        if (jetpts50[5] >  50*GeV && etamax_6 < 2.0 && met_meff_6*sqrt(GeV) > 0.20 && meff_incl > 1800*GeV) _h_6j_1800->fill(w);
+        if (jetpts50[5] > 100*GeV &&                   met_meff_6*sqrt(GeV) > 0.15 && meff_incl > 2200*GeV) _h_6j_2200->fill(w);
+      }
+
+      // Cutflows (jet-pT terms carry njets50 guards: init-list elements are evaluated eagerly, so unguarded jetpts50[N] would read out of range)
+      _flows["2j-0800"].filltail({true, dphimin_123 > 0.8, dphimin_more > 0.4, jetpts50[1] > 200*GeV, etamax_2 < 0.8, met_sqrtHT > 14*sqrt(GeV), meff_incl >  800*GeV});
+      _flows["2j-1200"].filltail({true, dphimin_123 > 0.8, dphimin_more > 0.4, jetpts50[1] > 250*GeV, etamax_2 < 1.2, met_sqrtHT > 16*sqrt(GeV), meff_incl > 1200*GeV});
+      _flows["2j-1600"].filltail({true, dphimin_123 > 0.8, dphimin_more > 0.4, jetpts50[1] > 250*GeV, etamax_2 < 1.2, met_sqrtHT > 18*sqrt(GeV), meff_incl > 1600*GeV});
+      _flows["2j-2000"].filltail({true, dphimin_123 > 0.8, dphimin_more > 0.4, jetpts50[1] > 250*GeV, etamax_2 < 1.2, met_sqrtHT > 20*sqrt(GeV), meff_incl > 2000*GeV});
+      _flows["3j-1200"].filltail({njets50 >= 3, dphimin_123 > 0.4, dphimin_more > 0.2, njets50 >= 3 && jetpts50[0] > 600*GeV && jetpts50[2] > 50*GeV, true, met_sqrtHT > 16*sqrt(GeV), meff_incl > 1200*GeV});
+      _flows["4j-1000"].filltail({njets50 >= 4, dphimin_123 > 0.4, dphimin_more > 0.4, njets50 >= 4 && jetpts50[0] > 200*GeV && jetpts50[3] > 100*GeV, etamax_4 < 1.2, aplanarity > 0.04, met_meff_4 > 0.25*sqrt(GeV), meff_incl > 1000*GeV});
+      _flows["4j-1400"].filltail({njets50 >= 4, dphimin_123 > 0.4, dphimin_more > 0.4, njets50 >= 4 && jetpts50[0] > 200*GeV && jetpts50[3] > 100*GeV, etamax_4 < 2.0, aplanarity > 0.04, met_meff_4 > 0.25*sqrt(GeV), meff_incl > 1400*GeV});
+      _flows["4j-1800"].filltail({njets50 >= 4, dphimin_123 > 0.4, dphimin_more > 0.4, njets50 >= 4 && jetpts50[0] > 200*GeV && jetpts50[3] > 100*GeV, etamax_4 < 2.0, aplanarity > 0.04, met_meff_4 > 0.20*sqrt(GeV), meff_incl > 1800*GeV});
+      _flows["4j-2200"].filltail({njets50 >= 4, dphimin_123 > 0.4, dphimin_more > 0.4, njets50 >= 4 && jetpts50[0] > 200*GeV && jetpts50[3] > 150*GeV, etamax_4 < 2.0, aplanarity > 0.04, met_meff_4 > 0.20*sqrt(GeV), meff_incl > 2200*GeV});
+      _flows["4j-2600"].filltail({njets50 >= 4, dphimin_123 > 0.4, dphimin_more > 0.4, njets50 >= 4 && jetpts50[0] > 200*GeV && jetpts50[3] > 150*GeV, true,           aplanarity > 0.04, met_meff_4 > 0.20*sqrt(GeV), meff_incl > 2600*GeV});
+      _flows["5j-1400"].filltail({njets50 >= 5, dphimin_123 > 0.4, dphimin_more > 0.2, njets50 >= 5 && jetpts50[0] > 500*GeV && jetpts50[4] > 50*GeV, true, true, met_meff_5 > 0.3*sqrt(GeV), meff_incl > 1400*GeV});
+      _flows["6j-1800"].filltail({njets50 >= 6, dphimin_123 > 0.4, dphimin_more > 0.2, njets50 >= 6 && jetpts50[0] > 200*GeV && jetpts50[5] >  50*GeV, etamax_6 < 2.0, aplanarity > 0.08, met_meff_6 > 0.20*sqrt(GeV), meff_incl > 1800*GeV});
+      _flows["6j-2200"].filltail({njets50 >= 6, dphimin_123 > 0.4, dphimin_more > 0.2, njets50 >= 6 && jetpts50[0] > 200*GeV && jetpts50[5] > 100*GeV, true,           aplanarity > 0.08, met_meff_6 > 0.15*sqrt(GeV), meff_incl > 2200*GeV});
+
+    }
+
+
+    /// Normalise counters after the run
+    void finalize() {
+
+      const double sf = 13.3*crossSection()/femtobarn/sumOfWeights();
+      scale({_h_2j_0800, _h_2j_1200, _h_2j_1600, _h_2j_2000}, sf);
+      scale( _h_3j_1200, sf);
+      scale({_h_4j_1000, _h_4j_1400, _h_4j_1800, _h_4j_2200, _h_4j_2600}, sf);
+      scale( _h_5j_1400, sf);
+      scale({_h_6j_1800, _h_6j_2200}, sf);
+
+      _flows.scale(sf);
+      MSG_INFO("CUTFLOWS:\n\n" << _flows);
+
+    }
+
+    //@}
+
+
+  private:
+
+    /// @name Histograms
+    //@{
+    CounterPtr _h_2j_0800, _h_2j_1200, _h_2j_1600, _h_2j_2000, _h_3j_1200;
+    CounterPtr _h_4j_1000, _h_4j_1400, _h_4j_1800, _h_4j_2200, _h_4j_2600;
+    CounterPtr _h_5j_1400, _h_6j_1800, _h_6j_2200;
+    //@}
+
+    /// Cut-flows
+    Cutflows _flows;
+
+  };
+
+
+
+  // The hook for the plugin system
+  DECLARE_RIVET_PLUGIN(ATLAS_2016_CONF_2016_078);
+
+
+}
diff --git a/src/Analyses/ATLAS_2016_CONF_2016_094.cc b/src/Analyses/ATLAS_2016_CONF_2016_094.cc
new file mode 100644
--- /dev/null
+++ b/src/Analyses/ATLAS_2016_CONF_2016_094.cc
@@ -0,0 +1,166 @@
+// -*- C++ -*-
+#include "Rivet/Analysis.hh"
+#include "Rivet/Projections/FinalState.hh"
+#include "Rivet/Projections/PromptFinalState.hh"
+#include "Rivet/Projections/FastJets.hh"
+#include "Rivet/Projections/Sphericity.hh"
+#include "Rivet/Projections/SmearedParticles.hh"
+#include "Rivet/Projections/SmearedJets.hh"
+#include "Rivet/Projections/SmearedMET.hh"
+#include "Rivet/Tools/Cutflow.hh"
+
+namespace Rivet {
+
+
+  /// @brief ATLAS 2016 1-lepton + many jets SUSY search, from 14.8/fb CONF note
+  class ATLAS_2016_CONF_2016_094 : public Analysis {
+  public:
+
+    /// Constructor
+    DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2016_CONF_2016_094);
+
+
+    /// @name Analysis methods
+    //@{
+
+    /// Book histograms and initialise projections before the run
+    void init() {
+
+      // Initialise and register projections
+      FinalState calofs(Cuts::abseta < 4.9);
+      FastJets fj(calofs, FastJets::ANTIKT, 0.4);
+      declare(fj, "TruthJets");
+      declare(SmearedJets(fj, JET_SMEAR_ATLAS_RUN2, [](const Jet& j) {
+            if (j.abseta() > 2.5) return 0.;
+            return j.bTagged(Cuts::pT > 5*GeV) ? 0.80 :
+              j.cTagged(Cuts::pT > 5*GeV) ? 1/6. : 1/106.; }), "Jets");
+
+      // MissingMomentum mm(calofs);
+      // declare(mm, "TruthMET");
+      // declare(SmearedMET(mm, MET_SMEAR_ATLAS_RUN2), "MET");
+
+      FinalState es(Cuts::abspid == PID::ELECTRON && Cuts::abseta < 2.47 && !Cuts::absetaIn(1.37, 1.52) && Cuts::pT > 10*GeV);
+      declare(es, "TruthElectrons");
+      declare(SmearedParticles(es, ELECTRON_EFF_ATLAS_RUN2, ELECTRON_SMEAR_ATLAS_RUN2), "Electrons");
+
+      FinalState mus(Cuts::abspid == PID::MUON && Cuts::abseta < 2.4 && Cuts::pT > 10*GeV);
+      declare(mus, "TruthMuons");
+      declare(SmearedParticles(mus, MUON_EFF_ATLAS_RUN2, MUON_SMEAR_ATLAS_RUN2), "Muons");
+
+
+      // Book histograms/counters
+      _h_08j40_0b = bookCounter("08j40_0b");
+      _h_09j40_0b = bookCounter("09j40_0b");
+      _h_10j40_0b = bookCounter("10j40_0b");
+      _h_08j40_3b = bookCounter("08j40_3b");
+      _h_09j40_3b = bookCounter("09j40_3b");
+      _h_10j40_3b = bookCounter("10j40_3b");
+      _h_08j60_0b = bookCounter("08j60_0b");
+      _h_09j60_0b = bookCounter("09j60_0b");
+      _h_10j60_0b = bookCounter("10j60_0b");
+      _h_08j60_3b = bookCounter("08j60_3b");
+      _h_09j60_3b = bookCounter("09j60_3b");
+      _h_10j60_3b = bookCounter("10j60_3b");
+
+    }
+
+
+    /// Perform the per-event analysis
+    void analyze(const Event& event) {
+
+      // Get baseline electrons, muons, and jets
+      // NB. for electrons, we don't apply the loose ID here, since we don't want to double-count effs with later use of tight ID
+      Particles elecs = apply<ParticleFinder>(event, "Electrons").particles();
+      Particles muons = apply<ParticleFinder>(event, "Muons").particles();
+      Jets jets = apply<JetAlg>(event, "Jets").jetsByPt(Cuts::pT > 20*GeV && Cuts::abseta < 2.4);
+      ifilter_select(jets, JetEffFilter([](const Jet& j) { return j.pT() > 60*GeV ? 1.0 : 0.94; }));
+
+
+      // Jet/electron/muon overlap removal and selection
+      // Remove any untagged jet within dR = 0.2 of an electron
+      for (const Particle& e : elecs)
+        ifilter_discard(jets, [&](const Jet& j) { return !j.bTagged(Cuts::pT > 5*GeV) && deltaR(e, j, RAPIDITY) < 0.2; });
+      // Remove any untagged low-multiplicity/muon-dominated jet within dR = 0.4 of a muon
+      for (const Particle& m : muons)
+        ifilter_discard(jets, [&](const Jet& j) {
+            if (j.bTagged(Cuts::pT > 5*GeV)) return false; /// @note A different b-tag working point, 85%, was actually used here *sigh*
+            if (deltaR(m, j, RAPIDITY) > 0.4) return false;
+            if (j.particles(Cuts::abscharge != 0).size() < 3) return true;
+            return m.pT()/j.pT() > 0.5;
+          });
+      // Removing leptons within dR = 0.4 of remaining jets
+      for (const Jet& j : jets) {
+        ifilter_discard(elecs, deltaRLess(j, 0.4, RAPIDITY));
+        ifilter_discard(muons, deltaRLess(j, 0.4, RAPIDITY));
+      }
+
+      // Signal jet and lepton selection
+      const Jets sigjets40 = filter_select(jets, Cuts::pT > 40*GeV);
+      const Jets sigjets60 = filter_select(sigjets40, Cuts::pT > 60*GeV);
+      const Jets sigbjets40 = filter_select(sigjets40, [](const Jet& j) { return j.bTagged(Cuts::pT > 5*GeV); });
+      const Jets sigbjets60 = filter_select(sigjets60, [](const Jet& j) { return j.bTagged(Cuts::pT > 5*GeV); });
+      const Particles sigmuons = filter_select(muons, Cuts::pT > 35*GeV);
+      Particles sigelecs = filter_select(elecs, Cuts::pT > 35*GeV);
+      ifilter_select(sigelecs, ParticleEffFilter(ELECTRON_IDEFF_ATLAS_RUN2_TIGHT));
+
+
+      //////////////////
+
+
+      // Event selection cuts
+      if (sigelecs.size() + sigmuons.size() != 1) vetoEvent;
+      const Particle siglepton = sigelecs.empty() ? sigmuons.front() : sigelecs.front();
+
+      /// @note The note describes Nj = 5, 6, 7, 8, 9, >= 10 and Nb = 0, 1, 2, 3, >= 4 = 30 2D bins
+      ///  for each jet cut... but only provides data for six Nj = >= 8, 9, 10, Nb = 0, >= 3 bins.
+      /// We just implement the latter for now.
+
+      // Fill counters
+      const double w = event.weight();
+      if (sigjets40.size() >= 8  && sigbjets40.empty()) _h_08j40_0b->fill(w);
+      if (sigjets40.size() >= 9  && sigbjets40.empty()) _h_09j40_0b->fill(w);
+      if (sigjets40.size() >= 10 && sigbjets40.empty()) _h_10j40_0b->fill(w);
+      if (sigjets40.size() >= 8  && sigbjets40.size() >= 3) _h_08j40_3b->fill(w);
+      if (sigjets40.size() >= 9  && sigbjets40.size() >= 3) _h_09j40_3b->fill(w);
+      if (sigjets40.size() >= 10 && sigbjets40.size() >= 3) _h_10j40_3b->fill(w);
+
+      if (sigjets60.size() >= 8  && sigbjets60.empty()) _h_08j60_0b->fill(w);
+      if (sigjets60.size() >= 9  && sigbjets60.empty()) _h_09j60_0b->fill(w);
+      if (sigjets60.size() >= 10 && sigbjets60.empty()) _h_10j60_0b->fill(w);
+      if (sigjets60.size() >= 8  && sigbjets60.size() >= 3) _h_08j60_3b->fill(w);
+      if (sigjets60.size() >= 9  && sigbjets60.size() >= 3) _h_09j60_3b->fill(w);
+      if (sigjets60.size() >= 10 && sigbjets60.size() >= 3) _h_10j60_3b->fill(w);
+
+    }
+
+
+    /// Normalise counters after the run
+    void finalize() {
+
+      const double sf = 14.8*crossSection()/femtobarn/sumOfWeights();
+      scale({_h_08j40_0b, _h_09j40_0b, _h_10j40_0b, _h_08j40_3b, _h_09j40_3b, _h_10j40_3b}, sf);
+      scale({_h_08j60_0b, _h_09j60_0b, _h_10j60_0b, _h_08j60_3b, _h_09j60_3b, _h_10j60_3b}, sf);
+
+    }
+
+    //@}
+
+
+  private:
+
+    /// @name Histograms
+    //@{
+    CounterPtr _h_08j40_0b, _h_09j40_0b, _h_10j40_0b, _h_08j40_3b, _h_09j40_3b, _h_10j40_3b;
+    CounterPtr _h_08j60_0b, _h_09j60_0b, _h_10j60_0b, _h_08j60_3b, _h_09j60_3b, _h_10j60_3b;
+    //@}
+
+
+  };
+
+
+
+  // The hook for the plugin system
+  DECLARE_RIVET_PLUGIN(ATLAS_2016_CONF_2016_094);
+
+
+}
diff --git a/src/Analyses/ATLAS_2016_I1452559.cc b/src/Analyses/ATLAS_2016_I1452559.cc
--- a/src/Analyses/ATLAS_2016_I1452559.cc
+++ b/src/Analyses/ATLAS_2016_I1452559.cc
@@ -1,132 +1,132 @@
 #include "Rivet/Analysis.hh"
 #include "Rivet/Projections/FinalState.hh"
 #include "Rivet/Projections/IdentifiedFinalState.hh"
 #include "Rivet/Projections/VisibleFinalState.hh"
 #include "Rivet/Projections/FastJets.hh"
 #include "Rivet/Projections/MissingMomentum.hh"
 #include "Rivet/Projections/SmearedParticles.hh"
 #include "Rivet/Projections/SmearedJets.hh"
 #include "Rivet/Projections/SmearedMET.hh"
 
 namespace Rivet {
 
 
   /// ATLAS 13 TeV monojet search with 3.2/fb of pp data
   class ATLAS_2016_I1452559 : public Analysis {
   public:
 
     DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2016_I1452559);
 
     void init() {
 
       FastJets jets(FinalState(Cuts::abseta < 4.9), FastJets::ANTIKT, 0.4);
       SmearedJets recojets(jets, JET_SMEAR_ATLAS_RUN1);
       declare(recojets, "Jets");
 
       FinalState electrons(Cuts::abspid == PID::ELECTRON && Cuts::abseta < 2.47 && Cuts::pT > 20*GeV);
-      SmearedParticles recoelectrons(electrons, ELECTRON_EFF_ATLAS_RUN2);
+      SmearedParticles recoelectrons(electrons, ELECTRON_EFF_ATLAS_RUN1);
       declare(recoelectrons, "Electrons");
 
       FinalState muons(Cuts::abspid == PID::MUON && Cuts::abseta < 2.50 && Cuts::pT > 10*GeV);
-      SmearedParticles recomuons(muons, MUON_EFF_ATLAS_RUN2);
+      SmearedParticles recomuons(muons, MUON_EFF_ATLAS_RUN1);
       declare(recomuons, "Muons");
 
       VisibleFinalState calofs(Cuts::abseta < 4.9 && Cuts::abspid != PID::MUON);
       MissingMomentum met(calofs);
-      SmearedMET recomet(met, MET_SMEAR_ATLAS_RUN2);
+      SmearedMET recomet(met, MET_SMEAR_ATLAS_RUN1);
       declare(recomet, "MET");
 
 
       /// Book histograms
       for (size_t i = 0; i < 7; ++i)
         book(_count_IM[i], "count_IM" + toString(i+1));
       for (size_t i = 0; i < 6; ++i)
         book(_count_EM[i], "count_EM" + toString(i+1));
 
     }
 
 
     void analyze(const Event& event) {
 
       const Jets jets = apply<JetAlg>(event, "Jets").jetsByPt(Cuts::pT > 20*GeV && Cuts::abseta < 2.8);
       const Particles elecs = apply<ParticleFinder>(event, "Electrons").particlesByPt();
       const Particles mus = apply<ParticleFinder>(event, "Muons").particlesByPt();
       MSG_DEBUG("Number of raw jets, electrons, muons = "
                 << jets.size() << ", " << elecs.size() << ", " << mus.size());
 
       // Discard jets very close to electrons, or with low track multiplicity and close to muons
       const Jets isojets = filter_discard(jets, [&](const Jet& j) {
           /// @todo Add track efficiency random filtering
           if (any(elecs, deltaRLess(j, 0.2))) return true;
           if (j.particles(Cuts::abscharge > 0 && Cuts::pT > 0.4*GeV).size() < 3 &&
               any(mus, deltaRLess(j, 0.4))) return true;
           return false;
         });
 
       // Discard electrons close to remaining jets
       const Particles isoelecs = filter_discard(elecs, [&](const Particle& e) {
           return any(isojets, deltaRLess(e, 0.4));
         });
 
       // Discard muons close to remaining jets
       const Particles isomus = filter_discard(mus, [&](const Particle& m) {
           for (const Jet& j : isojets) {
             if (deltaR(j,m) > 0.4) continue;
             if (j.particles(Cuts::abscharge > 0 && Cuts::pT > 0.4*GeV).size() > 3) return true;
           }
           return false;
         });
 
       // Calculate ETmiss
       //const Vector3& vet = apply<MissingMomentum>(event, "MET").vectorEt();
       const Vector3& vet = apply<SmearedMET>(event, "MET").vectorEt();
       const double etmiss = vet.perp();
 
 
       // Event selection cuts
       if (etmiss < 250*GeV) vetoEvent;
       // Require at least one jet with pT > 250 GeV and |eta| < 2.4
       if (filter_select(isojets, Cuts::pT > 250*GeV && Cuts::abseta < 2.4).empty()) vetoEvent;
       // Require at most 4 jets with pT > 30 GeV and |eta| < 2.8
       if (filter_select(isojets, Cuts::pT > 30*GeV).size() > 4) vetoEvent;
       // Require no isolated jets within |dphi| < 0.4 of the MET vector
       if (any(isojets, deltaPhiLess(-vet, 0.4))) vetoEvent;
       // Require no isolated electrons or muons
       if (!isoelecs.empty() || !isomus.empty()) vetoEvent;
 
 
       ////////////////////
 
 
       const double weight = 1.0;
 
       // Get ETmiss bin number and fill counters
       const int i_etmiss = binIndex(etmiss/GeV, ETMISS_CUTS);
       // Inclusive ETmiss bins
       for (int ibin = 0; ibin < 7; ++ibin)
         if (i_etmiss >= ibin) _count_IM[ibin]->fill(weight);
       // Exclusive ETmiss bins
       if (inRange(i_etmiss, 0, 6)) _count_EM[i_etmiss]->fill(weight);
 
     }
 
 
     void finalize() {
       const double norm = 3.2*crossSection()/femtobarn;
       scale(_count_IM, norm/sumOfWeights());
       scale(_count_EM, norm/sumOfWeights());
     }
 
 
   private:
 
     const vector<double> ETMISS_CUTS = { 250, 300, 350, 400, 500, 600, 700, 13000 };
     CounterPtr _count_IM[7], _count_EM[6];
 
   };
 
 
   // The hook for the plugin system
   DECLARE_RIVET_PLUGIN(ATLAS_2016_I1452559);
 
 }
diff --git a/src/Analyses/ATLAS_2016_I1457605.cc b/src/Analyses/ATLAS_2016_I1457605.cc
--- a/src/Analyses/ATLAS_2016_I1457605.cc
+++ b/src/Analyses/ATLAS_2016_I1457605.cc
@@ -1,130 +1,130 @@
 // -*- C++ -*-
 #include "Rivet/Analysis.hh"
 #include "Rivet/Projections/FinalState.hh"
 #include "Rivet/Projections/PromptFinalState.hh"
 #include "Rivet/Projections/LeadingParticlesFinalState.hh"
 #include "Rivet/Projections/FastJets.hh"
 
 namespace Rivet {
 
 
   /// Inclusive isolated prompt photon analysis with 2012 LHC data
   class ATLAS_2016_I1457605 : public Analysis {
   public:
 
     /// Constructor
     DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2016_I1457605);
 
     /// Book histograms and initialise projections before the run
     void init() {
 
       FinalState fs;
       addProjection(fs, "FS");
 
       // Consider the final state jets for the energy density calculation
       FastJets fj(fs, FastJets::KT, 0.5);
       fj.useJetArea(new fastjet::AreaDefinition(fastjet::VoronoiAreaSpec()));
       addProjection(fj, "KtJetsD05");
 
       // Consider the leading pt photon with |eta| < 2.37 and pT > 25 GeV
       LeadingParticlesFinalState photonfs(PromptFinalState(FinalState(Cuts::abseta < 2.37 && Cuts::pT > 25*GeV)));
       photonfs.addParticleId(PID::PHOTON);
       addProjection(photonfs, "LeadingPhoton");
 
       // Book the dsigma/dEt (in eta bins) histograms
       for (size_t i = 0; i < _eta_bins.size() - 1; ++i) {
         if (fuzzyEquals(_eta_bins[i], 1.37)) continue; // skip this bin
         int offset = i > 2? 0 : 1;
         book(_h_Et_photon[i] ,i + offset, 1, 1);
       }
 
     }
 
 
     /// Return eta bin for either dsigma/dET histogram (area_eta=false) or energy density correction (area_eta=true)
     size_t _getEtaBin(double eta_w, bool area_eta) const {
       const double eta = fabs(eta_w);
       if (!area_eta) {
         return binIndex(eta, _eta_bins);
       } else {
         return binIndex(eta, _eta_bins_areaoffset);
       }
     }
 
 
     /// Perform the per-event analysis
     void analyze(const Event& event) {
       // Retrieve leading photon
       Particles photons = applyProjection<LeadingParticlesFinalState>(event, "LeadingPhoton").particles();
       if (photons.size() < 1)  vetoEvent;
       const Particle& leadingPhoton = photons[0];
 
       // Veto events with photon in ECAL crack
       if (inRange(leadingPhoton.abseta(), 1.37, 1.56)) vetoEvent;
 
       // Compute isolation energy in cone of radius .4 around photon (all particles)
       FourMomentum mom_in_EtCone;
       Particles fs = applyProjection<FinalState>(event, "FS").particles();
-      foreach (const Particle& p, fs) {
+      for (const Particle& p : fs) {
         // Check if it's outside the cone of 0.4
         if (deltaR(leadingPhoton, p) >= 0.4) continue;
         // Except muons or neutrinos
         if (PID::isNeutrino(p.abspid()) || p.abspid() == PID::MUON) continue;
         // Increment isolation energy
         mom_in_EtCone += p.momentum();
       }
       // Remove the photon energy from the isolation
       mom_in_EtCone -= leadingPhoton.momentum();
 
       // Get the area-filtered jet inputs for computing median energy density, etc.
       vector<double> ptDensity;
       vector< vector<double> > ptDensities(_eta_bins_areaoffset.size()-1);
       const FastJets& fast_jets = applyProjection<FastJets>(event, "KtJetsD05");
       const auto clust_seq_area = fast_jets.clusterSeqArea();
-      foreach (const Jet& jet, fast_jets.jets()) {
+      for (const Jet& jet : fast_jets.jets()) {
         const double area = clust_seq_area->area(jet);
         if (area > 1e-3 && jet.abseta() < _eta_bins_areaoffset.back())
           ptDensities.at( _getEtaBin(jet.abseta(), true) ) += jet.pT()/area;
       }
       // Compute the median energy density, etc.
       for (size_t b = 0; b < _eta_bins_areaoffset.size()-1; ++b) {
         const int njets = ptDensities[b].size();
         ptDensity += (njets > 0) ? median(ptDensities[b]) : 0.0;
       }
       // Compute the isolation energy correction (cone area*energy density)
       const double etCone_area = PI * sqr(0.4);
       const double correction = ptDensity[_getEtaBin(leadingPhoton.abseta(), true)] * etCone_area;
 
       // Apply isolation cut on area-corrected value
       // cut is Etiso < 4.8GeV + 4.2E-03 * Et_gamma.
       if (mom_in_EtCone.Et() - correction > 4.8*GeV + 0.0042*leadingPhoton.Et()) vetoEvent;
 
       // Fill histograms
       const size_t eta_bin = _getEtaBin(leadingPhoton.abseta(), false);
       _h_Et_photon[eta_bin]->fill(leadingPhoton.Et(), event.weight());
     }
 
 
     /// Normalise histograms etc., after the run
     void finalize() {
       double sf = crossSection() / (picobarn * sumOfWeights());
       for (size_t i = 0; i < _eta_bins.size()-1; ++i) {
         if (fuzzyEquals(_eta_bins[i], 1.37)) continue;
         scale(_h_Et_photon[i], sf);
       }
     }
 
 
   private:
 
     Histo1DPtr _h_Et_photon[5];
 
     const vector<double> _eta_bins = {0.00, 0.60, 1.37, 1.56, 1.81, 2.37 };
     const vector<double> _eta_bins_areaoffset = {0.0, 1.5, 3.0};
 
   };
 
 
   DECLARE_RIVET_PLUGIN(ATLAS_2016_I1457605);
 
 }
diff --git a/src/Analyses/ATLAS_2016_I1458270.cc b/src/Analyses/ATLAS_2016_I1458270.cc
--- a/src/Analyses/ATLAS_2016_I1458270.cc
+++ b/src/Analyses/ATLAS_2016_I1458270.cc
@@ -1,210 +1,210 @@
 // -*- C++ -*-
 #include "Rivet/Analysis.hh"
 #include "Rivet/Projections/FinalState.hh"
 #include "Rivet/Projections/PromptFinalState.hh"
 #include "Rivet/Projections/FastJets.hh"
 #include "Rivet/Projections/Sphericity.hh"
 #include "Rivet/Projections/SmearedParticles.hh"
 #include "Rivet/Projections/SmearedJets.hh"
 #include "Rivet/Projections/SmearedMET.hh"
 #include "Rivet/Tools/Cutflow.hh"
 
 namespace Rivet {
 
 
   /// @brief ATLAS 0-lepton SUSY search with 3.2/fb of 13 TeV pp data
   class ATLAS_2016_I1458270 : public Analysis {
   public:
 
     /// Constructor
     DEFAULT_RIVET_ANALYSIS_CTOR(ATLAS_2016_I1458270);
 
 
     /// @name Analysis methods
     //@{
 
     /// Book histograms and initialise projections before the run
     void init() {
 
       // Initialise and register projections
       FinalState calofs(Cuts::abseta < 4.8);
       FastJets fj(calofs, FastJets::ANTIKT, 0.4);
       declare(fj, "TruthJets");
       declare(SmearedJets(fj, JET_SMEAR_ATLAS_RUN2, JET_BTAG_ATLAS_RUN2_MV2C20), "RecoJets");
 
       MissingMomentum mm(calofs);
       declare(mm, "TruthMET");
       declare(SmearedMET(mm, MET_SMEAR_ATLAS_RUN2), "RecoMET");
 
       PromptFinalState es(Cuts::abseta < 2.47 && Cuts::abspid == PID::ELECTRON, true, true);
       declare(es, "TruthElectrons");
       declare(SmearedParticles(es, ELECTRON_EFF_ATLAS_RUN2, ELECTRON_SMEAR_ATLAS_RUN2), "RecoElectrons");
 
       PromptFinalState mus(Cuts::abseta < 2.7 && Cuts::abspid == PID::MUON, true);
       declare(mus, "TruthMuons");
       declare(SmearedParticles(mus, MUON_EFF_ATLAS_RUN2, MUON_SMEAR_ATLAS_RUN2), "RecoMuons");
 
 
       // Book histograms/counters
       book(_h_2jl, "2jl");
       book(_h_2jm, "2jm");
       book(_h_2jt, "2jt");
       book(_h_4jt, "4jt");
       book(_h_5j , "5j");
       book(_h_6jm, "6jm");
       book(_h_6jt, "6jt");
 
 
       // Book cut-flows
       const vector<string> cuts2j = {"Pre-sel+MET+pT1", "Njet", "Dphi_min(j,MET)", "pT2", "MET/sqrtHT", "m_eff(incl)"};
       _flows.addCutflow("2jl", cuts2j);
       _flows.addCutflow("2jm", cuts2j);
       _flows.addCutflow("2jt", cuts2j);
       const vector<string> cutsXj = {"Pre-sel+MET+pT1", "Njet", "Dphi_min(j,MET)", "pT2", "pT4", "Aplanarity", "MET/m_eff(Nj)", "m_eff(incl)"};
       _flows.addCutflow("4jt", cutsXj);
       _flows.addCutflow("5j",  cutsXj);
       _flows.addCutflow("6jm", cutsXj);
       _flows.addCutflow("6jt", cutsXj);
 
     }
 
 
     /// Perform the per-event analysis
     void analyze(const Event& event) {
 
       _flows.fillinit();
 
       // Same MET cut for all signal regions
       //const Vector3 vmet = -apply<MissingMomentum>(event, "TruthMET").vectorEt();
       const Vector3 vmet = -apply<SmearedMET>(event, "RecoMET").vectorEt();
       const double met = vmet.mod();
       if (met < 200*GeV) vetoEvent;
 
       // Get baseline electrons, muons, and jets
       Particles elecs = apply<ParticleFinder>(event, "RecoElectrons").particles(Cuts::pT > 10*GeV);
       Particles muons = apply<ParticleFinder>(event, "RecoMuons").particles(Cuts::pT > 10*GeV);
       Jets jets = apply<JetAlg>(event, "RecoJets").jetsByPt(Cuts::pT > 20*GeV && Cuts::abseta < 2.8); ///< @todo Pile-up subtraction
 
       // Jet/electron/muons overlap removal and selection
       // Remove any |eta| < 2.8 jet within dR = 0.2 of a baseline electron
       for (const Particle& e : elecs)
         ifilter_discard(jets, deltaRLess(e, 0.2, RAPIDITY));
       // Remove any electron or muon with dR < 0.4 of a remaining (Nch > 3) jet
       for (const Jet& j : jets) {
         /// @todo Add track efficiency random filtering
         ifilter_discard(elecs, deltaRLess(j, 0.4, RAPIDITY));
         if (j.particles(Cuts::abscharge > 0 && Cuts::pT > 500*MeV).size() >= 3)
           ifilter_discard(muons, deltaRLess(j, 0.4, RAPIDITY));
       }
       // Discard the softer of any electrons within dR < 0.05
       for (size_t i = 0; i < elecs.size(); ++i) {
         const Particle& e1 = elecs[i];
         /// @todo Would be nice to pass a "tail view" for the filtering, but awkward without range API / iterator guts
         ifilter_discard(elecs, [&](const Particle& e2){ return e2.pT() < e1.pT() && deltaR(e1,e2) < 0.05; });
       }
 
       // Loose electron selection
-      ifilter_discard(elecs, ParticleEffFilter(ELECTRON_IDEFF_ATLAS_RUN2_LOOSE));
+      ifilter_select(elecs, ParticleEffFilter(ELECTRON_IDEFF_ATLAS_RUN2_LOOSE));
 
       // Veto the event if there are any remaining baseline leptons
       if (!elecs.empty()) vetoEvent;
       if (!muons.empty()) vetoEvent;
 
       // Signal jets have pT > 50 GeV
       const Jets jets50 = filter_select(jets, Cuts::pT > 50*GeV);
       if (jets50.size() < 2) vetoEvent;
       vector<double> jetpts; transform(jets, jetpts, pT);
       vector<double> jetpts50; transform(jets50, jetpts50, pT);
       const double j1pt = jetpts50[0];
       const double j2pt = jetpts50[1];
       if (j1pt < 200*GeV) vetoEvent;
 
       // Construct multi-jet observables
       const double ht = sum(jetpts, 0.0);
       const double met_sqrt_ht = met / sqrt(ht);
       const double meff_incl = sum(jetpts50, met);
 
       // Get dphis between MET and jets
       vector<double> dphimets50; transform(jets50, dphimets50, deltaPhiWRT(vmet));
       const double min_dphi_met_2 = min(head(dphimets50, 2));
       const double min_dphi_met_3 = min(head(dphimets50, 3));
       MSG_DEBUG(dphimets50 << ", " << min_dphi_met_2 << ", " << min_dphi_met_3);
 
       // Jet aplanarity
       Sphericity sph; sph.calc(jets);
       const double aplanarity = sph.aplanarity();
 
 
       // Fill SR counters
       // 2-jet SRs
       if (_flows["2jl"].filltail({true, true, min_dphi_met_2 > 0.8, j2pt > 200*GeV,
               met_sqrt_ht > 15*sqrt(GeV), meff_incl > 1200*GeV})) _h_2jl->fill(event.weight());
       if (_flows["2jm"].filltail({j1pt > 300*GeV, true, min_dphi_met_2 > 0.4, j2pt > 50*GeV,
               met_sqrt_ht > 15*sqrt(GeV), meff_incl > 1600*GeV})) _h_2jm->fill(event.weight());
       if (_flows["2jt"].filltail({true, true, min_dphi_met_2 > 0.8, j2pt > 200*GeV,
               met_sqrt_ht > 20*sqrt(GeV), meff_incl > 2000*GeV})) _h_2jt->fill(event.weight());
 
       // Upper multiplicity SRs
       const double j4pt = jets50.size() > 3 ? jetpts50[3] : -1;
       const double j5pt = jets50.size() > 4 ? jetpts50[4] : -1;
       const double j6pt = jets50.size() > 5 ? jetpts50[5] : -1;
       const double meff_4 = jets50.size() > 3 ? sum(head(jetpts50, 4), 0.0) : -1;
       const double meff_5 = jets50.size() > 4 ? meff_4 + jetpts50[4] : -1;
       const double meff_6 = jets50.size() > 5 ? meff_5 + jetpts50[5] : -1;
       const double met_meff_4 = met / meff_4;
       const double met_meff_5 = met / meff_5;
       const double met_meff_6 = met / meff_6;
       const double min_dphi_met_more = jets50.size() > 3 ? min(tail(dphimets50, -3)) : -1;
 
       if (_flows["4jt"].filltail({true, jets50.size() >= 4, min_dphi_met_3 > 0.4 && min_dphi_met_more > 0.2,
               jetpts[1] > 200*GeV, j4pt > 100*GeV, aplanarity > 0.04, met_meff_4 > 0.20, meff_incl > 2200*GeV}))
         _h_4jt->fill(event.weight());
       if (_flows["5j"].filltail({true, jets50.size() >= 5, min_dphi_met_3 > 0.4 && min_dphi_met_more > 0.2,
               jetpts[1] > 200*GeV, j4pt > 100*GeV && j5pt > 50*GeV, aplanarity > 0.04, met_meff_5 > 0.25, meff_incl > 1600*GeV}))
         _h_5j->fill(event.weight());
       if (_flows["6jm"].filltail({true, jets50.size() >= 6, min_dphi_met_3 > 0.4 && min_dphi_met_more > 0.2,
               jetpts[1] > 200*GeV, j4pt > 100*GeV && j6pt > 50*GeV, aplanarity > 0.04, met_meff_6 > 0.25, meff_incl > 1600*GeV}))
         _h_6jm->fill(event.weight());
       if (_flows["6jt"].filltail({true, jets50.size() >= 6, min_dphi_met_3 > 0.4 && min_dphi_met_more > 0.2,
               jetpts[1] > 200*GeV, j4pt > 100*GeV && j6pt > 50*GeV, aplanarity > 0.04, met_meff_6 > 0.20, meff_incl > 2000*GeV}))
         _h_6jt->fill(event.weight());
 
     }
 
 
     /// Normalise histograms etc., after the run
     void finalize() {
 
       const double sf = 3.2*crossSection()/femtobarn/sumOfWeights();
       scale({_h_2jl, _h_2jl, _h_2jl}, sf);
       scale({_h_4jt, _h_5j}, sf);
       scale({_h_6jm, _h_6jt}, sf);
 
       MSG_INFO("CUTFLOWS:\n\n" << _flows);
 
     }
 
     //@}
 
 
   private:
 
     /// @name Histograms
     //@{
     CounterPtr _h_2jl, _h_2jm, _h_2jt;
     CounterPtr _h_4jt, _h_5j;
     CounterPtr _h_6jm, _h_6jt;
     //@}
 
     /// Cut-flows
     Cutflows _flows;
 
   };
 
 
 
   // The hook for the plugin system
   DECLARE_RIVET_PLUGIN(ATLAS_2016_I1458270);
 
 
 }
diff --git a/src/Analyses/BABAR_2007_S7266081.cc b/src/Analyses/BABAR_2007_S7266081.cc
--- a/src/Analyses/BABAR_2007_S7266081.cc
+++ b/src/Analyses/BABAR_2007_S7266081.cc
@@ -1,182 +1,181 @@
 // -*- C++ -*-
 #include <iostream>
 #include "Rivet/Analysis.hh"
 #include "Rivet/Projections/UnstableFinalState.hh"
 
 namespace Rivet {
 
 
   /// @brief BABAR tau lepton to three charged hadrons
   /// @author Peter Richardson
   class BABAR_2007_S7266081 : public Analysis {
   public:
 
     BABAR_2007_S7266081()
       : Analysis("BABAR_2007_S7266081"),
         _weight_total(0),
         _weight_pipipi(0), _weight_Kpipi(0), _weight_KpiK(0), _weight_KKK(0)
     {   }
 
 
     void init() {
       declare(UnstableFinalState(), "UFS");
       book(_hist_pipipi_pipipi , 1, 1, 1);
       book(_hist_pipipi_pipi   , 2, 1, 1);
       book(_hist_Kpipi_Kpipi   , 3, 1, 1);
       book(_hist_Kpipi_Kpi     , 4, 1, 1);
       book(_hist_Kpipi_pipi    , 5, 1, 1);
       book(_hist_KpiK_KpiK     , 6, 1, 1);
       book(_hist_KpiK_KK       , 7, 1, 1);
       book(_hist_KpiK_piK      , 8, 1, 1);
       book(_hist_KKK_KKK       , 9, 1, 1);
       book(_hist_KKK_KK        ,10, 1, 1);
     }
 
 
     void analyze(const Event& e) {
+      double weight = e.weight();
       // Find the taus
       Particles taus;
-      const UnstableFinalState& ufs = apply<UnstableFinalState>(e, "UFS");
-      foreach (const Particle& p, ufs.particles()) {
-        if (p.abspid() != PID::TAU) continue;
-        _weight_total += 1.;
+      foreach(const Particle& p, apply<UnstableFinalState>(e, "UFS").particles(Cuts::pid==PID::TAU)) {
+        _weight_total += weight;
         Particles pip, pim, Kp, Km;
         unsigned int nstable = 0;
         // Get the boost to the rest frame
         LorentzTransform cms_boost;
         if (p.p3().mod() > 1*MeV)
           cms_boost = LorentzTransform::mkFrameTransformFromBeta(p.momentum().betaVec());
         // Find the decay products we want
         findDecayProducts(p.genParticle(), nstable, pip, pim, Kp, Km);
         if (p.pid() < 0) {
           swap(pip, pim);
           swap(Kp, Km );
         }
         if (nstable != 4) continue;
         // pipipi
         if (pim.size() == 2 && pip.size() == 1) {
-          _weight_pipipi += 1.;
+          _weight_pipipi += weight;
           _hist_pipipi_pipipi->
-            fill((pip[0].momentum()+pim[0].momentum()+pim[1].momentum()).mass(),1.);
+            fill((pip[0].momentum()+pim[0].momentum()+pim[1].momentum()).mass(), weight);
           _hist_pipipi_pipi->
-            fill((pip[0].momentum()+pim[0].momentum()).mass(),1.);
+            fill((pip[0].momentum()+pim[0].momentum()).mass(), weight);
           _hist_pipipi_pipi->
-            fill((pip[0].momentum()+pim[1].momentum()).mass(),1.);
+            fill((pip[0].momentum()+pim[1].momentum()).mass(), weight);
         }
         else if (pim.size() == 1 && pip.size() == 1 && Km.size() == 1) {
-          _weight_Kpipi += 1.;
+          _weight_Kpipi += weight;
           _hist_Kpipi_Kpipi->
-            fill((pim[0].momentum()+pip[0].momentum()+Km[0].momentum()).mass(),1.);
+            fill((pim[0].momentum()+pip[0].momentum()+Km[0].momentum()).mass(), weight);
           _hist_Kpipi_Kpi->
-            fill((pip[0].momentum()+Km[0].momentum()).mass(),1.);
+            fill((pip[0].momentum()+Km[0].momentum()).mass(), weight);
           _hist_Kpipi_pipi->
-            fill((pim[0].momentum()+pip[0].momentum()).mass(),1.);
+            fill((pim[0].momentum()+pip[0].momentum()).mass(), weight);
         }
         else if (Kp.size() == 1 && Km.size() == 1 && pim.size() == 1) {
-          _weight_KpiK += 1.;
+          _weight_KpiK += weight;
           _hist_KpiK_KpiK->
-            fill((Kp[0].momentum()+Km[0].momentum()+pim[0].momentum()).mass(),1.);
+            fill((Kp[0].momentum()+Km[0].momentum()+pim[0].momentum()).mass(), weight);
           _hist_KpiK_KK->
-            fill((Kp[0].momentum()+Km[0].momentum()).mass(),1.);
+            fill((Kp[0].momentum()+Km[0].momentum()).mass(), weight);
           _hist_KpiK_piK->
-            fill((Kp[0].momentum()+pim[0].momentum()).mass(),1.);
+            fill((Kp[0].momentum()+pim[0].momentum()).mass(), weight);
         }
         else if (Kp.size() == 1 && Km.size() == 2) {
-          _weight_KKK += 1.;
+          _weight_KKK += weight;
           _hist_KKK_KKK->
-            fill((Kp[0].momentum()+Km[0].momentum()+Km[1].momentum()).mass(),1.);
+            fill((Kp[0].momentum()+Km[0].momentum()+Km[1].momentum()).mass(), weight);
           _hist_KKK_KK->
-            fill((Kp[0].momentum()+Km[0].momentum()).mass(),1.);
+            fill((Kp[0].momentum()+Km[0].momentum()).mass(), weight);
           _hist_KKK_KK->
-            fill((Kp[0].momentum()+Km[1].momentum()).mass(),1.);
+            fill((Kp[0].momentum()+Km[1].momentum()).mass(), weight);
         }
       }
     }
 
 
     void finalize() {
       if (_weight_pipipi > 0.) {
         scale(_hist_pipipi_pipipi, 1.0/_weight_pipipi);
         scale(_hist_pipipi_pipi  , 0.5/_weight_pipipi);
       }
       if (_weight_Kpipi > 0.) {
         scale(_hist_Kpipi_Kpipi  , 1.0/_weight_Kpipi);
         scale(_hist_Kpipi_Kpi    , 1.0/_weight_Kpipi);
         scale(_hist_Kpipi_pipi   , 1.0/_weight_Kpipi);
       }
       if (_weight_KpiK > 0.) {
         scale(_hist_KpiK_KpiK    , 1.0/_weight_KpiK);
         scale(_hist_KpiK_KK      , 1.0/_weight_KpiK);
         scale(_hist_KpiK_piK     , 1.0/_weight_KpiK);
       }
       if (_weight_KKK > 0.) {
         scale(_hist_KKK_KKK      , 1.0/_weight_KKK);
         scale(_hist_KKK_KK       , 0.5/_weight_KKK);
       }
       /// @note Using autobooking for these scatters since their x values are not really obtainable from the MC data
       bookScatter2D(11, 1, 1, true)->point(0).setY(100*_weight_pipipi/_weight_total, 100*sqrt(_weight_pipipi)/_weight_total);
       bookScatter2D(12, 1, 1, true)->point(0).setY(100*_weight_Kpipi/_weight_total, 100*sqrt(_weight_Kpipi)/_weight_total);
       bookScatter2D(13, 1, 1, true)->point(0).setY(100*_weight_KpiK/_weight_total, 100*sqrt(_weight_KpiK)/_weight_total);
       bookScatter2D(14, 1, 1, true)->point(0).setY(100*_weight_KKK/_weight_total, 100*sqrt(_weight_KKK)/_weight_total);
     }
 
 
   private:
 
     //@{
 
     // Histograms
     Histo1DPtr _hist_pipipi_pipipi, _hist_pipipi_pipi;
     Histo1DPtr _hist_Kpipi_Kpipi, _hist_Kpipi_Kpi, _hist_Kpipi_pipi;
     Histo1DPtr _hist_KpiK_KpiK, _hist_KpiK_KK, _hist_KpiK_piK;
     Histo1DPtr _hist_KKK_KKK, _hist_KKK_KK;
 
     // Weights counters
     double _weight_total, _weight_pipipi, _weight_Kpipi, _weight_KpiK, _weight_KKK;
 
     //@}
 
     void findDecayProducts(const GenParticle* p,
                            unsigned int & nstable,
                            Particles& pip, Particles& pim,
                            Particles& Kp, Particles& Km) {
       const GenVertex* dv = p->end_vertex();
       /// @todo Use better looping
       for (GenVertex::particles_out_const_iterator pp = dv->particles_out_const_begin(); pp != dv->particles_out_const_end(); ++pp) {
         int id = (*pp)->pdg_id();
         if (id == PID::PI0 )
           ++nstable;
         else if (id == PID::K0S)
           ++nstable;
         else if (id == PID::PIPLUS) {
           pip.push_back(Particle(**pp));
           ++nstable;
         }
         else if (id == PID::PIMINUS) {
           pim.push_back(Particle(**pp));
           ++nstable;
         }
         else if (id == PID::KPLUS) {
           Kp.push_back(Particle(**pp));
           ++nstable;
         }
         else if (id == PID::KMINUS) {
           Km.push_back(Particle(**pp));
           ++nstable;
         }
         else if ((*pp)->end_vertex()) {
           findDecayProducts(*pp, nstable, pip, pim, Kp, Km);
         }
         else
           ++nstable;
       }
     }
 
 
   };
 
 
   // The hook for the plugin system
   DECLARE_RIVET_PLUGIN(BABAR_2007_S7266081);
 
 }
diff --git a/src/Analyses/CMS_2013_I1223519.cc b/src/Analyses/CMS_2013_I1223519.cc
new file mode 100644
--- /dev/null
+++ b/src/Analyses/CMS_2013_I1223519.cc
@@ -0,0 +1,249 @@
+// -*- C++ -*-
+#include "Rivet/Analysis.hh"
+#include "Rivet/Projections/FinalStates.hh"
+#include "Rivet/Projections/FastJets.hh"
+#include "Rivet/Projections/MissingMomentum.hh"
+#include "Rivet/Projections/Smearing.hh"
+#include <bitset>
+
+namespace Rivet {
+
+
+  /// @brief Add a short analysis description here
+  class CMS_2013_I1223519 : public Analysis {
+  public:
+
+    /// Constructor
+    DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2013_I1223519);
+
+
+    /// @name Analysis methods
+    //@{
+
+    /// Book histograms and initialise projections before the run
+    void init() {
+
+      // Initialise and register projections
+      FinalState calofs(Cuts::abseta < 5.0);
+      declare(calofs, "Clusters");
+
+      MissingMomentum mm(calofs);
+      declare(mm, "TruthMET");
+      declare(SmearedMET(mm, MET_SMEAR_CMS_RUN2), "MET");
+
+      FastJets fj(calofs, FastJets::ANTIKT, 0.5);
+      declare(fj, "TruthJets");
+      declare(SmearedJets(fj, JET_SMEAR_CMS_RUN2, [](const Jet& j) {
+            if (j.abseta() > 2.4) return 0.;
+            return j.bTagged() ? 0.65 : 0.01; }), "Jets"); ///< @note Charm mistag and exact b-tag eff not given
+
+      FinalState ys(Cuts::abspid == PID::PHOTON && Cuts::abseta < 5.0);
+      declare(ys, "TruthPhotons");
+      declare(SmearedParticles(ys, PHOTON_EFF_CMS_RUN2 /*, PHOTON_SMEAR_CMS_RUN2 */), "Photons");
+
+      FinalState es(Cuts::abspid == PID::ELECTRON && Cuts::abseta < 2.5);
+      declare(es, "TruthElectrons");
+      declare(SmearedParticles(es, ELECTRON_EFF_CMS_RUN2, ELECTRON_SMEAR_CMS_RUN2), "Electrons");
+
+      FinalState mus(Cuts::abspid == PID::MUON && Cuts::abseta < 2.4);
+      declare(mus, "TruthMuons");
+      declare(SmearedParticles(mus, MUON_EFF_CMS_RUN2, MUON_SMEAR_CMS_RUN2), "Muons");
+
+      ChargedFinalState cfs(Cuts::abseta < 2.5);
+      declare(cfs, "TruthTracks");
+      declare(SmearedParticles(cfs, TRK_EFF_CMS_RUN2), "Tracks");
+
+
+      // Book histograms
+      _h_alphaT23 = bookHisto1D("alphaT23", 15, 0, 3);
+      _h_alphaT4 = bookHisto1D("alphaT4", 15, 0, 3);
+      /// @todo Add HT histograms
+
+      // Book counters
+      _h_srcounters.resize(8*7 + 3);
+      for (size_t inj = 0; inj < 2; ++inj) {
+        const size_t njmax = inj + 3;
+        for (size_t nb = 0; nb < njmax; ++nb) {
+          for (size_t iht = 0; iht < 8; ++iht) {
+            const size_t i = 8 * ((inj == 0 ? 0 : 3) + nb) + iht;
+            _h_srcounters[i] = bookCounter("srcount_j" + toString(njmax) + "_b" + toString(nb) + "_ht" + toString(iht+1));
+          }
+        }
+      }
+      // Special nj >= 4, nb >= 4 bins
+      for (size_t iht = 0; iht < 3; ++iht) {
+        _h_srcounters[8*7 + iht] = bookCounter("srcount_j4_b4_ht" + toString(iht+1));
+      }
+
+    }
+
+
+    /// Perform the per-event analysis
+    void analyze(const Event& event) {
+
+      // Get baseline photons, electrons & muons
+      Particles photons = apply<ParticleFinder>(event, "Photons").particles(Cuts::pT > 25*GeV);
+      Particles elecs = apply<ParticleFinder>(event, "Electrons").particles(Cuts::pT > 10*GeV);
+      Particles muons = apply<ParticleFinder>(event, "Muons").particles(Cuts::pT > 10*GeV);
+
+      // Electron/muon isolation (guesswork/copied from other CMS analysis -- paper is unspecific)
+      const Particles calofs = apply<ParticleFinder>(event, "Clusters").particles();
+      ifilter_discard(photons, [&](const Particle& y) {
+          double ptsum = -y.pT();
+          for (const Particle& p : calofs)
+            if (deltaR(p,y) < 0.3) ptsum += p.pT();
+          return ptsum / y.pT() > 0.1;
+        });
+      ifilter_discard(elecs, [&](const Particle& e) {
+          double ptsum = -e.pT();
+          for (const Particle& p : calofs)
+            if (deltaR(p,e) < 0.3) ptsum += p.pT();
+          return ptsum / e.pT() > 0.1;
+        });
+      ifilter_discard(muons, [&](const Particle& m) {
+          double ptsum = -m.pT();
+          for (const Particle& p : calofs)
+            if (deltaR(p,m) < 0.3) ptsum += p.pT();
+          return ptsum / m.pT() > 0.2;
+        });
+
+      // Veto the event if there are any remaining baseline photons or leptons
+      if (!photons.empty()) vetoEvent;
+      if (!elecs.empty()) vetoEvent;
+      if (!muons.empty()) vetoEvent;
+
+
+      // Get jets and apply jet-based event-selection cuts
+      const JetAlg& jetproj = apply<JetAlg>(event, "Jets");
+      const Jets alljets = jetproj.jetsByPt(Cuts::abseta < 3.0 && Cuts::Et > 37*GeV); //< most inclusive jets requirement
+      if (filter_select(alljets, Cuts::Et > 73*GeV).size() < 2) vetoEvent; //< most inclusive lead jets requirement
+
+      // Filter jets into different Et requirements & compute corresponding HTs
+      /// @note It's not clear if different HTs are used to choose the HT bins
+      const Jets jets37 = filter_select(alljets, Cuts::Et > 37*GeV);
+      const Jets jets43 = filter_select(jets37, Cuts::Et > 43*GeV);
+      const Jets jets50 = filter_select(jets43, Cuts::Et > 50*GeV);
+      const double ht37 = sum(jets37, Et, 0.0);
+      const double ht43 = sum(jets43, Et, 0.0);
+      const double ht50 = sum(jets50, Et, 0.0);
+
+      // Find the relevant HT bin and apply leading jet event-selection cuts
+      static const vector<double> htcuts = { /* 275., 325., */ 375., 475., 575., 675., 775., 875.}; //< comment to avoid jets50 "fall-down"
+      const int iht = inRange(ht37, 275*GeV, 325*GeV) ? 0 : inRange(ht43, 325*GeV, 375*GeV) ? 1 : (2+binIndex(ht50, htcuts, true));
+      MSG_TRACE("HT = {" << ht37 << ", " << ht43 << ", " << ht50 << "} => IHT = " << iht);
+      if (iht < 0) vetoEvent;
+      if (iht == 1 && filter_select(jets43, Cuts::Et > 78*GeV).size() < 2) vetoEvent;
+      if (iht >= 2 && filter_select(jets50, Cuts::Et > 100*GeV).size() < 2) vetoEvent;
+
+      // Create references for uniform access to relevant set of jets & HT
+      const double etcut = iht == 0 ? 37. : iht == 1 ? 43. : 50.;
+      const double& ht = iht == 0 ? ht37 : iht == 1 ? ht43 : ht50;
+      const Jets& jets = iht == 0 ? jets37 : iht == 1 ? jets43 : jets50;
+      if (!jetproj.jets(Cuts::abseta > 3 && Cuts::Et > etcut*GeV).empty()) vetoEvent;
+      const size_t nj = jets.size();
+      const size_t nb = count_if(jets.begin(), jets.end(), [](const Jet& j) { return j.bTagged(Cuts::pT > 5*GeV); });
+
+      // Compute HTmiss = pT of 4-vector sum of jet momenta
+      const FourMomentum jsum = sum(jets, mom, FourMomentum());
+      const double htmiss = jsum.pT();
+
+      // Require HTmiss / ETmiss < 1.25
+      const double etmiss = apply<SmearedMET>(event, "MET").met();
+      if (htmiss/etmiss > 1.25) vetoEvent;
+
+      // Compute DeltaHT = minimum difference of "dijet" ETs, i.e. max(|1+2-3|, |1+3-2|, |2+3-1|)
+      double deltaht = -1;
+      vector<double> jetets; transform(jets, jetets, Et);
+      for (int i = 1; i < (1 << (jetets.size()-1)); ++i) { // count from 1 to 2**N-1, i.e. through all heterogeneous bitmasks with MSB(2**N)==0
+        const bitset<10> bits(i); /// @warning There'd better not be more than 10 jets...
+        const double htdiff = partition_diff(bits, jetets);
+        // MSG_INFO(bits.to_string() << " => " << htdiff);
+        if (deltaht < 0 || htdiff < deltaht) deltaht = htdiff;
+      }
+      MSG_DEBUG("dHT_bitmask = " << deltaht);
+
+      // Cross-check calculation in 2- and 3-jet cases
+      // if (jets.size() == 2) {
+      //   MSG_INFO("dHT2 = " << fabs(jets[0].Et() - jets[1].Et()));
+      // } else if (jets.size() == 3) {
+      //   double deltaht_01_2 = fabs(jets[0].Et()+jets[1].Et()-jets[2].Et());
+      //   double deltaht_02_1 = fabs(jets[0].Et()+jets[2].Et()-jets[1].Et());
+      //   double deltaht_12_0 = fabs(jets[1].Et()+jets[2].Et()-jets[0].Et());
+      //   MSG_INFO("dHT3 = " << min({deltaht_01_2, deltaht_02_1, deltaht_12_0}));
+      // }
+
+      // Compute alphaT from the above
+      double alphaT = fabs(0.5*((ht-deltaht)/(sqrt((ht*ht)-(htmiss*htmiss)))));
+      if (alphaT < 0.55) vetoEvent;
+
+      /// @todo Need to include trigger efficiency sampling or weighting?
+
+      // Fill histograms
+      const double weight = event.weight();
+      const size_t inj = nj < 4 ? 0 : 1;
+      const size_t inb = nb < 4 ? nb : 4;
+      if (iht >= 2)
+        (inj == 0 ? _h_alphaT23 : _h_alphaT4)->fill(alphaT, weight);
+
+      // Fill the appropriate counter -- after working out the irregular SR bin index! *sigh*
+      size_t i = 8 * ((inj == 0 ? 0 : 3) + inb) + iht;
+      if (inj == 1 && inb == 4) i = 8*7 + (iht < 3 ? iht : 2);
+      MSG_INFO("inj = " << inj << ", inb = " << inb << ", i = " << i);
+      _h_srcounters[i]->fill(weight);
+
+    }
+
+
+    /// Normalise histograms etc., after the run
+    void finalize() {
+
+      const double sf = crossSection()/femtobarn*11.7/sumOfWeights();
+      scale({_h_alphaT23,_h_alphaT4}, sf);
+      for (size_t i = 0; i < 8*7+3; ++i)
+        scale(_h_srcounters[i], sf);
+
+    }
+
+    //@}
+
+
+    /// @name Utility functions for partitioning jet pTs into two groups and summing/diffing them
+    //@{
+
+    /// Sum the given values into two subsets according to the provided bitmask
+    template <size_t N>
+    pair<double, double> partition_sum(const bitset<N>& mask, const vector<double>& vals) const {
+      pair<double, double> rtn(0., 0.);
+      for (size_t i = 0; i < vals.size(); ++i) {
+        (!mask[vals.size()-1-i] ? rtn.first : rtn.second) += vals[i];
+      }
+      return rtn;
+    }
+
+    /// Return the difference between summed subsets according to the provided bitmask
+    template <size_t N>
+    double partition_diff(const bitset<N>& mask, const vector<double>& vals) const {
+      const pair<double, double> sums = partition_sum(mask, vals);
+      const double diff = fabs(sums.first - sums.second);
+      MSG_TRACE(mask.to_string() << ": " << sums.first << "/" << sums.second << " => " << diff);
+      return diff;
+    }
+
+    //@}
+
+
+    /// @name Histograms
+    //@{
+    Histo1DPtr _h_alphaT23, _h_alphaT4;
+    vector<CounterPtr> _h_srcounters;
+    //@}
+
+
+  };
+
+
+  // The hook for the plugin system
+  DECLARE_RIVET_PLUGIN(CMS_2013_I1223519);
+
+
+}
diff --git a/src/Analyses/CMS_2016_PAS_SUS_16_14.cc b/src/Analyses/CMS_2016_PAS_SUS_16_14.cc
new file mode 100644
--- /dev/null
+++ b/src/Analyses/CMS_2016_PAS_SUS_16_14.cc
@@ -0,0 +1,201 @@
+// -*- C++ -*-
+#include "Rivet/Analysis.hh"
+#include "Rivet/Projections/FinalState.hh"
+#include "Rivet/Projections/PromptFinalState.hh"
+#include "Rivet/Projections/FastJets.hh"
+#include "Rivet/Projections/Sphericity.hh"
+#include "Rivet/Projections/SmearedParticles.hh"
+#include "Rivet/Projections/SmearedJets.hh"
+#include "Rivet/Projections/SmearedMET.hh"
+#include "Rivet/Tools/Cutflow.hh"
+
+namespace Rivet {
+
+
+  /// @brief CMS 2016 0-lepton SUSY search, from 13/fb PAS note
+  class CMS_2016_PAS_SUS_16_14 : public Analysis {
+  public:
+
+    /// Constructor
+    DEFAULT_RIVET_ANALYSIS_CTOR(CMS_2016_PAS_SUS_16_14);
+
+
+    /// @name Analysis methods
+    //@{
+
+    /// Book histograms and initialise projections before the run
+    void init() {
+
+      // Initialise and register projections
+      FinalState calofs(Cuts::abseta < 5.0);
+      FastJets fj(calofs, FastJets::ANTIKT, 0.4);
+      declare(fj, "TruthJets");
+      declare(SmearedJets(fj, JET_SMEAR_CMS_RUN2, [](const Jet& j) {
+            if (j.abseta() > 2.5) return 0.;
+            return j.bTagged() ? 0.55 : j.cTagged() ? 0.12 : 0.016; }), "Jets");
+
+      FinalState es(Cuts::abspid == PID::ELECTRON && Cuts::abseta < 2.5);
+      declare(es, "TruthElectrons");
+      declare(SmearedParticles(es, ELECTRON_EFF_CMS_RUN2, ELECTRON_SMEAR_CMS_RUN2), "Electrons");
+
+      FinalState mus(Cuts::abspid == PID::MUON && Cuts::abseta < 2.4);
+      declare(mus, "TruthMuons");
+      declare(SmearedParticles(mus, MUON_EFF_CMS_RUN2, MUON_SMEAR_CMS_RUN2), "Muons");
+
+      FinalState isofs(Cuts::abseta < 3.0 && Cuts::abspid != PID::ELECTRON && Cuts::abspid != PID::MUON);
+      declare(isofs, "IsoFS");
+      FinalState cfs(Cuts::abseta < 2.5 && Cuts::abscharge != 0);
+      declare(cfs, "TruthTracks");
+      declare(SmearedParticles(cfs, TRK_EFF_CMS_RUN2), "Tracks");
+
+      // Book histograms/counters
+      _h_srcounts.resize(160);
+      for (size_t ij = 0; ij < 4; ++ij) {
+        for (size_t ib = 0; ib < 4; ++ib) {
+          for (size_t ih = 0; ih < 10; ++ih) {
+            const size_t i = 40*ij + 10*ib + ih;
+            _h_srcounts[i] = bookCounter(toString(2*ij+3) + "j-" + toString(ib) + "b-" + toString(ih));
+          }
+        }
+      }
+
+    }
+
+
+    /// Perform the per-event analysis
+    void analyze(const Event& event) {
+
+      // Get jets and require Nj >= 3
+      const Jets jets24 = apply<JetAlg>(event, "Jets").jetsByPt(Cuts::pT > 30*GeV && Cuts::abseta < 2.4);
+      if (jets24.size() < 3) vetoEvent;
+
+      // HT cut
+      vector<double> jetpts24; transform(jets24, jetpts24, pT);
+      const double ht = sum(jetpts24, 0.0);
+      if (ht < 300*GeV) vetoEvent;
+
+      // HTmiss cut
+      const Jets jets50 = apply<JetAlg>(event, "Jets").jetsByPt(Cuts::pT > 30*GeV && Cuts::abseta < 5.0);
+      const FourMomentum htmissvec = -sum(jets24, mom, FourMomentum());
+      const double htmiss = htmissvec.pT();
+      if (htmissvec.pT() < 300*GeV) vetoEvent;
+
+
+      // Get baseline electrons & muons
+      Particles elecs = apply<ParticleFinder>(event, "Electrons").particles(Cuts::pT > 10*GeV);
+      Particles muons = apply<ParticleFinder>(event, "Muons").particles(Cuts::pT > 10*GeV);
+
+      // Electron/muon isolation
+      const Particles calofs = apply<ParticleFinder>(event, "IsoFS").particles();
+      ifilter_discard(elecs, [&](const Particle& e) {
+          const double R = max(0.05, min(0.2, 10*GeV/e.pT()));
+          double ptsum = -e.pT();
+          for (const Particle& p : calofs)
+            if (deltaR(p,e) < R) ptsum += p.pT();
+          return ptsum / e.pT() > 0.1;
+        });
+      ifilter_discard(muons, [&](const Particle& m) {
+          const double R = max(0.05, min(0.2, 10*GeV/m.pT()));
+          double ptsum = -m.pT();
+          for (const Particle& p : calofs)
+            if (deltaR(p,m) < R) ptsum += p.pT();
+          return ptsum / m.pT() > 0.2;
+        });
+
+      // Veto the event if there are any remaining baseline leptons
+      if (!elecs.empty()) vetoEvent;
+      if (!muons.empty()) vetoEvent;
+
+
+      // Get isolated tracks
+      Particles trks25 = apply<ParticleFinder>(event, "Tracks").particles();
+      ifilter_discard(trks25, [&](const Particle& t) {
+          double ptsum = -t.pT();
+          for (const Particle& p : trks25)
+            if (deltaR(p,t) < 0.3) ptsum += p.pT();
+          return ptsum/t.pT() > ((t.abspid() == PID::ELECTRON || t.abspid() == PID::MUON) ? 0.2 : 0.1);
+        });
+      const Particles trks = filter_select(trks25, Cuts::abseta < 2.4);
+
+      // Isolated track pT, pTmiss and mT cut
+      // mT^2 = m1^2 + m2^2 + 2(ET1 ET2 - pT1 . pT2))
+      // => mT0^2 = 2(ET1 |pT2| - pT1 . pT2)) for m1, m2 -> 0
+      FourMomentum ptmissvec = htmissvec; ///< @todo Can we do better? No e,mu left...
+      const double ptmiss = ptmissvec.pT();
+      for (const Particle& t : trks) {
+        const double ptcut = (t.abspid() == PID::ELECTRON || t.abspid() == PID::MUON) ? 5*GeV : 10*GeV;
+        const double mT = sqrt( t.mass2() + 2*(t.Et()*ptmiss - t.pT()*ptmiss*cos(deltaPhi(t,ptmissvec))) );
+        if (mT < 100*GeV && t.pT() < ptcut) vetoEvent;
+      }
+
+      // Lead jets isolation from Htmiss
+      if (deltaPhi(htmissvec, jets24[0]) < 0.5) vetoEvent;
+      if (deltaPhi(htmissvec, jets24[1]) < 0.5) vetoEvent;
+      if (deltaPhi(htmissvec, jets24[2]) < 0.3) vetoEvent;
+      if (jets24.size() >= 4 && deltaPhi(htmissvec, jets24[3]) < 0.3) vetoEvent;
+
+
+      ////////
+
+
+      // Calculate a bin index for this event
+      // Nj bin
+      static const vector<double> njedges = {3., 5., 7., 9.};
+      const size_t nj = jets24.size();
+      // Nbj bin
+      static const vector<double> njbedges = {0., 1., 2., 3.};
+      const size_t inj = binIndex(nj, njedges, true);
+      size_t nbj = 0;
+      for (const Jet& j : jets24)
+        if (j.bTagged()) nbj += 1;
+      const size_t inbj = binIndex(nbj, njbedges, true);
+      // HTmiss vs HT 2D bin
+      int iht = 0;
+      if (htmiss < 350*GeV) {
+        iht = ht < 500 ? 1 : ht < 1000 ? 2 : 3;
+      } if (htmiss < 500*GeV && ht > 350*GeV) {
+        iht = ht < 500 ? 4 : ht < 1000 ? 5 : 6;
+      } if (htmiss < 750*GeV && ht > 500*GeV) {
+        iht = ht < 1000 ? 7 : 8;
+      } if (ht > 750*GeV) {
+        iht = ht < 1500 ? 9 : 10;
+      }
+      if (iht == 0) vetoEvent;
+      iht -= 1; //< change from the paper's indexing scheme to C++ zero-indexed
+      // Total bin number
+      const size_t ibin = 40*inj + 10*inbj + (size_t)iht;
+
+      // Fill SR counter
+      _h_srcounts[ibin]->fill(event.weight());
+
+    }
+
+
+    /// Normalise counters after the run
+    void finalize() {
+
+      const double sf = 12.9*crossSection()/femtobarn/sumOfWeights();
+      scale(_h_srcounts, sf);
+
+    }
+
+    //@}
+
+
+  private:
+
+    /// @name Histograms
+    //@{
+    vector<CounterPtr> _h_srcounts;
+    //@}
+
+
+  };
+
+
+
+  // The hook for the plugin system
+  DECLARE_RIVET_PLUGIN(CMS_2016_PAS_SUS_16_14);
+
+
+}
diff --git a/src/Analyses/cat_with_lines b/src/Analyses/cat_with_lines
new file mode 100755
--- /dev/null
+++ b/src/Analyses/cat_with_lines
@@ -0,0 +1,6 @@
#! /bin/bash
# Emit each argument file preceded by a C preprocessor '#line 1 "<file>"'
# directive, so diagnostics for the concatenated output point back at the
# original source files.
for src in "$@"; do
  printf '#line 1 "%s"\n' "$src"
  cat "$src"
done
diff --git a/src/Core/AnalysisInfo.cc b/src/Core/AnalysisInfo.cc
--- a/src/Core/AnalysisInfo.cc
+++ b/src/Core/AnalysisInfo.cc
@@ -1,246 +1,248 @@
 #include "Rivet/Config/RivetCommon.hh"
 #include "Rivet/AnalysisInfo.hh"
 #include "Rivet/Tools/RivetPaths.hh"
 #include "Rivet/Tools/Utils.hh"
 #include "Rivet/Tools/Logging.hh"
 #include "yaml-cpp/yaml.h"
 #include <iostream>
 #include <fstream>
 #include <unistd.h>
 
 #ifdef YAML_NAMESPACE
 #define YAML YAML_NAMESPACE
 #endif
 
 namespace Rivet {
 
 
  namespace {
    // File-local helper supplying the logger used by the MSG_* macros in this
    // translation unit.
    Log& getLog() {
      return Log::getLog("Rivet.AnalysisInfo");
    }
  }
 
 
   /// Static factory method
   unique_ptr<AnalysisInfo> AnalysisInfo::make(const std::string& ananame) {
     // Returned AI, in semi-null state
     unique_ptr<AnalysisInfo> ai( new AnalysisInfo );
     ai->_beams += make_pair(PID::ANY, PID::ANY);
     ai->_name = ananame;
 
     /// If no ana data file found, return null AI
     const string datapath = findAnalysisInfoFile(ananame + ".info");
     if (datapath.empty()) {
       MSG_DEBUG("No datafile " << ananame + ".info found");
       return ai;
     }
 
     // Read data from YAML document
     MSG_DEBUG("Reading analysis data from " << datapath);
     YAML::Node doc;
     try {
       #if YAMLCPP_API == 3
       std::ifstream file(datapath.c_str());
       YAML::Parser parser(file);
       parser.GetNextDocument(doc);
       #elif YAMLCPP_API == 5
       doc = YAML::LoadFile(datapath);
       #endif
     } catch (const YAML::ParserException& ex) {
       MSG_ERROR("Parse error when reading analysis data from " << datapath << " (" << ex.what() << ")");
       return ai;
     }
 
     #define THROW_INFOERR(KEY) throw InfoError("Problem in info parsing while accessing key " + string(KEY) + " in file " + datapath)
 
     // Simple scalars (test for nullness before casting)
     #if YAMLCPP_API == 3
     #define TRY_GETINFO(KEY, VAR) try { if (doc.FindValue(KEY)) { string val; doc[KEY] >> val; ai->_ ## VAR = val; } } catch (...) { THROW_INFOERR(KEY); }
     #elif YAMLCPP_API == 5
     #define TRY_GETINFO(KEY, VAR) try { if (doc[KEY] && !doc[KEY].IsNull()) ai->_ ## VAR = doc[KEY].as<string>(); } catch (...) { THROW_INFOERR(KEY); }
     #endif
     TRY_GETINFO("Name", name);
     TRY_GETINFO("Summary", summary);
     TRY_GETINFO("Status", status);
     TRY_GETINFO("RunInfo", runInfo);
     TRY_GETINFO("Description", description);
     TRY_GETINFO("Experiment", experiment);
     TRY_GETINFO("Collider", collider);
     TRY_GETINFO("Year", year);
+    TRY_GETINFO("Luminosity_fb", luminosityfb);
     TRY_GETINFO("SpiresID", spiresId);
     TRY_GETINFO("InspireID", inspireId);
     TRY_GETINFO("BibKey", bibKey);
     TRY_GETINFO("BibTeX", bibTeX);
     #undef TRY_GETINFO
 
     // Sequences (test the seq *and* each entry for nullness before casting)
     #if YAMLCPP_API == 3
     #define TRY_GETINFO_SEQ(KEY, VAR) try { \
         if (const YAML::Node* VAR = doc.FindValue(KEY)) {               \
           for (size_t i = 0; i < VAR->size(); ++i) {                    \
             string val; (*VAR)[i] >> val; ai->_ ## VAR += val;          \
           } } } catch (...) { THROW_INFOERR(KEY); }
     #elif YAMLCPP_API == 5
     #define TRY_GETINFO_SEQ(KEY, VAR) try { \
         if (doc[KEY] && !doc[KEY].IsNull()) {                           \
           const YAML::Node& VAR = doc[KEY];                             \
           for (size_t i = 0; i < VAR.size(); ++i)                       \
             if (!VAR[i].IsNull()) ai->_ ## VAR += VAR[i].as<string>();  \
         } } catch (...) { THROW_INFOERR(KEY); }
     #endif
     TRY_GETINFO_SEQ("Authors", authors);
     TRY_GETINFO_SEQ("References", references);
     TRY_GETINFO_SEQ("ToDo", todos);
+    TRY_GETINFO_SEQ("Keywords", keywords);
     #undef TRY_GETINFO_SEQ
 
 
     // A boolean with some name flexibility
     try {
       #if YAMLCPP_API == 3
       bool val;
       if (const YAML::Node* n = doc.FindValue("NeedsCrossSection")) { *n >> val; ai->_needsCrossSection = val; }
       if (const YAML::Node* n = doc.FindValue("NeedCrossSection")) { *n >> val; ai->_needsCrossSection = val; }
       #elif YAMLCPP_API == 5
       if (doc["NeedsCrossSection"]) ai->_needsCrossSection = doc["NeedsCrossSection"].as<bool>();
       else if (doc["NeedCrossSection"]) ai->_needsCrossSection = doc["NeedCrossSection"].as<bool>();
       #endif
     } catch (...) {
       THROW_INFOERR("NeedsCrossSection|NeedCrossSection");
     }
 
 
     // Beam particle identities
     try {
       #if YAMLCPP_API == 3
 
       if (const YAML::Node* pbeampairs = doc.FindValue("Beams")) {
         const YAML::Node& beampairs = *pbeampairs;
         vector<PdgIdPair> beam_pairs;
         if (beampairs.size() == 2 &&
             beampairs[0].Type() == YAML::NodeType::Scalar &&
             beampairs[1].Type() == YAML::NodeType::Scalar) {
           string bstr0, bstr1;
           beampairs[0] >> bstr0;
           beampairs[1] >> bstr1;
           beam_pairs += PID::make_pdgid_pair(bstr0, bstr1);
         } else {
           for (YAML::Iterator bpi = beampairs.begin(); bpi != beampairs.end(); ++bpi) {
             const YAML::Node& bp = *bpi;
             if (bp.size() == 2 &&
                 bp[0].Type() == YAML::NodeType::Scalar &&
                 bp[1].Type() == YAML::NodeType::Scalar) {
               string bstr0, bstr1;
               bp[0] >> bstr0;
               bp[1] >> bstr1;
               beam_pairs += PID::make_pdgid_pair(bstr0, bstr1);
             } else {
               throw InfoError("Beam ID pairs have to be either a 2-tuple or a list of 2-tuples of particle names");
             }
           }
         }
         ai->_beams = beam_pairs;
       }
 
       #elif YAMLCPP_API == 5
 
       if (doc["Beams"]) {
         const YAML::Node& beams = doc["Beams"];
         vector<PdgIdPair> beam_pairs;
         if (beams.size() == 2 && beams[0].IsScalar() && beams[0].IsScalar()) {
           beam_pairs += PID::make_pdgid_pair(beams[0].as<string>(), beams[1].as<string>());
         } else {
           for (size_t i = 0; i < beams.size(); ++i) {
             const YAML::Node& bp = beams[i];
             if (bp.size() != 2 || !bp[0].IsScalar() || !bp[0].IsScalar())
               throw InfoError("Beam ID pairs have to be either a 2-tuple or a list of 2-tuples of particle names");
             beam_pairs += PID::make_pdgid_pair(bp[0].as<string>(), bp[1].as<string>());
           }
         }
         ai->_beams = beam_pairs;
       }
 
       #endif
     } catch (...) { THROW_INFOERR("Beams"); }
 
 
     // Beam energies
     try {
       #if YAMLCPP_API == 3
 
       if (const YAML::Node* penergies = doc.FindValue("Energies")) {
         const YAML::Node& energies = *penergies;
         vector<pair<double,double> > beam_energy_pairs;
         for (YAML::Iterator be = energies.begin(); be != energies.end(); ++be) {
           if (be->Type() == YAML::NodeType::Scalar) {
             // If beam energy is a scalar, then assume symmetric beams each with half that energy
             double sqrts;
             *be >> sqrts;
             beam_energy_pairs += make_pair(sqrts/2.0, sqrts/2.0);
           } else if (be->Type() == YAML::NodeType::Sequence) {
             const YAML::Node& beseq = *be;
             // If the sub-sequence is of length 1, then it's another scalar sqrt(s)!
             if (beseq.size() == 1) {
               double sqrts;
               (*be)[0] >> sqrts;
               beam_energy_pairs += make_pair(sqrts/2.0, sqrts/2.0);
             } else if (beseq.size() == 2) {
               vector<double> beamenergies;
               double beamenergy0, beamenergy1;
               beseq[0] >> beamenergy0;
               beseq[1] >> beamenergy1;
               beam_energy_pairs += make_pair(beamenergy0, beamenergy1);
             } else {
               throw InfoError("Beam energies have to be a list of either numbers or pairs of numbers");
             }
           } else {
             throw InfoError("Beam energies have to be a list of either numbers or pairs of numbers");
           }
         }
         ai->_energies = beam_energy_pairs;
       }
 
       #elif YAMLCPP_API == 5
 
       if (doc["Energies"]) {
         vector< pair<double,double> > beam_energy_pairs;
         for (size_t i = 0; i < doc["Energies"].size(); ++i) {
           const YAML::Node& be = doc["Energies"][i];
           if (be.IsScalar()) {
             // If beam energy is a scalar, then assume symmetric beams each with half that energy
             beam_energy_pairs += make_pair(be.as<double>()/2.0, be.as<double>()/2.0);
           } else if (be.IsSequence()) {
             if (be.size() != 2)
               throw InfoError("Beam energies have to be a list of either numbers or pairs of numbers");
             beam_energy_pairs += make_pair(be[0].as<double>(), be[1].as<double>());
           } else {
             throw InfoError("Beam energies have to be a list of either numbers or pairs of numbers");
           }
         }
         ai->_energies = beam_energy_pairs;
       }
 
       #endif
 
     } catch (...) { THROW_INFOERR("Energies"); }
 
     #undef THROW_INFOERR
 
 
     MSG_TRACE("AnalysisInfo pointer = " << ai.get());
     return ai;
   }
 
 
   string toString(const AnalysisInfo& ai) {
     stringstream ss;
     ss << ai.name();
     ss << " - " << ai.summary();
     // ss << " - " << ai.beams();
     // ss << " - " << ai.energies();
     ss << " (" << ai.status() << ")";
     return ss.str();
   }
 
 
 }
diff --git a/src/Projections/MissingMomentum.cc b/src/Projections/MissingMomentum.cc
--- a/src/Projections/MissingMomentum.cc
+++ b/src/Projections/MissingMomentum.cc
@@ -1,43 +1,46 @@
 // -*- C++ -*-
 #include "Rivet/Projections/MissingMomentum.hh"
 
 namespace Rivet {
 
 
  /// Projection comparison: equivalence is determined solely by comparing the
  /// "VisibleFS" child projection (no other configuration state exists here).
  int MissingMomentum::compare(const Projection& p) const {
    return mkNamedPCmp(p, "VisibleFS");
  }
 
 
   void MissingMomentum::clear() {
     _momentum = FourMomentum();
     _set = 0.0;
     _vet = Vector3();
   }
 
 
   void MissingMomentum::project(const Event& e) {
     clear();
 
     // Project into final state
     const FinalState& vfs = applyProjection<FinalState>(e, "VisibleFS");
     for (const Particle& p : vfs.particles()) {
       const FourMomentum& mom = p.momentum();
+      const Vector3 ptunit = mom.vector3().setZ(0.0).unit();
       _momentum += mom;
       _set += mom.Et();
-      _vet += mom.Et() * mom.vector3().setZ(0.0).unit();
+      _spt += mom.pT();
+      _vet += mom.Et() * ptunit;
+      _vpt += mom.pT() * ptunit;
     }
   }
 
 
   const FourMomentum MissingMomentum::visibleMomentum(double mass) const {
     /// @todo Couldn't we just reset the internal _momentum's mass and return by value? Would require mutable, though
     FourMomentum p4 = _momentum;
     const double pmod2 = p4.p3().mod2();
     const double new_energy = sqrt(pmod2 + sqr(mass));
     p4.setE(new_energy);
     return p4;
   }
 
 
 }