diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -1,67 +1,65 @@ ade23f139a057b3c8788e04fccf7401df16a3ef7 yoda-1.0.0 31767f4a3df9a4dc357e912e4d451e020d9b6739 yoda-1.0.1 5e4b6b0d4905ba6f3924df8a72ea7625d950429c yoda-1.0.2 44d94c99d32153230bf4ee378e85b9cf857dce4d yoda-1.0.3 9b66273834e0438491fe77b95d30dd9af1491b79 yoda-1.0.4 6bdfde6d1657b181cb0e2c5402506ce00c191b95 yoda-1.0.5 71e482a0279707c36ed1514078485384ab64cf8f yoda-1.0.6 f5eefcbf9d10ab41d371ac1f19478d0f62880a19 yoda-1.0.7 9d49f2ba2242fcb03bedeafc36c7de93a1855914 yoda-1.1.0 ee5bcdd0b0dbab82ecefac99b9f2504d0d9daba1 yoda-1.2.0 5e46ae065571a1b3ee277fd2a1ee7043cdc08737 yoda-1.2.1 44eaa92656e675e9735b1dcc2bb5e00cb9c8b5a7 yoda-1.3.0 292dc62f86dca4ff90a74abc9ec1c3838cf0c9f8 yoda-1.3.1 292dc62f86dca4ff90a74abc9ec1c3838cf0c9f8 yoda-1.3.1 0000000000000000000000000000000000000000 yoda-1.3.1 0000000000000000000000000000000000000000 yoda-1.3.1 c26de2762ed22a840a41ac25d36550307d48829a yoda-1.3.1 9bdaa370b066244a1174b9a4d0ae272be8a92a4a yoda-1.4.0 95f1e7f4bfb4e283c5513077cc73803754298d40 yoda-1.5.0 d9a2076dcaffb55fad556937f744f2fe9468e365 yoda-1.5.1 0000000000000000000000000000000000000000 yoda-1.5.1 ede1e10afe3b1292f1a923c152ac87d43dc02a5a yoda-1.5.1 508477ceb62a09a501defbb4cf87f03e1032e0b1 yoda-1.5.2 e3f1d286e64a2af763764d54005230d26758e280 yoda-1.5.3 1c3ac4d17fb5519dc583a54195826c3de9f6560d yoda-1.5.4 35c26aa19eb269eec5d8e2d20594e4d3c273cb97 yoda-1.5.5 35c26aa19eb269eec5d8e2d20594e4d3c273cb97 yoda-1.5.5 0000000000000000000000000000000000000000 yoda-1.5.5 0000000000000000000000000000000000000000 yoda-1.5.5 5590c32abfae9f9f3f46acf4e79420ebf0a38bd6 yoda-1.5.5 2ec6efa3c7b74c112c589d438eaf818479d74d93 yoda-1.5.6 2a5c30e6fd751cdce59ac535dd54b4b499e92801 yoda-1.5.7 7dfc1023d37c85d9fe55d91a1a082dc3f784f030 yoda-1.5.9 f4f3465b93e36b574e98920114ee1293ea967ff1 yoda-1.5.8 e6d259d62a233df2a71ec38995069b7cbd41a10f yoda-1.6.0beta1 e6d259d62a233df2a71ec38995069b7cbd41a10f yoda-1.6.0beta1 0000000000000000000000000000000000000000 yoda-1.6.0beta1 0000000000000000000000000000000000000000 yoda-1.6.0beta1 a1fbfd42fa562688b9843ab9a26f34b81bbe93e2 yoda-1.6.0beta1 a1fbfd42fa562688b9843ab9a26f34b81bbe93e2 yoda-1.6.0beta1 0000000000000000000000000000000000000000 yoda-1.6.0beta1 a57d544b72714e45c3a7bcf408479473ec2481a7 yoda-1.6.0 f27b8f2bddc522aaf30c06bb643207b67422fa9b yoda-1.6.1 0000000000000000000000000000000000000000 yoda-1.6.1 3d956bb73656d46a3118a5a5d03d25420c04f538 yoda-1.6.1 2a32b9e0b9a9f962bd9ebb2ac7e611981da14ce4 yoda-1.6.2 6e222b9c2422baef39a531e2461213a51c898661 yoda-1.6.3 1edc280f270b7cf812bf7bc6d56abbf93eb15987 yoda-1.6.4 6001b6383d178b5db17b767a79d960b0474192d5 yoda-1.6.5 d233673f96a94803dfd45e8a4e238a593b56ef7b yoda-1.6.6 45d0b8d20b231025d1a7671f4ca0ffa964a7b661 yoda-1.6.7 2c76de0259854c113e77d76c4b24a719a66e0969 yoda-1.7.0 2c76de0259854c113e77d76c4b24a719a66e0969 yoda-1.7.0 0000000000000000000000000000000000000000 yoda-1.7.0 0000000000000000000000000000000000000000 yoda-1.7.0 7ac806a002e8abe3c894c8ef8ab72699e66ccdfe yoda-1.7.0 9e179349e4d32a349560c09a0488ddd24328ee67 yoda-1.7.1 224dc64b526de1cf9c495a6fd9c2909abc2e450b yoda-1.7.2 224dc64b526de1cf9c495a6fd9c2909abc2e450b yoda-1.7.2 0000000000000000000000000000000000000000 yoda-1.7.2 0000000000000000000000000000000000000000 yoda-1.7.2 eb6a2a575f47478c5c7c3ffd47b2a96c740b147d yoda-1.7.2 c937a66ce9c92007b36349d05065a8c9930b74d1 yoda-1.7.3 c88e4ebc1317334c46845db83c6c357a376634d4 yoda-1.7.4 9ee5f2cadeb034e09c6dddc35baa1b7a3757778f yoda-1.7.5 -376626bf2c079c4b303603310dc80c4190efcfd2 yoda-1.7.6 
-4019ca1879b672f71ca8f5247877a2321773820b yoda-1.7.7 diff --git a/ChangeLog b/ChangeLog --- a/ChangeLog +++ b/ChangeLog @@ -1,1987 +1,1950 @@ -2019-11-01 Andy Buckley - - * Add extra optional bool arguments to the mkScatter converter - functions for Histo1D and Histo2D types. - - * Force rebuild of the Python interface if a sufficient Cython is - found, even if the generated .cpp file is present at - configure-time. - -2019-09-05 Andy Buckley - - * Final conversion to use ast module not just in autotype but as first choice in AO.annotation. - -2019-08-20 Andy Buckley - - * Remove ALL uses of @property in the Python interfaces: all methods must have call-parentheses now. - -2019-07-30 Andy Buckley - - * Improve Python annotation parsing to preferentially use ast rather than yaml. - -2019-06-21 Andy Buckley - - * Add x/yErrs and x/yErrAvgs functions to Scatter2D Python. - -2019-06-018 Louie Corpe - - * Release 1.7.7 - - * Fix printing of YAML annotations. - - * Fix Python 3 / Unicode compatibility for Point.pyx. - -2019-06-07 Andy Buckley - - * Release 1.7.6 - 2019-06-06 Andy Buckley * Add xEdges and yEdges methods to the 2D Histo and Profile classes, in C++ and Python. - * Remove @property attributes from Python xEdges methods. More of that to come in version 1.8.0. + * Remove @property attributes from Python xEdges methods. More to come... 2019-05-26 Andy Buckley * Fix Python3/argparse attribute access bug in yodamerge. 2019-05-09 Andy Buckley * Release 1.7.5 2019-05-06 Andy Buckley * Convert bin/* scripts to use Python3-compatible argparse rather than old optparse. 2019-05-02 Christian Gutschow * Fix Histo1D::integralRange to include the second-argument bin content as described in the docstring. 2019-04-29 Andy Buckley * Fix Python3 StringIO import compatibility -- thanks to Tom Neep for the patch! 2019-03-05 Andy Buckley * Add -m/-M filtering to yodadiff. 2019-02-27 Andy Buckley * Fix HistoBin2D printing in Python interface. 2019-02-17 Jon Butterworth * Protect yodamerge against zero-valued (?!) ScaledBy attributes. 2018-12-10 Andy Buckley * Release 1.7.4 2018-11-16 Christian Gutschow * Roll out sameBinning methods for Profile1D and Profile2D. 2018-11-08 Christian Gutschow * Add sameBinning methods for Histo1D and Histo2D. 2018-11-08 Andy Buckley * Remove debug couts from single-AO Writer::write() function. * Add --ignore-missing and --ignore-new flags to yodadiff. * Tidy up point-comparison output of yodadiff. 2018-10-13 Christian Gutschow * Fix additional weighting in yodamerge 2018-09-24 Andy Buckley * Release 1.7.3 2018-09-12 Louie Corpe * Fix bug in multi-err point reader which was not parsing Variations correctly, and a minor typo where the err- was duplicated for the additional columns. 2018-09-06 Andy Buckley * Fix missing y and z components of Point2D and Point3D equality testing. 2018-09-04 Andy Buckley * Add array accessors of x,y,zMins/Mids/Maxs/Vals/Errs, etc. to Python Histo2D and Profile2D. 2018-08-30 Andy Buckley * Update embedded yaml-cpp to v0.6.0. * Convert xyzMin/Max properties to functions in the Python interfaces. * Return numpy arrays if possible in Python interface array methods. 2018-08-23 Andy Buckley * Release 1.7.2 * Add missing bin(i) method to Python Profile1D interface. 2018-08-22 Andy Buckley * Avoid const-overloading clash for binAt functions. 2018-08-14 Chris Gutschow * Merge Graeme Watt's modifications to yodadiff. 2018-08-14 Andy Buckley * Release 1.7.1 2018-08-08 Andy Buckley * yodadiff now supports quiet (-q) and list (-l) modes. 
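For orientation on the accessor-style churn recorded above: the removed 2019-08-20 entry drops @property from the Python interface (call-parentheses required), while the bin/yodacmp hunk further down in this patch reverts ao.path() back to the ao.path property. A minimal sketch of the two spellings, with a hypothetical input file; which one works depends on the YODA version in use:

    import yoda

    for ao in yoda.read("example.yoda", asdict=False):   # hypothetical file name
        print(ao.path)       # 1.7.x property style, the form this patch restores in bin/yodacmp
        # print(ao.path())   # call-parentheses style per the removed 2019-08-20 entry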
2018-06-01 Louie Corpe * Point1,2,3D now support multiple error sources for the highest dimension. * Scatter1,2,3D now read in/out with additional columns for the extra error sources, with a variations() method to check which sources are available. 2018-05-02 Andy Buckley * StringUtils.h: Replace std::ptr_fun (removed in C++17) with a lambda function. Thanks to Stefan Richter. 2018-04-30 Andy Buckley * Add "-" = stdin/stdout recognition to C++ IO functions. * Convert remaining Python read/write methods to use the C++ IO functions. * Convert Python read/write methods with filename arguments to use the gzip-aware IO functions. 2018-04-27 Andy Buckley * Fix missing lock=false flag initialisation in Histo2D constructor from a Bins vector. * Improve axis locking error messages. 2017-12-21 Andy Buckley * Version 1.7.0 release 2017-12-11 Andy Buckley * Add Nentries printout to yodals -v 2017-09-18 Andy Buckley * Patches for ROOT conversion from Robert Hatcher -- thanks! 2017-09-16 Andy Buckley * Add YODA format version annotation, at version 2, and update YODA reader to use version info and multiline YAML EOF marker. * Write YODA annotations in YAML with a --- YAML break-line. 2017-09-14 Andy Buckley * Enable compressed writing from Python. 2017-09-13 Andy Buckley * Remove UNUSED macro in favour of anonymous args. * Enable zipped writing... but only works from C++ so far. 2017-09-11 Andy Buckley * Add HistoBin2D::area(), and provide a default normto=1.0 argument on the Python Histo2D.normalize() method. 2017-09-04 Andy Buckley * Change license explicitly to GPLv3, cf. MCnet3 agreement. * Parse YODA format AO headers as YAML (restriction to single-line dict entries for now). 2017-08-24 Andy Buckley * Use a slightly enhanced fast numeric parser in ReaderYODA (taken from LHAPDF, originally inspired by Gavin Salam). * Add an UNSCALE spec option to yodascale, to undo ScaledBy effects. 2017-08-19 Andy Buckley * Add optional zlib support via zstr -- massive thanks to Dmitry Kalinkin for the lovely patch! 2017-08-16 Andy Buckley * Fix setVal(i, x) numbered-axis methods on Point2D and Point3D: switch break statements were missing. * Explicitly load all ROOT objects as a list rather than generator. Patch from Dmitry Kalinkin. 2017-07-24 Andy Buckley * Improvements to yodaplot, including two operating modes: the default CMP mode is suitable for plotting histos by path, from raw .yoda files. * Update yoda.plotting functions to treat plot-keys as args and AO annotations via case-insensitive keys. * Add annotationsDict to the Python AO interface. * Add AO as an alias for AnalysisObject. 2017-07-23 Andy Buckley * Add parallel/compatibility yoda1 package to aid eventual transition to YODA v2. 2017-07-22 Andy Buckley * Add x,y,zMins and Maxs to all 1D data types and scatters (and x,yMin/Max to the scatters) -- Python interface only. * Rework some of the yoda.plotting tools, making it a bit more compatible with user-scripted matplotlib. 2017-07-18 Andy Buckley * Add convenience aliases H1D, H2D, P1D, P2D, and S1D, S2D, S3D for the HistoXD, ProfileXD, and ScatterXD classes respectively. 2017-07-08 Andy Buckley * Add xyVals/Errs and other 'bin array property' accessors to the Python Histo1D and Profile1D types: important for connection to matplotlib. 2017-06-28 Andy Buckley * Use Python natsort library to sort yodals output if available. 2017-06-18 Andy Buckley * Version 1.6.7 release. 2017-05-12 Andy Buckley * pyext/yoda/rootcompat.pyx: Fix ordering of TH1 vs.
TProfile conversion -- TProfile *is* a TH1, so we have to test for the more specific type-match first. Thanks to Dmitry Kalinkin for the patch. 2017-05-02 Andy Buckley * Add static Reader methods to match the Writer ones. 2017-02-23 Andy Buckley * Fix Histo2D and Profile2D total distribution reading from YODA format. 2017-02-19 Holger Schulz * Convert TH1F to TH1D in root2flat. Much simpler than duplicating the TH1D stuff in pyext. 2016-12-13 Andy Buckley * Version 1.6.6 release. 2016-12-12 Holger Schulz * Bugfixes in Cython bins accessors for Histo2D. 2016-11-17 Leif Lonnblad * Fixed warning messages about the obsoleteness of AIDA so that the scripts actually still work. 2016-09-28 Andy Buckley * Version 1.6.5 release, for the benefit of ROOT fans. * Fix handling of --enable/disable-root configure options. 2016-09-26 David Grellscheid * Improvements to Cython version testing. 2016-09-25 Andy Buckley * Version 1.6.4 release. 2016-09-20 David Grellscheid * Remove aliases for @property functions. They were scheduled for removal anyway, and don't work with Cython >= 24. 2016-09-06 Andy Buckley * Update configure scripts to use newer (Py3-safe) Python testing macros. 2016-08-09 Andy Buckley * Version 1.6.3 release! 2016-07-22 Andy Buckley * Add 'add' modes for scatter combination to yodamerge. * Fix yodamerge scatter averaging to use the first AO. 2016-07-21 Andy Buckley * Add --type-mismatch-mode flag and fallback logic to yodamerge. * Fix yodamerge logic to handle cases where an AO only appears once. 2016-07-19 Andy Buckley * Deprecate flat2yoda script and add warning output to it and the AIDA conversion scripts. * Add a convenience yoda2yoda script. 2016-07-14 Andy Buckley * Try to build PyROOT interface by default, if root-config is found. 2016-07-11 Andy Buckley * Remove accidentally remaining reference to Boost flags in yoda-config. 2016-07-06 Andy Buckley * Version 1.6.2 release! 2016-07-05 Andy Buckley * Pass the toNewScatter3D() scalebyarea flag to the called toScatter3D() functions. 2016-06-06 Andy Buckley * Re-enable the disabled-for-some-reason Scatter1D combineWith Python mappings. 2016-04-28 Andy Buckley * Version 1.6.1 release! * Add a unit test for annotation handling correctness. * Fix numerical precision of string storage of floating-point attributes. * Fix a bug in use of the replacement for lexical_cast. 2016-04-20 Andy Buckley * Version 1.6.0 release! 2016-04-16 Andy Buckley * Extend SFINAE craziness to allow writing of any object (e.g. smart pointer) that can be dereferenced to something that has AnalysisObject as its base class... and also to any container of them! Amazing what you can do with C++11! 2016-04-14 Andy Buckley * Add a few consts to the arguments in Scatter error setting via pairs. * Fix double-writing of minus errors in WriterYODA for Scatter1D and Scatter3D. Thanks to Graeme Watt for the report and fix. 2016-04-12 Andy Buckley * Remove Boost dependency and require C++11 compilation. 2016-04-08 Andy Buckley * Add a --guess-prefix flag to yoda-config, cf. fastjet-config. 2015-12-20 Andy Buckley * Change AO uncomputable division and mkScatter operations to return/set NaN rather than 0 -- behaviour change requires new major version series 1.6. 2016-03-09 Andy Buckley * Version 1.5.9! (oh no, we're out of convenient version number space!!) 2016-03-08 Andy Buckley * Add abs function to eq calculation in yodadiff. 2016-02-29 Andy Buckley * Remove blocking of builds against ROOT6 -- it works fine. 
2016-02-16 Andy Buckley * Add a --add option to yodamerge, for simple histo stacking. Thanks to Chris Gutschow for the patch, although my spidey sense is tingling... 2015-12-21 Andy Buckley * Version 1.5.8! * Add a rebinning unit test, pytest-rebin. * Add optional range arguments to rebinBy methods, allowing block rebinnings to be applied only within a range of (original) bin indices. * Add missing root.py submodule file. Oops! 2015-12-20 Andy Buckley * Convert linspace to use multiplication rather than repeated addition to construct edge values, reducing precision errors. Thanks to Holger Schulz for the suggestion. 2015-12-15 Andy Buckley * Add xEdges() methods to Axis1D and the Histo1D and Profile1D that use it. The returned edge lists are finite only, i.e. they do not contain the +-inf values on the ends of the internal BinSearcher edges. 2015-12-13 Andy Buckley * Version 1.5.7! * Extend batch-adding in ReaderYODA to include Scatter types. * Add a match_aos function to Python, for filtering AO lists/dicts on path patterns and anti-patterns. 2015-12-12 Andy Buckley * Add a flag to yoda2root to change whether the conversion is to 'proper' types or to (more robustly) TGraphAsymmErrors objects. * Fix accidental use of S2D_MODE flag where S1D_MODE should have been used, in yodamerge. Thanks again to Radek Podskubka. * Allow new rebinTo() merging to restrict to a subset of the bin range, merging the outside bins into the overflow distributions. 2015-12-11 Andy Buckley * Add a rebinTo() method on Axis1D, allowing rebinning to a new given set of bin edges. Add an explicitly named rebinBy(), and overloaded rebin() aliases for both, and pass through to Histo1D and Profile1D APIs. Plus other internal tweaks to binning functionality... anticipating/fuelling the fundamental rewrite. 2015-12-10 Andy Buckley * Improve ReaderYODA to use temporary bin containers, to minimise calling sort when adding bins to histos. A quick test suggests this has sped up big file reading by a factor of 30 or so!!! * Add missing addBins() operators (only in C++ so far) to Histo and Profile classes. * Fix Counter::numEntries to return an unsigned long rather than double. Thanks to Radek Podskubka for the bug discovery and detective work. * Improve sortedvector to insert new elements into the sorted position, rather than resorting the whole vector. This should be a bit more efficient, but I think the asymptotic complexity is the same. Might help a bit with reading big data files. 2015-12-04 Andy Buckley * Add yoda.HAS_ROOT_SUPPORT flag, for API user convenience. 2015-11-22 Andy Buckley * Version 1.5.6! * Make AO path setting and retrieval prepend a leading slash if it is missing (unless the path is completely empty). 2015-11-21 Andy Buckley * Add root2yoda conversion script. * Deprecating yoda.to_root() in favour of yoda.root module, which contains to_root and to_yoda functions, as well as a ROOT file walking function. 2015-11-17 Andy Buckley * Map ROOT-to-YODA (as scatter) functions to Python. Phew. * Map new to-ROOT functions, including TGraph ones, to Python. 2015-11-16 Andy Buckley * ROOTCnv.h: Add toScatter3D ROOT->YODA, and toNew* YODA->ROOT conversion routines. * ROOTCnv.h: Fix bug in toTH2D(const Histo2D& h) as used with ROOT6. Thanks to Tim Martin. 2015-11-05 Andy Buckley * Fix double-dealloc in new Point class hierarchy Python mapping. 2015-10-23 Andy Buckley * Make the version() function inline, and the numerical constants static. * Change the default plotting backend to MPL rather than the much slower PGF. 
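The 2015-12-20 linspace note above is about floating-point error accumulation. A plain-Python sketch of the two edge-construction strategies it compares (an illustration of the reasoning only, not YODA's MathUtils code):

    def edges_by_repeated_addition(a, b, nbins):
        # Rounding error accumulates with every += step.
        step = (b - a) / nbins
        edges, x = [], a
        for _ in range(nbins + 1):
            edges.append(x)
            x += step
        return edges

    def edges_by_multiplication(a, b, nbins):
        # One rounding per edge (a + i*step), so the last edge typically stays
        # much closer to b; YODA additionally pins the end value exactly.
        step = (b - a) / nbins
        return [a + i * step for i in range(nbins + 1)]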
2015-10-09 Andy Buckley * Reinstate __getitem__ special methods for Scatters in Python. * Provide dim() methods/attributes for the Point and Bin base classes. * Rename set*Err to set*Errs for the asymmetric variants. Plural aliases are also provided for the symm case. 2015-10-08 Andy Buckley * Pass std::pairs by reference in Point*D error setting functions. * Add Point base class with generic accessors to Point*D properties via an integer axis ID argument. 2015-10-07 Andy Buckley * Version 1.5.5 release. * Counter.pyx: Typo fix in sumW mapping. * yodamerge: Re-add checking for non-emptiness before merging, in case the empty ones are missing a ScaledBy attribute. Won't normally apply to Profiles, since they don't usually get normalised, but we might as well include them in the vetoing since empty histos don't contribute to the merging. Added a command-line option to disable this heuristic since in very strange situations a null sumW does not mean no fills. 2015-10-06 Andy Buckley * Version 1.5.4 release. * ReaderYODA: Typo fixes in Counter filling of sumW and Scatter3D reader state flag. * yodamerge: add merging heuristics for Scatter1D and Scatter3D (needs unification) 2015-10-05 Andy Buckley * yodamerge: add a fix for empty-in-all-runs histo merging; thanks to Daniel Rauch. 2015-10-04 Andy Buckley * Adding dim() function and corresponding Python attribute to AnalysisObject. * Map Counter arithmetic operations into Python. * Map Counter mkScatter() into Python (as bound method). * Add a YODA::version() function, mapped into Python and used to set the yoda.__version__ variable. 2015-10-01 Andy Buckley * Expose the yoda.plot() Python function in a way that doesn't automatically induce a dependence on matplotlib. 2015-09-30 Andy Buckley * Fix yodals to work with Counters. 2015-09-23 Andy Buckley * Version 1.5.3 release. * Update Boost version requirement to 1.48, due to use of type_traits/has_dereference, and add a check for that feature's header. 2015-09-19 Andy Buckley * Further improvements to handling leading _multiple_ # marks on YODA format BEGIN lines. 2015-09-11 Andy Buckley * Version 1.5.2 release. * Tolerate leading # symbols without separating whitespace on BEGIN lines in YODA format parsing. * Further improvements to handling LowStatsErrors in YODA format writing. * Fix shadowed variables that made ReaderYODA unhappy. 2015-09-03 Andy Buckley * Version 1.5.1 release. * Fix bugs in Python wrapper for Point3D. 2015-08-28 Peter Richardson * Catch LowStatsError when writing multiple histograms so only the histogram with the problem is not written 2015-08-28 Andy Buckley * Version 1.5.0 release. 2015-08-24 Andy Buckley * Improve protection of efficiency calculation against the weird world of general weighted events. 2015-08-17 Andy Buckley * Add some protection against calling matplotlib's legend() method if there are no valid labels to display, to suppress an MPL warning message when using yoda.plot(). 2015-08-12 Andy Buckley * Fix cut & paste typo, and add LowStatsError catching in Profile division. 2015-08-11 Andy Buckley * Replace old Spirit-based ReaderYODA with the new hand-rolled one. * Adding filling of Histo1D, Profile1D, Histo2D and Profile2D in new ReaderYODA. More hacking of Axis and Histo/Profile interfaces... needs clean-up, and infinite binning implementation. 2015-08-07 Andy Buckley * Convert the ReaderFLAT parser to also use a simple hand-written parser rather than Spirit. 
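A hedged sketch of the non-emptiness veto described in the 2015-10-07 yodamerge note above; this illustrates the heuristic only (the function name is made up), not yodamerge's actual code:

    def merge_inputs(histos, veto_empty=True):
        # Empty inputs (sumW == 0) are vetoed by default, because a normalised
        # but unfilled histogram may be missing its ScaledBy annotation. The
        # ChangeLog notes a command-line switch to disable this, since a null
        # sumW does not always mean "no fills".
        if not veto_empty:
            return list(histos)
        return [h for h in histos if h.sumW() != 0]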
2015-08-04 Andy Buckley * Add methods for Counter, Axis and Histo1D internal state access/setting, mainly for new persistency. NEEDS PRE-RELEASE TESTING!!! 2015-07-30 Andy Buckley * Remove # markers from YODA format BEGIN/END output. The parser will continue to accept them. 2015-07-29 Andy Buckley * Add SFINAE trait magic to restrict write(RANGE) functions to accepting iterables. Also generalising to allow either container-of-objects or container-of-pointers args by providing a writeBody(AO*) function to complement writeBody(AO&). Based on a patch from Lukas Heinrich. * Add configuration of output streams to throw exceptions on bad/fail state (based on patch from Lukas Heinrich). 2015-07-01 Andy Buckley * 1.4.0 release. 2015-06-30 Andy Buckley * More tweaks to yodamerge: adding control of S2D merging strategy, and now performing weighted normalized histo merges without reference to an absolute normalization. * Change yodamerge norm-detection heuristic to just look for a ScaledBy attribute rather than fuzzily compare norms. 2015-06-26 Andy Buckley * Removing add, subtract, and divide functions and operators on Scatter types, and re-implementing Histo and Profile divide functions explicitly rather than via mkScatter. Also removed from the Python interface. The combine() methods remain. 2015-06-24 Andy Buckley * Adding workaround versions of binAt to all the Python histo types (for some reason the direct mapping that works for bin(i) produces a compile error for binAt(x) :-/ 2015-06-23 Andy Buckley * Renaming, tidying, completing, etc. the Python-mapped methods on Bin1D and Bin2D. * Rename Python Histo2D mean, variance, etc. pair-returning methods to xyMean, xyVariance, etc., to distinguish from Profile2D mean, variance, etc. * Add Python mappings of all the methods below. * Add full set of {x,y}{Mean,Variance,StdDev,StdErr,RMS} to 1D and 2D binned distributions. * Add optional includeoverflows=True argument to all binned AO numEntries and effNumEntries. * Fix type of numEntries to always be unsigned long. 2015-06-18 Andy Buckley * Fix typos in Point3D Python mapping (accidentally trying to get the ptr via _Point2D rather than _Point3D). 2015-06-13 Andy Buckley * Adding an AnalysisObject::name() method, to return the last part of the path. Mapped into a Python property. * Adding an optional usestddev argument to mkScatter for profile types, so the error bars can represent distribution width rather than uncertainty on the mean. Mapped to Python. 2015-06-08 Andy Buckley * Adding unpatterns arguments to Python read functions, and auto-conversion from single strings and re.compile()d strs. 2015-06-04 Andy Buckley * Add binAt(x,y) and binIndexAt(iglobal) Python methods for 2D histos. Still want a way to get and pass a pair of bin indices, I think. * Adding includeoverflows optional args for Histo1D (eff)numEntries. * Adding Rename Histo1D integral() methods as integral(), integralRange(), and integralTo(), and mapping to Python. 2015-06-02 Andy Buckley * Adding missing binAt and binIndexAt methods to Histo1D and Profile1D, plus other minor Python mapping tweaks. * Add a regex pattern match optional argument to the IO.read() Python functions, for pre-emptive filtering. 2015-03-27 Andy Buckley * Fix a harmless possibility to raise an FPE exception in the BinSearcher. Thanks to Leif Lonnblad for the discovery, debug and patch! 2015-03-19 Andy Buckley * Bump version for 1.3.1 release. 2015-03-06 Andy Buckley * Adding usefocus optional argument to some mkScatter functions, plus the Python bindings. 
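The usestddev and usefocus keywords mentioned in the 2015-06-13 and 2015-03-06 entries control what mkScatter puts into the output points. A hedged Python sketch; the booking signatures and exact keyword spellings are assumptions based on the ChangeLog wording:

    import yoda

    p = yoda.Profile1D(10, 0.0, 1.0)        # assumed booking: nbins, xlow, xhigh
    p.fill(0.15, 2.3)
    p.fill(0.15, 2.7)
    s_mean  = p.mkScatter()                 # y errors = uncertainty on the mean
    s_width = p.mkScatter(usestddev=True)   # y errors = width of the y distribution

    h = yoda.Histo1D(10, 0.0, 1.0)
    h.fill(0.42)
    s_mid   = h.mkScatter()                 # x at bin midpoints (the default since the 2013-09-22 change)
    s_focus = h.mkScatter(usefocus=True)    # x at the bin focus (weighted mean x of the fills)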
* Cleaning up some Python mappings of 2D histogram bin classes. * Removing mappings of bin-level fill and scale operations in Python. * Fix formatting and error handling in Python Bin and Dbn __repr__ methods. * Add a -i/--in-place option pair on yodascale. 2015-02-05 Andy Buckley * Convert script matching options to use re search rather than match. * Adding matching options and verbose option to yodals. 2015-01-27 Andy Buckley * Improvements and additions to ROOTCnv.h routines, particularly to TProfile creation: thanks to Roman Lysak for advice. 2015-01-16 Andy Buckley * Add convenience YODA/YODA.h header. 2015-01-15 Andy Buckley * yodascale now uses PointMatcher and can normalize or multiply to abs values or ref histos/bin ranges. 2015-01-05 Andy Buckley * Adding yoda.matcher Python sub-package with PointMatcher functionality. To be used in Professor 2.0 and in yodascale. * Adding 'scat2' type to yodahist. * Add match/unmatch args to all conversion scripts, via a new Python yoda.script_helpers function. * Script updates, improved docstrings, and improved tab completion. 2014-12-10 Andy Buckley * Add a yoda.plotting sub-module, based on matplotlib. 2014-12-03 Andy Buckley * Small build improvements: cleaning test1.root from the yoda2root test, and adding a make target & flag file for mktemplates in pyext/yoda to make sure that it only gets run once. 2014-11-25 Andy Buckley * Handle overflow filling in binned types without invoking an exception. * Change inRange to have non-fuzzy comparison behaviour. 2014-11-11 Andy Buckley * Improving/adding __div__ functions in Python for all binned types. * Add std:: prefix to isinf() calls in BinSearcher.h. 2014-09-30 Andy Buckley * 1.3.0 release! * Use numEntries() rather than effNumEntries() when checking consistency of inputs to efficiency() calculations -- the effNumEntries of a set can be smaller than that of a strict subset, surprisingly! 2014-09-17 Andy Buckley * Small improvements to yodahist and yodaplot behaviours/UIs. * Adding setX/Y/Z(val, err) methods to Point3D. * Add an efficiency method for 2D histos. * Hide fill and fillBin methods from Python mappings of bin types. 2014-09-01 Andy Buckley * YODA 1.2.1 release! 2014-08-29 Andy Buckley * Hide non-const access to bin objects from histogram users: avoids potential for inconsistency between total dbns and in-range bins. * Bug in BinSearcher fixed by Peter Richardson: constructor arguments were passed in the wrong order when constructing a LinEstimator in cases where log binning wouldn't be allowed. 2014-08-26 Andy Buckley * Add protection against / characters in histo names in yoda2root (thanks to Will Bell for the report and suggested patch). 2014-08-17 Andy Buckley * Add +=, -=, *-, /=, ++ and -- operators to Counter, along with an (implicit) constructor from a double -- all for user convenience so Counter can be used in lieu of a simple number. 2014-08-15 Andy Buckley * YODA 1.2.0 release! * Permit +-inf values to be filled into histograms; NaN fills will still explicitly throw an exception. * Add unit tests for Counter, Scatter1D, and Scatter2D, including persistency. * Adding YODA and FLAT format I/O for Scatter1D and Counter (as far as currently possible -- FLAT Counter can't be read due to a #item ambiguity, just like the one between the YODA format Point3D and ProfileBin1D). To be continued... 2014-08-14 Andy Buckley * Python mappings for Dbn0D and Counter, and other improvements. * Adding val() and err() methods to Counter, and errW() and relErrW() to all DbnXD types. 
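A small sketch of the Counter additions recorded in the 2014-08-14/17 entries (weighted fills, val() and err()); the constructor argument and the method-vs-property spelling are assumptions, since both varied over the versions covered by this log:

    import yoda

    c = yoda.Counter("/example/nevents")   # path argument assumed
    c.fill(0.8)                            # weighted fills
    c.fill(1.2)
    print(c.numEntries(), c.sumW(), c.val(), c.err())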
* Adding Scatter1D and Point1D types, with conversion from Counter supported. Both Counter and Scatter1D still need to be supported by YODA persistency. 2014-08-11 Andy Buckley * Add ROOT version checking to configure. Thanks to Michael Grosse for the report/request. 2014-08-05 Andy Buckley * Remove all methods not specific about the axis to which they refer, e.g. Histo1D::mean -> xMean. Also remove all related aliases (a nightmare to maintain) and low/highEdge and midpoint functions: use the proper xMin/Max/Mid from now on. This is a significant compatibility breaking API change (and the decision was not taken lightly) so will require a 2nd digit version change. * Lots of adding xMin/Max etc. functions to C++ and Python bin/histo classes. * Compiler pickiness fixes in BinSearcher. * Improvement to linspace, avoiding fuzzyEquals and again making sure that the end value is exact. 2014-07-23 Andy Buckley * Fix to logspace: make sure that start and end values are exact, not the result of exp(log(x)). * Clean-up, minor improvements, and adding a test for BinSearcher and friends. 2014-07-19 Andy Buckley * Various consistency improvements and minor bugfixes to Python mapping utils and Dbn and Bin objects. * Fix Axis2D::reset, which was resetting the total dbn and outflows, but not the bins!!! Thanks to Ewen Gillies for the report. 2014-07-18 Andy Buckley * Add scaleX,Y,Z and scaleXYZ to Point and Scatter classes, and deprecate less explicit/consistent Scatter2D/3D.scale method. 2014-07-17 Andy Buckley * yodascale now writes out rescaled histograms and profiles rather than scatters. * A few more improvements on Point2D/3D, adding x,y,zMin/Max function mappings. * Add first version of a yodascale script, based on code from Simone Amoroso. 2014-07-16 Andy Buckley * More Scatter and 2D histo interface improvement. * Remove 'return *this' from Scatter2D/3D add and combine methods. * Add unit test checks for 1D and 2D mkScatter functions. * Improve Scatter2D/3D C++ and Python interfaces. * Add Scatter3D Python mapping. 2014-07-15 Andy Buckley * Add auto-parsing of yes/no/on/off/true/false as bools in the Python ao.annotation() function. * Add parsing of yodaplot styles from command line args and analysis object annotations. 2014-07-12 Andy Buckley * Fix infinite recursions in Python wrappers for Point2D and Point3D, and make the Python Point3D interface more standard. * Add yodaplot script for basic plotting, using pgfplots as a backend. 2014-07-10 Andy Buckley * Add mkScatter(Scatter2D) and mkScatter(Scatter3D) functions and Python mappings: this allows all AOs to be used as args to mkScatter(...) without needing to check if they already are scatters. 2014-07-02 Andy Buckley * Set y value and/or error to 0 in mkScatter(Histo1D) if an exception is thrown when calculating the appropriate values. Need an optional param to control this error handling behaviour between set-zero and skip-bin? 2014-07-01 Andy Buckley * Add exception translation to the mkScatter functions. * Add -m/-M match/unmatch options to yodacnv -- useful for filtering histogram file contents in a YODA->YODA conversion. 2014-06-24 Andy Buckley * Don't complain about merge assumptions if there is only one object with that path name to be 'merged' 2014-06-17 Andy Buckley * Adding explicit int cast in Python wrapping of numEntries functions. 2014-06-13 Andy Buckley * Adding yodals script to list data file contents. 2014-06-11 David Grellscheid * pyext/yoda/Makefile.am: 'make distcheck' and out-of-source builds should work now. 
2014-06-10 Andy Buckley * Fix use of the install command for bash completion installation on Macs. 2014-06-06 Andy Buckley * YODA 1.1.0 release. Middle version number change to reflect API changes w.r.t 1.0.6. * Adding unit tests against ref data for yodamerge in make check. 2014-06-04 Andy Buckley * Fix silly typos in yodamerge which somehow made it past "make check" testing :-( 2014-06-02 Andy Buckley * YODA 1.0.7 release. DO NOT USE: prefer 1.1.0, above. 2014-05-30 Andy Buckley * Removing 'foreach' macro definition and using raw BOOST_FOREACH instead until C++11 is allowed. * Adding pytest-p1d and pytest-p2d tests, and FLAT writing/reading in pytests. * Tweaking WriterFLAT and adding ReaderFLAT functionality for 2D histos and profiles. 2014-05-30 Holger Schulz * Some basic (FLAT) write-out capability for 2D histos 2014-05-29 Andy Buckley * Improvements to the yodahist script, including support for 2D histograms and input files. * Adding bin edges constructors for all 1D and 2D histos in Python. 2014-05-22 Andy Buckley * Improvements to Profile2D and Point2D interfaces in Python. * Add a single-file at a time yodacnv multi-format converter script (thanks to Andrii Verbytskyi for the suggestion). 2014-05-19 Andy Buckley * Typo fixes in Profile2D YODA-format parsing: 2D histo tests now pass! * Adding Python tests for Histo2D and Profile2D. * Adding a YODA/Predicates.h header and using it in Axis2D bin edge construction. * Enabling Profile2D writing in various Writers, and a bit of IO code tidying. 2014-05-17 Andy Buckley * Disable writing out of Histo2D and Profile2D outflows for now, while they are redesigned, and get 2D I/O working for the in-range part. 2014-05-14 Andy Buckley * Mapping the divide and efficiency functions into the Python interface as class methods, including the __div__ special function. 2014-05-13 Andy Buckley * Add the AnalysisObject::type() method back in Python (even though type(ao) is more Pythonic, this may be useful) 2014-05-06 Andy Buckley * Add Profile2D YODA format writing, note need for Scatter format change, add sumXY storage to both 2D histo types. 2014-05-03 Andy Buckley * Adding YODA reader functionality for Histo2D and Profile2D, but without outflows support yet. 2014-04-25 Andy Buckley * Adding simple command line yodahist script for quick 1D histogramming from plain text files, with weight support. More development to come! * Mapping linspace, logspace and a few stat functions into Python. 2014-04-24 Andy Buckley * Fixes, script installation, and detailed numerical comparisons in yodadiff. 2014-04-17 Andy Buckley * Change AnalysisObject::annotations to return the list of annotation keys rather than the map, since the previous behaviour mapped very badly into Python. 2014-04-16 Andy Buckley * Add special case handling for 2-arg use of x2y scripts where the second arg is -, for stdin. This will be treated as writing out to stdout, not converting two files one of which is stdin. 2014-04-15 Andy Buckley * Adding a more portable version of getline to be used in the YODA file parsing to avoid falling over on DOS-produced input files. 2014-04-14 Andy Buckley * Adding the namespace protection workaround for Boost described at http://www.boost.org/doc/libs/1_55_0/doc/html/foreach.html 2014-04-13 Andy Buckley * Adding an assumed-equal-run-size, ratio-like merging heuristic for Scatter2Ds to yodamerge, and renaming the --normalize-all flag to --assume-normalized. * Adding and installing a pkg-config data file for YODA. * Rationalising (and fixing?) 
the yodamerge logic re. user & normalization scalings, and making way for a Scatter2D merging heuristic. 2014-03-10 Andy Buckley * YODA 1.0.6 release. 2014-03-06 Andy Buckley * Improvements to AnalysisObject annotation handling in Python. * Adding rescaling arguments to yodamerge (and scale function to Python Scatter2D). * Better documentation and consistency of Histo and Profile Python wrappers, and removing some inappropriate attributes. * Adding clone() and newclone() functions to all analysis object classes. 2014-02-28 Andy Buckley * Cython mapping improvements, esp. adding numPoints/numBins functions and better Scatter2D __repr__. * Adding mapping of the mkScatter functions into Python (as methods on Histo1D and Profile1D rather than the original free functions, at least for now: some Python type-identifying boilerplate is needed to make a single mkScatter function work in Python) 2014-02-27 Leif Lönnblad * Minor modifications to BinSearcher to avoid NaN's. The NaN's were treated correctly before, but better to avoid them altogether. 2014-02-27 Andy Buckley * Adding the generated pyext/yoda/rootcompat.cpp to the tarball and sorting out the ROOT/Cython interface conditionals a bit more sanely. Thanks to Oldrich Kepka for the bug report. * Protecting yodamerge against input histograms with zero integrals (thanks to Christian Bierlich for the bug report). 2014-02-14 Frank Siegert * Fix race condition with mktemplates. 2014-02-12 David Grellscheid * Fix bug in mktemplates code (thanks to Christian Johnson for the bug report). 2014-02-09 Andy Buckley * Adding explicit include/generated dir creation to Python extension build (thanks to Christian Johnson for the bug report). 2014-02-06 Andy Buckley * 1.0.5 release! 2014-02-05 Andy Buckley * Adding patches to ReaderFLAT and ReaderYODA use of Boost Spirit which reduce the Boost version requirement from 1.47 -> 1.41. Thanks to Andrii Verbytskyi for the patch. * Protect against invalid prefix value if the --prefix configure option is unused. 2014-02-04 Andy Buckley * Adding copy assignment operators where missing, based on an implementation in AnalysisObject which only copies rvalue paths and titles if they are non-null. 2014-02-03 Andy Buckley * Improving (i.e. increasing) bin edge overlap tolerance: 1e-10 relative was too tight. 2014-01-31 Andy Buckley * Adding x/yMid etc. methods on Bin1D and 2D, and more related Python API improvements. 2014-01-28 Andy Buckley * Adding missing fillBin methods to 1D and 2D Histo/Profile Python classes. * Fixed yodamerge default output file name treatment. * Avoid computing an unrecoverable error in Histo1D.__repr__ * Clean-ups and API improvements in Python IO functions. * Adding more sumW,W2 and (eff)NumEntries attrs to Python objects. 2013-12-17 Andy Buckley * Improved argument handling for x2y scripts. 2013-11-16 Andy Buckley * Fix to build the Cython rootcompat extension .cpp on request. 2013-11-14 Andy Buckley * Adding flags for the C++11 or C++0x standard if supported, cf. Rivet. 2013-10-24 Andy Buckley * YODA 1.0.4 release. * Supporting zsh completion via bash completion compatibility. 2013-10-21 Andy Buckley * Removing unused internal iterator typedefs from Writer functions. 2013-10-18 Andy Buckley * Adding a yodaenv.sh sourceable script to help with environment setup. * Remove Scatters from being handled by yodamerge by blocking the __add__ method fallback. 2013-10-09 Andy Buckley * Improvements to yoda-config and command-line completion, for the Rivet 2.0.0 release.
2013-10-09 Andy Buckley * Version 1.0.3 release. 2013-10-04 Andy Buckley * Cython mapping improvements. * Adding some improved heuristics and a --normalize-all option to yodamerge. Frank S is now happy again ;-) 2013-10-01 Andy Buckley * Adding operator +, -, +=, -= Python mappings wherever possible for Histo1/2D, Profile1/2D, and Scatter2D. 2013-09-26 Andy Buckley * Cython is no longer needed by tarball users. 2013-09-25 Andy Buckley * Unset path of returned histogram if those of the args to add() and subtract() are different. 2013-09-24 Andy Buckley * Python mapping improvements. 2013-09-23 Andy Buckley * Add the -avoid-version flag to libtool. * Adding more add and subtract special methods in Python. 2013-09-22 Andy Buckley * mkScatter changed to use histo midpoints rather than focuses by default for the point x value. 2013-08-14 Andy Buckley * Version 1.0.2. * Some exception message improvements and improving the protection of cosmetic mean calculations in WriterYODA. 2013-07-12 Andy Buckley * Adding ROOT detection in configure and otherwise updating Dave's rootcompat module so that it'll compile. There might be an inconvenient ROOT version dependency in the signature of one of the PyROOT API functions that is used as a shim :-( 2013-06-17 Andy Buckley * Adding yoda.m4 from James Robinson. 2013-06-06 Hendrik Hoeth * Improve "==" operator in Axis1D and Axis2D 2013-06-06 Andy Buckley * Adding fillBin() methods to all 1D and 2D histos, and noting that Bin types need a back-link to their axis to maintain consistency. * Release of version 1.0.1 2013-06-05 Andy Buckley * Change the divide(Scatter, Scatter) behaviours to use the midpoint of the num/denom bins rather than mean of foci for the output point position (and hence errors, too). * Adding a toIntegralEfficiencyHisto function. * Adding another Histo1D::integral() function, this time from 0 -> i, maybe including the underflow. 2013-06-04 Andy Buckley * Updating the Cython version requirement to 0.18 2013-06-03 Andy Buckley * Adding relErr functions to 1D and 2D histo and profile bins, and being careful about div by zero. * Improvements in error treatment in division (better handling of zeros). * Renaming merge-histos to yodamerge and installing it (and improving the usage string a bit). 2013-05-31 Andy Buckley * Adding the Counter type, and ability to output it from the YODA writer. * Adding numEntries and effNumEntries methods to 1D and 2D Histo and Profile classes. * Adding Dbn0D and using it to implement Dbn1D. 2013-05-30 Andy Buckley * Fixing several nasty errors in argument ordering for Point{1,2,3}D construction in Scatter addPoint functions. * Adding abs(...) to the returned Dbn1D::variance(), to avoid problems when negative weights produce negative variance. No, we don't like this either: is there a more correct way? * Fixing the efficiency(Histo1D, Histo1D) implementation, cf. http://root.cern.ch/phpBB3/viewtopic.php?t=3753 * Adding mkScatter(Profile2D) 2013-05-29 Hendrik Hoeth * Adding a reader for FLAT files and a flat2yoda converter 2013-05-27 Andy Buckley * Adding a yoda-completion file for bash. 2013-05-17 Andy Buckley * Mapping HistoBin1D.relErr in Python. * Adding a non-const points() accessor to Scatter2D. 2013-05-13 Andy Buckley * Adding combined value+error setX/Y functions on Point2D. * Adding HistoBin1D::relErr() 2013-04-23 Andy Buckley * Adding Python output handling for single AOs and to be able to use a "-" filename to mean stdout. 2013-04-12 Andy Buckley * Releasing version 1.0.0 -- it seems stable enough.
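To make the negative-variance concern in the 2013-05-30 Dbn1D entry concrete, here is a toy accumulator tracking the same kind of weighted moments; the formulas are the standard weighted-moment ones, not YODA's exact implementation:

    class ToyDbn1D:
        # Illustration only: weighted moments of an unbinned 1D distribution.
        def __init__(self):
            self.sumW = self.sumW2 = self.sumWX = self.sumWX2 = 0.0
        def fill(self, x, w=1.0):
            self.sumW   += w
            self.sumW2  += w * w
            self.sumWX  += w * x
            self.sumWX2 += w * x * x
        def xMean(self):
            return self.sumWX / self.sumW
        def xVariance(self):
            # With negative weights this difference can go negative, hence the
            # abs(...) guard mentioned in the 2013-05-30 entry.
            return abs(self.sumWX2 / self.sumW - self.xMean() ** 2)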
2013-04-10 Andy Buckley * Being more careful about adding -Wno-* flags to the C++ compiler used to build the Cython extension lib. 2013-03-22 Andy Buckley * Removing the use of svn:external to pull in Boost macros and using a minimal local set instead. * Using the nice Boost-finding macros from https://github.com/tsuna/boost.m4 and tidying up configure.ac 2013-03-15 Andy Buckley * Re-organising the C++ side of the auto-format I/O functions, into a new IO header and separated from the Reader.h and Writer.h. I'm tempted to say that users shouldn't really NEED to ever directly touch the Reader and Writer objects... * Adding auto-format read and write functions. I will probably change the API. Python mappings have been provided, but the string workarounds were too much of a pain with Cython 0.16 so I have updated the Cython version requirement to 0.17 where it is automatic and hence much cleaner. 2013-03-08 Andy Buckley * Making the x2y converter scripts write a copy into the *current* directory if only the input is specified. 2013-03-05 Andy Buckley * Removing Plot entirely from YODA: it was an anomaly only added to make plot file generation easy, but this is now done better via StringIO (in new compare-histos/rivet-cmphistos). * Removing the Plot from Cython... and soon from YODA itself: we'll do this stuff manually and less hackily. * Make Cython automatically add a copy of the original call signature to each function's docstring. 2013-03-04 Andy Buckley * Adding Plot mapping to Cython and improving the AO annotations handling in Python. * Adding PLOT section writing to WriterFLAT (and WriterYODA, although that might be a bad idea...) * Adding aida2yoda and aida2flat converter scripts. 2013-02-02 David Mallows * Adding support for Python >= 2.4 (was Python >= 2.6) * Fixing miscellaneous warnings on GCC 4.1 2013-01-30 Andy Buckley * Adding a points() method to the Python Scatter2D wrapper. * Adding a virtual destructor to Bin. 2012-12-30 Andy Buckley * Adding support for Boost.Range arguments and file format autodetection in Writer. 2012-11-24 Andy Buckley * Bump version to 0.6beta0 * Adding more ROOT converters. Who knows how to make TProfiles from scratch, but Histo1D and Scatter2D are covered, which should be enough to get started with, at least. 2012-11-16 Andy Buckley * Adding yoda-config 2012-11-16 Hendrik Hoeth * Adding WriterFLAT and yoda2flat 2012-11-16 Andy Buckley * Adding YODA/ROOTCnv.h for data object converter functions. Two (untested) functions added for TH1 -> YODA. * Adding toIntegralHisto(Histo1D&) function. 2012-11-15 Dave Mallows * Committed numerous changes to Axis2D. Axis2D now uses BinSearcher as with Axis1D. 2012-11-15 Andy Buckley * Improving division and efficiency treatments, and allowing arbitrary f(x), f(y), and flip transformations on Scatter2D. 2012-11-14 Andy Buckley * Converting linspace, logspace, and their usage to place the nbins argument first. 2012-08-07 Andy Buckley * Removing unused (beyond 2nd order) sumWXYZ counter from Dbn3D. 2012-08-07 Dave Mallows * Converted Axis1D to use new Utils/BinSearcher. 2012-08-02 Dave Mallows * Heavily refactored Cython bindings * HistoBin1D, ProfileBin1D etc. now inherit from Bin1D[DBN] * Temporarily removed Histo2D, Profile2D and Scatter3D mappings. 2012-07-23 Andy Buckley * Installing scripts from bin dir, and making the yoda2aida interface nicer. * Adding Cython mappings for Dbn3D and Profile2D, and other fixes/improvements. 2012-07-22 Andy Buckley * Adding Cython mappings for Scatter3D and ProfileBin2D.
* Fixing more crap code legacy from old 2D plot implementation, this time in Scatter3D. 2012-07-19 Andy Buckley * Adding stdErr for Histo2D + Python mapping, and more Cython improvements. * Adding path/title-only AO constructors and making nice Python constructor for Histo2D. * Cython mapping improvements & additions for Point3D + Scatter3D. * Removing mixed symm/asymm constructors on Point*D & Scatter*D classes. 2012-07-12 Andy Buckley * Reintroducing Profile2D and Scatter3D. * Adding axis locking to Axis2D. * Supporting Histo2D in WriterYODA. 2012-07-02 Andy Buckley * More incremental progress toward a working 2D bin hash mechanism. 2012-05-03 Andy Buckley * Adding nice constructor behaviours to the Histo1D and Profile2D Python interfaces, and adding the mkScatter operation for Profile1D. * Adding more default constructors for analysis objects, to allow member variable and STL container use without pointers. 2012-05-02 Andy Buckley * A much simplified and more robust rewrite of the Axis1D class, just using STL map in place of the hand-written bin edge caching. * Improvements (I hope) to the binary search in Axis1D, and providing an experimental default constructor for Histo1D. 2011-12-08 Hendrik Hoeth * ReaderYODA can now parse Histo1D and Profile1D flat files 2011-12-08 Andy Buckley * Adding a Utils::ndarray object and using it to implement a general Scatter system, with generalised Point and Error to boot. 2011-12-07 Hendrik Hoeth * Lots of cleanup 2011-12-07 Andy Buckley * Mapping the Dbn1D and Dbn2D classes into Python. * Adding an outflows() accessor to Histo2D. * Writing out total dbn lines for Histo1D and Profile1D in the YODA format, and now writing out the 'cross-terms' for Profile1D, too. * Properly adding Dbn1D accessors for Histo1D. * Updating the Cython mappings to provide the totalDbn() methods and add a placeholder mapping for Dbn2D. Completed mappings are needed for Dbn{1,2,3}D and the Profile types. * Adding totalDbn() accessors to data types. 2011-12-06 Andy Buckley * Making Histo1D/2D::scaleW() write a ScaledBy annotation. * Adding annotation-fetching methods with a default return value argument to AnalysisObject. * Adding normalize() methods to Histo1D/2D. * Adding weighted RMS calculating methods to Dbn1D, Dbn2D and Bin1D/2D. 2011-09-03 Dave Mallows * Fixed ReaderAIDA: x-value and low y-error interchanged when filling Scatter2D. * Changed to Cython for Python bindings: Swig bindings were in need of serious amounts of work. Cython should provide a means to provide more Pythonic bindings to YODA. A minimal subset of ReaderAIDA, Scatter2D and Point2D have been wrapped. * Modified configure.ac, Makefile.am and pyext/Makefile.am to reflect change to Cython. Added cython.m4 from python-efl (Part of the enlightenment project; LGPL) 2011-08-31 Dave Mallows * Fixed python tests by installing python extension to pyext/build 2011-08-23 Andy Buckley * Adding rebinning interface to Histo1D and Profile1D, and adding a test (and a new test feature for output message formatting) * Adding first implementation of 1D bin merging to Axis1D. 2011-08-22 Andy Buckley * Adding copy constructors and assignment operators to Histo1D, Profile1D, and Scatter2D, and their respective bins/points. * Remove use of sign(weight) in filling sum(w2) -- I think this was an historical attempt based on a scaling axiom which turned out to be inappropriate. 
* Reworking the Bin1D inheritance and composition design so that all bin types store a single distribution object -- a Dbn1D for histo bins and a Dbn2D for profile bins. 2011-08-18 Andy Buckley * Removing the Profile1D -> ProfileBin1D friendship. This is very heartening -- the fewer friend declarations we need, the more indication that the class structure is not pathological! (Or that we've just made everything public... but we haven't) 2011-08-15 Andy Buckley * Inlining all functions in HistoBin1D, ProfileBin1D, and HistoBin2D. * Converting Dbn2D to be composed of two Dbn1Ds and a cross-term. Also tidying the interfaces of the 2D classes and the scaleX/Y methods throughout, and adding Doxygen comments. 2011-08-12 Andy Buckley * Adding proper Doxygen structures and full descriptive comments to Dbn2D. * Adding the persistency state-setting constructors for Profile1D and Dbn2D. * Inlining lots of methods on Dbn1D. Same should be done for Dbn2D, but first it needs to be reimplemented in terms of two Dbn1Ds + the cross-term. 2011-08-11 Andy Buckley * Various typo fixes and comments relating to persistency constructors, Histo2D slicing, etc. * Changing the HistoBin1D state-setting constructors (aargh, these should *not* have already existed) to take Dbn1D as an argument rather than a long list of doubles. 2011-08-01 Andy Buckley * Adding tests to check that implicit construction of Weights objects from literal doubles and ints works. 2011-07-28 Andy Buckley * Bumping version number to 0.4.0beta0 -- there have been substantial changes recently and YODA is now in a state where it should be interesting for outsiders to start playing with it. * Templating the Axis1D on the distribution type to be used for total and under/overflow statistics: Profile1D now has Dbn2D objects handling its total and overflow statistics. 2011-07-26 Andy Buckley * Added a Histo1D::integral(index1, index2) method. Not sure how or if to extend this to Profile1D. * Implementing incomplete Scatter2D operator+ and operator- functions. 2011-07-25 Andy Buckley * Adding a Weights class, designed to seamlessly replace double-type weights and weighted moments with a named and vectorised form. 2011-07-19 Andy Buckley * Add Profile1D and Scatter2D division operators. * Add xMin/xMax synonyms to the Axis1D, cf. the bins. 2011-07-18 Andy Buckley * Add a first stab at a Histo1D/Histo1D division operator. 2011-07-10 Andy Buckley * Add construction of Histo1Ds from Profile1D and Histo1D, and construction of Profile1Ds from those and Scatter2D. 2011-07-07 Andy Buckley * Add construction of a Histo1D from Scatter2D. 2011-06-15 Andy Buckley * Making the AIDA reader work, including reading of annotations and a few tweaks to the simple type persistency system. Test histo 1b updated. 2011-06-12 Andy Buckley * Removing Histo1D::area * Filling and using under/overflow and total db on Histo1D, and adding boolean arg to integral, sumW, etc. * Fixing for C++ change in behaviour of std::make_pair * Adding addAnnotation, and mapping annotations to Python. 2011-02-22 Andy Buckley * Use distutils rather than setuptools for the Python interface build. * Renaming Bin, HistoBin and ProfileBin to be Bin1D, HistoBin1D, ProfileBin1D. Bin is now a top-level abstract class with minimal functionality to be shared between 1D and 2D bins. 2011-01-12 Andy Buckley * Type annotations in mkScatter * Added many vector constructors and addPoint functions to Scatter2D. 2011-01-11 Andy Buckley * Add lexical_cast support to annotation get and set functions. 
* Write out annotations in AIDA format, and copy annotations in mkScatter -- using a new AnalysisObject::setAnnotations method. * Convert DPS output to use interim Scatter construction * Make (unused) yoda.plot subpackage. * Write out annotations in YODA format. * Make Scatter2D representations of Histo1D and Profile1D. * Write out Scatter2D objects in AIDA and YODA formats. * Make Scatter2D and Point2D work. Add a few extra methods... evolution and tweaking required. 2011-01-10 Andy Buckley * Add Boost checks and header includes. Not used yet. * Hide Utils:: content from Doxygen and nvector -> Utils::nvector. * Removing unused YAML stuff: we aren't going that way for persistency anymore. * Renaming Axis -> Axis1D * Removing dead-end templated Scatter stuff. * Move (generated) config files into the Config subdir. * Move sortedvector and indexedset into the Utils dir. * Move the "utils" directory and namespace to "Utils" * Put the Doxyfile under configure control by moving it to Doxyfile.in and using the @PACKAGE_VERSION@ token. * Make Doxygen find the .icc file and hide functions with name _* and in the YAML namespace. * Removing the Binning argument and enum in favour of explicit bin edge vectors, possibly produced explicitly via the MathUtils linspace and logspace functions, or the new Axis::mkBinEdgesLin/Log alias functions. * Fixed Axis, Histo1D and Profile1D constructors, by adding a path argument, passing the path and title args to the AnalysisObject base constructor properly. * Removed several old and unused files such as Tree.h 2011-01-09 Andy Buckley * Updating copyright comments to be valid into 2011. * Persistency fixes, and changing the interface to use the annotated path. * Using annotations for path and title. * Adding tests of collection and iterator range AO writing. * Adding static write functions on Writer*.h implementations to avoid needing to make an explicit Writer object via the create() functions. * Rename Exception.h -> Exceptions.h * Added AnnotationError. * Re-enable persistency of collections with begin/end iterators. 2011-01-08 Andy Buckley * Enabling quiet compilation. * More annotation functionality. 2011-01-07 Andy Buckley * Sorting out autoheaders to be more useful. * Rewriting AIDA writer to use DPS representation (no reloading) for Histo1D and Profile1D objects. * Adding persistency system hooks, since RTTI just sucks too much. * Renaming test files to have more meaningful names. 2010-12-10 Andy Buckley * Some tweaks to Axis, Bin, etc. to use the sortedvector. Seems to be working! (I must be checking it wrongly...) * Adding another candidate object for the axis bin container: a sorted extension to STL std::vector with an insert method. This will do as a development placeholder: a proper sorted & indexed container may be substituted later. * Fix test code: titles are no longer given as histogram constructor arguments. 2010-11-21 Andy Buckley * Adding indexed set for holding bins on axes. Still not sure it's what we want, as (I just realised) STL sets are iterator-immutable because they are self-keyed and changes to elements would also change their sorting. 2010-09-19 Andy Buckley * Restarting ChangeLog contributions! Many changes in the huge time since last update... activity on YODA has renewed and we have a better picture of the distinctive features we require. New idea: named weight vector filling, allowing "parallel" histograms for various event weight variations. 
I/O remains an awkward issue, especially since the classes are now much richer than they used to be, and don't know about paths. Output can be easily put on top: not an issue... and we can probably do something with pickling. But reading in from C++? 2008-09-16 Andy Buckley * Moved duplicate Histo1D/Profile1D code on to Axis, making Axis a templated class at the same time. 2008-09-12 Andy Buckley * Started work on a little plotting tool, initially for Herwig++ parton pT cut testing, but incrementally enhancing it to be a command-line quick plotter seems like a good idea. * Added some more test programs... working towards a proper test suite. * Added "no path & title" constructors - you don't always want to write out the histo, since sometimes it's just a good way to gather statistics. * Fixed YODA mapping to allow use of vectors of bins as Python lists. * Added Profile1D functionality. * Fixed Dbn1D to use sign(weight) as part of the "w**2" measure, so that negative weights behave themselves. 2008-05-23 Andy Buckley * Added Dbn1D class to centralise the calculation of statistics from unbounded, unbinned, weighted distributions. 2008-05-15 Andy Buckley * Added Profile1D class. * Fixed NaN errors from zero weights. 2008-04-14 Andy Buckley * Python SWIG interface now compiles and can be used: the subtlety that was breaking it was that SWIG has to be prodded in pretty non-obvious ways to make std::vectors of classes without default (no-arg) constructors. See http://osdir.com/ml/programming.swig/2004-04/msg00011.html for about the only reference to this to be found anywhere! * Basic AIDA writer now available - it doesn't yet output all the necessary information though, especially not for merging parallel runs. diff --git a/bin/yodacmp b/bin/yodacmp --- a/bin/yodacmp +++ b/bin/yodacmp @@ -1,346 +1,346 @@ #! /usr/bin/env python """\ %(prog)s - generate histogram comparison plots USAGE: %(prog)s [options] yodafile1[:'Option1=Value':'Option2=Value':...] [path/to/yodafile2 ...] [PLOT:PlotOption1=Value:...] 
""" import yoda, sys, os def main(): import argparse parser = argparse.ArgumentParser(usage=__doc__) parser.add_argument("ARGS", nargs="+", help="YODA data files with optional colon-separated option strings") parser.add_argument("-o", "--outdir", dest="OUTDIR", default=".", help="write data files into this directory") # parser.add_argument("--hier-out", action="store_true", dest="HIER_OUTPUT", default=False, # help="write output dat files into a directory hierarchy which matches the analysis paths") # parser.add_argument("--plotinfodir", dest="PLOTINFODIRS", action="append", # default=["."], help="directory which may contain plot header information (in addition " # "to standard Rivet search paths)") stygroup = parser.add_argument_group("Plot style") # stygroup.add_argument("--refid", dest="REF_ID", # default="REF", help="ID of reference data set (file path for non-REF data)") # stygroup.add_argument("--linear", action="store_true", dest="LINEAR", # default=False, help="plot with linear scale") # stygroup.add_argument("--mc-errs", action="store_true", dest="MC_ERRS", # default=False, help="show vertical error bars on the MC lines") # stygroup.add_argument("--no-title", action="store_true", dest="NOPLOTTITLE", # default=False, help="don't show the plot title on the plot " # "(useful when the plot description should only be given in a caption)") # stygroup.add_argument("--style", dest="STYLE", default="default", # help="change plotting style: default|bw|talk") stygroup.add_argument("-c", "--config", dest="CONFIGFILES", action="append", default=[], #["~/.make-plots"], help="additional plot config file(s). Settings will be included in the output configuration.") # TODO: re-enable the pattern matching, and _maybe_ the variants on ref-only plotting if we don't have a plotting system via YODA soon # selgroup = parser.add_argument_group("Selective plotting") # selgroup.add_argument("--show-single", dest="SHOW_SINGLE", choices=("no", "ref", "mc", "all"), # default="mc", help="control if a plot file is made if there is only one dataset to be plotted " # "[default=%(default)s]. If the value is 'no', single plots are always skipped, for 'ref' and 'mc', " # "the plot will be written only if the single plot is a reference plot or an MC " # "plot respectively, and 'all' will always create single plot files.\n The 'ref' and 'all' values " # "should be used with great care, as they will also write out plot files for all reference " # "histograms without MC traces: combined with the -R/--rivet-refs flag, this is a great way to " # "write out several thousand irrelevant reference data histograms!") # selgroup.add_argument("--show-mc-only", "--all", action="store_true", dest="SHOW_IF_MC_ONLY", # default=False, help="make a plot file even if there is only one dataset to be plotted and " # "it is an MC one. Deprecated and will be removed: use --show-single instead, which overrides this.") # # selgroup.add_argument("-l", "--histogram-list", dest="HISTOGRAMLIST", # # default=None, help="specify a file containing a list of histograms to plot, in the format " # # "/ANALYSIS_ID/histoname, one per line, e.g. '/DELPHI_1996_S3430090/d01-x01-y01'.") selgroup.add_argument("-m", "--match", action="append", help="only write out histograms whose $path/$name string matches these regexes. 
The argument " "may also be a text file.", dest="PATHPATTERNS") selgroup.add_argument("-M", "--unmatch", action="append", help="exclude histograms whose $path/$name string matches these regexes", dest="PATHUNPATTERNS") ## Parse command line args into filenames and associated annotations args = parser.parse_args() fnames, fname_anns = [], [] for a in args.ARGS: fname, anns = parse_arg(a) fnames.append(fname) fname_anns.append(anns) ## Read .plot files # TODO: Order, dict merging, etc. import re plotkeys = {} for pf in args.CONFIGFILES: plotkeys.update( yoda.plotting.read_plot_keys(plotfile) ) ## Extract PLOT keys plotanns = {} if "PLOT" in fnames: i = fnames.index("PLOT") plotanns = fname_anns[i] del fnames[i] del fname_anns[i] ## Load data objects into a dict of list[AO]s, and apply annotations # TODO: do the path pattern matching 'inline' here? aolists = {} for i, (fname, anns) in enumerate(zip(fnames, fname_anns)): aos = [ ao.mkScatter() for ao in yoda.read(fname, asdict=False) ] for ao in aos: # TODO: Allow Rivet to strip /REF prefixes in its use of this API - p = ao.path().replace("/REF", "") + p = ao.path.replace("/REF", "") ## Apply .plot patterns first for patt, keys in plotkeys.keys(): if re.match(patt, p): for k, v in keys.items(): ao.setAnnotation(k, v) ## Then command line annotation overrides for k, v in anns.items(): ao.setAnnotation(k, v) ## Obsfucate the path for uniqueness # TODO: Tidy fname slashes etc. for this purpose - ao.setPath(p + "@" + fname) + ao.path = p + "@" + fname ## The first file is used as the reference # TODO: Use arg[0] or --refid for identifying the ref histo (if there is one) if i == 0: ao.setAnnotation("RatioRef", "yes") ## Label with the original filename ao.setAnnotation("Origin", fname) ## Add to plotting dict aolists.setdefault(p, []).append(ao) # for p, aos in sorted(aolists.items()): print(p, len(aos)) ## Apply path pattern match discarding from dict args.PATHPATTERNS = [re.compile(r) for r in args.PATHPATTERNS] if args.PATHPATTERNS else [] args.PATHUNPATTERNS = [re.compile(r) for r in args.PATHUNPATTERNS] if args.PATHUNPATTERNS else [] keylist = list(aolists.keys()) for path in keylist: # can't modify for-loop target in loop useThis = True if args.PATHPATTERNS: useThis = False for regex in args.PATHPATTERNS: if regex.search(path): useThis = True break if useThis and args.PATHUNPATTERNS: for regex in args.PATHUNPATTERNS: if regex.search(path): useThis = False break if not useThis: del aolists[path] # for p, aos in sorted(aolists.items()): print(p, len(aos)) ## Loop over unique paths, plotting in order of command-line appearance # TODO: bind default color/style cycling to the filenames ## Now loop over all MC histograms and plot them for path, aos in aolists.items(): ## Find and move the reference plot has_ref = False for i, ao in enumerate(aos): if ao.annotation("RatioRef", False): aos.insert(0, aos.pop(i)) has_ref = True break ## Plot object for the PLOT section in the .dat file plot = Plot() plot["Legend"] = has_ref plot["LogY"] = True # TODO: make dynamic? ## ## Apply PLOT annotations from command line # TODO: also handle .plot file annotations via plotanns? 
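# Illustrative sketch (not part of this script): the -m/-M handling above keeps a
# histogram path only if it matches at least one --match regex and no --unmatch
# regex. The same rule, written as a standalone, hypothetical helper:
import re

def keep_path(path, match_res, unmatch_res):
    "Return True if 'path' passes --match/--unmatch style regex filters."
    if match_res and not any(r.search(path) for r in match_res):
        return False
    if unmatch_res and any(r.search(path) for r in unmatch_res):
        return False
    return True

# e.g. keep_path("/MY_ANALYSIS/d01-x01-y01", [re.compile(r"d0\d")], []) returns True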
for k, v in plotanns.items(): plot[k] = v ## # for key, val in plotparser.getHeaders(h).items(): # plot[key] = val # if args.LINEAR: # plot["LogY"] = "0" # if args.NOPLOTTITLE: # plot["Title"] = "" ## # if args.STYLE == "talk": # plot["PlotSize"] = "8,6" # elif args.STYLE == "bw": # if args.RATIO: # plot["RatioPlotErrorBandColor"] = "black!10" ## Style the histos # TODO: Make style and label more customisable for i, ao in enumerate(aos): if ao.annotation("RatioRef", False): ao.setAnnotation("ErrorBars", "1") ao.setAnnotation("Marker", "o") ao.setAnnotation("Title", "Data") # TODO:improve ao.setAnnotation("Color", "black") # TODO:improve # ao.setAnnotation("ConnectBins", "0") # TODO: add else: setStyle(ao, i) # ## Loop over the MC files to plot all instances of the histogram # styleidx = 0 # for infile in filelist: # if mchistos.has_key(infile) and mchistos[infile].has_key(h): # ## Default linecolor, linestyle # setStyle(mchistos[infile][h], styleidx) # styleidx += 1 # if args.MC_ERRS: # mchistos[infile][h].setAnnotation("ErrorBars", "1") # ## Plot defaults from .plot files # for key, val in plotparser.getHistogramOptions(h).items(): # mchistos[infile][h].setAnnotation(key, val) # ## Command line plot options # setOptions(mchistos[infile][h], plotoptions[infile]) # mchistos[infile][h].setAnnotation("Path", infile + h) # anaobjects.append(mchistos[infile][h]) # drawonly.append(infile + h) # if args.RATIO and ratioreference is None: # ratioreference = infile + h # if args.RATIO and len(drawonly) > 1: # plot["RatioPlot"] = "1" # plot["RatioPlotReference"] = ratioreference ## Create the output. We can't use yoda.writeFLAT because PLOT and SPECIAL aren't AOs from io import StringIO sio = StringIO() yoda.writeFLAT(aos, sio) output = str(plot) + sio.getvalue() ## outpath = path.strip("/").replace("/", "_") + ".dat" # TODO: tidy up with open(outpath, "w") as of: of.write(output) def parse_arg(arg): "Function to parse a command line arg and return the filename + dict of command-line annotations" argparts = arg.split(":") fname = argparts[0] anns = {} for ann in argparts[1:]: if "=" in ann: aname, aval = ann.split("=") anns[aname] = aval else: anns["Title"] = ann return fname, anns class Plot(dict): "A tiny Plot object to help writing out the head in the .dat file" def __repr__(self): return "# BEGIN PLOT\n" + "\n".join("%s=%s" % (k,v) for k,v in self.items()) + "\n# END PLOT\n\n" def setStyle(ao, index): """Set default plot styles (color and line width)""" # Colors: red (Google uses "DC3912"), blue, green, orange, lilac LINECOLORS = ["#EE3311", "#3366FF", "#109618", "#FF9900", "#990099"] LINECOLORS = ["Blue", "Red", "Green", "DarkOrange", "DarkOrchid"] LINESTYLES = ["-", "--", "-.", ":"] # if args.STYLE == "talk": # ao.setAnnotation("LineWidth", "1pt") # if args.STYLE == "bw": # LINECOLORS = [0.9, 0.5, 0.3] c = index % len(LINECOLORS) s = index // len(LINECOLORS) if not ao.hasAnnotation("LineStyle"): ao.setAnnotation("LineStyle", "%s" % LINESTYLES[s]) if not ao.hasAnnotation("Color"): ao.setAnnotation("Color", "%s" % LINECOLORS[c]) def mkoutdir(outdir): "Function to make output directories" if not os.path.exists(outdir): try: os.makedirs(outdir) except: msg = "Can't make output directory '%s'" % outdir raise Exception(msg) if not os.access(outdir, os.W_OK): msg = "Can't write to output directory '%s'" % outdir raise Exception(msg) if __name__ == "__main__": main() # def parseArgs(args): # """Look at the argument list and split it at colons, in order to separate # the file names from the plotting 
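# Illustrative sketch (not part of this script): how the parse_arg helper defined
# above splits a command-line argument into a file name plus an annotation dict
# (the argument string here is hypothetical):
fname, anns = parse_arg("run1.yoda:Title=MC run 1:Color=red")
assert fname == "run1.yoda"
assert anns == {"Title": "MC run 1", "Color": "red"}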
options. Store the file names and # file specific plotting options.""" # filelist = [] # plotoptions = {} # for a in args: # asplit = a.split(":") # path = asplit[0] # filelist.append(path) # plotoptions[path] = [] # has_title = False # for i in xrange(1, len(asplit)): # ## Add "Title" if there is no = sign before math mode # if not "=" in asplit[i] or ("$" in asplit[i] and asplit[i].index("$") < asplit[i].index("=")): # asplit[i] = "Title=%s" % asplit[i] # if asplit[i].startswith("Title="): # has_title = True # plotoptions[path].append(asplit[i]) # if not has_title: # plotoptions[path].append("Title=%s" % sanitiseString(os.path.basename( os.path.splitext(path)[0] )) ) # return filelist, plotoptions def writeOutput(output, h): "Choose output file name and dir" hparts = h.strip("/").split("/") if args.HIER_OUTPUT: ana = "_".join(hparts[:-1]) if len(hparts) > 1 else "ANALYSIS" outdir = os.path.join(args.OUTDIR, ana) outfile = "%s.dat" % hparts[-1] else: outdir = args.OUTDIR outfile = "%s.dat" % "_".join(hparts) mkoutdir(outdir) outfilepath = os.path.join(outdir, outfile) f = open(outfilepath, "w") f.write(output) f.close() diff --git a/bin/yodadiff b/bin/yodadiff --- a/bin/yodadiff +++ b/bin/yodadiff @@ -1,190 +1,190 @@ #! /usr/bin/env python """\ %(prog)s [-o outfile] Compare analysis objects between two YODA-readable data files. """ from __future__ import print_function import yoda, sys, argparse from yoda.script_helpers import filter_aos parser = argparse.ArgumentParser(usage=__doc__) parser.add_argument("ARGS", nargs=2, help=" ") parser.add_argument("-o", "--output", default="-", dest="OUTPUT_FILE", help="write output to the given file (default: stdout)") parser.add_argument("-t", "--tol", type=float, default=1e-5, dest="TOL", help="relative tolerance of numerical difference permitted before complaining (default: %(default)s)") parser.add_argument("-l", "--list", action="store_true", default=False, dest="LIST", help="only print paths of mismatching objects, skip diff details (default: %(default)s)") parser.add_argument("-a", "--annotations", action="store_true", default=False, dest="ANNOTATIONS", help="also compare annotations (not done by default)") parser.add_argument("-m", "--match", dest="MATCH", metavar="PATT", default=None, help="only write out histograms whose path matches this regex") parser.add_argument("-M", "--unmatch", dest="UNMATCH", metavar="PATT", default=None, help="exclude histograms whose path matches this regex") parser.add_argument("--ignore-missing", action="store_true", default=False, dest="IGNORE_MISSING", help="don't complain if an object in file #1 is not found in file #2") parser.add_argument("--ignore-new", action="store_true", default=False, dest="IGNORE_NEW", help="don't complain if an object in file #2 is not found in file #1") parser.add_argument("-q", "--quiet", action="store_true", default=False, dest="QUIET", help="print nothing, only express comparison result via return code (default: %(default)s)") args = parser.parse_args() filenames = args.ARGS if len(filenames) != 2: print("ERROR! 
Please supply *two* YODA files for comparison") sys.exit(7) ## Get data objects aodict1 = yoda.read(filenames[0]) aodict2 = yoda.read(filenames[1]) ## Filter data objects filter_aos(aodict1, args.MATCH, args.UNMATCH) filter_aos(aodict2, args.MATCH, args.UNMATCH) CLEAN = True def log(msg): if not args.QUIET and not args.LIST: print(msg) ## Check number of data objects in each file if len(aodict1) != len(aodict2): CLEAN = False if not (args.IGNORE_MISSING or args.IGNORE_NEW): log("Different numbers of data objects in %s and %s" % tuple(filenames[:2])) elif sorted(list(aodict1.keys())) != sorted(list(aodict2.keys())): CLEAN = False if not (args.IGNORE_MISSING or args.IGNORE_NEW): log("Different data object paths in %s and %s" % tuple(filenames[:2])) ## A slightly tolerant numerical comparison function def eq(a, b): if a == b: return True from math import isnan if type(a) is type(b) is float and isnan(a) and isnan(b): return True ## Type-check: be a bit careful re. int vs. float if type(a) is not type(b) and not all(type(x) in (int, float) for x in (a,b)): return False ## Recursively call on pairs of components if a and b are iterables if hasattr(a, "__iter__"): return all(eq(*pair) for pair in zip(a, b)) ## Check if a and b have equal magnitude but opposite sign if a == -b: return False ## Finally apply a tolerant numerical comparison on numeric types # TODO: Be careful with values on either side of zero -- abs(float(a)) etc. on denom? return abs(float(a) - float(b))/(float(a) + float(b)) < args.TOL def ptstr(pt): vstr1 = "{x:.5g} + {ex[0]:.5g} - {ex[1]:.5g}" vstr2 = "{x:.5g} +- {ex[0]:.5g}" vstrs = [] if pt.dim >= 1: vstrs.append( (vstr2 if eq(*pt.xErrs) else vstr1).format(x=pt.x, ex=pt.xErrs) ) if pt.dim >= 2: vstrs.append( (vstr2 if eq(*pt.yErrs) else vstr1).format(x=pt.y, ex=pt.yErrs) ) if pt.dim >= 3: vstrs.append( (vstr2 if eq(*pt.zErrs) else vstr1).format(x=pt.z, ex=pt.zErrs) ) return "(" + ", ".join(vstrs) + ")" ## Compare each object pair for path in sorted(set(list(aodict1.keys()) + list(aodict2.keys()))): THISCLEAN = True while True: #< hack to allow early exits while still reporting path-specific failures ## Get the object in file #1 ao1 = aodict1.get(path, None) if ao1 is None: if not args.IGNORE_NEW: THISCLEAN = False log("Data object '%s' not found in %s" % (path, filenames[0])) break ## Get the object in file #2 ao2 = aodict2.get(path, None) if ao2 is None: if not args.IGNORE_MISSING: THISCLEAN = False log("Data object '%s' not found in %s" % (path, filenames[1])) break ## Compare the file #1 vs. #2 object types if type(ao1) is not type(ao2): THISCLEAN = False log("Data objects with path '%s' have different types (%s and %s) in %s and %s" % \ (path, str(type(ao1)), str(type(ao2)), filenames[0], filenames[1])) break ## Convert to scatter representations try: s1 = ao1.mkScatter() s2 = ao2.mkScatter() except Exception as e: print("WARNING! 
Could not create a '%s' scatter for comparison (%s)" % (path, type(e).__name__)) ## Check for compatible dimensionalities (should already be ok, but just making sure) - if s1.dim() != s2.dim(): + if s1.dim != s2.dim: THISCLEAN = False log("Data objects with path '%s' have different scatter dimensions (%d and %d) in %s and %s" % \ - (path, s1.dim(), s2.dim(), filenames[0], filenames[1])) + (path, s1.dim, s2.dim, filenames[0], filenames[1])) break ## Compare the numbers of points/bins - if s1.numPoints() != s2.numPoints(): + if s1.numPoints != s2.numPoints: THISCLEAN = False log("Data objects with path '%s' have different numbers of points (%d and %d) in %s and %s" % \ - (path, s1.numPoints(), s2.numPoints(), filenames[0], filenames[1])) + (path, s1.numPoints, s2.numPoints, filenames[0], filenames[1])) break ## Compare the numeric values of each point premsg = "Data points differ for data objects with path '%s' in %s and %s:\n" % (path, filenames[0], filenames[1]) msgs = [] - for i, (p1, p2) in enumerate(zip(s1.points(), s2.points())): + for i, (p1, p2) in enumerate(zip(s1.points, s2.points)): # TODO: do this more nicely when point.val(int) and point.err(int) are mapped into Python ok = True diffaxis = [] - if p1.dim() >= 1 and not (eq(p1.x(), p2.x()) and eq(p1.xErrs(), p2.xErrs())): + if p1.dim >= 1 and not (eq(p1.x, p2.x) and eq(p1.xErrs, p2.xErrs)): ok = False diffaxis.append('x') - if p1.dim() >= 2 and not (eq(p1.y(), p2.y()) and eq(p1.yErrs(), p2.yErrs())): + if p1.dim >= 2 and not (eq(p1.y, p2.y) and eq(p1.yErrs, p2.yErrs)): ok = False diffaxis.append('y') - if p1.dim() >= 3 and not (eq(p1.z(), p2.z()) and eq(p1.zErrs(), p2.zErrs())): + if p1.dim >= 3 and not (eq(p1.z, p2.z) and eq(p1.zErrs, p2.zErrs)): ok = False diffaxis.append('z') if not ok: msgs.append(" Point #%d (different %s): %s vs. %s" % (i, ", ".join(diffaxis), ptstr(p1), ptstr(p2))) if msgs: THISCLEAN = False log(premsg + "\n".join(msgs)) break ## Compare the annotations (not done by default) if args.ANNOTATIONS: - for annotation in set(ao1.annotations() + ao2.annotations()): + for annotation in set(ao1.annotations + ao2.annotations): if ao1.annotation(annotation) != ao2.annotation(annotation): THISCLEAN = False log("Data objects with path '%s' have different '%s' annotations ('%s' and '%s') in %s and %s" % (path, annotation, ao1.annotation(annotation), ao2.annotation(annotation), filenames[0], filenames[1])) ## Make sure to exit the hack while-loop if clean break ## List failing path in list mode if not THISCLEAN and args.LIST and not args.QUIET: print(path) ## Aggregate total cleanliness flag CLEAN &= THISCLEAN if not CLEAN: sys.exit(1) # sys.exit(0) diff --git a/bin/yodahist b/bin/yodahist --- a/bin/yodahist +++ b/bin/yodahist @@ -1,252 +1,252 @@ #! /usr/bin/env python """\ %(prog)s Make and fill a YODA histogram from plain text file/stream input. e.g. cat foo.dat | %(prog)s h1 10 0. 100. out foo.yoda cat foo2.dat | %(prog)s prof2 10 0. 100. 5 -10 10 show Command syntax: The first command must be the histogram type, chosen from the list hist1 hist2 prof1 prof2 or the corresponding abbreviations h1 h2 p1 p2. Each of these must be followed by a list of numbers defining bin edges: a 3-tuple of x3 = nxbins xlow xhigh for 1D histogram types and a 6 tuple of x3 y3 for 2D histogram types. To book with logarithmic binning, use the xlogbins,ylogbins with boolean arguments. Remaining commands all take a single argument. 
They allow specifying the histogram path: path /mypath the plot and axis titles: title 'Foo bar' xlabel '$p_T$ [GeV]' ylabel '$N$' using lin/log axis plotting measures: logx yes logy 0 general annotations (can be used multiple times): ann 'Foo=bar' and input output file/stdout: in - (default) out 'foo.yoda' show yes TODO: * Automatically treat '-' as a minus sign in cmds list (with argparse?) * Also allow explicit lists of bin edges as parseable strings on command line? * Default printout/write and auto-true vals for show, log*, etc. * How to determine bin range in advance?... must need two passes?? * Add plotting later: plot params nx lx ux palette linecolor linestyle legend ticks on this or yodaplot interface? * Multiple datasets / histos? How??? * Data column spec & using eval to do math manipulations """ from __future__ import print_function import yoda import sys, math, numbers import argparse parser = argparse.ArgumentParser(usage=__doc__) parser.add_argument("CMDS", nargs="+", help="list of histogram-specification commands") #parser.add_option('-o', '--output', default='-', dest='OUTPUT_FILE') args = parser.parse_args() class Binning: # TODO: Also allow explicit lists of bin edges as parseable strings def __init__(self, nbins, low, high, measure="LIN"): try: self.nbins = int(nbins) self.low = float(low) self.high = float(high) self.measure = str(measure) except: raise Exception("Couldn't construct a binning from arguments: " + ", ".join([str(nbins), str(low), str(high)]) + " and " +str(measure)) def binedges(self): if self.nbins <= 0: raise Exception("Your histogram must have at least one bin!") if self.measure == "LIN": return yoda.linspace(self.nbins, self.low, self.high) elif self.measure == "LOG": if self.low <= 0 or self.high <= 0: raise Exception("Can't have a zero or negative logarithmic bin distribution") return yoda.logspace(self.nbins, self.low, self.high) else: raise Exception("Unknown histogram bin measure: " + self.measure) @classmethod def checkargs(cls, args): """Check that there are enough args in a sequence to be passed to the Binning constructor and that the types of the first three are suitable.""" if len(args) < 3: return False try: n = int(args[0]) if n < 1: return False l = float(args[1]) h = float(args[2]) except: return False return True def error(msg, rtncode=1): "A convenient way to exit with a standard error message format" sys.stderr.write("ERROR: " + msg + "\n") sys.exit(rtncode) ## Copy the args: we're going to modify them tmpargs = list(args.CMDS) ## First arg must be the run mode, so we detect and normalize that first MODE = tmpargs[0].lower() if MODE in ["h", "h1", "hist", "hist1"]: MODE = "hist1" elif MODE in ["p", "p1", "prof", "prof1"]: MODE = "prof1" elif MODE in ["h2", "hist2"]: MODE = "hist2" elif MODE in ["p2", "prof2"]: MODE = "prof2" elif MODE in ["s", "s2", "scat", "scat2"]: MODE = "scat2" else: raise Exception("Unknown histogramming mode: " + MODE) ## Now process binning instructions # TODO: Also allow explicit lists of bin edges as parseable strings del tmpargs[0] XBINNING = None YBINNING = None if MODE in ["hist1", "prof1"]: if not Binning.checkargs(tmpargs): error("1D histograms need 3 numeric binning arguments: nbins, lowedge, highedge") XBINNING = Binning(*tmpargs[:3]) del tmpargs[:3] elif MODE in ["hist2", "prof2"]: if len(tmpargs) < 6 or not Binning.checkargs(tmpargs) or not Binning.checkargs(tmpargs[3:]): error("2D histograms need 2 x 3 numeric binning arguments: nbins, lowedge, highedge for each of the x and y directions in turn") 
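# Illustrative sketch (not part of this script): what the Binning helper above
# produces, assuming the yoda module's linspace/logspace edge makers; the numbers
# are arbitrary:
lin_edges = Binning(4, 0.0, 100.0).binedges()         # same as yoda.linspace(4, 0.0, 100.0)
log_edges = Binning(4, 1.0, 100.0, "LOG").binedges()  # same as yoda.logspace(4, 1.0, 100.0)
# lin_edges should give the 5 edges [0.0, 25.0, 50.0, 75.0, 100.0];
# log_edges spans 1..100 with equal bin-edge ratios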
XBINNING = Binning(*tmpargs[:3]) del tmpargs[:3] YBINNING = Binning(*tmpargs[:3]) del tmpargs[:3] elif MODE in ["scat2"]: pass ## Break remaining args into cmds, as a dict[cmd] -> [cmdargs] cmds = {} while tmpargs: cmd = tmpargs[0].lower() try: if cmd == "ann": cmds.setdefault("ann", []).append(tmpargs[1]) else: cmds[cmd] = tmpargs[1] except: sys.stderr.write("Value missing for command '%s'\n" % cmd) del tmpargs[:2] # TODO: For now all commands take single-value arguments... maybe this will always be the case? # TODO: We avoid enforcing specific allowed commands for now. # ## Single-arg commands # if cmd in ["path", "title", "xlabel", "ylabel", "logx", "logy", # "xlogbins", "ylogbins", # "show", "in", "out"]: # cmds[cmd] = tmpargs[1] # del tmpargs[:2] # else: # error("unknown command '%s'\n" % cmd) # print(cmds) ## Apply log binning measure(s) if needed if XBINNING: XBINNING.measure = "LOG" if yoda.util.as_bool(cmds.get("xlogbins", False)) else "LIN" if YBINNING: YBINNING.measure = "LOG" if yoda.util.as_bool(cmds.get("ylogbins", False)) else "LIN" ## Make the histo object h = None if MODE == "hist1": h = yoda.Histo1D(XBINNING.binedges()) elif MODE == "prof1": h = yoda.Profile1D(XBINNING.binedges()) elif MODE == "hist2": h = yoda.Histo2D(XBINNING.binedges(), YBINNING.binedges()) elif MODE == "prof2": h = yoda.Profile2D(XBINNING.binedges(), YBINNING.binedges()) elif MODE == "scat2": h = yoda.Scatter2D() else: raise Exception("Unknown histogramming mode: " + MODE) ## Set more annotations, etc. -h.setPath(cmds.get("path", "/hist1")) +h.path = cmds.get("path", "/hist1") if "title" in cmds: - h.setAnnotation("Title", cmds.get("title")) + h.title = cmds.get("title") if "xlabel" in cmds: h.setAnnotation("XLabel", cmds.get("xlabel")) if "ylabel" in cmds: h.setAnnotation("YLabel", cmds.get("ylabel")) if "logx" in cmds: h.setAnnotation("LogX", int(yoda.util.as_bool(cmds.get("logx"))) ) if "logy" in cmds: h.setAnnotation("LogY", int(yoda.util.as_bool(cmds.get("logy"))) ) if "ann" in cmds: for kv in cmds.get("ann"): try: k, v = kv.split("=", 1) h.setAnnotation(k, v) except: print("Couldn't set annotation from arg '%s'" % kv) ## Read the input and fill the histo INPUT = cmds.get("in", "-") import fileinput for line in fileinput.input(INPUT): if not line.strip(): continue vals = [float(x) for x in line.strip().split()] if MODE == "scat2": # TODO: Multiple errors and asymm errors h.addPoint(*vals) else: if MODE == "hist1": assert len(vals) in [1,2] elif MODE in ["prof1", "hist2"]: assert len(vals) in [2,3] elif MODE == "prof2": assert len(vals) in [3,4] h.fill(*vals) ## Show the histogram on the terminal if yoda.util.as_bool(cmds.get("show", False)): yoda.writeFLAT([h], "-") ## Write output to the chosen output file (including - for stdout) OUTPUT = cmds.get("out", "hist.yoda") if OUTPUT == "-": yoda.writeYODA([h], OUTPUT) else: yoda.write([h], OUTPUT) diff --git a/bin/yodals b/bin/yodals --- a/bin/yodals +++ b/bin/yodals @@ -1,54 +1,54 @@ #! /usr/bin/env python """\ %(prog)s [ ...] List the contents of YODA-readable data files (sorted by path name). 
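The same listing can be sketched programmatically with the yoda Python module (the file name is hypothetical):

  import yoda
  for path, ao in sorted(yoda.read("runs.yoda").items()):
      print(path, type(ao).__name__)
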
""" from __future__ import print_function import yoda, sys, argparse from yoda.script_helpers import filter_aos parser = argparse.ArgumentParser(usage=__doc__) parser.add_argument("ARGS", nargs="+", help="infile [outfile]") parser.add_argument('-v', '--verbose', action="store_const", const=2, default=1, dest='VERBOSITY', help="print extra histogram details") parser.add_argument('-q', '--quiet', action="store_const", const=0, default=1, dest='VERBOSITY', help="just print histogram details, no cosmetic filenames or blank lines") parser.add_argument("-m", "--match", dest="MATCH", metavar="PATT", default=None, help="only write out histograms whose path matches this regex") parser.add_argument("-M", "--unmatch", dest="UNMATCH", metavar="PATT", default=None, help="exclude histograms whose path matches this regex") args = parser.parse_args() filenames = args.ARGS if not filenames: print("ERROR! Please supply at least one data file for listing") sys.exit(1) try: import natsort ysorted = natsort.natsorted except: ysorted = sorted for i, f in enumerate(filenames): if args.VERBOSITY >= 1: if i > 0: print() print("Data objects in %s:" % f) aodict = yoda.read(f) filter_aos(aodict, args.MATCH, args.UNMATCH) for p, ao in ysorted(aodict.items()): extrainfo = "" if args.VERBOSITY >= 2: if hasattr(ao, "numEntries"): extrainfo += " N={sumw:.3g}".format(sumw=ao.numEntries()) if hasattr(ao, "sumW"): extrainfo += " sumW={sumw:.3g}".format(sumw=ao.sumW()) try: nobjstr = "{n:4d}".format(n=len(ao)) except: nobjstr = " -" - print("{path:<50} {type:<10} {nobjs} bins/pts".format(path=p, type=ao.type(), nobjs=nobjstr) + extrainfo) + print("{path:<50} {type:<10} {nobjs} bins/pts".format(path=p, type=ao.type, nobjs=nobjstr) + extrainfo) diff --git a/bin/yodamerge b/bin/yodamerge --- a/bin/yodamerge +++ b/bin/yodamerge @@ -1,298 +1,298 @@ #! /usr/bin/env python """\ %(prog)s [-o outfile] [:] [:] ... e.g. %(prog)s run1.yoda run2.yoda run3.yoda (unweighted merging of three runs) or %(prog)s run1.yoda:2.0 run2.yoda:3.142 (weighted merging of two runs) Merge analysis objects from multiple YODA files, combining the statistics of objects whose names are found in multiple files. May be used either to merge disjoint collections of data objects, or to combine multiple statistically independent runs of the same data objects into one high-statistics run. Optional scaling parameters may be given to rescale the weights of the objects on a per-file basis before merging. By default the output is written to stdout since we can't guess what would be a good automatic filename choice! Use the -o option to provide an output filename. IMPORTANT! This script is not meant to handle all run merging situations or data objects: there are limitations to what can be inferred from data objects alone. If you need to do something more complex than the common cases handled by this script, please write your own script / program to load and process the data objects. SCATTERS (E.G. HISTOGRAM RATIOS) CAN'T BE MERGED Note that 'scatter' data objects, as opposed to histograms, cannot be merged by this tool since they do not preserve sufficient statistical information. The canonical example of this is a ratio plot: there are infinitely many combinations of numerator and denominator which could give the same ratio, and the result does not indicate anything about which of those infinite inputs is right (or the effects of correlations in the division). 
If you need to merge Scatter2D objects, you can write your own Python script or C++ program using the YODA interface, and apply whatever case-specific treatment is appropriate. By default the first such copy encountered will be returned as the 'merged' output, with no actual merging having been done. NORMALIZED, UNNORMALIZED, OR A MIX? An important detail in histogram merging is whether a statistical treatment for normalized or unnormalized histograms should be used: in the former case the normalization scaling must be undone *before* the histograms are added together, and then re-applied afterwards. This script examines the ScaledBy attribute each histograms to determine if it has been normalized. We make the assumption that if ScaledBy exists (i.e. h.scaleW has been called) then the histogram is normalized and we normalize the resulting merged histogram to the weighted average of input norms; if there is no ScaledBy, we assume that the histogram is not normalised. This is not an infallible approach, but we believe is more robust than heuristics to determine whether norms are sufficiently close to be considered equal. In complicated situations you will again be better off writing your own script or program to do the merging: the merging machinery of this script is available directly in the yoda Python module. See the source of this script (e.g. use 'less `which %(prog)s`) for more discussion. """ # MORE NOTES # # If all the input histograms with a particular path are found to have the same # normalization, and they have ScaledBy attributes indicating that a histogram # weight scaling has been applied in producing the input histograms, each # histogram in that group will be first unscaled by their appropriate factor, then # merged, and then re-normalized to the target value. Otherwise the weights from # each histogram copy will be directly added together with no attempt to guess an # appropriate normalization. The normalization guesses (and they are guesses -- # see below) are made *before* application of the per-file scaling arguments. # # IMPORTANT: note from the above that this script can't work out what to do # re. scaling and normalization of output histograms from the input data files # alone. It may be possible (although unlikely) that input histograms have the # same normalization but are meant to be added directly. It may also be the case # (and much more likely) that histograms which should be normalized to a common # value will not trigger the appropriate treatment due to e.g. statistical # fluctuations in each run's calculation of a cross-section used in the # normalization. And anything more complex than a global scaling (e.g. calculation # of a ratio or asymmetry) cannot be handled at all with a post-hoc scaling # treatment. The --assume-normalized command line option will force all histograms # to be treated as if they are normalized in the input, which can be useful if # you know that all the output histograms are indeed of this nature. If they are # not, it will go wrong: you have been warned! # # Please use this script as a template if you need to do something more specific. # # NOTE: there are many possible desired behaviours when merging runs, depending on # the factors above as well as whether the files being merged are of homogeneous # type, heterogeneous type, or a combination of both. It is tempting, therefore, # to add a large number of optional command-line parameters to this script, to # handle these cases. 
Experience from Rivet 1.x suggests that this is a bad idea: # if a problem is of programmatic complexity then a command-line interface which # attempts to solve it in general is doomed to both failure and unusability. Hence # we will NOT add extra arguments for applying different merging weights or # strategies based on analysis object path regexes, auto-identifying 'types' of # run, etc., etc.: if you need to merge data files in such complex ways, please # use this script as a template around which to write logic that satisfies your # particular requirements. import yoda, argparse, sys, math parser = argparse.ArgumentParser(usage=__doc__) parser.add_argument("INFILES", nargs="+", help="datafile1 datafile2 [...]") parser.add_argument("-o", "--output", default="-", dest="OUTPUT_FILE", metavar="PATH", help="write output to specified path") parser.add_argument("--s1d-mode", "--s1dmode", default="assume_mean", dest="S1D_MODE", metavar="MODE", help="choose strategy for combining Scatter1D objects: one of 'first', 'combine', 'assume_mean', 'add'") parser.add_argument("--s2d-mode", "--s2dmode", default="assume_mean", dest="S2D_MODE", metavar="MODE", help="choose strategy for combining Scatter2D objects: one of 'first', 'combine', 'assume_mean', 'add'") parser.add_argument("--s3d-mode", "--s3dmode", default="assume_mean", dest="S3D_MODE", metavar="MODE", help="choose strategy for combining Scatter3D objects: one of 'first', 'combine', 'assume_mean', 'add'") parser.add_argument("--type-mismatch-mode", default="scatter", dest="TYPE_MISMATCH_MODE", metavar="MODE", help="choose strategy for combining objects whose types mismatch: one of 'first', 'scatter'") parser.add_argument("--add", "--stack", action="store_true", default=False, dest="STACK", help="force simple stacking (also forces all scatter modes to 'add')") parser.add_argument("--no-veto-empty", action="store_false", default=True, dest="VETO_EMPTY", help="disable the removal of empty (sumW=0) data objects _before_ applying merge heuristics. You probably want the default!") parser.add_argument("--assume-normalized", action="store_true", default=False, dest="ASSUME_NORMALIZED", help="DEPRECATED, AND DOES NOTHING. 
This option _used_ to bypass the detection heuristic for unnormalized histograms") args = parser.parse_args() ## Include scatters in "add" mode if args.STACK: args.S1D_MODE = "add" args.S2D_MODE = "add" args.S3D_MODE = "add" ## Put the incoming objects into a dict from each path to a list of histos and scalings analysisobjects_in = {} for fa in args.INFILES: filename, scale = fa, 1.0 if ":" in fa: try: filename, scale = fa.rsplit(":", 1) scale = float(scale) except: sys.stderr.write("Error processing arg '%s' with file:scale format\n" % fa) aos = yoda.read(filename) for aopath, ao in aos.items(): ao.setAnnotation("yodamerge_scale", scale) analysisobjects_in.setdefault(aopath, []).append(ao) analysisobjects_out = {} for p, aos in analysisobjects_in.items(): ## Identify the canonical aotype being handled from the type of the first entry in aos aotype = type(aos[0]) ## Check that types match, and just output the first one if they don't if not all(type(ao) is aotype for ao in aos): msg = "WARNING: cannot merge mismatched analysis object types for path %s: " % p scatter_fail = False if args.TYPE_MISMATCH_MODE == "scatter": saos = [] for ao in aos: sao = ao.mkScatter() sao.setAnnotation("yodamerge_scale", ao.annotation("yodamerge_scale")) saos.append(sao) saotype = type(saos[0]) msg += "converting to %ss" % saotype.__name__ if all(type(sao) is saotype for sao in saos): sys.stderr.write(msg + "\n") aos = saos aotype = saotype else: msg += "... failed, " scatter_fail = True if args.TYPE_MISMATCH_MODE == "first" or scatter_fail: sys.stderr.write(msg + "returning first object\n") analysisobjects_out[p] = aos[0] continue ## Remove empty fillable data objects, to avoid gotchas where e.g. histos are normalised and hence ## ScaledBy should be set... but isn't because the emptiness blocked rescaling to finite area if args.VETO_EMPTY: # TODO: Add a Fillable interface/ABC and use that for the type matching if aotype in (yoda.Counter, yoda.Histo1D, yoda.Histo2D, yoda.Profile1D, yoda.Profile2D): aos_nonzero = [ao for ao in aos if ao.sumW() != 0] #< possible that this doesn't mean no fills :-/ ## Just output the first histo if they are all empty if not aos_nonzero: analysisobjects_out[p] = aos[0] continue ## Reset aos to only contain non-empty ones aos = aos_nonzero ## Counter, Histo and Profile (i.e. 
Fillable) merging # TODO: Add a Fillable interface/ABC and use that for the type matching if aotype in (yoda.Counter, yoda.Histo1D, yoda.Histo2D, yoda.Profile1D, yoda.Profile2D): ## Identify a target rescaling factor from the 1/scalefactor-weighted norms of each run rescale = None if len(aos) > 1 and args.STACK: pass # we're in dumb stacking mode - elif all("ScaledBy" in ao.annotations() for ao in aos): + elif all("ScaledBy" in ao.annotations for ao in aos): try: rescale = 1.0 / sum(float(ao.annotation("yodamerge_scale"))/float(ao.annotation("ScaledBy")) for ao in aos) except ZeroDivisionError: sys.stderr.write("WARNING: Abandoning normalized merge of path %s because ScaledBy attributes are zero.\n" % p) - elif all("ScaledBy" not in ao.annotations() for ao in aos): + elif all("ScaledBy" not in ao.annotations for ao in aos): pass else: sys.stderr.write("WARNING: Abandoning normalized merge of path %s because some but not all inputs have ScaledBy attributes\n" % p) ## Now that the normalization-identifying heuristic is done, apply user scalings and undo the normalization scaling if appropriate for ao in aos: if rescale: ao.scaleW( 1.0/float(ao.annotation("ScaledBy")) ) ao.scaleW( float(ao.annotation("yodamerge_scale")) ) ## Make a copy of the (scaled & unnormalized) first object as the basis for the output ## and merge for histograms (including weights, normalization, and user scaling) ao_out = aos[0].clone() ao_out.rmAnnotation("yodamerge_scale") for ao in aos[1:]: ao_out += ao if rescale: ao_out.scaleW(rescale) ## Merge for Scatters, assuming equal run sizes, and applying user scaling else: ## Make a copy of the first object as the basis for merging (suitable for all Scatter types) ao_out = aos[0].clone() ao_out.rmAnnotation("yodamerge_scale") ## If there's only one object, there's no need to do any combining if len(aos) == 1: pass elif aotype in (yoda.Scatter1D,yoda.Scatter2D,yoda.Scatter3D): ## Retrieve dimensionality of the Scatter*D object dim = ao_out.dim SND_MODE = getattr(args, "S%dD_MODE" % dim) axis = ['','x','y','z'] ## Use asymptotic mean+stderr convergence statistics if SND_MODE in ("assume_mean", "add"): msg = "WARNING: Scatter%dD %s merge assumes asymptotic statistics and equal run sizes" % (dim, p) if any(float(ao.annotation("yodamerge_scale")) != 1.0 for ao in aos): msg += " (+ user scaling)" sys.stderr.write(msg + "\n") - npoints = len(ao_out.points()) + npoints = len(ao_out.points) for i in range(npoints): val_i = scalesum = 0.0 ep_i = {} # will hold the values of the multiple error sources em_i = {} # will hold the values of the multiple error sources for ao in aos: scale = float(ao.annotation("yodamerge_scale")) variations = ao.variations() scalesum += scale - val_i += scale * ao.point(i).val(dim) + val_i += scale * ao.points[i].val(dim) for var in variations: if not var in ep_i.keys(): ep_i[var] = 0. em_i[var] = 0. 
- ep_i[var] += (scale * ao.point(i).errs(dim,var)[0])**2 - em_i[var] += (scale * ao.point(i).errs(dim,var)[1])**2 + ep_i[var] += (scale * ao.points[i].errs(dim,var)[0])**2 + em_i[var] += (scale * ao.points[i].errs(dim,var)[1])**2 for var in ep_i.keys(): ep_i[var] = math.sqrt(ep_i[var]) em_i[var] = math.sqrt(em_i[var]) if SND_MODE == "assume_mean": val_i /= scalesum for var in ep_i.keys(): ep_i[var] /= scalesum em_i[var] /= scalesum - setattr(ao_out.point(i),'%s' % axis[dim], val_i) + setattr(ao_out.points[i],'%s' % axis[dim], val_i) for var in ep_i.keys(): - #setattr(ao_out.point(i),'set%sErrs' % axis[dim].upper(), ((ep_i[var], em_i[var]),var)) - ao_out.point(i).setErrs(dim , (ep_i[var], em_i[var]),var) + #setattr(ao_out.points[i],'set%sErrs' % axis[dim].upper(), ((ep_i[var], em_i[var]),var)) + ao_out.points[i].setErrs(dim , (ep_i[var], em_i[var]),var) ## Add more points to the output scatter elif SND_MODE == "combine": for ao in aos[1:]: ao_out.combineWith(ao) ## Just return the first AO unmodified & unmerged elif SND_MODE == "first": pass else: raise Exception("Unknown Scatter%dD merging mode:" % dim + args.SND_MODE) ## Other data types (just warn, and write out the first object) else: sys.stderr.write("WARNING: Analysis object %s of type %s cannot be merged\n" % (p, str(aotype))) ## Put the output AO into the output dict analysisobjects_out[p] = ao_out ## Write output yoda.writeYODA(analysisobjects_out, args.OUTPUT_FILE) diff --git a/bin/yodascale b/bin/yodascale --- a/bin/yodascale +++ b/bin/yodascale @@ -1,308 +1,308 @@ #! /usr/bin/env python """\ %(prog)s [ ...] -r [-r ...] Rescale YODA histograms by constant factors or to match (partial) normalizations of histos in a reference file. Examples: %(prog)s run1.yoda run2.yoda -r refdir/ -r reffile.yoda Rescales histograms in run1 and run2 to their namesakes in the given ref files (including all .yoda found in refdir). Writes out to run{1,2}-scaled.yoda %(prog)s -c '.* 0.123x' foo.yoda Rescales all histograms in foo.yoda by the factor 0.123. This latter example is a demonstration of the general scaling specification syntax, many of which can be either given with -c/--spec command-line options, or can be placed in a file read by the -f/--specfile options. The scaling specification syntax is a histogram path pattern followed by a definition of the scaling to be done on that histogram (or histograms). The path pattern consists of a regular expression, optionally followed by a range specifier. The best explanation is probably a few examples: /path/to/hist (match all bins in that particular histogram) /.*pt (match all bins in histograms ending in 'pt') /myhist@5 (match bins with high edge >= 5 in /myhist) /myhist@@10 (match bins with low edge <= 10 in /myhist) /myhist@3@10 (match bins with edges within 3-10 in /myhist) /myhist#3:10 (match bins with index 3-10 inclusive in /myhist) /myhist#5 (match bins with index >= 5 in /myhist) /myhist##10 (match bins with index <= 10 in /myhist) Mixing of bin index and bin position range locators is not currently allowed. The scaling definition which follows this path pattern can also take several forms. 
Again, examples: 10 (scale the matching histos/bin ranges to normalize to 10) 2x (scale the matching histos by a factor of 2) 2xREF (scale the matching histos/ranges to 2x the normalization of their reference namesake) 3.14x/some/other/path (rescale to the normalization of a different ref histo) This scheme is fairly complex and may evolve slightly as we try to make the syntax as natural as possible, particularly for the simplest & most common cases, while retaining the general power evident from the examples above. Please report bugs and wishes to the YODA authors at yoda@projects.hepforge.org TODO: * x scaling of only the given range? * check that ref norm scaling respects the range limits * check that ranges can also be given on the RHS path * y-scaling of profile histograms (requires scaleY & scaleZ etc. methods) * add overflow inclusion in normalization for binned types """ from __future__ import print_function ## Parse command line args import argparse parser = argparse.ArgumentParser(usage=__doc__) # TODO: how to look up ref histos? parser.add_argument("INFILES", nargs="+", help="file or folder with reference histos") parser.add_argument("-r", "--ref", dest="REFS", action="append", default=[], help="file or folder with reference histos") parser.add_argument("--ref-prefix", dest="REF_PREFIX", metavar="NAME", default="REF", help="treat /NAME/foo as a reference plot for /foo, and don't rescale /NAME histos") parser.add_argument("-c", "--spec", dest="SPECS", metavar="SPECSTR", action="append", default=[], help="provide a single scaling specification on the command line. Multiple -c options " + "may be given. Specs will be _appended_ to any read from a file with -f/--specfile") parser.add_argument("-f", "--specfile", dest="SPECFILE", metavar="FILE", default=None, help="specify a file with histogram path patterns (and bin ranges) that are to be normalised") parser.add_argument("-i", "--in-place", dest="IN_PLACE", default=False, action="store_true", help="overwrite input file(s) rather than making -scaled.yoda") parser.add_argument("-q", "--quiet", dest="VERBOSITY", action="store_const", const=0, default=1, help="reduce printouts to errors-only") parser.add_argument("-v", "--debug", dest="VERBOSITY", action="store_const", const=2, default=1, help="increase printouts to include debug info") args = parser.parse_args() ## Define parser for scale specification strings from yoda.search import PointMatcher def parse_specstr(line): ## Strip comments line = " " + line if " #" in line: line = line[:line.index(" #")] ## Split whitespace-separated target and ref parts parts = line.strip().split() pathpatt, scalespec = parts[0], " ".join(parts[1:]) ## Match and extract the spec command structure import re re_scalespec = re.compile(r"([\d\.eE\+\-]+x?|UNSCALE)?(.*)") m = re_scalespec.match(scalespec) if not m: raise Exception("Invalid scaling spec string: '%s'" % scalespec) scalearg, refpatt = m.groups() scaleop = "=" if not scalearg: scalearg = "1x" if scalearg.endswith("x"): scaleop = "x" scalearg = float(scalearg[:-1]) elif scalearg == "UNSCALE": scaleop = "x" rtn = (PointMatcher(pathpatt), PointMatcher(refpatt), scaleop, scalearg) #print(rtn) return rtn ## Parse spec file and command-line specs SPECS = [] if args.SPECFILE: with open(args.SPECFILE, "r") as f: for line in f: line = line.strip() if line.startswith("#"): continue SPECS.append( parse_specstr(line) ) SPECS += [parse_specstr(s) for s in args.SPECS] ## Read reference histograms import yoda, os, glob reffiles = [] for r in args.REFS: if 
os.path.isdir(r): reffiles += glob.glob( os.path.join(r, "*.yoda") ) #< TODO: Add a yoda.util function for finding files that it can read elif r.endswith(".yoda"): reffiles.append(r) aos_ref = {} for r in reffiles: aos = yoda.read(r) for aopath, ao in aos.items(): ## Use /REF_PREFIX/foo as a ref plot for /foo, if the ref prefix is set if args.REF_PREFIX and aopath.startswith("/%s/" % args.REF_PREFIX): aopath = aopath.replace("/%s/" % args.REF_PREFIX, "/", 1) # NB. ao.path is unchanged aos_ref[aopath] = ao if args.VERBOSITY > 1: print("DEBUG: %d reference histos" % len(aos_ref)) ## Loop over input files for rescaling for infile in args.INFILES: aos_out = {} aos_in = yoda.read(infile) for aopath, ao in sorted(aos_in.items()): ## Default is to write out the unrescaled AO if no match is found aos_out[aopath] = ao ## Don't rescale /REF_PREFIX objects if aopath.startswith("/%s/" % args.REF_PREFIX): if args.VERBOSITY > 1: print("DEBUG: not rescaling ref object '%s'" % aopath) continue ## Match specs to MC AO path aospecs = [] for s in SPECS: if s[0].search_path(aopath): aoref, (matcher, mode, factor) = None, s[1:] # TODO: s[1] -> matcher? ## Match to ref AOs if any were given if s[1].path is not None: refpaths = [] if s[1].path.pattern == "REF": if aopath in aos_ref: refpaths = [aopath] else: refpaths = [p for p in list(aos_ref.keys()) if (s[1] and s[1].search_path(p))] if args.VERBOSITY > 1: print("DEBUG:", aopath, "=>", refpaths) if not refpaths: print("ERROR: No reference histogram '%s' found for '%s'. Not rescaling" % (s[1].path.pattern, aopath)) # TODO: should we actually skip it from the output entirely in this case? continue if len(refpaths) > 1: if args.VERBOSITY > 0: print("WARNING: Multiple reference histograms found for '%s'. Using first: '%s'" % (aopath, refpaths[0])) aoref = aos_ref[refpaths[0]] aospecs.append( [aoref, s[1], mode, factor] ) ## Identify and check spec to be used for scaling determination if not aospecs: aoref, matcher, mode, factor = None, None, "x", 1.0 if args.VERBOSITY > 1: print("DEBUG: No scaling spec found for '%s'. Output is unscaled" % aopath) else: aoref, matcher, mode, factor = aospecs[0] if args.VERBOSITY > 0: print("WARNING: Multiple scaling specs found for '%s'. Using first: '%r'" % (aopath, aospecs[0])) ## Work out scalefactor if aoref is None and mode == "x": if factor != "UNSCALE": sf = factor elif ao.hasAnnotation("ScaledBy"): sf = 1/float(ao.annotation("ScaledBy")) else: sf = 1.0 else: # TODO: check binning compatibility (between types... hmm, check via equiv scatters?) # TODO: need a function on Scatter for this? Or would break symmetry? (including 3D scatters) ## Convert types to scatters # TODO: depends on mode and types. Histo/Profile have outflows, Scatters do not # TODO: provide a mode to only do rescales on Histos, not Profiles and Scatters? # TODO: ranges (index or val) from spec file. Need syntax for 2D ranges # TODO: assume full integral for now, but ignore overflows # TODO: check that the Scatter type attribute matches the non-Scatter type ## Note that we are assuming that it is the Scatter y (or z) axis that is to be rescaled s = ao.mkScatter() sref = None if aoref: sref = aoref.mkScatter() if sref and type(s) is not type(sref): if args.VERBOSITY > 0: print("WARNING: Type mismatch between Scatter reps of '%s'. Are input and ref histos of same dimension?" 
% aopath) continue def matchpoint(i, p): class Pt(object): def __init__(self, n, xmin, xmax): self.n = n self.xmin = xmin self.xmax = xmax pt = Pt(i, p.xMin, p.xMax) result = matcher.match_pos(pt) #print(result) return result ## Work out normalisations # TODO: Are these at all appropriate (with width factor?) for profiles? And ratios? Need an "ignore width" mode flag? if type(s) is yoda.Scatter2D: ## NOTE: sum(errs) only works if there's only one +- pair # TODO: only loop over specified bins/points - assert(all(len(p.xErrs()) == 2 for p in s.points())) - norm = sum(p.y() * sum(p.xErrs()) for i, p in enumerate(s.points()) if matchpoint(i, p)) + assert(all(len(p.xErrs) == 2 for p in s.points)) + norm = sum(p.y * sum(p.xErrs) for i, p in enumerate(s.points) if matchpoint(i, p)) if norm == 0: print("ERROR: Normalisation of given range is 0; cannot rescale this to a finite value. Result will be unscaled") sf = 1.0 elif mode == "=": sf = float(factor)/norm elif mode == "x": - assert(all(len(p.xErrs()) == 2 for p in sref.points())) - refnorm = sum(p.y * sum(p.xErrs()) for i, p in enumerate(sref.points()) if matchpoint(i, p)) + assert(all(len(p.xErrs) == 2 for p in sref.points)) + refnorm = sum(p.y * sum(p.xErrs) for i, p in enumerate(sref.points) if matchpoint(i, p)) sf = factor*refnorm/norm # elif type(s) is yoda.Scatter3D: ## NOTE: sum(errs) only works if there's only one +- pair # TODO: only loop over specified bins/points if args.VERBOSITY > 0: print("WARNING: Point/bin range restrictions are not yet implemented for 2D scatter / 3D histo types") - assert(all(len(p.xErrs()) == 2 and len(p.yErrs()) == 2) for p in s.points()) - norm = sum(p.z() * sum(p.xErrs()) * sum(p.yErrs()) for p in s.points()) + assert(all(len(p.xErrs) == 2 and len(p.yErrs) == 2) for p in s.points) + norm = sum(p.z * sum(p.xErrs) * sum(p.yErrs) for p in s.points) if norm == 0: print("ERROR: Normalisation of given range is 0; cannot rescale this to a finite value. Result will be unscaled") sf = 1.0 elif mode == "=": sf = factor/norm elif mode == "x": - assert(all(len(p.xErrs()) == 2 and len(p.yErrs()) == 2) for p in sref.points()) - refnorm = sum(p.z * sum(p.xErrs()) * sum(p.yErrs()) for p in sref.points()) + assert(all(len(p.xErrs) == 2 and len(p.yErrs) == 2) for p in sref.points) + refnorm = sum(p.z * sum(p.xErrs) * sum(p.yErrs) for p in sref.points) sf = factor*refnorm/norm ## Rescale if args.VERBOSITY > 1: print("DEBUG: '%s' rescaled by factor %.3g" % (aopath, sf)) if type(ao) in (yoda.Histo1D, yoda.Histo2D): ao.scaleW(sf) elif type(ao) is yoda.Profile1D: # TODO: should this scale y or sumW? A: sumW... only relevant for merging & "norm" is wrong concept print("WARNING:", ao.path, "no scaling applied to Profile1D") elif type(ao) is yoda.Profile2D: # TODO: should this scale z or sumW? A: sumW... only relevant for merging & "norm" is wrong concept print("WARNING:", ao.path, "no scaling applied to Profile2D") elif type(ao) is yoda.Scatter2D: ao.scale(1, sf) elif type(ao) is yoda.Scatter3D: ao.scale(1, 1, sf) ## Store rescaled result # TODO: should apply scaling to original type or to the scatter? aos_out[aopath] = ao ## Write out rescaled file, possibly in-place (i.e. replace input -- not the default behaviour!) 
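# Illustrative sketch (not part of this script): the Scatter2D normalisation used
# above is the sum of point heights times the full bin widths recovered from the
# +- x errors (property-style accessors as in this patch; bin-range matching is
# ignored here for simplicity):
def scatter2d_norm(scatter):
    "Sum of y times full x-width over all points of a Scatter2D."
    return sum(p.y * sum(p.xErrs) for p in scatter.points)

# e.g. for the '10' (normalise-to-10) spec form:  sf = 10.0 / scatter2d_norm(s)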
if not args.IN_PLACE: infileparts = os.path.splitext( os.path.basename(infile) ) outfile = infileparts[0] + "-scaled.yoda" else: outfile = infile # yoda.write(sorted(aos_out.values()), outfile) diff --git a/configure.ac b/configure.ac --- a/configure.ac +++ b/configure.ac @@ -1,204 +1,204 @@ ## Process this file with autoconf to produce a configure script. AC_PREREQ(2.59) -AC_INIT([YODA],[1.8.0],[yoda@projects.hepforge.org],[YODA]) +AC_INIT([YODA],[1.7.5],[yoda@projects.hepforge.org],[YODA]) ## Check and block installation into the src/build dir if test "$prefix" = "$PWD"; then AC_MSG_ERROR([Installation into the build directory is not supported: use a different --prefix argument]) fi ## Force default prefix to have a path value rather than NONE if test "$prefix" = "NONE"; then prefix=/usr/local fi AC_CONFIG_SRCDIR([src/Counter.cc]) AM_INIT_AUTOMAKE([subdir-objects -Wall dist-bzip2 1.10]) m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) m4_ifdef([AM_PROG_AR], [AM_PROG_AR]) AC_CONFIG_MACRO_DIR([m4]) AC_CONFIG_HEADERS([include/YODA/Config/DummyConfig.h include/YODA/Config/YodaConfig.h include/YODA/Config/BuildConfig.h]) AC_DEFINE_UNQUOTED(YODA_VERSION, "$PACKAGE_VERSION", "YODA version string") AC_DEFINE_UNQUOTED(YODA_NAME, "$PACKAGE_NAME", "YODA name string") AC_DEFINE_UNQUOTED(YODA_STRING, "$PACKAGE_STRING", "YODA name and version string") AC_DEFINE_UNQUOTED(YODA_TARNAME, "$PACKAGE_TARNAME", "YODA short name string") AC_DEFINE_UNQUOTED(YODA_BUGREPORT, "$PACKAGE_BUGREPORT", "YODA contact email address") ## OS X AC_CEDAR_OSX ## Set default compiler flags if test "x$CXXFLAGS" = "x"; then CXXFLAGS="-O3"; fi ## Compiler setup AC_LANG(C++) AC_PROG_CXX AX_CXX_COMPILE_STDCXX([11], [noext], [mandatory]) AC_PROG_INSTALL AC_PROG_LN_S AC_DISABLE_STATIC AC_LIBTOOL_DLOPEN AC_PROG_LIBTOOL ## Work out library suffix for the build LIB_SUFFIX=\\\"$shrext_cmds\\\" AC_SUBST([LIB_SUFFIX]) ## Set default build flags AC_CEDAR_CHECKCXXFLAG([-pedantic], [AM_CXXFLAGS="$AM_CXXFLAGS -pedantic"]) AC_CEDAR_CHECKCXXFLAG([-Wall], [AM_CXXFLAGS="$AM_CXXFLAGS -Wall -Wno-format"]) dnl AC_CEDAR_CHECKCXXFLAG([-std=c++98], [AM_CXXFLAGS="$AM_CXXFLAGS -std=c++98"]) dnl AC_CEDAR_CHECKCXXFLAG([-Wno-unused-variable], [AM_CXXFLAGS="$AM_CXXFLAGS -Wno-unused-variable"]) ## Debug flag (default=none) AC_ARG_ENABLE([debug], [AC_HELP_STRING(--enable-debug, [build with debugging symbols @<:@default=no@:>@])], [], [enable_debug=no]) if test x$enable_debug = xyes; then [AM_CXXFLAGS="$AM_CXXFLAGS -g"] fi ## Optional zlib support for gzip-compressed data streams/files AX_CHECK_ZLIB ## Optional ROOT compatibility AC_ARG_ENABLE([root], [AC_HELP_STRING(--disable-root, [don't try to build YODA interface to PyROOT (needs root-config) @<:@default=yes@:>@])], [], [enable_root=yes]) if test "x$enable_root" = "xyes"; then AC_PATH_PROG(ROOTCONFIG, [root-config]) if test "x$ROOTCONFIG" = "x"; then AC_MSG_WARN([root-config not found -- not building extra ROOT compatibility tools]) enable_root=no; else AC_MSG_CHECKING([ROOT version]) ROOT_VERSION=`$ROOTCONFIG --version` ROOT_MAJOR_VERSION=`echo $ROOT_VERSION | cut -d. -f1` ROOT_MINOR_VERSION=`echo $ROOT_VERSION | cut -d. -f2 | cut -d/ -f1` ROOT_MICRO_VERSION=`echo $ROOT_VERSION | cut -d. 
-f2 | cut -d/ -f2` AC_MSG_RESULT([$ROOT_VERSION ($ROOT_MAJOR_VERSION,$ROOT_MINOR_VERSION,$ROOT_MICRO_VERSION)]) if test "$ROOT_MAJOR_VERSION" -lt 5; then enable_root=no; AC_MSG_WARN([ROOT major version is < 5 -- not building extra ROOT compatibility tools]) elif test "$ROOT_MAJOR_VERSION" -eq 5; then if test "$ROOT_MINOR_VERSION" -lt 33; then enable_root=no; AC_MSG_WARN([ROOT version is less than 5.33 -- not building extra ROOT compatibility tools]) elif test "$ROOT_MINOR_VERSION" -eq 33 && test "$ROOT_MICRO_VERSION" -lt 2; then enable_root=no; AC_MSG_WARN([ROOT version is less than 5.33/02 -- not building extra ROOT compatibility tools]) fi fi # TODO: Test for existence of TPython, instance_from_void API, etc. #AM_CXXFLAGS="$AM_CXXFLAGS -Wno-long-long" ROOT_CXXFLAGS=`$ROOTCONFIG --cflags` ROOT_LDFLAGS=`$ROOTCONFIG --ldflags` ROOT_LIBS=`$ROOTCONFIG --libs` AC_SUBST(ROOT_CXXFLAGS) AC_SUBST(ROOT_LDFLAGS) AC_SUBST(ROOT_LIBS) fi fi AM_CONDITIONAL(ENABLE_ROOT, [test x$enable_root = xyes]) if test x$enable_root = xyes; then AC_MSG_NOTICE([Building extra ROOT compatibility tools]) else AC_MSG_NOTICE([Not building extra ROOT compatibility tools]) fi ## Python extension AC_ARG_ENABLE(pyext, [AC_HELP_STRING(--disable-pyext, [don't build Python module (default=build)])], [], [enable_pyext=yes]) ## Basic Python checks if test x$enable_pyext = xyes; then AX_PYTHON_DEVEL([>= '2.7.3']) AC_SUBST(PYTHON_VERSION) YODA_PYTHONPATH=`$PYTHON -c "from __future__ import print_function; import distutils.sysconfig; print(distutils.sysconfig.get_python_lib(prefix='$prefix', plat_specific=True));"` AC_SUBST(YODA_PYTHONPATH) if test -z "$PYTHON"; then AC_MSG_ERROR([Can't build Python extension since python can't be found]) enable_pyext=no fi if test -z "$PYTHON_CPPFLAGS"; then AC_MSG_ERROR([Can't build Python extension since Python.h header file cannot be found]) enable_pyext=no fi fi AM_CONDITIONAL(ENABLE_PYEXT, [test x$enable_pyext == xyes]) ## Cython checks if test x$enable_pyext == xyes; then - AM_CHECK_CYTHON([0.24], [:], [:]) + AM_CHECK_CYTHON([0.23.5], [:], [:]) if test x$CYTHON_FOUND = xyes; then - AC_MSG_NOTICE([Cython >= 0.24 found: Python extension source can be rebuilt (for developers)]) - # Force rebuild since we have a sufficient Cython - touch pyext/rivet/core.pyx + + AC_MSG_NOTICE([Cython >= 0.23.5 found: Python extension source can be rebuilt (for developers)]) + fi AC_CHECK_FILE([pyext/yoda/core.cpp], [], [if test "x$CYTHON_FOUND" != "xyes"; then AC_MSG_ERROR([Cython is required for --enable-pyext, no pre-built core.cpp was found.]) fi]) ## Set extra Python extension build flags (to cope with Cython output code oddities) PYEXT_CXXFLAGS=$CXXFLAGS AC_CEDAR_CHECKCXXFLAG([-Wno-unused-but-set-variable], [PYEXT_CXXFLAGS="$PYEXT_CXXFLAGS -Wno-unused-but-set-variable"]) AC_CEDAR_CHECKCXXFLAG([-Wno-sign-compare], [PYEXT_CXXFLAGS="$PYEXT_CXXFLAGS -Wno-sign-compare"]) AC_CEDAR_CHECKCXXFLAG([-Wno-strict-prototypes], [PYEXT_CXXFLAGS="$PYEXT_CXXFLAGS -Wno-strict-prototypes"]) AC_SUBST(PYEXT_CXXFLAGS) AC_MSG_NOTICE([All Python build checks successful: 'yoda' Python extension will be built]) fi AM_CONDITIONAL(WITH_CYTHON, [test x$CYTHON_FOUND = xyes]) ## Extend and substitute the default build flags after lib testing AM_CPPFLAGS="-I\$(top_srcdir)/include -I\$(top_builddir)/include" AC_SUBST(AM_CPPFLAGS) AC_SUBST(AM_CXXFLAGS) dnl dnl setup.py puts its build artifacts into a labelled path dnl this helps the test scripts to find them locally instead of dnl having to install first dnl YODA_SETUP_PY_PATH=$(${PYTHON} -c 
'from __future__ import print_function; import distutils.util as u, sys; vi=sys.version_info; print("lib.%s-%s.%s" % (u.get_platform(),vi.major, vi.minor))') AC_SUBST(YODA_SETUP_PY_PATH) ## Build Doxygen if possible AC_PATH_PROG(DOXYGEN, doxygen) AM_CONDITIONAL(WITH_DOXYGEN, test "$DOXYGEN") ## Build file output AC_EMPTY_SUBST AC_CONFIG_FILES([Makefile Doxyfile]) AC_CONFIG_FILES([include/Makefile include/YODA/Makefile]) AC_CONFIG_FILES([src/Makefile src/tinyxml/Makefile src/yamlcpp/Makefile ]) AC_CONFIG_FILES([tests/Makefile]) AC_CONFIG_FILES([pyext/Makefile pyext/setup.py pyext/yoda/Makefile ]) AC_CONFIG_FILES([bin/Makefile bin/yoda-config]) AC_CONFIG_FILES([yodaenv.sh yoda.pc]) AC_OUTPUT if test x$enable_pyext == xyes; then cat < #include #include namespace YODA { /// AnalysisObject is the base class for histograms and scatters class AnalysisObject { public: /// Collection type for annotations, as a string-string map. typedef std::map Annotations; /// @name Creation and destruction //@{ /// Default constructor AnalysisObject() { } /// Constructor giving a type, a path and an optional title AnalysisObject(const std::string& type, const std::string& path, const std::string& title="") { setAnnotation("Type", type); setPath(path); setTitle(title); } /// Constructor giving a type, a path, another AO to copy annotation from, and an optional title AnalysisObject(const std::string& type, const std::string& path, const AnalysisObject& ao, const std::string& title="") { for (const std::string& a : ao.annotations()) setAnnotation(a, ao.annotation(a)); setAnnotation("Type", type); // might override the copied ones setPath(path); setTitle(title); } // /// Default copy constructor // AnalysisObject(const AnalysisObject& ao) { // if (ao.path().length() > 0) setPath(ao.path()); // if (ao.title().length() > 0) setTitle(ao.title()); // } /// Default destructor virtual ~AnalysisObject() { } /// Default copy assignment operator virtual AnalysisObject& operator = (const AnalysisObject& ao) { if (ao.path().length() > 0) setPath(ao.path()); if (ao.title().length() > 0) setTitle(ao.title()); return *this; } /// Make a copy on the heap, via 'new' virtual AnalysisObject* newclone() const = 0; //@} /// @name Modifiers //@{ /// Reset this analysis object virtual void reset() = 0; - - // variation parser - void parseVariations(){ return ; } //@} ///@name Annotations //@{ /// Get all the annotation names /// @todo Change this to return the str->str map, with a separate annotationKeys, etc. std::vector annotations() const { std::vector rtn; rtn.reserve(_annotations.size()); for (const Annotations::value_type& kv : _annotations) rtn.push_back(kv.first); return rtn; } /// Check if an annotation is defined bool hasAnnotation(const std::string& name) const { return _annotations.find(name) != _annotations.end(); } /// Get an annotation by name (as a string) const std::string& annotation(const std::string& name) const { Annotations::const_iterator v = _annotations.find(name); // If not found... 
written this way round on purpose if (v == _annotations.end()) { std::string missing = "YODA::AnalysisObject: No annotation named " + name; throw AnnotationError(missing); } return v->second; } /// Get an annotation by name (as a string) with a default in case the annotation is not found const std::string& annotation(const std::string& name, const std::string& defaultreturn) const { Annotations::const_iterator v = _annotations.find(name); if (v != _annotations.end()) return v->second; return defaultreturn; } /// @brief Get an annotation by name (copied to another type) /// /// @note Templated on return type template const T annotation(const std::string& name) const { std::string s = annotation(name); return Utils::lexical_cast(s); } /// @brief Get an annotation by name (copied to another type) with a default in case the annotation is not found /// /// @note Templated on return type template const T annotation(const std::string& name, const T& defaultreturn) const { try { std::string s = annotation(name); return Utils::lexical_cast(s); } catch (const AnnotationError& ae) { return defaultreturn; } } /// @brief Add or set a string-valued annotation by name void setAnnotation(const std::string& name, const std::string& value) { _annotations[name] = value; } /// @brief Add or set a double-valued annotation by name /// @todo Can we cover all FP types in one function via SFINAE? void setAnnotation(const std::string& name, double value) { // Recipe from Boost docs std::stringstream ss; ss << std::setprecision(std::numeric_limits::max_digits10) << std::scientific << value; setAnnotation(name, ss.str()); } /// @brief Add or set a float-valued annotation by name /// @todo Can we cover all FP types in one function via SFINAE? void setAnnotation(const std::string& name, float value) { // Recipe from Boost docs std::stringstream ss; ss << std::setprecision(std::numeric_limits::max_digits10) << std::scientific << value; setAnnotation(name, ss.str()); } /// @brief Add or set a long-double-valued annotation by name /// @todo Can we cover all FP types in one function via SFINAE? void setAnnotation(const std::string& name, long double value) { // Recipe from Boost docs std::stringstream ss; ss << std::setprecision(std::numeric_limits::max_digits10) << std::scientific << value; setAnnotation(name, ss.str()); } /// @brief Add or set an annotation by name (templated for remaining types) /// /// @note Templated on arg type, but stored as a string. template void setAnnotation(const std::string& name, const T& value) { setAnnotation(name, Utils::lexical_cast(value)); } /// Set all annotations at once void setAnnotations(const Annotations& anns) { _annotations = anns; } /// @brief Add or set an annotation by name /// /// Note: Templated on arg type, but stored as a string. This is just a synonym for setAnnotation. template void addAnnotation(const std::string& name, const T& value) { setAnnotation(name, value); } /// Delete an annotation by name void rmAnnotation(const std::string& name) { _annotations.erase(name); } /// Delete an annotation by name void clearAnnotations() { _annotations.clear(); } //@} /// @name Standard annotations //@{ /// @brief Get the AO title. /// /// Returns a null string if undefined, rather than throwing an exception cf. the annotation("Title"). const std::string title() const { return annotation("Title", ""); } /// Set the AO title void setTitle(const std::string& title) { setAnnotation("Title", title); } /// @brief Get the AO path. 
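As a quick orientation to the annotation interface above, here is a small usage sketch. It uses the concrete Scatter1D type from later in this patch, since AnalysisObject itself is abstract; the annotation names other than "Type" are invented for illustration.

#include "YODA/Scatter1D.h"
#include <iostream>

int main() {
  YODA::Scatter1D s("/EXAMPLE/d01-x01-y01", "An example scatter");

  // Numeric annotations are stored as max-precision scientific strings;
  // other types go through the templated lexical_cast overload.
  s.setAnnotation("ScaledBy", 2.0);
  s.setAnnotation("NPoints", 3);

  // Typed retrieval, with and without a default for missing keys
  const double sf = s.annotation<double>("ScaledBy");
  const int missing = s.annotation<int>("NoSuchKey", -1);

  std::cout << s.title() << ": ScaledBy=" << sf
            << ", Type set: " << s.hasAnnotation("Type")
            << ", fallback: " << missing << "\n";
  return 0;
}
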
/// /// Returns a null string if undefined, rather than throwing an exception cf. annotation("Path"). /// @note A leading / will be prepended if not already set. const std::string path() const { const std::string p = annotation("Path", ""); // If not set at all, return an empty string if (p.empty()) return p; // If missing a leading slash, one will be prepended return p.find("/") == 0 ? p : ("/"+p); } /// Set the AO path /// /// @note A leading / will be prepended if not already given. void setPath(const std::string& path) { const std::string p = (path.find("/") == 0) ? path : "/"+path; // if (path.length() > 0 && path.find("/") != 0) { // throw AnnotationError("Histo paths must start with a slash (/) character."); // } setAnnotation("Path", p); } /// Get the AO name -- the last part of the path. /// Returns a null string if path is undefined const std::string name() const { const std::string p = path(); const size_t lastslash = p.rfind("/"); if (lastslash == std::string::npos) return p; return p.substr(lastslash+1); } //@} public: /// @name Persistency hooks / object type info //@{ /// Get name of the analysis object type virtual std::string type() const { return annotation("Type"); } /// @brief Get the dimension of the analysis object type /// /// @note For fillable types this is the dimension of the fill space (e.g. Histo1D -> dim=1). /// For scatter types, it is the total dimension of the points (e.g. Scatter3D -> dim=3). virtual size_t dim() const = 0; //@} private: /// The annotations indexed by name std::map _annotations; }; // Convenience alias using AO = AnalysisObject; } #endif // YODA_AnalysisObject_h diff --git a/include/YODA/Point.h b/include/YODA/Point.h --- a/include/YODA/Point.h +++ b/include/YODA/Point.h @@ -1,112 +1,97 @@ // -*- C++ -*- // // This file is part of YODA -- Yet more Objects for Data Analysis // Copyright (C) 2008-2018 The YODA collaboration (see AUTHORS for details) // #ifndef YODA_POINT_H #define YODA_POINT_H #include "YODA/AnalysisObject.h" namespace YODA { /// Base class for all Point*Ds, providing generic access to their numerical properties class Point { public: typedef std::pair ValuePair; - + /// Virtual destructor for inheritance virtual ~Point() {}; - + + /// Space dimension of the point virtual size_t dim() = 0; //get the error map for the highest dimension virtual const std::map< std::string, std::pair> & errMap() const =0; - - //Parse the annotation from the parent AO which contains any variations - virtual void getVariationsFromParent() const =0; /// Get the point value for direction @a i virtual double val(size_t i) const = 0; /// Set the point value for direction @a i virtual void setVal(size_t i, double val) = 0; /// Get error values for direction @a i virtual const std::pair& errs(size_t i, std::string source="") const = 0; /// Set symmetric error for direction @a i virtual void setErr(size_t i, double e, std::string source="") = 0; /// Set symmetric error for direction @a i (alias) virtual void setErrs(size_t i, double e, std::string source="") { return setErr(i,e, source); } /// Set asymmetric error for direction @a i virtual void setErrs(size_t i, double eminus, double eplus, std::string source="") = 0; /// Set asymmetric error for direction @a i virtual void setErrs(size_t i, std::pair& e, std::string source="") = 0; /// Get negative error value for direction @a i virtual double errMinus(size_t i, std::string source="") const = 0; /// Set negative error for direction @a i virtual void setErrMinus(size_t i, double eminus, std::string 
source="") = 0; /// Get positive error value for direction @a i virtual double errPlus(size_t i, std::string source="") const = 0; /// Set positive error for direction @a i virtual void setErrPlus(size_t i, double eplus, std::string source="") = 0; /// Get average error value for direction @a i virtual double errAvg(size_t i, std::string source="") const = 0; // /// Get value minus negative error for direction @a i // double min(size_t i) const = 0; // /// Get value plus positive error for direction @a i // double max(size_t i) const = 0; //@} /// @todo Support multiple errors /// @name Combined value and error setters //@{ /// Set value and symmetric error for direction @a i virtual void set(size_t i, double val, double e, std::string source="") = 0; /// Set value and asymmetric error for direction @a i virtual void set(size_t i, double val, double eminus, double eplus, std::string source="") = 0; /// Set value and asymmetric error for direction @a i virtual void set(size_t i, double val, std::pair& e, std::string source="") = 0; //@} // @name Manipulations //@{ // /// Scaling of direction @a i // void scale(size_t i, double scale) = 0; /// @todo void transform(size_t i, FN f) = 0; //@} - - void setParentAO(AnalysisObject* parent){ - _parentAO=parent; - } - - AnalysisObject* getParentAO() const{ - return _parentAO; - } - - private: - // pointer back to the parent AO which these points belong to. - AnalysisObject* _parentAO=0; - }; } #endif diff --git a/include/YODA/Point1D.h b/include/YODA/Point1D.h --- a/include/YODA/Point1D.h +++ b/include/YODA/Point1D.h @@ -1,366 +1,356 @@ // -*- C++ -*- // // This file is part of YODA -- Yet more Objects for Data Analysis // Copyright (C) 2008-2018 The YODA collaboration (see AUTHORS for details) // #ifndef YODA_POINT1D_H #define YODA_POINT1D_H #include "YODA/Point.h" #include "YODA/Exceptions.h" #include "YODA/Utils/MathUtils.h" #include namespace YODA { /// A 1D data point to be contained in a Scatter1D class Point1D : public Point { public: /// @name Constructors //@{ // Default constructor Point1D() { } /// Constructor from values with optional symmetric errors Point1D(double x, double ex=0.0, std::string source="") : _x(x) { _ex[source] = std::make_pair(ex, ex); } /// Constructor from values with explicit asymmetric errors Point1D(double x, double exminus, double explus, std::string source="") : _x(x) { _ex[source] = std::make_pair(exminus, explus); } /// Constructor from values with asymmetric errors Point1D(double x, const std::pair& ex, std::string source="") : _x(x) { _ex[source] = ex; } /// Copy constructor Point1D(const Point1D& p) : _x(p._x), _ex(p._ex) - { - this->setParentAO( p.getParentAO()); - } + { } /// Copy assignment Point1D& operator = (const Point1D& p) { _x = p._x; _ex = p._ex; - this->setParentAO( p.getParentAO()); return *this; } //@} public: /// Space dimension of the point size_t dim() { return 1; } /// @name Value accessors //@{ /// Get x value double x() const { return _x; } /// Set x value void setX(double x) { _x = x; } /// @todo Uniform "coords" accessor across all Scatters: returning fixed-size tuple? 
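Because every concrete point type implements the index-based virtual interface declared above, dimension-agnostic helpers are possible. A hedged sketch, using only the accessors shown here:

#include "YODA/Point1D.h"
#include "YODA/Point2D.h"
#include <iostream>

// Replace the (possibly asymmetric) nominal errors of any point by their
// symmetric average, via the index-based Point interface (axes count from 1).
void symmetrize(YODA::Point& p) {
  for (size_t i = 1; i <= p.dim(); ++i) {
    const double e = p.errAvg(i);
    p.setErrs(i, e, e);
  }
}

int main() {
  YODA::Point1D p1(1.0, 0.1, 0.3);
  YODA::Point2D p2(1.0, 2.0, 0.1, 0.2, 0.4, 0.6);
  symmetrize(p1);
  symmetrize(p2);
  std::cout << p1.xErrMinus() << " " << p2.yErrPlus() << "\n";
  return 0;
}
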
//@} /// @name x error accessors //@{ /// Get x-error values const std::pair& xErrs( std::string source="") const { if (!_ex.count(source)) throw RangeError("xErrs has no such key: "+source); return _ex.at(source); } /// Get negative x-error value double xErrMinus( std::string source="") const { if (!_ex.count(source)) throw RangeError("xErrs has no such key: "+source); return _ex.at(source).first; } /// Get positive x-error value double xErrPlus( std::string source="") const { if (!_ex.count(source)) throw RangeError("xErrs has no such key: "+source); return _ex.at(source).second; } /// Get average x-error value double xErrAvg( std::string source="") const { if (!_ex.count(source)) throw RangeError("xErrs has no such key: "+source); return (_ex.at(source).first + _ex.at(source).second)/2.0; } /// Set negative x error void setXErrMinus(double exminus, std::string source="") { - if (source!="") getVariationsFromParent(); if (!_ex.count(source)) _ex[source] = std::make_pair(0.,0.); _ex.at(source).first = exminus; } /// Set positive x error void setXErrPlus(double explus, std::string source="") { - if (source!="") getVariationsFromParent(); if (!_ex.count(source)) _ex[source] = std::make_pair(0.,0.); _ex.at(source).second = explus; } /// Set symmetric x error void setXErr(double ex, std::string source="") { setXErrMinus(ex, source); setXErrPlus(ex, source); } /// Set symmetric x error (alias) void setXErrs(double ex, std::string source="") { setXErr(ex, source); } /// Set asymmetric x error void setXErrs(double exminus, double explus, std::string source="") { setXErrMinus(exminus, source); setXErrPlus(explus, source); } /// Set asymmetric x error void setXErrs(const std::pair& ex, std::string source="") { - if (source!="") getVariationsFromParent(); _ex[source] = ex; } /// Get value minus negative x-error double xMin(std::string source="") const { if (!_ex.count(source)) throw RangeError("xErrs has no such key: "+source); return _x - _ex.at(source).first; } /// Get value plus positive x-error double xMax(std::string source="") const { if (!_ex.count(source)) throw RangeError("xErrs has no such key: "+source); return _x + _ex.at(source).second; } //@} /// @name Combined x value and error setters //@{ /// Set x value and symmetric error void setX(double x, double ex, std::string source="") { setX(x); setXErr(ex, source); } /// Set x value and asymmetric error void setX(double x, double exminus, double explus, std::string source="") { setX(x); setXErrs(exminus, explus, source); } /// Set x value and asymmetric error void setX(double x, std::pair& ex, std::string source="") { setX(x); setXErrs(ex, source); } //@} // @name Manipulations //@{ /// Scaling of x axis void scaleX(double scalex) { setX(x()*scalex); for (const auto &source : _ex){ setXErrs(xErrMinus()*scalex, xErrPlus()*scalex, source.first); } } //@} /// @name Integer axis accessor equivalents //@{ /// Get the point value for direction @a i double val(size_t i) const { if (i == 0 || i > 1) throw RangeError("Invalid axis int, must be in range 1..dim"); return x(); } /// Set the point value for direction @a i void setVal(size_t i, double val) { if (i != 1) throw RangeError("Invalid axis int, must be in range 1..dim"); setX(val); } /// Get error map for direction @a i const std::map< std::string, std::pair> & errMap() const { - getVariationsFromParent(); return _ex; } - - // Parse the variations from the parent AO if it exists - void getVariationsFromParent() const; /// Get error values for direction @a i const std::pair& errs(size_t i, 
std::string source="") const { if (i != 1) throw RangeError("Invalid axis int, must be in range 1..dim"); return xErrs(source); } /// Get negative error value for direction @a i double errMinus(size_t i, std::string source="") const { if (i != 1) throw RangeError("Invalid axis int, must be in range 1..dim"); return xErrMinus(source); } /// Get positive error value for direction @a i double errPlus(size_t i, std::string source="") const { if (i != 1) throw RangeError("Invalid axis int, must be in range 1..dim"); return xErrPlus(source); } /// Get average error value for direction @a i double errAvg(size_t i, std::string source="") const { if (i != 1) throw RangeError("Invalid axis int, must be in range 1..dim"); return xErrAvg(source); } /// Set negative error for direction @a i void setErrMinus(size_t i, double eminus, std::string source="") { if (i != 1) throw RangeError("Invalid axis int, must be in range 1..dim"); setXErrMinus(eminus, source); } /// Set positive error for direction @a i void setErrPlus(size_t i, double eplus, std::string source="") { if (i != 1) throw RangeError("Invalid axis int, must be in range 1..dim"); setXErrPlus(eplus, source); } /// Set symmetric error for direction @a i void setErr(size_t i, double e, std::string source="") { if (i != 1) throw RangeError("Invalid axis int, must be in range 1..dim"); setXErr(e, source); } /// Set asymmetric error for direction @a i void setErrs(size_t i, double eminus, double eplus, std::string source="") { if (i != 1) throw RangeError("Invalid axis int, must be in range 1..dim"); setXErrs(eminus, eplus, source); } /// Set asymmetric error for direction @a i void setErrs(size_t i, std::pair& e, std::string source="") { if (i != 1) throw RangeError("Invalid axis int, must be in range 1..dim"); setXErrs(e, source); } /// Set value and symmetric error for direction @a i void set(size_t i, double val, double e, std::string source="") { if (i != 1) throw RangeError("Invalid axis int, must be in range 1..dim"); setX(val, e, source); } /// Set value and asymmetric error for direction @a i void set(size_t i, double val, double eminus, double eplus, std::string source="") { if (i != 1) throw RangeError("Invalid axis int, must be in range 1..dim"); setX(val, eminus, eplus, source); } /// Set value and asymmetric error for direction @a i void set(size_t i, double val, std::pair& e, std::string source="") { if (i != 1) throw RangeError("Invalid axis int, must be in range 1..dim"); setX(val, e, source); } //@} protected: /// @name Value and error variables //@{ double _x; // a map of the errors for each source. 
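A short sketch of the per-source error storage used above: nominal errors live under the empty-string key, and extra variation sources can be attached and queried by name. The source name below is invented for illustration.

#include "YODA/Point1D.h"
#include <iostream>
#include <string>

int main() {
  YODA::Point1D p(1.0, 0.1);               // nominal symmetric error, key ""
  p.setXErrs(0.05, 0.15, "MUR_MUF_UP");    // an extra, named error source

  std::cout << "nominal +err:   " << p.xErrPlus() << "\n";
  std::cout << "variation +err: " << p.xErrPlus("MUR_MUF_UP") << "\n";

  // errMap() exposes the whole source -> (minus, plus) map
  for (const auto& kv : p.errMap())
    std::cout << (kv.first.empty() ? std::string("<nominal>") : kv.first)
              << ": -" << kv.second.first << " +" << kv.second.second << "\n";

  // Asking for an unknown source throws a RangeError
  try {
    p.xErrMinus("NO_SUCH_SOURCE");
  } catch (const YODA::RangeError& e) {
    std::cout << "caught: " << e.what() << "\n";
  }
  return 0;
}
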
Nominal stored under "" // to ensure backward compatibility std::map< std::string, std::pair > _ex; //@} }; /// @name Comparison operators //@{ /// Equality test of x characteristics only /// @todo Base on a named fuzzyEquals(a,b,tol=1e-3) unbound function inline bool operator==(const YODA::Point1D& a, const YODA::Point1D& b) { if (!YODA::fuzzyEquals(a.x(), b.x()) || !YODA::fuzzyEquals(a.xErrMinus(), b.xErrMinus()) || !YODA::fuzzyEquals(a.xErrPlus(), b.xErrPlus()) ) return false; return true; } /// Equality test of x characteristics only inline bool operator != (const YODA::Point1D& a, const YODA::Point1D& b) { return !(a == b); } /// Less-than operator used to sort bins by x-ordering inline bool operator < (const YODA::Point1D& a, const YODA::Point1D& b) { if (!YODA::fuzzyEquals(a.x(), b.x())) { return a.x() < b.x(); } if (!YODA::fuzzyEquals(a.xErrMinus(), b.xErrMinus())) { return a.xErrMinus() < b.xErrMinus(); } if (!YODA::fuzzyEquals(a.xErrPlus(), b.xErrPlus())) { return a.xErrPlus() < b.xErrPlus(); } return false; } /// Less-than-or-equals operator used to sort bins by x-ordering inline bool operator <= (const YODA::Point1D& a, const YODA::Point1D& b) { if (a == b) return true; return a < b; } /// Greater-than operator used to sort bins by x-ordering inline bool operator > (const YODA::Point1D& a, const YODA::Point1D& b) { return !(a <= b); } /// Greater-than-or-equals operator used to sort bins by x-ordering inline bool operator >= (const YODA::Point1D& a, const YODA::Point1D& b) { return !(a < b); } //@} } #endif diff --git a/include/YODA/Point2D.h b/include/YODA/Point2D.h --- a/include/YODA/Point2D.h +++ b/include/YODA/Point2D.h @@ -1,576 +1,561 @@ // -*- C++ -*- // // This file is part of YODA -- Yet more Objects for Data Analysis // Copyright (C) 2008-2018 The YODA collaboration (see AUTHORS for details) // #ifndef YODA_POINT2D_H #define YODA_POINT2D_H #include "YODA/Point.h" #include "YODA/Exceptions.h" #include "YODA/Utils/MathUtils.h" #include - namespace YODA { /// A 2D data point to be contained in a Scatter2D class Point2D : public Point { public: /// @name Constructors //@{ // Default constructor Point2D() { } /// Constructor from values with optional symmetric errors Point2D(double x, double y, double ex=0.0, double ey=0.0, std::string source="") : _x(x), _y(y) { _ex = std::make_pair(ex, ex); _ey[source] = std::make_pair(ey, ey); } /// Constructor from values with explicit asymmetric errors Point2D(double x, double y, double exminus, double explus, double eyminus, double eyplus, std::string source="") : _x(x), _y(y) { _ex = std::make_pair(exminus, explus); _ey[source] = std::make_pair(eyminus, eyplus); } // /// Constructor from values with symmetric errors on x and asymmetric errors on y // Point2D(double x, double y, double ex, const std::pair& ey) // : _x(x), _y(y), _ey(ey) // { // _ex = std::make_pair(ex, ex); // } // /// Constructor from values with asymmetric errors on x and symmetric errors on y // Point2D(double x, double y, const std::pair& ex, double ey) // : _x(x), _y(y), _ex(ex) // { // _ey = std::make_pair(ey, ey); // } /// Constructor from values with asymmetric errors on both x and y Point2D(double x, double y, const std::pair& ex, const std::pair& ey, std::string source="") : _x(x), _y(y) { _ex = ex; _ey[source] = ey; } /// Copy constructor Point2D(const Point2D& p) : _x(p._x), _y(p._y) { _ex = p._ex; _ey = p._ey; - this->setParentAO( p.getParentAO()); } /// Copy assignment Point2D& operator = (const Point2D& p) { _x = p._x; _y = p._y; _ex = p._ex; _ey = 
p._ey; - this->setParentAO( p.getParentAO()); return *this; } //@} public: /// Space dimension of the point size_t dim() { return 2; } /// @name Value accessors //@{ /// Get x value double x() const { return _x; } /// Set x value void setX(double x) { _x = x; } /// Get y value double y() const { return _y; } /// Set y value void setY(double y) { _y = y; } /// @todo Uniform "coords" accessor across all Scatters: returning fixed-size tuple? /// Get x,y value pair std::pair xy() const { return std::make_pair(_x, _y); } /// Set x and y values void setXY(double x, double y) { setX(x); setY(y); } /// Set x and y values void setXY(const std::pair& xy) { setX(xy.first); setY(xy.second); } //@} /// @name x error accessors //@{ /// Get x-error values const std::pair& xErrs() const { return _ex; } /// Get negative x-error value double xErrMinus() const { return _ex.first; } /// Get positive x-error value double xErrPlus() const { return _ex.second; } /// Get average x-error value double xErrAvg() const { return (_ex.first + _ex.second)/2.0; } /// Set negative x error void setXErrMinus(double exminus) { _ex.first = exminus; } /// Set positive x error void setXErrPlus(double explus) { _ex.second = explus; } /// Set symmetric x error void setXErr(double ex) { setXErrMinus(ex); setXErrPlus(ex); } /// Set symmetric x error (alias) void setXErrs(double ex) { setXErr(ex); } /// Set asymmetric x error void setXErrs(double exminus, double explus) { setXErrMinus(exminus); setXErrPlus(explus); } /// Set asymmetric x error void setXErrs(const std::pair& ex) { _ex = ex; } /// Get value minus negative x-error /// @todo Remove (or extend) when multiple errors are supported /// No: doesn't need to change since (for now) we only store multiple /// errors for the highest dimentsion double xMin() const { return _x - _ex.first; } /// Get value plus positive x-error /// @todo Remove (or extend) when multiple errors are supported /// No: doesn't need to change since (for now) we only store multiple /// errors for the highest dimentsion double xMax() const { return _x + _ex.second; } //@} /// @name y error accessors //@{ /// Get y-error values const std::pair& yErrs(std::string source="") const { - if (source!="") getVariationsFromParent(); if (!_ey.count(source)) throw RangeError("yErrs has no such key: "+source); return _ey.at(source); } /// Get negative y-error value double yErrMinus(std::string source="") const { - if (source!="") getVariationsFromParent(); if (!_ey.count(source)) throw RangeError("yErrs has no such key: "+source); return _ey.at(source).first; } /// Get positive y-error value double yErrPlus(std::string source="") const { - if (source!="") getVariationsFromParent(); if (!_ey.count(source)) throw RangeError("yErrs has no such key: "+source); return _ey.at(source).second; } /// Get average y-error value double yErrAvg(std::string source="") const { - if (source!="") getVariationsFromParent(); if (!_ey.count(source)) throw RangeError("yErrs has no such key: "+source); - double res=(fabs(_ey.at(source).first) + fabs(_ey.at(source).second))/2.; - return res; + return (_ey.at(source).first + _ey.at(source).second)/2.0; } /// Set negative y error void setYErrMinus(double eyminus, std::string source="") { - if (!_ey.count(source)) { - _ey[source] = std::make_pair(0.,0.); - } + if (!_ey.count(source)) _ey[source] = std::make_pair(0.,0.); _ey.at(source).first = eyminus; } /// Set positive y error void setYErrPlus(double eyplus, std::string source="") { if (!_ey.count(source)) _ey[source] = std::make_pair(0.,0.); 
_ey.at(source).second = eyplus; } /// Set symmetric y error void setYErr(double ey, std::string source="") { setYErrMinus(ey, source ); setYErrPlus(ey, source ); } /// Set symmetric y error (alias) void setYErrs(double ey, std::string source="") { setYErr(ey, source); } /// Set asymmetric y error void setYErrs(double eyminus, double eyplus, std::string source="") { setYErrMinus(eyminus, source); setYErrPlus(eyplus, source ); } /// Set asymmetric y error void setYErrs(const std::pair& ey, std::string source="") { _ey[source] = ey; } /// Get value minus negative y-error double yMin(std::string source="") const { - if (source!="") getVariationsFromParent(); if (!_ey.count(source)) throw RangeError("yErrs has no such key: "+source); return _y - _ey.at(source).first; } /// Get value plus positive y-error double yMax(std::string source="") const { - if (source!="") getVariationsFromParent(); if (!_ey.count(source)) throw RangeError("yErrs has no such key: "+source); return _y + _ey.at(source).second; } //@} /// @name Combined x/y value and error setters //@{ /// Set x value and symmetric error void setX(double x, double ex) { setX(x); setXErrs(ex); } /// Set x value and asymmetric error void setX(double x, double exminus, double explus) { setX(x); setXErrs(exminus, explus); } /// Set x value and asymmetric error void setX(double x, std::pair& ex) { setX(x); setXErrs(ex); } /// Set y value and symmetric error void setY(double y, double ey, std::string source="") { setY(y); setYErrs(ey, source); } /// Set y value and asymmetric error void setY(double y, double eyminus, double eyplus, std::string source="") { setY(y); setYErrs(eyminus, eyplus, source); } /// Set y value and asymmetric error void setY(double y, std::pair& ey, std::string source="") { setY(y); setYErrs(ey, source); } //@} // @name Manipulations //@{ /// Scaling of x axis void scaleX(double scalex) { setX(x()*scalex); setXErrs(xErrMinus()*scalex, xErrPlus()*scalex); } /// Scaling of y axis void scaleY(double scaley) { setY(y()*scaley); for (const auto &source : _ey){ setYErrs(yErrMinus()*scaley, yErrPlus()*scaley, source.first); } } /// Scaling of both axes void scaleXY(double scalex, double scaley) { scaleX(scalex); scaleY(scaley); } /// Scaling of both axes /// @deprecated Use scaleXY void scale(double scalex, double scaley) { scaleXY(scalex, scaley); } //@} /// @name Integer axis accessor equivalents //@{ /// Get the point value for direction @a i double val(size_t i) const { switch (i) { case 1: return x(); case 2: return y(); default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } /// Set the point value for direction @a i void setVal(size_t i, double val) { switch (i) { case 1: setX(val); break; case 2: setY(val); break; default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } /// Get error map for direction @a i - const std::map< std::string, std::pair> & errMap() const; - - // Parse the variations from the parent AO if it exists - void getVariationsFromParent() const; + const std::map< std::string, std::pair> & errMap() const { + return _ey; + } /// Get error values for direction @a i const std::pair& errs(size_t i, std::string source="") const { switch (i) { case 1: return xErrs(); case 2: return yErrs(source); default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } /// Get negative error value for direction @a i double errMinus(size_t i, std::string source="") const { switch (i) { case 1: return xErrMinus(); case 2: return yErrMinus(source); default: throw 
RangeError("Invalid axis int, must be in range 1..dim"); } } /// Get positive error value for direction @a i double errPlus(size_t i, std::string source="") const { switch (i) { case 1: return xErrPlus(); case 2: return yErrPlus(source); default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } /// Get average error value for direction @a i double errAvg(size_t i, std::string source="") const { switch (i) { case 1: return xErrAvg(); case 2: return yErrAvg(source); default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } /// Set negative error for direction @a i void setErrMinus(size_t i, double eminus, std::string source="") { switch (i) { case 1: setXErrMinus(eminus); break; case 2: setYErrMinus(eminus, source); break; default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } /// Set positive error for direction @a i void setErrPlus(size_t i, double eplus, std::string source="") { switch (i) { case 1: setXErrPlus(eplus); break; case 2: setYErrPlus(eplus, source); break; default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } /// Set symmetric error for direction @a i void setErr(size_t i, double e, std::string source="") { switch (i) { case 1: setXErrs(e); break; case 2: setYErrs(e, source); break; default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } /// Set asymmetric error for direction @a i void setErrs(size_t i, double eminus, double eplus, std::string source="") { switch (i) { case 1: setXErrs(eminus, eplus); break; case 2: setYErrs(eminus, eplus, source); break; default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } /// Set asymmetric error for direction @a i void setErrs(size_t i, std::pair& e, std::string source="") { switch (i) { case 1: setXErrs(e); break; case 2: setYErrs(e, source); break; default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } /// Set value and symmetric error for direction @a i void set(size_t i, double val, double e, std::string source="") { switch (i) { case 1: setX(val, e); break; case 2: setY(val, e, source); break; default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } /// Set value and asymmetric error for direction @a i void set(size_t i, double val, double eminus, double eplus, std::string source="") { switch (i) { case 1: setX(val, eminus, eplus); break; case 2: setY(val, eminus, eplus, source); break; default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } /// Set value and asymmetric error for direction @a i void set(size_t i, double val, std::pair& e, std::string source="") { switch (i) { case 1: setX(val, e); break; case 2: setY(val, e, source); break; default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } - //@} protected: /// @name Value and error variables //@{ - double _x; double _y; std::pair _ex; // a map of the errors for each source. 
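To illustrate the value/error accessors and the scaleXY manipulation above, a minimal sketch with nominal errors only; the values in the comments follow directly from the formulas above.

#include "YODA/Point2D.h"
#include <iostream>

int main() {
  YODA::Point2D p(1.0, 10.0, 0.5, 1.0);    // symmetric nominal errors

  // min/max are the value minus/plus the stored errors
  std::cout << p.xMin() << " " << p.xMax() << "\n";   // 0.5 1.5
  std::cout << p.yMin() << " " << p.yMax() << "\n";   // 9 11

  // Scale both axes: values and their errors are rescaled together
  p.scaleXY(2.0, 0.1);
  std::cout << p.x() << " +- " << p.xErrPlus() << "\n";   // 2 +- 1
  std::cout << p.y() << " +- " << p.yErrPlus() << "\n";   // 1 +- 0.1
  return 0;
}
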
Nominal stored under "" // to ensure backward compatibility std::map< std::string, std::pair > _ey; //@} }; /// @name Comparison operators //@{ /// Equality test of x & y characteristics only /// @todo Base on a named fuzzyEquals(a,b,tol=1e-3) unbound function inline bool operator==(const YODA::Point2D& a, const YODA::Point2D& b) { if (!YODA::fuzzyEquals(a.x(), b.x()) || !YODA::fuzzyEquals(a.xErrMinus(), b.xErrMinus()) || !YODA::fuzzyEquals(a.xErrPlus(), b.xErrPlus()) ) return false; if (!YODA::fuzzyEquals(a.y(), b.y()) || !YODA::fuzzyEquals(a.yErrMinus(), b.yErrMinus()) || !YODA::fuzzyEquals(a.yErrPlus(), b.yErrPlus()) ) return false; return true; } /// Equality test of x characteristics only inline bool operator != (const YODA::Point2D& a, const YODA::Point2D& b) { return !(a == b); } /// Less-than operator used to sort bins by x-ordering inline bool operator < (const YODA::Point2D& a, const YODA::Point2D& b) { if (!YODA::fuzzyEquals(a.x(), b.x())) { return a.x() < b.x(); } if (!YODA::fuzzyEquals(a.xErrMinus(), b.xErrMinus())) { return a.xErrMinus() < b.xErrMinus(); } if (!YODA::fuzzyEquals(a.xErrPlus(), b.xErrPlus())) { return a.xErrPlus() < b.xErrPlus(); } return false; } /// Less-than-or-equals operator used to sort bins by x-ordering inline bool operator <= (const YODA::Point2D& a, const YODA::Point2D& b) { if (a == b) return true; return a < b; } /// Greater-than operator used to sort bins by x-ordering inline bool operator > (const YODA::Point2D& a, const YODA::Point2D& b) { return !(a <= b); } /// Greater-than-or-equals operator used to sort bins by x-ordering inline bool operator >= (const YODA::Point2D& a, const YODA::Point2D& b) { return !(a < b); } //@} } #endif diff --git a/include/YODA/Point3D.h b/include/YODA/Point3D.h --- a/include/YODA/Point3D.h +++ b/include/YODA/Point3D.h @@ -1,695 +1,679 @@ // -*- C++ -*- // // This file is part of YODA -- Yet more Objects for Data Analysis // Copyright (C) 2008-2018 The YODA collaboration (see AUTHORS for details) // #ifndef YODA_POINT3D_H #define YODA_POINT3D_H #include "YODA/Point.h" #include "YODA/Exceptions.h" #include "YODA/Utils/MathUtils.h" #include namespace YODA { /// A 3D data point to be contained in a Scatter3D class Point3D : public Point { public: /// @name Constructors //@{ // Default constructor Point3D() { } /// Constructor from values with optional symmetric errors Point3D(double x, double y, double z, double ex=0.0, double ey=0.0, double ez=0.0, std::string source="") : _x(x), _y(y), _z(z) { _ex = std::make_pair(ex, ex); _ey = std::make_pair(ey, ey); _ez[source] = std::make_pair(ez, ez); } /// Constructor from values with explicit asymmetric errors Point3D(double x, double y, double z, double exminus, double explus, double eyminus, double eyplus, double ezminus, double ezplus, std::string source="") : _x(x), _y(y), _z(z) { _ex = std::make_pair(exminus, explus); _ey = std::make_pair(eyminus, eyplus); _ez[source] = std::make_pair(ezminus, ezplus); } /// Constructor from asymmetric errors given as vectors Point3D(double x, double y, double z, const std::pair& ex, const std::pair& ey, const std::pair& ez, std::string source="") : _x(x), _y(y), _z(z), _ex(ex), _ey(ey) { _ez[source] = ez; } /// Copy constructor Point3D(const Point3D& p) : _x(p._x), _y(p._y), _z(p._z), _ex(p._ex), _ey(p._ey), _ez(p._ez) - { - this->setParentAO( p.getParentAO()); - } + { } /// Copy assignment Point3D& operator = (const Point3D& p) { _x = p._x; _y = p._y; _z = p._z; _ex = p._ex; _ey = p._ey; _ez = p._ez; - this->setParentAO( 
p.getParentAO()); return *this; } //@} public: /// Space dimension of the point size_t dim() { return 3; } /// @name Value and error accessors //@{ /// Get x value double x() const { return _x; } /// Set x value void setX(double x) { _x = x; } /// Get y value double y() const { return _y; } /// Set y value void setY(double y) { _y = y; } /// Get z value double z() const { return _z;} /// Set z value void setZ(double z) { _z = z;} /// @todo Uniform "coords" accessor across all Scatters: returning fixed-size tuple? // /// Get x,y,z value tuple // triple xyz() const { return std::triple(_x, _y, _z); } /// Set x, y and z values void setXYZ(double x, double y, double z) { setX(x); setY(y); setZ(z); } // /// Set x and y values // void setXY(triple xyz) { setX(xy.first); setY(xy.second); setZ(xy.third); } //@} /// @name x error accessors //@{ /// Get x-error values const std::pair& xErrs() const { return _ex; } /// Get negative x-error value double xErrMinus() const { return _ex.first; } /// Get positive x-error value double xErrPlus() const { return _ex.second; } /// Get average x-error value double xErrAvg() const { return (_ex.first + _ex.second)/2.0; } /// Set negative x error void setXErrMinus(double exminus) { _ex.first = exminus; } /// Set positive x error void setXErrPlus(double explus) { _ex.second = explus; } /// Set symmetric x error void setXErr(double ex) { setXErrMinus(ex); setXErrPlus(ex); } /// Set symmetric x error (alias) void setXErrs(double ex) { setXErr(ex); } /// Set asymmetric x error void setXErrs(double exminus, double explus) { setXErrMinus(exminus); setXErrPlus(explus); } /// Set asymmetric x error void setXErrs(const std::pair& ex) { _ex = ex; } /// Get value minus negative x-error double xMin() const { return _x - _ex.first; } /// Get value plus positive x-error double xMax() const { return _x + _ex.second; } //@} /// @name y error accessors //@{ /// Get y-error values const std::pair& yErrs() const { return _ey; } /// Get negative y-error value double yErrMinus() const { return _ey.first; } /// Get positive y-error value double yErrPlus() const { return _ey.second; } /// Get average y-error value double yErrAvg() const { return (_ey.first + _ey.second)/2.0; } /// Set negative y error void setYErrMinus(double eyminus) { _ey.first = eyminus; } /// Set positive y error void setYErrPlus(double eyplus) { _ey.second = eyplus; } /// Set symmetric y error void setYErr(double ey) { setYErrMinus(ey); setYErrPlus(ey); } /// Set symmetric y error (alias) void setYErrs(double ey) { setYErr(ey); } /// Set asymmetric y error void setYErrs(double eyminus, double eyplus) { setYErrMinus(eyminus); setYErrPlus(eyplus); } /// Set asymmetric y error void setYErrs(const std::pair& ey) { _ey = ey; } /// Get value minus negative y-error double yMin() const { return _y - _ey.first; } /// Get value plus positive y-error double yMax() const { return _y + _ey.second; } //@} /// @name z error accessors //@{ /// Get z-error values const std::pair& zErrs( std::string source="") const { - if (source!="") getVariationsFromParent(); if (!_ez.count(source)) throw RangeError("zErrs has no such key: "+source); return _ez.at(source); } /// Get negative z-error value double zErrMinus( std::string source="") const { - if (source!="") getVariationsFromParent(); if (!_ez.count(source)) throw RangeError("zErrs has no such key: "+source); return _ez.at(source).first; } /// Get positive z-error value double zErrPlus( std::string source="") const { - if (source!="") getVariationsFromParent(); if 
(!_ez.count(source)) throw RangeError("zErrs has no such key: "+source); return _ez.at(source).second; } /// Get average z-error value double zErrAvg( std::string source="") const { - if (source!="") getVariationsFromParent(); if (!_ez.count(source)) throw RangeError("zErrs has no such key: "+source); return (_ez.at(source).first + _ez.at(source).second)/2.0; } /// Set negative z error void setZErrMinus(double ezminus, std::string source="") { - if (source!="") getVariationsFromParent(); if (!_ez.count(source)) _ez[source] = std::make_pair(0.,0.); _ez.at(source).first = ezminus; } /// Set positive z error void setZErrPlus(double ezplus, std::string source="") { - if (source!="") getVariationsFromParent(); if (!_ez.count(source)) _ez[source] = std::make_pair(0.,0.); _ez.at(source).second = ezplus; } /// Set symmetric z error void setZErr(double ez, std::string source="") { setZErrMinus(ez, source); setZErrPlus(ez, source); } /// Set symmetric z error (alias) void setZErrs(double ez, std::string source="") { setZErr(ez, source); } /// Set asymmetric z error void setZErrs(double ezminus, double ezplus, std::string source="") { setZErrMinus(ezminus, source); setZErrPlus(ezplus, source); } /// Set asymmetric z error void setZErrs(const std::pair& ez, std::string source="") { - if (source!="") getVariationsFromParent(); _ez[source] = ez; } /// Get value minus negative z-error double zMin( std::string source="") const { - if (source!="") getVariationsFromParent(); if (!_ez.count(source)) throw RangeError("zErrs has no such key: "+source); return _z - _ez.at(source).first; } /// Get value plus positive z-error double zMax( std::string source="") const { - if (source!="") getVariationsFromParent(); if (!_ez.count(source)) throw RangeError("zErrs has no such key: "+source); return _z + _ez.at(source).second; } //@} /// @name Combined x/y value and error setters //@{ /// Set x value and symmetric error void setX(double x, double ex) { setX(x); setXErrs(ex); } /// Set x value and asymmetric error void setX(double x, double exminus, double explus) { setX(x); setXErrs(exminus, explus); } /// Set x value and asymmetric error void setX(double x, std::pair& ex) { setX(x); setXErrs(ex); } /// Set y value and symmetric error void setY(double y, double ey) { setY(y); setYErrs(ey); } /// Set y value and asymmetric error void setY(double y, double eyminus, double eyplus) { setY(y); setYErrs(eyminus, eyplus); } /// Set y value and asymmetric error void setY(double y, std::pair& ey) { setY(y); setYErrs(ey); } /// Set z value and symmetric error void setZ(double z, double ez, std::string source="") { setZ(z); setZErrs(ez, source); } /// Set z value and asymmetric error void setZ(double z, double ezminus, double ezplus, std::string source="") { setZ(z); setZErrs(ezminus, ezplus, source); } /// Set z value and asymmetric error void setZ(double z, std::pair& ez, std::string source="") { setZ(z); setZErrs(ez, source); } //@} // @name Manipulations //@{ /// Scaling of x axis void scaleX(double scalex) { setX(x()*scalex); setXErrs(xErrMinus()*scalex, xErrPlus()*scalex); } /// Scaling of y axis void scaleY(double scaley) { setY(y()*scaley); setYErrs(yErrMinus()*scaley, yErrPlus()*scaley); } /// Scaling of z axis void scaleZ(double scalez) { setZ(z()*scalez); for (const auto &source : _ez){ setZErrs(zErrMinus()*scalez, zErrPlus()*scalez, source.first); } } /// Scaling of all three axes void scaleXYZ(double scalex, double scaley, double scalez) { scaleX(scalex); scaleY(scaley); scaleZ(scalez); } /// Scaling of both axes 
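The z direction is the one carrying named error sources for Point3D, mirroring the y direction of Point2D. A hedged sketch of the combined setter and the per-source zMin/zMax accessors above; the source name is invented.

#include "YODA/Point3D.h"
#include <iostream>

int main() {
  YODA::Point3D p(1.0, 2.0, 5.0, 0.1, 0.2, 0.5);   // symmetric nominal errors

  // Combined value+error setter for z, attaching a named variation source
  p.setZ(6.0, 0.4, 0.8, "SCALE_UP");

  std::cout << "nominal:  " << p.zMin() << " .. " << p.zMax() << "\n";                       // 5.5 .. 6.5
  std::cout << "SCALE_UP: " << p.zMin("SCALE_UP") << " .. " << p.zMax("SCALE_UP") << "\n";   // 5.6 .. 6.8
  return 0;
}
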
/// @deprecated Use scaleXYZ void scale(double scalex, double scaley, double scalez) { scaleXYZ(scalex, scaley, scalez); } //@} /// @name Integer axis accessor equivalents //@{ /// Get the point value for direction @a i double val(size_t i) const { switch (i) { case 1: return x(); case 2: return y(); case 3: return z(); default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } /// Set the point value for direction @a i void setVal(size_t i, double val) { switch (i) { case 1: setX(val); break; case 2: setY(val); break; case 3: setZ(val); break; default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } /// Get error map for direction @a i const std::map< std::string, std::pair> & errMap() const { - getVariationsFromParent(); return _ez; } - - // Parse the variations from the parent AO if it exists - void getVariationsFromParent() const; /// Get error values for direction @a i const std::pair& errs(size_t i, std::string source="") const { switch (i) { case 1: return xErrs(); case 2: return yErrs(); case 3: return zErrs(source); default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } /// Get negative error value for direction @a i double errMinus(size_t i, std::string source="") const { switch (i) { case 1: return xErrMinus(); case 2: return yErrMinus(); case 3: return zErrMinus(source); default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } /// Get positive error value for direction @a i double errPlus(size_t i, std::string source="") const { switch (i) { case 1: return xErrPlus(); case 2: return yErrPlus(); case 3: return zErrPlus(source); default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } /// Get average error value for direction @a i double errAvg(size_t i, std::string source="") const { switch (i) { case 1: return xErrAvg(); case 2: return yErrAvg(); case 3: return zErrAvg(source); default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } /// Set negative error for direction @a i void setErrMinus(size_t i, double eminus, std::string source="") { switch (i) { case 1: setXErrMinus(eminus); break; case 2: setYErrMinus(eminus); break; case 3: setZErrMinus(eminus, source); break; default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } /// Set positive error for direction @a i void setErrPlus(size_t i, double eplus, std::string source="") { switch (i) { case 1: setXErrPlus(eplus); break; case 2: setYErrPlus(eplus); break; case 3: setZErrPlus(eplus, source); break; default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } /// Set symmetric error for direction @a i void setErr(size_t i, double e, std::string source="") { switch (i) { case 1: setXErrs(e); break; case 2: setYErrs(e); break; case 3: setZErrs(e, source); break; default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } /// Set asymmetric error for direction @a i void setErrs(size_t i, double eminus, double eplus, std::string source="") { switch (i) { case 1: setXErrs(eminus, eplus); break; case 2: setYErrs(eminus, eplus); break; case 3: setZErrs(eminus, eplus, source); break; default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } /// Set asymmetric error for direction @a i void setErrs(size_t i, std::pair& e, std::string source="") { switch (i) { case 1: setXErrs(e); break; case 2: setYErrs(e); break; case 3: setZErrs(e, source); break; default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } /// Set value and symmetric 
error for direction @a i void set(size_t i, double val, double e, std::string source="") { switch (i) { case 1: setX(val, e); break; case 2: setY(val, e); break; case 3: setZ(val, e, source); break; default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } /// Set value and asymmetric error for direction @a i void set(size_t i, double val, double eminus, double eplus, std::string source="") { switch (i) { case 1: setX(val, eminus, eplus); break; case 2: setY(val, eminus, eplus); break; case 3: setZ(val, eminus, eplus, source); break; default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } /// Set value and asymmetric error for direction @a i void set(size_t i, double val, std::pair& e, std::string source="") { switch (i) { case 1: setX(val, e); break; case 2: setY(val, e); break; case 3: setZ(val, e, source); break; default: throw RangeError("Invalid axis int, must be in range 1..dim"); } } //@} protected: /// @name Value and error variables //@{ double _x; double _y; double _z; std::pair _ex; std::pair _ey; // a map of the errors for each source. Nominal stored under "" // to ensure backward compatibility std::map< std::string, std::pair >_ez; //@} }; /// @name Comparison operators //@{ /// Equality test of x, y & z characteristics only /// @todo Base on a named fuzzyEquals(a,b,tol=1e-3) unbound function inline bool operator==(const Point3D& a, const YODA::Point3D& b) { if (!YODA::fuzzyEquals(a.x(), b.x()) || !YODA::fuzzyEquals(a.xErrMinus(), b.xErrMinus()) || !YODA::fuzzyEquals(a.xErrPlus(), b.xErrPlus()) ) return false; if (!YODA::fuzzyEquals(a.y(), b.y()) || !YODA::fuzzyEquals(a.yErrMinus(), b.yErrMinus()) || !YODA::fuzzyEquals(a.yErrPlus(), b.yErrPlus()) ) return false; if (!YODA::fuzzyEquals(a.z(), b.z()) || !YODA::fuzzyEquals(a.zErrMinus(), b.zErrMinus()) || !YODA::fuzzyEquals(a.zErrPlus(), b.zErrPlus()) ) return false; return true; const bool same_val = fuzzyEquals(a.x(), b.x()) && fuzzyEquals(a.y(), b.y()); const bool same_eminus = fuzzyEquals(a.xErrMinus(), b.xErrMinus()) && fuzzyEquals(a.yErrMinus(), b.yErrMinus()); const bool same_eplus = fuzzyEquals(a.xErrPlus(), b.xErrPlus()) && fuzzyEquals(a.yErrPlus(), b.yErrPlus()); return same_val && same_eminus && same_eplus; } /// Inequality operator inline bool operator != (const Point3D& a, const YODA::Point3D& b) { return !(a == b); } /// Less-than operator used to sort bins by x-first ordering inline bool operator < (const Point3D& a, const YODA::Point3D& b) { if (! fuzzyEquals(a.x(), b.x())) { return a.x() < b.x(); } if (!fuzzyEquals(a.y(), b.y())) { return a.y() < b.y(); } if (! fuzzyEquals(a.xErrMinus(), b.xErrMinus())) { return a.xErrMinus() < b.xErrMinus(); } if (!fuzzyEquals(a.yErrMinus(), b.yErrMinus())) { return a.yErrMinus() < b.yErrMinus(); } if (! 
fuzzyEquals(a.xErrPlus(), b.xErrPlus())) { return a.xErrPlus() < b.xErrPlus(); } if (!fuzzyEquals(a.yErrPlus(), b.yErrPlus())) { return a.yErrPlus() < b.yErrPlus(); } return false; } /// Less-than-or-equals operator inline bool operator <= (const Point3D& a, const YODA::Point3D& b) { if (a == b) return true; return a < b; } /// Greater-than operator inline bool operator > (const Point3D& a, const YODA::Point3D& b) { return !(a <= b); } /// Greater-than-or-equals operator inline bool operator >= (const Point3D& a, const YODA::Point3D& b) { return !(a < b); } //@} } #endif diff --git a/include/YODA/Scatter1D.h b/include/YODA/Scatter1D.h --- a/include/YODA/Scatter1D.h +++ b/include/YODA/Scatter1D.h @@ -1,342 +1,330 @@ // -*- C++ -*- // // This file is part of YODA -- Yet more Objects for Data Analysis // Copyright (C) 2008-2018 The YODA collaboration (see AUTHORS for details) // #ifndef YODA_SCATTER1D_H #define YODA_SCATTER1D_H #include "YODA/AnalysisObject.h" #include "YODA/Point1D.h" #include "YODA/Utils/sortedvector.h" #include #include namespace YODA { // Forward declarations class Counter; /// A very generic data type which is just a collection of 1D data points with errors class Scatter1D : public AnalysisObject { public: /// Type of the native Point1D collection typedef Point1D Point; typedef Utils::sortedvector Points; typedef std::shared_ptr Ptr; /// @name Constructors //@{ /// Empty constructor Scatter1D(const std::string& path="", const std::string& title="") : AnalysisObject("Scatter1D", path, title) { } /// Constructor from a set of points Scatter1D(const Points& points, const std::string& path="", const std::string& title="") : AnalysisObject("Scatter1D", path, title), _points(points) { } /// Constructor from a vector of x values with no errors Scatter1D(const std::vector& x, const std::string& path="", const std::string& title="") : AnalysisObject("Scatter1D", path, title) { for (size_t i = 0; i < x.size(); ++i) addPoint(x[i]); } /// Constructor from vectors of x values with symmetric errors Scatter1D(const std::vector& x, const std::vector& ex, const std::string& path="", const std::string& title="") : AnalysisObject("Scatter1D", path, title) { if (x.size() != ex.size()) throw UserError("x and ex vectors must have same length"); for (size_t i = 0; i < x.size(); ++i) addPoint(x[i], ex[i]); } /// Constructor from x values with asymmetric errors Scatter1D(const std::vector& x, const std::vector >& ex, const std::string& path="", const std::string& title="") : AnalysisObject("Scatter1D", path, title) { if (x.size() != ex.size()) throw UserError("x and ex vectors must have same length"); for (size_t i = 0; i < x.size(); ++i) addPoint(Point1D(x[i], ex[i])); } /// Constructor from values with completely explicit asymmetric errors Scatter1D(const std::vector& x, const std::vector& exminus, const std::vector& explus, const std::string& path="", const std::string& title="") : AnalysisObject("Scatter1D", path, title) { if (x.size() != exminus.size()) throw UserError("x and ex vectors must have same length"); if (exminus.size() != explus.size()) throw UserError("ex plus and minus vectors must have same length"); for (size_t i = 0; i < x.size(); ++i) addPoint(Point1D(x[i], exminus[i], explus[i])); } /// Copy constructor with optional new path /// @todo Also allow title setting from the constructor? Scatter1D(const Scatter1D& s1, const std::string& path="") : AnalysisObject("Scatter1D", (path.size() == 0) ? 
s1.path() : path, s1, s1.title()), _points(s1._points) { for ( auto &ann : annotations()){ setAnnotation(ann, annotation(ann)); } } /// Assignment operator Scatter1D& operator = (const Scatter1D& s1) { AnalysisObject::operator = (s1); //< AO treatment of paths etc. _points = s1._points; return *this; } /// Make a copy on the stack Scatter1D clone() const { return Scatter1D(*this); } /// Make a copy on the heap, via 'new' Scatter1D* newclone() const { return new Scatter1D(*this); } //@} /// Dimension of this data object size_t dim() const { return 1; } /// @name Modifiers //@{ /// Clear all points void reset() { _points.clear(); } /// Scaling of x axis void scaleX(double scalex) { for (Point1D& p : _points) p.scaleX(scalex); } //@} /////////////////////////////////////////////////// - - void parseVariations() ; /// Get the list of variations stored in the points const std::vector variations() const ; /// @name Point accessors //@{ /// Number of points in the scatter size_t numPoints() const { return _points.size(); } /// Get the collection of points (non-const) Points& points() { return _points; } /// Get the collection of points (const) const Points& points() const { return _points; } /// Get a reference to the point with index @a index (non-const) Point1D& point(size_t index) { if (index >= numPoints()) throw RangeError("There is no point with this index"); return _points.at(index); } /// Get a reference to the point with index @a index (const) const Point1D& point(size_t index) const { if (index >= numPoints()) throw RangeError("There is no point with this index"); return _points.at(index); } //@} /// @name Point inserters //@{ /// Insert a new point void addPoint(const Point1D& pt) { _points.insert(pt); } /// Insert a new point, defined as the x value and no errors void addPoint(double x) { - Point1D thisPoint=Point1D(x); - thisPoint.setParentAO(this); - _points.insert(thisPoint); + _points.insert(Point1D(x)); } /// Insert a new point, defined as the x value and symmetric errors void addPoint(double x, double ex) { - Point1D thisPoint=Point1D(x, ex); - thisPoint.setParentAO(this); - _points.insert(thisPoint); + _points.insert(Point1D(x, ex)); } /// Insert a new point, defined as the x value and an asymmetric error pair void addPoint(double x, const std::pair& ex) { - Point1D thisPoint=Point1D(x, ex); - thisPoint.setParentAO(this); - _points.insert(thisPoint); + _points.insert(Point1D(x, ex)); } /// Insert a new point, defined as the x value and explicit asymmetric errors void addPoint(double x, double exminus, double explus) { - Point1D thisPoint=Point1D(x, exminus, explus); - thisPoint.setParentAO(this); - _points.insert(thisPoint); + _points.insert(Point1D(x, exminus, explus)); } /// Insert a collection of new points void addPoints(const Points& pts) { for (const Point1D& pt : pts) addPoint(pt); } //@} /// @name Combining sets of scatter points //@{ /// @todo Better name? void combineWith(const Scatter1D& other) { addPoints(other.points()); } /// @todo Better name? Make this the add operation? /// @todo Convert/extend to accept a Range or generic void combineWith(const std::vector& others) { for (const Scatter1D& s : others) combineWith(s); } //@} /// Equality operator bool operator == (const Scatter1D& other) { return _points == other._points; } /// Non-equality operator bool operator != (const Scatter1D& other) { return ! 
operator == (other); } ////////////////////////////////// private: Points _points; - - bool _variationsParsed =false ; }; /// Convenience typedef typedef Scatter1D S1D; /// @name Combining scatters by merging sets of points //@{ inline Scatter1D combine(const Scatter1D& a, const Scatter1D& b) { Scatter1D rtn = a; rtn.combineWith(b); return rtn; } inline Scatter1D combine(const std::vector& scatters) { Scatter1D rtn; rtn.combineWith(scatters); return rtn; } //@} ////////////////////////////////// /// @name Conversion functions from other data types //@{ /// Make a Scatter1D representation of a Histo1D Scatter1D mkScatter(const Counter& c); /// Make a Scatter1D representation of... erm, a Scatter1D! /// @note Mainly exists to allow mkScatter to be called on any AnalysisObject type inline Scatter1D mkScatter(const Scatter1D& s) { return Scatter1D(s); } //@} - + ///////////////////////////////// /// @name Transforming operations on Scatter1D //@{ /// @brief Apply transformation fx(x) to all values and error positions (operates in-place on @a s) /// /// fx should be a function which takes double x -> double newx template inline void transformX(Scatter1D& s, FNX fx) { for (size_t i = 0; i < s.numPoints(); ++i) { Point1D& p = s.point(i); const double newx = fx(p.x()); const double fx_xmin = fx(p.xMin()); const double fx_xmax = fx(p.xMax()); // Deal with possible inversions of min/max ordering under the transformation const double newxmin = std::min(fx_xmin, fx_xmax); const double newxmax = std::max(fx_xmin, fx_xmax); // Set new point x values p.setX(newx); /// @todo Be careful about transforms which could switch around min and max errors, or send both in the same direction! p.setXErrMinus(newx - newxmin); p.setXErrPlus(newxmax - newx); } } //@} - + } #endif diff --git a/include/YODA/Scatter2D.h b/include/YODA/Scatter2D.h --- a/include/YODA/Scatter2D.h +++ b/include/YODA/Scatter2D.h @@ -1,441 +1,421 @@ // -*- C++ -*- // // This file is part of YODA -- Yet more Objects for Data Analysis // Copyright (C) 2008-2018 The YODA collaboration (see AUTHORS for details) // #ifndef YODA_SCATTER2D_H #define YODA_SCATTER2D_H #include "YODA/AnalysisObject.h" #include "YODA/Point2D.h" #include "YODA/Utils/sortedvector.h" #include #include namespace YODA { // Forward declarations class Histo1D; class Profile1D; /// A very generic data type which is just a collection of 2D data points with errors class Scatter2D : public AnalysisObject { public: /// Type of the native Point2D collection typedef Point2D Point; typedef Utils::sortedvector Points; typedef std::shared_ptr Ptr; /// @name Constructors //@{ /// Empty constructor Scatter2D(const std::string& path="", const std::string& title="") : AnalysisObject("Scatter2D", path, title) { } /// Constructor from a set of points Scatter2D(const Points& points, const std::string& path="", const std::string& title="") : AnalysisObject("Scatter2D", path, title), _points(points) { } /// Constructor from a vector of values with no errors Scatter2D(const std::vector& x, const std::vector& y, const std::string& path="", const std::string& title="") : AnalysisObject("Scatter2D", path, title) { if (x.size() != y.size()) throw UserError("x and y vectors must have same length"); for (size_t i = 0; i < x.size(); ++i) addPoint(x[i], y[i]); } /// Constructor from vectors of values with symmetric errors on x and y Scatter2D(const std::vector& x, const std::vector& y, const std::vector& ex, const std::vector& ey, const std::string& path="", const std::string& title="") : 
AnalysisObject("Scatter2D", path, title) { if (x.size() != y.size()) throw UserError("x and y vectors must have same length"); if (x.size() != ex.size()) throw UserError("x and ex vectors must have same length"); if (y.size() != ey.size()) throw UserError("y and ey vectors must have same length"); for (size_t i = 0; i < x.size(); ++i) addPoint(x[i], y[i], ex[i], ey[i]); } /// Constructor from values with asymmetric errors on both x and y Scatter2D(const std::vector& x, const std::vector& y, const std::vector >& ex, const std::vector >& ey, const std::string& path="", const std::string& title="") : AnalysisObject("Scatter2D", path, title) { if (x.size() != y.size()) throw UserError("x and y vectors must have same length"); if (x.size() != ex.size()) throw UserError("x and ex vectors must have same length"); if (y.size() != ey.size()) throw UserError("y and ey vectors must have same length"); for (size_t i = 0; i < x.size(); ++i) addPoint(Point2D(x[i], y[i], ex[i], ey[i])); } /// Constructor from values with completely explicit asymmetric errors Scatter2D(const std::vector& x, const std::vector& y, const std::vector& exminus, const std::vector& explus, const std::vector& eyminus, const std::vector& eyplus, const std::string& path="", const std::string& title="") : AnalysisObject("Scatter2D", path, title) { if (x.size() != y.size()) throw UserError("x and y vectors must have same length"); if (x.size() != exminus.size()) throw UserError("x and ex vectors must have same length"); if (y.size() != eyminus.size()) throw UserError("y and ey vectors must have same length"); if (exminus.size() != explus.size()) throw UserError("ex plus and minus vectors must have same length"); if (eyminus.size() != eyplus.size()) throw UserError("ey plus and minus vectors must have same length"); for (size_t i = 0; i < x.size(); ++i) addPoint(Point2D(x[i], y[i], exminus[i], explus[i], eyminus[i], eyplus[i])); } /// Copy constructor with optional new path /// @todo Also allow title setting from the constructor? Scatter2D(const Scatter2D& s2, const std::string& path="") : AnalysisObject("Scatter2D", (path.size() == 0) ? s2.path() : path, s2, s2.title()), _points(s2._points) { for ( auto &ann : annotations()){ setAnnotation(ann, annotation(ann)); } } /// Assignment operator Scatter2D& operator = (const Scatter2D& s2) { AnalysisObject::operator = (s2); //< AO treatment of paths etc. 
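// Illustrative sketch (editorial addition, not part of the original header):
// the vector constructors above can be used along the lines of
//   std::vector<double> x  = {1.0, 2.0, 3.0};
//   std::vector<double> y  = {0.5, 0.7, 0.4};
//   std::vector<double> ex = {0.1, 0.1, 0.1};
//   std::vector<double> ey = {0.05, 0.05, 0.05};
//   YODA::Scatter2D s(x, y, ex, ey, "/demo/path", "demo title");
// where all literal values and the path/title strings are invented for demonstration.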
_points = s2._points; return *this; } /// Make a copy on the stack Scatter2D clone() const { return Scatter2D(*this); } /// Make a copy on the heap, via 'new' Scatter2D* newclone() const { return new Scatter2D(*this); } //@} /// Dimension of this data object size_t dim() const { return 2; } /// @name Modifiers //@{ /// Clear all points void reset() { _points.clear(); } /// Scaling of x axis void scaleX(double scalex) { for (Point2D& p : _points) p.scaleX(scalex); } /// Scaling of y axis void scaleY(double scaley) { for (Point2D& p : _points) p.scaleY(scaley); } /// Scaling of both axes void scaleXY(double scalex, double scaley) { for (Point2D& p : _points) p.scaleXY(scalex, scaley); } /// Scaling of both axes /// @deprecated Use scaleXY void scale(double scalex, double scaley) { scaleXY(scalex, scaley); } //@} /////////////////////////////////////////////////// - void parseVariations() ; - /// Get the list of variations stored in the points const std::vector variations() const; - // Construct a covariance matrix from the error breakdown - std::vector > covarianceMatrix(bool ignoreOffDiagonalTerms=false) ; - /// @name Point accessors //@{ /// Number of points in the scatter size_t numPoints() const { return _points.size(); } /// Get the collection of points (non-const) Points& points() { return _points; } /// Get the collection of points (const) const Points& points() const { return _points; } /// Get a reference to the point with index @a index (non-const) Point2D& point(size_t index) { if (index >= numPoints()) throw RangeError("There is no point with this index"); return _points.at(index); } /// Get a reference to the point with index @a index (const) const Point2D& point(size_t index) const { if (index >= numPoints()) throw RangeError("There is no point with this index"); return _points.at(index); } //@} /// @name Point inserters //@{ /// Insert a new point void addPoint(const Point2D& pt) { _points.insert(pt); } /// Insert a new point, defined as the x/y value pair and no errors void addPoint(double x, double y) { - Point2D thisPoint= Point2D(x, y); - thisPoint.setParentAO(this); - _points.insert(thisPoint); + _points.insert(Point2D(x, y)); } /// Insert a new point, defined as the x/y value pair and symmetric errors void addPoint(double x, double y, double ex, double ey) { - Point2D thisPoint= Point2D(x, y, ex, ey); - thisPoint.setParentAO(this); - _points.insert(thisPoint); + _points.insert(Point2D(x, y, ex, ey)); } /// Insert a new point, defined as the x/y value pair and asymmetric error pairs void addPoint(double x, double y, const std::pair& ex, const std::pair& ey) { - Point2D thisPoint= Point2D(x, y, ex, ey); - thisPoint.setParentAO(this); - _points.insert(thisPoint); + _points.insert(Point2D(x, y, ex, ey)); } /// Insert a new point, defined as the x/y value pair and asymmetric errors void addPoint(double x, double y, double exminus, double explus, double eyminus, double eyplus) { - Point2D thisPoint=Point2D(x, y, exminus, explus, eyminus, eyplus); - thisPoint.setParentAO(this); - _points.insert(thisPoint); + _points.insert(Point2D(x, y, exminus, explus, eyminus, eyplus)); } /// Insert a collection of new points void addPoints(const Points& pts) { - for (const Point2D& pt : pts) { - addPoint(pt); - } + for (const Point2D& pt : pts) addPoint(pt); } //@} /// @name Combining sets of scatter points //@{ /// @todo Better name? Make this the add operation? void combineWith(const Scatter2D& other) { addPoints(other.points()); //return *this; } /// @todo Better name? 
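/// @par Example (editorial sketch only, not from the original source; all values invented)
/// @code
///   YODA::Scatter2D sa, sb;
///   sa.addPoint(1.0, 2.0, 0.1, 0.2);               // symmetric x/y errors
///   sa.addPoint(2.0, 3.0, {0.1, 0.2}, {0.3, 0.4}); // asymmetric error pairs
///   sa.combineWith(sb);                            // merge sb's points into sa
/// @endcode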
/// @todo Convert/extend to accept a Range or generic void combineWith(const std::vector& others) { for (const Scatter2D& s : others) combineWith(s); //return *this; } //@} /// Equality operator bool operator == (const Scatter2D& other) { return _points == other._points; } /// Non-equality operator bool operator != (const Scatter2D& other) { return ! operator == (other); } - ////////////////////////////////// private: Points _points; - bool _variationsParsed =false ; - }; /// Convenience typedef typedef Scatter2D S2D; /// @name Combining scatters by merging sets of points //@{ inline Scatter2D combine(const Scatter2D& a, const Scatter2D& b) { Scatter2D rtn = a; rtn.combineWith(b); return rtn; } inline Scatter2D combine(const std::vector& scatters) { Scatter2D rtn; rtn.combineWith(scatters); return rtn; } //@} ////////////////////////////////// /// @name Conversion functions from other data types //@{ /// Make a Scatter2D representation of a Histo1D /// /// Optional @c usefocus argument can be used to position the point at the bin - /// focus rather than geometric midpoint. Optional @c binwidthdiv argument can be - /// used to disable the default (physical, differential!) scaling of y values and - /// errors by 1/bin-width. - Scatter2D mkScatter(const Histo1D& h, bool usefocus=false, bool binwidthdiv=true); + /// focus rather than geometric midpoint. + Scatter2D mkScatter(const Histo1D& h, bool usefocus=false); /// Make a Scatter2D representation of a Profile1D /// /// Optional @c usefocus argument can be used to position the point at the bin /// focus rather than geometric midpoint. Optional @c usestddev argument can /// be used to draw the y-distribution sigma rather than the standard error on /// the mean as the y-error bar size. Scatter2D mkScatter(const Profile1D& p, bool usefocus=false, bool usestddev=false); /// Make a Scatter2D representation of... erm, a Scatter2D! /// @note Mainly exists to allow mkScatter to be called on any AnalysisObject type inline Scatter2D mkScatter(const Scatter2D& s) { return Scatter2D(s); } // /// @note The usefocus arg is just for consistency and has no effect for Scatter -> Scatter // inline Scatter2D mkScatter(const Scatter2D& s, bool) { return mkScatter(s); } //@} ////////////////////////////////// /// @name Transforming operations on Scatter2D //@{ /// @brief Apply transformation fx(x) to all values and error positions (operates in-place on @a s) /// /// fx should be a function which takes double x -> double newx template inline void transformX(Scatter2D& s, FNX fx) { for (size_t i = 0; i < s.numPoints(); ++i) { Point2D& p = s.point(i); const double newx = fx(p.x()); const double fx_xmin = fx(p.xMin()); const double fx_xmax = fx(p.xMax()); // Deal with possible inversions of min/max ordering under the transformation const double newxmin = std::min(fx_xmin, fx_xmax); const double newxmax = std::max(fx_xmin, fx_xmax); // Set new point x values p.setX(newx); /// @todo Be careful about transforms which could switch around min and max errors, or send both in the same direction! 
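// Editorial note (not part of the original header): a strictly monotonic
// transform keeps the min/max handling above well defined, e.g.
//   YODA::transformX(s, [](double x){ return std::log10(x); });
// for some Scatter2D s (requires <cmath>); non-monotonic transforms can invert
// or collapse the error band, as the @todo above warns.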
p.setXErrMinus(newx - newxmin); p.setXErrPlus(newxmax - newx); } } /// @brief Apply transformation fy(y) to all values and error positions (operates in-place on @a s) /// /// fy should be a function which takes double y -> double newy template inline void transformY(Scatter2D& s, FNY fy) { for (size_t i = 0; i < s.numPoints(); ++i) { Point2D& p = s.point(i); const double newy = fy(p.y()); const double fy_ymin = fy(p.yMin()); const double fy_ymax = fy(p.yMax()); // Deal with possible inversions of min/max ordering under the transformation const double newymin = std::min(fy_ymin, fy_ymax); const double newymax = std::max(fy_ymin, fy_ymax); // Set new point y values p.setY(newy); /// @todo Be careful about transforms which could switch around min and max errors, or send both in the same direction! p.setYErrMinus(newy - newymin); p.setYErrPlus(newymax - newy); } } /// @todo Add external scale, scaleX, scaleY functions /// Exchange the x and y axes (operates in-place on @a s) inline void flip(Scatter2D& s) { for (size_t i = 0; i < s.numPoints(); ++i) { Point2D& p = s.point(i); const double newx = p.y(); const double newy = p.x(); const double newxmin = p.yMin(); const double newxmax = p.yMax(); const double newymin = p.xMin(); const double newymax = p.xMax(); p.setX(newx); p.setY(newy); /// @todo Be careful about transforms which could switch around min and max errors, or send both in the same direction! p.setXErrMinus(newx - newxmin); p.setXErrPlus(newxmax - newx); p.setYErrMinus(newy - newymin); p.setYErrPlus(newymax - newy); } } //@} } #endif diff --git a/include/YODA/Scatter3D.h b/include/YODA/Scatter3D.h --- a/include/YODA/Scatter3D.h +++ b/include/YODA/Scatter3D.h @@ -1,449 +1,436 @@ // -*- C++ -*- // // This file is part of YODA -- Yet more Objects for Data Analysis // Copyright (C) 2008-2018 The YODA collaboration (see AUTHORS for details) // #ifndef YODA_SCATTER3D_H #define YODA_SCATTER3D_H #include "YODA/AnalysisObject.h" #include "YODA/Point3D.h" #include "YODA/Utils/sortedvector.h" #include #include namespace YODA { // Forward declarations class Histo2D; class Profile2D; /// A very generic data type which is just a collection of 3D data points with errors class Scatter3D : public AnalysisObject { public: /// Types of the native Point3D collection typedef Point3D Point; typedef Utils::sortedvector Points; typedef std::shared_ptr Ptr; /// @name Constructors //@{ /// Empty constructor Scatter3D(const std::string& path="", const std::string& title="") : AnalysisObject("Scatter3D", path, title) { } /// Constructor from a set of points Scatter3D(const Points& points, const std::string& path="", const std::string& title="") : AnalysisObject("Scatter3D", path, title), _points(points) { std::sort(_points.begin(), _points.end()); } /// Constructor from vectors of values with no errors Scatter3D(const std::vector& x, const std::vector& y, const std::vector& z, const std::string& path="", const std::string& title="") : AnalysisObject("Scatter3D", path, title) { if (x.size() != y.size() || y.size() != z.size()) { throw RangeError("There are different numbers of x, y, and z values in the provided vectors."); } const std::pair nullerr = std::make_pair(0.0, 0.0); for (size_t i = 0; i < x.size(); ++i) { addPoint(Point3D(x[i], y[i], z[i], nullerr, nullerr, nullerr)); } std::sort(_points.begin(), _points.end()); } /// Constructor from vectors of values with asymmetric errors on both x and y Scatter3D(const std::vector& x, const std::vector& y, const std::vector& z, const std::vector >& ex, const 
std::vector >& ey, const std::vector >& ez, const std::string& path="", const std::string& title="") : AnalysisObject("Scatter3D", path, title) { if (x.size() != y.size() || y.size() != z.size()) { throw RangeError("There are different numbers of x, y, and z values in the provided vectors."); } if (x.size() != ex.size() || y.size() != ey.size() || z.size() != ez.size()) { throw RangeError("The sizes of the provided error vectors don't match the corresponding x, y, or z value vectors."); } for (size_t i = 0; i < x.size(); ++i) { addPoint(Point3D(x[i], y[i], z[i], ex[i], ey[i], ez[i])); } std::sort(_points.begin(), _points.end()); } /// Constructor from vectors of values with completely explicit asymmetric errors Scatter3D(const std::vector& x, const std::vector& y, const std::vector z, const std::vector& exminus, const std::vector& explus, const std::vector& eyminus, const std::vector& eyplus, const std::vector& ezminus, const std::vector& ezplus, const std::string& path="", const std::string& title="") : AnalysisObject("Scatter3D", path, title) { if(x.size() != y.size() || y.size() != z.size() || x.size() != exminus.size() || x.size() != explus.size() || y.size() != eyminus.size() || y.size() != eyplus.size() || z.size() != ezminus.size() || z.size() != ezplus.size()) throw RangeError("There are either different amounts of points on x/y/z vectors or not every of these vectors has properly defined error vectors!"); for (size_t i = 0; i < x.size(); ++i) { addPoint(Point3D(x[i], y[i], z[i], exminus[i], explus[i], eyminus[i], eyplus[i], ezminus[i], ezplus[i])); } std::sort(_points.begin(), _points.end()); } /// Copy constructor with optional new path /// @todo Also allow title setting from the constructor? Scatter3D(const Scatter3D& s3, const std::string& path="") : AnalysisObject("Scatter3D", (path.size() == 0) ? s3.path() : path, s3, s3.title()), _points(s3._points) - { + { for ( auto &ann : annotations()){ setAnnotation(ann, annotation(ann)); } } /// Assignment operator Scatter3D& operator = (const Scatter3D& s3) { AnalysisObject::operator = (s3); //< AO treatment of paths etc. 
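// Illustrative sketch (editorial addition, not in the original header): the
// copy constructor above allows re-pathing an object while keeping its points
// and annotations, e.g. for some existing Scatter3D orig (path string invented):
//   YODA::Scatter3D copy(orig, "/new/path");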
_points = s3._points; return *this; } /// Make a copy on the stack Scatter3D clone() const { return Scatter3D(*this); } /// Make a copy on the heap, via 'new' Scatter3D* newclone() const { return new Scatter3D(*this); } //@} /// Dimension of this data object size_t dim() const { return 3; } /// @name Modifiers //@{ /// Clear all points void reset() { _points.clear(); } /// Scaling of x axis void scaleX(double scalex) { for (Point3D& p : _points) p.scaleX(scalex); } /// Scaling of y axis void scaleY(double scaley) { for (Point3D& p : _points) p.scaleY(scaley); } /// Scaling of z axis void scaleZ(double scalez) { for (Point3D& p : _points) p.scaleZ(scalez); } /// Scaling of all three axes void scaleXYZ(double scalex, double scaley, double scalez) { for (Point3D& p : _points) p.scaleXYZ(scalex, scaley, scalez); } /// Scaling of all three axes /// @deprecated Use scaleXYZ void scale(double scalex, double scaley, double scalez) { scaleXYZ(scalex, scaley, scalez); } //@} /////////////////////////////////////////////////// - void parseVariations() ; - - /// Get the list of variations stored in the points + /// Get the list of variations stored in the points const std::vector variations() const; /// @name Point accessors //@{ /// Number of points in the scatter size_t numPoints() const { return _points.size(); } /// Get the collection of points (non-const) Points& points() { return _points; } /// Get the collection of points (const) const Points& points() const { return _points; } /// Get a reference to the point with index @a index Point3D& point(size_t index) { if (index >= numPoints()) throw RangeError("There is no point with this index"); return _points.at(index); } /// Get the point with index @a index (const version) const Point3D& point(size_t index) const { if (index >= numPoints()) throw RangeError("There is no point with such index!"); return _points.at(index); } //@} /// @name Point inserters //@{ /// Insert a new point void addPoint(const Point3D& pt) { _points.insert(pt); } /// Insert a new point, defined as the x/y/z value triplet and no errors void addPoint(double x, double y, double z) { - Point3D thisPoint=Point3D(x, y, z); - thisPoint.setParentAO(this); - _points.insert(thisPoint); + _points.insert(Point3D(x, y, z)); } /// Insert a new point, defined as the x/y/z value triplet and symmetric errors void addPoint(double x, double y, double z, double ex, double ey, double ez) { - Point3D thisPoint=Point3D(x, y, z, ex, ey, ez); - thisPoint.setParentAO(this); - _points.insert(thisPoint); + _points.insert(Point3D(x, y, z, ex, ey, ez)); } /// Insert a new point, defined as the x/y/z value triplet and asymmetric error pairs void addPoint(double x, double y, double z, const std::pair& ex, const std::pair& ey, const std::pair& ez) { - Point3D thisPoint= Point3D(x, y, z, ex, ey, ez); - thisPoint.setParentAO(this); - _points.insert(thisPoint); + _points.insert(Point3D(x, y, z, ex, ey, ez)); } /// Insert a new point, defined as the x/y/z value triplet and asymmetric errors void addPoint(double x, double y, double z, double exminus, double explus, double eyminus, double eyplus, double ezminus, double ezplus) { - Point3D thisPoint = Point3D(x, y, z, exminus, explus, eyminus, eyplus, ezminus, ezplus); - thisPoint.setParentAO(this); - _points.insert(thisPoint); + _points.insert(Point3D(x, y, z, exminus, explus, eyminus, eyplus, ezminus, ezplus)); } /// Insert a collection of new points void addPoints(const Points& pts) { for (const Point3D& pt : pts) addPoint(pt); } //@} /// @todo Better name? 
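/// @par Example (editorial sketch only; values invented)
/// @code
///   YODA::Scatter3D s, other;
///   s.addPoint(1.0, 2.0, 3.0, 0.1, 0.2, 0.3);  // x/y/z with symmetric errors
///   s.scaleXYZ(2.0, 1.0, 0.5);                 // rescale all three axes
///   s.combineWith(other);                      // merge another scatter's points
/// @endcode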
void combineWith(const Scatter3D& other) { addPoints(other.points()); //return *this; } /// @todo Better name? /// @todo Convert to accept a Range or generic void combineWith(const std::vector& others) { for (const Scatter3D& s : others) combineWith(s); } /// Equality operator bool operator == (const Scatter3D& other) { return _points == other._points; } /// Non-equality operator bool operator != (const Scatter3D& other) { return ! operator == (other); } - + ////////////////////////////////// - - + + private: Points _points; - bool _variationsParsed =false ; - }; /// Convenience typedef typedef Scatter3D S3D; /// @name Combining scatters by merging sets of points //@{ inline Scatter3D combine(const Scatter3D& a, const Scatter3D& b) { Scatter3D rtn = a; rtn.combineWith(b); return rtn; } inline Scatter3D combine(const std::vector& scatters) { Scatter3D rtn; rtn.combineWith(scatters); return rtn; } //@} ////////////////////////////////// /// @name Conversion functions from other data types //@{ /// Make a Scatter3D representation of a Histo2D /// /// Optional @c usefocus argument can be used to position the point at the bin /// focus rather than geometric midpoint. - Scatter3D mkScatter(const Histo2D& h, bool usefocus=false, bool binareadiv=true); + Scatter3D mkScatter(const Histo2D& h, bool usefocus=false); /// Make a Scatter3D representation of a Profile2D /// /// Optional @c usefocus argument can be used to position the point at the bin /// focus rather than geometric midpoint. Optional @c usestddev argument can /// be used to draw the distribution sigma rather than the standard error on /// the mean as the z-error bar size. Scatter3D mkScatter(const Profile2D& p, bool usefocus=false, bool usestddev=false); /// Make a Scatter3D representation of... erm, a Scatter3D! /// @note Mainly exists to allow mkScatter to be called on any AnalysisObject type inline Scatter3D mkScatter(const Scatter3D& s) { return Scatter3D(s); } // /// @note The usefocus arg is just for consistency and has no effect for Scatter -> Scatter //inline Scatter3D mkScatter(const Scatter3D& s, bool) { return mkScatter(s); } //@} ///////////////////////////////// /// @name Transforming operations on Scatter3D //@{ /// @brief Apply transformation fx(x) to all values and error positions (operates in-place on @a s) /// /// fx should be a function which takes double x -> double newx template inline void transformX(Scatter3D& s, FNX fx) { for (size_t i = 0; i < s.numPoints(); ++i) { Point3D& p = s.point(i); const double newx = fx(p.x()); const double fx_xmin = fx(p.xMin()); const double fx_xmax = fx(p.xMax()); // Deal with possible inversions of min/max ordering under the transformation const double newxmin = std::min(fx_xmin, fx_xmax); const double newxmax = std::max(fx_xmin, fx_xmax); // Set new point x values p.setX(newx); /// @todo Be careful about transforms which could switch around min and max errors, or send both in the same direction! 
p.setXErrMinus(newx - newxmin); p.setXErrPlus(newxmax - newx); } } /// @brief Apply transformation fy(y) to all values and error positions (operates in-place on @a s) /// /// fy should be a function which takes double y -> double newy template inline void transformY(Scatter3D& s, FNY fy) { for (size_t i = 0; i < s.numPoints(); ++i) { Point3D& p = s.point(i); const double newy = fy(p.y()); const double fy_ymin = fy(p.yMin()); const double fy_ymax = fy(p.yMax()); // Deal with possible inversions of min/max ordering under the transformation const double newymin = std::min(fy_ymin, fy_ymax); const double newymax = std::max(fy_ymin, fy_ymax); // Set new point y values p.setY(newy); /// @todo Be careful about transforms which could switch around min and max errors, or send both in the same direction! p.setYErrMinus(newy - newymin); p.setYErrPlus(newymax - newy); } } /// @brief Apply transformation fz(z) to all values and error positions (operates in-place on @a s) /// /// fz should be a function which takes double z -> double newz template inline void transformZ(Scatter3D& s, FNZ fz) { for (size_t i = 0; i < s.numPoints(); ++i) { Point3D& p = s.point(i); const double newz = fz(p.z()); const double fz_zmin = fz(p.zMin()); const double fz_zmax = fz(p.zMax()); // Deal with possible inversions of min/max ordering under the transformation const double newzmin = std::min(fz_zmin, fz_zmax); const double newzmax = std::max(fz_zmin, fz_zmax); // Set new point z values p.setZ(newz); /// @todo Be careful about transforms which could switch around min and max errors, or send both in the same direction! p.setZErrMinus(newz - newzmin); p.setZErrPlus(newzmax - newz); } } /// @todo Add external scale, scaleX, scaleY, scaleZ functions //@} - } #endif diff --git a/pyext/yoda/declarations.pxd b/pyext/yoda/declarations.pxd --- a/pyext/yoda/declarations.pxd +++ b/pyext/yoda/declarations.pxd @@ -1,1437 +1,1426 @@ from libcpp.map cimport map from libcpp.pair cimport pair from libcpp.vector cimport vector from libcpp cimport bool from libcpp.string cimport string from cython.operator cimport dereference as deref cdef extern from "YODA/Config/YodaConfig.h" namespace "YODA": string version() # Import the error handling C++ routine cdef extern from "errors.hh": # Have a look in errors.cpp for implementation specifics void yodaerr "translate_yoda_error" () ctypedef map[string, string] Annotations ctypedef double (*dbl_dbl_fptr) (double) ctypedef map[string, pair[double,double]] errMap # Math utils {{{ cdef extern from "YODA/Utils/MathUtils.h" namespace "YODA": # bool isZero(double a, double tolerance) # bool fuzzyEquals(double a, double b, double tolerance) # bool fuzzyGtrEquals(double a, double b, double tolerance) # bool fuzzyLessEquals(double a, double b, double tolerance) vector[double] linspace(size_t nbins, double start, double end) vector[double] logspace(size_t nbins, double start, double end) int index_between(double&, vector[double]& binedges) double mean(vector[int]& sample) double covariance(vector[int]& sample1, vector[int]& sample2) double correlation(vector[int]& sample1, vector[int]& sample2) # }}} # Dbn0D {{{ cdef extern from "YODA/Dbn0D.h" namespace "YODA": cdef cppclass Dbn0D: Dbn0D () Dbn0D (Dbn0D) void fill(double weight, double fraction) void reset() void scaleW(double) # Raw distribution running sums unsigned long numEntries() except +yodaerr double effNumEntries() except +yodaerr double sumW() except +yodaerr double sumW2() except +yodaerr double errW() except +yodaerr double relErrW() except 
+yodaerr Dbn0D operator+ (Dbn0D) Dbn0D operator- (Dbn0D) # TODO: += and -= operators #}}} Dbn0D # Dbn1D {{{ cdef extern from "YODA/Dbn1D.h" namespace "YODA": cdef cppclass Dbn1D: Dbn1D () Dbn1D (Dbn1D) void fill(double val, double weight, double fraction) void reset() void scaleW(double) void scaleX(double) double errW() except +yodaerr double relErrW() except +yodaerr double xMean() except +yodaerr double xVariance() except +yodaerr double xStdDev() except +yodaerr double xStdErr() except +yodaerr double xRMS() except +yodaerr # Raw distribution running sums unsigned long numEntries() except +yodaerr double effNumEntries() except +yodaerr double sumW() except +yodaerr double sumW2() except +yodaerr double sumWX() except +yodaerr double sumWX2() except +yodaerr Dbn1D operator+ (Dbn1D) Dbn1D operator- (Dbn1D) # TODO: += and -= operators #}}} Dbn1D # Dbn2D {{{ cdef extern from "YODA/Dbn2D.h" namespace "YODA": cdef cppclass Dbn2D: Dbn2D () Dbn2D (Dbn2D) void fill(double x, double y, double weight, double fraction) except +yodaerr void reset() except +yodaerr void scaleW(double) except +yodaerr void scaleX(double) except +yodaerr void scaleY(double) except +yodaerr void scaleXY(double, double) except +yodaerr double errW() except +yodaerr double relErrW() except +yodaerr double xMean() except +yodaerr double xVariance() except +yodaerr double xStdDev() except +yodaerr double xStdErr() except +yodaerr double xRMS() except +yodaerr double yMean() except +yodaerr double yVariance() except +yodaerr double yStdDev() except +yodaerr double yStdErr() except +yodaerr double yRMS() except +yodaerr # Raw distribution running sums unsigned long numEntries() except +yodaerr double effNumEntries() except +yodaerr double sumW() except +yodaerr double sumW2() except +yodaerr double sumWX() except +yodaerr double sumWX2() except +yodaerr double sumWY() except +yodaerr double sumWY2() except +yodaerr double sumWXY() except +yodaerr # Operators void flipXY() except +yodaerr Dbn1D transformX() except +yodaerr Dbn1D transformY() except +yodaerr Dbn2D operator + (Dbn2D) Dbn2D operator - (Dbn2D) # TODO: += and -= operators #}}} Dbn2D # Dbn3D {{{ cdef extern from "YODA/Dbn3D.h" namespace "YODA": cdef cppclass Dbn3D: Dbn3D () Dbn3D (Dbn3D) void fill(double x, double y, double z, double weight, double fraction) void reset() void scaleW(double) void scaleX(double) void scaleY(double) void scaleZ(double) # void scaleXY(double, double) # void scaleYZ(double, double) # void scaleXZ(double, double) void scaleXYZ(double, double, double) double errW() except +yodaerr double relErrW() except +yodaerr double xMean() double xVariance() double xStdDev() double xStdErr() double xRMS() double yMean() double yVariance() double yStdDev() double yStdErr() double yRMS() double zMean() double zVariance() double zStdDev() double zStdErr() double zRMS() # Raw distribution running sums unsigned long numEntries() double effNumEntries() double sumW() double sumW2() double sumWX() double sumWX2() double sumWY() double sumWY2() double sumWZ() double sumWZ2() double sumWXY() double sumWXZ() double sumWYZ() double sumWXYZ() # Operators void flipXY() void flipXZ() void flipYZ() Dbn1D transformX() Dbn1D transformY() Dbn1D transformZ() Dbn3D operator + (Dbn3D) Dbn3D operator - (Dbn3D) # TODO: += and -= operators #}}} Dbn3D # Point {{{ cdef extern from "YODA/Point.h" namespace "YODA": cdef cppclass Point: int dim() except +yodaerr double val(size_t i) except +yodaerr void setVal(size_t i, double val) except +yodaerr pair[double,double] errs(size_t 
i) except +yodaerr pair[double,double] errs(size_t i, string source) except +yodaerr double errMinus(size_t i) except +yodaerr double errMinus(size_t i, string source) except +yodaerr void setErrMinus(size_t i, double eminus) except +yodaerr void setErrMinus(size_t i, double eminus, string source) except +yodaerr double errPlus(size_t i) except +yodaerr double errPlus(size_t i, string source) except +yodaerr void setErrPlus(size_t i, double eplus) except +yodaerr void setErrPlus(size_t i, double eplus, string source) except +yodaerr double errAvg(size_t i) except +yodaerr double errAvg(size_t i, string source) except +yodaerr void setErr(size_t i, double e) except +yodaerr void setErr(size_t i, double e, string source) except +yodaerr # void setErrs(size_t i, double e) except +yodaerr # void setErrs(size_t i, double eminus, double eplus) except +yodaerr void setErrs(size_t i, pair[double,double]& e) except +yodaerr void setErrs(size_t i, pair[double,double]& e, string source) except +yodaerr # void set(size_t i, double val, double e) except +yodaerr # void set(size_t i, double val, double eminus, double eplus) except +yodaerr void set(size_t i, double val, pair[double,double]& e) except +yodaerr void set(size_t i, double val, pair[double,double]& e, string source) except +yodaerr errMap errMap() except +yodaerr #}}} Point # Point1D {{{ cdef extern from "YODA/Point1D.h" namespace "YODA": cdef cppclass Point1D(Point): Point1D () except +yodaerr Point1D (Point1D p) except +yodaerr Point1D (double x, double exminus, double explus) except +yodaerr Point1D (double x, double exminus, double explus, string source) except +yodaerr double x() except +yodaerr void setX(double x) except +yodaerr pair[double,double] xErrs() except +yodaerr pair[double,double] xErrs(string source) except +yodaerr void setXErrs(pair[double, double]&) except +yodaerr void setXErrs(pair[double, double]&, string source) except +yodaerr double xErrAvg() except +yodaerr double xErrAvg(string source) except +yodaerr double xMin() except +yodaerr double xMin(string source) except +yodaerr double xMax() except +yodaerr double xMax(string source) except +yodaerr void scaleX(double) except +yodaerr bool operator == (Point1D) except +yodaerr bool operator != (Point1D b) except +yodaerr bool operator < (Point1D b) except +yodaerr bool operator <= (Point1D b) except +yodaerr bool operator > (Point1D b) except +yodaerr bool operator >= (Point1D b) except +yodaerr # }}} Point1D # Point2D {{{ cdef extern from "YODA/Point2D.h" namespace "YODA": cdef cppclass Point2D(Point): Point2D () except +yodaerr Point2D (Point2D p) except +yodaerr Point2D (double x, double y, double exminus, double explus, double eyminus, double eyplus) except +yodaerr Point2D (double x, double y, double exminus, double explus, double eyminus, double eyplus, string source) except +yodaerr double x() except +yodaerr double y() except +yodaerr void setX(double x) except +yodaerr void setY(double y) except +yodaerr pair[double,double] xy() except +yodaerr void setXY(pair[double,double]&) except +yodaerr pair[double,double] xErrs() except +yodaerr pair[double,double] yErrs() except +yodaerr pair[double,double] yErrs(string source) except +yodaerr void setXErrs(pair[double, double]&) except +yodaerr void setYErrs(pair[double, double]&) except +yodaerr void setYErrs(pair[double, double]&, string source) except +yodaerr double xErrAvg() except +yodaerr double yErrAvg() except +yodaerr double yErrAvg(string source) except +yodaerr double xMin() except +yodaerr double xMax() 
except +yodaerr double yMin() except +yodaerr double yMin(string source) except +yodaerr double yMax() except +yodaerr double yMax(string source) except +yodaerr void scaleX(double) except +yodaerr void scaleY(double) except +yodaerr void scaleXY(double, double) except +yodaerr #void scale(double, double) except +yodaerr bool operator == (Point2D) except +yodaerr bool operator != (Point2D b) except +yodaerr bool operator < (Point2D b) except +yodaerr bool operator <= (Point2D b) except +yodaerr bool operator > (Point2D b) except +yodaerr bool operator >= (Point2D b) except +yodaerr # }}} Point2D # Point3D {{{ cdef extern from "YODA/Point3D.h" namespace "YODA": cdef cppclass Point3D(Point): Point3D () except +yodaerr Point3D (Point3D& p) except +yodaerr Point3D (double x, double y, double z, double exminus, double explus, double eyminus, double eyplus, double ezminus, double ezplus) except +yodaerr Point3D (double x, double y, double z, double exminus, double explus, double eyminus, double eyplus, double ezminus, double ezplus, string source) except +yodaerr double x() except +yodaerr double y() except +yodaerr double z() except +yodaerr void setX(double x) except +yodaerr void setY(double y) except +yodaerr void setZ(double z) except +yodaerr pair[double,double] xErrs() except +yodaerr pair[double,double] yErrs() except +yodaerr pair[double,double] zErrs() except +yodaerr pair[double,double] zErrs(string source) except +yodaerr void setXErrs(pair[double, double]&) except +yodaerr void setYErrs(pair[double, double]&) except +yodaerr void setZErrs(pair[double, double]&) except +yodaerr void setZErrs(pair[double, double]&, string source) except +yodaerr double xErrAvg() double yErrAvg() double zErrAvg() double zErrAvg(string source) double xMin() except +yodaerr double xMax() except +yodaerr double yMin() except +yodaerr double yMax() except +yodaerr double zMin() except +yodaerr double zMin(string source) except +yodaerr double zMax() except +yodaerr double zMax(string source) except +yodaerr void scaleX(double) except +yodaerr void scaleY(double) except +yodaerr void scaleZ(double) except +yodaerr void scaleXYZ(double, double, double) except +yodaerr #void scale(double, double, double) except +yodaerr bool operator == (Point3D b) bool operator != (Point3D b) bool operator < (Point3D b) bool operator <= (Point3D b) bool operator > (Point3D b) bool operator >= (Point3D b) #}}} Point3D # Bin {{{ cdef extern from "YODA/Bin.h" namespace "YODA": cdef cppclass Bin: int dim() except +yodaerr unsigned long numEntries() except +yodaerr double effNumEntries() except +yodaerr double sumW() except +yodaerr double sumW2() except +yodaerr # }}} Bin #Bin1D {{{ cdef extern from "YODA/Bin1D.h" namespace "YODA": cdef cppclass Bin1D[DBN](Bin): Bin1D(pair[double, double] edges) except +yodaerr Bin1D(pair[double, double] edges, DBN dbn) except +yodaerr Bin1D(Bin1D) except +yodaerr # THIS IS A CYTHON LIMITATION... 
DO NOT CALL THIS Bin1D() # (DO NOT CALL THIS DO NOT CALL THIS) ### ################################################# #We're fine as long as we don't try to instantiate these from Python # void scaleW(double scale) except +yodaerr # void scaleX(double scale) except +yodaerr void reset() except +yodaerr pair[double, double] edges() except +yodaerr double xMin() except +yodaerr double xMax() except +yodaerr double xMid() except +yodaerr double xWidth() except +yodaerr double xFocus() except +yodaerr # x statistics double xMean() except +yodaerr double xVariance() except +yodaerr double xStdDev() except +yodaerr double xStdErr() except +yodaerr double xRMS() except +yodaerr # raw statistics double sumWX() except +yodaerr double sumWX2() except +yodaerr void merge (Bin1D&) except +yodaerr Bin1D operator + (Bin1D&) Bin1D operator - (Bin1D&) ctypedef Bin1D[Dbn1D] Bin1D_Dbn1D ctypedef Bin1D[Dbn2D] Bin1D_Dbn2D ctypedef Bin1D[Dbn3D] Bin1D_Dbn3D #}}} Bin1D # Bin2D {{{ cdef extern from "YODA/Bin2D.h" namespace "YODA": cdef cppclass Bin2D[DBN](Bin): Bin2D(pair[double, double] xedges, pair[double, double] yedges) except+ Bin2D(Bin2D bin) except +yodaerr # CYTHON HACK DO NOT CALL THIS IT DOES NOT EXIST Bin2D() # (DO NOT CALL DO NOT CALL) ################################################ # void scaleW(double scale) except +yodaerr # void scaleXY(double, double) except +yodaerr void reset() except +yodaerr pair[double, double] xEdges() except +yodaerr pair[double, double] yEdges() except +yodaerr double xMin() except +yodaerr double yMin() except +yodaerr double xMax() except +yodaerr double yMax() except +yodaerr double xMid() except +yodaerr double yMid() except +yodaerr double xWidth() except +yodaerr double yWidth() except +yodaerr double area() except +yodaerr double xFocus() except +yodaerr double yFocus() except +yodaerr pair[double, double] xyFocus() except +yodaerr pair[double, double] xyMid() except +yodaerr # x statistics double xMean() except +yodaerr double xVariance() except +yodaerr double xStdDev() except +yodaerr double xStdErr() except +yodaerr double xRMS() except +yodaerr double yMean() except +yodaerr double yVariance() except +yodaerr double yStdDev() except +yodaerr double yStdErr() except +yodaerr double yRMS() except +yodaerr # Raw statistics double sumWX() except +yodaerr double sumWY() except +yodaerr double sumWXY() except +yodaerr double sumWX2() except +yodaerr double sumWY2() except +yodaerr #void merge(Bin2D) except +yodaerr Bin2D operator + (Bin2D) Bin2D operator - (Bin2D) int adjacentTo(Bin2D) except +yodaerr ctypedef Bin2D[Dbn2D] Bin2D_Dbn2D ctypedef Bin2D[Dbn3D] Bin2D_Dbn3D # }}} Bin2D # HistoBin1D {{{ cdef extern from "YODA/HistoBin1D.h" namespace "YODA": cdef cppclass HistoBin1D(Bin1D_Dbn1D): HistoBin1D(double lowedge, double highedge) except +yodaerr HistoBin1D(HistoBin1D) except +yodaerr # void fill(double x, double weight, double fraction) except +yodaerr # void fillBin(double weight, double fraction) except +yodaerr double area() except +yodaerr double height() except +yodaerr double areaErr() except +yodaerr double heightErr() except +yodaerr double relErr() except +yodaerr HistoBin1D operator+(HistoBin1D) HistoBin1D operator-(HistoBin1D) #}}} HistoBin1D cdef extern from "merge.hh": void HistoBin1D_iadd_HistoBin1D "cython_iadd" (HistoBin1D*, HistoBin1D*) void HistoBin1D_isub_HistoBin1D "cython_isub" (HistoBin1D*, HistoBin1D*) # void HistoBin1D_imul_dbl "cython_imul_dbl" (HistoBin1D*, double) # void HistoBin1D_idiv_dbl "cython_idiv_dbl" (HistoBin1D*, double) 
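# Editorial note (assumption, not from the original source): the quoted cnames
# above ("cython_iadd", "cython_add", ...) bind these Python-visible names to
# small helpers in merge.hh, presumably because Cython cannot invoke the C++
# (in-place) arithmetic operators on these bin types directly. A plausible
# shape for such a helper, purely as a C++ sketch, would be:
#   template <typename T> void cython_iadd(T* a, T* b) { *a += *b; }
#   template <typename T> T*   cython_add(T* a, T* b) { return new T(*a + *b); }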
HistoBin1D* HistoBin1D_add_HistoBin1D "cython_add" (HistoBin1D*, HistoBin1D*) HistoBin1D* HistoBin1D_sub_HistoBin1D "cython_sub" (HistoBin1D*, HistoBin1D*) HistoBin1D* HistoBin1D_div_HistoBin1D "cython_div" (HistoBin1D*, HistoBin1D*) # HistoBin2D {{{ cdef extern from "YODA/HistoBin2D.h" namespace "YODA": cdef cppclass HistoBin2D(Bin2D_Dbn2D): HistoBin2D(double xmin, double xmax, double ymin, double ymax) except +yodaerr HistoBin2D(HistoBin2D) except +yodaerr # void fill(double x, double y, double weight, double fraction) except +yodaerr # void fillBin(double weight, double fraction) except +yodaerr void reset() # Accessors double volume() except +yodaerr double volumeErr() except +yodaerr double height() except +yodaerr double heightErr() except +yodaerr double relErr() except +yodaerr HistoBin2D operator+(HistoBin2D) HistoBin2D operator-(HistoBin2D) #Bin2D_Dbn2D merge(HistoBin2D b) #}}} HistoBin2D # ProfileBin1D {{{ cdef extern from "YODA/ProfileBin1D.h" namespace "YODA": cdef cppclass ProfileBin1D(Bin1D_Dbn2D): ProfileBin1D(ProfileBin1D) except +yodaerr ProfileBin1D(double, double) except +yodaerr #void fill(double x, double y, double weight, double fraction) except +yodaerr #void fillBin(double y, double weight, double fraction) except +yodaerr void reset() except +yodaerr double mean() except +yodaerr double stdDev() except +yodaerr double variance() except +yodaerr double stdErr() except +yodaerr double rms() except +yodaerr double sumWY() except +yodaerr double sumWY2() except +yodaerr ProfileBin1D operator + (ProfileBin1D) ProfileBin1D operator - (ProfileBin1D) # void scaleY(double) except +yodaerr # }}} ProfileBin1D cdef extern from "merge.hh": void ProfileBin1D_iadd_ProfileBin1D "cython_iadd" (ProfileBin1D*, ProfileBin1D*) void ProfileBin1D_isub_ProfileBin1D "cython_isub" (ProfileBin1D*, ProfileBin1D*) # void ProfileBin1D_imul_dbl "cython_imul_dbl" (ProfileBin1D*, double) # void ProfileBin1D_idiv_dbl "cython_idiv_dbl" (ProfileBin1D*, double) ProfileBin1D* ProfileBin1D_add_ProfileBin1D "cython_add" (ProfileBin1D*, ProfileBin1D*) ProfileBin1D* ProfileBin1D_sub_ProfileBin1D "cython_sub" (ProfileBin1D*, ProfileBin1D*) ProfileBin1D* ProfileBin1D_div_ProfileBin1D "cython_div" (ProfileBin1D*, ProfileBin1D*) # ProfileBin2D {{{ cdef extern from "YODA/ProfileBin2D.h" namespace "YODA": cdef cppclass ProfileBin2D(Bin2D_Dbn3D): ProfileBin2D (ProfileBin2D h) except +yodaerr ProfileBin2D (double, double, double, double) except +yodaerr # void fill(double x, double y, double z, double weight, double fraction) except +yodaerr # void fillBin(double z, double weight, double fraction) except +yodaerr double mean() except +yodaerr double stdDev() except +yodaerr double variance() except +yodaerr double stdErr() except +yodaerr double rms() except +yodaerr double sumWZ() except +yodaerr double sumWZ2() except +yodaerr ProfileBin2D operator + (ProfileBin2D) ProfileBin2D operator - (ProfileBin2D) # void scaleZ(double) except +yodaerr # }}} ProfileBin2D # AnalysisObject {{{ cdef extern from "YODA/AnalysisObject.h" namespace "YODA": cdef cppclass AnalysisObject: # Constructors AnalysisObject(string type, string path, string title) except +yodaerr AnalysisObject(string type, string path, AnalysisObject ao, string title) except +yodaerr AnalysisObject() #AnalysisObject* newclone() except +yodaerr ## String used in automatic type determination string type() except +yodaerr ## Data object fill- or plot-space dimension int dim() except +yodaerr ## Annotations vector[string] annotations() except +yodaerr bool 
hasAnnotation(string key) except +yodaerr string annotation(string key) except +yodaerr string annotation(string key, string default) except +yodaerr void setAnnotation(string, string) except +yodaerr void rmAnnotation(string name) except +yodaerr void clearAnnotations() except +yodaerr ## Standard annotations string title() except +yodaerr void setTitle(string title) except +yodaerr string path() except +yodaerr void setPath(string title) except +yodaerr string name() except +yodaerr # }}} AnalysisObject cdef extern from "YODA/Utils/sortedvector.h" namespace "YODA::Utils": cdef cppclass sortedvector[T](vector): sortedvector(vector[T]) except +yodaerr void insert(T) except +yodaerr # TODO: forward declarations for bin-copying constructors # Counter {{{ cdef extern from "YODA/Counter.h" namespace "YODA": cdef cppclass Counter(AnalysisObject): Counter() except +yodaerr Counter(string path, string title) except +yodaerr #Counter(Dbn0D dbn, string path, string title) except +yodaerr Counter(Counter c, string path) Counter clone() except +yodaerr Counter* newclone() except +yodaerr void reset() except +yodaerr void fill(double weight, double fraction) except +yodaerr unsigned long numEntries() except +yodaerr double effNumEntries() except +yodaerr double sumW() except +yodaerr double sumW2() except +yodaerr double val() except +yodaerr double err() except +yodaerr double relErr() except +yodaerr void scaleW(double) except +yodaerr # operator += (Counter) # operator -= (Counter) Scatter1D Counter_div_Counter "divide" (const Counter&, const Counter&) except +yodaerr Scatter1D Counter_eff_Counter "efficiency" (const Counter&, const Counter&) except +yodaerr cdef extern from "merge.hh": void Counter_iadd_Counter "cython_iadd" (Counter*, Counter*) void Counter_isub_Counter "cython_isub" (Counter*, Counter*) # void Counter_imul_dbl "cython_imul_dbl" (Counter*, double) # void Counter_idiv_dbl "cython_idiv_dbl" (Counter*, double) Counter* Counter_add_Counter "cython_add" (Counter*, Counter*) Counter* Counter_sub_Counter "cython_sub" (Counter*, Counter*) #Counter* Counter_div_Counter "cython_div" (Counter*, Counter*) cdef extern from "YODA/Scatter1D.h" namespace "YODA": Scatter1D mkScatter_Counter "YODA::mkScatter" (const Counter&) except +yodaerr #}}} Counter # Scatter1D {{{ cdef extern from "YODA/Scatter1D.h" namespace "YODA": cdef cppclass Scatter1D(AnalysisObject): Scatter1D() except +yodaerr Scatter1D(string path, string title) except +yodaerr Scatter1D(sortedvector[Point1D], string path, string title) except +yodaerr Scatter1D(vector[double], vector[double], vector[pair[double, double]], vector[pair[double, double]]) except +yodaerr Scatter1D(Scatter1D p, string path) Scatter1D clone() except +yodaerr Scatter1D* newclone() except +yodaerr void reset() except +yodaerr size_t numPoints() except +yodaerr # TODO: have to ignore exception handling on ref-returning methods until Cython bug is fixed vector[Point1D]& points() #except +yodaerr Point1D& point(size_t index) #except +yodaerr void addPoint(const Point1D&) #except +yodaerr void addPoint(double) #except +yodaerr void addPoint(double, const pair[double, double]&) #except +yodaerr void addPoints(const sortedvector[Point1D]&) #except +yodaerr void combineWith(const Scatter1D&) #except +yodaerr void combineWith(const vector[Scatter1D]&) #except +yodaerr void scaleX(double) except +yodaerr - void parseVariations() except +yodaerr - vector[string] variations() except +yodaerr - - vector[vector[double]] covarianceMatrix(bool) except +yodaerr void 
Scatter1D_transformX "YODA::transformX" (Scatter1D&, dbl_dbl_fptr) #}}} Scatter1D # cdef extern from "merge.hh": # Scatter2D* Scatter2D_add_Scatter2D "cython_add" (Scatter2D*, Scatter2D*) # Scatter2D* Scatter2D_sub_Scatter2D "cython_sub" (Scatter2D*, Scatter2D*) cdef extern from "YODA/Scatter1D.h" namespace "YODA": Scatter1D mkScatter_Scatter1D "YODA::mkScatter" (const Scatter1D&) except +yodaerr # Scatter2D {{{ cdef extern from "YODA/Scatter2D.h" namespace "YODA": cdef cppclass Scatter2D(AnalysisObject): Scatter2D() except +yodaerr Scatter2D(string path, string title) except +yodaerr Scatter2D(sortedvector[Point2D], string path, string title) except +yodaerr Scatter2D(vector[double], vector[double], vector[pair[double, double]], vector[pair[double, double]]) except +yodaerr Scatter2D(Scatter2D p, string path) Scatter2D clone() except +yodaerr Scatter2D* newclone() except +yodaerr void reset() except +yodaerr size_t numPoints() except +yodaerr # TODO: have to ignore exception handling on ref-returning methods until Cython bug is fixed vector[Point2D]& points() #except +yodaerr Point2D& point(size_t index) #except +yodaerr void addPoint(const Point2D&) #except +yodaerr void addPoint(double, double) #except +yodaerr void addPoint(double, double, const pair[double, double]&, const pair[double, double]&) #except +yodaerr void addPoints(const sortedvector[Point2D]&) #except +yodaerr void combineWith(const Scatter2D&) #except +yodaerr void combineWith(const vector[Scatter2D]&) #except +yodaerr void scaleX(double) except +yodaerr void scaleY(double) except +yodaerr void scaleXY(double, double) except +yodaerr #void scale(double, double) except +yodaerr - void parseVariations() except +yodaerr vector[string] variations() except +yodaerr - - vector[vector[double]] covarianceMatrix(bool) except +yodaerr void Scatter2D_transformX "YODA::transformX" (Scatter2D&, dbl_dbl_fptr) void Scatter2D_transformY "YODA::transformY" (Scatter2D&, dbl_dbl_fptr) #}}} Scatter2D # cdef extern from "merge.hh": # Scatter2D* Scatter2D_add_Scatter2D "cython_add" (Scatter2D*, Scatter2D*) # Scatter2D* Scatter2D_sub_Scatter2D "cython_sub" (Scatter2D*, Scatter2D*) cdef extern from "YODA/Scatter2D.h" namespace "YODA": Scatter2D mkScatter_Scatter2D "YODA::mkScatter" (const Scatter2D&) except +yodaerr # Scatter3D {{{ cdef extern from "YODA/Scatter3D.h" namespace "YODA": cdef cppclass Scatter3D(AnalysisObject): Scatter3D() except +yodaerr Scatter3D(string path, string title) except +yodaerr Scatter3D(sortedvector[Point3D], string path, string title) except +yodaerr Scatter3D(vector[double], vector[double], vector[pair[double, double]], vector[pair[double, double]], vector[pair[double, double]]) except +yodaerr Scatter3D(Scatter3D p, string path) Scatter3D clone() except +yodaerr Scatter3D* newclone() except +yodaerr void reset() except +yodaerr size_t numPoints() except +yodaerr # TODO: have to ignore exception handling on ref-returning methods until Cython bug is fixed sortedvector[Point3D]& points() #except +yodaerr Point3D& point(size_t index) #except +yodaerr void addPoint(const Point3D&) #except +yodaerr void addPoint(double, double, double) #except +yodaerr void addPoint(double, double, double, const pair[double, double]&, const pair[double, double]&, const pair[double, double]&) #except +yodaerr void addPoints(const sortedvector[Point3D]&) #except +yodaerr void combineWith(const Scatter3D&) #except +yodaerr void combineWith(const vector[Scatter3D]&) #except +yodaerr void scaleX(double) except +yodaerr void scaleY(double) 
except +yodaerr void scaleZ(double) except +yodaerr void scaleXYZ(double, double, double) except +yodaerr #void scale(double, double, double) except +yodaerr - void parseVariations() except +yodaerr vector[string] variations() except +yodaerr - - vector[vector[double]] covarianceMatrix() except +yodaerr - vector[vector[double]] covarianceMatrix(bool) except +yodaerr void Scatter3D_transformX "YODA::transformX" (Scatter3D&, dbl_dbl_fptr) void Scatter3D_transformY "YODA::transformY" (Scatter3D&, dbl_dbl_fptr) void Scatter3D_transformZ "YODA::transformZ" (Scatter3D&, dbl_dbl_fptr) #}}} Scatter3D # cdef extern from "merge.hh": # Scatter3D* Scatter3D_add_Scatter3D "cython_add" (Scatter3D*, Scatter3D*) # Scatter3D* Scatter3D_sub_Scatter3D "cython_sub" (Scatter3D*, Scatter3D*) cdef extern from "YODA/Scatter3D.h" namespace "YODA": Scatter3D mkScatter_Scatter3D "YODA::mkScatter" (const Scatter3D&) except +yodaerr # Histo1D#{{{ cdef extern from "YODA/Histo1D.h" namespace "YODA": cdef cppclass Histo1D(AnalysisObject): Histo1D() except +yodaerr Histo1D(string path, string title) except +yodaerr Histo1D(size_t nbins, double lower, double upper, string path, string title) except +yodaerr Histo1D(vector[double] binedges, string path, string title) except +yodaerr Histo1D(vector[Bin] bins, string path, string title) except +yodaerr Histo1D(Histo1D h, string path) except +yodaerr #Histo1D(Profile1D p, string path) #Histo1D(Scatter2D p, string path) Histo1D clone() except +yodaerr Histo1D* newclone() except +yodaerr void reset() except +yodaerr void fill(double x, double weight, double fraction) except +yodaerr void fillBin(size_t i, double weight, double fraction) except +yodaerr void scaleW(double s) except +yodaerr void normalize(double normto, bool includeoverflows) except +yodaerr void mergeBins(size_t, size_t) except +yodaerr void rebinBy(unsigned int n, size_t begin, size_t end) except +yodaerr void rebinTo(vector[double] edges) except +yodaerr void addBin(double, double) except +yodaerr void addBins(vector[double] edges) except +yodaerr void eraseBin(size_t index) except +yodaerr vector[double] xEdges() except +yodaerr double xMin() except +yodaerr double xMax() except +yodaerr size_t numBins() except +yodaerr vector[HistoBin1D]& bins() int binIndexAt(double x) except +yodaerr const HistoBin1D& bin(size_t ix) const HistoBin1D& binAt(double x) except +yodaerr # TODO: Some Cython mapping problem? 
Dbn1D& totalDbn() Dbn1D& underflow() Dbn1D& overflow() # Whole histo data double integral(bool) double integralTo(int, bool) double integralRange(int, int) unsigned long numEntries(bool) double effNumEntries(bool) double sumW(bool) double sumW2(bool) double xMean(bool) double xVariance(bool) double xStdDev(bool) double xStdErr(bool) double xRMS(bool) # operator == (Histo1D) # operator != (Histo1D) operator + (Histo1D) operator - (Histo1D) operator / (Histo1D) Scatter2D Histo1D_toIntegral "toIntegralHisto" (const Histo1D& h, bool includeunderflow) except +yodaerr Scatter2D Histo1D_toIntegralEff "toIntegralEfficiencyHisto" (const Histo1D& h, bool includeunderflow, bool includeoverflow) except +yodaerr Scatter2D Histo1D_div_Histo1D "divide" (const Histo1D&, const Histo1D&) except +yodaerr Scatter2D Histo1D_eff_Histo1D "efficiency" (const Histo1D&, const Histo1D&) except +yodaerr cdef extern from "merge.hh": void Histo1D_iadd_Histo1D "cython_iadd" (Histo1D*, Histo1D*) void Histo1D_isub_Histo1D "cython_isub" (Histo1D*, Histo1D*) # void Histo1D_imul_dbl "cython_imul_dbl" (Histo1D*, double) # void Histo1D_idiv_dbl "cython_idiv_dbl" (Histo1D*, double) Histo1D* Histo1D_add_Histo1D "cython_add" (Histo1D*, Histo1D*) Histo1D* Histo1D_sub_Histo1D "cython_sub" (Histo1D*, Histo1D*) Histo1D* Histo1D_div_Histo1D "cython_div" (Histo1D*, Histo1D*) cdef extern from "YODA/Scatter2D.h" namespace "YODA": Scatter2D mkScatter_Histo1D "YODA::mkScatter" (const Histo1D&, bool) except +yodaerr #}}} Histo1D # Histo2D {{{ cdef extern from "YODA/Histo2D.h" namespace "YODA": cdef cppclass Histo2D(AnalysisObject): Histo2D() except +yodaerr Histo2D(string path, string title) except +yodaerr Histo2D(size_t nBinsX, double lowerX, double upperX, size_t nBinsY, double lowerY, double upperY, string path, string title) except +yodaerr Histo2D(vector[double] xedges, vector[double] yedges, string path, string title) except +yodaerr Histo2D(Histo2D, string path) #Histo2D(Profile1D p, string path) #Histo2D(Scatter2D p, string path) Histo2D clone() except +yodaerr Histo2D* newclone() except +yodaerr # TODO: add missing functions and enable refs + exceptions when Cython allows void reset() except +yodaerr void fill(double x, double y, double weight, double fraction) except +yodaerr void fillBin(size_t i, double weight, double fraction) except +yodaerr void normalize(double normto, bool includeoverflows) except +yodaerr void scaleW(double scalefactor) except +yodaerr void scaleXY(double, double) # void mergeBins(size_t, size_t) except +yodaerr # void rebin(unsigned int n) except +yodaerr size_t numBins() except +yodaerr size_t numBinsX() except +yodaerr size_t numBinsY() except +yodaerr vector[HistoBin2D]& bins() #except +yodaerr int binIndexAt(double x, double y) except +yodaerr const HistoBin2D& bin(size_t ix) #except +yodaerr const HistoBin2D& binAt(double x, double y) #except +yodaerr void addBin(const pair[double, double]&, const pair[double, double]&) void addBins(const vector[HistoBin2D]&) void addBin(double, double) except +yodaerr void addBins(const vector[double]& edges) except +yodaerr # void eraseBin(size_t index) except +yodaerr vector[double] xEdges() except +yodaerr vector[double] yEdges() except +yodaerr double xMin() except +yodaerr double xMax() except +yodaerr double yMin() except +yodaerr double yMax() except +yodaerr # Dbn2D& outflow(int, int) #except +yodaerr # Whole histo data Dbn2D& totalDbn() #except +yodaerr double integral(bool) unsigned long numEntries(bool) double effNumEntries(bool) double sumW(bool) double 
sumW2(bool) double xMean(bool) double yMean(bool) double xVariance(bool) double yVariance(bool) double xStdDev(bool) double yStdDev(bool) double xStdErr(bool) double yStdErr(bool) double xRMS(bool) double yRMS(bool) # operator == (Histo2D) # operator != (Histo2D) operator + (Histo2D) operator - (Histo2D) operator / (Histo2D) Scatter3D Histo2D_div_Histo2D "divide" (const Histo2D&, const Histo2D&) except +yodaerr Scatter3D Histo2D_eff_Histo2D "efficiency" (const Histo2D&, const Histo2D&) except +yodaerr cdef extern from "merge.hh": void Histo2D_iadd_Histo2D "cython_iadd" (Histo2D*, Histo2D*) void Histo2D_isub_Histo2D "cython_isub" (Histo2D*, Histo2D*) # void Histo2D_imul_dbl "cython_imul_dbl" (Histo2D*, double) # void Histo2D_idiv_dbl "cython_idiv_dbl" (Histo2D*, double) Histo2D* Histo2D_add_Histo2D "cython_add" (Histo2D*, Histo2D*) Histo2D* Histo2D_sub_Histo2D "cython_sub" (Histo2D*, Histo2D*) Histo2D* Histo2D_div_Histo2D "cython_div" (Histo2D*, Histo2D*) cdef extern from "YODA/Scatter3D.h" namespace "YODA": Scatter3D mkScatter_Histo2D "YODA::mkScatter" (const Histo2D&, bool) except +yodaerr # Histo2D }}} # Profile1D {{{ cdef extern from "YODA/Profile1D.h" namespace "YODA": cdef cppclass Profile1D(AnalysisObject): Profile1D() except +yodaerr Profile1D(string path, string title) except +yodaerr Profile1D(size_t nxbins, double xlower, double xupper, string path, string title) except +yodaerr Profile1D(vector[double] xbinedges, string path, string title) except +yodaerr Profile1D(Profile1D p, string path) except +yodaerr Profile1D(Scatter2D s, string path) except +yodaerr #Profile1D(Histo1D p, string path) Profile1D clone() except +yodaerr Profile1D* newclone() except +yodaerr void reset() except +yodaerr void fill(double x, double y, double weight, double fraction) except +yodaerr void fillBin(size_t i, double y, double weight, double fraction) except +yodaerr void scaleW(double s) except +yodaerr void scaleY(double s) except +yodaerr void mergeBins(size_t, size_t) except +yodaerr void rebinBy(unsigned int n, size_t begin, size_t end) except +yodaerr void rebinTo(vector[double] edges) except +yodaerr void addBin(double, double) except +yodaerr void addBins(vector[double] edges) except +yodaerr # TODO: void eraseBin(size_t index) except +yodaerr vector[double] xEdges() except +yodaerr double xMin() except +yodaerr double xMax() except +yodaerr size_t numBins() except +yodaerr vector[ProfileBin1D] bins() #except +yodaerr int binIndexAt(double x) except +yodaerr const ProfileBin1D& bin(size_t ix) #except +yodaerr const ProfileBin1D& binAt(double x) #except +yodaerr # The trick here is to treat these not as references. 
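# A Python-level sketch of how the "merge.hh" helpers declared above surface in practice:
# cython_add/cython_iadd back the + and += operators of the wrapper classes, while dividing two
# binning-compatible histograms goes through the YODA::divide free function and yields a Scatter
# rather than another Histo. h_num and h_den below are assumed to be two already-filled,
# binning-compatible Histo1D objects.
h_sum = h_num + h_den       # new Histo1D via Histo1D_add_Histo1D
h_num += h_den              # in-place merge via Histo1D_iadd_Histo1D
ratio = h_num / h_den       # Scatter2D via the declared "divide" wrapper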
# I suppose when you think about it, it makes sense Dbn2D& totalDbn() Dbn2D& underflow() Dbn2D& overflow() unsigned long numEntries(bool) double effNumEntries(bool) double sumW(bool) double sumW2(bool) double xMean(bool) double xVariance(bool) double xStdDev(bool) double xStdErr(bool) double xRMS(bool) operator + (Profile1D) operator - (Profile1D) operator / (Profile1D) Scatter2D Profile1D_div_Profile1D "divide" (const Profile1D&, const Profile1D&) except +yodaerr cdef extern from "merge.hh": void Profile1D_iadd_Profile1D "cython_iadd" (Profile1D*, Profile1D*) void Profile1D_isub_Profile1D "cython_isub" (Profile1D*, Profile1D*) # void Profile1D_imul_dbl "cython_imul_dbl" (Profile1D*, double) # void Profile1D_idiv_dbl "cython_idiv_dbl" (Profile1D*, double) Profile1D* Profile1D_add_Profile1D "cython_add" (Profile1D*, Profile1D*) Profile1D* Profile1D_sub_Profile1D "cython_sub" (Profile1D*, Profile1D*) Profile1D* Profile1D_div_Profile1D "cython_div" (Profile1D*, Profile1D*) cdef extern from "YODA/Scatter2D.h" namespace "YODA": Scatter2D mkScatter_Profile1D "YODA::mkScatter" (const Profile1D&, bool, bool) except +yodaerr #}}} Profile1D # Profile2D {{{ cdef extern from "YODA/Profile2D.h" namespace "YODA": cdef cppclass Profile2D(AnalysisObject): Profile2D() except +yodaerr Profile2D(string path, string title) except +yodaerr Profile2D(size_t nbinsX, double lowerX, double upperX, size_t nbinsY, double lowerY, double upperY, string path, string title) except +yodaerr Profile2D(vector[double] xedges, vector[double] yedges, string path, string title) except +yodaerr Profile2D(Profile2D p, string path) except +yodaerr #Profile2D(Scatter3D s, string path) except +yodaerr #Profile2D(Histo2D p, string path) Profile2D clone() except +yodaerr Profile2D* newclone() except +yodaerr # TODO: add missing functions and enable refs + exceptions when Cython allows void reset() except +yodaerr void fill(double x, double y, double z, double weight, double fraction) except +yodaerr void fillBin(size_t i, double z, double weight, double fraction) except +yodaerr void scaleW(double s) except +yodaerr void scaleXY(double, double) # void mergeBins(size_t, size_t) except +yodaerr # void rebin(unsigned int n) except +yodaerr size_t numBins() except +yodaerr size_t numBinsX() except +yodaerr size_t numBinsY() except +yodaerr vector[ProfileBin2D]& bins() #except +yodaerr int binIndexAt(double x, double y) except +yodaerr const ProfileBin2D& bin(size_t ix) #except +yodaerr const ProfileBin2D& binAt(double x, double y) #except +yodaerr void addBin(const pair[double, double]&, const pair[double, double]&) except +yodaerr void addBins(const vector[double]&, const vector[double]&) except +yodaerr # void eraseBin(size_t index) except +yodaerr vector[double] xEdges() except +yodaerr vector[double] yEdges() except +yodaerr double xMin() except +yodaerr double xMax() except +yodaerr double yMin() except +yodaerr double yMax() except +yodaerr # Dbn3D& outflow(int, int) #except +yodaerr # Whole histo data Dbn3D& totalDbn() #except +yodaerr unsigned long numEntries(bool) double effNumEntries(bool) double sumW(bool) double sumW2(bool) double xMean(bool) double yMean(bool) double xVariance(bool) double yVariance(bool) double xStdDev(bool) double yStdDev(bool) double xStdErr(bool) double yStdErr(bool) double xRMS(bool) double yRMS(bool) operator + (Profile2D) operator - (Profile2D) operator / (Profile2D) Scatter3D Profile2D_div_Profile2D "divide" (const Profile2D&, const Profile2D&) except +yodaerr cdef extern from "merge.hh": void
Profile2D_iadd_Profile2D "cython_iadd" (Profile2D*, Profile2D*) void Profile2D_isub_Profile2D "cython_isub" (Profile2D*, Profile2D*) # void Profile2D_imul_dbl "cython_imul_dbl" (Profile2D*, double) # void Profile2D_idiv_dbl "cython_idiv_dbl" (Profile2D*, double) Profile2D* Profile2D_add_Profile2D "cython_add" (Profile2D*, Profile2D*) Profile2D* Profile2D_sub_Profile2D "cython_sub" (Profile2D*, Profile2D*) Profile2D* Profile2D_div_Profile2D "cython_div" (Profile2D*, Profile2D*) cdef extern from "YODA/Scatter3D.h" namespace "YODA": Scatter3D mkScatter_Profile2D "YODA::mkScatter" (const Profile2D&, bool, bool) except +yodaerr #}}} Profile2D # Streams {{{ cdef extern from "" namespace "std": cdef cppclass istringstream: istringstream() string& str(string&) cdef cppclass ostringstream: ostringstream() string& str() cdef extern from "YODA/IO.h" namespace "YODA": void IO_read_from_file "YODA::read" (string&, vector[AnalysisObject*]&) except +yodaerr cdef extern from "YODA/Reader.h" namespace "YODA": cdef cppclass Reader: void read(istringstream&, vector[AnalysisObject*]&) except +yodaerr void read_from_file "YODA::Reader::read" (string&, vector[AnalysisObject*]&) except +yodaerr cdef extern from "YODA/ReaderYODA.h" namespace "YODA": Reader& ReaderYODA_create "YODA::ReaderYODA::create" () cdef extern from "YODA/ReaderFLAT.h" namespace "YODA": Reader& ReaderFLAT_create "YODA::ReaderFLAT::create" () cdef extern from "YODA/ReaderAIDA.h" namespace "YODA": Reader& ReaderAIDA_create "YODA::ReaderAIDA::create" () cdef extern from "YODA/Reader.h" namespace "YODA": Reader& Reader_create "YODA::mkReader" (string& filename) cdef extern from "YODA/IO.h" namespace "YODA": void IO_write_to_file "YODA::write" (string&, vector[AnalysisObject*]&) except +yodaerr cdef extern from "YODA/Writer.h" namespace "YODA": cdef cppclass Writer: void write(ostringstream&, vector[AnalysisObject*]&) except +yodaerr void write_to_file "YODA::Writer::write" (string&, vector[AnalysisObject*]&) except +yodaerr cdef extern from "YODA/WriterYODA.h" namespace "YODA": Writer& WriterYODA_create "YODA::WriterYODA::create" () cdef extern from "YODA/WriterFLAT.h" namespace "YODA": Writer& WriterFLAT_create "YODA::WriterFLAT::create" () cdef extern from "YODA/WriterAIDA.h" namespace "YODA": Writer& WriterAIDA_create "YODA::WriterAIDA::create" () cdef extern from "YODA/Reader.h" namespace "YODA": Writer& Writer_create "YODA::mkWriter" (string& filename) # Streams }}} # Axis1D {{{ cdef extern from "YODA/Axis1D.h" namespace "YODA": cdef cppclass Axis1D[BIN1D, DBN]: Axis1D() except +yodaerr Axis1D(vector[double] binedges) except +yodaerr Axis1D(size_t, double, double) except +yodaerr Axis1D(vector[BIN1D] bins) except +yodaerr void addBin(double, double) except +yodaerr size_t numBins() except +yodaerr vector[BIN1D]& bins() double xMin() except +yodaerr double xMax() except +yodaerr vector[double] xEdges() except +yodaerr long getBinIndex(double) void reset() DBN& totalDbn() DBN& underflow() DBN& overflow() void eraseBin(size_t index) except +yodaerr void mergeBins(size_t, size_t) except +yodaerr # Axis1D }}} # Axis2D {{{ cdef extern from "YODA/Axis2D.h" namespace "YODA": cdef cppclass Axis2D[BIN2D, DBN]: Axis2D() except +yodaerr Axis2D(vector[double], vector[double]) except +yodaerr Axis2D(size_t, pair[double, double], size_t, pair[double, double]) except +yodaerr Axis2D(vector[BIN2D] bins) except +yodaerr void addBin(pair[double, double], pair[double, double]) except +yodaerr size_t numBins() except +yodaerr vector[BIN2D]& bins() double xMin() 
except +yodaerr double xMax() except +yodaerr double yMin() except +yodaerr double yMax() except +yodaerr long getBinIndex(double, double) void reset() DBN& totalDbn() # TODO: reinstate DBN& outflow(int, int) void eraseBin(size_t index) except +yodaerr void mergeBins(size_t, size_t) except +yodaerr # Axis2D }}} diff --git a/pyext/yoda/include/AnalysisObject.pyx b/pyext/yoda/include/AnalysisObject.pyx --- a/pyext/yoda/include/AnalysisObject.pyx +++ b/pyext/yoda/include/AnalysisObject.pyx @@ -1,157 +1,130 @@ cimport util cdef class AnalysisObject(util.Base): """ AnalysisObject is the base class of the main user-facing objects, such as the Histo, Profile and Scatter classes. """ # Pointer upcasting mechanism cdef inline c.AnalysisObject* aoptr(self) except NULL: return self.ptr() # Pointer upcasting mechanism # DEPRECATED cdef inline c.AnalysisObject* _AnalysisObject(self) except NULL: return self.ptr() # Deallocator (only needed as a base class) def __dealloc__(self): p = self.aoptr() if self._deallocate: del p - #@property + @property def type(self): "String identifier for this type" return self.aoptr().type().decode('utf-8') - #@property + @property def dim(self): "Fill dimension or plot dimension of this object, for fillables and scatters respectively" return self.aoptr().dim() - #@property + @property def annotations(self): """() -> list[str] A list of all annotation/metadata keys.""" return [ a.decode('utf-8') for a in self.aoptr().annotations() ] - #@property + @property def annotationsDict(self): """() -> dict[str->str] A dict of all annotations/metadata entries.""" # TODO: add a map equivalent to C++? - return dict((k.lower(), self.annotation(k)) for k in self.annotations()) + return dict((k.lower(), self.annotation(k)) for k in self.annotations) def annotation(self, k, default=None): """Get annotation k from this object (falling back to default if not set). The annotation string will be automatically converted to Python native types as far as possible -- more complex types are possible - via the ast and yaml modules.""" + if the yaml module is installed.""" try: - rtn = self.aoptr().annotation(k.encode('utf-8')) + astr = self.aoptr().annotation(k.encode('utf-8')) try: import yaml - rtn = yaml.full_load(rtn) - except: - rtn = util._autotype(rtn, True) + return yaml.load(astr) + except ImportError: + return util._autotype(astr) except: - rtn = default - return rtn + return default def setAnnotation(self, k, v): """Set annotation k on this object.""" self.aoptr().setAnnotation(k.encode('utf-8'), util._autostr(v).encode('utf-8')) def hasAnnotation(self, k): """Check if this object has annotation k.""" return self.aoptr().hasAnnotation(k.encode('utf-8')) def rmAnnotation(self, k): """Remove annotation k from this object.""" self.aoptr().rmAnnotation(k.encode('utf-8')) def clearAnnotations(self): """Clear the annotations dictionary.""" self.aoptr().clearAnnotations() def dump(self): """A human readable representation of this object.""" try: from cStringIO import StringIO except ImportError: from io import StringIO f = StringIO() writeFLAT([self], f) f.seek(0) return f.read().strip() - #@property + @property def name(self): """ Return the histogram name, i.e. the last part of the path (which may be empty). """ return self.aoptr().name().decode('utf-8') - def path(self): + property path: """ Used for persistence and as a unique identifier. Must begin with a '/' if not the empty string. 
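# Sketch of the annotation interface defined just above, assuming `ao` is any
# AnalysisObject-derived object (e.g. a Histo1D). Values are stored as strings and converted back
# to native Python types on read, via yaml when it is installed and the util._autotype fallback
# otherwise.
ao.setAnnotation("ScaledBy", 0.5)
ao.setAnnotation("MyLabel", "signal")
print(ao.annotation("ScaledBy"))       # -> 0.5 as a float, not the string "0.5"
print(ao.annotation("Missing", 42))    # -> 42, the supplied default
print(ao.annotationsDict)              # lower-cased keys mapped to decoded values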
""" - return self.aoptr().path().decode('utf-8') + def __get__(self): + return self.aoptr().path().decode('utf-8') - def setPath(self, path): + def __set__(self, path): + self.aoptr().setPath(path.encode('utf-8')) + + + property title: """ - Used for persistence and as a unique identifier. Must begin with - a '/' if not the empty string. + Convenient access to the histogram title (optional). """ - self.aoptr().setPath(path.encode('utf-8')) + def __get__(self): + return self.aoptr().title().decode('utf-8') - # property path: - # """ - # Used for persistence and as a unique identifier. Must begin with - # a '/' if not the empty string. - # """ - # def __get__(self): - # return self.aoptr().path().decode('utf-8') - - # def __set__(self, path): - # self.aoptr().setPath(path.encode('utf-8')) - - - # def title(self): - # """ - # Histogram title - # """ - # return self.aoptr().title().decode('utf-8') - - # def setTitle(self, title): - # """ - # Set the histogram title (optional) - # """ - # self.aoptr().setTitle(title.encode('utf-8')) - - # property title: - # """ - # Convenient access to the histogram title (optional). - # """ - # def __get__(self): - # return self.aoptr().title().decode('utf-8') - - # def __set__(self, title): - # self.aoptr().setTitle(title.encode('utf-8')) + def __set__(self, title): + self.aoptr().setTitle(title.encode('utf-8')) def __repr__(self): return "<%s '%s'>" % (self.__class__.__name__, self.path) ## Convenience alias AO = AnalysisObject diff --git a/pyext/yoda/include/Axis1D_BIN1D_DBN.pyx b/pyext/yoda/include/Axis1D_BIN1D_DBN.pyx --- a/pyext/yoda/include/Axis1D_BIN1D_DBN.pyx +++ b/pyext/yoda/include/Axis1D_BIN1D_DBN.pyx @@ -1,66 +1,66 @@ cimport util # TODO (when there is absolutely nothing else to do) docstrings (but never will # it be a user facing class... 
it's merely there for tests) cdef class Axis1D_${BIN1D}_${DBN}(util.Base): cdef inline c.Axis1D[c.${BIN1D}, c.${DBN}]* a1dptr(self) except NULL: return self.ptr() # TODO: remove cdef inline c.Axis1D[c.${BIN1D}, c.${DBN}]* _Axis1D(self) except NULL: return self.ptr() def __dealloc__(self): cdef c.Axis1D[c.${BIN1D}, c.${DBN}]* p = self.a1ptr() if self._deallocate: del p def __init__(self): cutil.set_owned_ptr(self, new c.Axis1D[c.${BIN1D}, c.${DBN}]()) def __repr__(self): return "" % self.numBins - #@property + @property def numBins(self): return self.a1ptr().bins().size() def __len__(self): return self.numBins # TODO: remove # def __getitem__(self, py_ix): # cdef size_t i = cutil.pythonic_index(py_ix, self.a1ptr().bins().size()) # return cutil.new_borrowed_cls(${BIN1D}, & self.a1ptr().bins().at(i), self) def addBin(self, a, b): self.a1ptr().addBin(a, b) - #@property + @property def totalDbn(self): return cutil.new_borrowed_cls(${DBN}, &self.a1ptr().totalDbn(), self) - #@property + @property def underflow(self): return cutil.new_borrowed_cls(${DBN}, &self.a1ptr().underflow(), self) - #@property + @property def overflow(self): return cutil.new_borrowed_cls(${DBN}, &self.a1ptr().overflow(), self) def reset(self): self.a1ptr().reset() def eraseBin(self, i): self.a1ptr().eraseBin(i) def getBinIndex(self, x): return self.a1ptr().getBinIndex(x) def mergeBins(self, a, b): self.a1ptr().mergeBins(a, b) #def binAt(self, x): # return self[self.a1ptr().getBinIndex(x)] diff --git a/pyext/yoda/include/Axis2D_BIN2D_DBN.pyx b/pyext/yoda/include/Axis2D_BIN2D_DBN.pyx --- a/pyext/yoda/include/Axis2D_BIN2D_DBN.pyx +++ b/pyext/yoda/include/Axis2D_BIN2D_DBN.pyx @@ -1,66 +1,66 @@ cimport util # TODO: docstrings cdef class Axis2D_${BIN2D}_${DBN}(util.Base): cdef inline c.Axis2D[c.${BIN2D}, c.${DBN}]* a2ptr(self) except NULL: return self.ptr() # TODO: remove cdef inline c.Axis2D[c.${BIN2D}, c.${DBN}]* _Axis2D(self) except NULL: return self.ptr() def __dealloc__(self): cdef c.Axis2D[c.${BIN2D}, c.${DBN}]* p = self.a2ptr() if self._deallocate: del p def __init__(self, nx, xl, xu, ny, yl, yu): cutil.set_owned_ptr(self, new c.Axis2D[c.${BIN2D}, c.${DBN}]( nx, pair[double, double](xl, xu), ny, pair[double, double](yl, yu))) - #@property + @property def numBins(self): return self._Axis1D().bins().size() def __len__(self): return self.numBins # TODO: remove # def __getitem__(self, py_ix): # cdef size_t i = cutil.pythonic_index(py_ix, self.a2ptr().bins().size()) # return cutil.new_borrowed_cls(${BIN2D}, & self.a2ptr().bins().at(i), self) def __repr__(self): # TODO: improve return "" % self.numBins - #@property + @property def totalDbn(self): return cutil.new_owned_cls( ${DBN}, new c.${DBN}(self.a2ptr().totalDbn())) def addBin(self, a, b, c, d): self.a2ptr().addBin(a, b, c, d) - #@property + @property def outflow(self, ix, iy): return cutil.new_owned_cls(${DBN}, new c.${DBN}(self.a2ptr().outflow(ix, iy))) - #@property + @property def edges(self): return util.XY( util.EdgePair(self.a2ptr().xMin(), self.a2ptr().xMax()), util.EdgePair(self.a2ptr().yMin(), self.a2ptr().yMax()) ) def reset(self): self.a2ptr().reset() def binAt(self, x, y): cdef int ix = self.a2ptr().getBinIndex(x, y) if ix < 0: raise YodaExc_RangeError('No bin found!') return self[ix] diff --git a/pyext/yoda/include/Bin.pyx b/pyext/yoda/include/Bin.pyx --- a/pyext/yoda/include/Bin.pyx +++ b/pyext/yoda/include/Bin.pyx @@ -1,49 +1,49 @@ cimport util cdef class Bin(util.Base): cdef inline c.Bin* bptr(self) except NULL: return self.ptr() def __dealloc__(self): cdef 
c.Bin* p = self.bptr() if self._deallocate: del p - #@property + @property def dim(self): """None -> int Dimension of the fill space (should match containing Histo/Profile)""" return self.bptr().dim() - #@property + @property def numEntries(self): """ The number of entries that have filled the bin. """ return self.bptr().numEntries() - #@property + @property def effNumEntries(self): """ The effective number of entries in the bin. s.effNumEntries <==> (s.sumW ** 2) / s.sumW2 """ return self.bptr().effNumEntries() - #@property + @property def sumW(self): """ The sum of weights: sum(weights). """ return self.bptr().sumW() - #@property + @property def sumW2(self): """ The sum of weights-squared: sum(weights * weights) """ return self.bptr().sumW2() diff --git a/pyext/yoda/include/Bin1D_DBN.pyx b/pyext/yoda/include/Bin1D_DBN.pyx --- a/pyext/yoda/include/Bin1D_DBN.pyx +++ b/pyext/yoda/include/Bin1D_DBN.pyx @@ -1,138 +1,138 @@ cdef class Bin1D_${DBN}(Bin): """ 1D Bin based on an underlying ${DBN}. ProfileBin1D(xlow, xhigh) """ cdef inline c.Bin1D_${DBN}* b1ptr(self) except NULL: return self.ptr() # TODO: remove cdef inline c.Bin1D_${DBN}* _Bin1D(self) except NULL: return self.ptr() def __init__(self, xlow, xhigh): cutil.set_owned_ptr(self, new c.Bin1D_${DBN}(pair[double, double](xlow, xhigh))) def __repr__(self): return '<%s x=[%g, %g)>' % (self.__class__.__name__, self.xMin, self.xMax) # def scaleX(self, ax): # """ # float -> None # Scale this bin's x arguments by ax. # """ # self.b1ptr().scaleX(ax) # def scaleW(self, a): # """ # float -> None # Scale this bin's weights by a. # """ # self.b1ptr().scaleW(a) - #@property + @property def xEdges(self): """The lower and upper edges.""" return (self.xMin, self.xMax) - #@property + @property def xMin(self): """The lower bin edge.""" return self.b1ptr().xMin() - #@property + @property def xMax(self): """The upper bin edge.""" return self.b1ptr().xMax() - #@property + @property def xMid(self): """The midpoint of the bin.""" return self.b1ptr().xMid() - #@property + @property def xWidth(self): """The width of the bin.""" return self.b1ptr().xWidth() - #@property + @property def xMean(self): """The mean of the x-values that have filled the bin.""" return self.b1ptr().xMean() - #@property + @property def xFocus(self): """ The focus of the bin. If the bin has been filled, then this is the mean fill on this bin. If the bin has not been filled, then the focus is the midpoint of the bin. """ return self.b1ptr().xFocus() - #@property + @property def xVariance(self): """ The variance of the x-values that have filled the bin. """ return self.b1ptr().xVariance() - #@property + @property def xStdDev(self): """ The standard deviation of the x-values that have filled the bin. """ return self.b1ptr().xStdDev() - #@property + @property def xStdErr(self): """ The standard error of the x-values that have filled the bin. """ return self.b1ptr().xStdErr() - #@property + @property def xRMS(self): """ The root-mean-square of the x-values that have filled the bin. """ return self.b1ptr().xRMS() - #@property + @property def sumWX(self): """ The sum of weights-times-x: sum(weights * x) """ return self.b1ptr().sumWX() - #@property + @property def sumWX2(self): """ The sum of weights-times-x-squared: sum(weights * x * x) """ return self.b1ptr().sumWX2() def merge(Bin1D_${DBN} self, Bin1D_${DBN} other): """ merge(other) -> Bin1D_${DBN}. Merge this bin with another of the same type. Only directly adjacent bins, i.e. those sharing a common edge, can be merged. 
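# Sketch of the Bin1D accessors above, using a bin taken from a filled Histo1D `h` (assumed).
# Whether these accessors are properties or call-style methods differs between YODA versions
# (this very patch flips several of them), so the property-style access below assumes the
# post-patch state.
b = h.bins[0]                   # first HistoBin1D of the histogram
print(b.xMin, b.xMax, b.xMid)   # edges and midpoint
print(b.xFocus)                 # mean fill position, or the midpoint for an empty bin
print(b.sumW, b.sumW2)          # raw weight sums behind the height/area values and their errors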
""" self.b1ptr().merge(deref(other.b1ptr())) return self def __add__(Bin1D_${DBN} self, Bin1D_${DBN} other): return cutil.new_owned_cls(Bin1D_${DBN}, new c.Bin1D_${DBN}(deref(self.b1ptr()) + deref(other.b1ptr()))) def __sub__(Bin1D_${DBN} self, Bin1D_${DBN} other): return cutil.new_owned_cls(Bin1D_${DBN}, new c.Bin1D_${DBN}(deref(self.b1ptr()) - deref(other.b1ptr()))) diff --git a/pyext/yoda/include/Bin2D_DBN.pyx b/pyext/yoda/include/Bin2D_DBN.pyx --- a/pyext/yoda/include/Bin2D_DBN.pyx +++ b/pyext/yoda/include/Bin2D_DBN.pyx @@ -1,243 +1,243 @@ cimport util # TODO: docstrings cdef class Bin2D_${DBN}(Bin): """2D Bin class templated on a ${DBN}""" cdef inline c.Bin2D_${DBN}* b2ptr(self) except NULL: return self.ptr() # TODO: remove cdef inline c.Bin2D_${DBN}* _Bin2D(self) except NULL: return self.ptr() def __init__(self, xlow, xhigh, ylow, yhigh): cutil.set_owned_ptr(self, new c.Bin2D_${DBN}( pair[double, double](xlow, xhigh), pair[double, double](ylow, yhigh) )) def __repr__(self): return '<%s x=[%g, %g), y=[%g, %g)>' % (self.__class__.__name__, self.xMin, self.xMax, self.yMin, self.yMax) # def scaleXY(self, x=1.0, y=1.0): # self.b2ptr().scaleXY(x, y) # def scaleW(self, w): # self.b2ptr().scaleW(w) - #@property + @property def xEdges(self): """ The lower and upper x edges. """ cdef pair[double, double] x = self.b2ptr().xEdges() return (x.first, x.second) - #@property + @property def yEdges(self): """ The lower and upper y edges. """ cdef pair[double, double] y = self.b2ptr().yEdges() return (y.first, y.second) - #@property + @property def xyEdges(self): """ The lower and upper x,y edge pairs. """ return util.XY(self.xEdges, self.yEdges) - #@property + @property def xMin(self): """Low edge in x.""" return self.b2ptr().xMin() - #@property + @property def yMin(self): """Low edge in y.""" return self.b2ptr().yMin() - #@property + @property def xyMin(self): """Low edges in x,y.""" return util.XY(self.xMin, self.yMin) - #@property + @property def xMax(self): """High edge in x.""" return self.b2ptr().xMax() - #@property + @property def yMax(self): """High edge in y.""" return self.b2ptr().yMax() - #@property + @property def xyMax(self): """High edges in x,y.""" return util.XY(self.xMax, self.yMax) - #@property + @property def xMid(self): """Geometric centre of the bin in x""" return self.b2ptr().xMid() - #@property + @property def yMid(self): """Geometric centre of the bin in y""" return self.b2ptr().yMid() - #@property + @property def xyMid(self): """Geometric centre of the bin""" return util.XY(self.xMid, self.yMid) - #@property + @property def xWidth(self): """Width of the bin in x""" return self.b2ptr().xWidth() - #@property + @property def yWidth(self): """Width of the bin in y""" return self.b2ptr().yWidth() - #@property + @property def xyWidths(self): """The widths of this bin in the x- and y-dimensions.""" return util.XY(self.xWidth, self.yWidth) - #@property + @property def area(self): """The area of this bin in the x-y plane.""" return self.b2ptr().area() - #@property + @property def xFocus(self): """Focus of the bin in x""" return self.b2ptr().xFocus() - #@property + @property def yFocus(self): """Focus of the bin in y""" return self.b2ptr().yFocus() - #@property + @property def xyFocus(self): """The focus of the bin in the x- and y-dimensions""" return util.XY(self.xFocus, self.yFocus) - #@property + @property def xMean(self): return self.b2ptr().xMean() - #@property + @property def yMean(self): return self.b2ptr().xMean() - #@property + @property def xyMean(self): return 
util.XY(self.xMean, self.yMean) - #@property + @property def xVariance(self): return self.b2ptr().xVariance() - #@property + @property def yVariance(self): return self.b2ptr().xVariance() - #@property + @property def xyVariance(self): return util.XY(self.xVariance, self.yVariance) - #@property + @property def xStdDev(self): return self.b2ptr().xStdDev() - #@property + @property def yStdDev(self): return self.b2ptr().yStdDev() - #@property + @property def xyStdDev(self): return util.XY(self.xStdDev, self.yStdDev) - #@property + @property def xStdErr(self): return self.b2ptr().xStdErr() - #@property + @property def yStdErr(self): return self.b2ptr().yStdErr() - #@property + @property def xyStdErr(self): return util.XY(self.xStdErr, self.yStdErr) - #@property + @property def xRMS(self): return self.b2ptr().xRMS() - #@property + @property def yRMS(self): return self.b2ptr().yRMS() - #@property + @property def xyRMS(self): return util.XY(self.xRMS, self.yRMS) # Raw statistics # ################## - #@property + @property def sumWX(self): return self.b2ptr().sumWX() - #@property + @property def sumWY(self): return self.b2ptr().sumWY() - #@property + @property def sumWXY(self): return self.b2ptr().sumWXY() - #@property + @property def sumWX2(self): return self.b2ptr().sumWX2() - #@property + @property def sumWY2(self): return self.b2ptr().sumWY2() #def merge(Bin2D_${DBN} self, Bin2D_${DBN} other): # self.b2ptr().merge(deref(other.b2ptr())) # return self def adjacentTo(Bin2D_${DBN} self, Bin2D_${DBN} other): return self.b2ptr().adjacentTo(deref(other.b2ptr())) def __add__(Bin2D_${DBN} self, Bin2D_${DBN} other): return cutil.new_owned_cls( Bin2D_${DBN}, new c.Bin2D_${DBN}(deref(self.b2ptr()) + deref(other.b2ptr()))) def __sub__(Bin2D_${DBN} self, Bin2D_${DBN} other): return cutil.new_owned_cls( Bin2D_${DBN}, new c.Bin2D_${DBN}(deref(self.b2ptr()) - deref(other.b2ptr()))) diff --git a/pyext/yoda/include/Counter.pyx b/pyext/yoda/include/Counter.pyx --- a/pyext/yoda/include/Counter.pyx +++ b/pyext/yoda/include/Counter.pyx @@ -1,139 +1,139 @@ cdef class Counter(AnalysisObject): """ Weight counter. Like a histogram without any axis (and hence only one bin). Call fill() like with a histogram. Sums of weights can be returned, with val() and err() being shorthand for the sum of weights and its binomial error. Counter(path="", title=""). Construct a counter with optional path and title but no bins. """ cdef inline c.Counter* cptr(self) except NULL: return self.ptr() # TODO: remove cdef inline c.Counter* _Counter(self) except NULL: return self.ptr() def __init__(self, path="", title=""): path = path.encode('utf-8') title = title.encode('utf-8') - cutil.set_owned_ptr(self, new c.Counter(path, + cutil.set_owned_ptr(self, new c.Counter(path, title)) def __repr__(self): - return "<%s '%s' sumw=%0.2g, err=%s>" % (self.__class__.__name__, self.path(), self.val(), self.err()) + return "<%s '%s' sumw=%0.2g, err=%s>" % (self.__class__.__name__, self.path, self.val, self.err) def reset(self): """None -> None. Reset the counter.""" self.cptr().reset() def clone(self): """None -> Couner. Clone this Counter.""" return cutil.new_owned_cls(Counter, self.cptr().newclone()) def fill(self, weight=1.0, fraction=1.0): """([w]) -> None. 
Fill with given optional weight.""" self.cptr().fill(weight, fraction) #@property def numEntries(self): """None -> float Number of times this counter was filled.""" return self.cptr().numEntries() #@property def effNumEntries(self): """None -> float Effective number of times this counter was filled, computed from weights.""" return self.cptr().effNumEntries() #@property def sumW(self): """() -> float Sum of weights filled into this counter.""" return self.cptr().sumW() #@property def sumW2(self): """() -> float Sum of weights filled into this counter.""" return self.cptr().sumW2() - #@property + @property def val(self): """() -> float Sum of weights filled into this counter.""" return self.cptr().val() - #@property + @property def err(self): """() -> float Binomial uncertainty on the sum of weights filled into this counter.""" return self.cptr().err() - #@property + @property def relErr(self): """() -> float Relative binomial uncertainty on the sum of weights filled into this counter.""" return self.cptr().relErr() def scaleW(self, w): """ (float) -> None. Rescale the weights in this counter by the factor w.""" self.cptr().scaleW(w) def mkScatter(self): """None -> Scatter1D. Convert this Counter to a Scatter1D, with x representing the value and error.""" cdef c.Scatter1D s1 = c.mkScatter_Counter(deref(self.cptr())) return cutil.new_owned_cls(Scatter1D, s1.newclone()) def divideBy(self, Counter other, efficiency=False): cdef c.Scatter1D s1 if not efficiency: s1 = c.Counter_div_Counter(deref(self.cptr()), deref(other.cptr())) else: s1 = c.Counter_eff_Counter(deref(self.cptr()), deref(other.cptr())) return cutil.new_owned_cls(Scatter1D, s1.newclone()) ## In-place special methods def __iadd__(Counter self, Counter other): c.Counter_iadd_Counter(self.cptr(), other.cptr()) return self def __isub__(Counter self, Counter other): c.Counter_isub_Counter(self.cptr(), other.cptr()) return self ## Unbound special methods def __add__(Counter self, Counter other): h = Counter() cutil.set_owned_ptr(h, c.Counter_add_Counter(self.cptr(), other.cptr())) return h def __sub__(Counter self, Counter other): h = Counter() cutil.set_owned_ptr(h, c.Counter_sub_Counter(self.cptr(), other.cptr())) return h def __div__(Counter self, Counter other): return self.divideBy(other) def __truediv__(Counter self, Counter other): return self.divideBy(other) diff --git a/pyext/yoda/include/Dbn0D.pyx b/pyext/yoda/include/Dbn0D.pyx --- a/pyext/yoda/include/Dbn0D.pyx +++ b/pyext/yoda/include/Dbn0D.pyx @@ -1,92 +1,92 @@ cimport util cdef class Dbn0D(util.Base): """ A zero-dimensional 'counter', used and exposed by Counter. """ cdef c.Dbn0D* d0ptr(self) except NULL: return self.ptr() # TODO: remove! cdef c.Dbn0D *_Dbn0D(self) except NULL: return self.ptr() def __dealloc__(self): cdef c.Dbn0D *p = self.d0ptr() if self._deallocate: del p def __init__(self): cutil.set_owned_ptr(self, new c.Dbn0D()) def __repr__(self): return '' % (self.val, self.err) def copy(self): return cutil.set_owned_ptr(self, new c.Dbn0D(deref(self.d0ptr()))) def reset(self): """ () -> None Reset the distribution counters to the unfilled state. """ self.d0ptr().reset() def fill(self, weight=1.0, fraction=1.0): """ (float weight=1.0) -> None Fills the distribution with the given weight at given x. """ self.d0ptr().fill(weight, fraction) def scaleW(self, w): """ (float) -> None Scale the weights by the given factor. 
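# Counter usage sketch based on the class defined above: val/err/relErr are properties in this
# version, numEntries()/sumW() remain call-style, and dividing two counters with efficiency=True
# gives a binomial-style Scatter1D.
import yoda

c_all = yoda.Counter(path="/example/all")
c_pass = yoda.Counter(path="/example/pass")
for w, passed in [(1.0, True), (0.5, False), (2.0, True)]:
    c_all.fill(w)
    if passed:
        c_pass.fill(w)
print(c_all.val, "+-", c_all.err)              # sum of weights and its binomial error
eff = c_pass.divideBy(c_all, efficiency=True)  # -> Scatter1D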
""" self.d0ptr().scaleW(w) - #@property + @property def numEntries(self): """The number of entries""" return self.d0ptr().numEntries() - #@property + @property def effNumEntries(self): """Effective number of entries (for weighted events)""" return self.d0ptr().effNumEntries() - #@property + @property def errW(self): """Error on sumW""" return self.d0ptr().errW() - #@property + @property def relErrW(self): """Relative error on sumW""" return self.d0ptr().relErrW() - #@property + @property def sumW(self): """sum(weights)""" return self.d0ptr().sumW() - #@property + @property def sumW2(self): """sum(weights * weights)""" return self.d0ptr().sumW2() def __add__(Dbn0D self, Dbn0D other): return cutil.new_owned_cls(Dbn0D, new c.Dbn0D(deref(self.d0ptr()) + deref(other.d0ptr()))) def __sub__(Dbn0D self, Dbn0D other): return cutil.new_owned_cls(Dbn0D, new c.Dbn0D(deref(self.d0ptr()) - deref(other.d0ptr()))) diff --git a/pyext/yoda/include/Dbn1D.pyx b/pyext/yoda/include/Dbn1D.pyx --- a/pyext/yoda/include/Dbn1D.pyx +++ b/pyext/yoda/include/Dbn1D.pyx @@ -1,137 +1,137 @@ cimport util cdef class Dbn1D(util.Base): """ A 1D distribution 'counter', used and exposed by 1D histograms and their bins. """ cdef c.Dbn1D* d1ptr(self) except NULL: return self.ptr() # TODO: remove! cdef c.Dbn1D *_Dbn1D(self) except NULL: return self.ptr() def __dealloc__(self): cdef c.Dbn1D *p = self.d1ptr() if self._deallocate: del p def __init__(self): cutil.set_owned_ptr(self, new c.Dbn1D()) def __repr__(self): mean = self.mean if self.sumW > 0 else None sd = self.stdDev if self.sumW > 0 else None return '' % (mean, sd) def copy(self): return cutil.set_owned_ptr(self, new c.Dbn1D(deref(self.d1ptr()))) def reset(self): """ () -> None Reset the distribution counters to the unfilled state. """ self.d1ptr().reset() def fill(self, x, weight=1.0, fraction=1.0): """ (float x, float weight=1.0) -> None Fills the distribution with the given weight at given x. """ self.d1ptr().fill(x, weight, fraction) def scaleW(self, w): """ (float) -> None Scale the weights by the given factor. """ self.d1ptr().scaleW(w) def scaleX(self, x): """ (float) -> None Scale the x dimension by the given factor. 
""" self.d1ptr().scaleX(x) - #@property + @property def xMean(self): """Weighted mean of x""" return self.d1ptr().xMean() - - #@property + + @property def xVariance(self): """Weighted variance of x""" return self.d1ptr().xVariance() - #@property + @property def xStdDev(self): """Weighted standard deviation of x""" return self.d1ptr().xStdDev() - #@property + @property def xStdErr(self): """Weighted standard error on """ return self.d1ptr().xStdErr() - #@property + @property def xRMS(self): """Weighted root mean squared (RMS) of x""" return self.d1ptr().xRMS() - #@property + @property def numEntries(self): """The number of entries""" return self.d1ptr().numEntries() - #@property + @property def effNumEntries(self): """Effective number of entries (for weighted events)""" return self.d1ptr().effNumEntries() - #@property + @property def errW(self): """Error on sumW""" return self.d1ptr().errW() - #@property + @property def relErrW(self): """Relative error on sumW""" return self.d1ptr().relErrW() - #@property + @property def sumW(self): """sum(weights)""" return self.d1ptr().sumW() - #@property + @property def sumW2(self): """sum(weights * weights)""" return self.d1ptr().sumW2() - #@property + @property def sumWX(self): """sum(weights * xs)""" return self.d1ptr().sumWX() - #@property + @property def sumWX2(self): """sum(weights * xs * xs)""" return self.d1ptr().sumWX2() def __add__(Dbn1D self, Dbn1D other): return cutil.new_owned_cls(Dbn1D, new c.Dbn1D(deref(self.d1ptr()) + deref(other.d1ptr()))) def __sub__(Dbn1D self, Dbn1D other): return cutil.new_owned_cls(Dbn1D, new c.Dbn1D(deref(self.d1ptr()) - deref(other.d1ptr()))) diff --git a/pyext/yoda/include/Dbn2D.pyx b/pyext/yoda/include/Dbn2D.pyx --- a/pyext/yoda/include/Dbn2D.pyx +++ b/pyext/yoda/include/Dbn2D.pyx @@ -1,176 +1,176 @@ cimport util cdef class Dbn2D(util.Base): """ A 2D distribution 'counter', used and exposed by 2D histograms and 1D profiles and their bins. TODO: also provide normal scalar access to quantities like xRMS """ cdef c.Dbn2D* d2ptr(self) except NULL: return self.ptr() # TODO: remove! cdef c.Dbn2D* _Dbn2D(self) except NULL: return self.ptr() def __dealloc__(self): cdef c.Dbn2D *p = self.d2ptr() if self._deallocate: del p def __init__(self): cutil.set_owned_ptr(self, new c.Dbn2D()) def __repr__(self): mean = self.mean if self.sumW > 0 else None sd = self.stdDev if self.sumW > 0 else None return '' % (mean, sd) def copy(self): return cutil.new_owned_cls(Dbn2D, new c.Dbn2D(deref(self.d2ptr()))) def reset(self): """ () -> None Reset the distribution counters to the unfilled state.""" self.d2ptr().reset() def fill(self, x, y, weight=1.0, fraction=1.0): """ (x, y, weight=1.0) -> None Fills the distribution with the given weight at given (x, y). """ self.d2ptr().fill(x, y, weight, fraction) def scaleW(self, w): """ (float) -> None Scale the weights by the given factor. """ self.d2ptr().scaleW(w) def scaleX(self, x): """ (float) -> None Scale the x dimension by the given factor. """ self.d2ptr().scaleX(x) def scaleY(self, y): """ (float) -> None Scale the y dimension by the given factor. """ self.d2ptr().scaleY(y) def scaleXY(self, x, y): """ (float, float) -> None Scale the x and y dimensions by the given factors. 
""" self.d2ptr().scaleXY(x, y) # TODO: map direct properties from C++ - #@property + @property def mean(self): """Weighted mean of x""" return util.XY(self.d2ptr().xMean(), self.d2ptr().yMean()) - #@property + @property def variance(self): """Weighted variance of x""" return util.XY(self.d2ptr().xVariance(), self.d2ptr().yVariance()) - #@property + @property def stdDev(self): """Weighted standard deviation of x""" return util.XY(self.d2ptr().xStdDev(), self.d2ptr().yStdDev()) - #@property + @property def stdErr(self): """Weighted standard error on """ return util.XY(self.d2ptr().xStdErr(), self.d2ptr().yStdErr()) - #@property + @property def rms(self): """Weighted root mean squared (RMS) of x""" return util.XY(self.d2ptr().xRMS(), self.d2ptr().yRMS()) - #@property + @property def numEntries(self): """The number of entries""" return self.d2ptr().numEntries() - #@property + @property def effNumEntries(self): """Effective number of entries (for weighted events)""" return self.d2ptr().effNumEntries() - #@property + @property def errW(self): """Error on sumW""" return self.d2ptr().errW() - #@property + @property def relErrW(self): """Relative error on sumW""" return self.d2ptr().relErrW() - #@property + @property def sumW(self): """sum(weights)""" return self.d2ptr().sumW() - #@property + @property def sumW2(self): """sum(weights * weights)""" return self.d2ptr().sumW2() - #@property + @property def sumWX(self): """sum(weights * xs)""" return self.d2ptr().sumWX() - #@property + @property def sumWY(self): """sum(weights * ys)""" return self.d2ptr().sumWY() - #@property + @property def sumWX2(self): """sum(weights * xs * xs)""" return self.d2ptr().sumWX2() - #@property + @property def sumWY2(self): """sum(weights * ys * ys)""" return self.d2ptr().sumWY2() - #@property + @property def sumWXY(self): """sum(weights xs * ys)""" return self.d2ptr().sumWXY() def __add__(Dbn2D self, Dbn2D other): return cutil.new_owned_cls(Dbn2D, new c.Dbn2D(deref(self.d2ptr()) + deref(other.d2ptr()))) def __sub__(Dbn2D self, Dbn2D other): return cutil.new_owned_cls(Dbn2D, new c.Dbn2D(deref(self.d2ptr()) - deref(other.d2ptr()))) diff --git a/pyext/yoda/include/Dbn3D.pyx b/pyext/yoda/include/Dbn3D.pyx --- a/pyext/yoda/include/Dbn3D.pyx +++ b/pyext/yoda/include/Dbn3D.pyx @@ -1,213 +1,213 @@ cimport util cdef class Dbn3D(util.Base): """ A 3D distribution 'counter', used and exposed by 2D profiles and their bins. TODO: also provide normal scalar access to quantities like xRMS """ cdef c.Dbn3D* d3ptr(self) except NULL: return self.ptr() # TODO: remove cdef c.Dbn3D* _Dbn3D(self) except NULL: return self.ptr() def __dealloc__(self): cdef c.Dbn3D *p = self.d3ptr() if self._deallocate: del p def __init__(self): cutil.set_owned_ptr(self, new c.Dbn3D()) def __repr__(self): mean = self.mean if self.sumW > 0 else None sd = self.stdDev if self.sumW > 0 else None return 'Dbn3D(mean=%s, stddev=%s)' % (mean, sd) def copy(self): return cutil.new_owned_cls(Dbn3D, new c.Dbn3D(deref(self.d3ptr()))) def fill(self, x, y, z, weight=1.0, fraction=1.0): """ (x, y, z, weight=1.0) -> None Fills the distribution with the given weight at given (x, y). """ self.d3ptr().fill(x, y, z, weight, fraction) def reset(self): """ () -> None Reset the distribution counters to the unfilled state.""" self.d3ptr().reset() def scaleW(self, w): """ (float) -> None Scale the weights by the given factor. """ self.d3ptr().scaleW(w) def scaleX(self, x): """ (float) -> None Scale the x dimension by the given factor. 
""" self.d3ptr().scaleX(x) def scaleY(self, y): """ (float) -> None Scale the y dimension by the given factor. """ self.d3ptr().scaleY(y) def scaleZ(self, z): """ (float) -> None Scale the z dimension by the given factor. """ self.d3ptr().scaleZ(z) def scaleXYZ(self, x, y, z): """ (float, float, float) -> None Scale the x, y and z dimensions by the given factors. """ self.d3ptr().scaleXYZ(x, y, z) # TODO: map direct properties from C++ - #@property + @property def mean(self): """Weighted mean of x""" return util.XYZ(self.d3ptr().xMean(), self.d3ptr().yMean(), self.d3ptr().zMean()) - #@property + @property def variance(self): """Weighted variance of x""" return util.XYZ(self.d3ptr().xVariance(), self.d3ptr().yVariance(), self.d3ptr().zVariance()) - #@property + @property def stdDev(self): """Weighted standard deviation of x""" return util.XYZ(self.d3ptr().xStdDev(), self.d3ptr().yStdDev(), self.d3ptr().zStdDev()) - #@property + @property def stdErr(self): """Weighted standard error on """ return util.XYZ(self.d3ptr().xStdErr(), self.d3ptr().yStdErr(), self.d3ptr().zStdErr()) - #@property + @property def rms(self): """Weighted root mean squared (RMS) of x""" return util.XYZ(self.d3ptr().xRMS(), self.d3ptr().yRMS(), self.d3ptr().zRMS()) - #@property + @property def numEntries(self): """The number of entries""" return self.d3ptr().numEntries() - #@property + @property def effNumEntries(self): """Effective number of entries (for weighted events)""" return self.d3ptr().effNumEntries() - #@property + @property def errW(self): """Error on sumW""" return self.d3ptr().errW() - #@property + @property def relErrW(self): """Relative error on sumW""" return self.d3ptr().relErrW() - #@property + @property def sumW(self): """sum(weights)""" return self.d3ptr().sumW() - #@property + @property def sumW2(self): """sum(weights * weights)""" return self.d3ptr().sumW2() - #@property + @property def sumWX(self): """sum(weights * xs)""" return self.d3ptr().sumWX() - #@property + @property def sumWY(self): """sum(weights * ys)""" return self.d3ptr().sumWY() - #@property + @property def sumWZ(self): """sum(weights * zs)""" return self.d3ptr().sumWZ() - #@property + @property def sumWX2(self): """sum(weights * xs * xs)""" return self.d3ptr().sumWX2() - #@property + @property def sumWY2(self): """sum(weights * ys * ys)""" return self.d3ptr().sumWY2() - #@property + @property def sumWZ2(self): """sum(weights * zs * zs)""" return self.d3ptr().sumWZ2() - #@property + @property def sumWXY(self): """sum(weights * xs * ys)""" return self.d3ptr().sumWXY() - #@property + @property def sumWXZ(self): """sum(weights * xs * zs)""" return self.d3ptr().sumWXZ() - #@property + @property def sumWYZ(self): """sum(weights * ys * zs)""" return self.d3ptr().sumWYZ() def __add__(Dbn3D self, Dbn3D other): return cutil.new_owned_cls(Dbn3D, new c.Dbn3D(deref(self.d3ptr()) + deref(other.d3ptr()))) def __sub__(Dbn3D self, Dbn3D other): return cutil.new_owned_cls(Dbn3D, new c.Dbn3D(deref(self.d3ptr()) - deref(other.d3ptr()))) diff --git a/pyext/yoda/include/Histo1D.pyx b/pyext/yoda/include/Histo1D.pyx --- a/pyext/yoda/include/Histo1D.pyx +++ b/pyext/yoda/include/Histo1D.pyx @@ -1,512 +1,512 @@ cimport util cdef class Histo1D(AnalysisObject): """ 1D histogram, with distinction between bin areas and heights. Complete histogram binning is supported, including uniform/regular binning, variable-width binning, unbinned gaps in the covered range, and under/overflows. 
Rebinning by integer factors, or by explicit merging of contiguous bins is also supported. Rescaling of weights and/or the x axis is permitted in-place: the result is still a valid Histo1D. Binning-compatible 1D histograms may be divided, resulting in a Scatter2D since further fills would not be meaningful. Several sets of arguments are tried by the constructor in the following order. Histo1D(path="", title=""). Construct a histogram with optional path and title but no bins. Histo1D(nbins, low, high, path="", title="") Construct a histogram with optional path and title, and nbins bins uniformly distributed between low and high. Histo1D(B, path="", title=""). Construct a histogram with optional path and title, from an iterator of bins, B. """ cdef inline c.Histo1D* h1ptr(self) except NULL: return self.ptr() def __init__(self, *args, **kwargs): util.try_loop([self.__init2, self.__init5, self.__init3], *args, **kwargs) def __init2(self, path="", title=""): path = path.encode('utf-8') title = title.encode('utf-8') cutil.set_owned_ptr(self, new c.Histo1D(path, title)) # TODO: Is Cython clever enough that we can make 3a and 3b versions and let it do the type inference? def __init3(self, bins_or_edges, path="", title=""): # TODO: Do this type-checking better cdef vector[double] edges try: path = path.encode('utf-8') title = title.encode('utf-8') ## If float conversions work for all elements, it's a list of edges: edges = list(float(x) for x in bins_or_edges) cutil.set_owned_ptr(self, new c.Histo1D(edges, path, title)) except: ## Assume it's a list of HistoBin1D bins = bins_or_edges self.__init2(path, title) self.addBins(bins) def __init5(self, nbins, low, high, path="", title=""): path = path.encode('utf-8') title = title.encode('utf-8') cutil.set_owned_ptr(self, new c.Histo1D(nbins, low, high, path, title)) def __len__(self): "Number of bins" - return self.numBins() + return self.numBins def __getitem__(self, i): "Direct access to bins" - cdef size_t ii = cutil.pythonic_index(i, self.numBins()) + cdef size_t ii = cutil.pythonic_index(i, self.h1ptr().numBins()) return cutil.new_borrowed_cls(HistoBin1D, & self.h1ptr().bin(ii), self) def __repr__(self): xmean = None if self.sumW() != 0: xmean = "%0.2e" % self.xMean() return "<%s '%s' %d bins, sumw=%0.2g, xmean=%s>" % \ - (self.__class__.__name__, self.path(), - len(self.bins()), self.sumW(), xmean) + (self.__class__.__name__, self.path, + len(self.bins), self.sumW(), xmean) def reset(self): """None -> None. Reset the histogram but leave the bin structure.""" self.h1ptr().reset() def clone(self): """None -> Histo1D. Clone this Histo1D.""" return cutil.new_owned_cls(Histo1D, self.h1ptr().newclone()) def fill(self, x, weight=1.0, fraction=1.0): """(x,[w]) -> None. Fill with given x value and optional weight.""" self.h1ptr().fill(x, weight, fraction) def fillBin(self, size_t ix, weight=1.0, fraction=1.0): """(ix,[w]) -> None. 
Fill bin ix and optional weight.""" self.h1ptr().fillBin(ix, weight, fraction) - #@property + @property def totalDbn(self): """None -> Dbn1D The Dbn1D representing the total distribution.""" return cutil.new_borrowed_cls(Dbn1D, &self.h1ptr().totalDbn(), self) - #@property + @property def underflow(self): """None -> Dbn1D The Dbn1D representing the underflow distribution.""" return cutil.new_borrowed_cls(Dbn1D, &self.h1ptr().underflow(), self) - #@property + @property def overflow(self): """None -> Dbn1D The Dbn1D representing the overflow distribution.""" return cutil.new_borrowed_cls(Dbn1D, &self.h1ptr().overflow(), self) def integral(self, includeoverflows=True): """([bool]) -> float Histogram integral, optionally excluding the overflows.""" return self.h1ptr().integral(includeoverflows) def integralRange(self, int ia, int ib): """(int, int) -> float Integral between bins ia..ib inclusive""" return self.h1ptr().integralRange(ia, ib) def integralTo(self, int ia, includeunderflow=True): """(int, [bool]) -> float Integral up to bin ia inclusive, optionally excluding the underflow""" return self.h1ptr().integralRange(ia, includeunderflow) def numEntries(self, includeoverflows=True): """([bool]) -> float Number of times this histogram was filled, optionally excluding the overflows.""" return self.h1ptr().numEntries(includeoverflows) def effNumEntries(self, includeoverflows=True): """([bool]) -> float Effective number of times this histogram was filled, computed from weights, and optionally excluding the overflows.""" return self.h1ptr().effNumEntries(includeoverflows) def sumW(self, includeoverflows=True): """([bool]) -> float Sum of weights filled into this histogram, optionally excluding the overflows.""" return self.h1ptr().sumW(includeoverflows) def sumW2(self, includeoverflows=True): """([bool]) -> float Sum of weights filled into this histogram, optionally excluding the overflows.""" return self.h1ptr().sumW2(includeoverflows) def xMean(self, includeoverflows=True): """([bool]) -> float Mean x of the histogram, optionally excluding the overflows.""" return self.h1ptr().xMean(includeoverflows) def xVariance(self, includeoverflows=True): """([bool]) -> float Variance in x of the histogram, optionally excluding the overflows.""" return self.h1ptr().xVariance(includeoverflows) def xStdDev(self, includeoverflows=True): """([bool]) -> float Standard deviation in x of the histogram, optionally excluding the overflows.""" return self.h1ptr().xStdDev(includeoverflows) def xStdErr(self, includeoverflows=True): """([bool]) -> float Standard error on the mean x of the histogram, optionally excluding the overflows.""" return self.h1ptr().xStdErr(includeoverflows) def xRMS(self, includeoverflows=True): """([bool]) -> float RMS in x of the histogram, optionally excluding the overflows.""" return self.h1ptr().xRMS(includeoverflows) def scaleW(self, w): """ (float) -> None. Rescale the weights in this histogram by the factor w.""" self.h1ptr().scaleW(w) def normalize(self, normto=1.0, includeoverflows=True): """ (float, bool) -> None. 
Normalize the histogram.""" self.h1ptr().normalize(normto, includeoverflows) - #@property + @property def xMin(self): """Low x edge of the histo.""" return self.h1ptr().xMin() - #@property + @property def xMax(self): """High x edge of the histo.""" return self.h1ptr().xMax() - #@property + @property def numBins(self): """() -> int Number of bins (not including overflows).""" return self.h1ptr().numBins() - #@property + @property def bins(self): """Access the ordered bins list.""" return list(self) def bin(self, i): """Get the i'th bin (equivalent to bins[i]""" # cdef size_t ii = cutil.pythonic_index(i, self.h1ptr().numBins()) return cutil.new_borrowed_cls(HistoBin1D, & self.h1ptr().bin(i), self) def binIndexAt(self, x): """Get the bin index containing position x""" return self.h1ptr().binIndexAt(x) def binAt(self, x): """Get the bin containing position x""" # TODO: what's the problem with this direct mapping? Produces compile error re. no default constructor... #return cutil.new_borrowed_cls(HistoBin1D, & self.h1ptr().binAt(x), self) # TODO: need out-of-range check to return None? return self.bin(self.binIndexAt(x)) def addBin(self, low, high): """(low, high) -> None. Add a bin.""" self.h1ptr().addBin(low, high) def addBins(self, edges_or_bins): """Add several bins.""" # TODO: simplify / make consistent arg = list(edges_or_bins) util.try_loop([self.__addBins_edges, self.__addBins_tuples, self.__addBins_points, self.__addBins_bins], arg) def __addBins_edges(self, edges): cdef vector[double] cedges for edge in edges: cedges.push_back(edge) if len(edges): self.h1ptr().addBins(cedges) def __addBins_bins(self, bins): self.__addBins_tuples([ b.xEdges for b in bins ]) def __addBins_points(self, points): self.__addBins_tuples([ p.xWidth for p in points ]) def __addBins_tuples(self, tuples): cdef double a, b for a, b in tuples: self.h1ptr().addBin(a, b) def mergeBins(self, ia, ib): """mergeBins(ia, ib) -> None. Merge bins from indices ia through ib.""" self.h1ptr().mergeBins(ia, ib) def rebinBy(self, n, begin=0, end=None): """(n) -> None. Merge every group of n bins together (between begin and end, if specified).""" if end is None: - end = self.numBins() + end = self.numBins self.h1ptr().rebinBy(int(n), begin, end) def rebinTo(self, edges): """([edges]) -> None. Merge bins to produce the given new edges... which must be a subset of the current ones.""" self.h1ptr().rebinTo(edges) def rebin(self, arg, **kwargs): """(n) -> None or ([edges]) -> None Merge bins, like rebinBy if an int argument is given; like rebinTo if an iterable is given.""" if hasattr(arg, "__iter__"): self.rebinTo(arg, **kwargs) else: self.rebinBy(arg, **kwargs) def mkScatter(self, usefocus=False): """None -> Scatter2D. Convert this Histo1D to a Scatter2D, with y representing bin heights (not sumW) and height errors.""" cdef c.Scatter2D s2 = c.mkScatter_Histo1D(deref(self.h1ptr()), usefocus) return cutil.new_owned_cls(Scatter2D, s2.newclone()) def toIntegral(self, efficiency=False, includeunderflow=True, includeoverflow=True): """None -> Scatter2D. Convert this Histo1D to a Scatter2D representing an integral (i.e. cumulative) histogram constructed from this differential one. The efficiency argument is used to construct an 'efficiency integral' histogram and the includeXXXflow bools determine whether under and overflows are included in computing the (efficiency) integral. 
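# End-to-end Histo1D sketch using the methods defined above: construct with the (nbins, low, high)
# form, fill, normalize and convert to a Scatter2D. The final yoda.write call is not part of this
# hunk; it is assumed to be the usual top-level writer helper of the yoda package.
import random
import yoda

h = yoda.Histo1D(20, 0.0, 1.0, path="/example/h", title="uniform fills")
for _ in range(1000):
    h.fill(random.random())
h.normalize(1.0)      # rescale weights so the integral (overflows included) is 1
s = h.mkScatter()     # Scatter2D of bin heights and height errors
yoda.write([h, s], "example.yoda")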
""" cdef c.Scatter2D s if not efficiency: s = c.Histo1D_toIntegral(deref(self.h1ptr()), includeunderflow) else: s = c.Histo1D_toIntegralEff(deref(self.h1ptr()), includeunderflow, includeoverflow) return cutil.new_owned_cls(Scatter2D, s.newclone()) def divideBy(self, Histo1D h, efficiency=False): """Histo1D -> Scatter2D Divide this histogram by h, returning a Scatter2D. The optional 'efficiency' argument, if set True, will use a binomial efficiency treatment of the errors. """ # if type(h) is not Histo1D: # raise ValueError("Histograms must be of the same type to be divided") cdef c.Scatter2D s if not efficiency: s = c.Histo1D_div_Histo1D(deref(self.h1ptr()), deref(h.h1ptr())) else: s = c.Histo1D_eff_Histo1D(deref(self.h1ptr()), deref(h.h1ptr())) return cutil.new_owned_cls(Scatter2D, s.newclone()) ## In-place special methods def __iadd__(Histo1D self, Histo1D other): c.Histo1D_iadd_Histo1D(self.h1ptr(), other.h1ptr()) return self def __isub__(Histo1D self, Histo1D other): c.Histo1D_isub_Histo1D(self.h1ptr(), other.h1ptr()) return self # def __imul__(Histo1D self, double x): # c.Histo1D_imul_dbl(self.h1ptr(), x) # return self # def __idiv__(Histo1D self, double x): # c.Histo1D_idiv_dbl(self.h1ptr(), x) # return self ## Unbound special methods # # TODO: only to bootstrap sum(), but doesn't work properly? Seems to treat *self* as the int... # def __radd__(Histo1D self, zero): # #assert zero == 0 # print "FOO" # return self.clone() def __add__(Histo1D self, Histo1D other): # print "BAR" h = Histo1D() cutil.set_owned_ptr(h, c.Histo1D_add_Histo1D(self.h1ptr(), other.h1ptr())) return h # TODO: Cython doesn't support type overloading for special functions? # def __add__(Histo1D self, int x): # """ # Special operator support to allow use of sum(histos) which starts from 0. # """ # assert(x == 0) # return self # TODO: Cython doesn't support type overloading for special functions? # def __radd__(Histo1D self, int x): # """ # Special operator support to allow use of sum(histos) which starts from 0. # """ # assert(x == 0) # return self def __sub__(Histo1D self, Histo1D other): h = Histo1D() cutil.set_owned_ptr(h, c.Histo1D_sub_Histo1D(self.h1ptr(), other.h1ptr())) return h # def __mul__(Histo1D self, double x): # h = c.Histo1D_mul_dbl(self.h1ptr(), x) # return h # def __rmul__(Histo1D self, double x): # h = c.Histo1D_mul_dbl(self.h1ptr(), x) # return h def __div__(Histo1D self, Histo1D other): return self.divideBy(other) def __truediv__(Histo1D self, Histo1D other): return self.divideBy(other) ## Functions for array-based plotting, chi2 calculations, etc. 
# def sumWs(self): # """All sumWs of the histo.""" # return [b.sumW for b in self.bins] def _mknp(self, xs): try: import numpy return numpy.array(xs) except ImportError: return xs #@property def xEdges(self): """All x edges of the histo.""" return self._mknp(self.h1ptr().xEdges()) def xMins(self): """All x low edges of the histo.""" - return self._mknp([b.xMin() for b in self.bins()]) + return self._mknp([b.xMin for b in self.bins]) def xMaxs(self): """All x high edges of the histo.""" - return self._mknp([b.xMax() for b in self.bins()]) + return self._mknp([b.xMax for b in self.bins]) def xMids(self): """All x bin midpoints of the histo.""" - return self._mknp([b.xMid() for b in self.bins()]) + return self._mknp([b.xMid for b in self.bins]) def xFoci(self): """All x bin foci of the histo.""" - return self._mknp([b.xFocus() for b in self.bins()]) + return self._mknp([b.xFocus for b in self.bins]) def xVals(self, foci=False): return self.xFoci() if foci else self.xMids() def xErrs(self, foci=False): if foci: - return [(b.xFocus()-b.xMin(), b.xMax()-b.xFocus()) for b in self.bins()] + return [(b.xFocus-b.xMin, b.xMax-b.xFocus) for b in self.bins] else: - return [(b.xMid()-b.xMin(), b.xMax()-b.xMid()) for b in self.bins()] + return [(b.xMid-b.xMin, b.xMax-b.xMid) for b in self.bins] def xMin(self): """Lowest x value.""" return min(self.xMins()) def xMax(self): """Highest x value.""" return max(self.xMaxs()) def heights(self): """All y heights of the histo.""" - return self._mknp([b.height() for b in self.bins()]) + return self._mknp([b.height for b in self.bins]) def areas(self): """All areas of the histo.""" - return self._mknp([b.area() for b in self.bins()]) + return self._mknp([b.area for b in self.bins]) def yVals(self, area=False): return self.areas() if area else self.heights() def heightErrs(self): #, asymm=False): """All height errors of the histo. TODO: asymm arg / heightErrsMinus/Plus? """ - return self._mknp([b.heightErr() for b in self.bins()]) + return self._mknp([b.heightErr for b in self.bins]) def areaErrs(self): #, asymm=False): """All area errors of the histo. TODO: asymm arg / areaErrsMinus/Plus? """ # Use symmetrised errors by default, or return a list of (-,+) pairs if asymm is requested.""" # if asymm: # pass #else: - return self._mknp([b.areaErr() for b in self.bins()]) + return self._mknp([b.areaErr for b in self.bins]) def yErrs(self, area=False): return self.areaErrs() if area else self.heightErrs() def yMins(self, area=False): ys = self.yVals(area) es = self.yErrs(area) return self._mknp([y-e for (y,e) in zip(ys,es)]) def yMaxs(self, area=False): ys = self.yVals(area) es = self.yErrs(area) return self._mknp([y+e for (y,e) in zip(ys,es)]) def yMin(self): """Lowest x value.""" return min(self.yMins()) def yMax(self): """Highest y value.""" return max(self.yMaxs()) ## Convenience alias H1D = Histo1D diff --git a/pyext/yoda/include/Histo2D.pyx b/pyext/yoda/include/Histo2D.pyx --- a/pyext/yoda/include/Histo2D.pyx +++ b/pyext/yoda/include/Histo2D.pyx @@ -1,502 +1,502 @@ cimport util cdef class Histo2D(AnalysisObject): """ 2D histogram. Complete histogramming is supported, including uniform/regular binning, variable-width bininng, unbinned gaps in the covered range, and outflows (under/overflows around all edges and corners). Rebinning by integer factors, or by explicit merging of contiguous bins is also supported, but in development. Rescaling of weights and/or the x axis is permitted in-place: the result is still a valid Histo2D. 
Binning-compatible 2D histograms may be divided, resulting in a Scatter3D since further fills would not be meaningful. Several sets of arguments are tried by the constructor in the following order. Histo2D(path="", title=""). Construct a histogram with optional path and title but no bins. Histo2D(nxbins, xlow, xhigh, nybins, ylow, yhigh, path="", title=""). Construct a histogram with nxbins on the x axis and nybins on the y axis, distributed linearly between the respective low--high limits. """ cdef inline c.Histo2D* h2ptr(self) except NULL: return self.ptr() def __init__(self, *args, **kwargs): util.try_loop([self.__init2, self.__init4, self.__init8], *args, **kwargs) def __init2(Histo2D self, path="", title=""): path = path.encode('utf-8') title = title.encode('utf-8') cutil.set_owned_ptr(self, new c.Histo2D(path, title)) def __init4(Histo2D self, xedges, yedges, path="", title=""): path = path.encode('utf-8') title = title.encode('utf-8') # TODO: Do some type-checking and allow iterables of HistoBin2D as well? cutil.set_owned_ptr(self, new c.Histo2D(xedges, yedges, path, title)) def __init8(Histo2D self, nxbins, xlow, xhigh, nybins, ylow, yhigh, path="", title=""): path = path.encode('utf-8') title = title.encode('utf-8') cutil.set_owned_ptr(self, new c.Histo2D(nxbins, xlow, xhigh, nybins, ylow, yhigh, path, title)) def __len__(self): "Number of bins" - return self.numBins() + return self.numBins def __getitem__(self, py_ix): "Direct access to bins" - cdef size_t i = cutil.pythonic_index(py_ix, self.numBins()) + cdef size_t i = cutil.pythonic_index(py_ix, self.h2ptr().numBins()) return cutil.new_borrowed_cls(HistoBin2D, & self.h2ptr().bins().at(i), self) def __repr__(self): - return "<%s '%s' %d bins, sumw=%.2g>" % (self.__class__.__name__, self.path(), len(self.bins()), self.sumW()) + return "<%s '%s' %d bins, sumw=%.2g>" % (self.__class__.__name__, self.path, len(self.bins), self.sumW()) def reset(self): """None -> None. Reset the histogram but leave the bin structure.""" self.h2ptr().reset() def clone(self): """None -> Histo2D. Clone this Profile2D.""" return cutil.new_owned_cls(Histo2D, self.h2ptr().newclone()) def fill(self, double x, double y, weight=1.0, fraction=1.0): """(x,y,[w]) -> None. Fill with given x,y values and optional weight.""" self.h2ptr().fill(x, y, weight, fraction) def fillBin(self, size_t i, weight=1.0, fraction=1.0): """(i,[w]) -> None. 
Fill bin i and optional weight.""" self.h2ptr().fillBin(i, weight, fraction) - #@property + @property def totalDbn(self): """() -> Dbn2D The Dbn2D representing the total distribution.""" return cutil.new_borrowed_cls(Dbn2D, &self.h2ptr().totalDbn(), self) # TODO: reinstate # def outflow(self, ix, iy): # """(ix,iy) -> Dbn2D # The Dbn2D representing the ix,iy outflow distribution.""" # return cutil.new_borrowed_cls(Dbn2D, &self.h2ptr().outflow(ix, iy), self) def integral(self, includeoverflows=True): """([bool]) -> float Histogram integral, optionally excluding the overflows.""" return self.h2ptr().integral(includeoverflows) def numEntries(self, includeoverflows=True): """([bool]) -> float Number of times this histogram was filled, optionally excluding overflows.""" return self.h2ptr().numEntries(includeoverflows) def effNumEntries(self, includeoverflows=True): """([bool]) -> float Effective number of times this histogram was filled, computed from weights and optionally excluding overflows.""" return self.h2ptr().effNumEntries(includeoverflows) def sumW(self, includeoverflows=True): """([bool]) -> float Sum of weights filled into this histogram.""" return self.h2ptr().sumW(includeoverflows) def sumW2(self, includeoverflows=True): """([bool]) -> float Sum of squared weights filled into this histogram.""" return self.h2ptr().sumW2(includeoverflows) def xMean(self, includeoverflows=True): """([bool]) -> float Mean x of the histogram, optionally excluding the overflows.""" return self.h2ptr().xMean(includeoverflows) def yMean(self, includeoverflows=True): """([bool]) -> float Mean y of the histogram, optionally excluding the overflows.""" return self.h2ptr().yMean(includeoverflows) def xyMean(self, includeoverflows=True): """([bool]) -> (float,float) Mean (x,y) of the histogram, optionally excluding the overflows.""" return util.XY(self.xMean(includeoverflows), self.yMean(includeoverflows)) def xVariance(self, includeoverflows=True): """([bool]) -> float Variance in x of the histogram, optionally excluding the overflows.""" return self.h2ptr().xVariance(includeoverflows) def yVariance(self, includeoverflows=True): """([bool]) -> float Variance in y of the histogram, optionally excluding the overflows.""" return self.h2ptr().yVariance(includeoverflows) def xyVariance(self, includeoverflows=True): """([bool]) -> (float,float) Variances in (x,y) of the histogram, optionally excluding the overflows.""" return util.XY(self.xVariance(includeoverflows), self.yVariance(includeoverflows)) def xStdDev(self, includeoverflows=True): """([bool]) -> float Standard deviation in x of the histogram, optionally excluding the overflows.""" return self.h2ptr().xStdDev(includeoverflows) def yStdDev(self, includeoverflows=True): """([bool]) -> float Standard deviation in y of the histogram, optionally excluding the overflows.""" return self.h2ptr().yStdDev(includeoverflows) def xyStdDev(self, includeoverflows=True): """([bool]) -> (float,float) Standard deviations in (x,y) of the histogram, optionally excluding the overflows.""" return util.XY(self.xStdDev(includeoverflows), self.yStdDev(includeoverflows)) def xStdErr(self, includeoverflows=True): """([bool]) -> float Standard error on the mean x of the histogram, optionally excluding the overflows.""" return self.h2ptr().xStdErr(includeoverflows) def yStdErr(self, includeoverflows=True): """([bool]) -> float Standard error on the mean y of the histogram, optionally excluding the overflows.""" return self.h2ptr().yStdErr(includeoverflows) def xyStdErr(self, 
includeoverflows=True): """([bool]) -> (float,float) Standard errors on the mean (x,y) of the histogram, optionally excluding the overflows.""" return util.XY(self.xStdErr(includeoverflows), self.yStdErr(includeoverflows)) def xRMS(self, includeoverflows=True): """([bool]) -> float RMS in x of the histogram, optionally excluding the overflows.""" return self.h2ptr().xRMS(includeoverflows) def yRMS(self, includeoverflows=True): """([bool]) -> float RMS in y of the histogram, optionally excluding the overflows.""" return self.h2ptr().yRMS(includeoverflows) def xyRMS(self, includeoverflows=True): """([bool]) -> (float,float) RMS in (x,y) of the histogram, optionally excluding the overflows.""" return util.XY(self.xRMS(includeoverflows), self.yRMS(includeoverflows)) def scaleW(self, w): """(float) -> None. Rescale the weights in this histogram by the factor w.""" self.h2ptr().scaleW(w) def normalize(self, double normto=1.0, bint includeoverflows=True): """(float, bool) -> None. Normalize the histogram.""" self.h2ptr().normalize(normto, includeoverflows) - #@property + @property def xMin(self): """Low x edge of the histo.""" return self.h2ptr().xMin() - #@property + @property def xMax(self): """High x edge of the histo.""" return self.h2ptr().xMax() - #@property + @property def yMin(self): """Low y edge of the histo.""" return self.h2ptr().yMin() - #@property + @property def yMax(self): """High y edge of the histo.""" return self.h2ptr().yMax() - #@property + @property def numBins(self): """() -> int Number of bins (not including overflows).""" return self.h2ptr().numBins() - #@property + @property def numBinsX(self): """() -> int Number of bins (edges) along the x axis.""" return self.h2ptr().numBinsX() - #@property + @property def numBinsY(self): """() -> int Number of bins (edges) along the y axis.""" return self.h2ptr().numBinsY() - #@property + @property def bins(self): """Access the ordered bins list.""" return [self.bin(i) for i in xrange( self.h2ptr().numBins())] def bin(self, i): """Get the i'th bin""" # cdef size_t ii = cutil.pythonic_index(i, self.h2ptr().numBins()) return cutil.new_borrowed_cls(HistoBin2D, & self.h2ptr().bin(i), self) # TODO: it's more intuitive to have an index for each axis # def bin(self, i, j): # """Get the (i,j)'th bin""" # # cdef size_t ii = cutil.pythonic_index(i, self.h2ptr().numBins()) # # cdef size_t jj = cutil.pythonic_index(j, self.h2ptr().numBins()) # return cutil.new_borrowed_cls(HistoBin2D, & self.h2ptr().bin(i,j), self) def binIndexAt(self, x, y): """Get the bin index pair containing position (x,y)""" return self.h2ptr().binIndexAt(x, y) def binAt(self, x, y): """Get the bin containing position (x,y)""" # TODO: what's the problem with this direct mapping? Produces compile error re. no default constructor... #return cutil.new_borrowed_cls(HistoBin2D, & self.h2ptr().binAt(x,y), self) # TODO: need out-of-range check to return None? return self.bin(self.binIndexAt(x,y)) def addBin(self, xlow, xhigh, ylow, yhigh): """Add a bin.""" self.h2ptr().addBin(pair[double, double](xlow, xhigh), pair[double, double](ylow, yhigh)) return self def addBins(self, bounds): """Add several bins.""" # TODO: simplify / make consistent for xlow, xhigh, ylow, yhigh in bounds: self.h2ptr().addBin(pair[double, double](xlow, xhigh), pair[double, double](ylow, yhigh)) # def mergeBins(self, size_t a, size_t b): # self.h2ptr().mergeBins(a, b) # def rebin(self, int n): # self.h2ptr().rebin(n) def mkScatter(self, usefocus=False): """None -> Scatter3D. 
Convert this Histo2D to a Scatter3D, with y representing bin heights (not sumW) and height errors.""" cdef c.Scatter3D s3 = c.mkScatter_Histo2D(deref(self.h2ptr()), usefocus) return cutil.new_owned_cls(Scatter3D, s3.newclone()) def divideBy(self, Histo2D h, efficiency=False): """Histo2D -> Scatter3D Divide this histogram by Histo2D h, returning a Scatter3D. The optional 'efficiency' argument, if set True, will use a binomial efficiency treatment of the errors. """ # if type(h) is not Histo2D: # raise ValueError("Histograms must be of the same type to be divided") # TODO: allow dividing profiles by histos, etc.? (But then what do the errors mean? Add in quad?) cdef c.Scatter3D s if not efficiency: s = c.Histo2D_div_Histo2D(deref(self.h2ptr()), deref(h.h2ptr())) else: s = c.Histo2D_eff_Histo2D(deref(self.h2ptr()), deref(h.h2ptr())) return cutil.new_owned_cls(Scatter3D, s.newclone()) def __iadd__(Histo2D self, Histo2D other): c.Histo2D_iadd_Histo2D(self.h2ptr(), other.h2ptr()) return self def __isub__(Histo2D self, Histo2D other): c.Histo2D_isub_Histo2D(self.h2ptr(), other.h2ptr()) return self def __add__(Histo2D self, Histo2D other): h = Histo2D() cutil.set_owned_ptr(h, c.Histo2D_add_Histo2D(self.h2ptr(), other.h2ptr())) return h def __sub__(Histo2D self, Histo2D other): h = Histo2D() cutil.set_owned_ptr(h, c.Histo2D_sub_Histo2D(self.h2ptr(), other.h2ptr())) return h def __div__(Histo2D self, Histo2D other): return self.divideBy(other) def __truediv__(Histo2D self, Histo2D other): return self.divideBy(other) ## Functions for array-based plotting, chi2 calculations, etc. # def sumWs(self): # """All sumWs of the histo.""" - # return [b.sumW for b in self.bins()] + # return [b.sumW for b in self.bins] def _mknp(self, xs): try: import numpy return numpy.array(xs) except ImportError: return xs def xEdges(self): """All x edges of the histo.""" return self._mknp(self.h2ptr().xEdges()) def xMins(self): """All x low edges of the histo.""" - return self._mknp([b.xMin() for b in self.bins()]) + return self._mknp([b.xMin for b in self.bins]) def xMins(self): """All x low edges of the histo.""" - return self._mknp([b.xMin() for b in self.bins()]) + return self._mknp([b.xMin for b in self.bins]) def xMaxs(self): """All x high edges of the histo.""" - return self._mknp([b.xMax() for b in self.bins()]) + return self._mknp([b.xMax for b in self.bins]) def xMids(self): """All x bin midpoints of the histo.""" - return self._mknp([b.xMid() for b in self.bins()]) + return self._mknp([b.xMid for b in self.bins]) def xFoci(self): """All x bin foci of the histo.""" - return self._mknp([b.xFocus() for b in self.bins()]) + return self._mknp([b.xFocus for b in self.bins]) def xVals(self, foci=False): return self.xFoci() if foci else self.xMids() def xErrs(self, foci=False): if foci: - return [(b.xFocus()-b.xMin(), b.xMax()-b.xFocus()) for b in self.bins()] + return [(b.xFocus-b.xMin, b.xMax-b.xFocus) for b in self.bins] else: - return [(b.xMid()-b.xMin(), b.xMax()-b.xMid()) for b in self.bins()] + return [(b.xMid-b.xMin, b.xMax-b.xMid) for b in self.bins] # def xMin(self): # """Lowest x value.""" # return min(self.xMins()) # def xMax(self): # """Highest x value.""" # return max(self.xMaxs()) def yEdges(self): """All y edges of the histo.""" return self._mknp(self.h2ptr().yEdges()) def yMins(self): """All y low edges of the histo.""" - return self._mknp([b.yMin() for b in self.bins()]) + return self._mknp([b.yMin for b in self.bins]) def yMaxs(self): """All y high edges of the histo.""" - return self._mknp([b.yMax() 
for b in self.bins()]) + return self._mknp([b.yMax for b in self.bins]) def yMids(self): """All y bin midpoints of the histo.""" - return self._mknp([b.yMid() for b in self.bins()]) + return self._mknp([b.yMid for b in self.bins]) def yFoci(self): """All y bin foci of the histo.""" - return self._mknp([b.yFocus() for b in self.bins()]) + return self._mknp([b.yFocus for b in self.bins]) def yVals(self, foci=False): return self.yFoci() if foci else self.yMids() def yErrs(self, foci=False): if foci: - return [(b.yFocus()-b.yMin(), b.yMax()-b.yFocus()) for b in self.bins()] + return [(b.yFocus-b.yMin, b.yMax-b.yFocus) for b in self.bins] else: - return [(b.yMid()-b.yMin(), b.yMax()-b.yMid()) for b in self.bins()] + return [(b.yMid-b.yMin, b.yMax-b.yMid) for b in self.bins] # def yMin(self): # """Lowest y value.""" # return min(self.yMins()) # def yMax(self): # """Highest y value.""" # return max(self.yMaxs()) def heights(self): """All z heights of the histo.""" - return self._mknp([b.height() for b in self.bins()]) + return self._mknp([b.height for b in self.bins]) def volumes(self): """All volumes of the histo.""" - return self._mknp([b.area() for b in self.bins()]) + return self._mknp([b.volume for b in self.bins]) def zVals(self, vol=False): return self.volumes() if vol else self.heights() def heightErrs(self): #, asymm=False): """All height errors of the histo. TODO: asymm arg / heightErrsMinus/Plus? """ - return self._mknp([b.heightErr() for b in self.bins()]) + return self._mknp([b.heightErr for b in self.bins]) def volumeErrs(self): #, asymm=False): """All volume errors of the histo. TODO: asymm arg / volumeErrsMinus/Plus? """ # Use symmetrised errors by default, or return a list of (-,+) pairs if asymm is requested.""" # if asymm: # pass #else: - return self._mknp([b.volumeErr() for b in self.bins()]) + return self._mknp([b.volumeErr for b in self.bins]) def zErrs(self, vol=False): return self.volumeErrs() if vol else self.heightErrs() def zMins(self, area=False): zs = self.zVals(area) es = self.zErrs(area) return self._mknp([z-e for (z,e) in zip(zs,es)]) def zMaxs(self, area=False): zs = self.zVals(area) es = self.zErrs(area) return self._mknp([z+e for (z,e) in zip(zs,es)]) def zMin(self, area=False): """Lowest z value.""" return min(self.zMins(area)) def zMax(self, area=False): """Highest z value.""" return max(self.zMaxs(area)) ## Convenience alias H2D = Histo2D diff --git a/pyext/yoda/include/HistoBin1D.pyx b/pyext/yoda/include/HistoBin1D.pyx --- a/pyext/yoda/include/HistoBin1D.pyx +++ b/pyext/yoda/include/HistoBin1D.pyx @@ -1,88 +1,88 @@ cdef class HistoBin1D(Bin1D_Dbn1D): cdef inline c.HistoBin1D* hb1ptr(self) except NULL: return self.ptr() # TODO: remove cdef inline c.HistoBin1D* _HistoBin1D(self) except NULL: return self.ptr() def __init__(self, double a, double b): cutil.set_owned_ptr(self, new c.HistoBin1D(a, b)) # def fill(self, value, double weight=1.0, fraction=1.0): # """ # (value=None, weight=1.0) # Fill this bin with the given value and given weight. # """ # self.hb1ptr().fill(value, weight, fraction) # def fillBin(self, weight=1.0, fraction=1.0): # """ # (weight=1.0) -> None. Fill this bin with given weight. # """ # self.hb1ptr().fillBin(weight, fraction) - #@property + @property def area(self): """ b.area <==> b.sumW The area of the bin is the sum of weights of the bin; it is independent of width. """ return self.hb1ptr().area() - #@property + @property def height(self): """ b.height <==> b.area / b.width The height of the bin is defined as the area divided by the width.
""" return self.hb1ptr().height() - #@property + @property def areaErr(self): """ Error computed using binomial statistics on squared sum of bin weights, i.e. s.areaErr = sqrt(s.sumW2) """ return self.hb1ptr().areaErr() - #@property + @property def heightErr(self): """ Height error - scales the s.areaError by the reciprocal of the bin width. """ return self.hb1ptr().heightErr() - #@property + @property def relErr(self): """ Relative error - same for either area or height interpretations. """ return self.hb1ptr().relErr() def __iadd__(HistoBin1D self, HistoBin1D other): c.HistoBin1D_iadd_HistoBin1D(self.hb1ptr(), other.hb1ptr()) return self def __isub__(HistoBin1D self, HistoBin1D other): c.HistoBin1D_isub_HistoBin1D(self.hb1ptr(), other.hb1ptr()) return self def __add__(HistoBin1D a, HistoBin1D b): return cutil.new_owned_cls(HistoBin1D, new c.HistoBin1D(deref(a.hb1ptr()) + deref(b.hb1ptr()))) def __sub__(HistoBin1D a, HistoBin1D b): return cutil.new_owned_cls(HistoBin1D, new c.HistoBin1D(deref(a.hb1ptr()) - deref(b.hb1ptr()))) diff --git a/pyext/yoda/include/HistoBin2D.pyx b/pyext/yoda/include/HistoBin2D.pyx --- a/pyext/yoda/include/HistoBin2D.pyx +++ b/pyext/yoda/include/HistoBin2D.pyx @@ -1,46 +1,46 @@ # TODO: tidy once we have a working Histo2D cdef class HistoBin2D(Bin2D_Dbn2D): cdef inline c.HistoBin2D* hb2ptr(self) except NULL: return self.ptr() # TODO: remove cdef inline c.HistoBin2D* _HistoBin2D(self) except NULL: return self.ptr() def __init__(self, xlow, xhigh, ylow, yhigh): cutil.set_owned_ptr(self, new c.HistoBin2D(xlow, xhigh, ylow, yhigh)) # def fill(self, x, y, weight=1.0, fraction=1.0): # self.hb2ptr().fill(x, y, weight, fraction) - #@property + @property def volume(self): return self.hb2ptr().volume() - #@property + @property def height(self): return self.hb2ptr().height() - #@property + @property def volumeErr(self): return self.hb2ptr().volumeErr() - #@property + @property def heightErr(self): return self.hb2ptr().heightErr() - #@property + @property def relErr(self): return self.hb2ptr().relErr() def __add__(HistoBin2D a, HistoBin2D b): return cutil.new_owned_cls(HistoBin2D, new c.HistoBin2D(deref(a.hb2ptr()) + deref(b.hb2ptr()))) def __sub__(HistoBin2D a, HistoBin2D b): return cutil.new_owned_cls(HistoBin2D, new c.HistoBin2D(deref(a.hb2ptr()) - deref(b.hb2ptr()))) def __repr__(self): return 'HistoBin2D(%g, %g; %g, %g; sumw=%g)' % (self.xEdges[0], self.xEdges[1], self.yEdges[0], self.yEdges[1], self.sumW) diff --git a/pyext/yoda/include/IO.pyx b/pyext/yoda/include/IO.pyx --- a/pyext/yoda/include/IO.pyx +++ b/pyext/yoda/include/IO.pyx @@ -1,257 +1,257 @@ # cython: c_string_type=unicode """Readers and writers The basic idea here is to provide Python IO semantics by using Python to do the IO. Otherwise we get C++ IO semantics in Python. It also means we can use dummy files, e.g. anything with read/write attributes. Generally a much better idea than just 'give this a filename', and well worth the inefficiencies and potential memory limits. 
""" import sys ## Check if a string matches any of the given patterns, and that it doesn't match any unpatterns (for path filtering) def _pattern_check(name, patterns, unpatterns): import re if patterns: if not isinstance(patterns, (list,tuple)): patterns = [patterns] ## Compile on the fly: works because compile(compiled_re) -> compiled_re if not any(re.compile(patt).search(name) for patt in patterns): return False if unpatterns: if not isinstance(unpatterns, (list,tuple)): unpatterns = [unpatterns] ## Compile on the fly: works because compile(compiled_re) -> compiled_re if any(re.compile(patt).search(name) for patt in unpatterns): return False return True ## Make a Python list of analysis objects from a C++ vector of them cdef list _aobjects_to_list(vector[c.AnalysisObject*]* aobjects, patterns, unpatterns): cdef list out = [] cdef c.AnalysisObject* ao cdef size_t i for i in range(aobjects.size()): ao = deref(aobjects)[i] ## NOTE: automatic type conversion by passing the type() as a key to globals() newao = cutil.new_owned_cls(globals()[ao.type().decode('utf-8')], ao) - if _pattern_check(newao.path(), patterns, unpatterns): + if _pattern_check(newao.path, patterns, unpatterns): out.append(newao) return out ## Make a Python dict of analysis objects from a C++ vector of them cdef dict _aobjects_to_dict(vector[c.AnalysisObject*]* aobjects, patterns, unpatterns): cdef dict out = {} cdef c.AnalysisObject* ao cdef size_t i for i in range(aobjects.size()): ao = deref(aobjects)[i] ## NOTE: automatic type conversion by passing the type() as a key to globals() newao = cutil.new_owned_cls( globals()[ao.type().decode('utf-8')], ao) - if _pattern_check(newao.path(), patterns, unpatterns): - out[newao.path()] = newao + if _pattern_check(newao.path, patterns, unpatterns): + out[newao.path] = newao return out # ## Set a istringstream's string from a C/Python string # cdef void _make_iss(c.istringstream &iss, string s): # iss.str(s) ## Read a file's contents as a returned string ## The file argument can either be a file object, filename, or special "-" reference to stdin def _str_from_file(file_or_filename): if hasattr(file_or_filename, 'read'): s = file_or_filename.read() elif file_or_filename == "-": s = sys.stdin.read() else: with open(file_or_filename, "r") as f: s = f.read() return s ## Write a string to a file ## The file argument can either be a file object, filename, or special "-" reference to stdout def _str_to_file(s, file_or_filename): s = s.decode('utf-8') if hasattr(file_or_filename, 'write'): file_or_filename.write(s) elif file_or_filename == "-": sys.stdout.write(s) else: with open(file_or_filename, "w") as f: f.write(s) ## ## Readers ## def read(filename, asdict=True, patterns=None, unpatterns=None): """ Read data objects from the provided filename, auto-determining the format from the file extension. The loaded data objects can be filtered on their path strings, using the optional patterns and unpatterns arguments. These can be strings, compiled regex objects with a 'match' method, or any iterable of those types. If given, only analyses with paths which match at least one pattern, and do not match any unpatterns, will be returned. Returns a dict or list of analysis objects depending on the asdict argument. 
""" # cdef c.istringstream iss # cdef vector[c.AnalysisObject*] aobjects # with open(filename, "r") as f: # s = f.read() # _make_iss(iss, s.encode('utf-8')) # c.Reader_create(filename.encode('utf-8')).read(iss, aobjects) # return _aobjects_to_dict(&aobjects, patterns, unpatterns) if asdict \ # else _aobjects_to_list(&aobjects, patterns, unpatterns) # cdef vector[c.AnalysisObject*] aobjects c.IO_read_from_file(filename.encode('utf-8'), aobjects) return _aobjects_to_dict(&aobjects, patterns, unpatterns) if asdict \ else _aobjects_to_list(&aobjects, patterns, unpatterns) def readYODA(filename, asdict=True, patterns=None, unpatterns=None): """ Read data objects from the provided YODA-format file. The loaded data objects can be filtered on their path strings, using the optional patterns and unpatterns arguments. These can be strings, compiled regex objects with a 'match' method, or any iterable of those types. If given, only analyses with paths which match at least one pattern, and do not match any unpatterns, will be returned. Returns a dict or list of analysis objects depending on the asdict argument. """ # cdef c.istringstream iss cdef vector[c.AnalysisObject*] aobjects # s = _str_from_file(file_or_filename) # _make_iss(iss, s.encode('utf-8')) # c.ReaderYODA_create().read(iss, aobjects) c.ReaderYODA_create().read_from_file(filename.encode('utf-8'), aobjects) return _aobjects_to_dict(&aobjects, patterns, unpatterns) if asdict \ else _aobjects_to_list(&aobjects, patterns, unpatterns) def readFLAT(filename, asdict=True, patterns=None, unpatterns=None): """ Read data objects from the provided FLAT-format file. The loaded data objects can be filtered on their path strings, using the optional patterns and unpatterns arguments. These can be strings, compiled regex objects with a 'match' method, or any iterable of those types. If given, only analyses with paths which match at least one pattern, and do not match any unpatterns, will be returned. Returns a dict or list of analysis objects depending on the asdict argument. """ # cdef c.istringstream iss cdef vector[c.AnalysisObject*] aobjects # s = _str_from_file(file_or_filename) # _make_iss(iss, s.encode('utf-8')) # c.ReaderFLAT_create().read(iss, aobjects) c.ReaderFLAT_create().read_from_file(filename.encode('utf-8'), aobjects) return _aobjects_to_dict(&aobjects, patterns, unpatterns) if asdict \ else _aobjects_to_list(&aobjects, patterns, unpatterns) def readAIDA(filename, asdict=True, patterns=None, unpatterns=None): """ Read data objects from the provided AIDA-format file. The loaded data objects can be filtered on their path strings, using the optional patterns and unpatterns arguments. These can be strings, compiled regex objects with a 'match' method, or any iterable of those types. If given, only analyses with paths which match at least one pattern, and do not match any unpatterns, will be returned. Returns a dict or list of analysis objects depending on the asdict argument. DEPRECATED: AIDA is a dead format. At some point we will stop supporting it. 
""" # cdef c.istringstream iss cdef vector[c.AnalysisObject*] aobjects # s = _str_from_file(file_or_filename) # _make_iss(iss, s.encode('utf-8')) # c.ReaderAIDA_create().read(iss, aobjects) c.ReaderAIDA_create().read_from_file(filename.encode('utf-8'), aobjects) return _aobjects_to_dict(&aobjects, patterns, unpatterns) if asdict \ else _aobjects_to_list(&aobjects, patterns, unpatterns) ## ## Writers ## def write(ana_objs, filename): """ Write data objects to the provided filename, auto-determining the format from the file extension. """ # cdef c.ostringstream oss cdef vector[c.AnalysisObject*] vec cdef AnalysisObject a aolist = ana_objs.values() if hasattr(ana_objs, "values") else ana_objs \ if hasattr(ana_objs, "__iter__") else [ana_objs] for a in aolist: vec.push_back(a._AnalysisObject()) c.IO_write_to_file(filename.encode('utf-8'), vec) #_str_to_file(oss.str(), filename) def writeYODA(ana_objs, file_or_filename): """ Write data objects to the provided file in YODA format. """ cdef c.ostringstream oss cdef vector[c.AnalysisObject*] vec cdef AnalysisObject a aolist = ana_objs.values() if hasattr(ana_objs, "values") else ana_objs \ if hasattr(ana_objs, "__iter__") else [ana_objs] for a in aolist: vec.push_back(a._AnalysisObject()) if type(file_or_filename) is str: c.WriterYODA_create().write_to_file(file_or_filename, vec) else: c.WriterYODA_create().write(oss, vec) _str_to_file(oss.str(), file_or_filename) def writeFLAT(ana_objs, file_or_filename): """ Write data objects to the provided file in FLAT format. """ cdef c.ostringstream oss cdef vector[c.AnalysisObject*] vec cdef AnalysisObject a aolist = ana_objs.values() if hasattr(ana_objs, "values") else ana_objs \ if hasattr(ana_objs, "__iter__") else [ana_objs] for a in aolist: vec.push_back(a._AnalysisObject()) if type(file_or_filename) is str: c.WriterFLAT_create().write_to_file(file_or_filename, vec) else: c.WriterFLAT_create().write(oss, vec) _str_to_file(oss.str(), file_or_filename) def writeAIDA(ana_objs, file_or_filename): """ Write data objects to the provided file in AIDA format. """ cdef c.ostringstream oss cdef vector[c.AnalysisObject*] vec cdef AnalysisObject a aolist = ana_objs.values() if hasattr(ana_objs, "values") else ana_objs \ if hasattr(ana_objs, "__iter__") else [ana_objs] for a in aolist: vec.push_back(a._AnalysisObject()) if type(file_or_filename) is str: c.WriterAIDA_create().write_to_file(file_or_filename, vec) else: c.WriterAIDA_create().write(oss, vec) _str_to_file(oss.str(), file_or_filename) diff --git a/pyext/yoda/include/Point.pyx b/pyext/yoda/include/Point.pyx --- a/pyext/yoda/include/Point.pyx +++ b/pyext/yoda/include/Point.pyx @@ -1,150 +1,133 @@ cimport util cdef class Point(util.Base): """ A generic point with errors, used by the Scatter classes. """ cdef c.Point* pptr(self) except NULL: return self.ptr() def __dealloc__(self): cdef c.Point *p = self.pptr() if self._deallocate: del p # def __init__(self): # cutil.set_owned_ptr(self, new c.Point()) # def copy(self): # return cutil.new_owned_cls(Point, new c.Point(deref(self.pptr()))) # TODO: add clone() as mapping to (not yet existing) C++ newclone()? 
- #@property + @property def dim(self): """None -> int Space dimension of the point (should match containing Scatter)""" return self.pptr().dim() def val(self, i): """int -> float Value on axis i""" return self.pptr().val(i) def setVal(self, i, val): """(int, float) -> None Value on axis i""" self.pptr().setVal(i, val) def errs(self, i, source=""): """int -> float Errors on axis i""" if source is None: source = "" - if isinstance(source, str): - source = source.encode('utf-8') return util.read_error_pair(self.pptr().errs(i,source)) def setErr(self, i, e, source=""): """(int, float) -> None Set symmetric errors on axis i""" if source is None: source = "" - if isinstance(source, str): - source = source.encode('utf-8') self.pptr().setErr(i, e, source) def setErrs(self, i, *es): """(int, float) -> None (int, [float, float]) -> None (int, float, float) -> None Set asymmetric errors on axis i""" source=None es=list(es) if type(es[-1]) is str: source=es[-1] es=es[:-1] else: pass errs = es if source is None: source="" if len(errs) == 1: if not hasattr(errs[0], "__iter__"): self.setErr(i,errs[0], source) return errs=errs[0] # assert len(errs) == 2: - if isinstance(source, str): - source = source.encode('utf-8') self.pptr().setErrs(i, tuple(errs), source) def errMinus(self, i, source=""): """int -> float Minus error on axis i""" if source is None: source = "" - if isinstance(source, str): - source = source.encode('utf-8') return self.pptr().errMinus(i ,source) def setErrMinus(self, i, e, source=""): """(int, float) -> None Set minus error on axis i""" if source is None: source = "" - if isinstance(source, str): - source = source.encode('utf-8') self.pptr().setErrMinus(i, e, source) def errPlus(self, i, source=""): """int -> float Plus error on axis i""" if source is None: source = "" - if isinstance(source, str): - source = source.encode('utf-8') return self.pptr().errPlus(i, source) def setErrPlus(self, i, e, source=""): """(int, float) -> None Set plus error on axis i""" if source is None: source = "" - if isinstance(source, str): - source = source.encode('utf-8') self.pptr().setErrPlus(i, e, source) def errAvg(self, i, source=""): """int -> float Average error on axis i""" if source is None: source = "" - if isinstance(source, str): - source = source.encode('utf-8') return self.pptr().errAvg(i, source) def set(self, i, val, *es, source=""): """(int, float, float) -> None (int, float, [float, float]) -> None (int, float, float, float) -> None Set value and errors on axis i""" errs = es if source is None: source = "" if len(es) == 1: if hasattr(es[0], "__iter__"): errs = es[0] else: errs = [es[0], es[0]] # assert len(errs) == 2: - if isinstance(source, str): - source = source.encode('utf-8') self.pptr().set(i, val, errs, source) def errMap(self): """None -> {string: [float,float]} error map of this point""" return self.pptr().errMap() # def __repr__(self): # return '' % self.x diff --git a/pyext/yoda/include/Point1D.pyx b/pyext/yoda/include/Point1D.pyx --- a/pyext/yoda/include/Point1D.pyx +++ b/pyext/yoda/include/Point1D.pyx @@ -1,121 +1,77 @@ cimport util cdef class Point1D(Point): """ A 1D point with errors, used by the Scatter1D class.
""" cdef c.Point1D* p1ptr(self) except NULL: return self.ptr() def __init__(self, x=0, xerrs=0, source=""): if source==None: source="" cutil.set_owned_ptr(self, new c.Point1D()) - self.setX(x) - self.setXErrs(xerrs, source) + self.x = x + self.setXErrs(xerrs,source) def copy(self): return cutil.new_owned_cls(Point1D, new c.Point1D(deref(self.p1ptr()))) # TODO: add clone() as mapping to (not yet existing) C++ newclone()? - + def setXErrs(self, val, source): if source==None: source="" self.p1ptr().setXErrs(util.read_symmetric(val)) - # property x: - # """x coordinate""" - # def __get__(self): - # return self.p1ptr().x() - # def __set__(self, x): - # self.p1ptr().setX(x) + property x: + """x coordinate""" + def __get__(self): + return self.p1ptr().x() + def __set__(self, x): + self.p1ptr().setX(x) - # property xErrs: - # """The x errors""" - # def __get__(self): - # return util.read_error_pair(self.p1ptr().xErrs()) - # def __set__(self, val): - # self.p1ptr().setXErrs(util.read_symmetric(val)) + property xErrs: + """The x errors""" + def __get__(self): + return util.read_error_pair(self.p1ptr().xErrs()) + def __set__(self, val): + self.p1ptr().setXErrs(util.read_symmetric(val)) - def x(self): - """The x value""" - return self.p1ptr().x() - def setX(self, x): - """Set the x value""" - self.p1ptr().setX(x) - def xErrs(self): - """The x errors""" - return util.read_error_pair(self.p1ptr().xErrs()) - - def xErrsFromSource(self, source): - """The y errors""" - if isinstance(source, str): - source = source.encode('utf-8') - return util.read_error_pair(self.p1ptr().xErrs(source)) - - def setXErrs(self, *es): - """(int, float) -> None - (int, [float, float]) -> None - (int, float, float) -> None - Set asymmetric errors on axis i""" - source = None - es = list(es) - if type(es[-1]) is str: - source = es[-1] - es = es[:-1] - else: - pass - errs = es - if source is None: - source = "" - if len(errs) == 1: - if not hasattr(errs[0], "__iter__"): - self.setErr(1,errs[0], source) - return - errs = errs[0] - # assert len(errs) == 2: - if isinstance(source, str): - source = source.encode('utf-8') - self.pptr().setErrs(1, tuple(errs), source) - - def setYErrs(self, val, source): - if source is None: - source = "" - self.p1ptr().setXErrs(util.read_symmetric(val)) - - #@property + @property def xMin(self): """The minimum x position, i.e. lowest error""" return self.p1ptr().xMin() - #@property + @property def xMax(self): """The maximum x position, i.e. highest error""" return self.p1ptr().xMax() - def xErrAvg(self): - return self.p1ptr().xErrAvg() + + property xErrAvg: + def __get__(self): + return self.p1ptr().xErrAvg() def scaleX(self, a): """(float) -> None Scale the x values and errors by factor a.""" self.p1ptr().scaleX(a) def __repr__(self): - return '' % self.x() + return '' % self.x def __richcmp__(Point1D self, Point1D other, int op): if op == 0: return deref(self.p1ptr()) < deref(other.p1ptr()) elif op == 1: return deref(self.p1ptr()) <= deref(other.p1ptr()) elif op == 2: return deref(self.p1ptr()) == deref(other.p1ptr()) elif op == 3: return deref(self.p1ptr()) != deref(other.p1ptr()) elif op == 4: return deref(self.p1ptr()) > deref(other.p1ptr()) elif op == 5: return deref(self.p1ptr()) >= deref(other.p1ptr()) diff --git a/pyext/yoda/include/Point2D.pyx b/pyext/yoda/include/Point2D.pyx --- a/pyext/yoda/include/Point2D.pyx +++ b/pyext/yoda/include/Point2D.pyx @@ -1,175 +1,141 @@ cimport util cdef class Point2D(Point): """ A 2D point with errors, used by the Scatter2D class. 
""" cdef c.Point2D* p2ptr(self) except NULL: return self.ptr() def __init__(self, x=0, y=0, xerrs=0, yerrs=0, source=""): if source==None: source="" cutil.set_owned_ptr(self, new c.Point2D()) - self.setX(x) - self.setY(y) - self.setXErrs(xerrs) - self.setYErrs(yerrs, source) + self.x = x + self.y = y + self.xErrs = xerrs + self.setYErrs(yerrs,source) def copy(self): return cutil.new_owned_cls(Point2D, new c.Point2D(deref(self.p2ptr()))) # TODO: add clone() as mapping to (not yet existing) C++ newclone()? + def setYErrs(self, val, source): + if source==None: source="" + self.p2ptr().setYErrs(util.read_symmetric(val)) - def x(self): - """The x value""" - return self.p2ptr().x() - def setX(self, x): - """Set the x value""" - self.p2ptr().setX(x) + property x: + """x coordinate""" + def __get__(self): + return self.p2ptr().x() + def __set__(self, x): + self.p2ptr().setX(x) - def xErrs(self): + property y: + """y coordinate""" + def __get__(self): + return self.p2ptr().y() + def __set__(self, y): + self.p2ptr().setY(y) + + property xy: + """x and y coordinates as a tuple""" + def __get__(self): + return util.XY(self.x, self.y) + def __set__(self, val): + self.x, self.y = val + + + # TODO: How does this fit into the multi-error API? Still useful, but just reports first errs... how to get _all_ +- err pairs? + # LC: This is fine because preserntly only the highest dimension supports multi-errors + property xErrs: """The x errors""" - return util.read_error_pair(self.p2ptr().xErrs()) + def __get__(self): + return util.read_error_pair(self.p2ptr().xErrs()) + def __set__(self, val): + self.p2ptr().setXErrs(util.read_symmetric(val)) - def setXErrs(self, val): - """Set the x errors""" - self.p2ptr().setXErrs(util.read_symmetric(val)) + # TODO: How does this fit into the multi-error API? Still useful, but just reports first errs... how to get _all_ +- err pairs? + # LC: I think it's Ok to leave this like this, for most users the nominal is what they want anyway, + # and for those who want multi-errs, they can set using a method eg setErrs(dim,(ed,eu),source) and access using errs(dim,(ed,eu),source) + property yErrs: + """The y errors""" + def __get__(self): + return util.read_error_pair(self.p2ptr().yErrs()) + def __set__(self, val): + self.p2ptr().setYErrs(util.read_symmetric(val)) + + @property def xMin(self): """The minimum x position, i.e. lowest error""" return self.p2ptr().xMin() + @property def xMax(self): """The maximum x position, i.e. 
highest error""" return self.p2ptr().xMax() - def xErrAvg(self): - return self.p2ptr().xErrAvg() - - - def y(self): - """The y value""" - return self.p2ptr().y() - def setY(self, y): - """Set the y value""" - self.p2ptr().setY(y) - - def yErrs(self): - """The y errors""" - return util.read_error_pair(self.p2ptr().yErrs()) - - def yErrsFromSource(self, source): - """The y errors""" - if isinstance(source, str): - source = source.encode('utf-8') - return util.read_error_pair(self.p2ptr().yErrs(source)) - # def setYErrs(self, val): - # """Set the y errors""" - # self.p2ptr().setYErrs(util.read_symmetric(val)) - def setYErrs(self, *es): - """(int, float) -> None - (int, [float, float]) -> None - (int, float, float) -> None - Set asymmetric errors on axis i""" - source = None - es = list(es) - if type(es[-1]) is str: - source = es[-1] - es = es[:-1] - else: - pass - errs = es - if source is None: - source = "" - if len(errs) == 1: - if not hasattr(errs[0], "__iter__"): - self.setErr(2,errs[0], source) - return - errs = errs[0] - # assert len(errs) == 2: - if isinstance(source, str): - source = source.encode('utf-8') - self.pptr().setErrs(2, tuple(errs), source) - - def setYErrs(self, val, source): - if source is None: - source = "" - self.p2ptr().setYErrs(util.read_symmetric(val)) - + @property def yMin(self): """The minimum y position, i.e. lowest error""" return self.p2ptr().yMin() + @property def yMax(self): """The maximum y position, i.e. highest error""" return self.p2ptr().yMax() - def yErrAvg(self): - return self.p2ptr().yErrAvg() + property xErrAvg: + def __get__(self): + return self.p2ptr().xErrAvg() - - # property xy: - # """x and y coordinates as a tuple""" - # def __get__(self): - # return util.XY(self.x, self.y) - # def __set__(self, val): - # self.x, self.y = val - - - - # # TODO: How does this fit into the multi-error API? Still useful, but just reports first errs... how to get _all_ +- err pairs? - # # LC: I think it's Ok to leave this like this, for most users the nominal is what they want anyway, - # # and for those who want multi-errs, they can set using a method eg setErrs(dim,(ed,eu),source) and access using errs(dim,(ed,eu),source) - # property yErrs: - # """The y errors""" - # def __get__(self): - # return util.read_error_pair(self.p2ptr().yErrs()) - # def __set__(self, val): - # self.p2ptr().setYErrs(util.read_symmetric(val)) + property yErrAvg: + def __get__(self): + return self.p2ptr().yErrAvg() def scaleX(self, a): """(float) -> None Scale the x values and errors by factor a.""" self.p2ptr().scaleX(a) def scaleY(self, a): """(float) -> None Scale the y values and errors by factor a.""" self.p2ptr().scaleY(a) def scaleXY(self, x=1.0, y=1.0): """ (float=1, float=1) -> None Scale the point coordinates by the given factors. """ self.p2ptr().scaleXY(x, y) # TODO: remove def scale(self, x=1.0, y=1.0): """ (float=1, float=1) -> None DEPRECATED! Use scaleXY Scale the point coordinates by the given factors. 
""" self.p2ptr().scaleXY(x, y) def __repr__(self): - return '' % (self.x(), self.y()) + return '' % (self.x, self.y) def __richcmp__(Point2D self, Point2D other, int op): if op == 0: return deref(self.p2ptr()) < deref(other.p2ptr()) elif op == 1: return deref(self.p2ptr()) <= deref(other.p2ptr()) elif op == 2: return deref(self.p2ptr()) == deref(other.p2ptr()) elif op == 3: return deref(self.p2ptr()) != deref(other.p2ptr()) elif op == 4: return deref(self.p2ptr()) > deref(other.p2ptr()) elif op == 5: return deref(self.p2ptr()) >= deref(other.p2ptr()) diff --git a/pyext/yoda/include/Point3D.pyx b/pyext/yoda/include/Point3D.pyx --- a/pyext/yoda/include/Point3D.pyx +++ b/pyext/yoda/include/Point3D.pyx @@ -1,228 +1,178 @@ cimport util cdef class Point3D(Point): """ A 3D point with errors, used by the Scatter3D class. """ cdef c.Point3D* p3ptr(self) except NULL: return self.ptr() def __init__(self, x=0, y=0, z=0, xerrs=0, yerrs=0, zerrs=0, source=""): if source==None: source="" cutil.set_owned_ptr(self, new c.Point3D()) - # TODO: need shortcuts - self.setX(x) - self.setY(y) - self.setZ(z) - self.setXErrs(xerrs) - self.setYErrs(yerrs) - self.setZErrs(zerrs, source) + self.xyz = x, y, z + self.xErrs = xerrs + self.yErrs = yerrs + self.setZErrs(zerrs,source) def copy(self): return cutil.new_owned_cls(Point3D, new c.Point3D(deref(self.p3ptr()))) + + def setZErrs(self, val, source): + if source==None: source="" + self.p3ptr().setZErrs(util.read_symmetric(val)) + property x: + """x coordinate""" + def __get__(self): + return self.p3ptr().x() + def __set__(self, x): + self.p3ptr().setX(x) - def x(self): - """The x value""" - return self.p3ptr().x() - def setX(self, x): - """Set the x value""" - self.p3ptr().setX(x) + property y: + """y coordinate""" + def __get__(self): + return self.p3ptr().y() + def __set__(self, y): + self.p3ptr().setY(y) - def xErrs(self): - """The x errors""" - return util.read_error_pair(self.p3ptr().xErrs()) + property z: + """y coordinate""" + def __get__(self): + return self.p3ptr().z() + def __set__(self, z): + self.p3ptr().setZ(z) - def setXErrs(self, val): - """Set the x errors""" - self.p3ptr().setXErrs(util.read_symmetric(val)) + property xyz: + def __get__(self): + return util.XYZ(self.x, self.y, self.z) + def __set__(self, val): + self.x, self.y, self.z = val + + # TODO: How does this fit into the multi-error API? Still useful, but just reports first errs... how to get _all_ +- err pairs? + # LC: This is fine because preserntly only the highest dimension supports multi-errors + property xErrs: + def __get__(self): + return util.read_error_pair(self.p3ptr().xErrs()) + def __set__(self, val): + self.p3ptr().setXErrs(util.read_symmetric(val)) + + # TODO: How does this fit into the multi-error API? Still useful, but just reports first errs... how to get _all_ +- err pairs? + # LC: This is fine because preserntly only the highest dimension supports multi-errors + property yErrs: + def __get__(self): + return util.read_error_pair(self.p3ptr().yErrs()) + def __set__(self, val): + self.p3ptr().setYErrs(util.read_symmetric(val)) + + # TODO: How does this fit into the multi-error API? Still useful, but just reports first errs... how to get _all_ +- err pairs? 
+ # LC: I think it's Ok to leave this like this, for most users the nominal is what they want anyway, + # and for those who want multi-errs, they can set using a method eg setErrs(dim,(ed,eu),source) and access using errs(dim,(ed,eu),source) + property zErrs: + def __get__(self): + return util.read_error_pair(self.p3ptr().zErrs()) + def __set__(self, val): + self.p3ptr().setZErrs(util.read_symmetric(val)) + + + @property def xMin(self): """The minimum x position, i.e. lowest error""" return self.p3ptr().xMin() + @property def xMax(self): """The maximum x position, i.e. highest error""" return self.p3ptr().xMax() - def xErrAvg(self): - return self.p3ptr().xErrAvg() - - - def y(self): - """The y value""" - return self.p3ptr().y() - def setY(self, y): - """Set the y value""" - self.p3ptr().setY(y) - - def yErrs(self): - """The y errors""" - return util.read_error_pair(self.p3ptr().yErrs()) - - def setYErrs(self, val): - """Set the y errors""" - self.p3ptr().setYErrs(util.read_symmetric(val)) - + @property def yMin(self): """The minimum y position, i.e. lowest error""" return self.p3ptr().yMin() + @property def yMax(self): """The maximum y position, i.e. highest error""" return self.p3ptr().yMax() - def yErrAvg(self): - return self.p3ptr().yErrAvg() - - - def z(self): - """The z value""" - return self.p3ptr().z() - def setZ(self, z): - """Set the z value""" - self.p3ptr().setZ(z) - - def zErrs(self): - """The z errors""" - return util.read_error_pair(self.p3ptr().zErrs()) - - def zErrsFromSource(self, source): - """The z errors""" - if isinstance(source, str): - source = source.encode('utf-8') - return util.read_error_pair(self.p3ptr().zErrs(source)) - # def setZErrs(self, val): - # """Set the z errors""" - # self.p3ptr().setZErrs(util.read_symmetric(val)) - def setZErrs(self, *es): - """(int, float) -> None - (int, [float, float]) -> None - (int, float, float) -> None - Set asymmetric errors on axis i""" - source = None - es = list(es) - if type(es[-1]) is str: - source = es[-1] - es = es[:-1] - else: - pass - errs = es - if source is None: - source = "" - if len(errs) == 1: - if not hasattr(errs[0], "__iter__"): - self.setErr(2,errs[0], source) - return - errs = errs[0] - # assert len(errs) == 2: - if isinstance(source, str): - source = source.encode('utf-8') - self.pptr().setErrs(2, tuple(errs), source) - def setZErrs(self, val, source): - if source is None: - source = "" - self.p3ptr().setZErrs(util.read_symmetric(val)) - + @property def zMin(self): """The minimum z position, i.e. lowest error""" return self.p3ptr().zMin() + @property def zMax(self): """The maximum z position, i.e. highest error""" return self.p3ptr().zMax() - def zErrAvg(self): - return self.p3ptr().zErrAvg() + property xErrAvg: + def __get__(self): + return self.p3ptr().xErrAvg() - # property xyz: - # def __get__(self): - # return util.XYZ(self.x, self.y, self.z) - # def __set__(self, val): - # self.x, self.y, self.z = val + property yErrAvg: + def __get__(self): + return self.p3ptr().yErrAvg() - - # # TODO: How does this fit into the multi-error API? Still useful, but just reports first errs... how to get _all_ +- err pairs? - # # LC: This is fine because preserntly only the highest dimension supports multi-errors - # property xErrs: - # def __get__(self): - # return util.read_error_pair(self.p3ptr().xErrs()) - # def __set__(self, val): - # self.p3ptr().setXErrs(util.read_symmetric(val)) - - # # TODO: How does this fit into the multi-error API? Still useful, but just reports first errs... how to get _all_ +- err pairs? 
- # # LC: This is fine because preserntly only the highest dimension supports multi-errors - # property yErrs: - # def __get__(self): - # return util.read_error_pair(self.p3ptr().yErrs()) - # def __set__(self, val): - # self.p3ptr().setYErrs(util.read_symmetric(val)) - - # # TODO: How does this fit into the multi-error API? Still useful, but just reports first errs... how to get _all_ +- err pairs? - # # LC: I think it's Ok to leave this like this, for most users the nominal is what they want anyway, - # # and for those who want multi-errs, they can set using a method eg setErrs(dim,(ed,eu),source) and access using errs(dim,(ed,eu),source) - # property zErrs: - # def __get__(self): - # return util.read_error_pair(self.p3ptr().zErrs()) - # def __set__(self, val): - # self.p3ptr().setZErrs(util.read_symmetric(val)) - + property zErrAvg: + def __get__(self): + return self.p3ptr().zErrAvg() def scaleX(self, ax): """ (float) -> None Scale the x point coordinates by the given factor. """ self.p3ptr().scaleX(ax) def scaleY(self, ay): """ (float) -> None Scale the y point coordinates by the given factor. """ self.p3ptr().scaleY(ay) def scaleZ(self, az): """ (float) -> None Scale the z point coordinates by the given factor. """ self.p3ptr().scaleZ(az) def scaleXYZ(self, ax=1.0, ay=1.0, az=1.0): """ (float=1.0, float=1.0, float=1.0) -> None Scale the point coordinates by the given factors. """ self.p3ptr().scaleXYZ(ax, ay, az) # TODO: remove def scaleXYZ(self, ax=1.0, ay=1.0, az=1.0): """ (double=1.0, double=1.0, double=1.0) -> None DEPRECATED: USE scaleXYZ Scale the point coordinates by the given factors. """ self.scaleXYZ(ax, ay, az) # TODO: transformX,Y,Z def __repr__(self): - return '' % (self.x(), self.y(), self.z()) + return '' % (self.x, self.y, self.z) def __richcmp__(Point3D self, Point3D other, int op): if op == 0: return deref(self.p3ptr()) < deref(other.p3ptr()) elif op == 1: return deref(self.p3ptr()) <= deref(other.p3ptr()) elif op == 2: return deref(self.p3ptr()) == deref(other.p3ptr()) elif op == 3: return deref(self.p3ptr()) != deref(other.p3ptr()) elif op == 4: return deref(self.p3ptr()) > deref(other.p3ptr()) elif op == 5: return deref(self.p3ptr()) >= deref(other.p3ptr()) diff --git a/pyext/yoda/include/Profile1D.pyx b/pyext/yoda/include/Profile1D.pyx --- a/pyext/yoda/include/Profile1D.pyx +++ b/pyext/yoda/include/Profile1D.pyx @@ -1,392 +1,392 @@ cimport util cdef class Profile1D(AnalysisObject): """ 1D profile histogram, used to measure mean values of a y variable, binned in x. Complete histogram binning is supported, including uniform/regular binning, variable-width binning, unbinned gaps in the covered range, and under/overflows. Rebinning by integer factors, or by explicit merging of contiguous bins is also supported. Rescaling of weights and/or the x axis is permitted in-place: the result is still a valid Histo1D. Binning-compatible 1D histograms may be divided, resulting in a Scatter2D since further fills would not be meaningful. Several sets of arguments are tried by the constructor in the following order. Profile1D(path="", title=""). Construct a histogram with optional path and title but no bins. Profile1D(nbins, low, high, path="", title="") Construct a histogram with optional path and title, and nbins bins uniformly distributed between low and high. Profile1D(B, path="", title=""). Construct a histogram with optional path and title, from an iterator of bins, B. 
""" cdef inline c.Profile1D* p1ptr(self) except NULL: return self.ptr() def __init__(self, *args, **kwargs): util.try_loop([self.__init2, self.__init3, self.__init5], *args, **kwargs) def __init2(self, path="", title=""): path = path.encode('utf-8') title = title.encode('utf-8') cutil.set_owned_ptr( self, new c.Profile1D(path, title)) # TODO: Is Cython clever enough that we can make 3a and 3b versions and let it do the type inference? def __init3(self, bins_or_edges, path="", title=""): # TODO: Do this type-checking better cdef vector[double] edges try: path = path.encode('utf-8') title = title.encode('utf-8') ## If float conversions work for all elements, it's a list of edges: edges = list(float(x) for x in bins_or_edges) cutil.set_owned_ptr(self, new c.Profile1D(edges, path, title)) except: ## Assume it's a list of HistoBin1D bins = bins_or_edges self.__init2(path, title) self.addBins(bins) def __init5(self, size_t nbins, double lower, double upper, path="", title=""): path = path.encode('utf-8') title = title.encode('utf-8') cutil.set_owned_ptr( self, new c.Profile1D(nbins, lower, upper, path, title)) def __len__(self): "Number of bins" return self.p1ptr().bins().size() def __getitem__(self, i): "Direct access to bins" cdef size_t ii = cutil.pythonic_index(i, self.p1ptr().bins().size()) return cutil.new_borrowed_cls(ProfileBin1D, & self.p1ptr().bin(ii), self) def __repr__(self): return "<%s '%s' %d bins, sumw=%0.2g>" % \ - (self.__class__.__name__, self.path(), - len(self.bins()), self.sumW()) + (self.__class__.__name__, self.path, + len(self.bins), self.sumW()) def reset(self): """None -> None. Reset the histogram but leave the bin structure.""" self.p1ptr().reset() def clone(self): """None -> Profile1D. Clone this Profile1D.""" return cutil.new_owned_cls(Profile1D, self.p1ptr().newclone()) def fill(self, x, y, weight=1.0, fraction=1.0): """(x,y,[w]) -> None. Fill with given x & y values and optional weight.""" self.p1ptr().fill(x, y, weight, fraction) def fillBin(self, size_t ix, double y, double weight=1.0, double fraction=1.0): """(ix,y,[w]) -> None. 
Fill bin ix with y value and optional weight.""" self.p1ptr().fillBin(ix, y, weight, fraction) - #@property + @property def totalDbn(self): """() -> Dbn2D The Dbn2D representing the total distribution.""" return cutil.new_borrowed_cls( Dbn2D, &self.p1ptr().totalDbn(), self) - #@property + @property def underflow(self): """() -> Dbn2D The Dbn2D representing the underflow distribution.""" return cutil.new_borrowed_cls( Dbn2D, &self.p1ptr().underflow(), self) - #@property + @property def overflow(self): """() -> Dbn2D The Dbn2D representing the overflow distribution.""" return cutil.new_borrowed_cls( Dbn2D, &self.p1ptr().overflow(), self) def numEntries(self, includeoverflows=True): """([bool]) -> float Number of times this histogram was filled, optionally excluding the overflows.""" return self.p1ptr().numEntries(includeoverflows) def effNumEntries(self, includeoverflows=True): """([bool]) -> float Effective number of times this histogram was filled, computed from weights and optionally excluding the overflows.""" return self.p1ptr().effNumEntries(includeoverflows) def sumW(self, includeoverflows=True): """([bool]) -> float Sum of weights filled into this histogram.""" return self.p1ptr().sumW(includeoverflows) def sumW2(self, includeoverflows=True): """([bool]) -> float Sum of weights filled into this histogram.""" return self.p1ptr().sumW2(includeoverflows) def xMean(self, includeoverflows=True): """([bool]) -> float Mean x of the histogram, optionally excluding the overflows.""" return self.p1ptr().xMean(includeoverflows) def xVariance(self, includeoverflows=True): """([bool]) -> float Variance in x of the histogram, optionally excluding the overflows.""" return self.p1ptr().xVariance(includeoverflows) def xStdDev(self, includeoverflows=True): """([bool]) -> float Standard deviation in x of the histogram, optionally excluding the overflows.""" return self.p1ptr().xStdDev(includeoverflows) def xStdErr(self, includeoverflows=True): """([bool]) -> float Standard error on the mean x of the histogram, optionally excluding the overflows.""" return self.p1ptr().xStdErr(includeoverflows) def xRMS(self, includeoverflows=True): """([bool]) -> float RMS in x of the histogram, optionally excluding the overflows.""" return self.p1ptr().xRMS(includeoverflows) def scaleW(self, double w): """(float) -> None. Rescale the weights in this histogram by the factor w.""" self.p1ptr().scaleW(w) def scaleY(self, double f): """(float) -> None. Scale the y-direction (profiled value) in this histogram by the factor f.""" self.p1ptr().scaleY(f) - #@property + @property def xMin(self): """Low x edge of the histo.""" return self.p1ptr().xMin() - #@property + @property def xMax(self): """High x edge of the histo.""" return self.p1ptr().xMax() - #@property + @property def numBins(self): """() -> int Number of bins (not including overflows).""" return self.p1ptr().numBins() - #@property + @property def bins(self): """Access the ordered bins list.""" return list(self) def bin(self, i): """Get the i'th bin""" - # cdef size_t ii = cutil.pythonic_index(i, self.numBins()) + # cdef size_t ii = cutil.pythonic_index(i, self.p2ptr().numBins()) return cutil.new_borrowed_cls(ProfileBin1D, & self.p1ptr().bin(i), self) def binIndexAt(self, x): """Get the bin index containing position x""" return self.p1ptr().binIndexAt(x) def binAt(self, x): """Get the bin containing position x""" # TODO: what's the problem with this direct mapping? Produces compile error re. no default constructor... 
# return cutil.new_borrowed_cls(ProfileBin1D, & self.p1ptr().binAt(x), self) # TODO: need out-of-range check to return None? return self.bin(self.binIndexAt(x)) def addBin(self, low, high): """Add a bin.""" self.p1ptr().addBin(low, high) return self def addBins(self, edges): """Add several bins.""" # TODO: simplify / make consistent cdef vector[double] cedges for i in edges: cedges.push_back(i) self.p1ptr().addBins(cedges) return self def mergeBins(self, a, b): """mergeBins(ia, ib) -> None. Merge bins from indices ia through ib.""" self.p1ptr().mergeBins(a, b) def rebinBy(self, n, begin=0, end=None): """(n) -> None. Merge every group of n bins together (between begin and end, if specified).""" if end is None: - end = self.numBins() + end = self.numBins self.p1ptr().rebinBy(int(n), begin, end) def rebinTo(self, edges): """([edges]) -> None. Merge bins to produce the given new edges... which must be a subset of the current ones.""" self.p1ptr().rebinTo(edges) def rebin(self, arg, **kwargs): """(n) -> None or ([edges]) -> None Merge bins, like rebinBy if an int argument is given; like rebinTo if an iterable is given.""" if hasattr(arg, "__iter__"): self.rebinTo(arg, **kwargs) else: self.rebinBy(arg, **kwargs) def mkScatter(self, usefocus=False, usestddev=False): """None -> Scatter2D. Convert this Profile1D to a Scatter2D, with y representing mean bin y values and their standard errors.""" cdef c.Scatter2D s2 = c.mkScatter_Profile1D(deref(self.p1ptr()), usefocus, usestddev) return cutil.new_owned_cls(Scatter2D, s2.newclone()) def divideBy(self, Profile1D h): cdef c.Scatter2D s = c.Profile1D_div_Profile1D(deref(self.p1ptr()), deref(h.p1ptr())) return cutil.new_owned_cls(Scatter2D, s.newclone()) def __iadd__(Profile1D self, Profile1D other): c.Profile1D_iadd_Profile1D(self.p1ptr(), other.p1ptr()) return self def __isub__(Profile1D self, Profile1D other): c.Profile1D_isub_Profile1D(self.p1ptr(), other.p1ptr()) return self def __add__(Profile1D self, Profile1D other): h = Profile1D() cutil.set_owned_ptr(h, c.Profile1D_add_Profile1D(self.p1ptr(), other.p1ptr())) return h def __sub__(Profile1D self, Profile1D other): h = Profile1D() cutil.set_owned_ptr(h, c.Profile1D_sub_Profile1D(self.p1ptr(), other.p1ptr())) return h def __div__(Profile1D self, Profile1D other): return self.divideBy(other) def __truediv__(Profile1D self, Profile1D other): return self.divideBy(other) ## Functions for array-based plotting, chi2 calculations, etc. # def sumWs(self): # """All sumWs of the histo.""" - # return [b.sumW() for b in self.bins()] + # return [b.sumW for b in self.bins] # TODO: xyVals,Errs properties should be in a common Drawable2D (?) type (hmm, need a consistent nD convention...) 
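# Illustrative sketch (an assumption, not part of this patch): the array-style
# accessors defined below (xMids, xVals, yVals, yErrs, ...) are meant to feed
# numpy/matplotlib directly, roughly like this (requires matplotlib):
#
#   import yoda
#   import matplotlib.pyplot as plt
#   p = yoda.Profile1D(2, 0.0, 1.0)
#   p.fill(0.25, 2.0); p.fill(0.25, 2.4)   # fill both bins so means/errors are defined
#   p.fill(0.75, 1.1); p.fill(0.75, 0.9)
#   plt.errorbar(p.xMids(), p.yVals(), yerr=p.yErrs(), fmt="o")
#   plt.show()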
# TODO: x bin properties should be in a common Binned1D type def _mknp(self, xs): try: import numpy return numpy.array(xs) except ImportError: return xs #@property def xEdges(self): """All x edges of the histo.""" return self._mknp(self.p1ptr().xEdges()) def xMins(self): """All x low edges of the histo.""" - return self._mknp([b.xMin() for b in self.bins()]) + return self._mknp([b.xMin for b in self.bins]) def xMaxs(self): """All x high edges of the histo.""" - return self._mknp([b.xMax() for b in self.bins()]) + return self._mknp([b.xMax for b in self.bins]) def xMids(self): """All x bin midpoints of the histo.""" - return self._mknp([b.xMid() for b in self.bins()]) + return self._mknp([b.xMid for b in self.bins]) def xFoci(self): """All x bin foci of the histo.""" - return self._mknp([b.xFocus() for b in self.bins()]) + return self._mknp([b.xFocus for b in self.bins]) def xVals(self, foci=False): return self.xFoci() if foci else self.xMids() def xErrs(self, foci=False): if foci: - return [(b.xFocus()-b.xMin(), b.xMax()-b.xFocus()) for b in self.bins()] + return [(b.xFocus-b.xMin, b.xMax-b.xFocus) for b in self.bins] else: - return [(b.xMid()-b.xMin(), b.xMax()-b.xMid()) for b in self.bins()] + return [(b.xMid-b.xMin, b.xMax-b.xMid) for b in self.bins] def xMin(self): """Lowest x value.""" return min(self.xMins()) def xMax(self): """Highest x value.""" return max(self.xMaxs()) def yMeans(self): """All y heights y means.""" - return self._mknp([b.yMean() for b in self.bins()]) + return self._mknp([b.yMean for b in self.bins]) def yVals(self): return self.yMeans() def yStdErrs(self): """All standard errors on the y means.""" - return self._mknp([b.yStdErr() for b in self.bins()]) + return self._mknp([b.yStdErr for b in self.bins]) def yStdDevs(self): """All standard deviations of the y distributions.""" - return self._mknp([b.yStdDev() for b in self.bins()]) + return self._mknp([b.yStdDev for b in self.bins]) def yErrs(self, sd=False): return self.yStdDevs() if sd else self.yStdErrs() def yMins(self, sd=False): ys = self.yVals() es = self.yErrs(sd) return self._mknp([y-e for (y,e) in zip(ys,es)]) def yMaxs(self, sd=False): ys = self.yVals() es = self.yErrs(sd) return self._mknp([y+e for (y,e) in zip(ys,es)]) def yMin(self, sd=False): """Lowest y value.""" return min(self.yMins(sd)) def yMax(self, sd=False): """Highest y value.""" return max(self.yMaxs(sd)) ## Convenience alias P1D = Profile1D diff --git a/pyext/yoda/include/Profile2D.pyx b/pyext/yoda/include/Profile2D.pyx --- a/pyext/yoda/include/Profile2D.pyx +++ b/pyext/yoda/include/Profile2D.pyx @@ -1,475 +1,475 @@ cimport util cdef class Profile2D(AnalysisObject): """ 2D profile histogram, used to measure mean values of a z variable, binned in x and y. Complete histogramming is supported, including uniform/regular binning, variable-width bininng, unbinned gaps in the covered range, and outflows (under/overflows around all edges and corners). Rebinning by integer factors, or by explicit merging of contiguous bins is also supported, but in development. Rescaling of weights and/or the x axis is permitted in-place: the result is still a valid Histo2D. Binning-compatible 1D histograms may be divided, resulting in a Scatter3D since further fills would not be meaningful. Several sets of arguments are tried by the constructor in the following order. Histo2D(path="", title=""). Construct a histogram with optional path and title but no bins. Histo2D(nxbins, xlow, xhigh, nybins, ylow, yhigh, path="", title=""). 
Construct a histogram with nxbins on the x axis and nybins on the y axis, distributed linearly between the respective low--high limits. NOT YET FINISHED: please contact the YODA authors if you require extra functionality. """ cdef inline c.Profile2D* p2ptr(self) except NULL: return self.ptr() def __init__(self, *args, **kwargs): util.try_loop([self.__init2, self.__init4, self.__init8], *args, **kwargs) def __init2(Profile2D self, path="", title=""): path = path.encode('utf-8') title = title.encode('utf-8') cutil.set_owned_ptr(self, new c.Profile2D(path, title)) def __init4(Profile2D self, xedges, yedges, path="", title=""): path = path.encode('utf-8') title = title.encode('utf-8') # TODO: Do some type-checking and allow iterables of ProfileBin2D as well? cutil.set_owned_ptr(self, new c.Profile2D(xedges, yedges, path, title)) def __init8(Profile2D self, nxbins, xlow, xhigh, nybins, ylow, yhigh, path="", title=""): path = path.encode('utf-8') title = title.encode('utf-8') cutil.set_owned_ptr(self, new c.Profile2D(nxbins, xlow, xhigh, nybins, ylow, yhigh, path, title)) def __len__(self): return self.p2ptr().numBins() def __getitem__(self, py_ix): "Direct access to bins" - cdef size_t i = cutil.pythonic_index(py_ix, self.numBins()) + cdef size_t i = cutil.pythonic_index(py_ix, self.p2ptr().numBins()) return cutil.new_borrowed_cls(ProfileBin2D, & self.p2ptr().bins().at(i), self) def __repr__(self): - return "<%s '%s' %d bins, sumw=%0.2g>" % (self.__class__.__name__, self.path(), len(self.bins()), self.sumW()) + return "<%s '%s' %d bins, sumw=%0.2g>" % (self.__class__.__name__, self.path, len(self.bins), self.sumW()) def reset(self): """None -> None. Reset the histogram but leave the bin structure.""" self.p2ptr().reset() def clone(self): """None -> Profile2D. Clone this Profile2D.""" return cutil.new_owned_cls(Profile2D, self.p2ptr().newclone()) def fill(self, double x, double y, double z, double weight=1.0, double fraction=1.0): """(x,y,z,[w]) -> None. Fill with given x,y & z values and optional weight and fill fraction.""" self.p2ptr().fill(x, y, z, weight, fraction) def fillBin(self, size_t i, double z, double weight=1.0, double fraction=1.0): """(i,z,[w]) -> None. 
Fill bin i with value z and optional weight and fill fraction.""" self.p2ptr().fillBin(i, z, weight, fraction) - #@property + @property def totalDbn(self): """() -> Dbn3D The Dbn3D representing the total distribution.""" return cutil.new_borrowed_cls( Dbn3D, new c.Dbn3D(self.p2ptr().totalDbn()), self) # TODO: reinstate # def outflow(self, ix, iy): # """(ix,iy) -> Dbn3D # The Dbn3D representing the ix,iy outflow distribution.""" # return cutil.new_borrowed_cls( # Dbn3D, new c.Dbn3D(self.p2ptr().outflow(ix, iy)), self) def numEntries(self, includeoverflows=True): """([bool]) -> float Number of times this histogram was filled, optionally excluding the overflows.""" return self.p2ptr().numEntries(includeoverflows) def effNumEntries(self, includeoverflows=True): """([bool]) -> float Effective number of times this histogram was filled, computed from weights and optionally excluding the overflows.""" return self.p2ptr().effNumEntries(includeoverflows) def sumW(self, includeoverflows=True): """([bool]) -> float Sum of weights filled into this histogram.""" return self.p2ptr().sumW(includeoverflows) def sumW2(self, includeoverflows=True): """([bool]) -> float Sum of squared weights filled into this histogram.""" return self.p2ptr().sumW2(includeoverflows) def xMean(self, includeoverflows=True): """([bool]) -> float Mean x of the histogram, optionally excluding the overflows.""" return self.p2ptr().xMean(includeoverflows) def yMean(self, includeoverflows=True): """([bool]) -> float Mean y of the histogram, optionally excluding the overflows.""" return self.p2ptr().yMean(includeoverflows) def xyMean(self, includeoverflows=True): """([bool]) -> (float,float) Mean (x,y) of the histogram, optionally excluding the overflows.""" return util.XY(self.xMean(includeoverflows), self.yMean(includeoverflows)) def xVariance(self, includeoverflows=True): """([bool]) -> float Variance in x of the histogram, optionally excluding the overflows.""" return self.p2ptr().xVariance(includeoverflows) def yVariance(self, includeoverflows=True): """([bool]) -> float Variance in y of the histogram, optionally excluding the overflows.""" return self.p2ptr().yVariance(includeoverflows) def xyVariance(self, includeoverflows=True): """([bool]) -> (float,float) Variances in (x,y) of the histogram, optionally excluding the overflows.""" return util.XY(self.xVariance(includeoverflows), self.yVariance(includeoverflows)) def xStdDev(self, includeoverflows=True): """([bool]) -> float Standard deviation in x of the histogram, optionally excluding the overflows.""" return self.p2ptr().xStdDev(includeoverflows) def yStdDev(self, includeoverflows=True): """([bool]) -> float Standard deviation in y of the histogram, optionally excluding the overflows.""" return self.p2ptr().yStdDev(includeoverflows) def xyStdDev(self, includeoverflows=True): """([bool]) -> (float,float) Standard deviations in (x,y) of the histogram, optionally excluding the overflows.""" return util.XY(self.xStdDev(includeoverflows), self.yStdDev(includeoverflows)) def xStdErr(self, includeoverflows=True): """([bool]) -> float Standard error on the mean x of the histogram, optionally excluding the overflows.""" return self.p2ptr().xStdErr(includeoverflows) def yStdErr(self, includeoverflows=True): """([bool]) -> float Standard error on the mean y of the histogram, optionally excluding the overflows.""" return self.p2ptr().yStdErr(includeoverflows) def xyStdErr(self, includeoverflows=True): """([bool]) -> (float,float) Standard errors on the mean (x,y) of the histogram, 
optionally excluding the overflows.""" return util.XY(self.xStdErr(includeoverflows), self.yStdErr(includeoverflows)) def xRMS(self, includeoverflows=True): """([bool]) -> float RMS in x of the histogram, optionally excluding the overflows.""" return self.p2ptr().xRMS(includeoverflows) def yRMS(self, includeoverflows=True): """([bool]) -> float RMS in y of the histogram, optionally excluding the overflows.""" return self.p2ptr().yRMS(includeoverflows) def xyRMS(self, includeoverflows=True): """([bool]) -> (float,float) RMS in (x,y) of the histogram, optionally excluding the overflows.""" return util.XY(self.xRMS(includeoverflows), self.yRMS(includeoverflows)) def scaleW(self, w): """(float) -> None. Rescale the weights in this histogram by the factor w.""" self.p2ptr().scaleW(w) def scaleZ(self, double f): """(float) -> None. Scale the z-direction (profiled value) in this histogram by the factor f.""" self.p1ptr().scaleZ(f) - #@property + @property def xMin(self): """Low x edge of the histo.""" return self.p2ptr().xMin() - #@property + @property def xMax(self): """High x edge of the histo.""" return self.p2ptr().xMax() - #@property + @property def yMin(self): """Low y edge of the histo.""" return self.p2ptr().yMin() - #@property + @property def yMax(self): """High y edge of the histo.""" return self.p2ptr().yMax() - #@property + @property def numBins(self): """() -> int Number of bins (not including overflows).""" return self.p2ptr().numBins() - #@property + @property def numBinsX(self): """() -> int Number of bins (edges) along the x axis.""" return self.p2ptr().numBinsX() - #@property + @property def numBinsY(self): """() -> int Number of bins (edges) along the y axis.""" return self.p2ptr().numBinsY() - #@property + @property def bins(self): """Access the ordered bins list.""" return list(self) def bin(self, i): """Get the i'th bin""" # cdef size_t ii = cutil.pythonic_index(i, self.p2ptr().numBins()) return cutil.new_borrowed_cls(ProfileBin2D, & self.p2ptr().bin(i), self) # TODO: it's more intuitive to have an index for each axis # def bin(self, i, j): # """Get the (i,j)'th bin""" # # cdef size_t ii = cutil.pythonic_index(i, self.p2ptr().numBins()) # # cdef size_t jj = cutil.pythonic_index(j, self.p2ptr().numBins()) # return cutil.new_borrowed_cls(ProfileBin2D, & self.p2ptr().bin(i,j), self) def binIndexAt(self, x, y): """Get the bin index pair containing position (x,y)""" return self.p2ptr().binIndexAt(x, y) def binAt(self, x, y): """Get the bin containing position (x,y)""" # TODO: what's the problem with this direct mapping? Produces compile error re. no default constructor... #return cutil.new_borrowed_cls(ProfileBin2D, & self.p2ptr().binAt(x,y), self) # TODO: need out-of-range check to return None? return self.bin(self.binIndexAt(x,y)) def addBin(self, double xlow, double xhigh, double ylow, double yhigh): """Add a bin.""" self.p2ptr().addBin(pair[double, double](xlow, xhigh), pair[double, double](ylow, yhigh)) return self def addBins(self, xcuts, ycuts): """Add several bins.""" # TODO: simplify / make consistent cdef vector[double] _xcuts cdef vector[double] _ycuts for x in xcuts: _xcuts.push_back(x) for y in ycuts: _ycuts.push_back(y) self.p2ptr().addBins(_xcuts, _ycuts) return self # def mergeBins(self, size_t a, size_t b): # self.p2ptr().mergeBins(a, b) # def rebin(self, int n): # self.p2ptr().rebin(n) def mkScatter(self, usefocus=False, usestddev=False): """None -> Scatter3D. 
Convert this Profile2D to a Scatter3D, with z representing mean bin y values and their standard errors.""" cdef c.Scatter3D s3 = c.mkScatter_Profile2D(deref(self.p2ptr()), usefocus, usestddev) return cutil.new_owned_cls(Scatter3D, s3.newclone()) def divideBy(self, Profile2D h): cdef c.Scatter3D s = c.Profile2D_div_Profile2D(deref(self.p2ptr()), deref(h.p2ptr())) return cutil.new_owned_cls(Scatter3D, s.newclone()) def __iadd__(Profile2D self, Profile2D other): c.Profile2D_iadd_Profile2D(self.p2ptr(), other.p2ptr()) return self def __isub__(Profile2D self, Profile2D other): c.Profile2D_isub_Profile2D(self.p2ptr(), other.p2ptr()) return self def __add__(Profile2D self, Profile2D other): h = Profile2D() cutil.set_owned_ptr(h, c.Profile2D_add_Profile2D(self.p2ptr(), other.p2ptr())) return h def __sub__(Profile2D self, Profile2D other): h = Profile2D() cutil.set_owned_ptr(h, c.Profile2D_sub_Profile2D(self.p2ptr(), other.p2ptr())) return h def __div__(Profile2D self, Profile2D other): return self.divideBy(other) def __truediv__(Profile2D self, Profile2D other): return self.divideBy(other) # def sumWs(self): # """All sumWs of the histo.""" - # return [b.sumW() for b in self.bins()] + # return [b.sumW for b in self.bins] def _mknp(self, xs): try: import numpy return numpy.array(xs) except ImportError: return xs def xEdges(self): """All x edges of the histo.""" return self._mknp(self.p2ptr().xEdges()) def xMins(self): """All x low edges of the histo.""" - return self._mknp([b.xMin() for b in self.bins()]) + return self._mknp([b.xMin for b in self.bins]) def xMaxs(self): """All x high edges of the histo.""" - return self._mknp([b.xMax() for b in self.bins()]) + return self._mknp([b.xMax for b in self.bins]) def xMids(self): """All x bin midpoints of the histo.""" - return self._mknp([b.xMid() for b in self.bins()]) + return self._mknp([b.xMid for b in self.bins]) def xFoci(self): """All x bin foci of the histo.""" - return self._mknp([b.xFocus() for b in self.bins()]) + return self._mknp([b.xFocus for b in self.bins]) def xVals(self, foci=False): return self.xFoci() if foci else self.xMids() def xErrs(self, foci=False): if foci: - return [(b.xFocus()-b.xMin(), b.xMax()-b.xFocus()) for b in self.bins()] + return [(b.xFocus-b.xMin, b.xMax-b.xFocus) for b in self.bins] else: - return [(b.xMid()-b.xMin(), b.xMax()-b.xMid()) for b in self.bins()] + return [(b.xMid-b.xMin, b.xMax-b.xMid) for b in self.bins] # def xMin(self): # """Lowest x value.""" # return min(self.xMins()) # def xMax(self): # """Highest x value.""" # return max(self.xMaxs()) def yEdges(self): """All y edges of the histo.""" return self._mknp(self.p2ptr().yEdges()) def yMins(self): """All y low edges of the histo.""" - return self._mknp([b.yMin() for b in self.bins()]) + return self._mknp([b.yMin for b in self.bins]) def yMaxs(self): """All y high edges of the histo.""" - return self._mknp([b.yMax() for b in self.bins()]) + return self._mknp([b.yMax for b in self.bins]) def yMids(self): """All y bin midpoints of the histo.""" - return self._mknp([b.yMid() for b in self.bins()]) + return self._mknp([b.yMid for b in self.bins]) def yFoci(self): """All y bin foci of the histo.""" - return self._mknp([b.yFocus() for b in self.bins()]) + return self._mknp([b.yFocus for b in self.bins]) def yVals(self, foci=False): return self.yFoci() if foci else self.yMids() def yErrs(self, foci=False): if foci: - return [(b.yFocus()-b.yMin(), b.yMax()-b.yFocus()) for b in self.bins()] + return [(b.yFocus-b.yMin, b.yMax-b.yFocus) for b in self.bins] else: - 
return [(b.yMid()-b.yMin(), b.yMax()-b.yMid()) for b in self.bins()] + return [(b.yMid-b.yMin, b.yMax-b.yMid) for b in self.bins] # def yMin(self): # """Lowest y value.""" # return min(self.yMins()) # def yMax(self): # """Highest y value.""" # return max(self.yMaxs()) def zMeans(self): """All y heights of the histo.""" - return self._mknp([b.height() for b in self.bins()]) + return self._mknp([b.height for b in self.bins]) def zVals(self): return self.zMeans() def zStdErrs(self): """All standard errors on the z means.""" - return self._mknp([b.zStdErr() for b in self.bins()]) + return self._mknp([b.zStdErr for b in self.bins]) def zStdDevs(self): """All standard deviations on the z means.""" - return self._mknp([b.zStdDev() for b in self.bins()]) + return self._mknp([b.zStdDev for b in self.bins]) def zErrs(self, sd=False): return self.zStdDevs() if sd else self.zStdErrs() def zMins(self, sd=False): zs = self.zVals() es = self.zErrs(sd) return self._mknp([z-e for (z,e) in zip(zs,es)]) def zMaxs(self, sd=False): zs = self.zVals() es = self.zErrs(sd) return self._mknp([z+e for (z,e) in zip(zs,es)]) def zMin(self, sd=False): """Lowest z value.""" return min(self.zMins(sd)) def zMax(self, sd=False): """Highest z value.""" return max(self.zMaxs(sd)) ## Convenience alias P2D = Profile2D diff --git a/pyext/yoda/include/ProfileBin1D.pyx b/pyext/yoda/include/ProfileBin1D.pyx --- a/pyext/yoda/include/ProfileBin1D.pyx +++ b/pyext/yoda/include/ProfileBin1D.pyx @@ -1,94 +1,94 @@ cdef class ProfileBin1D(Bin1D_Dbn2D): """ A 1D profile bin, as stored inside Profile1D. Only one constructor: * ProfileBin1D(xlow, xhigh) """ cdef inline c.ProfileBin1D* pb1ptr(self) except NULL: return self.ptr() # TODO: remove cdef inline c.ProfileBin1D* _ProfileBin1D(self) except NULL: return self.ptr() def __init__(self, double a, double b): cutil.set_owned_ptr(self, new c.ProfileBin1D(a, b)) # def fill(self, x, y, weight=1.0, fraction=1.0): # """ # (x, y, weight=1.0) -> None. Fill this bin with given values and weight. # """ # self.pb1ptr().fill(x, y, weight, fraction) # def fillBin(self, y, weight=1.0, fraction=1.0): # """ # (y, weight=1.0) -> None. Fill this bin with given y-value and weight. # """ # self.pb1ptr().fillBin(y, weight, fraction) # def scaleY(self, ay): # """ # float -> None # Scale y values by ay. 
# """ # self.pb1ptr().scaleY(ay) - #@property + @property def mean(self): """The mean of the y-values that have filled the bin.""" return self.pb1ptr().mean() - #@property + @property def variance(self): """The variance of the y-values that have filled the bin.""" return self.pb1ptr().variance() - #@property + @property def stdDev(self): """The standard deviation of the y-values that have filled the bin.""" return self.pb1ptr().stdDev() - - #@property + + @property def stdErr(self): """The standard error of the y-values that have filled the bin.""" return self.pb1ptr().stdErr() - - #@property + + @property def rms(self): """The RMS of the y-values that have filled the bin.""" return self.pb1ptr().rms() - #@property + @property def sumWY(self): """sum(weights * ys)""" return self.pb1ptr().sumWY() - #@property + @property def sumWY2(self): """sum(weights * ys * ys)""" return self.pb1ptr().sumWY2() def __iadd__(ProfileBin1D self, ProfileBin1D other): c.ProfileBin1D_iadd_ProfileBin1D(self.pb1ptr(), other.pb1ptr()) return self def __isub__(ProfileBin1D self, ProfileBin1D other): c.ProfileBin1D_isub_ProfileBin1D(self.pb1ptr(), other.pb1ptr()) return self def __add__(ProfileBin1D a, ProfileBin1D b): return cutil.new_owned_cls(ProfileBin1D, new c.ProfileBin1D(deref(a.pb1ptr()) + deref(b.pb1ptr()))) def __sub__(ProfileBin1D a, ProfileBin1D b): return cutil.new_owned_cls(ProfileBin1D, new c.ProfileBin1D(deref(a.pb1ptr()) - deref(b.pb1ptr()))) diff --git a/pyext/yoda/include/ProfileBin2D.pyx b/pyext/yoda/include/ProfileBin2D.pyx --- a/pyext/yoda/include/ProfileBin2D.pyx +++ b/pyext/yoda/include/ProfileBin2D.pyx @@ -1,54 +1,54 @@ #TODO improve this once we have a working Profile2D cdef class ProfileBin2D(Bin2D_Dbn3D): cdef inline c.ProfileBin2D* pb2ptr(self) except NULL: return self.ptr() # TODO: remove cdef inline c.ProfileBin2D* _ProfileBin2D(self) except NULL: return self.ptr() def __init__(self, xlow, xhigh, ylow, yhigh): cutil.set_owned_ptr(self, new c.ProfileBin2D(xlow, xhigh, ylow, yhigh)) # def fill(self, x, y, z, weight=1.0, fraction=1.0): # self.pb2ptr().fill(x, y, z, weight, fraction) # def fill_bin(self, z, weight=1.0, fraction=1.0): # self.pb2ptr().fillBin(z, weight, fraction) - #@property + @property def mean(self): return self.pb2ptr().mean() - #@property + @property def stdDev(self): return self.pb2ptr().stdDev() - - #@property + + @property def variance(self): return self.pb2ptr().variance() - #@property + @property def stdErr(self): return self.pb2ptr().stdErr() - - #@property + + @property def rms(self): return self.pb2ptr().rms() - #@property + @property def sumWZ(self): return self.pb2ptr().sumWZ() - #@property + @property def sumWZ2(self): return self.pb2ptr().sumWZ2() def __add__(ProfileBin2D a, ProfileBin2D b): return cutil.new_owned_cls(ProfileBin2D, new c.ProfileBin2D(deref(a.pb2ptr()) + deref(b.pb2ptr()))) def __sub__(ProfileBin2D a, ProfileBin2D b): return cutil.new_owned_cls(ProfileBin2D, new c.ProfileBin2D(deref(a.pb2ptr()) - deref(b.pb2ptr()))) def __repr__(self): return 'ProfileBin2D(%g, %g, %g, %g)' % (self.edges.x + self.edges.y) diff --git a/pyext/yoda/include/Scatter1D.pyx b/pyext/yoda/include/Scatter1D.pyx --- a/pyext/yoda/include/Scatter1D.pyx +++ b/pyext/yoda/include/Scatter1D.pyx @@ -1,176 +1,176 @@ cimport util cdef class Scatter1D(AnalysisObject): """ 1D scatter plot, i.e. a collection of Point1D objects with positions and errors. Constructor calling idioms: Scatter1D(path="", title="") Create a new empty scatter, with optional path and title. 
Scatter1D(points, path="", title=""): Create a new empty scatter from an iterable of points, with optional path and title. TODO: more documentation! """ cdef inline c.Scatter1D* s1ptr(self) except NULL: return self.ptr() def __init__(self, *args, **kwargs): util.try_loop([self.__init_2, self.__init_3], *args, **kwargs) def __init_2(self, path="", title=""): path = path.encode('utf-8') title = title.encode('utf-8') cutil.set_owned_ptr(self, new c.Scatter1D(path, title)) def __init_3(self, points, path="", title=""): self.__init_2(path, title) self.addPoints(points) def clone(self): """() -> Scatter1D. Clone this Scatter1D.""" return cutil.new_owned_cls(Scatter1D, self.s1ptr().newclone()) def __repr__(self): - return "<%s '%s' %d points>" % (self.__class__.__name__, self.path(), len(self.points())) + return "<%s '%s' %d points>" % (self.__class__.__name__, self.path, len(self.points)) - #@property + @property def numPoints(self): """() -> int Number of points in this scatter.""" return self.s1ptr().numPoints() def __len__(self): - return self.numPoints() + return self.numPoints - #@property + @property def points(self): """Access the ordered list of points.""" - return [self.point(i) for i in xrange(self.numPoints())] + return [self.point(i) for i in xrange(self.numPoints)] def point(self, size_t i): """Access the i'th point.""" return cutil.new_borrowed_cls(Point1D, &self.s1ptr().point(i), self) def __getitem__(self, py_ix): - cdef size_t i = cutil.pythonic_index(py_ix, self.numPoints()) + cdef size_t i = cutil.pythonic_index(py_ix, self.s1ptr().numPoints()) return cutil.new_borrowed_cls(Point1D, &self.s1ptr().point(i), self) def addPoint(self, *args, **kwargs): """Add a new point. Provide either a single yoda.Point1D object, or the two args: x, xerrs=0. """ try: self.__addPoint_point(*args, **kwargs) except TypeError: self.__addPoint_explicit(*args, **kwargs) def __addPoint_explicit(self, x, xerrs=0): self.__addPoint_point(Point1D(x, xerrs)) def __addPoint_point(self, Point1D p): self.s1ptr().addPoint(p.p1ptr()[0]) def addPoints(self, iterable): """Add several new points.""" for row in iterable: try: self.addPoint(*row) except TypeError: self.addPoint(row) def combineWith(self, others): """Try to add points from other Scatter1Ds into this one.""" cdef Scatter1D other try: # Can we type it as a Scatter1D? other = others except TypeError: # Could be an iterable... for other in others: self.s1ptr().combineWith(deref(other.s1ptr())) else: self.s1ptr().combineWith(deref(other.s1ptr())) def mkScatter(self): """None -> Scatter1D. Make a new Scatter1D. Exists to allow mkScatter calls on any AnalysisObject, even if it already is a scatter.""" cdef c.Scatter1D s2 = c.mkScatter_Scatter1D(deref(self.s1ptr())) return cutil.new_owned_cls(Scatter1D, s2.newclone()) def scaleX(self, a): """(float) -> None Scale the x values and errors of the points in this scatter by factor a.""" self.s1ptr().scaleX(a) def transformX(self, f): """(fn) -> None Transform the x values and errors of the points in this scatter by function f.""" import ctypes try: callback = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double)(f) except: raise RuntimeError("Callback is not of type (double) -> double") fptr = (ctypes.addressof(callback))[0] c.Scatter1D_transformX(deref(self.s1ptr()), fptr) def variations(self): """None -> vector[string] Get the list of variations stored in the poins of the Scatter""" return self.s1ptr().variations() # # TODO: remove? 
# def __add__(Scatter1D self, Scatter1D other): # return cutil.new_owned_cls(Scatter1D, c.Scatter1D_add_Scatter1D(self.s1ptr(), other.s1ptr())) # # TODO: remove? # def __sub__(Scatter1D self, Scatter1D other): # return cutil.new_owned_cls(Scatter1D, c.Scatter1D_sub_Scatter1D(self.s1ptr(), other.s1ptr())) def _mknp(self, xs): try: import numpy return numpy.array(xs) except ImportError: return xs def xVals(self): - return self._mknp([p.x() for p in self.points()]) + return self._mknp([p.x for p in self.points]) def xMins(self): """All x low values.""" - return self._mknp([p.xMin() for p in self.points()]) + return self._mknp([p.xMin for p in self.points]) def xMaxs(self): """All x high values.""" - return self._mknp([p.xMax() for p in self.points()]) + return self._mknp([p.xMax for p in self.points]) # TODO: xErrs def xMin(self): """Lowest x value.""" return min(self.xMins()) def xMax(self): """Highest x value.""" return max(self.xMaxs()) ## Convenience alias S1D = Scatter1D diff --git a/pyext/yoda/include/Scatter2D.pyx b/pyext/yoda/include/Scatter2D.pyx --- a/pyext/yoda/include/Scatter2D.pyx +++ b/pyext/yoda/include/Scatter2D.pyx @@ -1,294 +1,224 @@ cimport util cdef class Scatter2D(AnalysisObject): """ 2D scatter plot, i.e. a collection of Point2D objects with positions and errors. Constructor calling idioms: Scatter2D(path="", title="") Create a new empty scatter, with optional path and title. Scatter2D(points, path="", title=""): Create a new empty scatter from an iterable of points, with optional path and title. TODO: more documentation! """ cdef inline c.Scatter2D* s2ptr(self) except NULL: return self.ptr() def __init__(self, *args, **kwargs): util.try_loop([self.__init_2, self.__init_3], *args, **kwargs) def __init_2(self, path="", title=""): path = path.encode('utf-8') title = title.encode('utf-8') cutil.set_owned_ptr(self, new c.Scatter2D(path, title)) def __init_3(self, points, path="", title=""): self.__init_2(path, title) self.addPoints(points) def clone(self): """() -> Scatter2D. Clone this Scatter2D.""" return cutil.new_owned_cls(Scatter2D, self.s2ptr().newclone()) def __repr__(self): - return "<%s '%s' %d points>" % (self.__class__.__name__, self.path(), len(self.points())) + return "<%s '%s' %d points>" % (self.__class__.__name__, self.path, len(self.points)) - #@property + @property def numPoints(self): """() -> int Number of points in this scatter.""" return self.s2ptr().numPoints() def __len__(self): - return self.numPoints() + return self.numPoints - #@property + @property def points(self): """Access the ordered list of points.""" - return [self.point(i) for i in xrange(self.numPoints())] + return [self.point(i) for i in xrange(self.numPoints)] def point(self, size_t i): """Access the i'th point.""" return cutil.new_borrowed_cls(Point2D, &self.s2ptr().point(i), self) def __getitem__(self, py_ix): - cdef size_t i = cutil.pythonic_index(py_ix, self.numPoints()) + cdef size_t i = cutil.pythonic_index(py_ix, self.s2ptr().numPoints()) return cutil.new_borrowed_cls(Point2D, &self.s2ptr().point(i), self) def addPoint(self, *args, **kwargs): """Add a new point. Provide either a single yoda.Point2D object, or the four args: x, y, xerrs=0, yerrs=0. 
""" try: self.__addPoint_point(*args, **kwargs) except TypeError: self.__addPoint_explicit(*args, **kwargs) def __addPoint_explicit(self, x, y, xerrs=0, yerrs=0): self.__addPoint_point(Point2D(x, y, xerrs, yerrs)) def __addPoint_point(self, Point2D p): self.s2ptr().addPoint(p.p2ptr()[0]) def addPoints(self, iterable): """Add several new points.""" for row in iterable: try: self.addPoint(*row) except TypeError: self.addPoint(row) def combineWith(self, others): """Try to add points from other Scatter2Ds into this one.""" cdef Scatter2D other try: # Can we type it as a Scatter2D? other = others except TypeError: # Could be an iterable... for other in others: self.s2ptr().combineWith(deref(other.s2ptr())) else: self.s2ptr().combineWith(deref(other.s2ptr())) def mkScatter(self): """None -> Scatter2D. Make a new Scatter2D. Exists to allow mkScatter calls on any AnalysisObject, even if it already is a scatter.""" cdef c.Scatter2D s2 = c.mkScatter_Scatter2D(deref(self.s2ptr())) return cutil.new_owned_cls(Scatter2D, s2.newclone()) def scaleX(self, a): """(float) -> None Scale the x values and errors of the points in this scatter by factor a.""" self.s2ptr().scaleX(a) def scaleY(self, a): """(float) -> None Scale the y values and errors of the points in this scatter by factor a.""" self.s2ptr().scaleY(a) def scaleXY(self, ax=1.0, ay=1.0): """(float=1, float=1) -> None Scale the values and errors of the points in this scatter by factors ax, ay.""" self.s2ptr().scaleXY(ax, ay) # TODO: remove def scale(self, ax=1.0, ay=1.0): """(float=1, float=1) -> None DEPRECATED: USE scaleXY Scale the values and errors of the points in this scatter by factors ax, ay.""" self.scaleXY(ax, ay) def transformX(self, f): """(fn) -> None Transform the x values and errors of the points in this scatter by function f.""" import ctypes try: callback = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double)(f) except: raise RuntimeError("Callback is not of type (double) -> double") fptr = (ctypes.addressof(callback))[0] c.Scatter2D_transformX(deref(self.s2ptr()), fptr) def transformY(self, f): """(fn) -> None Transform the y values and errors of the points in this scatter by function f.""" import ctypes try: callback = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double)(f) except: raise RuntimeError("Callback is not of type (double) -> double") fptr = (ctypes.addressof(callback))[0] c.Scatter2D_transformY(deref(self.s2ptr()), fptr) - - def parseVariations(self): - """None -> None - Parse the YAML which contains the variations stored in the poins of the Scatter. - Only needs to be done once!""" - return self.s2ptr().parseVariations() def variations(self): """None -> vector[string] Get the list of variations stored in the poins of the Scatter""" return self.s2ptr().variations() - def _mknp(self, xs): - try: - import numpy - return numpy.array(xs) - except ImportError: - return xs - - - def covarianceMatrix(self, *ignoreOffDiagonalTerms): - """bool -> vector[vector[float]] - Construct the covariance matrix""" - return self._mknp(self.s2ptr().covarianceMatrix(ignoreOffDiagonalTerms)) - # # TODO: remove? # def __add__(Scatter2D self, Scatter2D other): # return cutil.new_owned_cls(Scatter2D, c.Scatter2D_add_Scatter2D(self.s2ptr(), other.s2ptr())) # # TODO: remove? 
# def __sub__(Scatter2D self, Scatter2D other): # return cutil.new_owned_cls(Scatter2D, c.Scatter2D_sub_Scatter2D(self.s2ptr(), other.s2ptr())) - def hasValidErrorBreakdown(self): - """ - Check if the AO's error breakdown is not empty and has no bins withh 0 uncertainty - """ - counter = -1 - for p in self.points(): - counter += 1 - binErrs = p.errMap() - if len(binErrs) < 2: - return False - binTotal = [0.,0.] - for sys, err in binErrs.iteritems(): - binTotal[0] = (binTotal[0]**2 + err[0]**2)**0.5 - binTotal[1] = (binTotal[1]**2 + err[1]**2)**0.5 - if binTotal[0] == 0 and binTotal[1] == 0: - return False - return True - - def correlationMatrix(self): - """ - `covMatrix` numpy matrix - Convert a covariance matrix to a correlation matrix (ie normalise entry in i,j by uncertainty of bin i * uncertainty in bin j) - """ - covMatrix = self.covarianceMatrix() - nbins = len(covMatrix) - corr = [[0 for i in range(nbins)] for j in range (nbins)] - for i in range(nbins): - sigma_i = covMatrix[i][i] - for j in range(nbins): - sigma_j = covMatrix[j][j] - corr[i][j] = covMatrix[i][j] / (sigma_i * sigma_j)**0.5 - return self._mknp(corr) - + def _mknp(self, xs): + try: + import numpy + return numpy.array(xs) + except ImportError: + return xs def xVals(self): - return self._mknp([p.x() for p in self.points()]) + return self._mknp([p.x for p in self.points]) def xMins(self): """All x low values.""" - # TODO: add extra dimensionality for multiple errors? - return self._mknp([p.xMin() for p in self.points()]) + return self._mknp([p.xMin for p in self.points]) def xMaxs(self): """All x high values.""" - # TODO: add extra dimensionality for multiple errors? - return self._mknp([p.xMax() for p in self.points()]) + return self._mknp([p.xMax for p in self.points]) - def xErrs(self): - """All x error pairs""" - # TODO: add extra dimensionality for multiple errors? - return self._mknp([p.xErrs() for p in self.points()]) - - def xErrAvgs(self): - """All x average errors""" - # TODO: add extra dimensionality for multiple errors? - return self._mknp([p.xAvgErr() for p in self.points()]) + # TODO: xErrs def xMin(self): """Lowest x value.""" - # TODO: add extra dimensionality for multiple errors? return min(self.xMins()) def xMax(self): """Highest x value.""" - # TODO: add extra dimensionality for multiple errors? return max(self.xMaxs()) def yVals(self): - return self._mknp([p.y() for p in self.points()]) + return self._mknp([p.y for p in self.points]) def yMins(self): """All y low values.""" - # TODO: add extra dimensionality for multiple errors? - return self._mknp([p.yMin() for p in self.points()]) + return self._mknp([p.yMin for p in self.points]) def yMaxs(self): """All y high values.""" - # TODO: add extra dimensionality for multiple errors? - return self._mknp([p.yMax() for p in self.points()]) + return self._mknp([p.yMax for p in self.points]) - def yErrs(self): - """All y error pairs""" - # TODO: add extra dimensionality for multiple errors? - return self._mknp([p.yErrs() for p in self.points()]) - - def yErrAvgs(self): - """All y average errors""" - # TODO: add extra dimensionality for multiple errors? - return self._mknp([p.yAvgErr() for p in self.points()]) + # TODO: yErrs def yMin(self): """Lowest x value.""" - # TODO: add extra dimensionality for multiple errors? return min(self.yMins()) def yMax(self): """Highest y value.""" - # TODO: add extra dimensionality for multiple errors? 
return max(self.yMaxs()) ## Convenience alias S2D = Scatter2D diff --git a/pyext/yoda/include/Scatter3D.pyx b/pyext/yoda/include/Scatter3D.pyx --- a/pyext/yoda/include/Scatter3D.pyx +++ b/pyext/yoda/include/Scatter3D.pyx @@ -1,263 +1,263 @@ cimport util cdef class Scatter3D(AnalysisObject): """ 3D scatter plot, i.e. a collection of Point3D objects with positions and errors. Constructor calling idioms: Scatter3D(path="", title="") Create a new empty scatter, with optional path and title. Scatter3D(points, path="", title=""): Create a new empty scatter from an iterable of points, with optional path and title. TODO: more documentation! """ cdef inline c.Scatter3D* s3ptr(self) except NULL: return self.ptr() def __init__(self, *args, **kwargs): util.try_loop([self.__init_2, self.__init_3], *args, **kwargs) def __init_2(self, path="", title=""): path = path.encode('utf-8') title = title.encode('utf-8') cutil.set_owned_ptr(self, new c.Scatter3D(path, title)) def __init_3(self, points, char* path="", char* title=""): self.__init_2(path, title) self.addPoints(points) def clone(self): """() -> Scatter3D. Clone this Scatter3D.""" return cutil.new_owned_cls(Scatter3D, self.s3ptr().newclone()) def __repr__(self): - return "<%s '%s' %d points>" % (self.__class__.__name__, self.path(), len(self.points())) + return "<%s '%s' %d points>" % (self.__class__.__name__, self.path, len(self.points)) - #@property + @property def numPoints(self): """() -> int Number of points in this scatter.""" return self.s3ptr().numPoints() def __len__(self): - return self.numPoints() + return self.numPoints - #@property + @property def points(self): """Access the ordered list of points.""" - return [self.point(i) for i in xrange(self.numPoints())] + return [self.point(i) for i in xrange(self.numPoints)] def point(self, size_t i): """Access the i'th point.""" return cutil.new_borrowed_cls(Point3D, &self.s3ptr().point(i), self) def __getitem__(self, py_ix): - cdef size_t i = cutil.pythonic_index(py_ix, self.numPoints()) + cdef size_t i = cutil.pythonic_index(py_ix, self.s3ptr().numPoints()) return cutil.new_borrowed_cls(Point3D, &self.s3ptr().point(i), self) def addPoint(self, *args, **kwargs): """Add a new point. Provide either a single yoda.Point3D object, or the 3-6 args: x, y, z, xerrs=0, yerrs=0, zerrs=0. """ try: self.__addPoint_point(*args, **kwargs) except TypeError: self.__addPoint_explicit(*args, **kwargs) def __addPoint_explicit(self, x, y, z, xerrs=0, yerrs=0, zerrs=0): self.__addPoint_point(Point3D(x, y, z, xerrs, yerrs, zerrs)) def __addPoint_point(self, Point3D p): self.s3ptr().addPoint(p.p3ptr()[0]) def addPoints(self, iterable): """Add several new points.""" for row in iterable: try: self.addPoint(*row) except TypeError: self.addPoint(row) def combineWith(self, others): """Try to add points from other Scatter3Ds into this one.""" cdef Scatter3D other try: # Can we type it as a Scatter3D? other = others except TypeError: # Could be an iterable... for other in others: self.s3ptr().combineWith(deref(other.s3ptr())) else: self.s3ptr().combineWith(deref(other.s3ptr())) def mkScatter(self): """None -> Scatter3D. Make a new Scatter3D. 
Exists to allow mkScatter calls on any AnalysisObject, even if it already is a scatter.""" cdef c.Scatter3D s3 = c.mkScatter_Scatter3D(deref(self.s3ptr())) return cutil.new_owned_cls(Scatter3D, s3.newclone()) def scaleX(self, a): """(float) -> None Scale the x values and errors of the points in this scatter by factor a.""" self.s3ptr().scaleX(a) def scaleY(self, a): """(float) -> None Scale the y values and errors of the points in this scatter by factor a.""" self.s3ptr().scaleY(a) def scaleZ(self, a): """(float) -> None Scale the z values and errors of the points in this scatter by factor a.""" self.s3ptr().scaleZ(a) def scaleXYZ(self, ax=1, ay=1, az=1): """(float=1, float=1, float=1) -> None Scale the values and errors of the points in this scatter by factors ax, ay, az.""" self.s3ptr().scaleXYZ(ax, ay, az) # TODO: remove def scale(self, ax=1, ay=1, az=1): """(float=1, float=1, float=1) -> None DEPRECATED: USE scaleXYZ Scale the values and errors of the points in this scatter by factors ax, ay, az.""" self.scaleXYZ(ax, ay, az) def transformX(self, f): """(fn) -> None Transform the x values and errors of the points in this scatter by function f.""" import ctypes try: callback = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double)(f) except: raise RuntimeError("Callback is not of type (double) -> double") fptr = (ctypes.addressof(callback))[0] c.Scatter3D_transformX(deref(self.s3ptr()), fptr) def transformY(self, f): """(fn) -> None Transform the y values and errors of the points in this scatter by function f.""" import ctypes try: callback = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double)(f) except: raise RuntimeError("Callback is not of type (double) -> double") fptr = (ctypes.addressof(callback))[0] c.Scatter3D_transformY(deref(self.s3ptr()), fptr) def transformZ(self, f): """(fn) -> None Transform the z values and errors of the points in this scatter by function f.""" import ctypes try: callback = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double)(f) except: raise RuntimeError("Callback is not of type (double) -> double") fptr = (ctypes.addressof(callback))[0] c.Scatter3D_transformZ(deref(self.s3ptr()), fptr) # # TODO: remove? # def __add__(Scatter3D self, Scatter3D other): # return cutil.new_owned_cls(Scatter3D, c.Scatter3D_add_Scatter3D(self.s3ptr(), other.s3ptr())) # # TODO: remove? 
# def __sub__(Scatter3D self, Scatter3D other): # return cutil.new_owned_cls(Scatter3D, c.Scatter3D_sub_Scatter3D(self.s3ptr(), other.s3ptr())) def variations(self): """None -> vector[string] Get the list of variations stored in the points of the Scatter""" return self.s3ptr().variations() def _mknp(self, xs): try: import numpy return numpy.array(xs) except ImportError: return xs def xVals(self): - return self._mknp([p.x() for p in self.points()]) + return self._mknp([p.x for p in self.points]) def xMins(self): """All x low values.""" - return self._mknp([p.xMin() for p in self.points()]) + return self._mknp([p.xMin for p in self.points]) def xMaxs(self): """All x high values.""" - return self._mknp([p.xMax() for p in self.points()]) + return self._mknp([p.xMax for p in self.points]) # TODO: xErrs def xMin(self): """Lowest x value.""" return min(self.xMins()) def xMax(self): """Highest x value.""" return max(self.xMaxs()) def yVals(self): - return self._mknp([p.y() for p in self.points()]) + return self._mknp([p.y for p in self.points]) def yMins(self): """All x low values.""" - return self._mknp([p.yMin() for p in self.points()]) + return self._mknp([p.yMin for p in self.points]) def yMaxs(self): """All x high values.""" - return self._mknp([p.yMax() for p in self.points()]) + return self._mknp([p.yMax for p in self.points]) # TODO: yErrs def yMin(self): """Lowest x value.""" return min(self.yMins()) def yMax(self): """Highest y value.""" return max(self.yMaxs()) def zVals(self): - return self._mknp([p.z() for p in self.points()]) + return self._mknp([p.z for p in self.points]) def zMins(self): """All z low values.""" - return self._mknp([p.zMin() for p in self.points()]) + return self._mknp([p.zMin for p in self.points]) def zMaxs(self): """All z high values.""" - return self._mknp([p.zMax() for p in self.points()]) + return self._mknp([p.zMax for p in self.points]) # TODO: zErrs def zMin(self): """Lowest z value.""" return min(self.zMins()) def zMax(self): """Highest z value.""" return max(self.zMaxs()) ## Convenience alias S3D = Scatter3D diff --git a/pyext/yoda/plotting.py b/pyext/yoda/plotting.py --- a/pyext/yoda/plotting.py +++ b/pyext/yoda/plotting.py @@ -1,453 +1,450 @@ # -*- python -*- """ Plotting utilities, particularly for interaction with matplotlib and Rivet make-plots """ import yoda import sys import numpy as np import matplotlib as mpl # TODO: Move to core objects # def same_binning_as(self, other): # if self.dim != other.dim: # return False # if not (other.x == self.x).all() and \ # (other.exminus == self.exminus).all() and \ # (other.explus == self.explus).all(): # return False # if self.dim == 2: # return True # return (other.y == self.y).all() and \ # (other.eyminus == self.eyminus).all() and \ # (other.eyplus == self.eyplus).all() def read_plot_keys(datfile): import re re_begin = re.compile("#*\s*BEGIN\s+PLOT\s*(\w*)") re_comment = re.compile("#.*") re_attr = re.compile("(\w+)\s*=\s*(.*)") re_end = re.compile("#*\s*END\s+PLOT\s+\w*") plotkeys = {} with open(datfile) as f: inplot = False name = None for line in f: l = line.strip() if re_begin.match(l): inplot = True name = re_begin.match(l).group(1) elif re_end.match(l): inplot = False name = None elif re_comment.match(l): continue elif inplot: m = re_attr.match(l) if m is None: continue plotkeys.setdefault(name, {})[m.group(1)] = m.group(2) return plotkeys def mplinit(engine="MPL", font="TeX Gyre Pagella", fontsize=17, mfont=None, textfigs=True): """One-liner matplotlib (mpl) setup. 
By default mpl will be configured with its native MathText rendering backend, and a Palatino-like font for both text and math contexts, using 'lower-case numerals' if supported. Setting the engine to 'TEX' will use standard mpl rendering, with calls to LaTeX for axis labels and other text; setting it to 'PGF' will use the TeX PGF renderer: both these modes are much slower than MPL mode, but the latter only supports a limited set of LaTeX macros and does not render as nicely as the TeX backends. The font and mfont optional arguments can be used to choose a different text font and math font respectively; if mfont is None, it defaults to the same as the text font. The textfigs boolean argument can be set false to disable the lower-case/text/old-style numerals and use 'upper-case' numerals everywhere. These options do not currently apply to the MPL rendering engine. """ mpl.rcParams.update({ "text.usetex" : (engine != "MPL"), "font.size" : int(fontsize), "font.family" : "serif", #< TODO: make configurable? auto-detect? }) texpreamble = [r"\usepackage{amsmath,amssymb}", r"\usepackage{mathspec}"] mfont = mfont if mfont else font fontopts = "[Numbers=OldStyle]" if textfigs else "" mfontopts = fontopts.replace("]", ",") + "Scale=MatchUppercase" + "]" texpreamble.append( r"\setmainfont{fopts}{{{font}}}".format(fopts=fontopts, font=font) ) texpreamble.append( r"\setmathsfont(Digits,Latin){fopts}{{{font}}}".format(fopts=mfontopts, font=mfont) ) if engine.upper() == "PGF": mpl.use("pgf") mpl.rcParams["pgf.preamble"] = texpreamble # TODO: Fix? # elif engine.upper() == "TEX": # mpl.rcParams["text.latex.preamble"] = texpreamble return mpl ## Alias initmpl = mplinit setup_mpl = mplinit def show(): """ Convenience call to matplotlib.pyplot.show() NOTE: done this way to avoid import of pyplot before mplinit() or mpl.use() has been (optionally) called. """ import matplotlib.pyplot as plt plt.show() def mk_figaxes_1d(ratio=True, title=None, figsize=(8,6)): "Make a standard main+ratio plot figure and subplot layout" ## We need to use pyplot here to set up the backend-specific canvas import matplotlib.pyplot as plt fig = plt.figure(figsize=figsize) #fig = mpl.figure.Figure(figsize=figsize, tight_layout=True) if title: fig.suptitle(title, horizontalalignment="left", x=0.13) ## Make axes. 
GridSpec may not be available, in which case fall back ~gracefully axmain, axratio = None, None if ratio: try: gs = mpl.gridspec.GridSpec(2, 1, height_ratios=[3,1], hspace=0) axmain = fig.add_subplot(gs[0]) #axmain.hold(True) axratio = fig.add_subplot(gs[1], sharex=axmain) #axratio.hold(True) axratio.axhline(1.0, color="gray") #< Ratio = 1 marker line except: sys.stderr.write("matplotlib.gridspec not available: falling back to plotting without a ratio\n") ratio = False if not ratio: axmain = fig.add_subplot(1,1,1) #axmain.hold(True) return fig, (axmain, axratio) def set_axis_labels_1d(axmain, axratio, xlabel=None, ylabel=None, ratioylabel=None): axmain.set_ylabel(ylabel, y=1, ha="right", labelpad=None) if axratio: axmain.xaxis.set_major_locator(mpl.ticker.NullLocator()) axratio.set_xlabel(xlabel, x=1, ha="right", labelpad=None) axratio.set_ylabel(ratioylabel) else: axmain.set_xlabel(xlabel, x=1, ha="right", labelpad=None) def mk_lowcase_dict(d): "Convert the keys of a str->obj dict to lower-case" return dict((k.lower(), v) for (k,v) in d.items()) # TODO: Needs generalisation for 2D marginal axes) def setup_axes_1d(axmain, axratio, **plotkeys): ## Case-insensitize the plotkeys dict plotkeys = mk_lowcase_dict(plotkeys) ## Axis labels first xlabel = plotkeys.get("xlabel", "") ylabel = plotkeys.get("xlabel", "") ratioylabel = plotkeys.get("ratioylabel", "ratio") set_axis_labels_1d(axmain, axratio, xlabel, ylabel, ratioylabel) ## log/lin measures # TODO: Dynamic default based on data ranges? # TODO: take log axes and preference for round numbers into account in setting default axis limits xmeasure = "log" if yoda.util.as_bool(plotkeys.get("logX", False)) else "linear" ymeasure = "log" if yoda.util.as_bool(plotkeys.get("logY", False)) else "linear" ratioymeasure = "log" if yoda.util.as_bool(plotkeys.get("ratiology", False)) else "linear" axmain.set_xscale(xmeasure) axmain.set_yscale(ymeasure) if axratio: axratio.set_xscale(xmeasure) axratio.set_yscale(ratioymeasure) ## Plot range limits if "ymin" in plotkeys: axmain.set_ylim(bottom=float(plotkeys.get("ymin"))) if "ymax" in plotkeys: axmain.set_ylim(top=float(plotkeys.get("ymax"))) # if "xmin" in plotkeys: axmain.set_xlim(left=float(plotkeys.get("xmin"))) if "xmax" in plotkeys: axmain.set_xlim(right=float(plotkeys.get("xmax"))) # if axratio: # TODO: RatioSymmRange option # axratio.set_xlim([xmin-0.001*xdiff, xmax+0.001*xdiff]) # <- TODO: bad on a log scale! if "xmin" in plotkeys: axratio.set_xlim(left=float(plotkeys.get("xmin"))) if "xmax" in plotkeys: axratio.set_xlim(right=float(plotkeys.get("xmax"))) if "ratioymin" in plotkeys: axratio.set_ylim(bottom=float(plotkeys.get("ratioymin"))) if "ratioymax" in plotkeys: axratio.set_ylim(top=float(plotkeys.get("ratioymax"))) # TODO: Ratio plot manual ticks def plot_hist_on_axes_1d(axmain, axratio, h, href=None, default_color="black", default_linestyle="-", **plotkeys): ## Case-insensitize the plotkeys dict - hkeys = mk_lowcase_dict(h.annotationsDict()) + hkeys = mk_lowcase_dict(h.annotationsDict) hkeys.update(plotkeys) plotkeys = hkeys # TODO: Split into different plot styles: line/filled/range, step/diag/smooth, ...? 
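# Usage sketch (assumption: setAnnotation is the usual AnalysisObject annotation
# setter; the values shown are illustrative). The style keys looked up below are
# read case-insensitively from the histogram's annotations merged with any explicit
# plotkeys, so drawing can be steered per object before calling plot():
#
#   h.setAnnotation("Color", "red")        # line/marker/errorbar colour
#   h.setAnnotation("ErrorBars", "1")      # draw error bars
#   h.setAnnotation("Title", "Data")       # used as the legend label
#   href.setAnnotation("RatioRef", "1")    # picked up by plot() as the ratio reference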
## Styles default_color = plotkeys.get("color", default_color) marker = plotkeys.get("marker", plotkeys.get("polymarker", None)) # <- make-plots translation marker = {"*":"o"}.get(marker, marker) # <- make-plots translation mcolor = plotkeys.get("linecolor", default_color) errbar = plotkeys.get("errorbars", None) ecolor = plotkeys.get("errorbarscolor", default_color) line = plotkeys.get("line", None) lcolor = plotkeys.get("linecolor", default_color) lstyle = plotkeys.get("linestyle", default_linestyle) lstyle = {"solid":"-", "dashed":"--", "dotdashed":"-.", "dashdotted":"-.", "dotted":":"}.get(lstyle, lstyle) # <- make-plots translation lwidth = 1.4 msize = 7 ## If no drawing is enabled, default to a step line if not any([marker, line, errbar]): line = "step" ## MPL plotting # TODO: Split this into different functions for each kind of data preparation (and smoothing as an extra function?) # TODO: First convert h to scatter artists = None if errbar: artists = axmain.errorbar(h.xVals(), h.yVals(), xerr=h.xErrs(), yerr=h.yErrs(), color=ecolor, linestyle="none", linewidth=lwidth, capthick=lwidth) # linestyle="-", marker="o", if line == "step": artists = axmain.step(np.append(h.xMins(), h.xMax()), np.append(h.yVals(), h.yVals()[-1]), where="post", color=lcolor, linestyle=lstyle, linewidth=lwidth) elif line == "diag": artists = axmain.plot(h.xVals(), h.yVals(), color=lcolor, linestyle=lstyle, linewidth=lwidth) elif line == "smooth": from scipy.interpolate import spline xnew = np.linspace(min(h.xVals()), max(h.xVals()), 3*h.numBins) ynew = spline(h.xVals(), h.yVals(), xnew) artists = axmain.plot(xnew, ynew, color=lcolor, linestyle=lstyle, linewidth=lwidth) if marker: artists = axmain.plot(h.xVals(), h.yVals(), marker=marker, markersize=msize, linestyle="none", color=mcolor, markeredgecolor=mcolor) ## Legend entry - if h.annotation("Title") and artists: - artists[0].set_label(h.annotation("Title")) + if h.title and artists: + artists[0].set_label(h.title) ## Ratio ratioartists = None if href and h is not href: # TODO: exclude and specify order via RatioIndex # assert h.same_binning_as(href) # TODO: log ratio or #sigma deviation yratios = np.array(h.yVals())/np.array(href.yVals()) # TODO: Same styling control as for main plot (with Ratio prefix, default to main plot style) ## Stepped plot ratioartists = axratio.step(np.append(href.xMins(), href.xMax()), np.append(yratios, yratios[-1]), where="post", color=lcolor, linestyle=lstyle, linewidth=lwidth) # TODO: Diag plot # axratio.plot(href["x"], yratios, color="r", linestyle="--") # TODO: Smoothed plot return artists def plot(hs, outfile=None, ratio=True, show=False, axmain=None, axratio=None, **plotkeys): """ Plot the given histograms on a single figure, returning (fig, (main_axis, ratio_axis)). Show to screen if the second arg is True, and saving to outfile if it is otherwise non-null. """ ## Case-insensitize the plotkeys dict plotkeys = mk_lowcase_dict(plotkeys) ## Handle single histo args if isinstance(hs, yoda.AnalysisObject): hs = [hs,] ratio = False ## Get data ranges (calculated or forced) xmin = float(plotkeys.get("xmin", min(h.xMin() for h in hs))) xmax = float(plotkeys.get("xmax", max(h.xMax() for h in hs))) xdiff = xmax - xmin # print xmin, xmax, xdiff # TODO: Tweak max-padding for top tick label... 
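The style lookups above read per-histogram annotations case-insensitively, following the make-plots key names (Line, LineStyle, LineColor, ErrorBars, Marker/PolyMarker, ...). Below is a hedged sketch of tagging a histogram with such annotations; the particular key values are illustrative only, and the usual AnalysisObject.setAnnotation() accessor is assumed.

import yoda

# Annotations travel with the analysis object and are picked up by
# plot_hist_on_axes_1d()/plot() via annotationsDict above.
h = yoda.Histo1D(10, 0.0, 1.0, path="/demo", title="Toy histogram")
for x in (0.1, 0.3, 0.3, 0.7):
    h.fill(x)
h.setAnnotation("Line", "step")        # stepped outline (the default when nothing is set)
h.setAnnotation("LineStyle", "dashed") # translated to the mpl "--" style above
h.setAnnotation("LineColor", "blue")
h.setAnnotation("ErrorBars", "1")      # also draw error bars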
sensitive to log/lin measure - ymin = plotkeys.get("ymin", min(min(h.yVals()) for h in hs)) - print( max(max(h.yVals()) for h in hs) ) - ymax = plotkeys.get("ymax", 1.1*max(max(h.yVals()) for h in hs)) - ymin = float(ymin) - ymax = float(ymax) + ymin = float(plotkeys.get("ymin", min(min(h.yVals()) for h in hs))) + ymax = float(plotkeys.get("ymax", 1.1*max(max(h.yVals()) for h in hs))) ydiff = ymax - ymin # print ymin, ymax, ydiff ## Identify reference histo by annotation (unless explicitly disabled) href = None # TODO: Use ratio to setdefault RatioPlot in plotkeys, then use that to decide whether to look for href if ratio: for h in hs: hkeys = mk_lowcase_dict(h.annotationsDict) if yoda.util.as_bool(hkeys.get("ratioref", False)): if href is None: href = h else: #print "Multiple ratio references set: using first value = {}".format(href.path) break if href is None: #< no ref found -- maybe all were disabled? ratio = False ## Make figure and subplot grid layout title = plotkeys.get("title", "") if not axmain: fig, (axmain, axratio) = mk_figaxes_1d(ratio and not axratio, title) else: fig = axmain.get_figure() ## Setup axes appearances axmain.set_xlim([xmin, xmax]) axmain.set_ylim([ymin, ymax]) if axratio: axratio.set_xlim([xmin, xmax]) axratio.set_ylim(auto=True) setup_axes_1d(axmain, axratio, **plotkeys) # TODO: specify ratio display in log/lin, abs, or #sigma, and as x/r or (x-r)/r ## Draw ratio error band (do this before looping over cmp lines) # TODO: Actually we can call this when we hit the href, and force the zorder into groups: bands, lines, dots, legend, text, frame if axratio: ref_ymax_ratios = np.array(href.yMaxs())/np.array(href.yVals()) ref_ymin_ratios = np.array(href.yMins())/np.array(href.yVals()) # TODO: Diag: (needs -> limit handling at ends) # axratio.fill_between(href.x, ref_ymin_ratios, ref_ymax_ratios, edgecolor="none", facecolor=ratioerrcolor, interpolate=False) # Stepped: def xedges_dbl(h): edges = np.empty((2*len(h.xVals()),)) edges[0::2] = h.xMins() edges[1::2] = h.xMaxs() return edges def dbl_array(arr): return sum(([x,x] for x in arr), []) ratioerrcolor = plotkeys.get("ratioerrcolor", "yellow") axratio.fill_between(xedges_dbl(href), dbl_array(ref_ymin_ratios), dbl_array(ref_ymax_ratios), edgecolor="none", facecolor=ratioerrcolor) # TODO: Smoothed: (needs -> limit handling at ends) # Redraw ratio = 1 marker line: axratio.axhline(1.0, color="gray") COLORS = ["red", "blue", "magenta", "orange", "green"] LSTYLES = ["-", "--", "-.", ":"] ## Dataset plotting some_valid_label = False for ih, h in enumerate(hs): #print ih, h.path aa = plot_hist_on_axes_1d(axmain, axratio, h, href, COLORS[ih % len(COLORS)], LSTYLES[ih % len(LSTYLES)]) if aa and not aa[0].get_label().startswith("_"): # print "@@@", aa[0].get_label() some_valid_label = True ## Legend # TODO: allow excluding and specify order via LegendIndex if some_valid_label: #< No point in writing a legend if there are no labels pass #axmain.legend(loc=plotkeys.get("LegendPos", "best"), fontsize=plotkeys.get("LegendFontSize", "x-small"), frameon=False) ## Tweak layout now that everything is in place # TODO: merge tight_layout() into the Figure constructor, and maybe the ratio ticker when retrospectively drawing the zorder'ed err band if axratio: axratio.yaxis.set_major_locator(mpl.ticker.MaxNLocator(4, prune="upper")) fig.tight_layout() ## Save to an image file if we were asked to if outfile: #print "Saving to " + outfile fig.savefig(outfile) ## Show to screen if requested if show: import matplotlib.pyplot as plt plt.show() 
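A hedged end-to-end sketch of the plot() entry point defined above, comparing two toy histograms on one canvas without a ratio panel; the import path and output file name are assumptions. Setting a RatioRef annotation on one input (with ratio=True) would additionally select it as the reference for the ratio panel, as coded above.

import random
import yoda
import yoda.plotting as yp   # assumed import path

h1 = yoda.Histo1D(10, 0.0, 1.0, path="/toy/a", title="Sample A")
h2 = yoda.Histo1D(10, 0.0, 1.0, path="/toy/b", title="Sample B")
for _ in range(1000):
    h1.fill(random.random())
    h2.fill(random.gauss(0.5, 0.2))   # out-of-range fills just land in the overflow

# One call builds the figure, styles the axes and saves the image.
fig, (axmain, axratio) = yp.plot([h1, h2], outfile="compare.png", ratio=False)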
## Return the figure objects return fig, (axmain, axratio) ## Aliases plot_hists_1d = plot plot_hist_1d = plot def _plot1arg(args): "Helper function for mplot, until Py >= 3.3 multiprocessing.pool.starmap() is available" return plot(*args) def nplot(hs, outfiles=None, ratio=True, show=False, nproc=1, **plotkeys): """ Plot the given list of histogram(s), cf. many calls to plot(). hs must be an iterable, each entry of which will be the content of a single plot: the entries can either be single histograms or lists of histograms, i.e. either kind of valid first argument to plot(). Outfiles must be an iterable corresponding to hs, and ratio may either be a bool or such an iterable. The return value is a list of the return tuples from each call to plot(), of the same length as the hs arg. MULTIPROCESSING -- *WARNING* CURRENTLY BROKEN The main point of this function, other than convenience, is that the Python multiprocessing module can be used to distribute the work on to multiple parallel processes. The nproc argument should be the integer number of parallel processes on which to distribute the plotting. nproc = None (the default value) will use Ncpu-1 or 1 process, whichever is larger. If nproc = 1, multiprocessing will not be used -- this avoids overhead and eases debugging. """ argslist = [] for i, hs_arg in enumerate(hs): outfile_arg = outfiles[i] if outfiles else None ratio_arg = ratio[i] if hasattr(ratio, "__iter__") else ratio show_arg = False #< we just do this once, at the end plotkeys_arg = plotkeys if type(plotkeys) is dict else plotkeys[i] argslist.append( (hs_arg, outfile_arg, ratio_arg, show_arg, None, None, plotkeys_arg) ) #print argslist # TODO: make the multiprocessing work import multiprocessing nproc = nproc or multiprocessing.cpu_count() or 1 if nproc > 1: pool = multiprocessing.Pool(processes=nproc) res = pool.map_async(_plot1arg, argslist) rtn = res.get() else: ## Run this way in the 1 proc case for easier debugging rtn = [_plot1arg(args) for args in argslist] if show: import matplotlib.pyplot as plt plt.show() return rtn diff --git a/pyext/yoda/util.pxd b/pyext/yoda/util.pxd --- a/pyext/yoda/util.pxd +++ b/pyext/yoda/util.pxd @@ -1,72 +1,72 @@ ## A base CppObject which prevents null pointer access cdef class Base: cdef void* _ptr cdef bint _deallocate cdef object _owner cdef inline void* ptr(self) except NULL: if self._ptr == NULL: raise MemoryError('Null pointer referenced: perhaps the class is uninitialised.') return self._ptr ## Magic for setting pointers ## Use this for setting a pointer that is owned by the object obj ## e.g. if you want deallocation to happen automatically when this object goes ## out of scope cdef inline set_owned_ptr(Base obj, void* ptr): obj._ptr = ptr obj._deallocate = True ## Use this for setting a pointer that is *not* owned by the object obj e.g. if ## you were given this object by a class which will deallocate it once it's ## finished cdef inline set_borrowed_ptr(Base obj, void* ptr, object owner): obj._ptr = ptr obj._deallocate = False obj._owner = owner ## Use this to create a new object of type cls from the pointer ptr. The class is ## one which owns its pointer, and will deallocate when it's done. It's this one ## that you want to use for e.g. loaders or factory methods where the user is ## expected to manage memory. Or where you've explicitly called new. 
cdef inline object new_owned_cls(object cls, void* ptr): obj = cls.__new__(cls) set_owned_ptr(obj, ptr) return obj ## Use this to create a thin wrapper around a pointer that will *not* be ## deallocated when it goes out of scope. Useful when you're given a reference ## from a class which is not expecting you to delete its innards! cdef inline object new_borrowed_cls(object cls, void* ptr, object owner): obj = cls.__new__(cls) set_borrowed_ptr(obj, ptr, owner) return obj ## Translation utility to allow use of 'Pythonic' negative indices -cdef inline size_t pythonic_index(int i, size_t size) except ? 0: +cdef inline size_t pythonic_index(int i, size_t size) except? 0: if i < 0: i += size - if 0 <= i and i < size: + if 0 <= i < size: return i else: raise IndexError # ## Utils for handling error conversions to/from std::pair # from libcpp.pair cimport pair # cdef inline object read_edge_pair(pair[double, double] es): # return EdgePair(es.first, es.second) # cdef inline object read_error_pair(pair[double, double] es): # return ErrorPair(es.first, es.second) # cdef inline pair[double, double] read_symmetric(object val) except *: # try: # a, b = val # except TypeError: # a = b = val # return pair[double, double](a, b) diff --git a/pyext/yoda/util.pyx b/pyext/yoda/util.pyx --- a/pyext/yoda/util.pyx +++ b/pyext/yoda/util.pyx @@ -1,93 +1,88 @@ from collections import namedtuple from operator import itemgetter def as_bool(x): if type(x) is bool: return x s = str(x) if s.lower() in ("true", "yes", "on", "1", "1.0"): return True if s.lower() in ("false", "no", "off", "0", "0.0"): return False raise Exception("'{}' cannot be parsed as a boolean flag".format(s)) -def _autotype(var, autobool=False): +def _autotype(var, autobool=True): """Automatically convert strings to numerical types if possible.""" if type(var) is not str: return var - ## Convert via Python ast parser + if var.isdigit() or (var.startswith("-") and var[1:].isdigit()): + return int(var) try: - import ast - var = ast.literal_eval(var) - except: - # TODO: print a warning? - pass - ## Try friendly string conversions to bool - if autobool and type(var) is str: + return float(var) + except: pass + if autobool: try: - var = as_bool(var) - except: - pass - ## Finally return + return as_bool(var) + except: pass return var # def _autonp(var): # """Automatically return lists as numpy arrays if numpy is imported""" # if "numpy" in dir(): # return numpy.array(var) # elif "np" in dir(): # return np.array(var) # else: # return var def _autostr(var, precision=8): """Automatically format numerical types as the right sort of string.""" if type(var) is float: return ("% ." 
+ str(precision) + "e") % var elif not isinstance(var, (list,tuple)): return str(var) else: return ",".join(_autostr(subval) for subval in var) cdef class Base: pass def try_loop(fs, *args, char *_msg='Invalid arguments', **kwargs): for f in fs: try: f(*args, **kwargs) return except (TypeError, AttributeError): pass raise TypeError(_msg) XY = namedtuple('XY', ('x', 'y')) XYZ = namedtuple('XYZ', ('x', 'y', 'z')) EdgePair = namedtuple('EdgePair', ('low', 'high')) ErrorPair = namedtuple('ErrorPair', ('minus', 'plus')) ## Utils for handling error conversions to/from std::pair from libcpp.pair cimport pair def read_edge_pair(pair[double, double] es): return EdgePair(es.first, es.second) def read_error_pair(pair[double, double] es): return ErrorPair(es.first, es.second) def read_symmetric(val): try: a, b = val except TypeError: a = b = val return pair[double, double](a, b) diff --git a/src/Makefile.am b/src/Makefile.am --- a/src/Makefile.am +++ b/src/Makefile.am @@ -1,33 +1,30 @@ SUBDIRS = tinyxml yamlcpp . lib_LTLIBRARIES = libYODA.la libYODA_la_SOURCES = \ Exceptions.cc \ Reader.cc \ ReaderYODA.cc \ ReaderFLAT.cc \ ReaderAIDA.cc \ Writer.cc \ WriterYODA.cc \ WriterFLAT.cc \ WriterAIDA.cc \ Dbn0D.cc \ - Dbn1D.cc \ + Dbn1D.cc \ Counter.cc \ Histo1D.cc \ Histo2D.cc \ Profile1D.cc \ Profile2D.cc \ Scatter1D.cc \ Scatter2D.cc \ - Scatter3D.cc \ - Point1D.cc \ - Point2D.cc \ - Point3D.cc + Scatter3D.cc libYODA_la_LDFLAGS = -avoid-version libYODA_la_LIBADD = $(builddir)/tinyxml/libyoda-tinyxml.la $(builddir)/yamlcpp/libyoda-yaml-cpp.la libYODA_la_CPPFLAGS = $(AM_CPPFLAGS) -DTIXML_USE_STL -I$(srcdir)/yamlcpp -I$(srcdir) -DYAML_NAMESPACE=YODA_YAML EXTRA_DIST = zstr diff --git a/src/Point1D.cc b/src/Point1D.cc deleted file mode 100644 --- a/src/Point1D.cc +++ /dev/null @@ -1,10 +0,0 @@ -#include "YODA/Point1D.h" -#include "YODA/Scatter1D.h" - -namespace YODA { - - - void Point1D::getVariationsFromParent() const{ - if (this->getParentAO()) ((Scatter1D*) this->getParentAO())->parseVariations(); - } -} diff --git a/src/Point2D.cc b/src/Point2D.cc deleted file mode 100644 --- a/src/Point2D.cc +++ /dev/null @@ -1,16 +0,0 @@ -#include "YODA/Point2D.h" -#include "YODA/Scatter2D.h" - -namespace YODA { - - - /// Get error map for direction @a i - const std::map< std::string, std::pair> & Point2D::errMap() const { - getVariationsFromParent(); - return _ey; - } - - void Point2D::getVariationsFromParent() const{ - if (this->getParentAO()) ((Scatter2D*) this->getParentAO())->parseVariations(); - } -} diff --git a/src/Point3D.cc b/src/Point3D.cc deleted file mode 100644 --- a/src/Point3D.cc +++ /dev/null @@ -1,11 +0,0 @@ -#include "YODA/Point3D.h" -#include "YODA/Scatter3D.h" - -namespace YODA { - - - - void Point3D::getVariationsFromParent() const{ - if (this->getParentAO()) ((Scatter3D*) this->getParentAO())->parseVariations(); - } -} diff --git a/src/ReaderYODA.cc b/src/ReaderYODA.cc --- a/src/ReaderYODA.cc +++ b/src/ReaderYODA.cc @@ -1,528 +1,551 @@ // -*- C++ -*- // // This file is part of YODA -- Yet more Objects for Data Analysis // Copyright (C) 2008-2018 The YODA collaboration (see AUTHORS for details) // #include "YODA/ReaderYODA.h" #include "YODA/Utils/StringUtils.h" #include "YODA/Utils/getline.h" #include "YODA/Exceptions.h" #include "YODA/Config/DummyConfig.h" #include "YODA/Counter.h" #include "YODA/Histo1D.h" #include "YODA/Histo2D.h" #include "YODA/Profile1D.h" #include "YODA/Profile2D.h" #include "YODA/Scatter1D.h" #include "YODA/Scatter2D.h" #include "YODA/Scatter3D.h" #include "yaml-cpp/yaml.h" 
#ifdef YAML_NAMESPACE #define YAML YAML_NAMESPACE #endif #ifdef HAVE_LIBZ #define _XOPEN_SOURCE 700 #include "zstr/zstr.hpp" #endif #include #include using namespace std; namespace YODA { /// Singleton creation function Reader& ReaderYODA::create() { static ReaderYODA _instance; return _instance; } namespace { /// Fast ASCII tokenizer, extended from FastIStringStream by Gavin Salam. class aistringstream { public: // Constructor from char* aistringstream(const char* line=0) { reset(line); } // Constructor from std::string aistringstream(const string& line) { reset(line); } // Re-init to new line as char* void reset(const char* line=0) { _next = const_cast(line); _new_next = _next; _error = false; } // Re-init to new line as std::string void reset(const string& line) { reset(line.c_str()); } // Tokenizing stream operator (forwards to specialisations) template aistringstream& operator >> (T& value) { _get(value); if (_new_next == _next) _error = true; // handy error condition behaviour! _next = _new_next; return *this; } // Allow use of operator>> in a while loop operator bool() const { return !_error; } private: void _get(double& x) { x = std::strtod(_next, &_new_next); } void _get(float& x) { x = std::strtof(_next, &_new_next); } void _get(int& i) { i = std::strtol(_next, &_new_next, 10); } // force base 10! void _get(long& i) { i = std::strtol(_next, &_new_next, 10); } // force base 10! void _get(unsigned int& i) { i = std::strtoul(_next, &_new_next, 10); } // force base 10! void _get(long unsigned int& i) { i = std::strtoul(_next, &_new_next, 10); } // force base 10! void _get(string& x) { /// @todo If _next and _new_next become null? while (std::isspace(*_next)) _next += 1; _new_next = _next; while (!std::isspace(*_new_next)) _new_next += 1; x = string(_next, _new_next-_next); } char *_next, *_new_next; bool _error; }; } void ReaderYODA::read(istream& stream_, vector& aos) { #ifdef HAVE_LIBZ // NB. zstr auto-detects if file is deflated or plain-text zstr::istream stream(stream_); #else istream& stream = stream_; #endif + // Data format parsing states, representing current data type /// @todo Extension to e.g. 
"bar" or multi-counter or binned-value types, and new formats for extended Scatter types enum Context { NONE, //< outside any data block SCATTER1D, SCATTER2D, SCATTER3D, COUNTER, HISTO1D, HISTO2D, PROFILE1D, PROFILE2D }; /// State of the parser: line number, line, parser context, and pointer(s) to the object currently being assembled unsigned int nline = 0; string s; Context context = NONE; // AnalysisObject* aocurr = NULL; //< Generic current AO pointer vector h1binscurr; //< Current H1 bins container vector h2binscurr; //< Current H2 bins container vector p1binscurr; //< Current P1 bins container vector p2binscurr; //< Current P2 bins container vector pt1scurr; //< Current Point1Ds container vector pt2scurr; //< Current Point2Ds container vector pt3scurr; //< Current Point3Ds container Counter* cncurr = NULL; Histo1D* h1curr = NULL; Histo2D* h2curr = NULL; Profile1D* p1curr = NULL; Profile2D* p2curr = NULL; Scatter1D* s1curr = NULL; Scatter2D* s2curr = NULL; Scatter3D* s3curr = NULL; - //std::vector variationscurr; + std::vector variationscurr; string annscurr; // Loop over all lines of the input file aistringstream aiss; bool in_anns = false; string fmt = "1"; //int nfmt = 1; while (Utils::getline(stream, s)) { nline += 1; // CLEAN LINES IF NOT IN ANNOTATION MODE if (!in_anns) { // Trim the line Utils::itrim(s); // Ignore blank lines if (s.empty()) continue; // Ignore comments (whole-line only, without indent, and still allowed for compatibility on BEGIN/END lines) if (s.find("#") == 0 && s.find("BEGIN") == string::npos && s.find("END") == string::npos) continue; } // STARTING A NEW CONTEXT if (context == NONE) { // We require a BEGIN line to start a context if (s.find("BEGIN ") == string::npos) { stringstream ss; ss << "Unexpected line in YODA format parsing when BEGIN expected: '" << s << "' on line " << nline; throw ReadError(ss.str()); } // Remove leading #s from the BEGIN line if necessary while (s.find("#") == 0) s = Utils::trim(s.substr(1)); // Split into parts vector parts; istringstream iss(s); string tmp; while (iss >> tmp) parts.push_back(tmp); // Extract context from BEGIN type if (parts.size() < 2 || parts[0] != "BEGIN") { stringstream ss; ss << "Unexpected BEGIN line structure when BEGIN expected: '" << s << "' on line " << nline; throw ReadError(ss.str()); } // Second part is the context name const string ctxstr = parts[1]; // Get block path if possible const string path = (parts.size() >= 3) ? 
parts[2] : ""; // Set the new context and create a new AO to populate /// @todo Use the block format version for (occasional, careful) format evolution if (Utils::startswith(ctxstr, "YODA_COUNTER")) { context = COUNTER; cncurr = new Counter(path); aocurr = cncurr; } else if (Utils::startswith(ctxstr, "YODA_SCATTER1D")) { context = SCATTER1D; s1curr = new Scatter1D(path); aocurr = s1curr; } else if (Utils::startswith(ctxstr, "YODA_SCATTER2D")) { context = SCATTER2D; s2curr = new Scatter2D(path); aocurr = s2curr; } else if (Utils::startswith(ctxstr, "YODA_SCATTER3D")) { context = SCATTER3D; s3curr = new Scatter3D(path); aocurr = s3curr; } else if (Utils::startswith(ctxstr, "YODA_HISTO1D")) { context = HISTO1D; h1curr = new Histo1D(path); aocurr = h1curr; } else if (Utils::startswith(ctxstr, "YODA_HISTO2D")) { context = HISTO2D; h2curr = new Histo2D(path); aocurr = h2curr; } else if (Utils::startswith(ctxstr, "YODA_PROFILE1D")) { context = PROFILE1D; p1curr = new Profile1D(path); aocurr = p1curr; } else if (Utils::startswith(ctxstr, "YODA_PROFILE2D")) { context = PROFILE2D; p2curr = new Profile2D(path); aocurr = p2curr; } // cout << aocurr->path() << " " << nline << " " << context << endl; // Get block format version if possible (assume version=1 if none found) const size_t vpos = ctxstr.find_last_of("V"); fmt = vpos != string::npos ? ctxstr.substr(vpos+1) : "1"; // cout << fmt << endl; // From version 2 onwards, use the in_anns state from BEGIN until --- if (fmt != "1") in_anns = true; } else { //< not a BEGIN line // Throw error if a BEGIN line is found if (s.find("BEGIN ") != string::npos) ///< @todo require pos = 0 from fmt=V2 throw ReadError("Unexpected BEGIN line in YODA format parsing before ending current BEGIN..END block"); // FINISHING THE CURRENT CONTEXT // Clear/reset context and register AO /// @todo Throw error if mismatch between BEGIN (context) and END types if (s.find("END ") != string::npos) { ///< @todo require pos = 0 from fmt=V2 switch (context) { case COUNTER: break; case HISTO1D: h1curr->addBins(h1binscurr); h1binscurr.clear(); break; case HISTO2D: h2curr->addBins(h2binscurr); h2binscurr.clear(); break; case PROFILE1D: p1curr->addBins(p1binscurr); p1binscurr.clear(); break; case PROFILE2D: p2curr->addBins(p2binscurr); p2binscurr.clear(); break; case SCATTER1D: - for (auto &p : pt1scurr) { p.setParentAO(s1curr); } s1curr->addPoints(pt1scurr); pt1scurr.clear(); break; case SCATTER2D: - for (auto &p : pt2scurr) { p.setParentAO(s2curr); } s2curr->addPoints(pt2scurr); pt2scurr.clear(); break; case SCATTER3D: - for (auto &p : pt3scurr) { p.setParentAO(s3curr); } s3curr->addPoints(pt3scurr); pt3scurr.clear(); break; case NONE: break; } // Set all annotations try { YAML::Node anns = YAML::Load(annscurr); // for (YAML::const_iterator it = anns.begin(); it != anns.end(); ++it) { for (const auto& it : anns) { const string key = it.first.as(); // const string val = it.second.as(); YAML::Emitter em; em << YAML::Flow << it.second; //< use single-line formatting, for lists & maps const string val = em.c_str(); - // if (!(key.find("ErrorBreakdown") != string::npos)) - aocurr->setAnnotation(key, val); + // + // The Variations annotation is just a placeholder to help collect the right columns + // Don't want to be saving it to the actual AO, since the method variations() + // provides the info that's needed without needing to keep the annotation up to date + if (!(key.find("Variations") != string::npos)) aocurr->setAnnotation(key, val); } } catch (...) 
{ /// @todo Is there a case for just giving up on these annotations, printing the error msg, and keep going? As an option? const string err = "Problem during annotation parsing of YAML block:\n'''\n" + annscurr + "\n'''"; // cerr << err << endl; throw ReadError(err); } annscurr.clear(); - //variationscurr.clear(); + variationscurr.clear(); in_anns = false; // Put this AO in the completed stack aos.push_back(aocurr); // Clear all current-object pointers aocurr = nullptr; cncurr = nullptr; h1curr = nullptr; h2curr = nullptr; p1curr = nullptr; p2curr = nullptr; s1curr = nullptr; s2curr = nullptr; s3curr = nullptr; context = NONE; continue; } // ANNOTATIONS PARSING if (fmt == "1") { // First convert to one-key-per-line YAML syntax const size_t ieq = s.find("="); if (ieq != string::npos) s.replace(ieq, 1, ": "); // Special-case treatment for syntax clashes const size_t icost = s.find(": *"); if (icost != string::npos) { s.replace(icost, 1, ": '*"); s += "'"; } // Store reformatted annotation const size_t ico = s.find(":"); if (ico != string::npos) { annscurr += (annscurr.empty() ? "" : "\n") + s; continue; } } else if (in_anns) { if (s == "---") { in_anns = false; } else { annscurr += (annscurr.empty() ? "" : "\n") + s; // In order to handle multi-error points in scatters, we need to know which variations are stored, if any // can't wait until we process the annotations at the end, since need to know when filling points. // This is a little inelegant though... - //if (s.find("ErrorBreakdown") != string::npos) { - // errorBreakdown = YAML::Load(s)["ErrorBreakdown"]; - //for (const auto& it : errorBreakdown) { - // const string val0 = it.first.as(); - //for (const auto& it2 : it.second) { - // const string val = it2.as(); - //} - // } - // } + if (s.find("Variations") != string::npos) { + YAML::Node anns = YAML::Load(s); + for (const auto& it : anns) { + assert(it.second.IsSequence()); + for (const auto& it2 : it.second) { + const string val = it2.as(); + //const string val=""; + variationscurr.push_back(val); + } + } + } } continue; } // DATA PARSING aiss.reset(s); // double sumw(0), sumw2(0), sumwx(0), sumwx2(0), sumwy(0), sumwy2(0), sumwz(0), sumwz2(0), sumwxy(0), sumwxz(0), sumwyz(0), n(0); switch (context) { - + case COUNTER: { double sumw(0), sumw2(0), n(0); aiss >> sumw >> sumw2 >> n; cncurr->setDbn(Dbn0D(n, sumw, sumw2)); } break; - + case HISTO1D: { string xoflow1, xoflow2; double xmin(0), xmax(0); double sumw(0), sumw2(0), sumwx(0), sumwx2(0), n(0); /// @todo Improve/factor this "bin" string-or-float parsing... 
esp for mixed case of 2D overflows /// @todo When outflows are treated as "infinity bins" and don't require a distinct type, string replace under/over -> -+inf if (s.find("Total") != string::npos || s.find("Underflow") != string::npos || s.find("Overflow") != string::npos) { aiss >> xoflow1 >> xoflow2; } else { aiss >> xmin >> xmax; } // The rest is the same for overflows and in-range bins aiss >> sumw >> sumw2 >> sumwx >> sumwx2 >> n; const Dbn1D dbn(n, sumw, sumw2, sumwx, sumwx2); if (xoflow1 == "Total") h1curr->setTotalDbn(dbn); else if (xoflow1 == "Underflow") h1curr->setUnderflow(dbn); else if (xoflow1 == "Overflow") h1curr->setOverflow(dbn); // else h1curr->addBin(HistoBin1D(std::make_pair(xmin,xmax), dbn)); else h1binscurr.push_back(HistoBin1D(std::make_pair(xmin,xmax), dbn)); } break; - + case HISTO2D: { string xoflow1, xoflow2, yoflow1, yoflow2; double xmin(0), xmax(0), ymin(0), ymax(0); double sumw(0), sumw2(0), sumwx(0), sumwx2(0), sumwy(0), sumwy2(0), sumwxy(0), n(0); /// @todo Improve/factor this "bin" string-or-float parsing... esp for mixed case of 2D overflows /// @todo When outflows are treated as "infinity bins" and don't require a distinct type, string replace under/over -> -+inf if (s.find("Total") != string::npos) { aiss >> xoflow1 >> xoflow2; // >> yoflow1 >> yoflow2; } else if (s.find("Underflow") != string::npos || s.find("Overflow") != string::npos) { throw ReadError("2D histogram overflow syntax is not yet defined / handled"); } else { aiss >> xmin >> xmax >> ymin >> ymax; } // The rest is the same for overflows and in-range bins aiss >> sumw >> sumw2 >> sumwx >> sumwx2 >> sumwy >> sumwy2 >> sumwxy >> n; const Dbn2D dbn(n, sumw, sumw2, sumwx, sumwx2, sumwy, sumwy2, sumwxy); if (xoflow1 == "Total") h2curr->setTotalDbn(dbn); // else if (xoflow1 == "Underflow") p1curr->setUnderflow(dbn); // else if (xoflow1 == "Overflow") p1curr->setOverflow(dbn); else { assert(xoflow1.empty()); // h2curr->addBin(HistoBin2D(std::make_pair(xmin,xmax), std::make_pair(ymin,ymax), dbn)); h2binscurr.push_back(HistoBin2D(std::make_pair(xmin,xmax), std::make_pair(ymin,ymax), dbn)); } } break; - + case PROFILE1D: { string xoflow1, xoflow2; double xmin(0), xmax(0); double sumw(0), sumw2(0), sumwx(0), sumwx2(0), sumwy(0), sumwy2(0), n(0); /// @todo Improve/factor this "bin" string-or-float parsing... esp for mixed case of 2D overflows /// @todo When outflows are treated as "infinity bins" and don't require a distinct type, string replace under/over -> -+inf if (s.find("Total") != string::npos || s.find("Underflow") != string::npos || s.find("Overflow") != string::npos) { aiss >> xoflow1 >> xoflow2; } else { aiss >> xmin >> xmax; } // The rest is the same for overflows and in-range bins aiss >> sumw >> sumw2 >> sumwx >> sumwx2 >> sumwy >> sumwy2 >> n; const double DUMMYWXY = 0; const Dbn2D dbn(n, sumw, sumw2, sumwx, sumwx2, sumwy, sumwy2, DUMMYWXY); if (xoflow1 == "Total") p1curr->setTotalDbn(dbn); else if (xoflow1 == "Underflow") p1curr->setUnderflow(dbn); else if (xoflow1 == "Overflow") p1curr->setOverflow(dbn); // else p1curr->addBin(ProfileBin1D(std::make_pair(xmin,xmax), dbn)); else p1binscurr.push_back(ProfileBin1D(std::make_pair(xmin,xmax), dbn)); } break; - + case PROFILE2D: { string xoflow1, xoflow2, yoflow1, yoflow2; double xmin(0), xmax(0), ymin(0), ymax(0); double sumw(0), sumw2(0), sumwx(0), sumwx2(0), sumwy(0), sumwy2(0), sumwz(0), sumwz2(0), sumwxy(0), sumwxz(0), sumwyz(0), n(0); /// @todo Improve/factor this "bin" string-or-float parsing... 
esp for mixed case of 2D overflows /// @todo When outflows are treated as "infinity bins" and don't require a distinct type, string replace under/over -> -+inf if (s.find("Total") != string::npos) { aiss >> xoflow1 >> xoflow2; // >> yoflow1 >> yoflow2; } else if (s.find("Underflow") != string::npos || s.find("Overflow") != string::npos) { throw ReadError("2D profile overflow syntax is not yet defined / handled"); } else { aiss >> xmin >> xmax >> ymin >> ymax; } // The rest is the same for overflows and in-range bins aiss >> sumw >> sumw2 >> sumwx >> sumwx2 >> sumwy >> sumwy2 >> sumwz >> sumwz2 >> sumwxy >> sumwxz >> sumwyz >> n; const Dbn3D dbn(n, sumw, sumw2, sumwx, sumwx2, sumwy, sumwy2, sumwz, sumwz2, sumwxy, sumwxz, sumwyz); if (xoflow1 == "Total") p2curr->setTotalDbn(dbn); // else if (xoflow1 == "Underflow") p2curr->setUnderflow(dbn); // else if (xoflow1 == "Overflow") p2curr->setOverflow(dbn); else { assert(xoflow1.empty()); // p2curr->addBin(ProfileBin2D(std::make_pair(xmin,xmax), std::make_pair(ymin,ymax), dbn)); p2binscurr.push_back(ProfileBin2D(std::make_pair(xmin,xmax), std::make_pair(ymin,ymax), dbn)); } } break; - + case SCATTER1D: { double x(0), exm(0), exp(0); aiss >> x >> exm >> exp; // set nominal point Point1D thispoint=Point1D(x, exm, exp); // check if we stored variations of this point - //if (variationscurr.size()>0){ - // // for each variation, store the alt errors. - // // start at 1 since we have already filled nominal ! - // for (unsigned int ivar=1; ivar> exm >> exp; - // thispoint.setXErrs(exm,exp,thisvariation); - // } - //} + if (variationscurr.size()>0){ + // for each variation, store the alt errors. + // start at 1 since we have already filled nominal ! + for (unsigned int ivar=1; ivar> exm >> exp; + thispoint.setXErrs(exm,exp,thisvariation); + } + } pt1scurr.push_back(thispoint); } break; - + case SCATTER2D: { double x(0), y(0), exm(0), exp(0), eym(0), eyp(0); aiss >> x >> exm >> exp >> y >> eym >> eyp; // set nominal point Point2D thispoint=Point2D(x, y, exm, exp, eym, eyp); // check if we stored variations of this point - // for each variation, store the alt errors. - // start at 1 since we have already filled nominal ! + if (variationscurr.size()>0){ + // for each variation, store the alt errors. + // start at 1 since we have already filled nominal ! + for (unsigned int ivar=1; ivar> eym >> eyp; + thispoint.setYErrs(eym,eyp,thisvariation); + } + } pt2scurr.push_back(thispoint); } break; - + case SCATTER3D: { double x(0), y(0), z(0), exm(0), exp(0), eym(0), eyp(0), ezm(0), ezp(0); aiss >> x >> exm >> exp >> y >> eym >> eyp >> z >> ezm >> ezp; // set nominal point Point3D thispoint=Point3D(x, y, z, exm, exp, eym, eyp, ezm, ezp); + // check if we stored variations of this point + if (variationscurr.size()>0){ + // for each variation, store the alt errors. + // start at 1 since we have already filled nominal ! 
+ for (unsigned int ivar=1; ivar> ezm >> ezp; + thispoint.setZErrs(ezm,ezp,thisvariation); + } + } pt3scurr.push_back(thispoint); } break; default: throw ReadError("Unknown context in YODA format parsing: how did this happen?"); + } - } // cout << "AO CONTENT " << nline << endl; // cout << " " << xmin << " " << xmax << " " << ymin << " " << ymax << " / '" << xoflow1 << "' '" << xoflow2 << "' '" << yoflow1 << "' '" << yoflow2 << "'" << endl; // cout << " " << sumw << " " << sumw2 << " " << sumwx << " " << sumwx2 << " " << sumwy << " " << sumwy2 << " " << sumwz << " " << sumwz2 << " " << sumwxy << " " << sumwxz << " " << sumwyz << " " << n << endl; // cout << " " << x << " " << y << " " << z << " " << exm << " " << exp << " " << eym << " " << eyp << " " << ezm << " " << ezp << endl; - } - } + + } + } + } + + } - diff --git a/src/Scatter1D.cc b/src/Scatter1D.cc --- a/src/Scatter1D.cc +++ b/src/Scatter1D.cc @@ -1,59 +1,30 @@ #include "YODA/Scatter1D.h" #include "YODA/Counter.h" #include -#include "yaml-cpp/yaml.h" -#ifdef YAML_NAMESPACE -#define YAML YAML_NAMESPACE -#endif namespace YODA { /// Make a Scatter1D representation of a Histo1D Scatter1D mkScatter(const Counter& c) { Scatter1D rtn; for (const std::string& a : c.annotations()) rtn.setAnnotation(a, c.annotation(a)); rtn.setAnnotation("Type", c.type()); // might override the copied ones - Point1D pt(c.val(), c.err()); - pt.setParentAO(&rtn); - rtn.addPoint(pt); + rtn.addPoint(c.val(), c.err()); return rtn; } - - void Scatter1D::parseVariations() { - if (this-> _variationsParsed) { return;} - if (!(this->hasAnnotation("ErrorBreakdown"))) { return; } - YAML::Node errorBreakdown; - errorBreakdown = YAML::Load(this->annotation("ErrorBreakdown")); - - if (errorBreakdown.size()) { - for (unsigned int thisPointIndex=0 ; thisPointIndex< this->numPoints() ; ++thisPointIndex){ - Point1D &thispoint = this->_points[thisPointIndex]; - YAML::Node variations = errorBreakdown[thisPointIndex]; - for (const auto& variation : variations) { - const std::string variationName = variation.first.as(); - double eyp = variation.second["up"].as(); - double eym = variation.second["dn"].as(); - thispoint.setXErrs(eym,eyp,variationName); - } - } - this-> _variationsParsed =true; - } - } - const std::vector Scatter1D::variations() const { std::vector vecvariations; for (auto &point : this->_points){ for (auto &it : point.errMap()){ //if the variation is not already in the vector, add it ! if (std::find(vecvariations.begin(), vecvariations.end(), it.first) == vecvariations.end()){ vecvariations.push_back(it.first); } } } return vecvariations; } - } diff --git a/src/Scatter2D.cc b/src/Scatter2D.cc --- a/src/Scatter2D.cc +++ b/src/Scatter2D.cc @@ -1,164 +1,85 @@ #include "YODA/Scatter2D.h" #include "YODA/Histo1D.h" #include "YODA/Profile1D.h" #include -#include "yaml-cpp/yaml.h" -#ifdef YAML_NAMESPACE -#define YAML YAML_NAMESPACE -#endif namespace YODA { /// Make a Scatter2D representation of a Histo1D - Scatter2D mkScatter(const Histo1D& h, bool usefocus, bool binwidthdiv) { + Scatter2D mkScatter(const Histo1D& h, bool usefocus) { Scatter2D rtn; - for (const std::string& a : h.annotations()) rtn.setAnnotation(a, h.annotation(a)); + for (const std::string& a : h.annotations()) + rtn.setAnnotation(a, h.annotation(a)); rtn.setAnnotation("Type", h.type()); // might override the copied ones - for (const HistoBin1D& b : h.bins()) { const double x = usefocus ? 
b.xFocus() : b.xMid(); const double ex_m = x - b.xMin(); const double ex_p = b.xMax() - x; double y; try { - y = b.sumW(); + y = b.height(); } catch (const Exception&) { // LowStatsError or WeightError y = std::numeric_limits::quiet_NaN(); } - if (binwidthdiv) y /= b.xWidth(); - const double ey = b.relErr() * y; + double ey; + try { + ey = b.heightErr(); + } catch (const Exception&) { // LowStatsError or WeightError + ey = std::numeric_limits::quiet_NaN(); + } - // Attach the point to its parent - Point2D pt(x, y, ex_m, ex_p, ey, ey); - pt.setParentAO(&rtn); + const Point2D pt(x, y, ex_m, ex_p, ey, ey); rtn.addPoint(pt); } - assert(h.numBins() == rtn.numPoints()); return rtn; } /// Make a Scatter2D representation of a Profile1D Scatter2D mkScatter(const Profile1D& p, bool usefocus, bool usestddev) { Scatter2D rtn; for (const std::string& a : p.annotations()) rtn.setAnnotation(a, p.annotation(a)); rtn.setAnnotation("Type", p.type()); for (const ProfileBin1D& b : p.bins()) { const double x = usefocus ? b.xFocus() : b.xMid(); const double ex_m = x - b.xMin(); const double ex_p = b.xMax() - x; double y; try { y = b.mean(); } catch (const Exception&) { // LowStatsError or WeightError y = std::numeric_limits::quiet_NaN(); } double ey; try { ey = usestddev ? b.stdDev() : b.stdErr(); ///< Control y-error scheme via usestddev arg } catch (const Exception&) { // LowStatsError or WeightError ey = std::numeric_limits::quiet_NaN(); } - //const Point2D pt(x, y, ex_m, ex_p, ey, ey); - Point2D pt(x, y, ex_m, ex_p, ey, ey); - pt.setParentAO(&rtn); + const Point2D pt(x, y, ex_m, ex_p, ey, ey); rtn.addPoint(pt); } assert(p.numBins() == rtn.numPoints()); return rtn; } - - // retrieve variations from annoation, parse them as YAML, and update the points - void Scatter2D::parseVariations() { - if (this-> _variationsParsed) { return; } - if (!(this->hasAnnotation("ErrorBreakdown"))) { return; } - YAML::Node errorBreakdown; - errorBreakdown = YAML::Load(this->annotation("ErrorBreakdown")); - - if (errorBreakdown.size()) { - for (unsigned int thisPointIndex=0 ; thisPointIndex< this->numPoints() ; ++thisPointIndex){ - Point2D &thispoint = this->_points[thisPointIndex]; - YAML::Node variations = errorBreakdown[thisPointIndex]; - for (const auto& variation : variations) { - const std::string variationName = variation.first.as(); - double eyp = variation.second["up"].as(); - double eym = variation.second["dn"].as(); - thispoint.setYErrs(eym,eyp,variationName); - } - } - this-> _variationsParsed =true; - } - } - + const std::vector Scatter2D::variations() const { - std::vector vecVariations; + std::vector vecvariations; for (auto &point : this->_points){ for (auto &it : point.errMap()){ //if the variation is not already in the vector, add it ! - if (std::find(vecVariations.begin(), vecVariations.end(), it.first) == vecVariations.end()){ - vecVariations.push_back(it.first); + if (std::find(vecvariations.begin(), vecvariations.end(), it.first) == vecvariations.end()){ + vecvariations.push_back(it.first); } } } - return vecVariations; - } - - - std::vector > Scatter2D::covarianceMatrix( bool ignoreOffDiagonalTerms) { - int nPoints= this->numPoints(); - //double covM[nPoints][nPoints] ={}; - std::vector > covM; - - - // initialose cov matrix to be the right shape! 
- for (int i=0; i row; - row.resize(nPoints); - covM.push_back(row); - } - - // case where only have nominal, ie total uncertainty, labelled "" (empty string) - if (this->variations().size()==1) { - for (int i=0; i_points[i].yErrs().first+this->_points[i].yErrs().second)/2),2); - if (covM[i][i]==0 ) covM[i][i]=1; - } - return covM; - } - //more interesting case where we actually have some uncertainty breakdown! - auto systList= this->variations(); - for (auto sname : systList){ - if (sname.length()==0) continue; - std::vector< double> systErrs; - systErrs.resize(nPoints); - for (int i=0; i_points[i]; - try { - auto variations=point.errMap().at(sname); - systErrs[i]=(fabs(variations.first)+fabs(variations.second))*0.5 ;//up/dn are symmetrized since this method can't handle asymmetric errors - } catch (const std::exception e) { // Missing bin. - systErrs[i]=0.0; - } - } - if (ignoreOffDiagonalTerms || sname.find("stat") != std::string::npos || sname.find("uncor") != std::string::npos){ - for (int i=0; i -#include "yaml-cpp/yaml.h" -#ifdef YAML_NAMESPACE -#define YAML YAML_NAMESPACE -#endif namespace YODA { - Scatter3D mkScatter(const Histo2D& h, bool usefocus, bool binareadiv) { + Scatter3D mkScatter(const Histo2D& h, bool usefocus) { Scatter3D rtn; - for (const std::string& a : h.annotations()) rtn.setAnnotation(a, h.annotation(a)); + for (const std::string& a : h.annotations()) + rtn.setAnnotation(a, h.annotation(a)); rtn.setAnnotation("Type", h.type()); - for (size_t i = 0; i < h.numBins(); ++i) { const HistoBin2D& b = h.bin(i); /// SAME FOR ALL 2D BINS double x = b.xMid(); if (usefocus) { try { x = b.xFocus(); } catch (const LowStatsError& lse) { x = b.xMid(); } } const double exminus = x - b.xMin(); const double explus = b.xMax() - x; double y = b.yMid(); if (usefocus) { try { y = b.yFocus(); } catch (const LowStatsError& lse) { y = b.yMid(); } } const double eyminus = y - b.yMin(); const double eyplus = b.yMax() - y; /// END SAME FOR ALL 2D BINS - double z; - try { - z = b.sumW(); - } catch (const Exception&) { // LowStatsError or WeightError - z = std::numeric_limits::quiet_NaN(); - } - if (binareadiv) z /= b.xWidth()*b.yWidth(); - const double ez = b.relErr() * z; + const double z = b.height(); + const double ez = b.heightErr(); - - Point3D pt(x, y, z, exminus, explus, eyminus, eyplus, ez, ez); - pt.setParentAO(&rtn); - rtn.addPoint(pt); + rtn.addPoint(x, y, z, exminus, explus, eyminus, eyplus, ez, ez); } - assert(h.numBins() == rtn.numPoints()); return rtn; } Scatter3D mkScatter(const Profile2D& h, bool usefocus, bool usestddev) { Scatter3D rtn; for (const std::string& a : h.annotations()) rtn.setAnnotation(a, h.annotation(a)); rtn.setAnnotation("Type", h.type()); for (size_t i = 0; i < h.numBins(); ++i) { const ProfileBin2D& b = h.bin(i); /// SAME FOR ALL 2D BINS double x = b.xMid(); if (usefocus) { try { x = b.xFocus(); } catch (const LowStatsError& lse) { x = b.xMid(); } } const double exminus = x - b.xMin(); const double explus = b.xMax() - x; double y = b.yMid(); if (usefocus) { try { y = b.yFocus(); } catch (const LowStatsError& lse) { y = b.yMid(); } } const double eyminus = y - b.yMin(); const double eyplus = b.yMax() - y; /// END SAME FOR ALL 2D BINS double z; try { z = b.mean(); } catch (const LowStatsError& lse) { z = std::numeric_limits::quiet_NaN(); } double ez; try { ez = usestddev ? 
b.stdDev() : b.stdErr(); ///< Control z-error scheme via usestddev arg } catch (const LowStatsError& lse) { ez = std::numeric_limits::quiet_NaN(); } rtn.addPoint(x, y, z, exminus, explus, eyminus, eyplus, ez, ez); } return rtn; } - - void Scatter3D::parseVariations() { - if (this-> _variationsParsed) { return; } - if (!(this->hasAnnotation("ErrorBreakdown"))) { return;} - YAML::Node errorBreakdown; - errorBreakdown = YAML::Load(this->annotation("ErrorBreakdown")); - if (errorBreakdown.size()) { - for (unsigned int thisPointIndex=0 ; thisPointIndex< this->numPoints() ; ++thisPointIndex){ - Point3D &thispoint = this->_points[thisPointIndex]; - YAML::Node variations = errorBreakdown[thisPointIndex]; - for (const auto& variation : variations) { - const std::string variationName = variation.first.as(); - double eyp = variation.second["up"].as(); - double eym = variation.second["dn"].as(); - thispoint.setZErrs(eym,eyp,variationName); - } - } - this-> _variationsParsed =true; - } - } - - + + const std::vector Scatter3D::variations() const { std::vector vecvariations; for (auto &point : this->_points){ for (auto &it : point.errMap()){ //if the variation is not already in the vector, add it ! if (std::find(vecvariations.begin(), vecvariations.end(), it.first) == vecvariations.end()){ vecvariations.push_back(it.first); } } } return vecvariations; } } diff --git a/src/WriterYODA.cc b/src/WriterYODA.cc --- a/src/WriterYODA.cc +++ b/src/WriterYODA.cc @@ -1,385 +1,384 @@ // -*- C++ -*- // // This file is part of YODA -- Yet more Objects for Data Analysis // Copyright (C) 2008-2018 The YODA collaboration (see AUTHORS for details) // #include "YODA/WriterYODA.h" #include "yaml-cpp/yaml.h" #ifdef YAML_NAMESPACE #define YAML YAML_NAMESPACE #endif #include #include using namespace std; namespace YODA { /// Singleton creation function Writer& WriterYODA::create() { static WriterYODA _instance; _instance.setPrecision(6); return _instance; } // Format version: // - V1/empty = make-plots annotations style // - V2 = YAML annotations static const int YODA_FORMAT_VERSION = 2; // Version-formatting helper function inline string _iotypestr(const string& baseiotype) { ostringstream os; os << "YODA_" << Utils::toUpper(baseiotype) << "_V" << YODA_FORMAT_VERSION; return os.str(); } void WriterYODA::_writeAnnotations(std::ostream& os, const AnalysisObject& ao) { os << scientific << setprecision(_precision); for (const string& a : ao.annotations()) { if (a.empty()) continue; /// @todo Write out floating point annotations as scientific notation - string ann = ao.annotation(a); - // remove stpurious line returns at the end of a string so that we don't - // end up with two line returns. 
- ann.erase(std::remove(ann.begin(), ann.end(), '\n'), ann.end()); - os << a << ": " << ann << "\n"; + os << a << ": " << ao.annotation(a) << "\n"; } os << "---\n"; } void WriterYODA::writeCounter(std::ostream& os, const Counter& c) { ios_base::fmtflags oldflags = os.flags(); os << scientific << showpoint << setprecision(_precision); os << "BEGIN " << _iotypestr("COUNTER") << " " << c.path() << "\n"; _writeAnnotations(os, c); os << "# sumW\t sumW2\t numEntries\n"; os << c.sumW() << "\t" << c.sumW2() << "\t" << c.numEntries() << "\n"; - os << "END " << _iotypestr("COUNTER") << "\n\n"; + os << "END " << _iotypestr("COUNTER") << "\n"; os.flags(oldflags); } void WriterYODA::writeHisto1D(std::ostream& os, const Histo1D& h) { ios_base::fmtflags oldflags = os.flags(); os << scientific << showpoint << setprecision(_precision); os << "BEGIN " << _iotypestr("HISTO1D") << " " << h.path() << "\n"; _writeAnnotations(os, h); try { //if ( h.totalDbn().effNumEntries() > 0 ) { os << "# Mean: " << h.xMean() << "\n"; os << "# Area: " << h.integral() << "\n"; } catch (LowStatsError& e) { // } os << "# ID\t ID\t sumw\t sumw2\t sumwx\t sumwx2\t numEntries\n"; os << "Total \tTotal \t"; os << h.totalDbn().sumW() << "\t" << h.totalDbn().sumW2() << "\t"; os << h.totalDbn().sumWX() << "\t" << h.totalDbn().sumWX2() << "\t"; os << h.totalDbn().numEntries() << "\n"; os << "Underflow\tUnderflow\t"; os << h.underflow().sumW() << "\t" << h.underflow().sumW2() << "\t"; os << h.underflow().sumWX() << "\t" << h.underflow().sumWX2() << "\t"; os << h.underflow().numEntries() << "\n"; os << "Overflow\tOverflow\t"; os << h.overflow().sumW() << "\t" << h.overflow().sumW2() << "\t"; os << h.overflow().sumWX() << "\t" << h.overflow().sumWX2() << "\t"; os << h.overflow().numEntries() << "\n"; os << "# xlow\t xhigh\t sumw\t sumw2\t sumwx\t sumwx2\t numEntries\n"; for (const HistoBin1D& b : h.bins()) { os << b.xMin() << "\t" << b.xMax() << "\t"; os << b.sumW() << "\t" << b.sumW2() << "\t"; os << b.sumWX() << "\t" << b.sumWX2() << "\t"; os << b.numEntries() << "\n"; } - os << "END " << _iotypestr("HISTO1D") << "\n\n"; + os << "END " << _iotypestr("HISTO1D") << "\n"; os.flags(oldflags); } void WriterYODA::writeHisto2D(std::ostream& os, const Histo2D& h) { ios_base::fmtflags oldflags = os.flags(); os << scientific << showpoint << setprecision(_precision); os << "BEGIN " << _iotypestr("HISTO2D") << " " << h.path() << "\n"; _writeAnnotations(os, h); try { //if ( h.totalDbn().numEntries() > 0 ) os << "# Mean: (" << h.xMean() << ", " << h.yMean() << ")\n"; os << "# Volume: " << h.integral() << "\n"; } catch (LowStatsError& e) { // } os << "# ID\t ID\t sumw\t sumw2\t sumwx\t sumwx2\t sumwy\t sumwy2\t sumwxy\t numEntries\n"; // Total distribution const Dbn2D& td = h.totalDbn(); os << "Total \tTotal \t"; os << td.sumW() << "\t" << td.sumW2() << "\t"; os << td.sumWX() << "\t" << td.sumWX2() << "\t"; os << td.sumWY() << "\t" << td.sumWY2() << "\t"; os << td.sumWXY() << "\t"; os << td.numEntries() << "\n"; // Outflows /// @todo Disabled for now, reinstate with a *full* set of outflow info to allow marginalisation os << "# 2D outflow persistency not currently supported until API is stable\n"; // for (int ix = -1; ix <= 1; ++ix) { // for (int iy = -1; iy <= 1; ++iy) { // if (ix == 0 && iy == 0) continue; // os << "Outflow\t" << ix << ":" << iy << "\t"; // const Dbn2D& d = h.outflow(ix, iy); // os << d.sumW() << "\t" << d.sumW2() << "\t"; // os << d.sumWX() << "\t" << d.sumWX2() << "\t"; // os << d.sumWY() << "\t" << d.sumWY2() << "\t"; // os << 
d.sumWXY() << "\t"; // os << d.numEntries() << "\n"; // } // } // Bins os << "# xlow\t xhigh\t ylow\t yhigh\t sumw\t sumw2\t sumwx\t sumwx2\t sumwy\t sumwy2\t sumwxy\t numEntries\n"; for (const HistoBin2D& b : h.bins()) { os << b.xMin() << "\t" << b.xMax() << "\t"; os << b.yMin() << "\t" << b.yMax() << "\t"; os << b.sumW() << "\t" << b.sumW2() << "\t"; os << b.sumWX() << "\t" << b.sumWX2() << "\t"; os << b.sumWY() << "\t" << b.sumWY2() << "\t"; os << b.sumWXY() << "\t"; os << b.numEntries() << "\n"; } - os << "END " << _iotypestr("HISTO2D") << "\n\n"; + os << "END " << _iotypestr("HISTO2D") << "\n"; os.flags(oldflags); } void WriterYODA::writeProfile1D(std::ostream& os, const Profile1D& p) { ios_base::fmtflags oldflags = os.flags(); os << scientific << showpoint << setprecision(_precision); os << "BEGIN " << _iotypestr("PROFILE1D") << " " << p.path() << "\n"; _writeAnnotations(os, p); os << "# ID\t ID\t sumw\t sumw2\t sumwx\t sumwx2\t sumwy\t sumwy2\t numEntries\n"; os << "Total \tTotal \t"; os << p.totalDbn().sumW() << "\t" << p.totalDbn().sumW2() << "\t"; os << p.totalDbn().sumWX() << "\t" << p.totalDbn().sumWX2() << "\t"; os << p.totalDbn().sumWY() << "\t" << p.totalDbn().sumWY2() << "\t"; os << p.totalDbn().numEntries() << "\n"; os << "Underflow\tUnderflow\t"; os << p.underflow().sumW() << "\t" << p.underflow().sumW2() << "\t"; os << p.underflow().sumWX() << "\t" << p.underflow().sumWX2() << "\t"; os << p.underflow().sumWY() << "\t" << p.underflow().sumWY2() << "\t"; os << p.underflow().numEntries() << "\n"; os << "Overflow\tOverflow\t"; os << p.overflow().sumW() << "\t" << p.overflow().sumW2() << "\t"; os << p.overflow().sumWX() << "\t" << p.overflow().sumWX2() << "\t"; os << p.overflow().sumWY() << "\t" << p.overflow().sumWY2() << "\t"; os << p.overflow().numEntries() << "\n"; os << "# xlow\t xhigh\t sumw\t sumw2\t sumwx\t sumwx2\t sumwy\t sumwy2\t numEntries\n"; for (const ProfileBin1D& b : p.bins()) { os << b.xMin() << "\t" << b.xMax() << "\t"; os << b.sumW() << "\t" << b.sumW2() << "\t"; os << b.sumWX() << "\t" << b.sumWX2() << "\t"; os << b.sumWY() << "\t" << b.sumWY2() << "\t"; os << b.numEntries() << "\n"; } - os << "END " << _iotypestr("PROFILE1D") << "\n\n"; + os << "END " << _iotypestr("PROFILE1D") << "\n"; os.flags(oldflags); } void WriterYODA::writeProfile2D(std::ostream& os, const Profile2D& p) { ios_base::fmtflags oldflags = os.flags(); os << scientific << showpoint << setprecision(_precision); os << "BEGIN " << _iotypestr("PROFILE2D") << " " << p.path() << "\n"; _writeAnnotations(os, p); os << "# sumw\t sumw2\t sumwx\t sumwx2\t sumwy\t sumwy2\t sumwz\t sumwz2\t sumwxy\t numEntries\n"; // Total distribution const Dbn3D& td = p.totalDbn(); os << "Total \tTotal \t"; os << td.sumW() << "\t" << td.sumW2() << "\t"; os << td.sumWX() << "\t" << td.sumWX2() << "\t"; os << td.sumWY() << "\t" << td.sumWY2() << "\t"; os << td.sumWZ() << "\t" << td.sumWZ2() << "\t"; os << td.sumWXY() << "\t"; // << td.sumWXZ() << "\t" << td.sumWYZ() << "\t"; os << td.numEntries() << "\n"; // Outflows /// @todo Disabled for now, reinstate with a *full* set of outflow info to allow marginalisation os << "# 2D outflow persistency not currently supported until API is stable\n"; // for (int ix = -1; ix <= 1; ++ix) { // for (int iy = -1; iy <= 1; ++iy) { // if (ix == 0 && iy == 0) continue; // os << "Outflow\t" << ix << ":" << iy << "\t"; // const Dbn3D& d = p.outflow(ix, iy); // os << d.sumW() << "\t" << d.sumW2() << "\t"; // os << d.sumWX() << "\t" << d.sumWX2() << "\t"; // os << d.sumWY() << "\t" << 
d.sumWY2() << "\t"; // os << d.sumWZ() << "\t" << d.sumWZ2() << "\t"; // os << d.sumWXY() << "\t"; // << d.sumWXZ() << "\t" << d.sumWYZ() << "\t"; // os << d.numEntries() << "\n"; // } // } // Bins os << "# xlow\t xhigh\t ylow\t yhigh\t sumw\t sumw2\t sumwx\t sumwx2\t sumwy\t sumwy2\t sumwz\t sumwz2\t sumwxy\t numEntries\n"; for (const ProfileBin2D& b : p.bins()) { os << b.xMin() << "\t" << b.xMax() << "\t"; os << b.yMin() << "\t" << b.yMax() << "\t"; os << b.sumW() << "\t" << b.sumW2() << "\t"; os << b.sumWX() << "\t" << b.sumWX2() << "\t"; os << b.sumWY() << "\t" << b.sumWY2() << "\t"; os << b.sumWZ() << "\t" << b.sumWZ2() << "\t"; os << b.sumWXY() << "\t"; // << b.sumWXZ() << "\t" << b.sumWYZ() << "\t"; os << b.numEntries() << "\n"; } - os << "END " << _iotypestr("PROFILE2D") << "\n\n"; + os << "END " << _iotypestr("PROFILE2D") << "\n"; os.flags(oldflags); } void WriterYODA::writeScatter1D(std::ostream& os, const Scatter1D& s) { ios_base::fmtflags oldflags = os.flags(); os << scientific << showpoint << setprecision(_precision); os << "BEGIN " << _iotypestr("SCATTER1D") << " " << s.path() << "\n"; //first write the Variations, a dummy annotation which //contains the additional columns which will be written out //for sytematic variations YAML::Emitter out; out << YAML::Flow ; out << s.variations(); - //os << "Variations" << ": " << out.c_str() << "\n"; + os << "Variations" << ": " << out.c_str() << "\n"; // then write the regular annotations _writeAnnotations(os, s); std::vector variations= s.variations(); //write headers std::string headers="# xval\t "; for (const auto &source : variations){ headers+=" xerr-"+source+"\t xerr+"+source+"\t"; } os << headers << "\n"; //write points for (const Point1D& pt : s.points()) { // fill central value os << pt.x(); // fill errors for variations. The first should always be "" which is nominal. // Assumes here that all points in the Scatter have the same // variations... if not a range error will get thrown from // the point when the user tries to access a variation it // doesn't have... @todo maybe better way to do this? for (const auto &source : variations){ os << "\t" << pt.xErrMinus(source) << "\t" << pt.xErrPlus(source) ; } os << "\n"; } - os << "END " << _iotypestr("SCATTER1D") << "\n\n"; + os << "END " << _iotypestr("SCATTER1D") << "\n"; os << flush; os.flags(oldflags); } void WriterYODA::writeScatter2D(std::ostream& os, const Scatter2D& s) { ios_base::fmtflags oldflags = os.flags(); os << scientific << showpoint << setprecision(_precision); + os << "BEGIN " << _iotypestr("SCATTER2D") << " " << s.path() << "\n"; //first write the Variations, a dummy annotation which //contains the additional columns which will be written out //for sytematic variations YAML::Emitter out; - out << YAML::Flow << YAML::BeginMap; - int counter=0; - std::vector variations= s.variations(); - // write annotations + out << YAML::Flow ; + out << s.variations(); + os << "Variations" << ": " << out.c_str() << "\n"; + // then write the regular annotations _writeAnnotations(os, s); + std::vector variations= s.variations(); //write headers /// @todo Change ordering to {vals} {errs} {errs} ... 
- std::string headers="# xval\t xerr-\t xerr+\t yval\t yerr-\t yerr+\t"; - //for (const auto &source : variations){ - // headers+=" yerr-"+source+"\t yerr+"+source+"\t"; - //} + std::string headers="# xval\t xerr-\t xerr+\t yval\t"; + for (const auto &source : variations){ + headers+=" yerr-"+source+"\t yerr+"+source+"\t"; + } os << headers << "\n"; //write points for (const Point2D& pt : s.points()) { /// @todo Change ordering to {vals} {errs} {errs} ... // fill central value os << pt.x() << "\t" << pt.xErrMinus() << "\t" << pt.xErrPlus() << "\t"; os << pt.y(); // fill errors for variations. The first should always be "" which is nominal. // Assumes here that all points in the Scatter have the same // variations... if not a range error will get thrown from // the point when the user tries to access a variation it // doesn't have... @todo maybe better way to do this? - //for (const auto &source : variations){ - os << "\t" << pt.yErrMinus() << "\t" << pt.yErrPlus() ; - // } + for (const auto &source : variations){ + os << "\t" << pt.yErrMinus(source) << "\t" << pt.yErrPlus(source) ; + } os << "\n"; } - os << "END " << _iotypestr("SCATTER2D") << "\n\n"; + os << "END " << _iotypestr("SCATTER2D") << "\n"; os << flush; os.flags(oldflags); } void WriterYODA::writeScatter3D(std::ostream& os, const Scatter3D& s) { ios_base::fmtflags oldflags = os.flags(); os << scientific << showpoint << setprecision(_precision); os << "BEGIN " << _iotypestr("SCATTER3D") << " " << s.path() << "\n"; //first write the Variations, a dummy annotation which //contains the additional columns which will be written out //for sytematic variations YAML::Emitter out; out << YAML::Flow ; out << s.variations(); + os << "Variations" << ": " << out.c_str() << "\n"; // then write the regular annotations _writeAnnotations(os, s); std::vector variations= s.variations(); //write headers /// @todo Change ordering to {vals} {errs} {errs} ... std::string headers="# xval\t xerr-\t xerr+\t yval\t yerr-\t yerr+\t zval\t "; for (const auto &source : variations){ headers+=" zerr-"+source+"\t zerr+"+source+"\t"; } os << headers << "\n"; //write points for (const Point3D& pt : s.points()) { /// @todo Change ordering to {vals} {errs} {errs} ... // fill central value os << pt.x() << "\t" << pt.xErrMinus() << "\t" << pt.xErrPlus() << "\t"; os << pt.y() << "\t" << pt.yErrMinus() << "\t" << pt.yErrPlus() << "\t"; os << pt.z(); // fill errors for variations. The first should always be "" which is nominal. // Assumes here that all points in the Scatter have the same // variations... if not a range error will get thrown from // the point when the user tries to access a variation it // doesn't have... @todo maybe better way to do this? for (const auto &source : variations){ os << "\t" << pt.zErrMinus(source) << "\t" << pt.zErrPlus(source) ; } os << "\n"; } - os << "END " << _iotypestr("SCATTER3D") << "\n\n"; + os << "END " << _iotypestr("SCATTER3D") << "\n"; os << flush; os.flags(oldflags); } } diff --git a/tests/pytest-counter b/tests/pytest-counter --- a/tests/pytest-counter +++ b/tests/pytest-counter @@ -1,15 +1,15 @@ #! /usr/bin/env python import yoda, random c = yoda.Counter(path="/foo", title="MyTitle") NUM_SAMPLES = 1000 for i in range(NUM_SAMPLES): c.fill(random.gauss(10,3)) -print(c.val(), "+-", c.err()) +print(c.val, "+-", c.err) yoda.write([c], "counter.yoda") aos = yoda.read("counter.yoda") for _, ao in aos.items(): print(ao) diff --git a/tests/pytest-div b/tests/pytest-div --- a/tests/pytest-div +++ b/tests/pytest-div @@ -1,20 +1,20 @@ #! 
/usr/bin/env python import yoda, random h1, h2 = [yoda.Histo1D(4, 0, 10) for _ in range(2)] for i in range(1000): h1.fill(random.uniform(0,10)) h2.fill(random.uniform(0,10)) s = h1 / h2 print(s) -for p in s.points(): +for p in s.points: print(" ", p) print() s = h1.divideBy(h2) print(s) -for p in s.points(): +for p in s.points: print(" ", p) diff --git a/tests/pytest-rebin b/tests/pytest-rebin --- a/tests/pytest-rebin +++ b/tests/pytest-rebin @@ -1,42 +1,42 @@ #! /usr/bin/env python import yoda import numpy as np h = yoda.Histo1D(10, 0, 5) for x in (0.1, 0.2, 1.3, 2.1, 2.7, 2.8, 4.0, 4.1): h.fill(x) -print(h.numBins()) +print(h.numBins) print(h.xEdges()) print("Ha1") ha1 = h.clone() ha1.rebinBy(2) print(ha1.numBins) print(ha1.xEdges()) -assert ha1.numBins() == 5 +assert ha1.numBins == 5 assert np.allclose(ha1.xEdges(), [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]) print("Ha2") ha2 = h.clone() ha2.rebinBy(2, 2, 7) -print(ha2.numBins()) +print(ha2.numBins) print(ha2.xEdges()) -assert ha2.numBins() == 7 +assert ha2.numBins == 7 assert np.allclose(ha2.xEdges(), [0.0, 0.5, 1.0, 2.0, 3.0, 4.0, 4.5, 5.0]) print("Hb1") hb1 = h.clone() hb1.rebinTo([0., 1., 3., 5.]) -print(hb1.numBins()) +print(hb1.numBins) print(hb1.xEdges()) -assert hb1.numBins() == 3 +assert hb1.numBins == 3 assert np.allclose(hb1.xEdges(), [0.0, 1.0, 3.0, 5.0]) print("Hb2") hb2 = h.clone() hb2.rebin([1., 1.5, 3., 4.5]) -print(hb2.numBins()) +print(hb2.numBins) print(hb2.xEdges()) -assert hb2.numBins() == 3 +assert hb2.numBins == 3 assert np.allclose(hb2.xEdges(), [1.0, 1.5, 3.0, 4.5])
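The updated tests above exercise the property-style Python accessors (numBins, points, val/err without call parentheses, while xEdges() and fill() remain methods). A compact, hedged round-trip sketch along the same lines, reusing only calls that appear in these tests:

import random
import yoda

h = yoda.Histo1D(4, 0, 10)
for _ in range(100):
    h.fill(random.uniform(0, 10))
print(h.numBins, h.xEdges())   # numBins is a property; xEdges() is still a call

s = h / h                      # histogram division yields a Scatter2D
for p in s.points:             # points is a property too
    print(" ", p)

yoda.write([h], "roundtrip.yoda")
aos = yoda.read("roundtrip.yoda")
for _, ao in aos.items():
    print(ao)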