diff --git a/ChangeLog b/ChangeLog
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,593 +1,597 @@
+2018-02-07  Andy Buckley  <andy.buckley@cern.ch>
+
+	* Add numCoeffs, numParams, and exprString methods to Ipol.
+
 2017-10-25  Holger Schulz  <iamholger@gmail.com>
-      
+
         * Bugfix missing import statement
 
 2017-12-29  Holger Schulz  <iamholger@gmail.com>
-      
+
         * Exposed more C++ functions to python for Pade approximation testing
 
 2017-11-10  Holger Schulz  <iamholger@gmail.com>
 
 	* Release 2.2.2 --- this is the result of many months of fiddling
 	  with especially multinest requirements.
 
 	* Sampling --- sobol, latin hypercube and random uniform sampler
 	           --- something like PARAM 0 1 2 3 result in sub space
 		       sampling
 		   --- biases now work e.g. PARAM 0 1 exp(x)
 	* Ipol     --- --order auto and friends automatically determine
 	               best order, this avoids oversampling.
 		   --- --logy parmeterises the logy of things, useful
 		       for large paramspace and especially likelihood
 		   --- --medianfilt can drop anchor points based on error
 		       this is experimental and meant to deal with the
 		       occasional huge NLO weight
 		   --- The output has the valid region for each object now
 		   stored at the end of the coefficients, this is also where
 		   they are read from --- this allows the usage of
 		   --medianfilt etc
 	* Tune     --- Replace the grid sampling with the already existing
 	           sampler machinery, so can use random, latin, sobol when
 		   using --scan to find good start point for minuit
 		   --- --profiles and --contours stores the minuit and minos
 		   plots
 		   --- limits are by default now set to the ipol region, this
 		   can be overridden when using -x CL arg
 		   --- tune script can read more than one ipol file
 		   A lot of stuff has been put into libraries to allow reusing
 		   individual steps.
 
 	    * Multinest --- this is now supported in prof2-tune-nest
 	    * GP        --- gp_minimize via prof2-tune-gp
 	    * emcee     --- MCMC sampling via prof2-emcee
 
 	* Eigentunes --- prof2-eigentunes is back and working, deltaChi2
 	                 must be provided, however, via --target
 
 	    * Bootstrapping in principle working but smearing is probably
 	      more complicated than what we have now prof2-bootstrap
-		
+
 
 
 2017-08-22  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Improvements to Ipol --- we allow different orders for each bin now,
 	there is also --order auto which does some jackknifing to figure out
 	the best suited polynomial order to avoid overfitting. The Ipol
 	outputformat is now binned 3 which dumps the minparam values and
 	maxparamvalues at the end of the coefficient string. This is necessary
 	as we now also allow to filter the inputs by median error (necessary
 	to tame NLO spiky histograms). The ranges are read back for each bin
 	separately now, happens in the C++ part.
 
 	* Overhaul of sampling, including things such as full support of sobol
 	sampling. Also, param range files with more than two numbers on a line
 	are interpreted as sub spaces for patch sampling.
 
 	* Add script prof2-ipolenvelopes which for each bin finds the minimum
 	and maximum polynomial value (using migrad) in the range of the
 	ipols validity. This is nice especially to check envelopes once
 	filtering in the ipol step was done
 
 2017-05-18  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Refitting of ipols using smeared ipol output.
 	Required some modification in data io structure.
 
 2017-05-04  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Fix in prof2-tune when checking analysis names, at the same time
 	allow for for closure test tuning, i.e. tune against input folder.
 
 
 2017-05-02  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Use  -march=native according to eigen3 faq for better performance
 	as well as -O3 in pyext
 
 	* Replace modulus based status message by simple counter in ipol stage
 
 	* Set version to 2.2.2beta for imminent release
 
 2017-04-28  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Use BDSSVD in case Eigen >=3.3.3
 
 	* Reworked prof2-ipol and ipol.py to use mk_ipolbin rather than
 	mk_ipolhisto, some speedups plus additional benefit of being
 	able to fiddle with inividual bins in the ipol building stage
 
 	* add --minos switch to prof2-tune toi run MINOS
 
 	* Write GOF and NDOF into result file
 
 2017-02-21  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Change prof2-ipol ierr mode default to 'symm'
 
 2017-02-17  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add a --limit-errs argument for prof2-tune, to avoid annoying
 	re-reading of large run dirs
 
 	* Tweaks to default error ipol order, and release figure handles
 	in envelope plotting.
 
 2017-02-02  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Update Copyright string
 
 	* Instead of exiting when prof2-ipol encounters nan coefficients in
 	value or error ipols, spit out a warning and ignore that histogram
 	entirely --- this makes prof2-ipol and friends much more robust
 
 	* tag as 2.2.1 and release.
 
 2017-02-01  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Use Agg as default matplotlib backend.
 
 	* Bump version to 2.2.1
 
 2016-12-19  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Even more robust prof-tune, added --filter CL option to
 	automatically get rid of zero error bins
 
 	* Fixed a bug where IpolBin.n was never set which is needed for
 	the PointMatcher (i.e. weights syntax)
 
 	* Not using YODA Pointmatcher but own due to different regexes
 	required.
 
 	* Make release 2.2
 
 2016-12-15  Holger Schulz  <holger.schulz@cern.ch>
 
 	* More robust scripts and more meaningful error messages, especially
 	prof2-tune
 
 2016-11-30  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Replace Counter code when running mk_Structure with much faster code
 
 	* Bump version to 2.2
 
 	* Bump DataFormat to binned 2
 
 	* Allow passing file names or open files (or similar streams) to
 	ipolio functions
 
 	* prof2-ipol can read yaml now
 
 	* Add some of Elliot's code for choosing sub spaces in prof2-ipol and
 	some plot routines to contrib
 
 	* Add SOBOL sampling to contrib
 
 2016-08-26  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Use more readable Cython DL path in bootstrap
 
 2016-07-18  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Typo fixes -- thanks to Leif Gellersen.
 
 2016-07-14  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Release version 2.1.4
 
 	* Improve Makefile tests for ROOT and Cython versions.
 
 2016-07-11  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add Ipol min/max param vals getters and map them into Python.
 
 	* Add a built-in /REF-stripping option to data histo loading.
 
 	* Improvements to the Python ipol metadata interface.
 
 2016-07-05  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Try to optimise long-vector building by not executing pow() if the exponent would be zero.
 
 	* Split C++ calcValue function into two, to allow re-use of a precalculated long-vector.
 
 2016-06-23  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Split 'dot product' part of ipol value calculation into a standalone calcValue function.
 
 	* Use const references when passing const vectors in C++ API.
 
 2016-04-14  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Introduce 1D sensitivity based on gradient
 
 	* User needs to specify --cmap or --grad when running prof2-sens now
 
 2016-04-01  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Add a first version of the parametrisation using Machine Learning
 
 2016-02-16  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Bump to version 2.1.3
 
 2016-02-15  Holger Schulz  <holger.schulz@cern.ch>
 
 	* prof2-sens: bugfix, add weight file usage, clean up CL
 
 	* prof2-I: add weight file usage
 
 	* prof2-predict: add weight file usage, fix output writing
 
 	* Extended documentation
 
 2016-02-12  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Documentation including asciinema
 
 2016-02-11  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Remove min_runs function, replace with numCoeffs
 	from C++ code
 
 	* Add prof2-envelopes script
 
 	* In prof-tune simplechi2 --- continue on weight==0
 	to allow some robustness when switching off bins
 
 
 2016-02-10  Holger Schulz  <holger.schulz@cern.ch>
 
 	* First version of prof2-runcombs plus sampling functions
 	salvaged from Prof1, modified to have no scipy dependence
 
 	* Add --rc switch to prof2-ipol to optionally read runcombs
 	Syntax is  --rc runcoms.dat[colon]5
 	(Minimally invasive addon)
 
 	* Code cull in prof2-ipol. Only support multiprocessing
 	version, default 1 thread. Changed CL switch to '-j'.
 
 
 2015-12-16  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Only calculate polynomials structure once per instance
 	-> significant speed increase
 
 	* Add the brute force grid scan (--scan) to prof2-tune
 
 
 2015-12-11  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Minimiser tweaks --- no more need for param translation,
 	make strategy steerable (--strategy or -s), -q suppresses
 	iminuit output now, calculation and writing of correlation
 	matrix into results, dump weights into results file
 
 2015-12-10  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Undo ui madness
 
 	* Remove numpy dependence (except sampling.py)
 
 	* Add prof-sens for sensitivity plotting exploting derivatives
 
 2015-12-09  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Can specify PREFIX when calling make
 
 2015-12-09  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Improve prof2-predict UI.
 
 	* Add metadata printing in prof2-lsipol debug mode.
 
 	* Add -v and -q flag shortcuts to all scripts.
 
 2015-12-07  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Use __all__ in __init__.py
 
 	* Started sphinx documentation
 
 	* Support only iminuit, bump version to 2.2.alpha, restructure imports
 
 	* Sanity checks when calling parametrisation
 
 2015-12-03  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Provide a version() function in the C++ library, and map it into
 	Python. Both sources use the VERSION variable defined in the
 	Makefile at build time.
 
 	* Reduce SVD fitting threshold to 1e-20.
 
 2015-12-03  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Make derivative calculation work also when scaling params
 
 	* Let YODA write YODA
 
 2015-12-02  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Derivative calculation
 
 2015-11-28  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Version 2.1.2
 
 	* Revert range shift in Ipol.cc, since it produced (small)
 	numerical errors rather than improved stability. To be
 	understood...
 
 	* Add test/mkpolydata script, for generating polynomial pseudodata
 	used in closure tests.
 
 2015-11-27  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Fix bug in histo loading.
 
 	* Adding log binning and other options to prof2-residuals.
 
 2015-11-24  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Version 2.1.1 release.
 
 	* Fix prof2-predict YODA writing, and improve DataHisto.toYODA.
 
 	* Change parameter rescaling from using the [0,1] range to instead
 	use [1,2], to avoid arbitrarily scaled param numbers. BREAKS IPOL
 	FILE COMPATIBILITY WITH 2.1.0!!
 
 	* Set minimizer initial step size = prange/10.
 
 	* Add automatic determination of maximum MC errors in prof-tune,
 	and use them to regularize interpolated errs which could distort
 	the fit.
 
 	* Separate internal Minuit function names (with syntax
 	restrictions) from the free-form external param names.
 
 2015-11-23  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add pyext/professor2/chi2.py
 
 	* Add find_maxerrs() to the Python interface, for use in error regularisation.
 
 	* Use the param file name as a regex.
 
 	* Use __slots__ in the internal histogram and bin types, for better memory efficiency.
 
 	* Fix (re-fix?) use of inappropriate indices in calculation of mean and median MC uncertainties.
 
 	* More proactive data object deletion after conversion to YODA data format.
 
 2015-11-22  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Version 2.1.0 (significant version change due to scaling, all-orders, and ROOT support)
 
 2015-11-18  Andy Buckley  <andy.buckley@cern.ch>
 
 	* dataio.py: Add ROOT file reading ability.
 
 	* Change rescaling I/O behaviour a bit, moving some hacked
 	Python-only Ipol string constructors into explicit API improvements, and
 	only activating rescaling if DoParamScaling is set true.
 
 2015-11-16  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add CPPFLAGS steering in Makefile and API tidying.
 
 2015-11-11  Holger Schulz  <holger.schulz@cern.ch>
 
 	* Add automatic raw param value -> unit range mapping to Ipol.
 
 	* Add any-order long vector calculation.
 
 2015-11-10  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add prof2-lsipol as a handy way to make a tuning weights file
 	and for general convenience.
 
 2015-10-06  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Version 2.0.0
 
 	* Add 'make dist' target to Makefile, and tweak/reduce Cython necessity
 
 	* Allow calls to IpolBin and IpolHisto methods with params passed
 	as unpacked *args as well as a packed iterable.
 
 	* Add possibility to pass params as an dict-like as well as a list
 	or tuple of floats -- note that the dict-like must return
 	*ordered* values consistent with the Ipol training. Also ensure
 	the float type.
 
 	* Add conversion of C++ exceptions to Python exceptions.
 
 	* Add professor2.utils module, providing an opportunistic use of
 	OrderedDict for params so the params order required by Ipol is the
 	same as the order in which they are specified in the (first run's)
 	params.dat file.
 
 	* Change Python-mapped Ipol methods with no args to be properties.
 
 	* Add nice __repr__ methods for Histo and Bin objects.
 
 	* Specialise Histo as IpolHisto and DataHisto, and provide
 	toData{Histo,Bin} on IpolHisto and IpolBin respectively.
 
 	* Remove ProfMaster.
 
 2015-10-04  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add protection of svd.setThreshold call to ensure it is
 	supported/needed, via Eigen version number macros.
 
 	* Add vmin and vmax args to Ipol and IpolBin in Python.
 
 2015-10-02  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Put histo file reading in a try..except block so read failures
 	on non-histo files in the run dirs are not fatal.
 
 	* Add protection in the histo loader, so we don't trip up on
 	Rivet's new cross-section and counter objects.
 
 2015-09-30  Andy Buckley  <andy.buckley@cern.ch>
 
 	* src/Ipol.h: Use Eigen/SVD include path rather than non-standard eigen3/Eigen/SVD.
 
 2015-09-14  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Use Eigen's svd.setThreshold(1e-20) to prevent the errors we saw with Eigen
 	3.2.2 and later when looking at 5th order polynomials in 3 dimensinos
 	with 900+ anchors
 
 	* Some clean up
 
 2015-09-11  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding prof2-residuals script for ipol faithfulness testing.
 
 	* Rename scripts from prof-* to prof2-* to allow parallel installations of Prof1 and Prof2.
 
 	* Move some non-core scripts from bin to contrib.
 
 	* Make root-config dependency optional
 
 	* Lots of reworking to make prof-tune work again, and to use weight file parsing.
 
 2015-09-10  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Use weight file path parsing in prof-ipol.
 
 	* Add prof.weights submodule, with a copy of PointMatcher and a
 	new read_pointmatchers function for handling weight files.
 
 	* Provide __version__attribute in the Python module.
 
 	* User script CLI consistency, simplification, general smoothing...
 
 2015-08-30  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add recursive scangrid generator function, and other tweaks.
 
 2015-08-18  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Tweaks to ipol file parsing functions.
 
 	* Fix a bug in IpolBin's use of interpolated errors.
 
 	* Convert mean and median error parameterisation to use new 0th order ipols.
 
 2015-08-15  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* A first version of catching singular matrices in the SVD
 
 	* Ipol has 1 additional argument, "threshold", that determines what
 	singular values are considered 0, fully propagated to pyext
 
 	* Going 2 Pro 2 Fessional
 
 	* Some startup checks of prof- scripts
 
 	* remove bin/prof-sampling, rename prof-prediction -> prof-predict
 
 2015-08-13  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Reverted unintended commit of hack.
 
 	* Added very basic prof-config script to bin
 
 	* Update PATH when sourcing setup.sh
 
 2015-08-12  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Added prof-prediction which reads in a ifile and a point in
 	parameter space (either a text file, a comma separated list or just
 	the args) and writes out an ipolhisto. Tested to work.
 
 2015-08-03  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Added 0-order polynomials, i.e. constant values.
 	Currently, the coefficent is simply the value of
 	the first anochorpoint.
 
 2015-07-15  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Some pull plotting functionality
 
 2015-07-08  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* Calculate and print G.o.f.
 
 	* Write some meta info to results
 
 	* Read limits, fixed params from single textfile --limits
 
 	* Output files steered by --output
 
 	* Remove now obsolete bin/prof-interpolate
 
 
 2015-07-08  Holger Schulz  <holger.schulz@durham.ac.uk>
 
 	* First working version of prof-tune (pyminuit, simpleGOF)
 
 	* Multiprocessing to speed up prof-ipol (--multi)
 
 	* prof-ipol-tabulated to write out interpolation of tabulated data
 
 	* Minor bugfixes
 
 
 2015-06-16  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding mean and median strategies for assigning constant ipol bin errors.
 
 2015-06-03  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding a Bin base class in the Python side, and a coherent handling of multiple (interpolated) bin errors.
 
 	* Various tweaks and minor fixes.
 
 2015-05-10  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add 'Minuit' class importing to the __init__.py file.
 
 2015-05-05  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Rewrite C++ ipolstring parsing using STL string functions rather than Boost: shorter! Boost dependency GONE :-)
 
 	* Replace boost::tuple with std::pair and clean up ParamPoints interface a bit.
 
 	* Add a --veto option to prof-sample, for a user-specified vetoing function.
 
 	* Move Sampler from prof-sample into professor2/__init__.py
 
 2015-05-04  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Adding a first version of prof-tune with ipol reading from file.
 
 	* Adding options for (average) error interpolation and ipol persistency to prof-ipol and module functions.
 
 2015-05-03  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Add bin/prof-ipol simple script using the functions below -- to be merged with prof-interpolate.
 
 	* professor2/__init__.py: Add Histo, DataBin, IpolBin and basic data handling functions.
 
 	* Remove unnecessary dlopen fiddling from professor2/__init__.py
 
 2015-04-23  Andy Buckley  <andy.buckley@cern.ch>
 
 	* Remove bound ParamPoints pointer from Ipol, and ditch lazy coeff evaluation in favour of simplicity and construct-time coeff calculating.
 
 	* Move long vector and coeff building into unbound functions rather than Ipol methods.
 
 	* Start of ChangeLog. Library has already iterated quite a bit.
 
 	* FILL IN EARLY HISTORY FROM HG LOG
diff --git a/include/Professor/Ipol.h b/include/Professor/Ipol.h
--- a/include/Professor/Ipol.h
+++ b/include/Professor/Ipol.h
@@ -1,216 +1,232 @@
 #ifndef PROF_IPOL_H
 #define PROF_IPOL_H
 
 #include "Professor/ParamPoints.h"
 #include <string>
 #include <vector>
 #include <sstream>
 #include <iostream>
 #include <stdexcept>
 
 namespace Professor {
 
 
   /// Throwable error
   struct IpolError : public std::runtime_error {
     IpolError(const std::string& reason) : std::runtime_error(reason) { }
   };
 
 
   /// @name Calculator functions for parameterisation elements
   //@{
 
   /// Calculate the number of coefficients for the given parameter space dimension and polynomial order
   /// @todo Deal in uints
   int numCoeffs(int dim, int order);
 
   /// Calculate parametrisation coefficients
   /// @note structure is the pre-calculated algebraic structure of the polynomial
   /// @todo Provide an (unscaled) arrays-only version with a vector<vector<double>> in place of ParamPoints
   std::vector<double> calcCoeffs(const ParamPoints& pts, const std::vector<double>& vals, int order,
                                  double threshold, const std::vector<std::vector<int> >& structure);
 
   /// Calculate an interpolated value
   double calcValue(const std::vector<double>& params,
                    const std::vector<double>& coeffs, int order,
                    const std::vector<std::vector<int> >& structure);
 
   /// Calculate an interpolated value
   double calcValue(const std::vector<double>& paramslongvector,
                    const std::vector<double>& coeffs);
 
   /// @brief Make the algebraic coefficient structure
   ///
   /// The structure is a nested vector of ints, with each entry in the outer vector
   /// representing a term in the polynomial and the inner vectors being the powers
   /// to which each param should be raised in that term.
   ///
   /// @note In a given param space, the structure can be reused between any values
   ///   or errors parameterised at the same polynomial order. So pre-computing
   ///   structures at 3rd, 4th and 5th order would cover everything needed...
   std::vector< std::vector<int> > mkStructure(int dim, int order);
 
   /// Make the vector of polynomial terms to which the coeffs are to be applied, at the given order
   ///
   /// @note The same long vector can be used in any parameterised value calculation
   ///    in the "active" parameter space, between values & errors and multiple bins.
   ///    Just need to "dot" it with the appropriate fixed coefficient vector.
   std::vector<double> mkLongVector(const std::vector<double>& params, int order,
                                    const std::vector< std::vector<int> >& structure);
 
   // vector<double> mkLongVectorDerivative(const vector<double>& params, int order,
   //                                       const vector<double>& minPV, const vector<double>& maxPV,
   //                                       const vector<vector<int> >& structure);
 
   // vector<double> mkLongVectorGradient(const vector<double>& params, int coord, int order,
   //                                     const vector<double>& minPV, const vector<double>& maxPV,
   //                                     const vector<vector<int> >& structure);
 
   //@}
 
 
 
   /// The heart of Professor: the interpolation of a single numerical value through the parameter space
   class Ipol {
   public:
 
     /// @brief Constructor for calculation of coefficients
     ///
     /// The @a pts list of N-dimensional parameter points must correspond to the @a ptvals
     /// list of values at those points, to be interpolated at the given polynomial @a order.
     /// A name may optionally be given.
     ///
     /// @note Expert settings: The stability of the SVD operation is controlled
     /// by the @a svdthreshold parameter, which should not normally be
     /// touched. The stability is normally ensured by internally scaling
     /// parameter points into unit ranges within the sampled hypercube defined
     /// by @a pts; changing @doscaling to false will disable this scaling, which
     /// simplifies Ipol I/O (no PMin/Max metadata is needed) but risks SVD
     /// instability.
     ///
     Ipol(const ParamPoints& pts, const std::vector<double>& ptvals, int order,
          const std::string& name="", double svdthreshold=1e-20, bool doscaling=true) {
       _dim = pts.dim();
       _order = order;
       _name = name;
       _structure = mkStructure(_dim, _order);
       if (doscaling) {
         _minPV = pts.ptmins();
         _maxPV = pts.ptmaxs();
       }
       _coeffs = calcCoeffs(pts, ptvals, _order, svdthreshold, _structure);
     };
 
     /// Constructor to read ipol from file (one string for each object)
     /// @todo Also allow optional passing of pmins, pmaxs vectors for the case where the string includes scaling?
     Ipol(const std::string& s) {
       fromString(s);
     };
 
     ~Ipol() {
       _coeffs.clear();
       _structure.clear();
     };
 
-    /// Get string representation
+
+    /// @name String representations
+    //@{
+
+    /// Get a persistent string representation of this Ipol
     std::string toString(const std::string& name="") const;
 
     /// Read and set coefficients (name), order from string
     /// @todo Also allow optional passing of pmins, pmaxs vectors for the case where the string includes scaling?
     void fromString(const std::string& s);
 
+    /// Get a string representation of the mathematical expression
+    std::string exprString() const;
+
+    //@}
+
 
     /// @name Calculations
     //@{
 
     /// Get the value of the parametrisation at point p
     double value(const std::vector<double>& p) const;
 
     /// Get the value of the derivative of the parametrisation at point p
     /// @todo Expose as a standalone calcDerivative function, cf. calcValue
     double derivative(const std::vector<double>& p) const;
 
     /// Get the gradient of the parametrisation at point p
     /// @todo Expose as a standalone calcGradient function, cf. calcValue
     std::vector<double> gradient(const std::vector<double>& p) const;
 
     //@}
 
 
     /// @name Coefficient access
     //@{
 
+    /// Get the number of coefficients
+    /// @note Equal to the free function ::numCoeffs(dim(), order())
+    size_t numCoeffs() const {
+      return _coeffs.size();
+    }
+
     /// Get the vector of coefficients by const reference
     const std::vector<double>& coeffs() const { return _coeffs; }
 
     /// Get a single coefficient
     const double& coeff(size_t i) const { return coeffs()[i]; }
 
     //@}
 
 
     /// @name Polynomial term structure
     //@{
 
     /// Get the polynomial term exponent structure
     const std::vector< std::vector<int> >& structure() const { return _structure; }
 
     /// Convert params to scaled params
     std::vector<double> sparams(const std::vector<double>& params) const;
 
     /// Get a long vector of polynomial terms (sans coefficients) for the given param point
     std::vector<double> longVector(const std::vector<double>& params) const {
       return mkLongVector(sparams(params), order(), structure());
     }
 
     //@}
 
 
     /// @name Basic ipol properties
     //@{
 
-    /// Accessor to the dimension of the param points
+    /// Accessors to the dimension of the param points
     int dim() const { return _dim; }
+    int numParams() const { return dim(); }
 
     /// Get the order of the parametrisation
     int order() const { return _order; }
 
     /// Get the name of the parametrised object
     std::string name() const { return _name; }
 
     //@}
 
 
     /// @name Limit-setting
     //@{
 
     void setParamLimits(const std::vector<double>& minpvs, const std::vector<double>& maxpvs) {
       setMinParamVals(minpvs);
       setMaxParamVals(maxpvs);
     }
 
     const std::vector<double>& minParamVals() { return _minPV; }
     const std::vector<double>& maxParamVals() { return _maxPV; }
 
     // The if statements here guarantee backwards compatibility with the format 'binned 2' where the
     // ranges were read from the ipol meta. In binned 3 they are read for each ipol from the coefficient
     // output
     void setMinParamVals(const std::vector<double>& minpvs) { if (_minPV.empty()) _minPV = minpvs; }
     void setMaxParamVals(const std::vector<double>& maxpvs) { if (_maxPV.empty()) _maxPV = maxpvs; }
 
     //@}
 
 
   private:
 
     int _dim, _order;
     std::vector<std::vector<int> > _structure;
     std::string _name;
     std::vector<double> _coeffs, _minPV, _maxPV;
 
   };
 
 
 }
 
 #endif
diff --git a/pyext/professor2/core.pyx b/pyext/professor2/core.pyx
--- a/pyext/professor2/core.pyx
+++ b/pyext/professor2/core.pyx
@@ -1,209 +1,221 @@
 #cython: embedsignature=True
 
 cimport professor as c
 cimport cython.operator.dereference as deref
 
 
 def version(astuple=False):
     "Professor version code, as a string by default or a tuple on request"
     v = c.version()
     return v.split(".") if astuple else v
 
 
 def numCoeffs(dim, order):
     return c.numCoeffs(dim, order)
 
 def mkStructure(dim, order):
     return c.mkStructure(dim, order)
 
 def mkLongVector(params, order, structure):
     return c.mkLongVector(params, order, structure)
 
 
 def mkProfessorMatrix(params, order):
     dim=len(params[0])
     s = mkStructure(dim, order)
     import numpy as np
     PM = np.zeros((len(params), numCoeffs(dim, order)))
 
     for a, p in enumerate(params):
         lv = mkLongVector(p, order, s)
         for i in xrange(len(lv)):
             PM[a][i] = lv[i];
     return PM
 
 
 cdef class ParamPoints:
     cdef c.ParamPoints* _ptr
 
     def __cinit__(self, pvec):
         self._ptr = new c.ParamPoints(pvec)
 
     def __del__(self):
         del self._ptr
 
     def __dealloc__(self):
         del self._ptr
 
 
 cdef class Ipol:
     """An interpolation of a scalar function built from a list of values across
     a set of parameter point anchors.
 
     The main workhorse object in Professor. The interpolation coefficients are
     calculated lazily, i.e. when first used.
     """
     cdef c.Ipol* _ptr
 
     def __cinit__(self, *args):
         # NOTE: we shouldn't invent Python-only constructors for a one-off purpose that can be done adequately/better with an explicit call...
         # if len(args) == 3 and type(args[0]) is str and type(args[1]) is str and type(args[2]) is str:
         #     self._ptr = new c.Ipol(args[0])
         #     self._ptr.setMinParamVals([float(x) for x in args[1].split()])
         #     self._ptr.setMaxParamVals([float(x) for x in args[2].split()])
         if len(args) == 1 and type(args[0]) is str: # Backward compatibility --- no scaling
             self._ptr = new c.Ipol(args[0])
             # self._ptr.setMinParamVals([0 for x in xrange(self._ptr.dim())])
             # self._ptr.setMaxParamVals([1 for x in xrange(self._ptr.dim())])
         else:
             pp = ParamPoints(args[0])
             vals = list(args[1])
             order = int(args[2])
             name = ""
             threshold = 1e-40 #< ???
             if len(args) == 4:
                 try:
                     threshold = float(args[3])
                 except:
                     name = str(args[3])
             if len(args) == 5:
                     name = str(args[3])
                     threshold = float(args[4])
             self._ptr = new c.Ipol(deref(pp._ptr), vals, order, name, threshold, True)
 
     def __del__(self):
         del self._ptr
 
     def __dealloc__(self):
         del self._ptr
 
 
     @property
+    def numCoeffs(self):
+        return self._ptr.numCoeffs()
+
+    @property
     def coeffs(self):
         return self._ptr.coeffs()
 
     @property
     def structure(self):
         #rtn = []
         return self._ptr.structure()
 
     def longvector(self, params):
         return self._ptr.longVector(params)
 
     @property
     def dim(self):
         return self._ptr.dim()
 
     @property
+    def numParams(self):
+        return self._ptr.numParams()
+
+    @property
     def order(self):
         return self._ptr.order()
 
     @property
     def name(self):
         return self._ptr.name()
 
     def value(self, *params, vmin=None, vmax=None):
         """Calculate the value of this interpolation at the given params point,
         forcing return within the range vmin..vmax.
 
         params can be an expanded tuple of floats, an unexpanded iterable of
         floats, or an ordered dict of paramname -> value.
         """
 
         import collections
 
         ## Detect if the params have been passed as a single iterable and convert
         if len(params) == 1 and isinstance(params[0], collections.Iterable):
             params = params[0]
             ## Further, detect if the params have been passed as a (ordered!) dict-like and extract the (ordered) values
             if isinstance(params, collections.Mapping):
                 params = params.values()
 
         ## Ensure that the param values are floats
         params = [float(p) for p in params]
 
         ## Compute the interpolated value at 'params' and impose optional range limits
         v = self._ptr.value(params)
         if vmin is not None and v < vmin:
             return vmin
         if vmax is not None and v > vmax:
             return vmax
 
         return v
 
     ## Alias
     val = value
 
 
     def derivative(self, *params):
         import collections
 
         ## Detect if the params have been passed as a single iterable and convert
         if len(params) == 1 and isinstance(params[0], collections.Iterable):
             params = params[0]
             ## Further, detect if the params have been passed as a (ordered!) dict-like and extract the (ordered) values
             if isinstance(params, collections.Mapping):
                 params = params.values()
 
         ## Ensure that the param values are floats
         params = [float(p) for p in params]
         return  self._ptr.derivative(params)
 
     ## Alias
     der = derivative
 
 
     def gradient(self, *params):
         import collections
 
         ## Detect if the params have been passed as a single iterable and convert
         if len(params) == 1 and isinstance(params[0], collections.Iterable):
             params = params[0]
             ## Further, detect if the params have been passed as a (ordered!) dict-like and extract the (ordered) values
             if isinstance(params, collections.Mapping):
                 params = params.values()
 
         ## Ensure that the param values are floats
         params = [float(p) for p in params]
         return  self._ptr.gradient(params)
 
     ## Alias
     grad = gradient
 
 
 
     def setParamLimits(self, pmins, pmaxs):
         "Set the minimum and maximum param values via 2 lists ordered cf. the param names. Used in SVD internal scaling."
         self._ptr.setParamLimits(pmins, pmaxs)
 
     def minParamVals(self):
         "Get the minimum param values used in SVD internal scaling."
         return self._ptr.minParamVals()
     def setMinParamVals(self, pmins):
         "Set the minimum param values via a list of values ordered cf. the param names. Used in SVD internal scaling."
         self._ptr.setMinParamVals(pmins)
 
     def maxParamVals(self):
         "Get the maximum param values used in SVD internal scaling."
         return self._ptr.maxParamVals()
     def setMaxParamVals(self, pmaxs):
         "Set the maximum param values via a list of values ordered cf. the param names. Used in SVD internal scaling."
         self._ptr.setMaxParamVals(pmaxs)
 
 
     def toString(self, name=""):
         "Produce a persistent string representing this Ipol object"
         return self._ptr.toString(name)
 
+    def exprString(self):
+        "Produce a human-readable string representing this Ipol's algebraic expression"
+        return self._ptr.exprString()
+
     def __repr__(self):
         return self.toString(self.name)
diff --git a/pyext/professor2/professor.pxd b/pyext/professor2/professor.pxd
--- a/pyext/professor2/professor.pxd
+++ b/pyext/professor2/professor.pxd
@@ -1,52 +1,55 @@
 from libcpp.map cimport map
 from libcpp.pair cimport pair
 from libcpp.vector cimport vector
 from libcpp cimport bool
 from libcpp.string cimport string
 
 
 cdef extern from "Professor/Professor.h" namespace "Professor":
     string version()
 
 cdef extern from "Professor/Ipol.h" namespace "Professor":
     int numCoeffs(int dim, int order)
     vector[vector[int] ] mkStructure(int, int)
     vector[double] mkLongVector(const vector[double]&, int, const vector[vector[int] ]&)
 
 cdef extern from "Professor/ParamPoints.h" namespace "Professor":
     cdef cppclass ParamPoints:
         ParamPoints(const vector[vector[double]]&) except +
 
 
 cdef extern from "Professor/Ipol.h" namespace "Professor":
     cdef cppclass Ipol:
 
         Ipol(const ParamPoints& p, const vector[double]&, int, const string&, double, bool) except +
         Ipol(const string&) except +
 
         string name() except +
         int order() except +
         int dim() except +
 
+        size_t numCoeffs() except +
         const vector[double]& coeffs() except +
         const double& coeff(size_t) except +
 
         const vector[vector[int]]& structure() except +
         vector[double] longVector(const vector[double]&) except +
 
+        int numParams() except +
         const ParamPoints& params() except +
 
         double value(const vector[double]&) except +
 
         double derivative(const vector[double]&) except +
         const vector[double] gradient(const vector[double]&) except +
 
         string toString() except +
         string toString(const string&) except +
 
+        string exprString() except +
+
         const vector[double]& minParamVals() except +
         const vector[double]& maxParamVals() except +
         void setParamLimits(const vector[double]&, const vector[double]&) except +
         void setMinParamVals(const vector[double]&) except +
         void setMaxParamVals(const vector[double]&) except +
-
diff --git a/src/Ipol.cc b/src/Ipol.cc
--- a/src/Ipol.cc
+++ b/src/Ipol.cc
@@ -1,427 +1,444 @@
 #include "Professor/Ipol.h"
 #include "Eigen/SVD"
 #include <sstream>
 #include <cassert>
 #include <cmath>
 #include <set>
 #include <algorithm>
 
 namespace Professor {
 
   using namespace std;
   using namespace Eigen;
 
 
   namespace { //< hide this symbol, since not in API
 
     // Scaling function to map x from [a,b] into [1,2].
     // NB. Target range does not touch 0, e.g. [0,1] to avoid raising very small numbers to large powers.
     double map_prange(double x, double a, double b) {
       return (x-a)/(b-a);
     }
   }
 
 
   // NB. Not a member function
   int numCoeffs(int dim, int order) {
     int ntok = 1;
     int r = min(order, dim);
     for (int i = 0; i < r; ++i) {
       ntok = ntok*(dim+order-i)/(i+1);
     }
     return ntok;
   }
 
 
   // NB. Not a member function
   std::vector<double> calcCoeffs(const ParamPoints& pts, const vector<double>& vals, int order,
                                  double threshold, const vector<vector<int> >& structure) {
 
     // Early exit if this is a trivial 0th order polynomial
     vector<double> rtn;
     if (order == 0) {
       rtn.push_back(vals[0]);
       return rtn;
     }
 
     // Check the inputs
     if (pts.numPoints() != vals.size())
       throw IpolError("pts.numPoints() != vals.size() in calcCoeffs");
     const int ncoeff = numCoeffs(pts.dim(), order);
     if (ncoeff > pts.numPoints()) {
       stringstream ss;
       ss << "Ipol: not enough (" << ncoeff << " vs. " << pts.numPoints() << ") anchor points "
          << "for interpolating with " << pts.dim() << " params at order " << order;
       for (unsigned int i_order=1;i_order<order;i_order++) {
         if (numCoeffs(pts.dim(), i_order)<=pts.numPoints())
           ss << "\n Order " << i_order  << " requires " << numCoeffs(pts.dim(), i_order) << " anchors";
       }
       throw IpolError(ss.str());
     }
 
     // Create Eigen objects for the SVD solving
     MatrixXd DP = MatrixXd(pts.numPoints(), ncoeff);
     VectorXd MC = VectorXd(pts.numPoints());
 
     // The parameter scaling business
     std::vector<std::vector<double> > origpoints = pts.points();
     std::vector<std::vector<double> > scaledpoints;
     std::vector<double> minPV = pts.ptmins();
     std::vector<double> maxPV = pts.ptmaxs();
 
     for (int p = 0; p < origpoints.size(); ++p) {
       std::vector<double> temp;
       for (int i = 0; i < pts.dim(); ++i) {
         temp.push_back(map_prange(origpoints[p][i], minPV[i], maxPV[i]));
       }
       scaledpoints.push_back(temp);
     }
 
 
     // Populate the matrix to be inverted
     vector<double> tempLV;
     for (int a = 0; a < pts.numPoints(); ++a) {
       tempLV = mkLongVector(scaledpoints[a], order, structure);
       for (size_t i = 0; i < tempLV.size(); ++i) {
         DP(a, i) = tempLV[i];
       }
       // The vector of values (corresponding to anchors)
       MC[a] = vals[a];
     }
 
     #if EIGEN_WORLD_VERSION >= 3 && EIGEN_MAJOR_VERSION >= 3 && EIGEN_MINOR_VERSION >= 3
     BDCSVD<MatrixXd> svd(DP,ComputeFullU|ComputeFullV );
     #else
     JacobiSVD<MatrixXd> svd = DP.jacobiSvd(ComputeThinU|ComputeThinV);
     #endif
 
     #if EIGEN_WORLD_VERSION >= 3 && EIGEN_MAJOR_VERSION >= 2 && EIGEN_MINOR_VERSION >= 1
     svd.setThreshold(threshold); // Needed TODO find transform for dependence on stuff
     #endif
 
     // Check for singular values, i.e. fully correlated parameters
     /// @todo Maybe figure out how to use Eigen's setThreshold better?
     VectorXd svals = svd.singularValues();
     for (unsigned int i = 0; i < svd.nonzeroSingularValues();++i) {
       if (fabs(svals[i]) < threshold) {
         std::cout << "Singular value encountered, aborting" << std::endl;
         abort();
       }
     }
 
     // Solve for coefficients
     VectorXd co = svd.solve(MC);
 
     // Populate the coefficient std::vector and return
     for (size_t i = 0; i < ncoeff; ++i) rtn.push_back(co[i]);
     return rtn;
   }
 
 
   double calcValue(const vector<double>& params,
                    const vector<double>& coeffs, int order,
                    const vector< vector<int> >& structure) {
     const vector<double> lv = mkLongVector(params, order, structure);
     return calcValue(lv, coeffs);
   }
 
 
   double calcValue(const vector<double>& paramslongvector,
                    const vector<double>& coeffs) {
     // Dot product of params long-vector with coeffs -> value
     assert(paramslongvector.size() == coeffs.size());
     double v = 0.0;
     for (size_t i = 0; i < paramslongvector.size(); ++i) {
       //cout << i << ": " << coeffs[i] << " * " << paramslongvector[i] << " = " << coeffs[i]*paramslongvector[i] << endl;
       v += coeffs[i] * paramslongvector[i];
     }
     return v;
   }
 
 
   vector<vector<int> > mkStructure(int dim, int order) {
     if (order < 0)
       throw IpolError("Polynomial order " + to_string(order) + " not implemented");
 
     const vector<int> zero(dim, 0);
     set< vector<int> > rtn; // The set takes care of not having duplicates.
                             // We really only keep it for bookkeeping
     vector<vector<int> > rtn2;
     rtn.insert(zero); // The constant offsets in all dimensions
 
     rtn2.push_back(zero);
 
 
 
     if (order>0) {
     // The set of parameter base vectors, e.g.
     //
     // [1,0,0]
     // [0,1,0]
     // [0,0,1]
     //
     // Note: these are also used in the structure as they
     // are the linear terms.
     //
       vector<vector<int> > BS;
       for (unsigned int d = 0; d < dim; ++d) {
         vector<int> p(dim,0); // Initialise a dim-dimensional vector with all elements 0
         p[d]=1;
         rtn.insert(p); // Add it to the structure
         rtn2.push_back(p);
         BS.push_back(p);
       }
 
 
       auto temp = BS;
       vector<vector<int> > temp2;
       vector<int> e(dim,0); // Initialise a dim-dimensional vector with all elements 0
 
       // Recursively add base vectors
       for (unsigned int o = 1; o < order; ++o) {
         temp2.clear();
-       
+
         for ( auto const & t : temp) {
           for (auto const & bs : BS) {
             // Create a new element
             for (unsigned int d = 0; d < dim; d++) {
               e[d] = t[d] + bs[d];
             }
             temp2.push_back(e);
           }
         }
-        temp=temp2; // For the next order, we want to add base vectors 
+        temp=temp2; // For the next order, we want to add base vectors
                     // to each element of the current order
         for (auto const &v : temp2) {
           if (rtn.count(v) == 0) {
             rtn2.push_back(v);
           }
           rtn.insert(v); // The set takes care of not having duplicates.
         }
       }
     }
     return rtn2;
   }
 
 
   vector<double> mkLongVector(const vector<double>& params, int order, const vector< vector<int> >& structure) {
     if (order < 0)
       throw IpolError("Polynomial order " + to_string(order) + " not implemented");
 
     vector<double> rtn;
     for (const vector<int>& v : structure) {
       double prod = 1.0;
       for (size_t i = 0; i < v.size(); ++i) {
         if (v[i] == 0) continue;
         /// @todo Can be speeded with (precomputable?) integer powers / exp-by-doubling?
         prod *= std::pow(params[i], v[i]);
       }
       rtn.push_back(prod);
     }
     return rtn;
   }
 
 
  /// @todo Why the min/maxPV args?
  /// @todo Expose to API
  // Long-vector of the SUM over all parameter directions of each monomial's
  // partial derivatives, evaluated at the (already scaled) params point.
  // The min/max param values supply the Jacobian factor 1/(max-min) of the
  // internal [min,max] -> [0,1] scaling for each differentiated direction.
  vector<double> mkLongVectorDerivative(const vector<double>& params, int order,
                                        const vector<double>& minPV, const vector<double>& maxPV,
                                        const vector<vector<int> >& structure) {
    if (order < 0)
      throw IpolError("Polynomial order " + to_string(order) + " not implemented");

    vector<double> rtn;
    bool firstItem = true;
    for (const vector<int>& s : structure) {

      // The first structure entry is the constant term: zero derivative
      if (firstItem) {
        rtn.push_back(0.0); // Derivative of constant term
        firstItem = false;
        continue;
      }
      double part = 0.0;
      // Differentiate x^a*y^b*z^c*... and sum the results over all directions
      for (unsigned int c = 0; c < s.size(); c++) { // d/dx, d/dy, d/dz, ...

        double temp2 = 1.0;
        for (unsigned int i = 0; i <s.size(); i++) { // x, y, z
          if (c==i) {  // d/dx x*y*z
            temp2 *= s[i];
            // If the exponent s[c] is zero, temp2 has just been zeroed by
            // the s[i] factor above, so skip the pow() to avoid computing a
            // negative power; the remaining factors keep temp2 at zero
            if (s[c] == 0) continue;
            else temp2 *= std::pow(params[i], s[i]-1)/(maxPV[i]- minPV[i]); // Jacobian factor: 'd map_prange / dx' = 1./(b-a)
          } else {
            temp2 *= std::pow(params[i], s[i] );
          }
        }
        part += temp2;
      }
      rtn.push_back(part);
    }

    return rtn;
  }
 
 
   /// @todo Why the min/maxPV args?
   /// @todo Expose to API
   vector<double> mkLongVectorGradient(const vector<double>& params, int coord, int order,
                                       const vector<double>& minPV, const vector<double>& maxPV,
                                       const vector<vector<int> >& structure) {
     if (order < 0)
       throw IpolError("Polynomial order " + to_string(order) + " not implemented");
 
     vector<double> rtn;
     bool firstItem = true;
     for (const vector<int>& s : structure) {
       if (firstItem) {
         rtn.push_back(0.0); // Derivative of constant term
         firstItem = false;
         continue;
       }
 
       if (s[coord] == 0) {
         rtn.push_back(0);
         continue;
       }
       double temp = 1.0;
       for (unsigned int i = 0; i <s.size(); i++) { // x, y, z
         if (i == coord) {  // d/dx x*y*z
           temp *= s[i];  // d/dx  x^a = a*x^(a-1)
           temp *= std::pow(params[i], s[i]-1)/(maxPV[i]- minPV[i]); // Jacobian factor: 'd map_prange / dx' = 1./(b-a)
         } else {
           temp *= std::pow(params[i], s[i] );
         }
       }
       rtn.push_back(temp);
     }
 
     return rtn;
   }
 
 
   ///////////////////////////////////////////////////////
 
 
 
   string Ipol::toString(const string& name) const {
     stringstream ss;
     if (!name.empty()) ss << name << ": ";
     else if (!_name.empty()) ss << _name << ": ";
     ss << this->dim() << " ";
     ss << this->order() << " ";
     for (const double& a : coeffs())
       ss << a << " ";
     return ss.str();
   }
 
 
   /// TODO: How do we want to read in the MinMaxValues here?
   void Ipol::fromString(const string& s) {
     // Extract a name if given at the start of the string
     _name = (s.find(":") != std::string::npos) ? s.substr(0, s.find(":")) : "";
     // Load the rest of the string into a stringstream and load into numerical variables
     istringstream numss( (s.find(":") != std::string::npos) ? s.substr(s.find(":")+1) : s );
     numss >> _dim;
     numss >> _order;
-    int ncoeffs = numCoeffs(dim(),order());
+    const int ncoeffs = numCoeffs();
     double tmp;
     while (numss >> tmp) {
       // Read coefficients
       if (_coeffs.size() < ncoeffs) _coeffs.push_back(tmp);
       // If there are more bits to read in it must be format 'binned 3'
       // i.e. read in the min/max paramvalues directly.
       else if (_minPV.size() < dim()) _minPV.push_back(tmp);
       else  _maxPV.push_back(tmp);
     }
     _structure = mkStructure(dim(), order());
   }
 
 
+  string Ipol::exprString() const {
+    stringstream ss;
+    const vector< vector<int> > struc = structure();
+    for (size_t i = 0; i < numCoeffs(); ++i) {
+      if (coeff(i) == 0) continue;
+      if (i != 0) ss << "+ ";
+      ss << coeff(i) << " ";
+      const vector<int>& exps = struc[i];
+      for (int j = 0; j < dim(); ++j) {
+        if (exps[j] == 0) continue;
+        ss << "p" << j << "^" << exps[j] << " ";
+      }
+    }
+    return ss.str();
+  }
+
+
   vector<double> Ipol::sparams(const vector<double>& params) const {
     if (params.size() != dim()) {
       stringstream ss;
       ss << "Incorrect number of parameters given ("
          << dim() << " params required, " << params.size() << " supplied)";
       throw IpolError(ss.str());
     }
 
     // Param scaling into [0,1] ranges defined by sampling limits (if set)
     vector<double> sparams = params;
     if (!_minPV.empty() && !_maxPV.empty()) {
       for (size_t i = 0; i < dim(); ++i) {
         sparams[i] = map_prange(params[i], _minPV[i], _maxPV[i]);
       }
     }
     return sparams;
   }
 
 
  // Evaluate the interpolation at the given param point: scale the params
  // via sparams(), then dot the monomial long-vector with the coefficients.
  double Ipol::value(const vector<double>& params) const {
    return calcValue(sparams(params), coeffs(), order(), _structure);
  }
 
 
   double Ipol::derivative(const vector<double>& params) const {
     /// @todo Extract into a standalone calc function
     if (params.size() != dim()) {
       stringstream ss;
       ss << "Incorrect number of parameters passed to Ipol::derivative ("
          << dim() << " params required, " << params.size() << " supplied)";
       throw IpolError(ss.str());
     }
 
     // Param scaling into [0,1] ranges defined by sampling limits (if set)
     vector<double> sparams = params;
     if (!_minPV.empty() && !_maxPV.empty()) {
       for (size_t i = 0; i < dim(); ++i) {
         sparams[i] = map_prange(params[i], _minPV[i], _maxPV[i]);
       }
     }
 
     // Dot product for value
     const vector<double> lv = mkLongVectorDerivative(sparams, order(), _minPV, _maxPV, _structure);
     assert(lv.size() == coeffs().size());
     double v = 0.0;
     for (size_t i = 1; i < lv.size(); ++i) {
       v += lv[i] * coeff(i);
     }
     return v;
   }
 
 
   vector<double> Ipol::gradient(const vector<double>& params) const {
     /// @todo Extract into a standalone calc function
     if (params.size() != dim()) {
       stringstream ss;
       ss << "Incorrect number of parameters passed to Ipol::gradient ("
          << dim() << " params required, " << params.size() << " supplied)";
       throw IpolError(ss.str());
     }
 
     vector<double> grad;
 
     // Param scaling into [0,1] ranges defined by sampling limits (if set)
     vector<double> sparams = params;
     if (!_minPV.empty() && !_maxPV.empty()) {
       for (size_t i = 0; i < dim(); ++i) {
         sparams[i] = map_prange(params[i], _minPV[i], _maxPV[i]);
       }
     }
 
     for (int c=0; c< params.size(); c++) {
       // Dot product for value
       const vector<double> lv = mkLongVectorGradient(sparams, c, order(), _minPV, _maxPV, _structure);
       assert(lv.size() == coeffs().size());
       double v = 0.0;
       for (size_t i = 1; i < lv.size(); ++i) {
         v += lv[i] * coeff(i);
       }
       grad.push_back(v);
 
     }
 
     return grad;
   }
 
 
 }