Page MenuHomeHEPForge

No OneTemporary

diff --git a/MatrixElement/Matchbox/Tests/WeightAnalyzer.cc b/MatrixElement/Matchbox/Tests/WeightAnalyzer.cc
--- a/MatrixElement/Matchbox/Tests/WeightAnalyzer.cc
+++ b/MatrixElement/Matchbox/Tests/WeightAnalyzer.cc
@@ -1,283 +1,283 @@
// -*- C++ -*-
//
// This is the implementation of the non-inlined, non-templated member
// functions of the WeightAnalyzer class.
//
#include "WeightAnalyzer.h"
#include "ThePEG/Interface/ClassDocumentation.h"
#include "ThePEG/EventRecord/Particle.h"
#include "ThePEG/Repository/UseRandom.h"
#include "ThePEG/Repository/EventGenerator.h"
#include "ThePEG/Utilities/DescribeClass.h"
#include "ThePEG/Interface/Switch.h"
#include "ThePEG/EventRecord/SubProcess.h"
#include "ThePEG/EventRecord/SubProcessGroup.h"
#include "ThePEG/Persistency/PersistentOStream.h"
#include "ThePEG/Persistency/PersistentIStream.h"
using namespace Herwig;
// Constructor: zero-initialize all statistics accumulators; gnuplot-style
// output is enabled by default.
WeightAnalyzer::WeightAnalyzer()
: sumWeights(0.0), sumPositiveWeights(0.0),
sumNegativeWeights(0.0),
sumGroupWeights(0.0), sumPositiveGroupWeights(0.0),
sumNegativeGroupWeights(0.0),
maxDeviationGroupWeight(0.0),
maxDeviationEventWeight(0.0),
nPositiveWeights(0.0),
nNegativeWeights(0.0),
maxPositiveWeight(0.0),
maxNegativeWeight(0.0),
gnuplot(true) {}
// Destructor: nothing to release beyond the members themselves.
WeightAnalyzer::~WeightAnalyzer(){}
#ifndef LWH_AIAnalysisFactory_H
#ifndef LWH
#define LWH ThePEGLWH
#endif
#include "ThePEG/Analysis/LWH/AnalysisFactory.h"
#endif
// Accumulate weight statistics for one event: global sums, positive/negative
// counts and maxima, binned weight distributions, and — when the primary
// subprocess is a SubProcessGroup — group-weight sums and consistency checks.
void WeightAnalyzer::analyze(tEventPtr event, long ieve, int loop, int state) {
AnalysisHandler::analyze(event, ieve, loop, state);
double weight = event->weight();
sumWeights += weight;
if ( weight > 0.0 ) {
sumPositiveWeights += weight;
maxPositiveWeight = max(maxPositiveWeight,weight);
nPositiveWeights += 1;
// upper_bound picks the first bin edge above the weight; weights beyond
// the last edge fall into the final (overflow) bin.
map<double,double>::iterator b = positiveWeightDistribution.upper_bound(weight);
if ( b != positiveWeightDistribution.end() )
b->second += 1;
else
(--positiveWeightDistribution.end())->second += 1;
}
if ( weight < 0.0 ) {
sumNegativeWeights += weight;
// negative weights are histogrammed by their magnitude
maxNegativeWeight = max(maxNegativeWeight,abs(weight));
nNegativeWeights += 1;
map<double,double>::iterator b = negativeWeightDistribution.upper_bound(abs(weight));
if ( b != negativeWeightDistribution.end() )
b->second += 1;
else
(--negativeWeightDistribution.end())->second += 1;
}
tSubProPtr sub = event->primarySubProcess();
// non-null only when the subprocess actually is a group (e.g. NLO events
// with dependent counter-events)
Ptr<SubProcessGroup>::tptr grp =
dynamic_ptr_cast<Ptr<SubProcessGroup>::tptr>(sub);
double sumEvents = 0.0;
double sumGroups = 0.0;
sumGroupWeights += weight*sub->groupWeight();
if ( weight*sub->groupWeight() > 0.0 )
sumPositiveGroupWeights += weight*sub->groupWeight();
if ( weight*sub->groupWeight() < 0.0 )
sumNegativeGroupWeights += weight*sub->groupWeight();
sumEvents += weight*sub->groupWeight();
sumGroups += sub->groupWeight();
if ( grp ) {
// include every dependent subprocess of the group in the same sums
for ( SubProcessVector::const_iterator s = grp->dependent().begin();
s != grp->dependent().end(); ++s ) {
sumGroupWeights += weight*(**s).groupWeight();
if ( weight*(**s).groupWeight() > 0.0 )
sumPositiveGroupWeights += weight*(**s).groupWeight();
if ( weight*(**s).groupWeight() < 0.0 )
sumNegativeGroupWeights += weight*(**s).groupWeight();
sumEvents += weight*(**s).groupWeight();
sumGroups += (**s).groupWeight();
}
}
// consistency checks: group weights should sum to 1, and the event weight
// times that sum should reproduce the event weight.
// NOTE(review): sumEvents/weight divides by the event weight — if a zero
// event weight can reach this point the deviation becomes nan; confirm
// upstream guarantees weight != 0.
maxDeviationGroupWeight = max(maxDeviationGroupWeight,abs(sumGroups-1));
maxDeviationEventWeight = max(maxDeviationEventWeight,abs(sumEvents/weight-1));
}
// Finalize the analysis: normalize the weight histograms to the number of
// entries and write a summary plus the binned distributions, either as a
// self-contained gnuplot script (.gp) or a plain data file (.dat).
void WeightAnalyzer::dofinish() {
AnalysisHandler::dofinish();
// normalize bin contents to probabilities
if ( nPositiveWeights != 0 )
for ( map<double,double>::iterator b = positiveWeightDistribution.begin();
b != positiveWeightDistribution.end(); ++b ) {
b->second /= nPositiveWeights;
}
if ( nNegativeWeights != 0 )
for ( map<double,double>::iterator b = negativeWeightDistribution.begin();
b != negativeWeightDistribution.end(); ++b ) {
b->second /= nNegativeWeights;
}
string dataName = generator()->filename() + "Weights."+(gnuplot?"gp":"dat");
ofstream data(dataName.c_str());
// 17 significant digits (max_digits10) suffice to round-trip a double
- data << setprecision(20)
+ data << setprecision(17)
<< "# --------------------------------------------------------------------------------\n"
<< "# WeightAnalyzer information\n"
<< "# --------------------------------------------------------------------------------\n"
<< "# sum of weights : " << sumWeights << "\n"
<< "# sum of positive weights : " << sumPositiveWeights << "\n"
<< "# sum of negative weights : " << sumNegativeWeights << "\n"
<< "# sum of weights (from groups) : " << sumGroupWeights << "\n"
<< "# sum of positive weights (from groups) : " << sumPositiveGroupWeights << "\n"
<< "# sum of negative weights (from groups) : " << sumNegativeGroupWeights << "\n"
<< "# maximum devation group weights : " << maxDeviationGroupWeight << "\n"
<< "# maximum devation event weights : " << maxDeviationEventWeight << "\n"
<< "# number of positive weights : " << nPositiveWeights << "\n"
<< "# number of negative weights : " << nNegativeWeights << "\n"
<< "# maximum positive weight : " << maxPositiveWeight << "\n"
<< "# maximum negative weight : " << maxNegativeWeight << "\n"
<< "# --------------------------------------------------------------------------------\n"
<< flush;
data << "\n\n";
if(gnuplot){
// gnuplot preamble; the histogram data is embedded inline via plot "-".
// NOTE(review): the ylabel string opens a quote that is never closed —
// looks like a missing label text and closing quote; confirm the
// generated script is accepted by gnuplot as intended.
data << "set terminal pdf\n"
<< "set xlabel 'weights'\n"
<< "set ylabel '\n"
<< "set logscale \n"
<< "set output '" << generator()->filename()<<"Weights.pdf'\n"
<< "plot \"-\" using 2:3 with histeps lc rgbcolor \"#00AACC\" t \"positive weights\"\n"
<< "# low up val\n";
}
// write non-empty bins as "lower upper value"; the first bin starts at 0,
// the last (overflow) bin extends to MaxDouble
for ( map<double,double>::const_iterator b = positiveWeightDistribution.begin();
b != positiveWeightDistribution.end(); ++b ) {
if ( b->second != 0 ) {
double l,u;
if ( b == positiveWeightDistribution.begin() ) {
l = 0.; u = b->first;
} else if ( b == --positiveWeightDistribution.end() ) {
map<double,double>::const_iterator bl = b; --bl;
l = bl->first; u = Constants::MaxDouble;
} else {
map<double,double>::const_iterator bl = b; --bl;
l = bl->first; u = b->first;
}
data << l << " " << u << " " << b->second << "\n";
}
}
data << flush;
if(gnuplot)data<< "e";
data << "\n\n";
// same layout for the negative-weight histogram, only if any occurred
if(gnuplot && sumNegativeGroupWeights>0.){
data<< "plot \"-\" using 2:3 with histeps lc rgbcolor \"#00AACC\" t \"negative weights\"\n"
<< "# low up val\n";
}
for ( map<double,double>::const_iterator b = negativeWeightDistribution.begin();
b != negativeWeightDistribution.end(); ++b ) {
if ( b->second != 0 ) {
double l,u;
if ( b == negativeWeightDistribution.begin() ) {
l = 0.; u = b->first;
} else if ( b == --negativeWeightDistribution.end() ) {
map<double,double>::const_iterator bl = b; --bl;
l = bl->first; u = Constants::MaxDouble;
} else {
map<double,double>::const_iterator bl = b; --bl;
l = bl->first; u = b->first;
}
data << l << " " << u << " " << b->second << "\n";
}
}
if(gnuplot&& sumNegativeGroupWeights>0.)data<< "e";
data << flush;
}
// Prepare for a run: reset both weight histograms and lay out their bin
// edges logarithmically between 1e-8 and 1e8, with one extra bin appended
// to collect overflow entries.
void WeightAnalyzer::doinitrun() {
  AnalysisHandler::doinitrun();
  positiveWeightDistribution.clear();
  negativeWeightDistribution.clear();
  const unsigned int nbins = 201; // 200 regular bins plus the overflow bin
  const double low = 1.e-8;
  const double up = 1.e8;
  const double step = log10(up/low) / (nbins-1.);
  for ( unsigned int k = 1; k <= nbins; ++k ) {
    const double edge = low*pow(10.0,k*step);
    positiveWeightDistribution[edge] = 0.;
    negativeWeightDistribution[edge] = 0.;
  }
}
// Standard ThePEG cloning helpers; no deep copies are required here.
IBPtr WeightAnalyzer::clone() const {
return new_ptr(*this);
}
IBPtr WeightAnalyzer::fullclone() const {
return new_ptr(*this);
}
// If needed, insert default implementations of virtual function defined
// in the InterfacedBase class here (using ThePEG-interfaced-impl in Emacs).
// Persistency hooks: this analyzer carries no state that needs to be
// serialized, so both are intentionally empty.
void WeightAnalyzer::persistentOutput(PersistentOStream &) const {}
void WeightAnalyzer::persistentInput(PersistentIStream &, int) {}
// *** Attention *** The following static variable is needed for the type
// description system in ThePEG. Please check that the template arguments
// are correct (the class and its base class), and that the constructor
// arguments are correct (the class name and the name of the dynamically
// loadable library where the class implementation can be found).
DescribeClass<WeightAnalyzer,AnalysisHandler>
describeHerwigWeightAnalyzer("Herwig::WeightAnalyzer", "Herwig.so");
// Register the class documentation and user interfaces with the ThePEG
// repository.
void WeightAnalyzer::Init() {

  static ClassDocumentation<WeightAnalyzer> documentation
    ("There is no documentation for the WeightAnalyzer class");

  // Toggle between a self-contained gnuplot script (.gp) and a plain data
  // file (.dat) as the output format.
  static Switch<WeightAnalyzer,bool> interfaceGnuplotOutput
    ("Gnuplot output",
     "Switch On/Off gnuplot",
     &WeightAnalyzer::gnuplot, true, false, false);
  static SwitchOption interfaceGnuplotOutputOn
    (interfaceGnuplotOutput,
     "On",
     "On",
     true);
  static SwitchOption interfaceGnuplotOutputOff
    (interfaceGnuplotOutput,
     "Off",
     "Off",
     false);

}
diff --git a/Sampling/CellGrids/CellGrid.cc b/Sampling/CellGrids/CellGrid.cc
--- a/Sampling/CellGrids/CellGrid.cc
+++ b/Sampling/CellGrids/CellGrid.cc
@@ -1,458 +1,458 @@
// -*- C++ -*-
//
// CellGrid.cpp is a part of ExSample
// Copyright (C) 2012-2013 Simon Platzer
//
// ExSample is licenced under version 2 of the GPL, see COPYING for details.
//
#include "CellGrid.h"
#include <exception>
#include <stdexcept>
#include <sstream>
#include <iomanip>
using namespace ExSample;
using namespace std;
double CellGrid::volume(const vector<double>& lowerLeft,
const vector<double>& upperRight) const {
assert(lowerLeft.size() == upperRight.size());
double res = 1.0;
vector<double>::const_iterator upper = upperRight.begin();
vector<double>::const_iterator lower = lowerLeft.begin();
for ( ; lower != lowerLeft.end(); ++lower, ++upper ) {
assert(*lower <= *upper);
res *= *upper - *lower;
}
return res;
}
// Construct a leaf cell with the given corners and constant weight. The
// cached quantity starts out as the cell's volume, and upper boundaries are
// inclusive in all dimensions until the cell is split.
CellGrid::CellGrid(const vector<double>& newLowerLeft,
const vector<double>& newUpperRight,
double newWeight)
: theLowerLeft(newLowerLeft), theUpperRight(newUpperRight),
theVolumeOrIntegral(volume(newLowerLeft,newUpperRight)),
theWeight(newWeight) {
theUpperBoundInclusive.resize(lowerLeft().size(),true);
}
// Destructor: children are owned through raw pointers and released
// recursively; leaves own nothing.
CellGrid::~CellGrid() {
if ( !isLeaf() ) {
delete theChildren[0];
delete theChildren[1];
theChildren.clear();
}
}
// Assign the boundaries of a default-constructed grid. Only allowed while
// the grid is still empty; throws runtime_error otherwise. Upper boundaries
// become inclusive in every dimension and the cached volume is recomputed.
void CellGrid::boundaries(const std::vector<double>& newLowerLeft,
			  const std::vector<double>& newUpperRight) {
  if ( !lowerLeft().empty() )
    throw runtime_error("[ExSample::CellGrid] Cannot set the boundaries of non-empty grids.");
  theLowerLeft = newLowerLeft;
  theUpperRight = newUpperRight;
  theUpperBoundInclusive.resize(newLowerLeft.size(),true);
  theVolumeOrIntegral = volume(newLowerLeft,newUpperRight);
}
// Factory methods used when splitting or deserializing, so that derived
// grid classes can create children of their own type.
CellGrid* CellGrid::makeInstance() const {
return new CellGrid();
}
CellGrid* CellGrid::makeInstance(const vector<double>& newLowerLeft,
const vector<double>& newUpperRight,
double newWeight) const {
return new CellGrid(newLowerLeft,newUpperRight,newWeight);
}
// Set the density weight of a leaf cell; branching nodes carry no weight of
// their own, so this throws runtime_error for them.
void CellGrid::weight(double w) {
if ( !isLeaf() )
throw runtime_error("[ExSample::CellGrid] Cannot set the weight of branching nodes.");
theWeight = w;
}
void CellGrid::updateIntegral() {
if ( isLeaf() )
return;
firstChild().updateIntegral();
secondChild().updateIntegral();
theVolumeOrIntegral = firstChild().integral() + secondChild().integral();
theWeight = 0.0;
}
// Determine the dimension and coordinate at which this branching node was
// split, by finding the first dimension in which the children's upper
// boundaries differ. Throws runtime_error when called on a leaf node.
pair<size_t,double> CellGrid::splitPoint() const {
  if ( isLeaf() )
    throw runtime_error("[ExSample::CellGrid] Leaf nodes have no splits.");
  // Initialize explicitly so the result is well defined even if the
  // consistency assertion below is compiled out (NDEBUG builds).
  pair<size_t,double> res(0,0.0);
  for ( res.first = 0; res.first != firstChild().upperRight().size(); ++res.first ) {
    if ( firstChild().upperRight()[res.first] != secondChild().upperRight()[res.first] ) {
      res.second = firstChild().upperRight()[res.first];
      break;
    }
  }
  // a branching node must differ from its first child in some dimension
  assert(res.first < firstChild().upperRight().size());
  return res;
}
// Child accessors; all four throw runtime_error when called on leaf nodes.
const CellGrid& CellGrid::firstChild() const {
if ( isLeaf() )
throw runtime_error("[ExSample::CellGrid] Cannot access children of leaf nodes.");
return *theChildren[0];
}
CellGrid& CellGrid::firstChild() {
if ( isLeaf() )
throw runtime_error("[ExSample::CellGrid] Cannot access children of leaf nodes.");
return *theChildren[0];
}
const CellGrid& CellGrid::secondChild() const {
if ( isLeaf() )
throw runtime_error("[ExSample::CellGrid] Cannot access children of leaf nodes.");
return *theChildren[1];
}
CellGrid& CellGrid::secondChild() {
if ( isLeaf() )
throw runtime_error("[ExSample::CellGrid] Cannot access children of leaf nodes.");
return *theChildren[1];
}
// Split this leaf cell into two children along the given dimension at the
// given coordinate. The first child gets an exclusive upper boundary in the
// split dimension so a point on the boundary belongs to exactly one child.
// Throws runtime_error for branching nodes or an invalid dimension.
void CellGrid::split(size_t newSplitDimension, double newSplitCoordinate) {
  if ( !isLeaf() )
    throw runtime_error("[ExSample::CellGrid] Cannot split an already branching node.");
  // valid dimensions are 0 ... size()-1; the former check used '>' and let
  // newSplitDimension == size() slip through to an out-of-range access below
  if ( newSplitDimension >= lowerLeft().size() )
    throw runtime_error("[ExSample::CellGrid] Cannot split along non-existing dimension.");
  assert(lowerLeft()[newSplitDimension] <= newSplitCoordinate &&
	 newSplitCoordinate <= upperRight()[newSplitDimension]);
  vector<double> firstUpperRight = upperRight();
  firstUpperRight[newSplitDimension] = newSplitCoordinate;
  vector<double> secondLowerLeft = lowerLeft();
  secondLowerLeft[newSplitDimension] = newSplitCoordinate;
  theChildren.resize(2);
  theChildren[0] = makeInstance(lowerLeft(),firstUpperRight);
  theChildren[1] = makeInstance(secondLowerLeft,upperRight());
  firstChild().theUpperBoundInclusive = upperBoundInclusive();
  firstChild().theUpperBoundInclusive[newSplitDimension] = false;
  secondChild().theUpperBoundInclusive = upperBoundInclusive();
}
// Collect all cell boundary coordinates of this subtree along the given
// dimension into the (sorted, de-duplicated) set.
// Throws runtime_error for an invalid dimension.
void CellGrid::splitCoordinates(size_t dimension, set<double>& coordinates) const {
  // valid dimensions are 0 ... size()-1; the former check used '>' and let
  // dimension == size() slip through to an out-of-range access below
  if ( dimension >= lowerLeft().size() )
    throw runtime_error("[ExSample::CellGrid] Cannot get splits for non-existing dimension.");
  if ( isLeaf() ) {
    coordinates.insert(lowerLeft()[dimension]);
    coordinates.insert(upperRight()[dimension]);
    return;
  }
  firstChild().splitCoordinates(dimension,coordinates);
  secondChild().splitCoordinates(dimension,coordinates);
}
// Check whether the point lies inside this cell, testing only the dimensions
// flagged in parameterFlags. The upper boundary is exclusive where
// theUpperBoundInclusive is false (first children of splits), so boundary
// points are claimed by exactly one sibling.
bool CellGrid::contains(const vector<double>& point,
			const vector<bool>& parameterFlags) const {
  assert(point.size()==parameterFlags.size());
  assert(point.size()==lowerLeft().size());
  for ( size_t k = 0; k < point.size(); ++k ) {
    if ( !parameterFlags[k] )
      continue;
    const bool belowLower = point[k] < lowerLeft()[k];
    const bool aboveUpper = upperBoundInclusive()[k] ?
      point[k] > upperRight()[k] : point[k] >= upperRight()[k];
    if ( belowLower || aboveUpper )
      return false;
  }
  return true;
}
// Volume of this cell restricted to the non-parameter dimensions, i.e. the
// product of the extents of all dimensions not flagged in parameterFlags.
double CellGrid::nonParametricVolume(const vector<double>& point,
				     const vector<bool>& parameterFlags) const {
  assert(point.size()==parameterFlags.size());
  assert(point.size()==lowerLeft().size());
  double result = 1.0;
  for ( size_t k = 0; k < point.size(); ++k )
    if ( !parameterFlags[k] )
      result *= upperRight()[k] - lowerLeft()[k];
  return result;
}
// Parameter-dependent integral update: cells that do not contain the point
// (in the flagged parameter dimensions) are deactivated, leaves cache their
// volume in the remaining dimensions, and branching nodes sum their
// children. hashPosition records the resulting active/inactive pattern of
// the subtree in pre-order.
void CellGrid::updateIntegral(const vector<double>& point,
const vector<bool>& parameterFlags,
vector<bool>::iterator hashPosition) {
if ( !contains(point,parameterFlags) ) {
theVolumeOrIntegral = 0.0;
*hashPosition = false;
return;
}
if ( isLeaf() ) {
theVolumeOrIntegral = nonParametricVolume(point,parameterFlags);
*hashPosition = true;
return;
}
// children occupy the hash slots following this node in pre-order;
// the second child starts after the whole first subtree
firstChild().updateIntegral(point,parameterFlags,hashPosition+1);
secondChild().updateIntegral(point,parameterFlags,hashPosition+firstChild().size()+1);
theVolumeOrIntegral = firstChild().integral() + secondChild().integral();
theWeight = 0.0;
*hashPosition = true;
}
// Recursive worker for minimumSelection(): distribute the reference weight
// ref over the subtree such that each child of a branching node receives at
// least the fraction r, and set leaf weights so weight*volume equals the
// share assigned to the leaf.
void CellGrid::doMinimumSelection(double r,
double ref) {
if ( isLeaf() ) {
theWeight = ref/theVolumeOrIntegral;
return;
}
// share ref between the children in proportion to their integrals; fall
// back to an even split if the total integral vanishes
double refFirst =
integral() != 0.0 ? firstChild().integral()*ref/integral() : 0.5*ref;
double refSecond =
integral() != 0.0 ? secondChild().integral()*ref/integral() : 0.5*ref;
// clamp the shares so neither child falls below the fraction r
if ( refFirst/ref < r &&
refSecond/ref < r ) {
refFirst = 0.5*ref;
refSecond = 0.5*ref;
} else if ( refFirst/ref < r &&
refSecond/ref >= r ) {
refFirst = r*ref;
refSecond = (1.-r)*ref;
} else if ( refFirst/ref >= r &&
refSecond/ref < r ) {
refFirst = (1.-r)*ref;
refSecond = r*ref;
}
theVolumeOrIntegral = refFirst + refSecond;
firstChild().doMinimumSelection(r,refFirst);
secondChild().doMinimumSelection(r,refSecond);
}
// Guarantee a minimum selection probability p for every cell: refresh the
// cached integrals and redistribute them down the tree, using unity as the
// reference when the total integral vanishes.
void CellGrid::minimumSelection(double p) {
  updateIntegral();
  const double reference = integral() != 0.0 ? integral() : 1.0;
  doMinimumSelection(p,reference);
}
// Volume of a leaf cell; branching nodes cache an integral instead, so this
// throws runtime_error for them.
double CellGrid::volume() const {
if ( !isLeaf() )
throw runtime_error("[ExSample::CellGrid] No volume is stored for branching nodes.");
return theVolumeOrIntegral;
}
// Integral of the density over this cell: cached directly for branching
// nodes, weight times volume for leaves.
double CellGrid::integral() const {
if ( !isLeaf() )
return theVolumeOrIntegral;
return theWeight*theVolumeOrIntegral;
}
// A cell is active as long as its cached volume/integral is non-vanishing.
bool CellGrid::active() const {
  return theVolumeOrIntegral != 0.0;
}
// Depth of the subtree below this node; leaves have depth zero.
size_t CellGrid::depth() const {
  return isLeaf() ? 0 : max(firstChild().depth(),secondChild().depth()) + 1;
}
// Total number of nodes in this subtree, including this one.
size_t CellGrid::size() const {
  return isLeaf() ? 1 : firstChild().size() + secondChild().size() + 1;
}
// Project the density onto the given dimension: the integral over the given
// interval, divided by this cell's extent in that dimension. The interval
// must lie fully inside this cell's range in the projection dimension or
// fully outside (zero contribution); a straddling interval is an error.
// Throws runtime_error for an invalid dimension or a straddling interval.
double CellGrid::projectInterval(const pair<double,double>& interval,
				 size_t dimension) const {
  // valid dimensions are 0 ... size()-1; the former check used '>' and let
  // dimension == size() slip through to an out-of-range access below
  if ( dimension >= lowerLeft().size() )
    throw runtime_error("[ExSample::CellGrid] Cannot project to non-existing dimension.");
  // interval entirely below or above this cell: no contribution
  if ( (interval.first <= lowerLeft()[dimension] &&
	interval.second <= lowerLeft()[dimension]) ||
       (interval.first >= upperRight()[dimension] &&
	interval.second >= upperRight()[dimension]) )
    return 0.0;
  if ( interval.first >= lowerLeft()[dimension] &&
       interval.first <= upperRight()[dimension] &&
       interval.second >= lowerLeft()[dimension] &&
       interval.second <= upperRight()[dimension] ) {
    if ( !isLeaf() ) {
      // sum the contributions of the active children
      double res = 0.0;
      if ( firstChild().active() )
	res += firstChild().projectInterval(interval,dimension);
      if ( secondChild().active() )
	res += secondChild().projectInterval(interval,dimension);
      return res;
    }
    // leaf: density integrated per unit length in the projected dimension
    double res = integral();
    res /= upperRight()[dimension]-lowerLeft()[dimension];
    return res;
  } else {
    throw runtime_error("[ExSample::CellGrid] Integration interval needs to fully be contained in the grid.");
  }
}
// Project the density onto the given dimension over the given interval,
// returning a map from sub-intervals (between successive split coordinates)
// to their projected density values.
map<pair<double,double>,double>
CellGrid::project(pair<double,double> interval,
size_t dimension) const {
set<double> splits;
splitCoordinates(dimension,splits);
assert(splits.size() > 1);
// drop split coordinates lying outside the requested interval
while ( *splits.begin() < interval.first ) {
splits.erase(splits.begin());
if ( splits.empty() )
break;
}
while ( *(--splits.end()) > interval.second ) {
splits.erase(--splits.end());
if ( splits.empty() )
break;
}
// make sure the interval endpoints themselves are split points
splits.insert(interval.first);
splits.insert(interval.second);
map<pair<double,double>,double> res;
// walk successive pairs of split coordinates and project each sub-interval
while ( splits.size() > 1 ) {
interval.first = *splits.begin();
interval.second = *(++splits.begin());
res[interval] = projectInterval(interval,dimension);
splits.erase(splits.begin());
}
return res;
}
// Reconstruct this grid (recursively) from the XML representation produced
// by toXML(). Throws runtime_error when expected elements or character data
// are missing.
void CellGrid::fromXML(const XML::Element& grid) {
size_t dimension = 0;
bool branching = false;
grid.getFromAttribute("dimension",dimension);
grid.getFromAttribute("branching",branching);
// branching nodes store only their integral; leaves store volume and weight
if ( branching ) {
grid.getFromAttribute("integral",theVolumeOrIntegral);
} else {
grid.getFromAttribute("volume",theVolumeOrIntegral);
grid.getFromAttribute("weight",theWeight);
}
theLowerLeft.resize(dimension);
theUpperRight.resize(dimension);
theUpperBoundInclusive.resize(dimension);
list<XML::Element>::const_iterator cit;
cit = grid.findFirst(XML::ElementTypes::Element,"Boundaries");
if ( cit == grid.children().end() )
throw runtime_error("[ExSample::CellGrid] Expected a Boundaries element.");
const XML::Element& boundaries = *cit;
cit = boundaries.findFirst(XML::ElementTypes::ParsedCharacterData,"");
if ( cit == boundaries.children().end() )
throw runtime_error("[ExSample::CellGrid] Expected boundary data.");
// boundary data is a whitespace-separated list of
// "lower upper inclusiveFlag" triplets, one per dimension
istringstream bdata(cit->content());
for ( size_t k = 0; k < lowerLeft().size(); ++k ) {
bdata >> theLowerLeft[k] >> theUpperRight[k];
bool x; bdata >> x;
theUpperBoundInclusive[k] = x;
}
if ( branching ) {
// recurse into the FirstChild/SecondChild wrapper elements
theChildren.resize(2,0);
theChildren[0] = makeInstance();
theChildren[1] = makeInstance();
cit = grid.findFirst(XML::ElementTypes::Element,"FirstChild");
if ( cit == grid.children().end() )
throw runtime_error("[ExSample::CellGrid] Expected a FirstChild element.");
const XML::Element& first = *cit;
cit = first.findFirst(XML::ElementTypes::Element,"CellGrid");
if ( cit == first.children().end() )
throw runtime_error("[ExSample::CellGrid] Expected a CellGrid element.");
firstChild().fromXML(*cit);
cit = grid.findFirst(XML::ElementTypes::Element,"SecondChild");
if ( cit == grid.children().end() )
throw runtime_error("[ExSample::CellGrid] Expected a SecondChild element.");
const XML::Element& second = *cit;
cit = second.findFirst(XML::ElementTypes::Element,"CellGrid");
if ( cit == second.children().end() )
throw runtime_error("[ExSample::CellGrid] Expected a CellGrid element.");
secondChild().fromXML(*cit);
}
}
// Serialize this grid (recursively) to XML; the inverse of fromXML().
XML::Element CellGrid::toXML() const {
XML::Element grid(XML::ElementTypes::Element,"CellGrid");
grid.appendAttribute("dimension",lowerLeft().size());
grid.appendAttribute("branching",!isLeaf());
// 17 significant digits (max_digits10) suffice to round-trip a double
if ( !isLeaf() ) {
- ostringstream i; i << setprecision(20) << integral();
+ ostringstream i; i << setprecision(17) << integral();
grid.appendAttribute("integral",i.str());
} else {
- ostringstream v; v << setprecision(20) << volume();
+ ostringstream v; v << setprecision(17) << volume();
grid.appendAttribute("volume",v.str());
- ostringstream w; w << setprecision(20) << weight();
+ ostringstream w; w << setprecision(17) << weight();
grid.appendAttribute("weight",w.str());
}
// boundary data as "lower upper inclusiveFlag" triplets, one per dimension
XML::Element boundaries(XML::ElementTypes::Element,"Boundaries");
ostringstream bdata;
- bdata << setprecision(20);
+ bdata << setprecision(17);
for ( size_t k = 0; k < lowerLeft().size(); ++k )
bdata << lowerLeft()[k] << " " << upperRight()[k] << " "
<< upperBoundInclusive()[k] << " ";
XML::Element belem(XML::ElementTypes::ParsedCharacterData,bdata.str());
boundaries.append(belem);
grid.append(boundaries);
if ( !isLeaf() ) {
XML::Element first(XML::ElementTypes::Element,"FirstChild");
first.append(firstChild().toXML());
grid.append(first);
XML::Element second(XML::ElementTypes::Element,"SecondChild");
second.append(secondChild().toXML());
grid.append(second);
}
return grid;
}
#ifdef units
// Emit a C++ evaluate() method for the named class which reproduces this
// grid's piecewise-constant density as nested if/else statements.
void CellGrid::dumpToC(ostream& os,
const string& name) const {
os << "double " << name << "::evaluate(const vector<double>& p) const {\n";
dumpPartToC(os," ");
os << "}\n";
}
// Recursive helper for dumpToC: leaves emit a return of their weight,
// branching nodes emit an if/else on the split coordinate.
void CellGrid::dumpPartToC(ostream& os,
string prefix) const {
if ( isLeaf() ) {
os << prefix << "return " << weight() << ";\n";
return;
}
pair<size_t,double> sp = splitPoint();
os << prefix << "if ( p[" << sp.first << "] < " << sp.second << " ) {\n";
firstChild().dumpPartToC(os,prefix+" ");
os << prefix << "} else {\n";
secondChild().dumpPartToC(os,prefix+" ");
os << prefix << "}\n";
}
#endif
diff --git a/Sampling/GeneralSampler.cc b/Sampling/GeneralSampler.cc
--- a/Sampling/GeneralSampler.cc
+++ b/Sampling/GeneralSampler.cc
@@ -1,1025 +1,1025 @@
// -*- C++ -*-
//
// GeneralSampler.cc is a part of Herwig - A multi-purpose Monte Carlo event generator
// Copyright (C) 2002-2012 The Herwig Collaboration
//
// Herwig is licenced under version 2 of the GPL, see COPYING for details.
// Please respect the MCnet academic guidelines, see GUIDELINES for details.
//
//
// This is the implementation of the non-inlined, non-templated member
// functions of the GeneralSampler class.
//
#include "GeneralSampler.h"
#include "ThePEG/Interface/ClassDocumentation.h"
#include "ThePEG/EventRecord/Particle.h"
#include "ThePEG/Repository/UseRandom.h"
#include "ThePEG/Repository/EventGenerator.h"
#include "ThePEG/Repository/Repository.h"
#include "ThePEG/Utilities/DescribeClass.h"
#include "ThePEG/Utilities/LoopGuard.h"
#include "ThePEG/Interface/Reference.h"
#include "ThePEG/Interface/Switch.h"
#include "ThePEG/Interface/Parameter.h"
#include "ThePEG/Persistency/PersistentOStream.h"
#include "ThePEG/Persistency/PersistentIStream.h"
#include "ThePEG/Handlers/StandardEventHandler.h"
#include "ThePEG/Handlers/StandardXComb.h"
#include "Herwig/Utilities/RunDirectories.h"
#include "Herwig/Utilities/XML/ElementIO.h"
#include <boost/progress.hpp>
#include <boost/filesystem.hpp>
#include <cstdlib>
#include <sstream>
using namespace Herwig;
// Constructor: zero-initialize all counters, cross section accumulators and
// steering flags; the maximum-weight enhancement factor defaults to 1.05.
GeneralSampler::GeneralSampler()
: theVerbose(false),
theIntegratedXSec(ZERO), theIntegratedXSecErr(ZERO),
theUpdateAfter(1), crossSectionCalls(0), gotCrossSections(false),
theSumWeights(0.), theSumWeights2(0.),
theAttempts(0), theAccepts(0),
theMaxWeight(0.0), theAddUpSamplers(false),
theGlobalMaximumWeight(true), theFlatSubprocesses(false),
isSampling(false), theMinSelection(0.01), runCombinationData(false),
theAlmostUnweighted(false), maximumExceeds(0),
maximumExceededBy(0.), correctWeights(0.),theMaxEnhancement(1.05), didReadGrids(false),
theParallelIntegration(false),
theIntegratePerJob(0), theIntegrationJobs(0), theIntegrationJobsCreated(0),
justAfterIntegrate(false), theWriteGridsOnFinish(false) {}
// Destructor: samplers are held through reference-counted pointers, so
// nothing needs to be released explicitly here.
GeneralSampler::~GeneralSampler() {}
// Standard ThePEG cloning helpers.
IBPtr GeneralSampler::clone() const {
return new_ptr(*this);
}
IBPtr GeneralSampler::fullclone() const {
return new_ptr(*this);
}
// Sign of x as a double, mapping zero to +1: returns 1. for x >= 0 and -1.
// otherwise.
double sign(double x) {
  if ( x >= 0. )
    return 1.;
  return -1.;
}
// Set up the sampler for the current run level: in parallel-integration mode
// write out randomized integration job lists and return; otherwise create
// and initialize one BinSampler per requested bin, save grids/remappers as
// appropriate, and (in run mode) prepare for event generation.
void GeneralSampler::initialize() {
// parallel integration only makes sense in build/integrate/run workflows
if ( theParallelIntegration &&
runLevel() == ReadMode )
throw Exception()
<< "\n--------------------------------------------------------------------------------\n\n"
<< "Parallel integration is only supported in the build/integrate/run mode\n\n"
<< "--------------------------------------------------------------------------------\n"
<< Exception::abortnow;
if ( runLevel() == ReadMode ||
runLevel() == IntegrationMode ) {
assert(theSamplers.empty());
if ( !theGrids.children().empty() )
Repository::clog()
<< "--------------------------------------------------------------------------------\n\n"
<< "Using an existing grid. Please consider re-running the grid adaption\n"
<< "when there have been significant changes to parameters, cuts, etc.\n\n"
<< "--------------------------------------------------------------------------------\n"
<< flush;
}
if ( theParallelIntegration ) {
// determine how many bins each integration job should handle
if ( !theIntegratePerJob && !theIntegrationJobs )
throw Exception()
<< "Please specify the number of subprocesses per integration job or the "
<< "number of integration jobs to be created."
<< Exception::abortnow;
if ( theIntegrationJobs ) {
unsigned int nintegrate = eventHandler()->nBins()/theIntegrationJobs;
if ( eventHandler()->nBins() % theIntegrationJobs != 0 )
++nintegrate;
theIntegratePerJob = max(theIntegratePerJob,nintegrate);
}
unsigned int jobCount = 0;
ofstream* jobList = 0;
generator()->log()
<< "--------------------------------------------------------------------------------\n"
<< "preparing integration jobs ...\n" << flush;
// shuffle the bin indices so jobs get a balanced mix of processes
vector<int> randomized;
vector<int> pickfrom;
for ( int b = 0; b < eventHandler()->nBins(); ++b )
pickfrom.push_back(b);
//set<int> check;
while ( !pickfrom.empty() ) {
size_t idx = UseRandom::irnd(pickfrom.size());
randomized.push_back(pickfrom[idx]);
pickfrom.erase(pickfrom.begin() + idx);
}
// write theIntegratePerJob bin indices per job file
int b = 0;
for ( vector<int>::const_iterator bx = randomized.begin();
bx != randomized.end(); ++bx, ++b ) {
if ( b == 0 || b % theIntegratePerJob == 0 ) {
if ( jobList ) {
jobList->close();
delete jobList;
jobList = 0;
}
ostringstream name;
string prefix = RunDirectories::buildStorage();
if ( prefix.empty() )
prefix = "./";
else if ( *prefix.rbegin() != '/' )
prefix += "/";
name << prefix << "integrationJob" << jobCount;
++jobCount;
string fname = name.str();
jobList = new ofstream(fname.c_str());
if ( !*jobList ) {
delete jobList;
throw Exception() << "Failed to write integration job list"
<< Exception::abortnow;
}
}
*jobList << *bx << " ";
}
theIntegrationJobsCreated = jobCount;
generator()->log()
<< "--------------------------------------------------------------------------------\n\n"
<< "Wrote " << jobCount << " integration jobs\n"
<< "Please submit integration jobs with the\nintegrate --jobid=x\ncommand for job ids "
<< "from 0 to " << (jobCount-1) << "\n\n"
<< "--------------------------------------------------------------------------------\n"
<< flush;
if ( jobList ) {
jobList->close();
delete jobList;
jobList = 0;
}
theParallelIntegration = false;
return;
}
if ( runLevel() == BuildMode )
return;
if ( !samplers().empty() )
return;
// consistency constraints between steering flags
if ( binSampler()->adaptsOnTheFly() ) {
if ( !theAddUpSamplers ) {
Repository::clog() << "Warning: On-the-fly adapting samplers require cross section calculation from "
<< "adding up individual samplers. The AddUpSamplers flag will be switched on.";
}
theAddUpSamplers = true;
}
if ( !weighted() && !binSampler()->canUnweight() )
throw Exception() << "Unweighted events requested from weighted bin sampler object.";
if ( theFlatSubprocesses && !theGlobalMaximumWeight ) {
Repository::clog() << "Warning: Can only use a global maximum weight when selecting subprocesses "
<< "uniformly. The GlobalMaximumWeight flag will be switched on.";
theGlobalMaximumWeight = true;
}
// read the bins assigned to this job, or default to all bins
set<int> binsToIntegrate;
if ( integrationList() != "" ) {
string prefix = RunDirectories::buildStorage();
if ( prefix.empty() )
prefix = "./";
else if ( *prefix.rbegin() != '/' )
prefix += "/";
string fname = prefix + integrationList();
ifstream jobList(fname.c_str());
if ( jobList ) {
int b = 0;
while ( jobList >> b )
binsToIntegrate.insert(b);
} else {
Repository::clog()
<< "Job list '"
<< integrationList() << "' not found.\n"
<< "Assuming empty integration job\n" << flush;
return;
}
}
if ( binsToIntegrate.empty() ) {
for ( int b = 0; b < eventHandler()->nBins(); ++b )
binsToIntegrate.insert(b);
}
// NOTE(review): raw new/delete of the progress bar leaks if an exception
// escapes the loop below; consider a scoped owner.
boost::progress_display* progressBar = 0;
if ( !theVerbose && !justAfterIntegrate ) {
Repository::clog() << "integrating subprocesses";
progressBar = new boost::progress_display(binsToIntegrate.size(),Repository::clog());
}
// clone and initialize one bin sampler per bin
for ( set<int>::const_iterator bit = binsToIntegrate.begin(); bit != binsToIntegrate.end(); ++bit ) {
Ptr<BinSampler>::ptr s = theBinSampler->cloneMe();
s->eventHandler(eventHandler());
s->sampler(this);
s->bin(*bit);
lastSampler(s);
s->doWeighted(eventHandler()->weighted());
s->setupRemappers(theVerbose);
if ( justAfterIntegrate )
s->readIntegrationData();
s->initialize(theVerbose);
samplers()[*bit] = s;
if ( !theVerbose && !justAfterIntegrate )
++(*progressBar);
if ( s->nanPoints() && theVerbose ) {
Repository::clog() << "warning: "
<< s->nanPoints() << " of "
<< s->allPoints() << " points with nan or inf weight.\n"
<< flush;
}
}
if ( progressBar ) {
delete progressBar;
progressBar = 0;
}
// in pure integration mode, persist the adapted grids and stop here
if ( runLevel() == IntegrationMode ) {
theGrids = XML::Element(XML::ElementTypes::Element,"Grids");
for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin();
s != samplers().end(); ++s ) {
s->second->saveGrid();
s->second->saveRemappers();
s->second->saveIntegrationData();
}
writeGrids();
return;
}
if ( theVerbose ) {
// temporarily force AddUpSamplers to report the summed cross section
bool oldAdd = theAddUpSamplers;
theAddUpSamplers = true;
try {
Repository::clog() << "estimated total cross section is ( "
<< integratedXSec()/nanobarn << " +/- "
<< integratedXSecErr()/nanobarn << " ) nb\n" << flush;
} catch (...) {
theAddUpSamplers = oldAdd;
throw;
}
theAddUpSamplers = oldAdd;
}
updateSamplers();
if ( samplers().empty() ) {
throw Exception() << "No processes with non-zero cross section present."
<< Exception::abortnow;
}
if ( !justAfterIntegrate ) {
theGrids = XML::Element(XML::ElementTypes::Element,"Grids");
for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin();
s != samplers().end(); ++s ) {
s->second->saveGrid();
s->second->saveRemappers();
}
writeGrids();
}
}
// Generate the next event: select a bin sampler proportional to its bias,
// draw a point from it, and either return the full weight (weighted mode)
// or unweight against theMaxWeight and return +-1. Problematic draws
// (nan/inf or zero weight) are retried up to the event handler's maxLoop.
double GeneralSampler::generate() {
long excptTries = 0;
gotCrossSections = false;
// samplers() maps cumulative selection probabilities to samplers, so
// upper_bound on a uniform random number selects by bias
lastSampler(samplers().upper_bound(UseRandom::rnd())->second);
double weight = 0.;
while ( true ) {
try {
weight = 1.0;
// acceptance probability of the current sampler relative to the
// global maximum weight
double p = lastSampler()->referenceWeight()/lastSampler()->bias()/theMaxWeight;
if ( weighted() )
weight *= p;
else if ( p < UseRandom::rnd() ){
weight = 0.0;
// The lastSampler was picked according to the bias of the process.
--excptTries;
}
if ( weight != 0.0 )
weight *= lastSampler()->generate()/lastSampler()->referenceWeight();
} catch(BinSampler::NextIteration) {
// the sampler started a new adaption iteration; refresh and retry
updateSamplers();
lastSampler(samplers().upper_bound(UseRandom::rnd())->second);
if ( ++excptTries == eventHandler()->maxLoop() )
break;
continue;
} catch (...) {
throw;
}
// discard nan/inf points and retry with a freshly selected sampler
if ( isnan(lastSampler()->lastWeight()) || isinf(lastSampler()->lastWeight()) ) {
lastSampler() = samplers().upper_bound(UseRandom::rnd())->second;
if ( ++excptTries == eventHandler()->maxLoop() )
break;
continue;
}
theAttempts += 1;
if ( abs(weight) == 0.0 ) {
lastSampler(samplers().upper_bound(UseRandom::rnd())->second);
if ( ++excptTries == eventHandler()->maxLoop() )
break;
continue;
}
if ( !eventHandler()->weighted() && !theAlmostUnweighted ) {
// track how often (and by how much) the assumed maximum is exceeded
if ( abs(weight) > 1. ) {
++maximumExceeds;
maximumExceededBy += abs(weight)-1.;
}
correctWeights+=weight;
// unweighted events carry only their sign
if ( weight > 0.0 )
weight = 1.;
else
weight = -1.;
}
break;
}
theAccepts += 1;
if ( excptTries == eventHandler()->maxLoop() )
throw Exception()
<< "GeneralSampler::generate() : Maximum number of tries to re-run event "
<< "selection reached. Aborting now." << Exception::runerror;
lastPoint() = lastSampler()->lastPoint();
lastSampler()->accept();
theSumWeights += weight;
theSumWeights2 += sqr(weight);
return weight;
}
// Undo the statistics accumulated for the most recently generated event.
void GeneralSampler::rejectLast() {
  if ( !lastSampler() )
    return;
  // Reconstruct the weight that was previously booked for this event.
  const double lastW = lastSampler()->lastWeight();
  const double undone = weighted() ?
    lastW/lastSampler()->bias()/theMaxWeight :
    lastW/lastSampler()->referenceWeight();
  lastSampler()->reject();
  theSumWeights -= undone;
  theSumWeights2 -= sqr(undone);
  theAttempts -= 1;
  theAccepts -= 1;
}
// Re-derive subprocess selection biases, reference weights and the global
// maximum weight from the current state of all bin samplers, dropping
// samplers that never produced a non-vanishing phase space point.
void GeneralSampler::updateSamplers() {
map<double,Ptr<BinSampler>::ptr> checkedSamplers;
for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin();
s != samplers().end(); ++s ) {
// Samplers with zero average |weight| cannot contribute; warn and drop.
if ( s->second->averageAbsWeight() == 0.0 ) {
generator()->log() << "Warning: no phase space points with non-zero cross section\n"
<< "could be obtained for the process: "
<< s->second->process() << "\n"
<< "This process will not be considered. Try increasing InitialPoints.\n"
<< flush;
if ( s->second->nanPoints() ) {
generator()->log() << "Warning: "
<< s->second->nanPoints() << " of "
<< s->second->allPoints() << " points with nan or inf weight\n"
<< "in " << s->second->process() << "\n" << flush;
}
continue;
}
checkedSamplers.insert(*s);
}
theSamplers = checkedSamplers;
if ( samplers().empty() )
return;
// Bias each sampler by its average absolute weight (unless a flat
// subprocess selection was requested) and record the overall maximum
// weight, enhanced by the safety factor theMaxEnhancement.
double allMax = 0.0;
double sumbias = 0.;
for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin();
s != samplers().end(); ++s ) {
double bias = 1.;
if ( !theFlatSubprocesses )
bias *= s->second->averageAbsWeight();
s->second->bias(bias);
sumbias += bias;
allMax = max(allMax,s->second->maxWeight()*theMaxEnhancement);
}
// Normalise the biases, enforcing the minimum selection probability.
double nsumbias = 0.0;
bool needAdjust = false;
for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin();
s != samplers().end(); ++s ) {
needAdjust |= s->second->bias()/sumbias < theMinSelection;
s->second->bias(max(s->second->bias()/sumbias,theMinSelection));
nsumbias += s->second->bias();
}
if ( nsumbias == 0.0 ) {
samplers().clear();
return;
}
// Renormalise once more if any bias was clipped at the minimum.
if ( needAdjust ) {
for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin();
s != samplers().end(); ++s ) {
s->second->bias(s->second->bias()/nsumbias);
}
}
// Assign reference weights (global or per-sampler maximum) and derive
// the global maximum weight used for unweighting in generate().
theMaxWeight = 0.0;
for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin();
s != samplers().end(); ++s ) {
double wref = theGlobalMaximumWeight ? allMax :
s->second->maxWeight()*theMaxEnhancement;
s->second->referenceWeight(wref);
theMaxWeight = max(theMaxWeight,wref/s->second->bias());
if ( (isSampling && s->second == lastSampler()) ||
!isSampling )
s->second->nextIteration();
}
// Rebuild the selector map keyed by cumulative bias, skipping samplers
// with zero bias.
map<double,Ptr<BinSampler>::ptr> newSamplers;
double current = 0.;
for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin();
s != samplers().end(); ++s ) {
if ( s->second->bias() == 0.0 )
continue;
current += s->second->bias();
newSamplers[current] = s->second;
}
samplers() = newSamplers;
}
// Update the cached integrated cross section and its error. If
// AddUpSamplers is disabled the estimate comes from the run statistics;
// otherwise it is accumulated from the individual samplers, recomputed
// only every theUpdateAfter calls.
void GeneralSampler::currentCrossSections() const {
if ( !theAddUpSamplers ) {
double n = attempts();
if ( n > 1 ) {
theIntegratedXSec = sumWeights()*maxXSec()/attempts();
double sw = sumWeights(); double sw2 = sumWeights2();
// Standard error of the mean of the event weights, scaled by maxXSec.
theIntegratedXSecErr = maxXSec()*sqrt(abs(sw2/n-sqr(sw/n))/(n-1));
} else {
theIntegratedXSec = ZERO;
theIntegratedXSecErr = ZERO;
}
return;
}
if ( gotCrossSections )
return;
// Throttle: only recompute every theUpdateAfter calls.
if ( crossSectionCalls > 0 ) {
if ( ++crossSectionCalls == theUpdateAfter ) {
crossSectionCalls = 0;
} else return;
}
++crossSectionCalls;
gotCrossSections = true;
theIntegratedXSec = ZERO;
double var = 0.0;
// Sum the per-sampler cross sections; add errors in quadrature.
for ( map<double,Ptr<BinSampler>::ptr>::const_iterator s = samplers().begin();
s != samplers().end(); ++s ) {
theIntegratedXSec += s->second->integratedXSec();
var += sqr(s->second->integratedXSecErr()/nanobarn);
}
theIntegratedXSecErr = sqrt(var)*nanobarn;
}
// If needed, insert default implementations of virtual function defined
// in the InterfacedBase class here (using ThePEG-interfaced-impl in Emacs).
// Standard Interfaced doinit: set up run directories, enable parallel
// integration when requested through the interfaces, and read any
// existing integration grids, warning if none are found at run level.
// Fixes: corrected typos in the user-facing warning ("intented" ->
// "intended", "both, the" -> "both the").
void GeneralSampler::doinit() {
if ( RunDirectories::empty() )
RunDirectories::pushRunId(generator()->runName());
// A non-zero IntegratePerJob or IntegrationJobs setting implies that
// parallel integration has been requested.
if ( integratePerJob() || integrationJobs() ) {
theParallelIntegration = true;
theIntegratePerJob = integratePerJob();
theIntegrationJobs = integrationJobs();
}
readGrids();
if ( theGrids.children().empty() && runLevel() == RunMode )
generator()->log()
<< "\n--------------------------------------------------------------------------------\n\n"
<< "Warning: No grid file could be found at the start of this run.\n\n"
<< "* For a read/run setup intended to be used with --setupfile please consider\n"
<< " using the build/integrate/run setup.\n"
<< "* For a build/integrate/run setup to be used with --setupfile please ensure\n"
<< " that the same setupfile is provided to both the integrate and run steps.\n\n"
<< "--------------------------------------------------------------------------------\n" << flush;
// No samplers at run level means the integration step just finished.
if ( samplers().empty() && runLevel() == RunMode )
justAfterIntegrate = true;
SamplerBase::doinit();
}
// Standard Interfaced dofinish: report samplers still in compensating
// mode and points with nan/inf weight, print cross section summaries,
// optionally write run combination data, and save grids and remappers.
void GeneralSampler::dofinish() {
set<string> compensating;
for ( map<double,Ptr<BinSampler>::ptr>::const_iterator s =
samplers().begin(); s != samplers().end(); ++s ) {
if ( s->second->compensating() ) {
compensating.insert(s->second->process());
}
if ( s->second->nanPoints() ) {
generator()->log() << "warning: "
<< s->second->nanPoints() << " of "
<< s->second->allPoints() << " points with nan or inf weight\n"
<< "in " << s->second->process() << "\n" << flush;
}
s->second->finalize(theVerbose);
}
if ( theVerbose ) {
if ( !compensating.empty() ) {
generator()->log() << "warning: sampling for the following processes is still compensating:\n";
for ( set<string>::const_iterator c = compensating.begin();
c != compensating.end(); ++c )
generator()->log() << *c << "\n";
}
generator()->log() << "final integrated cross section is ( "
<< integratedXSec()/nanobarn << " +/- "
<< integratedXSecErr()/nanobarn << " ) nb\n" << flush;
}
if ( !compensating.empty() ) {
generator()->log() << "Warning: Some samplers are still in compensating mode.\n" << flush;
}
// Report events that exceeded the estimated maximum weight together
// with hints on how to reduce the excess rate.
if ( maximumExceeds != 0 ) {
//generator()->log() << maximumExceeds << " of " << theAttempts
// << " attempted points exceeded the guessed maximum weight\n"
// << "with an average relative deviation of "
// << maximumExceededBy/maximumExceeds << "\n\n" << flush;
generator()->log() <<"\n\n\nNote: In this run "<<maximumExceeds<<" of the "<<theAccepts<<" accepted events\n"
<<"were found with a weight W larger than the expected Wmax.\n";
generator()->log() <<"This corresponds to a cross section difference between:\n"
<<" UnitWeights: "<< theMaxWeight*theSumWeights/theAttempts<<"nb\n"
<<" AlmostUnweighted: "<< theMaxWeight*correctWeights/theAttempts<< "nb\n"<<
" use 'set Sampler:AlmostUnweighted On' to switch to non-unit weights.\n\n";
generator()->log() <<"The maximum weight determined in the read/integrate step has been enhanced by \n"<<
" set /Herwig/Samplers/Sampler:MaxEnhancement "<< theMaxEnhancement<<
".\nIf the rate of excessions ("<<(double)maximumExceeds*100/(double)theAccepts<<
"%) or the change of the cross section is large,\nyou can try to:\n\n"<<
"Enhance the number of points used in the read/integrate step\n"<<
" set /Herwig/Samplers/Sampler:BinSampler:InitialPoints ...\n\n"<<
"and/or enhance the reference weight found in the read/integrate step\n"<<
" set /Herwig/Samplers/Sampler:MaxEnhancement 1.x\n\n"<<
"If this does not help (and your process is well defined by cuts)\n"<<
"don't hesitate to contact herwig@projects.hepforge.org.\n\n";
}
// Write sampling statistics to HerwigSampling.dat for later combination
// of several runs.
if ( runCombinationData ) {
string dataName = RunDirectories::runStorage();
if ( dataName.empty() )
dataName = "./";
else if ( *dataName.rbegin() != '/' )
dataName += "/";
dataName += "HerwigSampling.dat";
ofstream data(dataName.c_str());
double runXSec =
theMaxWeight*theSumWeights/theAttempts;
// Variance of the cross section estimate from the run statistics.
double runXSecErr =
sqr(theMaxWeight)*(1./theAttempts)*(1./(theAttempts-1.))*
abs(theSumWeights2 - sqr(theSumWeights)/theAttempts);
- data << setprecision(20);
+ data << setprecision(17);
data << "CrossSectionCombined "
<< (integratedXSec()/nanobarn) << " +/- "
<< (integratedXSecErr()/nanobarn) << "\n"
<< "CrossSectionRun "
<< runXSec << " +/- " << sqrt(runXSecErr) << "\n"
<< "PointsAttempted " << theAttempts << "\n"
<< "PointsAccepted " << theAccepts << "\n"
<< "SumWeights " << theSumWeights*theMaxWeight << "\n"
<< "SumWeights2 " << theSumWeights2*sqr(theMaxWeight) << "\n"
<< flush;
}
// Collect grids and remappers from all samplers and, if requested,
// write them to disk.
theGrids = XML::Element(XML::ElementTypes::Element,"Grids");
for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin();
s != samplers().end(); ++s ) {
s->second->saveGrid();
s->second->saveRemappers();
if ( justAfterIntegrate )
s->second->saveIntegrationData();
}
if ( theWriteGridsOnFinish )
writeGrids();
SamplerBase::dofinish();
}
// Standard Interfaced doinitrun: read grids, initialize or re-initialize
// the samplers (reading integration data right after an integrate step),
// and mark the object as actively sampling.
// Fixes: corrected typos in the user-facing warning ("Warning:No" ->
// "Warning: No", "intented" -> "intended", "both, the" -> "both the").
void GeneralSampler::doinitrun() {
readGrids();
if ( theGrids.children().empty() && !didReadGrids )
generator()->log()
<< "\n--------------------------------------------------------------------------------\n\n"
<< "Warning: No grid file could be found at the start of this run.\n\n"
<< "* For a read/run setup intended to be used with --setupfile please consider\n"
<< " using the build/integrate/run setup.\n"
<< "* For a build/integrate/run setup to be used with --setupfile please ensure\n"
<< " that the same setupfile is provided to both the integrate and run steps.\n\n"
<< "--------------------------------------------------------------------------------\n" << flush;
if ( samplers().empty() ) {
// No samplers yet: we are directly after the integration step; set up
// from scratch unless a setup file will do it later.
justAfterIntegrate = true;
if ( !hasSetupFile() )
initialize();
} else {
for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin();
s != samplers().end(); ++s ) {
s->second->setupRemappers(theVerbose);
if ( justAfterIntegrate )
s->second->readIntegrationData();
s->second->initialize(theVerbose);
}
}
isSampling = true;
SamplerBase::doinitrun();
}
void GeneralSampler::rebind(const TranslationMap & trans) {
for ( map<double,Ptr<BinSampler>::ptr>::iterator s =
samplers().begin(); s != samplers().end(); ++s )
s->second = trans.translate(s->second);
SamplerBase::rebind(trans);
}
// Collect all referenced objects: the base class references plus every
// bin sampler held in the selector map.
IVector GeneralSampler::getReferences() {
  IVector refs = SamplerBase::getReferences();
  typedef map<double,Ptr<BinSampler>::ptr>::iterator SamplerIterator;
  for ( SamplerIterator entry = samplers().begin();
        entry != samplers().end(); ++entry )
    refs.push_back(entry->second);
  return refs;
}
void GeneralSampler::writeGrids() const {
if ( theGrids.children().empty() )
return;
string dataName = RunDirectories::runStorage();
if ( dataName.empty() )
dataName = "./";
else if ( *dataName.rbegin() != '/' )
dataName += "/";
dataName += "HerwigGrids.xml";
ofstream out(dataName.c_str());
XML::ElementIO::put(theGrids,out);
}
// Read integration grids from disk. Searches all run storage directories
// for a global HerwigGrids.xml file; if none exists and the run was split
// into parallel integration jobs, attempts to combine the per-job grid
// files into a global one. Falls back to an empty grid element.
void GeneralSampler::readGrids() {
// return if grids were already read
if ( didReadGrids )
return;
// check for global HerwigGrids.xml file or combine integration jobs to a global HerwigGrids.xml file
// Show messages of integration job combination only in the first run (if no global HerwigGrids.xml file is found in one of the directories)
// or in case of an error
// Check if a globalHerwigGridsFileFound was found and keep messages in a stringstream buffer beforehand
bool globalHerwigGridsFileFound = false;
bool integrationJobCombinationSuccessful = true;
std::stringstream messageBuffer;
RunDirectories directories;
while ( directories && !didReadGrids ) {
string dataName = directories.nextRunStorage();
if ( dataName.empty() )
dataName = "./";
else if ( *dataName.rbegin() != '/' )
dataName += "/";
string directoryName = dataName;
dataName += "HerwigGrids.xml";
ifstream in(dataName.c_str());
if ( in ) {
theGrids = XML::ElementIO::get(in);
didReadGrids = true;
// Set to true if in any of the directories a global HerwigGrid.xml file was found
globalHerwigGridsFileFound = true;
}
else {
// Check if integrationJob was split and try to merge single integrationJobs together
// integrationJobsCreated() == 0 indicates that parallel integration has not been
// requested, while the parallel integration parameters may well yield a single job
if(integrationJobsCreated() >= 1 && runLevel() == RunMode) {
messageBuffer << "\n\n* Global HerwigGrids.xml file does not exist yet"
<< "\n and integration jobs were split into " << integrationJobsCreated() << " integration jobs."
<< "\n Trying to combine single integration jobs to a global HerwigGrids.xml file"
<< "\n using the following directory " << directoryName << ".";
theGrids = XML::Element(XML::ElementTypes::Element,"Grids");
integrationJobCombinationSuccessful = true;
// Append each per-job grid file (integrationJob<N>/HerwigGrids.xml)
// to the combined grid element, tracking any failure.
for(unsigned int currentProcessedIntegrationJobNum = 0; currentProcessedIntegrationJobNum < integrationJobsCreated(); ++currentProcessedIntegrationJobNum) {
ostringstream currentProcessedIntegrationJob;
currentProcessedIntegrationJob << directoryName << "integrationJob" << currentProcessedIntegrationJobNum << "/HerwigGrids.xml";
if(boost::filesystem::exists(boost::filesystem::path(currentProcessedIntegrationJob.str()))) {
ifstream localGridFileIN(currentProcessedIntegrationJob.str().c_str());
if(localGridFileIN) {
theGrids = theGrids + XML::ElementIO::get(localGridFileIN);
messageBuffer << "\n* Added integration job " << currentProcessedIntegrationJobNum << " to global HerwigGrids.xml file.";
}
else {
integrationJobCombinationSuccessful = false;
messageBuffer << "\n* Could not open/add integration job " << currentProcessedIntegrationJobNum << " to global HerwigGrids.xml file.";
}
}
else {
integrationJobCombinationSuccessful = false;
messageBuffer << "\n* Could not find integration job " << currentProcessedIntegrationJob.str();
}
}
// Only write the combined global grid file if every job was merged.
if(integrationJobCombinationSuccessful) {
string globalGridFile = directoryName + "HerwigGrids.xml";
ofstream globalGridFileOF(globalGridFile.c_str());
XML::ElementIO::put(theGrids,globalGridFileOF);
messageBuffer << "\n* Global HerwigGrids.xml file was created, the integration jobs 0 to " << integrationJobsCreated()-1
<< " were combined."
<< "\n* If previous warnings in regards to the HerwigGrids.xml file occured, these can be safely ignored."
<< "\n* Note: This message will occur only in the first run and will be suppressed in further runs.\n"
<< flush;
didReadGrids = true;
}
else {
messageBuffer << "\n* Global HerwigGrids.xml file could not be created due to failed combination of integration jobs."
<< "\n Please check the above-mentioned missing/failed integration jobs which are needed for the combination."
<< "\n* Note: It can be that the HerwigGrids.xml file is searched and can be found in further directories."
<< "\n In this case you can ignore this warning message.\n" << flush;
}
}
}
}
// Show messages if global HerwigGrids.xml file was not found or first combination run
if (!globalHerwigGridsFileFound && (theVerbose || !integrationJobCombinationSuccessful))
BaseRepository::cout() << messageBuffer.str() << "\n" << flush;
if ( !didReadGrids )
theGrids = XML::Element(XML::ElementTypes::Element,"Grids");
}
// Persistency: write the full sampler state. The order of members must
// be kept strictly in sync with persistentInput().
void GeneralSampler::persistentOutput(PersistentOStream & os) const {
os << theVerbose << theBinSampler << theSamplers << theLastSampler
<< theUpdateAfter << crossSectionCalls << gotCrossSections
<< ounit(theIntegratedXSec,nanobarn)
<< ounit(theIntegratedXSecErr,nanobarn)
<< theSumWeights << theSumWeights2
<< theAttempts << theAccepts << theMaxWeight
<< theAddUpSamplers << theGlobalMaximumWeight
<< theFlatSubprocesses << isSampling << theMinSelection
<< runCombinationData << theAlmostUnweighted << maximumExceeds
<< maximumExceededBy << correctWeights << theMaxEnhancement
<< theParallelIntegration
<< theIntegratePerJob << theIntegrationJobs
<< theIntegrationJobsCreated << theWriteGridsOnFinish;
}
// Persistency: read the full sampler state. The order of members must
// be kept strictly in sync with persistentOutput().
void GeneralSampler::persistentInput(PersistentIStream & is, int) {
is >> theVerbose >> theBinSampler >> theSamplers >> theLastSampler
>> theUpdateAfter >> crossSectionCalls >> gotCrossSections
>> iunit(theIntegratedXSec,nanobarn)
>> iunit(theIntegratedXSecErr,nanobarn)
>> theSumWeights >> theSumWeights2
>> theAttempts >> theAccepts >> theMaxWeight
>> theAddUpSamplers >> theGlobalMaximumWeight
>> theFlatSubprocesses >> isSampling >> theMinSelection
>> runCombinationData >> theAlmostUnweighted >> maximumExceeds
>> maximumExceededBy >> correctWeights >> theMaxEnhancement
>> theParallelIntegration
>> theIntegratePerJob >> theIntegrationJobs
>> theIntegrationJobsCreated >> theWriteGridsOnFinish;
}
// *** Attention *** The following static variable is needed for the type
// description system in ThePEG. Please check that the template arguments
// are correct (the class and its base class), and that the constructor
// arguments are correct (the class name and the name of the dynamically
// loadable library where the class implementation can be found).
// Static registration of GeneralSampler with ThePEG's class description
// system, naming the dynamically loadable library providing it.
DescribeClass<GeneralSampler,SamplerBase>
describeHerwigGeneralSampler("Herwig::GeneralSampler", "HwSampling.so");
// Register all repository interfaces (parameters, switches, references)
// through which GeneralSampler is configured from input files.
void GeneralSampler::Init() {
static ClassDocumentation<GeneralSampler> documentation
("A GeneralSampler class");
// The underlying per-bin sampler used for each subprocess.
static Reference<GeneralSampler,BinSampler> interfaceBinSampler
("BinSampler",
"The bin sampler to be used.",
&GeneralSampler::theBinSampler, false, false, true, false, false);
static Parameter<GeneralSampler,size_t> interfaceUpdateAfter
("UpdateAfter",
"Update cross sections every number of events.",
&GeneralSampler::theUpdateAfter, 1, 1, 0,
false, false, Interface::lowerlim);
static Switch<GeneralSampler,bool> interfaceVerbose
("Verbose",
"",
&GeneralSampler::theVerbose, false, false, false);
static SwitchOption interfaceVerboseOn
(interfaceVerbose,
"On",
"",
true);
static SwitchOption interfaceVerboseOff
(interfaceVerbose,
"Off",
"",
false);
static Switch<GeneralSampler,bool> interfaceAddUpSamplers
("AddUpSamplers",
"Calculate cross sections from adding up individual samplers.",
&GeneralSampler::theAddUpSamplers, false, false, false);
static SwitchOption interfaceAddUpSamplersOn
(interfaceAddUpSamplers,
"On",
"",
true);
static SwitchOption interfaceAddUpSamplersOff
(interfaceAddUpSamplers,
"Off",
"",
false);
static Switch<GeneralSampler,bool> interfaceGlobalMaximumWeight
("GlobalMaximumWeight",
"Use a global maximum weight instead of partial unweighting.",
&GeneralSampler::theGlobalMaximumWeight, true, false, false);
static SwitchOption interfaceGlobalMaximumWeightOn
(interfaceGlobalMaximumWeight,
"On",
"",
true);
static SwitchOption interfaceGlobalMaximumWeightOff
(interfaceGlobalMaximumWeight,
"Off",
"",
false);
static Parameter<GeneralSampler,double> interfaceMaxEnhancement
("MaxEnhancement",
"Enhance the maximum reference weight found in the read step.",
&GeneralSampler::theMaxEnhancement, 1.1, 1.0, 1.5,
false, false, Interface::limited);
static Switch<GeneralSampler,bool> interfaceFlatSubprocesses
("FlatSubprocesses",
"[debug] Perform a flat subprocess selection.",
&GeneralSampler::theFlatSubprocesses, false, false, false);
static SwitchOption interfaceFlatSubprocessesOn
(interfaceFlatSubprocesses,
"On",
"",
true);
static SwitchOption interfaceFlatSubprocessesOff
(interfaceFlatSubprocesses,
"Off",
"",
false);
static Parameter<GeneralSampler,double> interfaceMinSelection
("MinSelection",
"A minimum subprocess selection probability.",
&GeneralSampler::theMinSelection, 0.01, 0.0, 1.0,
false, false, Interface::limited);
static Switch<GeneralSampler,bool> interfaceRunCombinationData
("RunCombinationData",
"",
&GeneralSampler::runCombinationData, false, false, false);
static SwitchOption interfaceRunCombinationDataOn
(interfaceRunCombinationData,
"On",
"",
true);
static SwitchOption interfaceRunCombinationDataOff
(interfaceRunCombinationData,
"Off",
"",
false);
static Switch<GeneralSampler,bool> interfaceAlmostUnweighted
("AlmostUnweighted",
"",
&GeneralSampler::theAlmostUnweighted, false, false, false);
static SwitchOption interfaceAlmostUnweightedOn
(interfaceAlmostUnweighted,
"On",
"",
true);
static SwitchOption interfaceAlmostUnweightedOff
(interfaceAlmostUnweighted,
"Off",
"",
false);
// Interfaces controlling the parallel integration setup.
static Switch<GeneralSampler,bool> interfaceParallelIntegration
("ParallelIntegration",
"Prepare parallel jobs for integration.",
&GeneralSampler::theParallelIntegration, false, false, false);
static SwitchOption interfaceParallelIntegrationYes
(interfaceParallelIntegration,
"Yes",
"",
true);
static SwitchOption interfaceParallelIntegrationNo
(interfaceParallelIntegration,
"No",
"",
false);
static Parameter<GeneralSampler,unsigned int> interfaceIntegratePerJob
("IntegratePerJob",
"The number of subprocesses to integrate per job.",
&GeneralSampler::theIntegratePerJob, 0, 0, 0,
false, false, Interface::lowerlim);
static Parameter<GeneralSampler,unsigned int> interfaceIntegrationJobs
("IntegrationJobs",
"The maximum number of integration jobs to create.",
&GeneralSampler::theIntegrationJobs, 0, 0, 0,
false, false, Interface::lowerlim);
static Parameter<GeneralSampler,unsigned int> interfaceIntegrationJobsCreated
("IntegrationJobsCreated",
"The number of integration jobs which were actually created.",
&GeneralSampler::theIntegrationJobsCreated, 1, 1, 0,
false, false, Interface::lowerlim);
static Switch<GeneralSampler,bool> interfaceWriteGridsOnFinish
("WriteGridsOnFinish",
"Write grids on finishing a run.",
&GeneralSampler::theWriteGridsOnFinish, false, false, false);
static SwitchOption interfaceWriteGridsOnFinishYes
(interfaceWriteGridsOnFinish,
"Yes",
"",
true);
static SwitchOption interfaceWriteGridsOnFinishNo
(interfaceWriteGridsOnFinish,
"No",
"",
false);
}
diff --git a/Sampling/MonacoSampler.cc b/Sampling/MonacoSampler.cc
--- a/Sampling/MonacoSampler.cc
+++ b/Sampling/MonacoSampler.cc
@@ -1,398 +1,398 @@
// -*- C++ -*-
//
// MonacoSampler.cc is a part of Herwig - A multi-purpose Monte Carlo event generator
// Copyright (C) 2002-2012 The Herwig Collaboration
//
// Herwig is licenced under version 2 of the GPL, see COPYING for details.
// Please respect the MCnet academic guidelines, see GUIDELINES for details.
//
//
// This is the implementation of the non-inlined, non-templated member
// functions of the MonacoSampler class.
//
#include "MonacoSampler.h"
#include "ThePEG/Interface/ClassDocumentation.h"
#include "ThePEG/EventRecord/Particle.h"
#include "ThePEG/Repository/UseRandom.h"
#include "ThePEG/Repository/EventGenerator.h"
#include "ThePEG/Utilities/DescribeClass.h"
#include "ThePEG/Repository/Repository.h"
#include "ThePEG/Interface/Parameter.h"
#include "ThePEG/Persistency/PersistentOStream.h"
#include "ThePEG/Persistency/PersistentIStream.h"
#include "ThePEG/Handlers/StandardEventHandler.h"
#include "ThePEG/Handlers/StandardXComb.h"
#include <boost/progress.hpp>
#include "MonacoSampler.h"
#include "Herwig/Sampling/GeneralSampler.h"
using namespace Herwig;
// Default constructor; theAlpha and theGridDivisions defaults match the
// corresponding interface defaults in Init().
MonacoSampler::MonacoSampler()
: BinSampler(),
theAlpha(0.875),
theGridDivisions(48),
theIterationPoints(0) {}
MonacoSampler::~MonacoSampler() {}
// Standard ThePEG clone methods.
IBPtr MonacoSampler::clone() const {
return new_ptr(*this);
}
IBPtr MonacoSampler::fullclone() const {
return new_ptr(*this);
}
// Sample one phase space point from the current Monaco grid, evaluate
// the cross section there and return the (possibly unweighted) event
// weight. Squared weights are accumulated per grid cell for the next
// grid adaption in adapt().
double MonacoSampler::generate() {
double w = 1.;
// cout<<"\npoint: ";
std::valarray<int> upperb(dimension());
for ( int k = 0; k < dimension(); ++k ) {
// Pick a grid cell and a position inside it; w accumulates the
// Jacobian of the grid mapping.
double div = (1 - UseRandom::rnd()) * theGridDivisions;
upperb[k] = static_cast<int>(div);
double gupper, glower;
// Clamp the cell index to the valid range and look up the cell edges.
if ( upperb[k] <= 0 ) {
upperb[k] = 0;
glower = 0.;
gupper = theGrid(k,0);
} else if (upperb[k] >= static_cast<int>(theGridDivisions)) {
upperb[k] = theGridDivisions-1;
glower = theGrid(k,theGridDivisions-2);
gupper = theGrid(k,theGridDivisions-1);
} else {
glower = theGrid(k,upperb[k]-1);
gupper = theGrid(k,upperb[k]);
}
double gdiff = gupper - glower;
lastPoint()[k] = glower + (div-upperb[k])*gdiff;
w *= gdiff * theGridDivisions;
}
// cout<<lastPoint()[k]<<" ";
try {
w *= eventHandler()->dSigDR(lastPoint()) / nanobarn;
} catch (Veto&) {
w = 0.0;
} catch (...) {
throw;
}
// only store numbers
double wgt = w;
if ( isnan(wgt) || isinf(wgt) ) wgt = 0;
// save results for later grid optimization
theIterationPoints++;
for ( int k = 0; k < dimension(); ++k ) {
theGridData(k,upperb[k]) += wgt*wgt;
}
// Optionally book the random numbers used, for diagnostic histograms.
if (randomNumberString()!="")
for ( size_t k = 0; k < lastPoint().size(); ++k ) {
RandomNumberHistograms[RandomNumberIndex(id(),k)].first.book(lastPoint()[k],wgt);
RandomNumberHistograms[RandomNumberIndex(id(),k)].second+=wgt;
}
// Partial unweighting against the reference weight once initialized.
if ( !weighted() && initialized() ) {
double p = min(abs(w),referenceWeight())/referenceWeight();
double sign = w >= 0. ? 1. : -1.;
if ( p < 1 && UseRandom::rnd() > p )
w = 0.;
else
w = sign*max(abs(w),referenceWeight());
}
select(w);
if ( w != 0.0 )
accept();
return w;
}
void MonacoSampler::saveGrid() const {
XML::Element grid = toXML();
grid.appendAttribute("process",id());
sampler()->grids().append(grid);
}
// Initialize the sampler: restore a previously saved grid for this
// process if one exists in the global grid store, otherwise start from a
// flat grid and run the configured adaption iterations.
void MonacoSampler::initialize(bool progress) {
//read in grid
bool haveGrid = false;
// Look for a "Monaco" element whose "process" attribute matches our id.
list<XML::Element>::iterator git = sampler()->grids().children().begin();
for ( ; git != sampler()->grids().children().end(); ++git ) {
if ( git->type() != XML::ElementTypes::Element )
continue;
if ( git->name() != "Monaco" )
continue;
string proc;
git->getFromAttribute("process",proc);
if ( proc == id() ) {
haveGrid = true;
break;
}
}
if ( haveGrid ) {
// Consume the stored grid element so it is not read twice.
fromXML(*git);
sampler()->grids().erase(git);
didReadGrids();
} else {
// flat grid
theGrid.resize(dimension(),theGridDivisions);
for (int k = 0; k < dimension(); k++)
for (size_t l = 0; l < theGridDivisions; l++)
theGrid(k,l) = (l+1)/static_cast<double>(theGridDivisions);
theGridData = boost::numeric::ublas::zero_matrix<double>(dimension(),theGridDivisions);
theIterationPoints = 0;
}
lastPoint().resize(dimension());
if (randomNumberString()!="")
for(size_t i=0;i<lastPoint().size();i++){
RandomNumberHistograms[RandomNumberIndex(id(),i)] = make_pair( RandomNumberHistogram(),0.);
}
if ( initialized() ) {
// Running stage: a grid must already exist.
if ( !hasGrids() )
throw Exception() << "MonacoSampler: Require existing grid when starting to run.\n"
<< "Did you miss setting --setupfile?"
<< Exception::abortnow;
return;
}
if ( haveGrid ) {
// Grid restored but cross section not yet integrated: one more
// iteration plus adaption.
if ( !integrated() ) {
runIteration(initialPoints(),progress);
adapt();
}
isInitialized();
return;
}
// if ( !sampler()->grids().children().empty() ) {
// nIterations(1);
// }
// Fresh grid: run the configured number of adaption iterations,
// enhancing the number of points after each one.
unsigned long points = initialPoints();
for ( unsigned long k = 0; k < nIterations(); ++k ) {
runIteration(points,progress);
if ( k < nIterations() - 1 ) {
points = (unsigned long)(points*enhancementFactor());
adapt();
nextIteration();
}
}
adapt();
didReadGrids();
isInitialized();
}
// Refine the grid from the squared weights accumulated per cell during
// the last iteration (Vegas-style adaption), then reset the accumulators.
void MonacoSampler::adapt() {
int dim = dimension();
// refine grid
// First smooth the per-cell data by averaging with neighbouring cells
// and accumulate the per-dimension totals.
std::valarray<double> gridcumul(dim);
for (int k=0; k<dim; ++k) {
double gridold = theGridData(k,0);
double gridnew = theGridData(k,1);
theGridData(k,0) = (gridold + gridnew) / 2.0;
gridcumul[k] = theGridData(k,0);
for (size_t l=1; l<theGridDivisions-1; ++l) {
theGridData(k,l) = gridold + gridnew;
gridold = gridnew;
gridnew = theGridData(k,l+1);
theGridData(k,l) = (theGridData(k,l) + gridnew) / 3.0;
gridcumul[k] += theGridData(k,l);
}
theGridData(k,theGridDivisions-1) = (gridnew + gridold) / 2.0;
gridcumul[k] += theGridData(k,theGridDivisions-1);
}
for (int k=0; k<dim; ++k) {
// Compute the damped importance of each cell, controlled by theAlpha.
double rc = 0.;
std::valarray<double> ri(theGridDivisions);
for (size_t l=0; l<theGridDivisions; ++l) {
ri[l] = 0.;
if ((theGridData(k,l) >= 0) && (gridcumul[k] != 0)) {
// Guard against vanishing cell data in the logarithm below.
theGridData(k,l) = max( 1.0e-30, theGridData(k,l) );
double gpart = gridcumul[k] / theGridData(k,l);
ri[l] = pow( (gpart - 1.0) / (gpart * log( gpart )), theAlpha);
} else {
ri[l] = pow( 1. / log( 1e30 ), theAlpha);
}
rc += ri[l];
}
rc /= theGridDivisions;
// Redistribute the bin edges so each new cell carries equal importance.
double gridold = 0, gridnew = 0.;
double deltar = 0.;
unsigned int m = 0;
std::valarray<double> theGridRowNew(theGridDivisions);
for (size_t l = 0; l < theGridDivisions; ++l) {
deltar += ri[l];
gridold = gridnew;
gridnew = theGrid(k,l);
for (; deltar > rc; m++) {
deltar -= rc;
theGridRowNew[m] = gridnew - (gridnew - gridold) * deltar / ri[l];
}
}
for (size_t l = 0; l < theGridDivisions-1; ++l) {
theGrid(k,l) = theGridRowNew[l];
}
// The last bin edge is always the upper end of the unit interval.
theGrid(k,theGridDivisions-1) = 1.0;
}
// Reset the accumulators for the next iteration.
theGridData = boost::numeric::ublas::zero_matrix<double>(dimension(),theGridDivisions);
theIterationPoints = 0;
}
// Finalize the sampler: perform a last grid adaption, store the grid in
// the global grid store, and dump the random number histograms if booked.
void MonacoSampler::finalize(bool) {
// save grid
adapt();
// Note: explicitly qualified call to MonacoSampler::toXML (non-virtual
// dispatch), so derived classes cannot alter what is saved here.
XML::Element grid = MonacoSampler::toXML();
grid.appendAttribute("process",id());
sampler()->grids().append(grid);
if (randomNumberString()!="")
for ( map<RandomNumberIndex,pair<RandomNumberHistogram,double> >::
const_iterator b = RandomNumberHistograms.begin();
b != RandomNumberHistograms.end(); ++b ) {
b->second.first.dump(randomNumberString(), b->first.first,shortprocess(),b->first.second);
}
}
// Restore the grid from its XML representation (see toXML). If the
// number of grid divisions stored differs from theGridDivisions the grid
// is rebinned, preserving the bin density.
void MonacoSampler::fromXML(const XML::Element& grid) {
int dim = 0;
grid.getFromAttribute("Dimension",dim);
if ( dim != dimension() ) {
throw std::runtime_error("[MonacoSampler] Number of dimensions in grid file does not match expectation.");
}
size_t griddivisions = 0;
grid.getFromAttribute("GridDivisions",griddivisions);
boost::numeric::ublas::matrix<double> tmpgrid(dim,griddivisions);
// Read one GridVector element per dimension into the temporary matrix.
pair<multimap<pair<int,string>,list<XML::Element>::iterator>::const_iterator,multimap<pair<int,string>,list<XML::Element>::iterator>::const_iterator> cit;
cit = grid.findAll(XML::ElementTypes::Element,"GridVector");
if ( cit.first->second == grid.children().end() )
throw std::runtime_error("[MonacoSampler] Expected a GridVector element.");
for (multimap<pair<int,string>,list<XML::Element>::iterator>::const_iterator iit=cit.first; iit!=cit.second; ++iit) {
const XML::Element& gridvector = *iit->second;
int k = 0;
gridvector.getFromAttribute("Index",k);
if ( k >= dim ) {
throw std::runtime_error("[MonacoSampler] Index of grid dimension larger than grid size.");
} else {
list<XML::Element>::const_iterator git;
git = gridvector.findFirst(XML::ElementTypes::ParsedCharacterData,"");
if ( git == gridvector.children().end() )
throw std::runtime_error("[MonacoSampler] Expected grid data.");
istringstream bdata(git->content());
for ( size_t l = 0; l < griddivisions; ++l ) {
bdata >> tmpgrid(k,l);
}
}
}
// store back into main variable
// if griddivisions do not match, rebin preserving bin density
theGrid.resize(dim,theGridDivisions);
theIterationPoints = 0;
double divratio = griddivisions / static_cast<double>(theGridDivisions);
for (int k = 0; k < dim; k++) {
double xold = 0, xnew = 0, deltar = 0;
size_t l = 0;
for (size_t m = 0; m < griddivisions; m++) {
deltar += 1;
xold = xnew;
xnew = tmpgrid(k,m);
// Emit interpolated bin edges whenever enough source bins were read.
for (; deltar > divratio; l++) {
deltar -= divratio;
theGrid(k,l) = xnew - (xnew - xold) * deltar;
}
}
theGrid(k,theGridDivisions-1) = 1.0;
}
// Start with fresh accumulators for the restored grid.
theGridData = boost::numeric::ublas::zero_matrix<double>(dimension(),theGridDivisions);
}
// Serialize the grid to XML: a "Monaco" element carrying the dimension
// and division count, with one GridVector child per dimension holding
// the bin edges as character data (see fromXML for the inverse).
XML::Element MonacoSampler::toXML() const {
XML::Element grid(XML::ElementTypes::Element,"Monaco");
grid.appendAttribute("Dimension",dimension());
grid.appendAttribute("GridDivisions",theGridDivisions);
for ( int k = 0; k < dimension(); ++k ) {
XML::Element gridvector(XML::ElementTypes::Element,"GridVector");
gridvector.appendAttribute("Index",k);
ostringstream bdata;
- bdata << setprecision(20);
+ bdata << setprecision(17);
for ( size_t l = 0; l < theGridDivisions; ++l )
bdata << theGrid(k,l) << " ";
XML::Element belem(XML::ElementTypes::ParsedCharacterData,bdata.str());
gridvector.append(belem);
grid.append(gridvector);
}
return grid;
}
// If needed, insert default implementations of virtual function defined
// in the InterfacedBase class here (using ThePEG-interfaced-impl in Emacs).
// Persistency: write the base class state followed by the Monaco-specific
// parameters; must be kept in sync with persistentInput().
void MonacoSampler::persistentOutput(PersistentOStream & os) const {
BinSampler::put(os);
os << theAlpha << theGridDivisions;
}
// Persistency: read the base class state followed by the Monaco-specific
// parameters; must be kept in sync with persistentOutput().
void MonacoSampler::persistentInput(PersistentIStream & is, int) {
BinSampler::get(is);
is >> theAlpha >> theGridDivisions;
}
// *** Attention *** The following static variable is needed for the type
// description system in ThePEG. Please check that the template arguments
// are correct (the class and its base class), and that the constructor
// arguments are correct (the class name and the name of the dynamically
// loadable library where the class implementation can be found).
// Static registration of MonacoSampler with ThePEG's class description
// system, naming the dynamically loadable library providing it.
DescribeClass<MonacoSampler,BinSampler>
describeHerwigMonacoSampler("Herwig::MonacoSampler", "HwSampling.so");
// Register the repository interfaces through which MonacoSampler is
// configured; defaults match the constructor initializers.
void MonacoSampler::Init() {
static ClassDocumentation<MonacoSampler> documentation
("MonacoSampler samples XCombs bins. This implementation performs weighted MC integration using Monaco, an adapted Vegas algorithm.");
static Parameter<MonacoSampler,double> interfaceAlpha
("Alpha",
"Rate of grid modification (0 for no modification).",
&MonacoSampler::theAlpha, 0.875, 0.0, 0,
false, false, Interface::lowerlim);
static Parameter<MonacoSampler,size_t> interfaceGridDivisions
("GridDivisions",
"The number of divisions per grid dimension.",
&MonacoSampler::theGridDivisions, 48, 1, 0,
false, false, Interface::lowerlim);
}
diff --git a/Sampling/Remapper.cc b/Sampling/Remapper.cc
--- a/Sampling/Remapper.cc
+++ b/Sampling/Remapper.cc
@@ -1,247 +1,247 @@
// -*- C++ -*-
//
// Remapper.cc is a part of Herwig - A multi-purpose Monte Carlo event generator
// Copyright (C) 2002-2012 The Herwig Collaboration
//
// Herwig is licenced under version 2 of the GPL, see COPYING for details.
// Please respect the MCnet academic guidelines, see GUIDELINES for details.
//
#include <cassert>
#include <cmath>
#include <sstream>
#include <stdexcept>
#include <iomanip>
#include <cstdlib>
#include "Remapper.h"
using namespace Herwig;
using namespace std;
// Default constructor: no bins are set up; state is typically filled in
// later, e.g. via fromXML().
Remapper::Remapper()
  : minSelection(0.0), smooth(false) {}
// Construct a remapper with nBins equidistant bins on the unit interval.
// Bins are keyed in 'weights' by their upper edge: step, 2*step, ..., 1.
// nMinSelection is the floor put on any bin's selection probability and
// must be positive; nSmooth enables three-bin smoothing in finalize().
Remapper::Remapper(unsigned int nBins,
                   double nMinSelection,
                   bool nSmooth)
  : minSelection(nMinSelection), smooth(nSmooth) {
  assert(minSelection > 0.0);
  const double width = 1./nBins;
  for ( unsigned int bin = 1; bin <= nBins; ++bin )
    weights[bin*width] = 0.0;
}
// Accumulate |w| into the bin containing x. Bins are keyed by their
// upper edge, so upper_bound(x) yields the bin whose edge exceeds x.
// Callers are expected to pass x in [0,1] — TODO confirm against callers.
void Remapper::fill(double x, double w) {
  map<double,double>::iterator k =
    weights.upper_bound(x);
  // x exactly on (or, by roundoff, marginally above) the upper edge of
  // the last bin would give end() and fail the assertion below even for
  // legal input x == 1.0; attribute such points to the last bin.
  if ( k == weights.end() && !weights.empty() )
    --k;
  assert(k != weights.end());
  k->second += abs(w);
}
// Convert the accumulated per-bin weights into the cumulative 'selector'
// used by generate(): normalise, floor at minSelection, optionally smooth,
// then build a cumulative-probability -> bin lookup table.
void Remapper::finalize() {
// Work on a copy; the raw statistics in 'weights' stay untouched.
map<double,double> nweights = weights;
// Bins are equidistant and keyed by upper edge, so the first key is the bin width.
double step = nweights.begin()->first;
double norm = 0.0;
for ( map<double,double>::const_iterator k =
nweights.begin(); k != nweights.end(); ++k )
norm += k->second;
// Normalise to unit sum, then floor each bin at minSelection so no bin
// can be starved; 'sum' re-accumulates the (now not exactly unit) total.
double sum = 0.0;
for ( map<double,double>::iterator k =
nweights.begin(); k != nweights.end(); ++k ) {
k->second /= norm;
k->second = max(k->second,minSelection);
sum += k->second;
}
if ( smooth ) {
assert(nweights.size() >= 2);
// Three-bin moving average; the two edge bins are averaged with their
// single neighbour only.
map<double,double> nnweights = nweights;
nnweights.begin()->second =
0.5*(nweights.begin()->second + (++nweights.begin())->second);
(--nnweights.end())->second =
0.5*((--nweights.end())->second + (--(--nweights.end()))->second);
sum = nnweights.begin()->second + (--nnweights.end())->second;
map<double,double>::iterator nb = ++nnweights.begin();
map<double,double>::const_iterator b = ++nweights.begin();
for ( ; b != --nweights.end(); ++b, ++nb ) {
map<double,double>::const_iterator bm = b; --bm;
map<double,double>::const_iterator bp = b; ++bp;
nb->second = (bm->second + b->second + bp->second)/3.;
sum += nb->second;
}
nweights = nnweights;
}
// Build the selector: key is the cumulative selection probability up to
// and including this bin; the entry records the bin's x-range and its
// probability density (bin weight / total / bin width).
norm = 0.0;
for ( map<double,double>::const_iterator k =
nweights.begin(); k != nweights.end(); ++k ) {
norm += k->second;
SelectorEntry s;
s.lower = k->first - step;
s.upper = k->first;
s.value = k->second / sum / step;
selector[norm/sum] = s;
}
}
// Map a uniform random number r onto a point x distributed according to
// the adapted bin density; returns (x, density at x) so the caller can
// divide out the importance weight.
pair<double,double> Remapper::generate(double r) const {
  map<double,SelectorEntry>::const_iterator cell
    = selector.upper_bound(r);
  // An empty/exhausted selector can occur when no truly non-zero cross
  // section was accumulated; return the identity map with unit density
  // and let the integrator pick up this result downstream.
  if ( cell == selector.end() )
    return make_pair(r,1.);
  const SelectorEntry& entry = cell->second;
  // Cumulative-probability bounds of the selected cell.
  double cumUp = cell->first;
  double cumLow =
    cell != selector.begin() ? (--cell)->first : 0.0;
  // Linearly map [cumLow,cumUp] in r-space onto [entry.lower,entry.upper].
  double x =
    ((entry.upper-entry.lower)/(cumUp-cumLow))*
    (r-(cumLow*entry.upper-cumUp*entry.lower)/(entry.upper-entry.lower));
  return pair<double,double>(x,entry.value);
}
// Restore the remapper from its XML representation (inverse of toXML()).
// Throws std::runtime_error if a mandatory child element is missing.
void Remapper::fromXML(const XML::Element& elem) {
  elem.getFromAttribute("MinSelection",minSelection);
  elem.getFromAttribute("Smooth",smooth);
  size_t nbins = 0;
  elem.getFromAttribute("NBins",nbins);

  list<XML::Element>::const_iterator child;
  child = elem.findFirst(XML::ElementTypes::Element,"BinData");
  if ( child == elem.children().end() )
    throw runtime_error("[ExSample::Remapper] Expected a BinData element.");
  const XML::Element& bindata = *child;
  child = bindata.findFirst(XML::ElementTypes::ParsedCharacterData,"");
  if ( child == bindata.children().end() )
    throw runtime_error("[ExSample::Remapper] Expected bin data.");
  // Bin data is a flat list of (upper edge, accumulated weight) pairs.
  istringstream bdata(child->content());
  for ( size_t bin = 0; bin < nbins; ++bin ) {
    double edge, weight;
    bdata >> edge >> weight;
    weights[edge] = weight;
  }

  child = elem.findFirst(XML::ElementTypes::Element,"SelectorData");
  if ( child == elem.children().end() )
    throw runtime_error("[ExSample::Remapper] Expected a SelectorData element.");
  const XML::Element& selectordata = *child;
  child = selectordata.findFirst(XML::ElementTypes::ParsedCharacterData,"");
  if ( child == selectordata.children().end() )
    throw runtime_error("[ExSample::Remapper] Expected selector data.");
  // Selector data: (cumulative key, lower, upper, value) per entry.
  istringstream sdata(child->content());
  for ( size_t bin = 0; bin < nbins; ++bin ) {
    double key; SelectorEntry entry;
    sdata >> key >> entry.lower >> entry.upper >> entry.value;
    selector[key] = entry;
  }
}
// Serialise the remapper to XML; fromXML() reads this format back.
XML::Element Remapper::toXML() const {
  XML::Element res(XML::ElementTypes::Element,"Remapper");
  res.appendAttribute("MinSelection",minSelection);
  res.appendAttribute("Smooth",smooth);
  res.appendAttribute("NBins",weights.size());

  // Bin statistics as (upper edge, accumulated weight) pairs.
  ostringstream bdata;
  // 17 significant digits are enough to round-trip an IEEE-754 double.
  bdata << setprecision(17);
  for ( map<double,double>::const_iterator it = weights.begin();
        it != weights.end(); ++it )
    bdata << it->first << " " << it->second << " ";
  XML::Element bindata(XML::ElementTypes::Element,"BinData");
  XML::Element binchars(XML::ElementTypes::ParsedCharacterData,bdata.str());
  bindata.append(binchars);
  res.append(bindata);

  // Selector entries as (cumulative key, lower, upper, value) tuples.
  ostringstream sdata;
  sdata << setprecision(17);
  for ( map<double,SelectorEntry>::const_iterator it = selector.begin();
        it != selector.end(); ++it )
    sdata << it->first << " " << it->second.lower << " "
          << it->second.upper << " " << it->second.value << " ";
  XML::Element selectordata(XML::ElementTypes::Element,"SelectorData");
  XML::Element selchars(XML::ElementTypes::ParsedCharacterData,sdata.str());
  selectordata.append(selchars);
  res.append(selectordata);

  return res;
}
// Square of x; cheaper and clearer than std::pow(x,2) for this use.
inline double sqr(double x) {
  return x * x;
}
// Self-test with n Monte-Carlo points: integrate a piecewise test function
// flat and then via the adapted mapping, print both estimates (with the
// reference value 0.353848) to cerr, and write a per-bin comparison to os.
// Uses drand48() for the random points, so results depend on its seed.
void Remapper::test(size_t n, std::ostream& os) {
// Pass 1: flat sampling — accumulate the estimate and fill this remapper.
double sumwFlat = 0.0;
double sumw2Flat = 0.0;
for ( size_t k = 0; k < n; ++k ) {
double x = drand48();
// Test integrand: 5*x^0.4*(0.7-x)^2.4 for x<0.7, x^2 for x>0.8, else 0.
double fx = x < 0.7 ? 5.*pow(x,0.4)*pow(0.7-x,2.4) : ( x > 0.8 ? x*x : 0.0 );
sumwFlat += fx;
sumw2Flat += fx*fx;
fill(x,fx);
}
finalize();
// Pass 2: sample through the adapted mapping; 'check' re-accumulates the
// generated x values to verify the realised bin populations.
Remapper check(weights.size(),0.001,false);
double sumw = 0.0;
double sumw2 = 0.0;
for ( size_t k = 0; k < n; ++k ) {
double r = drand48();
pair<double,double> rw = generate(r);
double x = rw.first;
double fx = x < 0.7 ? 5.*pow(x,0.4)*pow(0.7-x,2.4) : ( x > 0.8 ? x*x : 0.0 );
// Divide out the importance weight (the density returned by generate).
fx /= rw.second;
sumw += fx;
sumw2 += fx*fx;
check.fill(x,1.);
}
// Report both estimates with their statistical errors.
cerr << setprecision(6)
<< "int flat = "
<< (sumwFlat/n) << " +/- " << sqrt(abs(sqr(sumwFlat/n)-sumw2Flat/n)/(n-1.)) << "\n"
<< "int mapped = "
<< (sumw/n) << " +/- " << sqrt(abs(sqr(sumw/n)-sumw2/n)/(n-1.)) << "\n"
<< "int exact = 0.353848\n" << flush;
// Totals needed to normalise the per-bin densities below.
double sum = 0.0;
double sumCheck = 0.0;
map<double,double>::const_iterator w = weights.begin();
map<double,double>::const_iterator wx = check.weights.begin();
for ( ; w != weights.end(); ++w, ++wx ) {
sum += w->second;
sumCheck += wx->second;
}
// Per bin: range, measured density, selector density, cumulative key,
// and the density realised by the generated sample.
double step = weights.begin()->first;
map<double,SelectorEntry>::const_iterator s = selector.begin();
w = weights.begin();
wx = check.weights.begin();
for ( ; s != selector.end(); ++s, ++w, ++wx )
os << s->second.lower << " " << s->second.upper << " "
<< w->second/sum/step << " " << s->second.value << " "
<< s->first << " " << wx->second/sumCheck/step << "\n"
<< flush;
}

File Metadata

Mime Type
text/x-diff
Expires
Tue, Nov 19, 5:58 PM (1 d, 17 h)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
3798099
Default Alt Text
(79 KB)

Event Timeline