diff --git a/MatrixElement/Matchbox/External/MadGraph/MadGraphAmplitude.cc b/MatrixElement/Matchbox/External/MadGraph/MadGraphAmplitude.cc
--- a/MatrixElement/Matchbox/External/MadGraph/MadGraphAmplitude.cc
+++ b/MatrixElement/Matchbox/External/MadGraph/MadGraphAmplitude.cc
@@ -1,844 +1,849 @@
// -*- C++ -*-
//
// MadGraphAmplitude.cc is a part of Herwig - A multi-purpose Monte Carlo event generator
// Copyright (C) 2002-2012 The Herwig Collaboration
//
// Herwig is licenced under version 2 of the GPL, see COPYING for details.
// Please respect the MCnet academic guidelines, see GUIDELINES for details.
//
//
// This is the implementation of the non-inlined, non-templated member
// functions of the MadGraphAmplitude class.
//
#include "MadGraphAmplitude.h"
#include "ThePEG/Interface/ClassDocumentation.h"
#include "ThePEG/EventRecord/Particle.h"
#include "ThePEG/Repository/UseRandom.h"
#include "ThePEG/Repository/EventGenerator.h"
#include "ThePEG/Utilities/DescribeClass.h"
#include "Herwig/MatrixElement/Matchbox/MatchboxFactory.h"
#include "ThePEG/Interface/Parameter.h"
#include "ThePEG/Interface/Switch.h"
#include "ThePEG/Persistency/PersistentOStream.h"
#include "ThePEG/Persistency/PersistentIStream.h"
#include "ThePEG/PDT/EnumParticles.h"
#include <boost/lexical_cast.hpp>
#include <boost/filesystem.hpp>
#include <cstdlib>
#include <dlfcn.h>
#include <errno.h>
#include <sstream>
using namespace Herwig;
#ifndef HERWIG_BINDIR
#error Makefile.am needs to define HERWIG_BINDIR
#endif
#ifndef HERWIG_PKGDATADIR
#error Makefile.am needs to define HERWIG_PKGDATADIR
#endif
#ifndef MADGRAPH_PREFIX
#error Makefile.am needs to define MADGRAPH_PREFIX
#endif
extern "C" void mginitproc_(char *i,int);
extern "C" void MG_Calculate_wavefunctions_virt(int* proc,double*,double*);
extern "C" void MG_Calculate_wavefunctions_born(int* proc,double*, int*);
extern "C" void MG_Jamp (int* proc,int*, double*);
extern "C" void MG_LNJamp (int* proc,int*, double*);
extern "C" void MG_Virt (int* proc,double*);
extern "C" void MG_NCol (int* proc,int*);
extern "C" void MG_vxxxxx (double* p,double* n,int* inc,double* );
extern "C" void MG_Colour (int* proc,int* i,int* j ,int* color);
MadGraphAmplitude::MadGraphAmplitude()
: theMGmodel("loop_sm"),keepinputtopmass(false),
bindir_(HERWIG_BINDIR), includedir_(HERWIG_INCLUDEDIR), pkgdatadir_(HERWIG_PKGDATADIR), madgraphPrefix_(MADGRAPH_PREFIX)
{}
MadGraphAmplitude::~MadGraphAmplitude() {
}
IBPtr MadGraphAmplitude::clone() const {
return new_ptr(*this);
}
IBPtr MadGraphAmplitude::fullclone() const {
return new_ptr(*this);
}
bool MadGraphAmplitude::initializedMad=false;
vector<string> MadGraphAmplitude::BornAmplitudes=vector<string>();
vector<string> MadGraphAmplitude::VirtAmplitudes=vector<string>();
void MadGraphAmplitude::initProcess(const cPDVector& ) {
if ( lastMatchboxXComb()->initialized() )
return;
if ( !DynamicLoader::load(mgProcLibPath()+"InterfaceMadGraph.so") )
throw Exception() << "MadGraphAmplitude: Failed to load MadGraph amplitudes\n"
<< DynamicLoader::lastErrorMessage
<< Exception::runerror;
if (!initializedMad){
string mstr=(factory()->runStorage()+"MadGraphAmplitudes"+"/param_card"+((theMGmodel=="loop_sm")?"":("_"+theMGmodel))+".dat");
if( theMGmodel[0]=='/')mstr="param_card.dat";
size_t len = mstr.size();
mginitproc_(const_cast<char*>(mstr.c_str()),len);
initializedMad=true;
}
lastMatchboxXComb()->isInitialized();
}
bool MadGraphAmplitude::writeAmplitudesDat(){
bool res=false;
string born= mgProcLibPath()+"BornAmplitudes.dat";
if ( !boost::filesystem::exists(born) ) {
ofstream borns(born.c_str());
for (vector<string>::iterator amps=BornAmplitudes.begin();amps!=BornAmplitudes.end();amps++)
borns<<*amps<<endl;
borns.close();
res=true;
}
string virt= mgProcLibPath()+"VirtAmplitudes.dat";
if ( !boost::filesystem::exists(virt) ) {
ofstream virts(virt.c_str());
for (vector<string>::iterator amps=VirtAmplitudes.begin();amps!=VirtAmplitudes.end();amps++)
virts<<*amps<<endl;
virts.flush();
virts.close();
res=true;
}
return res;
}
bool MadGraphAmplitude::checkAmplitudes(){
string born= mgProcLibPath()+"BornAmplitudes.dat";
string virt= mgProcLibPath()+"VirtAmplitudes.dat";
assert ( boost::filesystem::exists(born)|| boost::filesystem::exists(virt));
bool foundallborns=true;
for (vector<string>::iterator amps=BornAmplitudes.begin();amps!=BornAmplitudes.end();amps++){
ifstream borns(born.c_str());
string line;
bool foundthisborn=false;
while (std::getline(borns, line)) {
if(line==*amps)foundthisborn=true;
}
foundallborns&=foundthisborn;
}
bool foundallvirts=true;
for (vector<string>::iterator amps=VirtAmplitudes.begin();amps!=VirtAmplitudes.end();amps++){
ifstream virts(virt.c_str());
string line;
bool foundthisvirt=false;
while (std::getline(virts, line)) {
if(line==*amps)foundthisvirt=true;
}
foundallvirts&=foundthisvirt;
}
if (!foundallborns||!foundallvirts)
- throw Exception() << "MadGraphAmplitude: One amplitude has no externalId. Please remove the MadGraphAmplitude-folder and rebuild.\n" << Exception::runerror;
+
+ throw Exception() << "MadGraphAmplitude: The MadGraph amplitudes did not match the process.\n"
+ << " Please remove:"<<mgProcLibPath()<< "\n"
+ << " or set a process path via the interface:\n"
+ << " set /Herwig/MatrixElements/Matchbox/Amplitudes/MadGraph:ProcessPath ..."
+ << Exception::runerror;
return foundallborns && foundallvirts;
}
string MadGraphAmplitude::mgProcLibPath(){
string res=theProcessPath == "" ? factory()->buildStorage()+"MadGraphAmplitudes" : theProcessPath;
if (res.at(res.length()-1) != '/') res.append("/");
return res;
}
bool MadGraphAmplitude::initializeExternal() {
if ( boost::filesystem::exists(mgProcLibPath()) ) {
if ( !boost::filesystem::is_directory(mgProcLibPath()) )
throw Exception() << "MadGraphAmplitude: MadGraph amplitude storage '"
<< mgProcLibPath() << "' existing but not a directory."
<< Exception::runerror;
} else {
boost::filesystem::create_directories(mgProcLibPath());
}
string runAmplitudes = factory()->runStorage() + "/MadGraphAmplitudes";
if ( boost::filesystem::exists(runAmplitudes) ) {
if ( !boost::filesystem::is_directory(runAmplitudes) )
throw Exception() << "MadGraphAmplitude: MadGraph amplitude storage '"
<< runAmplitudes << "' existing but not a directory."
<< Exception::runerror;
} else {
boost::filesystem::create_directories(runAmplitudes);
}
//EW-consistency check:
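// Sketch of the relation checked below: at tree level
//   G_F = pi * alpha_EM(M_Z) / ( sqrt(2) * M_W^2 * (1 - M_W^2/M_Z^2) ),
// which, solved for the W mass, gives
//   M_W^2 = M_Z^2/2 + sqrt( M_Z^4/4 - pi * alpha_EM(M_Z) * M_Z^2 / (sqrt(2) * G_F) ).
// If the W mass used by Herwig does not satisfy this relation, the warning below is issued.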
Energy MW=getParticleData(ParticleID::Wplus)->hardProcessMass();
Energy MZ=getParticleData(ParticleID::Z0)->hardProcessMass();
if( MW!= sqrt(MZ*MZ/2.0+sqrt(MZ*MZ*MZ*MZ/4.0-Constants::pi*SM().alphaEMMZ()*MZ*MZ/ sqrt(2.0)/SM().fermiConstant()))){
generator()->log()<<"\n\n-----!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!-----";
generator()->log() << "\nYou are using an EW scheme which is inconsistent with the MadGraph parametrisation:\n\n"
<<MW/GeV<< " GeV==MW!= sqrt(MZ^2/2+sqrt(MZ^4/4.0-pi*alphaEMMZ*MZ^2/ sqrt(2)/G_f))=="<<
sqrt(MZ*MZ/2.0+sqrt(MZ*MZ*MZ*MZ/4.0-Constants::pi*SM().alphaEMMZ()*MZ*MZ/ sqrt(2.0)/SM().fermiConstant()))/GeV
<<" GeV\n\n-----!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!-----\n";
}
string para= factory()->runStorage()+"/MadGraphAmplitudes"+"/MG-Parameter.dat";
ofstream params(para.c_str());
params<<"$WZ$ " <<std::setiosflags(ios::scientific) <<getParticleData(ParticleID::Z0)->hardProcessWidth() /GeV;
params<<"\n$WW$ " <<std::setiosflags(ios::scientific) <<getParticleData(ParticleID::Wplus)->hardProcessWidth()/GeV;
params<<"\n$alphas$ " <<std::setiosflags(ios::scientific) <<SM().alphaS();
params<<"\n$GF$ " <<std::setiosflags(ios::scientific) <<SM().fermiConstant()*GeV2 ;
params<<"\n$alphaMZ$ " <<std::setiosflags(ios::scientific) <<1/SM().alphaEMMZ();
params<<"\n$MZ$ " <<std::setiosflags(ios::scientific) <<getParticleData(ParticleID::Z0)->hardProcessMass() /GeV<<flush;
params<<"\n$MW$ " <<std::setiosflags(ios::scientific) <<getParticleData(ParticleID::Wplus)->hardProcessMass() /GeV<<flush;
params<<"\n$sw2$ " <<std::setiosflags(ios::scientific) << SM().sin2ThetaW() <<flush;
if(theMGmodel=="heft"&&!keepinputtopmass){
if ( factory()->initVerbose() ) {
generator()->log()<<"\n---------------------------------------------------------------";
generator()->log()<<"\n---------------------------------------------------------------";
generator()->log()<<"\nNote: You are using the Higgs Effective model (heft) in ";
generator()->log()<<"\n MadGraph. We assume you are trying to calculate NLO with ";
generator()->log()<<"\n the GoSam virtual amplitudes. To match the models we ";
generator()->log()<<"\n therefore set the top mass to 10000000 GeV.";
generator()->log()<<"\n\n For more information see the \\tau parameter in:";
generator()->log()<<"\n https://cp3.irmp.ucl.ac.be/projects/madgraph/wiki/Models/HiggsEffective";
generator()->log()<<"\n\n The effective Higgs model in GoSam uses mT=infinity.";
generator()->log()<<"\n\n\n If you want to use the LO matrix elements of MadGraph with a finite top mass you need to add: ";
generator()->log()<<"\n\n set Madgraph:KeepInputTopMass True";
generator()->log()<<"\n\n to your input file.";
generator()->log()<<"\n---------------------------------------------------------------";
generator()->log()<<"\n---------------------------------------------------------------\n";
}
params<<"\n$MT$ 10000000." <<flush;
}else{
params<<"\n$MT$ " <<std::setiosflags(ios::scientific) << getParticleData(ParticleID::t)->hardProcessMass() /GeV <<flush;
}
params<<"\n$WT$ " <<std::setiosflags(ios::scientific) << getParticleData(ParticleID::t)->hardProcessWidth() /GeV <<flush;
params<<"\n$MB$ " <<std::setiosflags(ios::scientific) << getParticleData(ParticleID::b)->hardProcessMass() /GeV <<flush;
params<<"\n$MH$ " <<std::setiosflags(ios::scientific) << getParticleData(ParticleID::h0)->hardProcessMass() /GeV <<flush;
params<<"\n$WH$ " <<std::setiosflags(ios::scientific) << getParticleData(ParticleID::h0)->hardProcessWidth() /GeV <<flush;
params<<"\n$MTA$ " <<std::setiosflags(ios::scientific) << getParticleData(ParticleID::tauplus)->hardProcessMass() /GeV <<flush;
string cmd = "python " + bindir_ + "/mg2herwig ";
cmd +=" --buildpath "+mgProcLibPath();
cmd += !theProcessPath.empty() ? " --absolute-links" : "";
cmd +=" --model "+theMGmodel;
cmd +=" --runpath "+factory()->runStorage()+"/MadGraphAmplitudes ";
cmd +=" --datadir "+pkgdatadir_;
cmd +=" --includedir "+includedir_;
std::stringstream as,aem;
as << factory()->orderInAlphaS();
cmd +=" --orderas "+as.str() ;
aem <<factory()->orderInAlphaEW();
cmd +=" --orderew "+aem.str();
// TODO move to boost::system
writeAmplitudesDat();
if (boost::filesystem::exists(mgProcLibPath()+"InterfaceMadGraph.so") ){
//set the parameters
checkAmplitudes();
std::system(cmd.c_str());
ranMadGraphInitializeExternal = true;
return true;
}
char cwd[1024];
if ( !getcwd(cwd,sizeof(cwd)) )
throw Exception() << "MadGraphAmplitude: failed to determine current working directory\n"
<< Exception::runerror;
cmd +=" --madgraph " + madgraphPrefix_ + "/bin " ;
cmd +="--build > ";
cmd += mgProcLibPath()+"MG.log 2>&1";
generator()->log() << "\n>>> Compiling MadGraph amplitudes. This may take some time -- please be patient.\n"
<< ">>> In case of problems see " << mgProcLibPath() << "MG.log for details.\n\n"
<< flush;
std::system(cmd.c_str());
cmd = "python " + bindir_ + "/mg2herwig ";
cmd +=" --buildpath "+mgProcLibPath();
cmd +=" --model "+theMGmodel;
cmd +=" --runpath "+factory()->runStorage()+"/MadGraphAmplitudes ";
cmd +=" --datadir "+pkgdatadir_;
as.clear();
aem.clear();
as << factory()->orderInAlphaS();
cmd +=" --orderas "+as.str() ;
aem <<factory()->orderInAlphaEW();
cmd +=" --orderew "+aem.str();
std::system(cmd.c_str());
ranMadGraphInitializeExternal = true;
return boost::filesystem::exists(mgProcLibPath()+"InterfaceMadGraph.so");
}
int MadGraphAmplitude::externalId(const cPDVector& proc) {
for (int i=0;i<100;i++){
colourindex.push_back(-2);
}
assert(!BornAmplitudes.empty()||!VirtAmplitudes.empty());
writeAmplitudesDat();
int res=0;
string amp="";
int k=0;
for (cPDVector::const_iterator it=proc.begin();it!=proc.end();it++,k++){
amp+=boost::lexical_cast<string>( (*it)->id())+" ";if (k==1)amp+=" > ";
}
string born= mgProcLibPath()+"BornAmplitudes.dat";
string virt= mgProcLibPath()+"VirtAmplitudes.dat";
assert ( boost::filesystem::exists(born)|| boost::filesystem::exists(virt));
ifstream borns(born.c_str());
string line;
while (std::getline(borns, line)) {
res+=1;
if(line==amp)return res;
}
ifstream virts(virt.c_str());
while (std::getline(virts, line)) {
res+=1;
if(line==amp)return res;
}
throw Exception() << "MadGraphAmplitude: One amplitude has no externalId. Please remove the MadGraphAmplitude-folder and rebuild.\n" << Exception::runerror;
return res;
}
bool MadGraphAmplitude::ranMadGraphInitializeExternal = false;
void MadGraphAmplitude::doinit() {
if ( !ranMadGraphInitializeExternal ) {
initializeExternal();
}
MatchboxAmplitude::doinit();
}
void MadGraphAmplitude::doinitrun() {
if ( !ranMadGraphInitializeExternal ) {
initializeExternal();
}
MatchboxAmplitude::doinitrun();
}
bool MadGraphAmplitude::canHandle(const PDVector& p,
Ptr<MatchboxFactory>::tptr factory,
bool virt) const {
if ( factory->processData()->diagramMap().find(p) !=
factory->processData()->diagramMap().end() )
return true;
vector<Ptr<Tree2toNDiagram>::ptr> diags =
factory->diagramGenerator()->generate(p,orderInGs(),orderInGem());
if ( diags.empty() )
return false;
factory->processData()->diagramMap()[p] = diags;
string amp="";
int k=0;
for (PDVector::const_iterator it=p.begin();it!=p.end();it++,k++){
amp+=boost::lexical_cast<string>( (*it)->id())+" ";if (k==1)amp+=" > ";
}
if (virt && factory->highestVirt()>=p.size()){
VirtAmplitudes.push_back(amp);
}else{
BornAmplitudes.push_back(amp);
}
return true;
}
void MadGraphAmplitude::prepareAmplitudes(Ptr<MatchboxMEBase>::tcptr me) {
useMe();
if ( !calculateTreeAmplitudes() ) {
MatchboxAmplitude::prepareAmplitudes(me);
return;
}
if (colourindex.empty()) {
for (int i=0;i<100;i++){
colourindex.push_back(-2);
}
}
lastMatchboxXComb()->clearheljamp();
lastMatchboxXComb()->clearhelLNjamp();
initProcess(mePartonData());
MatchboxAmplitude::prepareAmplitudes(me);
}
Complex MadGraphAmplitude::evaluate(size_t i, const vector<int>& hel, Complex& largeN) {
//find the colourline:
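// The Matchbox colour-basis tensor i is mapped to the corresponding MadGraph
// colour-flow index; the mapping is cached in colourindex (with -1 meaning that
// no MadGraph flow matches, in which case this contribution vanishes).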
int ii = -1;
int xx=lastMatchboxXComb()->externalId();
if (colourindex.size()<=i) {
colourindex.clear();
for (size_t l=0;l<=i+10;l++){
colourindex.push_back(-2);
}
}
if(colourindex[i]!=-2){
ii = colourindex[i];
if (ii==-1) {
largeN = Complex(0.0);
return Complex(0.0);
}
} else {
set<vector<size_t> > a = colourOrdering(i);
int ncol=-1;
MG_NCol(&xx,&ncol);
assert(ncol!=-1);
for( int it = 0; it < ncol; it++ ){
int n = 0;
for ( cPDVector::const_iterator nx = mePartonData().begin();
nx != mePartonData().end(); nx++ )
if ( (*nx)->coloured() ) n++;
set<vector<size_t> > tmpset;
vector<size_t> tmpvek;
for ( int it2 = 0; it2 < n; it2++ ) {
int ret=-2;
MG_Colour(&xx,&it,&it2,&ret);
assert(ret !=-2);
if (ret== -1)
break;
if ( ret == 0 ) {
n++;
tmpset.insert(tmpvek);
tmpvek.clear();
} else {
tmpvek.push_back(ret-1);
}
if( it2 == n-1 ) tmpset.insert(tmpvek);
}
bool found_all = true;
for ( set<vector<size_t> >::iterator it3 = a.begin(); it3 != a.end(); it3++ ) {
bool found_it3=false;
for ( set<vector<size_t> >::iterator it4 = tmpset.begin(); it4 != tmpset.end(); it4++ ) {
vector<size_t> it3tmp = gluonsFirst(*it3);
vector<size_t> it4tmp = (*it4);
if ( it3tmp.size() != it4tmp.size() ) continue;
if ( it3tmp == it4tmp ) found_it3 = true;
}
found_all = found_all && found_it3;
}
if ( found_all ) {
colourindex[i]=it;
ii=it;
}
}
}
if ( ii == -1 ){
colourindex[i]=ii;
largeN = Complex(0.0);
return Complex(0.0);
}
const map<vector<int>,vector < complex<double> > >& tmp = lastMatchboxXComb()->heljamp();
const map<vector<int>,vector < complex<double> > >& tmpLN = lastMatchboxXComb()->helLNjamp();
if( tmp.find(hel) != tmp.end()) {
largeN = tmpLN.find(hel)->second[ii];
return tmp.find(hel)->second[ii];
}
double units = pow(sqrt(lastSHat())/GeV,int(hel.size())-4);
int heltmp[10];
for(size_t j=0;j<hel.size();j++){
int cross=crossingMap()[j];
if( (cross>1&&j<=1)||(cross<=1&&j>1)){
heltmp[cross]=-1*hel[j];}
else{heltmp[cross]=hel[j];}
}
vector<Lorentz5Momentum> reshuffled = meMomenta();
if ( !reshuffleMasses().empty() && reshuffled.size() > 3 ) {
const cPDVector& pdata = mePartonData();
const map<long,Energy>& masses = reshuffleMasses();
lastMatchboxXComb()->reshuffle(reshuffled,pdata,masses);
}
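// Pack the (possibly reshuffled) momenta into the flat array handed to the
// Fortran side: four doubles (E, p_x, p_y, p_z) in GeV per external leg;
// components smaller than 1.e-13 GeV are set to zero to suppress numerical noise.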
double momenta[50];
size_t j=0;
for (size_t i=0;i<mePartonData().size();i++){
momenta[j]=abs(reshuffled[i].e()/GeV)<1.e-13?0.:double(reshuffled[i].e()/GeV);
momenta[j+1]=abs(reshuffled[i].x()/GeV)<1.e-13?0.:double(reshuffled[i].x()/GeV);
momenta[j+2]=abs(reshuffled[i].y()/GeV)<1.e-13?0.:double(reshuffled[i].y()/GeV);
momenta[j+3]=abs(reshuffled[i].z()/GeV)<1.e-13?0.:double(reshuffled[i].z()/GeV);
if(momenta[j ] == 0. && momenta[j+1] == 0. &&
momenta[j+2] == 0. && momenta[j+3] == 0. )
return 0.;
j+=4;
}
MG_Calculate_wavefunctions_born(&xx, &momenta[0], &heltmp[0]);
int ncol=-1;
MG_NCol(&xx,&ncol);
Complex res;
Complex resLN;
for( int it = 0; it < ncol; it++ ){
double dd[2];
MG_Jamp(&xx,&it,&dd[0]);
Complex d(dd[0],dd[1]);
if(it==ii)res=d*units;
lastMatchboxXComb()->pushheljamp(hel,d*units);
double ddLN[2];
MG_LNJamp(&xx,&it,&ddLN[0]);
Complex dLN(ddLN[0],ddLN[1]);
if(it==ii)resLN=dLN*units;
lastMatchboxXComb()->pushhelLNjamp(hel,dLN*units);
}
largeN = resLN;
return res;
}
vector<unsigned int> MadGraphAmplitude::physicalHelicities(const vector<int>& hel) const {
vector<unsigned int> res(hel.size(),0);
for ( size_t j = 0; j < hel.size(); ++j ) {
int cross = crossingMap()[j];
int xhel = 0;
if ( (cross > 1 && j <= 1) || (cross <= 1 && j > 1) )
xhel = -1*hel[j];
else
xhel = hel[j];
if ( mePartonData()[cross]->iSpin() == PDT::Spin1Half )
res[cross] = (xhel == -1 ? 0 : 1);
else if ( mePartonData()[cross]->iSpin() == PDT::Spin1 )
res[cross] = (unsigned int)(xhel + 1);
else if ( mePartonData()[cross]->iSpin() == PDT::Spin0 )
res[cross] = 0;
else assert(false);
}
return res;
}
LorentzVector<Complex> MadGraphAmplitude::plusPolarization(const Lorentz5Momentum& p,
const Lorentz5Momentum& n,
int i) const {
int tmp=i;
double pg[4],ng[4],poltmp[8];
pg[0]=p.e()/GeV;pg[1]=p.x()/GeV;pg[2]=p.y()/GeV;pg[3]=p.z()/GeV;
ng[0]=n.e()/GeV;ng[1]=n.x()/GeV;ng[2]=n.y()/GeV;ng[3]=n.z()/GeV;
MG_vxxxxx(&pg[0],&ng[0],&tmp,&poltmp[0]);
complex<double> pol[6];
pol[0]=Complex(poltmp[0],poltmp[1]);
pol[1]=Complex(poltmp[2],poltmp[3]);
pol[2]=Complex(poltmp[4],poltmp[5]);
pol[3]=Complex(poltmp[6],poltmp[7]);
LorentzVector<Complex> polarization(pol[1],pol[2],pol[3],pol[0]);
return polarization;
}
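// Helper: true if the helicity configurations a and b differ in entry i and
// agree in every other entry, i.e. they are equal up to a single flip at i.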
bool equalsModulo(unsigned int i, const vector<int>& a, const vector<int>& b) {
assert(a.size()==b.size());
if ( a[i] == b[i] )
return false;
for ( unsigned int k = 0; k < a.size(); ++k ) {
if ( k == i )
continue;
if ( a[k] != b[k] )
return false;
}
return true;
}
vector<size_t> MadGraphAmplitude::gluonsFirst(vector<size_t> vec) {
vector<size_t> vecout;
for(vector<size_t>::iterator it= vec.begin();it!= vec.end();++it)
if ( mePartonData()[crossingMap()[*it]]->id()==21)
vecout.push_back(crossingMap()[*it]);
for(vector<size_t>::iterator it= vec.begin();it!= vec.end();++it)
if ( mePartonData()[crossingMap()[*it]]->id()!=21)
vecout.push_back(crossingMap()[*it]);
return vecout;
}
double MadGraphAmplitude::spinColourCorrelatedME2(pair<int,int> ij,
const SpinCorrelationTensor& c) const {
vector<Lorentz5Momentum> reshuffled = meMomenta();
if ( !reshuffleMasses().empty() && reshuffled.size() > 3 ) {
const cPDVector& pdata = mePartonData();
const map<long,Energy>& masses = reshuffleMasses();
lastMatchboxXComb()->reshuffle(reshuffled,pdata,masses);
}
Lorentz5Momentum p = reshuffled[ij.first];
Lorentz5Momentum n = reshuffled[ij.second];
LorentzVector<Complex> polarization = plusPolarization(p,n,ij.first<2?-1:1);
int iCrossed = -1;
for ( unsigned int k = 0; k < crossingMap().size(); ++k )
if ( crossingMap()[k] == ij.first ) {
iCrossed = k;
break;
}
assert(iCrossed!=-1);
if(ij.first>1) polarization =polarization.conjugate();
if(iCrossed<2) polarization =polarization.conjugate();
Complex pFactor = (polarization*c.momentum())/sqrt(abs(c.scale()));
double avg =
colourCorrelatedME2(ij)*(-c.diagonal()+ (c.scale() > ZERO ? 1. : -1.)*norm(pFactor));
Complex csCorr = 0.0;
if ( calculateColourSpinCorrelator(ij) ) {
set<const CVector*> done;
for ( AmplitudeConstIterator a = lastAmplitudes().begin();
a != lastAmplitudes().end(); ++a ) {
if ( done.find(&(a->second)) != done.end() )
continue;
AmplitudeConstIterator b = lastAmplitudes().begin();
while ( !equalsModulo(iCrossed,a->first,b->first) )
if ( ++b == lastAmplitudes().end() )
break;
if ( b == lastAmplitudes().end() || done.find(&(b->second)) != done.end() )
continue;
done.insert(&(a->second)); done.insert(&(b->second));
if ( a->first[iCrossed] == 1 )
swap(a,b);
csCorr -= colourBasis()->colourCorrelatedInterference(ij,mePartonData(),a->second,b->second);
}
lastColourSpinCorrelator(ij,csCorr);
} else {
csCorr = lastColourSpinCorrelator(ij);
}
double corr =
2.*real(csCorr*sqr(pFactor));
double Nc = generator()->standardModel()->Nc();
double cfac = 1.;
if ( mePartonData()[ij.first]->iColour() == PDT::Colour8 ) {
cfac = Nc;
} else if ( mePartonData()[ij.first]->iColour() == PDT::Colour3 ||
mePartonData()[ij.first]->iColour() == PDT::Colour3bar ) {
cfac = (sqr(Nc)-1.)/(2.*Nc);
} else assert(false);
return
( avg +(c.scale() > ZERO ? 1. : -1.)*corr/cfac);
}
void MadGraphAmplitude::prepareOneLoopAmplitudes(Ptr<MatchboxMEBase>::tcptr ){
assert(false);
}
double MadGraphAmplitude::oneLoopInterference() const {
if ( !calculateOneLoopInterference() )
return lastOneLoopInterference();
evaloneLoopInterference();
return lastOneLoopInterference();
}
void MadGraphAmplitude::evaloneLoopInterference() const {
double units = pow(lastSHat()/GeV2,int(mePartonData().size())-4);
vector<Lorentz5Momentum> reshuffled = meMomenta();
if ( !reshuffleMasses().empty() && reshuffled.size() > 3 ) {
const cPDVector& pdata = mePartonData();
const map<long,Energy>& masses = reshuffleMasses();
lastMatchboxXComb()->reshuffle(reshuffled,pdata,masses);
}
double virt[20];
double momenta[50];
size_t j=0;
for (size_t i=0;i<mePartonData().size();i++){
momenta[j]=abs(reshuffled[i].e()/GeV)<1.e-13?0.:double(reshuffled[i].e()/GeV);
momenta[j+1]=abs(reshuffled[i].x()/GeV)<1.e-13?0.:double(reshuffled[i].x()/GeV);
momenta[j+2]=abs(reshuffled[i].y()/GeV)<1.e-13?0.:double(reshuffled[i].y()/GeV);
momenta[j+3]=abs(reshuffled[i].z()/GeV)<1.e-13?0.:double(reshuffled[i].z()/GeV);
j+=4;
}
int xx=lastMatchboxXComb()->externalId();
MG_Calculate_wavefunctions_virt(&xx,&momenta[0],&virt[0]);
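// ifact collects the initial-state spin average (1/4), the colour-average factor
// for each coloured incoming leg, and the final-state symmetry factor; the
// MadGraph result is divided by it below before being stored.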
double ifact = 1.;
ifact = 1./4.;
if (lastMatchboxXComb()->matchboxME()->mePartonData()[0]->iColour() == PDT::Colour3 ||
lastMatchboxXComb()->matchboxME()->mePartonData()[0]->iColour() == PDT::Colour3bar )
ifact /= SM().Nc();
else if ( lastMatchboxXComb()->matchboxME()->mePartonData()[0]->iColour() == PDT::Colour8 )
ifact /= (SM().Nc()*SM().Nc()-1.);
if ( lastMatchboxXComb()->matchboxME()->mePartonData()[1]->iColour() == PDT::Colour3 ||
lastMatchboxXComb()->matchboxME()->mePartonData()[1]->iColour() == PDT::Colour3bar )
ifact /= SM().Nc();
else if ( mePartonData()[1]->iColour() == PDT::Colour8 )
ifact /= (SM().Nc()*SM().Nc()-1.);
ifact *= lastMatchboxXComb()->matchboxME()->finalStateSymmetry();
lastOneLoopInterference(virt[1]/ifact*units);
lastOneLoopPoles(pair<double, double>(virt[2]/ifact*units,virt[3]/ifact*units));
}
void MadGraphAmplitude::persistentOutput(PersistentOStream & os) const {
os << theOrderInGs << theOrderInGem << BornAmplitudes << VirtAmplitudes
<< colourindex<<crossing << theProcessPath << theMGmodel << bindir_
<< pkgdatadir_ << madgraphPrefix_;
}
void MadGraphAmplitude::persistentInput(PersistentIStream & is, int) {
is >> theOrderInGs >> theOrderInGem >> BornAmplitudes >> VirtAmplitudes
>> colourindex>>crossing >> theProcessPath >> theMGmodel >> bindir_
>> pkgdatadir_ >> madgraphPrefix_;
}
// *** Attention *** The following static variable is needed for the type
// description system in ThePEG. Please check that the template arguments
// are correct (the class and its base class), and that the constructor
// arguments are correct (the class name and the name of the dynamically
// loadable library where the class implementation can be found).
DescribeClass<MadGraphAmplitude,MatchboxAmplitude>
describeHerwigMadGraphAmplitude("Herwig::MadGraphAmplitude", "HwMatchboxMadGraph.so");
void MadGraphAmplitude::Init() {
static ClassDocumentation<MadGraphAmplitude>
documentation("MadGraphAmplitude",
"Matrix elements have been calculated using MadGraph5 \\cite{Alwall:2011uj}",
"%\\cite{Alwall:2011uj}\n"
"\\bibitem{Alwall:2011uj}\n"
"J. Alwall et al.,\n"
"``MadGraph 5 : Going Beyond,''\n"
"arXiv:1106.0522 [hep-ph].\n"
"%%CITATION = ARXIV:1106.0522;%%");
static Parameter<MadGraphAmplitude,string> interfaceProcessPath
("ProcessPath",
"The Process Path.",
&MadGraphAmplitude::theProcessPath, "",false, false);
static Parameter<MadGraphAmplitude,string> interfaceModel
("Model",
"The MadGraph-Model.",
&MadGraphAmplitude::theMGmodel, "loop_sm",false, false);
static Switch<MadGraphAmplitude,bool> interfacekeepinputtopmass
("KeepInputTopMass",
"Switch On/Off formopt",
&MadGraphAmplitude::keepinputtopmass, false, false, false);
static SwitchOption interfacekeepinputtopmassTrue
(interfacekeepinputtopmass,
"On",
"On",
true);
static SwitchOption interfacekeepinputtopmassFalse
(interfacekeepinputtopmass,
"Off",
"Off",
false);
static Parameter<MadGraphAmplitude,string> interfaceBinDir
("BinDir",
"The location for the installed executable",
&MadGraphAmplitude::bindir_, string(HERWIG_BINDIR),
false, false);
static Parameter<MadGraphAmplitude,string> interfacePKGDATADIR
("DataDir",
"The location for the installed Herwig data files",
&MadGraphAmplitude::pkgdatadir_, string(HERWIG_PKGDATADIR),
false, false);
static Parameter<MadGraphAmplitude,string> interfaceMadgraphPrefix
("MadgraphPrefix",
"The prefix for the location of MadGraph",
&MadGraphAmplitude::madgraphPrefix_, string(MADGRAPH_PREFIX),
false, false);
}
diff --git a/MatrixElement/Matchbox/External/MadGraph/mg2herwig.py.in b/MatrixElement/Matchbox/External/MadGraph/mg2herwig.py.in
--- a/MatrixElement/Matchbox/External/MadGraph/mg2herwig.py.in
+++ b/MatrixElement/Matchbox/External/MadGraph/mg2herwig.py.in
@@ -1,387 +1,392 @@
#! /usr/bin/env python
import os,sys,glob,errno,shutil,time,fnmatch #argparse
from optparse import OptionParser
# helper to replace all sourceText in fileName with replaceText
def replacetext(fileName, sourceText, replaceText):
file = open(fileName, "r")
text = file.read()
file.close()
file = open(fileName, "w")
file.write(text.replace(sourceText, replaceText))
file.close()
# helper to build a path recursively
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
# helper to find the first file with the given name under path
def find(name, path):
for root, dirs, files in os.walk(path):
if name in files:
return os.path.join(root, name)
# helper to find all file paths which contain file names matching filepattern
def finddirs(filepattern, path):
founddirs = []
for root, dirs, files in os.walk(path):
if fnmatch.filter(files, filepattern):
founddirs.append(root)
return founddirs
# fill the proc.dat file from BornAmplitudes.dat and VirtAmplitudes.dat.
def fillprocs(model,oras,orew):
bornlist=[]
virtlist=[]
fileproc=open("proc.dat","w")
fileproc.write("set fortran_compiler @FC@ --no_save\n")
fileproc.write("import model "+model+"\n")
borns="BornAmplitudes.dat"
virts="VirtAmplitudes.dat"
first=True
procnr=0
virtlines=""
bornlines=""
minlegs=100
legs=0
for i in [borns, virts]:
file = open(i, "r")
for line in file:
if (len(line.split(" "))<minlegs):
minlegs=len(line.split(" "))
for it in line.split(" "):
if it.replace("-","").isdigit():
legs+=1
file.close()
#conversion for heft model to go from (2QCD+1QED)->1HIG for each FS HIGGS.
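# Counting sketch (assuming a single lowest-multiplicity process): a tree process
# with N external legs carries summed coupling order N-2, and each final-state Higgs
# attached via the effective vertex lowers that sum by two, so HIG solves
# (oras+orew) - 2*HIG = N - 2, which is the expression evaluated below.
# An odd remainder means no consistent coupling-power assignment exists.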
HIG=0
if (model=="heft"):
HIG=(int(oras)+int(orew)-legs+2)/2
if (int(oras)+int(orew)-legs+2)%2!=0:
print "Warning: no possible coupling power: (int(oras)+int(orew)-legs+2)%2 != 0"
exit()
return
file = open(borns, "r")
for line in file:
# this assumes extra QCD emissions
addalphas=len(line.split(" "))-minlegs
linetmp=line.rstrip()
procnr+=1
bornlist+=[str(procnr)]
if first:
if HIG ==0 :
bornlines+="generate "+linetmp+" QCD="+str(int(oras)+addalphas)+" QED="+str(orew)+" @"+str(procnr)+"\n"
else:
bornlines+="generate "+linetmp+" HIG="+str(HIG)+" QCD="+str(int(oras)+addalphas-2*HIG)+" QED="+str(int(orew)-HIG)+" @"+str(procnr)+"\n"
first=False
else:
if HIG ==0 :
bornlines+="add process "+linetmp+" QCD="+str(int(oras)+addalphas)+" QED="+str(orew)+" @"+str(procnr)+"\n"
else:
bornlines+="add process "+linetmp+" HIG="+str(HIG)+" QCD="+str(int(oras)+addalphas-2*HIG)+" QED="+str(int(orew)-HIG)+" @"+str(procnr)+"\n"
file.close()
first=True
file = open(virts, "r")
for line in file:
addalphas=len(line.split(" "))-minlegs
linetmp=line.rstrip()+" QCD="+str(int(oras)+addalphas)+" QED="+str(int(orew))+" [ virt=QCD ]"
procnr+=1
virtlist+=[str(procnr)]
if first:
virtlines+="generate "+linetmp+" @"+str(procnr)+"\n"
first=False
else:
virtlines+="add process "+linetmp+" @"+str(procnr)+"\n"
file.close()
fileproc.write(bornlines)
if virtlines!="" and bornlines!="":
fileproc.write("output matchbox MG5 --postpone_model\n")
fileproc.write(virtlines)
fileproc.write("output matchbox MG5 -f\n")
fileproc.close()
return bornlist,virtlist
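# For orientation, the proc.dat written by fillprocs has the schematic form
# (the exact lines depend on the requested model, processes and orders):
#   set fortran_compiler @FC@ --no_save
#   import model <model>
#   generate <born process> QCD=<n> QED=<m> @1
#   add process <born process'> QCD=<n> QED=<m> @2
#   output matchbox MG5 --postpone_model   (only if both Born and virtual amplitudes are present)
#   generate <virt process> QCD=<n> QED=<m> [ virt=QCD ] @3
#   output matchbox MG5 -f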
def build_matchbox_tmp(pwd,buildpath,absolute_links):
cwd=os.getcwd()
os.chdir(pwd)
mkdir_p(pwd+"/Herwig-scratch/MG_tmp/")
if not buildpath.startswith("/"):
buildpath=pwd+"/"+buildpath.lstrip("./")
if not buildpath.endswith("/"):
buildpath=buildpath + "/"
resources=glob.glob(buildpath +"MG5/SubProcesses/MadLoop5_resources/*")
resources+=glob.glob(buildpath +"MG5/Cards/*")
resources+=glob.glob(buildpath +"MG5/Cards/SubProcesses/*")
for i in resources:
if not os.path.isfile( pwd+"/Herwig-scratch/MG_tmp/"+os.path.basename(i)) \
and not os.path.islink( pwd+"/Herwig-scratch/MG_tmp/"+os.path.basename(i)):
if not absolute_links:
source=os.path.dirname(i)
dest=pwd+"/Herwig-scratch/MG_tmp/"
os.chdir(dest)
os.symlink(os.path.relpath(source,dest)+"/"+os.path.basename(i),"./" + os.path.basename(i))
else:
os.symlink(i, pwd+"/Herwig-scratch/MG_tmp/"+os.path.basename(i))
os.chdir(cwd)
parser = OptionParser()
parser.add_option("-a", "--buildpath", dest="buildpath",help="Do not use this script. Only for Herwig internal use. ")
parser.add_option("-b", "--build", action="store_true", dest="build", default=True,help="Do not use this script. Only for Herwig internal use.")
parser.add_option("-c", "--madgraph", dest="madgraph",help="Do not use this script. Only for Herwig internal use.")
parser.add_option("-d", "--runpath", dest="runpath",help="Do not use this script. Only for Herwig internal use.")
parser.add_option("-e", "--model", dest="model",help="Do not use this script. Only for Herwig internal use.")
parser.add_option("-f", "--orderas", dest="orderas",help="Do not use this script. Only for Herwig internal use.")
parser.add_option("-g", "--orderew", dest="orderew",help="Do not use this script. Only for Herwig internal use.")
parser.add_option("-i", "--datadir",dest="datadir",help="Do not use this script. Only for Herwig internal use.")
parser.add_option("-I", "--includedir",dest="includedir",help="Do not use this script. Only for Herwig internal use.")
parser.add_option("-l", "--absolute-links",action="store_true", dest="absolute_links", default=False,\
help="Do not use this script. Only for Herwig internal use.")
(options, args) = parser.parse_args()
#parser = argparse.ArgumentParser()
#parser.add_argument('--buildpath', help='installpath')
#parser.add_argument('--build', help='build', action="store_true")
#parser.add_argument('--madgraph', help='madgraph_installpath')
#parser.add_argument('--runpath', help='runpath')
#parser.add_argument('--model', help='model')
#parser.add_argument('--orderas', help='orderas')
#parser.add_argument('--orderew', help='orderew')
#parser.add_argument('--datadir', help='datadir')
#args = parser.parse_args()
pwd=os.getcwd()
param_card=""
mkdir_p(pwd+"/Herwig-scratch/MG_tmp/")
if options.model=="loop_sm" or options.model=="heft":
if options.model=="loop_sm":
param_card="param_card.dat"
else:
param_card="param_card_"+options.model+".dat"
file = open("%s/MadGraphInterface/%s.in" % (options.datadir,param_card) , "r")
paramcard = file.read()
file.close()
file = open(options.runpath+"/"+param_card, "w")
params=open(options.runpath+"/MG-Parameter.dat", "r")
for line in params:
a=line.split()
paramcard=paramcard.replace(a[0],a[1])
params.close()
file.write(paramcard)
file.close()
elif options.model.startswith("/"):
os.system("python %s/write_param_card.py " % options.model)
else:
print "---------------------------------------------------------------"
print "---------------------------------------------------------------"
print "Warning: The model set for the MadGraph Interface "
print " needs its parameters to be set by hand."
print " Please fill the param_card_"+options.model+".dat"
print " with your favourite assumptions."
print " And make sure Herwig uses the same parameters."
print "---------------------------------------------------------------"
print "---------------------------------------------------------------"
if os.path.isfile(options.buildpath +"/MG5/Cards/param_card.dat") and not os.path.isfile(options.runpath+"/"+"param_card_"+options.model+".dat"):
shutil.copyfile(options.buildpath +"/MG5/Cards/param_card.dat", options.runpath+"/"+"param_card_"+options.model+".dat")
time.sleep(1)
if not os.path.isdir(options.buildpath):
print "The MadGraph install path did not exist. It has been created for you."
print "Just start 'Herwig read' again."
mkdir_p(options.buildpath)
exit()
os.chdir(options.buildpath)
if os.path.isfile("InterfaceMadGraph.so"):
build_matchbox_tmp(pwd,options.buildpath,options.absolute_links)
exit()
Bornlist,Virtlist=fillprocs(options.model,options.orderas,options.orderew)
if not options.madgraph and not os.path.isfile("InterfaceMadGraph.so"):
print "*** warning *** MadGraph build failed, check logfile for details"
+ print "Known issue: If this is your first NLO calculation with pure MadGraph amplitudes"
+ print " the CutTools compilation can result in an unusable configuration."
+ print " Please open $HERWIG_ENV/opt/madgraph/vendor/CutTools/makefile"
+ print " and add a \\ after FC=gfortran... in the ARGS variable."
+ print " Then run make clean && make in the CutTools folder. "
exit()
os.system("python "+options.madgraph+"/mg5_aMC proc.dat")
routines=[["","BORN(momenta,hel)"],
["","SLOOPMATRIX(momenta,virt)"],
["","GET_JAMP(color,Jamp)"],
["","GET_LNJAMP(color,Jamp)"],
["","GET_NCOL(color)"],
["","GET_NCOLOR(i,j,color)"]]
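# Each entry is [accumulated Fortran text, wrapper routine to call]; the loop below
# assembles a SELECT CASE (proc) dispatch that calls the corresponding MG5_<id>_*
# routine for every generated Born/virtual process id, and the text is then
# substituted into the InterfaceMadGraph.f template copied from the data directory.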
for routine in routines:
for i in Bornlist + list(set(Virtlist) - set(Bornlist)):
if routine[1]=="Virt(amp)" or routine[1]=="SLOOPMATRIX(momenta,virt)" and i not in Virtlist:
continue
if routine[0]=="":
routine[0]+=" SELECT CASE (proc) \n"
routine[0]+=" CASE("+i+") \n CALL "
routine[0]+= "MG5_"+i+"_"+routine[1]+"\n"
else:
routine[0]+=" CASE("+i+") \n"\
" CALL "
routine[0]+= "MG5_"+i+"_"+routine[1]+"\n"
if routine[0]!="":
routine[0]+=" CASE DEFAULT\n"
routine[0]+=" WRITE(*,*) '##W02A WARNING No id found '\n"
routine[0]+=" END SELECT \n"
shutil.copyfile("%s/MadGraphInterface/InterfaceMadGraph.f.in" % options.datadir, "InterfaceMadGraph.f")
replacetext("InterfaceMadGraph.f","MG_CalculateBORNtxt",routines[0][0])
replacetext("InterfaceMadGraph.f","MG_CalculateVIRTtxt",routines[1][0])
replacetext("InterfaceMadGraph.f","MG_Jamptxt", routines[2][0])
replacetext("InterfaceMadGraph.f","MG_LNJamptxt", routines[3][0])
replacetext("InterfaceMadGraph.f","MG_NColtxt", routines[4][0])
replacetext("InterfaceMadGraph.f","MG_ColourMattxt",routines[5][0])
MG_vxxxxxtxt=""
if routines[1][0]!="":
MG_vxxxxxtxt=""" subroutine MG_vxxxxx(p, n,inc,VC)
$ bind(c, name='MG_vxxxxx')
IMPLICIT NONE
double precision p(0:3)
double precision n(0:3)
INTEGER inc
double precision VC(0:7)
double complex VCtmp(8)
call vxxxxx(p, 0d0,1,inc ,VCtmp)
VC(0)= real(VCtmp(5))
VC(1)=aimag(VCtmp(5))
VC(2)= real(VCtmp(6))
VC(3)=aimag(VCtmp(6))
VC(4)= real(VCtmp(7))
VC(5)=aimag(VCtmp(7))
VC(6)= real(VCtmp(8))
VC(7)=aimag(VCtmp(8))
END"""
else:
MG_vxxxxxtxt=""" subroutine MG_vxxxxx(p, n,inc,VC)
$ bind(c, name='MG_vxxxxx')
IMPLICIT NONE
double precision p(0:3)
double precision n(0:3)
INTEGER inc
double precision VC(0:7)
double complex VCtmp(6)
call vxxxxx(p, 0d0,1,inc ,VCtmp)
VC(0)= real(VCtmp(3))
VC(1)=aimag(VCtmp(3))
VC(2)= real(VCtmp(4))
VC(3)=aimag(VCtmp(4))
VC(4)= real(VCtmp(5))
VC(5)=aimag(VCtmp(5))
VC(6)= real(VCtmp(6))
VC(7)=aimag(VCtmp(6))
END"""
replacetext("InterfaceMadGraph.f","MG_vxxxxxtxt",MG_vxxxxxtxt)
make=" "
fortanfiles=glob.glob('*/*/*.f')+glob.glob('*/*/*/*.f')
for i in fortanfiles:
if "check_sa" not in i:
if not os.path.islink(i):
make += " "+i+"\\\n "
incfiles=glob.glob('*/*/*.inc')+glob.glob('*/*/*/*.inc')
coefdir=""
for i in incfiles:
if "nexternal.inc" in i:
coefdir+=" -I"+i.replace("nexternal.inc"," ")
file=open("makefile","w")
file.write("include MG5/Source/make_opts ")
if Virtlist!=[]:
file.write("\nLIBDIR = MG5/lib\nLINKLIBS = -L$(LIBDIR) -lcts -liregi -L$(LIBDIR)/golem95_lib -lgolem")
file.write("\nLIBS = $(LIBDIR)libcts.$(libext) $(LIBDIR)libgolem.$(libext) $(LIBDIR)libiregi.$(libext)")
file.write("\nPROCESS= InterfaceMadGraph.f "+make+"\n\nall: \n\t @FC@ @FFLAGS@ -w -fbounds-check -ffixed-line-length-132 -fPIC -fno-f2c -shared -s -o InterfaceMadGraph.so -IMG5/SubProcesses/" )
if Virtlist!=[]:
file.write(" -IMG5/lib/golem95_include ")
# Find all .mod files also in /usr/include if golem was built there.
# There can be an error message in the MadGraph output asking to add the golem include path to the makefiles.
# Usually MadGraph finds the path if its Golem was built in a separate directory.
# Our bootstrap script installs golem with gosam beside boost. Here MadGraph creates a link (-> error message).
# If we can find the .mod files easily the user doesn't need to change the makefiles.
moddirs=finddirs('*.mod',options.includedir)
for moddir in moddirs:
file.write(" -I%s " % moddir)
if os.path.isdir("/usr/include"):
moddirs=finddirs('*.mod',"/usr/include")
for moddir in moddirs:
file.write(" -I%s " % moddir)
if coefdir != "":
file.write(coefdir)
file.write(" $(PROCESS) $(LINKLIBS) ")
file.close()
os.chdir(pwd)
os.chdir(options.buildpath)
replacetext("MG5/Source/MODEL/lha_read.f", "ident_card.dat","Herwig-scratch/MG_tmp/ident_card.dat")
replacetext("MG5/Source/MODEL/lha_read.f", "param.log","Herwig-scratch/MG_tmp/param.log")
if Virtlist!=[]:
replacetext("MG5/SubProcesses/MadLoopCommons.f", "PREFIX='./'","PREFIX='./Herwig-scratch/MG_tmp/'")
os.system("make")
build_matchbox_tmp(pwd,options.buildpath,options.absolute_links)
diff --git a/MatrixElement/Matchbox/MatchboxFactory.cc b/MatrixElement/Matchbox/MatchboxFactory.cc
--- a/MatrixElement/Matchbox/MatchboxFactory.cc
+++ b/MatrixElement/Matchbox/MatchboxFactory.cc
@@ -1,2109 +1,2226 @@
// -*- C++ -*-
//
// MatchboxFactory.cc is a part of Herwig - A multi-purpose Monte Carlo event generator
// Copyright (C) 2002-2012 The Herwig Collaboration
//
// Herwig is licenced under version 2 of the GPL, see COPYING for details.
// Please respect the MCnet academic guidelines, see GUIDELINES for details.
//
//
// This is the implementation of the non-inlined, non-templated member
// functions of the MatchboxFactory class.
//
#include "MatchboxFactory.h"
#include "ThePEG/Interface/ClassDocumentation.h"
#include "ThePEG/Utilities/DescribeClass.h"
#include "ThePEG/Interface/Reference.h"
#include "ThePEG/Interface/RefVector.h"
#include "ThePEG/Interface/Switch.h"
#include "ThePEG/Interface/Parameter.h"
#include "ThePEG/Interface/Command.h"
#include "ThePEG/Utilities/StringUtils.h"
#include "ThePEG/Repository/Repository.h"
#include "ThePEG/Repository/EventGenerator.h"
#include "ThePEG/Handlers/EventHandler.h"
#include "ThePEG/Handlers/SamplerBase.h"
#include "Herwig/MatrixElement/Matchbox/Base/DipoleRepository.h"
#include "Herwig/MatrixElement/Matchbox/Utility/SU2Helper.h"
#include "Herwig/Utilities/RunDirectories.h"
#include <boost/progress.hpp>
#include <boost/filesystem.hpp>
#include <iterator>
using std::ostream_iterator;
using namespace Herwig;
using std::ostream_iterator;
MatchboxFactory::MatchboxFactory()
: SubProcessHandler(), theNLight(0),
theOrderInAlphaS(0), theOrderInAlphaEW(0),
theBornContributions(true), theVirtualContributions(true),
theRealContributions(true), theIndependentVirtuals(false),
theIndependentPKs(false),
theSubProcessGroups(false),
theFactorizationScaleFactor(1.0), theRenormalizationScaleFactor(1.0),
theFixedCouplings(false), theFixedQEDCouplings(false), theVetoScales(false),
theDipoleSet(0), theVerbose(false), theDiagramWeightVerbose(false),
theDiagramWeightVerboseNBins(200),
theInitVerbose(false),
theSubtractionData(""), theSubtractionPlotType(1), theSubtractionScatterPlot(false),
thePoleData(""), theRealEmissionScales(false), theAllProcesses(false),
theMECorrectionsOnly(false), theLoopSimCorrections(false), ranSetup(false),
theFirstPerturbativePDF(true), theSecondPerturbativePDF(true),
inProductionMode(false), theSpinCorrelations(false),theAlphaParameter(1.),
theEnforceChargeConservation(true), theEnforceColourConservation(false),
theEnforceLeptonNumberConservation(false), theEnforceQuarkNumberConservation(false),
theLeptonFlavourDiagonal(false), theQuarkFlavourDiagonal(false) {}
MatchboxFactory::~MatchboxFactory() {}
bool& MatchboxFactory::theIsMatchboxRun() {
static bool flag = false;
return flag;
}
IBPtr MatchboxFactory::clone() const {
return new_ptr(*this);
}
IBPtr MatchboxFactory::fullclone() const {
return new_ptr(*this);
}
void MatchboxFactory::prepareME(Ptr<MatchboxMEBase>::ptr me) {
Ptr<MatchboxAmplitude>::ptr amp =
dynamic_ptr_cast<Ptr<MatchboxAmplitude>::ptr>((*me).amplitude());
me->matchboxAmplitude(amp);
me->factory(this);
if ( phasespace() && !me->phasespace() )
me->phasespace(phasespace());
if ( scaleChoice() && !me->scaleChoice() )
me->scaleChoice(scaleChoice());
if ( !reweighters().empty() ) {
for ( vector<ReweightPtr>::const_iterator rw = reweighters().begin();
rw != reweighters().end(); ++rw )
me->addReweighter(*rw);
}
if ( !preweighters().empty() ) {
for ( vector<ReweightPtr>::const_iterator rw = preweighters().begin();
rw != preweighters().end(); ++rw )
me->addPreweighter(*rw);
}
}
string pid(const PDVector& key) {
ostringstream res;
res << "[" << key[0]->PDGName() << ","
<< key[1]->PDGName() << "->";
for ( PDVector::const_iterator k =
key.begin() + 2; k != key.end(); ++k )
res << (**k).PDGName() << (k != --key.end() ? "," : "");
res << "]";
return res.str();
}
vector<Ptr<MatchboxMEBase>::ptr> MatchboxFactory::
makeMEs(const vector<string>& proc, unsigned int orderas, bool virt) {
generator()->log() << "determining subprocesses for ";
copy(proc.begin(),proc.end(),ostream_iterator<string>(generator()->log()," "));
generator()->log() << "\n" << flush;
map<Ptr<MatchboxAmplitude>::ptr,set<Process> > ampProcs;
map<Process,set<Ptr<MatchboxAmplitude>::ptr> > procAmps;
set<PDVector> processes = makeSubProcesses(proc);
// TODO Fix me for 3.0.x
// At the moment we have trouble with processes with no coloured
// legs, so they will not be supported
set<PDVector> colouredProcesses;
for ( set<PDVector>::const_iterator pr = processes.begin();
pr != processes.end(); ++pr ) {
for ( PDVector::const_iterator pp = pr->begin();
pp != pr->end(); ++pp ) {
if ( (**pp).coloured() ) {
colouredProcesses.insert(*pr);
break;
}
}
}
if ( colouredProcesses.size() != processes.size() ) {
generator()->log()
<< "Some or all of the generated subprocesses do not contain coloured legs.\n"
<< "Processes of this kind are currently not supported.\n" << flush;
}
if ( colouredProcesses.empty() ) {
throw Exception() << "MatchboxFactory::makeMEs(): No processes with coloured legs have been found. "
<< "This run will be aborted." << Exception::runerror;
}
processes = colouredProcesses;
// end unsupported processes
// detect external particles with non-zero width for the hard process
bool trouble = false;
string troubleMaker;
for ( set<PDVector>::const_iterator pr = processes.begin();
pr != processes.end(); ++pr ) {
for ( PDVector::const_iterator pp = pr->begin();
pp != pr->end(); ++pp ) {
if ( (**pp).hardProcessWidth() != ZERO ) {
trouble = true;
troubleMaker = (**pp).PDGName();
break;
}
}
}
if ( trouble ) {
throw Exception()
<< "MatchboxFactory::makeMEs(): Particle '"
<< troubleMaker << "' appears as external\nprocess leg with non-zero "
<< "width to be used in the hard process calculation.\n"
<< "Please check your setup and consider setting HardProcessWidth to zero."
<< Exception::runerror;
}
vector<Ptr<MatchboxAmplitude>::ptr> matchAmplitudes;
unsigned int lowestAsOrder =
allProcesses() ? 0 : orderas;
unsigned int highestAsOrder = orderas;
unsigned int lowestAeOrder =
allProcesses() ? 0 : orderInAlphaEW();
unsigned int highestAeOrder = orderInAlphaEW();
for ( unsigned int oas = lowestAsOrder; oas <= highestAsOrder; ++oas ) {
for ( unsigned int oae = lowestAeOrder; oae <= highestAeOrder; ++oae ) {
for ( vector<Ptr<MatchboxAmplitude>::ptr>::const_iterator amp
= amplitudes().begin(); amp != amplitudes().end(); ++amp ) {
if ( !theSelectedAmplitudes.empty() ) {
if ( find(theSelectedAmplitudes.begin(),theSelectedAmplitudes.end(),*amp)
== theSelectedAmplitudes.end() )
continue;
}
if ( !theDeselectedAmplitudes.empty() ) {
if ( find(theDeselectedAmplitudes.begin(),theDeselectedAmplitudes.end(),*amp)
!= theDeselectedAmplitudes.end() )
continue;
}
(**amp).orderInGs(oas);
(**amp).orderInGem(oae);
if ( (**amp).orderInGs() != oas ||
(**amp).orderInGem() != oae ) {
continue;
}
matchAmplitudes.push_back(*amp);
}
}
}
size_t combinations = processes.size()*matchAmplitudes.size();
size_t procCount = 0;
generator()->log() << "building matrix elements." << flush;
boost::progress_display * progressBar =
new boost::progress_display(combinations,generator()->log());
for ( unsigned int oas = lowestAsOrder; oas <= highestAsOrder; ++oas ) {
for ( unsigned int oae = lowestAeOrder; oae <= highestAeOrder; ++oae ) {
for ( vector<Ptr<MatchboxAmplitude>::ptr>::const_iterator amp
= matchAmplitudes.begin(); amp != matchAmplitudes.end(); ++amp ) {
(**amp).orderInGs(oas);
(**amp).orderInGem(oae);
for ( set<PDVector>::const_iterator p = processes.begin();
p != processes.end(); ++p ) {
++(*progressBar);
if ( !(**amp).canHandle(*p,this,virt) )
continue;
if ( (**amp).isExternal() )
externalAmplitudes().insert(*amp);
++procCount;
Process proc(*p,oas,oae);
ampProcs[*amp].insert(proc);
procAmps[proc].insert(*amp);
}
}
}
}
delete progressBar;
generator()->log() << flush;
bool clash = false;
for ( map<Process,set<Ptr<MatchboxAmplitude>::ptr> >::const_iterator check =
procAmps.begin(); check != procAmps.end(); ++check ) {
if ( check->second.size() > 1 ) {
clash = true;
generator()->log() << "Several different amplitudes have been found for: "
<< check->first.legs[0]->PDGName() << " "
<< check->first.legs[1]->PDGName() << " -> ";
for ( PDVector::const_iterator p = check->first.legs.begin() + 2;
p != check->first.legs.end(); ++p )
generator()->log() << (**p).PDGName() << " ";
generator()->log() << "at alpha_s^" << check->first.orderInAlphaS
<< " and alpha_ew^" << check->first.orderInAlphaEW
<< "\n";
generator()->log() << "The following amplitudes claim responsibility:\n";
for ( set<Ptr<MatchboxAmplitude>::ptr>::const_iterator a = check->second.begin();
a != check->second.end(); ++a ) {
generator()->log() << (**a).name() << " ";
}
generator()->log() << "\n";
}
}
if ( clash ) {
throw Exception() << "MatchboxFactory: Ambiguous amplitude setup - please check your input files.\n"
<< "To avoid this problem use the SelectAmplitudes or DeselectAmplitudes interfaces.\n"
<< Exception::runerror;
}
bool canDoSpinCorrelations = true;
vector<Ptr<MatchboxMEBase>::ptr> res;
for ( map<Ptr<MatchboxAmplitude>::ptr,set<Process> >::const_iterator
ap = ampProcs.begin(); ap != ampProcs.end(); ++ap ) {
canDoSpinCorrelations &= ap->first->canFillRhoMatrix();
for ( set<Process>::const_iterator m = ap->second.begin();
m != ap->second.end(); ++m ) {
Ptr<MatchboxMEBase>::ptr me = ap->first->makeME(m->legs);
me->subProcess() = *m;
me->amplitude(ap->first);
me->matchboxAmplitude(ap->first);
prepareME(me);
string pname = "ME" + ap->first->name() + pid(m->legs);
if ( ! (generator()->preinitRegister(me,pname) ) )
throw Exception() << "MatchboxFactory: Matrix element " << pname << " already existing."
<< Exception::runerror;
if ( me->diagrams().empty() )continue;
res.push_back(me);
if ( theFirstPerturbativePDF )
theIncoming.insert(m->legs[0]->id());
if ( theSecondPerturbativePDF )
theIncoming.insert(m->legs[1]->id());
}
}
if ( spinCorrelations() && !canDoSpinCorrelations ) {
generator()->log() << "Warning: Spin correlations have been requested, but no amplitude is "
<< "capable of performing these.\n";
theSpinCorrelations = false;
}
generator()->log() << "created "
<< procCount << " subprocesses.\n";
generator()->log() << "--------------------------------------------------------------------------------\n"
<< flush;
return res;
}
int MatchboxFactory::orderOLPProcess(const Process& proc,
Ptr<MatchboxAmplitude>::tptr amp,
int type) {
map<pair<Process,int>,int>& procs =
olpProcesses()[amp];
map<pair<Process,int>,int>::const_iterator it =
procs.find(make_pair(proc,type));
if ( it != procs.end() )
return it->second;
int id = procs.size();
procs[make_pair(proc,type)] = id + 1;
return id + 1;
}
void MatchboxFactory::productionMode() {
if ( inProductionMode )
return;
if ( !bornContributions() && !virtualContributions() && !realContributions() )
throw Exception() << "MatchboxFactory: At least one cross section contribution needs to be enabled.\n"
<< "Please check your setup.\n"
<< Exception::runerror;
bool needTrueVirtuals =
virtualContributions() && !meCorrectionsOnly() && !loopSimCorrections();
for ( vector<Ptr<MatchboxAmplitude>::ptr>::iterator amp
= amplitudes().begin(); amp != amplitudes().end(); ++amp ) {
if ( !needTrueVirtuals && (**amp).oneLoopAmplitude() ) {
Repository::clog() << "One-loop contributions from '"
<< (**amp).name()
<< "' are not required and will be disabled.\n"
<< flush;
(**amp).disableOneLoop();
}
}
if ( subtractionData() != "" && !subProcessGroups() ) {
throw Exception() << "MatchboxFactory: Plain NLO settings are required for subtraction checks.\n"
<< "Please check your setup.\n"
<< Exception::runerror;
}
if ( showerApproximation() && !virtualContributions() && !realContributions() ) {
Repository::clog() << "Warning: Matching requested for LO run. Matching disabled.\n" << flush;
showerApproximation(Ptr<ShowerApproximation>::tptr());
}
if ( showerApproximation() && (subtractionData() != "" || subProcessGroups()) ) {
Repository::clog() << "Warning: Matching requested for plain NLO run. Matching disabled.\n" << flush;
showerApproximation(Ptr<ShowerApproximation>::tptr());
}
if ( showerApproximation() ) {
if ( spinCorrelations() && !showerApproximation()->hasSpinCorrelations() ) {
Repository::clog() << "Warning: Spin correlations have been requested but the matching "
<< "object is not capable of these. Spin correlations will be turned off.\n"
<< flush;
theSpinCorrelations = false;
}
}
inProductionMode = true;
}
void MatchboxFactory::setup() {
useMe();
if ( !ranSetup ) {
if ( !inProductionMode )
throw Exception() << "MatchboxFactory: The MatchboxFactory object '"
<< name() << "' has not been switched to production mode.\n"
<< "Did you use 'do "
<< name() << ":ProductionMode' before isolating the event generator?\n"
<< Exception::runerror;
olpProcesses().clear();
externalAmplitudes().clear();
theHighestVirtualsize = 0;
theIncoming.clear();
bool needTrueVirtuals =
virtualContributions() && !meCorrectionsOnly() && !loopSimCorrections();
for ( vector<Ptr<MatchboxAmplitude>::ptr>::iterator amp
= amplitudes().begin(); amp != amplitudes().end(); ++amp )
(**amp).factory(this);
if ( bornMEs().empty() ) {
if ( particleGroups().find("j") == particleGroups().end() )
throw Exception() << "MatchboxFactory: Could not find a jet particle group named 'j'"
<< Exception::runerror;
// rebind the particle data objects
for ( map<string,PDVector>::iterator g = particleGroups().begin();
g != particleGroups().end(); ++g )
for ( PDVector::iterator p = g->second.begin();
p != g->second.end(); ++p ) {
#ifndef NDEBUG
long checkid = (**p).id();
#endif
*p = getParticleData((**p).id());
assert((**p).id() == checkid);
}
const PDVector& partons = particleGroups()["j"];
unsigned int nl = 0;
for ( PDVector::const_iterator p = partons.begin();
p != partons.end(); ++p ) {
if ( abs((**p).id()) < 7 && (**p).hardProcessMass() == ZERO )
++nl;
if ( (**p).id() > 0 && (**p).id() < 7 && (**p).hardProcessMass() == ZERO )
nLightJetVec( (**p).id() );
if ( (**p).id() > 0 && (**p).id() < 7 && (**p).hardProcessMass() != ZERO )
nHeavyJetVec( (**p).id() );
}
nLight(nl/2);
if ( particleGroups().find("p") == particleGroups().end() )
throw Exception() << "MatchboxFactory: Could not find a hadron particle group named 'p'"
<< Exception::runerror;
const PDVector& partonsInP = particleGroups()["p"];
for ( PDVector::const_iterator pip = partonsInP.begin();
pip != partonsInP.end(); ++pip ) {
if ( (**pip).id() > 0 && (**pip).id() < 7 && (**pip).hardProcessMass() == ZERO )
nLightProtonVec( (**pip).id() );
}
vector<Ptr<MatchboxMEBase>::ptr> mes;
for ( vector<vector<string> >::const_iterator p = processes.begin();
p != processes.end(); ++p ) {
if( needTrueVirtuals ) {
theHighestVirtualsize = max(theHighestVirtualsize,(int((*p).size())));
}
mes = makeMEs(*p,orderInAlphaS(),needTrueVirtuals);
copy(mes.begin(),mes.end(),back_inserter(bornMEs()));
if ( realContributions() || meCorrectionsOnly() ||
(showerApproximation() && virtualContributions()) ||
(showerApproximation() && loopSimCorrections()) ) {
if ( realEmissionProcesses.empty() ) {
vector<string> rproc = *p;
rproc.push_back("j");
mes = makeMEs(rproc,orderInAlphaS()+1,false);
copy(mes.begin(),mes.end(),back_inserter(realEmissionMEs()));
}
}
}
if ( realContributions() || meCorrectionsOnly() ||
(showerApproximation() && virtualContributions()) ||
(showerApproximation() && loopSimCorrections()) ) {
if ( !realEmissionProcesses.empty() ) {
for ( vector<vector<string> >::const_iterator q =
realEmissionProcesses.begin(); q != realEmissionProcesses.end(); ++q ) {
mes = makeMEs(*q,orderInAlphaS()+1,false);
copy(mes.begin(),mes.end(),back_inserter(realEmissionMEs()));
}
}
}
}
if ( loopInducedMEs().empty() ) {
for ( vector<vector<string> >::const_iterator p = loopInducedProcesses.begin();
p != loopInducedProcesses.end(); ++p ) {
vector<Ptr<MatchboxMEBase>::ptr> mes = makeMEs(*p,orderInAlphaS(),false);
copy(mes.begin(),mes.end(),back_inserter(loopInducedMEs()));
}
}
if( bornMEs().empty() && realEmissionMEs().empty() && loopInducedMEs().empty() )
throw Exception() << "MatchboxFactory: No matrix elements have been found.\n\
Please check whether your orders of Alpha_s and Alpha_ew have the right values.\n"
<< Exception::runerror;
// check if we have virtual contributions
bool haveVirtuals = true;
// check DR conventions of virtual contributions
bool virtualsAreDR = false;
bool virtualsAreCDR = false;
// check finite term conventions of virtual contributions
bool virtualsAreCS = false;
bool virtualsAreBDK = false;
bool virtualsAreExpanded = false;
// renormalization scheme
bool virtualsAreDRbar = false;
// check and prepare the Born and virtual matrix elements
for ( vector<Ptr<MatchboxMEBase>::ptr>::iterator born
= bornMEs().begin(); born != bornMEs().end(); ++born ) {
prepareME(*born);
haveVirtuals &= (**born).haveOneLoop();
if ( needTrueVirtuals ) {
if ( (**born).haveOneLoop() ) {
virtualsAreDRbar |= (**born).isDRbar();
virtualsAreDR |= (**born).isDR();
virtualsAreCDR |= !(**born).isDR();
virtualsAreCS |= (**born).isCS();
virtualsAreBDK |= (**born).isBDK();
virtualsAreExpanded |= (**born).isExpanded();
}
}
}
// prepare the loop induced matrix elements
for ( vector<Ptr<MatchboxMEBase>::ptr>::iterator looped
= loopInducedMEs().begin(); looped != loopInducedMEs().end(); ++looped ) {
prepareME(*looped);
}
if ( needTrueVirtuals ) {
// check the additional insertion operators
if ( !virtuals().empty() )
haveVirtuals = true;
for ( vector<Ptr<MatchboxInsertionOperator>::ptr>::const_iterator virt
= virtuals().begin(); virt != virtuals().end(); ++virt ) {
virtualsAreDRbar |= (**virt).isDRbar();
virtualsAreDR |= (**virt).isDR();
virtualsAreCDR |= !(**virt).isDR();
virtualsAreCS |= (**virt).isCS();
virtualsAreBDK |= (**virt).isBDK();
virtualsAreExpanded |= (**virt).isExpanded();
}
// check for consistent conventions on virtuals, if we are to include them
if ( virtualContributions() ) {
if ( !haveVirtuals ) {
throw Exception() << "MatchboxFactory: Could not find amplitudes for all virtual contributions needed.\n"
<< Exception::runerror;
}
if ( virtualsAreDR && virtualsAreCDR ) {
throw Exception() << "MatchboxFactory: Virtual corrections use inconsistent regularization schemes.\n"
<< Exception::runerror;
}
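// Exactly one finite-term convention (CS, BDK or expanded) must be in use;
// mixed or missing conventions are rejected by the check below.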
if ( (virtualsAreCS && virtualsAreBDK) ||
(virtualsAreCS && virtualsAreExpanded) ||
(virtualsAreBDK && virtualsAreExpanded) ||
(!virtualsAreCS && !virtualsAreBDK && !virtualsAreExpanded) ) {
throw Exception() << "MatchboxFactory: Virtual corrections use inconsistent conventions on finite terms.\n"
<< Exception::runerror;
}
}
// prepare dipole insertion operators
if ( virtualContributions() ) {
for ( vector<Ptr<MatchboxInsertionOperator>::ptr>::const_iterator virt
= DipoleRepository::insertionIOperators(dipoleSet()).begin();
virt != DipoleRepository::insertionIOperators(dipoleSet()).end(); ++virt ) {
(**virt).factory(this);
if ( virtualsAreDRbar )
(**virt).useDRbar();
if ( virtualsAreDR )
(**virt).useDR();
else
(**virt).useCDR();
if ( virtualsAreCS )
(**virt).useCS();
if ( virtualsAreBDK )
(**virt).useBDK();
if ( virtualsAreExpanded )
(**virt).useExpanded();
}
for ( vector<Ptr<MatchboxInsertionOperator>::ptr>::const_iterator virt
= DipoleRepository::insertionPKOperators(dipoleSet()).begin();
virt != DipoleRepository::insertionPKOperators(dipoleSet()).end(); ++virt ) {
(**virt).factory(this);
if ( virtualsAreDRbar )
(**virt).useDRbar();
if ( virtualsAreDR )
(**virt).useDR();
else
(**virt).useCDR();
if ( virtualsAreCS )
(**virt).useCS();
if ( virtualsAreBDK )
(**virt).useBDK();
if ( virtualsAreExpanded )
(**virt).useExpanded();
}
}
}
// prepare the real emission matrix elements
if ( realContributions() || meCorrectionsOnly() ||
(showerApproximation() && virtualContributions()) ||
(showerApproximation() && loopSimCorrections()) ) {
for ( vector<Ptr<MatchboxMEBase>::ptr>::iterator real
= realEmissionMEs().begin(); real != realEmissionMEs().end(); ++real ) {
prepareME(*real);
}
}
// start creating matrix elements
MEs().clear();
// setup born and virtual contributions
if ( bornContributions() || virtualContributions() ) {
generator()->log() << "preparing Born"
<< (virtualContributions() ? " and virtual" : "")
<< " matrix elements.\n" << flush;
}
if ( (bornContributions() && !virtualContributions()) ||
(bornContributions() && meCorrectionsOnly()) ||
(bornContributions() && virtualContributions() && independentVirtuals()) ) {
for ( vector<Ptr<MatchboxMEBase>::ptr>::iterator born
= bornMEs().begin(); born != bornMEs().end(); ++born ) {
if ( (**born).onlyOneLoop() )
continue;
Ptr<MatchboxMEBase>::ptr bornme = (**born).cloneMe();
string pname = fullName() + "/" + (**born).name();
if ( virtualContributions() && independentVirtuals() )
pname += ".Born";
if ( ! (generator()->preinitRegister(bornme,pname) ) )
throw Exception() << "MatchboxFactory: Matrix element " << pname << " already existing."
<< Exception::runerror;
if ( bornme->isOLPTree() ) {
int id = orderOLPProcess(bornme->subProcess(),
(**born).matchboxAmplitude(),
ProcessType::treeME2);
bornme->olpProcess(ProcessType::treeME2,id);
}
bornme->needsNoCorrelations();
bornme->cloneDependencies();
MEs().push_back(bornme);
}
}
if ( bornContributions() && !loopInducedMEs().empty() ) {
for ( vector<Ptr<MatchboxMEBase>::ptr>::iterator looped
= loopInducedMEs().begin(); looped != loopInducedMEs().end(); ++looped ) {
Ptr<MatchboxMEBase>::ptr loopme = (**looped).cloneMe();
string pname = fullName() + "/" + (**looped).name() + ".LoopInduced";
if ( ! (generator()->preinitRegister(loopme,pname) ) )
throw Exception() << "MatchboxFactory: Matrix element " << pname << " already existing."
<< Exception::runerror;
if ( loopme->isOLPTree() ) {
int id = orderOLPProcess(loopme->subProcess(),
(**looped).matchboxAmplitude(),
ProcessType::loopInducedME2);
loopme->olpProcess(ProcessType::loopInducedME2,id);
}
loopme->needsNoCorrelations();
loopme->cloneDependencies();
MEs().push_back(loopme);
}
}
if ( needTrueVirtuals ) {
bornVirtualMEs().clear();
boost::progress_display * progressBar =
new boost::progress_display(bornMEs().size(),generator()->log());
if ( thePoleData != "" )
if ( thePoleData[thePoleData.size()-1] != '/' )
thePoleData += "/";
for ( vector<Ptr<MatchboxMEBase>::ptr>::iterator born
= bornMEs().begin(); born != bornMEs().end(); ++born ) {
Ptr<MatchboxMEBase>::ptr nlo = (**born).cloneMe();
string pname = fullName() + "/" + (**born).name();
if ( !independentVirtuals() && !(!bornContributions() && virtualContributions()) )
pname += ".BornVirtual";
else if ( independentPKs() && !nlo->onlyOneLoop() )
pname += ".VirtualVI";
else
pname += ".Virtual";
if ( ! (generator()->preinitRegister(nlo,pname) ) )
throw Exception() << "MatchboxFactory: NLO ME " << pname << " already existing."
<< Exception::runerror;
nlo->virtuals().clear();
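// Collect the insertion operators relevant for this process: any user-supplied
// virtuals, the I operator from the dipole repository and, unless the P and K
// operators are handled as separate VirtualPK subprocesses, also the PK operators.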
if ( !nlo->onlyOneLoop() ) {
for ( vector<Ptr<MatchboxInsertionOperator>::ptr>::const_iterator virt
= virtuals().begin(); virt != virtuals().end(); ++virt ) {
if ( (**virt).apply((**born).diagrams().front()->partons()) )
nlo->virtuals().push_back(*virt);
}
for ( vector<Ptr<MatchboxInsertionOperator>::ptr>::const_iterator virt
= DipoleRepository::insertionIOperators(dipoleSet()).begin();
virt != DipoleRepository::insertionIOperators(dipoleSet()).end(); ++virt ) {
if ( (**virt).apply((**born).diagrams().front()->partons()) )
nlo->virtuals().push_back(*virt);
}
if ( !independentVirtuals() || ( independentVirtuals() && !independentPKs() ) ) {
for ( vector<Ptr<MatchboxInsertionOperator>::ptr>::const_iterator virt
= DipoleRepository::insertionPKOperators(dipoleSet()).begin();
virt != DipoleRepository::insertionPKOperators(dipoleSet()).end(); ++virt ) {
if ( (**virt).apply((**born).diagrams().front()->partons()) )
nlo->virtuals().push_back(*virt);
}
}
if ( nlo->virtuals().empty() )
throw Exception() << "MatchboxFactory: No insertion operators have been found for "
<< (**born).name() << "\n"
<< Exception::runerror;
if ( checkPoles() ) {
if ( !virtualsAreExpanded ) {
throw Exception()
<< "MatchboxFactory: Cannot check epsilon poles if virtuals are not in `expanded' convention.\n"
<< Exception::runerror;
}
}
}
if ( !bornContributions() || independentVirtuals() ) {
nlo->doOneLoopNoBorn();
} else {
nlo->doOneLoop();
}
if ( nlo->isOLPLoop() ) {
int id = orderOLPProcess(nlo->subProcess(),
(**born).matchboxAmplitude(),
ProcessType::oneLoopInterference);
nlo->olpProcess(ProcessType::oneLoopInterference,id);
if ( !nlo->onlyOneLoop() && nlo->needsOLPCorrelators() ) {
id = orderOLPProcess(nlo->subProcess(),
(**born).matchboxAmplitude(),
ProcessType::colourCorrelatedME2);
nlo->olpProcess(ProcessType::colourCorrelatedME2,id);
}
}
nlo->needsCorrelations();
nlo->cloneDependencies();
bornVirtualMEs().push_back(nlo);
MEs().push_back(nlo);
if ( independentVirtuals() && independentPKs() && !nlo->onlyOneLoop() ) {
Ptr<MatchboxMEBase>::ptr nlopk = (**born).cloneMe();
string pnamepk = fullName() + "/" + (**born).name();
pnamepk += ".VirtualPK";
if ( ! (generator()->preinitRegister(nlopk,pnamepk) ) )
throw Exception() << "MatchboxFactory: NLO ME " << pnamepk << " already existing."
<< Exception::runerror;
nlopk->virtuals().clear();
for ( vector<Ptr<MatchboxInsertionOperator>::ptr>::const_iterator virt
= DipoleRepository::insertionPKOperators(dipoleSet()).begin();
virt != DipoleRepository::insertionPKOperators(dipoleSet()).end(); ++virt ) {
if ( (**virt).apply((**born).diagrams().front()->partons()) )
nlopk->virtuals().push_back(*virt);
}
if ( !nlopk->virtuals().empty() ) {
nlopk->doOneLoopNoBorn();
nlopk->doOneLoopNoLoops();
if ( nlopk->isOLPLoop() ) {
int id = orderOLPProcess(nlopk->subProcess(),
(**born).matchboxAmplitude(),
ProcessType::treeME2);
nlopk->olpProcess(ProcessType::treeME2,id);
if ( nlopk->needsOLPCorrelators() ) {
id = orderOLPProcess(nlopk->subProcess(),
(**born).matchboxAmplitude(),
ProcessType::colourCorrelatedME2);
nlopk->olpProcess(ProcessType::colourCorrelatedME2,id);
}
}
nlopk->needsCorrelations();
nlopk->cloneDependencies();
bornVirtualMEs().push_back(nlopk);
MEs().push_back(nlopk);
}
}
++(*progressBar);
}
delete progressBar;
generator()->log() << "--------------------------------------------------------------------------------\n"
<< flush;
}
theSplittingDipoles.clear();
set<cPDVector> bornProcs;
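// Collect the distinct Born parton configurations; they are needed to assign
// splitting dipoles when the shower approximation requires a splitting generator.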
if ( showerApproximation() ) {
if ( showerApproximation()->needsSplittingGenerator() ) {
for ( vector<Ptr<MatchboxMEBase>::ptr>::iterator born
= bornMEs().begin(); born != bornMEs().end(); ++born )
for ( MEBase::DiagramVector::const_iterator d = (**born).diagrams().begin();
d != (**born).diagrams().end(); ++d )
bornProcs.insert((**d).partons());
}
}
if ( realContributions() || meCorrectionsOnly() ||
(showerApproximation() && virtualContributions()) ||
(showerApproximation() && loopSimCorrections()) ) {
generator()->log() << "preparing subtracted matrix elements.\n" << flush;
if ( theSubtractionData != "" )
if ( theSubtractionData[theSubtractionData.size()-1] != '/' )
theSubtractionData += "/";
subtractedMEs().clear();
for ( vector<Ptr<MatchboxMEBase>::ptr>::iterator born
= bornMEs().begin(); born != bornMEs().end(); ++born ) {
if ( (**born).onlyOneLoop() )
continue;
(**born).needsCorrelations();
if ( (**born).isOLPTree() ) {
int id = orderOLPProcess((**born).subProcess(),
(**born).matchboxAmplitude(),
ProcessType::colourCorrelatedME2);
(**born).olpProcess(ProcessType::colourCorrelatedME2,id);
bool haveGluon = false;
for ( PDVector::const_iterator p = (**born).subProcess().legs.begin();
p != (**born).subProcess().legs.end(); ++p )
if ( (**p).id() == 21 ) {
haveGluon = true;
break;
}
if ( haveGluon ) {
id = orderOLPProcess((**born).subProcess(),
(**born).matchboxAmplitude(),
ProcessType::spinColourCorrelatedME2);
(**born).olpProcess(ProcessType::spinColourCorrelatedME2,id);
}
if ( showerApproximation() ) {
id = orderOLPProcess((**born).subProcess(),
(**born).matchboxAmplitude(),
ProcessType::treeME2);
(**born).olpProcess(ProcessType::treeME2,id);
}
}
}
boost::progress_display * progressBar =
new boost::progress_display(realEmissionMEs().size(),generator()->log());
for ( vector<Ptr<MatchboxMEBase>::ptr>::iterator real
= realEmissionMEs().begin(); real != realEmissionMEs().end(); ++real ) {
Ptr<SubtractedME>::ptr sub = new_ptr(SubtractedME());
string pname = fullName() + "/" + (**real).name() + ".SubtractedReal";
if ( ! (generator()->preinitRegister(sub,pname) ) )
throw Exception() << "MatchboxFactory: Subtracted ME " << pname << " already existing."
<< Exception::runerror;
sub->factory(this);
(**real).needsNoCorrelations();
if ( (**real).isOLPTree() ) {
int id = orderOLPProcess((**real).subProcess(),
(**real).matchboxAmplitude(),
ProcessType::treeME2);
(**real).olpProcess(ProcessType::treeME2,id);
}
sub->head(*real);
sub->dependent().clear();
sub->getDipoles();
if ( sub->dependent().empty() ) {
// finite real contribution
if ( realContributions() ) {
Ptr<MatchboxMEBase>::ptr fme =
dynamic_ptr_cast<Ptr<MatchboxMEBase>::ptr>(sub->head())->cloneMe();
string qname = fullName() + "/" + (**real).name() + ".FiniteReal";
if ( ! (generator()->preinitRegister(fme,qname) ) )
throw Exception() << "MatchboxFactory: ME " << qname << " already existing."
<< Exception::runerror;
MEs().push_back(fme);
finiteRealMEs().push_back(fme);
}
sub->head(tMEPtr());
++(*progressBar);
continue;
}
if ( realEmissionScales() )
sub->doRealEmissionScales();
subtractedMEs().push_back(sub);
if ( realContributions() )
if ( !showerApproximation() || (showerApproximation() && showerApproximation()->hasHEvents()) )
MEs().push_back(sub);
if ( showerApproximation() ) {
if ( virtualContributions() && !meCorrectionsOnly() && !loopSimCorrections() ) {
Ptr<SubtractedME>::ptr subv = new_ptr(*sub);
string vname = sub->fullName() + ".SubtractionIntegral";
if ( ! (generator()->preinitRegister(subv,vname) ) )
throw Exception() << "MatchboxFactory: Subtracted ME " << vname << " already existing."
<< Exception::runerror;
subv->cloneDependencies(vname);
subv->doVirtualShowerSubtraction();
subtractedMEs().push_back(subv);
MEs().push_back(subv);
}
if ( loopSimCorrections() ) {
Ptr<SubtractedME>::ptr subv = new_ptr(*sub);
string vname = sub->fullName() + ".SubtractionIntegral";
if ( ! (generator()->preinitRegister(subv,vname) ) )
throw Exception() << "MatchboxFactory: Subtracted ME " << vname << " already existing."
<< Exception::runerror;
subv->cloneDependencies(vname);
subv->doLoopSimSubtraction();
subtractedMEs().push_back(subv);
MEs().push_back(subv);
}
sub->doRealShowerSubtraction();
if ( showerApproximation()->needsSplittingGenerator() )
for ( set<cPDVector>::const_iterator p = bornProcs.begin();
p != bornProcs.end(); ++p ) {
vector<Ptr<SubtractionDipole>::ptr> sdip = sub->splitDipoles(*p);
set<Ptr<SubtractionDipole>::ptr>& dips = theSplittingDipoles[*p];
copy(sdip.begin(),sdip.end(),inserter(dips,dips.begin()));
}
}
++(*progressBar);
}
delete progressBar;
generator()->log() << "--------------------------------------------------------------------------------\n"
<< flush;
}
if ( !theSplittingDipoles.empty() ) {
map<Ptr<SubtractionDipole>::ptr,Ptr<SubtractionDipole>::ptr> cloneMap;
for ( map<cPDVector,set<Ptr<SubtractionDipole>::ptr> >::const_iterator sd = theSplittingDipoles.begin();
sd != theSplittingDipoles.end(); ++sd ) {
for ( set<Ptr<SubtractionDipole>::ptr>::const_iterator d = sd->second.begin();
d != sd->second.end(); ++d ) {
cloneMap[*d] = Ptr<SubtractionDipole>::ptr();
}
}
for ( map<Ptr<SubtractionDipole>::ptr,Ptr<SubtractionDipole>::ptr>::iterator cd =
cloneMap.begin(); cd != cloneMap.end(); ++cd ) {
Ptr<SubtractionDipole>::ptr cloned = cd->first->cloneMe();
string dname = cd->first->fullName() + ".splitting";
if ( ! (generator()->preinitRegister(cloned,dname)) )
throw Exception() << "MatchboxFactory: Dipole '" << dname << "' already existing."
<< Exception::runerror;
cloned->cloneDependencies();
cloned->showerApproximation(Ptr<ShowerApproximation>::tptr());
cloned->doSplitting();
cd->second = cloned;
}
for ( map<cPDVector,set<Ptr<SubtractionDipole>::ptr> >::iterator sd = theSplittingDipoles.begin();
sd != theSplittingDipoles.end(); ++sd ) {
set<Ptr<SubtractionDipole>::ptr> cloned;
for ( set<Ptr<SubtractionDipole>::ptr>::iterator d = sd->second.begin();
d != sd->second.end(); ++d ) {
cloned.insert(cloneMap[*d]);
}
sd->second = cloned;
}
}
if ( !externalAmplitudes().empty() ) {
generator()->log() << "Initializing external amplitudes.\n" << flush;
for ( set<Ptr<MatchboxAmplitude>::tptr>::const_iterator ext =
externalAmplitudes().begin(); ext != externalAmplitudes().end(); ++ext ) {
if ( !(**ext).initializeExternal() ) {
throw Exception() << "Failed to initialize amplitude '" << (**ext).name() << "'\n"
<< Exception::runerror;
}
}
generator()->log() << "--------------------------------------------------------------------------------\n"
<< flush;
}
if ( !olpProcesses().empty() ) {
generator()->log() << "Initializing one-loop provider(s).\n" << flush;
map<Ptr<MatchboxAmplitude>::tptr,map<pair<Process,int>,int> > olps;
for ( map<Ptr<MatchboxAmplitude>::tptr,map<pair<Process,int>,int> >::const_iterator
oit = olpProcesses().begin(); oit != olpProcesses().end(); ++oit ) {
olps[oit->first] = oit->second;
}
for ( map<Ptr<MatchboxAmplitude>::tptr,map<pair<Process,int>,int> >::const_iterator
olpit = olps.begin(); olpit != olps.end(); ++olpit ) {
if ( !olpit->first->startOLP(olpit->second) ) {
throw Exception() << "MatchboxFactory: Failed to start OLP for amplitude '" << olpit->first->name() << "'\n"
<< Exception::runerror;
}
}
generator()->log() << "--------------------------------------------------------------------------------\n"
<< flush;
}
generator()->log() << "Process setup finished.\n" << flush;
ranSetup = true;
}
}
void MatchboxFactory::SplittingChannel::print(ostream& os) const {
os << "--- SplittingChannel setup -----------------------------------------------------\n";
os << " Born process ";
const StandardXComb& bxc = *bornXComb;
os << bxc.mePartonData()[0]->PDGName() << " "
<< bxc.mePartonData()[1]->PDGName() << " -> ";
for ( cPDVector::const_iterator p = bxc.mePartonData().begin() + 2;
p != bxc.mePartonData().end(); ++p ) {
os << (**p).PDGName() << " ";
}
os << "\n";
os << " to real emission process ";
const StandardXComb& rxc = *realXComb;
os << rxc.mePartonData()[0]->PDGName() << " "
<< rxc.mePartonData()[1]->PDGName() << " -> ";
for ( cPDVector::const_iterator p = rxc.mePartonData().begin() + 2;
p != rxc.mePartonData().end(); ++p ) {
os << (**p).PDGName() << " ";
}
os << "\n";
os << " with dipole:\n";
dipole->print(os);
os << "--------------------------------------------------------------------------------\n";
os << flush;
}
list<MatchboxFactory::SplittingChannel>
MatchboxFactory::getSplittingChannels(tStdXCombPtr xcptr) const {
if ( xcptr->lastProjector() )
xcptr = xcptr->lastProjector();
const StandardXComb& xc = *xcptr;
cPDVector proc = xc.mePartonData();
map<cPDVector,set<Ptr<SubtractionDipole>::ptr> >::const_iterator splitEntries
= splittingDipoles().find(proc);
list<SplittingChannel> res;
if ( splitEntries == splittingDipoles().end() )
return res;
const set<Ptr<SubtractionDipole>::ptr>& splitDipoles = splitEntries->second;
SplittingChannel channel;
if ( !splitDipoles.empty() ) {
Ptr<MatchboxMEBase>::tptr bornME =
const_ptr_cast<Ptr<MatchboxMEBase>::tptr>((**splitDipoles.begin()).underlyingBornME());
channel.bornXComb =
bornME->makeXComb(xc.maxEnergy(),xc.particles(),xc.eventHandlerPtr(),
const_ptr_cast<tSubHdlPtr>(xc.subProcessHandler()),
xc.pExtractor(),xc.CKKWHandler(),
xc.partonBins(),xc.cuts(),xc.diagrams(),xc.mirror(),
PartonPairVec());
}
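// For each splitting dipole attached to this Born configuration, build the
// corresponding real-emission XCombs and, where the shower approximation
// requires them, also the tilde XCombs from the partner dipoles.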
for ( set<Ptr<SubtractionDipole>::ptr>::const_iterator sd =
splitDipoles.begin(); sd != splitDipoles.end(); ++sd ) {
channel.dipole = *sd;
vector<StdXCombPtr> realXCombs = (**sd).makeRealXCombs(channel.bornXComb);
for ( vector<StdXCombPtr>::const_iterator rxc = realXCombs.begin();
rxc != realXCombs.end(); ++rxc ) {
channel.realXComb = *rxc;
if ( showerApproximation()->needsTildeXCombs() ) {
channel.tildeXCombs.clear();
assert(!channel.dipole->partnerDipoles().empty());
for ( vector<Ptr<SubtractionDipole>::tptr>::const_iterator p =
channel.dipole->partnerDipoles().begin();
p != channel.dipole->partnerDipoles().end(); ++p ) {
StdXCombPtr txc = channel.dipole->makeBornXComb(channel.realXComb);
if ( txc )
channel.tildeXCombs.push_back(txc);
}
}
res.push_back(channel);
}
}
if ( initVerbose() ) {
generator()->log()
<< "--- MatchboxFactory splitting channels ----------------------------------------------\n";
const StandardXComb& bxc = *xcptr;
generator()->log() << " hard process handled is: ";
generator()->log() << bxc.mePartonData()[0]->PDGName() << " "
<< bxc.mePartonData()[1]->PDGName() << " -> ";
for ( cPDVector::const_iterator p = bxc.mePartonData().begin() + 2;
p != bxc.mePartonData().end(); ++p ) {
generator()->log() << (**p).PDGName() << " ";
}
generator()->log() << "\n";
for ( list<MatchboxFactory::SplittingChannel>::const_iterator sp =
res.begin(); sp != res.end(); ++sp ) {
sp->print(generator()->log());
}
generator()->log()
<< "-------------------------------------------------------------------------------------\n"
<< flush;
}
return res;
}
void MatchboxFactory::print(ostream& os) const {
os << "--- MatchboxFactory setup -----------------------------------------------------------\n";
if ( !amplitudes().empty() ) {
os << " generated Born matrix elements:\n";
for ( vector<Ptr<MatchboxMEBase>::ptr>::const_iterator m = bornMEs().begin();
m != bornMEs().end(); ++m ) {
(**m).print(os);
}
os << flush;
os << " generated real emission matrix elements:\n";
for ( vector<Ptr<MatchboxMEBase>::ptr>::const_iterator m = realEmissionMEs().begin();
m != realEmissionMEs().end(); ++m ) {
(**m).print(os);
}
os << flush;
}
os << " generated Born+virtual matrix elements:\n";
for ( vector<Ptr<MatchboxMEBase>::ptr>::const_iterator bv
= bornVirtualMEs().begin(); bv != bornVirtualMEs().end(); ++bv ) {
(**bv).print(os);
}
os << " generated subtracted matrix elements:\n";
for ( vector<Ptr<SubtractedME>::ptr>::const_iterator sub
= subtractedMEs().begin(); sub != subtractedMEs().end(); ++sub ) {
os << " '" << (**sub).name() << "'\n";
}
os << "--------------------------------------------------------------------------------\n";
os << flush;
}
+void MatchboxFactory::summary(ostream& os) const {
+ os << "\n\n================================================================================\n"
+ << " Matchbox hard process summary\n"
+ << "================================================================================\n\n";
+
+ os << " Electro-weak parameter summary:\n"
+ << "--------------------------------------------------------------------------------\n\n";
+
+ os << " Electro-weak scheme : ";
+ switch ( SM().ewScheme() ) {
+
+ case 0: os << "Default"; break;
+ case 1: os << "GMuScheme"; break;
+ case 2: os << "alphaMZScheme"; break;
+ case 3: os << "NoMass"; break;
+ case 4: os << "mW"; break;
+ case 5: os << "mZ"; break;
+ case 6: os << "Independent"; break;
+ case 7: os << "FeynRulesUFO"; break;
+ default: assert(false);
+
+ }
+
+ os << "\n";
+
+ os << " alphaEM is "
+ << (SM().ewScheme() == 0 && !theFixedQEDCouplings ? "running" : "fixed at alphaEM(m(Z))") << "\n";
+
+ if ( SM().ewScheme() == 0 && !theFixedQEDCouplings )
+ os << " alphaEM is running at " << SM().alphaEMPtr()->nloops()
+ << " loops\n\n";
+ else
+ os << "\n";
+
+ os << (SM().ewScheme() != 0 ? " Tree level relations " : " Best values ")
+ << "yield:\n\n"
+ << " m(Z)/GeV = "
+ << getParticleData(ParticleID::Z0)->hardProcessMass()/GeV
+ << "\n"
+ << " g(Z)/GeV = "
+ << getParticleData(ParticleID::Z0)->hardProcessWidth()/GeV
+ << "\n"
+ << " m(W)/GeV = "
+ << getParticleData(ParticleID::Wplus)->hardProcessMass()/GeV
+ << "\n"
+ << " g(W)/GeV = "
+ << getParticleData(ParticleID::Wplus)->hardProcessWidth()/GeV
+ << "\n"
+ << " m(H)/GeV = "
+ << getParticleData(ParticleID::h0)->hardProcessMass()/GeV
+ << "\n"
+ << " g(H)/GeV = "
+ << getParticleData(ParticleID::h0)->hardProcessWidth()/GeV
+ << "\n"
+ << " alphaEM(m(Z)) = "
+ << SM().alphaEMME(sqr(getParticleData(ParticleID::Z0)->hardProcessMass())) << "\n"
+ << " sin^2(theta) = " << SM().sin2ThetaW()
+ << "\n"
+ << " GeV^2 GF = " << GeV2*SM().fermiConstant()
+ << "\n\n";
+
+ os << " Quark masses and widths are:\n"
+ << "--------------------------------------------------------------------------------\n\n"
+ << " m(u)/GeV = " << getParticleData(ParticleID::u)->hardProcessMass()/GeV << "\n"
+ << " m(d)/GeV = " << getParticleData(ParticleID::d)->hardProcessMass()/GeV << "\n"
+ << " m(c)/GeV = " << getParticleData(ParticleID::c)->hardProcessMass()/GeV << "\n"
+ << " m(s)/GeV = " << getParticleData(ParticleID::s)->hardProcessMass()/GeV << "\n"
+ << " m(t)/GeV = " << getParticleData(ParticleID::t)->hardProcessMass()/GeV << "\n"
+ << " g(t)/GeV = " << getParticleData(ParticleID::t)->hardProcessWidth()/GeV << "\n"
+ << " m(b)/GeV = " << getParticleData(ParticleID::b)->hardProcessMass()/GeV << "\n\n";
+
+ os << " Lepton masses and widths are:\n"
+ << "--------------------------------------------------------------------------------\n\n"
+ << " m(n_e)/GeV = " << getParticleData(ParticleID::nu_e)->hardProcessMass()/GeV << "\n"
+ << " m(e)/GeV = " << getParticleData(ParticleID::eminus)->hardProcessMass()/GeV << "\n"
+ << " m(n_mu)/GeV = " << getParticleData(ParticleID::nu_mu)->hardProcessMass()/GeV << "\n"
+ << " m(mu)/GeV = " << getParticleData(ParticleID::muminus)->hardProcessMass()/GeV << "\n"
+ << " m(nu_tau)/GeV = " << getParticleData(ParticleID::nu_tau)->hardProcessMass()/GeV << "\n"
+ << " m(tau)/GeV = " << getParticleData(ParticleID::tauminus)->hardProcessMass()/GeV << "\n\n";
+
+
+ os << " Strong coupling summary:\n"
+ << "--------------------------------------------------------------------------------\n\n";
+
+ os << " alphaS is";
+ if ( !theFixedCouplings ) {
+ os << " running at " << SM().alphaSPtr()->nloops()
+ << " loops with\n"
+ << " alphaS(m(Z)) = " << SM().alphaSPtr()->value(sqr(getParticleData(ParticleID::Z0)->mass()))
+ << "\n\n";
+ } else {
+ os << " fixed at "
+ << SM().alphaS()
+ << "\n\n";
+ }
+
+ if ( !theFixedCouplings ) {
+ os << " flavour thresholds are matched at\n";
+ for ( long id = 1; id <= 6; ++id ) {
+ os << " m(" << id << ")/GeV = "
+ << (SM().alphaSPtr()->quarkMasses().empty() ?
+ getParticleData(id)->mass()/GeV :
+ SM().alphaSPtr()->quarkMasses()[id-1]/GeV)
+ << "\n";
+ }
+ }
+
+ os << "\n\n" << flush;
+
+}
+
+
void MatchboxFactory::doinit() {
theIsMatchboxRun() = true;
if ( RunDirectories::empty() )
RunDirectories::pushRunId(generator()->runName());
setup();
if ( theShowerApproximation )
theShowerApproximation->init();
if ( initVerbose() && !ranSetup )
print(Repository::clog());
Ptr<StandardEventHandler>::tptr eh =
dynamic_ptr_cast<Ptr<StandardEventHandler>::tptr>(generator()->eventHandler());
assert(eh);
+ if ( initVerbose() && !ranSetup ) {
+ assert(standardModel());
+ standardModel()->init();
+ summary(Repository::clog());
+ }
SubProcessHandler::doinit();
}
void MatchboxFactory::doinitrun() {
theIsMatchboxRun() = true;
if ( theShowerApproximation )
theShowerApproximation->initrun();
Ptr<StandardEventHandler>::tptr eh =
dynamic_ptr_cast<Ptr<StandardEventHandler>::tptr>(generator()->eventHandler());
assert(eh);
SubProcessHandler::doinitrun();
}
const string& MatchboxFactory::buildStorage() {
return RunDirectories::buildStorage();
}
const string& MatchboxFactory::runStorage() {
return RunDirectories::runStorage();
}
void MatchboxFactory::persistentOutput(PersistentOStream & os) const {
os << theDiagramGenerator << theProcessData
<< theNLight
<< theNLightJetVec << theNHeavyJetVec << theNLightProtonVec
<< theOrderInAlphaS << theOrderInAlphaEW
<< theBornContributions << theVirtualContributions
<< theRealContributions << theIndependentVirtuals << theIndependentPKs
<< theSubProcessGroups
<< thePhasespace << theScaleChoice
<< theFactorizationScaleFactor << theRenormalizationScaleFactor
<< theFixedCouplings << theFixedQEDCouplings << theVetoScales
<< theAmplitudes
<< theBornMEs << theVirtuals << theRealEmissionMEs << theLoopInducedMEs
<< theBornVirtualMEs << theSubtractedMEs << theFiniteRealMEs
<< theVerbose << theDiagramWeightVerbose
<< theDiagramWeightVerboseNBins
<< theInitVerbose << theSubtractionData << theSubtractionPlotType
<< theSubtractionScatterPlot << thePoleData
<< theParticleGroups << processes << loopInducedProcesses << realEmissionProcesses
<< theShowerApproximation << theSplittingDipoles
<< theRealEmissionScales << theAllProcesses
<< theOLPProcesses << theExternalAmplitudes
<< theSelectedAmplitudes << theDeselectedAmplitudes
<< theDipoleSet << theReweighters << thePreweighters
<< theMECorrectionsOnly << theLoopSimCorrections << theHighestVirtualsize << ranSetup
<< theIncoming << theFirstPerturbativePDF << theSecondPerturbativePDF
<< inProductionMode << theSpinCorrelations << theAlphaParameter
<< theEnforceChargeConservation << theEnforceColourConservation
<< theEnforceLeptonNumberConservation << theEnforceQuarkNumberConservation
<< theLeptonFlavourDiagonal << theQuarkFlavourDiagonal;
}
void MatchboxFactory::persistentInput(PersistentIStream & is, int) {
is >> theDiagramGenerator >> theProcessData
>> theNLight
>> theNLightJetVec >> theNHeavyJetVec >> theNLightProtonVec
>> theOrderInAlphaS >> theOrderInAlphaEW
>> theBornContributions >> theVirtualContributions
>> theRealContributions >> theIndependentVirtuals >> theIndependentPKs
>> theSubProcessGroups
>> thePhasespace >> theScaleChoice
>> theFactorizationScaleFactor >> theRenormalizationScaleFactor
>> theFixedCouplings >> theFixedQEDCouplings >> theVetoScales
>> theAmplitudes
>> theBornMEs >> theVirtuals >> theRealEmissionMEs >> theLoopInducedMEs
>> theBornVirtualMEs >> theSubtractedMEs >> theFiniteRealMEs
>> theVerbose >> theDiagramWeightVerbose
>> theDiagramWeightVerboseNBins
>> theInitVerbose >> theSubtractionData >> theSubtractionPlotType
>> theSubtractionScatterPlot >> thePoleData
>> theParticleGroups >> processes >> loopInducedProcesses >> realEmissionProcesses
>> theShowerApproximation >> theSplittingDipoles
>> theRealEmissionScales >> theAllProcesses
>> theOLPProcesses >> theExternalAmplitudes
>> theSelectedAmplitudes >> theDeselectedAmplitudes
>> theDipoleSet >> theReweighters >> thePreweighters
>> theMECorrectionsOnly >> theLoopSimCorrections >> theHighestVirtualsize >> ranSetup
>> theIncoming >> theFirstPerturbativePDF >> theSecondPerturbativePDF
>> inProductionMode >> theSpinCorrelations >> theAlphaParameter
>> theEnforceChargeConservation >> theEnforceColourConservation
>> theEnforceLeptonNumberConservation >> theEnforceQuarkNumberConservation
>> theLeptonFlavourDiagonal >> theQuarkFlavourDiagonal;
}
string MatchboxFactory::startParticleGroup(string name) {
particleGroupName = StringUtils::stripws(name);
particleGroup.clear();
return "";
}
string MatchboxFactory::endParticleGroup(string) {
if ( particleGroup.empty() )
throw Exception() << "MatchboxFactory: Empty particle group."
<< Exception::runerror;
particleGroups()[particleGroupName] = particleGroup;
particleGroup.clear();
return "";
}
vector<string> MatchboxFactory::parseProcess(string in) {
vector<string> process = StringUtils::split(in);
if ( process.size() < 3 )
throw Exception() << "MatchboxFactory: Invalid process."
<< Exception::runerror;
for ( vector<string>::iterator p = process.begin();
p != process.end(); ++p ) {
*p = StringUtils::stripws(*p);
}
vector<string> pprocess;
for ( vector<string>::const_iterator p = process.begin();
p != process.end(); ++p ) {
if ( *p == "->" )
continue;
pprocess.push_back(*p);
}
return pprocess;
}
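// Illustration (hypothetical input, assuming particle groups "p", "e+" and "e-"
// have been defined): parseProcess("p p -> e+ e-") splits on whitespace, drops the
// "->" token and returns {"p","p","e+","e-"}; makeSubProcesses() later resolves
// each entry against the registered particle groups.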
string MatchboxFactory::doProcess(string in) {
processes.push_back(parseProcess(in));
return "";
}
string MatchboxFactory::doLoopInducedProcess(string in) {
loopInducedProcesses.push_back(parseProcess(in));
return "";
}
string MatchboxFactory::doSingleRealProcess(string in) {
realEmissionProcesses.push_back(parseProcess(in));
return "";
}
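// SortPID brings the outgoing legs of a candidate subprocess into a canonical
// PDG-id order, so that identical parton combinations are inserted into the
// result set only once.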
struct SortPID {
inline bool operator()(PDPtr a, PDPtr b) const {
return a->id() < b->id();
}
};
//
// @TODO
//
// SP: After improving this for Standard Model process building, this should
// actually go into a separate process builder class or something along these
// lines so that it is better factored for use with BSM models.
//
//
set<PDVector> MatchboxFactory::
makeSubProcesses(const vector<string>& proc) const {
if ( proc.empty() )
throw Exception() << "MatchboxFactory: No process specified."
<< Exception::runerror;
vector<PDVector> groups;
typedef map<string,PDVector>::const_iterator GroupIterator;
for ( vector<string>::const_iterator gr = proc.begin();
gr != proc.end(); ++gr ) {
GroupIterator git = particleGroups().find(*gr);
if ( git == particleGroups().end() ) {
throw Exception() << "MatchboxFactory: Particle group '"
<< *gr << "' not defined." << Exception::runerror;
}
groups.push_back(git->second);
}
vector<size_t> counts(groups.size(),0);
PDVector proto(groups.size());
set<PDVector> allProcs;
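// Enumerate all combinations of one particle per group by running the 'counts'
// vector like an odometer over the group sizes; each candidate is filtered by
// the conservation checks below before being accepted.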
while ( true ) {
for ( size_t k = 0; k < groups.size(); ++k )
proto[k] = groups[k][counts[k]];
int charge = 0;
int colour = 0;
int nleptons = 0;
int nquarks = 0;
int ncolour = 0;
int nleptonsGen[4];
int nquarksGen[4];
for ( size_t i = 0; i < 4; ++i ) {
nleptonsGen[i] = 0;
nquarksGen[i] = 0;
}
for ( size_t k = 0; k < proto.size(); ++k ) {
int sign = k > 1 ? 1 : -1;
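// Incoming legs (k < 2) enter with negative sign, so each conserved quantity
// must sum to zero over the full subprocess.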
charge += sign * proto[k]->iCharge();
colour += sign * proto[k]->iColour();
if ( abs(proto[k]->id()) <= 8 ) {
int generation = (abs(proto[k]->id()) - 1)/2;
nquarks += sign * ( proto[k]->id() < 0 ? -1 : 1);
nquarksGen[generation] += sign * ( proto[k]->id() < 0 ? -1 : 1);
}
if ( abs(proto[k]->id()) > 10 &&
abs(proto[k]->id()) <= 18 ) {
int generation = (abs(proto[k]->id()) - 11)/2;
nleptons += sign * ( proto[k]->id() < 0 ? -1 : 1);
nleptonsGen[generation] += sign * ( proto[k]->id() < 0 ? -1 : 1);
}
if ( proto[k]->coloured() )
++ncolour;
}
bool pass = true;
if ( theEnforceChargeConservation )
pass &= (charge == 0);
if ( theEnforceColourConservation )
pass &= (colour % 8 == 0) && (ncolour > 1);
if ( theEnforceLeptonNumberConservation ) {
pass &= (nleptons == 0);
if ( theLeptonFlavourDiagonal ) {
for ( size_t i = 0; i < 4; ++i )
pass &= (nleptonsGen[i] == 0);
}
}
if ( theEnforceQuarkNumberConservation ) {
pass &= (nquarks == 0);
if ( theQuarkFlavourDiagonal ) {
for ( size_t i = 0; i < 4; ++i )
pass &= (nquarksGen[i] == 0);
}
}
if ( pass ) {
for ( int i = 0; i < 2; ++i ) {
if ( proto[i]->coloured() &&
proto[i]->hardProcessMass() != ZERO )
throw Exception()
<< "Inconsistent flavour scheme detected with massive incoming "
<< proto[i]->PDGName() << ". Check your setup."
<< Exception::runerror;
}
sort(proto.begin()+2,proto.end(),SortPID());
allProcs.insert(proto);
}
vector<size_t>::reverse_iterator c = counts.rbegin();
vector<PDVector>::const_reverse_iterator g = groups.rbegin();
while ( c != counts.rend() ) {
if ( ++(*c) == g->size() ) {
*c = 0;
++c; ++g;
} else {
break;
}
}
if ( c == counts.rend() )
break;
}
return allProcs;
}
void MatchboxFactory::Init() {
static ClassDocumentation<MatchboxFactory> documentation
("MatchboxFactory",
"NLO QCD corrections have been calculated "
"using Matchbox \\cite{Platzer:2011bc}, \\cite{Matchbox:2015}",
"%\\cite{Platzer:2011bc}\n"
"\\bibitem{Platzer:2011bc}\n"
"S.~Platzer and S.~Gieseke,\n"
"``Dipole Showers and Automated NLO Matching in Herwig,''\n"
"arXiv:1109.6256 [hep-ph].\n"
"%%CITATION = ARXIV:1109.6256;%%\n"
"%\\cite{Matchbox:2015}\n"
"\\bibitem{Matchbox:2015}\n"
"Herwig collaboration,\n"
"``Precision LHC Event Generation with Herwig,''\n"
"in preparation.");
static Reference<MatchboxFactory,Tree2toNGenerator> interfaceDiagramGenerator
("DiagramGenerator",
"Set the diagram generator.",
&MatchboxFactory::theDiagramGenerator, false, false, true, true, false);
static Reference<MatchboxFactory,ProcessData> interfaceProcessData
("ProcessData",
"Set the process data object to be used.",
&MatchboxFactory::theProcessData, false, false, true, true, false);
static Parameter<MatchboxFactory,unsigned int> interfaceOrderInAlphaS
("OrderInAlphaS",
"The order in alpha_s to consider.",
&MatchboxFactory::theOrderInAlphaS, 0, 0, 0,
false, false, Interface::lowerlim);
static Parameter<MatchboxFactory,unsigned int> interfaceOrderInAlphaEW
("OrderInAlphaEW",
"The order in alpha_EW",
&MatchboxFactory::theOrderInAlphaEW, 2, 0, 0,
false, false, Interface::lowerlim);
static Switch<MatchboxFactory,bool> interfaceBornContributions
("BornContributions",
"Switch on or off the Born contributions.",
&MatchboxFactory::theBornContributions, true, false, false);
static SwitchOption interfaceBornContributionsOn
(interfaceBornContributions,
"On",
"Switch on Born contributions.",
true);
static SwitchOption interfaceBornContributionsOff
(interfaceBornContributions,
"Off",
"Switch off Born contributions.",
false);
static Switch<MatchboxFactory,bool> interfaceVirtualContributions
("VirtualContributions",
"Switch on or off the virtual contributions.",
&MatchboxFactory::theVirtualContributions, true, false, false);
static SwitchOption interfaceVirtualContributionsOn
(interfaceVirtualContributions,
"On",
"Switch on virtual contributions.",
true);
static SwitchOption interfaceVirtualContributionsOff
(interfaceVirtualContributions,
"Off",
"Switch off virtual contributions.",
false);
static Switch<MatchboxFactory,bool> interfaceRealContributions
("RealContributions",
"Switch on or off the real contributions.",
&MatchboxFactory::theRealContributions, true, false, false);
static SwitchOption interfaceRealContributionsOn
(interfaceRealContributions,
"On",
"Switch on real contributions.",
true);
static SwitchOption interfaceRealContributionsOff
(interfaceRealContributions,
"Off",
"Switch off real contributions.",
false);
static Switch<MatchboxFactory,bool> interfaceIndependentVirtuals
("IndependentVirtuals",
"Switch on or off virtual contributions as separate subprocesses.",
&MatchboxFactory::theIndependentVirtuals, true, false, false);
static SwitchOption interfaceIndependentVirtualsOn
(interfaceIndependentVirtuals,
"On",
"Switch on virtual contributions as separate subprocesses.",
true);
static SwitchOption interfaceIndependentVirtualsOff
(interfaceIndependentVirtuals,
"Off",
"Switch off virtual contributions as separate subprocesses.",
false);
static Switch<MatchboxFactory,bool> interfaceIndependentPKs
("IndependentPKOperators",
"Switch on or off PK oeprators as separate subprocesses.",
&MatchboxFactory::theIndependentPKs, true, false, false);
static SwitchOption interfaceIndependentPKsOn
(interfaceIndependentPKs,
"On",
"Switch on PK operators as separate subprocesses.",
true);
static SwitchOption interfaceIndependentPKsOff
(interfaceIndependentPKs,
"Off",
"Switch off PK operators as separate subprocesses.",
false);
static Switch<MatchboxFactory,bool> interfaceSubProcessGroups
("SubProcessGroups",
"Switch on or off production of sub-process groups.",
&MatchboxFactory::theSubProcessGroups, false, false, false);
static SwitchOption interfaceSubProcessGroupsOn
(interfaceSubProcessGroups,
"On",
"On",
true);
static SwitchOption interfaceSubProcessGroupsOff
(interfaceSubProcessGroups,
"Off",
"Off",
false);
static Reference<MatchboxFactory,MatchboxPhasespace> interfacePhasespace
("Phasespace",
"Set the phasespace generator.",
&MatchboxFactory::thePhasespace, false, false, true, true, false);
static Reference<MatchboxFactory,MatchboxScaleChoice> interfaceScaleChoice
("ScaleChoice",
"Set the scale choice object.",
&MatchboxFactory::theScaleChoice, false, false, true, true, false);
static Parameter<MatchboxFactory,double> interfaceFactorizationScaleFactor
("FactorizationScaleFactor",
"The factorization scale factor.",
&MatchboxFactory::theFactorizationScaleFactor, 1.0, 0.0, 0,
false, false, Interface::lowerlim);
static Parameter<MatchboxFactory,double> interfaceRenormalizationScaleFactor
("RenormalizationScaleFactor",
"The renormalization scale factor.",
&MatchboxFactory::theRenormalizationScaleFactor, 1.0, 0.0, 0,
false, false, Interface::lowerlim);
static Switch<MatchboxFactory,bool> interfaceFixedCouplings
("FixedCouplings",
"Switch on or off fixed couplings.",
&MatchboxFactory::theFixedCouplings, true, false, false);
static SwitchOption interfaceFixedCouplingsOn
(interfaceFixedCouplings,
"On",
"On",
true);
static SwitchOption interfaceFixedCouplingsOff
(interfaceFixedCouplings,
"Off",
"Off",
false);
static Switch<MatchboxFactory,bool> interfaceFixedQEDCouplings
("FixedQEDCouplings",
"Switch on or off fixed QED couplings.",
&MatchboxFactory::theFixedQEDCouplings, true, false, false);
static SwitchOption interfaceFixedQEDCouplingsOn
(interfaceFixedQEDCouplings,
"On",
"On",
true);
static SwitchOption interfaceFixedQEDCouplingsOff
(interfaceFixedQEDCouplings,
"Off",
"Off",
false);
static Switch<MatchboxFactory,bool> interfaceVetoScales
("VetoScales",
"Switch on or setting veto scales.",
&MatchboxFactory::theVetoScales, false, false, false);
static SwitchOption interfaceVetoScalesOn
(interfaceVetoScales,
"On",
"On",
true);
static SwitchOption interfaceVetoScalesOff
(interfaceVetoScales,
"Off",
"Off",
false);
static RefVector<MatchboxFactory,MatchboxAmplitude> interfaceAmplitudes
("Amplitudes",
"The amplitude objects.",
&MatchboxFactory::theAmplitudes, -1, false, false, true, true, false);
static RefVector<MatchboxFactory,MatchboxMEBase> interfaceBornMEs
("BornMEs",
"The Born matrix elements to be used",
&MatchboxFactory::theBornMEs, -1, false, false, true, true, false);
static RefVector<MatchboxFactory,MatchboxInsertionOperator> interfaceVirtuals
("Virtuals",
"The virtual corrections to include",
&MatchboxFactory::theVirtuals, -1, false, false, true, true, false);
static RefVector<MatchboxFactory,MatchboxMEBase> interfaceRealEmissionMEs
("RealEmissionMEs",
"The RealEmission matrix elements to be used",
&MatchboxFactory::theRealEmissionMEs, -1, false, false, true, true, false);
static RefVector<MatchboxFactory,MatchboxMEBase> interfaceBornVirtuals
("BornVirtualMEs",
"The generated Born/virtual contributions",
&MatchboxFactory::theBornVirtualMEs, -1, false, true, true, true, false);
static RefVector<MatchboxFactory,SubtractedME> interfaceSubtractedMEs
("SubtractedMEs",
"The generated subtracted real emission contributions",
&MatchboxFactory::theSubtractedMEs, -1, false, true, true, true, false);
static RefVector<MatchboxFactory,MatchboxMEBase> interfaceFiniteRealMEs
("FiniteRealMEs",
"The generated finite real contributions",
&MatchboxFactory::theFiniteRealMEs, -1, false, true, true, true, false);
static Switch<MatchboxFactory,bool> interfaceVerbose
("Verbose",
"Print full infomation on each evaluated phase space point.",
&MatchboxFactory::theVerbose, false, false, false);
static SwitchOption interfaceVerboseOn
(interfaceVerbose,
"On",
"On",
true);
static SwitchOption interfaceVerboseOff
(interfaceVerbose,
"Off",
"Off",
false);
static Switch<MatchboxFactory,bool> interfaceVerboseDia
("DiagramWeightVerbose",
"Print full infomation on each evaluated phase space point.",
&MatchboxFactory::theDiagramWeightVerbose, false, false, false);
static SwitchOption interfaceVerboseDiaOn
(interfaceVerboseDia,
"On",
"On",
true);
static SwitchOption interfaceVerboseDiaOff
(interfaceVerboseDia,
"Off",
"Off",
false);
static Parameter<MatchboxFactory,int> interfaceVerboseDiaNbins
("DiagramWeightVerboseNBins",
"No. of Bins for DiagramWeightVerbose Diagrams.",
&MatchboxFactory::theDiagramWeightVerboseNBins, 200, 0, 0,
false, false, Interface::lowerlim);
static Switch<MatchboxFactory,bool> interfaceInitVerbose
("InitVerbose",
"Print setup information.",
&MatchboxFactory::theInitVerbose, false, false, false);
static SwitchOption interfaceInitVerboseOn
(interfaceInitVerbose,
"On",
"On",
true);
static SwitchOption interfaceInitVerboseOff
(interfaceInitVerbose,
"Off",
"Off",
false);
static Parameter<MatchboxFactory,string> interfaceSubtractionData
("SubtractionData",
"Prefix for subtraction check data.",
&MatchboxFactory::theSubtractionData, "",
false, false);
static Switch<MatchboxFactory,int> interfaceSubtractionPlotType
("SubtractionPlotType",
"Switch for controlling what kind of plot is generated for checking the subtraction",
&MatchboxFactory::theSubtractionPlotType, 1, false, false);
static SwitchOption interfaceSubtractionPlotTypeLinearRatio
(interfaceSubtractionPlotType,
"LinRatio",
"Switch on the linear plot of the ratio",
1);
static SwitchOption interfaceSubtractionPlotTypeLogRelDiff
(interfaceSubtractionPlotType,
"LogRelDiff",
"Switch on the logarithmic plot of the relative difference",
2);
static Switch<MatchboxFactory,bool> interfaceSubtractionScatterPlot
("SubtractionScatterPlot",
"Switch for controlling whether subtraction data should be plotted for each phase space point individually",
&MatchboxFactory::theSubtractionScatterPlot, false, false, false);
static SwitchOption interfaceSubtractionScatterPlotOff
(interfaceSubtractionScatterPlot,
"Off", "Switch off the scatter plot", false);
static SwitchOption interfaceSubtractionScatterPlotOn
(interfaceSubtractionScatterPlot,
"On", "Switch on the scatter plot", true);
static Parameter<MatchboxFactory,string> interfacePoleData
("PoleData",
"Prefix for subtraction check data.",
&MatchboxFactory::thePoleData, "",
false, false);
static RefVector<MatchboxFactory,ParticleData> interfaceParticleGroup
("ParticleGroup",
"The particle group just started.",
&MatchboxFactory::particleGroup, -1, false, false, true, false, false);
static Command<MatchboxFactory> interfaceStartParticleGroup
("StartParticleGroup",
"Start a particle group.",
&MatchboxFactory::startParticleGroup, false);
static Command<MatchboxFactory> interfaceEndParticleGroup
("EndParticleGroup",
"End a particle group.",
&MatchboxFactory::endParticleGroup, false);
static Command<MatchboxFactory> interfaceProcess
("Process",
"Set the process(es) to consider.",
&MatchboxFactory::doProcess, false);
static Command<MatchboxFactory> interfaceLoopInducedProcess
("LoopInducedProcess",
"Set the loop induced process(es) to consider.",
&MatchboxFactory::doLoopInducedProcess, false);
static Command<MatchboxFactory> interfaceSingleRealProcess
("SingleRealProcess",
"Set the real emission process(es) to consider.",
&MatchboxFactory::doSingleRealProcess, false);
static Reference<MatchboxFactory,ShowerApproximation> interfaceShowerApproximation
("ShowerApproximation",
"Set the shower approximation to be considered.",
&MatchboxFactory::theShowerApproximation, false, false, true, true, false);
static Switch<MatchboxFactory,bool> interfaceRealEmissionScales
("RealEmissionScales",
"Switch on or off calculation of subtraction scales from real emission kinematics.",
&MatchboxFactory::theRealEmissionScales, false, false, false);
static SwitchOption interfaceRealEmissionScalesOn
(interfaceRealEmissionScales,
"On",
"On",
true);
static SwitchOption interfaceRealEmissionScalesOff
(interfaceRealEmissionScales,
"Off",
"Off",
false);
static Switch<MatchboxFactory,bool> interfaceAllProcesses
("AllProcesses",
"Consider all processes up to a maximum coupling order specified by the coupling order interfaces.",
&MatchboxFactory::theAllProcesses, false, false, false);
static SwitchOption interfaceAllProcessesYes
(interfaceAllProcesses,
"Yes",
"Include all processes.",
true);
static SwitchOption interfaceAllProcessesNo
(interfaceAllProcesses,
"No",
"Only consider processes matching the exact order in the couplings.",
false);
static RefVector<MatchboxFactory,MatchboxAmplitude> interfaceSelectAmplitudes
("SelectAmplitudes",
"The amplitude objects to be favoured in clashing responsibilities.",
&MatchboxFactory::theSelectedAmplitudes, -1, false, false, true, true, false);
static RefVector<MatchboxFactory,MatchboxAmplitude> interfaceDeselectAmplitudes
("DeselectAmplitudes",
"The amplitude objects to be disfavoured in clashing responsibilities.",
&MatchboxFactory::theDeselectedAmplitudes, -1, false, false, true, true, false);
static Switch<MatchboxFactory,int> interfaceDipoleSet
("DipoleSet",
"The set of subtraction terms to be considered.",
&MatchboxFactory::theDipoleSet, 0, false, false);
static SwitchOption interfaceDipoleSetCataniSeymour
(interfaceDipoleSet,
"CataniSeymour",
"Use default Catani-Seymour dipoles.",
0);
static RefVector<MatchboxFactory,ReweightBase> interfaceReweighters
("Reweighters",
"Reweight objects for matrix elements.",
&MatchboxFactory::theReweighters, -1, false, false, true, false, false);
static RefVector<MatchboxFactory,ReweightBase> interfacePreweighters
("Preweighters",
"Preweight objects for matrix elements.",
&MatchboxFactory::thePreweighters, -1, false, false, true, false, false);
static Switch<MatchboxFactory,bool> interfaceMECorrectionsOnly
("MECorrectionsOnly",
"Prepare only ME corrections, but no NLO calculation.",
&MatchboxFactory::theMECorrectionsOnly, false, false, false);
static SwitchOption interfaceMECorrectionsOnlyYes
(interfaceMECorrectionsOnly,
"Yes",
"Produce only ME corrections.",
true);
static SwitchOption interfaceMECorrectionsOnlyNo
(interfaceMECorrectionsOnly,
"No",
"Produce full NLO.",
false);
static Switch<MatchboxFactory,bool> interfaceLoopSimCorrections
("LoopSimCorrections",
"Prepare LoopSim corrections.",
&MatchboxFactory::theLoopSimCorrections, false, false, false);
static SwitchOption interfaceLoopSimCorrectionsYes
(interfaceLoopSimCorrections,
"Yes",
"Produce loopsim corrections.",
true);
static SwitchOption interfaceLoopSimCorrectionsNo
(interfaceLoopSimCorrections,
"No",
"Produce full NLO.",
false);
static Switch<MatchboxFactory,bool> interfaceFirstPerturbativePDF
("FirstPerturbativePDF",
"",
&MatchboxFactory::theFirstPerturbativePDF, true, false, false);
static SwitchOption interfaceFirstPerturbativePDFYes
(interfaceFirstPerturbativePDF,
"Yes",
"",
true);
static SwitchOption interfaceFirstPerturbativePDFNo
(interfaceFirstPerturbativePDF,
"No",
"",
false);
static Switch<MatchboxFactory,bool> interfaceSecondPerturbativePDF
("SecondPerturbativePDF",
"",
&MatchboxFactory::theSecondPerturbativePDF, true, false, false);
static SwitchOption interfaceSecondPerturbativePDFYes
(interfaceSecondPerturbativePDF,
"Yes",
"",
true);
static SwitchOption interfaceSecondPerturbativePDFNo
(interfaceSecondPerturbativePDF,
"No",
"",
false);
static Command<MatchboxFactory> interfaceProductionMode
("ProductionMode",
"Switch this factory to production mode.",
&MatchboxFactory::doProductionMode, false);
static Switch<MatchboxFactory,bool> interfaceSpinCorrelations
("SpinCorrelations",
"Fill information for the spin correlations, if possible.",
&MatchboxFactory::theSpinCorrelations, false, false, false);
static SwitchOption interfaceSpinCorrelationsYes
(interfaceSpinCorrelations,
"Yes",
"",
true);
static SwitchOption interfaceSpinCorrelationsNo
(interfaceSpinCorrelations,
"No",
"",
false);
static Parameter<MatchboxFactory,double> interfaceAlphaParameter
("AlphaParameter",
"Nagy-AlphaParameter.",
&MatchboxFactory::theAlphaParameter, 1.0, 0.0, 0,
false, false, Interface::lowerlim);
static Switch<MatchboxFactory,bool> interfaceEnforceChargeConservation
("EnforceChargeConservation",
"Enforce charge conservation while generating the hard process.",
&MatchboxFactory::theEnforceChargeConservation, true, false, false);
static SwitchOption interfaceEnforceChargeConservationYes
(interfaceEnforceChargeConservation,
"Yes",
"Enforce charge conservation.",
true);
static SwitchOption interfaceEnforceChargeConservationNo
(interfaceEnforceChargeConservation,
"No",
"Do not enforce charge conservation.",
false);
static Switch<MatchboxFactory,bool> interfaceEnforceColourConservation
("EnforceColourConservation",
"Enforce colour conservation while generating the hard process.",
&MatchboxFactory::theEnforceColourConservation, false, false, false);
static SwitchOption interfaceEnforceColourConservationYes
(interfaceEnforceColourConservation,
"Yes",
"Enforce colour conservation.",
true);
static SwitchOption interfaceEnforceColourConservationNo
(interfaceEnforceColourConservation,
"No",
"Do not enforce colour conservation.",
false);
static Switch<MatchboxFactory,bool> interfaceEnforceLeptonNumberConservation
("EnforceLeptonNumberConservation",
"Enforce lepton number conservation while generating the hard process.",
&MatchboxFactory::theEnforceLeptonNumberConservation, false, false, false);
static SwitchOption interfaceEnforceLeptonNumberConservationYes
(interfaceEnforceLeptonNumberConservation,
"Yes",
"Enforce lepton number conservation.",
true);
static SwitchOption interfaceEnforceLeptonNumberConservationNo
(interfaceEnforceLeptonNumberConservation,
"No",
"Do not enforce lepton number conservation.",
false);
static Switch<MatchboxFactory,bool> interfaceEnforceQuarkNumberConservation
("EnforceQuarkNumberConservation",
"Enforce quark number conservation while generating the hard process.",
&MatchboxFactory::theEnforceQuarkNumberConservation, false, false, false);
static SwitchOption interfaceEnforceQuarkNumberConservationYes
(interfaceEnforceQuarkNumberConservation,
"Yes",
"Enforce quark number conservation.",
true);
static SwitchOption interfaceEnforceQuarkNumberConservationNo
(interfaceEnforceQuarkNumberConservation,
"No",
"Do not enforce quark number conservation.",
false);
static Switch<MatchboxFactory,bool> interfaceLeptonFlavourDiagonal
("LeptonFlavourDiagonal",
"Assume that lepton interactions are flavour diagonal while generating the hard process.",
&MatchboxFactory::theLeptonFlavourDiagonal, false, false, false);
static SwitchOption interfaceLeptonFlavourDiagonalYes
(interfaceLeptonFlavourDiagonal,
"Yes",
"Assume that lepton interactions are flavour diagonal.",
true);
static SwitchOption interfaceLeptonFlavourDiagonalNo
(interfaceLeptonFlavourDiagonal,
"No",
"Do not assume that lepton interactions are flavour diagonal.",
false);
static Switch<MatchboxFactory,bool> interfaceQuarkFlavourDiagonal
("QuarkFlavourDiagonal",
"Assume that quark interactions are flavour diagonal while generating the hard process.",
&MatchboxFactory::theQuarkFlavourDiagonal, false, false, false);
static SwitchOption interfaceQuarkFlavourDiagonalYes
(interfaceQuarkFlavourDiagonal,
"Yes",
"Assume that quark interactions are flavour diagonal.",
true);
static SwitchOption interfaceQuarkFlavourDiagonalNo
(interfaceQuarkFlavourDiagonal,
"No",
"Do not assume that quark interactions are flavour diagonal.",
false);
}
// *** Attention *** The following static variable is needed for the type
// description system in ThePEG. Please check that the template arguments
// are correct (the class and its base class), and that the constructor
// arguments are correct (the class name and the name of the dynamically
// loadable library where the class implementation can be found).
DescribeClass<MatchboxFactory,SubProcessHandler>
describeHerwigMatchboxFactory("Herwig::MatchboxFactory", "Herwig.so");
diff --git a/MatrixElement/Matchbox/MatchboxFactory.h b/MatrixElement/Matchbox/MatchboxFactory.h
--- a/MatrixElement/Matchbox/MatchboxFactory.h
+++ b/MatrixElement/Matchbox/MatchboxFactory.h
@@ -1,1285 +1,1292 @@
// -*- C++ -*-
//
// MatchboxFactory.h is a part of Herwig - A multi-purpose Monte Carlo event generator
// Copyright (C) 2002-2012 The Herwig Collaboration
//
// Herwig is licenced under version 2 of the GPL, see COPYING for details.
// Please respect the MCnet academic guidelines, see GUIDELINES for details.
//
#ifndef HERWIG_MatchboxFactory_H
#define HERWIG_MatchboxFactory_H
//
// This is the declaration of the MatchboxFactory class.
//
#include "ThePEG/Handlers/SubProcessHandler.h"
#include "Herwig/MatrixElement/Matchbox/Base/MatchboxAmplitude.h"
#include "Herwig/MatrixElement/Matchbox/Utility/Tree2toNGenerator.h"
#include "Herwig/MatrixElement/Matchbox/Utility/ProcessData.h"
#include "Herwig/MatrixElement/Matchbox/Utility/MatchboxScaleChoice.h"
#include "Herwig/MatrixElement/Matchbox/Phasespace/MatchboxPhasespace.h"
#include "Herwig/MatrixElement/Matchbox/Base/MatchboxMEBase.h"
#include "Herwig/MatrixElement/Matchbox/Base/SubtractedME.h"
#include "Herwig/MatrixElement/Matchbox/MatchboxFactory.fh"
namespace Herwig {
using namespace ThePEG;
/**
* \ingroup Matchbox
* \author Simon Platzer
*
* \brief MatchboxFactory automatically sets up an NLO
* QCD calculation carried out using dipole subtraction.
*
* @see \ref MatchboxFactoryInterfaces "The interfaces"
* defined for MatchboxFactory.
*/
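// A minimal usage sketch (illustrative only; the object path and the particle
// groups used below are assumptions and depend on the actual input files): in a
// Herwig repository input file this factory is typically driven through the
// interfaces declared in MatchboxFactory.cc, e.g.
//
//   set /Herwig/MatrixElements/Matchbox/Factory:OrderInAlphaS 0
//   set /Herwig/MatrixElements/Matchbox/Factory:OrderInAlphaEW 2
//   do /Herwig/MatrixElements/Matchbox/Factory:Process p p -> e+ e-
//
// which fixes the coupling orders and registers a process via doProcess().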
class MatchboxFactory: public SubProcessHandler {
public:
/** @name Standard constructors and destructors. */
//@{
/**
* The default constructor.
*/
MatchboxFactory();
/**
* The destructor.
*/
virtual ~MatchboxFactory();
//@}
public:
/**
* Flag to indicate that at least one MatchboxFactory object is in action
*/
static bool isMatchboxRun() {
return theIsMatchboxRun();
}
/** @name Process and diagram information */
//@{
/**
* Return the diagram generator.
*/
Ptr<Tree2toNGenerator>::tptr diagramGenerator() const { return theDiagramGenerator; }
/**
* Set the diagram generator.
*/
void diagramGenerator(Ptr<Tree2toNGenerator>::ptr dg) { theDiagramGenerator = dg; }
/**
* Return the process data.
*/
Ptr<ProcessData>::tptr processData() const { return theProcessData; }
/**
* Set the process data.
*/
void processData(Ptr<ProcessData>::ptr pd) { theProcessData = pd; }
/**
* Return the number of light flavours this matrix
* element is calculated for.
*/
unsigned int nLight() const { return theNLight; }
/**
* Set the number of light flavours this matrix
* element is calculated for.
*/
void nLight(unsigned int n) { theNLight = n; }
/**
* Return the vector that contains the PDG ids of
* the light flavours, which are contained in the
* jet particle group.
*/
vector<int> nLightJetVec() const { return theNLightJetVec; }
/**
* Set the elements of the vector that contains the PDG
* ids of the light flavours, which are contained in the
* jet particle group.
*/
void nLightJetVec(int n) { theNLightJetVec.push_back(n); }
/**
* Return the vector that contains the PDG ids of
* the heavy flavours, which are contained in the
* jet particle group.
*/
vector<int> nHeavyJetVec() const { return theNHeavyJetVec; }
/**
* Set the elements of the vector that contains the PDG
* ids of the heavy flavours, which are contained in the
* jet particle group.
*/
void nHeavyJetVec(int n) { theNHeavyJetVec.push_back(n); }
/**
* Return the vector that contains the PDG ids of
* the light flavours, which are contained in the
* proton particle group.
*/
vector<int> nLightProtonVec() const { return theNLightProtonVec; }
/**
* Set the elements of the vector that contains the PDG
* ids of the light flavours, which are contained in the
* proton particle group.
*/
void nLightProtonVec(int n) { theNLightProtonVec.push_back(n); }
/**
* Return the order in \f$\alpha_S\f$.
*/
unsigned int orderInAlphaS() const { return theOrderInAlphaS; }
/**
* Set the order in \f$\alpha_S\f$.
*/
void orderInAlphaS(unsigned int o) { theOrderInAlphaS = o; }
/**
* Return the order in \f$\alpha_{EM}\f$.
*/
unsigned int orderInAlphaEW() const { return theOrderInAlphaEW; }
/**
* Set the order in \f$\alpha_{EM}\f$.
*/
void orderInAlphaEW(unsigned int o) { theOrderInAlphaEW = o; }
/**
* Return true, if all processes up to a maximum order are considered
*/
bool allProcesses() const { return theAllProcesses; }
/**
* Switch on/off inclusion of all processes up to a maximum order
*/
void setAllProcesses(bool on = true) { theAllProcesses = on; }
/**
* Return true, if Born contributions should be included.
*/
bool bornContributions() const { return theBornContributions; }
/**
* Switch on or off Born contributions
*/
void setBornContributions(bool on = true) { theBornContributions = on; }
/**
* Return true, if virtual contributions should be included.
*/
bool virtualContributions() const { return theVirtualContributions; }
/**
* Switch on or off virtual contributions
*/
void setVirtualContributions(bool on = true) { theVirtualContributions = on; }
/**
* Produce matrix element corrections, but no NLO
*/
bool meCorrectionsOnly() const { return theMECorrectionsOnly; }
/**
* Switch to produce matrix element corrections, but no NLO
*/
void setMECorrectionsOnly(bool on = true) { theMECorrectionsOnly = on; }
/**
* Produce matrix element corrections, with LoopSim NLO
*/
bool loopSimCorrections() const { return theLoopSimCorrections; }
/**
* Switch to produce matrix element corrections, with LoopSim NLO
*/
void setLoopSimCorrections(bool on = true) { theLoopSimCorrections = on; }
/**
* Return true, if subtracted real emission contributions should be included.
*/
bool realContributions() const { return theRealContributions; }
/**
* Switch on or off subtracted real emission contributions
*/
void setRealContributions(bool on = true) { theRealContributions = on; }
/**
* Return true, if virtual contributions should be treated as independent subprocesses
*/
bool independentVirtuals() const { return theIndependentVirtuals; }
/**
* Switch on/off treating virtual contributions as independent subprocesses
*/
void setIndependentVirtuals(bool on = true) { theIndependentVirtuals = on; }
/**
* Return true, if PK operator contributions should be treated as independent subprocesses
*/
bool independentPKs() const { return theIndependentPKs; }
/**
* Switch on/off treating PK operator contributions as independent subprocesses
*/
void setIndependentPKs(bool on = true) { theIndependentPKs = on; }
/**
* Return true, if SubProcessGroups should be
* set up from this MEGroup. If not, a single SubProcess
* is constructed from the data provided by the
* head matrix element.
*/
bool subProcessGroups() const { return theSubProcessGroups; }
/**
* Switch on or off producing subprocess groups.
*/
void setSubProcessGroups(bool on = true) { theSubProcessGroups = on; }
/**
* Return true, if subtraction scales should be calculated from real emission kinematics
*/
bool realEmissionScales() const { return theRealEmissionScales; }
/**
* Switch on/off calculating subtraction scales from real emission kinematics
*/
void setRealEmissionScales(bool on = true) { theRealEmissionScales = on; }
/**
* Set the shower approximation.
*/
void showerApproximation(Ptr<ShowerApproximation>::tptr app) { theShowerApproximation = app; }
/**
* Return the shower approximation.
*/
Ptr<ShowerApproximation>::tptr showerApproximation() const { return theShowerApproximation; }
//@}
/** @name Phasespace generation and scale choice */
//@{
/**
* Return the phase space generator to be used.
*/
Ptr<MatchboxPhasespace>::tptr phasespace() const { return thePhasespace; }
/**
* Set the phase space generator to be used.
*/
void phasespace(Ptr<MatchboxPhasespace>::ptr ps) { thePhasespace = ps; }
/**
* Set the scale choice object
*/
void scaleChoice(Ptr<MatchboxScaleChoice>::ptr sc) { theScaleChoice = sc; }
/**
* Return the scale choice object
*/
Ptr<MatchboxScaleChoice>::tptr scaleChoice() const { return theScaleChoice; }
/**
* Get the factorization scale factor
*/
double factorizationScaleFactor() const { return theFactorizationScaleFactor; }
/**
* Set the factorization scale factor
*/
void factorizationScaleFactor(double f) { theFactorizationScaleFactor = f; }
/**
* Get the renormalization scale factor
*/
double renormalizationScaleFactor() const { return theRenormalizationScaleFactor; }
/**
* Set the renormalization scale factor
*/
void renormalizationScaleFactor(double f) { theRenormalizationScaleFactor = f; }
/**
* Return true, if fixed couplings are used.
*/
bool fixedCouplings() const { return theFixedCouplings; }
/**
* Switch on fixed couplings.
*/
void setFixedCouplings(bool on = true) { theFixedCouplings = on; }
/**
* Return true, if fixed QED couplings are used.
*/
bool fixedQEDCouplings() const { return theFixedQEDCouplings; }
/**
* Switch on fixed QED couplings.
*/
void setFixedQEDCouplings(bool on = true) { theFixedQEDCouplings = on; }
/**
* Return true, if veto scales should be set
* for the real emission
*/
bool vetoScales() const { return theVetoScales; }
/**
* Switch on setting veto scales
*/
void doVetoScales() { theVetoScales = true; }
/**
* Switch off setting veto scales
*/
void noVetoScales() { theVetoScales = false; }
//@}
/** @name Amplitudes and caching */
//@{
/**
* Return the amplitudes to be considered
*/
const vector<Ptr<MatchboxAmplitude>::ptr>& amplitudes() const { return theAmplitudes; }
/**
* Access the amplitudes to be considered
*/
vector<Ptr<MatchboxAmplitude>::ptr>& amplitudes() { return theAmplitudes; }
//@}
/** @name Matrix element objects. */
//@{
/**
* Return the Born matrix elements to be considered
*/
const vector<Ptr<MatchboxMEBase>::ptr>& bornMEs() const { return theBornMEs; }
/**
* Access the Born matrix elements to be considered
*/
vector<Ptr<MatchboxMEBase>::ptr>& bornMEs() { return theBornMEs; }
/**
* Return the loop induced matrix elements to be considered
*/
const vector<Ptr<MatchboxMEBase>::ptr>& loopInducedMEs() const { return theLoopInducedMEs; }
/**
* Access the loop induced matrix elements to be considered
*/
vector<Ptr<MatchboxMEBase>::ptr>& loopInducedMEs() { return theLoopInducedMEs; }
/**
* Return the processes to be ordered from an OLP
*/
const map<Ptr<MatchboxAmplitude>::tptr,
map<pair<Process,int>,int> >&
olpProcesses() const { return theOLPProcesses; }
/**
* Access the processes to be ordered from an OLP
*/
map<Ptr<MatchboxAmplitude>::tptr,
map<pair<Process,int>,int> >&
olpProcesses() { return theOLPProcesses; }
/**
* Order an OLP process and return its id
*/
int orderOLPProcess(const Process& p,
Ptr<MatchboxAmplitude>::tptr amp,
int type);
/**
* Return the amplitudes which need external initialization
*/
const set<Ptr<MatchboxAmplitude>::tptr>& externalAmplitudes() const {
return theExternalAmplitudes;
}
/**
* Access the amplitudes which need external initialization
*/
set<Ptr<MatchboxAmplitude>::tptr>& externalAmplitudes() {
return theExternalAmplitudes;
}
/**
* Return the virtual corrections to be considered
*/
const vector<Ptr<MatchboxInsertionOperator>::ptr>& virtuals() const { return theVirtuals; }
/**
* Access the virtual corrections to be considered
*/
vector<Ptr<MatchboxInsertionOperator>::ptr>& virtuals() { return theVirtuals; }
/**
* Return the produced NLO matrix elements
*/
const vector<Ptr<MatchboxMEBase>::ptr>& bornVirtualMEs() const { return theBornVirtualMEs; }
/**
* Access the produced NLO matrix elements
*/
vector<Ptr<MatchboxMEBase>::ptr>& bornVirtualMEs() { return theBornVirtualMEs; }
/**
* Return the real emission matrix elements to be considered
*/
const vector<Ptr<MatchboxMEBase>::ptr>& realEmissionMEs() const { return theRealEmissionMEs; }
/**
* Access the real emission matrix elements to be considered
*/
vector<Ptr<MatchboxMEBase>::ptr>& realEmissionMEs() { return theRealEmissionMEs; }
/**
* Return which set of dipoles should be considered
*/
int dipoleSet() const { return theDipoleSet; }
/**
* Set which set of dipoles should be considered
*/
void dipoleSet(int s) { theDipoleSet = s; }
/**
* Return the produced subtracted matrix elements
*/
const vector<Ptr<SubtractedME>::ptr>& subtractedMEs() const { return theSubtractedMEs; }
/**
* Access the produced subtracted matrix elements
*/
vector<Ptr<SubtractedME>::ptr>& subtractedMEs() { return theSubtractedMEs; }
/**
* Return the produced finite real emission matrix elements
*/
const vector<Ptr<MatchboxMEBase>::ptr>& finiteRealMEs() const { return theFiniteRealMEs; }
/**
* Access the produced finite real emission matrix elements
*/
vector<Ptr<MatchboxMEBase>::ptr>& finiteRealMEs() { return theFiniteRealMEs; }
/**
* Return the map of Born processes to splitting dipoles
*/
const map<cPDVector,set<Ptr<SubtractionDipole>::ptr> >& splittingDipoles() const {
return theSplittingDipoles;
}
/**
* Identify a splitting channel
*/
struct SplittingChannel {
/**
* The Born XComb
*/
StdXCombPtr bornXComb;
/**
* The real XComb
*/
StdXCombPtr realXComb;
/**
* The set of tilde XCombs to consider for the real xcomb
*/
vector<StdXCombPtr> tildeXCombs;
/**
* The dipole in charge of the splitting
*/
Ptr<SubtractionDipole>::ptr dipole;
/**
* Dump the setup
*/
void print(ostream&) const;
};
/**
* Generate all splitting channels for the Born process handled by
* the given XComb
*/
list<SplittingChannel> getSplittingChannels(tStdXCombPtr xc) const;
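// Illustrative sketch: dumping the splitting channels attached to a Born
// XComb; "factory" and "bornXC" stand for a valid factory pointer and a
// Born StdXCombPtr obtained elsewhere.
//
//   list<MatchboxFactory::SplittingChannel> channels =
//     factory->getSplittingChannels(bornXC);
//   for ( const MatchboxFactory::SplittingChannel& c : channels )
//     c.print(std::cout);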
/**
* Return the reweight objects for matrix elements
*/
const vector<ReweightPtr>& reweighters() const { return theReweighters; }
/**
* Access the reweight objects for matrix elements
*/
vector<ReweightPtr>& reweighters() { return theReweighters; }
/**
* Return the preweight objects for matrix elements
*/
const vector<ReweightPtr>& preweighters() const { return thePreweighters; }
/**
* Access the preweight objects for matrix elements
*/
vector<ReweightPtr>& preweighters() { return thePreweighters; }
//@}
/** @name Setup the matrix elements */
//@{
/**
* Return true if this object needs to be initialized before all
* other objects (except those for which this function also returns
* true). This version always returns true.
*/
virtual bool preInitialize() const { return true; }
/**
* Prepare a matrix element.
*/
void prepareME(Ptr<MatchboxMEBase>::ptr);
/**
* Check consistency and switch to production mode.
*/
void productionMode();
/**
* Setup everything
*/
virtual void setup();
/**
* The highest multiplicity of legs having virtual contributions (needed for MadGraph).
*/
size_t highestVirt() { return theHighestVirtualsize; }
//@}
/** @name Diagnostic information */
//@{
/**
* Return true, if verbose
*/
bool verbose() const { return theVerbose; }
/**
* Switch on diagnostic information.
*/
void setVerbose(bool on = true) { theVerbose = on; }
/**
* Return true, if diagram weight is verbose
*/
bool verboseDia() const { return theDiagramWeightVerbose; }
/**
* Number of bins for diagram weight verbosity
*/
int diagramWeightVerboseNBins() const {return theDiagramWeightVerboseNBins;}
/**
* Return true, if verbose while initializing
*/
bool initVerbose() const { return theInitVerbose || verbose(); }
/**
* Switch on diagnostic information while initializing
*/
void setInitVerbose(bool on = true) { theInitVerbose = on; }
/**
* Dump the setup
*/
void print(ostream&) const;
/**
* Return the subtraction data prefix.
*/
const string& subtractionData() const { return theSubtractionData; }
/**
* Set the subtraction data prefix.
*/
void subtractionData(const string& s) { theSubtractionData = s; }
/**
* Return the subtraction plot type.
*/
const int& subtractionPlotType() const { return theSubtractionPlotType; }
/**
* Set the subtraction plot type.
*/
void subtractionPlotType(const int& t) { theSubtractionPlotType = t; }
/**
* Return whether subtraction data should be plotted for all phase space points individually
*/
const bool& subtractionScatterPlot() const { return theSubtractionScatterPlot; }
/**
* Set whether subtraction data should be plotted for all phase space points individually
*/
void subtractionScatterPlot(const bool& s) { theSubtractionScatterPlot = s; }
/**
* Return the pole data prefix.
*/
const string& poleData() const { return thePoleData; }
/**
* Set the pole data prefix.
*/
void poleData(const string& s) { thePoleData = s; }
/**
* Return true, if cancellation of epsilon poles should be checked.
*/
bool checkPoles() const { return poleData() != ""; }
//@}
/** @name Process generation */
//@{
/**
* Return the particle groups.
*/
const map<string,PDVector>& particleGroups() const { return theParticleGroups; }
/**
* Access the particle groups.
*/
map<string,PDVector>& particleGroups() { return theParticleGroups; }
/**
* Return true, if the given particle is incoming
*/
bool isIncoming(cPDPtr p) const {
return theIncoming.find(p->id()) != theIncoming.end();
}
/**
* Return true, if spin correlation information should be provided, if possible.
*/
bool spinCorrelations() const { return theSpinCorrelations; }
/**
* Indicate that spin correlation information should be provided, if possible.
*/
void setSpinCorrelations(bool yes) { theSpinCorrelations = yes; }
//@}
/** @name Truncated qtilde shower information */
//@{
/**
* Return the subprocess of the real emission
*/
tSubProPtr hardTreeSubprocess() { return theHardtreeSubprocess; }
/**
* Set the subprocess of the real emission for use in calculating the shower hardtree
*/
void setHardTreeSubprocess(tSubProPtr hardTree) { theHardtreeSubprocess = hardTree; }
/**
* Return the born emitter
*/
int hardTreeEmitter() { return theHardtreeEmitter; }
/**
* Set the born emitter for use in calculating the shower hardtree
*/
void setHardTreeEmitter(int emitter) { theHardtreeEmitter = emitter; }
/**
* Return the born spectator
*/
int hardTreeSpectator() { return theHardtreeSpectator; }
/**
* Set the born spectator for use in calculating the shower hardtree
*/
void setHardTreeSpectator(int spectator) { theHardtreeSpectator = spectator; }
//@}
/** @name Data handling */
//@{
/**
* Return (and possibly create) a directory to contain amplitude
* information.
*/
const string& buildStorage();
/**
* Return (and possibly create) a directory to contain integration grid
* information.
*/
const string& runStorage();
/**
* The alpha parameter of http://arxiv.org/pdf/hep-ph/0307268v2.pdf used to
* restrict the dipole phase space
*/
double alphaParameter() const { return theAlphaParameter; }
/**
* Set the alpha parameter (needed for the massive PK operator)
*/
void setAlphaParameter(double a) { theAlphaParameter = a; }
//@}
public:
+ /**
+ * Print a summary of the parameters used
+ */
+ void summary(ostream&) const;
+
+public:
+
/** @name Functions used by the persistent I/O system. */
//@{
/**
* Function used to write out object persistently.
* @param os the persistent output stream written to.
*/
void persistentOutput(PersistentOStream & os) const;
/**
* Function used to read in object persistently.
* @param is the persistent input stream read from.
* @param version the version number of the object when written.
*/
void persistentInput(PersistentIStream & is, int version);
//@}
/**
* The standard Init function used to initialize the interfaces.
* Called exactly once for each class by the class description system
* before the main function starts or
* when this class is dynamically loaded.
*/
static void Init();
protected:
/** @name Clone Methods. */
//@{
/**
* Make a simple clone of this object.
* @return a pointer to the new object.
*/
virtual IBPtr clone() const;
/** Make a clone of this object, possibly modifying the cloned object
* to make it sane.
* @return a pointer to the new object.
*/
virtual IBPtr fullclone() const;
//@}
protected:
/** @name Standard Interfaced functions. */
//@{
/**
* Initialize this object after the setup phase before saving an
* EventGenerator to disk.
* @throws InitException if object could not be initialized properly.
*/
virtual void doinit();
/**
* Initialize this object. Called in the run phase just before
* a run begins.
*/
virtual void doinitrun();
//@}
private:
/**
* Flag to indicate that at least one MatchboxFactory object is in action
*/
static bool& theIsMatchboxRun();
/**
* The diagram generator.
*/
Ptr<Tree2toNGenerator>::ptr theDiagramGenerator;
/**
* The process data object to be used
*/
Ptr<ProcessData>::ptr theProcessData;
/**
* The number of light flavours this matrix
* element is calculated for.
*/
unsigned int theNLight;
/**
* Vector with the PDG ids of the light quark flavours,
* which are contained in the jet particle group.
*/
vector<int> theNLightJetVec;
/**
* Vector with the PDG ids of the heavy quark flavours,
* which are contained in the jet particle group.
*/
vector<int> theNHeavyJetVec;
/**
* Vector with the PDG ids of the light quark flavours,
* which are contained in the proton particle group.
*/
vector<int> theNLightProtonVec;
/**
* The order in \f$\alpha_S\f$.
*/
unsigned int theOrderInAlphaS;
/**
* The order in \f$\alpha_{EM}\f$.
*/
unsigned int theOrderInAlphaEW;
/**
* Switch on or off Born contributions
*/
bool theBornContributions;
/**
* Switch on or off virtual contributions
*/
bool theVirtualContributions;
/**
* Switch on or off subtracted real emission contributions
*/
bool theRealContributions;
/**
* True if virtual contributions should be treated as independent subprocesses
*/
bool theIndependentVirtuals;
/**
* True if PK operator contributions should be treated as independent subprocesses
*/
bool theIndependentPKs;
/**
* True, if SubProcessGroups should be
* set up from this MEGroup. If not, a single SubProcess
* is constructed from the data provided by the
* head matrix element.
*/
bool theSubProcessGroups;
/**
* The phase space generator to be used.
*/
Ptr<MatchboxPhasespace>::ptr thePhasespace;
/**
* The scale choice object
*/
Ptr<MatchboxScaleChoice>::ptr theScaleChoice;
/**
* The factorization scale factor.
*/
double theFactorizationScaleFactor;
/**
* The renormalization scale factor.
*/
double theRenormalizationScaleFactor;
/**
* Use non-running couplings.
*/
bool theFixedCouplings;
/**
* Use non-running QED couplings.
*/
bool theFixedQEDCouplings;
/**
* True, if veto scales should be set
* for the real emission
*/
bool theVetoScales;
/**
* The amplitudes to be considered
*/
vector<Ptr<MatchboxAmplitude>::ptr> theAmplitudes;
/**
* The Born matrix elements to be considered
*/
vector<Ptr<MatchboxMEBase>::ptr> theBornMEs;
/**
* The loop induced matrix elements to be considered
*/
vector<Ptr<MatchboxMEBase>::ptr> theLoopInducedMEs;
/**
* The virtual corrections to be considered
*/
vector<Ptr<MatchboxInsertionOperator>::ptr> theVirtuals;
/**
* The real emission matrix elements to be considered
*/
vector<Ptr<MatchboxMEBase>::ptr> theRealEmissionMEs;
/**
* The produced NLO matrix elements
*/
vector<Ptr<MatchboxMEBase>::ptr> theBornVirtualMEs;
/**
* The produced subtracted matrix elements
*/
vector<Ptr<SubtractedME>::ptr> theSubtractedMEs;
/**
* The produced finite real emission matrix elements
*/
vector<Ptr<MatchboxMEBase>::ptr> theFiniteRealMEs;
/**
* Which set of dipoles should be considered
*/
int theDipoleSet;
/**
* Switch on or off verbosity
*/
bool theVerbose;
/**
* Switch on or off diagram weight verbosity
*/
bool theDiagramWeightVerbose;
/**
* Number of bins for diagram weight verbosity
*/
int theDiagramWeightVerboseNBins;
/**
* True, if verbose while initializing
*/
bool theInitVerbose;
/**
* Prefix for subtraction data
*/
string theSubtractionData;
/**
* The type of plot to be generated for subtraction checking
*/
int theSubtractionPlotType;
/**
* Whether subtraction data should be plotted for all phase space points individually
*/
bool theSubtractionScatterPlot;
/**
* Prefix for pole data.
*/
string thePoleData;
/**
* Command to limit the real emission process to be considered.
*/
string doSingleRealProcess(string);
/**
* The real emission process to be included; if empty, all possible
* ones will be considered.
*/
vector<vector<string> > realEmissionProcesses;
/**
* Particle groups.
*/
map<string,PDVector> theParticleGroups;
/**
* Command to start a particle group.
*/
string startParticleGroup(string);
/**
* The name of the particle group currently edited.
*/
string particleGroupName;
/**
* The particle group currently edited.
*/
PDVector particleGroup;
/**
* Command to end a particle group.
*/
string endParticleGroup(string);
/**
* Parse a process description
*/
vector<string> parseProcess(string);
/**
* Command to set the process.
*/
string doProcess(string);
/**
* Command to set the process.
*/
string doLoopInducedProcess(string);
/**
* The process to consider in terms of particle groups.
*/
vector<vector<string> > processes;
/**
* The loop induced process to consider in terms of particle groups.
*/
vector<vector<string> > loopInducedProcesses;
/**
* Generate subprocesses.
*/
set<PDVector> makeSubProcesses(const vector<string>&) const;
/**
* Generate matrix element objects for the given process.
*/
vector<Ptr<MatchboxMEBase>::ptr> makeMEs(const vector<string>&,
unsigned int orderas,
bool virt);
/**
* The shower approximation.
*/
Ptr<ShowerApproximation>::ptr theShowerApproximation;
/**
* The map of Born processes to splitting dipoles
*/
map<cPDVector,set<Ptr<SubtractionDipole>::ptr> > theSplittingDipoles;
/**
* True, if subtraction scales should be calculated from real emission kinematics
*/
bool theRealEmissionScales;
/**
* Consider all processes with order in couplings specifying the
* maximum order.
*/
bool theAllProcesses;
/**
* The processes to be ordered from an OLP
*/
map<Ptr<MatchboxAmplitude>::tptr,map<pair<Process,int>,int> > theOLPProcesses;
/**
* Amplitudes which need external initialization
*/
set<Ptr<MatchboxAmplitude>::tptr> theExternalAmplitudes;
/**
* Amplitudes to be selected on clashing responsibilities.
*/
vector<Ptr<MatchboxAmplitude>::ptr> theSelectedAmplitudes;
/**
* Amplitudes to be deselected on clashing responsibilities.
*/
vector<Ptr<MatchboxAmplitude>::ptr> theDeselectedAmplitudes;
/**
* Reweight objects for matrix elements
*/
vector<ReweightPtr> theReweighters;
/**
* Preweight objects for matrix elements
*/
vector<ReweightPtr> thePreweighters;
/**
* Produce matrix element corrections, but no NLO
*/
bool theMECorrectionsOnly;
/**
* The highest multiplicity of legs having virtual contributions (needed for MadGraph).
*/
int theHighestVirtualsize;
/**
* Produce matrix element corrections, with LoopSim NLO
*/
bool theLoopSimCorrections;
/**
* True, if the setup has already been run.
*/
bool ranSetup;
/**
* PDG ids of incoming particles
*/
set<long> theIncoming;
/**
* True, if the first incoming parton originates from a perturbative PDF
*/
bool theFirstPerturbativePDF;
/**
* True, if the second incoming parton originates from a perturbative PDF
*/
bool theSecondPerturbativePDF;
/**
* True, if this Factory is in production mode.
*/
bool inProductionMode;
/**
* The real emission subprocess used when calculating the hardtree
* in the truncated qtilde shower
*/
tSubProPtr theHardtreeSubprocess;
/**
* The born emitter used when calculating the hardtree in
* the truncated shower
*/
int theHardtreeEmitter;
/**
* The born spectator used when calculating the hardtree in
* the truncated shower
*/
int theHardtreeSpectator;
/**
* True, if spin correlation information should be provided, if possible.
*/
bool theSpinCorrelations;
/**
* The alpha parameter to be used for the dipole subtraction
*/
double theAlphaParameter;
/**
* Whether or not charge conservation should be enforced for the processes
* constructed.
*/
bool theEnforceChargeConservation;
/**
* Whether or not colour conservation should be enforced for the processes
* constructed.
*/
bool theEnforceColourConservation;
/**
* Whether or not lepton number conservation should be enforced for the processes
* constructed.
*/
bool theEnforceLeptonNumberConservation;
/**
* Whether or not quark number conservation should be enforced for the processes
* constructed.
*/
bool theEnforceQuarkNumberConservation;
/**
* Assume flavour diagonal lepton interactions
*/
bool theLeptonFlavourDiagonal;
/**
* Assume flavour diagonal quark interactions
*/
bool theQuarkFlavourDiagonal;
/**
* Command for production mode
*/
string doProductionMode(string) {
productionMode(); return "";
}
private:
/**
* The assignment operator is private and must never be called.
* In fact, it should not even be implemented.
*/
MatchboxFactory & operator=(const MatchboxFactory &);
};
}
#endif /* HERWIG_MatchboxFactory_H */
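A minimal sketch of driving the setter interface declared above directly from C++; in practice these flags are set via the Herwig repository input files, and the helper name below is purely illustrative.

#include "Herwig/MatrixElement/Matchbox/MatchboxFactory.h"

// Hypothetical helper: prepare a factory for a fixed-order NLO setup.
void configureNLO(Herwig::MatchboxFactory& factory) {
  factory.orderInAlphaS(2);            // QCD coupling order of the Born process
  factory.orderInAlphaEW(2);           // electroweak coupling order
  factory.setBornContributions();      // include Born terms
  factory.setVirtualContributions();   // include virtual corrections
  factory.setRealContributions();      // include subtracted real emission
  factory.setSubProcessGroups(false);  // build a single SubProcess from the head ME
}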
diff --git a/MatrixElement/Matchbox/Scales/MatchboxHtScale.cc b/MatrixElement/Matchbox/Scales/MatchboxHtScale.cc
--- a/MatrixElement/Matchbox/Scales/MatchboxHtScale.cc
+++ b/MatrixElement/Matchbox/Scales/MatchboxHtScale.cc
@@ -1,164 +1,164 @@
// -*- C++ -*-
//
// MatchboxHtScale.cc is a part of Herwig - A multi-purpose Monte Carlo event generator
// Copyright (C) 2002-2012 The Herwig Collaboration
//
// Herwig is licenced under version 2 of the GPL, see COPYING for details.
// Please respect the MCnet academic guidelines, see GUIDELINES for details.
//
//
// This is the implementation of the non-inlined, non-templated member
// functions of the MatchboxHtScale class.
//
#include "MatchboxHtScale.h"
#include "ThePEG/Interface/ClassDocumentation.h"
#include "ThePEG/Interface/Switch.h"
#include "ThePEG/Interface/Parameter.h"
#include "ThePEG/Interface/Reference.h"
#include "ThePEG/EventRecord/Particle.h"
#include "ThePEG/Repository/UseRandom.h"
#include "ThePEG/Repository/EventGenerator.h"
#include "ThePEG/Utilities/DescribeClass.h"
#include "ThePEG/Persistency/PersistentOStream.h"
#include "ThePEG/Persistency/PersistentIStream.h"
using namespace Herwig;
MatchboxHtScale::MatchboxHtScale()
: theIncludeMT(false), theHTFactor(1.0),
theMTFactor(1.0),theScalePtCut(15.*GeV) {}
MatchboxHtScale::~MatchboxHtScale() {}
IBPtr MatchboxHtScale::clone() const {
return new_ptr(*this);
}
IBPtr MatchboxHtScale::fullclone() const {
return new_ptr(*this);
}
Energy2 MatchboxHtScale::renormalizationScale() const {
tcPDVector pd (mePartonData().begin() + 2, mePartonData().end());
vector<LorentzMomentum> p (meMomenta().begin() + 2, meMomenta().end());
tcPDPtr t1 = mePartonData()[0];
tcPDPtr t2 = mePartonData()[1];
tcCutsPtr cuts = lastCutsPtr();
theJetFinder->cluster(pd, p, cuts, t1, t2);
initWeightFactors(pd,p,theJetFinder);
// momentum of the non-jet system
LorentzMomentum nonJetMomentum(ZERO,ZERO,ZERO,ZERO);
// (weighted) pt of the jet systems
Energy ptJetSum = ZERO;
bool gotone = false;
tcPDVector::const_iterator pdata = pd.begin();
vector<LorentzMomentum>::const_iterator mom = p.begin();
for ( ; mom != p.end(); ++pdata, ++mom ) {
if ( theJetFinder->unresolvedMatcher()->check(**pdata)&&
mom->perp()>theScalePtCut){
//abs(mom->rapidity()+(!lastXCombPtr()->head()?lastXCombPtr()->lastY():lastXCombPtr()->head()->lastY()))<5.01
gotone = true;
ptJetSum += jetPtWeight(*mom)*mom->perp();
} else if ( theIncludeMT ) {
nonJetMomentum += *mom;
}
}
if ( !gotone && lastXCombPtr()->willPassCuts() )
throw Exception() << "MatchboxHtScale::renormalizationScale(): "
<< "No jets could be found. Check your setup."
<< "\nHint: The HT scale is defined with a PtMin cut on jets. (default:) "
<< "\n set /Herwig/MatrixElements/Matchbox/ScalesHTScale:JetPtCut 15.*GeV "
<< Exception::runerror;
Energy mtNonJetSum =
sqrt(nonJetMomentum.perp2() + nonJetMomentum.m2());
mtNonJetSum *= theMTFactor;
ptJetSum *= theHTFactor;
return sqr(ptJetSum + mtNonJetSum);
}
Energy2 MatchboxHtScale::factorizationScale() const {
return renormalizationScale();
}
// If needed, insert default implementations of virtual function defined
// in the InterfacedBase class here (using ThePEG-interfaced-impl in Emacs).
void MatchboxHtScale::persistentOutput(PersistentOStream & os) const {
os << theJetFinder << theIncludeMT << theHTFactor << theMTFactor << ounit(theScalePtCut,GeV);
}
void MatchboxHtScale::persistentInput(PersistentIStream & is, int) {
is >> theJetFinder >> theIncludeMT >> theHTFactor >> theMTFactor >> iunit(theScalePtCut,GeV);
}
// *** Attention *** The following static variable is needed for the type
// description system in ThePEG. Please check that the template arguments
// are correct (the class and its base class), and that the constructor
// arguments are correct (the class name and the name of the dynamically
// loadable library where the class implementation can be found).
DescribeClass<MatchboxHtScale,MatchboxScaleChoice>
describeHerwigMatchboxHtScale("Herwig::MatchboxHtScale", "HwMatchboxScales.so");
void MatchboxHtScale::Init() {
static ClassDocumentation<MatchboxHtScale> documentation
("MatchboxHtScale implements scale choices related to transverse momenta.");
static Reference<MatchboxHtScale,JetFinder> interfaceJetFinder
("JetFinder",
"A reference to the jet finder.",
&MatchboxHtScale::theJetFinder, false, false, true, false, false);
static Switch<MatchboxHtScale,bool> interfaceIncludeMT
("IncludeMT",
"Include the transverse masses of the non-jet objects.",
&MatchboxHtScale::theIncludeMT, false, false, false);
static SwitchOption interfaceIncludeMTYes
(interfaceIncludeMT,
"Yes",
"",
true);
static SwitchOption interfaceIncludeMTNo
(interfaceIncludeMT,
"No",
"",
false);
static Parameter<MatchboxHtScale,double> interfaceHTFactor
("HTFactor",
"A factor to scale the HT contribution.",
&MatchboxHtScale::theHTFactor, 1.0, 0.0, 0,
false, false, Interface::lowerlim);
static Parameter<MatchboxHtScale,double> interfaceMTFactor
("MTFactor",
"A factor to scale the MT contribution.",
&MatchboxHtScale::theMTFactor, 1.0, 0.0, 0,
false, false, Interface::lowerlim);
static Parameter<MatchboxHtScale,Energy> interfaceScalePtCut
("JetPtCut",
"The Pt cut to define jets in the sum.",
- &MatchboxHtScale::theScalePtCut, 15.*GeV, 0.*GeV, 0.*GeV,
+ &MatchboxHtScale::theScalePtCut, GeV, 15.*GeV, 0.*GeV, 0.*GeV,
false, false, Interface::lowerlim);
}
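A standalone sketch of the scale choice implemented above, with plain doubles in place of ThePEG's dimensionful types and unit jet-pt weights assumed: the squared scale is (HTFactor * sum of jet pt above the cut + MTFactor * transverse mass of the non-jet system)^2.

#include <cmath>
#include <vector>

// Illustrative only; in Herwig the jets come from the configured JetFinder.
double htScaleSquared(const std::vector<double>& jetPt,  // jet pt above the cut
                      double nonJetPt2, double nonJetM2, // non-jet system
                      double htFactor = 1.0, double mtFactor = 1.0) {
  double ht = 0.0;
  for (double pt : jetPt) ht += pt;            // weighted sum, unit weights here
  double mt = std::sqrt(nonJetPt2 + nonJetM2); // transverse mass of the rest
  double scale = htFactor * ht + mtFactor * mt;
  return scale * scale;                        // returned as a squared scale
}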
diff --git a/MatrixElement/Matchbox/Utility/AmplitudeCache.h b/MatrixElement/Matchbox/Utility/AmplitudeCache.h
--- a/MatrixElement/Matchbox/Utility/AmplitudeCache.h
+++ b/MatrixElement/Matchbox/Utility/AmplitudeCache.h
@@ -1,346 +1,346 @@
// -*- C++ -*-
//
// AmplitudeCache.h is a part of Herwig - A multi-purpose Monte Carlo event generator
// Copyright (C) 2002-2012 The Herwig Collaboration
//
// Herwig is licenced under version 2 of the GPL, see COPYING for details.
// Please respect the MCnet academic guidelines, see GUIDELINES for details.
//
#ifndef HERWIG_AmplitudeCache_H
#define HERWIG_AmplitudeCache_H
#include "Herwig/MatrixElement/Matchbox/Utility/SpinorHelicity.h"
#include "ThePEG/Config/algorithm.h"
+#include <array>
namespace Herwig {
using namespace ThePEG;
+using std::array;
namespace SpinorHelicity {
/**
* \ingroup Matchbox
* \author Simon Platzer
*
* \brief Caching for amplitudes using spinor helicity techniques.
*
*/
-template<class AmplitudeKey>
+template<typename AmplitudeKey>
class AmplitudeCache {
typedef map<AmplitudeKey,pair<bool,Complex> > AmplitudeCacheMap;
typedef map<AmplitudeKey,pair<bool,LorentzVector<Complex> > > CurrentCacheMap;
/**
+ * Maximum N we can handle; SYM_N is the storage size for the upper triangle of a symmetric N x N matrix
+ */
+ enum { MAX_N = 7, SYM_N = MAX_N*(MAX_N+1)/2 };
+
+ /**
* The number of points
*/
int theNPoints;
/**
* The energy scale to obtain dimensionless
* quantities.
*/
mutable Energy theScale;
/**
* Masses indexed by partons
*/
- mutable vector<double> theMasses;
+ mutable array<double,MAX_N> theMasses;
/**
* Momenta indexed by partons
*/
- mutable vector<LorentzMomentum> theMomenta;
+ mutable array<LorentzMomentum,MAX_N> theMomenta;
/**
* Crossing signs indexed by partons
*/
- mutable vector<int> theCrossingSigns;
+ mutable array<int,MAX_N> theCrossingSigns;
/**
* Plus spinors indexed by partons
*/
- mutable vector<PlusSpinor> thePlusSpinors;
+ mutable array<PlusSpinor,MAX_N> thePlusSpinors;
/**
* Plus conjugate spinors indexed by partons
*/
- mutable vector<PlusConjugateSpinor> thePlusConjugateSpinors;
+ mutable array<PlusConjugateSpinor,MAX_N> thePlusConjugateSpinors;
/**
* Invariants indexed by partons
*/
- mutable vector<vector<double> > theInvariants;
+ mutable array<double,SYM_N> theInvariants;
/**
* Flag products to be recalculated
*/
- mutable vector<vector<bool> > getInvariant;
+ mutable array<bool,SYM_N> getInvariant;
/**
* Spinor products indexed by partons
*/
- mutable vector<vector<Complex> > thePlusProducts;
+ mutable array<Complex,SYM_N> thePlusProducts;
/**
* Flag products to be recalculated
*/
- mutable vector<vector<bool> > getPlusProduct;
+ mutable array<bool,SYM_N> getPlusProduct;
/**
* Spinor currents indexed by partons
*/
- mutable vector<vector<LorentzVector<Complex> > > thePlusCurrents;
+ mutable array<LorentzVector<Complex>,SYM_N> thePlusCurrents;
/**
* Flag currents to be recalculated
*/
- mutable vector<vector<bool> > getPlusCurrent;
+ mutable array<bool,SYM_N> getPlusCurrent;
/**
* Cache intermediate amplitudes by index
*/
mutable AmplitudeCacheMap theCachedAmplitudes;
/**
* The last query for a cached amplitude
*/
mutable typename AmplitudeCacheMap::iterator theLastAmplitude;
/**
* Cache intermediate currents by index
*/
mutable CurrentCacheMap theCachedCurrents;
/**
* The last query for a cached current
*/
mutable typename CurrentCacheMap::iterator theLastCurrent;
/**
+ * Helper function to index symmetric arrays, assumes i <= j.
+ * Usual indexing function (N*i + j) corrected by triangle number for i-th row.
+ */
+ inline size_t idx(size_t i, size_t j) const {
+ return MAX_N * i - (i + 1) * i / 2 + j;
+ }
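+ // Worked example for MAX_N = 7: the upper triangle is stored row by row,
+ // so idx(0,0)=0 ... idx(0,6)=6, idx(1,1)=7 ... idx(1,6)=12, idx(2,2)=13,
+ // ..., idx(6,6)=27 = SYM_N-1, covering all SYM_N = 28 slots.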
+
+ /**
* Helper to reset flags
*/
struct boolResetter {
- void operator()(vector<bool>::reference flag) const {
- flag = true;
- }
void operator()(pair<const AmplitudeKey,pair<bool,Complex> >& flag) const {
flag.second.first = true;
}
void operator()(pair<const AmplitudeKey,pair<bool,LorentzVector<Complex> > >& flag) const {
flag.second.first = true;
}
};
- /**
- * Helper to reset flags
- */
- struct boolVectorResetter {
- void operator()(vector<bool>& flags) const {
- for_each(flags.begin(),flags.end(),boolResetter());
- }
- };
-
public:
/**
* Constructor
*/
- AmplitudeCache()
- : theNPoints(0) {}
+ AmplitudeCache() : theNPoints(0) {}
/**
* Prepare for n-point amplitude
*/
void nPoints(int n);
/**
* Return the number of points
*/
int nPoints() const {
return theNPoints;
}
/**
* Set the energy scale to obtain dimensionless
* quantities and flag all quantities to be recalculated.
*/
void amplitudeScale(Energy s) const;
/**
* Set the momentum for the k'th parton
* and its associated mass.
*/
void momentum(int k, const LorentzMomentum& p,
bool getSpinors = true,
Energy mass = ZERO) const;
/**
* Reset flags
*/
void reset() const;
public:
/**
* Return the momentum for the k'th parton
*/
LorentzVector<double> momentum(int k) const { return theMomenta[k]/theScale; }
/**
* Get the energy scale to obtain dimensionless
* quantities and flag all quantities to be recalculated.
*/
Energy amplitudeScale() const { return theScale; }
/**
* Return the mass associated to the k'th parton
*/
double mass(int k) const { return theMasses[k]; }
/**
* Return the crossing sign for the
* i'th parton
*/
int crossingSign(int i) const { return theCrossingSigns[i]; }
/**
* Return the crossing sign for the
* i'th and j'th parton
*/
double crossingSign(int i, int j) const { return theCrossingSigns[i]*theCrossingSigns[j]; }
/**
* Return (ij)
*/
double invariant(int i, int j) const {
- if ( i== j )
- return 0.;
- if ( i > j )
- swap(i,j);
- if ( getInvariant[i][j] ) {
- getInvariant[i][j] = false;
- theInvariants[i][j] = 2.*(momentum(i)*momentum(j));
+ if ( i == j ) return 0.;
+ if ( i > j ) swap(i,j);
+ if ( getInvariant[idx(i,j)] ) {
+ getInvariant[idx(i,j)] = false;
+ theInvariants[idx(i,j)] = 2.*(momentum(i)*momentum(j));
}
- return theInvariants[i][j];
+ return theInvariants[idx(i,j)];
}
/**
* Return <ij>
*/
Complex plusProduct(int i, int j) const {
if ( i== j )
return 0.;
bool swapij = (i > j);
if ( swapij )
swap(i,j);
- if ( getPlusProduct[i][j] ) {
- getPlusProduct[i][j] = false;
- thePlusProducts[i][j] =
+ if ( getPlusProduct[idx(i,j)] ) {
+ getPlusProduct[idx(i,j)] = false;
+ thePlusProducts[idx(i,j)] =
PlusSpinorProduct(thePlusConjugateSpinors[i],
thePlusSpinors[j]).eval() / theScale;
}
- return swapij ? -thePlusProducts[i][j] : thePlusProducts[i][j];
+ return swapij ? -thePlusProducts[idx(i,j)] : thePlusProducts[idx(i,j)];
}
/**
* Return [ij]
*/
Complex minusProduct(int i, int j) const {
if ( i== j )
return 0.;
return -crossingSign(i,j)*conj(plusProduct(i,j));
}
/**
* Return <i|\gamma^\mu|j]
*/
LorentzVector<Complex> plusCurrent(int i, int j) const {
bool swapij = (i > j);
if ( swapij )
swap(i,j);
- if ( getPlusCurrent[i][j] ) {
- getPlusCurrent[i][j] = false;
+ if ( getPlusCurrent[idx(i,j)] ) {
+ getPlusCurrent[idx(i,j)] = false;
if ( i != j ) {
- thePlusCurrents[i][j] =
+ thePlusCurrents[idx(i,j)] =
PlusSpinorCurrent(thePlusConjugateSpinors[i],
MinusSpinor(theMomenta[j])).eval() / theScale;
} else {
- thePlusCurrents[i][j] = 2.*momentum(i);
+ thePlusCurrents[idx(i,j)] = 2.*momentum(i);
}
}
- return swapij ? crossingSign(i,j)*thePlusCurrents[i][j].conjugate() : thePlusCurrents[i][j];
+ return swapij ? crossingSign(i,j)*thePlusCurrents[idx(i,j)].conjugate() : thePlusCurrents[idx(i,j)];
}
/**
* Return [i|\gamma^\mu|j>
*/
LorentzVector<Complex> minusCurrent(int i, int j) const {
return plusCurrent(j,i);
}
public:
/**
* Return true, if the given amplitude
* needs to be recalculated.
*/
bool getAmplitude(const AmplitudeKey& key) const {
static Complex czero;
if ( ( theLastAmplitude = theCachedAmplitudes.find(key) )
== theCachedAmplitudes.end() ) {
theLastAmplitude = theCachedAmplitudes.insert(make_pair(key,make_pair(true,czero))).first;
}
return theLastAmplitude->second.first;
}
/**
* Cache an amplitude
*/
void cacheAmplitude(Complex amp) const {
theLastAmplitude->second = make_pair(false,amp);
}
/**
* Return a cached amplitude
*/
const Complex& cachedAmplitude() const {
return theLastAmplitude->second.second;
}
/**
* Return true, if the given current
* needs to be recalculated.
*/
bool getCurrent(const AmplitudeKey& key) const {
static LorentzVector<Complex> czero;
if ( ( theLastCurrent = theCachedCurrents.find(key) )
== theCachedCurrents.end() ) {
theLastCurrent = theCachedCurrents.insert(make_pair(key,make_pair(true,czero))).first;
}
return theLastCurrent->second.first;
}
/**
* Cache a current
*/
void cacheCurrent(const LorentzVector<Complex>& curr) const {
theLastCurrent->second = make_pair(false,curr);
}
/**
* Return a cached current
*/
const LorentzVector<Complex>& cachedCurrent() const {
return theLastCurrent->second.second;
}
};
}
}
#include "AmplitudeCache.tcc"
#endif // HERWIG_AmplitudeCache_H
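A short sketch of the check-then-fill pattern the cache above is designed for; "key" and computeAmplitude() are placeholders rather than names from the sources.

// Inside an amplitude class deriving from AmplitudeCache<AmplitudeKey>:
//   if ( getAmplitude(key) ) {            // true => entry is stale or missing
//     cacheAmplitude(computeAmplitude()); // store the freshly evaluated value
//   }
//   Complex amp = cachedAmplitude();      // value stored for the last queried key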
diff --git a/MatrixElement/Matchbox/Utility/AmplitudeCache.tcc b/MatrixElement/Matchbox/Utility/AmplitudeCache.tcc
--- a/MatrixElement/Matchbox/Utility/AmplitudeCache.tcc
+++ b/MatrixElement/Matchbox/Utility/AmplitudeCache.tcc
@@ -1,73 +1,60 @@
// -*- C++ -*-
//
// AmplitudeCache.cc is a part of Herwig - A multi-purpose Monte Carlo event generator
// Copyright (C) 2002-2012 The Herwig Collaboration
//
// Herwig is licenced under version 2 of the GPL, see COPYING for details.
// Please respect the MCnet academic guidelines, see GUIDELINES for details.
//
namespace Herwig {
namespace SpinorHelicity {
-template<class AmplitudeKey>
+template<typename AmplitudeKey>
void AmplitudeCache<AmplitudeKey>::nPoints(int n) {
+ assert( n <= MAX_N );
+
theNPoints = n;
- theMasses.clear();
- theMomenta.clear();
- theCrossingSigns.clear();
- thePlusSpinors.clear();
- thePlusConjugateSpinors.clear();
- theInvariants.clear();
- thePlusProducts.clear();
- thePlusCurrents.clear();
- getInvariant.clear();
- getPlusProduct.clear();
- getPlusCurrent.clear();
-
- theMasses.resize(n);
- theMomenta.resize(n);
- theCrossingSigns.resize(n);
- thePlusSpinors.resize(n);
- thePlusConjugateSpinors.resize(n);
- theInvariants.resize(n,vector<double>(n));
- thePlusProducts.resize(n,vector<Complex>(n));
- thePlusCurrents.resize(n,vector<LorentzVector<Complex> >(n));
- getInvariant.resize(n,vector<bool>(n));
- getPlusProduct.resize(n,vector<bool>(n));
- getPlusCurrent.resize(n,vector<bool>(n));
+ theMasses.fill({});
+ theMomenta.fill({});
+ theCrossingSigns.fill({});
+ thePlusSpinors.fill(PlusSpinor());
+ thePlusConjugateSpinors.fill(PlusConjugateSpinor());
+ theInvariants.fill({});
+ thePlusProducts.fill({});
+ thePlusCurrents.fill({});
reset();
}
-template<class AmplitudeKey>
+template<typename AmplitudeKey>
void AmplitudeCache<AmplitudeKey>::amplitudeScale(Energy s) const {
theScale = s;
reset();
}
-template<class AmplitudeKey>
+template<typename AmplitudeKey>
void AmplitudeCache<AmplitudeKey>::momentum(int k, const LorentzMomentum& p,
bool getSpinors,
Energy mass) const {
theMasses[k] = mass/theScale;
theMomenta[k] = p;
if ( getSpinors ) {
theCrossingSigns[k] = p.t() > ZERO ? 1 : -1;
thePlusSpinors[k] = PlusSpinor(p);
thePlusConjugateSpinors[k] = PlusConjugateSpinor(p);
}
}
-template<class AmplitudeKey>
+template<typename AmplitudeKey>
void AmplitudeCache<AmplitudeKey>::reset() const {
- for_each(getInvariant.begin(),getInvariant.end(),boolVectorResetter());
- for_each(getPlusProduct.begin(),getPlusProduct.end(),boolVectorResetter());
- for_each(getPlusCurrent.begin(),getPlusCurrent.end(),boolVectorResetter());
+ getInvariant.fill(true);
+ getPlusProduct.fill(true);
+ getPlusCurrent.fill(true);
for_each(theCachedAmplitudes.begin(),theCachedAmplitudes.end(),boolResetter());
for_each(theCachedCurrents.begin(),theCachedCurrents.end(),boolResetter());
}
}}
diff --git a/Sampling/BinSampler.cc b/Sampling/BinSampler.cc
--- a/Sampling/BinSampler.cc
+++ b/Sampling/BinSampler.cc
@@ -1,717 +1,728 @@
// -*- C++ -*-
//
// BinSampler.cc is a part of Herwig - A multi-purpose Monte Carlo event generator
// Copyright (C) 2002-2012 The Herwig Collaboration
//
// Herwig is licenced under version 2 of the GPL, see COPYING for details.
// Please respect the MCnet academic guidelines, see GUIDELINES for details.
//
//
// This is the implementation of the non-inlined, non-templated member
// functions of the BinSampler class.
//
#include "BinSampler.h"
#include "ThePEG/Interface/ClassDocumentation.h"
#include "ThePEG/EventRecord/Particle.h"
#include "ThePEG/Repository/UseRandom.h"
#include "ThePEG/Repository/EventGenerator.h"
#include "ThePEG/Utilities/DescribeClass.h"
#include "ThePEG/Repository/Repository.h"
#include "ThePEG/Interface/Parameter.h"
#include "ThePEG/Interface/Switch.h"
#include "ThePEG/Persistency/PersistentOStream.h"
#include "ThePEG/Persistency/PersistentIStream.h"
#include "ThePEG/Handlers/StandardEventHandler.h"
#include "ThePEG/Handlers/StandardXComb.h"
#include <boost/progress.hpp>
#include "GeneralSampler.h"
using namespace Herwig;
BinSampler::BinSampler()
: MultiIterationStatistics(),
theBias(1.),
theWeighted(false),
theInitialPoints(1000000),
theNIterations(1),
theEnhancementFactor(1.0),
theNonZeroInPresampling(false),
theHalfPoints(false),
theMaxNewMax(30),
theReferenceWeight(1.0),
theBin(-1),
theInitialized(false),
theRemapperPoints(0),
theRemapChannelDimension(false),
theLuminosityMapperBins(0),
theGeneralMapperBins(0),
theRemapperMinSelection(0.00001),
theIntegrated(false),
theRemappersFilled(false),
- theHasGrids(false) {}
+ theHasGrids(false),
+ theKappa(1.){}
BinSampler::~BinSampler() {}
IBPtr BinSampler::clone() const {
return new_ptr(*this);
}
IBPtr BinSampler::fullclone() const {
return new_ptr(*this);
}
void BinSampler::sampler(Ptr<GeneralSampler>::tptr s) {
theSampler = s;
}
Ptr<GeneralSampler>::tptr BinSampler::sampler() const {
return theSampler;
}
string BinSampler::process() const {
ostringstream os("");
const StandardEventHandler& eh = *theEventHandler;
const StandardXComb& xc = *eh.xCombs()[theBin];
os << xc.matrixElement()->name() << " : ";
os << xc.mePartonData()[0]->PDGName() << " "
<< xc.mePartonData()[1]->PDGName() << " -> ";
for ( cPDVector::const_iterator pid =
xc.mePartonData().begin() + 2;
pid != xc.mePartonData().end(); ++pid )
os << (**pid).PDGName() << " ";
return os.str();
}
string BinSampler::shortprocess() const {
ostringstream os("");
const StandardEventHandler& eh = *theEventHandler;
const StandardXComb& xc = *eh.xCombs()[theBin];
os << xc.mePartonData()[0]->id() << " "
<< xc.mePartonData()[1]->id() << " : ";
for ( cPDVector::const_iterator pid =
xc.mePartonData().begin() + 2;
pid != xc.mePartonData().end(); ++pid )
os << (**pid).id() << " ";
return os.str();
}
string BinSampler::id() const {
ostringstream os("");
const StandardEventHandler& eh = *theEventHandler;
const StandardXComb& xc = *eh.xCombs()[theBin];
string name = xc.matrixElement()->name();
string::size_type i = name.find_first_of("[");
string nameFirst = name.substr(0,i);
i = name.find_first_of("]");
string nameSecond = name.substr(i+1);
os << nameFirst << nameSecond << ":";
for ( cPDVector::const_iterator pid =
xc.mePartonData().begin();
pid != xc.mePartonData().end(); ++pid )
os << (**pid).id() << (pid != (--xc.mePartonData().end()) ? "," : "");
return os.str();
}
double BinSampler::evaluate(vector<double> p,
bool remap) {
double w = 1.0;
if ( remap && !remappers.empty() ) {
for ( size_t k = 0; k < p.size(); ++k ) {
map<size_t,Remapper>::const_iterator r =
remappers.find(k);
if ( r != remappers.end() ) {
pair<double,double> f = r->second.generate(p[k]);
p[k] = f.first;
w /= f.second;
}
}
}
try {
w *= eventHandler()->dSigDR(p) / nanobarn;
} catch (Veto&) {
w = 0.0;
} catch (...) {
throw;
}
if (randomNumberString()!="")
for ( size_t k = 0; k < p.size(); ++k ) {
RandomNumberHistograms[RandomNumberIndex(id(),k)].first.book(p[k],w);
RandomNumberHistograms[RandomNumberIndex(id(),k)].second+=w;
}
return w;
}
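// generate() below performs a partial unweighting: with reference weight
// w_ref and the Kappa parameter kappa, a point of weight w is kept with
// probability p = min(|w|, kappa*w_ref) / (kappa*w_ref) and, if kept, is
// assigned weight sign(w)*max(|w|, kappa*w_ref), so the expectation value is
// preserved (e.g. |w| = 0.2, w_ref = 1, kappa = 1 gives p = 0.2 and an
// accepted weight of 1, reproducing 0.2 on average).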
double BinSampler::generate() {
double w = 1.;
for ( size_t k = 0; k < lastPoint().size(); ++k ) {
lastPoint()[k] = UseRandom::rnd();
}
try {
w = evaluate(lastPoint());
} catch (Veto&) {
w = 0.0;
} catch (...) {
throw;
}
if ( !weighted() && initialized() ) {
- double p = min(abs(w),referenceWeight())/referenceWeight();
+ double p = min(abs(w),kappa()*referenceWeight())/(kappa()*referenceWeight());
double sign = w >= 0. ? 1. : -1.;
if ( p < 1 && UseRandom::rnd() > p )
w = 0.;
else
- w = sign*max(abs(w),referenceWeight());
+ w = sign*max(abs(w),referenceWeight()*kappa());
}
select(w);
if ( w != 0.0 )
accept();
+ assert(kappa()==1.||sampler()->almostUnweighted());
return w;
}
void BinSampler::fillRemappers(bool progress) {
if ( remappers.empty() )
return;
unsigned long nanPoints = 0;
boost::progress_display* progressBar = 0;
if ( progress ) {
Repository::clog() << "warming up " << process();
progressBar = new boost::progress_display(theRemapperPoints,Repository::clog());
}
unsigned long countzero =0;
for ( unsigned long k = 0; k < theRemapperPoints; ++k,++countzero ) {
if (countzero>=theRemapperPoints)break;
double w = 1.;
for ( size_t j = 0; j < lastPoint().size(); ++j ) {
lastPoint()[j] = UseRandom::rnd();
}
try {
w = evaluate(lastPoint(),false);
} catch (Veto&) {
w = 0.0;
} catch (...) {
throw;
}
if ( isnan(w) || isinf(w) )
++nanPoints;
if ( theNonZeroInPresampling && w==0. ){
k--;
continue;
}
if ( w != 0.0 ) {
countzero=0;
for ( map<size_t,Remapper>::iterator r = remappers.begin();
r != remappers.end(); ++r )
r->second.fill(lastPoint()[r->first],w);
}
if ( progressBar )
++(*progressBar);
}
if ( progressBar ) {
delete progressBar;
}
if ( nanPoints ) {
Repository::clog() << "Warning: " << nanPoints
<< " out of " << theRemapperPoints << " points with nan or inf "
<< "weight encountered while filling remappers.\n" << flush;
}
}
void BinSampler::saveIntegrationData() const {
XML::Element stats = MultiIterationStatistics::toXML();
stats.appendAttribute("process",id());
sampler()->grids().append(stats);
}
void BinSampler::readIntegrationData() {
if ( theIntegrated )
return;
bool haveStats = false;
list<XML::Element>::iterator sit = sampler()->grids().children().begin();
for ( ; sit != sampler()->grids().children().end(); ++sit ) {
if ( sit->type() != XML::ElementTypes::Element )
continue;
if ( sit->name() != "MultiIterationStatistics" )
continue;
string proc;
sit->getFromAttribute("process",proc);
if ( proc == id() ) {
haveStats = true;
break;
}
}
if ( haveStats ) {
MultiIterationStatistics::fromXML(*sit);
sampler()->grids().erase(sit);
theIntegrated = true;
} else {
throw Exception()
<< "\n--------------------------------------------------------------------------------\n\n"
<< "Expected integration data.\n\n"
<< "* When using the build setup make sure the integrate command has been run.\n\n"
<< "* Check the [EventGenerator].log file for further information.\n\n"
<< "* Make sure that the Herwig folder can be found and that it contains a HerwigGrids.xml file.\n\n"
<< "* If you have split the integration jobs, make sure that each integration job was finished.\n"
<< " Afterwards delete the global HerwigGrids.xml file in the Herwig subfolder\n"
<< " to automatically create an updated version of the global HerwigGrids.xml file.\n\n"
<< "--------------------------------------------------------------------------------\n"
<< Exception::abortnow;
}
}
void BinSampler::saveRemappers() const {
if ( remappers.empty() )
return;
XML::Element maps(XML::ElementTypes::Element,"Remappers");
maps.appendAttribute("process",id());
for ( map<size_t,Remapper>::const_iterator r = remappers.begin();
r != remappers.end(); ++r ) {
XML::Element rmap = r->second.toXML();
rmap.appendAttribute("dimension",r->first);
maps.append(rmap);
}
sampler()->grids().append(maps);
}
void BinSampler::setupRemappers(bool progress) {
if ( !theRemapperPoints )
return;
if ( theRemappersFilled )
return;
lastPoint().resize(dimension());
bool haveGrid = false;
list<XML::Element>::iterator git = sampler()->grids().children().begin();
for ( ; git != sampler()->grids().children().end(); ++git ) {
if ( git->type() != XML::ElementTypes::Element )
continue;
if ( git->name() != "Remappers" )
continue;
string proc;
git->getFromAttribute("process",proc);
if ( proc == id() ) {
haveGrid = true;
break;
}
}
if ( haveGrid ) {
for ( list<XML::Element>::iterator cit = git->children().begin();
cit != git->children().end(); ++cit ) {
if ( cit->type() != XML::ElementTypes::Element )
continue;
if ( cit->name() != "Remapper" )
continue;
size_t dimension = 0;
cit->getFromAttribute("dimension",dimension);
remappers[dimension].fromXML(*cit);
}
sampler()->grids().erase(git);
}
if ( !haveGrid ) {
const StandardEventHandler& eh = *eventHandler();
const StandardXComb& xc = *eh.xCombs()[bin()];
const pair<int,int>& pdims = xc.partonDimensions();
set<int> remapped;
if ( theRemapChannelDimension && xc.diagrams().size() > 1 &&
dimension() > pdims.first + pdims.second ) {
remappers[pdims.first] = Remapper(xc.diagrams().size(),theRemapperMinSelection,false);
remapped.insert(pdims.first);
}
if ( theLuminosityMapperBins > 1 && dimension() >= pdims.first + pdims.second ) {
for ( int n = 0; n < pdims.first; ++n ) {
remappers[n] = Remapper(theLuminosityMapperBins,theRemapperMinSelection,true);
remapped.insert(n);
}
for ( int n = dimension() - pdims.second; n < dimension(); ++n ) {
remappers[n] = Remapper(theLuminosityMapperBins,theRemapperMinSelection,true);
remapped.insert(n);
}
}
if ( theGeneralMapperBins > 1 ) {
for ( int n = 0; n < dimension(); n++ ) {
if ( remapped.find(n) == remapped.end() ) {
remappers[n] = Remapper(theGeneralMapperBins,theRemapperMinSelection,true);
remapped.insert(n);
}
}
}
fillRemappers(progress);
for ( map<size_t,Remapper>::iterator r = remappers.begin();
r != remappers.end(); ++r ) {
r->second.finalize();
}
}
theRemappersFilled = true;
}
void BinSampler::runIteration(unsigned long points, bool progress) {
boost::progress_display* progressBar = 0;
if ( progress ) {
Repository::clog() << "integrating " << process() << " , iteration "
<< (iterations().size() + 1);
progressBar = new boost::progress_display(points,Repository::clog());
}
double w=0.;
double maxweight=0;
int numlastmax=0;
unsigned long countzero =0;
int newmax=0;
for ( unsigned long k = 0; k < points; ++k,++countzero ) {
if (countzero>=points)break;
w=abs(generate());
if(theNonZeroInPresampling && w==0.0){
k--;
continue;
}
if (w!=0.0)
countzero =0;
numlastmax++;
if (theHalfPoints&&maxweight<w&&
numlastmax<(int)(points/2.)){
if(++newmax>theMaxNewMax){
throw Exception()
<< "\n--------------------------------------------------------------------------------\n\n"
<< "To many new Maxima.\n\n"
<< "* With the option:\n\n"
<< "* set Sampler:BinSampler:HalfPoints Yes\n\n"
<< "* for every new maximum weight found until the half of the persampling points\n"
<< "* the counter is set to zero. We count the number of new maxima.\n"
<< "* You have reached: "<<newmax<<"\n"
<< "* Did you apply reasonable cuts to the process?\n"
<< "* You can set the maximum allowed new maxima by:"
<< "* set Sampler:BinSampler:MaxNewMax N\n\n"
<< "--------------------------------------------------------------------------------\n"
<< Exception::abortnow;
}
maxweight=w;
k=0;
numlastmax=0;
}
if ( progress ) {
++(*progressBar);
}
}
if ( progress ) {
Repository::clog() << "integrated ( "
<< averageWeight() << " +/- " << sqrt(averageWeightVariance())
<< " ) nb\nepsilon = "
<< (abs(maxWeight()) != 0. ? averageAbsWeight()/abs(maxWeight()) : 0.);
if ( !iterations().empty() )
Repository::clog() << " chi2 = " << chi2();
Repository::clog() << "\n";
Repository::clog() << "--------------------------------------------------------------------------------\n";
}
if ( progressBar )
delete progressBar;
}
void BinSampler::initialize(bool progress) {
lastPoint().resize(dimension());
if (randomNumberString()!="")
for(size_t i=0;i<lastPoint().size();i++){
RandomNumberHistograms[RandomNumberIndex(id(),i)] = make_pair( RandomNumberHistogram(),0.);
}
if ( initialized() )
return;
if ( !sampler()->grids().children().empty() ) {
nIterations(1);
}
if ( !integrated() ) {
unsigned long points = initialPoints();
for ( unsigned long k = 0; k < nIterations(); ++k ) {
runIteration(points,progress);
if ( k < nIterations() - 1 ) {
points = (unsigned long)(points*enhancementFactor());
adapt();
nextIteration();
}
}
}
isInitialized();
}
void BinSampler::finalize(bool){
if (theRandomNumbers!="")
for ( map<RandomNumberIndex,pair<RandomNumberHistogram,double> >::
const_iterator b = RandomNumberHistograms.begin();
b != RandomNumberHistograms.end(); ++b ) {
b->second.first.dump(randomNumberString(), b->first.first,shortprocess(),b->first.second);
}
}
BinSampler::RandomNumberHistogram::
RandomNumberHistogram(double low,
double up,
unsigned int nbins)
: lower(low) {
nbins = nbins + 1;
double c = up / (nbins-1.);
for ( unsigned int k = 1; k < nbins; ++k ) {
bins[low+c*k] = 0.;
binsw1[low+c*k] = 0.;
}
}
void BinSampler::RandomNumberHistogram::
dump(const std::string& folder,const std::string& prefix, const std::string& process,
const int NR) const {
ostringstream fname("");
std::string prefix2;
std::string prefix3=prefix;
std::remove_copy(prefix.begin(), prefix.end(), std::back_inserter(prefix2), '.');
prefix3=prefix2;prefix2.clear();
std::remove_copy(prefix3.begin(), prefix3.end(), std::back_inserter(prefix2), ':');
prefix3=prefix2;prefix2.clear();
std::remove_copy(prefix3.begin(), prefix3.end(), std::back_inserter(prefix2), ',');
fname << "RN-"<< NR ;
ofstream out((folder+"/"+prefix2+fname.str()+".dat").c_str());
double sumofweights=0.;
for ( map<double,double >::const_iterator b = bins.begin();b != bins.end(); ++b )
sumofweights+=b->second;
double sumofweights2=0.;
for ( map<double,double >::const_iterator b = binsw1.begin();b != binsw1.end(); ++b )
sumofweights2+=b->second;
map<double,double >::const_iterator b2 = binsw1.begin();
if ( sumofweights == 0 ) {
cerr << "Not enough statistic accumulated for "
<< process << " skipping random number diagnostic.\n"
<< flush;
return;
}
for ( map<double,double >::const_iterator b = bins.begin();
b != bins.end(); ++b, ++b2) {
out << " " << b->first
<< " " << b->second/sumofweights*100.
<< " " << b2->second/sumofweights2*100.
<< "\n" << flush;
}
double xmin = -0.01;
double xmax = 1.01;
ofstream gpout((folder+"/"+prefix2+fname.str()+".gp").c_str());
gpout << "set terminal epslatex color solid\n"
<< "set output '" << prefix2+fname.str() << "-plot.tex'\n"
<< "set xrange [" << xmin << ":" << xmax << "]\n";
gpout << "set xlabel 'rn "<<NR <<"' \n";
gpout << "set size 0.5,0.6\n";
gpout << "plot '" << prefix2+fname.str()
<< ".dat' u ($1):($2) w boxes lc rgbcolor \"blue\" t '{\\tiny "<<process <<" }',";
gpout << " '" << prefix2+fname.str();
gpout << ".dat' u ($1):($3) w boxes lc rgbcolor \"red\" t '';";
gpout << "reset\n";
}
// If needed, insert default implementations of virtual function defined
// in the InterfacedBase class here (using ThePEG-interfaced-impl in Emacs).
void BinSampler::persistentOutput(PersistentOStream & os) const {
MultiIterationStatistics::put(os);
os << theBias << theWeighted << theInitialPoints << theNIterations
<< theEnhancementFactor << theNonZeroInPresampling << theHalfPoints
<< theMaxNewMax << theReferenceWeight
<< theBin << theInitialized << theLastPoint
<< theEventHandler << theSampler << theRandomNumbers
<< theRemapperPoints << theRemapChannelDimension
- << theLuminosityMapperBins << theGeneralMapperBins;
+ << theLuminosityMapperBins << theGeneralMapperBins << theKappa;
}
void BinSampler::persistentInput(PersistentIStream & is, int) {
MultiIterationStatistics::get(is);
is >> theBias >> theWeighted >> theInitialPoints >> theNIterations
>> theEnhancementFactor >> theNonZeroInPresampling >> theHalfPoints
>> theMaxNewMax >> theReferenceWeight
>> theBin >> theInitialized >> theLastPoint
>> theEventHandler >> theSampler >> theRandomNumbers
>> theRemapperPoints >> theRemapChannelDimension
- >> theLuminosityMapperBins >> theGeneralMapperBins;
+ >> theLuminosityMapperBins >> theGeneralMapperBins >> theKappa;
}
// *** Attention *** The following static variable is needed for the type
// description system in ThePEG. Please check that the template arguments
// are correct (the class and its base class), and that the constructor
// arguments are correct (the class name and the name of the dynamically
// loadable library where the class implementation can be found).
DescribeClass<BinSampler,MultiIterationStatistics>
describeHerwigBinSampler("Herwig::BinSampler", "HwSampling.so");
void BinSampler::Init() {
static ClassDocumentation<BinSampler> documentation
("BinSampler samples XCombs bins. This default implementation performs flat MC integration.");
static Parameter<BinSampler,unsigned long> interfaceInitialPoints
("InitialPoints",
"The number of points to use for initial integration.",
&BinSampler::theInitialPoints, 1000000, 1, 0,
false, false, Interface::lowerlim);
static Parameter<BinSampler,size_t> interfaceNIterations
("NIterations",
"The number of iterations to perform initially.",
&BinSampler::theNIterations, 1, 1, 0,
false, false, Interface::lowerlim);
static Parameter<BinSampler,double> interfaceEnhancementFactor
("EnhancementFactor",
"The enhancement factor for the number of points in the next iteration.",
&BinSampler::theEnhancementFactor, 2.0, 1.0, 0,
false, false, Interface::lowerlim);
static Switch<BinSampler,bool> interfaceNonZeroInPresampling
("NonZeroInPresampling",
"Switch on to count only non zero weights in presampling.",
&BinSampler::theNonZeroInPresampling, true, false, false);
static SwitchOption interfaceNonZeroInPresamplingYes
(interfaceNonZeroInPresampling,
"Yes",
"",
true);
static SwitchOption interfaceNonZeroInPresamplingNo
(interfaceNonZeroInPresampling,
"No",
"",
false);
static Switch<BinSampler,bool> interfaceHalfPoints
("HalfPoints",
"Switch on to reset the counter of points if new maximumis was found in the first 1/2 points.",
&BinSampler::theHalfPoints, true, false, false);
static SwitchOption interfaceHalfPointsYes
(interfaceHalfPoints,
"Yes",
"",
true);
static SwitchOption interfaceHalfPointsNo
(interfaceHalfPoints,
"No",
"",
false);
static Parameter<BinSampler,int> interfaceMaxNewMax
("MaxNewMax",
"The maximum number of allowed new maxima in combination with the HalfPoints option.",
&BinSampler::theMaxNewMax, 30, 1, 0,
false, false, Interface::lowerlim);
static Parameter<BinSampler,string> interfaceRandomNumbers
("RandomNumbers",
"Prefix for distributions of the random numbers.",
&BinSampler::theRandomNumbers, "",
false, false);
static Parameter<BinSampler,unsigned long> interfaceRemapperPoints
("RemapperPoints",
"The number of points to be used for filling remappers.",
&BinSampler::theRemapperPoints, 10000, 0, 0,
false, false, Interface::lowerlim);
static Switch<BinSampler,bool> interfaceRemapChannelDimension
("RemapChannelDimension",
"Switch on remapping of the channel dimension.",
&BinSampler::theRemapChannelDimension, true, false, false);
static SwitchOption interfaceRemapChannelDimensionYes
(interfaceRemapChannelDimension,
"Yes",
"",
true);
static SwitchOption interfaceRemapChannelDimensionNo
(interfaceRemapChannelDimension,
"No",
"",
false);
static Parameter<BinSampler,unsigned long> interfaceLuminosityMapperBins
("LuminosityMapperBins",
"The number of bins to be used for remapping parton luminosities.",
&BinSampler::theLuminosityMapperBins, 0, 0, 0,
false, false, Interface::lowerlim);
static Parameter<BinSampler,unsigned long> interfaceGeneralMapperBins
("GeneralMapperBins",
"The number of bins to be used for remapping other phase space dimensions.",
&BinSampler::theGeneralMapperBins, 0, 0, 0,
false, false, Interface::lowerlim);
static Parameter<BinSampler,double> interfaceRemapperMinSelection
("RemapperMinSelection",
"The minimum bin selection probability for remappers.",
&BinSampler::theRemapperMinSelection, 0.00001, 0.0, 1.0,
false, false, Interface::limited);
+
+ static Parameter<BinSampler,double> interfaceKappa
+ ("Kappa",
+ "In AllmostUnweighted mode unweight to Kappa ReferenceWeight.",
+ &BinSampler::theKappa, 1., 0.000001, 1.0,
+ false, false, Interface::limited);
+
+
+
}
diff --git a/Sampling/BinSampler.h b/Sampling/BinSampler.h
--- a/Sampling/BinSampler.h
+++ b/Sampling/BinSampler.h
@@ -1,567 +1,587 @@
// -*- C++ -*-
//
// BinSampler.h is a part of Herwig - A multi-purpose Monte Carlo event generator
// Copyright (C) 2002-2012 The Herwig Collaboration
//
// Herwig is licenced under version 2 of the GPL, see COPYING for details.
// Please respect the MCnet academic guidelines, see GUIDELINES for details.
//
#ifndef Herwig_BinSampler_H
#define Herwig_BinSampler_H
//
// This is the declaration of the BinSampler class.
//
#include "ThePEG/Handlers/StandardEventHandler.h"
#include "ThePEG/Utilities/Exception.h"
#include "ThePEG/Repository/UseRandom.h"
#include "MultiIterationStatistics.h"
#include "Remapper.h"
namespace Herwig {
using namespace ThePEG;
class GeneralSampler;
/**
* \ingroup Matchbox
* \author Simon Platzer
*
* \brief BinSampler samples XCombs bins. This default implementation
* performs flat MC integration.
*
* @see \ref BinSamplerInterfaces "The interfaces"
* defined for BinSampler.
*/
class BinSampler: public Herwig::MultiIterationStatistics {
public:
/** @name Standard constructors and destructors. */
//@{
/**
* The default constructor.
*/
BinSampler();
/**
* The destructor.
*/
virtual ~BinSampler();
//@}
public:
/**
* Clone this object.
*/
Ptr<BinSampler>::ptr cloneMe() const {
return dynamic_ptr_cast<Ptr<BinSampler>::ptr>(clone());
}
public:
/**
* Evaluate the cross section
*/
double evaluate(vector<double> p,
bool remap = true);
/**
* Return the bias with which this sampler is selected. The sampler
* needs to divide out this bias in its weight calculation.
*/
double bias() const { return theBias; }
/**
* Set the bias with which this sampler is selected.
*/
void bias(double b) { theBias = b; }
/**
* Set the event handler
*/
void eventHandler(tStdEHPtr eh) { theEventHandler = eh; }
/**
* Return the event handler
*/
tStdEHPtr eventHandler() const { return theEventHandler; }
/**
* Set the containing sampler
*/
void sampler(Ptr<GeneralSampler>::tptr);
/**
* Get the containing sampler
*/
Ptr<GeneralSampler>::tptr sampler() const;
/**
* Return the bin
*/
int bin() const { return theBin; }
/**
* Set the bin
*/
void bin(int b) { theBin = b; }
/**
* Return a string describing the process handled by this sampler.
*/
string process() const;
/**
* Return a short string describing the process handled by this sampler.
*/
string shortprocess() const;
/**
* Return a string identifying the process handled by this sampler.
*/
string id() const;
/**
* Return the last generated point.
*/
const vector<double>& lastPoint() const { return theLastPoint; }
/**
* Access the last generated point.
*/
vector<double>& lastPoint() { return theLastPoint; }
/**
* Return the reference weight to be used
*/
double referenceWeight() const { return theReferenceWeight; }
/**
* Set the reference weight to be used
*/
void referenceWeight(double w) { theReferenceWeight = w; }
/**
* Return true, if this sampler can provide unweighted events; if
* the proposal density is not an overestimate, weights larger than
* one can be generated, the handling of these points being subject
* to the GeneralSampler class.
*/
virtual bool canUnweight() const { return true; }
/**
* Return true, if this sampler adapts on the fly while generating
* events. Cross sections in the GeneralSampler class are calculated
* from adding up the cross sections quoted by individual samplers.
*/
virtual bool adaptsOnTheFly() const { return false; }
/**
* If this sampler features a compensation algorithm, return true if
* more events need to be generated to finish the compensation.
*/
virtual bool compensating() const { return false; }
/**
* Return true, if weighted events should be generated
*/
bool weighted() const { return theWeighted; }
/**
* Indicate that weighted events should be generated
*/
void doWeighted(bool yes = true) { theWeighted = yes; }
/**
* Exception to be thrown if cross section information should be updated.
*/
struct NextIteration {};
/**
* Generate the next point and return its weight; store the point in
* lastPoint().
*/
virtual double generate();
/**
* Fill and finalize the remappers present
*/
void fillRemappers(bool progress);
/**
* Write remappers to grid file
*/
void saveRemappers() const;
/**
* Write integration data to grid files
*/
void saveIntegrationData() const;
/**
* Save grid data
*/
virtual void saveGrid() const {}
/**
* Read integration data from grid files
*/
void readIntegrationData();
/**
* Read remappers from grid file
*/
void setupRemappers(bool progress);
/**
* Run a single iteration of n points, optionally printing a
* progress bar to cout. Calls generate n times.
*/
void runIteration(unsigned long n, bool progress);
/**
* Adapt this sampler after an iteration has been run
*/
virtual void adapt() {}
/**
* Initialize this bin sampler. This default version calls runIteration.
*/
virtual void initialize(bool progress);
/**
* Return true, if this sampler has already been initialized.
*/
bool initialized() const { return theInitialized; }
/**
* Indicate that this sampler has already been initialized.
*/
void isInitialized() { theInitialized = true; }
/**
* Return true, if integration has already been performed
*/
bool integrated() const { return theIntegrated; }
/**
* Return true, if remappers have been set up
*/
bool remappersFilled() const { return theRemappersFilled; }
/**
* Return true, if this sampler has already read grid data.
*/
bool hasGrids() const { return theHasGrids; }
/**
* Indicate that this sampler has already read grid data.
*/
void didReadGrids() { theHasGrids = true; }
/**
* Finalize this sampler.
*/
virtual void finalize(bool);
/**
* Return the total integrated cross section determined from the
* Monte Carlo sampling so far.
*/
virtual CrossSection integratedXSec() const {
return averageWeight()*nanobarn;
}
/**
* Return the error on the total integrated cross section determined
* from the Monte Carlo sampling so far.
*/
virtual CrossSection integratedXSecErr() const {
return sqrt(abs(averageWeightVariance()))*nanobarn;
}
/**
* Define the key for the collinear subtraction data.
*/
struct RandomNumberHistogram {
/**
* The lower bound
*/
double lower;
/**
* The bins, indexed by upper bound.
*/
map<double,double > bins;
map<double,double > binsw1;
/**
* Constructor
*/
RandomNumberHistogram(double low = 0.0,
double up = 1.,
unsigned int nbins = 20);
/**
* Book an event.
*/
void book(double inv, double weight) {
map<double,double>::iterator b = bins.upper_bound(inv);
if ( b == bins.end() ) return;
b->second = b->second+weight;
map<double,double>::iterator b2 = binsw1.upper_bound(inv);
if ( b2 == binsw1.end() ) return;
b2->second = b2->second+1.;
}
/**
* Write to file given name and invariant.
*/
void dump(const std::string& folder,const std::string& prefix, const std::string& process,const int NR)const;
};
typedef pair<string,size_t > RandomNumberIndex;
map<RandomNumberIndex,pair<RandomNumberHistogram,double> > RandomNumberHistograms;
public:
/**
* Return the dimension.
*/
int dimension() const { return theEventHandler->nDim(bin()); }
/**
* Return the number of points to be used for initial integration.
*/
unsigned long initialPoints() const { return theInitialPoints; }
/**
* Set the number of points to be used for initial integration.
*/
void initialPoints(unsigned long n) { theInitialPoints = n; }
/**
* Return the number of iterations to be considered for initialization.
*/
size_t nIterations() const { return theNIterations; }
/**
* Set the number of iterations to be considered for initialization.
*/
void nIterations(size_t n) { theNIterations = n; }
/**
* Set the factor to enhance the number of points for the next
* iteration.
*/
void enhancementFactor(double f) { theEnhancementFactor = f; }
/**
* Return the factor to enhance the number of points for the next
* iteration.
*/
double enhancementFactor() const { return theEnhancementFactor; }
/**
* Return the folder for the random number plots.
*/
string randomNumberString() const {return theRandomNumbers;}
+ /**
+ * In the AlmostUnweighted mode we do not need to unweight
+ * the events all the way down to the reference weight.
+ * Kappa effectively reduces the reference weight.
+ * This can be used for processes where unweighting
+ * is hardly feasible.
+ */
+ double kappa() const {return theKappa;}
+
public:
/** @name Functions used by the persistent I/O system. */
//@{
/**
* Function used to write out object persistently.
* @param os the persistent output stream written to.
*/
void persistentOutput(PersistentOStream & os) const;
/**
* Function used to read in object persistently.
* @param is the persistent input stream read from.
* @param version the version number of the object when written.
*/
void persistentInput(PersistentIStream & is, int version);
//@}
/**
* The standard Init function used to initialize the interfaces.
* Called exactly once for each class by the class description system
* before the main function starts or
* when this class is dynamically loaded.
*/
static void Init();
protected:
/** @name Clone Methods. */
//@{
/**
* Make a simple clone of this object.
* @return a pointer to the new object.
*/
virtual IBPtr clone() const;
/** Make a clone of this object, possibly modifying the cloned object
* to make it sane.
* @return a pointer to the new object.
*/
virtual IBPtr fullclone() const;
//@}
// If needed, insert declarations of virtual function defined in the
// InterfacedBase class here (using ThePEG-interfaced-decl in Emacs).
private:
/**
* The bias with which this sampler is selected.
*/
double theBias;
/**
* True, if weighted events should be generated
*/
bool theWeighted;
/**
* The number of points to use for initial integration.
*/
unsigned long theInitialPoints;
/**
* The number of iterations to be considered for initialization.
*/
size_t theNIterations;
/**
* Factor to enhance the number of points for the next iteration.
*/
double theEnhancementFactor;
/**
* Switch to count only non-zero weights in presampling.
*/
bool theNonZeroInPresampling;
/**
* Switch to require that we get half of the points
* in each iteration below the maximum weight of the iteration.
*/
bool theHalfPoints;
/**
* The maximum number of allowed new maxima,
* in combination with HalfPoints, in order to prevent unstable
* processes.
*/
int theMaxNewMax;
/**
* The reference weight to be used
*/
double theReferenceWeight;
/**
* The bin to be sampled.
*/
int theBin;
/**
* Whether or not this sampler has already been initialized.
*/
bool theInitialized;
/**
* The last generated point.
*/
vector<double> theLastPoint;
/**
* The event handler to be used.
*/
tStdEHPtr theEventHandler;
/**
* The containing sampler
*/
Ptr<GeneralSampler>::tptr theSampler;
/**
* Folder for the random number plots.
*/
string theRandomNumbers;
/**
* Remapper objects indexed by dimension
*/
map<size_t,Remapper> remappers;
/**
* The number of points to be used for initial filling of the remappers
*/
unsigned long theRemapperPoints;
/**
* True if channels should get a remapper
*/
bool theRemapChannelDimension;
/**
* The number of bins to be used for luminosity dimensions
*/
unsigned long theLuminosityMapperBins;
/**
* The number of bins to be used for any other dimension
*/
unsigned long theGeneralMapperBins;
/**
* The minimum selection probability for remapper bins
*/
double theRemapperMinSelection;
/**
* True, if integration has already been performed
*/
bool theIntegrated;
/**
* True, if remappers have been set up
*/
bool theRemappersFilled;
/**
* True, if this sampler has already read grid data.
*/
bool theHasGrids;
+
+
+ /**
+ * In the AlmostUnweighted mode we do not need to unweight
+ * the events all the way down to the reference weight.
+ * Kappa effectively reduces the reference weight.
+ * This can be used for processes where unweighting
+ * is hardly feasible.
+ */
+ double theKappa;
+
private:
/**
* The assignment operator is private and must never be called.
* In fact, it should not even be implemented.
*/
BinSampler & operator=(const BinSampler &);
};
}
#endif /* Herwig_BinSampler_H */
diff --git a/Sampling/CellGrids/CellGridSampler.cc b/Sampling/CellGrids/CellGridSampler.cc
--- a/Sampling/CellGrids/CellGridSampler.cc
+++ b/Sampling/CellGrids/CellGridSampler.cc
@@ -1,344 +1,345 @@
// -*- C++ -*-
//
// CellGridSampler.cpp is a part of Herwig - A multi-purpose Monte Carlo event generator
// Copyright (C) 2002-2012 The Herwig Collaboration
//
// Herwig is licenced under version 2 of the GPL, see COPYING for details.
// Please respect the MCnet academic guidelines, see GUIDELINES for details.
//
//
// This is the implementation of the non-inlined, non-templated member
// functions of the CellGridSampler class.
//
#include "CellGridSampler.h"
#include "ThePEG/Interface/ClassDocumentation.h"
#include "ThePEG/EventRecord/Particle.h"
#include "ThePEG/Repository/UseRandom.h"
#include "ThePEG/Repository/EventGenerator.h"
#include "ThePEG/Repository/Repository.h"
#include "ThePEG/Utilities/DescribeClass.h"
#include "ThePEG/Interface/Parameter.h"
#include "ThePEG/Interface/ParVector.h"
#include "ThePEG/Interface/Switch.h"
#include "ThePEG/Persistency/PersistentOStream.h"
#include "ThePEG/Persistency/PersistentIStream.h"
#include "ThePEG/Handlers/StandardEventHandler.h"
#include "ThePEG/Handlers/StandardXComb.h"
#include <boost/progress.hpp>
#include "CellGridSampler.h"
#include "Herwig/Sampling/GeneralSampler.h"
using namespace Herwig;
using namespace ExSample;
CellGridSampler::CellGridSampler()
: BinSampler(), SimpleCellGrid(),
theExplorationPoints(1000), theExplorationSteps(8),
theGain(0.3), theEpsilon(0.01),
theMinimumSelection(0.0001), theLuminositySplits(0),
theChannelSplits(0), theAllChannelSplits(false),
theUnweightCells(true) {}
CellGridSampler::~CellGridSampler() {}
IBPtr CellGridSampler::clone() const {
return new_ptr(*this);
}
IBPtr CellGridSampler::fullclone() const {
return new_ptr(*this);
}
double CellGridSampler::generate() {
UseRandom rnd;
double w = SimpleCellGrid::sample(rnd,*this,lastPoint(),
!weighted() && initialized() && theUnweightCells,
!initialized());
if ( !weighted() && initialized() ) {
- double p = min(abs(w),referenceWeight())/referenceWeight();
+ double p = min(abs(w),kappa()*referenceWeight())/(kappa()*referenceWeight());
double sign = w >= 0. ? 1. : -1.;
if ( p < 1 && UseRandom::rnd() > p )
w = 0.;
else
- w = sign*max(abs(w),referenceWeight());
+ w = sign*max(abs(w),referenceWeight()*kappa());
}
select(w);
if ( w != 0.0 )
accept();
+ assert(kappa() == 1. || sampler()->almostUnweighted());
return w;
}
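The kappa factor introduced here rescales the unweighting threshold: points with |w| at or above kappa*referenceWeight() keep their weight, while smaller ones are accepted with probability |w|/(kappa*referenceWeight()) and promoted to that threshold. A standalone sketch of just this acceptance step, with invented numbers:

// Standalone illustration of kappa-scaled unweighting; w, wref and kappa are invented.
#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <iostream>
int main() {
  const double w = 0.03, wref = 1.0, kappa = 0.1;
  const double threshold = kappa*wref;
  // accept with probability min(|w|, kappa*wref)/(kappa*wref)
  const double p = std::min(std::abs(w), threshold)/threshold;
  const double r = std::rand()/(RAND_MAX + 1.0);
  double out = 0.;
  if ( !(p < 1. && r > p) )
    out = (w >= 0. ? 1. : -1.)*std::max(std::abs(w), threshold);
  std::cout << "acceptance probability " << p              // 0.3
            << ", weight if accepted " << std::max(std::abs(w), threshold)
            << ", returned weight " << out << "\n";
  return 0;
}

With kappa = 1 this reduces to the previous behaviour of unweighting against the full reference weight.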
void CellGridSampler::adapt() {
UseRandom rnd;
set<SimpleCellGrid*> newCells;
SimpleCellGrid::adapt(theGain,theEpsilon,newCells);
SimpleCellGrid::explore(theExplorationPoints,rnd,*this,newCells,Repository::clog());
SimpleCellGrid::setWeights();
SimpleCellGrid::updateIntegral();
SimpleCellGrid::minimumSelection(theMinimumSelection);
}
void CellGridSampler::saveGrid() const {
XML::Element grid = SimpleCellGrid::toXML();
grid.appendAttribute("process",id());
sampler()->grids().append(grid);
}
void CellGridSampler::initialize(bool progress) {
bool haveGrid = false;
list<XML::Element>::iterator git = sampler()->grids().children().begin();
for ( ; git != sampler()->grids().children().end(); ++git ) {
if ( git->type() != XML::ElementTypes::Element )
continue;
if ( git->name() != "CellGrid" )
continue;
string proc;
git->getFromAttribute("process",proc);
if ( proc == id() ) {
haveGrid = true;
break;
}
}
if ( haveGrid ) {
SimpleCellGrid::fromXML(*git);
sampler()->grids().erase(git);
didReadGrids();
}
lastPoint().resize(dimension());
if (randomNumberString()!="")
for(size_t i=0;i<lastPoint().size();i++){
RandomNumberHistograms[RandomNumberIndex(id(),i)] = make_pair( RandomNumberHistogram(),0.);
}
if ( initialized() ) {
if ( !hasGrids() )
throw Exception() << "CellGridSampler: Require existing grid when starting to run.\n"
<< "Did you miss setting --setupfile?"
<< Exception::abortnow;
return;
}
if ( haveGrid ) {
if ( !integrated() )
runIteration(initialPoints(),progress);
isInitialized();
return;
}
SimpleCellGrid::boundaries(vector<double>(dimension(),0.0),vector<double>(dimension(),1.0));
SimpleCellGrid::weightInformation().resize(dimension());
UseRandom rnd;
boost::progress_display* progressBar = 0;
if ( progress ) {
Repository::clog() << "exploring " << process();
progressBar = new boost::progress_display(theExplorationSteps,Repository::clog());
}
std::set<SimpleCellGrid*> newCells;
if ( pre_adaption_splits().empty() &&
(theLuminositySplits || theChannelSplits || theAllChannelSplits) ) {
const StandardEventHandler& eh = *eventHandler();
const StandardXComb& xc = *eh.xCombs()[bin()];
the_pre_adaption_splits.resize(dimension(),0);
const pair<int,int>& pdims = xc.partonDimensions();
if ( theLuminositySplits && dimension() >= pdims.first + pdims.second ) {
for ( int n = 0; n < pdims.first; ++n )
the_pre_adaption_splits[n] = theLuminositySplits;
for ( int n = dimension() - pdims.second; n < dimension(); ++n )
the_pre_adaption_splits[n] = theLuminositySplits;
}
if ( theChannelSplits && xc.diagrams().size() &&
dimension() > pdims.first + pdims.second ) {
the_pre_adaption_splits[pdims.first] = theChannelSplits;
}
if ( theAllChannelSplits && xc.diagrams().size() > 1 &&
dimension() > pdims.first + pdims.second ) {
the_pre_adaption_splits[pdims.first] = xc.diagrams().size() - 1;
}
}
for(int splitdim=0; splitdim<min(dimension(),(int)pre_adaption_splits().size());splitdim++)
SimpleCellGrid::splitter(splitdim,pre_adaption_splits()[splitdim]);
SimpleCellGrid::explore(theExplorationPoints,rnd,*this,newCells,Repository::clog());
bool notAll = false;
for ( std::size_t step = 1; step < theExplorationSteps; ++step ) {
newCells.clear();
SimpleCellGrid::adapt(theGain,theEpsilon,newCells);
if ( progressBar )
++(*progressBar);
if ( newCells.empty() ) {
notAll = true;
break;
}
SimpleCellGrid::explore(theExplorationPoints,rnd,*this,newCells,Repository::clog());
}
if ( progressBar )
++(*progressBar);
SimpleCellGrid::setWeights();
SimpleCellGrid::updateIntegral();
SimpleCellGrid::minimumSelection(theMinimumSelection);
if ( progressBar ) {
if ( notAll )
cout << "\n" << flush;
delete progressBar;
}
unsigned long points = initialPoints();
for ( unsigned long k = 0; k < nIterations(); ++k ) {
runIteration(points,progress);
if ( k < nIterations() - 1 ) {
points = (unsigned long)(points*enhancementFactor());
adapt();
nextIteration();
}
}
didReadGrids();
isInitialized();
}
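The pre-adaption splits assembled above assign LuminositySplits to the first pdims.first and the last pdims.second (parton luminosity) dimensions and ChannelSplits to the dimension directly after the incoming ones. A standalone sketch of the resulting split vector, with invented dimensions and counts:

// Standalone illustration of the pre-adaption split layout; the dimension,
// parton dimensions and split counts are invented.
#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>
int main() {
  const int dimension = 6;
  const std::pair<int,int> pdims(2,1);   // stands in for xc.partonDimensions()
  const int luminositySplits = 4, channelSplits = 2;
  std::vector<int> splits(dimension,0);
  for ( int n = 0; n < pdims.first; ++n )
    splits[n] = luminositySplits;
  for ( int n = dimension - pdims.second; n < dimension; ++n )
    splits[n] = luminositySplits;
  splits[pdims.first] = channelSplits;   // channel dimension follows the incoming ones
  for ( std::size_t n = 0; n < splits.size(); ++n )
    std::cout << splits[n] << ' ';       // prints: 4 4 2 0 0 4
  std::cout << '\n';
  return 0;
}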
void CellGridSampler::finalize(bool) {
XML::Element grid = SimpleCellGrid::toXML();
grid.appendAttribute("process",id());
sampler()->grids().append(grid);
if (randomNumberString()!="")
for ( map<RandomNumberIndex,pair<RandomNumberHistogram,double> >::
const_iterator b = RandomNumberHistograms.begin();
b != RandomNumberHistograms.end(); ++b ) {
b->second.first.dump(randomNumberString(), b->first.first,shortprocess(),b->first.second);
}
}
// If needed, insert default implementations of virtual function defined
// in the InterfacedBase class here (using ThePEG-interfaced-impl in Emacs).
void CellGridSampler::persistentOutput(PersistentOStream & os) const {
os << theExplorationPoints << theExplorationSteps
<< theGain << theEpsilon << theMinimumSelection
<< the_pre_adaption_splits
<< theLuminositySplits << theChannelSplits
<< theAllChannelSplits << theUnweightCells;
}
void CellGridSampler::persistentInput(PersistentIStream & is, int) {
is >> theExplorationPoints >> theExplorationSteps
>> theGain >> theEpsilon >> theMinimumSelection
>> the_pre_adaption_splits
>> theLuminositySplits >> theChannelSplits
>> theAllChannelSplits >> theUnweightCells;
}
// *** Attention *** The following static variable is needed for the type
// description system in ThePEG. Please check that the template arguments
// are correct (the class and its base class), and that the constructor
// arguments are correct (the class name and the name of the dynamically
// loadable library where the class implementation can be found).
DescribeClass<CellGridSampler,BinSampler>
describeHerwigCellGridSampler("Herwig::CellGridSampler", "HwSampling.so");
void CellGridSampler::Init() {
static ClassDocumentation<CellGridSampler> documentation
("CellGridSampler samples XCombs bins using CellGrids.");
static Parameter<CellGridSampler,size_t> interfaceExplorationPoints
("ExplorationPoints",
"The number of points to use for cell exploration.",
&CellGridSampler::theExplorationPoints, 1000, 1, 0,
false, false, Interface::lowerlim);
static Parameter<CellGridSampler,size_t> interfaceExplorationSteps
("ExplorationSteps",
"The number of exploration steps to perform.",
&CellGridSampler::theExplorationSteps, 8, 1, 0,
false, false, Interface::lowerlim);
static Parameter<CellGridSampler,double> interfaceGain
("Gain",
"The gain factor used for adaption.",
&CellGridSampler::theGain, 0.3, 0.0, 1.0,
false, false, Interface::limited);
static Parameter<CellGridSampler,double> interfaceEpsilon
("Epsilon",
"The efficieny threshold used for adaption.",
&CellGridSampler::theEpsilon, 0.01, 0.0, 1.0,
false, false, Interface::limited);
static Parameter<CellGridSampler,double> interfaceMinimumSelection
("MinimumSelection",
"The minimum cell selection probability.",
&CellGridSampler::theMinimumSelection, 0.0001, 0.0, 1.0,
false, false, Interface::limited);
static ParVector<CellGridSampler,int> interfacethe_pre_adaption_splits
("preadaptionsplit",
"The splittings for each dimension befor adaption.",
&CellGridSampler::the_pre_adaption_splits, 1., -1, 0.0, 0.0, 0,
false, false, Interface::lowerlim);
static Parameter<CellGridSampler,int> interfaceLuminositySplits
("LuminositySplits",
"",
&CellGridSampler::theLuminositySplits, 0, 0, 0,
false, false, Interface::lowerlim);
static Parameter<CellGridSampler,int> interfaceChannelSplits
("ChannelSplits",
"",
&CellGridSampler::theChannelSplits, 0, 0, 0,
false, false, Interface::lowerlim);
static Switch<CellGridSampler,bool> interfaceAllChannelSplits
("AllChannelSplits",
"",
&CellGridSampler::theAllChannelSplits, false, false, false);
static SwitchOption interfaceAllChannelSplitsOn
(interfaceAllChannelSplits,
"On",
"",
true);
static SwitchOption interfaceAllChannelSplitsOff
(interfaceAllChannelSplits,
"Off",
"",
false);
static Switch<CellGridSampler,bool> interfaceUnweightCells
("UnweightCells",
"",
&CellGridSampler::theUnweightCells, true, false, false);
static SwitchOption interfaceUnweightCellsYes
(interfaceUnweightCells,
"Yes",
"",
true);
static SwitchOption interfaceUnweightCellsNo
(interfaceUnweightCells,
"No",
"",
false);
}
diff --git a/Sampling/GeneralSampler.cc b/Sampling/GeneralSampler.cc
--- a/Sampling/GeneralSampler.cc
+++ b/Sampling/GeneralSampler.cc
@@ -1,1029 +1,1036 @@
// -*- C++ -*-
//
// GeneralSampler.cc is a part of Herwig - A multi-purpose Monte Carlo event generator
// Copyright (C) 2002-2012 The Herwig Collaboration
//
// Herwig is licenced under version 2 of the GPL, see COPYING for details.
// Please respect the MCnet academic guidelines, see GUIDELINES for details.
//
//
// This is the implementation of the non-inlined, non-templated member
// functions of the GeneralSampler class.
//
#include "GeneralSampler.h"
#include "ThePEG/Interface/ClassDocumentation.h"
#include "ThePEG/EventRecord/Particle.h"
#include "ThePEG/Repository/UseRandom.h"
#include "ThePEG/Repository/EventGenerator.h"
#include "ThePEG/Repository/Repository.h"
#include "ThePEG/Utilities/DescribeClass.h"
#include "ThePEG/Utilities/LoopGuard.h"
#include "ThePEG/Interface/Reference.h"
#include "ThePEG/Interface/Switch.h"
#include "ThePEG/Interface/Parameter.h"
#include "ThePEG/Persistency/PersistentOStream.h"
#include "ThePEG/Persistency/PersistentIStream.h"
#include "ThePEG/Handlers/StandardEventHandler.h"
#include "ThePEG/Handlers/StandardXComb.h"
#include "Herwig/Utilities/RunDirectories.h"
#include "Herwig/Utilities/XML/ElementIO.h"
#include <boost/progress.hpp>
#include <boost/filesystem.hpp>
#include <cstdlib>
#include <sstream>
using namespace Herwig;
GeneralSampler::GeneralSampler()
: theVerbose(false),
theIntegratedXSec(ZERO), theIntegratedXSecErr(ZERO),
theUpdateAfter(1), crossSectionCalls(0), gotCrossSections(false),
theSumWeights(0.), theSumWeights2(0.),
theAttempts(0), theAccepts(0),
theMaxWeight(0.0), theAddUpSamplers(false),
theGlobalMaximumWeight(true), theFlatSubprocesses(false),
isSampling(false), theMinSelection(0.01), runCombinationData(false),
theAlmostUnweighted(false), maximumExceeds(0),
maximumExceededBy(0.), correctWeights(0.),theMaxEnhancement(1.05), didReadGrids(false),
theParallelIntegration(false),
theIntegratePerJob(0), theIntegrationJobs(0), theIntegrationJobsCreated(0),
justAfterIntegrate(false), theWriteGridsOnFinish(false) {}
GeneralSampler::~GeneralSampler() {}
IBPtr GeneralSampler::clone() const {
return new_ptr(*this);
}
IBPtr GeneralSampler::fullclone() const {
return new_ptr(*this);
}
double sign(double x) {
return x >= 0. ? 1. : -1.;
}
void GeneralSampler::initialize() {
if ( theParallelIntegration &&
runLevel() == ReadMode )
throw Exception()
<< "\n--------------------------------------------------------------------------------\n\n"
<< "Parallel integration is only supported in the build/integrate/run mode\n\n"
<< "--------------------------------------------------------------------------------\n"
<< Exception::abortnow;
if ( runLevel() == ReadMode ||
runLevel() == IntegrationMode ) {
assert(theSamplers.empty());
if ( !theGrids.children().empty() )
Repository::clog()
<< "--------------------------------------------------------------------------------\n\n"
<< "Using an existing grid. Please consider re-running the grid adaption\n"
<< "when there have been significant changes to parameters, cuts, etc.\n\n"
<< "--------------------------------------------------------------------------------\n"
<< flush;
}
if ( theParallelIntegration ) {
if ( !theIntegratePerJob && !theIntegrationJobs )
throw Exception()
<< "Please specify the number of subprocesses per integration job or the "
<< "number of integration jobs to be created."
<< Exception::abortnow;
if ( theIntegrationJobs ) {
unsigned int nintegrate = eventHandler()->nBins()/theIntegrationJobs;
if ( eventHandler()->nBins() % theIntegrationJobs != 0 )
++nintegrate;
theIntegratePerJob = max(theIntegratePerJob,nintegrate);
}
unsigned int jobCount = 0;
ofstream* jobList = 0;
generator()->log()
<< "--------------------------------------------------------------------------------\n"
<< "preparing integration jobs ...\n" << flush;
vector<int> randomized;
vector<int> pickfrom;
for ( int b = 0; b < eventHandler()->nBins(); ++b )
pickfrom.push_back(b);
//set<int> check;
while ( !pickfrom.empty() ) {
size_t idx = UseRandom::irnd(pickfrom.size());
randomized.push_back(pickfrom[idx]);
pickfrom.erase(pickfrom.begin() + idx);
}
int b = 0;
for ( vector<int>::const_iterator bx = randomized.begin();
bx != randomized.end(); ++bx, ++b ) {
if ( b == 0 || b % theIntegratePerJob == 0 ) {
if ( jobList ) {
jobList->close();
delete jobList;
jobList = 0;
}
ostringstream name;
string prefix = RunDirectories::buildStorage();
if ( prefix.empty() )
prefix = "./";
else if ( *prefix.rbegin() != '/' )
prefix += "/";
name << prefix << "integrationJob" << jobCount;
++jobCount;
string fname = name.str();
jobList = new ofstream(fname.c_str());
if ( !*jobList ) {
delete jobList;
throw Exception() << "Failed to write integration job list"
<< Exception::abortnow;
}
}
*jobList << *bx << " ";
}
theIntegrationJobsCreated = jobCount;
generator()->log()
<< "--------------------------------------------------------------------------------\n\n"
<< "Wrote " << jobCount << " integration jobs\n"
<< "Please submit integration jobs with the\nintegrate --jobid=x\ncommand for job ids "
<< "from 0 to " << (jobCount-1) << "\n\n"
+ << "e.g.:\n\n"
+ << " for i in $(seq 0 "<< (jobCount-1) <<");do Herwig integrate --jobid=$i "<<generator()->runName()<<".run & done\n\n"
<< "--------------------------------------------------------------------------------\n"
<< flush;
if ( jobList ) {
jobList->close();
delete jobList;
jobList = 0;
}
theParallelIntegration = false;
return;
}
if ( runLevel() == BuildMode )
return;
if ( !samplers().empty() )
return;
if ( binSampler()->adaptsOnTheFly() ) {
if ( !theAddUpSamplers ) {
Repository::clog() << "Warning: On-the-fly adapting samplers require cross section calculation from "
<< "adding up individual samplers. The AddUpSamplers flag will be switched on.";
}
theAddUpSamplers = true;
}
if ( !weighted() && !binSampler()->canUnweight() )
throw Exception() << "Unweighted events requested from weighted bin sampler object.";
if ( theFlatSubprocesses && !theGlobalMaximumWeight ) {
Repository::clog() << "Warning: Can only use a global maximum weight when selecting subprocesses "
<< "uniformly. The GlobalMaximumWeight flag will be switched on.";
theGlobalMaximumWeight = true;
}
set<int> binsToIntegrate;
if ( integrationList() != "" ) {
string prefix = RunDirectories::buildStorage();
if ( prefix.empty() )
prefix = "./";
else if ( *prefix.rbegin() != '/' )
prefix += "/";
string fname = prefix + integrationList();
ifstream jobList(fname.c_str());
if ( jobList ) {
int b = 0;
while ( jobList >> b )
binsToIntegrate.insert(b);
} else {
Repository::clog()
<< "Job list '"
<< integrationList() << "' not found.\n"
<< "Assuming empty integration job\n" << flush;
return;
}
}
if ( binsToIntegrate.empty() ) {
for ( int b = 0; b < eventHandler()->nBins(); ++b )
binsToIntegrate.insert(b);
}
boost::progress_display* progressBar = 0;
if ( !theVerbose && !justAfterIntegrate ) {
Repository::clog() << "integrating subprocesses";
progressBar = new boost::progress_display(binsToIntegrate.size(),Repository::clog());
}
-
+ int count=0;
for ( set<int>::const_iterator bit = binsToIntegrate.begin(); bit != binsToIntegrate.end(); ++bit ) {
+ count++;
+ if ( theVerbose &&
+ (runLevel() == ReadMode ||
+ runLevel() == IntegrationMode) )
+ cout << "\nIntegrate " << count << " of " << binsToIntegrate.size() << ":\n" << flush;
Ptr<BinSampler>::ptr s = theBinSampler->cloneMe();
s->eventHandler(eventHandler());
s->sampler(this);
s->bin(*bit);
lastSampler(s);
s->doWeighted(eventHandler()->weighted());
s->setupRemappers(theVerbose);
if ( justAfterIntegrate )
s->readIntegrationData();
s->initialize(theVerbose);
samplers()[*bit] = s;
if ( !theVerbose && !justAfterIntegrate )
++(*progressBar);
if ( s->nanPoints() && theVerbose ) {
Repository::clog() << "warning: "
<< s->nanPoints() << " of "
<< s->allPoints() << " points with nan or inf weight.\n"
<< flush;
}
}
if ( progressBar ) {
delete progressBar;
progressBar = 0;
}
if ( runLevel() == IntegrationMode ) {
theGrids = XML::Element(XML::ElementTypes::Element,"Grids");
for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin();
s != samplers().end(); ++s ) {
s->second->saveGrid();
s->second->saveRemappers();
s->second->saveIntegrationData();
}
writeGrids();
return;
}
if ( theVerbose ) {
bool oldAdd = theAddUpSamplers;
theAddUpSamplers = true;
try {
Repository::clog() << "estimated total cross section is ( "
<< integratedXSec()/nanobarn << " +/- "
<< integratedXSecErr()/nanobarn << " ) nb\n" << flush;
} catch (...) {
theAddUpSamplers = oldAdd;
throw;
}
theAddUpSamplers = oldAdd;
}
updateSamplers();
if ( samplers().empty() ) {
throw Exception() << "No processes with non-zero cross section present."
<< Exception::abortnow;
}
if ( !justAfterIntegrate ) {
theGrids = XML::Element(XML::ElementTypes::Element,"Grids");
for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin();
s != samplers().end(); ++s ) {
s->second->saveGrid();
s->second->saveRemappers();
}
writeGrids();
}
}
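For parallel integration the code above shuffles the bin indices and writes them, IntegratePerJob at a time, into job list files named integrationJob<n> in the build storage. A standalone sketch of that bookkeeping (the file names land in the current directory and std::rand stands in for the generator's random numbers):

// Standalone illustration: distribute shuffled bin indices over job list files.
#include <cstddef>
#include <cstdlib>
#include <fstream>
#include <sstream>
#include <vector>
int main() {
  const int nBins = 10, integratePerJob = 4;
  std::vector<int> pickfrom, randomized;
  for ( int b = 0; b < nBins; ++b )
    pickfrom.push_back(b);
  while ( !pickfrom.empty() ) {          // draw bins in random order
    std::size_t idx = std::rand() % pickfrom.size();
    randomized.push_back(pickfrom[idx]);
    pickfrom.erase(pickfrom.begin() + idx);
  }
  std::ofstream* jobList = 0;
  int jobCount = 0;
  for ( std::size_t b = 0; b < randomized.size(); ++b ) {
    if ( b % integratePerJob == 0 ) {    // start a new job list every 4 bins
      delete jobList;
      std::ostringstream name;
      name << "integrationJob" << jobCount++;
      jobList = new std::ofstream(name.str().c_str());
    }
    *jobList << randomized[b] << ' ';
  }
  delete jobList;                        // 10 bins end up in 3 job files
  return 0;
}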
double GeneralSampler::generate() {
long excptTries = 0;
gotCrossSections = false;
lastSampler(samplers().upper_bound(UseRandom::rnd())->second);
double weight = 0.;
while ( true ) {
try {
weight = 1.0;
double p = lastSampler()->referenceWeight()/lastSampler()->bias()/theMaxWeight;
if ( weighted() )
weight *= p;
else if ( p < UseRandom::rnd() ){
weight = 0.0;
// The lastSampler was picked according to the bias of the process.
--excptTries;
}
if ( weight != 0.0 )
weight *= lastSampler()->generate()/lastSampler()->referenceWeight();
} catch(BinSampler::NextIteration) {
updateSamplers();
lastSampler(samplers().upper_bound(UseRandom::rnd())->second);
if ( ++excptTries == eventHandler()->maxLoop() )
break;
continue;
} catch (...) {
throw;
}
if ( isnan(lastSampler()->lastWeight()) || isinf(lastSampler()->lastWeight()) ) {
lastSampler() = samplers().upper_bound(UseRandom::rnd())->second;
if ( ++excptTries == eventHandler()->maxLoop() )
break;
continue;
}
theAttempts += 1;
if ( abs(weight) == 0.0 ) {
lastSampler(samplers().upper_bound(UseRandom::rnd())->second);
if ( ++excptTries == eventHandler()->maxLoop() )
break;
continue;
}
if ( !eventHandler()->weighted() && !theAlmostUnweighted ) {
if ( abs(weight) > 1. ) {
++maximumExceeds;
maximumExceededBy += abs(weight)-1.;
}
correctWeights+=weight;
if ( weight > 0.0 )
weight = 1.;
else
weight = -1.;
}
break;
}
theAccepts += 1;
if ( excptTries == eventHandler()->maxLoop() )
throw Exception()
<< "GeneralSampler::generate() : Maximum number of tries to re-run event "
<< "selection reached. Aborting now." << Exception::runerror;
lastPoint() = lastSampler()->lastPoint();
lastSampler()->accept();
theSumWeights += weight;
theSumWeights2 += sqr(weight);
return weight;
}
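Generation proceeds in two stages: the pre-selected sampler is kept with probability referenceWeight/(bias*maxWeight), and the point it generates is then weighted relative to the reference weight (and finally promoted to unit weight unless AlmostUnweighted is on). A standalone sketch with invented numbers:

// Standalone illustration of the two-stage unweighting; the reference weight,
// bias, maximum weight and point weight are invented.
#include <cstdlib>
#include <iostream>
int main() {
  const double wref = 2.0, bias = 0.25, maxWeight = 10.0;
  double weight = 1.0;
  const double p = wref/bias/maxWeight;          // stage 1: keep this sampler with p = 0.8
  if ( p < std::rand()/(RAND_MAX + 1.0) )
    weight = 0.0;                                // retry with a newly selected sampler
  if ( weight != 0.0 ) {
    const double pointWeight = 1.7;              // stands in for lastSampler()->generate()
    weight *= pointWeight/wref;                  // stage 2: weight relative to wref (0.85)
  }
  std::cout << "selection probability " << p << ", event weight " << weight << "\n";
  return 0;
}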
void GeneralSampler::rejectLast() {
if ( !lastSampler() )
return;
double w = 0.0;
if ( weighted() )
w = lastSampler()->lastWeight()/lastSampler()->bias()/theMaxWeight;
else
w = lastSampler()->lastWeight()/lastSampler()->referenceWeight();
lastSampler()->reject();
theSumWeights -= w;
theSumWeights2 -= sqr(w);
theAttempts -= 1;
theAccepts -= 1;
}
void GeneralSampler::updateSamplers() {
map<double,Ptr<BinSampler>::ptr> checkedSamplers;
for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin();
s != samplers().end(); ++s ) {
if ( s->second->averageAbsWeight() == 0.0 ) {
generator()->log() << "Warning: no phase space points with non-zero cross section\n"
<< "could be obtained for the process: "
<< s->second->process() << "\n"
<< "This process will not be considered. Try increasing InitialPoints.\n"
<< flush;
if ( s->second->nanPoints() ) {
generator()->log() << "Warning: "
<< s->second->nanPoints() << " of "
<< s->second->allPoints() << " points with nan or inf weight\n"
<< "in " << s->second->process() << "\n" << flush;
}
continue;
}
checkedSamplers.insert(*s);
}
theSamplers = checkedSamplers;
if ( samplers().empty() )
return;
double allMax = 0.0;
double sumbias = 0.;
for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin();
s != samplers().end(); ++s ) {
double bias = 1.;
if ( !theFlatSubprocesses )
bias *= s->second->averageAbsWeight();
s->second->bias(bias);
sumbias += bias;
allMax = max(allMax,s->second->maxWeight()*theMaxEnhancement);
}
double nsumbias = 0.0;
bool needAdjust = false;
for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin();
s != samplers().end(); ++s ) {
needAdjust |= s->second->bias()/sumbias < theMinSelection;
s->second->bias(max(s->second->bias()/sumbias,theMinSelection));
nsumbias += s->second->bias();
}
if ( nsumbias == 0.0 ) {
samplers().clear();
return;
}
if ( needAdjust ) {
for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin();
s != samplers().end(); ++s ) {
s->second->bias(s->second->bias()/nsumbias);
}
}
theMaxWeight = 0.0;
for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin();
s != samplers().end(); ++s ) {
double wref = theGlobalMaximumWeight ? allMax :
s->second->maxWeight()*theMaxEnhancement;
s->second->referenceWeight(wref);
theMaxWeight = max(theMaxWeight,wref/s->second->bias());
if ( (isSampling && s->second == lastSampler()) ||
!isSampling )
s->second->nextIteration();
}
map<double,Ptr<BinSampler>::ptr> newSamplers;
double current = 0.;
for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin();
s != samplers().end(); ++s ) {
if ( s->second->bias() == 0.0 )
continue;
current += s->second->bias();
newSamplers[current] = s->second;
}
samplers() = newSamplers;
}
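The normalisation above turns the average absolute weights into selection probabilities: each bias is divided by their sum, floored at MinSelection and, if any floor was applied, renormalised to unity before the cumulative selection map is rebuilt. A standalone sketch of that normalisation (which always renormalises, for simplicity), with invented weights:

// Standalone illustration of bias normalisation with a minimum selection probability.
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>
int main() {
  std::vector<double> bias;                        // e.g. average |weight| per process
  bias.push_back(5.0); bias.push_back(0.001); bias.push_back(1.0);
  const double minSelection = 0.01;
  double sum = 0.;
  for ( std::size_t i = 0; i < bias.size(); ++i )
    sum += bias[i];
  double nsum = 0.;
  for ( std::size_t i = 0; i < bias.size(); ++i ) {
    bias[i] = std::max(bias[i]/sum, minSelection); // floor tiny processes
    nsum += bias[i];
  }
  for ( std::size_t i = 0; i < bias.size(); ++i ) {
    bias[i] /= nsum;                               // renormalise to unity
    std::cout << "process " << i << " selected with probability " << bias[i] << "\n";
  }
  return 0;
}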
void GeneralSampler::currentCrossSections() const {
if ( !theAddUpSamplers ) {
double n = attempts();
if ( n > 1 ) {
theIntegratedXSec = sumWeights()*maxXSec()/attempts();
double sw = sumWeights(); double sw2 = sumWeights2();
theIntegratedXSecErr = maxXSec()*sqrt(abs(sw2/n-sqr(sw/n))/(n-1));
} else {
theIntegratedXSec = ZERO;
theIntegratedXSecErr = ZERO;
}
return;
}
if ( gotCrossSections )
return;
if ( crossSectionCalls > 0 ) {
if ( ++crossSectionCalls == theUpdateAfter ) {
crossSectionCalls = 0;
} else return;
}
++crossSectionCalls;
gotCrossSections = true;
theIntegratedXSec = ZERO;
double var = 0.0;
for ( map<double,Ptr<BinSampler>::ptr>::const_iterator s = samplers().begin();
s != samplers().end(); ++s ) {
theIntegratedXSec += s->second->integratedXSec();
var += sqr(s->second->integratedXSecErr()/nanobarn);
}
theIntegratedXSecErr = sqrt(var)*nanobarn;
}
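When AddUpSamplers is off, the quoted cross section is the plain Monte Carlo estimate maxXSec*sum(w)/N with standard error maxXSec*sqrt(|<w^2> - <w>^2|/(N-1)). A standalone sketch with toy weights and maxXSec in nanobarn:

// Standalone illustration of the cross-section estimate and its error; the
// weights and maxXSec are toy numbers.
#include <cmath>
#include <iostream>
int main() {
  const double weights[] = { 1., 0., 1., 1., 0.5 };
  const double n = 5., maxXSec = 2.0;
  double sw = 0., sw2 = 0.;
  for ( int i = 0; i < 5; ++i ) {
    sw  += weights[i];
    sw2 += weights[i]*weights[i];
  }
  const double xsec = sw*maxXSec/n;
  const double err  = maxXSec*std::sqrt(std::abs(sw2/n - (sw/n)*(sw/n))/(n - 1.));
  std::cout << "sigma = " << xsec << " +/- " << err << " nb\n";
  return 0;
}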
void GeneralSampler::prepare() {
readGrids();
}
// If needed, insert default implementations of virtual function defined
// in the InterfacedBase class here (using ThePEG-interfaced-impl in Emacs).
void GeneralSampler::doinit() {
if ( RunDirectories::empty() )
RunDirectories::pushRunId(generator()->runName());
if ( integratePerJob() || integrationJobs() ) {
theParallelIntegration = true;
theIntegratePerJob = integratePerJob();
theIntegrationJobs = integrationJobs();
}
readGrids();
if ( theGrids.children().empty() && runLevel() == RunMode )
generator()->log()
<< "\n--------------------------------------------------------------------------------\n\n"
<< "Warning: No grid file could be found at the start of this run.\n\n"
<< "* For a read/run setup intented to be used with --setupfile please consider\n"
<< " using the build/integrate/run setup.\n"
<< "* For a build/integrate/run setup to be used with --setupfile please ensure\n"
<< " that the same setupfile is provided to both, the integrate and run steps.\n\n"
<< "--------------------------------------------------------------------------------\n" << flush;
if ( samplers().empty() && runLevel() == RunMode )
justAfterIntegrate = true;
SamplerBase::doinit();
}
void GeneralSampler::dofinish() {
set<string> compensating;
for ( map<double,Ptr<BinSampler>::ptr>::const_iterator s =
samplers().begin(); s != samplers().end(); ++s ) {
if ( s->second->compensating() ) {
compensating.insert(s->second->process());
}
if ( s->second->nanPoints() ) {
generator()->log() << "warning: "
<< s->second->nanPoints() << " of "
<< s->second->allPoints() << " points with nan or inf weight\n"
<< "in " << s->second->process() << "\n" << flush;
}
s->second->finalize(theVerbose);
}
if ( theVerbose ) {
if ( !compensating.empty() ) {
generator()->log() << "warning: sampling for the following processes is still compensating:\n";
for ( set<string>::const_iterator c = compensating.begin();
c != compensating.end(); ++c )
generator()->log() << *c << "\n";
}
generator()->log() << "final integrated cross section is ( "
<< integratedXSec()/nanobarn << " +/- "
<< integratedXSecErr()/nanobarn << " ) nb\n" << flush;
}
if ( !compensating.empty() ) {
generator()->log() << "Warning: Some samplers are still in compensating mode.\n" << flush;
}
if ( maximumExceeds != 0 ) {
//generator()->log() << maximumExceeds << " of " << theAttempts
// << " attempted points exceeded the guessed maximum weight\n"
// << "with an average relative deviation of "
// << maximumExceededBy/maximumExceeds << "\n\n" << flush;
generator()->log() <<"\n\n\nNote: In this run "<<maximumExceeds<<" of the "<<theAccepts<<" accepted events\n"
<<"were found with a weight W larger than the expected Wmax.\n";
generator()->log() <<"This corresponds to a cross section difference between:\n"
<<" UnitWeights: "<< theMaxWeight*theSumWeights/theAttempts<<"nb\n"
<<" AlmostUnweighted: "<< theMaxWeight*correctWeights/theAttempts<< "nb\n"<<
" use 'set Sampler:AlmostUnweighted On' to switch to non-unit weights.\n\n";
generator()->log() <<"The maximum weight determined in the read/integrate step has been enhanced by \n"<<
" set /Herwig/Samplers/Sampler:MaxEnhancement "<< theMaxEnhancement<<
".\nIf the rate of excessions ("<<(double)maximumExceeds*100/(double)theAccepts<<
"%) or the change of the cross section is large,\nyou can try to:\n\n"<<
"Enhance the number of points used in the read/integrate step\n"<<
" set /Herwig/Samplers/Sampler:BinSampler:InitialPoints ...\n\n"<<
"and/or enhance the reference weight found in the read/integrate step\n"<<
" set /Herwig/Samplers/Sampler:MaxEnhancement 1.x\n\n"<<
"If this does not help (and your process is well defined by cuts)\n"<<
"don't hesitate to contact herwig@projects.hepforge.org.\n\n";
}
if ( runCombinationData ) {
string dataName = RunDirectories::runStorage();
if ( dataName.empty() )
dataName = "./";
else if ( *dataName.rbegin() != '/' )
dataName += "/";
dataName += "HerwigSampling.dat";
ofstream data(dataName.c_str());
double runXSec =
theMaxWeight*theSumWeights/theAttempts;
double runXSecErr =
sqr(theMaxWeight)*(1./theAttempts)*(1./(theAttempts-1.))*
abs(theSumWeights2 - sqr(theSumWeights)/theAttempts);
data << setprecision(17);
data << "CrossSectionCombined "
<< (integratedXSec()/nanobarn) << " +/- "
<< (integratedXSecErr()/nanobarn) << "\n"
<< "CrossSectionRun "
<< runXSec << " +/- " << sqrt(runXSecErr) << "\n"
<< "PointsAttempted " << theAttempts << "\n"
<< "PointsAccepted " << theAccepts << "\n"
<< "SumWeights " << theSumWeights*theMaxWeight << "\n"
<< "SumWeights2 " << theSumWeights2*sqr(theMaxWeight) << "\n"
<< flush;
}
theGrids = XML::Element(XML::ElementTypes::Element,"Grids");
for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin();
s != samplers().end(); ++s ) {
s->second->saveGrid();
s->second->saveRemappers();
if ( justAfterIntegrate )
s->second->saveIntegrationData();
}
if ( theWriteGridsOnFinish )
writeGrids();
SamplerBase::dofinish();
}
void GeneralSampler::doinitrun() {
readGrids();
if ( theGrids.children().empty() && !didReadGrids )
generator()->log()
<< "\n--------------------------------------------------------------------------------\n\n"
<< "Warning:No grid file could be found at the start of this run.\n\n"
<< "* For a read/run setup intented to be used with --setupfile please consider\n"
<< " using the build/integrate/run setup.\n"
<< "* For a build/integrate/run setup to be used with --setupfile please ensure\n"
<< " that the same setupfile is provided to both, the integrate and run steps.\n\n"
<< "--------------------------------------------------------------------------------\n" << flush;
if ( samplers().empty() ) {
justAfterIntegrate = true;
if ( !hasSetupFile() )
initialize();
} else {
for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin();
s != samplers().end(); ++s ) {
s->second->setupRemappers(theVerbose);
if ( justAfterIntegrate )
s->second->readIntegrationData();
s->second->initialize(theVerbose);
}
}
isSampling = true;
SamplerBase::doinitrun();
}
void GeneralSampler::rebind(const TranslationMap & trans) {
for ( map<double,Ptr<BinSampler>::ptr>::iterator s =
samplers().begin(); s != samplers().end(); ++s )
s->second = trans.translate(s->second);
SamplerBase::rebind(trans);
}
IVector GeneralSampler::getReferences() {
IVector ret = SamplerBase::getReferences();
for ( map<double,Ptr<BinSampler>::ptr>::iterator s =
samplers().begin(); s != samplers().end(); ++s )
ret.push_back(s->second);
return ret;
}
void GeneralSampler::writeGrids() const {
if ( theGrids.children().empty() )
return;
string dataName = RunDirectories::runStorage();
if ( dataName.empty() )
dataName = "./";
else if ( *dataName.rbegin() != '/' )
dataName += "/";
dataName += "HerwigGrids.xml";
ofstream out(dataName.c_str());
XML::ElementIO::put(theGrids,out);
}
void GeneralSampler::readGrids() {
// return if grids were already read
if ( didReadGrids )
return;
// check for global HerwigGrids.xml file or combine integration jobs to a global HerwigGrids.xml file
// Show messages of integration job combination only in the first run (if no global HerwigGrids.xml file is found in one of the directories)
// or in case of an error
// Check whether a global HerwigGrids.xml file was found and keep messages in a stringstream buffer until then
bool globalHerwigGridsFileFound = false;
bool integrationJobCombinationSuccessful = true;
std::stringstream messageBuffer;
RunDirectories directories;
while ( directories && !didReadGrids ) {
string dataName = directories.nextRunStorage();
if ( dataName.empty() )
dataName = "./";
else if ( *dataName.rbegin() != '/' )
dataName += "/";
string directoryName = dataName;
dataName += "HerwigGrids.xml";
ifstream in(dataName.c_str());
if ( in ) {
theGrids = XML::ElementIO::get(in);
didReadGrids = true;
// Set to true if a global HerwigGrids.xml file was found in any of the directories
globalHerwigGridsFileFound = true;
}
else {
// Check if integrationJob was split and try to merge single integrationJobs together
// integrationJobsCreated() == 0 indicates that parallel integration has not been
// requested, while the parallel integration parameters may well yield a single job
if(integrationJobsCreated() >= 1 && runLevel() == RunMode) {
messageBuffer << "\n\n* Global HerwigGrids.xml file does not exist yet"
<< "\n and integration jobs were split into " << integrationJobsCreated() << " integration jobs."
<< "\n Trying to combine single integration jobs to a global HerwigGrids.xml file"
<< "\n using the following directory " << directoryName << ".";
theGrids = XML::Element(XML::ElementTypes::Element,"Grids");
integrationJobCombinationSuccessful = true;
for(unsigned int currentProcessedIntegrationJobNum = 0; currentProcessedIntegrationJobNum < integrationJobsCreated(); ++currentProcessedIntegrationJobNum) {
ostringstream currentProcessedIntegrationJob;
currentProcessedIntegrationJob << directoryName << "integrationJob" << currentProcessedIntegrationJobNum << "/HerwigGrids.xml";
if(boost::filesystem::exists(boost::filesystem::path(currentProcessedIntegrationJob.str()))) {
ifstream localGridFileIN(currentProcessedIntegrationJob.str().c_str());
if(localGridFileIN) {
theGrids = theGrids + XML::ElementIO::get(localGridFileIN);
messageBuffer << "\n* Added integration job " << currentProcessedIntegrationJobNum << " to global HerwigGrids.xml file.";
}
else {
integrationJobCombinationSuccessful = false;
messageBuffer << "\n* Could not open/add integration job " << currentProcessedIntegrationJobNum << " to global HerwigGrids.xml file.";
}
}
else {
integrationJobCombinationSuccessful = false;
messageBuffer << "\n* Could not find integration job " << currentProcessedIntegrationJob.str();
}
}
if(integrationJobCombinationSuccessful) {
string globalGridFile = directoryName + "HerwigGrids.xml";
ofstream globalGridFileOF(globalGridFile.c_str());
XML::ElementIO::put(theGrids,globalGridFileOF);
messageBuffer << "\n* Global HerwigGrids.xml file was created, the integration jobs 0 to " << integrationJobsCreated()-1
<< " were combined."
<< "\n* If previous warnings in regards to the HerwigGrids.xml file occured, these can be safely ignored."
<< "\n* Note: This message will occur only in the first run and will be suppressed in further runs.\n"
<< flush;
didReadGrids = true;
}
else {
messageBuffer << "\n* Global HerwigGrids.xml file could not be created due to failed combination of integration jobs."
<< "\n Please check the above-mentioned missing/failed integration jobs which are needed for the combination."
<< "\n* Note: It can be that the HerwigGrids.xml file is searched and can be found in further directories."
<< "\n In this case you can ignore this warning message.\n" << flush;
}
}
}
}
// Show messages if global HerwigGrids.xml file was not found or first combination run
if (!globalHerwigGridsFileFound && (theVerbose || !integrationJobCombinationSuccessful))
BaseRepository::cout() << messageBuffer.str() << "\n" << flush;
if ( !didReadGrids )
theGrids = XML::Element(XML::ElementTypes::Element,"Grids");
}
void GeneralSampler::persistentOutput(PersistentOStream & os) const {
os << theVerbose << theBinSampler << theSamplers << theLastSampler
<< theUpdateAfter << crossSectionCalls << gotCrossSections
<< ounit(theIntegratedXSec,nanobarn)
<< ounit(theIntegratedXSecErr,nanobarn)
<< theSumWeights << theSumWeights2
<< theAttempts << theAccepts << theMaxWeight
<< theAddUpSamplers << theGlobalMaximumWeight
<< theFlatSubprocesses << isSampling << theMinSelection
<< runCombinationData << theAlmostUnweighted << maximumExceeds
<< maximumExceededBy << correctWeights << theMaxEnhancement
<< theParallelIntegration
<< theIntegratePerJob << theIntegrationJobs
<< theIntegrationJobsCreated << theWriteGridsOnFinish;
}
void GeneralSampler::persistentInput(PersistentIStream & is, int) {
is >> theVerbose >> theBinSampler >> theSamplers >> theLastSampler
>> theUpdateAfter >> crossSectionCalls >> gotCrossSections
>> iunit(theIntegratedXSec,nanobarn)
>> iunit(theIntegratedXSecErr,nanobarn)
>> theSumWeights >> theSumWeights2
>> theAttempts >> theAccepts >> theMaxWeight
>> theAddUpSamplers >> theGlobalMaximumWeight
>> theFlatSubprocesses >> isSampling >> theMinSelection
>> runCombinationData >> theAlmostUnweighted >> maximumExceeds
>> maximumExceededBy >> correctWeights >> theMaxEnhancement
>> theParallelIntegration
>> theIntegratePerJob >> theIntegrationJobs
>> theIntegrationJobsCreated >> theWriteGridsOnFinish;
}
// *** Attention *** The following static variable is needed for the type
// description system in ThePEG. Please check that the template arguments
// are correct (the class and its base class), and that the constructor
// arguments are correct (the class name and the name of the dynamically
// loadable library where the class implementation can be found).
DescribeClass<GeneralSampler,SamplerBase>
describeHerwigGeneralSampler("Herwig::GeneralSampler", "HwSampling.so");
void GeneralSampler::Init() {
static ClassDocumentation<GeneralSampler> documentation
("A GeneralSampler class");
static Reference<GeneralSampler,BinSampler> interfaceBinSampler
("BinSampler",
"The bin sampler to be used.",
&GeneralSampler::theBinSampler, false, false, true, false, false);
static Parameter<GeneralSampler,size_t> interfaceUpdateAfter
("UpdateAfter",
"Update cross sections every number of events.",
&GeneralSampler::theUpdateAfter, 1, 1, 0,
false, false, Interface::lowerlim);
static Switch<GeneralSampler,bool> interfaceVerbose
("Verbose",
"",
&GeneralSampler::theVerbose, false, false, false);
static SwitchOption interfaceVerboseOn
(interfaceVerbose,
"On",
"",
true);
static SwitchOption interfaceVerboseOff
(interfaceVerbose,
"Off",
"",
false);
static Switch<GeneralSampler,bool> interfaceAddUpSamplers
("AddUpSamplers",
"Calculate cross sections from adding up individual samplers.",
&GeneralSampler::theAddUpSamplers, false, false, false);
static SwitchOption interfaceAddUpSamplersOn
(interfaceAddUpSamplers,
"On",
"",
true);
static SwitchOption interfaceAddUpSamplersOff
(interfaceAddUpSamplers,
"Off",
"",
false);
static Switch<GeneralSampler,bool> interfaceGlobalMaximumWeight
("GlobalMaximumWeight",
"Use a global maximum weight instead of partial unweighting.",
&GeneralSampler::theGlobalMaximumWeight, true, false, false);
static SwitchOption interfaceGlobalMaximumWeightOn
(interfaceGlobalMaximumWeight,
"On",
"",
true);
static SwitchOption interfaceGlobalMaximumWeightOff
(interfaceGlobalMaximumWeight,
"Off",
"",
false);
static Parameter<GeneralSampler,double> interfaceMaxEnhancement
("MaxEnhancement",
"Enhance the maximum reference weight found in the read step.",
&GeneralSampler::theMaxEnhancement, 1.1, 1.0, 1.5,
false, false, Interface::limited);
static Switch<GeneralSampler,bool> interfaceFlatSubprocesses
("FlatSubprocesses",
"[debug] Perform a flat subprocess selection.",
&GeneralSampler::theFlatSubprocesses, false, false, false);
static SwitchOption interfaceFlatSubprocessesOn
(interfaceFlatSubprocesses,
"On",
"",
true);
static SwitchOption interfaceFlatSubprocessesOff
(interfaceFlatSubprocesses,
"Off",
"",
false);
static Parameter<GeneralSampler,double> interfaceMinSelection
("MinSelection",
"A minimum subprocess selection probability.",
&GeneralSampler::theMinSelection, 0.01, 0.0, 1.0,
false, false, Interface::limited);
static Switch<GeneralSampler,bool> interfaceRunCombinationData
("RunCombinationData",
"",
&GeneralSampler::runCombinationData, false, false, false);
static SwitchOption interfaceRunCombinationDataOn
(interfaceRunCombinationData,
"On",
"",
true);
static SwitchOption interfaceRunCombinationDataOff
(interfaceRunCombinationData,
"Off",
"",
false);
static Switch<GeneralSampler,bool> interfaceAlmostUnweighted
("AlmostUnweighted",
"",
&GeneralSampler::theAlmostUnweighted, false, false, false);
static SwitchOption interfaceAlmostUnweightedOn
(interfaceAlmostUnweighted,
"On",
"",
true);
static SwitchOption interfaceAlmostUnweightedOff
(interfaceAlmostUnweighted,
"Off",
"",
false);
static Switch<GeneralSampler,bool> interfaceParallelIntegration
("ParallelIntegration",
"Prepare parallel jobs for integration.",
&GeneralSampler::theParallelIntegration, false, false, false);
static SwitchOption interfaceParallelIntegrationYes
(interfaceParallelIntegration,
"Yes",
"",
true);
static SwitchOption interfaceParallelIntegrationNo
(interfaceParallelIntegration,
"No",
"",
false);
static Parameter<GeneralSampler,unsigned int> interfaceIntegratePerJob
("IntegratePerJob",
"The number of subprocesses to integrate per job.",
&GeneralSampler::theIntegratePerJob, 0, 0, 0,
false, false, Interface::lowerlim);
static Parameter<GeneralSampler,unsigned int> interfaceIntegrationJobs
("IntegrationJobs",
"The maximum number of integration jobs to create.",
&GeneralSampler::theIntegrationJobs, 0, 0, 0,
false, false, Interface::lowerlim);
static Parameter<GeneralSampler,unsigned int> interfaceIntegrationJobsCreated
("IntegrationJobsCreated",
"The number of integration jobs which were actually created.",
&GeneralSampler::theIntegrationJobsCreated, 1, 1, 0,
false, false, Interface::lowerlim);
static Switch<GeneralSampler,bool> interfaceWriteGridsOnFinish
("WriteGridsOnFinish",
"Write grids on finishing a run.",
&GeneralSampler::theWriteGridsOnFinish, false, false, false);
static SwitchOption interfaceWriteGridsOnFinishYes
(interfaceWriteGridsOnFinish,
"Yes",
"",
true);
static SwitchOption interfaceWriteGridsOnFinishNo
(interfaceWriteGridsOnFinish,
"No",
"",
false);
}
diff --git a/Sampling/GeneralSampler.h b/Sampling/GeneralSampler.h
--- a/Sampling/GeneralSampler.h
+++ b/Sampling/GeneralSampler.h
@@ -1,489 +1,496 @@
// -*- C++ -*-
//
// GeneralSampler.h is a part of Herwig - A multi-purpose Monte Carlo event generator
// Copyright (C) 2002-2012 The Herwig Collaboration
//
// Herwig is licenced under version 2 of the GPL, see COPYING for details.
// Please respect the MCnet academic guidelines, see GUIDELINES for details.
//
#ifndef Herwig_GeneralSampler_H
#define Herwig_GeneralSampler_H
//
// This is the declaration of the GeneralSampler class.
//
#include "ThePEG/Handlers/SamplerBase.h"
#include "BinSampler.h"
namespace Herwig {
using namespace ThePEG;
/**
* \ingroup Matchbox
* \author Simon Platzer
*
* \brief A general purpose sampler steering a collection of BinSampler objects
*
* @see \ref GeneralSamplerInterfaces "The interfaces"
* defined for GeneralSampler.
*/
class GeneralSampler: public SamplerBase {
public:
/** @name Standard constructors and destructors. */
//@{
/**
* The default constructor.
*/
GeneralSampler();
/**
* The destructor.
*/
virtual ~GeneralSampler();
//@}
public:
/** @name Virtual functions from SamplerBase. */
//@{
/**
* Initialize the sampler, possibly doing presampling of the
* phase space.
*/
virtual void initialize();
/**
* Generate a new phase space point and return a weight associated
* with it. This weight should preferably be 1.
*/
virtual double generate();
/**
* Reject the last chosen phase space point.
*/
virtual void rejectLast();
/**
* If the sampler is able to sample several different functions
* separately, this function should return the last chosen
* function. This default version always returns 0.
*/
virtual int lastBin() const { return lastSampler() ? lastSampler()->bin() : 0; }
/**
* Return the total integrated cross section determined from the
* Monte Carlo sampling so far.
*/
virtual CrossSection integratedXSec() const {
currentCrossSections();
return theIntegratedXSec;
}
/**
* Return the error on the total integrated cross section determined
* from the Monte Carlo sampling so far.
*/
virtual CrossSection integratedXSecErr() const {
currentCrossSections();
return theIntegratedXSecErr;
}
/**
* Return the overestimated integrated cross section.
*/
virtual CrossSection maxXSec() const {
if ( theAddUpSamplers )
return SamplerBase::maxXSec();
return theMaxWeight*nanobarn;
}
/**
* Return the sum of the weights returned by generate() so far (of
* the events that were not rejected).
*/
virtual double sumWeights() const { return theSumWeights; }
/**
* Return the sum of the weights squared returned by generate() so far (of
* the events that were not rejected).
*/
virtual double sumWeights2() const { return theSumWeights2; }
/**
* Return the number of attempts
*/
virtual double attempts() const {
if ( theAddUpSamplers )
return SamplerBase::attempts();
return theAttempts;
}
/**
* Return the number of accepts
*/
double accepts() const { return theAccepts; }
//@}
/**
* Return the samplers
*/
const map<double,Ptr<BinSampler>::ptr>& samplers() const { return theSamplers; }
/**
* Return the bin sampler
*/
Ptr<BinSampler>::ptr binSampler() const { return theBinSampler; }
/**
* Return the last selected bin sampler
*/
Ptr<BinSampler>::tptr lastSampler() const { return theLastSampler; }
/**
* True if we should do weighted events
*/
bool weighted() const { return eventHandler()->weighted(); }
+
+ /**
+ * True if the sampler runs in AlmostUnweighted mode.
+ */
+
+ bool almostUnweighted() const { return theAlmostUnweighted; }
+
public:
/**
* Return the XML element containing the grids
*/
const XML::Element& grids() const { return theGrids; }
/**
* Access the XML element containing the grids
*/
XML::Element& grids() { return theGrids; }
/**
* Write out grids
*/
void writeGrids() const;
/**
* Read in grids
*/
void readGrids();
/**
* Return the number of integration jobs which were actually created.
*/
unsigned int integrationJobsCreated() {
return theIntegrationJobsCreated;
}
/**
* An external hook to prepare the sampler for generating events, e.g. by
* combining grid files from parallel integration runs.
*/
virtual void prepare();
protected:
/**
* Access the samplers
*/
map<double,Ptr<BinSampler>::ptr>& samplers() { return theSamplers; }
/**
* Set the last selected bin sampler
*/
void lastSampler(Ptr<BinSampler>::tptr s) { theLastSampler = s; }
/**
* Calculate cross sections from samplers at current state.
*/
void currentCrossSections() const;
/**
* Update the sampler selection
*/
void updateSamplers();
public:
/** @name Functions used by the persistent I/O system. */
//@{
/**
* Function used to write out object persistently.
* @param os the persistent output stream written to.
*/
void persistentOutput(PersistentOStream & os) const;
/**
* Function used to read in object persistently.
* @param is the persistent input stream read from.
* @param version the version number of the object when written.
*/
void persistentInput(PersistentIStream & is, int version);
//@}
/**
* The standard Init function used to initialize the interfaces.
* Called exactly once for each class by the class description system
* before the main function starts or
* when this class is dynamically loaded.
*/
static void Init();
protected:
/** @name Clone Methods. */
//@{
/**
* Make a simple clone of this object.
* @return a pointer to the new object.
*/
virtual IBPtr clone() const;
/** Make a clone of this object, possibly modifying the cloned object
* to make it sane.
* @return a pointer to the new object.
*/
virtual IBPtr fullclone() const;
//@}
// If needed, insert declarations of virtual function defined in the
// InterfacedBase class here (using ThePEG-interfaced-decl in Emacs).
protected:
/**
* Initialize this object after the setup phase before saving an
* EventGenerator to disk.
* @throws InitException if object could not be initialized properly.
*/
virtual void doinit();
/**
* Initialize this object. Called in the run phase just before
* a run begins.
*/
virtual void doinitrun();
/**
* Finalize this object. Called in the run phase just after a
* run has ended. Used e.g. to write out statistics.
*/
virtual void dofinish();
/**
* Rebind pointer to other Interfaced objects. Called in the setup phase
* after all objects used in an EventGenerator have been cloned so that
* the pointers will refer to the cloned objects afterwards.
* @param trans a TranslationMap relating the original objects to
* their respective clones.
* @throws RebindException if no cloned object was found for a given
* pointer.
*/
virtual void rebind(const TranslationMap & trans);
/**
* Return a vector of all pointers to Interfaced objects used in this
* object.
* @return a vector of pointers.
*/
virtual IVector getReferences();
private:
/**
* Whether or not additional information should be printed to cout.
*/
bool theVerbose;
/**
* The XML element containing the grids
*/
XML::Element theGrids;
/**
* The bin sampler to use.
*/
Ptr<BinSampler>::ptr theBinSampler;
/**
* The selector map for the bin samplers.
*/
map<double,Ptr<BinSampler>::ptr> theSamplers;
/**
* The last selected bin sampler.
*/
Ptr<BinSampler>::tptr theLastSampler;
/**
* The integrated cross section
*/
mutable CrossSection theIntegratedXSec;
/**
* The integrated cross section error
*/
mutable CrossSection theIntegratedXSecErr;
/**
* The number of events after which cross sections should truly be
* updated. This is used to avoid combining statistics too frequently
* when HepMC events are written out.
*/
size_t theUpdateAfter;
/**
* The number of calls to currentCrossSections since the last
* update.
*/
mutable size_t crossSectionCalls;
/**
* True, if currentCrossSections has been called since the last call
* to generate.
*/
mutable bool gotCrossSections;
/**
* The sum of weights
*/
double theSumWeights;
/**
* The sum of weights squared
*/
double theSumWeights2;
/**
* The number of attempts
*/
double theAttempts;
/**
* The number of accepts
*/
double theAccepts;
/**
* The maximum weight encountered
*/
double theMaxWeight;
/**
* True, if cross sections are to be combined from each sampler
* individually
*/
bool theAddUpSamplers;
/**
* True, if the global maximum weight should be used as
* reference. If not, the maximum weights of individual samplers are
* used, and selection probabilities for the samplers are adjusted
* accordingly.
*/
bool theGlobalMaximumWeight;
/**
* True, if subprocesses should be selected flat. This is a debug
* flag; cross section information and distributions will not be
* correct.
*/
bool theFlatSubprocesses;
/**
* True, if we are generating events.
*/
bool isSampling;
/**
* A minimum selection probability for each sampler
*/
double theMinSelection;
/**
* True, if information for combining unnormalized runs should be
* printed out
*/
bool runCombinationData;
/**
* True, if we should perform an almost unweighted sampling
*/
bool theAlmostUnweighted;
/**
* Number of points which exceeded the maximum
*/
unsigned long maximumExceeds;
/**
* The average relative deviation from the maximum weight
*/
double maximumExceededBy;
/**
* The correct cross section as one would expect with
* almostUnweighted.
*/
double correctWeights;
/**
* Enhancement factor to the maximum weight.
* This is meant to reduce the number of maximum exceeds.
*/
double theMaxEnhancement;
/**
* True, if grids have already been read.
*/
bool didReadGrids;
/**
* True, if parallel subprocess integration should be enabled
*/
bool theParallelIntegration;
/**
* The number of subprocesses to integrate per job
*/
unsigned int theIntegratePerJob;
/**
* The maximum number of integration jobs to be created
*/
unsigned int theIntegrationJobs;
/**
* The number of integration jobs which were actually created
*/
unsigned int theIntegrationJobsCreated;
/**
* Indicate that initialization is only reading a grid.
*/
bool justAfterIntegrate;
/**
* True, if grids should be written at the end of a run
*/
bool theWriteGridsOnFinish;
private:
/**
* The assignment operator is private and must never be called.
* In fact, it should not even be implemented.
*/
GeneralSampler & operator=(const GeneralSampler &);
};
}
#endif /* Herwig_GeneralSampler_H */
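Illustrative note (not part of the patch): the new almostUnweighted() accessor lets the bin samplers rescale their reference weight by a factor kappa(), presumably introduced on BinSampler elsewhere in this changeset and used in MonacoSampler::generate() further below. The following is a minimal, self-contained sketch of that acceptance step, with placeholder names and a fixed random number standing in for a fresh uniform draw per point.
#include <algorithm>
#include <cmath>
#include <iostream>
// Partial unweighting: points with |w| below kappa*wref are kept with
// probability |w|/(kappa*wref) and promoted to +-kappa*wref; heavier points
// keep their original weight (sign is preserved in both cases).
double partiallyUnweight(double w, double wref, double kappa, double rnd) {
  double p = std::min(std::abs(w), kappa*wref)/(kappa*wref);
  double sign = w >= 0. ? 1. : -1.;
  if ( p < 1. && rnd > p ) return 0.;              // rejected point
  return sign*std::max(std::abs(w), kappa*wref);   // accepted point
}
int main() {
  const double wref  = 2.0;   // reference weight from the read/integration step
  const double kappa = 0.5;   // kappa < 1 only makes sense in AlmostUnweighted mode
  const double ws[] = {0.3, 1.5, 3.0};
  for ( double w : ws )
    std::cout << w << " -> " << partiallyUnweight(w, wref, kappa, 0.5) << '\n';
  return 0;
}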
diff --git a/Sampling/MonacoSampler.cc b/Sampling/MonacoSampler.cc
--- a/Sampling/MonacoSampler.cc
+++ b/Sampling/MonacoSampler.cc
@@ -1,398 +1,399 @@
// -*- C++ -*-
//
// MonacoSampler.cc is a part of Herwig - A multi-purpose Monte Carlo event generator
// Copyright (C) 2002-2012 The Herwig Collaboration
//
// Herwig is licenced under version 2 of the GPL, see COPYING for details.
// Please respect the MCnet academic guidelines, see GUIDELINES for details.
//
//
// This is the implementation of the non-inlined, non-templated member
// functions of the MonacoSampler class.
//
#include "MonacoSampler.h"
#include "ThePEG/Interface/ClassDocumentation.h"
#include "ThePEG/EventRecord/Particle.h"
#include "ThePEG/Repository/UseRandom.h"
#include "ThePEG/Repository/EventGenerator.h"
#include "ThePEG/Utilities/DescribeClass.h"
#include "ThePEG/Repository/Repository.h"
#include "ThePEG/Interface/Parameter.h"
#include "ThePEG/Persistency/PersistentOStream.h"
#include "ThePEG/Persistency/PersistentIStream.h"
#include "ThePEG/Handlers/StandardEventHandler.h"
#include "ThePEG/Handlers/StandardXComb.h"
#include <boost/progress.hpp>
#include "MonacoSampler.h"
#include "Herwig/Sampling/GeneralSampler.h"
using namespace Herwig;
MonacoSampler::MonacoSampler()
: BinSampler(),
theAlpha(0.875),
theGridDivisions(48),
theIterationPoints(0) {}
MonacoSampler::~MonacoSampler() {}
IBPtr MonacoSampler::clone() const {
return new_ptr(*this);
}
IBPtr MonacoSampler::fullclone() const {
return new_ptr(*this);
}
double MonacoSampler::generate() {
double w = 1.;
// cout<<"\npoint: ";
std::valarray<int> upperb(dimension());
for ( int k = 0; k < dimension(); ++k ) {
double div = (1 - UseRandom::rnd()) * theGridDivisions;
upperb[k] = static_cast<int>(div);
double gupper, glower;
if ( upperb[k] <= 0 ) {
upperb[k] = 0;
glower = 0.;
gupper = theGrid(k,0);
} else if (upperb[k] >= static_cast<int>(theGridDivisions)) {
upperb[k] = theGridDivisions-1;
glower = theGrid(k,theGridDivisions-2);
gupper = theGrid(k,theGridDivisions-1);
} else {
glower = theGrid(k,upperb[k]-1);
gupper = theGrid(k,upperb[k]);
}
double gdiff = gupper - glower;
lastPoint()[k] = glower + (div-upperb[k])*gdiff;
w *= gdiff * theGridDivisions;
}
// cout<<lastPoint()[k]<<" ";
try {
w *= eventHandler()->dSigDR(lastPoint()) / nanobarn;
} catch (Veto&) {
w = 0.0;
} catch (...) {
throw;
}
// only store numbers
double wgt = w;
if ( isnan(wgt) || isinf(wgt) ) wgt = 0;
// save results for later grid optimization
theIterationPoints++;
for ( int k = 0; k < dimension(); ++k ) {
theGridData(k,upperb[k]) += wgt*wgt;
}
if (randomNumberString()!="")
for ( size_t k = 0; k < lastPoint().size(); ++k ) {
RandomNumberHistograms[RandomNumberIndex(id(),k)].first.book(lastPoint()[k],wgt);
RandomNumberHistograms[RandomNumberIndex(id(),k)].second+=wgt;
}
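// Partial unweighting: keep the point with probability
// min(|w|,kappa()*referenceWeight())/(kappa()*referenceWeight()) and, if kept,
// promote its weight to at least kappa()*referenceWeight(); kappa() != 1 is
// only used in AlmostUnweighted mode (cf. the assert below).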
if ( !weighted() && initialized() ) {
- double p = min(abs(w),referenceWeight())/referenceWeight();
+ double p = min(abs(w),kappa()*referenceWeight())/(kappa()*referenceWeight());
double sign = w >= 0. ? 1. : -1.;
if ( p < 1 && UseRandom::rnd() > p )
w = 0.;
else
- w = sign*max(abs(w),referenceWeight());
+ w = sign*max(abs(w),kappa()*referenceWeight());
}
select(w);
+ assert(kappa()==1.||sampler()->almostUnweighted());
if ( w != 0.0 )
accept();
return w;
}
void MonacoSampler::saveGrid() const {
XML::Element grid = toXML();
grid.appendAttribute("process",id());
sampler()->grids().append(grid);
}
void MonacoSampler::initialize(bool progress) {
//read in grid
bool haveGrid = false;
list<XML::Element>::iterator git = sampler()->grids().children().begin();
for ( ; git != sampler()->grids().children().end(); ++git ) {
if ( git->type() != XML::ElementTypes::Element )
continue;
if ( git->name() != "Monaco" )
continue;
string proc;
git->getFromAttribute("process",proc);
if ( proc == id() ) {
haveGrid = true;
break;
}
}
if ( haveGrid ) {
fromXML(*git);
sampler()->grids().erase(git);
didReadGrids();
} else {
// flat grid
theGrid.resize(dimension(),theGridDivisions);
for (int k = 0; k < dimension(); k++)
for (size_t l = 0; l < theGridDivisions; l++)
theGrid(k,l) = (l+1)/static_cast<double>(theGridDivisions);
theGridData = boost::numeric::ublas::zero_matrix<double>(dimension(),theGridDivisions);
theIterationPoints = 0;
}
lastPoint().resize(dimension());
if (randomNumberString()!="")
for(size_t i=0;i<lastPoint().size();i++){
RandomNumberHistograms[RandomNumberIndex(id(),i)] = make_pair( RandomNumberHistogram(),0.);
}
if ( initialized() ) {
if ( !hasGrids() )
throw Exception() << "MonacoSampler: Require existing grid when starting to run.\n"
<< "Did you miss setting --setupfile?"
<< Exception::abortnow;
return;
}
if ( haveGrid ) {
if ( !integrated() ) {
runIteration(initialPoints(),progress);
adapt();
}
isInitialized();
return;
}
// if ( !sampler()->grids().children().empty() ) {
// nIterations(1);
// }
unsigned long points = initialPoints();
for ( unsigned long k = 0; k < nIterations(); ++k ) {
runIteration(points,progress);
if ( k < nIterations() - 1 ) {
points = (unsigned long)(points*enhancementFactor());
adapt();
nextIteration();
}
}
adapt();
didReadGrids();
isInitialized();
}
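// Vegas-style grid refinement: the squared weights accumulated in theGridData
// are first smoothed over neighbouring bins, then the bin edges of each
// dimension are redistributed so that every bin carries an (alpha-damped)
// roughly equal share of the accumulated weight.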
void MonacoSampler::adapt() {
int dim = dimension();
// refine grid
std::valarray<double> gridcumul(dim);
for (int k=0; k<dim; ++k) {
double gridold = theGridData(k,0);
double gridnew = theGridData(k,1);
theGridData(k,0) = (gridold + gridnew) / 2.0;
gridcumul[k] = theGridData(k,0);
for (size_t l=1; l<theGridDivisions-1; ++l) {
theGridData(k,l) = gridold + gridnew;
gridold = gridnew;
gridnew = theGridData(k,l+1);
theGridData(k,l) = (theGridData(k,l) + gridnew) / 3.0;
gridcumul[k] += theGridData(k,l);
}
theGridData(k,theGridDivisions-1) = (gridnew + gridold) / 2.0;
gridcumul[k] += theGridData(k,theGridDivisions-1);
}
for (int k=0; k<dim; ++k) {
double rc = 0.;
std::valarray<double> ri(theGridDivisions);
for (size_t l=0; l<theGridDivisions; ++l) {
ri[l] = 0.;
if ((theGridData(k,l) >= 0) && (gridcumul[k] != 0)) {
theGridData(k,l) = max( 1.0e-30, theGridData(k,l) );
double gpart = gridcumul[k] / theGridData(k,l);
ri[l] = pow( (gpart - 1.0) / (gpart * log( gpart )), theAlpha);
} else {
ri[l] = pow( 1. / log( 1e30 ), theAlpha);
}
rc += ri[l];
}
rc /= theGridDivisions;
double gridold = 0, gridnew = 0.;
double deltar = 0.;
unsigned int m = 0;
std::valarray<double> theGridRowNew(theGridDivisions);
for (size_t l = 0; l < theGridDivisions; ++l) {
deltar += ri[l];
gridold = gridnew;
gridnew = theGrid(k,l);
for (; deltar > rc; m++) {
deltar -= rc;
theGridRowNew[m] = gridnew - (gridnew - gridold) * deltar / ri[l];
}
}
for (size_t l = 0; l < theGridDivisions-1; ++l) {
theGrid(k,l) = theGridRowNew[l];
}
theGrid(k,theGridDivisions-1) = 1.0;
}
theGridData = boost::numeric::ublas::zero_matrix<double>(dimension(),theGridDivisions);
theIterationPoints = 0;
}
void MonacoSampler::finalize(bool) {
// save grid
adapt();
XML::Element grid = MonacoSampler::toXML();
grid.appendAttribute("process",id());
sampler()->grids().append(grid);
if (randomNumberString()!="")
for ( map<RandomNumberIndex,pair<RandomNumberHistogram,double> >::
const_iterator b = RandomNumberHistograms.begin();
b != RandomNumberHistograms.end(); ++b ) {
b->second.first.dump(randomNumberString(), b->first.first,shortprocess(),b->first.second);
}
}
void MonacoSampler::fromXML(const XML::Element& grid) {
int dim = 0;
grid.getFromAttribute("Dimension",dim);
if ( dim != dimension() ) {
throw std::runtime_error("[MonacoSampler] Number of dimensions in grid file does not match expectation.");
}
size_t griddivisions = 0;
grid.getFromAttribute("GridDivisions",griddivisions);
boost::numeric::ublas::matrix<double> tmpgrid(dim,griddivisions);
pair<multimap<pair<int,string>,list<XML::Element>::iterator>::const_iterator,multimap<pair<int,string>,list<XML::Element>::iterator>::const_iterator> cit;
cit = grid.findAll(XML::ElementTypes::Element,"GridVector");
if ( cit.first->second == grid.children().end() )
throw std::runtime_error("[MonacoSampler] Expected a GridVector element.");
for (multimap<pair<int,string>,list<XML::Element>::iterator>::const_iterator iit=cit.first; iit!=cit.second; ++iit) {
const XML::Element& gridvector = *iit->second;
int k = 0;
gridvector.getFromAttribute("Index",k);
if ( k >= dim ) {
throw std::runtime_error("[MonacoSampler] Index of grid dimension larger than grid size.");
} else {
list<XML::Element>::const_iterator git;
git = gridvector.findFirst(XML::ElementTypes::ParsedCharacterData,"");
if ( git == gridvector.children().end() )
throw std::runtime_error("[MonacoSampler] Expected grid data.");
istringstream bdata(git->content());
for ( size_t l = 0; l < griddivisions; ++l ) {
bdata >> tmpgrid(k,l);
}
}
}
// store back into main variable
// if griddivisions do not match, rebin preserving bin density
theGrid.resize(dim,theGridDivisions);
theIterationPoints = 0;
double divratio = griddivisions / static_cast<double>(theGridDivisions);
for (int k = 0; k < dim; k++) {
double xold = 0, xnew = 0, deltar = 0;
size_t l = 0;
for (size_t m = 0; m < griddivisions; m++) {
deltar += 1;
xold = xnew;
xnew = tmpgrid(k,m);
for (; deltar > divratio; l++) {
deltar -= divratio;
theGrid(k,l) = xnew - (xnew - xold) * deltar;
}
}
theGrid(k,theGridDivisions-1) = 1.0;
}
theGridData = boost::numeric::ublas::zero_matrix<double>(dimension(),theGridDivisions);
}
XML::Element MonacoSampler::toXML() const {
XML::Element grid(XML::ElementTypes::Element,"Monaco");
grid.appendAttribute("Dimension",dimension());
grid.appendAttribute("GridDivisions",theGridDivisions);
for ( int k = 0; k < dimension(); ++k ) {
XML::Element gridvector(XML::ElementTypes::Element,"GridVector");
gridvector.appendAttribute("Index",k);
ostringstream bdata;
bdata << setprecision(17);
for ( size_t l = 0; l < theGridDivisions; ++l )
bdata << theGrid(k,l) << " ";
XML::Element belem(XML::ElementTypes::ParsedCharacterData,bdata.str());
gridvector.append(belem);
grid.append(gridvector);
}
return grid;
}
// If needed, insert default implementations of virtual function defined
// in the InterfacedBase class here (using ThePEG-interfaced-impl in Emacs).
void MonacoSampler::persistentOutput(PersistentOStream & os) const {
BinSampler::put(os);
os << theAlpha << theGridDivisions;
}
void MonacoSampler::persistentInput(PersistentIStream & is, int) {
BinSampler::get(is);
is >> theAlpha >> theGridDivisions;
}
// *** Attention *** The following static variable is needed for the type
// description system in ThePEG. Please check that the template arguments
// are correct (the class and its base class), and that the constructor
// arguments are correct (the class name and the name of the dynamically
// loadable library where the class implementation can be found).
DescribeClass<MonacoSampler,BinSampler>
describeHerwigMonacoSampler("Herwig::MonacoSampler", "HwSampling.so");
void MonacoSampler::Init() {
static ClassDocumentation<MonacoSampler> documentation
("MonacoSampler samples XCombs bins. This implementation performs weighted MC integration using Monaco, an adapted Vegas algorithm.");
static Parameter<MonacoSampler,double> interfaceAlpha
("Alpha",
"Rate of grid modification (0 for no modification).",
&MonacoSampler::theAlpha, 0.875, 0.0, 0,
false, false, Interface::lowerlim);
static Parameter<MonacoSampler,size_t> interfaceGridDivisions
("GridDivisions",
"The number of divisions per grid dimension.",
&MonacoSampler::theGridDivisions, 48, 1, 0,
false, false, Interface::lowerlim);
}
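For orientation (an illustrative sketch, not part of the patch): MonacoSampler maps each uniform random number through a per-dimension grid of theGridDivisions bin edges, as in generate() above, and adapt() later rebalances those edges so that bins carrying more weight become narrower. The sketch below shows only the mapping step for a single dimension; names are placeholders, and the grid is assumed to store increasing upper bin edges ending at 1, as the flat grid in initialize() does.
#include <algorithm>
#include <iostream>
#include <vector>
// Map r in (0,1] to a point in [0,1) using the bin edges in "grid"; multiply
// "w" by the Jacobian of the mapping (bin width times number of divisions).
double mapThroughGrid(double r, const std::vector<double>& grid, double& w) {
  const int n = static_cast<int>(grid.size());
  double div = r*n;                                          // position in units of bins
  int bin = std::min(std::max(static_cast<int>(div), 0), n-1);
  double lower = bin > 0 ? grid[bin-1] : 0.;
  double upper = grid[bin];
  double width = upper - lower;
  w *= width*n;                                              // flat grid gives w *= 1
  return lower + (div - bin)*width;
}
int main() {
  // A flat grid with 4 divisions: edges at 0.25, 0.5, 0.75, 1.0
  std::vector<double> grid = {0.25, 0.5, 0.75, 1.0};
  double w = 1.;
  double x = mapThroughGrid(0.6, grid, w);
  std::cout << "x = " << x << ", jacobian = " << w << '\n';  // x = 0.6, jacobian = 1
  return 0;
}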
diff --git a/Shower/Base/Evolver.cc b/Shower/Base/Evolver.cc
deleted file mode 100644
--- a/Shower/Base/Evolver.cc
+++ /dev/null
@@ -1,3713 +0,0 @@
-// -*- C++ -*-
-//
-// Evolver.cc is a part of Herwig - A multi-purpose Monte Carlo event generator
-// Copyright (C) 2002-2011 The Herwig Collaboration
-//
-// Herwig is licenced under version 2 of the GPL, see COPYING for details.
-// Please respect the MCnet academic guidelines, see GUIDELINES for details.
-//
-//
-// This is the implementation of the non-inlined, non-templated member
-// functions of the Evolver class.
-//
-#include "Evolver.h"
-#include "ThePEG/Interface/ClassDocumentation.h"
-#include "ThePEG/Interface/Reference.h"
-#include "ThePEG/Interface/RefVector.h"
-#include "ThePEG/Interface/Switch.h"
-#include "ThePEG/Interface/Parameter.h"
-#include "ThePEG/Persistency/PersistentOStream.h"
-#include "ThePEG/Persistency/PersistentIStream.h"
-#include "Herwig/Shower/Base/ShowerParticle.h"
-#include "ThePEG/Utilities/EnumIO.h"
-#include "ShowerKinematics.h"
-#include "ThePEG/PDT/EnumParticles.h"
-#include "ThePEG/Repository/EventGenerator.h"
-#include "ThePEG/Handlers/EventHandler.h"
-#include "ThePEG/Handlers/StandardEventHandler.h"
-#include "ThePEG/Utilities/Throw.h"
-#include "ShowerTree.h"
-#include "ShowerProgenitor.h"
-#include "KinematicsReconstructor.h"
-#include "PartnerFinder.h"
-#include "ThePEG/Handlers/StandardXComb.h"
-#include "ThePEG/PDT/DecayMode.h"
-#include "Herwig/Shower/ShowerHandler.h"
-#include "ThePEG/Utilities/DescribeClass.h"
-#include "ShowerVertex.h"
-#include "ThePEG/Repository/CurrentGenerator.h"
-#include "Herwig/MatrixElement/Matchbox/Base/SubtractedME.h"
-#include "Herwig/MatrixElement/Matchbox/MatchboxFactory.h"
-#include "ThePEG/Handlers/StandardXComb.h"
-
-using namespace Herwig;
-
-namespace {
-
- /**
- * A struct to order the particles in the same way as in the DecayMode's
- */
- struct ParticleOrdering {
- /**
- * Operator for the ordering
- * @param p1 The first ParticleData object
- * @param p2 The second ParticleData object
- */
- bool operator() (tcPDPtr p1, tcPDPtr p2) {
- return abs(p1->id()) > abs(p2->id()) ||
- ( abs(p1->id()) == abs(p2->id()) && p1->id() > p2->id() ) ||
- ( p1->id() == p2->id() && p1->fullName() > p2->fullName() );
- }
- };
- typedef multiset<tcPDPtr,ParticleOrdering> OrderedParticles;
-
- /**
- * Cached lookup of decay modes.
- * Generator::findDecayMode() is not efficient.
- */
- tDMPtr findDecayMode(const string & tag) {
- static map<string,DMPtr> cache;
- map<string,DMPtr>::const_iterator pos = cache.find(tag);
-
- if ( pos != cache.end() )
- return pos->second;
-
- tDMPtr dm = CurrentGenerator::current().findDecayMode(tag);
- cache[tag] = dm;
- return dm;
- }
-}
-
-DescribeClass<Evolver,Interfaced>
-describeEvolver ("Herwig::Evolver","HwShower.so");
-
-bool Evolver::_hardEmissionModeWarn = true;
-bool Evolver::_missingTruncWarn = true;
-
-IBPtr Evolver::clone() const {
- return new_ptr(*this);
-}
-
-IBPtr Evolver::fullclone() const {
- return new_ptr(*this);
-}
-
-void Evolver::persistentOutput(PersistentOStream & os) const {
- os << _model << _splittingGenerator << _maxtry
- << _meCorrMode << _hardVetoMode << _hardVetoRead << _hardVetoReadOption
- << _limitEmissions << _spinOpt << _softOpt << _hardPOWHEG
- << ounit(_iptrms,GeV) << _beta << ounit(_gamma,GeV) << ounit(_iptmax,GeV)
- << _vetoes << _fullShowerVetoes << _nReWeight << _reWeight
- << _trunc_Mode << _hardEmissionMode << _reconOpt
- << isMCatNLOSEvent << isMCatNLOHEvent
- << isPowhegSEvent << isPowhegHEvent
- << theFactorizationScaleFactor << theRenormalizationScaleFactor << ounit(muPt,GeV)
- << interaction_ << _maxTryFSR << _maxFailFSR << _fracFSR << interactions_.size();
- for(unsigned int ix=0;ix<interactions_.size();++ix)
- os << oenum(interactions_[ix]);
-}
-
-void Evolver::persistentInput(PersistentIStream & is, int) {
- unsigned int isize;
- is >> _model >> _splittingGenerator >> _maxtry
- >> _meCorrMode >> _hardVetoMode >> _hardVetoRead >> _hardVetoReadOption
- >> _limitEmissions >> _spinOpt >> _softOpt >> _hardPOWHEG
- >> iunit(_iptrms,GeV) >> _beta >> iunit(_gamma,GeV) >> iunit(_iptmax,GeV)
- >> _vetoes >> _fullShowerVetoes >> _nReWeight >> _reWeight
- >> _trunc_Mode >> _hardEmissionMode >> _reconOpt
- >> isMCatNLOSEvent >> isMCatNLOHEvent
- >> isPowhegSEvent >> isPowhegHEvent
- >> theFactorizationScaleFactor >> theRenormalizationScaleFactor >> iunit(muPt,GeV)
- >> interaction_ >> _maxTryFSR >> _maxFailFSR >> _fracFSR >> isize;
- interactions_.resize(isize);
- for(unsigned int ix=0;ix<interactions_.size();++ix)
- is >> ienum(interactions_[ix]);
-}
-
-void Evolver::doinit() {
- Interfaced::doinit();
- // interactions may have been changed through a setup file so we
- // clear it up here
- interactions_.clear();
- if(interaction_==0) {
- interactions_.push_back(ShowerInteraction::QCD);
- interactions_.push_back(ShowerInteraction::QED);
- }
- else if(interaction_==1) {
- interactions_.push_back(ShowerInteraction::QCD);
- }
- else if(interaction_==2) {
- interactions_.push_back(ShowerInteraction::QED);
- interactions_.push_back(ShowerInteraction::QCD);
- }
- else if(interaction_==3) {
- interactions_.push_back(ShowerInteraction::QED);
- }
- else if(interaction_==4) {
- interactions_.push_back(ShowerInteraction::Both);
- }
- // calculate max no of FSR vetos
- _maxFailFSR = max(int(_maxFailFSR), int(_fracFSR*double(generator()->N())));
- // check on the reweighting
- for(unsigned int ix=0;ix<_fullShowerVetoes.size();++ix) {
- if(_fullShowerVetoes[ix]->behaviour()==1) {
- _reWeight = true;
- break;
- }
- }
- if(_reWeight && maximumTries()<_nReWeight) {
- throw Exception() << "Reweight being performed in the shower but the number of attempts for the"
- << "shower is less than that for the reweighting.\n"
- << "Maximum number of attempt for the shower "
- << fullName() << ":MaxTry is " << maximumTries() << "\nand for reweighting is "
- << fullName() << ":NReWeight is " << _nReWeight << "\n"
- << "we recommend the number of attempts is 10 times the number for reweighting\n"
- << Exception::runerror;
- }
-}
-
-void Evolver::Init() {
-
- static ClassDocumentation<Evolver> documentation
- ("This class is responsible for carrying out the showering,",
- "including the kinematics reconstruction, in a given scale range,"
- "including the option of the POWHEG approach to simulated next-to-leading order"
- " radiation\\cite{Nason:2004rx}.",
- "%\\cite{Nason:2004rx}\n"
- "\\bibitem{Nason:2004rx}\n"
- " P.~Nason,\n"
- " ``A new method for combining NLO QCD with shower Monte Carlo algorithms,''\n"
- " JHEP {\\bf 0411} (2004) 040\n"
- " [arXiv:hep-ph/0409146].\n"
- " %%CITATION = JHEPA,0411,040;%%\n");
-
- static Reference<Evolver,SplittingGenerator>
- interfaceSplitGen("SplittingGenerator",
- "A reference to the SplittingGenerator object",
- &Herwig::Evolver::_splittingGenerator,
- false, false, true, false);
-
- static Reference<Evolver,ShowerModel> interfaceShowerModel
- ("ShowerModel",
- "The pointer to the object which defines the shower evolution model.",
- &Evolver::_model, false, false, true, false, false);
-
- static Parameter<Evolver,unsigned int> interfaceMaxTry
- ("MaxTry",
- "The maximum number of attempts to generate the shower from a"
- " particular ShowerTree",
- &Evolver::_maxtry, 100, 1, 100000,
- false, false, Interface::limited);
-
- static Parameter<Evolver,unsigned int> interfaceNReWeight
- ("NReWeight",
- "The number of attempts for the shower when reweighting",
- &Evolver::_nReWeight, 100, 10, 10000,
- false, false, Interface::limited);
-
- static Switch<Evolver, unsigned int> ifaceMECorrMode
- ("MECorrMode",
- "Choice of the ME Correction Mode",
- &Evolver::_meCorrMode, 1, false, false);
- static SwitchOption off
- (ifaceMECorrMode,"No","MECorrections off", 0);
- static SwitchOption on
- (ifaceMECorrMode,"Yes","hard+soft on", 1);
- static SwitchOption hard
- (ifaceMECorrMode,"Hard","only hard on", 2);
- static SwitchOption soft
- (ifaceMECorrMode,"Soft","only soft on", 3);
-
- static Switch<Evolver, unsigned int> ifaceHardVetoMode
- ("HardVetoMode",
- "Choice of the Hard Veto Mode",
- &Evolver::_hardVetoMode, 1, false, false);
- static SwitchOption HVoff
- (ifaceHardVetoMode,"No","hard vetos off", 0);
- static SwitchOption HVon
- (ifaceHardVetoMode,"Yes","hard vetos on", 1);
- static SwitchOption HVIS
- (ifaceHardVetoMode,"Initial", "only IS emissions vetoed", 2);
- static SwitchOption HVFS
- (ifaceHardVetoMode,"Final","only FS emissions vetoed", 3);
-
- static Switch<Evolver, unsigned int> ifaceHardVetoRead
- ("HardVetoScaleSource",
- "If hard veto scale is to be read",
- &Evolver::_hardVetoRead, 0, false, false);
- static SwitchOption HVRcalc
- (ifaceHardVetoRead,"Calculate","Calculate from hard process", 0);
- static SwitchOption HVRread
- (ifaceHardVetoRead,"Read","Read from XComb->lastScale", 1);
-
- static Switch<Evolver, bool> ifaceHardVetoReadOption
- ("HardVetoReadOption",
- "Apply read-in scale veto to all collisions or just the primary one?",
- &Evolver::_hardVetoReadOption, false, false, false);
- static SwitchOption AllCollisions
- (ifaceHardVetoReadOption,
- "AllCollisions",
- "Read-in pT veto applied to primary and secondary collisions.",
- false);
- static SwitchOption PrimaryCollision
- (ifaceHardVetoReadOption,
- "PrimaryCollision",
- "Read-in pT veto applied to primary but not secondary collisions.",
- true);
-
- static Parameter<Evolver, Energy> ifaceiptrms
- ("IntrinsicPtGaussian",
- "RMS of intrinsic pT of Gaussian distribution:\n"
- "2*(1-Beta)*exp(-sqr(intrinsicpT/RMS))/sqr(RMS)",
- &Evolver::_iptrms, GeV, ZERO, ZERO, 1000000.0*GeV,
- false, false, Interface::limited);
-
- static Parameter<Evolver, double> ifacebeta
- ("IntrinsicPtBeta",
- "Proportion of inverse quadratic distribution in generating intrinsic pT.\n"
- "(1-Beta) is the proportion of Gaussian distribution",
- &Evolver::_beta, 0, 0, 1,
- false, false, Interface::limited);
-
- static Parameter<Evolver, Energy> ifacegamma
- ("IntrinsicPtGamma",
- "Parameter for inverse quadratic:\n"
- "2*Beta*Gamma/(sqr(Gamma)+sqr(intrinsicpT))",
- &Evolver::_gamma,GeV, ZERO, ZERO, 100000.0*GeV,
- false, false, Interface::limited);
-
- static Parameter<Evolver, Energy> ifaceiptmax
- ("IntrinsicPtIptmax",
- "Upper bound on intrinsic pT for inverse quadratic",
- &Evolver::_iptmax,GeV, ZERO, ZERO, 100000.0*GeV,
- false, false, Interface::limited);
-
- static RefVector<Evolver,ShowerVeto> ifaceVetoes
- ("Vetoes",
- "The vetoes to be checked during showering",
- &Evolver::_vetoes, -1,
- false,false,true,true,false);
-
- static RefVector<Evolver,FullShowerVeto> interfaceFullShowerVetoes
- ("FullShowerVetoes",
- "The vetos to be appliede on the full final state of the shower",
- &Evolver::_fullShowerVetoes, -1, false, false, true, false, false);
-
- static Switch<Evolver,unsigned int> interfaceLimitEmissions
- ("LimitEmissions",
- "Limit the number and type of emissions for testing",
- &Evolver::_limitEmissions, 0, false, false);
- static SwitchOption interfaceLimitEmissionsNoLimit
- (interfaceLimitEmissions,
- "NoLimit",
- "Allow an arbitrary number of emissions",
- 0);
- static SwitchOption interfaceLimitEmissionsOneInitialStateEmission
- (interfaceLimitEmissions,
- "OneInitialStateEmission",
- "Allow one emission in the initial state and none in the final state",
- 1);
- static SwitchOption interfaceLimitEmissionsOneFinalStateEmission
- (interfaceLimitEmissions,
- "OneFinalStateEmission",
- "Allow one emission in the final state and none in the initial state",
- 2);
- static SwitchOption interfaceLimitEmissionsHardOnly
- (interfaceLimitEmissions,
- "HardOnly",
- "Only allow radiation from the hard ME correction",
- 3);
- static SwitchOption interfaceLimitEmissionsOneEmission
- (interfaceLimitEmissions,
- "OneEmission",
- "Allow one emission in either the final state or initial state, but not both",
- 4);
-
- static Switch<Evolver,bool> interfaceTruncMode
- ("TruncatedShower", "Include the truncated shower?",
- &Evolver::_trunc_Mode, 1, false, false);
- static SwitchOption interfaceTruncMode0
- (interfaceTruncMode,"No","Truncated Shower is OFF", 0);
- static SwitchOption interfaceTruncMode1
- (interfaceTruncMode,"Yes","Truncated Shower is ON", 1);
-
- static Switch<Evolver,int> interfaceHardEmissionMode
- ("HardEmissionMode",
- "Whether to use ME corrections or POWHEG for the hardest emission",
- &Evolver::_hardEmissionMode, 0, false, false);
- static SwitchOption interfaceHardEmissionModeDecayMECorrection
- (interfaceHardEmissionMode,
- "DecayMECorrection",
- "Old fashioned ME correction for decays only",
- -1);
- static SwitchOption interfaceHardEmissionModeMECorrection
- (interfaceHardEmissionMode,
- "MECorrection",
- "Old fashioned ME correction",
- 0);
- static SwitchOption interfaceHardEmissionModePOWHEG
- (interfaceHardEmissionMode,
- "POWHEG",
- "Powheg style hard emission using internal matrix elements",
- 1);
- static SwitchOption interfaceHardEmissionModeMatchboxPOWHEG
- (interfaceHardEmissionMode,
- "MatchboxPOWHEG",
- "Powheg style emission for the hard process using Matchbox",
- 2);
- static SwitchOption interfaceHardEmissionModeFullPOWHEG
- (interfaceHardEmissionMode,
- "FullPOWHEG",
- "Powheg style emission for the hard process using Matchbox "
- "and decays using internal matrix elements",
- 3);
-
- static Switch<Evolver,unsigned int > interfaceInteractions
- ("Interactions",
- "The interactions to be used in the shower",
- &Evolver::interaction_, 1, false, false);
- static SwitchOption interfaceInteractionsQCDFirst
- (interfaceInteractions,
- "QCDFirst",
- "QCD first then QED",
- 0);
- static SwitchOption interfaceInteractionsQCDOnly
- (interfaceInteractions,
- "QCDOnly",
- "Only QCD",
- 1);
- static SwitchOption interfaceInteractionsQEDFirst
- (interfaceInteractions,
- "QEDFirst",
- "QED first then QCD",
- 2);
- static SwitchOption interfaceInteractionsQEDOnly
- (interfaceInteractions,
- "QEDOnly",
- "Only QED",
- 3);
- static SwitchOption interfaceInteractionsBothAtOnce
- (interfaceInteractions,
- "BothAtOnce",
- "Generate both at the same time",
- 4);
-
- static Switch<Evolver,unsigned int> interfaceReconstructionOption
- ("ReconstructionOption",
- "Treatment of the reconstruction of the transverse momentum of "
- "a branching from the evolution scale.",
- &Evolver::_reconOpt, 0, false, false);
- static SwitchOption interfaceReconstructionOptionCutOff
- (interfaceReconstructionOption,
- "CutOff",
- "Use the cut-off masses in the calculation",
- 0);
- static SwitchOption interfaceReconstructionOptionOffShell
- (interfaceReconstructionOption,
- "OffShell",
- "Use the off-shell masses in the calculation veto the emission of the parent,"
- " no veto in generation of emissions from children",
- 1);
- static SwitchOption interfaceReconstructionOptionOffShell2
- (interfaceReconstructionOption,
- "OffShell2",
- "Use the off-shell masses in the calculation veto the emissions from the children."
- " no veto in generation of emissions from children",
- 2);
- static SwitchOption interfaceReconstructionOptionOffShell3
- (interfaceReconstructionOption,
- "OffShell3",
- "Use the off-shell masses in the calculation veto the emissions from the children."
- " veto in generation of emissions from children using cut-off for second parton",
- 3);
- static SwitchOption interfaceReconstructionOptionOffShell4
- (interfaceReconstructionOption,
- "OffShell4",
- "Ass OffShell3 but with a restriction on the mass of final-state"
- " jets produced via backward evolution.",
- 4);
-
- static Switch<Evolver,unsigned int> interfaceSpinCorrelations
- ("SpinCorrelations",
- "Treatment of spin correlations in the parton shower",
- &Evolver::_spinOpt, 1, false, false);
- static SwitchOption interfaceSpinCorrelationsOff
- (interfaceSpinCorrelations,
- "No",
- "No spin correlations",
- 0);
- static SwitchOption interfaceSpinCorrelationsSpin
- (interfaceSpinCorrelations,
- "Yes",
- "Include the azimuthal spin correlations only",
- 1);
-
- static Switch<Evolver,unsigned int> interfaceSoftCorrelations
- ("SoftCorrelations",
- "Option for the treatment of soft correlations in the parton shower",
- &Evolver::_softOpt, 2, false, false);
- static SwitchOption interfaceSoftCorrelationsNone
- (interfaceSoftCorrelations,
- "No",
- "No soft correlations",
- 0);
- static SwitchOption interfaceSoftCorrelationsFull
- (interfaceSoftCorrelations,
- "Full",
- "Use the full eikonal",
- 1);
- static SwitchOption interfaceSoftCorrelationsSingular
- (interfaceSoftCorrelations,
- "Singular",
- "Use original Webber-Marchisini form",
- 2);
-
- static Switch<Evolver,bool> interfaceHardPOWHEG
- ("HardPOWHEG",
- "Treatment of powheg emissions which are too hard to have a shower interpretation",
- &Evolver::_hardPOWHEG, false, false, false);
- static SwitchOption interfaceHardPOWHEGAsShower
- (interfaceHardPOWHEG,
- "AsShower",
- "Still interpret as shower emissions",
- false);
- static SwitchOption interfaceHardPOWHEGRealEmission
- (interfaceHardPOWHEG,
- "RealEmission",
- "Generate shower from the real emmission configuration",
- true);
-
- static Parameter<Evolver,unsigned int> interfaceMaxTryFSR
- ("MaxTryFSR",
- "The maximum number of attempted FSR emissions in"
- " the generation of the FSR",
- &Evolver::_maxTryFSR, 100000, 10, 100000000,
- false, false, Interface::limited);
-
- static Parameter<Evolver,unsigned int> interfaceMaxFailFSR
- ("MaxFailFSR",
- "Maximum number of failures generating the FSR",
- &Evolver::_maxFailFSR, 100, 1, 100000000,
- false, false, Interface::limited);
-
-
- static Parameter<Evolver,double> interfaceFSRFailureFraction
- ("FSRFailureFraction",
- "Maximum fraction of events allowed to fail due to too many FSR emissions",
- &Evolver::_fracFSR, 0.001, 1e-10, 1,
- false, false, Interface::limited);
-}
-
-void Evolver::generateIntrinsicpT(vector<ShowerProgenitorPtr> particlesToShower) {
- _intrinsic.clear();
- if ( !ipTon() || !isISRadiationON() ) return;
- // don't do anything for the moment for secondary scatters
- if( !ShowerHandler::currentHandler()->firstInteraction() ) return;
- // generate intrinsic pT
- for(unsigned int ix=0;ix<particlesToShower.size();++ix) {
- // only consider initial-state particles
- if(particlesToShower[ix]->progenitor()->isFinalState()) continue;
- if(!particlesToShower[ix]->progenitor()->dataPtr()->coloured()) continue;
- Energy ipt;
- if(UseRandom::rnd() > _beta) {
- ipt=_iptrms*sqrt(-log(UseRandom::rnd()));
- }
- else {
- ipt=_gamma*sqrt(pow(1.+sqr(_iptmax/_gamma), UseRandom::rnd())-1.);
- }
- pair<Energy,double> pt = make_pair(ipt,UseRandom::rnd(Constants::twopi));
- _intrinsic[particlesToShower[ix]] = pt;
- }
-}
-
-void Evolver::setupMaximumScales(const vector<ShowerProgenitorPtr> & p,
- XCPtr xcomb) {
- // let POWHEG events radiate freely
- if(_hardEmissionMode>0&&hardTree()) {
- vector<ShowerProgenitorPtr>::const_iterator ckt = p.begin();
- for (; ckt != p.end(); ckt++) (*ckt)->maxHardPt(Constants::MaxEnergy);
- return;
- }
- // return if no vetos
- if (!hardVetoOn()) return;
- // find out if hard partonic subprocess.
- bool isPartonic(false);
- map<ShowerProgenitorPtr,ShowerParticlePtr>::const_iterator
- cit = _currenttree->incomingLines().begin();
- Lorentz5Momentum pcm;
- for(; cit!=currentTree()->incomingLines().end(); ++cit) {
- pcm += cit->first->progenitor()->momentum();
- isPartonic |= cit->first->progenitor()->coloured();
- }
- // find minimum pt from hard process, the maximum pt from all outgoing
- // coloured lines (this is simpler and more general than
- // 2stu/(s^2+t^2+u^2)). Maximum scale for scattering processes will
- // be transverse mass.
- Energy ptmax = generator()->maximumCMEnergy();
- // general case calculate the scale
- if (!hardVetoXComb()||
- (hardVetoReadOption()&&
- !ShowerHandler::currentHandler()->firstInteraction())) {
- // scattering process
- if(currentTree()->isHard()) {
- assert(xcomb);
- // coloured incoming particles
- if (isPartonic) {
- map<ShowerProgenitorPtr,tShowerParticlePtr>::const_iterator
- cjt = currentTree()->outgoingLines().begin();
- for(; cjt!=currentTree()->outgoingLines().end(); ++cjt) {
- if (cjt->first->progenitor()->coloured())
- ptmax = min(ptmax,cjt->first->progenitor()->momentum().mt());
- }
- }
- if (ptmax == generator()->maximumCMEnergy() ) ptmax = pcm.m();
- if(hardVetoXComb()&&hardVetoReadOption()&&
- !ShowerHandler::currentHandler()->firstInteraction()) {
- ptmax=min(ptmax,sqrt(xcomb->lastShowerScale()));
- }
- }
- // decay, incoming() is the decaying particle.
- else {
- ptmax = currentTree()->incomingLines().begin()->first
- ->progenitor()->momentum().mass();
- }
- }
- // hepeup.SCALUP is written into the lastXComb by the
- // LesHouchesReader itself - use this by user's choice.
- // Can be more general than this.
- else {
- if(currentTree()->isHard()) {
- assert(xcomb);
- ptmax = sqrt( xcomb->lastShowerScale() );
- }
- else {
- ptmax = currentTree()->incomingLines().begin()->first
- ->progenitor()->momentum().mass();
- }
- }
- ptmax *= ShowerHandler::currentHandler()->hardScaleFactor();
- // set maxHardPt for all progenitors. For partonic processes this
- // is now the max pt in the FS, for non-partonic processes or
- // processes with no coloured FS the invariant mass of the IS
- vector<ShowerProgenitorPtr>::const_iterator ckt = p.begin();
- for (; ckt != p.end(); ckt++) (*ckt)->maxHardPt(ptmax);
-}
-
-void Evolver::setupHardScales(const vector<ShowerProgenitorPtr> & p,
- XCPtr xcomb) {
- if ( hardVetoXComb() &&
- (!hardVetoReadOption() ||
- ShowerHandler::currentHandler()->firstInteraction()) ) {
- Energy hardScale = ZERO;
- if(currentTree()->isHard()) {
- assert(xcomb);
- hardScale = sqrt( xcomb->lastShowerScale() );
- }
- else {
- hardScale = currentTree()->incomingLines().begin()->first
- ->progenitor()->momentum().mass();
- }
- hardScale *= ShowerHandler::currentHandler()->hardScaleFactor();
- vector<ShowerProgenitorPtr>::const_iterator ckt = p.begin();
- for (; ckt != p.end(); ckt++) (*ckt)->hardScale(hardScale);
- muPt = hardScale;
- }
-}
-
-void Evolver::showerHardProcess(ShowerTreePtr hard, XCPtr xcomb) {
-
-
- isMCatNLOSEvent = false;
- isMCatNLOHEvent = false;
- isPowhegSEvent = false;
- isPowhegHEvent = false;
-
- Ptr<SubtractedME>::tptr subme;
- Ptr<MatchboxMEBase>::tptr me;
- Ptr<SubtractionDipole>::tptr dipme;
-
- Ptr<StandardXComb>::ptr sxc = dynamic_ptr_cast<Ptr<StandardXComb>::ptr>(xcomb);
-
- if ( sxc ) {
- subme = dynamic_ptr_cast<Ptr<SubtractedME>::tptr>(sxc->matrixElement());
- me = dynamic_ptr_cast<Ptr<MatchboxMEBase>::tptr>(sxc->matrixElement());
- dipme = dynamic_ptr_cast<Ptr<SubtractionDipole>::tptr>(sxc->matrixElement());
- }
-
- if ( subme ) {
- if ( subme->showerApproximation() ) {
- theShowerApproximation = subme->showerApproximation();
- // separate MCatNLO and POWHEG-type corrections
- if ( !subme->showerApproximation()->needsSplittingGenerator() ) {
- if ( subme->realShowerSubtraction() )
- isMCatNLOHEvent = true;
- else if ( subme->virtualShowerSubtraction() )
- isMCatNLOSEvent = true;
- }
- else {
- if ( subme->realShowerSubtraction() )
- isPowhegHEvent = true;
- else if ( subme->virtualShowerSubtraction() || subme->loopSimSubtraction() )
- isPowhegSEvent = true;
- }
- }
- } else if ( me ) {
- if ( me->factory()->showerApproximation() ) {
- theShowerApproximation = me->factory()->showerApproximation();
- if ( !me->factory()->showerApproximation()->needsSplittingGenerator() )
- isMCatNLOSEvent = true;
- else
- isPowhegSEvent = true;
- }
- }
-
- string error = "Inconsistent hard emission set-up in Evolver::showerHardProcess(). ";
- if ( ( isMCatNLOSEvent || isMCatNLOHEvent ) ){
- if (_hardEmissionMode > 1)
- throw Exception() << error
- << "Cannot generate POWHEG matching with MC@NLO shower "
- << "approximation. Add 'set Evolver:HardEmissionMode 0' to input file."
- << Exception::runerror;
- if ( ShowerHandler::currentHandler()->canHandleMatchboxTrunc())
- throw Exception() << error
- << "Cannot use truncated qtilde shower with MC@NLO shower "
- << "approximation. Set LHCGenerator:EventHandler"
- << ":CascadeHandler to '/Herwig/Shower/ShowerHandler' or "
- << "'/Herwig/DipoleShower/DipoleShowerHandler'."
- << Exception::runerror;
- }
- else if ( ((isPowhegSEvent || isPowhegHEvent) || dipme) &&
- _hardEmissionMode < 2){
- if ( ShowerHandler::currentHandler()->canHandleMatchboxTrunc())
- throw Exception() << error
- << "Unmatched events requested for POWHEG shower "
- << "approximation. Set Evolver:HardEmissionMode to "
- << "'MatchboxPOWHEG' or 'FullPOWHEG'."
- << Exception::runerror;
- else if (_hardEmissionModeWarn){
- _hardEmissionModeWarn = false;
- _hardEmissionMode+=2;
- throw Exception() << error
- << "Unmatched events requested for POWHEG shower "
- << "approximation. Changing Evolver:HardEmissionMode from "
- << _hardEmissionMode-2 << " to " << _hardEmissionMode
- << Exception::warning;
- }
- }
-
- if ( isPowhegSEvent || isPowhegHEvent) {
- if (theShowerApproximation->needsTruncatedShower() &&
- !ShowerHandler::currentHandler()->canHandleMatchboxTrunc() )
- throw Exception() << error
- << "Current shower handler cannot generate truncated shower. "
- << "Set Generator:EventHandler:CascadeHandler to "
- << "'/Herwig/Shower/PowhegShowerHandler'."
- << Exception::runerror;
- }
- else if ( dipme && _missingTruncWarn){
- _missingTruncWarn=false;
- throw Exception() << "Warning: POWHEG shower approximation used without "
- << "truncated shower. Set Generator:EventHandler:"
- << "CascadeHandler to '/Herwig/Shower/PowhegShowerHandler' and "
- << "'MEMatching:TruncatedShower Yes'."
- << Exception::warning;
- }
- else if ( !dipme && _hardEmissionMode > 1 &&
- ShowerHandler::currentHandler()->firstInteraction())
- throw Exception() << error
- << "POWHEG matching requested for LO events. Include "
- << "'set Factory:ShowerApproximation MEMatching' in input file."
- << Exception::runerror;
-
- _hardme = HwMEBasePtr();
- // extract the matrix element
- tStdXCombPtr lastXC = dynamic_ptr_cast<tStdXCombPtr>(xcomb);
- if(lastXC) {
- _hardme = dynamic_ptr_cast<HwMEBasePtr>(lastXC->matrixElement());
- }
- _decayme = HwDecayerBasePtr();
- // set the current tree
- currentTree(hard);
- hardTree(HardTreePtr());
- // number of attempts if more than one interaction switched on
- unsigned int interactionTry=0;
- do {
- try {
- // generate the showering
- doShowering(true,xcomb);
- // if no vetos return
- return;
- }
- catch (InteractionVeto) {
- currentTree()->clear();
- ++interactionTry;
- }
- }
- while(interactionTry<=5);
- throw Exception() << "Too many tries for shower in "
- << "Evolver::showerHardProcess()"
- << Exception::eventerror;
-}
-
-void Evolver::hardMatrixElementCorrection(bool hard) {
- // set the initial enhancement factors for the soft correction
- _initialenhance = 1.;
- _finalenhance = 1.;
- // if hard matrix element switched off return
- if(!MECOn(hard)) return;
- // see if we can get the correction from the matrix element
- // or decayer
- if(hard) {
- if(_hardme&&_hardme->hasMECorrection()) {
- _hardme->initializeMECorrection(_currenttree,
- _initialenhance,_finalenhance);
- if(hardMEC(hard))
- _hardme->applyHardMatrixElementCorrection(_currenttree);
- }
- }
- else {
- if(_decayme&&_decayme->hasMECorrection()) {
- _decayme->initializeMECorrection(_currenttree,
- _initialenhance,_finalenhance);
- if(hardMEC(hard))
- _decayme->applyHardMatrixElementCorrection(_currenttree);
- }
- }
-}
-
-ShowerParticleVector Evolver::createTimeLikeChildren(tShowerParticlePtr particle, IdList ids) {
- // Create the ShowerParticle objects for the two children of
- // the emitting particle; set the parent/child relationship
- // if same as definition create particles, otherwise create cc
- tcPDPtr pdata[2];
- for(unsigned int ix=0;ix<2;++ix) pdata[ix]=getParticleData(ids[ix+1]);
- if(particle->id()!=ids[0]) {
- for(unsigned int ix=0;ix<2;++ix) {
- tPDPtr cc(pdata[ix]->CC());
- if(cc) pdata[ix]=cc;
- }
- }
- ShowerParticleVector children;
- for(unsigned int ix=0;ix<2;++ix) {
- children.push_back(new_ptr(ShowerParticle(pdata[ix],true)));
- if(children[ix]->id()==_progenitor->id()&&!pdata[ix]->stable())
- children[ix]->set5Momentum(Lorentz5Momentum(_progenitor->progenitor()->mass()));
- else
- children[ix]->set5Momentum(Lorentz5Momentum(pdata[ix]->mass()));
- }
- return children;
-}
-
-bool Evolver::timeLikeShower(tShowerParticlePtr particle,
- ShowerInteraction::Type type,
- Branching fb, bool first) {
- // don't do anything if not needed
- if(_limitEmissions == 1 || hardOnly() ||
- ( _limitEmissions == 2 && _nfs != 0) ||
- ( _limitEmissions == 4 && _nfs + _nis != 0) ) {
- if(particle->spinInfo()) particle->spinInfo()->develop();
- return false;
- }
- // too many tries
- if(_nFSR>=_maxTryFSR) {
- ++_nFailedFSR;
- // too many failed events
- if(_nFailedFSR>=_maxFailFSR)
- throw Exception() << "Too many events have failed due to too many shower emissions, in\n"
- << "Evolver::timeLikeShower(). Terminating run\n"
- << Exception::runerror;
- throw Exception() << "Too many attempted emissions in Evolver::timeLikeShower()\n"
- << Exception::eventerror;
- }
- // generate the emission
- ShowerParticleVector children;
- int ntry=0;
- // generate the emission
- if(!fb.kinematics)
- fb = selectTimeLikeBranching(particle,type,HardBranchingPtr());
- // no emission, return
- if(!fb.kinematics) {
- if(particle->spinInfo()) particle->spinInfo()->develop();
- return false;
- }
- Branching fc[2];
- bool setupChildren = true;
- while (ntry<50) {
- fc[0] = Branching();
- fc[1] = Branching();
- ++ntry;
- assert(fb.kinematics);
- // has emitted
- // Assign the shower kinematics to the emitting particle.
- if(setupChildren) {
- ++_nFSR;
- particle->showerKinematics(fb.kinematics);
- // generate phi
- fb.kinematics->phi(fb.sudakov->generatePhiForward(*particle,fb.ids,fb.kinematics));
- // check highest pT
- if(fb.kinematics->pT()>progenitor()->highestpT())
- progenitor()->highestpT(fb.kinematics->pT());
- // create the children
- children = createTimeLikeChildren(particle,fb.ids);
- // update the children
- particle->showerKinematics()->
- updateChildren(particle, children,fb.type,_reconOpt>=3);
- // update number of emissions
- ++_nfs;
- if(_limitEmissions!=0) {
- if(children[0]->spinInfo()) children[0]->spinInfo()->develop();
- if(children[1]->spinInfo()) children[1]->spinInfo()->develop();
- if(particle->spinInfo()) particle->spinInfo()->develop();
- return true;
- }
- setupChildren = false;
- }
- // select branchings for children
- fc[0] = selectTimeLikeBranching(children[0],type,HardBranchingPtr());
- fc[1] = selectTimeLikeBranching(children[1],type,HardBranchingPtr());
- // old default
- if(_reconOpt==0) {
- // shower the first particle
- if(fc[0].kinematics) timeLikeShower(children[0],type,fc[0],false);
- if(children[0]->spinInfo()) children[0]->spinInfo()->develop();
- // shower the second particle
- if(fc[1].kinematics) timeLikeShower(children[1],type,fc[1],false);
- if(children[1]->spinInfo()) children[1]->spinInfo()->develop();
- break;
- }
- // Herwig default
- else if(_reconOpt==1) {
- // shower the first particle
- if(fc[0].kinematics) timeLikeShower(children[0],type,fc[0],false);
- if(children[0]->spinInfo()) children[0]->spinInfo()->develop();
- // shower the second particle
- if(fc[1].kinematics) timeLikeShower(children[1],type,fc[1],false);
- if(children[1]->spinInfo()) children[1]->spinInfo()->develop();
- // branching has happened
- particle->showerKinematics()->updateParent(particle, children,fb.type);
- // clean up the vetoed emission
- if(particle->virtualMass()==ZERO) {
- particle->showerKinematics(ShoKinPtr());
- for(unsigned int ix=0;ix<children.size();++ix)
- particle->abandonChild(children[ix]);
- children.clear();
- if(particle->spinInfo()) particle->spinInfo()->decayVertex(VertexPtr());
- particle->vetoEmission(fb.type,fb.kinematics->scale());
- // generate the new emission
- fb = selectTimeLikeBranching(particle,type,HardBranchingPtr());
- // no emission, return
- if(!fb.kinematics) {
- if(particle->spinInfo()) particle->spinInfo()->develop();
- return false;
- }
- setupChildren = true;
- continue;
- }
- else
- break;
- }
- // veto children
- else if(_reconOpt>=2) {
- // cut-off masses for the branching
- const vector<Energy> & virtualMasses = fb.sudakov->virtualMasses(fb.ids);
- // compute the masses of the children
- Energy masses[3];
- for(unsigned int ix=0;ix<2;++ix) {
- if(fc[ix].kinematics) {
- const vector<Energy> & vm = fc[ix].sudakov->virtualMasses(fc[ix].ids);
- Energy2 q2 =
- fc[ix].kinematics->z()*(1.-fc[ix].kinematics->z())*sqr(fc[ix].kinematics->scale());
- if(fc[ix].ids[0]!=ParticleID::g) q2 += sqr(vm[0]);
- masses[ix+1] = sqrt(q2);
- }
- else {
- masses[ix+1] = virtualMasses[ix+1];
- }
- }
- masses[0] = fb.ids[0]!=ParticleID::g ? virtualMasses[0] : ZERO;
- double z = fb.kinematics->z();
- Energy2 pt2 = z*(1.-z)*(z*(1.-z)*sqr(fb.kinematics->scale()) + sqr(masses[0]))
- - sqr(masses[1])*(1.-z) - sqr(masses[2])*z;
- if(pt2>=ZERO) {
- break;
- }
- else {
- // reset the scales for the children
- for(unsigned int ix=0;ix<2;++ix) {
- if(fc[ix].kinematics)
- children[ix]->vetoEmission(fc[ix].type,fc[ix].kinematics->scale());
- else
- children[ix]->vetoEmission(ShowerPartnerType::QCDColourLine,ZERO);
- children[ix]->virtualMass(ZERO);
- }
- }
- }
- };
- if(_reconOpt>=2) {
- // shower the first particle
- if(fc[0].kinematics) timeLikeShower(children[0],type,fc[0],false);
- if(children[0]->spinInfo()) children[0]->spinInfo()->develop();
- // shower the second particle
- if(fc[1].kinematics) timeLikeShower(children[1],type,fc[1],false);
- if(children[1]->spinInfo()) children[1]->spinInfo()->develop();
- // branching has happened
- particle->showerKinematics()->updateParent(particle, children,fb.type);
- }
- if(first&&!children.empty())
- particle->showerKinematics()->resetChildren(particle,children);
- if(particle->spinInfo()) particle->spinInfo()->develop();
- return true;
-}
-
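-// Backward-evolve an initial-state particle: choose a backward branching using the
-// beam PDF, construct the new space-like parent and its time-like sibling, recurse
-// on the parent and then shower the sibling.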
-bool
-Evolver::spaceLikeShower(tShowerParticlePtr particle, PPtr beam,
- ShowerInteraction::Type type) {
-  // using the PDFs associated with the ShowerHandler ensures that
-  // modified PDFs are used for the secondary interactions via
-  // CascadeHandler::resetPDFs(...)
- tcPDFPtr pdf;
- if(ShowerHandler::currentHandler()->firstPDF().particle() == _beam)
- pdf = ShowerHandler::currentHandler()->firstPDF().pdf();
- if(ShowerHandler::currentHandler()->secondPDF().particle() == _beam)
- pdf = ShowerHandler::currentHandler()->secondPDF().pdf();
- Energy freeze = ShowerHandler::currentHandler()->pdfFreezingScale();
- // don't do anything if not needed
- if(_limitEmissions == 2 || hardOnly() ||
- ( _limitEmissions == 1 && _nis != 0 ) ||
- ( _limitEmissions == 4 && _nis + _nfs != 0 ) ) {
- if(particle->spinInfo()) particle->spinInfo()->develop();
- return false;
- }
- Branching bb;
- // generate branching
- while (true) {
- bb=_splittingGenerator->chooseBackwardBranching(*particle,beam,
- _initialenhance,
- _beam,type,
- pdf,freeze);
- // return if no emission
- if(!bb.kinematics) {
- if(particle->spinInfo()) particle->spinInfo()->develop();
- return false;
- }
- // if not vetoed break
- if(!spaceLikeVetoed(bb,particle)) break;
- // otherwise reset scale and continue
- particle->vetoEmission(bb.type,bb.kinematics->scale());
- if(particle->spinInfo()) particle->spinInfo()->decayVertex(VertexPtr());
- }
- // assign the splitting function and shower kinematics
- particle->showerKinematics(bb.kinematics);
- if(bb.kinematics->pT()>progenitor()->highestpT())
- progenitor()->highestpT(bb.kinematics->pT());
-  // For the time being we are considering only 1->2 branchings;
-  // the particles are ordered as in the Sudakov form factor
- tcPDPtr part[2]={getParticleData(bb.ids[0]),
- getParticleData(bb.ids[2])};
- if(particle->id()!=bb.ids[1]) {
- if(part[0]->CC()) part[0]=part[0]->CC();
- if(part[1]->CC()) part[1]=part[1]->CC();
- }
- // Now create the actual particles, make the otherChild a final state
- // particle, while the newParent is not
- ShowerParticlePtr newParent=new_ptr(ShowerParticle(part[0],false));
- ShowerParticlePtr otherChild = new_ptr(ShowerParticle(part[1],true,true));
- ShowerParticleVector theChildren;
- theChildren.push_back(particle);
- theChildren.push_back(otherChild);
- //this updates the evolution scale
- particle->showerKinematics()->
- updateParent(newParent, theChildren,bb.type);
- // update the history if needed
- _currenttree->updateInitialStateShowerProduct(_progenitor,newParent);
- _currenttree->addInitialStateBranching(particle,newParent,otherChild);
- // for the reconstruction of kinematics, parent/child
- // relationships are according to the branching process:
- // now continue the shower
- ++_nis;
- bool emitted = _limitEmissions==0 ?
- spaceLikeShower(newParent,beam,type) : false;
- if(newParent->spinInfo()) newParent->spinInfo()->develop();
- // now reconstruct the momentum
- if(!emitted) {
- if(_intrinsic.find(_progenitor)==_intrinsic.end()) {
- bb.kinematics->updateLast(newParent,ZERO,ZERO);
- }
- else {
- pair<Energy,double> kt=_intrinsic[_progenitor];
- bb.kinematics->updateLast(newParent,
- kt.first*cos(kt.second),
- kt.first*sin(kt.second));
- }
- }
- particle->showerKinematics()->
- updateChildren(newParent, theChildren,bb.type,_reconOpt>=4);
- if(_limitEmissions!=0) {
- if(particle->spinInfo()) particle->spinInfo()->develop();
- return true;
- }
- // perform the shower of the final-state particle
- timeLikeShower(otherChild,type,Branching(),true);
- updateHistory(otherChild);
- if(theChildren[1]->spinInfo()) theChildren[1]->spinInfo()->develop();
- // return the emitted
- if(particle->spinInfo()) particle->spinInfo()->develop();
- return true;
-}
-
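-// Shower a decaying system: find the decay mode (directly or via a constructed tag),
-// set the current tree and run doShowering, retrying a limited number of times if an
-// InteractionVeto is thrown.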
-void Evolver::showerDecay(ShowerTreePtr decay) {
- _decayme = HwDecayerBasePtr();
- _hardme = HwMEBasePtr();
- // find the decayer
- // try the normal way if possible
- tDMPtr dm = decay->incomingLines().begin()->first->original() ->decayMode();
- if(!dm) dm = decay->incomingLines().begin()->first->copy() ->decayMode();
- if(!dm) dm = decay->incomingLines().begin()->first->progenitor()->decayMode();
- // otherwise make a string and look it up
- if(!dm) {
- string tag = decay->incomingLines().begin()->first->original()->dataPtr()->name()
- + "->";
- OrderedParticles outgoing;
- for(map<ShowerProgenitorPtr,tShowerParticlePtr>::const_iterator
- it=decay->outgoingLines().begin();it!=decay->outgoingLines().end();++it) {
- if(abs(decay->incomingLines().begin()->first->original()->id()) == ParticleID::t &&
- abs(it->first->original()->id())==ParticleID::Wplus &&
- decay->treelinks().size() == 1) {
- ShowerTreePtr Wtree = decay->treelinks().begin()->first;
- for(map<ShowerProgenitorPtr,tShowerParticlePtr>::const_iterator
- it2=Wtree->outgoingLines().begin();it2!=Wtree->outgoingLines().end();++it2) {
- outgoing.insert(it2->first->original()->dataPtr());
- }
- }
- else {
- outgoing.insert(it->first->original()->dataPtr());
- }
- }
- for(OrderedParticles::const_iterator it=outgoing.begin(); it!=outgoing.end();++it) {
- if(it!=outgoing.begin()) tag += ",";
- tag +=(**it).name();
- }
- tag += ";";
- dm = findDecayMode(tag);
- }
- if(dm) _decayme = dynamic_ptr_cast<HwDecayerBasePtr>(dm->decayer());
- // set the ShowerTree to be showered
- currentTree(decay);
- decay->applyTransforms();
- hardTree(HardTreePtr());
- unsigned int interactionTry=0;
- do {
- try {
- // generate the showering
- doShowering(false,XCPtr());
- // if no vetos
- // force calculation of spin correlations
- SpinPtr spInfo = decay->incomingLines().begin()->first->progenitor()->spinInfo();
- if(spInfo) {
- if(!spInfo->developed()) spInfo->needsUpdate();
- spInfo->develop();
- }
- // and then return
- return;
- }
- catch (InteractionVeto) {
- currentTree()->clear();
- ++interactionTry;
- }
- }
- while(interactionTry<=5);
- throw Exception() << "Too many tries for QED shower in Evolver::showerDecay()"
- << Exception::eventerror;
-}
-
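-// Space-like shower along a decaying line: generate an emission, build the children
-// and recurse, with the treatment of vetoed emissions controlled by _reconOpt.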
-bool Evolver::spaceLikeDecayShower(tShowerParticlePtr particle,
- const ShowerParticle::EvolutionScales & maxScales,
- Energy minmass,ShowerInteraction::Type type,
- Branching fb) {
- // too many tries
- if(_nFSR>=_maxTryFSR) {
- ++_nFailedFSR;
- // too many failed events
- if(_nFailedFSR>=_maxFailFSR)
- throw Exception() << "Too many events have failed due to too many shower emissions, in\n"
-                      << "Evolver::spaceLikeDecayShower(). Terminating run\n"
- << Exception::runerror;
-    throw Exception() << "Too many attempted emissions in Evolver::spaceLikeDecayShower()\n"
- << Exception::eventerror;
- }
-  // storage for the children and the attempt counter
- ShowerParticleVector children;
- int ntry=0;
- // generate the emission
- if(!fb.kinematics)
- fb = selectSpaceLikeDecayBranching(particle,maxScales,minmass,type,
- HardBranchingPtr());
- // no emission, return
- if(!fb.kinematics) return false;
- Branching fc[2];
- bool setupChildren = true;
- while (ntry<50) {
- if(particle->virtualMass()==ZERO)
- particle->virtualMass(_progenitor->progenitor()->mass());
- fc[0] = Branching();
- fc[1] = Branching();
- ++ntry;
- assert(fb.kinematics);
- // has emitted
- // Assign the shower kinematics to the emitting particle.
- if(setupChildren) {
- ++_nFSR;
- particle->showerKinematics(fb.kinematics);
- if(fb.kinematics->pT()>progenitor()->highestpT())
- progenitor()->highestpT(fb.kinematics->pT());
- // create the ShowerParticle objects for the two children
- children = createTimeLikeChildren(particle,fb.ids);
-      // update the children
- particle->showerKinematics()->
- updateChildren(particle, children, fb.type,_reconOpt>=3);
- setupChildren = false;
- }
- // select branchings for children
- fc[0] = selectSpaceLikeDecayBranching(children[0],maxScales,minmass,
- type,HardBranchingPtr());
- fc[1] = selectTimeLikeBranching (children[1],type,HardBranchingPtr());
- // old default
- if(_reconOpt==0) {
- // shower the first particle
- _currenttree->updateInitialStateShowerProduct(_progenitor,children[0]);
- _currenttree->addInitialStateBranching(particle,children[0],children[1]);
- if(fc[0].kinematics) spaceLikeDecayShower(children[0],maxScales,minmass,type,Branching());
- // shower the second particle
- if(fc[1].kinematics) timeLikeShower(children[1],type,fc[1],true);
- updateHistory(children[1]);
- // branching has happened
- break;
- }
- // Herwig default
- else if(_reconOpt==1) {
- // shower the first particle
- _currenttree->updateInitialStateShowerProduct(_progenitor,children[0]);
- _currenttree->addInitialStateBranching(particle,children[0],children[1]);
- if(fc[0].kinematics) spaceLikeDecayShower(children[0],maxScales,minmass,type,Branching());
- // shower the second particle
- if(fc[1].kinematics) timeLikeShower(children[1],type,fc[1],true);
- updateHistory(children[1]);
- // branching has happened
- particle->showerKinematics()->updateParent(particle, children,fb.type);
- // clean up the vetoed emission
- if(particle->virtualMass()==ZERO) {
- particle->showerKinematics(ShoKinPtr());
- for(unsigned int ix=0;ix<children.size();++ix)
- particle->abandonChild(children[ix]);
- children.clear();
- particle->vetoEmission(fb.type,fb.kinematics->scale());
- // generate the new emission
- fb = selectSpaceLikeDecayBranching(particle,maxScales,minmass,type,
- HardBranchingPtr());
- // no emission, return
- if(!fb.kinematics) {
- return false;
- }
- setupChildren = true;
- continue;
- }
- else
- break;
- }
- else if(_reconOpt>=2) {
- // cut-off masses for the branching
- const vector<Energy> & virtualMasses = fb.sudakov->virtualMasses(fb.ids);
- // compute the masses of the children
- Energy masses[3];
- // space-like children
- masses[1] = children[0]->virtualMass();
- // time-like child
- if(fc[1].kinematics) {
- const vector<Energy> & vm = fc[1].sudakov->virtualMasses(fc[1].ids);
- Energy2 q2 =
- fc[1].kinematics->z()*(1.-fc[1].kinematics->z())*sqr(fc[1].kinematics->scale());
- if(fc[1].ids[0]!=ParticleID::g) q2 += sqr(vm[0]);
- masses[2] = sqrt(q2);
- }
- else {
- masses[2] = virtualMasses[2];
- }
- masses[0]=particle->virtualMass();
- double z = fb.kinematics->z();
- Energy2 pt2 = (1.-z)*(z*sqr(masses[0])-sqr(masses[1])-z/(1.-z)*sqr(masses[2]));
- if(pt2>=ZERO) {
- break;
- }
- else {
- // reset the scales for the children
- for(unsigned int ix=0;ix<2;++ix) {
- if(fc[ix].kinematics)
- children[ix]->vetoEmission(fc[ix].type,fc[ix].kinematics->scale());
- else {
- if(ix==0)
- children[ix]->vetoEmission(ShowerPartnerType::QCDColourLine,Constants::MaxEnergy);
- else
- children[ix]->vetoEmission(ShowerPartnerType::QCDColourLine,ZERO);
- }
- }
- children[0]->virtualMass(_progenitor->progenitor()->mass());
- children[1]->virtualMass(ZERO);
- }
- }
- };
- if(_reconOpt>=2) {
- // In the case of splittings which involves coloured particles,
- // set properly the colour flow of the branching.
- // update the history if needed
- _currenttree->updateInitialStateShowerProduct(_progenitor,children[0]);
- _currenttree->addInitialStateBranching(particle,children[0],children[1]);
- // shower the first particle
- if(fc[0].kinematics) spaceLikeDecayShower(children[0],maxScales,minmass,type,Branching());
- // shower the second particle
- if(fc[1].kinematics) timeLikeShower(children[1],type,fc[1],true);
- updateHistory(children[1]);
- // branching has happened
- particle->showerKinematics()->updateParent(particle, children,fb.type);
- }
- // branching has happened
- return true;
-}
-
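-// Prepare the shower: generate the hardest (POWHEG) emission or the hard matrix-element
-// correction as configured, assign colour partners and return the progenitors to shower.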
-vector<ShowerProgenitorPtr> Evolver::setupShower(bool hard) {
- // generate POWHEG hard emission if needed
- if(_hardEmissionMode>0) hardestEmission(hard);
- ShowerInteraction::Type inter = interactions_[0];
- if(_hardtree&&inter!=ShowerInteraction::Both) {
- inter = _hardtree->interaction();
- }
- // set the initial colour partners
- setEvolutionPartners(hard,inter,false);
- // generate hard me if needed
- if(_hardEmissionMode==0 ||
- (!hard && _hardEmissionMode==-1)) hardMatrixElementCorrection(hard);
- // get the particles to be showered
- vector<ShowerProgenitorPtr> particlesToShower =
- currentTree()->extractProgenitors();
- // remake the colour partners if needed
- if(_currenttree->hardMatrixElementCorrection()) {
- setEvolutionPartners(hard,interactions_[0],true);
- _currenttree->resetShowerProducts();
- }
- // return the answer
- return particlesToShower;
-}
-
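-// Match the ShowerTree to any hard tree, assign colour partners and the initial
-// evolution scales; if a POWHEG hard tree is harder than the shower starting scales,
-// convert it.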
-void Evolver::setEvolutionPartners(bool hard,ShowerInteraction::Type type,
- bool clear) {
- // match the particles in the ShowerTree and hardTree
- if(hardTree() && !hardTree()->connect(currentTree()))
- throw Exception() << "Can't match trees in "
- << "Evolver::setEvolutionPartners()"
- << Exception::eventerror;
- // extract the progenitors
- vector<ShowerParticlePtr> particles =
- currentTree()->extractProgenitorParticles();
- // clear the partners if needed
- if(clear) {
- for(unsigned int ix=0;ix<particles.size();++ix) {
- particles[ix]->partner(ShowerParticlePtr());
- particles[ix]->clearPartners();
- }
- }
- // sort out the colour partners
- if(hardTree()) {
- // find the partner
- for(unsigned int ix=0;ix<particles.size();++ix) {
- tHardBranchingPtr partner =
- hardTree()->particles()[particles[ix]]->colourPartner();
- if(!partner) continue;
- for(map<ShowerParticlePtr,tHardBranchingPtr>::const_iterator
- it=hardTree()->particles().begin();
- it!=hardTree()->particles().end();++it) {
- if(it->second==partner) particles[ix]->partner(it->first);
- }
- if(!particles[ix]->partner())
- throw Exception() << "Can't match partners in "
- << "Evolver::setEvolutionPartners()"
- << Exception::eventerror;
- }
- }
- // Set the initial evolution scales
- showerModel()->partnerFinder()->
- setInitialEvolutionScales(particles,!hard,type,!_hardtree);
- if(hardTree() && _hardPOWHEG) {
- bool tooHard=false;
- map<ShowerParticlePtr,tHardBranchingPtr>::const_iterator
- eit=hardTree()->particles().end();
- for(unsigned int ix=0;ix<particles.size();++ix) {
- map<ShowerParticlePtr,tHardBranchingPtr>::const_iterator
- mit = hardTree()->particles().find(particles[ix]);
- Energy hardScale(ZERO);
- ShowerPartnerType::Type type(ShowerPartnerType::Undefined);
- // final-state
- if(particles[ix]->isFinalState()) {
- if(mit!= eit && !mit->second->children().empty()) {
- hardScale = mit->second->scale();
- type = mit->second->type();
- }
- }
- // initial-state
- else {
- if(mit!= eit && mit->second->parent()) {
- hardScale = mit->second->parent()->scale();
- type = mit->second->parent()->type();
- }
- }
- if(type!=ShowerPartnerType::Undefined) {
- if(type==ShowerPartnerType::QED) {
- tooHard |= particles[ix]->scales().QED_noAO<hardScale;
- }
- else if(type==ShowerPartnerType::QCDColourLine) {
- tooHard |= particles[ix]->scales().QCD_c_noAO<hardScale;
- }
- else if(type==ShowerPartnerType::QCDAntiColourLine) {
- tooHard |= particles[ix]->scales().QCD_ac_noAO<hardScale;
- }
- }
- }
- if(tooHard) convertHardTree(hard,type);
- }
-}
-
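-// Recursively record the final-state branchings of a showered particle in the
-// current ShowerTree.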
-void Evolver::updateHistory(tShowerParticlePtr particle) {
- if(!particle->children().empty()) {
- ShowerParticleVector theChildren;
- for(unsigned int ix=0;ix<particle->children().size();++ix) {
- ShowerParticlePtr part = dynamic_ptr_cast<ShowerParticlePtr>
- (particle->children()[ix]);
- theChildren.push_back(part);
- }
- // update the history if needed
- if(particle==_currenttree->getFinalStateShowerProduct(_progenitor))
- _currenttree->updateFinalStateShowerProduct(_progenitor,
- particle,theChildren);
- _currenttree->addFinalStateBranching(particle,theChildren);
- for(unsigned int ix=0;ix<theChildren.size();++ix)
- updateHistory(theChildren[ix]);
- }
-}
-
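-// Start the time-like shower of the current progenitor, using the truncated shower
-// if the hard tree contains a matching branching with children.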
-bool Evolver::startTimeLikeShower(ShowerInteraction::Type type) {
- _nFSR = 0;
- if(hardTree()) {
- map<ShowerParticlePtr,tHardBranchingPtr>::const_iterator
- eit=hardTree()->particles().end(),
- mit = hardTree()->particles().find(progenitor()->progenitor());
- if( mit != eit && !mit->second->children().empty() ) {
- bool output=truncatedTimeLikeShower(progenitor()->progenitor(),
- mit->second ,type,Branching(),true);
- if(output) updateHistory(progenitor()->progenitor());
- return output;
- }
- }
- bool output = hardOnly() ? false :
- timeLikeShower(progenitor()->progenitor() ,type,Branching(),true) ;
- if(output) updateHistory(progenitor()->progenitor());
- return output;
-}
-
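-// Start the space-like shower of the current progenitor, using the truncated shower
-// if the hard tree contains a matching branching with a parent.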
-bool Evolver::startSpaceLikeShower(PPtr parent, ShowerInteraction::Type type) {
- if(hardTree()) {
- map<ShowerParticlePtr,tHardBranchingPtr>::const_iterator
- eit =hardTree()->particles().end(),
- mit = hardTree()->particles().find(progenitor()->progenitor());
- if( mit != eit && mit->second->parent() ) {
- return truncatedSpaceLikeShower( progenitor()->progenitor(),
- parent, mit->second->parent(), type );
- }
- }
- return hardOnly() ? false :
- spaceLikeShower(progenitor()->progenitor(),parent,type);
-}
-
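-// Start the space-like shower along a decaying line, first walking to the top of the
-// hard tree if a matching branching exists.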
-bool Evolver::
-startSpaceLikeDecayShower(const ShowerParticle::EvolutionScales & maxScales,
- Energy minimumMass,ShowerInteraction::Type type) {
- _nFSR = 0;
- if(hardTree()) {
- map<ShowerParticlePtr,tHardBranchingPtr>::const_iterator
- eit =hardTree()->particles().end(),
- mit = hardTree()->particles().find(progenitor()->progenitor());
- if( mit != eit && mit->second->parent() ) {
- HardBranchingPtr branch=mit->second;
- while(branch->parent()) branch=branch->parent();
- return truncatedSpaceLikeDecayShower(progenitor()->progenitor(),maxScales,
- minimumMass, branch ,type, Branching());
- }
- }
- return hardOnly() ? false :
- spaceLikeDecayShower(progenitor()->progenitor(),maxScales,minimumMass,type,Branching());
-}
-
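-// Decide whether a time-like branching is vetoed: hard-scale and maximum-pT checks,
-// soft matrix-element corrections, user-defined vetoes and the hard-scale profile.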
-bool Evolver::timeLikeVetoed(const Branching & fb,
- ShowerParticlePtr particle) {
- // work out type of interaction
- ShowerInteraction::Type type = fb.type==ShowerPartnerType::QED ?
- ShowerInteraction::QED : ShowerInteraction::QCD;
- // check whether emission was harder than largest pt of hard subprocess
- if ( hardVetoFS() && fb.kinematics->pT() > _progenitor->maxHardPt() )
- return true;
- // soft matrix element correction veto
- if( softMEC()) {
- if(_hardme && _hardme->hasMECorrection()) {
- if(_hardme->softMatrixElementVeto(_progenitor,particle,fb))
- return true;
- }
- else if(_decayme && _decayme->hasMECorrection()) {
- if(_decayme->softMatrixElementVeto(_progenitor,particle,fb))
- return true;
- }
- }
- // veto on maximum pt
- if(fb.kinematics->pT()>_progenitor->maximumpT(type)) return true;
- // general vetos
- if (fb.kinematics && !_vetoes.empty()) {
- bool vetoed=false;
- for (vector<ShowerVetoPtr>::iterator v = _vetoes.begin();
- v != _vetoes.end(); ++v) {
- bool test = (**v).vetoTimeLike(_progenitor,particle,fb);
- switch((**v).vetoType()) {
- case ShowerVeto::Emission:
- vetoed |= test;
- break;
- case ShowerVeto::Shower:
- if(test) throw VetoShower();
- break;
- case ShowerVeto::Event:
- if(test) throw Veto();
- break;
- }
- }
- if(vetoed) return true;
- }
- if ( ShowerHandler::currentHandler()->firstInteraction() &&
- ShowerHandler::currentHandler()->profileScales() ) {
- double weight =
- ShowerHandler::currentHandler()->profileScales()->
- hardScaleProfile(_progenitor->hardScale(),fb.kinematics->pT());
- if ( UseRandom::rnd() > weight )
- return true;
- }
- return false;
-}
-
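-// Decide whether a space-like branching is vetoed, applying the same classes of
-// checks as in the time-like case.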
-bool Evolver::spaceLikeVetoed(const Branching & bb,
- ShowerParticlePtr particle) {
- // work out type of interaction
- ShowerInteraction::Type type = bb.type==ShowerPartnerType::QED ?
- ShowerInteraction::QED : ShowerInteraction::QCD;
- // check whether emission was harder than largest pt of hard subprocess
- if (hardVetoIS() && bb.kinematics->pT() > _progenitor->maxHardPt())
- return true;
- // apply the soft correction
- if( softMEC() && _hardme && _hardme->hasMECorrection() ) {
- if(_hardme->softMatrixElementVeto(_progenitor,particle,bb))
- return true;
- }
- // the more general vetos
-
- // check vs max pt for the shower
- if(bb.kinematics->pT()>_progenitor->maximumpT(type)) return true;
-
- if (!_vetoes.empty()) {
- bool vetoed=false;
- for (vector<ShowerVetoPtr>::iterator v = _vetoes.begin();
- v != _vetoes.end(); ++v) {
- bool test = (**v).vetoSpaceLike(_progenitor,particle,bb);
- switch ((**v).vetoType()) {
- case ShowerVeto::Emission:
- vetoed |= test;
- break;
- case ShowerVeto::Shower:
- if(test) throw VetoShower();
- break;
- case ShowerVeto::Event:
- if(test) throw Veto();
- break;
- }
- }
- if (vetoed) return true;
- }
- if ( ShowerHandler::currentHandler()->firstInteraction() &&
- ShowerHandler::currentHandler()->profileScales() ) {
- double weight =
- ShowerHandler::currentHandler()->profileScales()->
- hardScaleProfile(_progenitor->hardScale(),bb.kinematics->pT());
- if ( UseRandom::rnd() > weight )
- return true;
- }
- return false;
-}
-
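-// Decide whether a branching along a decaying line is vetoed.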
-bool Evolver::spaceLikeDecayVetoed( const Branching & fb,
- ShowerParticlePtr particle) {
- // work out type of interaction
- ShowerInteraction::Type type = fb.type==ShowerPartnerType::QED ?
- ShowerInteraction::QED : ShowerInteraction::QCD;
- // apply the soft correction
- if( softMEC() && _decayme && _decayme->hasMECorrection() ) {
- if(_decayme->softMatrixElementVeto(_progenitor,particle,fb))
- return true;
- }
- // veto on hardest pt in the shower
- if(fb.kinematics->pT()> _progenitor->maximumpT(type)) return true;
- // general vetos
- if (!_vetoes.empty()) {
- bool vetoed=false;
- for (vector<ShowerVetoPtr>::iterator v = _vetoes.begin();
- v != _vetoes.end(); ++v) {
- bool test = (**v).vetoSpaceLike(_progenitor,particle,fb);
- switch((**v).vetoType()) {
- case ShowerVeto::Emission:
- vetoed |= test;
- break;
- case ShowerVeto::Shower:
- if(test) throw VetoShower();
- break;
- case ShowerVeto::Event:
- if(test) throw Veto();
- break;
- }
- if (vetoed) return true;
- }
- }
- return false;
-}
-
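-// Generate the hardest emission, either from the POWHEG correction of the hard matrix
-// element or decayer, or via ShowerHandler::generateCKKW(), and set the resulting hard
-// tree together with the maximum pT allowed for subsequent emissions.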
-void Evolver::hardestEmission(bool hard) {
- HardTreePtr ISRTree;
- if( ( _hardme && _hardme->hasPOWHEGCorrection()!=0 && _hardEmissionMode< 2) ||
- ( _decayme && _decayme->hasPOWHEGCorrection()!=0 && _hardEmissionMode!=2) ) {
- if(_hardme) {
- assert(hard);
- if(interaction_==4) {
- vector<ShowerInteraction::Type> inter(2);
- inter[0] = ShowerInteraction::QCD;
- inter[1] = ShowerInteraction::QED;
- _hardtree = _hardme->generateHardest( currentTree(),inter );
- }
- else {
- _hardtree = _hardme->generateHardest( currentTree(),interactions_ );
- }
- }
- else {
- assert(!hard);
- _hardtree = _decayme->generateHardest( currentTree() );
- }
- // store initial state POWHEG radiation
- if(_hardtree && _hardme && _hardme->hasPOWHEGCorrection()==1)
- ISRTree=_hardtree;
- }
-
- else if (_hardEmissionMode>1 && hard) {
- // Get minimum pT cutoff used in shower approximation
- Energy maxpt = 1.*GeV;
- int colouredIn = 0;
- int colouredOut = 0;
- for( map< ShowerProgenitorPtr, tShowerParticlePtr >::iterator it
- = currentTree()->outgoingLines().begin();
- it != currentTree()->outgoingLines().end(); ++it ) {
- if( it->second->coloured() ) colouredOut+=1;
- }
- for( map< ShowerProgenitorPtr, ShowerParticlePtr >::iterator it
- = currentTree()->incomingLines().begin();
- it != currentTree()->incomingLines().end(); ++it ) {
- if( ! it->second->coloured() ) colouredIn+=1;
- }
-
- if ( theShowerApproximation ){
- if ( theShowerApproximation->ffPtCut() == theShowerApproximation->fiPtCut() &&
- theShowerApproximation->ffPtCut() == theShowerApproximation->iiPtCut() )
- maxpt = theShowerApproximation->ffPtCut();
- else if ( colouredIn == 2 && colouredOut == 0 )
- maxpt = theShowerApproximation->iiPtCut();
- else if ( colouredIn == 0 && colouredOut > 1 )
- maxpt = theShowerApproximation->ffPtCut();
- else if ( colouredIn == 2 && colouredOut == 1 )
- maxpt = min(theShowerApproximation->iiPtCut(), theShowerApproximation->fiPtCut());
- else if ( colouredIn == 1 && colouredOut > 1 )
- maxpt = min(theShowerApproximation->ffPtCut(), theShowerApproximation->fiPtCut());
- else
- maxpt = min(min(theShowerApproximation->iiPtCut(), theShowerApproximation->fiPtCut()),
- theShowerApproximation->ffPtCut());
- }
-
-    // Generate the hard tree from the Born and real-emission subprocesses
- _hardtree = ShowerHandler::currentHandler()->generateCKKW(currentTree());
-
- // Find transverse momentum of hardest emission
- if (_hardtree){
- for(set<HardBranchingPtr>::iterator it=_hardtree->branchings().begin();
- it!=_hardtree->branchings().end();++it) {
- if ((*it)->parent() && (*it)->status()==HardBranching::Incoming)
- maxpt=(*it)->branchingParticle()->momentum().perp();
- if ((*it)->children().size()==2 && (*it)->status()==HardBranching::Outgoing){
- if ((*it)->branchingParticle()->id()!=21 &&
- abs((*it)->branchingParticle()->id())>5 ){
- if ((*it)->children()[0]->branchingParticle()->id()==21 ||
- abs((*it)->children()[0]->branchingParticle()->id())<6)
- maxpt=(*it)->children()[0]->branchingParticle()->momentum().perp();
- else if ((*it)->children()[1]->branchingParticle()->id()==21 ||
- abs((*it)->children()[1]->branchingParticle()->id())<6)
- maxpt=(*it)->children()[1]->branchingParticle()->momentum().perp();
- }
- else {
- if ( abs((*it)->branchingParticle()->id())<6){
- if (abs((*it)->children()[0]->branchingParticle()->id())<6)
- maxpt = (*it)->children()[1]->branchingParticle()->momentum().perp();
- else
- maxpt = (*it)->children()[0]->branchingParticle()->momentum().perp();
- }
- else maxpt = (*it)->children()[1]->branchingParticle()->momentum().perp();
- }
- }
- }
- }
-
-
-    // The hardest (in pT) emission should be the first POWHEG emission.
- maxpt=min(sqrt(ShowerHandler::currentHandler()->lastXCombPtr()->lastShowerScale()),maxpt);
-
- // Set maxpt to pT of emission when showering POWHEG real-emission subprocesses
- if (!isPowhegSEvent && !isPowhegHEvent){
- vector<int> outGluon;
- vector<int> outQuark;
- map< ShowerProgenitorPtr, tShowerParticlePtr >::iterator it;
- for( it = currentTree()->outgoingLines().begin();
- it != currentTree()->outgoingLines().end(); ++it ) {
- if ( abs(it->second->id())< 6) outQuark.push_back(it->second->id());
- if ( it->second->id()==21 ) outGluon.push_back(it->second->id());
- }
- if (outGluon.size() + outQuark.size() == 1){
- for( it = currentTree()->outgoingLines().begin();
- it != currentTree()->outgoingLines().end(); ++it ) {
- if ( abs(it->second->id())< 6 || it->second->id()==21 )
- maxpt = it->second->momentum().perp();
- }
- }
- else if (outGluon.size() + outQuark.size() > 1){
- // assume qqbar pair from a Z/gamma
- if (outGluon.size()==1 && outQuark.size() == 2 && outQuark[0]==-outQuark[1]){
- for( it = currentTree()->outgoingLines().begin();
- it != currentTree()->outgoingLines().end(); ++it ) {
- if ( it->second->id()==21 )
- maxpt = it->second->momentum().perp();
- }
- }
- // otherwise take the lowest pT avoiding born DY events
- else {
- maxpt = generator()->maximumCMEnergy();
- for( it = currentTree()->outgoingLines().begin();
- it != currentTree()->outgoingLines().end(); ++it ) {
- if ( abs(it->second->id())< 6 || it->second->id()==21 )
- maxpt = min(maxpt,it->second->momentum().perp());
- }
- }
- }
- }
-
- // set maximum pT for subsequent emissions from S events
- if ( isPowhegSEvent || (!isPowhegSEvent && !isPowhegHEvent)){
- for( map< ShowerProgenitorPtr, tShowerParticlePtr >::iterator it
- = currentTree()->outgoingLines().begin();
- it != currentTree()->outgoingLines().end(); ++it ) {
- if( ! it->second->coloured() ) continue;
- it->first->maximumpT(maxpt, ShowerInteraction::QCD );
- }
- for( map< ShowerProgenitorPtr, ShowerParticlePtr >::iterator it
- = currentTree()->incomingLines().begin();
- it != currentTree()->incomingLines().end(); ++it ) {
- if( ! it->second->coloured() ) continue;
- it->first->maximumpT(maxpt, ShowerInteraction::QCD );
- }
- }
- }
- else
- _hardtree = ShowerHandler::currentHandler()->generateCKKW(currentTree());
-
- // if hard me doesn't have a FSR powheg
- // correction use decay powheg correction
- if (_hardme && _hardme->hasPOWHEGCorrection()<2) {
- // check for intermediate colour singlet resonance
- const ParticleVector inter = _hardme->subProcess()->intermediates();
- if (inter.size()!=1 ||
- inter[0]->momentum().m2()/GeV2 < 0 ||
- inter[0]->dataPtr()->iColour()!=PDT::Colour0){
- if(_hardtree) connectTrees(currentTree(),_hardtree,hard);
- return;
- }
-
- map<ShowerProgenitorPtr, tShowerParticlePtr > out = currentTree()->outgoingLines();
- // ignore cases where outgoing particles are not coloured
- if (out.size()!=2 ||
- out. begin()->second->dataPtr()->iColour()==PDT::Colour0 ||
- out.rbegin()->second->dataPtr()->iColour()==PDT::Colour0) {
- if(_hardtree) connectTrees(currentTree(),_hardtree,hard);
- return;
- }
-
- // look up decay mode
- tDMPtr dm;
- string tag;
- string inParticle = inter[0]->dataPtr()->name() + "->";
- vector<string> outParticles;
- outParticles.push_back(out.begin ()->first->progenitor()->dataPtr()->name());
- outParticles.push_back(out.rbegin()->first->progenitor()->dataPtr()->name());
- for (int it=0; it<2; ++it){
- tag = inParticle + outParticles[it] + "," + outParticles[(it+1)%2] + ";";
- dm = generator()->findDecayMode(tag);
- if(dm) break;
- }
-
- // get the decayer
- HwDecayerBasePtr decayer;
- if(dm) decayer = dynamic_ptr_cast<HwDecayerBasePtr>(dm->decayer());
- // check if decayer has a FSR POWHEG correction
- if (!decayer || decayer->hasPOWHEGCorrection()<2){
- if(_hardtree) connectTrees(currentTree(),_hardtree,hard);
- return;
- }
-
- // generate the hardest emission
- ShowerDecayMap decay;
- PPtr in = new_ptr(*inter[0]);
- ShowerTreePtr decayTree = new_ptr(ShowerTree(in, decay));
- HardTreePtr FSRTree = decayer->generateHardest(decayTree);
- if (!FSRTree) {
- if(_hardtree) connectTrees(currentTree(),_hardtree,hard);
- return;
- }
-
- // if there is no ISRTree make _hardtree from FSRTree
- if (!ISRTree){
- vector<HardBranchingPtr> inBranch,hardBranch;
- for(map<ShowerProgenitorPtr,ShowerParticlePtr>::const_iterator
- cit =currentTree()->incomingLines().begin();
- cit!=currentTree()->incomingLines().end();++cit ) {
- inBranch.push_back(new_ptr(HardBranching(cit->second,SudakovPtr(),
- HardBranchingPtr(),
- HardBranching::Incoming)));
- inBranch.back()->beam(cit->first->original()->parents()[0]);
- hardBranch.push_back(inBranch.back());
- }
- if(inBranch[0]->branchingParticle()->dataPtr()->coloured()) {
- inBranch[0]->colourPartner(inBranch[1]);
- inBranch[1]->colourPartner(inBranch[0]);
- }
- for(set<HardBranchingPtr>::iterator it=FSRTree->branchings().begin();
- it!=FSRTree->branchings().end();++it) {
- if((**it).branchingParticle()->id()!=in->id())
- hardBranch.push_back(*it);
- }
- hardBranch[2]->colourPartner(hardBranch[3]);
- hardBranch[3]->colourPartner(hardBranch[2]);
- HardTreePtr newTree = new_ptr(HardTree(hardBranch,inBranch,
- ShowerInteraction::QCD));
- _hardtree = newTree;
- }
-
- // Otherwise modify the ISRTree to include the emission in FSRTree
- else {
- vector<tShowerParticlePtr> FSROut, ISROut;
- set<HardBranchingPtr>::iterator itFSR, itISR;
- // get outgoing particles
- for(itFSR =FSRTree->branchings().begin();
- itFSR!=FSRTree->branchings().end();++itFSR){
- if ((**itFSR).status()==HardBranching::Outgoing)
- FSROut.push_back((*itFSR)->branchingParticle());
- }
- for(itISR =ISRTree->branchings().begin();
- itISR!=ISRTree->branchings().end();++itISR){
- if ((**itISR).status()==HardBranching::Outgoing)
- ISROut.push_back((*itISR)->branchingParticle());
- }
-
- // find COM frame formed by outgoing particles
- LorentzRotation eventFrameFSR, eventFrameISR;
- eventFrameFSR = ((FSROut[0]->momentum()+FSROut[1]->momentum()).findBoostToCM());
- eventFrameISR = ((ISROut[0]->momentum()+ISROut[1]->momentum()).findBoostToCM());
-
- // find rotation between ISR and FSR frames
- int j=0;
- if (ISROut[0]->id()!=FSROut[0]->id()) j=1;
- eventFrameISR.rotateZ( (eventFrameFSR*FSROut[0]->momentum()).phi()-
- (eventFrameISR*ISROut[j]->momentum()).phi() );
- eventFrameISR.rotateY( (eventFrameFSR*FSROut[0]->momentum()).theta()-
- (eventFrameISR*ISROut[j]->momentum()).theta() );
- eventFrameISR.invert();
-
- for (itFSR=FSRTree->branchings().begin();
- itFSR!=FSRTree->branchings().end();++itFSR){
- if ((**itFSR).branchingParticle()->id()==in->id()) continue;
- for (itISR =ISRTree->branchings().begin();
- itISR!=ISRTree->branchings().end();++itISR){
- if ((**itISR).status()==HardBranching::Incoming) continue;
- if ((**itFSR).branchingParticle()->id()==
- (**itISR).branchingParticle()->id()){
- // rotate FSRTree particle to ISRTree event frame
- (**itISR).branchingParticle()->setMomentum(eventFrameISR*
- eventFrameFSR*
- (**itFSR).branchingParticle()->momentum());
- (**itISR).branchingParticle()->rescaleMass();
- // add the children of the FSRTree particles to the ISRTree
- if(!(**itFSR).children().empty()){
- (**itISR).addChild((**itFSR).children()[0]);
- (**itISR).addChild((**itFSR).children()[1]);
- // rotate momenta to ISRTree event frame
- (**itISR).children()[0]->branchingParticle()->setMomentum(eventFrameISR*
- eventFrameFSR*
- (**itFSR).children()[0]->branchingParticle()->momentum());
- (**itISR).children()[1]->branchingParticle()->setMomentum(eventFrameISR*
- eventFrameFSR*
- (**itFSR).children()[1]->branchingParticle()->momentum());
- }
- }
- }
- }
- _hardtree = ISRTree;
- }
- }
- if(_hardtree){
- connectTrees(currentTree(),_hardtree,hard);
- }
-}
-
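-// Time-like shower along a line of the hard tree: emissions are forced to follow the
-// hard branchings (truncated shower), with the same _reconOpt-dependent handling of
-// vetoed emissions as the ordinary time-like shower.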
-bool Evolver::truncatedTimeLikeShower(tShowerParticlePtr particle,
- HardBranchingPtr branch,
- ShowerInteraction::Type type,
- Branching fb, bool first) {
- // select a branching if we don't have one
- if(!fb.kinematics)
- fb = selectTimeLikeBranching(particle,type,branch);
-  // there must be an emission: if not a truncated one, then the forced hard one
- assert(fb.kinematics);
- ShowerParticleVector children;
- int ntry=0;
- Branching fc[2];
- bool setupChildren = true;
- while (ntry<50) {
- if(!fc[0].hard) fc[0] = Branching();
- if(!fc[1].hard) fc[1] = Branching();
- ++ntry;
- if(setupChildren) {
- ++_nFSR;
- // Assign the shower kinematics to the emitting particle.
- particle->showerKinematics(fb.kinematics);
- if(fb.kinematics->pT()>progenitor()->highestpT())
- progenitor()->highestpT(fb.kinematics->pT());
- // if not hard generate phi
- if(!fb.hard)
- fb.kinematics->phi(fb.sudakov->generatePhiForward(*particle,fb.ids,fb.kinematics));
- // create the children
- children = createTimeLikeChildren(particle,fb.ids);
- // update the children
- particle->showerKinematics()->
- updateChildren(particle, children,fb.type,_reconOpt>=3);
- setupChildren = false;
- }
- // select branchings for children
- if(!fc[0].kinematics) {
- // select branching for first particle
- if(!fb.hard && fb.iout ==1 )
- fc[0] = selectTimeLikeBranching(children[0],type,branch);
- else if(fb.hard && !branch->children()[0]->children().empty() )
- fc[0] = selectTimeLikeBranching(children[0],type,branch->children()[0]);
- else
- fc[0] = selectTimeLikeBranching(children[0],type,HardBranchingPtr());
- }
- // select branching for the second particle
- if(!fc[1].kinematics) {
-      // select branching for the second particle
- if(!fb.hard && fb.iout ==2 )
- fc[1] = selectTimeLikeBranching(children[1],type,branch);
- else if(fb.hard && !branch->children()[1]->children().empty() )
- fc[1] = selectTimeLikeBranching(children[1],type,branch->children()[1]);
- else
- fc[1] = selectTimeLikeBranching(children[1],type,HardBranchingPtr());
- }
- // old default
- if(_reconOpt==0 || (_reconOpt==1 && fb.hard) ) {
- // shower the first particle
- if(fc[0].kinematics) {
-        // the parent had the truncated emission and this child continues that line
- if(!fb.hard && fb.iout == 1)
- truncatedTimeLikeShower(children[0],branch,type,fc[0],false);
-        // hard emission and subsequent hard emissions
- else if(fb.hard && !branch->children()[0]->children().empty() )
- truncatedTimeLikeShower(children[0],branch->children()[0],type,fc[0],false);
- // normal shower
- else
- timeLikeShower(children[0],type,fc[0],false);
- }
- if(children[0]->spinInfo()) children[0]->spinInfo()->develop();
- // shower the second particle
- if(fc[1].kinematics) {
-        // the parent had the truncated emission and this child continues that line
- if(!fb.hard && fb.iout == 2)
- truncatedTimeLikeShower(children[1],branch,type,fc[1],false);
-        // hard emission and subsequent hard emissions
- else if(fb.hard && !branch->children()[1]->children().empty() )
- truncatedTimeLikeShower(children[1],branch->children()[1],type,fc[1],false);
- else
- timeLikeShower(children[1],type,fc[1],false);
- }
- if(children[1]->spinInfo()) children[1]->spinInfo()->develop();
- // branching has happened
- particle->showerKinematics()->updateParent(particle, children,fb.type);
- break;
- }
- // H7 default
- else if(_reconOpt==1) {
- // shower the first particle
- if(fc[0].kinematics) {
-        // the parent had the truncated emission and this child continues that line
- if(!fb.hard && fb.iout == 1)
- truncatedTimeLikeShower(children[0],branch,type,fc[0],false);
- else
- timeLikeShower(children[0],type,fc[0],false);
- }
- if(children[0]->spinInfo()) children[0]->spinInfo()->develop();
- // shower the second particle
- if(fc[1].kinematics) {
-        // the parent had the truncated emission and this child continues that line
- if(!fb.hard && fb.iout == 2)
- truncatedTimeLikeShower(children[1],branch,type,fc[1],false);
- else
- timeLikeShower(children[1],type,fc[1],false);
- }
- if(children[1]->spinInfo()) children[1]->spinInfo()->develop();
- // branching has happened
- particle->showerKinematics()->updateParent(particle, children,fb.type);
- // clean up the vetoed emission
- if(particle->virtualMass()==ZERO) {
- particle->showerKinematics(ShoKinPtr());
- for(unsigned int ix=0;ix<children.size();++ix)
- particle->abandonChild(children[ix]);
- children.clear();
- if(particle->spinInfo()) particle->spinInfo()->decayVertex(VertexPtr());
- particle->vetoEmission(fb.type,fb.kinematics->scale());
- // generate the new emission
- fb = selectTimeLikeBranching(particle,type,branch);
- // must be at least hard emission
- assert(fb.kinematics);
- setupChildren = true;
- continue;
- }
- else
- break;
- }
- else if(_reconOpt>=2) {
- // cut-off masses for the branching
- const vector<Energy> & virtualMasses = fb.sudakov->virtualMasses(fb.ids);
- // compute the masses of the children
- Energy masses[3];
- for(unsigned int ix=0;ix<2;++ix) {
- if(fc[ix].kinematics) {
- const vector<Energy> & vm = fc[ix].sudakov->virtualMasses(fc[ix].ids);
- Energy2 q2 =
- fc[ix].kinematics->z()*(1.-fc[ix].kinematics->z())*sqr(fc[ix].kinematics->scale());
- if(fc[ix].ids[0]!=ParticleID::g) q2 += sqr(vm[0]);
- masses[ix+1] = sqrt(q2);
- }
- else {
- masses[ix+1] = virtualMasses[ix+1];
- }
- }
- masses[0] = fb.ids[0]!=ParticleID::g ? virtualMasses[0] : ZERO;
- double z = fb.kinematics->z();
- Energy2 pt2 = z*(1.-z)*(z*(1.-z)*sqr(fb.kinematics->scale()) + sqr(masses[0]))
- - sqr(masses[1])*(1.-z) - sqr(masses[2])*z;
- if(pt2>=ZERO) {
- break;
- }
-      // if only the hard emission is left we have to accept it
- else if ((fc[0].hard && !fc[1].kinematics) ||
- (fc[1].hard && !fc[0].kinematics) ) {
- break;
- }
- else {
- // reset the scales for the children
- for(unsigned int ix=0;ix<2;++ix) {
- if(fc[ix].hard) continue;
- if(fc[ix].kinematics && ! fc[ix].hard )
- children[ix]->vetoEmission(fc[ix].type,fc[ix].kinematics->scale());
- else
- children[ix]->vetoEmission(ShowerPartnerType::QCDColourLine,ZERO);
- children[ix]->virtualMass(ZERO);
- }
- }
- }
- };
- if(_reconOpt>=2) {
- // shower the first particle
- if(fc[0].kinematics) {
-      // the parent had the truncated emission and this child continues that line
- if(!fb.hard && fb.iout == 1)
- truncatedTimeLikeShower(children[0],branch,type,fc[0],false);
-      // hard emission and subsequent hard emissions
- else if(fb.hard && !branch->children()[0]->children().empty() )
- truncatedTimeLikeShower(children[0],branch->children()[0],type,fc[0],false);
- // normal shower
- else
- timeLikeShower(children[0],type,fc[0],false);
- }
- if(children[0]->spinInfo()) children[0]->spinInfo()->develop();
- // shower the second particle
- if(fc[1].kinematics) {
-      // the parent had the truncated emission and this child continues that line
- if(!fb.hard && fb.iout == 2)
- truncatedTimeLikeShower(children[1],branch,type,fc[1],false);
-      // hard emission and subsequent hard emissions
- else if(fb.hard && !branch->children()[1]->children().empty() )
- truncatedTimeLikeShower(children[1],branch->children()[1],type,fc[1],false);
- else
- timeLikeShower(children[1],type,fc[1],false);
- }
- if(children[1]->spinInfo()) children[1]->spinInfo()->develop();
- // branching has happened
- particle->showerKinematics()->updateParent(particle, children,fb.type);
- }
- if(first&&!children.empty())
- particle->showerKinematics()->resetChildren(particle,children);
- if(particle->spinInfo()) particle->spinInfo()->develop();
- return true;
-}
-
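-// Space-like truncated shower: generate truncated backward branchings above the scale
-// of the hard branching and force the hard emission itself if none is generated.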
-bool Evolver::truncatedSpaceLikeShower(tShowerParticlePtr particle, PPtr beam,
- HardBranchingPtr branch,
- ShowerInteraction::Type type) {
- tcPDFPtr pdf;
- if(ShowerHandler::currentHandler()->firstPDF().particle() == beamParticle())
- pdf = ShowerHandler::currentHandler()->firstPDF().pdf();
- if(ShowerHandler::currentHandler()->secondPDF().particle() == beamParticle())
- pdf = ShowerHandler::currentHandler()->secondPDF().pdf();
- Energy freeze = ShowerHandler::currentHandler()->pdfFreezingScale();
- Branching bb;
- // parameters of the force branching
- double z(0.);
- HardBranchingPtr timelike;
- for( unsigned int ix = 0; ix < branch->children().size(); ++ix ) {
- if( branch->children()[ix]->status() ==HardBranching::Outgoing) {
- timelike = branch->children()[ix];
- }
- if( branch->children()[ix]->status() ==HardBranching::Incoming )
- z = branch->children()[ix]->z();
- }
- // generate truncated branching
- tcPDPtr part[2];
- if(z>=0.&&z<=1.) {
- while (true) {
- if( !isTruncatedShowerON() || hardOnly() ) break;
- bb = splittingGenerator()->chooseBackwardBranching( *particle,
- beam, 1., beamParticle(),
- type , pdf,freeze);
- if( !bb.kinematics || bb.kinematics->scale() < branch->scale() ) {
- bb = Branching();
- break;
- }
- // particles as in Sudakov form factor
- part[0] = getParticleData( bb.ids[0] );
- part[1] = getParticleData( bb.ids[2] );
-
-      // is the emitter an anti-particle?
- if( particle->id() != bb.ids[1]) {
- if( part[0]->CC() ) part[0] = part[0]->CC();
- if( part[1]->CC() ) part[1] = part[1]->CC();
- }
- double zsplit = bb.kinematics->z();
-      // apply the vetoes for the truncated shower
-      // veto if the branching doesn't carry most of the momentum
- ShowerInteraction::Type type2 = bb.type==ShowerPartnerType::QED ?
- ShowerInteraction::QED : ShowerInteraction::QCD;
- if(type2==branch->sudakov()->interactionType() &&
- zsplit < 0.5) {
- particle->vetoEmission(bb.type,bb.kinematics->scale());
- continue;
- }
- // others
- if( part[0]->id() != particle->id() || // if particle changes type
- bb.kinematics->pT() > progenitor()->maximumpT(type2) || // pt veto
- bb.kinematics->scale() < branch->scale()) { // angular ordering veto
- particle->vetoEmission(bb.type,bb.kinematics->scale());
- continue;
- }
- // and those from the base class
- if(spaceLikeVetoed(bb,particle)) {
- particle->vetoEmission(bb.type,bb.kinematics->scale());
- continue;
- }
- break;
- }
- }
- if( !bb.kinematics ) {
- //do the hard emission
- ShoKinPtr kinematics =
- branch->sudakov()->createInitialStateBranching( branch->scale(), z, branch->phi(),
- branch->children()[0]->pT() );
- kinematics->initialize( *particle, beam );
- // assign the splitting function and shower kinematics
- particle->showerKinematics( kinematics );
- if(kinematics->pT()>progenitor()->highestpT())
- progenitor()->highestpT(kinematics->pT());
- // For the time being we are considering only 1->2 branching
- // Now create the actual particles, make the otherChild a final state
- // particle, while the newParent is not
- ShowerParticlePtr newParent =
- new_ptr( ShowerParticle( branch->branchingParticle()->dataPtr(), false ) );
- ShowerParticlePtr otherChild =
- new_ptr( ShowerParticle( timelike->branchingParticle()->dataPtr(),
- true, true ) );
- ShowerParticleVector theChildren;
- theChildren.push_back( particle );
- theChildren.push_back( otherChild );
- particle->showerKinematics()->
- updateParent( newParent, theChildren, branch->type());
- // update the history if needed
- currentTree()->updateInitialStateShowerProduct( progenitor(), newParent );
- currentTree()->addInitialStateBranching( particle, newParent, otherChild );
- // for the reconstruction of kinematics, parent/child
- // relationships are according to the branching process:
- // now continue the shower
- bool emitted=false;
- if(!hardOnly()) {
- if( branch->parent() ) {
- emitted = truncatedSpaceLikeShower( newParent, beam, branch->parent() , type);
- }
- else {
- emitted = spaceLikeShower( newParent, beam , type);
- }
- }
- if( !emitted ) {
- if( intrinsicpT().find( progenitor() ) == intrinsicpT().end() ) {
- kinematics->updateLast( newParent, ZERO, ZERO );
- }
- else {
- pair<Energy,double> kt = intrinsicpT()[progenitor()];
- kinematics->updateLast( newParent,
- kt.first*cos( kt.second ),
- kt.first*sin( kt.second ) );
- }
- }
- particle->showerKinematics()->
- updateChildren( newParent, theChildren,bb.type,false);
- if(hardOnly()) return true;
- // perform the shower of the final-state particle
- if( timelike->children().empty() ) {
- timeLikeShower( otherChild , type,Branching(),true);
- }
- else {
- truncatedTimeLikeShower( otherChild, timelike , type,Branching(), true);
- }
- updateHistory(otherChild);
- // return the emitted
- return true;
- }
- // assign the splitting function and shower kinematics
- particle->showerKinematics( bb.kinematics );
- if(bb.kinematics->pT()>progenitor()->highestpT())
- progenitor()->highestpT(bb.kinematics->pT());
- // For the time being we are considering only 1->2 branching
- // Now create the actual particles, make the otherChild a final state
- // particle, while the newParent is not
- ShowerParticlePtr newParent = new_ptr( ShowerParticle( part[0], false ) );
- ShowerParticlePtr otherChild = new_ptr( ShowerParticle( part[1], true, true ) );
- ShowerParticleVector theChildren;
- theChildren.push_back( particle );
- theChildren.push_back( otherChild );
- particle->showerKinematics()->
- updateParent( newParent, theChildren, bb.type);
- // update the history if needed
- currentTree()->updateInitialStateShowerProduct( progenitor(), newParent );
- currentTree()->addInitialStateBranching( particle, newParent, otherChild );
- // for the reconstruction of kinematics, parent/child
- // relationships are according to the branching process:
- // now continue the shower
- bool emitted = truncatedSpaceLikeShower( newParent, beam, branch,type);
- // now reconstruct the momentum
- if( !emitted ) {
- if( intrinsicpT().find( progenitor() ) == intrinsicpT().end() ) {
- bb.kinematics->updateLast( newParent, ZERO, ZERO );
- }
- else {
- pair<Energy,double> kt = intrinsicpT()[ progenitor() ];
- bb.kinematics->updateLast( newParent,
- kt.first*cos( kt.second ),
- kt.first*sin( kt.second ) );
- }
- }
- particle->showerKinematics()->
- updateChildren( newParent, theChildren, bb.type,false);
- // perform the shower of the final-state particle
- timeLikeShower( otherChild , type,Branching(),true);
- updateHistory(otherChild);
- // return the emitted
- return true;
-}
-
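-// Truncated space-like shower along a decaying line, following the hard tree where one
-// is present and falling back to the ordinary decay shower otherwise.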
-bool Evolver::
-truncatedSpaceLikeDecayShower(tShowerParticlePtr particle,
- const ShowerParticle::EvolutionScales & maxScales,
- Energy minmass, HardBranchingPtr branch,
- ShowerInteraction::Type type, Branching fb) {
- // select a branching if we don't have one
- if(!fb.kinematics)
- fb = selectSpaceLikeDecayBranching(particle,maxScales,minmass,type,branch);
-  // there must be an emission: if not a truncated one, then the forced hard one
- assert(fb.kinematics);
- ShowerParticleVector children;
- int ntry=0;
- Branching fc[2];
- bool setupChildren = true;
- while (ntry<50) {
- if(!fc[0].hard) fc[0] = Branching();
- if(!fc[1].hard) fc[1] = Branching();
- ++ntry;
- if(setupChildren) {
- ++_nFSR;
- // Assign the shower kinematics to the emitting particle.
- particle->showerKinematics(fb.kinematics);
- if(fb.kinematics->pT()>progenitor()->highestpT())
- progenitor()->highestpT(fb.kinematics->pT());
- // create the ShowerParticle objects for the two children
- children = createTimeLikeChildren(particle,fb.ids);
-      // update the children
- particle->showerKinematics()->
- updateChildren(particle, children, fb.type,_reconOpt>=3);
- setupChildren = false;
- }
- // select branchings for children
- if(!fc[0].kinematics) {
- if(children[0]->id()==particle->id()) {
- // select branching for first particle
- if(!fb.hard)
- fc[0] = selectSpaceLikeDecayBranching(children[0],maxScales,minmass,type,branch);
- else if(fb.hard && ! branch->children()[0]->children().empty() )
- fc[0] = selectSpaceLikeDecayBranching(children[0],maxScales,minmass,type,
- branch->children()[0]);
- else
- fc[0] = selectSpaceLikeDecayBranching(children[0],maxScales,minmass,type,
- HardBranchingPtr());
- }
- else {
- // select branching for first particle
- if(fb.hard && !branch->children()[0]->children().empty() )
- fc[0] = selectTimeLikeBranching(children[0],type,branch->children()[0]);
- else
- fc[0] = selectTimeLikeBranching(children[0],type,HardBranchingPtr());
- }
- }
- // select branching for the second particle
- if(!fc[1].kinematics) {
- if(children[1]->id()==particle->id()) {
-        // select branching for the second particle
- if(!fb.hard)
- fc[1] = selectSpaceLikeDecayBranching(children[1],maxScales,minmass,type,branch);
- else if(fb.hard && ! branch->children()[1]->children().empty() )
- fc[1] = selectSpaceLikeDecayBranching(children[1],maxScales,minmass,type,
- branch->children()[1]);
- else
- fc[1] = selectSpaceLikeDecayBranching(children[1],maxScales,minmass,type,
- HardBranchingPtr());
- }
- else {
- if(fb.hard && !branch->children()[1]->children().empty() )
- fc[1] = selectTimeLikeBranching(children[1],type,branch->children()[1]);
- else
- fc[1] = selectTimeLikeBranching(children[1],type,HardBranchingPtr());
- }
- }
- // old default
- if(_reconOpt==0 || (_reconOpt==1 && fb.hard) ) {
- // update the history if needed
- currentTree()->updateInitialStateShowerProduct(progenitor(),children[0]);
- currentTree()->addInitialStateBranching(particle,children[0],children[1]);
- // shower the first particle
- if(fc[0].kinematics) {
- if(children[0]->id()==particle->id()) {
- if(!fb.hard)
- truncatedSpaceLikeDecayShower( children[0],maxScales,minmass,
- branch,type,fc[0]);
- else if(fb.hard && ! branch->children()[0]->children().empty() )
- truncatedSpaceLikeDecayShower( children[0],maxScales,minmass,
- branch->children()[0],type,fc[0]);
- else
- spaceLikeDecayShower( children[0],maxScales,minmass,type,fc[0]);
- }
- else {
- if(fb.hard && !branch->children()[0]->children().empty() )
- truncatedTimeLikeShower(children[0],branch->children()[0],type,fc[0],false);
- // normal shower
- else
- timeLikeShower(children[0],type,fc[0],false);
- }
- }
- // shower the second particle
- if(fc[1].kinematics) {
- if(children[0]->id()==particle->id()) {
- if(!fb.hard)
- truncatedSpaceLikeDecayShower( children[0],maxScales,minmass,
- branch,type,fc[1]);
- else if(fb.hard && ! branch->children()[0]->children().empty() )
- truncatedSpaceLikeDecayShower( children[0],maxScales,minmass,
- branch->children()[0],type,fc[1]);
- else
- spaceLikeDecayShower( children[0],maxScales,minmass,type,fc[1]);
- }
- else {
- if(fb.hard && !branch->children()[0]->children().empty() )
- truncatedTimeLikeShower(children[0],branch->children()[0],type,fc[1],false);
- // normal shower
- else
- timeLikeShower(children[0],type,fc[1],false);
- }
- }
- updateHistory(children[1]);
- // branching has happened
- break;
- }
- // H7 default
- else if(_reconOpt==1) {
- // update the history if needed
- currentTree()->updateInitialStateShowerProduct(progenitor(),children[0]);
- currentTree()->addInitialStateBranching(particle,children[0],children[1]);
- // shower the first particle
- if(fc[0].kinematics) {
- if(children[0]->id()==particle->id()) {
- if(!fb.hard)
- truncatedSpaceLikeDecayShower( children[0],maxScales,minmass,
- branch,type,fc[0]);
- else if(fb.hard && ! branch->children()[0]->children().empty() )
- truncatedSpaceLikeDecayShower( children[0],maxScales,minmass,
- branch->children()[0],type,fc[0]);
- else
- spaceLikeDecayShower( children[0],maxScales,minmass,type,fc[0]);
- }
- else {
- if(fb.hard && !branch->children()[0]->children().empty() )
- truncatedTimeLikeShower(children[0],branch->children()[0],type,fc[0],false);
- // normal shower
- else
- timeLikeShower(children[0],type,fc[0],false);
- }
- }
- // shower the second particle
- if(fc[1].kinematics) {
- if(children[0]->id()==particle->id()) {
- if(!fb.hard)
- truncatedSpaceLikeDecayShower( children[0],maxScales,minmass,
- branch,type,fc[1]);
- else if(fb.hard && ! branch->children()[0]->children().empty() )
- truncatedSpaceLikeDecayShower( children[0],maxScales,minmass,
- branch->children()[0],type,fc[1]);
- else
- spaceLikeDecayShower( children[0],maxScales,minmass,type,fc[1]);
- }
- else {
- if(fb.hard && !branch->children()[0]->children().empty() )
- truncatedTimeLikeShower(children[0],branch->children()[0],type,fc[1],false);
- // normal shower
- else
- timeLikeShower(children[0],type,fc[1],false);
- }
- }
- // clean up the vetoed emission
- if(particle->virtualMass()==ZERO) {
- particle->showerKinematics(ShoKinPtr());
- for(unsigned int ix=0;ix<children.size();++ix)
- particle->abandonChild(children[ix]);
- children.clear();
- particle->vetoEmission(fb.type,fb.kinematics->scale());
- // generate the new emission
- fb = selectSpaceLikeDecayBranching(particle,maxScales,minmass,type,branch);
- // must be at least hard emission
- assert(fb.kinematics);
- setupChildren = true;
- continue;
- }
- else {
- updateHistory(children[1]);
- break;
- }
- }
- else if(_reconOpt>=2) {
- // cut-off masses for the branching
- const vector<Energy> & virtualMasses = fb.sudakov->virtualMasses(fb.ids);
- // compute the masses of the children
- Energy masses[3];
- // space-like children
- masses[1] = children[0]->virtualMass();
- // time-like child
- if(fc[1].kinematics) {
- const vector<Energy> & vm = fc[1].sudakov->virtualMasses(fc[1].ids);
- Energy2 q2 =
- fc[1].kinematics->z()*(1.-fc[1].kinematics->z())*sqr(fc[1].kinematics->scale());
- if(fc[1].ids[0]!=ParticleID::g) q2 += sqr(vm[0]);
- masses[2] = sqrt(q2);
- }
- else {
- masses[2] = virtualMasses[2];
- }
- masses[0]=particle->virtualMass();
- double z = fb.kinematics->z();
- Energy2 pt2 = (1.-z)*(z*sqr(masses[0])-sqr(masses[1])-z/(1.-z)*sqr(masses[2]));
- if(pt2>=ZERO) {
- break;
- }
- else {
- // reset the scales for the children
- for(unsigned int ix=0;ix<2;++ix) {
- if(fc[ix].kinematics)
- children[ix]->vetoEmission(fc[ix].type,fc[ix].kinematics->scale());
- else {
- if(ix==0)
- children[ix]->vetoEmission(ShowerPartnerType::QCDColourLine,Constants::MaxEnergy);
- else
- children[ix]->vetoEmission(ShowerPartnerType::QCDColourLine,ZERO);
- }
- }
- children[0]->virtualMass(_progenitor->progenitor()->mass());
- children[1]->virtualMass(ZERO);
- }
- }
- };
- if(_reconOpt>=2) {
- // update the history if needed
- currentTree()->updateInitialStateShowerProduct(progenitor(),children[0]);
- currentTree()->addInitialStateBranching(particle,children[0],children[1]);
- // shower the first particle
- if(fc[0].kinematics) {
- if(children[0]->id()==particle->id()) {
- if(!fb.hard)
- truncatedSpaceLikeDecayShower( children[0],maxScales,minmass,
- branch,type,fc[0]);
- else if(fb.hard && ! branch->children()[0]->children().empty() )
- truncatedSpaceLikeDecayShower( children[0],maxScales,minmass,
- branch->children()[0],type,fc[0]);
- else
- spaceLikeDecayShower( children[0],maxScales,minmass,type,fc[0]);
- }
- else {
- if(fb.hard && !branch->children()[0]->children().empty() )
- truncatedTimeLikeShower(children[0],branch->children()[0],type,fc[0],false);
- // normal shower
- else
- timeLikeShower(children[0],type,fc[0],false);
- }
- }
- // shower the second particle
- if(fc[1].kinematics) {
- if(children[0]->id()==particle->id()) {
- if(!fb.hard)
- truncatedSpaceLikeDecayShower( children[0],maxScales,minmass,
- branch,type,fc[1]);
- else if(fb.hard && ! branch->children()[0]->children().empty() )
- truncatedSpaceLikeDecayShower( children[0],maxScales,minmass,
- branch->children()[0],type,fc[1]);
- else
- spaceLikeDecayShower( children[0],maxScales,minmass,type,fc[1]);
- }
- else {
- if(fb.hard && !branch->children()[0]->children().empty() )
- truncatedTimeLikeShower(children[0],branch->children()[0],type,fc[1],false);
- // normal shower
- else
- timeLikeShower(children[0],type,fc[1],false);
- }
- }
- updateHistory(children[1]);
- }
- return true;
-}
-
-bool Evolver::constructDecayTree(vector<ShowerProgenitorPtr> & particlesToShower,
- ShowerInteraction::Type inter) {
- Energy ptmax(-GeV);
-  // get the maximum pT if there is already a hard tree
- if(hardTree()) {
- for(unsigned int ix=0;ix<particlesToShower.size();++ix) {
- if(particlesToShower[ix]->maximumpT(inter)>ptmax&&
- particlesToShower[ix]->progenitor()->isFinalState())
- ptmax = particlesToShower[ix]->maximumpT(inter);
- }
- }
- vector<HardBranchingPtr> spaceBranchings,allBranchings;
- for(unsigned int ix=0;ix<particlesToShower.size();++ix) {
- if(particlesToShower[ix]->progenitor()->isFinalState()) {
- HardBranchingPtr newBranch;
- if(particlesToShower[ix]->hasEmitted()) {
- newBranch =
- new_ptr(HardBranching(particlesToShower[ix]->progenitor(),
- particlesToShower[ix]->progenitor()->
- showerKinematics()->SudakovFormFactor(),
- HardBranchingPtr(),HardBranching::Outgoing));
- constructTimeLikeLine(newBranch,particlesToShower[ix]->progenitor());
- }
- else {
- newBranch =
- new_ptr(HardBranching(particlesToShower[ix]->progenitor(),
- SudakovPtr(),HardBranchingPtr(),
- HardBranching::Outgoing));
- }
- allBranchings.push_back(newBranch);
- }
- else {
- HardBranchingPtr newBranch;
- if(particlesToShower[ix]->hasEmitted()) {
- newBranch =
- new_ptr(HardBranching(particlesToShower[ix]->progenitor(),
- particlesToShower[ix]->progenitor()->
- showerKinematics()->SudakovFormFactor(),
- HardBranchingPtr(),HardBranching::Decay));
- constructTimeLikeLine(newBranch,particlesToShower[ix]->progenitor());
- HardBranchingPtr last=newBranch;
- do {
- for(unsigned int ix=0;ix<last->children().size();++ix) {
- if(last->children()[ix]->branchingParticle()->id()==
- particlesToShower[ix]->id()) {
- last = last->children()[ix];
- continue;
- }
- }
- }
- while(!last->children().empty());
- last->status(HardBranching::Incoming);
- spaceBranchings.push_back(newBranch);
- allBranchings .push_back(last);
- }
- else {
- newBranch =
- new_ptr(HardBranching(particlesToShower[ix]->progenitor(),
- SudakovPtr(),HardBranchingPtr(),
- HardBranching::Incoming));
- spaceBranchings.push_back(newBranch);
- allBranchings .push_back(newBranch);
- }
- }
- }
- HardTreePtr QCDTree = new_ptr(HardTree(allBranchings,spaceBranchings,inter));
- // set the charge partners
- ShowerParticleVector particles;
- particles.push_back(spaceBranchings.back()->branchingParticle());
- for(set<HardBranchingPtr>::iterator cit=QCDTree->branchings().begin();
- cit!=QCDTree->branchings().end();++cit) {
- if((*cit)->status()==HardBranching::Outgoing)
- particles.push_back((*cit)->branchingParticle());
- }
- // get the partners
- showerModel()->partnerFinder()->setInitialEvolutionScales(particles,true,inter,true);
- // do the inverse recon
- if(!showerModel()->kinematicsReconstructor()->
- deconstructDecayJets(QCDTree,this,inter)) {
- return false;
- }
- // clear the old shower
- currentTree()->clear();
- // set the hard tree
- hardTree(QCDTree);
- // set the charge partners
- setEvolutionPartners(false,inter,false);
- // get the particles to be showered
- map<ShowerProgenitorPtr,ShowerParticlePtr>::const_iterator cit;
- map<ShowerProgenitorPtr,tShowerParticlePtr>::const_iterator cjt;
- particlesToShower.clear();
- // incoming particles
- for(cit=currentTree()->incomingLines().begin();
- cit!=currentTree()->incomingLines().end();++cit)
- particlesToShower.push_back(((*cit).first));
- assert(particlesToShower.size()==1);
- // outgoing particles
- for(cjt=currentTree()->outgoingLines().begin();
- cjt!=currentTree()->outgoingLines().end();++cjt) {
- particlesToShower.push_back(((*cjt).first));
- if(ptmax>ZERO) particlesToShower.back()->maximumpT(ptmax,inter);
- }
- for(unsigned int ix=0;ix<particlesToShower.size();++ix) {
- map<ShowerParticlePtr,tHardBranchingPtr>::const_iterator
- eit=hardTree()->particles().end(),
- mit = hardTree()->particles().find(particlesToShower[ix]->progenitor());
- if( mit != eit) {
- if(mit->second->status()==HardBranching::Outgoing)
- particlesToShower[ix]->progenitor()->set5Momentum(mit->second->pVector());
- }
- }
- return true;
-}
-
-bool Evolver::constructHardTree(vector<ShowerProgenitorPtr> & particlesToShower,
- ShowerInteraction::Type inter) {
- bool noEmission = true;
- vector<HardBranchingPtr> spaceBranchings,allBranchings;
- for(unsigned int ix=0;ix<particlesToShower.size();++ix) {
- if(particlesToShower[ix]->progenitor()->isFinalState()) {
- HardBranchingPtr newBranch;
- if(particlesToShower[ix]->hasEmitted()) {
- noEmission = false;
- newBranch =
- new_ptr(HardBranching(particlesToShower[ix]->progenitor(),
- particlesToShower[ix]->progenitor()->
- showerKinematics()->SudakovFormFactor(),
- HardBranchingPtr(),HardBranching::Outgoing));
- constructTimeLikeLine(newBranch,particlesToShower[ix]->progenitor());
- }
- else {
- newBranch =
- new_ptr(HardBranching(particlesToShower[ix]->progenitor(),
- SudakovPtr(),HardBranchingPtr(),
- HardBranching::Outgoing));
- }
- allBranchings.push_back(newBranch);
- }
- else {
- HardBranchingPtr first,last;
- if(!particlesToShower[ix]->progenitor()->parents().empty()) {
- noEmission = false;
- constructSpaceLikeLine(particlesToShower[ix]->progenitor(),
- first,last,SudakovPtr(),
- particlesToShower[ix]->original()->parents()[0]);
- }
- else {
- first = new_ptr(HardBranching(particlesToShower[ix]->progenitor(),
- SudakovPtr(),HardBranchingPtr(),
- HardBranching::Incoming));
- if(particlesToShower[ix]->original()->parents().empty())
- first->beam(particlesToShower[ix]->original());
- else
- first->beam(particlesToShower[ix]->original()->parents()[0]);
- last = first;
- }
- spaceBranchings.push_back(first);
- allBranchings.push_back(last);
- }
- }
- if(!noEmission) {
- HardTreePtr QCDTree = new_ptr(HardTree(allBranchings,spaceBranchings,
- inter));
- // set the charge partners
- ShowerParticleVector particles;
- for(set<HardBranchingPtr>::iterator cit=QCDTree->branchings().begin();
- cit!=QCDTree->branchings().end();++cit) {
- particles.push_back((*cit)->branchingParticle());
- }
- // get the partners
- showerModel()->partnerFinder()->setInitialEvolutionScales(particles,false,
- inter,true);
- // do the inverse recon
- if(!showerModel()->kinematicsReconstructor()->
- deconstructHardJets(QCDTree,this,inter))
-	throw Exception() << "Can't do shower deconstruction for QED shower in "
-			  << "Evolver::constructHardTree()" << Exception::eventerror;
- // set the hard tree
- hardTree(QCDTree);
- }
- // clear the old shower
- currentTree()->clear();
- // set the charge partners
- setEvolutionPartners(true,inter,false);
- // get the particles to be showered
- particlesToShower = currentTree()->extractProgenitors();
- // reset momenta
- if(hardTree()) {
- for(unsigned int ix=0;ix<particlesToShower.size();++ix) {
- map<ShowerParticlePtr,tHardBranchingPtr>::const_iterator
- eit=hardTree()->particles().end(),
- mit = hardTree()->particles().find(particlesToShower[ix]->progenitor());
- if( mit != eit) {
- particlesToShower[ix]->progenitor()->set5Momentum(mit->second->showerMomentum());
- }
- }
- }
- return true;
-}
-
-void Evolver::constructTimeLikeLine(tHardBranchingPtr branch,
- tShowerParticlePtr particle) {
- for(unsigned int ix=0;ix<particle->children().size();++ix) {
- HardBranching::Status status = branch->status();
- tShowerParticlePtr child =
- dynamic_ptr_cast<ShowerParticlePtr>(particle->children()[ix]);
- if(child->children().empty()) {
- HardBranchingPtr newBranch =
- new_ptr(HardBranching(child,SudakovPtr(),branch,status));
- branch->addChild(newBranch);
- }
- else {
- HardBranchingPtr newBranch =
- new_ptr(HardBranching(child,child->showerKinematics()->SudakovFormFactor(),
- branch,status));
- constructTimeLikeLine(newBranch,child);
- branch->addChild(newBranch);
- }
- }
- // sort out the type of interaction
- if(!branch->children().empty()) {
- if(branch->branchingParticle()->id()==ParticleID::gamma ||
- branch->children()[0]->branchingParticle()->id()==ParticleID::gamma ||
- branch->children()[1]->branchingParticle()->id()==ParticleID::gamma)
- branch->type(ShowerPartnerType::QED);
- else {
- if(branch->branchingParticle()->id()==
- branch->children()[0]->branchingParticle()->id()) {
- if(branch->branchingParticle()->dataPtr()->iColour()==PDT::Colour8) {
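-	  // for g -> g g the child with the larger momentum fraction is taken as
-	  // the emitter when deciding which colour line sets the branching type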
- tShowerParticlePtr emittor =
- branch->branchingParticle()->showerKinematics()->z()>0.5 ?
- branch->children()[0]->branchingParticle() :
- branch->children()[1]->branchingParticle();
- if(branch->branchingParticle()->colourLine()==emittor->colourLine())
- branch->type(ShowerPartnerType::QCDAntiColourLine);
- else if(branch->branchingParticle()->antiColourLine()==emittor->antiColourLine())
- branch->type(ShowerPartnerType::QCDColourLine);
- else
- assert(false);
- }
- else if(branch->branchingParticle()->colourLine()) {
- branch->type(ShowerPartnerType::QCDColourLine);
- }
- else if(branch->branchingParticle()->antiColourLine()) {
- branch->type(ShowerPartnerType::QCDAntiColourLine);
- }
- else
- assert(false);
- }
- else if(branch->branchingParticle()->id()==ParticleID::g &&
- branch->children()[0]->branchingParticle()->id()==
- -branch->children()[1]->branchingParticle()->id()) {
- if(branch->branchingParticle()->showerKinematics()->z()>0.5)
- branch->type(ShowerPartnerType::QCDAntiColourLine);
- else
- branch->type(ShowerPartnerType::QCDColourLine);
-
- }
- else
- assert(false);
- }
- }
-}
-
-void Evolver::constructSpaceLikeLine(tShowerParticlePtr particle,
- HardBranchingPtr & first,
- HardBranchingPtr & last,
- SudakovPtr sud,PPtr beam) {
- if(!particle) return;
- if(!particle->parents().empty()) {
- tShowerParticlePtr parent =
- dynamic_ptr_cast<ShowerParticlePtr>(particle->parents()[0]);
- SudakovPtr newSud=particle->showerKinematics()->SudakovFormFactor();
- constructSpaceLikeLine(parent,first,last,newSud,beam);
- }
- HardBranchingPtr newBranch =
- new_ptr(HardBranching(particle,sud,last,HardBranching::Incoming));
- newBranch->beam(beam);
- if(!first) {
- first=newBranch;
- last =newBranch;
- return;
- }
- last->addChild(newBranch);
- tShowerParticlePtr timeChild =
- dynamic_ptr_cast<ShowerParticlePtr>(particle->parents()[0]->children()[1]);
- HardBranchingPtr timeBranch;
- if(!timeChild->children().empty()) {
- timeBranch =
- new_ptr(HardBranching(timeChild,
- timeChild->showerKinematics()->SudakovFormFactor(),
- last,HardBranching::Outgoing));
- constructTimeLikeLine(timeBranch,timeChild);
- }
- else {
- timeBranch =
- new_ptr(HardBranching(timeChild,SudakovPtr(),last,HardBranching::Outgoing));
- }
- last->addChild(timeBranch);
- // sort out the type
- if(last->branchingParticle() ->id() == ParticleID::gamma ||
- newBranch->branchingParticle() ->id() == ParticleID::gamma ||
- timeBranch->branchingParticle()->id() == ParticleID::gamma) {
- last->type(ShowerPartnerType::QED);
- }
- else if(last->branchingParticle()->id()==newBranch->branchingParticle()->id()) {
- if(last->branchingParticle()->id()==ParticleID::g) {
- if(last->branchingParticle()->colourLine()==
- newBranch->branchingParticle()->colourLine()) {
- last->type(ShowerPartnerType::QCDAntiColourLine);
- }
- else {
- last->type(ShowerPartnerType::QCDColourLine);
- }
- }
- else if(last->branchingParticle()->hasColour()) {
- last->type(ShowerPartnerType::QCDColourLine);
- }
- else if(last->branchingParticle()->hasAntiColour()) {
- last->type(ShowerPartnerType::QCDAntiColourLine);
- }
- else
- assert(false);
- }
- else if(newBranch->branchingParticle()->id()==ParticleID::g) {
- if(last->branchingParticle()->hasColour()) {
- last->type(ShowerPartnerType::QCDAntiColourLine);
- }
- else if(last->branchingParticle()->hasAntiColour()) {
- last->type(ShowerPartnerType::QCDColourLine);
- }
- else
- assert(false);
- }
- else if(newBranch->branchingParticle()->hasColour()) {
- last->type(ShowerPartnerType::QCDColourLine);
- }
- else if(newBranch->branchingParticle()->hasAntiColour()) {
- last->type(ShowerPartnerType::QCDAntiColourLine);
- }
- else {
- assert(false);
- }
- last=newBranch;
-}
-
-void Evolver::connectTrees(ShowerTreePtr showerTree,
- HardTreePtr hardTree, bool hard ) {
- ShowerParticleVector particles;
- // find the Sudakovs
- for(set<HardBranchingPtr>::iterator cit=hardTree->branchings().begin();
- cit!=hardTree->branchings().end();++cit) {
- // Sudakovs for ISR
- if((**cit).parent()&&(**cit).status()==HardBranching::Incoming) {
- ++_nis;
- IdList br(3);
- br[0] = (**cit).parent()->branchingParticle()->id();
- br[1] = (**cit). branchingParticle()->id();
- br[2] = (**cit).parent()->children()[0]==*cit ?
- (**cit).parent()->children()[1]->branchingParticle()->id() :
- (**cit).parent()->children()[0]->branchingParticle()->id();
- BranchingList branchings = splittingGenerator()->initialStateBranchings();
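-      // charge conjugate the ids where needed so they match the conventions
-      // used to store the initial-state branchings before the Sudakov lookup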
- if(br[1]<0&&br[0]==br[1]) {
- br[0] = abs(br[0]);
- br[1] = abs(br[1]);
- }
- else if(br[1]<0) {
- br[1] = -br[1];
- br[2] = -br[2];
- }
- long index = abs(br[1]);
- SudakovPtr sudakov;
- for(BranchingList::const_iterator cjt = branchings.lower_bound(index);
- cjt != branchings.upper_bound(index); ++cjt ) {
- IdList ids = cjt->second.second;
- if(ids[0]==br[0]&&ids[1]==br[1]&&ids[2]==br[2]) {
- sudakov=cjt->second.first;
- break;
- }
- }
- if(!sudakov) throw Exception() << "Can't find Sudakov for the hard emission in "
- << "Evolver::connectTrees() for ISR"
- << Exception::runerror;
- (**cit).parent()->sudakov(sudakov);
- }
- // Sudakovs for FSR
- else if(!(**cit).children().empty()) {
- ++_nfs;
- IdList br(3);
- br[0] = (**cit) .branchingParticle()->id();
- br[1] = (**cit).children()[0]->branchingParticle()->id();
- br[2] = (**cit).children()[1]->branchingParticle()->id();
- BranchingList branchings = splittingGenerator()->finalStateBranchings();
- if(br[0]<0) {
- br[0] = abs(br[0]);
- br[1] = abs(br[1]);
- br[2] = abs(br[2]);
- }
- long index = br[0];
- SudakovPtr sudakov;
- for(BranchingList::const_iterator cjt = branchings.lower_bound(index);
- cjt != branchings.upper_bound(index); ++cjt ) {
- IdList ids = cjt->second.second;
- if(ids[0]==br[0]&&ids[1]==br[1]&&ids[2]==br[2]) {
- sudakov=cjt->second.first;
- break;
- }
- }
- if(!sudakov) throw Exception() << "Can't find Sudakov for the hard emission in "
- << "Evolver::connectTrees()"
- << Exception::runerror;
- (**cit).sudakov(sudakov);
- }
- }
- // calculate the evolution scale
- for(set<HardBranchingPtr>::iterator cit=hardTree->branchings().begin();
- cit!=hardTree->branchings().end();++cit) {
- particles.push_back((*cit)->branchingParticle());
- }
- showerModel()->partnerFinder()->
- setInitialEvolutionScales(particles,!hard,hardTree->interaction(),
- !hardTree->partnersSet());
- hardTree->partnersSet(true);
- // inverse reconstruction
- if(hard) {
- showerModel()->kinematicsReconstructor()->
- deconstructHardJets(hardTree,ShowerHandler::currentHandler()->evolver(),
- hardTree->interaction());
- }
- else
- showerModel()->kinematicsReconstructor()->
- deconstructDecayJets(hardTree,ShowerHandler::currentHandler()->evolver(),
- hardTree->interaction());
- // now reset the momenta of the showering particles
- vector<ShowerProgenitorPtr> particlesToShower;
- for(map<ShowerProgenitorPtr,ShowerParticlePtr>::const_iterator
- cit=showerTree->incomingLines().begin();
- cit!=showerTree->incomingLines().end();++cit )
- particlesToShower.push_back(cit->first);
- // extract the showering particles
- for(map<ShowerProgenitorPtr,tShowerParticlePtr>::const_iterator
- cit=showerTree->outgoingLines().begin();
- cit!=showerTree->outgoingLines().end();++cit )
- particlesToShower.push_back(cit->first);
- // match them
- map<ShowerProgenitorPtr,HardBranchingPtr> partners;
- for(set<HardBranchingPtr>::const_iterator bit=hardTree->branchings().begin();
- bit!=hardTree->branchings().end();++bit) {
- Energy2 dmin( 1e30*GeV2 );
- ShowerProgenitorPtr partner;
- for(vector<ShowerProgenitorPtr>::const_iterator pit=particlesToShower.begin();
- pit!=particlesToShower.end();++pit) {
- if(partners.find(*pit)!=partners.end()) continue;
- if( (**bit).branchingParticle()->id() != (**pit).progenitor()->id() ) continue;
- if( (**bit).branchingParticle()->isFinalState() !=
- (**pit).progenitor()->isFinalState() ) continue;
- if( (**pit).progenitor()->isFinalState() ) {
- Energy2 dtest =
- sqr( (**pit).progenitor()->momentum().x() - (**bit).showerMomentum().x() ) +
- sqr( (**pit).progenitor()->momentum().y() - (**bit).showerMomentum().y() ) +
- sqr( (**pit).progenitor()->momentum().z() - (**bit).showerMomentum().z() ) +
- sqr( (**pit).progenitor()->momentum().t() - (**bit).showerMomentum().t() );
- // add mass difference for identical particles (e.g. Z0 Z0 production)
- dtest += 1e10*sqr((**pit).progenitor()->momentum().m()-(**bit).showerMomentum().m());
- if( dtest < dmin ) {
- partner = *pit;
- dmin = dtest;
- }
- }
- else {
- // ensure directions are right
- if((**pit).progenitor()->momentum().z()/(**bit).showerMomentum().z()>ZERO) {
- partner = *pit;
- break;
- }
- }
- }
- if(!partner) throw Exception() << "Failed to match shower and hard trees in Evolver::hardestEmission"
- << Exception::eventerror;
- partners[partner] = *bit;
- }
- for(vector<ShowerProgenitorPtr>::const_iterator pit=particlesToShower.begin();
- pit!=particlesToShower.end();++pit) {
- HardBranchingPtr partner = partners[*pit];
- if((**pit).progenitor()->dataPtr()->stable()) {
- (**pit).progenitor()->set5Momentum(partner->showerMomentum());
- (**pit).copy()->set5Momentum(partner->showerMomentum());
- }
- else {
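-      // for unstable particles transform rather than reset the momentum:
-      // boost to the old rest frame and then onto the new shower momentum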
- Lorentz5Momentum oldMomentum = (**pit).progenitor()->momentum();
- Lorentz5Momentum newMomentum = partner->showerMomentum();
- LorentzRotation boost( oldMomentum.findBoostToCM(),oldMomentum.e()/oldMomentum.mass());
- (**pit).progenitor()->transform(boost);
- (**pit).copy() ->transform(boost);
- boost = LorentzRotation(-newMomentum.findBoostToCM(),newMomentum.e()/newMomentum.mass());
- (**pit).progenitor()->transform(boost);
- (**pit).copy() ->transform(boost);
- }
- }
- // correction boosts for daughter trees
- for(map<tShowerTreePtr,pair<tShowerProgenitorPtr,tShowerParticlePtr> >::const_iterator
- tit = showerTree->treelinks().begin();
- tit != showerTree->treelinks().end();++tit) {
- ShowerTreePtr decayTree = tit->first;
- map<ShowerProgenitorPtr,ShowerParticlePtr>::const_iterator
- cit = decayTree->incomingLines().begin();
- // reset the momentum of the decay particle
- Lorentz5Momentum oldMomentum = cit->first->progenitor()->momentum();
- Lorentz5Momentum newMomentum = tit->second.second->momentum();
- LorentzRotation boost( oldMomentum.findBoostToCM(),oldMomentum.e()/oldMomentum.mass());
- decayTree->transform(boost,true);
- boost = LorentzRotation(-newMomentum.findBoostToCM(),newMomentum.e()/newMomentum.mass());
- decayTree->transform(boost,true);
- }
-}
-
-void Evolver::doShowering(bool hard,XCPtr xcomb) {
- // order of the interactions
- bool showerOrder(true);
- // zero number of emissions
- _nis = _nfs = 0;
- // if MC@NLO H event and limited emissions
- // indicate both final and initial state emission
- if ( isMCatNLOHEvent && _limitEmissions != 0 ) {
- _nis = _nfs = 1;
- }
- // extract particles to shower
- vector<ShowerProgenitorPtr> particlesToShower(setupShower(hard));
- // setup the maximum scales for the shower
- if (hardVetoOn()) setupMaximumScales(particlesToShower,xcomb);
- // set the hard scales for the profiles
- setupHardScales(particlesToShower,xcomb);
- // specific stuff for hard processes and decays
- Energy minmass(ZERO), mIn(ZERO);
- // hard process generate the intrinsic p_T once and for all
- if(hard) {
- generateIntrinsicpT(particlesToShower);
- }
- // decay compute the minimum mass of the final-state
- else {
- for(unsigned int ix=0;ix<particlesToShower.size();++ix) {
- if(particlesToShower[ix]->progenitor()->isFinalState()) {
- if(particlesToShower[ix]->progenitor()->dataPtr()->stable())
- minmass += particlesToShower[ix]->progenitor()->dataPtr()->constituentMass();
- else
- minmass += particlesToShower[ix]->progenitor()->mass();
- }
- else {
- mIn = particlesToShower[ix]->progenitor()->mass();
- }
- }
- // throw exception if decay can't happen
- if ( minmass > mIn ) {
- throw Exception() << "Evolver.cc: Mass of decaying particle is "
- << "below constituent masses of decay products."
- << Exception::eventerror;
- }
- }
- // check if interactions in right order
- if(hardTree() && interaction_!=4 &&
- hardTree()->interaction()!=interactions_[0]) {
- assert(interactions_.size()==2);
- showerOrder = false;
- swap(interactions_[0],interactions_[1]);
- }
- // loop over possible interactions
- bool reWeighting = _reWeight && hard && ShowerHandler::currentHandler()->firstInteraction();
- double eventWeight=0.;
- unsigned int nTryReWeight(0);
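-  // when reweighting, the shower is attempted _nReWeight times and the event
-  // weight is the fraction of attempts surviving the vetoes and reconstruction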
- for(unsigned int inter=0;inter<interactions_.size();++inter) {
- // set up for second pass if required
- if(inter!=0) {
- // zero intrinsic pt so only added first time round
- intrinsicpT().clear();
- // construct the tree and throw veto if not possible
- if(!(hard ?
- constructHardTree (particlesToShower,interactions_[inter]) :
- constructDecayTree(particlesToShower,interactions_[inter])))
- throw InteractionVeto();
- }
- // create random particle vector (only need to do once)
- vector<ShowerProgenitorPtr> tmp;
- unsigned int nColouredIncoming = 0;
- while(particlesToShower.size()>0){
- unsigned int xx=UseRandom::irnd(particlesToShower.size());
- tmp.push_back(particlesToShower[xx]);
- particlesToShower.erase(particlesToShower.begin()+xx);
- }
- particlesToShower=tmp;
- for(unsigned int ix=0;ix<particlesToShower.size();++ix) {
- if(!particlesToShower[ix]->progenitor()->isFinalState() &&
- particlesToShower[ix]->progenitor()->coloured()) ++nColouredIncoming;
- }
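-    // flag passed to the kinematic reconstruction below: set for hard processes
-    // without exactly one coloured incoming particle and used only once more
-    // than half of the allowed shower attempts have failed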
- bool switchRecon = hard && nColouredIncoming !=1;
- // main shower loop
- unsigned int ntry(0);
- bool reconstructed = false;
- do {
- // clear results of last attempt if needed
- if(ntry!=0) {
- currentTree()->clear();
- setEvolutionPartners(hard,interactions_[inter],true);
- _nis = _nfs = 0;
- // if MC@NLO H event and limited emissions
- // indicate both final and initial state emission
- if ( isMCatNLOHEvent && _limitEmissions != 0 ) {
- _nis = _nfs = 1;
- }
- for(unsigned int ix=0; ix<particlesToShower.size();++ix) {
- SpinPtr spin = particlesToShower[ix]->progenitor()->spinInfo();
- if(spin && spin->decayVertex() &&
- dynamic_ptr_cast<tcSVertexPtr>(spin->decayVertex())) {
- spin->decayVertex(VertexPtr());
- }
- }
- }
- // loop over particles
- for(unsigned int ix=0;ix<particlesToShower.size();++ix) {
- // extract the progenitor
- progenitor(particlesToShower[ix]);
- // final-state radiation
- if(progenitor()->progenitor()->isFinalState()) {
- if(!isFSRadiationON()) continue;
- // perform shower
- progenitor()->hasEmitted(startTimeLikeShower(interactions_[inter]));
- }
- // initial-state radiation
- else {
- if(!isISRadiationON()) continue;
- // hard process
- if(hard) {
- // get the PDF
- setBeamParticle(_progenitor->beam());
- assert(beamParticle());
- // perform the shower
- // set the beam particle
- tPPtr beamparticle=progenitor()->original();
- if(!beamparticle->parents().empty())
- beamparticle=beamparticle->parents()[0];
- // generate the shower
- progenitor()->hasEmitted(startSpaceLikeShower(beamparticle,
- interactions_[inter]));
- }
- // decay
- else {
- // skip colour and electrically neutral particles
- if(!progenitor()->progenitor()->dataPtr()->coloured() &&
- !progenitor()->progenitor()->dataPtr()->charged()) {
- progenitor()->hasEmitted(false);
- continue;
- }
- // perform shower
- // set the scales correctly. The current scale is the maximum scale for
- // emission not the starting scale
- ShowerParticle::EvolutionScales maxScales(progenitor()->progenitor()->scales());
- progenitor()->progenitor()->scales() = ShowerParticle::EvolutionScales();
- if(progenitor()->progenitor()->dataPtr()->charged()) {
- progenitor()->progenitor()->scales().QED = progenitor()->progenitor()->mass();
- progenitor()->progenitor()->scales().QED_noAO = progenitor()->progenitor()->mass();
- }
- if(progenitor()->progenitor()->hasColour()) {
- progenitor()->progenitor()->scales().QCD_c = progenitor()->progenitor()->mass();
- progenitor()->progenitor()->scales().QCD_c_noAO = progenitor()->progenitor()->mass();
- }
- if(progenitor()->progenitor()->hasAntiColour()) {
- progenitor()->progenitor()->scales().QCD_ac = progenitor()->progenitor()->mass();
- progenitor()->progenitor()->scales().QCD_ac_noAO = progenitor()->progenitor()->mass();
- }
- // perform the shower
- progenitor()->hasEmitted(startSpaceLikeDecayShower(maxScales,minmass,
- interactions_[inter]));
- }
- }
- }
- // do the kinematic reconstruction, checking if it worked
- reconstructed = hard ?
- showerModel()->kinematicsReconstructor()->
- reconstructHardJets (currentTree(),intrinsicpT(),interactions_[inter],
- switchRecon && ntry>maximumTries()/2) :
- showerModel()->kinematicsReconstructor()->
- reconstructDecayJets(currentTree(),interactions_[inter]);
- if(!reconstructed) continue;
- // apply vetos on the full shower
- for(vector<FullShowerVetoPtr>::const_iterator it=_fullShowerVetoes.begin();
- it!=_fullShowerVetoes.end();++it) {
- int veto = (**it).applyVeto(currentTree());
- if(veto<0) continue;
- // veto the shower
- if(veto==0) {
- reconstructed = false;
- break;
- }
- // veto the shower and reweight
- else if(veto==1) {
- reconstructed = false;
- break;
- }
- // veto the event
- else if(veto==2) {
- throw Veto();
- }
- }
- if(reWeighting) {
- if(reconstructed) eventWeight += 1.;
- reconstructed=false;
- ++nTryReWeight;
- if(nTryReWeight==_nReWeight) {
- reWeighting = false;
- if(eventWeight==0.) throw Veto();
- }
- }
- }
- while(!reconstructed&&maximumTries()>++ntry);
- // check if failed to generate the shower
- if(ntry==maximumTries()) {
- if(hard)
- throw ShowerHandler::ShowerTriesVeto(ntry);
- else
- throw Exception() << "Failed to generate the shower after "
- << ntry << " attempts in Evolver::showerDecay()"
- << Exception::eventerror;
- }
- }
- // handle the weights and apply any reweighting required
- if(nTryReWeight>0) {
- tStdEHPtr seh = dynamic_ptr_cast<tStdEHPtr>(generator()->currentEventHandler());
- static bool first = true;
- if(seh) {
- seh->reweight(eventWeight/double(nTryReWeight));
- }
- else if(first) {
-      generator()->log() << "Reweighting the shower only works with internal Herwig7 processes. "
-			 << "Presumably you are showering Les Houches Events; these will not be "
-			 << "reweighted\n";
- first = false;
- }
- }
- // tree has now showered
- _currenttree->hasShowered(true);
- if(!showerOrder) swap(interactions_[0],interactions_[1]);
- hardTree(HardTreePtr());
-}
-
-void Evolver:: convertHardTree(bool hard,ShowerInteraction::Type type) {
- map<ColinePtr,ColinePtr> cmap;
- // incoming particles
- for(map<ShowerProgenitorPtr,ShowerParticlePtr>::const_iterator
- cit=currentTree()->incomingLines().begin();cit!=currentTree()->incomingLines().end();++cit) {
- map<ShowerParticlePtr,tHardBranchingPtr>::const_iterator
- mit = hardTree()->particles().find(cit->first->progenitor());
- // put the colour lines in the map
- ShowerParticlePtr oldParticle = cit->first->progenitor();
- ShowerParticlePtr newParticle = mit->second->branchingParticle();
- ColinePtr cLine = oldParticle-> colourLine();
- ColinePtr aLine = oldParticle->antiColourLine();
- if(newParticle->colourLine() &&
- cmap.find(newParticle-> colourLine())==cmap.end())
- cmap[newParticle-> colourLine()] = cLine;
- if(newParticle->antiColourLine() &&
- cmap.find(newParticle->antiColourLine())==cmap.end())
- cmap[newParticle->antiColourLine()] = aLine;
- // check whether or not particle emits
- bool emission = mit->second->parent();
- if(emission) {
- if(newParticle->colourLine()) {
- ColinePtr ctemp = newParticle-> colourLine();
- ctemp->removeColoured(newParticle);
- }
- if(newParticle->antiColourLine()) {
- ColinePtr ctemp = newParticle->antiColourLine();
- ctemp->removeAntiColoured(newParticle);
- }
- newParticle = mit->second->parent()->branchingParticle();
- }
- // get the new colour lines
- ColinePtr newCLine,newALine;
- // sort out colour lines
- if(newParticle->colourLine()) {
- ColinePtr ctemp = newParticle-> colourLine();
- ctemp->removeColoured(newParticle);
- if(cmap.find(ctemp)!=cmap.end()) {
- newCLine = cmap[ctemp];
- }
- else {
- newCLine = new_ptr(ColourLine());
- cmap[ctemp] = newCLine;
- }
- }
- // and anticolour lines
- if(newParticle->antiColourLine()) {
- ColinePtr ctemp = newParticle->antiColourLine();
- ctemp->removeAntiColoured(newParticle);
- if(cmap.find(ctemp)!=cmap.end()) {
- newALine = cmap[ctemp];
- }
- else {
- newALine = new_ptr(ColourLine());
- cmap[ctemp] = newALine;
- }
- }
- // remove colour lines from old particle
- if(aLine) {
- aLine->removeAntiColoured(cit->first->copy());
- aLine->removeAntiColoured(cit->first->progenitor());
- }
- if(cLine) {
- cLine->removeColoured(cit->first->copy());
- cLine->removeColoured(cit->first->progenitor());
- }
- // add particle to colour lines
- if(newCLine) newCLine->addColoured (newParticle);
- if(newALine) newALine->addAntiColoured(newParticle);
- // insert new particles
- cit->first->copy(newParticle);
- ShowerParticlePtr sp(new_ptr(ShowerParticle(*newParticle,1,false)));
- cit->first->progenitor(sp);
- currentTree()->incomingLines()[cit->first]=sp;
- cit->first->perturbative(!emission);
- // and the emitted particle if needed
- if(emission) {
- ShowerParticlePtr newOut = mit->second->parent()->children()[1]->branchingParticle();
- if(newOut->colourLine()) {
- ColinePtr ctemp = newOut-> colourLine();
- ctemp->removeColoured(newOut);
- assert(cmap.find(ctemp)!=cmap.end());
- cmap[ctemp]->addColoured (newOut);
- }
- if(newOut->antiColourLine()) {
- ColinePtr ctemp = newOut->antiColourLine();
- ctemp->removeAntiColoured(newOut);
- assert(cmap.find(ctemp)!=cmap.end());
- cmap[ctemp]->addAntiColoured(newOut);
- }
- ShowerParticlePtr sout=new_ptr(ShowerParticle(*newOut,1,true));
- ShowerProgenitorPtr out=new_ptr(ShowerProgenitor(cit->first->original(),newOut,sout));
- out->perturbative(false);
- currentTree()->outgoingLines().insert(make_pair(out,sout));
- }
- if(hard) {
- // sort out the value of x
- if(mit->second->beam()->momentum().z()>ZERO) {
- sp->x(newParticle->momentum(). plus()/mit->second->beam()->momentum(). plus());
- }
- else {
- sp->x(newParticle->momentum().minus()/mit->second->beam()->momentum().minus());
- }
- }
- }
- // outgoing particles
- for(map<ShowerProgenitorPtr,tShowerParticlePtr>::const_iterator
- cit=currentTree()->outgoingLines().begin();cit!=currentTree()->outgoingLines().end();++cit) {
- map<tShowerTreePtr,pair<tShowerProgenitorPtr,
- tShowerParticlePtr> >::const_iterator tit;
- for(tit = currentTree()->treelinks().begin();
- tit != currentTree()->treelinks().end();++tit) {
- if(tit->second.first && tit->second.second==cit->first->progenitor())
- break;
- }
- map<ShowerParticlePtr,tHardBranchingPtr>::const_iterator
- mit = hardTree()->particles().find(cit->first->progenitor());
- if(mit==hardTree()->particles().end()) continue;
- // put the colour lines in the map
- ShowerParticlePtr oldParticle = cit->first->progenitor();
- ShowerParticlePtr newParticle = mit->second->branchingParticle();
- ShowerParticlePtr newOut;
- ColinePtr cLine = oldParticle-> colourLine();
- ColinePtr aLine = oldParticle->antiColourLine();
- if(newParticle->colourLine() &&
- cmap.find(newParticle-> colourLine())==cmap.end())
- cmap[newParticle-> colourLine()] = cLine;
- if(newParticle->antiColourLine() &&
- cmap.find(newParticle->antiColourLine())==cmap.end())
- cmap[newParticle->antiColourLine()] = aLine;
- // check whether or not particle emits
- bool emission = !mit->second->children().empty();
- if(emission) {
- if(newParticle->colourLine()) {
- ColinePtr ctemp = newParticle-> colourLine();
- ctemp->removeColoured(newParticle);
- }
- if(newParticle->antiColourLine()) {
- ColinePtr ctemp = newParticle->antiColourLine();
- ctemp->removeAntiColoured(newParticle);
- }
- newParticle = mit->second->children()[0]->branchingParticle();
- newOut = mit->second->children()[1]->branchingParticle();
- if(newParticle->id()!=oldParticle->id()&&newParticle->id()==newOut->id())
- swap(newParticle,newOut);
- }
- // get the new colour lines
- ColinePtr newCLine,newALine;
- // sort out colour lines
- if(newParticle->colourLine()) {
- ColinePtr ctemp = newParticle-> colourLine();
- ctemp->removeColoured(newParticle);
- if(cmap.find(ctemp)!=cmap.end()) {
- newCLine = cmap[ctemp];
- }
- else {
- newCLine = new_ptr(ColourLine());
- cmap[ctemp] = newCLine;
- }
- }
- // and anticolour lines
- if(newParticle->antiColourLine()) {
- ColinePtr ctemp = newParticle->antiColourLine();
- ctemp->removeAntiColoured(newParticle);
- if(cmap.find(ctemp)!=cmap.end()) {
- newALine = cmap[ctemp];
- }
- else {
- newALine = new_ptr(ColourLine());
- cmap[ctemp] = newALine;
- }
- }
- // remove colour lines from old particle
- if(aLine) {
- aLine->removeAntiColoured(cit->first->copy());
- aLine->removeAntiColoured(cit->first->progenitor());
- }
- if(cLine) {
- cLine->removeColoured(cit->first->copy());
- cLine->removeColoured(cit->first->progenitor());
- }
- // special for unstable particles
- if(newParticle->id()==oldParticle->id() &&
- (tit!=currentTree()->treelinks().end()||!oldParticle->dataPtr()->stable())) {
- Lorentz5Momentum oldMomentum = oldParticle->momentum();
- Lorentz5Momentum newMomentum = newParticle->momentum();
- LorentzRotation boost( oldMomentum.findBoostToCM(),oldMomentum.e()/oldMomentum.mass());
- if(tit!=currentTree()->treelinks().end()) tit->first->transform(boost,false);
- oldParticle->transform(boost);
- boost = LorentzRotation(-newMomentum.findBoostToCM(),newMomentum.e()/newMomentum.mass());
- oldParticle->transform(boost);
- if(tit!=currentTree()->treelinks().end()) tit->first->transform(boost,false);
- newParticle=oldParticle;
- }
- // add particle to colour lines
- if(newCLine) newCLine->addColoured (newParticle);
- if(newALine) newALine->addAntiColoured(newParticle);
- // insert new particles
- cit->first->copy(newParticle);
- ShowerParticlePtr sp(new_ptr(ShowerParticle(*newParticle,1,true)));
- cit->first->progenitor(sp);
- currentTree()->outgoingLines()[cit->first]=sp;
- cit->first->perturbative(!emission);
- // and the emitted particle if needed
- if(emission) {
- if(newOut->colourLine()) {
- ColinePtr ctemp = newOut-> colourLine();
- ctemp->removeColoured(newOut);
- assert(cmap.find(ctemp)!=cmap.end());
- cmap[ctemp]->addColoured (newOut);
- }
- if(newOut->antiColourLine()) {
- ColinePtr ctemp = newOut->antiColourLine();
- ctemp->removeAntiColoured(newOut);
- assert(cmap.find(ctemp)!=cmap.end());
- cmap[ctemp]->addAntiColoured(newOut);
- }
- ShowerParticlePtr sout=new_ptr(ShowerParticle(*newOut,1,true));
- ShowerProgenitorPtr out=new_ptr(ShowerProgenitor(cit->first->original(),newOut,sout));
- out->perturbative(false);
- currentTree()->outgoingLines().insert(make_pair(out,sout));
- }
- // update any decay products
- if(tit!=currentTree()->treelinks().end())
- currentTree()->updateLink(tit->first,make_pair(cit->first,sp));
- }
- // reset the tree
- currentTree()->resetShowerProducts();
- // reextract the particles and set the colour partners
- vector<ShowerParticlePtr> particles =
- currentTree()->extractProgenitorParticles();
- // clear the partners
- for(unsigned int ix=0;ix<particles.size();++ix) {
- particles[ix]->partner(ShowerParticlePtr());
- particles[ix]->clearPartners();
- }
- // clear the tree
- hardTree(HardTreePtr());
- // Set the initial evolution scales
- showerModel()->partnerFinder()->
- setInitialEvolutionScales(particles,!hard,type,!_hardtree);
-}
-
-Branching Evolver::selectTimeLikeBranching(tShowerParticlePtr particle,
- ShowerInteraction::Type type,
- HardBranchingPtr branch) {
- Branching fb;
- unsigned int iout=0;
- tcPDPtr pdata[2];
- while (true) {
- // break if doing truncated shower and no truncated shower needed
- if(branch && (!isTruncatedShowerON()||hardOnly())) break;
- fb=_splittingGenerator->chooseForwardBranching(*particle,_finalenhance,type);
- // no emission break
- if(!fb.kinematics) break;
- // special for truncated shower
- if(branch) {
- // check haven't evolved too far
- if(fb.kinematics->scale() < branch->scale()) {
- fb=Branching();
- break;
- }
- // get the particle data objects
- for(unsigned int ix=0;ix<2;++ix) pdata[ix]=getParticleData(fb.ids[ix+1]);
- if(particle->id()!=fb.ids[0]) {
- for(unsigned int ix=0;ix<2;++ix) {
- tPDPtr cc(pdata[ix]->CC());
- if(cc) pdata[ix]=cc;
- }
- }
- // find the truncated line
- iout=0;
- if(pdata[0]->id()!=pdata[1]->id()) {
- if(pdata[0]->id()==particle->id()) iout=1;
- else if (pdata[1]->id()==particle->id()) iout=2;
- }
- else if(pdata[0]->id()==particle->id()) {
- if(fb.kinematics->z()>0.5) iout=1;
- else iout=2;
- }
- // apply the vetos for the truncated shower
- // no flavour changing branchings
- if(iout==0) {
- particle->vetoEmission(fb.type,fb.kinematics->scale());
- continue;
- }
- double zsplit = iout==1 ? fb.kinematics->z() : 1-fb.kinematics->z();
- // only if same interaction for forced branching
- ShowerInteraction::Type type2 = fb.type==ShowerPartnerType::QED ?
- ShowerInteraction::QED : ShowerInteraction::QCD;
- // and evolution
- if(type2==branch->sudakov()->interactionType()) {
- if(zsplit < 0.5 || // hardest line veto
- fb.kinematics->scale()*zsplit < branch->scale() ) { // angular ordering veto
- particle->vetoEmission(fb.type,fb.kinematics->scale());
- continue;
- }
- }
- // pt veto
- if(fb.kinematics->pT() > progenitor()->maximumpT(type2)) {
- particle->vetoEmission(fb.type,fb.kinematics->scale());
- continue;
- }
- }
- // standard vetos for all emissions
- if(timeLikeVetoed(fb,particle)) {
- particle->vetoEmission(fb.type,fb.kinematics->scale());
- if(particle->spinInfo()) particle->spinInfo()->decayVertex(VertexPtr());
- continue;
- }
- break;
- }
- // normal case
- if(!branch) {
- if(fb.kinematics) fb.hard = false;
- return fb;
- }
- // truncated emission
- if(fb.kinematics) {
- fb.hard = false;
- fb.iout = iout;
- return fb;
- }
- // otherwise need to return the hard emission
- // construct the kinematics for the hard emission
- ShoKinPtr showerKin=
- branch->sudakov()->createFinalStateBranching(branch->scale(),
- branch->children()[0]->z(),
- branch->phi(),
- branch->children()[0]->pT());
- showerKin->initialize( *particle,PPtr() );
- IdList idlist(3);
- idlist[0] = particle->id();
- idlist[1] = branch->children()[0]->branchingParticle()->id();
- idlist[2] = branch->children()[1]->branchingParticle()->id();
- fb = Branching( showerKin, idlist, branch->sudakov(),branch->type() );
- fb.hard = true;
- fb.iout=0;
- // return it
- return fb;
-}
-
-Branching Evolver::selectSpaceLikeDecayBranching(tShowerParticlePtr particle,
- const ShowerParticle::EvolutionScales & maxScales,
- Energy minmass,ShowerInteraction::Type type,
- HardBranchingPtr branch) {
- Branching fb;
- unsigned int iout=0;
- tcPDPtr pdata[2];
- while (true) {
- // break if doing truncated shower and no truncated shower needed
- if(branch && (!isTruncatedShowerON()||hardOnly())) break;
- // select branching
- fb=_splittingGenerator->chooseDecayBranching(*particle,maxScales,minmass,
- _initialenhance,type);
- // return if no radiation
- if(!fb.kinematics) break;
- // special for truncated shower
- if(branch) {
- // check haven't evolved too far
- if(fb.kinematics->scale() < branch->scale()) {
- fb=Branching();
- break;
- }
- // get the particle data objects
- for(unsigned int ix=0;ix<2;++ix) pdata[ix]=getParticleData(fb.ids[ix+1]);
- if(particle->id()!=fb.ids[0]) {
- for(unsigned int ix=0;ix<2;++ix) {
- tPDPtr cc(pdata[ix]->CC());
- if(cc) pdata[ix]=cc;
- }
- }
- // find the truncated line
- iout=0;
- if(pdata[0]->id()!=pdata[1]->id()) {
- if(pdata[0]->id()==particle->id()) iout=1;
- else if (pdata[1]->id()==particle->id()) iout=2;
- }
- else if(pdata[0]->id()==particle->id()) {
- if(fb.kinematics->z()>0.5) iout=1;
- else iout=2;
- }
- // apply the vetos for the truncated shower
- // no flavour changing branchings
- if(iout==0) {
- particle->vetoEmission(fb.type,fb.kinematics->scale());
- continue;
- }
- ShowerInteraction::Type type2 = fb.type==ShowerPartnerType::QED ?
- ShowerInteraction::QED : ShowerInteraction::QCD;
- double zsplit = iout==1 ? fb.kinematics->z() : 1-fb.kinematics->z();
- if(type2==branch->sudakov()->interactionType()) {
- if(zsplit < 0.5 || // hardest line veto
- fb.kinematics->scale()*zsplit < branch->scale() ) { // angular ordering veto
- particle->vetoEmission(fb.type,fb.kinematics->scale());
- continue;
- }
- }
- // pt veto
- if(fb.kinematics->pT() > progenitor()->maximumpT(type2)) {
- particle->vetoEmission(fb.type,fb.kinematics->scale());
- continue;
- }
- }
- // if not vetoed break
- if(spaceLikeDecayVetoed(fb,particle)) {
- // otherwise reset scale and continue
- particle->vetoEmission(fb.type,fb.kinematics->scale());
- continue;
- }
- break;
- }
- // normal case
- if(!branch) {
- if(fb.kinematics) fb.hard = false;
- return fb;
- }
- // truncated emission
- if(fb.kinematics) {
- fb.hard = false;
- fb.iout = iout;
- return fb;
- }
- // otherwise need to return the hard emission
- // construct the kinematics for the hard emission
- ShoKinPtr showerKin=
- branch->sudakov()->createDecayBranching(branch->scale(),
- branch->children()[0]->z(),
- branch->phi(),
- branch->children()[0]->pT());
- showerKin->initialize( *particle,PPtr() );
- IdList idlist(3);
- idlist[0] = particle->id();
- idlist[1] = branch->children()[0]->branchingParticle()->id();
- idlist[2] = branch->children()[1]->branchingParticle()->id();
- // create the branching
- fb = Branching( showerKin, idlist, branch->sudakov(),ShowerPartnerType::QCDColourLine );
- fb.hard=true;
- fb.iout=0;
- // return it
- return fb;
-}
diff --git a/Shower/Base/Evolver.h b/Shower/Base/Evolver.h
deleted file mode 100644
--- a/Shower/Base/Evolver.h
+++ /dev/null
@@ -1,969 +0,0 @@
-// -*- C++ -*-
-//
-// Evolver.h is a part of Herwig - A multi-purpose Monte Carlo event generator
-// Copyright (C) 2002-2011 The Herwig Collaboration
-//
-// Herwig is licenced under version 2 of the GPL, see COPYING for details.
-// Please respect the MCnet academic guidelines, see GUIDELINES for details.
-//
-#ifndef HERWIG_Evolver_H
-#define HERWIG_Evolver_H
-//
-// This is the declaration of the Evolver class.
-//
-
-#include "ThePEG/Interface/Interfaced.h"
-#include "Herwig/Shower/SplittingFunctions/SplittingGenerator.h"
-#include "ShowerModel.h"
-#include "ThePEG/PDF/BeamParticleData.h"
-#include "ShowerTree.h"
-#include "ShowerProgenitor.fh"
-#include "Herwig/Shower/ShowerHandler.fh"
-#include "Branching.h"
-#include "ShowerVeto.h"
-#include "FullShowerVeto.h"
-#include "HardTree.h"
-#include "ThePEG/Handlers/XComb.h"
-#include "Evolver.fh"
-#include "Herwig/MatrixElement/HwMEBase.h"
-#include "Herwig/Decay/HwDecayerBase.h"
-#include "Herwig/MatrixElement/Matchbox/Matching/ShowerApproximation.h"
-#include "Herwig/Utilities/Statistic.h"
-
-namespace Herwig {
-
-using namespace ThePEG;
-
-/**\ingroup Shower
- * Exception class
- * used to communicate failure of QED shower
- */
-struct InteractionVeto {};
-
-/** \ingroup Shower
- * The Evolver class performs the shower evolution of hard scattering
- * and decay processes in Herwig.
- *
- * @see \ref EvolverInterfaces "The interfaces"
- * defined for Evolver.
- */
-class Evolver: public Interfaced {
-
-/**
- * The ShowerHandler is a friend to set some parameters at initialisation
- */
-friend class ShowerHandler;
-
-public:
-
- /**
- * Pointer to an XComb object
- */
- typedef Ptr<XComb>::pointer XCPtr;
-
-public:
-
- /**
- * Default Constructor
- */
- Evolver() : _maxtry(100), _meCorrMode(1), _hardVetoMode(1),
- _hardVetoRead(0), _reconOpt(0),
- _hardVetoReadOption(false),
- _iptrms(ZERO), _beta(0.), _gamma(ZERO), _iptmax(),
- _limitEmissions(0), _initialenhance(1.), _finalenhance(1.),
- _nReWeight(100), _reWeight(false),
- interaction_(1), _trunc_Mode(true), _hardEmissionMode(0),
- _spinOpt(1), _softOpt(2), _hardPOWHEG(false),
- theFactorizationScaleFactor(1.0),
- theRenormalizationScaleFactor(1.0), muPt(ZERO),
- _maxTryFSR(100000),_maxFailFSR(100),_fracFSR(0.001),
- _nFSR(0), _nFailedFSR(0)
- {}
-
- /**
- * Members to perform the shower
- */
- //@{
- /**
- * Perform the shower of the hard process
- */
- virtual void showerHardProcess(ShowerTreePtr,XCPtr);
-
- /**
- * Perform the shower of a decay
- */
- virtual void showerDecay(ShowerTreePtr);
- //@}
-
- /**
- * Access to the flags and shower variables
- */
- //@{
- /**
- * Is there any showering switched on
- */
- bool showeringON() const { return isISRadiationON() || isFSRadiationON(); }
-
- /**
- * It returns true/false if the initial-state radiation is on/off.
- */
- bool isISRadiationON() const { return _splittingGenerator->isISRadiationON(); }
-
- /**
- * It returns true/false if the final-state radiation is on/off.
- */
- bool isFSRadiationON() const { return _splittingGenerator->isFSRadiationON(); }
-
- /**
- * Get the ShowerModel
- */
- ShowerModelPtr showerModel() const {return _model;}
-
- /**
- * Get the SplittingGenerator
- */
- tSplittingGeneratorPtr splittingGenerator() const { return _splittingGenerator; }
-
- /**
- * Mode for hard emissions
- */
- int hardEmissionMode() const {return _hardEmissionMode;}
-
- /**
- * Switch on or off hard vetoes
- */
- void restrictPhasespace(bool yes) {
- if ( yes )
- _hardVetoMode = 1;
- else
- _hardVetoMode = 0;
- }
-
- /**
- * Switch on or off hard veto scale from muF
- */
- void hardScaleIsMuF(bool yes) {
- if ( yes )
- _hardVetoRead = 1;
- else
- _hardVetoRead = 0;
- }
- //@}
-
- /**
- * Connect the Hard and Shower trees
- */
- virtual void connectTrees(ShowerTreePtr showerTree, HardTreePtr hardTree, bool hard );
-
- /**
- * Access to switches for spin correlations
- */
- //@{
- /**
- * Spin Correlations
- */
- unsigned int spinCorrelations() const {
- return _spinOpt;
- }
-
- /**
- * Soft correlations
- */
- unsigned int softCorrelations() const {
- return _softOpt;
- }
-
- /**
- * Any correlations
- */
- bool correlations() const {
- return _spinOpt!=0||_softOpt!=0;
- }
- //@}
-
-
- /**
- * Set the factorization scale factor
- */
- void factorizationScaleFactor(double f) {
- if ( f == theFactorizationScaleFactor )
- return;
- theFactorizationScaleFactor = f;
- splittingGenerator()->factorizationScaleFactor(f);
- }
-
- /**
- * Set the renormalization scale factor
- */
- void renormalizationScaleFactor(double f) {
- if ( f == theRenormalizationScaleFactor )
- return;
- theRenormalizationScaleFactor = f;
- splittingGenerator()->renormalizationScaleFactor(f);
- }
-
-public:
-
- /** @name Functions used by the persistent I/O system. */
- //@{
- /**
- * Function used to write out object persistently.
- * @param os the persistent output stream written to.
- */
- void persistentOutput(PersistentOStream & os) const;
-
- /**
- * Function used to read in object persistently.
- * @param is the persistent input stream read from.
- * @param version the version number of the object when written.
- */
- void persistentInput(PersistentIStream & is, int version);
- //@}
-
- /**
- * The standard Init function used to initialize the interfaces.
- * Called exactly once for each class by the class description system
- * before the main function starts or
- * when this class is dynamically loaded.
- */
- static void Init();
-
-protected:
-
- /**
- * Perform the shower
- */
- void doShowering(bool hard,XCPtr);
-
- /**
- * Generate the hard matrix element correction
- */
- virtual void hardMatrixElementCorrection(bool);
-
- /**
- * Generate the hardest emission
- */
- virtual void hardestEmission(bool hard);
-
- /**
- * Extract the particles to be showered, set the evolution scales
- * and apply the hard matrix element correction
- * @param hard Whether this is a hard process or decay
- * @return The particles to be showered
- */
- virtual vector<ShowerProgenitorPtr> setupShower(bool hard);
-
- /**
- * set the colour partners
- */
- virtual void setEvolutionPartners(bool hard,ShowerInteraction::Type,
- bool clear);
-
- /**
- * Methods to perform the evolution of an individual particle, including
- * recursive calling on the products
- */
- //@{
- /**
- * It does the forward evolution of the time-like input particle
-   * (and recursively for all its radiation products),
-   * accepting only emissions which conform to the showerVariables
-   * and the soft matrix element correction.
- * If at least one emission has occurred then the method returns true.
- * @param particle The particle to be showered
- */
- virtual bool timeLikeShower(tShowerParticlePtr particle, ShowerInteraction::Type,
- Branching fb, bool first);
-
- /**
- * It does the backward evolution of the space-like input particle
-   * (and recursively for all its time-like radiation products),
-   * accepting only emissions which conform to the showerVariables.
-   * If at least one emission has occurred then the method returns true.
- * @param particle The particle to be showered
- * @param beam The beam particle
- */
- virtual bool spaceLikeShower(tShowerParticlePtr particle,PPtr beam,
- ShowerInteraction::Type);
-
- /**
-   * It does the forward evolution of the input on-shell particle
-   * involved in a decay
-   * (and recursively for all its time-like radiation products),
-   * accepting only emissions which conform to the showerVariables.
-   * @param particle The particle to be showered
-   * @param maxScales The maximum scales for the shower.
- * @param minimumMass The minimum mass of the final-state system
- */
- virtual bool
- spaceLikeDecayShower(tShowerParticlePtr particle,
- const ShowerParticle::EvolutionScales & maxScales,
- Energy minimumMass,ShowerInteraction::Type,
- Branching fb);
-
- /**
- * Truncated shower from a time-like particle
- */
- virtual bool truncatedTimeLikeShower(tShowerParticlePtr particle,
- HardBranchingPtr branch,
- ShowerInteraction::Type type,
- Branching fb, bool first);
-
- /**
- * Truncated shower from a space-like particle
- */
- virtual bool truncatedSpaceLikeShower(tShowerParticlePtr particle,PPtr beam,
- HardBranchingPtr branch,
- ShowerInteraction::Type type);
-
- /**
-   * Truncated shower from a space-like particle in a decay
- */
- virtual bool truncatedSpaceLikeDecayShower(tShowerParticlePtr particle,
- const ShowerParticle::EvolutionScales & maxScales,
- Energy minimumMass, HardBranchingPtr branch,
- ShowerInteraction::Type type, Branching fb);
- //@}
-
- /**
- * Switches for matrix element corrections
- */
- //@{
- /**
- * Any ME correction?
- */
- bool MECOn(bool hard) const {
- return ( _hardEmissionMode == 0 ||
- (!hard && _hardEmissionMode ==-1) ) &&
- _meCorrMode > 0;
- }
-
- /**
- * Any hard ME correction?
- */
- bool hardMEC(bool hard) const {
- return ( _hardEmissionMode == 0 ||
- (!hard && _hardEmissionMode ==-1) ) &&
- (_meCorrMode == 1 || _meCorrMode == 2);
- }
-
- /**
- * Any soft ME correction?
- */
- bool softMEC() const {
- return ( _hardEmissionMode == 0 ||
- (_currenttree->isDecay() && _hardEmissionMode ==-1) ) &&
- (_meCorrMode == 1 || _meCorrMode > 2);
- }
- //@}
-
- /**
- * Is the truncated shower on?
- */
- bool isTruncatedShowerON() const {return _trunc_Mode;}
-
- /**
- * Switch for intrinsic pT
- */
- //@{
- /**
- * Any intrinsic pT?
- */
- bool ipTon() const {
- return _iptrms != ZERO || ( _beta == 1.0 && _gamma != ZERO && _iptmax !=ZERO );
- }
- //@}
-
- /**@name Additional shower vetoes */
- //@{
- /**
- * Insert a veto.
- */
- void addVeto (ShowerVetoPtr v) { _vetoes.push_back(v); }
-
- /**
- * Remove a veto.
- */
- void removeVeto (ShowerVetoPtr v) {
- vector<ShowerVetoPtr>::iterator vit = find(_vetoes.begin(),_vetoes.end(),v);
- if (vit != _vetoes.end())
- _vetoes.erase(vit);
- }
-
- //@}
-
- /**
- * Switches for vetoing hard emissions
- */
- //@{
- /**
- * Vetos on?
- */
- bool hardVetoOn() const { return _hardVetoMode > 0; }
-
- /**
- * veto hard emissions in IS shower?
- */
- bool hardVetoIS() const { return _hardVetoMode == 1 || _hardVetoMode == 2; }
-
- /**
- * veto hard emissions in FS shower?
- */
- bool hardVetoFS() const { return _hardVetoMode == 1 || _hardVetoMode > 2; }
-
- /**
- * veto hard emissions according to lastScale from XComb?
- */
- bool hardVetoXComb() const {return (_hardVetoRead == 1);}
-
- /**
- * Returns true if the hard veto read-in is to be applied to only
- * the primary collision and false otherwise.
- */
- bool hardVetoReadOption() const {return _hardVetoReadOption;}
- //@}
-
- /**
- * Enhancement factors for radiation needed to generate the soft matrix
- * element correction.
- */
- //@{
- /**
- * Access the enhancement factor for initial-state radiation
- */
- double initialStateRadiationEnhancementFactor() const { return _initialenhance; }
-
- /**
- * Access the enhancement factor for final-state radiation
- */
- double finalStateRadiationEnhancementFactor() const { return _finalenhance; }
-
- /**
- * Set the enhancement factor for initial-state radiation
- */
- void initialStateRadiationEnhancementFactor(double in) { _initialenhance=in; }
-
- /**
- * Set the enhancement factor for final-state radiation
- */
- void finalStateRadiationEnhancementFactor(double in) { _finalenhance=in; }
- //@}
-
- /**
-   * Access to set/get the HardTree currently being showered
- */
- //@{
- /**
- * The HardTree currently being showered
- */
- tHardTreePtr hardTree() {return _hardtree;}
-
- /**
- * The HardTree currently being showered
- */
- void hardTree(tHardTreePtr in) {_hardtree = in;}
- //@}
-
- /**
- * Access/set the beam particle for the current initial-state shower
- */
- //@{
- /**
- * Get the beam particle data
- */
- Ptr<BeamParticleData>::const_pointer beamParticle() const { return _beam; }
-
- /**
- * Set the beam particle data
- */
- void setBeamParticle(Ptr<BeamParticleData>::const_pointer in) { _beam=in; }
- //@}
-
- /**
-   * Set/Get the current tree being evolved, for use by inheriting classes
- */
- //@{
- /**
- * Get the tree
- */
- tShowerTreePtr currentTree() { return _currenttree; }
-
- /**
- * Set the tree
- */
- void currentTree(tShowerTreePtr tree) { _currenttree=tree; }
-
- //@}
-
- /**
- * Access the maximum number of attempts to generate the shower
- */
- unsigned int maximumTries() const { return _maxtry; }
-
- /**
- * Set/Get the ShowerProgenitor for the current shower
- */
- //@{
- /**
- * Access the progenitor
- */
- ShowerProgenitorPtr progenitor() { return _progenitor; }
-
- /**
- * Set the progenitor
- */
- void progenitor(ShowerProgenitorPtr in) { _progenitor=in; }
- //@}
-
- /**
- * Calculate the intrinsic \f$p_T\f$.
- */
- virtual void generateIntrinsicpT(vector<ShowerProgenitorPtr>);
-
- /**
- * Access to the intrinsic \f$p_T\f$ for inheriting classes
- */
- map<tShowerProgenitorPtr,pair<Energy,double> > & intrinsicpT() { return _intrinsic; }
-
- /**
-   * Find the maximum allowed pT according to the hard process.
- */
- void setupMaximumScales(const vector<ShowerProgenitorPtr> &,XCPtr);
-
- /**
- * find the relevant hard scales for profile scales.
- */
- void setupHardScales(const vector<ShowerProgenitorPtr> &,XCPtr);
-
- /**
- * Return the relevant hard scale to be used in the profile scales
- */
- Energy hardScale() const {
- return muPt;
- }
-
- /**
- * Convert the HardTree into an extra shower emission
- */
- void convertHardTree(bool hard,ShowerInteraction::Type type);
-
-protected:
-
- /**
- * Start the shower of a timelike particle
- */
- virtual bool startTimeLikeShower(ShowerInteraction::Type);
-
- /**
-   * Update the shower history for a time-like particle
- */
- void updateHistory(tShowerParticlePtr particle);
-
- /**
- * Start the shower of a spacelike particle
- */
- virtual bool startSpaceLikeShower(PPtr,ShowerInteraction::Type);
-
- /**
- * Start the shower of a spacelike particle
- */
- virtual bool
- startSpaceLikeDecayShower(const ShowerParticle::EvolutionScales & maxScales,
- Energy minimumMass,ShowerInteraction::Type);
-
- /**
- * Select the branching for the next time-like emission
- */
- Branching selectTimeLikeBranching(tShowerParticlePtr particle,
- ShowerInteraction::Type type,
- HardBranchingPtr branch);
-
- /**
- * Select the branching for the next space-like emission in a decay
- */
- Branching selectSpaceLikeDecayBranching(tShowerParticlePtr particle,
- const ShowerParticle::EvolutionScales & maxScales,
- Energy minmass,ShowerInteraction::Type type,
- HardBranchingPtr branch);
- /**
- * Create the timelike child of a branching
- */
- ShowerParticleVector createTimeLikeChildren(tShowerParticlePtr particle,
- IdList ids);
-
- /**
- * Vetos for the timelike shower
- */
- virtual bool timeLikeVetoed(const Branching &,ShowerParticlePtr);
-
- /**
- * Vetos for the spacelike shower
- */
- virtual bool spaceLikeVetoed(const Branching &,ShowerParticlePtr);
-
- /**
- * Vetos for the spacelike shower
- */
- virtual bool spaceLikeDecayVetoed(const Branching &,ShowerParticlePtr);
-
- /**
- * Generate only the hard emission, for testing purposes.
- */
- bool hardOnly() const {return _limitEmissions==3;}
-
- /**
- * Members to construct the HardTree from the shower if needed
- */
- //@{
- /**
- * Construct the tree for a scattering process
- */
- bool constructHardTree(vector<ShowerProgenitorPtr> & particlesToShower,
- ShowerInteraction::Type inter);
-
- /**
- * Construct the tree for a decay process
- */
- bool constructDecayTree(vector<ShowerProgenitorPtr> & particlesToShower,
- ShowerInteraction::Type inter);
-
- /**
- * Construct a time-like line
- */
- void constructTimeLikeLine(tHardBranchingPtr branch,tShowerParticlePtr particle);
-
- /**
- * Construct a space-like line
- */
- void constructSpaceLikeLine(tShowerParticlePtr particle,
- HardBranchingPtr & first, HardBranchingPtr & last,
- SudakovPtr sud,PPtr beam);
- //@}
-
-public:
-
- /** @name MC@NLO diagnostics */
- //@{
-
- /**
- * True, if Matchbox MC@NLO S-event
- */
- bool wasMCatNLOSEvent() const { return isMCatNLOSEvent; }
-
- /**
- * True, if matchbox MC@NLO H-event
- */
- bool wasMCatNLOHEvent() const { return isMCatNLOHEvent; }
-
- //@}
-
-protected:
-
- /** @name Clone Methods. */
- //@{
- /**
- * Make a simple clone of this object.
- * @return a pointer to the new object.
- */
- virtual IBPtr clone() const;
-
- /** Make a clone of this object, possibly modifying the cloned object
- * to make it sane.
- * @return a pointer to the new object.
- */
- virtual IBPtr fullclone() const;
- //@}
-
-protected:
-
- /** @name Standard Interfaced functions. */
- //@{
- /**
- * Initialize this object after the setup phase before saving an
- * EventGenerator to disk.
- * @throws InitException if object could not be initialized properly.
- */
- virtual void doinit();
- //@}
-
-private:
-
- /**
- * The assignment operator is private and must never be called.
- * In fact, it should not even be implemented.
- */
- Evolver & operator=(const Evolver &);
-
-private:
-
- /**
- * Pointer to the model for the shower evolution model
- */
- ShowerModelPtr _model;
-
- /**
- * Pointer to the splitting generator
- */
- SplittingGeneratorPtr _splittingGenerator;
-
- /**
- * Maximum number of tries to generate the shower of a particular tree
- */
- unsigned int _maxtry;
-
- /**
- * Matrix element correction switch
- */
- unsigned int _meCorrMode;
-
- /**
- * Hard emission veto switch
- */
- unsigned int _hardVetoMode;
-
- /**
- * Hard veto to be read switch
- */
- unsigned int _hardVetoRead;
-
- /**
- * Control of the reconstruction option
- */
- unsigned int _reconOpt;
-
- /**
- * If hard veto pT scale is being read-in this determines
- * whether the read-in value is applied to primary and
- * secondary (MPI) scatters or just the primary one, with
- * the usual computation of the veto being performed for
- * the secondary (MPI) scatters.
- */
- bool _hardVetoReadOption;
-
- /**
- * rms intrinsic pT of Gaussian distribution
- */
- Energy _iptrms;
-
- /**
- * Proportion of inverse quadratic intrinsic pT distribution
- */
- double _beta;
-
- /**
- * Parameter for inverse quadratic: 2*Beta*Gamma/(sqr(Gamma)+sqr(intrinsicpT))
- */
- Energy _gamma;
-
- /**
- * Upper bound on intrinsic pT for inverse quadratic
- */
- Energy _iptmax;
-
- /**
- * Limit the number of emissions for testing
- */
- unsigned int _limitEmissions;
-
- /**
- * The progenitor of the current shower
- */
- ShowerProgenitorPtr _progenitor;
-
- /**
- * Matrix element
- */
- HwMEBasePtr _hardme;
-
- /**
- * Decayer
- */
- HwDecayerBasePtr _decayme;
-
- /**
- * The ShowerTree currently being showered
- */
- ShowerTreePtr _currenttree;
-
- /**
- * The HardTree currently being showered
- */
- HardTreePtr _hardtree;
-
- /**
- * Radiation enhancement factors for use with the veto algorithm
- * if needed by the soft matrix element correction
- */
- //@{
- /**
- * Enhancement factor for initial-state radiation
- */
- double _initialenhance;
-
- /**
- * Enhancement factor for final-state radiation
- */
- double _finalenhance;
- //@}
-
- /**
- * The beam particle data for the current initial-state shower
- */
- Ptr<BeamParticleData>::const_pointer _beam;
-
- /**
- * Storage of the intrinsic \f$p_t\f$ of the particles
- */
- map<tShowerProgenitorPtr,pair<Energy,double> > _intrinsic;
-
- /**
- * Vetoes
- */
- vector<ShowerVetoPtr> _vetoes;
-
- /**
- * Full Shower Vetoes
- */
- vector<FullShowerVetoPtr> _fullShowerVetoes;
-
- /**
- * Number of iterations for reweighting
- */
- unsigned int _nReWeight;
-
- /**
- * Whether or not we are reweighting
- */
- bool _reWeight;
-
- /**
- * Number of IS emissions
- */
- unsigned int _nis;
-
- /**
- * Number of FS emissions
- */
- unsigned int _nfs;
-
- /**
- * The option for which interactions to use
- */
- unsigned int interaction_;
-
- /**
- * Interactions allowed in the shower
- */
- vector<ShowerInteraction::Type> interactions_;
-
- /**
- * Truncated shower switch
- */
- bool _trunc_Mode;
-
- /**
- * Count of the number of truncated emissions
- */
- unsigned int _truncEmissions;
-
- /**
- * Mode for the hard emissions
- */
- int _hardEmissionMode;
-
- /**
- * Option to include spin correlations
- */
- unsigned int _spinOpt;
-
- /**
- * Option for the kernel used for soft correlations
- */
- unsigned int _softOpt;
-
- /**
- * Option for hard radiation in POWHEG events
- */
- bool _hardPOWHEG;
-
- /**
- * True, if Matchbox MC@NLO S-event
- */
- bool isMCatNLOSEvent;
-
- /**
- * True, if matchbox MC@NLO H-event
- */
- bool isMCatNLOHEvent;
-
- /**
- * True, if Matchbox Powheg S-event
- */
- bool isPowhegSEvent;
-
- /**
- * True, if matchbox Powheg H-event
- */
- bool isPowhegHEvent;
-
- /**
- * The shower approximation to provide the hard scale profile
- */
- Ptr<ShowerApproximation>::tptr theShowerApproximation;
-
- /**
- * The factorization scale factor.
- */
- double theFactorizationScaleFactor;
-
- /**
- * The renormalization scale factor.
- */
- double theRenormalizationScaleFactor;
-
- /**
- * True if no warnings about incorrect hard emission
- * mode setting have been issued yet
- */
- static bool _hardEmissionModeWarn;
-
- /**
- * True if no warnings about missing truncated shower
- * have been issued yet
- */
- static bool _missingTruncWarn;
-
- /**
- * The relevant hard scale to be used in the profile scales
- */
- Energy muPt;
-
- /**
- * Maximum number of emission attempts for FSR
- */
- unsigned int _maxTryFSR;
-
- /**
- * Maximum number of failures for FSR generation
- */
- unsigned int _maxFailFSR;
-
- /**
- * Failure fraction for FSR generation
- */
- double _fracFSR;
-
- /**
- * Counter for number of FSR emissions
- */
- unsigned int _nFSR;
-
- /**
- * Counter for the number of failed events due to FSR emissions
- */
- unsigned int _nFailedFSR;
-};
-
-}
-
-#endif /* HERWIG_Evolver_H */
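The intrinsic-pT members documented in the deleted Evolver.h above (_iptrms, _beta, _gamma, _iptmax) describe a mixture of a Gaussian of given rms and an inverse-quadratic distribution 2*Beta*Gamma/(Gamma^2 + pT^2) with an upper cut-off. The following is a minimal, self-contained sketch of how such a mixture could be sampled; it is illustrative only, and sampleIntrinsicPt and its signature are assumptions rather than Herwig API.

#include <cmath>
#include <random>

// Sample an intrinsic pT from the documented mixture: with probability
// (1 - beta) a Gaussian whose radial rms is iptrms, and with probability
// beta an inverse-quadratic density ~ gamma/(gamma^2 + pT^2) truncated
// at iptmax.  All quantities are in the same energy units.
double sampleIntrinsicPt(double iptrms, double beta,
                         double gamma, double iptmax,
                         std::mt19937 &rng) {
  std::uniform_real_distribution<double> flat(0.0, 1.0);
  if (flat(rng) > beta) {
    // Gaussian component: pT^2 is exponential with mean iptrms^2, so the
    // radial rms equals iptrms.  Using 1 - r keeps the logarithm finite.
    return iptrms * std::sqrt(-std::log(1.0 - flat(rng)));
  }
  // Inverse-quadratic component: invert the normalised CDF
  // atan(pT/gamma) / atan(iptmax/gamma) on [0, iptmax].
  return gamma * std::tan(flat(rng) * std::atan(iptmax / gamma));
}

The azimuthal angle of the intrinsic-pT kick would then be drawn uniformly in [0, 2*pi).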
diff --git a/Shower/Dipole/AlphaS/lo_alpha_s.h b/Shower/Dipole/AlphaS/lo_alpha_s.h
--- a/Shower/Dipole/AlphaS/lo_alpha_s.h
+++ b/Shower/Dipole/AlphaS/lo_alpha_s.h
@@ -1,164 +1,167 @@
// -*- C++ -*-
// couplings/lo_alpha_s.h is part of matchbox
// (C) 2008 Simon Platzer -- sp@particle.uni-karlsruhe.de
#ifndef matchbox_couplings_lo_alpha_s_h
#define matchbox_couplings_lo_alpha_s_h
#include "alpha_s.h"
namespace matchbox {
using namespace ThePEG;
/**
* LO running alpha_s
*
* @see \ref lo_alpha_sInterfaces "The interfaces"
* defined for lo_alpha_s.
*/
class lo_alpha_s
: public alpha_s {
public:
/** @name Standard constructors and destructors. */
//@{
/**
* The default constructor.
*/
lo_alpha_s();
/**
* The destructor.
*/
virtual ~lo_alpha_s();
//@}
public:
/// return alpha_s as function of scale, QCD scale
/// and number of active flavours
virtual double operator () (Energy2 scale,
Energy2 lambda2,
unsigned int nf) const;
+ /// return the number of loops which determine this running
+ virtual unsigned int nloops () const { return 1; }
+
public:
/** @name Functions used by the persistent I/O system. */
//@{
/**
* Function used to write out object persistently.
* @param os the persistent output stream written to.
*/
void persistentOutput(PersistentOStream & os) const;
/**
* Function used to read in object persistently.
* @param is the persistent input stream read from.
* @param version the version number of the object when written.
*/
void persistentInput(PersistentIStream & is, int version);
//@}
/**
* The standard Init function used to initialize the interfaces.
* Called exactly once for each class by the class description system
* before the main function starts or
* when this class is dynamically loaded.
*/
static void Init();
protected:
/** @name Standard Interfaced functions. */
//@{
/**
* Initialize this object after the setup phase before saving an
* EventGenerator to disk.
* @throws InitException if object could not be initialized properly.
*/
virtual inline void doinit() throw(InitException) {
freezing_scale_ *= scale_factor();
alpha_s::doinit();
}
//@}
protected:
/** @name Clone Methods. */
//@{
/**
* Make a simple clone of this object.
* @return a pointer to the new object.
*/
virtual IBPtr clone() const;
/** Make a clone of this object, possibly modifying the cloned object
* to make it sane.
* @return a pointer to the new object.
*/
virtual IBPtr fullclone() const;
//@}
private:
/**
* The static object used to initialize the description of this class.
* Indicates that this is a concrete class with persistent data.
*/
static ClassDescription<lo_alpha_s> initlo_alpha_s;
/**
* The assignment operator is private and must never be called.
* In fact, it should not even be implemented.
*/
lo_alpha_s & operator=(const lo_alpha_s &);
private:
Energy freezing_scale_;
};
}
#include "ThePEG/Utilities/ClassTraits.h"
namespace ThePEG {
/** @cond TRAITSPECIALIZATIONS */
/** This template specialization informs ThePEG about the
* base classes of lo_alpha_s. */
template <>
struct BaseClassTrait<matchbox::lo_alpha_s,1> {
/** Typedef of the first base class of lo_alpha_s. */
typedef matchbox::alpha_s NthBase;
};
/** This template specialization informs ThePEG about the name of
* the lo_alpha_s class and the shared object where it is defined. */
template <>
struct ClassTraits<matchbox::lo_alpha_s>
: public ClassTraitsBase<matchbox::lo_alpha_s> {
/** Return a platform-independent class name */
static string className() { return "matchbox::lo_alpha_s"; }
/**
* The name of a file containing the dynamic library where the class
* lo_alpha_s is implemented. It may also include several, space-separated,
* libraries if the class lo_alpha_s depends on other classes (base classes
* excepted). In this case the listed libraries will be dynamically
* linked in the order they are specified.
*/
static string library() { return "HwDipoleShowerAlphaS.so"; }
};
/** @endcond */
}
#endif /* matchbox_couplings_lo_alpha_s_h */
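The nloops() accessor added above advertises that lo_alpha_s implements one-loop running. For orientation, a standalone sketch of the standard one-loop expression such an operator() evaluates is shown below; the freezing of the coupling below freezing_scale_ and any ThePEG unit handling are omitted, and alphaSOneLoop is an illustrative helper, not part of the class.

#include <cmath>

// One-loop running coupling: alpha_s(Q^2) = 1 / (b0 * ln(Q^2/Lambda^2))
// with b0 = (33 - 2*nf) / (12*pi), the same beta_0 convention that appears
// in the NLO class below.  q2 and lambda2 are squared scales in the same
// units; nf is the number of active flavours.
double alphaSOneLoop(double q2, double lambda2, unsigned int nf) {
  const double pi = 3.14159265358979323846;
  const double b0 = (33.0 - 2.0 * nf) / (12.0 * pi);
  return 1.0 / (b0 * std::log(q2 / lambda2));
}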
diff --git a/Shower/Dipole/AlphaS/nlo_alpha_s.h b/Shower/Dipole/AlphaS/nlo_alpha_s.h
--- a/Shower/Dipole/AlphaS/nlo_alpha_s.h
+++ b/Shower/Dipole/AlphaS/nlo_alpha_s.h
@@ -1,194 +1,197 @@
// -*- C++ -*-
// couplings/nlo_alpha_s.h is part of matchbox
// (C) 2008 Simon Platzer -- sp@particle.uni-karlsruhe.de
#ifndef matchbox_couplings_nlo_alpha_s_h
#define matchbox_couplings_nlo_alpha_s_h
#include "alpha_s.h"
namespace matchbox {
using namespace ThePEG;
/**
* NLO running alpha_s
*
* @see \ref nlo_alpha_sInterfaces "The interfaces"
* defined for nlo_alpha_s.
*/
class nlo_alpha_s
: public alpha_s {
public:
/** @name Standard constructors and destructors. */
//@{
/**
* The default constructor.
*/
nlo_alpha_s();
/**
* The destructor.
*/
virtual ~nlo_alpha_s();
//@}
public:
/// return alpha_s as function of scale, QCD scale
/// and number of active flavours
virtual double operator () (Energy2 scale,
Energy2 lambda2,
unsigned int nf) const;
+ /// return the number of loops which determine this running
+ virtual unsigned int nloops () const { return 2; }
+
public:
/** @name Functions used by the persistent I/O system. */
//@{
/**
* Function used to write out object persistently.
* @param os the persistent output stream written to.
*/
void persistentOutput(PersistentOStream & os) const;
/**
* Function used to read in object persistently.
* @param is the persistent input stream read from.
* @param version the version number of the object when written.
*/
void persistentInput(PersistentIStream & is, int version);
//@}
/**
* The standard Init function used to initialize the interfaces.
* Called exactly once for each class by the class description system
* before the main function starts or
* when this class is dynamically loaded.
*/
static void Init();
protected:
/** @name Standard Interfaced functions. */
//@{
/**
* Initialize this object after the setup phase before saving an
* EventGenerator to disk.
* @throws InitException if object could not be initialized properly.
*/
virtual inline void doinit() throw(InitException) {
freezing_scale_ *= scale_factor();
alpha_s::doinit();
}
//@}
protected:
/** @name Clone Methods. */
//@{
/**
* Make a simple clone of this object.
* @return a pointer to the new object.
*/
virtual IBPtr clone() const;
/** Make a clone of this object, possibly modifying the cloned object
* to make it sane.
* @return a pointer to the new object.
*/
virtual IBPtr fullclone() const;
//@}
private:
/**
* The static object used to initialize the description of this class.
* Indicates that this is a concrete class with persistent data.
*/
static ClassDescription<nlo_alpha_s> initnlo_alpha_s;
/**
* The assignment operator is private and must never be called.
* In fact, it should not even be implemented.
*/
nlo_alpha_s & operator=(const nlo_alpha_s &);
private:
struct rg_solution {
inline double operator () (double alpha) {
double beta0 = (33.-2.*nf)/(12.*Constants::pi);
double beta1 = (153.-19.*nf)/(24.*sqr(Constants::pi));
return ((1./alpha)+(beta1/beta0)*log(alpha/(beta0+beta1*alpha))- beta0*slog);
}
double slog;
unsigned int nf;
};
Energy freezing_scale_;
bool exact_evaluation_;
static rg_solution& rg () {
static rg_solution rg_;
return rg_;
}
static gsl::bisection_root_solver<rg_solution,100>& rg_solver () {
static gsl::bisection_root_solver<rg_solution,100> rg_solver_(rg());
return rg_solver_;
}
bool two_largeq_terms_;
};
}
#include "ThePEG/Utilities/ClassTraits.h"
namespace ThePEG {
/** @cond TRAITSPECIALIZATIONS */
/** This template specialization informs ThePEG about the
* base classes of nlo_alpha_s. */
template <>
struct BaseClassTrait<matchbox::nlo_alpha_s,1> {
/** Typedef of the first base class of nlo_alpha_s. */
typedef matchbox::alpha_s NthBase;
};
/** This template specialization informs ThePEG about the name of
* the nlo_alpha_s class and the shared object where it is defined. */
template <>
struct ClassTraits<matchbox::nlo_alpha_s>
: public ClassTraitsBase<matchbox::nlo_alpha_s> {
/** Return a platform-independent class name */
static string className() { return "matchbox::nlo_alpha_s"; }
/**
* The name of a file containing the dynamic library where the class
* nlo_alpha_s is implemented. It may also include several, space-separated,
* libraries if the class nlo_alpha_s depends on other classes (base classes
* excepted). In this case the listed libraries will be dynamically
* linked in the order they are specified.
*/
static string library() { return "HwDipoleShowerAlphaS.so"; }
};
/** @endcond */
}
#endif /* matchbox_couplings_nlo_alpha_s_h */
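The rg_solution functor above encodes the implicit two-loop renormalisation-group condition that the bisection solver inverts for alpha_s. Writing its slog member as ln(Q^2/Lambda^2) (its conventional meaning; the assignment is made in the corresponding .cc file, which is not part of this diff), the root being sought satisfies

\[
  \frac{1}{\alpha_s}
  + \frac{\beta_1}{\beta_0}
    \ln\!\left(\frac{\alpha_s}{\beta_0 + \beta_1 \alpha_s}\right)
  = \beta_0 \ln\frac{Q^2}{\Lambda^2},
  \qquad
  \beta_0 = \frac{33 - 2 n_f}{12\pi},
  \qquad
  \beta_1 = \frac{153 - 19 n_f}{24\pi^2},
\]

which is the exact solution of \( \mathrm{d}\alpha_s/\mathrm{d}\ln Q^2 = -\beta_0\alpha_s^2 - \beta_1\alpha_s^3 \) in this convention.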
diff --git a/Shower/QTilde/Base/Evolver.fh b/Shower/QTilde/Base/Evolver.fh
deleted file mode 100644
--- a/Shower/QTilde/Base/Evolver.fh
+++ /dev/null
@@ -1,22 +0,0 @@
-// -*- C++ -*-
-//
-// This is the forward declaration of the Evolver class.
-//
-#ifndef HERWIG_Evolver_FH
-#define HERWIG_Evolver_FH
-
-#include "ThePEG/Config/Pointers.h"
-
-namespace Herwig {
-
-class Evolver;
-
-}
-
-namespace ThePEG {
-
-ThePEG_DECLARE_POINTERS(Herwig::Evolver,EvolverPtr);
-
-}
-
-#endif
diff --git a/Shower/QTilde/Base/PartnerFinder.h b/Shower/QTilde/Base/PartnerFinder.h
--- a/Shower/QTilde/Base/PartnerFinder.h
+++ b/Shower/QTilde/Base/PartnerFinder.h
@@ -1,209 +1,208 @@
// -*- C++ -*-
//
// PartnerFinder.h is a part of Herwig - A multi-purpose Monte Carlo event generator
// Copyright (C) 2002-2011 The Herwig Collaboration
//
// Herwig is licenced under version 2 of the GPL, see COPYING for details.
// Please respect the MCnet academic guidelines, see GUIDELINES for details.
//
#ifndef HERWIG_PartnerFinder_H
#define HERWIG_PartnerFinder_H
//
// This is the declaration of the PartnerFinder class.
//
#include "Herwig/Shower/QTilde/ShowerConfig.h"
#include "ThePEG/Interface/Interfaced.h"
-#include "Evolver.fh"
#include "PartnerFinder.fh"
namespace Herwig {
using namespace ThePEG;
/**
* typedef of a pair of particle for calculating the evolution scales
*/
typedef pair<tShowerParticlePtr,tShowerParticlePtr> ShowerPPair;
/** \ingroup Shower
*
* This class is responsible for two related tasks:
* - it finds the partners;
* - for each pair of partners (and hence for each interaction)
* it sets the initial evolution scales of both of them.
*
* In general the finding of the partners is performed by this class but
* the calculation of the initial evolution scales should be implemented
* for different shower evolution models in classes inheriting from this one.
* Notice that a given particle has, in general, a different partner
* for each different interaction; however, given a partner, its
* initial evolution scale, Q, is purely a kinematical relationship
* between the pair, without dependence on the dynamics (i.e. type of interaction).
*
* @see \ref PartnerFinderInterfaces "The interfaces"
* defined for PartnerFinder.
*/
class PartnerFinder: public Interfaced {
public:
/**
* The default constructor.
*/
PartnerFinder() : partnerMethod_(0), QEDPartner_(0), scaleChoice_(0) {}
/**
* Given an input collection of particles (ShowerParticle objects),
* each of these methods sets the initial evolution scales of those
* particles, among the ones given in input, that do not yet have their
* evolution scale set.
* The input collection of particles can be either the full collection of
* showering particles (kept in the main class ShowerHandler) in the case
* isDecayCase is false, or simply, in the case isDecayCase is true,
* the decaying particle and its decay products.
* The methods return true unless something goes wrong (inconsistencies
* or undefined values).
*
* These methods are virtual but in most cases inheriting classes should
* not need to override them, as they simply find the relevant partner and
* call one of the calculateScale members to calculate the scale.
*/
//@{
/**
* Set the initial scales
* @param particles The particles to be considered
* @param isDecayCase Whether or not this is a decay
* @param setPartners Whether to set the colour partners or just the scales
*/
virtual void setInitialEvolutionScales(const ShowerParticleVector &particles,
const bool isDecayCase,
ShowerInteraction::Type,
const bool setPartners=true);
//@}
public:
/** @name Functions used by the persistent I/O system. */
//@{
/**
* Function used to write out object persistently.
* @param os the persistent output stream written to.
*/
void persistentOutput(PersistentOStream & os) const;
/**
* Function used to read in object persistently.
* @param is the persistent input stream read from.
* @param version the version number of the object when written.
*/
void persistentInput(PersistentIStream & is, int version);
//@}
/**
* The standard Init function used to initialize the interfaces.
* Called exactly once for each class by the class description system
* before the main function starts or
* when this class is dynamically loaded.
*/
static void Init();
protected:
/**
* Members to set the scales for different interactions
*/
//@{
/**
* Set initial scales for a QCD interaction
*/
virtual void setInitialQCDEvolutionScales(const ShowerParticleVector &particles,
const bool isDecayCase,
const bool setPartners=true);
/**
* Set initial scales for a QED interaction
*/
virtual void setInitialQEDEvolutionScales(const ShowerParticleVector &particles,
const bool isDecayCase,
const bool setPartners=true);
//@}
/**
* Find the QCD partners
* @param particle The particle to find the partners for
* @param particles The full set of particles to search
*/
vector< pair<ShowerPartnerType::Type, tShowerParticlePtr> >
findQCDPartners(tShowerParticlePtr particle, const ShowerParticleVector &particles);
/**
* Find the QED partners
* @param particle The particle to find the partners for
* @param particles The full set of particles to search
*/
vector< pair<double, tShowerParticlePtr> >
findQEDPartners(tShowerParticlePtr particle, const ShowerParticleVector &particles,
const bool isDecayCase);
/**
* Given a pair of particles, supposedly partners w.r.t. an interaction,
* this method returns their initial evolution scales as a pair.
* If something goes wrong, it returns the null (ZERO,ZERO) pair.
* This method is used by the above setXXXInitialEvolutionScales
* methods.
* These methods must be overridden in inheriting classes.
*/
//@{
/**
* General method to calculate the initial evolution scales
*/
virtual pair<Energy,Energy> calculateInitialEvolutionScales(const ShowerPPair &,
const bool isDecayCase);
/**
* Calculate the initial evolution scales for two final-state particles
*/
virtual pair<Energy,Energy> calculateFinalFinalScales(const ShowerPPair &)=0;
/**
* Calculate the initial evolution scales for two initial-state particles
*/
virtual pair<Energy,Energy> calculateInitialInitialScales(const ShowerPPair &)=0;
/**
* Calculate the initial evolution scales for one initial
* and one final-state particles
*/
virtual pair<Energy,Energy> calculateInitialFinalScales(const ShowerPPair &,
const bool isDecayCase)=0;
//@}
private:
/**
* The assignment operator is private and must never be called.
* In fact, it should not even be implemented.
*/
PartnerFinder & operator=(const PartnerFinder &);
private:
/**
* Method for choosing colour partner
*/
int partnerMethod_;
/**
* Choice for the QED radiation partner
*/
int QEDPartner_;
/**
* Choice of the scale
*/
int scaleChoice_;
};
}
#endif /* HERWIG_PartnerFinder_H */
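As the documentation above describes, setInitialEvolutionScales finds a colour partner for each particle and then delegates the kinematics to one of the calculateXXXScales members implemented by shower-specific subclasses. The toy, self-contained sketch below illustrates that flow; ToyParticle, areColourConnected, pairScale and setToyEvolutionScales are invented names, the random choice among candidate partners is only one possible policy, and the pair invariant mass is a placeholder for the real scale calculation.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <vector>

// Toy stand-in for a showering particle: just the colour-line labels and
// four-momentum components needed to illustrate partner finding.
struct ToyParticle {
  int colour = 0;      // colour line label (0 = none)
  int anticolour = 0;  // anticolour line label (0 = none)
  double e = 0, px = 0, py = 0, pz = 0;
};

// Two particles are QCD partners if they share a colour line.
bool areColourConnected(const ToyParticle &a, const ToyParticle &b) {
  return (a.colour     && a.colour     == b.anticolour) ||
         (a.anticolour && a.anticolour == b.colour);
}

// Placeholder "kinematical" scale for a pair: its invariant mass.
double pairScale(const ToyParticle &a, const ToyParticle &b) {
  const double e  = a.e  + b.e,  px = a.px + b.px;
  const double py = a.py + b.py, pz = a.pz + b.pz;
  return std::sqrt(std::max(0.0, e*e - px*px - py*py - pz*pz));
}

// For every particle: collect the colour-connected candidates, pick one
// as the partner, and assign the pair's kinematical scale.
std::vector<double> setToyEvolutionScales(const std::vector<ToyParticle> &ps) {
  std::vector<double> scales(ps.size(), 0.0);
  for (std::size_t i = 0; i < ps.size(); ++i) {
    std::vector<std::size_t> partners;
    for (std::size_t j = 0; j < ps.size(); ++j)
      if (i != j && areColourConnected(ps[i], ps[j])) partners.push_back(j);
    if (partners.empty()) continue;
    const std::size_t k = partners[std::rand() % partners.size()];
    scales[i] = pairScale(ps[i], ps[k]);
  }
  return scales;
}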
diff --git a/Tests/Makefile.am b/Tests/Makefile.am
--- a/Tests/Makefile.am
+++ b/Tests/Makefile.am
@@ -1,365 +1,365 @@
AUTOMAKE_OPTIONS = -Wno-portability
AM_LDFLAGS += -module -avoid-version -rpath /dummy/path/not/used
EXTRA_DIST = Inputs python Rivet
dist-hook:
rm -rf $(distdir)/Inputs/.svn
rm -rf $(distdir)/python/.svn
rm -rf $(distdir)/Rivet/.svn
EXTRA_LTLIBRARIES = LeptonTest.la GammaTest.la HadronTest.la DISTest.la
if WANT_LIBFASTJET
EXTRA_LTLIBRARIES += HadronJetTest.la LeptonJetTest.la
HadronJetTest_la_SOURCES = \
Hadron/VHTest.h Hadron/VHTest.cc\
Hadron/VTest.h Hadron/VTest.cc\
Hadron/HTest.h Hadron/HTest.cc
HadronJetTest_la_CPPFLAGS = $(AM_CPPFLAGS) $(FASTJETINCLUDE) \
-I$(FASTJETPATH)
HadronJetTest_la_LIBADD = $(FASTJETLIBS)
LeptonJetTest_la_SOURCES = \
Lepton/TopDecay.h Lepton/TopDecay.cc
LeptonJetTest_la_CPPFLAGS = $(AM_CPPFLAGS) $(FASTJETINCLUDE) \
-I$(FASTJETPATH)
LeptonJetTest_la_LIBADD = $(FASTJETLIBS)
endif
LeptonTest_la_SOURCES = \
Lepton/VVTest.h Lepton/VVTest.cc \
Lepton/VBFTest.h Lepton/VBFTest.cc \
Lepton/VHTest.h Lepton/VHTest.cc \
Lepton/FermionTest.h Lepton/FermionTest.cc
GammaTest_la_SOURCES = \
Gamma/GammaMETest.h Gamma/GammaMETest.cc \
Gamma/GammaPMETest.h Gamma/GammaPMETest.cc
DISTest_la_SOURCES = \
DIS/DISTest.h DIS/DISTest.cc
HadronTest_la_SOURCES = \
Hadron/HadronVVTest.h Hadron/HadronVVTest.cc\
Hadron/HadronVBFTest.h Hadron/HadronVBFTest.cc\
Hadron/WHTest.h Hadron/WHTest.cc\
Hadron/ZHTest.h Hadron/ZHTest.cc\
Hadron/VGammaTest.h Hadron/VGammaTest.cc\
Hadron/ZJetTest.h Hadron/ZJetTest.cc\
Hadron/WJetTest.h Hadron/WJetTest.cc\
Hadron/QQHTest.h Hadron/QQHTest.cc
REPO = $(top_builddir)/src/HerwigDefaults.rpo
HERWIG = $(top_builddir)/src/Herwig
HWREAD = $(HERWIG) read --repo $(REPO) -L $(builddir)/.libs -i $(top_builddir)/src
HWRUN = $(HERWIG) run
tests : tests-LEP tests-DIS tests-LHC tests-Gamma
if WANT_LIBFASTJET
tests-LEP : test-LEP-VV test-LEP-VH test-LEP-VBF test-LEP-BB test-LEP-Quarks test-LEP-Leptons \
test-LEP-default test-LEP-Powheg test-LEP-TopDecay
else
tests-LEP : test-LEP-VV test-LEP-VH test-LEP-VBF test-LEP-BB test-LEP-Quarks test-LEP-Leptons
endif
tests-DIS : test-DIS-Charged test-DIS-Neutral
if WANT_LIBFASTJET
tests-LHC : test-LHC-WW test-LHC-WZ test-LHC-ZZ test-LHC-ZGamma test-LHC-WGamma \
test-LHC-ZH test-LHC-WH test-LHC-ZJet test-LHC-WJet test-LHC-Z test-LHC-W test-LHC-ZZVBF test-LHC-VBF \
test-LHC-WWVBF test-LHC-bbH test-LHC-ttH test-LHC-GammaGamma test-LHC-GammaJet test-LHC-Higgs \
test-LHC-HiggsJet test-LHC-QCDFast test-LHC-QCD test-LHC-Top test-LHC-Bottom \
test-LHC-WHJet test-LHC-ZHJet test-LHC-HJet test-LHC-ZShower test-LHC-WShower\
test-LHC-WHJet-Powheg test-LHC-ZHJet-Powheg test-LHC-HJet-Powheg \
test-LHC-ZShower-Powheg test-LHC-WShower-Powheg
else
tests-LHC : test-LHC-WW test-LHC-WZ test-LHC-ZZ test-LHC-ZGamma test-LHC-WGamma \
test-LHC-ZH test-LHC-WH test-LHC-ZJet test-LHC-WJet test-LHC-Z test-LHC-W test-LHC-ZZVBF test-LHC-VBF \
test-LHC-WWVBF test-LHC-bbH test-LHC-ttH test-LHC-GammaGamma test-LHC-GammaJet test-LHC-Higgs \
test-LHC-HiggsJet test-LHC-QCDFast test-LHC-QCD test-LHC-Top
endif
tests-Gamma : test-Gamma-FF test-Gamma-WW test-Gamma-P
if WANT_LIBFASTJET
test-LEP-% : Inputs/LEP-%.in LeptonTest.la LeptonJetTest.la
$(HWREAD) $<
$(HWRUN) $(notdir $(subst .in,.run,$<)) -N $${NUMEVENTS:-10000}
else
test-LEP-% : Inputs/LEP-%.in LeptonTest.la
$(HWREAD) $<
$(HWRUN) $(notdir $(subst .in,.run,$<)) -N $${NUMEVENTS:-10000}
endif
Rivet-LHC-Matchbox-% : Rivet/LHC-Matchbox-%.in
if [ ! -d Rivet-$(notdir $(subst .in,,$<)) ]; then mkdir Rivet-$(notdir $(subst .in,,$<)); fi;
cd Rivet-$(notdir $(subst .in,,$<)); echo `pwd`; \
../$(HERWIG) read --repo ../$(REPO) -L ../$(top_builddir)/lib -i ../$(top_builddir)/src ../$<; \
../$(HERWIG) run $(notdir $(subst .in,.run,$<)) -N $${NUMEVENTS:-10000}; \
mv $(notdir $(subst .in,.yoda,$<)) ..; \
cd ..
Rivet-TVT-Matchbox-% : Rivet/TVT-Matchbox-%.in
if [ ! -d Rivet-$(notdir $(subst .in,,$<)) ]; then mkdir Rivet-$(notdir $(subst .in,,$<)); fi;
cd Rivet-$(notdir $(subst .in,,$<)); echo `pwd`; \
../$(HERWIG) read --repo ../$(REPO) -L ../$(top_builddir)/lib -i ../$(top_builddir)/src ../$<; \
../$(HERWIG) run $(notdir $(subst .in,.run,$<)) -N $${NUMEVENTS:-10000}; \
mv $(notdir $(subst .in,.yoda,$<)) ..; \
cd ..
Rivet-TVT-Dipole-% : Rivet/TVT-Dipole-%.in
if [ ! -d Rivet-$(notdir $(subst .in,,$<)) ]; then mkdir Rivet-$(notdir $(subst .in,,$<)); fi;
cd Rivet-$(notdir $(subst .in,,$<)); echo `pwd`; \
../$(HERWIG) read --repo ../$(REPO) -L ../$(top_builddir)/lib -i ../$(top_builddir)/src ../$<; \
../$(HERWIG) run $(notdir $(subst .in,.run,$<)) -N $${NUMEVENTS:-10000}; \
mv $(notdir $(subst .in,.yoda,$<)) ..; \
cd ..
Rivet-LHC-Dipole-% : Rivet/LHC-Dipole-%.in
if [ ! -d Rivet-$(notdir $(subst .in,,$<)) ]; then mkdir Rivet-$(notdir $(subst .in,,$<)); fi;
cd Rivet-$(notdir $(subst .in,,$<)); echo `pwd`; \
../$(HERWIG) read --repo ../$(REPO) -L ../$(top_builddir)/lib -i ../$(top_builddir)/src ../$<; \
../$(HERWIG) run $(notdir $(subst .in,.run,$<)) -N $${NUMEVENTS:-10000}; \
mv $(notdir $(subst .in,.yoda,$<)) ..; \
cd ..
Rivet/LEP-%.in :
python/make_input_files.py $(notdir $(subst .in,,$@))
Rivet/DIS-%.in :
python/make_input_files.py $(notdir $(subst .in,,$@))
Rivet/BFactory-%.in:
python/make_input_files.py $(notdir $(subst .in,,$@))
Rivet/TVT-%.in:
python/make_input_files.py $(notdir $(subst .in,,$@))
Rivet/LHC-%.in:
python/make_input_files.py $(notdir $(subst .in,,$@))
Rivet/Star-%.in:
python/make_input_files.py $(notdir $(subst .in,,$@))
Rivet/SppS-%.in:
python/make_input_files.py $(notdir $(subst .in,,$@))
Rivet/ISR-%.in:
python/make_input_files.py $(notdir $(subst .in,,$@))
Rivet-LEP-Matchbox-% : Rivet/LEP-Matchbox-%.in
if [ ! -d Rivet-$(notdir $(subst .in,,$<)) ]; then mkdir Rivet-$(notdir $(subst .in,,$<)); fi;
cd Rivet-$(notdir $(subst .in,,$<)); echo `pwd`; \
../$(HERWIG) read --repo ../$(REPO) -L ../$(top_builddir)/lib -i ../$(top_builddir)/src ../$<; \
../$(HERWIG) run $(notdir $(subst .in,.run,$<)) -N $${NUMEVENTS:-10000}; \
mv $(notdir $(subst .in,.yoda,$<)) ..; \
cd ..
Rivet-LEP-Dipole-% : Rivet/LEP-Dipole-%.in
if [ ! -d Rivet-$(notdir $(subst .in,,$<)) ]; then mkdir Rivet-$(notdir $(subst .in,,$<)); fi;
cd Rivet-$(notdir $(subst .in,,$<)); echo `pwd`; \
../$(HERWIG) read --repo ../$(REPO) -L ../$(top_builddir)/lib -i ../$(top_builddir)/src ../$<; \
../$(HERWIG) run $(notdir $(subst .in,.run,$<)) -N $${NUMEVENTS:-10000}; \
mv $(notdir $(subst .in,.yoda,$<)) ..; \
cd ..
Rivet-BFactory-Matchbox-% : Rivet/BFactory-Matchbox-%.in
if [ ! -d Rivet-$(notdir $(subst .in,,$<)) ]; then mkdir Rivet-$(notdir $(subst .in,,$<)); fi;
cd Rivet-$(notdir $(subst .in,,$<)); echo `pwd`; \
../$(HERWIG) read --repo ../$(REPO) -L ../$(top_builddir)/lib -i ../$(top_builddir)/src ../$<; \
../$(HERWIG) run $(notdir $(subst .in,.run,$<)) -N $${NUMEVENTS:-10000}; \
mv $(notdir $(subst .in,.yoda,$<)) ..; \
cd ..
Rivet-LEP-% : Rivet/LEP-%.in
$(HWREAD) $<
$(HWRUN) $(notdir $(subst .in,.run,$<)) -N $${NUMEVENTS:-10000}
Rivet-BFactory-% : Rivet/BFactory-%.in
$(HWREAD) $<
$(HWRUN) $(notdir $(subst .in,.run,$<)) -N $${NUMEVENTS:-10000}
Rivet-TVT-% : Rivet/TVT-%.in
$(HWREAD) $<
$(HWRUN) $(notdir $(subst .in,.run,$<)) -N $${NUMEVENTS:-10000}
Rivet-DIS-% : Rivet/DIS-%.in
$(HWREAD) $<
$(HWRUN) $(notdir $(subst .in,.run,$<)) -N $${NUMEVENTS:-10000}
Rivet-LHC-% : Rivet/LHC-%.in
$(HWREAD) $<
$(HWRUN) $(notdir $(subst .in,.run,$<)) -N $${NUMEVENTS:-10000}
Rivet-Star-% : Rivet/Star-%.in
$(HWREAD) $<
$(HWRUN) $(notdir $(subst .in,.run,$<)) -N $${NUMEVENTS:-10000}
Rivet-SppS-% : Rivet/SppS-%.in
$(HWREAD) $<
$(HWRUN) $(notdir $(subst .in,.run,$<)) -N $${NUMEVENTS:-10000}
Rivet-ISR-% : Rivet/ISR-%.in
$(HWREAD) $<
$(HWRUN) $(notdir $(subst .in,.run,$<)) -N $${NUMEVENTS:-10000}
Rivet-inputfiles: $(shell echo Rivet/LEP{,-Powheg,-Matchbox,-Dipole,-Matchbox-Powheg}-{10,22,35,44,91,130,133,136,161,172,177,183,189,192,196,197,200,202,206,91-nopi}.in) \
$(shell echo Rivet/LEP{,-Powheg,-Matchbox-Powheg}-14.in) \
$(shell echo Rivet/BFactory{,-Powheg,-Matchbox,-Dipole,-Matchbox-Powheg}-{10.52,10.52-sym,10.54,10.45}.in) \
$(shell echo Rivet/BFactory-{Upsilon,Upsilon2,Upsilon4,Tau,10.58-res}.in) \
$(shell echo Rivet/DIS{,-NoME,-Powheg,-Matchbox,-Dipole,-Matchbox-Powheg}-{e--LowQ2,e+-LowQ2,e+-HighQ2}.in) \
$(shell echo Rivet/TVT{,-Powheg,-Matchbox,-Dipole,-Matchbox-Powheg}-{Run-I-Z,Run-I-W,Run-I-WZ,Run-II-Z-e,Run-II-Z-{,LowMass-,HighMass-}mu,Run-II-W}.in) \
$(shell echo Rivet/TVT-Run-II-{DiPhoton-GammaGamma,DiPhoton-GammaJet,PromptPhoton}.in) \
$(shell echo Rivet/TVT-Powheg-Run-II-{DiPhoton-GammaGamma,DiPhoton-GammaJet}.in) \
$(shell echo Rivet/TVT{,-Dipole,-Matchbox,-Matchbox-Powheg}-{Run-II-Jets-{0..11},Run-I-Jets-{1..8}}.in ) \
$(shell echo Rivet/TVT{,-Dipole,-Matchbox,-Matchbox-Powheg}-{630-Jets-{1..3},300-Jets-1,900-Jets-1}.in ) \
$(shell echo Rivet/TVT-{Run-I,Run-II,300,630,900}-UE.in) \
$(shell echo Rivet/LHC{,-Dipole,-Matchbox,-Matchbox-Powheg}-7-Jets-{0..15}.in ) \
$(shell echo Rivet/LHC-{900,2360,2760,7,8,13}-UE.in ) \
$(shell echo Rivet/LHC-{900,7}-UE-Long.in ) \
$(shell echo Rivet/LHC{,-Dipole,-Matchbox,-Matchbox-Powheg}-7-Charm-{1..5}.in) \
$(shell echo Rivet/LHC{,-Dipole,-Matchbox,-Matchbox-Powheg}-7-Bottom-{0..8}.in) \
$(shell echo Rivet/LHC{,-Matchbox,-Matchbox-Powheg}-7-Top-{L,SL,All}.in) \
$(shell echo Rivet/LHC{,-Matchbox,-Matchbox-Powheg}-8-Top-All.in) \
$(shell echo Rivet/Star-{UE,Jets-{1..4}}.in ) \
$(shell echo Rivet/SppS-{200,500,900,546}-UE.in ) \
$(shell echo Rivet/LHC{,-Matchbox,-Matchbox-Powheg,-Powheg,-Dipole}-{W-{e,mu},13-Z-{e,mu},8-Z-Mass{1..4}-{e,mu},Z-{e,mu,mu-SOPHTY},Z-LowMass-{e,mu},Z-MedMass-e,WZ,WW-{emu,ll},ZZ-{ll,lv},W-Z-{e,mu}}.in) \
$(shell echo Rivet/LHC-7-{W,Z}Gamma-{e,mu}.in) \
$(shell echo Rivet/LHC{,-Matchbox,-Matchbox-Powheg,-Dipole}-{7-W-Jet-{1..3}-e,7-Z-Jet-{0..3}-e,7-Z-Jet-0-mu}.in) \
$(shell echo Rivet/LHC{-Matchbox,-Matchbox-Powheg,-Dipole}-{Z-b,Z-bb,W-b,8-Z-jj}.in) \
- $(shell echo Rivet/LHC-7-PromptPhoton-{1..4}.in) Rivet/LHC-GammaGamma-7.in \
+ $(shell echo Rivet/LHC-{7,8}-PromptPhoton-{1..4}.in) Rivet/LHC-GammaGamma-7.in \
$(shell echo Rivet/LHC{,-Powheg}-7-{DiPhoton-GammaGamma,DiPhoton-GammaJet}.in) \
$(shell echo Rivet/LHC{,-Powheg,-Matchbox,-Matchbox-Powheg,-Dipole}-{ggH,VBF,WH,ZH}.in) \
$(shell echo Rivet/LHC{,-Powheg,-Matchbox,-Matchbox-Powheg,-Dipole}-8-{ggH,VBF,WH,ZH}{,-GammaGamma}.in) \
$(shell echo Rivet/LHC{,-Matchbox,-Matchbox-Powheg,-Dipole}-ggHJet.in)
# $(shell echo Rivet/ISR-{30,44,53,62}-UE.in ) $(shell echo Rivet/SppS-{53,63}-UE.in )
Rivet-LEP: $(shell echo Rivet-LEP{,-Powheg,-Matchbox,-Dipole}-{10,14,22,35,44,91,130,133,136,161,172,177,183,189,192,196,197,200,202,206,91-nopi})
rm -rf Rivet-LEP
python/merge-LEP LEP
python/merge-LEP LEP-Powheg
python/merge-LEP LEP-Matchbox
python/merge-LEP LEP-Dipole
rivet-mkhtml -o Rivet-LEP LEP.yoda:Hw++ LEP-Powheg.yoda:Hw++-Powheg LEP-Matchbox.yoda:Hw++-Matchbox LEP-Dipole.yoda:Hw++-Dipole
Rivet-BFactory: $(shell echo Rivet-BFactory{,-Powheg,-Matchbox,-Dipole}-{10.52,10.52-sym,10.54,10.45}) \
$(shell echo Rivet-BFactory-{Upsilon,Upsilon2,Upsilon4,Tau,10.58-res,10.58})
rm -rf Rivet-BFactory
python/merge-BFactory BFactory
python/merge-BFactory BFactory-Powheg
python/merge-BFactory BFactory-Matchbox
python/merge-BFactory BFactory-Dipole
rivet-mkhtml -o Rivet-BFactory BFactory.yoda:Hw++ BFactory-Powheg.yoda:Hw++-Powheg BFactory-Matchbox.yoda:Hw++-Matchbox BFactory-Dipole.yoda:Hw++-Dipole
Rivet-DIS: $(shell echo Rivet-DIS{,-NoME,-Powheg,-Matchbox,-Dipole}-{e--LowQ2,e+-LowQ2,e+-HighQ2})
rm -rf Rivet-DIS
python/merge-DIS DIS
python/merge-DIS DIS-Powheg
python/merge-DIS DIS-NoME
python/merge-DIS DIS-Matchbox
python/merge-DIS DIS-Dipole
rivet-mkhtml -o Rivet-DIS DIS.yoda:Hw++ DIS-Powheg.yoda:Hw++-Powheg DIS-NoME.yoda:Hw++-NoME DIS-Matchbox.yoda:Hw++-Matchbox DIS-Dipole.yoda:Hw++-Dipole
Rivet-TVT-WZ: $(shell echo Rivet-TVT{,-Powheg,-Matchbox,-Dipole}-{Run-I-Z,Run-I-W,Run-I-WZ,Run-II-Z-{e,{,LowMass-,HighMass-}mu},Run-II-W})
rm -rf Rivet-TVT-WZ
python/merge-TVT-EW TVT-Run-II-W.yoda TVT-Run-II-Z-{e,{,LowMass-,HighMass-}mu}.yoda\
TVT-Run-I-{W,Z,WZ}.yoda -o TVT-WZ.yoda
python/merge-TVT-EW TVT-Powheg-Run-II-W.yoda TVT-Powheg-Run-II-Z-{e,{,LowMass-,HighMass-}mu}.yoda\
TVT-Powheg-Run-I-{W,Z,WZ}.yoda -o TVT-Powheg-WZ.yoda
python/merge-TVT-EW TVT-Matchbox-Run-II-W.yoda TVT-Matchbox-Run-II-Z-{e,{,LowMass-,HighMass-}mu}.yoda\
TVT-Matchbox-Run-I-{W,Z,WZ}.yoda -o TVT-Matchbox-WZ.yoda
python/merge-TVT-EW TVT-Dipole-Run-II-W.yoda TVT-Dipole-Run-II-Z-{e,{,LowMass-,HighMass-}mu}.yoda\
TVT-Dipole-Run-I-{W,Z,WZ}.yoda -o TVT-Dipole-WZ.yoda
rivet-mkhtml -o Rivet-TVT-WZ TVT-WZ.yoda:Hw++ TVT-Powheg-WZ.yoda:Hw++-Powheg TVT-Matchbox-WZ.yoda:Hw++-Matchbox TVT-Dipole-WZ.yoda:Hw++-Dipole
Rivet-TVT-Photon: $(shell echo Rivet-TVT-Run-II-{DiPhoton-GammaGamma,DiPhoton-GammaJet,PromptPhoton}) \
$(shell echo Rivet-TVT-Powheg-Run-II-{DiPhoton-GammaGamma,DiPhoton-GammaJet})
rm -rf Rivet-TVT-Photon
python/merge-TVT-Photon TVT -o TVT-Photon.yoda
python/merge-TVT-Photon TVT-Powheg -o TVT-Powheg-Photon.yoda
rivet-mkhtml -o Rivet-TVT-Photon TVT-Photon.yoda:Hw TVT-Powheg-Photon.yoda:Hw-Powheg
Rivet-TVT-Jets: $(shell echo Rivet-TVT-{Run-II-Jets-{0..11},Run-I-Jets-{1..8}} ) \
$(shell echo Rivet-TVT-{630-Jets-{1..3},300-Jets-1,900-Jets-1} ) \
$(shell echo Rivet-TVT-{Run-I,Run-II,300,630,900}-UE)
python/merge-TVT-Energy TVT
rivet-merge-CDF_2012_NOTE10874 TVT-300-Energy.yoda TVT-900-Energy.yoda TVT-1960-Energy.yoda
flat2yoda RatioPlots.dat -o TVT-RatioPlots.yoda
rm -rf Rivet-TVT-Jets
python/merge-TVT-Jets TVT
rivet-mkhtml -o Rivet-TVT-Jets TVT-Jets.yoda:Hw++
Rivet-LHC-Jets: $(shell echo Rivet-LHC-7-Jets-{0..15} ) \
$(shell echo Rivet-LHC-{900,2360,2760,7,8,13}-UE ) \
$(shell echo Rivet-LHC-{900,7}-UE-Long ) \
$(shell echo Rivet-LHC-7-Charm-{1..5}) \
$(shell echo Rivet-LHC-7-Bottom-{0..8}) \
$(shell echo Rivet-LHC-7-Top-{L,SL,All})\
$(shell echo Rivet-LHC-8-Top-All)
rm -rf Rivet-LHC-Jets
python/merge-LHC-Jets LHC
rivet-mkhtml -o Rivet-LHC-Jets LHC-Jets.yoda:Hw++
Rivet-Star: $(shell echo Rivet-Star-{UE,Jets-{1..4}} )
rm -rf Rivet-Star
python/merge-Star Star
rivet-mkhtml -o Rivet-Star Star.yoda
Rivet-SppS: $(shell echo Rivet-ISR-{30,44,53,62}-UE ) \
$(shell echo Rivet-SppS-{53,63,200,500,900,546}-UE )
rm -rf Rivet-SppS
python/merge-SppS SppS
rivet-mkhtml -o Rivet-SppS SppS.yoda
Rivet-LHC-EW: $(shell echo Rivet-LHC{,-Matchbox,-Powheg,-Dipole}-{13-Z-{e,mu},8-Z-Mass{1..4}-{e,mu},W-{e,mu},Z-{e,mu,mu-SOPHTY},Z-LowMass-{e,mu},Z-MedMass-e,WZ,WW-{emu,ll},ZZ-{ll,lv},W-Z-{e,mu}}) \
$(shell echo Rivet-LHC{,-Matchbox,-Dipole}-{7-W-Jet-{1..3}-e,7-Z-Jet-{0..3}-e,7-Z-Jet-0-mu}) \
$(shell echo Rivet-LHC{-Matchbox,-Dipole}-{Z-b,Z-bb,W-b,8-Z-jj}) \
$(shell echo Rivet-LHC-7-{W,Z}Gamma-{e,mu})
rm -rf Rivet-LHC-EW;
python/merge-LHC-EW LHC-{13-Z-{e,mu},8-Z-Mass{1..4}-{e,mu},W-{e,mu},Z-{e,mu},Z-LowMass-{e,mu},Z-MedMass-e,W-Z-{e,mu},WW-{emu,ll},WZ,ZZ-{ll,lv}}.yoda LHC-7-{W,Z}-Jet-{1,2,3}-e.yoda LHC-7-{W,Z}Gamma-{e,mu}.yoda -o LHC-EW.yoda;
python/merge-LHC-EW LHC-Matchbox-{13-Z-{e,mu},8-Z-Mass{1..4}-{e,mu},W-{e,mu},Z-{e,mu},Z-LowMass-{e,mu},Z-MedMass-e,W-Z-{e,mu},WW-{emu,ll},WZ,ZZ-{ll,lv}}.yoda LHC-Matchbox-7-{W,Z}-Jet-{1,2,3}-e.yoda -o LHC-Matchbox-EW.yoda;
python/merge-LHC-EW LHC-Dipole-{13-Z-{e,mu},8-Z-Mass{1..4}-{e,mu},W-{e,mu},Z-{e,mu},Z-LowMass-{e,mu},Z-MedMass-e,W-Z-{e,mu},WW-{emu,ll},WZ,ZZ-{ll,lv}}.yoda LHC-Dipole-7-{W,Z}-Jet-{1,2,3}-e.yoda -o LHC-Dipole-EW.yoda;
python/merge-LHC-EW LHC-Powheg-{W-{e,mu},Z-{e,mu},Z-LowMass-{e,mu},Z-MedMass-e,W-Z-{e,mu},WW-{emu,ll},WZ,ZZ-{ll,lv}}.yoda -o LHC-Powheg-EW.yoda;
rivet-mkhtml -o Rivet-LHC-EW LHC-EW.yoda:Hw++ LHC-Powheg-EW.yoda:Hw++-Powheg LHC-Matchbox-EW.yoda:Hw++-Matchbox LHC-Matchbox-Z-b.yoda:Hw++-Matchbox-Zb \
LHC-Matchbox-Z-bb.yoda:Hw++-Matchbox-Zbb LHC-Matchbox-W-b.yoda:Hw++-Matchbox-W-bb LHC-Dipole-EW.yoda:Hw++-Dipole \
LHC-Dipole-Z-b.yoda:Hw++-Dipole-Zb LHC-Dipole-Z-bb.yoda:Hw++-Dipole-Zbb LHC-Dipole-W-b.yoda:Hw++-Dipole-W-bb \
LHC-Z-mu-SOPHTY.yoda:Hw++ LHC-Powheg-Z-mu-SOPHTY.yoda:Hw++-Powheg LHC-Matchbox-Z-mu-SOPHTY.yoda:Hw++-Matchbox
-Rivet-LHC-Photon: $(shell echo Rivet-LHC-7-PromptPhoton-{1..4}) Rivet-LHC-GammaGamma-7 \
+Rivet-LHC-Photon: $(shell echo Rivet-LHC-{7,8}-PromptPhoton-{1..4}) Rivet-LHC-GammaGamma-7 \
$(shell echo Rivet-LHC{,-Powheg}-7-{DiPhoton-GammaGamma,DiPhoton-GammaJet})
rm -rf Rivet-LHC-Photon
python/merge-LHC-Photon LHC -o LHC-Photon.yoda
python/merge-LHC-Photon LHC-Powheg -o LHC-Powheg-Photon.yoda
rivet-mkhtml -o Rivet-LHC-Photon LHC-Photon.yoda:Hw LHC-Powheg-Photon.yoda:Hw-Powheg
Rivet-LHC-Higgs: $(shell echo Rivet-LHC{,-Powheg}-{ggH,VBF,WH,ZH})\
$(shell echo Rivet-LHC{,-Powheg}-8-{ggH,VBF,WH,ZH}{,-GammaGamma}) Rivet-LHC-ggHJet
rm -rf Rivet-LHC-Higgs
rivet-mkhtml -o Rivet-LHC-Higgs LHC-Powheg-ggH.yoda:gg-Powheg LHC-ggH.yoda:gg LHC-ggHJet.yoda:HJet \
LHC-Powheg-VBF.yoda:VBF-Powheg LHC-VBF.yoda:VBF LHC-WH.yoda:WH LHC-ZH.yoda:ZH \
LHC-Powheg-WH.yoda:WH-Powheg LHC-Powheg-ZH.yoda:ZH-Powheg
tests-Rivet : Rivet-LEP Rivet-BFactory Rivet-DIS Rivet-TVT-WZ Rivet-TVT-Photon Rivet-TVT-Jets Rivet-LHC-Jets Rivet-Star Rivet-SppS Rivet-LHC-EW Rivet-LHC-Photon
test-Gamma-% : Inputs/Gamma-%.in GammaTest.la
$(HWREAD) $<
$(HWRUN) $(notdir $(subst .in,.run,$<)) -N $${NUMEVENTS:-10000}
test-DIS-% : Inputs/DIS-%.in DISTest.la
$(HWREAD) $<
$(HWRUN) $(notdir $(subst .in,.run,$<)) -N $${NUMEVENTS:-10000}
if WANT_LIBFASTJET
test-LHC-% : Inputs/LHC-%.in HadronTest.la GammaTest.la HadronJetTest.la
$(HWREAD) $<
$(HWRUN) $(notdir $(subst .in,.run,$<)) -N $${NUMEVENTS:-10000}
else
test-LHC-% : Inputs/LHC-%.in HadronTest.la GammaTest.la
$(HWREAD) $<
$(HWRUN) $(notdir $(subst .in,.run,$<)) -N $${NUMEVENTS:-10000}
endif
clean-local:
rm -f *.out *.log *.tex *.top *.run *.dump *.mult *.Bmult *.yoda
diff --git a/Tests/Rivet/LEP/LEP-91.in b/Tests/Rivet/LEP/LEP-91.in
--- a/Tests/Rivet/LEP/LEP-91.in
+++ b/Tests/Rivet/LEP/LEP-91.in
@@ -1,67 +1,68 @@
##################################################
# LEP physics parameters (override defaults)
##################################################
set /Herwig/Generators/EventGenerator:EventHandler:LuminosityFunction:Energy 91.2
##################################################
# select the analyses
##################################################
# Validated
##################################################
# ALEPH charged particle multiplicity
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ALEPH_1991_S2435284
# ALEPH main LEP I QCD summary paper
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ALEPH_1996_S3486095
# ALEPH D*
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ALEPH_1999_S4193598
# OPAL charged hadron analysis
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 OPAL_1994_S2927284
# OPAL Delta++ analysis
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 OPAL_1995_S3198391
# OPAL J/Psi analysis
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 OPAL_1996_S3257789
# ALEPH eta/omega analysis
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ALEPH_2002_S4823664
# OPAL K*0 analysis
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 OPAL_1997_S3608263
# OPAL flavour specific charged multiplicities etc
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 OPAL_1998_S3780481
# OPAL f_0,f_2 and phi production
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 OPAL_1998_S3702294
# OPAL gamma,pi0,eta,eta',rho+/-,a0+/-
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 OPAL_1998_S3749908
# OPAL K0
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 OPAL_2000_S4418603
# SLD flavour specific charged multiplicities etc
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 SLD_1996_S3398250
# SLD flavour specific charged multiplicities etc
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 SLD_1999_S3743934
# SLD flavour specific charged multiplicities etc
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 SLD_2004_S5693039
# OPAL event shapes and multiplicities at different energies
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 OPAL_2004_S6132243
# ALEPH jet and event shapes at many energies
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ALEPH_2004_S5765862
# OPAL/JADE jet rates at many energies
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 JADE_OPAL_2000_S4300807
# DELPHI strange baryon production
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 DELPHI_1995_S3137023
# DELPHI f_0, rho_0 and f_2 production
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 DELPHI_1999_S3960137
# OPAL strange baryon production
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 OPAL_1997_S3396100
# DELPHI tuning paper
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 DELPHI_1996_S3430090
# DELPHI b quark
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 DELPHI_2002_069_CONF_603
+insert /Herwig/Analysis/RivetAnalysis:Analyses 0 DELPHI_2011_I890503
# ALEPH b quark
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ALEPH_2001_S4656318
# SLD b quark
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 SLD_2002_S4869273
# PDG hadron multiplicities and ratios
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 PDG_HADRON_MULTIPLICITIES
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 PDG_HADRON_MULTIPLICITIES_RATIOS
##################################################
# unvalidated
##################################################
# OPAL 4 jet angles
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 OPAL_2001_S4553896
diff --git a/Tests/Rivet/LHC/LHC-7-PromptPhoton-1.in b/Tests/Rivet/LHC/LHC-8-PromptPhoton-1.in
copy from Tests/Rivet/LHC/LHC-7-PromptPhoton-1.in
copy to Tests/Rivet/LHC/LHC-8-PromptPhoton-1.in
--- a/Tests/Rivet/LHC/LHC-7-PromptPhoton-1.in
+++ b/Tests/Rivet/LHC/LHC-8-PromptPhoton-1.in
@@ -1,13 +1,5 @@
##################################################
# select the analyses
##################################################
# ATLAS prompt photon
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ATLAS_2010_S8914702
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ATLAS_2012_I1093738
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ATLAS_2011_I921594
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ATLAS_2013_I1263495
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ATLAS_2013_I1244522
-# CMS
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 CMS_2013_I1258128
-# MC
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 MC_PHOTONJETS
+insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ATLAS_2016_I1457605
diff --git a/Tests/Rivet/LHC/LHC-7-PromptPhoton-1.in b/Tests/Rivet/LHC/LHC-8-PromptPhoton-2.in
copy from Tests/Rivet/LHC/LHC-7-PromptPhoton-1.in
copy to Tests/Rivet/LHC/LHC-8-PromptPhoton-2.in
--- a/Tests/Rivet/LHC/LHC-7-PromptPhoton-1.in
+++ b/Tests/Rivet/LHC/LHC-8-PromptPhoton-2.in
@@ -1,13 +1,5 @@
##################################################
# select the analyses
##################################################
# ATLAS prompt photon
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ATLAS_2010_S8914702
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ATLAS_2012_I1093738
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ATLAS_2011_I921594
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ATLAS_2013_I1263495
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ATLAS_2013_I1244522
-# CMS
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 CMS_2013_I1258128
-# MC
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 MC_PHOTONJETS
+insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ATLAS_2016_I1457605
diff --git a/Tests/Rivet/LHC/LHC-7-PromptPhoton-1.in b/Tests/Rivet/LHC/LHC-8-PromptPhoton-3.in
copy from Tests/Rivet/LHC/LHC-7-PromptPhoton-1.in
copy to Tests/Rivet/LHC/LHC-8-PromptPhoton-3.in
--- a/Tests/Rivet/LHC/LHC-7-PromptPhoton-1.in
+++ b/Tests/Rivet/LHC/LHC-8-PromptPhoton-3.in
@@ -1,13 +1,5 @@
##################################################
# select the analyses
##################################################
# ATLAS prompt photon
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ATLAS_2010_S8914702
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ATLAS_2012_I1093738
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ATLAS_2011_I921594
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ATLAS_2013_I1263495
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ATLAS_2013_I1244522
-# CMS
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 CMS_2013_I1258128
-# MC
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 MC_PHOTONJETS
+insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ATLAS_2016_I1457605
diff --git a/Tests/Rivet/LHC/LHC-7-PromptPhoton-1.in b/Tests/Rivet/LHC/LHC-8-PromptPhoton-4.in
copy from Tests/Rivet/LHC/LHC-7-PromptPhoton-1.in
copy to Tests/Rivet/LHC/LHC-8-PromptPhoton-4.in
--- a/Tests/Rivet/LHC/LHC-7-PromptPhoton-1.in
+++ b/Tests/Rivet/LHC/LHC-8-PromptPhoton-4.in
@@ -1,13 +1,5 @@
##################################################
# select the analyses
##################################################
# ATLAS prompt photon
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ATLAS_2010_S8914702
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ATLAS_2012_I1093738
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ATLAS_2011_I921594
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ATLAS_2013_I1263495
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ATLAS_2013_I1244522
-# CMS
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 CMS_2013_I1258128
-# MC
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 MC_PHOTONJETS
+insert /Herwig/Analysis/RivetAnalysis:Analyses 0 ATLAS_2016_I1457605
diff --git a/Tests/Rivet/TVT/TVT-Run-II-Z-e.in b/Tests/Rivet/TVT/TVT-Run-II-Z-e.in
--- a/Tests/Rivet/TVT/TVT-Run-II-Z-e.in
+++ b/Tests/Rivet/TVT/TVT-Run-II-Z-e.in
@@ -1,21 +1,21 @@
##################################################
# select the analyses
##################################################
# CDF Run II Z+jets
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 CDF_2008_S7540469
# CDF Z+b jets
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 CDF_2008_S8095620
# CDF Run II Z cross section and rapidity
-insert /Herwig/Analysis/RivetAnalysis:Analyses 0 CDF_2009_S8383952
+insert /Herwig/Analysis/RivetAnalysis:Analyses 0 CDF_2009_I856131
# D0 Run II Z cross section and rapidity
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 D0_2007_S7075677
# D0 Run II jet ratios
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 D0_2008_S6879055
# D0 Run II
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 D0_2008_S7554427
# D0 Run II
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 D0_2009_S8202443
# D0 run II
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 D0_2010_S8821313
# CDF run II Z pT
insert /Herwig/Analysis/RivetAnalysis:Analyses 0 CDF_2012_I1124333
\ No newline at end of file
diff --git a/Tests/python/merge-LHC-Photon b/Tests/python/merge-LHC-Photon
--- a/Tests/python/merge-LHC-Photon
+++ b/Tests/python/merge-LHC-Photon
@@ -1,265 +1,274 @@
#! /usr/bin/env python
import logging
import sys
import os, yoda
"""%prog
Script for merging YODA files
"""
def fillAbove(scale,desthisto, sourcehistosbyptmin):
pthigh= 1e100
ptlow =-1e100
for pt, h in sorted(sourcehistosbyptmin.iteritems(),reverse=True):
ptlow=pt
if(type(desthisto)==yoda.core.Scatter2D) :
for i in range(0,h.numPoints) :
xMin = h.points[i].x-h.points[i].xErrs.minus
if( xMin*scale >= ptlow and
xMin*scale < pthigh ) :
desthisto.addPoint(h.points[i])
elif(type(desthisto)==yoda.core.Profile1D) :
for i in range(0,h.numBins) :
if(h.bins[i].xMin*scale >= ptlow and
h.bins[i].xMin*scale < pthigh ) :
desthisto.bins[i] += h.bins[i]
elif(type(desthisto)==yoda.core.Histo1D) :
for i in range(0,h.numBins) :
if(h.bins[i].xMin*scale >= ptlow and
h.bins[i].xMin*scale < pthigh ) :
desthisto.bins[i] += h.bins[i]
else :
logging.error("Can't merge %s, unknown type" % desthisto.path)
sys.exit(1)
pthigh=pt
def mergeByPt(hpath, scale=1.):
global inhistos
global outhistos
try:
fillAbove(scale,outhistos[hpath], inhistos[hpath])
except:
pass
def useOnePt(hpath, ptmin):
global inhistos
global outhistos
try:
## Find best pT_min match
ptmins = inhistos[hpath].keys()
closest_ptmin = None
for ptm in ptmins:
if closest_ptmin is None or \
abs(ptm-float(ptmin)) < abs(closest_ptmin-float(ptmin)):
closest_ptmin = ptm
if closest_ptmin != float(ptmin):
logging.warning("Inexact match for requested pTmin=%s: " % ptmin + \
"using pTmin=%e instead" % closest_ptmin)
outhistos[hpath] = inhistos[hpath][closest_ptmin]
except:
pass
if sys.version_info[:3] < (2,4,0):
print "rivet scripts require Python version >= 2.4.0... exiting"
sys.exit(1)
if __name__ == "__main__":
import logging
from optparse import OptionParser, OptionGroup
parser = OptionParser(usage="%prog aidafile aidafile2 [...]")
parser.add_option("-o", "--out", dest="OUTFILE", default="-")
verbgroup = OptionGroup(parser, "Verbosity control")
verbgroup.add_option("-v", "--verbose", action="store_const", const=logging.DEBUG, dest="LOGLEVEL",
default=logging.INFO, help="print debug (very verbose) messages")
verbgroup.add_option("-q", "--quiet", action="store_const", const=logging.WARNING, dest="LOGLEVEL",
default=logging.INFO, help="be very quiet")
parser.add_option_group(verbgroup)
(opts, args) = parser.parse_args()
logging.basicConfig(level=opts.LOGLEVEL, format="%(message)s")
## Check args
if len(args) < 1:
logging.error("Must specify at least the name of the files")
sys.exit(1)
files=["-7-PromptPhoton-1.yoda","-7-PromptPhoton-2.yoda",
"-7-PromptPhoton-3.yoda","-7-PromptPhoton-4.yoda",
+ "-8-PromptPhoton-1.yoda","-8-PromptPhoton-2.yoda",
+ "-8-PromptPhoton-3.yoda","-8-PromptPhoton-4.yoda",
"-7-DiPhoton-GammaGamma.yoda","-7-DiPhoton-GammaJet.yoda","-GammaGamma-7.yoda"]
## Get histos
inhistos = {}
outhistos={}
for f in files:
file=args[0]+f
if not os.access(file, os.R_OK):
logging.error("%s can not be read" % file)
continue
try:
aos = yoda.read(file)
except:
logging.error("%s can not be parsed as XML" % file)
break
if(file.find("PromptPhoton")>=0) :
- if(file.find("-7-PromptPhoton-1")>0) :
+ if(file.find("PromptPhoton-1")>0) :
ptmin=0.
- elif(file.find("-7-PromptPhoton-2")>0) :
+ elif(file.find("PromptPhoton-2")>0) :
ptmin=35.
- elif(file.find("-7-PromptPhoton-3")>0) :
+ elif(file.find("PromptPhoton-3")>0) :
ptmin=90.
- elif(file.find("-7-PromptPhoton-4")>0) :
+ elif(file.find("PromptPhoton-4")>0) :
ptmin=170.
## Get histos from this YODA file
for aopath, ao in aos.iteritems() :
if not inhistos.has_key(aopath):
inhistos[aopath] = {}
if (aopath.find("CMS_2013_I1258128")>0) :
if(aopath.find("d05")>0 or aopath.find("d06")>0 or
aopath.find("d07")>0 or aopath.find("d08")>0) :
inhistos[aopath][ptmin] = ao
else :
inhistos[aopath][ptmin] = ao
else :
## Get histos from this YODA file
for aopath, ao in aos.iteritems() :
if(aopath.find("XSEC")>=0 or aopath.find("EVTCOUNT")>=0) : continue
print aopath
if ( aopath in outhistos ) :
aotype = type(ao)
if aotype in (yoda.Counter, yoda.Histo1D, yoda.Histo2D, yoda.Profile1D, yoda.Profile2D):
outhistos[aopath] += ao
else :
quit()
else:
outhistos[aopath] = ao
for hpath,hsets in inhistos.iteritems():
if( hpath.find("1263495")>0 or
hpath.find("1093738")>0 or
hpath.find("921594")>0 or
hpath.find("8914702")>0 or
- hpath.find("1244522")>0 ) :
+ hpath.find("1244522")>0 or
+ hpath.find("1457605")>0 ) :
if(type(hsets.values()[0])==yoda.core.Scatter2D) :
outhistos[hpath] = yoda.core.Scatter2D(hsets.values()[0].path,
hsets.values()[0].title)
elif(type(hsets.values()[0])==yoda.core.Profile1D) :
outhistos[hpath] = yoda.core.Profile1D(hsets.values()[0].path,
hsets.values()[0].title)
for i in range(0,hsets.values()[0].numBins) :
outhistos[hpath].addBin(hsets.values()[0].bins[i].xMin,
hsets.values()[0].bins[i].xMax)
elif(type(hsets.values()[0])==yoda.core.Histo1D) :
outhistos[hpath] = yoda.core.Histo1D(hsets.values()[0].path,
hsets.values()[0].title)
for i in range(0,hsets.values()[0].numBins) :
outhistos[hpath].addBin(hsets.values()[0].bins[i].xMin,
hsets.values()[0].bins[i].xMax)
else :
logging.error("Histogram %s is of unknown type" % hpath)
print hpath,type(hsets.values()[0])
sys.exit(1)
logging.info("Processing ATLAS_2013_I1263495")
mergeByPt("/ATLAS_2013_I1263495/d01-x01-y01")
mergeByPt("/ATLAS_2013_I1263495/d01-x01-y03")
useOnePt("/ATLAS_2013_I1263495/d01-x02-y01", "90" )
logging.info("Processing ATLAS_2012_I1093738")
mergeByPt("/ATLAS_2012_I1093738/d01-x01-y01")
mergeByPt("/ATLAS_2012_I1093738/d02-x01-y01")
mergeByPt("/ATLAS_2012_I1093738/d03-x01-y01")
mergeByPt("/ATLAS_2012_I1093738/d04-x01-y01")
mergeByPt("/ATLAS_2012_I1093738/d05-x01-y01")
mergeByPt("/ATLAS_2012_I1093738/d06-x01-y01")
logging.info("Processing ATLAS_2011_I921594")
mergeByPt("/ATLAS_2011_I921594/d01-x01-y01")
mergeByPt("/ATLAS_2011_I921594/d01-x01-y02")
mergeByPt("/ATLAS_2011_I921594/d01-x01-y04")
mergeByPt("/ATLAS_2011_I921594/d01-x01-y05")
logging.info("Processing ATLAS_2010_S8914702")
mergeByPt("/ATLAS_2010_S8914702/d01-x01-y01")
mergeByPt("/ATLAS_2010_S8914702/d01-x01-y02")
mergeByPt("/ATLAS_2010_S8914702/d01-x01-y03")
logging.info("Processing CMS_2013_I1258128")
useOnePt("/CMS_2013_I1258128/d05-x01-y01", "35" )
useOnePt("/CMS_2013_I1258128/d06-x01-y01", "35" )
useOnePt("/CMS_2013_I1258128/d07-x01-y01", "35" )
useOnePt("/CMS_2013_I1258128/d08-x01-y01", "35" )
logging.info("Processing ATLAS_2013_I1244522")
mergeByPt("/ATLAS_2013_I1244522/d01-x01-y01")
mergeByPt("/ATLAS_2013_I1244522/d02-x01-y01")
useOnePt("/ATLAS_2013_I1244522/d03-x01-y01", "35" )
useOnePt("/ATLAS_2013_I1244522/d04-x01-y01", "35" )
useOnePt("/ATLAS_2013_I1244522/d05-x01-y01", "35" )
useOnePt("/ATLAS_2013_I1244522/d06-x01-y01", "35" )
useOnePt("/ATLAS_2013_I1244522/d07-x01-y01", "35" )
+logging.info("Processing ATLAS_2016_I1457605")
+mergeByPt("/ATLAS_2016_I1457605/d01-x01-y01")
+mergeByPt("/ATLAS_2016_I1457605/d02-x01-y01")
+mergeByPt("/ATLAS_2016_I1457605/d03-x01-y01")
+mergeByPt("/ATLAS_2016_I1457605/d04-x01-y01")
+
logging.info("Processing /MC_PHOTONJETS")
useOnePt("/MC_PHOTONJETS/jet_HT","0")
useOnePt("/MC_PHOTONJETS/jet_eta_1","0")
useOnePt("/MC_PHOTONJETS/jet_eta_2","0")
useOnePt("/MC_PHOTONJETS/jet_eta_3","0")
useOnePt("/MC_PHOTONJETS/jet_eta_4","0")
useOnePt("/MC_PHOTONJETS/jet_eta_pmratio_1","0")
useOnePt("/MC_PHOTONJETS/jet_eta_pmratio_2","0")
useOnePt("/MC_PHOTONJETS/jet_eta_pmratio_3","0")
useOnePt("/MC_PHOTONJETS/jet_eta_pmratio_4","0")
useOnePt("/MC_PHOTONJETS/jet_mass_1","0")
useOnePt("/MC_PHOTONJETS/jet_mass_2","0")
useOnePt("/MC_PHOTONJETS/jet_mass_3","0")
useOnePt("/MC_PHOTONJETS/jet_mass_4","0")
useOnePt("/MC_PHOTONJETS/jet_multi_exclusive","0")
useOnePt("/MC_PHOTONJETS/jet_multi_inclusive","0")
useOnePt("/MC_PHOTONJETS/jet_multi_ratio","0")
useOnePt("/MC_PHOTONJETS/jet_pT_1","0")
useOnePt("/MC_PHOTONJETS/jet_pT_2","0")
useOnePt("/MC_PHOTONJETS/jet_pT_3","0")
useOnePt("/MC_PHOTONJETS/jet_pT_4","0")
useOnePt("/MC_PHOTONJETS/jet_y_1","0")
useOnePt("/MC_PHOTONJETS/jet_y_2","0")
useOnePt("/MC_PHOTONJETS/jet_y_3","0")
useOnePt("/MC_PHOTONJETS/jet_y_4","0")
useOnePt("/MC_PHOTONJETS/jet_y_pmratio_1","0")
useOnePt("/MC_PHOTONJETS/jet_y_pmratio_2","0")
useOnePt("/MC_PHOTONJETS/jet_y_pmratio_3","0")
useOnePt("/MC_PHOTONJETS/jet_y_pmratio_4","0")
useOnePt("/MC_PHOTONJETS/jets_dR_12","0")
useOnePt("/MC_PHOTONJETS/jets_dR_13","0")
useOnePt("/MC_PHOTONJETS/jets_dR_23","0")
useOnePt("/MC_PHOTONJETS/jets_deta_12","0")
useOnePt("/MC_PHOTONJETS/jets_deta_13","0")
useOnePt("/MC_PHOTONJETS/jets_deta_23","0")
useOnePt("/MC_PHOTONJETS/jets_dphi_12","0")
useOnePt("/MC_PHOTONJETS/jets_dphi_13","0")
useOnePt("/MC_PHOTONJETS/jets_dphi_23","0")
useOnePt("/MC_PHOTONJETS/photon_jet1_dR","0")
useOnePt("/MC_PHOTONJETS/photon_jet1_deta","0")
useOnePt("/MC_PHOTONJETS/photon_jet1_dphi","0")
useOnePt("/MC_PHOTONJETUE/gammajet-dR","0")
useOnePt("/MC_PHOTONJETUE/gammajet-dphi","0")
useOnePt("/MC_PHOTONJETUE/trans-maxnchg-gamma","0")
useOnePt("/MC_PHOTONJETUE/trans-maxnchg-jet","0")
useOnePt("/MC_PHOTONJETUE/trans-maxptsum-gamma","0")
useOnePt("/MC_PHOTONJETUE/trans-maxptsum-jet","0")
useOnePt("/MC_PHOTONJETUE/trans-minnchg-gamma","0")
useOnePt("/MC_PHOTONJETUE/trans-minnchg-jet","0")
useOnePt("/MC_PHOTONJETUE/trans-minptsum-gamma","0")
useOnePt("/MC_PHOTONJETUE/trans-minptsum-jet","0")
useOnePt("/MC_PHOTONJETUE/trans-nchg-gamma","0")
useOnePt("/MC_PHOTONJETUE/trans-nchg-jet","0")
useOnePt("/MC_PHOTONJETUE/trans-ptavg-gamma","0")
useOnePt("/MC_PHOTONJETUE/trans-ptavg-jet","0")
useOnePt("/MC_PHOTONJETUE/trans-ptsum-gamma","0")
useOnePt("/MC_PHOTONJETUE/trans-ptsum-jet","0")
# Choose output file
yoda.writeYODA(outhistos,opts.OUTFILE)
sys.exit(0)
diff --git a/Utilities/Makefile.am b/Utilities/Makefile.am
--- a/Utilities/Makefile.am
+++ b/Utilities/Makefile.am
@@ -1,50 +1,51 @@
SUBDIRS = XML Statistics
noinst_LTLIBRARIES = libHwUtils.la
pkglib_LTLIBRARIES = libHwRunDirectories.la
libHwUtils_la_SOURCES = \
EnumParticles.h \
Interpolator.tcc Interpolator.h \
Kinematics.cc Kinematics.h \
Maths.h Maths.cc \
StandardSelectors.cc StandardSelectors.h\
Histogram.cc Histogram.fh Histogram.h \
GaussianIntegrator.cc GaussianIntegrator.h \
GaussianIntegrator.tcc \
Statistic.h HerwigStrategy.cc HerwigStrategy.h \
GSLIntegrator.h GSLIntegrator.tcc \
-GSLBisection.h GSLBisection.tcc GSLHelper.h
+GSLBisection.h GSLBisection.tcc GSLHelper.h \
+expm-1.h
nodist_libHwUtils_la_SOURCES = hgstamp.inc
BUILT_SOURCES = hgstamp.inc
CLEANFILES = hgstamp.inc
AUTOMAKE_OPTIONS = -Wno-portability
HGVERSION := $(shell hg -R $(top_srcdir) parents --template '"Herwig {node|short} ({branch})"' 2> /dev/null || echo \"$(PACKAGE_STRING)\" || true )
.PHONY: update_hgstamp
hgstamp.inc: update_hgstamp
@[ -f $@ ] || touch $@
@echo '$(HGVERSION)' | cmp -s $@ - || echo '$(HGVERSION)' > $@
libHwUtils_la_LIBADD = \
XML/libHwXML.la \
Statistics/libHwStatistics.la
libHwRunDirectories_la_SOURCES = \
RunDirectories.h RunDirectories.cc
libHwRunDirectories_la_LDFLAGS = $(AM_LDFLAGS) -version-info 1:0:0
check_PROGRAMS = utilities_test
utilities_test_SOURCES = \
tests/utilitiesTestsMain.cc \
tests/utilitiesTestsGlobalFixture.h \
tests/utilitiesTestsKinematics.h \
tests/utilitiesTestMaths.h \
tests/utilitiesTestsStatistic.h
utilities_test_LDADD = $(BOOST_UNIT_TEST_FRAMEWORK_LIBS) $(BOOST_FILESYSTEM_LIBS) $(BOOST_SYSTEM_LIBS) $(THEPEGLIB) -ldl libHwUtils.la
utilities_test_LDFLAGS = $(AM_LDFLAGS) -export-dynamic $(BOOST_UNIT_TEST_FRAMEWORK_LDFLAGS) $(BOOST_SYSTEM_LDFLAGS) $(BOOST_FILESYSTEM_LDFLAGS) $(THEPEGLDFLAGS)
utilities_test_CPPFLAGS = $(AM_CPPFLAGS) $(BOOST_CPPFLAGS) -DHERWIG_PKGDATADIR="\"$(pkgdatadir)\"" -DHERWIG_PKGLIBDIR="\"$(pkglibdir)\"" -DTHEPEG_PKGLIBDIR="\"$(THEPEGLIBPATH)\""
TESTS = utilities_test
diff --git a/Utilities/expm-1.h b/Utilities/expm-1.h
new file mode 100644
--- /dev/null
+++ b/Utilities/expm-1.h
@@ -0,0 +1,147 @@
+//
+// Copyright (c) 2007
+// Tsai, Dung-Bang
+// National Taiwan University, Department of Physics
+//
+// E-Mail : dbtsai (at) gmail.com
+// Begin : 2007/11/20
+// Last modified : 2007/11/22
+// Version : v0.1
+//
+// EXPM_PAD computes the matrix exponential exp(H) for general matrices,
+// including complex and real matrices, using the irreducible (p,p)-degree
+// rational Pade approximation to the exponential,
+// exp(z) = r(z) = (+/-)( I + 2*(Q(z)/P(z)) ).
+//
+// Usage :
+//
+// U = expm_pad(H)
+// U = expm_pad(H, p)
+//
+// where p is internally set to 6 (recommended and generally satisfactory).
+//
+// See also the MATLAB-supplied functions EXPM and EXPM1.
+//
+// Reference :
+// EXPOKIT, Software Package for Computing Matrix Exponentials.
+// ACM - Transactions On Mathematical Software, 24(1):130-156, 1998
+//
+// Permission to use, copy, modify, distribute and sell this software
+// and its documentation for any purpose is hereby granted without fee,
+// provided that the above copyright notice appear in all copies and
+// that both that copyright notice and this permission notice appear
+// in supporting documentation. The authors make no representations
+// about the suitability of this software for any purpose.
+// It is provided "as is" without express or implied warranty.
+//
+
+#ifndef _BOOST_UBLAS_EXPM_
+#define _BOOST_UBLAS_EXPM_
+#include <complex>
+#include <boost/numeric/ublas/vector.hpp>
+#include <boost/numeric/ublas/matrix.hpp>
+#include <boost/numeric/ublas/lu.hpp>
+
+namespace boost { namespace numeric { namespace ublas {
+
+template<typename MATRIX> MATRIX expm_pad(const MATRIX &H, const int p = 6) {
+ typedef typename MATRIX::value_type value_type;
+ typedef typename MATRIX::size_type size_type;
+ typedef double real_value_type; // assumes a real (double) scalar type; adjust for other value types
+ assert(H.size1() == H.size2());
+ const size_type n = H.size1();
+ const identity_matrix<value_type> I(n);
+ matrix<value_type> U(n,n),H2(n,n),P(n,n),Q(n,n);
+ real_value_type norm = 0.0;
+
+ // Calculate Pade coefficients (1-based instead of 0-based as in the c vector)
+ vector<real_value_type> c(p+2);
+ c(1)=1;
+ for(size_type i = 1; i <= p; ++i)
+ c(i+1) = c(i) * ((p + 1.0 - i)/(i * (2.0 * p + 1 - i)));
+ // Calculate the infinity norm of H, which is defined as the largest row sum of a matrix
+ for(size_type i=0; i<n; ++i)
+ {
+ real_value_type temp = 0.0;
+ for(size_type j=0;j<n;j++)
+ temp += std::abs<real_value_type>(H(j,i)); // for complex H, std::abs gives the modulus
+ norm = std::max<real_value_type>(norm, temp);
+ }
+ if (norm == 0.0)
+ {
+ std::cerr<<"Error! Null input in the routine EXPM_PAD.\n";
+ boost::throw_exception(boost::numeric::ublas::bad_argument());
+ }
+ // Scaling, seek s such that || H*2^(-s) || < 1/2, and set scale = 2^(-s)
+ int s = 0;
+ real_value_type scale = 1.0;
+ if(norm > 0.5) {
+ s = std::max<int>(0, static_cast<int>((log(norm) / log(2.0) + 2.0)));
+ scale /= static_cast<real_value_type>(std::pow(2.0, s));
+ U.assign(scale * H); // U is used as a temporary here because H is const
+ }
+ else
+ U.assign(H);
+ // Horner evaluation of the irreducible fraction; see the reference above.
+ // Initialise P (numerator) and Q (denominator)
+ H2.assign( prod(U, U) );
+ Q.assign( c(p+1)*I );
+ P.assign( c(p)*I );
+ size_type odd = 1;
+ for( size_type k = p - 1; k > 0; --k)
+ {
+ if( odd == 1)
+ {
+ Q = ( prod(Q, H2) + c(k) * I );
+ }
+ else
+ {
+ P = ( prod(P, H2) + c(k) * I );
+ }
+ odd = 1 - odd;
+ }
+ if( odd == 1)
+ {
+ Q = ( prod(Q, U) );
+ Q -= P ;
+ //U.assign( -(I + 2*(Q\P)));
+ }
+ else
+ {
+ P = (prod(P, U));
+ Q -= P;
+ //U.assign( I + 2*(Q\P));
+ }
+ // In the original EXPOKIT package, LAPACK's ZGESV is used to obtain the inverse matrix,
+ // and that ZGESV routine uses an LU decomposition to do so.
+ // Since ublas provides no matrix inversion template, I simply use the built-in
+ // LU decomposition package in ublas and back-substitute myself.
+ //
+ //////////////// Implement Matrix Inversion ///////////////////////
+ permutation_matrix<size_type> pm(n);
+ int res = lu_factorize(Q, pm);
+ if( res != 0)
+ {
+ std::cerr << "Error in the matrix inversion in template expm_pad.\n";
+ exit(0);
+ }
+ H2 = I; // H2 is not needed anymore, so it is temporarily reused as the identity matrix for the substitution.
+
+ lu_substitute(Q, pm, H2);
+ if( odd == 1)
+ U.assign( -(I + 2.0 * prod(H2, P)));
+ else
+ U.assign( I + 2.0 * prod(H2, P));
+ // Squaring
+ for(size_t i = 0; i < s; ++i)
+ {
+ U = (prod(U,U));
+ }
+ return U;
+ }
+
+}}}
+
+
+#endif
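
A minimal usage sketch for the expm_pad template added above; this is only an illustration, assuming the Boost.uBLAS headers and Utilities/expm-1.h are on the include path, and the 2x2 matrix is chosen purely for demonstration.

#include <iostream>
#include <boost/numeric/ublas/matrix.hpp>
#include <boost/numeric/ublas/io.hpp>
#include "expm-1.h"

int main() {
  using boost::numeric::ublas::matrix;
  // H generates a rotation, so exp(H) should be close to
  // [[cos 1, sin 1], [-sin 1, cos 1]].
  matrix<double> H(2, 2);
  H(0, 0) = 0.0;  H(0, 1) = 1.0;
  H(1, 0) = -1.0; H(1, 1) = 0.0;
  // Default Pade degree is p = 6; expm_pad(H, p) selects a different degree.
  matrix<double> U = boost::numeric::ublas::expm_pad(H);
  std::cout << U << std::endl;
  return 0;
}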
diff --git a/src/Matchbox/PQCDLevel.in b/src/Matchbox/PQCDLevel.in
--- a/src/Matchbox/PQCDLevel.in
+++ b/src/Matchbox/PQCDLevel.in
@@ -1,13 +1,13 @@
# -*- ThePEG-repository -*-
set /Herwig/DipoleShower/DipoleShowerHandler:MPIHandler NULL
set /Herwig/Shower/ShowerHandler:MPIHandler NULL
set /Herwig/Shower/PowhegShowerHandler:MPIHandler NULL
-set /Herwig/Shower/ShowerHandler:Interactions QCDOnly
+set /Herwig/Shower/ShowerHandler:Interactions QCD
cd /Herwig/EventHandlers
set EventHandler:HadronizationHandler NULL
set EventHandler:DecayHandler NULL
set /Herwig/Analysis/Basics:CheckQuark No
diff --git a/src/defaults/MatchboxDefaults.in.in b/src/defaults/MatchboxDefaults.in.in
--- a/src/defaults/MatchboxDefaults.in.in
+++ b/src/defaults/MatchboxDefaults.in.in
@@ -1,781 +1,781 @@
# -*- ThePEG-repository -*-
################################################################################
#
# Default setup for Matchbox matrix element generation.
# You do not need to make any changes here; the processes of
# interest can be chosen in the standard input files.
#
################################################################################
################################################################################
# Load libraries
################################################################################
library JetCuts.so
library FastJetFinder.so
library HwMatchboxScales.so
library HwMatchboxCuts.so
library HwSampling.so
library HwColorFull.so
library HwMatchboxBuiltin.so
################################################################################
# Integration/sampling
################################################################################
mkdir /Herwig/Samplers
cd /Herwig/Samplers
create Herwig::BinSampler FlatBinSampler
set FlatBinSampler:InitialPoints 1000
set FlatBinSampler:UseAllIterations No
create Herwig::CellGridSampler CellGridSampler
set CellGridSampler:InitialPoints 10000
set CellGridSampler:ExplorationPoints 500
set CellGridSampler:ExplorationSteps 4
set CellGridSampler:Gain 0.3
set CellGridSampler:Epsilon 1.0
set CellGridSampler:MinimumSelection 0.000001
set CellGridSampler:NIterations 1
set CellGridSampler:EnhancementFactor 1
set CellGridSampler:UseAllIterations No
set CellGridSampler:RemapperPoints 50000
set CellGridSampler:RemapperMinSelection 0.00001
set CellGridSampler:RemapChannelDimension Yes
set CellGridSampler:LuminosityMapperBins 20
set CellGridSampler:GeneralMapperBins 0
set CellGridSampler:HalfPoints No
set CellGridSampler:MaxNewMax 30
set CellGridSampler:NonZeroInPresampling Yes
create Herwig::MonacoSampler MonacoSampler
set MonacoSampler:InitialPoints 15000
set MonacoSampler:NIterations 4
set MonacoSampler:EnhancementFactor 1.2
set MonacoSampler:UseAllIterations No
set MonacoSampler:RemapChannelDimension No
set MonacoSampler:LuminosityMapperBins 0
set MonacoSampler:HalfPoints No
set MonacoSampler:MaxNewMax 30
set MonacoSampler:NonZeroInPresampling Yes
create Herwig::GeneralSampler Sampler
set Sampler:UpdateAfter 1000
set Sampler:BinSampler CellGridSampler
set Sampler:AddUpSamplers Off
set Sampler:GlobalMaximumWeight Off
set Sampler:FlatSubprocesses Off
set Sampler:MinSelection 0.000001
set Sampler:AlmostUnweighted Off
set Sampler:RunCombinationData Off
set Sampler:WriteGridsOnFinish No
set Sampler:MaxEnhancement 1.1
################################################################################
# Setup the factory object
################################################################################
mkdir /Herwig/MatrixElements/Matchbox
cd /Herwig/MatrixElements/Matchbox
create Herwig::MatchboxFactory Factory
do Factory:StartParticleGroup p
insert Factory:ParticleGroup 0 /Herwig/Particles/b
insert Factory:ParticleGroup 0 /Herwig/Particles/bbar
insert Factory:ParticleGroup 0 /Herwig/Particles/c
insert Factory:ParticleGroup 0 /Herwig/Particles/cbar
insert Factory:ParticleGroup 0 /Herwig/Particles/s
insert Factory:ParticleGroup 0 /Herwig/Particles/sbar
insert Factory:ParticleGroup 0 /Herwig/Particles/d
insert Factory:ParticleGroup 0 /Herwig/Particles/dbar
insert Factory:ParticleGroup 0 /Herwig/Particles/u
insert Factory:ParticleGroup 0 /Herwig/Particles/ubar
insert Factory:ParticleGroup 0 /Herwig/Particles/g
do Factory:EndParticleGroup
do Factory:StartParticleGroup pbar
insert Factory:ParticleGroup 0 /Herwig/Particles/b
insert Factory:ParticleGroup 0 /Herwig/Particles/bbar
insert Factory:ParticleGroup 0 /Herwig/Particles/c
insert Factory:ParticleGroup 0 /Herwig/Particles/cbar
insert Factory:ParticleGroup 0 /Herwig/Particles/s
insert Factory:ParticleGroup 0 /Herwig/Particles/sbar
insert Factory:ParticleGroup 0 /Herwig/Particles/d
insert Factory:ParticleGroup 0 /Herwig/Particles/dbar
insert Factory:ParticleGroup 0 /Herwig/Particles/u
insert Factory:ParticleGroup 0 /Herwig/Particles/ubar
insert Factory:ParticleGroup 0 /Herwig/Particles/g
do Factory:EndParticleGroup
do Factory:StartParticleGroup j
insert Factory:ParticleGroup 0 /Herwig/Particles/b
insert Factory:ParticleGroup 0 /Herwig/Particles/bbar
insert Factory:ParticleGroup 0 /Herwig/Particles/c
insert Factory:ParticleGroup 0 /Herwig/Particles/cbar
insert Factory:ParticleGroup 0 /Herwig/Particles/s
insert Factory:ParticleGroup 0 /Herwig/Particles/sbar
insert Factory:ParticleGroup 0 /Herwig/Particles/d
insert Factory:ParticleGroup 0 /Herwig/Particles/dbar
insert Factory:ParticleGroup 0 /Herwig/Particles/u
insert Factory:ParticleGroup 0 /Herwig/Particles/ubar
insert Factory:ParticleGroup 0 /Herwig/Particles/g
do Factory:EndParticleGroup
do Factory:StartParticleGroup u
insert Factory:ParticleGroup 0 /Herwig/Particles/u
do Factory:EndParticleGroup
do Factory:StartParticleGroup ubar
insert Factory:ParticleGroup 0 /Herwig/Particles/ubar
do Factory:EndParticleGroup
do Factory:StartParticleGroup d
insert Factory:ParticleGroup 0 /Herwig/Particles/d
do Factory:EndParticleGroup
do Factory:StartParticleGroup dbar
insert Factory:ParticleGroup 0 /Herwig/Particles/dbar
do Factory:EndParticleGroup
do Factory:StartParticleGroup s
insert Factory:ParticleGroup 0 /Herwig/Particles/s
do Factory:EndParticleGroup
do Factory:StartParticleGroup sbar
insert Factory:ParticleGroup 0 /Herwig/Particles/sbar
do Factory:EndParticleGroup
do Factory:StartParticleGroup c
insert Factory:ParticleGroup 0 /Herwig/Particles/c
do Factory:EndParticleGroup
do Factory:StartParticleGroup cbar
insert Factory:ParticleGroup 0 /Herwig/Particles/cbar
do Factory:EndParticleGroup
do Factory:StartParticleGroup b
insert Factory:ParticleGroup 0 /Herwig/Particles/b
do Factory:EndParticleGroup
do Factory:StartParticleGroup bbar
insert Factory:ParticleGroup 0 /Herwig/Particles/bbar
do Factory:EndParticleGroup
do Factory:StartParticleGroup t
insert Factory:ParticleGroup 0 /Herwig/Particles/t
do Factory:EndParticleGroup
do Factory:StartParticleGroup tbar
insert Factory:ParticleGroup 0 /Herwig/Particles/tbar
do Factory:EndParticleGroup
do Factory:StartParticleGroup g
insert Factory:ParticleGroup 0 /Herwig/Particles/g
do Factory:EndParticleGroup
do Factory:StartParticleGroup gamma
insert Factory:ParticleGroup 0 /Herwig/Particles/gamma
do Factory:EndParticleGroup
do Factory:StartParticleGroup h0
insert Factory:ParticleGroup 0 /Herwig/Particles/h0
do Factory:EndParticleGroup
do Factory:StartParticleGroup W+
insert Factory:ParticleGroup 0 /Herwig/Particles/W+
do Factory:EndParticleGroup
do Factory:StartParticleGroup W-
insert Factory:ParticleGroup 0 /Herwig/Particles/W-
do Factory:EndParticleGroup
do Factory:StartParticleGroup Z0
insert Factory:ParticleGroup 0 /Herwig/Particles/Z0
do Factory:EndParticleGroup
do Factory:StartParticleGroup e+
insert Factory:ParticleGroup 0 /Herwig/Particles/e+
do Factory:EndParticleGroup
do Factory:StartParticleGroup e-
insert Factory:ParticleGroup 0 /Herwig/Particles/e-
do Factory:EndParticleGroup
do Factory:StartParticleGroup mu+
insert Factory:ParticleGroup 0 /Herwig/Particles/mu+
do Factory:EndParticleGroup
do Factory:StartParticleGroup mu-
insert Factory:ParticleGroup 0 /Herwig/Particles/mu-
do Factory:EndParticleGroup
do Factory:StartParticleGroup tau+
insert Factory:ParticleGroup 0 /Herwig/Particles/tau+
do Factory:EndParticleGroup
do Factory:StartParticleGroup tau-
insert Factory:ParticleGroup 0 /Herwig/Particles/tau-
do Factory:EndParticleGroup
do Factory:StartParticleGroup nu_e
insert Factory:ParticleGroup 0 /Herwig/Particles/nu_e
do Factory:EndParticleGroup
do Factory:StartParticleGroup nu_mu
insert Factory:ParticleGroup 0 /Herwig/Particles/nu_mu
do Factory:EndParticleGroup
do Factory:StartParticleGroup nu_tau
insert Factory:ParticleGroup 0 /Herwig/Particles/nu_tau
do Factory:EndParticleGroup
do Factory:StartParticleGroup nu_ebar
insert Factory:ParticleGroup 0 /Herwig/Particles/nu_ebar
do Factory:EndParticleGroup
do Factory:StartParticleGroup nu_mubar
insert Factory:ParticleGroup 0 /Herwig/Particles/nu_mubar
do Factory:EndParticleGroup
do Factory:StartParticleGroup nu_taubar
insert Factory:ParticleGroup 0 /Herwig/Particles/nu_taubar
do Factory:EndParticleGroup
do Factory:StartParticleGroup l
insert Factory:ParticleGroup 0 /Herwig/Particles/e+
insert Factory:ParticleGroup 0 /Herwig/Particles/mu+
insert Factory:ParticleGroup 0 /Herwig/Particles/e-
insert Factory:ParticleGroup 0 /Herwig/Particles/mu-
do Factory:EndParticleGroup
do Factory:StartParticleGroup nu
insert Factory:ParticleGroup 0 /Herwig/Particles/nu_e
insert Factory:ParticleGroup 0 /Herwig/Particles/nu_mu
insert Factory:ParticleGroup 0 /Herwig/Particles/nu_ebar
insert Factory:ParticleGroup 0 /Herwig/Particles/nu_mubar
do Factory:EndParticleGroup
do Factory:StartParticleGroup l+
insert Factory:ParticleGroup 0 /Herwig/Particles/e+
insert Factory:ParticleGroup 0 /Herwig/Particles/mu+
do Factory:EndParticleGroup
do Factory:StartParticleGroup l-
insert Factory:ParticleGroup 0 /Herwig/Particles/e-
insert Factory:ParticleGroup 0 /Herwig/Particles/mu-
do Factory:EndParticleGroup
################################################################################
# Default settings for hard process widths
################################################################################
set /Herwig/Particles/mu+:HardProcessWidth 0*GeV
set /Herwig/Particles/mu-:HardProcessWidth 0*GeV
set /Herwig/Particles/tau+:HardProcessWidth 0*GeV
set /Herwig/Particles/tau-:HardProcessWidth 0*GeV
################################################################################
# Setup amplitudes
################################################################################
cd /Herwig/MatrixElements/Matchbox
mkdir Amplitudes
cd Amplitudes
create ColorFull::TraceBasis TraceBasis
create Herwig::MatchboxHybridAmplitude GenericProcesses
@LOAD_MADGRAPH@ HwMatchboxMadGraph.so
@CREATE_MADGRAPH@ Herwig::MadGraphAmplitude MadGraph
@SET_MADGRAPH@ MadGraph:ColourBasis TraceBasis
@LOAD_GOSAM@ HwMatchboxGoSam.so
@CREATE_GOSAM@ Herwig::GoSamAmplitude GoSam
@LOAD_NJET@ HwMatchboxNJet.so
@CREATE_NJET@ Herwig::NJetsAmplitude NJet
@DO_NJET@ NJet:Massless 5
@DO_NJET@ NJet:Massless -5
@LOAD_OPENLOOPS@ HwMatchboxOpenLoops.so
@CREATE_OPENLOOPS@ Herwig::OpenLoopsAmplitude OpenLoops
@LOAD_VBFNLO@ HwMatchboxVBFNLO.so
@CREATE_VBFNLO@ Herwig::VBFNLOAmplitude VBFNLO
mkdir Builtin
cd Builtin
create Herwig::SimpleColourBasis SimpleColourBasis
create Herwig::SimpleColourBasis2 SimpleColourBasis2
create Herwig::MatchboxAmplitudellbarqqbar Amplitudellbarqqbar
set Amplitudellbarqqbar:ColourBasis SimpleColourBasis
create Herwig::MatchboxAmplitudellbarqqbarg Amplitudellbarqqbarg
set Amplitudellbarqqbarg:ColourBasis SimpleColourBasis
create Herwig::MatchboxAmplitudellbarqqbargg Amplitudellbarqqbargg
set Amplitudellbarqqbargg:ColourBasis SimpleColourBasis
create Herwig::MatchboxAmplitudellbarqqbarqqbar Amplitudellbarqqbarqqbar
set Amplitudellbarqqbarqqbar:ColourBasis SimpleColourBasis
create Herwig::MatchboxAmplitudelnuqqbar Amplitudelnuqqbar
set Amplitudelnuqqbar:ColourBasis SimpleColourBasis
create Herwig::MatchboxAmplitudelnuqqbarg Amplitudelnuqqbarg
set Amplitudelnuqqbarg:ColourBasis SimpleColourBasis
create Herwig::MatchboxAmplitudelnuqqbargg Amplitudelnuqqbargg
set Amplitudelnuqqbargg:ColourBasis SimpleColourBasis
create Herwig::MatchboxAmplitudelnuqqbarqqbar Amplitudelnuqqbarqqbar
set Amplitudelnuqqbarqqbar:ColourBasis SimpleColourBasis
create Herwig::MatchboxAmplitudehgg Amplitudehgg
set Amplitudehgg:ColourBasis SimpleColourBasis
create Herwig::MatchboxAmplitudehggg Amplitudehggg
set Amplitudehggg:ColourBasis SimpleColourBasis
create Herwig::MatchboxAmplitudehqqbarg Amplitudehqqbarg
set Amplitudehqqbarg:ColourBasis SimpleColourBasis
create Herwig::MatchboxAmplitudeqqbarttbar Amplitudeqqbarttbar
set Amplitudeqqbarttbar:ColourBasis SimpleColourBasis2
create Herwig::MatchboxAmplitudeqqbarttbarg Amplitudeqqbarttbarg
set Amplitudeqqbarttbarg:ColourBasis SimpleColourBasis2
create Herwig::MatchboxAmplitudeggttbar Amplitudeggttbar
set Amplitudeggttbar:ColourBasis SimpleColourBasis2
create Herwig::MatchboxAmplitudeggttbarg Amplitudeggttbarg
set Amplitudeggttbarg:ColourBasis SimpleColourBasis2
insert /Herwig/MatrixElements/Matchbox/Factory:Amplitudes 0 Amplitudellbarqqbar
insert /Herwig/MatrixElements/Matchbox/Factory:Amplitudes 0 Amplitudellbarqqbarg
insert /Herwig/MatrixElements/Matchbox/Factory:Amplitudes 0 Amplitudellbarqqbargg
insert /Herwig/MatrixElements/Matchbox/Factory:Amplitudes 0 Amplitudellbarqqbarqqbar
insert /Herwig/MatrixElements/Matchbox/Factory:Amplitudes 0 Amplitudelnuqqbar
insert /Herwig/MatrixElements/Matchbox/Factory:Amplitudes 0 Amplitudelnuqqbarg
insert /Herwig/MatrixElements/Matchbox/Factory:Amplitudes 0 Amplitudelnuqqbargg
insert /Herwig/MatrixElements/Matchbox/Factory:Amplitudes 0 Amplitudelnuqqbarqqbar
insert /Herwig/MatrixElements/Matchbox/Factory:Amplitudes 0 Amplitudehgg
insert /Herwig/MatrixElements/Matchbox/Factory:Amplitudes 0 Amplitudehggg
insert /Herwig/MatrixElements/Matchbox/Factory:Amplitudes 0 Amplitudehqqbarg
insert /Herwig/MatrixElements/Matchbox/Factory:Amplitudes 0 Amplitudeqqbarttbar
insert /Herwig/MatrixElements/Matchbox/Factory:Amplitudes 0 Amplitudeqqbarttbarg
insert /Herwig/MatrixElements/Matchbox/Factory:Amplitudes 0 Amplitudeggttbar
insert /Herwig/MatrixElements/Matchbox/Factory:Amplitudes 0 Amplitudeggttbarg
################################################################################
# Setup phasespace generators
################################################################################
cd /Herwig/MatrixElements/Matchbox
mkdir Phasespace
cd Phasespace
create Herwig::PhasespaceCouplings PhasespaceCouplings
create Herwig::MatchboxRambo Rambo
set Rambo:CouplingData PhasespaceCouplings
create Herwig::FlatInvertiblePhasespace InvertiblePhasespace
set InvertiblePhasespace:CouplingData PhasespaceCouplings
create Herwig::FlatInvertibleLabframePhasespace InvertibleLabframePhasespace
set InvertibleLabframePhasespace:CouplingData PhasespaceCouplings
set InvertibleLabframePhasespace:LogSHat False
create Herwig::TreePhasespaceChannels TreePhasespaceChannels
create Herwig::TreePhasespace TreePhasespace
set TreePhasespace:ChannelMap TreePhasespaceChannels
set TreePhasespace:M0 0.0001*GeV
-set TreePhasespace:MC 0.000001*GeV
+set TreePhasespace:MC 0.00005*GeV
set TreePhasespace:CouplingData PhasespaceCouplings
do TreePhasespace:SetPhysicalCoupling 21 -1 1 0.059
do TreePhasespace:SetPhysicalCoupling 21 -2 2 0.059
do TreePhasespace:SetPhysicalCoupling 21 -3 3 0.059
do TreePhasespace:SetPhysicalCoupling 21 -4 4 0.059
do TreePhasespace:SetPhysicalCoupling 21 -5 5 0.059
do TreePhasespace:SetPhysicalCoupling 21 -6 6 0.059
do TreePhasespace:SetPhysicalCoupling 21 1 -1 0.059
do TreePhasespace:SetPhysicalCoupling 21 2 -2 0.059
do TreePhasespace:SetPhysicalCoupling 21 3 -3 0.059
do TreePhasespace:SetPhysicalCoupling 21 4 -4 0.059
do TreePhasespace:SetPhysicalCoupling 21 5 -5 0.059
do TreePhasespace:SetPhysicalCoupling 21 6 -6 0.059
do TreePhasespace:SetPhysicalCoupling 1 21 1 0.15733333333333333333
do TreePhasespace:SetPhysicalCoupling 2 21 2 0.15733333333333333333
do TreePhasespace:SetPhysicalCoupling 3 21 3 0.15733333333333333333
do TreePhasespace:SetPhysicalCoupling 4 21 4 0.15733333333333333333
do TreePhasespace:SetPhysicalCoupling 5 21 5 0.15733333333333333333
do TreePhasespace:SetPhysicalCoupling 6 21 6 0.15733333333333333333
do TreePhasespace:SetPhysicalCoupling -1 21 -1 0.15733333333333333333
do TreePhasespace:SetPhysicalCoupling -2 21 -2 0.15733333333333333333
do TreePhasespace:SetPhysicalCoupling -3 21 -3 0.15733333333333333333
do TreePhasespace:SetPhysicalCoupling -4 21 -4 0.15733333333333333333
do TreePhasespace:SetPhysicalCoupling -5 21 -5 0.15733333333333333333
do TreePhasespace:SetPhysicalCoupling -6 21 -6 0.15733333333333333333
do TreePhasespace:SetPhysicalCoupling 1 1 21 0.15733333333333333333
do TreePhasespace:SetPhysicalCoupling 2 2 21 0.15733333333333333333
do TreePhasespace:SetPhysicalCoupling 3 3 21 0.15733333333333333333
do TreePhasespace:SetPhysicalCoupling 4 4 21 0.15733333333333333333
do TreePhasespace:SetPhysicalCoupling 5 5 21 0.15733333333333333333
do TreePhasespace:SetPhysicalCoupling 6 6 21 0.15733333333333333333
do TreePhasespace:SetPhysicalCoupling -1 -1 21 0.15733333333333333333
do TreePhasespace:SetPhysicalCoupling -2 -2 21 0.15733333333333333333
do TreePhasespace:SetPhysicalCoupling -3 -3 21 0.15733333333333333333
do TreePhasespace:SetPhysicalCoupling -4 -4 21 0.15733333333333333333
do TreePhasespace:SetPhysicalCoupling -5 -5 21 0.15733333333333333333
do TreePhasespace:SetPhysicalCoupling -6 -6 21 0.15733333333333333333
do TreePhasespace:SetCoupling 25 -1 1 0
do TreePhasespace:SetCoupling 25 -2 2 0
do TreePhasespace:SetCoupling 25 -3 3 0.00000001184279069851
do TreePhasespace:SetCoupling 25 -4 4 0.00000205034465001885
do TreePhasespace:SetCoupling 25 -5 5 0.00002314757096085280
do TreePhasespace:SetCoupling 25 -6 6 0.03982017320025470767
do TreePhasespace:SetCoupling 25 -11 11 0.00000000000034264835
do TreePhasespace:SetCoupling 25 -12 12 0
do TreePhasespace:SetCoupling 25 -13 13 0.00000001464912263400
do TreePhasespace:SetCoupling 25 -14 14 0
do TreePhasespace:SetCoupling 25 -15 15 0.00000414359033108195
do TreePhasespace:SetCoupling 25 -16 16 0
do TreePhasespace:SetCoupling 22 -1 1 0.00083932358497608365
do TreePhasespace:SetCoupling 22 -2 2 0.00335729433990433461
do TreePhasespace:SetCoupling 22 -3 3 0.00083932358497608365
do TreePhasespace:SetCoupling 22 -4 4 0.00335729433990433461
do TreePhasespace:SetCoupling 22 -5 5 0.00083932358497608365
do TreePhasespace:SetCoupling 22 -6 6 0.00335729433990433461
do TreePhasespace:SetCoupling 22 -11 11 0.00755391226478475287
do TreePhasespace:SetCoupling 22 -13 13 0.00755391226478475287
do TreePhasespace:SetCoupling 22 -15 15 0.00755391226478475287
do TreePhasespace:SetCoupling 24 -2 1 0.01652748072644379386
do TreePhasespace:SetCoupling 24 -4 1 0.00382028458188709739
do TreePhasespace:SetCoupling 24 -6 1 0.00014707756360995175
do TreePhasespace:SetCoupling 24 -2 3 0.00382265953677814621
do TreePhasespace:SetCoupling 24 -4 3 0.01651340063673257587
do TreePhasespace:SetCoupling 24 -6 3 0.00068534412570265868
do TreePhasespace:SetCoupling 24 -2 5 0.00005954351191129535
do TreePhasespace:SetCoupling 24 -4 5 0.00069891529650865192
do TreePhasespace:SetCoupling 24 -6 5 0.01694947628265615369
do TreePhasespace:SetCoupling 24 -12 11 0.01696396350749155147
do TreePhasespace:SetCoupling 24 -14 13 0.01696396350749155147
do TreePhasespace:SetCoupling 24 -16 15 0.01696396350749155147
do TreePhasespace:SetCoupling -24 2 -1 0.01652748072644379386
do TreePhasespace:SetCoupling -24 4 -1 0.00382028458188709739
do TreePhasespace:SetCoupling -24 6 -1 0.00014707756360995175
do TreePhasespace:SetCoupling -24 2 -3 0.00382265953677814621
do TreePhasespace:SetCoupling -24 4 -3 0.01651340063673257587
do TreePhasespace:SetCoupling -24 6 -3 0.00068534412570265868
do TreePhasespace:SetCoupling -24 2 -5 0.00005954351191129535
do TreePhasespace:SetCoupling -24 4 -5 0.00069891529650865192
do TreePhasespace:SetCoupling -24 6 -5 0.01694947628265615369
do TreePhasespace:SetCoupling -24 12 -11 0.01696396350749155147
do TreePhasespace:SetCoupling -24 14 -13 0.01696396350749155147
do TreePhasespace:SetCoupling -24 16 -15 0.01696396350749155147
do TreePhasespace:SetCoupling 23 -1 1 0.00407649129960709158
do TreePhasespace:SetCoupling 23 -2 2 0.00317809816318353030
do TreePhasespace:SetCoupling 23 -3 3 0.00407649129960709158
do TreePhasespace:SetCoupling 23 -4 4 0.00317809816318353030
do TreePhasespace:SetCoupling 23 -5 5 0.00407649129960709158
do TreePhasespace:SetCoupling 23 -6 6 0.00317809816318353030
do TreePhasespace:SetCoupling 23 -11 11 0.00276049468148072129
do TreePhasespace:SetCoupling 23 -12 12 0.00545567409075140513
do TreePhasespace:SetCoupling 23 -13 13 0.00276049468148072129
do TreePhasespace:SetCoupling 23 -14 14 0.00545567409075140513
do TreePhasespace:SetCoupling 23 -15 15 0.00276049468148072129
do TreePhasespace:SetCoupling 23 -16 16 0.00545567409075140513
do TreePhasespace:SetCoupling 21 21 21 0.354
do TreePhasespace:SetCoupling 25 21 21 0.00000000016160437564
do TreePhasespace:SetCoupling 25 25 25 0.18719783125611995353
do TreePhasespace:SetCoupling 25 22 22 0.00000000006295673620
do TreePhasespace:SetCoupling 25 24 -24 219.30463760755686425818
do TreePhasespace:SetCoupling 25 23 23 362.91922658249853887524
do TreePhasespace:SetCoupling 22 24 -24 0.00755391226478475287
do TreePhasespace:SetCoupling 23 24 -24 0.02637401475019835008
@CREATE_VBFNLO@ Herwig::VBFNLOPhasespace VBFNLOPhasespace
@SET_VBFNLO@ VBFNLOPhasespace:CouplingData PhasespaceCouplings
set /Herwig/MatrixElements/Matchbox/Factory:Phasespace TreePhasespace
################################################################################
# Setup utilities for matching
################################################################################
cd /Herwig/MatrixElements/Matchbox
create Herwig::HardScaleProfile HardScaleProfile
create Herwig::MEMatching MEMatching
set MEMatching:RestrictPhasespace On
set MEMatching:HardScaleProfile /Herwig/MatrixElements/Matchbox/HardScaleProfile
set MEMatching:BornScaleInSubtraction BornScale
set MEMatching:RealEmissionScaleInSubtraction RealScale
set MEMatching:EmissionScaleInSubtraction RealScale
set MEMatching:BornScaleInSplitting ShowerScale
set MEMatching:RealEmissionScaleInSplitting ShowerScale
set MEMatching:EmissionScaleInSplitting ShowerScale
set MEMatching:TruncatedShower Yes
set MEMatching:MaxPtIsMuF Yes
set MEMatching:FFPtCut 1.0*GeV
set MEMatching:FIPtCut 1.0*GeV
set MEMatching:IIPtCut 1.0*GeV
set MEMatching:SafeCut 0.*GeV
create Herwig::ShowerApproximationGenerator MECorrectionHandler
set MECorrectionHandler:ShowerApproximation MEMatching
set MECorrectionHandler:Phasespace /Herwig/MatrixElements/Matchbox/Phasespace/InvertiblePhasespace
set MECorrectionHandler:PresamplingPoints 50000
set MECorrectionHandler:FreezeGrid 100000
create Herwig::DipoleMatching DipoleMatching HwDipoleMatching.so
# set in DipoleShowerDefaults.in, as it is not available at this point
# set DipoleMatching:ShowerHandler /Herwig/DipoleShower/DipoleShowerHandler
set DipoleMatching:BornScaleInSubtraction BornScale
set DipoleMatching:RealEmissionScaleInSubtraction BornScale
set DipoleMatching:EmissionScaleInSubtraction BornScale
set DipoleMatching:FFPtCut 1.0*GeV
set DipoleMatching:FIPtCut 1.0*GeV
set DipoleMatching:IIPtCut 1.0*GeV
set DipoleMatching:SafeCut 4.*GeV
create Herwig::QTildeMatching QTildeMatching HwQTildeMatching.so
set QTildeMatching:ShowerHandler /Herwig/Shower/ShowerHandler
set QTildeMatching:BornScaleInSubtraction BornScale
set QTildeMatching:RealEmissionScaleInSubtraction BornScale
set QTildeMatching:EmissionScaleInSubtraction BornScale
set QTildeMatching:QTildeFinder /Herwig/Shower/PartnerFinder
set QTildeMatching:SafeCut 4.*GeV
# just a dummy, since SudakovCommon can't be used;
# it is only used to get the value of the kinCutoffScale
set QTildeMatching:QTildeSudakov /Herwig/Shower/QtoQGSudakov
################################################################################
# Setup utilities for process generation
################################################################################
cd /Herwig/MatrixElements/Matchbox
mkdir Utility
cd Utility
create Herwig::Tree2toNGenerator DiagramGenerator
insert DiagramGenerator:Vertices 0 /Herwig/Vertices/FFGVertex
insert DiagramGenerator:Vertices 0 /Herwig/Vertices/GGGVertex
insert DiagramGenerator:Vertices 0 /Herwig/Vertices/FFPVertex
insert DiagramGenerator:Vertices 0 /Herwig/Vertices/FFZVertex
cp /Herwig/Vertices/FFWVertex /Herwig/Vertices/FFWMatchboxVertex
insert DiagramGenerator:Vertices 0 /Herwig/Vertices/FFWMatchboxVertex
insert DiagramGenerator:Vertices 0 /Herwig/Vertices/WWHVertex
insert DiagramGenerator:Vertices 0 /Herwig/Vertices/WWWVertex
insert DiagramGenerator:Vertices 0 /Herwig/Vertices/HGGVertex
insert DiagramGenerator:Vertices 0 /Herwig/Vertices/HHHVertex
cp /Herwig/Vertices/FFHVertex /Herwig/Vertices/TTHVertex
set /Herwig/Vertices/TTHVertex:Fermion 6
insert DiagramGenerator:Vertices 0 /Herwig/Vertices/TTHVertex
cp /Herwig/Vertices/FFHVertex /Herwig/Vertices/BBHVertex
set /Herwig/Vertices/BBHVertex:Fermion 5
create Herwig::ProcessData ProcessData
set /Herwig/MatrixElements/Matchbox/Factory:DiagramGenerator DiagramGenerator
set /Herwig/MatrixElements/Matchbox/Factory:ProcessData ProcessData
################################################################################
# Setup jet cuts
################################################################################
cd /Herwig/Cuts
create Herwig::MatchboxFactoryMatcher MatchboxJetMatcher
set MatchboxJetMatcher:Group j
create ThePEG::FastJetFinder JetFinder
set JetFinder:UnresolvedMatcher MatchboxJetMatcher
set JetFinder:Variant AntiKt
set JetFinder:RecombinationScheme E
set JetFinder:Mode Inclusive
set JetFinder:ConeRadius 0.7
create ThePEG::JetRegion FirstJet
set FirstJet:PtMin 20.*GeV
do FirstJet:YRange -5.0 5.0
set FirstJet:Fuzzy Yes
set FirstJet:EnergyCutWidth 4.0*GeV
set FirstJet:RapidityCutWidth 0.4
insert FirstJet:Accepts[0] 1
create ThePEG::JetRegion SecondJet
set SecondJet:PtMin 20.*GeV
do SecondJet:YRange -5.0 5.0
set SecondJet:Fuzzy Yes
set SecondJet:EnergyCutWidth 4.0*GeV
set SecondJet:RapidityCutWidth 0.4
insert SecondJet:Accepts[0] 2
create ThePEG::JetRegion ThirdJet
set ThirdJet:PtMin 20.*GeV
do ThirdJet:YRange -5.0 5.0
set ThirdJet:Fuzzy Yes
set ThirdJet:EnergyCutWidth 4.0*GeV
set ThirdJet:RapidityCutWidth 0.4
insert ThirdJet:Accepts[0] 3
create ThePEG::JetRegion FourthJet
set FourthJet:PtMin 20.*GeV
do FourthJet:YRange -5.0 5.0
set FourthJet:Fuzzy Yes
set FourthJet:EnergyCutWidth 4.0*GeV
set FourthJet:RapidityCutWidth 0.4
insert FourthJet:Accepts[0] 4
create ThePEG::FuzzyTheta FuzzyTheta
set FuzzyTheta:EnergyWidth 4.0*GeV
set FuzzyTheta:RapidityWidth 0.4
set FuzzyTheta:AngularWidth 0.4
create ThePEG::NJetsCut NJetsCut
set NJetsCut:UnresolvedMatcher MatchboxJetMatcher
set NJetsCut:NJetsMin 2
create ThePEG::JetCuts JetCuts
set JetCuts:UnresolvedMatcher MatchboxJetMatcher
set JetCuts:Ordering OrderPt
create Herwig::IdentifiedParticleCut IdentifiedParticleCut
cp IdentifiedParticleCut LeptonCut
set LeptonCut:Matcher /Herwig/Matchers/Lepton
cp IdentifiedParticleCut ChargedLeptonCut
set ChargedLeptonCut:Matcher /Herwig/Matchers/ChargedLepton
cp IdentifiedParticleCut BottomQuarkCut
set BottomQuarkCut:Matcher /Herwig/Matchers/Bottom
cp IdentifiedParticleCut TopQuarkCut
set TopQuarkCut:Matcher /Herwig/Matchers/Top
cp IdentifiedParticleCut WBosonCut
set WBosonCut:Matcher /Herwig/Matchers/WBoson
cp IdentifiedParticleCut ZBosonCut
set ZBosonCut:Matcher /Herwig/Matchers/ZBoson
cp IdentifiedParticleCut HiggsBosonCut
set HiggsBosonCut:Matcher /Herwig/Matchers/HiggsBoson
cp IdentifiedParticleCut PhotonCut
set PhotonCut:Matcher /Herwig/Matchers/Photon
create Herwig::FrixionePhotonSeparationCut PhotonIsolationCut
set PhotonIsolationCut:UnresolvedMatcher MatchboxJetMatcher
create Herwig::MatchboxDeltaRCut MatchboxDeltaRCut
cp MatchboxDeltaRCut LeptonDeltaRCut
set LeptonDeltaRCut:FirstMatcher /Herwig/Matchers/Lepton
set LeptonDeltaRCut:SecondMatcher /Herwig/Matchers/Lepton
cp MatchboxDeltaRCut ChargedLeptonDeltaRCut
set ChargedLeptonDeltaRCut:FirstMatcher /Herwig/Matchers/ChargedLepton
set ChargedLeptonDeltaRCut:SecondMatcher /Herwig/Matchers/ChargedLepton
create Herwig::InvariantMassCut InvariantMassCut
cp InvariantMassCut LeptonPairMassCut
set LeptonPairMassCut:FirstMatcher /Herwig/Matchers/Lepton
set LeptonPairMassCut:SecondMatcher /Herwig/Matchers/Lepton
cp InvariantMassCut ChargedLeptonPairMassCut
set ChargedLeptonPairMassCut:FirstMatcher /Herwig/Matchers/ChargedLepton
set ChargedLeptonPairMassCut:SecondMatcher /Herwig/Matchers/ChargedLepton
create Herwig::MissingPtCut MissingPtCut
set MissingPtCut:Matcher /Herwig/Matchers/Neutrino
################################################################################
# Setup scale choices
################################################################################
cd /Herwig/MatrixElements/Matchbox
mkdir Scales
cd Scales
create Herwig::MatchboxScaleChoice SHatScale
cp SHatScale FixedScale
set FixedScale:FixedScale 100.*GeV
create Herwig::MatchboxPtScale MaxJetPtScale
set MaxJetPtScale:JetFinder /Herwig/Cuts/JetFinder
create Herwig::MatchboxLeptonMassScale LeptonPairMassScale
create Herwig::MatchboxLeptonPtScale LeptonPairPtScale
create Herwig::MatchboxHtScale HTScale
create Herwig::MatchboxTopMassScale TopPairMassScale
create Herwig::MatchboxTopMTScale TopPairMTScale
set HTScale:JetFinder /Herwig/Cuts/JetFinder
set HTScale:IncludeMT No
set HTScale:JetPtCut 15.*GeV
cp HTScale HTPrimeScale
set HTPrimeScale:IncludeMT Yes
set HTPrimeScale:JetPtCut 15.*GeV
cp LeptonPairMassScale LeptonQ2Scale
set /Herwig/MatrixElements/Matchbox/Factory:ScaleChoice LeptonPairMassScale
################################################################################
# Factories for different colliders
# only provided for backwards compatibility; refer to Matchbox/*.in input file
# snippets for generic handling
################################################################################
cd /Herwig/MatrixElements/Matchbox
cp Factory EEFactory
set EEFactory:PartonExtractor /Herwig/Partons/EEExtractor
set EEFactory:Cuts /Herwig/Cuts/EECuts
set EEFactory:FirstPerturbativePDF No
set EEFactory:SecondPerturbativePDF No
cp Factory DISFactory
set DISFactory:PartonExtractor /Herwig/Partons/DISExtractor
set DISFactory:Cuts /Herwig/Cuts/DISCuts
set DISFactory:FirstPerturbativePDF No
set DISFactory:SecondPerturbativePDF Yes
cp Factory PPFactory
set PPFactory:PartonExtractor /Herwig/Partons/QCDExtractor
set PPFactory:Cuts /Herwig/Cuts/QCDCuts
set PPFactory:FirstPerturbativePDF Yes
set PPFactory:SecondPerturbativePDF Yes
cd /
