diff --git a/Shower/Dipole/Merging/Merger.cc b/Shower/Dipole/Merging/Merger.cc --- a/Shower/Dipole/Merging/Merger.cc +++ b/Shower/Dipole/Merging/Merger.cc @@ -1,1547 +1,1548 @@ // -*- C++ -*- // // Merger.cc is a part of Herwig - A multi-purpose Monte Carlo event generator // Copyright ( C ) 2002-2007 The Herwig Collaboration // // Herwig is licenced under version 2 of the GPL , see COPYING for details. // Please respect the MCnet academic guidelines , see GUIDELINES for details. // // // This is the implementation of the non-inlined , non-templated member // functions of the Merger class. // #include "Merger.h" #include "Node.h" #include "MergingFactory.h" // other includes when needed below. using namespace Herwig; Merger::Merger() : MergerBase() , Unlopsweights( true ) , theCMWScheme( true ) , isUnitarized( true ) , isNLOUnitarized( true ) , defMERegionByJetAlg( false ) , theOpenInitialStateZ( false ) , theChooseHistory( 0 ) , theN0( 0 ) , theOnlyN( -1 ) , theCurrentMaxLegs( -1 ) , theGamma( 1. ) , ee_ycut( -1 ) , pp_dcut( -1 ) , theSmearing( 0. ) , theIRSafePT( 1.*GeV ) , theMergePt( 4.*GeV ) , theCentralMergePt( 4.*GeV ) , theMergingJetFinder() , theLargeNBasis() , theDipoleShowerHandler() , theTreeFactory() { FFLTK = new_ptr( FFLightTildeKinematics() ); FILTK = new_ptr( FILightTildeKinematics() ); IFLTK = new_ptr( IFLightTildeKinematics() ); IILTK = new_ptr( IILightTildeKinematics() ); FFMTK = new_ptr( FFMassiveTildeKinematics() ); FIMTK = new_ptr( FIMassiveTildeKinematics() ); IFMTK = new_ptr( IFMassiveTildeKinematics() ); } Merger::~Merger() {} IBPtr Merger::clone() const { return new_ptr( *this ); } IBPtr Merger::fullclone() const { return new_ptr( *this ); } pair<PVector , PVector> getInOut( NodePtr Node ){ PVector incoming; const auto me=Node->nodeME(); for( auto const & i : {0 , 1} ) incoming.push_back( me->mePartonData()[i]->produceParticle( me->lastMEMomenta()[i] ) ); PVector outgoing; for ( size_t i = 2;i<Node->nodeME()->mePartonData().size();i++ ){ PPtr p = me->mePartonData()[i]->produceParticle( me->lastMEMomenta()[i] ); outgoing.push_back( p ); } return make_pair( incoming , outgoing ); } CrossSection Merger::MergingDSigDRBornGamma( NodePtr Node ){ double weightCL = 0.; weight = 1.; if ( !Node->children().empty() ) { auto const inoutpair = getInOut( Node ); // Here we make sure that clustering and splitting are invertible. NodePtr rndCh = Node->randomChild(); // Check if point is part of the ME region. if( !matrixElementRegion( inoutpair.first , inoutpair.second , rndCh->pT() , theMergePt ) )weight *= 0.; } NodePtr Born = Node-> getHistory( true , DSH()->hardScaleFactor() ); NodePtr CLNode; NodePtr BornCL; if( !Node->children().empty() ){ if ( UseRandom::rndbool() ){ CLNode = Node->randomChild(); bool inhist = CLNode->isInHistoryOf( Born ); weight *= inhist?( -2.*int( Node->children().size() ) ):0.; projected = true; weightCL = 2.*int( Node->children().size() ); BornCL = CLNode-> getHistory( false , DSH()->hardScaleFactor() ); }else{ weight = 2.; projected = false; } }else{ weight = 1.; projected = false; } if ( treefactory()->onlymulti() != -1&& treefactory()->onlymulti() != int( Node->legsize()-(projected ? 1 : 0) ) ) return ZERO; if( !Node->allAbove( mergePt()*(1.-1e-6) ) )weight = 0.; if( CLNode&&!CLNode->allAbove( mergePt()*(1.-1e-6) ) )weightCL = 0.; if ( !Born->xcomb()->willPassCuts() ){ return ZERO; } CrossSection res = ZERO; bool maxMulti = Node->legsize() == int( maxLegsLO() ); if( weight != 0. 
){ Energy startscale = CKKW_StartScale( Born ); fillHistory( startscale , Born , Node ); Node->runningPt( fillProjector( (projected ? 1 : 0) ) ); weight *= history.back().weight*alphaReweight()*pdfReweight(); if( weight == 0.&&weightCL == 0. )return ZERO; res += weight*TreedSigDR( startscale , Node , ( !maxMulti&&!projected )?theGamma:1. ); } if( CLNode&&theGamma != 1. ){ Energy startscale = CKKW_StartScale( BornCL ); fillHistory( startscale , BornCL , CLNode ); Node->runningPt( fillProjector( projected ? 1 : 0 ) ); weightCL *= history.back().weight*alphaReweight()*pdfReweight(); CrossSection diff = ZERO; Node->nodeME()->factory()->setAlphaParameter( 1. ); diff -= weightCL*CLNode->dipole()->dSigHatDR( sqr( startscale*theCurrentME->renFac() ) ); Node->nodeME()->factory()->setAlphaParameter( theGamma ); string alp = ( CLNode->dipole()->aboveAlpha()?"Above":"Below" ); diff += weightCL*CLNode->dipole()->dSigHatDR( sqr( startscale*theCurrentME->renFac() ) ); Node->nodeME()->factory()->setAlphaParameter( 1. ); res += diff; } return res; } double decideClustering(const NodePtr sub,const NodePtr head,bool& pro){ if( sub != head ){ // at least one history step -> unitarisation if ( UseRandom::rndbool() ){ pro = true; return -2.; }else{ pro = false; return 2.; } }else{ // no ordered history -> no projection pro = false; return 1.; } } CrossSection Merger::MergingDSigDRBornStandard( NodePtr Node ){ // Check if phase space poing is in ME region if ( !Node->children().empty() ) { auto const & inoutpair = getInOut( Node ); NodePtr rndCh = Node->randomChild(); // Here we make sure that clustering and splitting are invertible if( !matrixElementRegion( inoutpair.first , inoutpair.second , rndCh->pT() , theMergePt ) )return ZERO; } // get the history for the process NodePtr Born = Node-> getHistory( true , DSH()->hardScaleFactor() ); // decide if to cluster weight = decideClustering(Born,Node,projected); if ( treefactory()->onlymulti() != -1&& treefactory()->onlymulti() != int( Node->legsize()-( projected ? 1 : 0 ) ) ) return ZERO; // Check for cuts on the production proces. if ( !Born->xcomb()->willPassCuts() )return ZERO; // calculate the staring scale Energy startscale = CKKW_StartScale( Born ); // fill history with caluclation of sudakov supression fillHistory( startscale , Born , Node ); // fill the projector -> return the scale of the last splitting Node->runningPt( fillProjector( projected ? 1 : 0 ) ); // the weight has three components to get shower history weight weight *= history.back().weight*alphaReweight()*pdfReweight(); if( weight == 0. )return ZERO; //calculate the cross section return weight*TreedSigDR( startscale , Node , 1. ); } /// calculate the history weighted born cross section #include "ThePEG/Handlers/SamplerBase.h" CrossSection Merger::MergingDSigDRBornCheapME( NodePtr Node ){ // Check if phase space poing is in ME region if ( !Node->children().empty() ) { auto const & inoutpair = getInOut( Node ); NodePtr rndCh = Node->randomChild(); // Here we make sure that clustering and splitting are invertible if( !matrixElementRegion( inoutpair.first , inoutpair.second , rndCh->pT() , theMergePt ) )return ZERO; } CrossSection meweight=(SamplerBase::runLevel() == SamplerBase::RunMode)?TreedSigDR( 60.*GeV , Node , 1. 
):1.*nanobarn; if (SamplerBase::runLevel() == SamplerBase::RunMode && theHighMeWeightMap.count(Node) ) { if (abs(meweight)>abs(theHighMeWeightMap[Node])) { theHighMeWeightMap[Node]=abs(meweight); }else{ if (meweight/theHighMeWeightMap[Node]<UseRandom::rnd()) { return ZERO; } } }else{ theHighMeWeightMap.insert(make_pair(Node,abs(meweight))); } // get the history for the process NodePtr Born = Node-> getHistory( true , DSH()->hardScaleFactor() ); // decide if to cluster weight = decideClustering(Born,Node,projected); if ( treefactory()->onlymulti() != -1&& treefactory()->onlymulti() != int( Node->legsize()-( projected ? 1 : 0 ) ) ) return ZERO; // Check for cuts on the production proces. if ( !Born->xcomb()->willPassCuts() )return ZERO; // calculate the staring scale Energy startscale = CKKW_StartScale( Born ); // fill history with caluclation of sudakov supression fillHistory( startscale , Born , Node ); // fill the projector -> return the scale of the last splitting Node->runningPt( fillProjector( projected ? 1 : 0 ) ); // the weight has three components to get shower history weight weight *= history.back().weight*alphaReweight()*pdfReweight(); if( weight == 0. )return ZERO; //calculate the cross section return weight*TreedSigDR( startscale , Node , 1. )*theHighMeWeightMap[Node]/meweight; } CrossSection Merger::MergingDSigDRVirtualStandard( NodePtr Node ){ // Check if phase space poing is in ME region if ( !Node->children().empty() ) { auto inoutpair = getInOut( Node ); NodePtr rndCh = Node->randomChild(); // Here we make sure that clustering and splitting are invertible if( !matrixElementRegion( inoutpair.first , inoutpair.second , rndCh->pT() , theMergePt ) )return ZERO; } NodePtr Born = Node-> getHistory( true , DSH()->hardScaleFactor() ); weight = decideClustering(Born,Node,projected); if ( !Born->xcomb()->willPassCuts() )return ZERO; Energy startscale = CKKW_StartScale( Born ); fillHistory( startscale , Born , Node ); Node->runningPt( fillProjector( projected ? 1 : 0 ) ); double ww1 = history.back().weight; double ww2 = alphaReweight(); double ww3 = pdfReweight(); weight *= ww1*ww2*ww3; if( weight == 0. )return ZERO; CrossSection matrixElement = LoopdSigDR( startscale , Node ); CrossSection Bornweight = Node->nodeME()->dSigHatDRB(); double w1 = -sumpdfReweightUnlops(); double w2 = -sumalphaReweightUnlops(); double w3 = -sumfillHistoryUnlops(); CrossSection unlopsweight = ( w1+w2+w3 ) *Bornweight *SM().alphaS()/( 2.*ThePEG::Constants::pi ); if ( Node->legsize() == 5&&Debug::level > 2 ) { Energy minPT = Constants::MaxEnergy; for( auto const & no :Node->children() )minPT = min( no->pT() , minPT ); generator()->log() << "\nVIRT " << minPT/GeV << " " << weight << " " << w1; generator()->log() << " " << w2; generator()->log() << " " << w3; generator()->log() << " " << ( matrixElement/nanobarn ) << " " << ww1 << " " << ww2 << " " << ww3 << " " << Born->pT()/GeV << " " << Born->nodeME()->mePartonData()[3]->mass()/GeV << " " << ( Bornweight*SM().alphaS()/( 2.*ThePEG::Constants::pi )/nanobarn ); } return weight* as( startscale*DSH()->renFac() )/SM().alphaS()* ( matrixElement+unlopsweight ); } CrossSection Merger::MergingDSigDRRealStandard( NodePtr Node ){ bool allAbove = Node->allAbove( mergePt() ); //TODO: VW Abgas Skandal if( !Node->allAbove( ( Debug::level > 2?0.01:1. 
)*theIRSafePT ) )return ZERO; if ( allAbove )return MergingDSigDRRealAllAbove( Node ); if ( UseRandom::rndbool() ) return 2.*MergingDSigDRRealBelowSubReal( Node ); return 2.*MergingDSigDRRealBelowSubInt( Node ); } CrossSection Merger::MergingDSigDRRealAllAbove( NodePtr Node ){ if ( Node->children().empty() ) { throw Exception() << "Real emission contribution without underlying born." << "These are finite contibutions already handled in LO merging." << Exception::abortnow; } //If all dipoles pts are above , we cluster to this dipole. NodePtr CLNode = Node->randomChild(); // Check if phase space poing is in ME region--> else rm PSP if ( !CLNode->children().empty() ) { auto inoutpair = getInOut( CLNode ); NodePtr rndCh = CLNode->randomChild(); // Here we make sure that clustering and splitting are invertible if( !matrixElementRegion( inoutpair.first , inoutpair.second , rndCh->pT() , theMergePt ) )return ZERO; } // first find the history for the acctual Node NodePtr Born = Node-> getHistory( true , DSH()->hardScaleFactor() ); // check if the randomly choosen CLNode is part of the history. // If CLNode is not part of the history , dont calculate the Real contribution // else multiply the real contribution with N ( number of children ). // this returns the sudakov suppression according to the clustering of the born parts. bool inhist = CLNode->isInHistoryOf( Born ); // get the history for the clustered Node. Born = CLNode-> getHistory( false , DSH()->hardScaleFactor() ); weight = decideClustering(Born,CLNode,projected); if ( !CLNode->allAbove( mergePt() ) )return ZERO; if ( !Born->xcomb()->willPassCuts() )return ZERO; Energy startscale = CKKW_StartScale( Born ); fillHistory( startscale , Born , CLNode ); Node->runningPt( fillProjector( projected ? 2 : 1 ) ); weight *= history.back().weight*alphaReweight()*pdfReweight(); if( weight == 0. )return ZERO; CrossSection me = ( inhist?TreedSigDR( startscale , Node ):ZERO ); CrossSection dip = CLNode->calcDip( startscale*theCurrentME->renFac() ); CrossSection res = weight*as( startscale*DSH()->renFac() )/SM().alphaS()* ( double )Node->children().size()*( me -dip ); if ( Node->legsize() == 6&&Debug::level > 2 ) { Energy minPT = Constants::MaxEnergy; for( auto const & no :Node->children() )minPT = min( no->pT() , minPT ); generator()->log() << "\nRAA " << minPT/GeV << " " << weight << " " << ( me-dip )/nanobarn << " " << me/nanobarn << " " << dip/nanobarn; } return res; } CrossSection Merger::MergingDSigDRRealBelowSubReal( NodePtr Node ){ NodePtr HistNode = Node->randomChild(); if ( !HistNode->children().empty() ) { auto inoutpair = getInOut( HistNode ); NodePtr rndCh = HistNode->randomChild(); // Here we make sure that clustering and splitting are invertible if( !matrixElementRegion( inoutpair.first , inoutpair.second , rndCh->pT() , theMergePt ) )return ZERO; } NodePtr Born = HistNode-> getHistory( false , DSH()->hardScaleFactor() ); weight = decideClustering(Born,HistNode,projected); if ( !Born->xcomb()->willPassCuts() )return ZERO; Energy startscale = CKKW_StartScale( Born ); fillHistory( startscale , Born , HistNode ); Node->runningPt( fillProjector( projected ? 1 : 0 ) ); weight *= history.back().weight*alphaReweight()*pdfReweight(); if( weight == 0. )return ZERO; CrossSection sumPS = ZERO; for( auto const & child : Node->children() ){ if ( child->allAbove( mergePt() ) ){ if( ( child )->pT()>mergePt()/3. 
)//TODO: this is a dynamical cutoff( see below ) sumPS += child->calcPs( startscale*theCurrentME->renFac() ); else sumPS += child->calcDip( startscale*theCurrentME->renFac() ); }else{ assert( child->pT()>mergePt() ); } } CrossSection me = TreedSigDR( startscale , Node ); if ( Node->legsize() == 6&&Debug::level > 2 ) { Energy minPT = Constants::MaxEnergy; for( auto const & no :Node->children() )minPT = min( no->pT() , minPT ); generator()->log() << "\nRBSR " << minPT/GeV << " " << weight << " " << ( me-sumPS )/nanobarn << " " << me/nanobarn << " " << sumPS/nanobarn; } //Here we subtract the PS ( and below the dynamical cutoff the Dip ) return weight*as( startscale*DSH()->renFac() )/SM().alphaS()* ( me-sumPS ); } CrossSection Merger::MergingDSigDRRealBelowSubInt( NodePtr Node ){ if( Node->children().empty() )return ZERO; NodePtr CLNode = Node->randomChild(); if( CLNode->pT()<mergePt()/3. )return ZERO;//TODO: this is a dynamical cutoff( see above ) if ( !CLNode->children().empty() ) { auto inoutpair = getInOut( CLNode ); NodePtr rndCh = CLNode->randomChild(); // Here we make sure that clustering and splitting are invertible if( !matrixElementRegion( inoutpair.first , inoutpair.second , rndCh->pT() , theMergePt ) )return ZERO; } NodePtr Born = CLNode-> getHistory( false , DSH()->hardScaleFactor() ); weight = decideClustering(Born,CLNode,projected); if ( !CLNode->allAbove( mergePt() ) )return ZERO; if ( !Born->xcomb()->willPassCuts() )return ZERO; Energy startscale = CKKW_StartScale( Born ); fillHistory( startscale , Born , CLNode ); Node->runningPt( fillProjector( projected ? 2 : 1 ) ); weight *= history.back().weight*alphaReweight()*pdfReweight(); if( weight == 0. )return ZERO; pair<CrossSection , CrossSection> DipAndPs = CLNode->calcDipandPS( startscale*theCurrentME->renFac() ); if ( Node->legsize() == 6&&Debug::level > 2 ) { Energy minPT = Constants::MaxEnergy; for( auto const & no :Node->children() )minPT = min( no->pT() , minPT ); generator()->log() << "\nRBSI " << CLNode->pT()/GeV << " " << weight << " " << ( DipAndPs.second-DipAndPs.first )/nanobarn << " " << DipAndPs.second/nanobarn << " " << DipAndPs.first/nanobarn; } //Here we add the PS and subtrac the Dip ( only above the dynamical cutoff ) return weight*as( startscale*DSH()->renFac() )/SM().alphaS()* ( double )Node->children().size()*( DipAndPs.second-DipAndPs.first ); } CrossSection Merger::TreedSigDR( Energy startscale , NodePtr Node , double diffAlpha ){ assert( Node->deepHead() == Node ); Node->nodeME()->setScale( sqr( startscale ) , sqr( startscale ) ); CrossSection res = Node->nodeME()->dSigHatDRB(); if ( diffAlpha != 1. ) { res += Node->nodeME()->dSigHatDRAlphaDiff( diffAlpha ); } if( std::isnan( double( res/nanobarn ) ) ){ generator()->logWarning(Exception() << "Merger: TreedSigDR is nan" << Exception::warning); res = ZERO;}; return res; } CrossSection Merger::LoopdSigDR( Energy startscale , NodePtr Node ){ // The deephead should be calculated here. 
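// A short descriptive note (comment only, grounded in the statements that follow):
// this returns the one-loop part of the NLO cross section evaluated at the CKKW
// starting scale. The Born piece is switched off with doOneLoopNoBorn() , the
// virtual ( dSigHatDRV ) and integrated subtraction ( dSigHatDRI ) pieces are
// summed , and the flag is reset with noOneLoopNoBorn() before returning.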
assert( Node->deepHead() == Node ); //Node->nodeME()->setXComb( Node->xcomb() ); Node->nodeME()->setScale( sqr( startscale ) , sqr( startscale ) ); Node->nodeME()->doOneLoopNoBorn(); CrossSection res = Node->nodeME()->dSigHatDRV()+Node->nodeME()->dSigHatDRI(); Node->nodeME()->noOneLoopNoBorn(); return res; } Energy Merger::fillProjector( int pjs ){ // in the shower handler the scale is multiplied by DSH()->hardScaleFactor() // so here we need to devide this double xiQSh = history.begin()->node->legsize() == N0()?DSH()->hardScaleFactor():1.; if( pjs == 0 ){ return ( history.size() == 1?1.:( 1./xiQSh ) )*history.back().scale; } for( auto const & hs : history ) if ( isProjectorStage( hs.node , pjs )&&pjs != 0 ){ history.begin()->node->deepHead()->xcomb()->lastProjector( hs.node->xcomb() ); return ( hs.node == history[0].node?1.:( 1./xiQSh ) )*hs.scale; } throw Exception() << "Could not fill projector." << Exception::abortnow; return ZERO; } double Merger::pdfReweight(){ double res = 1.; double facfactor = ( history[0].node->legsize() == N0()?theCurrentME->renFac():DSH()->facFac() ); for( int side : {0 , 1} ){ if( history[0].node->xcomb()->mePartonData()[side]->coloured() ){ for ( auto const & hs : history ){ //pdf-ratio only to the last step if( !hs.node->parent() )continue; if ( hs.node == history.back().node )continue; if( !hs.node->dipole() ){ throw Exception() << "\nMerger: pdfReweight: history step has no dipol. "<< Exception::abortnow; return 0.; } res *= pdfratio( hs.node , facfactor*hs.scale , DSH()->facFac()*( hs.node->pT() ) , side ); facfactor = DSH()->facFac(); } res *= pdfratio( history.back().node , facfactor*history.back().scale , history[0].scale*theCurrentME->renFac() , side ); } } if ( std::isnan( res ) ) generator()->logWarning(Exception() << "Merger: pdfReweight is nan." << Exception::warning); return res; } double Merger::alphaReweight(){ double res = 1.; Energy Q_R = ( history[0].node->legsize() == N0()? theCurrentME->renFac(): DSH()->renFac() )* history[0].scale; const auto pi=Constants::pi; const auto Q_qed=history[0].node->nodeME()->factory()->scaleChoice()->renormalizationScaleQED(); const auto Oew=history[0].node->nodeME()->orderInAlphaEW(); const auto Oqcd=history[0].node->nodeME()->orderInAlphaS(); const auto glfac=3.*( 67./18.-1./6.*pi*pi ); res *= pow( as( Q_R ) / SM().alphaS() , Oqcd ); res *= pow( SM().alphaEMME( Q_qed )/ SM().alphaEMMZ() , Oew ); if ( !( history[0].node->children().empty() ) ){ res *= pow( ( theCMWScheme?( 1.+ ( glfac-5./9.*Nf( Q_R ) )*as( Q_R )/2./pi ):1. ) , int( history[0].node->legsize()-N0() ) ); } for ( auto const & hs : history ) if ( hs.node!= history.back().node ){ Energy q_i = DSH()->renFac()* hs.node->pT(); res *= as( q_i )/ SM().alphaS() *( theCMWScheme?( 1.+ ( glfac-5./9.*Nf( q_i ) )*as( q_i )/2./pi ):1. ); } if ( std::isnan( res ) ) generator()->logWarning(Exception() << "Merger: alphaReweight is nan. "<< Exception::warning); return res; } void Merger::fillHistory( Energy scale , NodePtr Begin , NodePtr EndNode ){ history.clear(); double sudakov0_n = 1.; - history.push_back( HistoryStep( Begin , sudakov0_n , scale ) ); + + history.push_back( {Begin , sudakov0_n , scale} ); double xiQSh = history.begin()->node->legsize() == N0()?DSH()->hardScaleFactor():1.; scale *= xiQSh; if ( Begin->parent()||!isUnitarized ) { while ( Begin->parent() && ( Begin != EndNode ) ) { if ( !dosudakov( Begin , scale , Begin->pT() , sudakov0_n ) ){ - history.push_back( HistoryStep( Begin->parent() , 0. 
, scale ) ); + history.push_back( { Begin->parent() , 0. , scale } ); } if ( std::isnan( sudakov0_n ) ) generator()->logWarning(Exception() << "Merger: sudakov"<<scale/GeV<<" "<<Begin->pT()/GeV<<"0_n is nan. " << Exception::warning); scale = Begin->pT(); - history.push_back( HistoryStep( Begin->parent() , sudakov0_n , Begin->pT() ) ); + history.push_back( { Begin->parent() , sudakov0_n , Begin->pT() } ); Begin = Begin->parent(); } Energy notunirunning = scale; if ( !isUnitarized&&N()+N0() > int( Begin->deepHead()->legsize() ) ) { if ( !dosudakov( Begin , notunirunning , mergePt() , sudakov0_n ) ){ history.back().weight = 0.; }else{ history.back().weight = sudakov0_n; } } } if( history.size() == 1 )scale /= DSH()->hardScaleFactor(); } double Merger::sumpdfReweightUnlops(){ double res = 0.; Energy beam1Scale = history[0].scale*( history[0].node->legsize() == N0()?theCurrentME->renFac():DSH()->facFac() ); Energy beam2Scale = history[0].scale*( history[0].node->legsize() == N0()?theCurrentME->renFac():DSH()->facFac() ); for ( auto const & hs : history ){ //pdf expansion only to the last step if( !hs.node->parent() )continue; if( hs.node->xcomb()->mePartonData()[0]->coloured()&&hs.node->nodeME()->lastX1()>0.99 )return 0.; if( hs.node->xcomb()->mePartonData()[1]->coloured()&&hs.node->nodeME()->lastX2()>0.99 )return 0.; if( hs.node->nodeME()->lastX1()<0.00001 )return 0.; if( hs.node->nodeME()->lastX2()<0.00001 )return 0.; if ( hs.node->dipole()->bornEmitter() == 0 ){ res += pdfUnlops( hs.node , 0 , beam1Scale , ( hs.node->pT() ) , hs.node->nodeME()->lastX1() , Nf( history[0].scale ) , history[0].scale ); beam1Scale = ( hs.node->pT() )*DSH()->facFac(); } if ( hs.node->dipole()->bornEmitter() == 1 ){ res += pdfUnlops( hs.node , 1 , beam2Scale , ( hs.node->pT() ) , hs.node->nodeME()->lastX2() , Nf( history[0].scale ) , history[0].scale ); beam2Scale = ( hs.node->pT() )*DSH()->facFac(); } if ( hs.node->dipole()->bornSpectator() == 0 && hs.node->dipole()->bornEmitter() >1 ){ res += pdfUnlops( hs.node , 0 , beam1Scale , ( hs.node->pT() ) , hs.node->nodeME()->lastX1() , Nf( history[0].scale ) , history[0].scale ); beam1Scale = ( hs.node->pT() )*DSH()->facFac(); } if ( hs.node->dipole()->bornSpectator() == 1 && hs.node->dipole()->bornEmitter() >1 ){ res += pdfUnlops( hs.node , 1 , beam2Scale , ( hs.node->pT() ) , hs.node->nodeME()->lastX2() , Nf( history[0].scale ) , history[0].scale ); beam2Scale = ( hs.node->pT() )*DSH()->facFac(); } } if ( history[0].node->deepHead()->xcomb()->mePartonData()[0]->coloured() ){ res += pdfUnlops( history.back().node , 0 , beam1Scale , history[0].scale*theCurrentME->renFac() , ( history.back() ).node->nodeME()->lastX1() , Nf( history[0].scale ) , history[0].scale ); } if ( history[0].node->deepHead()->xcomb()->mePartonData()[1]->coloured() ) { res += pdfUnlops( history.back().node , 1 , beam2Scale , history[0].scale*theCurrentME->renFac() , ( history.back() ).node->nodeME()->lastX2() , Nf( history[0].scale ) , history[0].scale ); } return res; } #include "Herwig/MatrixElement/Matchbox/Phasespace/RandomHelpers.h" double Merger::pdfUnlops( NodePtr node , int side , Energy running , Energy next , double x , int nlp , Energy fixedScale ) { tcPDPtr particle , parton; tcPDFPtr pdf; if ( side == 0 ) { particle = node->nodeME()->lastParticles().first->dataPtr(); parton = node->nodeME()->lastPartons().first->dataPtr(); pdf = node->xcomb()->partonBins().first->pdf(); }else{ assert( side == 1 ); particle = node->nodeME()->lastParticles().second->dataPtr(); parton = 
node->nodeME()->lastPartons().second->dataPtr(); pdf = node->xcomb()->partonBins().second->pdf(); } //copied from PKOperator double res = 0.; int number = 10; for ( int nr = 0;nr<number;nr++ ){ double restmp = 0; using namespace RandomHelpers; double r = UseRandom::rnd(); double eps = 1e-3; pair<double , double> zw = generate( ( piecewise() , flat( 0.0 , x ) , match( inverse( 0.0 , x , 1.0 ) + inverse( 1.0+eps , x , 1.0 ) ) ) , r ); double z = zw.first; double mapz = zw.second; double PDFxparton = pdf->xfx( particle , parton , sqr( fixedScale ) , x )/x; double CA = SM().Nc(); double CF = ( SM().Nc()*SM().Nc()-1.0 )/( 2.*SM().Nc() ); if ( abs( parton->id() ) < 7 ) { double PDFxByzgluon = pdf->xfx( particle , getParticleData( ParticleID::g ) , sqr( fixedScale ) , x/z )*z/x; double PDFxByzparton = pdf->xfx( particle , parton , sqr( fixedScale ) , x/z )*z/x; assert( abs( parton->id() ) < 7 ); restmp += CF*( 3./2.+2.*log( 1.-x ) ) * PDFxparton; if ( z > x ) { restmp += 0.5 * ( sqr( z ) + sqr( 1.-z ) ) * PDFxByzgluon / z; restmp += CF*2.*( PDFxByzparton - z*PDFxparton )/( z*( 1.-z ) ); restmp -= CF*PDFxByzparton * ( 1.+z )/z; } }else{ assert( parton->id() == ParticleID::g ); double PDFxByzgluon = pdf->xfx( particle , getParticleData( ParticleID::g ) , sqr( fixedScale ) , x/z )*z/x; if ( z > x ){ double factor = CF * ( 1. + sqr( 1.-z ) ) / sqr( z ); for ( int f = -nlp; f <= nlp; ++f ) { if ( f == 0 ) continue; restmp += pdf->xfx( particle , getParticleData( f ) , sqr( fixedScale ) , x/z )*z/x*factor; } } restmp += ( ( 11./6. ) * CA - ( 1./3. )*Nf( history[0].scale ) + 2.*CA*log( 1.-x ) ) *PDFxparton; if ( z > x ) { restmp += 2. * CA * ( PDFxByzgluon - z*PDFxparton ) / ( z*( 1.-z ) ); restmp += 2.* CA *( ( 1.-z )/z - 1. + z*( 1.-z ) ) * PDFxByzgluon / z; } } if ( PDFxparton<1e-8 )restmp = 0.; res += 1*restmp*log( sqr( running/next ) )/PDFxparton*mapz; } return res/number; } double Merger::sumalphaReweightUnlops(){ double res = 0.; if ( !( history[0].node->children().empty() ) ){ res += alphasUnlops( history[0].scale*DSH()->renFac() , history[0].scale*theCurrentME->renFac() )* int( history[0].node->legsize()-N0() ); } // dsig is calculated with fixed alpha_s for ( auto const & hs : history ){ //expansion only to the last step if( !hs.node->parent() )continue; res += alphasUnlops( hs.node->pT()*DSH()->renFac() , history[0].scale ); } return res; } double Merger::sumfillHistoryUnlops(){ double res = 0.; double xiQSh = history[0].node->legsize() == N0()?DSH()->hardScaleFactor():1.; for ( auto const & hs : history ){ if( !hs.node->parent() )continue; doUNLOPS( hs.node , ( hs.node == history[0].node?xiQSh:1. )*hs.scale , hs.node->pT() , history[0].scale , res ); } return res; } MergingFactoryPtr Merger::treefactory(){return theTreeFactory;} void Merger::doinit(){ if ( !DSH()->hardScaleIsMuF() ) { throw Exception() << "Merger: Merging is currently only sensible if we are using the hardScale as MuF." << Exception::abortnow; } } CrossSection Merger::MergingDSigDR() { assert(history.empty()); if ( theFirstNodeMap.find( theCurrentME ) == theFirstNodeMap.end() ) { throw Exception() << "Merger: The current matrix element could not be found." 
<< Exception::abortnow; } NodePtr Node = theFirstNodeMap[theCurrentME]; //DSH()->setCurrentHandler(); DSH()->eventHandler( generator()->eventHandler() ); CrossSection res = ZERO; if( Node->deepHead()->subtractedReal() ){ res = MergingDSigDRRealStandard( Node ); theCurrentMaxLegs = maxLegsNLO(); }else if( Node->deepHead()->virtualContribution() ){ res = MergingDSigDRVirtualStandard( Node ); theCurrentMaxLegs = maxLegsNLO(); }else if (true){ res = MergingDSigDRBornCheapME( Node ); theCurrentMaxLegs = maxLegsLO(); }else if( theGamma!= 1. ){ res = MergingDSigDRBornGamma( Node ); theCurrentMaxLegs = maxLegsLO(); }else{ res = MergingDSigDRBornStandard( Node ); theCurrentMaxLegs = maxLegsLO(); } auto lxc=theCurrentME->lastXCombPtr(); //lxc->lastCentralScale( sqr( Node->runningPt() ) ); lxc->lastShowerScale( sqr( Node->runningPt() ) ); auto lp=theCurrentME->lastXCombPtr()->lastProjector(); if( lp ){ //lp->lastCentralScale( sqr( Node->runningPt() ) ); lp->lastShowerScale( sqr( Node->runningPt() ) ); } if ( res == ZERO ){ history.clear(); return res; } cleanup( Node ); lxc->subProcess( SubProPtr() ); history.clear(); if( std::isnan( double( res/nanobarn ) )|| !std::isfinite( double( res/nanobarn ) ) ){ generator()->logWarning(Exception() << "Merger weight is " << res/nanobarn<< " -> setting to 0" << Exception::warning); return ZERO; } return res; } #include "Herwig/PDF/HwRemDecayer.h" void Merger::CKKW_PrepareSudakov( NodePtr Born , Energy running ){ //cleanup( Born ); tSubProPtr sub = Born->xcomb()->construct(); DSH()->resetPDFs( make_pair( Born->xcomb()->partonBins().first->pdf() , Born->xcomb()->partonBins().second->pdf() ) , Born->xcomb()->partonBins() ); DSH()->setCurrentHandler(); DSH()->currentHandler()->generator()->currentEventHandler( Born->deepHead()->xcomb()->eventHandlerPtr() ); DSH()->currentHandler()->remnantDecayer()->setHadronContent( Born->deepHead()->xcomb()->lastParticles() ); DSH()->eventRecord().clear(); DSH()->eventRecord().slimprepare( sub , dynamic_ptr_cast<tStdXCombPtr>( Born->xcomb() ) , DSH()->pdfs() , Born->deepHead()->xcomb()->lastParticles() ); DSH()->hardScales( sqr( running ) ); } Energy Merger::CKKW_StartScale( NodePtr Born ){ Energy res = generator()->maximumCMEnergy();; if( !Born->children().empty() ){ for ( int i = 0;i<Born->legsize();i++ ){ if ( !Born->nodeME()->mePartonData()[i]->coloured() )continue; for ( int j = i+1;j<Born->legsize();j++ ){ if ( i == j||!Born->nodeME()->mePartonData()[j]->coloured() )continue; res = min( res , sqrt( 2.*Born->nodeME()->lastMEMomenta()[i]*Born->nodeME()->lastMEMomenta()[j] ) ); } } }else{ Born->nodeME()->factory()->scaleChoice()->setXComb( Born->xcomb() ); res = sqrt( Born->nodeME()->factory()->scaleChoice()->renormalizationScale() ); } Born->nodeME()->factory()->scaleChoice()->setXComb( Born->xcomb() ); res = max( res , sqrt( Born->nodeME()->factory()->scaleChoice()->renormalizationScale() ) ); return res; } double Merger::alphasUnlops( Energy next , Energy fixedScale ) { double betaZero = ( 11./6. )*SM().Nc() - ( 1./3. )*Nf( history[0].scale ); return ( betaZero*log( sqr( fixedScale/next ) ) )+ ( theCMWScheme?( ( ( 3.*( 67./18.-1./6.*Constants::pi*Constants::pi )-5./9.*Nf( history[0].scale ) ) ) ):0. ); } double Merger::pdfratio( NodePtr Born , Energy nominator_scale , Energy denominator_scale , int side ){ StdXCombPtr bXc = Born->xcomb(); if( !bXc->mePartonData()[side]->coloured() ) throw Exception() << "Merger: pdf-ratio required for non-coloured particle." 
<< Exception::abortnow; double from , to; if ( side == 0 ){ if ( denominator_scale == nominator_scale ) { return 1.; } from = Born->nodeME()->pdf1( sqr( denominator_scale ) ); to = Born->nodeME()->pdf1( sqr( nominator_scale ) ); if ( ( to < 1e-8||from < 1e-8 )&&( to/from>10000000. ) ){ generator()->logWarning(Exception() << "Merger: pdfratio to = " << to << " from = " << from << Exception::warning); return 0.; } } else{ if ( denominator_scale == nominator_scale ) { return 1.; } from = Born->nodeME()->pdf2( sqr( denominator_scale ) ); to = Born->nodeME()->pdf2( sqr( nominator_scale ) ); if ( ( to < 1e-8||from < 1e-8 )&&( to/from>10000000. ) ){ generator()->logWarning(Exception() << "Merger: pdfratio to = " << to << " from = " << from << Exception::warning); return 0.;} } return to/from; } bool Merger::dosudakov( NodePtr Born , Energy running , Energy next , double& sudakov0_n ) { CKKW_PrepareSudakov( Born , running ); for( DipoleChain const & chain : DSH()->eventRecord().chains() ){ for( Dipole const & dip : chain.dipoles() ){ sudakov0_n *= singlesudakov( dip , next , running , make_pair( true , false ) ); sudakov0_n *= singlesudakov( dip , next , running , make_pair( false , true ) ); if ( sudakov0_n == 0.0 ){ cleanup( Born ); return false; } } } cleanup( Born ); return true; } bool Merger::doUNLOPS( NodePtr Born , Energy running , Energy next , Energy fixedScale , double& UNLOPS ) { CKKW_PrepareSudakov( Born , running ); for( DipoleChain const & chain : DSH()->eventRecord().chains() ){ for( Dipole const & dip : chain.dipoles() ){ UNLOPS += singleUNLOPS( dip , next , running , fixedScale , make_pair( true , false ) );; UNLOPS += singleUNLOPS( dip , next , running , fixedScale , make_pair( false , true ) ); } } cleanup( Born ); return true; } bool Merger::isProjectorStage( NodePtr Born , int pjs ){ return ( pjs == int( ( Born->deepHead()->legsize() - Born->legsize() ) ) ); } void Merger::cleanup( NodePtr Born ) { DSH()->eventRecord().clear(); if( !Born->xcomb()->subProcess() )return; ParticleVector vecfirst = Born->xcomb()->subProcess()->incoming().first->children(); for( auto const & particle : vecfirst ) Born->xcomb()->subProcess()->incoming().first->abandonChild( particle ); ParticleVector vecsecond = Born->xcomb()->subProcess()->incoming().second->children(); for( auto const & particle : vecsecond ) Born->xcomb()->subProcess()->incoming().second->abandonChild( particle ); Born->xcomb()->subProcess( SubProPtr() ); } double Merger::singlesudakov( Dipole dip , Energy next , Energy running , pair<bool , bool> conf ){ double res = 1.; tPPtr emitter = dip.emitter( conf ); tPPtr spectator = dip.spectator( conf ); DipoleSplittingInfo candidate( dip.index( conf ) , conf , dip.emitterX( conf ) , dip.spectatorX( conf ) , emitter , spectator ); if ( DSH()->generators().find( candidate.index() ) == DSH()->generators().end() ) DSH()->getGenerators( candidate.index() ); auto const & gens = DSH()->generators().equal_range( candidate.index() ); for ( auto gen = gens.first; gen != gens.second; ++gen ) { if ( !( gen->first == candidate.index() ) ) continue; Energy dScale = gen->second->splittingKinematics()->dipoleScale( emitter->momentum() , spectator->momentum() ); candidate.scale( dScale ); candidate.continuesEvolving(); Energy ptMax = gen->second->splittingKinematics()->ptMax( candidate.scale() , candidate.emitterX() , candidate.spectatorX() , candidate.index() , *gen->second->splittingKernel() ); candidate.hardPt( min( running , ptMax ) ); if ( candidate.hardPt()>next ){ res *= gen->second->sudakov( 
candidate , next ); } } return res; } double Merger::singleUNLOPS( Dipole dip , Energy next , Energy running , Energy fixedScale , pair<bool , bool> conf ){ double res = 0.; tPPtr emitter = dip.emitter( conf ); tPPtr spectator = dip.spectator( conf ); DipoleSplittingInfo candidate( dip.index( conf ) , conf , dip.emitterX( conf ) , dip.spectatorX( conf ) , emitter , spectator ); if ( DSH()->generators().find( candidate.index() ) == DSH()->generators().end() ) DSH()->getGenerators( candidate.index() ); auto const & gens = DSH()->generators().equal_range( candidate.index() ); for ( auto gen = gens.first; gen != gens.second; ++gen ) { if ( !( gen->first == candidate.index() ) ) continue; Energy dScale = gen->second->splittingKinematics()->dipoleScale( emitter->momentum() , spectator->momentum() ); candidate.scale( dScale ); candidate.continuesEvolving(); Energy ptMax = gen->second-> splittingKinematics()->ptMax( candidate.scale() , candidate.emitterX() , candidate.spectatorX() , candidate.index() , *gen->second->splittingKernel() ); candidate.hardPt( min( running , ptMax ) ); if ( candidate.hardPt()>next ){ res += gen->second->unlops( candidate , next , fixedScale ); } } return res; } void Merger::firstNodeMap( MatchboxMEBasePtr a , NodePtr b ){theFirstNodeMap.insert( make_pair( a , b ) );} map<MatchboxMEBasePtr , NodePtr> Merger::firstNodeMap(){return theFirstNodeMap;} void Merger::setXComb( MatchboxMEBasePtr me , tStdXCombPtr xc ){ theFirstNodeMap[me]->setXComb( xc ); } void Merger::setKinematics( MatchboxMEBasePtr me ){ theFirstNodeMap[me]->setKinematics(); } void Merger::clearKinematics( MatchboxMEBasePtr me ){ theFirstNodeMap[me]->clearKinematics(); } bool Merger::generateKinematics( MatchboxMEBasePtr me , const double * r ){ theFirstNodeMap[me]->firstgenerateKinematics( r , 0 ); if ( theFirstNodeMap[me]->cutStage() == 0 ){ bool inAlphaPS = false; NodePtrVec children = theFirstNodeMap[me]->children(); for ( NodePtr const & child: children ) { treefactory()->setAlphaParameter( theGamma ); inAlphaPS |= theGamma!= 1.?child->dipole()->aboveAlpha():false; treefactory()->setAlphaParameter( 1. 
); } SafeClusterMap temp = theFirstNodeMap[me]->clusterSafe(); for ( auto const & cs: temp ) { if ( !cs.second.first&&!inAlphaPS )return false; } } if ( theFirstNodeMap[me]->cutStage() == 1 ){ SafeClusterMap temp = theFirstNodeMap[me]->clusterSafe(); for ( auto const & sc: temp ) { if ( !sc.second.first && !sc.second.second )return false; } } return true; } void Merger::fillProjectors( MatchboxMEBasePtr me ){ for ( auto const & propair: theFirstNodeMap[me]->Projector() ) { me->lastXCombPtr()->projectors().insert( propair.first , propair.second->xcomb() ); } } pair<bool , bool> Merger::clusterSafe( MatchboxMEBasePtr me , int emit , int emis , int spec ){ return theFirstNodeMap[me]->clusterSafe().find({emit, emis, spec})->second; } //#include "fastjet/ClusterSequence.hh" bool Merger::matrixElementRegion( PVector incoming , PVector outgoing , Energy winnerScale , Energy cutscale ){ //generator()->log() << "\nparticles s" << particles.size() << " " << particles[0] << " " << particles[1] << flush; /* if ( defMERegionByJetAlg && !particles[0]->coloured()&& !particles[1]->coloured() ) { assert( false ); vector<fastjet::PseudoJet> input_particles; for( size_t em = 2; em < particles.size();em++ ){ input_particles.push_back( fastjet::PseudoJet( em->momentum().x()/GeV , em->momentum().y()/GeV , em->momentum().z()/GeV , em->momentum().e()/GeV ) ); } fastjet::JetDefinition jet_def( fastjet::ee_kt_algorithm ); fastjet::ClusterSequence clust_seq( input_particles , jet_def ); size_t n = particles.size()-2; vector<fastjet::PseudoJet> exclusive_jets = clust_seq.exclusive_jets_ycut( ee_ycut ); return n == exclusive_jets.size(); } if ( defMERegionByJetAlg ) { assert( false ); size_t noncol = 0; vector<fastjet::PseudoJet> input_particles; for( size_t em = 2; em < particles.size();em++ ){ if ( em->coloured() ) input_particles.push_back( fastjet::PseudoJet( em->momentum().x()/GeV , em->momentum().y()/GeV , em->momentum().z()/GeV , em->momentum().e()/GeV ) ); else noncol++; } double Rparam = 1.0; fastjet::Strategy strategy = fastjet::Best; fastjet::RecombinationScheme recomb_scheme = fastjet::E_scheme; fastjet::JetDefinition jet_def( fastjet::kt_algorithm , Rparam , recomb_scheme , strategy ); fastjet::ClusterSequence clust_seq( input_particles , jet_def ); size_t n = particles.size()-2-noncol; vector<fastjet::PseudoJet> exclusive_jets = clust_seq.exclusive_jets( pp_dcut ); // generator()->log() << "\nn = " << n << " jets = " << exclusive_jets.size(); //for ( size_t i = 0; i<exclusive_jets.size(); i++ ) { //generator()->log() << "\nn = " << n << " jetspt = " << exclusive_jets[i].perp(); //} return n == exclusive_jets.size(); } */ Energy ptx = Constants::MaxEnergy; bool foundwinnerpt = false; using namespace boost; //FF for( auto const & em : outgoing ){ if ( ! em->coloured() ) continue; for( auto const & emm : outgoing ){ if ( !emm->coloured() ) continue; if ( em == emm ) continue; for( auto const & spe : outgoing ){ if ( !spe->coloured() ) continue; if ( em == spe||emm == spe ) continue; if ( !( em->id() == -emm->id()||emm->id()>6 ) )continue; Energy pt = ZERO; if ( em->momentum().m()<= 0.001*GeV&& emm->momentum().m()<= 0.001*GeV&& spe->momentum().m()<= 0.001*GeV ) { pt = FFLTK->lastPt( em->momentum() , emm->momentum() , spe->momentum() ); }else{ pt = FFMTK->lastPt( em->momentum() , emm->momentum() , spe->momentum() ); } if ( abs( pt-winnerScale )<0.001*GeV ) { foundwinnerpt = true; } ptx = min( ptx , pt ); } } } //FI for( auto const & spe : incoming ){ if ( ! 
spe->coloured() ) continue; for( auto const & emm : outgoing ){ if ( ! emm->coloured() ) continue; for( auto const & em : outgoing ){ if ( ! em->coloured() ) continue; if ( em == emm ) continue; if ( !( em->id() == -emm->id() || emm->id()>6 ) )continue; Energy pt = ZERO; if ( em->momentum().m()<= 0.001*GeV&& emm->momentum().m()<= 0.001*GeV&& spe->momentum().m()<= 0.001*GeV ) { pt = FILTK->lastPt( em->momentum() , emm->momentum() , spe->momentum() ); }else{ pt = FIMTK->lastPt( em->momentum() , emm->momentum() , spe->momentum() ); } if ( abs( pt-winnerScale )<0.001*GeV ) { foundwinnerpt = true; } if( pt > 0.*GeV ) ptx = min( ptx , pt ); } } } //IF for( auto const & em : incoming ){ if ( ! em->coloured() ) continue; for( auto const & emm : outgoing ){ if ( ! emm->coloured() ) continue; for( auto const & spe : outgoing ){ if ( ! spe->coloured() ) continue; if ( emm == spe ) continue; if ( !( em->id()>6|| em->id() == emm->id() ||emm->id()>6 ) )continue; Energy pt = ZERO; if ( em->momentum().m()<= 0.001*GeV&& emm->momentum().m()<= 0.001*GeV&& spe->momentum().m()<= 0.001*GeV ) { //massless pt = IFLTK->lastPt( em->momentum() , emm->momentum() , spe->momentum() ); }else{ //massiv pt = IFMTK->lastPt( em->momentum() , emm->momentum() , spe->momentum() ); } if ( abs( pt-winnerScale )<0.01*GeV ) { foundwinnerpt = true; } ptx = min( ptx , pt ); } } } //II for( auto const & em : incoming ){ if ( ! em->coloured() ) continue; for( auto const & spe : incoming ){ if ( ! spe->coloured() ) continue; if ( em == spe ) continue; for( auto const & emm : outgoing ){ if ( ! emm->coloured() ) continue; if ( !( em->id()>6||em->id() == emm->id() ||emm->id()>6 ) )continue; Energy pt = IILTK->lastPt( em->momentum() , emm->momentum() , spe->momentum() ); if ( abs( pt-winnerScale )<0.01*GeV ) { foundwinnerpt = true; } ptx = min( ptx , pt ); } } } if( !foundwinnerpt ){ generator()->logWarning( Exception() << "Merger: Could not find winner with pt." << "Run with -d2 to get phase space points. " << Exception::warning ); if( Debug::level > 2 ) { generator()->log() << "\nWarning: could not find winner with pt: " << winnerScale/GeV; for( auto const & emm : incoming ){ generator()->log() << "\n" << emm->id() << " " << emm->momentum()/GeV << " " << emm->momentum().m()/GeV << flush; } for( auto const & emm : outgoing ){ generator()->log() <<"\n" << emm->id() << " " << emm->momentum()/GeV << " " << emm->momentum().m()/GeV << flush; } } } return ( ptx>cutscale ) ; } int Merger::M()const{return theTreeFactory->M();} int Merger::N()const{return theTreeFactory->N();} // If needed , insert default implementations of virtual function defined // in the InterfacedBase class here ( using ThePEG-interfaced-impl in Emacs ). 
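// Summary of matrixElementRegion above (descriptive comment only): for the given
// incoming/outgoing particles it loops over all FF , FI , IF and II dipole
// configurations , computes the corresponding tilde-kinematics pt with the
// massless or massive lastPt helpers , and keeps the minimum. The phase space
// point counts as part of the ME region if this minimal pt lies above the cut
// scale; a warning is issued if no configuration reproduces the winner scale
// of the clustering.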
#include "ThePEG/Persistency/PersistentOStream.h" void Merger::persistentOutput( PersistentOStream & os ) const { os << Unlopsweights << theCMWScheme << projected << isUnitarized << isNLOUnitarized << defMERegionByJetAlg << theOpenInitialStateZ << theChooseHistory << theN0 << theOnlyN << weight << weightCB << theGamma << ee_ycut << pp_dcut << theSmearing << ounit( theIRSafePT , GeV ) << ounit( theMergePt , GeV ) << ounit( theCentralMergePt , GeV ) << theMergingJetFinder << theLargeNBasis << FFLTK << FILTK << IFLTK << IILTK << FFMTK << FIMTK << IFMTK << theDipoleShowerHandler << theTreeFactory << theFirstNodeMap; } #include "ThePEG/Persistency/PersistentIStream.h" void Merger::persistentInput( PersistentIStream & is , int ) { is >> Unlopsweights >> theCMWScheme >> projected >> isUnitarized >> isNLOUnitarized >> defMERegionByJetAlg >> theOpenInitialStateZ >> theChooseHistory >> theN0 >> theOnlyN >> weight >> weightCB >> theGamma >> ee_ycut >> pp_dcut >> theSmearing >> iunit( theIRSafePT , GeV ) >> iunit( theMergePt , GeV ) >> iunit( theCentralMergePt , GeV ) >> theMergingJetFinder >> theLargeNBasis >> FFLTK >> FILTK >> IFLTK >> IILTK >> FFMTK >> FIMTK >> IFMTK >> theDipoleShowerHandler >> theTreeFactory >> theFirstNodeMap ; } // *** Attention *** The following static variable is needed for the type // description system in ThePEG. Please check that the template arguments // are correct ( the class and its base class ) , and that the constructor // arguments are correct ( the class name and the name of the dynamically // loadable library where the class implementation can be found ). #include "ThePEG/Utilities/DescribeClass.h" DescribeClass<Merger , Herwig::MergerBase> describeHerwigMerger( "Herwig::Merger" , "HwDipoleShower.so" ); //ClassDescription<Merger> Merger::initMerger; // Definition of the static class description member. #include "ThePEG/Interface/ClassDocumentation.h" #include "ThePEG/Interface/Parameter.h" #include "ThePEG/Interface/Reference.h" #include "ThePEG/Interface/Switch.h" void Merger::Init() { static ClassDocumentation<Merger> documentation ( "Merger." ); static Reference<Merger , DipoleShowerHandler> interfaceShowerHandler ( "DipoleShowerHandler" , "" , &Merger::theDipoleShowerHandler , false , false , true , true , false ); static Switch<Merger , bool> interfaceUnlopsweights ( "Unlopsweights" , "" , &Merger::Unlopsweights , false , false , false ); static SwitchOption interfaceUnlopsweightsYes ( interfaceUnlopsweights , "Yes" , "" , true ); static SwitchOption interfaceUnlopsweightsNo ( interfaceUnlopsweights , "No" , "" , false ); static Switch<Merger , bool> interfacetheCMWScheme ( "CMWScheme" , "" , &Merger::theCMWScheme , false , false , false ); static SwitchOption interfacetheCMWSchemeYes ( interfacetheCMWScheme , "Yes" , "" , true ); static SwitchOption interfacetheCMWSchemeNo ( interfacetheCMWScheme , "No" , "" , false ); static Parameter<Merger , Energy> interfaceMergerScale ( "MergingScale" , "The MergingScale." , &Merger::theCentralMergePt , GeV , 20.0*GeV , 0.0*GeV , 0*GeV , false , false , Interface::lowerlim ); static Reference<Merger , MergingFactory> interfaceMergerHelper ( "MergingFactory" , "" , &Merger::theTreeFactory , false , false , true , true , false ); static Parameter<Merger , double> interfacedcut ( "pp_dcut" , "The MergingScale." , &Merger::pp_dcut , 5.0 , 0.0 , 0 , false , false , Interface::lowerlim ); static Parameter<Merger , double> interfaceycut ( "ee_ycut" , "The MergingScale." 
, &Merger::ee_ycut , 0.2 , 0.0 , 0 , false , false , Interface::lowerlim ); static Parameter<Merger , double> interfacegamma ( "gamma" , "gamma parameter." , &Merger::theGamma , 1.0 , 0.0 , 0 , false , false , Interface::lowerlim ); static Reference<Merger , JetFinder> interfaceMergingJetFinder ( "MergingJetFinder" , "A reference to the jet finder used in Merging." , &Merger::theMergingJetFinder , false , false , true , false , false ); static Reference<Merger , ColourBasis> interfaceLargeNBasis ( "LargeNBasis" , "Set the large-N colour basis implementation." , &Merger::theLargeNBasis , false , false , true , true , false ); static Switch<Merger , bool> interfacedefMERegionByJetAlg ( "MERegionByJetAlg" , "" , &Merger::defMERegionByJetAlg , false , false , false ); static SwitchOption interfacedefMERegionByJetAlgYes ( interfacedefMERegionByJetAlg , "Yes" , "" , true ); static SwitchOption interfacedefMERegionByJetAlgNo ( interfacedefMERegionByJetAlg , "No" , "" , false ); static Switch<Merger , bool> interfaceOpenInitialSateZ ( "OpenInitialStateZ" , "" , &Merger::theOpenInitialStateZ , false , false , false ); static SwitchOption interfaceOpenInitialSateZYes ( interfaceOpenInitialSateZ , "Yes" , "" , true ); static SwitchOption interfaceOpenInitialSateZNo ( interfaceOpenInitialSateZ , "No" , "" , false ); static Parameter<Merger , Energy> interfaceIRSafePT ( "IRSafePT" , "Set the pt for which a matrixelement should be treated as IR-safe." , &Merger::theIRSafePT , GeV , 0.0 * GeV , ZERO , Constants::MaxEnergy , true , false , Interface::limited ); interfaceIRSafePT.setHasDefault( false ); static Parameter<Merger , double> interfacemergePtsmearing( "MergingScaleSmearing" , "Set the percentage the merging pt should be smeared." , &Merger::theSmearing , 0. , 0. , 0.0 , 0.5 , true , false , Interface::limited ); static Parameter<Merger , int> interfacechooseHistory( "chooseHistory" , "different ways of choosing the history" , &Merger::theChooseHistory , 3 , -1 , 0 , false , false , Interface::lowerlim ); } diff --git a/Shower/Dipole/Merging/Merger.h b/Shower/Dipole/Merging/Merger.h --- a/Shower/Dipole/Merging/Merger.h +++ b/Shower/Dipole/Merging/Merger.h @@ -1,401 +1,398 @@ /// -*- C++ -*- // /// Merger.h is a part of Herwig - A multi-purpose Monte Carlo event generator /// Copyright (C) 2002-2007 The Herwig Collaboration // /// Herwig is licenced under version 2 of the GPL, see COPYING for details. /// Please respect the MCnet academic guidelines, see GUIDELINES for details. // #ifndef HERWIG_Merger_H #define HERWIG_Merger_H // /// This is the declaration of the Merger class. 
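/// The Merger provides the multiplicity-merged cross sections ( MergingDSigDR )
/// used together with the MergingFactory and the DipoleShowerHandler: tree-level ,
/// virtual and real-emission contributions are reweighted with Sudakov factors ,
/// alpha_s ratios and pdf ratios obtained from a reconstructed shower history ,
/// optionally unitarised; the first-order expansion of these factors enters the
/// NLO ( UNLOPS ) weights.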
// #include "MergingFactory.fh" #include "Node.fh" #include "ThePEG/Handlers/HandlerBase.h" #include "Herwig/Shower/Dipole/DipoleShowerHandler.h" //#include "Herwig/Shower/Dipole/Base/DipoleSplittingGenerator.h" #include "Herwig/MatrixElement/Matchbox/Base/MergerBase.h" #include "Herwig/MatrixElement/Matchbox/Phasespace/FFLightTildeKinematics.h" #include "Herwig/MatrixElement/Matchbox/Phasespace/IFLightTildeKinematics.h" #include "Herwig/MatrixElement/Matchbox/Phasespace/FFMassiveTildeKinematics.h" #include "Herwig/MatrixElement/Matchbox/Phasespace/IFMassiveTildeKinematics.h" #include "Herwig/MatrixElement/Matchbox/Phasespace/FILightTildeKinematics.h" #include "Herwig/MatrixElement/Matchbox/Phasespace/IILightTildeKinematics.h" #include "Herwig/MatrixElement/Matchbox/Phasespace/FIMassiveTildeKinematics.h" #include "ThePEG/Cuts/JetFinder.h" #include "ThePEG/Cuts/Cuts.h" namespace Herwig { using namespace ThePEG; class Merger; ThePEG_DECLARE_POINTERS(Merger , MergerPtr ); typedef vector<NodePtr> NodePtrVec; //definition of a history step struct HistoryStep { - HistoryStep(NodePtr cn = NodePtr(), double w =0., Energy sc=ZERO): - node(cn),weight(w),scale(sc){} - /// the cluster node defining this step /// containing the full information NodePtr node; /// current sudakov weight of the history double weight; /// current scale of the history Energy scale; }; typedef vector< HistoryStep > History; typedef multimap<DipoleIndex, Ptr<DipoleSplittingGenerator>::ptr> GeneratorMap2; /** * \ingroup DipoleShower * \author Johannes Bellm * * \brief Merger handles the Merger ....... //TODO . * * @see \ref MergerInterfaces "The interfaces" * defined for Merger. */ class Merger: public MergerBase { friend class MergingFactory; friend class Node; //friend class MScale; public: /** @name Standard constructors and destructors. */ //@{ /** * The default constructor. */ Merger(); /** * The destructor. */ virtual ~Merger(); //@} public: // define the ME region for a particle vector. bool matrixElementRegion(PVector incoming, PVector outgoing, Energy winnerScale=ZERO, Energy cutscale=ZERO); /// return the current merging scale, /// gets smeared around the central merging scale in generate kinematics. 
Energy mergingScale()const{return theMergePt;} /// return the current merging pt (should be unified with mergingScale) Energy mergePt()const {return theMergePt;} /// legsize of highest process with NLO corrections int M()const; /// legsize of the highest LO merged process int N()const; /// legsize of the production process int N0()const{return theN0;} /// cross section of as given by the merging CrossSection MergingDSigDR() ; /// ***** virtual functions of the base class ****/// /// set the current xcomb, called from ME void setXComb(MatchboxMEBasePtr, tStdXCombPtr); /// set kinematics, called from ME void setKinematics(MatchboxMEBasePtr); /// clear kinematics, called from ME void clearKinematics(MatchboxMEBasePtr); /// generate kinematics, called from ME bool generateKinematics(MatchboxMEBasePtr, const double *); /// fill the projector the subprocess is build from void fillProjectors(MatchboxMEBasePtr); /// return true true if a given ME phase space point is TODO pair<bool, bool> clusterSafe(MatchboxMEBasePtr, int, int, int); /// return the current maximum legs, the shower should veto size_t maxLegs() const {return theCurrentMaxLegs;} /// set the current ME void setME(MatchboxMEBasePtr me){theCurrentME=me;} protected: /// the merging factory needs to set the legsize of the production process void N0(int n){ theN0=n;} /// return the large-N basis (TODO: implement check if born ME works with the choice) Ptr<ColourBasis>::ptr largeNBasis(){return theLargeNBasis;} /// smeare the merging pt void smeareMergePt(){theMergePt=centralMergePt()*(1.+0.*(-1.+2.*UseRandom::rnd())*smear());} /// true if the phase space for initial emissions should not be restricted in z. bool openInitialStateZ(){return theOpenInitialStateZ;} private: /// calculate a single sudakov step for a given dipole double singlesudakov(Dipole, Energy, Energy, pair<bool, bool>); /// calculate the sudakov supression for a clusternode between /// the current running scale and next scale bool dosudakov(NodePtr Born, Energy running, Energy next, double& sudakov0_n); /// cleanup void cleanup(NodePtr); /// return true if the cluster node has the matching number of /// legs to the current projector stage bool isProjectorStage( NodePtr , int ); /** * Calculate the staring scale: * if Node is part of the production process, calculate according to the * scale choice object in the merging scale objekt, else * return max(scale as scalechoice , min(Mass(i, j))) */ Energy CKKW_StartScale(NodePtr); /// prepare the sudakov calculation void CKKW_PrepareSudakov(NodePtr, Energy); /// number of active flavours as given by the shower double Nf(Energy scale)const{return DSH()->Nf(scale);} /// pointer to the factory MergingFactoryPtr treefactory(); /// map from ME to first clusternode map<MatchboxMEBasePtr, NodePtr> firstNodeMap(); /// set the current merging pt, smeared in generate kinematics void mergePt(Energy x) {theMergePt = x;} /// return the central merging pt Energy centralMergePt() {return theCentralMergePt;} private: /// calculate the history weighted born cross section CrossSection MergingDSigDRBornStandard(NodePtr Node); /// calculate the history weighted born cross section CrossSection MergingDSigDRBornCheapME(NodePtr Node); /** * calculate the history weighted born cross section * add the difference of IPK with and without alpha parameter * subtract the dipoles above the alpha parameter */ CrossSection MergingDSigDRBornGamma(NodePtr Node); /// calculate the history weighted virtual contribution CrossSection 
MergingDSigDRVirtualStandard(NodePtr Node); /** * calculate the history weighted real contribution * splitted into 3 differnt contibutions */ CrossSection MergingDSigDRRealStandard(NodePtr Node); /// calculate the history weighted real contribution /// all dipoles above: /// N*(R rnd(i)-Dip_i) history_i U(\phi^n_i) CrossSection MergingDSigDRRealAllAbove(NodePtr Node); /// calculate the history weighted real contribution /// not all dipoles above: /// (R - sum PS_i) history_rnd U(\phi^n+1) CrossSection MergingDSigDRRealBelowSubReal(NodePtr Node); /// calculate the history weighted real contribution /// not all dipoles above: /// rnd(i)-> N*(PS_i - Dip_i) history_i U(\phi^n_i) CrossSection MergingDSigDRRealBelowSubInt(NodePtr Node); /// max legssize the shower should veto for LO size_t maxLegsLO() const {return N0()+N();} /// Calculate the LO partonic cross section. /// if diffalpha != 1, add the difference of IPK(1)-IPK(diffalpha) CrossSection TreedSigDR(Energy startscale, NodePtr, double diffalpha=1.); /// fill the projecting xcomb Energy fillProjector(int); /// fill the history, including calculation of sudakov supression void fillHistory(Energy, NodePtr, NodePtr ); /// calculate the pdf ratio for the given clusternode double pdfratio(NodePtr, Energy, Energy, int); /// return the pdf-ratio reweight for the history double pdfReweight(); /// return the alpha_s reweight for the history double alphaReweight(); /// max legssize the shower should veto for NLO size_t maxLegsNLO()const {return N0()+M();} /// calculate the virtual contribution. CrossSection LoopdSigDR(Energy startscale, NodePtr); /// calculate alpha_s expansion of the pdf-ratios double sumpdfReweightUnlops(); /// calculate alpha_s expansion of the alpha_s-ratios, including K_g double sumalphaReweightUnlops(); /// calculate alpha_s expansion of the sudakov exponents double sumfillHistoryUnlops(); /// calculate alpha_s expansion of the single step alpha_s-ratio, including K_g double alphasUnlops( Energy next, Energy fixedScale); /// calculate alpha_s expansion of the single step pdf-ratio double pdfUnlops(NodePtr, int, Energy, Energy, double, int, Energy); /// calculate alpha_s expansion of the single step sudakov exponent bool doUNLOPS(NodePtr Born, Energy running, Energy next, Energy fixedScale, double& UNLOPS); /// calculate alpha_s expansion of the single dipole sudakov exponent double singleUNLOPS(Dipole, Energy, Energy, Energy, pair<bool, bool>); //alpha_s as given in the shower double as(Energy q){return DSH()->as(q);} //return the dipole shower handler DipoleShowerHandlerPtr DSH(){return theDipoleShowerHandler;} //return the const dipole shower handler DipoleShowerHandlerPtr DSH()const{return theDipoleShowerHandler;} /// insert map from ME to first clusternode void firstNodeMap(MatchboxMEBasePtr, NodePtr); /// the gamma parameter to subtract dipoles above a alpha parameter /// and subtract the corresponding IPK operator double gamma()const{return theGamma;} /// history choice: weighted history choice int chooseHistory()const {return theChooseHistory;} /// the smearing factor for the merging scale double smear()const{return theSmearing;} /// flag to tell if ME region shoulcd be defined by jet algorithm /// currently not implemented bool MERegionByJetAlg(){return defMERegionByJetAlg;} /// return the large-N colour basis void largeNBasis(Ptr<ColourBasis>::ptr x){theLargeNBasis=x;} private: /// calculate the history expansion bool Unlopsweights; /// use CMW scheme bool theCMWScheme; /// true if current point should be projected 
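 /// ( set in decideClustering: a projected point is clustered one step and
 /// enters the lower-multiplicity unitarisation with negative relative weight )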
bool projected; /// true if LO cross sections should be unitarised bool isUnitarized; /// true if NLO contributions should be unitarised bool isNLOUnitarized; /// define ME region by jet algorithm bool defMERegionByJetAlg; /// no z-restrictions on initial state emissions in clustering bool theOpenInitialStateZ; /// history weight choice int theChooseHistory; /// legsize of production process int theN0; /// calculate only the N particle contribution int theOnlyN; /// the current maxlegs (either LO or NLO maxlegs) size_t theCurrentMaxLegs; /// current weight and weight of clustered born double weight, weightCB; /// subtract the dipole contribution above a given gamma double theGamma; /// if ME region defined by jet algorithm, this is the y cut for ee double ee_ycut; /// if ME region defined by jet algorithm, this is the d cut for pp double pp_dcut; /// smearing factor for merging scale double theSmearing; /// cutoff for real emission contribution Energy theIRSafePT; /// current merging scale Energy theMergePt; /// central merging scale Energy theCentralMergePt; /// current cluster history including Sudakov weights History history; /// if ME region defined by jet algorithm, this is the jetfinder Ptr<JetFinder>::ptr theMergingJetFinder; /// pointer to the large-N basis Ptr<ColourBasis>::ptr theLargeNBasis; /// current ME MatchboxMEBasePtr theCurrentME; /// Tilde kinematics pointers, only to use lastPt(emitter, emission, spectator) Ptr<FFLightTildeKinematics>::ptr FFLTK; Ptr<FILightTildeKinematics>::ptr FILTK; Ptr<IFLightTildeKinematics>::ptr IFLTK; Ptr<IILightTildeKinematics>::ptr IILTK; Ptr<FFMassiveTildeKinematics>::ptr FFMTK; Ptr<FIMassiveTildeKinematics>::ptr FIMTK; Ptr<IFMassiveTildeKinematics>::ptr IFMTK; //pointer to the shower handler DipoleShowerHandlerPtr theDipoleShowerHandler; /// pointer to the MergingFactory MergingFactoryPtr theTreeFactory; /// map from ME to first Node map<MatchboxMEBasePtr, NodePtr> theFirstNodeMap; /// map from ME to highest ME weight so far map<NodePtr, CrossSection> theHighMeWeightMap; protected: /** @name Standard Interfaced functions. */ //@{ /** * Initialize this object after the setup phase before saving an * EventGenerator to disk. * @throws InitException if object could not be initialized properly. */ virtual void doinit(); //@} public: /** @name Functions used by the persistent I/O system. */ //@{ /** * Function used to write out object persistently. * @param os the persistent output stream written to. */ void persistentOutput(PersistentOStream & os) const; /** * Function used to read in object persistently. * @param is the persistent input stream read from. * @param version the version number of the object when written. */ void persistentInput(PersistentIStream & is, int version); //@} /** * The standard Init function used to initialize the interfaces. * Called exactly once for each class by the class description system * before the main function starts or * when this class is dynamically loaded. */ static void Init(); protected: /** @name Clone Methods. */ //@{ /** * Make a simple clone of this object. * @return a pointer to the new object. */ virtual IBPtr clone() const; /** Make a clone of this object, possibly modifying the cloned object * to make it sane. * @return a pointer to the new object. */ virtual IBPtr fullclone() const; //@} // If needed, insert declarations of virtual function defined in the // InterfacedBase class here (using ThePEG-interfaced-decl in Emacs). private: /** * The assignment operator is private and must never be called.
* In fact, it should not even be implemented. */ Merger & operator=(const Merger &); }; } #endif /* HERWIG_Merger_H */ diff --git a/Shower/Dipole/Merging/MergingFactory.cc b/Shower/Dipole/Merging/MergingFactory.cc --- a/Shower/Dipole/Merging/MergingFactory.cc +++ b/Shower/Dipole/Merging/MergingFactory.cc @@ -1,725 +1,724 @@ // -*- C++ -*- // // MergingFactory.cc is a part of Herwig - A multi-purpose Monte Carlo event generator // Copyright (C) 2002-2012 The Herwig Collaboration // // Herwig is licenced under version 2 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. // // // This is the implementation of the non-inlined, non-templated member // functions of the MergingFactory class. // #include "MergingFactory.h" #include "Node.h" using namespace Herwig; using std::ostream_iterator; -MergingFactory::MergingFactory() :MatchboxFactory(), -theonlyNLOParts(false), -theonlyvirtualNLOParts(false), -theonlyrealNLOParts(false), -theonlyUnlopsweights(false), -theunitarizeNLOParts(true), -calc_born(true), -calc_virtual(true), -calc_real(true), -unitarized(true), -NLOunitarized(true), -ransetup(false), -theonlymulti(-1), -theonlysub(-1), -divideSub(-1), -divideSubNumber(-1) -{} - -MergingFactory::~MergingFactory(){} - IBPtr MergingFactory::clone() const { return new_ptr(*this); } IBPtr MergingFactory::fullclone() const { return new_ptr(*this); } void MergingFactory::doinit(){ MatchboxFactory::doinit(); if (subProcessGroups()) { throw InitException() << "There are no subprocess groups in merging!!!"; } - - if(!(!(divideSub!=-1&&divideSubNumber==-1)||!(divideSub==-1&&divideSubNumber!=-1))){ + // two permitted conditions + const bool both_neg_one = divideSub == -1 && divideSubNumber == -1; + const bool nobody_neg_one = divideSub != -1 && divideSubNumber != -1; + + if (!( both_neg_one || nobody_neg_one )){ throw InitException() << "dividing the subprocesses is not performed correctly."; } MH()->largeNBasis()->factory(this); } -void MergingFactory::fill_amplitudes() { +void MergingFactory::fillMEsMap() { olpProcesses().clear(); + assert( getProcesses().size() == 1 ); processMap[0] = getProcesses()[0]; - if(MH()->M()>=0) setHighestVirt(processMap[0].size()+MH()->M()); + if ( MH()->M() >= 0 ) + setHighestVirt(processMap[0].size()+MH()->M()); MH()->N0(processMap[0].size()); for ( int i = 1 ; i <= MH()->N() ; ++i ) { processMap[i] = processMap[i - 1]; processMap[i].push_back("j"); } for ( int i = 0 ; i <= MH()->N() ; ++i ) { - vector<MatchboxMEBasePtr> ames = makeMEs(processMap[i], orderInAlphaS() + i,i<MH()->M()+1); + const bool below_maxNLO = i < MH()->M() + 1; + vector<MatchboxMEBasePtr> ames + = makeMEs(processMap[i], orderInAlphaS() + i, below_maxNLO ); copy(ames.begin(), ames.end(), back_inserter(pureMEsMap()[i])); } } #include "Herwig/MatrixElement/Matchbox/Base/DipoleRepository.h" void MergingFactory::prepare_BV(int i) { // check if we have virtual contributions bool haveVirtuals = true; + for ( auto born : pureMEsMap()[i]) { prepareME(born); if ( born->isOLPTree() ) { int id = orderOLPProcess(born->subProcess(), born->matchboxAmplitude(), ProcessType::treeME2); born->olpProcess(ProcessType::treeME2,id); id = orderOLPProcess(born->subProcess(), born->matchboxAmplitude(), ProcessType::colourCorrelatedME2); born->olpProcess(ProcessType::colourCorrelatedME2,id); bool haveGluon = false; - for ( const auto p : born->subProcess().legs ) + for ( const auto & p : born->subProcess().legs ) if ( p->id() == 21 ) { haveGluon = true; break; } if ( haveGluon ) {
id = orderOLPProcess(born->subProcess(), born->matchboxAmplitude(), ProcessType::spinColourCorrelatedME2); born->olpProcess(ProcessType::spinColourCorrelatedME2,id); } } - if ( born->isOLPLoop() && i <= MH()->M()) { + if ( born->isOLPLoop() && i <= MH()->M() ) { int id = orderOLPProcess(born->subProcess(), born->matchboxAmplitude(), ProcessType::oneLoopInterference); born->olpProcess(ProcessType::oneLoopInterference,id); if ( !born->onlyOneLoop() && born->needsOLPCorrelators() ) { id = orderOLPProcess(born->subProcess(), born->matchboxAmplitude(), ProcessType::colourCorrelatedME2); born->olpProcess(ProcessType::colourCorrelatedME2,id); } } haveVirtuals &= born->haveOneLoop(); } - - // check the additional insertion operators - if ( !theVirtualsMap[i].empty() ) haveVirtuals = true; - - for ( auto & virt : theVirtualsMap[i] ) virt->factory(this); - + // check for consistent conventions on virtuals, if we are to include MH()->M() - if(!(i > MH()->M()||haveVirtuals))throw InitException() << MH()->M()<<" NLO corrections requested,\n but no virtual contributions are found."; + if (!(i > MH()->M()||haveVirtuals)) + throw InitException() + << MH()->M() + << " NLO corrections requested,\n" + << "but no virtual contributions are found."; for ( auto & virt : DipoleRepository::insertionIOperators(dipoleSet()) ) - virt->factory(this); + virt->factory(this); for ( auto & virt : DipoleRepository::insertionPKOperators(dipoleSet()) ) - virt->factory(this); + virt->factory(this); } void MergingFactory::prepare_R(int i) { for ( auto real : pureMEsMap()[i]) - prepareME(real); + prepareME(real); } #include "Herwig/MatrixElement/Matchbox/Base/DipoleRepository.h" -void MergingFactory::getVirtuals(MatchboxMEBasePtr nlo,int i){ +void MergingFactory::getVirtuals(MatchboxMEBasePtr nlo, bool clone){ - for ( auto OV : theVirtualsMap[i] ) - if ( OV->apply(nlo->diagrams().front()->partons()) ){ - nlo->virtuals().push_back(OV); - } - + const auto & partons = nlo->diagrams().front()->partons(); + for ( auto I : DipoleRepository::insertionIOperators(dipoleSet()) ) - if ( I->apply(nlo->diagrams().front()->partons()) ){ - nlo->virtuals().push_back(I); + if ( I->apply(partons) ){ + auto myI = I; + if ( clone ) + myI = I->cloneMe(); + nlo->virtuals().push_back(myI); } for ( auto PK : DipoleRepository::insertionPKOperators(dipoleSet()) ) - if ( PK->apply(nlo->diagrams().front()->partons()) ){ - nlo->virtuals().push_back(PK); + if ( PK->apply(partons) ){ + auto myPK = PK; + if ( clone ) + myPK = PK->cloneMe(); + nlo->virtuals().push_back(myPK); } } void MergingFactory::pushB(MatchboxMEBasePtr born, int i) { MatchboxMEBasePtr bornme = born->cloneMe(); bornme->maxMultCKKW(1); bornme->minMultCKKW(0); string pname = fullName() + "/" + bornme->name() + ".Born"; - if ( !(generator()->preinitRegister(bornme, pname)) ) throw InitException() << "Born ME " << pname << " already existing."; + if ( !(generator()->preinitRegister(bornme, pname)) ) + throw InitException() + << "Born ME " + << pname + << " already existing."; - getVirtuals(bornme,i); + getVirtuals(bornme,false); - NodePtr clusternode = new_ptr(Node(bornme, 0,MH())); + NodePtr clusternode = new_ptr(Node(bornme, 0, MH())); clusternode->deepHead(clusternode); MH()->firstNodeMap(bornme,clusternode); bornme->factory(this); bornme->merger(MH()); + - - - - vector<NodePtr> temp; - vector<NodePtr> temp1; - temp.push_back(clusternode); + vector<NodePtr> current = {{clusternode}}; + vector<NodePtr> children; + unsigned int k = 1; - while (thePureMEsMap[i - k].size() != 0) { - for ( auto 
tmp : temp){//j + while ( ! thePureMEsMap[i - k].empty() ) { + for ( auto tmp : current ){//j tmp->birth(thePureMEsMap[i - k]); - for ( auto tmpchild : tmp->children()){//m + for ( auto tmpchild : tmp->children() ) {//m if ( i <= MH()->M() ) { - - for ( auto I : DipoleRepository::insertionIOperators(dipoleSet()) ) - if ( I->apply(tmpchild->nodeME()->diagrams().front()->partons()) ){ - Ptr<MatchboxInsertionOperator>::ptr myI = I->cloneMe(); - tmpchild->nodeME()->virtuals().push_back(myI); - } - for ( auto I : DipoleRepository::insertionPKOperators(dipoleSet()) ) - if ( I->apply(tmpchild->nodeME()->diagrams().front()->partons()) ){ - Ptr<MatchboxInsertionOperator>::ptr myI = I->cloneMe(); - tmpchild->nodeME()->virtuals().push_back(myI); - } - + getVirtuals(tmpchild->nodeME(),true); tmpchild->nodeME()->noOneLoop(); } - temp1.push_back(tmpchild); + children.push_back(tmpchild); } } - temp = temp1; - temp1.clear(); - k++; + current = children; + children.clear(); + ++k; } - if(MH()->N()>i) - bornme->needsCorrelations(); - else bornme->needsNoCorrelations(); + if ( MH()->N() > i ) + bornme->needsCorrelations(); + else + bornme->needsNoCorrelations(); bornme->cloneDependencies(); MEs().push_back(bornme); } void MergingFactory::pushV(MatchboxMEBasePtr born, int i) { MatchboxMEBasePtr nlo = born->cloneMe(); string pname = fullName() + "/" + nlo->name() + ".Virtual"; - if ( !(generator()->preinitRegister(nlo, pname)) ) throw InitException() << "Virtual ME " << pname << " already existing."; + if ( !(generator()->preinitRegister(nlo, pname)) ) + throw InitException() + << "Virtual ME " + << pname + << " already existing."; ////////////////////////////////////NLO/////////////////////////// nlo->virtuals().clear(); - getVirtuals(nlo , i); + getVirtuals(nlo , false); if ( nlo->virtuals().empty() ) - throw InitException() << "No insertion operators have been found for " - << born->name() << "\n"; + throw InitException() + << "No insertion operators have been found for " + << born->name() + << "\n"; nlo->doOneLoopNoBorn(); ////////////////////////////////////NLO/////////////////////////// NodePtr clusternode = new_ptr(Node(nlo, 0,MH())); clusternode->deepHead(clusternode); clusternode->virtualContribution(true); MH()->firstNodeMap(nlo,clusternode); nlo->merger(MH()); - vector<NodePtr> temp; - vector<NodePtr> temp1; - temp.push_back(clusternode); + vector<NodePtr> current = {{clusternode}}; + vector<NodePtr> children; unsigned int k = 1; - while (thePureMEsMap[i - k].size() != 0) { - for ( auto tmp : temp ){ + while ( ! 
thePureMEsMap[i - k].empty() ) { + for ( auto tmp : current ){ tmp->birth(thePureMEsMap[i - k]); for ( auto tmpchild : tmp->children()) - temp1.push_back(tmpchild); + children.push_back(tmpchild); } - temp = temp1; - k++; + current = children; + children.clear(); + ++k; } if ( nlo->isOLPLoop() ) { int id = orderOLPProcess(nlo->subProcess(), born->matchboxAmplitude(), ProcessType::oneLoopInterference); nlo->olpProcess(ProcessType::oneLoopInterference,id); if ( !nlo->onlyOneLoop() && nlo->needsOLPCorrelators() ) { id = orderOLPProcess(nlo->subProcess(), born->matchboxAmplitude(), ProcessType::colourCorrelatedME2); nlo->olpProcess(ProcessType::colourCorrelatedME2,id); } } nlo->needsCorrelations(); nlo->cloneDependencies(); MEs().push_back(nlo); } -void MergingFactory::pushProR(MatchboxMEBasePtr born, int i) { +void MergingFactory::pushR(MatchboxMEBasePtr born, int i) { MatchboxMEBasePtr bornme = born->cloneMe(); string pname = fullName() + "/" + bornme->name() + ".Real"; - if ( !(generator()->preinitRegister(bornme, pname)) ) throw InitException() << "Subtracted ME " << pname << " already existing."; + if ( !(generator()->preinitRegister(bornme, pname)) ) + throw InitException() + << "Subtracted ME " + << pname + << " already existing."; - NodePtr clusternode = new_ptr(Node(bornme, 1,MH())); + NodePtr clusternode = new_ptr(Node(bornme, 1, MH())); clusternode->deepHead(clusternode); clusternode->subtractedReal(true); MH()->firstNodeMap(bornme,clusternode); bornme->merger(MH()); - vector<NodePtr> temp; - vector<NodePtr> temp1; - temp.push_back(clusternode); - + vector<NodePtr> current = {{clusternode}}; + vector<NodePtr> children; + unsigned int k = 1; - while (thePureMEsMap[i - k].size() != 0) { - for ( auto tmp : temp ){ + while ( ! thePureMEsMap[i - k].empty() ) { + for ( auto tmp : current ){ tmp->birth(thePureMEsMap[i - k]); - for ( auto tmpchild : tmp->children()) temp1.push_back(tmpchild); + for ( auto tmpchild : tmp->children()) + children.push_back(tmpchild); } - temp = temp1; - k++; + current = children; + children.clear(); + ++k; } + - - - if(MH()->N()>i) - bornme->needsCorrelations(); - else bornme->needsNoCorrelations(); + if ( MH()->N() > i ) + bornme->needsCorrelations(); + else + bornme->needsNoCorrelations(); bornme->cloneDependencies(pname); MEs().push_back(bornme); } -void MergingFactory::orderOLPs() { - -} +// MergingFactory should never order OLPs here, +// they're done elsewhere. +void MergingFactory::orderOLPs() {} #include "ThePEG/Utilities/StringUtils.h" vector<string> MergingFactory::parseProcess(string in) { vector<string> process = StringUtils::split(in); if ( process.size() < 3 ) - throw Exception() << "MatchboxFactory: Invalid process." - << Exception::runerror; + throw Exception() + << "MatchboxFactory: Invalid process." + << Exception::runerror; for ( string & p : process) { p = StringUtils::stripws(p); } - theN=0; - bool prodprocess=true; - vector<string> pprocess; - for (string const p : process) { + theN = 0; + bool prodprocess = true; + vector<string> result; + for ( const string & p : process ) { if ( p == "->" ) - continue; + continue; if (p=="[") { - prodprocess=false; - }else if (p=="]") { - prodprocess=false; + prodprocess = false; + } else if (p=="]") { + prodprocess = false; + // TODO what if there's stuff after the bracket? 
+ assert( p == process.back() ); break; - }else if (p=="[j") { - prodprocess=false; - theN++; - }else if (p=="j"&&!prodprocess) { - theN++; - prodprocess=false; - }else if (p=="j]") { - theN++; - prodprocess=false; + } else if (p=="[j") { + prodprocess = false; + ++theN; + } else if (p=="j" && !prodprocess) { + ++theN; + prodprocess = false; + } else if (p=="j]") { + ++theN; + prodprocess = false; + // TODO what if there's stuff after the bracket? + assert( p == process.back() ); break; - }else if(prodprocess){ - pprocess.push_back(p); - }else{ + } else if ( prodprocess ) { + result.push_back(p); + } else { throw InitException() - <<p<<" in the brackets of the process definition is not recognized." - << "\n Only j as jets are recognized."; + << "Unknown particle class \"" << p << '"' + << " in the process definition merging bracket.\n" + << "Only jets (\"j\") are supported at the moment."; } } - return pprocess; + return result; } #include <boost/progress.hpp> void MergingFactory::setup() { useMe(); if(!ransetup){ generator()->log() <<"\nStarting merging setup.\n\n"; olpProcesses().clear(); externalAmplitudes().clear(); setFixedCouplings(true); setFixedQEDCouplings(true); // rebind the particle data objects for ( auto & g : particleGroups()) { for ( auto & p : g.second) { p = getParticleData(p->id()); } } const PDVector& partons = particleGroups()["j"]; unsigned int nl = 0; for ( const auto p : partons ) { if ( abs(p->id()) < 7 && p->hardProcessMass() == ZERO ) ++nl; if ( p->id() > 0 && p->id() < 7 && p->hardProcessMass() == ZERO ) nLightJetVec( p->id() ); if ( p->id() > 0 && p->id() < 7 && p->hardProcessMass() != ZERO ) nHeavyJetVec( p->id() ); } nLight(nl/2); const PDVector& partonsInP = particleGroups()["p"]; for ( const auto pip : partonsInP ) if ( pip->id() > 0 && pip->id() < 7 && pip->hardProcessMass() == ZERO ) nLightProtonVec( pip->id() ); for ( auto & amp: amplitudes() ) amp->factory(this); //fill the amplitudes - if ( !amplitudes().empty() ) fill_amplitudes(); + if ( !amplitudes().empty() ) fillMEsMap(); // prepare the Born and virtual matrix elements for ( int i = 0 ; i <= max(0, MH()->N()) ; ++i ) prepare_BV(i); // prepare the real emission matrix elements for ( int i = 0 ; i <= MH()->N() ; ++i ) prepare_R(i); orderOLPs(); // start creating matrix elements MEs().clear(); int onlysubcounter=0; // count the subprocesses size_t numb = 0; size_t numv = 0; size_t numr = 0; for (int i = 0; i <= max(0, MH()->N()) ; ++i ){ for ( auto born : thePureMEsMap[i]){ if (calc_born && !theonlyUnlopsweights){ if(((theonlysub==-1||theonlysub==onlysubcounter)&&divideSub==-1) ||(divideSub!=-1&&onlysubcounter%divideSub==divideSubNumber)){ numb++; } } } } for (int i = 0 ; i <=max(0, MH()->N()); ++i ) for ( auto virt : thePureMEsMap[i]) if ( calc_virtual && i <= MH()->M()){ if(((theonlysub==-1||theonlysub==onlysubcounter)&&divideSub==-1) ||(divideSub!=-1&&onlysubcounter%divideSub==divideSubNumber)) numv++; } for (int i = 0; i <= max(0, MH()->N()) ; ++i ) for ( auto real : thePureMEsMap[i] ) if (calc_real&& i <= MH()->M() + 1 && i > 0 && !theonlyvirtualNLOParts&&!theonlyUnlopsweights){ if(((theonlysub==-1||theonlysub==onlysubcounter)&&divideSub==-1) ||(divideSub!=-1&&onlysubcounter%divideSub==divideSubNumber)) numr++; } onlysubcounter=0; generator()->log() << "\e[31mPreparing Merging:" << flush; generator()->log() << "\e[32m "<<numb<<" x Born \e[31m" << flush; if(MH()->M()>-1){ generator()->log() << "\e[93m"<<numv<<" x Virtual " << flush; generator()->log() << "\e[34m"<<numr<<" x Real \e[31m" << flush; }
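// A minimal sketch (not part of the patch): the subprocess-selection condition
// used in the counting loops above is repeated again in the push loops below.
// If one wanted to factor it out locally, a small lambda could capture the same
// logic; the name "selectedSubprocess" is purely illustrative.
auto selectedSubprocess = [](int counter, int onlysub, int divSub, int divSubRemainder) -> bool {
  // no division of the subprocesses requested:
  // keep everything, or only the single subprocess asked for via "onlysub"
  if ( divSub == -1 ) return onlysub == -1 || onlysub == counter;
  // division requested: keep every divSub-th subprocess with the given remainder
  return counter % divSub == divSubRemainder;
};
// The Born counting above would then read, e.g.,
//   if ( calc_born && !theonlyUnlopsweights &&
//        selectedSubprocess(onlysubcounter, theonlysub, divideSub, divideSubNumber) ) numb++;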
boost::progress_display * progressBar = new boost::progress_display(numb+numv+numr,generator()->log()); for (int i = 0; i <= max(0, MH()->N()) ; ++i ){ for ( auto born : thePureMEsMap[i]){ if (calc_born && !theonlyUnlopsweights){ if(((theonlysub==-1||theonlysub==onlysubcounter)&&divideSub==-1) ||(divideSub!=-1&&onlysubcounter%divideSub==divideSubNumber)){ pushB(born, i); onlysubcounter++; generator()->log() << "\e[32m"; ++(*progressBar); generator()->log() << "\e[0m"; } } } } for (int i = 0 ; i <=max(0, MH()->N()); ++i ) for ( auto virt : thePureMEsMap[i]) if ( calc_virtual && i <= MH()->M()){ if(((theonlysub==-1||theonlysub==onlysubcounter)&&divideSub==-1) ||(divideSub!=-1&&onlysubcounter%divideSub==divideSubNumber)) pushV(virt, i); onlysubcounter++; generator()->log() << "\e[93m"; ++(*progressBar); } for (int i = 0; i <= max(0, MH()->N()) ; ++i ) for ( auto real : thePureMEsMap[i] ) if (calc_real&& i <= MH()->M() + 1 && i > 0 && !theonlyvirtualNLOParts&&!theonlyUnlopsweights){ if(((theonlysub==-1||theonlysub==onlysubcounter)&&divideSub==-1) ||(divideSub!=-1&&onlysubcounter%divideSub==divideSubNumber)) - pushProR(real, i); + pushR(real, i); onlysubcounter++; generator()->log() << "\e[34m"; ++(*progressBar); } generator()->log() << "\e[0m"; delete progressBar; if ( !externalAmplitudes().empty() ) { generator()->log() << "Initializing external amplitudes.\n" << flush; boost::progress_display * progressBar = new boost::progress_display(externalAmplitudes().size(),generator()->log()); for ( const auto ext : externalAmplitudes()) { if ( ! ext->initializeExternal() ) { throw InitException() << "error: failed to initialize amplitude '" << ext->name() << "'\n"; } ++(*progressBar); } delete progressBar; generator()->log() << "---------------------------------------------------\n" << flush; } if ( !olpProcesses().empty() ) { generator()->log() << "Initializing one-loop provider(s).\n" << flush; map<Ptr<MatchboxAmplitude>::tptr, map<pair<Process, int>, int> > olps; for (const auto oit : olpProcesses()) { olps[oit.first] = oit.second; } boost::progress_display * progressBar = new boost::progress_display(olps.size(), generator()->log()); for ( const auto olpit : olps ) { if ( !olpit.first->startOLP(olpit.second) ) { throw InitException() << "error: failed to start OLP for amplitude '" << olpit.first->name() << "'\n"; } ++(*progressBar); } delete progressBar; generator()->log() << "---------------------------------------------------\n" << flush; } generator()->log() <<"\nGenerated "<<MEs().size()<<" Subprocesses.\n"<<flush; if(theonlysub!=-1)generator()->log() <<" ( "<<theonlysub<<"/"<<onlysubcounter<<" )"<<flush; generator()->log() << "---------------------------------------------------\n" << flush; generator()->log() <<"\n\n\e[31m" <<"Note: Due to the unitarization of the higher " <<"\nmultiplicities, the individual cross sections " <<"\ngiven in the integration and run step are not" <<"\nmeaningful without merging.\e[0m\n"<<flush; ransetup=true; } } #include "ThePEG/Persistency/PersistentOStream.h" #include "ThePEG/Persistency/PersistentIStream.h" void MergingFactory::persistentOutput(PersistentOStream & os) const { os << calc_born << calc_virtual << calc_real << theonlyUnlopsweights << theonlymulti << divideSub << divideSubNumber << theonlysub << ransetup << processMap << theMergingHelper <<theM<<theN; } void MergingFactory::persistentInput(PersistentIStream & is, int) { is >> calc_born >> calc_virtual >> calc_real >> theonlyUnlopsweights >> theonlymulti >> divideSub >> divideSubNumber >> theonlysub >> ransetup >>
processMap >> theMergingHelper >>theM>>theN; } #include "ThePEG/Interface/ClassDocumentation.h" #include "ThePEG/Utilities/DescribeClass.h" #include "ThePEG/Interface/Reference.h" #include "ThePEG/Interface/RefVector.h" #include "ThePEG/Interface/Switch.h" #include "ThePEG/Interface/Parameter.h" #include "ThePEG/Interface/Command.h" void MergingFactory::Init() { static Parameter<MergingFactory, int> interfaceonlymulti("onlymulti", "calculate only the ME with k additional partons.", &MergingFactory::theonlymulti, -1, -1, 0, false, false, Interface::lowerlim); static Parameter<MergingFactory, int> interfaceonlysub("onlysub", "calculate only one subProcess. this is for building grids.", &MergingFactory::theonlysub, -1, -1, 0, false, false, Interface::lowerlim); static Parameter<MergingFactory, int> interfacedivideSub("divideSub", "calculate only one subProcess. this is for building grids.", &MergingFactory::divideSub, -1, -1, 0, false, false, Interface::lowerlim); static Parameter<MergingFactory, int> interfacedivideSubNumber("divideSubNumber", "calculate only one subProcess. this is for building grids.", &MergingFactory::divideSubNumber, -1, -1, 0, false, false, Interface::lowerlim); static Switch<MergingFactory, bool> interface_calc_born("calc_born", "[debug] Switch on or off the born contribution.", &MergingFactory::calc_born, true, false, false); static SwitchOption interface_calc_bornOn(interface_calc_born, "On", "Switch on calculation of born.", true); static SwitchOption interface_calc_bornOff(interface_calc_born, "Off", "Switch off calculation of born.", false); static Switch<MergingFactory, bool> interface_calc_virtual("calc_virtual", "[debug] Switch on or off the virtual contribution.", &MergingFactory::calc_virtual, true, false, false); static SwitchOption interface_calc_virtualOn(interface_calc_virtual, "On", "Switch on calculation of virtual.", true); static SwitchOption interface_calc_virtualOff(interface_calc_virtual, "Off", "Switch off calculation of virtual.", false); static Switch<MergingFactory, bool> interface_calc_real("calc_real", "[debug] Switch on or off the real contribution.", &MergingFactory::calc_real, true, false, false); static SwitchOption interface_calc_realOn(interface_calc_real, "On", "Switch on calculation of real.", true); static SwitchOption interface_calc_realOff(interface_calc_real, "Off", "Switch off calculation of real.", false); static Switch<MergingFactory, bool> interface_theonlyNLOParts("onlyNLOParts", "Switch on or off the onlyNLOParts.", &MergingFactory::theonlyNLOParts, true, false, false); static SwitchOption interface_theonlyNLOPartsOn(interface_theonlyNLOParts, "On", "Switch on the theonlyNLOParts.", true); static SwitchOption interface_theonlyNLOPartsOff(interface_theonlyNLOParts, "Off", "Switch off the theonlyNLOParts.", false); static Switch<MergingFactory, bool> interface_theonlyvirtualNLOParts("onlyvirtualNLOParts", "Switch on or off the onlyvirtualNLOParts.", &MergingFactory::theonlyvirtualNLOParts, true, false, false); static SwitchOption interface_theonlyvirtualNLOPartsOn(interface_theonlyvirtualNLOParts, "On", "Switch on the theonlyvirtualNLOParts.", true); static SwitchOption interface_theonlyvirtualNLOPartsOff(interface_theonlyvirtualNLOParts, "Off", "Switch off the theonlyvirtualNLOParts.", false); static Switch<MergingFactory, bool> interface_theonlyrealNLOParts("onlyrealNLOParts", "Switch on or off the onlyrealNLOParts.", &MergingFactory::theonlyrealNLOParts, true, false, false); static SwitchOption 
interface_theonlyrealNLOPartsOn(interface_theonlyrealNLOParts, "On", "Switch on the theonlyrealNLOParts.", true); static SwitchOption interface_theonlyrealNLOPartsOff(interface_theonlyrealNLOParts, "Off", "Switch off the theonlyrealNLOParts.", false); static Switch<MergingFactory, bool> interface_theunitarizeNLOParts("unitarizeNLOParts", "Switch on or off the unitarizeNLOParts.", &MergingFactory::theunitarizeNLOParts, true, false, false); static SwitchOption interface_theunitarizeNLOPartsOn(interface_theunitarizeNLOParts, "On", "Switch on the unitarizeNLOParts.", true); static SwitchOption interface_theunitarizeNLOPartsOff(interface_theunitarizeNLOParts, "Off", "Switch off the unitarizeNLOParts.", false); static Switch<MergingFactory, bool> interface_theonlyUnlopsweights("onlyUnlopsweights", "Switch on or off the onlyUnlopsweights.", &MergingFactory::theonlyUnlopsweights, true, false, false); static SwitchOption interface_theonlyUnlopsweightsOn(interface_theonlyUnlopsweights, "On", "Switch on the onlyUnlopsweights.", true); static SwitchOption interface_theonlyUnlopsweightsOff(interface_theonlyUnlopsweights, "Off", "Switch off the onlyUnlopsweights.", false); static Switch<MergingFactory, bool> interface_Unitarized("Unitarized", "Unitarize the cross section.", &MergingFactory::unitarized, true, false, false); static SwitchOption interface_UnitarizedOn(interface_Unitarized, "On", "Switch on the unitarized cross section.", true); static SwitchOption interface_UnitarizedOff(interface_Unitarized, "Off", "Switch off the unitarized cross section.", false); static Switch<MergingFactory, bool> interface_NLOUnitarized("NLOUnitarized", "Unitarize the cross section.", &MergingFactory::NLOunitarized, true, false, false); static SwitchOption interface_NLOUnitarizedOn(interface_NLOUnitarized, "On", "Switch on the unitarized NLO cross section.", true); static SwitchOption interface_NLOUnitarizedOff(interface_NLOUnitarized, "Off", "Switch off the unitarized NLO cross section.", false); static Reference<MergingFactory,Merger> interfaceMergingHelper ("MergingHelper", "", &MergingFactory::theMergingHelper, false, false, true, true, false); static Parameter<MergingFactory, int> interfaceaddNLOLegs("NLOProcesses", "Set the number of virtual corrections to consider. 0 is default for no virtual correction.", &MergingFactory::theM, 0, 0, 0, false, false, Interface::lowerlim); } // *** Attention *** The following static variable is needed for the type // description system in ThePEG. Please check that the template arguments // are correct (the class and its base class), and that the constructor // arguments are correct (the class name and the name of the dynamically // loadable library where the class implementation can be found). DescribeClass<MergingFactory, Herwig::MatchboxFactory> describeHerwigMergingFactory("Herwig::MergingFactory", "HwDipoleShower.so"); diff --git a/Shower/Dipole/Merging/MergingFactory.h b/Shower/Dipole/Merging/MergingFactory.h --- a/Shower/Dipole/Merging/MergingFactory.h +++ b/Shower/Dipole/Merging/MergingFactory.h @@ -1,203 +1,191 @@ /// -*- C++ -*- // /// MergingFactory.h is a part of Herwig - A multi-purpose Monte Carlo event generator /// Copyright (C) 2002-2012 The Herwig Collaboration // /// Herwig is licenced under version 2 of the GPL, see COPYING for details. /// Please respect the MCnet academic guidelines, see GUIDELINES for details. // #ifndef HERWIG_MergingFactory_H #define HERWIG_MergingFactory_H // /// This is the declaration of the MergingFactory class. 
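/// An illustrative note (not part of the patch), based on the example input
/// files changed alongside this code: with
///   do MergingFactory:Process e- e+ -> j j [ j j ]
///   set MergingFactory:NLOProcesses 2
/// parseProcess() counts the two "j" entries inside the bracket, so N() == 2
/// additional jets are merged, while NLOProcesses sets theM = 2 and therefore
/// M() == 1, i.e. NLO corrections are included up to one additional jet.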
// #include "MergingFactory.fh" #include "Herwig/MatrixElement/Matchbox/MatchboxFactory.h" #include "Node.fh" #include "Merger.h" #include "ThePEG/Persistency/PersistentOStream.h" #include "ThePEG/Persistency/PersistentIStream.h" namespace Herwig { using namespace ThePEG; /** * \ingroup Matchbox * \author Johannes Bellm * * \brief MergingFactory automatically sets up a NLO * QCD merging carried out in dipole subtraction. * * @see \ref MergingFactoryInterfaces "The interfaces" * defined for MergeboxFactory. */ class MergingFactory : public MatchboxFactory { public: - /** @name Standard constructors and destructors. */ - //@{ - /** - * The default constructor. - */ - MergingFactory(); //TODO MergingFactory - - /** - * The destructor. - */ - virtual ~MergingFactory(); - //@} /// main method to setup the ME vector virtual void setup(); /// fill all amplitudes, stored in pureMEsMap - void fill_amplitudes(); + void fillMEsMap(); /// prepare the Born and virtual matrix elements. void prepare_BV(int i); /// prepare the real emission matrix elements. void prepare_R(int i); /// push the born contributions to the ME vector. void pushB(MatchboxMEBasePtr, int); //push the virtual contributions to the ME vector. void pushV(MatchboxMEBasePtr, int); /// push the real contributions to the ME vector. - void pushProR(MatchboxMEBasePtr, int); - /// order matrix elements form one loop provider. + void pushR(MatchboxMEBasePtr, int); + /// order matrix elements from one loop provider. void orderOLPs(); /// Debugging: push only multiplicities to the ME vector /// in range of specified mulltiplicity. int onlymulti()const { - return theonlymulti==-1?-1:(theonlymulti+processMap.find(0)->second.size());} + return theonlymulti==-1?-1:(theonlymulti+processMap.find(0)->second.size()); + } /// calculate only unlops weights. bool onlyUnlopsweights() const {return theonlyUnlopsweights;} /// pointer to the merging helper. - MergerPtr MH(){return theMergingHelper;} + MergerPtr MH() {return theMergingHelper;} /// maximal NLO mulitplicity: 0=NLO corrections to the productio process. - int M()const {return theM-1;} + int M() const {return theM-1;} /// leg size of highest multiplicity. - int N()const {return theN;} + int N() const {return theN;} /// Return the Map of matrix elements to be considered /// (the Key is the number of additional jets) const map<int, vector<MatchboxMEBasePtr> >& pureMEsMap() const { - return thePureMEsMap;} + return thePureMEsMap; + } /// Access the Map of matrix elements to be considered /// (the Key is the number of additional jets) map<int, vector<MatchboxMEBasePtr> >& pureMEsMap() { return thePureMEsMap; } //Parse a process description virtual vector<string> parseProcess(string); // fill the virtuals vector (these are IPK-operators) - void getVirtuals(MatchboxMEBasePtr nlo,int i); + void getVirtuals(MatchboxMEBasePtr nlo, bool clone ); public: /** @name Functions used by the persistent I/O system. */ //@{ /** * Function used to write out object persistently. * @param os the persistent output stream written to. */ void persistentOutput(PersistentOStream & os) const; /** * Function used to read in object persistently. * @param is the persistent input stream read from. * @param version the version number of the object when written. */ void persistentInput(PersistentIStream & is, int); //@} static void Init(); protected: /** @name Standard Interfaced functions. */ //@{ /** * Initialize this object after the setup phase before saving an * EventGenerator to disk. 
* @throws InitException if object could not be initialized properly. */ virtual void doinit(); //@} protected: /** @name Clone Methods. */ //@{ /** * Make a simple clone of this object. * @return a pointer to the new object. */ virtual IBPtr clone() const; /** * Make a clone of this object, possibly modifying the cloned object * to make it sane. * @return a pointer to the new object. */ virtual IBPtr fullclone() const; //@} private: /// Calculate only virtual and real contributions. - bool theonlyNLOParts; + bool theonlyNLOParts = false; /// Calculate only virtual contributions. - bool theonlyvirtualNLOParts; + bool theonlyvirtualNLOParts = false; /// Calculate only real contributions. - bool theonlyrealNLOParts; + bool theonlyrealNLOParts = false; /// Calculate only expanded histories contributions. - bool theonlyUnlopsweights; + bool theonlyUnlopsweights = false; /// unitarize virtual and real contributions. - bool theunitarizeNLOParts; + bool theunitarizeNLOParts = true; /// Calculate born contributions. - bool calc_born; + bool calc_born = true; /// Calculate virtual contributions. - bool calc_virtual; + bool calc_virtual = true; /// Calculate real contributions. - bool calc_real; + bool calc_real = true; /// unitarise the LO contributions. - bool unitarized; + bool unitarized = true; /// unitarise the NLO contributions. - bool NLOunitarized; + bool NLOunitarized = true; /// did run setup. - bool ransetup; + bool ransetup = false; /// Debugging: push only multiplicities to the ME vector /// in range of the specified multiplicity. - int theonlymulti; + int theonlymulti = -1; /// calculate only the subprocess with the given number. - int theonlysub; + int theonlysub = -1; /// cut the subprocesses into equal-size pieces. - int divideSub; + int divideSub = -1; /// interface to select which of the equal-size pieces to calculate. - int divideSubNumber; + int divideSubNumber = -1; /// maximal legsize for NLO corrections. - int theM; + int theM = -1; /// maximal legsize for LO contributions. - int theN; + int theN = -1; /// Prefix for subtraction data. string theSubtractionData; /// map for processes. map< int, vector<string> > processMap; //The matrix elements: int = number of additional jets map< int, vector<MatchboxMEBasePtr> > thePureMEsMap; - /// map for virtual contributions - map<int, vector<Ptr<MatchboxInsertionOperator>::ptr> > theVirtualsMap; /// the merging helper MergerPtr theMergingHelper; /** * The assignment operator is private and must never be called. * In fact, it should not even be implemented.
*/ - MergingFactory & operator=(const MergingFactory &); + MergingFactory & operator=(const MergingFactory &) = delete; }; } #endif /* HERWIG_MergingFactory_H */ diff --git a/src/LEP-Merging.in b/src/LEP-Merging.in --- a/src/LEP-Merging.in +++ b/src/LEP-Merging.in @@ -1,138 +1,138 @@ # -*- ThePEG-repository -*- ################################################## ## Herwig/Merging example input file ################################################## ################################################## ## Collider type ################################################## read Matchbox/EECollider.in read Matchbox/Merging.in ################################################## ## Beam energy sqrt(s) ################################################## cd /Herwig/EventHandlers set EventHandler:LuminosityFunction:Energy 91.2*GeV ################################################## ## Process selection ################################################## ## Note that event generation may fail if no matching matrix element has ## been found. Coupling orders are with respect to the Born process, ## i.e. NLO QCD does not require an additional power of alphas. ## Model assumptions read Matchbox/StandardModelLike.in read Matchbox/DiagonalCKM.in ## Set the order of the couplings cd /Herwig/Merging -set MFactory:OrderInAlphaS 0 -set MFactory:OrderInAlphaEW 2 +set MergingFactory:OrderInAlphaS 0 +set MergingFactory:OrderInAlphaEW 2 ## Select the process ## You may use identifiers such as p, pbar, j, l, mu+, h0 etc. ## Please mind the spaces between *j , j* in the brackets. -do MFactory:Process e- e+ -> j j [ j , j ] +do MergingFactory:Process e- e+ -> j j [ j j ] -set MFactory:NLOProcesses 2 +set MergingFactory:NLOProcesses 2 set Merger:MergingScale 4.*GeV set Merger:MergingScaleSmearing 0.1 set Merger:gamma 1. 
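## Illustrative, commented-out addition (not part of the original example):
## for building integration grids in pieces, the factory can be restricted to
## a subset of subprocesses; divideSub and divideSubNumber must be set together.
# set MergingFactory:divideSub 4
# set MergingFactory:divideSubNumber 2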
cd /Herwig/MatrixElements/Matchbox/Utility insert DiagramGenerator:ExcludeInternal 0 /Herwig/Particles/gamma ## Special settings required for on-shell production of unstable particles ## enable for on-shell top production # read Matchbox/OnShellTopProduction.in ## enable for on-shell W, Z or h production # read Matchbox/OnShellWProduction.in # read Matchbox/OnShellZProduction.in # read Matchbox/OnShellHProduction.in ################################################## ## Matrix element library selection ################################################## ## Select a generic tree/loop combination or a ## specialized NLO package # read Matchbox/MadGraph-GoSam.in # read Matchbox/MadGraph-MadGraph.in # read Matchbox/MadGraph-NJet.in # read Matchbox/MadGraph-OpenLoops.in ## Uncomment this to use ggh effective couplings ## currently only supported by MadGraph-GoSam # read Matchbox/HiggsEffective.in ################################################## ## Cut selection ## See the documentation for more options ################################################## ## cuts on additional jets # read Matchbox/DefaultEEJets.in # set NJetsCut:NJetsMin 3 ################################################## ## Scale choice ## See the documentation for more options ################################################## cd /Herwig/MatrixElements/Matchbox/Scales/ -set /Herwig/Merging/MFactory:ScaleChoice SHatScale +set /Herwig/Merging/MergingFactory:ScaleChoice SHatScale ################################################## ## Scale uncertainties ################################################## # read Matchbox/MuDown.in # read Matchbox/MuUp.in ################################################## ## Shower scale uncertainties ################################################## # read Matchbox/MuQDown.in # read Matchbox/MuQUp.in ################################################## ## Analyses ################################################## cd /Herwig/Analysis insert /Herwig/Generators/EventGenerator:AnalysisHandlers 0 Rivet # insert /Herwig/Generators/EventGenerator:AnalysisHandlers 0 HepMC read Matchbox/LEP91-Analysis.in ################################################## ## Do not apply profile scales for LEP as hard ## scale coincides with kinematic limit ################################################## set /Herwig/Shower/ShowerHandler:HardScaleProfile NULL set /Herwig/DipoleShower/DipoleShowerHandler:HardScaleProfile NULL ################################################## ## Save the generator ################################################## -do /Herwig/Merging/MFactory:ProductionMode +do /Herwig/Merging/MergingFactory:ProductionMode cd /Herwig/Generators saverun LEP-Merging EventGenerator diff --git a/src/LHC-Merging.in b/src/LHC-Merging.in --- a/src/LHC-Merging.in +++ b/src/LHC-Merging.in @@ -1,157 +1,157 @@ # -*- ThePEG-repository -*- ################################################## ## Herwig/Merging example input file ################################################## ################################################## ## Collider type ################################################## read Matchbox/PPCollider.in read Matchbox/Merging.in ################################################## ## Beam energy sqrt(s) ################################################## cd /Herwig/EventHandlers set EventHandler:LuminosityFunction:Energy 7000*GeV ################################################## ## Process selection ################################################## ## Note that event generation may fail if no matching 
matrix element has ## been found. Coupling orders are with respect to the Born process, ## i.e. NLO QCD does not require an additional power of alphas. ## Model assumptions read Matchbox/StandardModelLike.in read Matchbox/DiagonalCKM.in ## Set the order of the couplings cd /Herwig/Merging -set MFactory:OrderInAlphaS 0 -set MFactory:OrderInAlphaEW 2 +set MergingFactory:OrderInAlphaS 0 +set MergingFactory:OrderInAlphaEW 2 ## Select the process ## You may use identifiers such as p, pbar, j, l, mu+, h0 etc. -do MFactory:Process p p -> e+ e- [ j j] +do MergingFactory:Process p p -> e+ e- [ j j ] -set MFactory:NLOProcesses 0 +set MergingFactory:NLOProcesses 0 set Merger:MergingScale 10.*GeV set Merger:MergingScaleSmearing 0.1 set Merger:gamma 1. set MPreWeight:HTPower 0 set MPreWeight:MaxPTPower 0 set MPreWeight:OnlyColoured False read Matchbox/PQCDLevel.in ## Special settings required for on-shell production of unstable particles ## enable for on-shell top production # read Matchbox/OnShellTopProduction.in ## enable for on-shell W, Z or h production # read Matchbox/OnShellWProduction.in # read Matchbox/OnShellZProduction.in # read Matchbox/OnShellHProduction.in # Special settings for the VBF approximation # read Matchbox/VBFDiagramsOnly.in ################################################## ## Matrix element library selection ################################################## ## Select a generic tree/loop combination or a ## specialized NLO package # read Matchbox/MadGraph-GoSam.in # read Matchbox/MadGraph-MadGraph.in # read Matchbox/MadGraph-NJet.in # read Matchbox/MadGraph-OpenLoops.in # read Matchbox/HJets.in # read Matchbox/VBFNLO.in ## Uncomment this to use ggh effective couplings ## currently only supported by MadGraph-GoSam # read Matchbox/HiggsEffective.in ################################################## ## Cut selection ## See the documentation for more options ################################################## set /Herwig/Cuts/ChargedLeptonPairMassCut:MinMass 60*GeV set /Herwig/Cuts/ChargedLeptonPairMassCut:MaxMass 120*GeV cd /Herwig/MatrixElements/Matchbox/Utility insert DiagramGenerator:ExcludeInternal 0 /Herwig/Particles/gamma ## cuts on additional jets # read Matchbox/DefaultPPJets.in # insert JetCuts:JetRegions 0 FirstJet # insert JetCuts:JetRegions 1 SecondJet # insert JetCuts:JetRegions 2 ThirdJet # insert JetCuts:JetRegions 3 FourthJet ################################################## ## Scale choice ## See the documentation for more options ################################################## cd /Herwig/MatrixElements/Matchbox/Scales/ -set /Herwig/Merging/MFactory:ScaleChoice LeptonPairMassScale +set /Herwig/Merging/MergingFactory:ScaleChoice LeptonPairMassScale ################################################## ## Scale uncertainties ################################################## # read Matchbox/MuDown.in # read Matchbox/MuUp.in ################################################## ## Shower scale uncertainties ################################################## # read Matchbox/MuQDown.in # read Matchbox/MuQUp.in ################################################## ## PDF choice ################################################## read Matchbox/FiveFlavourNoBMassScheme.in read Matchbox/MMHT2014.in ################################################## ## Analyses ################################################## cd /Herwig/Analysis insert /Herwig/Generators/EventGenerator:AnalysisHandlers 0 Rivet # insert /Herwig/Generators/EventGenerator:AnalysisHandlers 0 HepMC read 
Matchbox/LHC7-Z-Analysis.in #read Matchbox/LHC7-J-Analysis.in #read Matchbox/LHC7-T-Analysis.in ################################################## ## Save the generator ################################################## -do /Herwig/Merging/MFactory:ProductionMode +do /Herwig/Merging/MergingFactory:ProductionMode set /Herwig/Generators/EventGenerator:IntermediateOutput Yes cd /Herwig/Generators saverun LHC-Merging EventGenerator
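## Usage sketch (not part of the input file): assuming a standard Herwig 7
## installation, the run card saved above would typically be processed with
##   Herwig read LHC-Merging.in
##   Herwig run LHC-Merging.run -N 10000
## where the event number given to -N is only an example.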