diff --git a/Analysis/BasicConsistency.cc b/Analysis/BasicConsistency.cc --- a/Analysis/BasicConsistency.cc +++ b/Analysis/BasicConsistency.cc @@ -1,323 +1,322 @@ // -*- C++ -*- // // BasicConsistency.cc is a part of Herwig - A multi-purpose Monte Carlo event generator // Copyright (C) 2002-2011 The Herwig Collaboration // // Herwig is licenced under version 2 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. // // // This is the implementation of the non-inlined, non-templated member // functions of the BasicConsistency class. // #include "BasicConsistency.h" #include "ThePEG/Interface/Parameter.h" #include "ThePEG/Interface/Switch.h" #include "ThePEG/Interface/ClassDocumentation.h" #include "ThePEG/EventRecord/Event.h" #include "ThePEG/Repository/EventGenerator.h" #include "ThePEG/Persistency/PersistentOStream.h" #include "ThePEG/Persistency/PersistentIStream.h" #include "Herwig/Utilities/EnumParticles.h" #include "ThePEG/PDT/DecayMode.h" using namespace Herwig; using namespace ThePEG; BasicConsistency::BasicConsistency() : _epsmom(ZERO),_checkquark(true), _checkcharge(true), _checkcluster(true), _checkBR(true), _absolutemomentumtolerance(1*MeV), _relativemomentumtolerance(1e-5) {} IBPtr BasicConsistency::clone() const { return new_ptr(*this); } IBPtr BasicConsistency::fullclone() const { return new_ptr(*this); } void BasicConsistency::analyze(tEventPtr event, long, int, int) { bool writeEvent=false; set<tcPPtr> particles; event->selectFinalState(inserter(particles)); int charge(-event->incoming().first->dataPtr()->iCharge() -event->incoming().second->dataPtr()->iCharge()); Lorentz5Momentum ptotal(-event->incoming().first->momentum() -event->incoming().second->momentum()); const Energy beamenergy = ptotal.m(); for(set<tcPPtr>::const_iterator it = particles.begin(); it != particles.end(); ++it) { if (_checkquark && (*it)->coloured()) { cerr << "Had quarks in final state in event " << event->number() << '\n'; generator()->log() << "Had quarks in final state in event " << event->number() << '\n'; writeEvent = true; } else if( _checkcluster && (**it).id()==ParticleID::Cluster) { cerr << "Had clusters in final state in event " << event->number() << '\n'; generator()->log() << "Had clusters in final state in event " << event->number() << '\n'; writeEvent = true; } charge += (*it)->dataPtr()->iCharge(); ptotal += (*it)->momentum(); bool problem=false; LorentzDistance test; for(unsigned int ix=0;ix<5;++ix) { switch (ix) { case 0: test = (*it)->vertex(); break; case 1: test = (*it)->labVertex(); break; case 2: test = (*it)->decayVertex(); break; case 3: test = (*it)->labDecayVertex(); break; case 4: test = (*it)->lifeLength(); break; } - problem |= - isnan(test.x().rawValue()) || isnan(test.y().rawValue()) || - isnan(test.z().rawValue()) || isnan(test.t().rawValue()) || - isinf(test.x().rawValue()) || isinf(test.y().rawValue()) || - isinf(test.z().rawValue()) || isinf(test.t().rawValue()); + problem |= ! 
( isfinite(test.x().rawValue()) && + isfinite(test.y().rawValue()) && + isfinite(test.z().rawValue()) && + isfinite(test.t().rawValue()) ); } if(problem) { generator()->log() << "Problem with position of " << **it << "\n" << (*it)->vertex()/mm << "\n" << (*it)->labVertex()/mm << "\n" << (*it)->decayVertex()/mm << "\n" << (*it)->labDecayVertex()/mm << "\n" << (*it)->lifeLength()/mm << "\n"; } } if ( _checkcharge && charge != 0 ) { cerr << "\nCharge imbalance by " << charge << "in event " << event->number() << '\n'; generator()->log() << "Charge imbalance by " << charge << "in event " << event->number() << '\n'; writeEvent = true; } Energy mag = ptotal.m(); Energy ee = ptotal.e(); - if (isnan(mag.rawValue())) { + if (std::isnan(mag.rawValue())) { cerr << "\nMomentum is 'nan'; " << ptotal/MeV << " MeV in event " << event->number() << '\n'; generator()->log() <<"\nMomentum is 'nan'; " << ptotal/MeV << " MeV in event " << event->number() << '\n'; writeEvent = true; } const Energy epsilonmax = max( _absolutemomentumtolerance, _relativemomentumtolerance * beamenergy ); if (abs(mag) > epsilonmax || abs(ee) > epsilonmax) { cerr << "\nMomentum imbalance by " << ptotal/MeV << " MeV in event " << event->number() << '\n'; generator()->log() <<"\nMomentum imbalance by " << ptotal/MeV << " MeV in event " << event->number() << '\n'; writeEvent = true; } if (abs(mag) > _epsmom) _epsmom = abs(mag); if (abs(ee) > _epsmom) _epsmom = abs(ee); if (abs(ptotal.x()) > _epsmom) _epsmom = abs(ptotal.x()); if (abs(ptotal.y()) > _epsmom) _epsmom = abs(ptotal.y()); if (abs(ptotal.z()) > _epsmom) _epsmom = abs(ptotal.z()); particles.clear(); event->select(inserter(particles), ThePEG::AllSelector()); for(set<tcPPtr>::const_iterator it = particles.begin(); it != particles.end(); ++it) { bool problem=false; LorentzDistance test; for(unsigned int ix=0;ix<5;++ix) { switch (ix) { case 0: test = (*it)->vertex(); break; case 1: test = (*it)->labVertex(); break; case 2: test = (*it)->decayVertex(); break; case 3: test = (*it)->labDecayVertex(); break; case 4: test = (*it)->lifeLength(); break; } - problem |= isnan(test.m2().rawValue()) || isinf(test.m2().rawValue()); + problem |= ( ! isfinite(test.m2().rawValue()) ); } if(problem) { generator()->log() << "Problem with position of " << **it << "\n" << (*it)->vertex()/mm << "\n" << (*it)->labVertex()/mm << "\n" << (*it)->decayVertex()/mm << "\n" << (*it)->labDecayVertex()/mm << "\n" << (*it)->lifeLength()/mm << "\n"; writeEvent=true; } } if(writeEvent) generator()->log() << *event; } void BasicConsistency::persistentOutput(PersistentOStream & os) const { os << _checkquark << _checkcharge << _checkcluster << _checkBR << ounit(_absolutemomentumtolerance,MeV) << _relativemomentumtolerance; } void BasicConsistency::persistentInput(PersistentIStream & is, int) { is >> _checkquark >> _checkcharge >> _checkcluster >> _checkBR >> iunit(_absolutemomentumtolerance,MeV) >> _relativemomentumtolerance; } ClassDescription<BasicConsistency> BasicConsistency::initBasicConsistency; // Definition of the static class description member. 
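The BasicConsistency hunks above collapse the separate isnan()/isinf() tests on each vertex and momentum component into a single finiteness check. As a rough, self-contained sketch of that pattern (plain doubles and a hypothetical allFinite() helper standing in for ThePEG's LorentzDistance/Lorentz5Momentum and their rawValue() accessors):

```cpp
// Minimal illustration only: std::isfinite(x) is false exactly when x is
// NaN or +/-Inf, which is what the old isnan(...) || isinf(...) pair tested.
#include <cmath>
#include <iostream>
#include <limits>

// Hypothetical helper; the real code checks the four components of a
// LorentzDistance obtained via rawValue().
bool allFinite(double x, double y, double z, double t) {
  return std::isfinite(x) && std::isfinite(y) &&
         std::isfinite(z) && std::isfinite(t);
}

int main() {
  const double nan = std::numeric_limits<double>::quiet_NaN();
  double x = 1.0, y = 2.0, z = nan, t = 3.0;
  bool problem = !allFinite(x, y, z, t);   // mirrors "problem |= ! ( ... )"
  if (problem)
    std::cout << "Problem with position: non-finite component\n";
  return 0;
}
```

The same idea recurs throughout the rest of the patch: unqualified isnan() on plain doubles becomes std::isnan(), and combined NaN/Inf tests become !isfinite(...).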
void BasicConsistency::Init() { static ClassDocumentation<BasicConsistency> documentation ("The BasicConsistency analysis handler checks for" " momentum and charge conservation."); static Switch<BasicConsistency,bool> interfaceCheckQuark ("CheckQuark", "Check whether there are quarks in the final state", &BasicConsistency::_checkquark, true, false, false); static SwitchOption interfaceCheckQuarkCheck (interfaceCheckQuark, "Yes", "Check for quarks", true); static SwitchOption interfaceCheckQuarkNoCheck (interfaceCheckQuark, "No", "Don't check for quarks", false); static Switch<BasicConsistency,bool> interfaceCheckCharge ("CheckCharge", "Check whether charge is conserved", &BasicConsistency::_checkcharge, true, false, false); static SwitchOption interfaceCheckChargeCheck (interfaceCheckCharge, "Yes", "Check charge conservation", true); static SwitchOption interfaceCheckChargeNoCheck (interfaceCheckCharge, "No", "Don't check charge conservation", false); static Switch<BasicConsistency,bool> interfaceCheckCluster ("CheckCluster", "Check whether there are clusters in the final state", &BasicConsistency::_checkcluster, true, false, false); static SwitchOption interfaceCheckClusterCheck (interfaceCheckCluster, "Yes", "Check for clusters", true); static SwitchOption interfaceCheckClusterNoCheck (interfaceCheckCluster, "No", "Don't check for clusters", false); static Switch<BasicConsistency,bool> interfaceCheckBranchingRatios ("CheckBranchingRatios", "Check whether the branching ratios of the particles add up to one.", &BasicConsistency::_checkBR, true, false, false); static SwitchOption interfaceCheckBranchingRatiosYes (interfaceCheckBranchingRatios, "Yes", "Perform the check", true); static SwitchOption interfaceCheckBranchingRatiosNo (interfaceCheckBranchingRatios, "No", "Don't perform the check", false); static Parameter<BasicConsistency,Energy> interfaceAbsoluteMomentumTolerance ("AbsoluteMomentumTolerance", "The value of the momentum imbalance above which warnings are issued/MeV.\n" "Final tolerance is the larger of AbsoluteMomentumTolerance and\n" "RelativeMomentumTolerance*beam energy.", &BasicConsistency::_absolutemomentumtolerance, MeV, 1*MeV, ZERO, 1e10*GeV, false, false, true); static Parameter<BasicConsistency,double> interfaceRelativeMomentumTolerance ("RelativeMomentumTolerance", "The value of the momentum imbalance as a fraction of the beam energy\n" "above which warnings are issued.\n" "Final tolerance is the larger of AbsoluteMomentumTolerance and\n" "RelativeMomentumTolerance*beam energy.", &BasicConsistency::_relativemomentumtolerance, 1e-5, 0.0, 1.0, false, false, true); } void BasicConsistency::dofinish() { AnalysisHandler::dofinish(); cout << "\nBasicConsistency: maximum 4-momentum violation: " << _epsmom/MeV << " MeV\n"; } void BasicConsistency::doinitrun() { AnalysisHandler::doinitrun(); static double eps=1e-12; for(ParticleMap::const_iterator it=generator()->particles().begin(); it!=generator()->particles().end();++it) { if(it->second->stable()) continue; double total(0.); for(DecaySet::const_iterator dit=it->second->decayModes().begin(); dit!=it->second->decayModes().end();++dit) { if((**dit).on()) total +=(**dit).brat(); } if(abs(total-1.)>eps) { cerr << "Warning: Total BR for " << it->second->PDGName() << " does not add up to 1. 
sum = " << total << "\n"; } } } diff --git a/Contrib/AlpGen/BasicLesHouchesFileReader.cc b/Contrib/AlpGen/BasicLesHouchesFileReader.cc --- a/Contrib/AlpGen/BasicLesHouchesFileReader.cc +++ b/Contrib/AlpGen/BasicLesHouchesFileReader.cc @@ -1,466 +1,466 @@ // -*- C++ -*- // // BasicLesHouchesFileReader.cc is a part of Herwig - A multi-purpose // Monte Carlo event generator. // Copyright (C) 2002-2011 The Herwig Collaboration // // Herwig is licenced under version 2 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. // // // This is the implementation of the non-inlined, non-templated member // functions of the BasicLesHouchesFileReader class. // #include "BasicLesHouchesFileReader.h" #include "ThePEG/Interface/ClassDocumentation.h" #include "ThePEG/Interface/Reference.h" #include "ThePEG/Interface/Switch.h" #include "ThePEG/Interface/Parameter.h" #include "ThePEG/Utilities/Throw.h" #include "ThePEG/PDT/DecayMode.h" #include "ThePEG/Persistency/PersistentOStream.h" #include "ThePEG/Persistency/PersistentIStream.h" #include "ThePEG/PDF/PartonExtractor.h" #include "ThePEG/PDF/NoPDF.h" #include "ThePEG/Cuts/Cuts.h" #include "ThePEG/EventRecord/TmpTransform.h" #include "ThePEG/Utilities/UtilityBase.h" using namespace Herwig; BasicLesHouchesFileReader:: BasicLesHouchesFileReader(const BasicLesHouchesFileReader & x) : LesHouchesReader(x), neve(x.neve), ieve(0), LHFVersion(x.LHFVersion), outsideBlock(x.outsideBlock), headerBlock(x.headerBlock), initComments(x.initComments), initAttributes(x.initAttributes), eventComments(x.eventComments), eventAttributes(x.eventAttributes), theFileName(x.theFileName),overSampling_(x.overSampling_) {} BasicLesHouchesFileReader::~BasicLesHouchesFileReader() {} IBPtr BasicLesHouchesFileReader::clone() const { return new_ptr(*this); } IBPtr BasicLesHouchesFileReader::fullclone() const { return new_ptr(*this); } bool BasicLesHouchesFileReader::preInitialize() const { return true; } void BasicLesHouchesFileReader::doinit() { LesHouchesReader::doinit(); } void BasicLesHouchesFileReader::initialize(LesHouchesEventHandler & eh) { LesHouchesReader::initialize(eh); if ( LHFVersion.empty() ) Throw<LesHouchesFileError>() << "The file associated with '" << name() << "' does not contain a " << "proper formatted Les Houches event file. The events may not be " << "properly sampled." << Exception::warning; } long BasicLesHouchesFileReader::scan() { open(); // Shall we write the events to a cache file for fast reading? If so // we write to a temporary file if the caches events should be // randomized. if ( cacheFileName().length() ) openWriteCacheFile(); // Keep track of the number of events scanned. long neve = 0; long cuteve = 0; bool negw = false; // If the open() has not already gotten information about subprocesses // and cross sections we have to scan through the events. if ( !heprup.NPRUP || cacheFile() || abs(heprup.IDWTUP) != 1 ) { // why scan if IDWTUP != 1? HoldFlag<> isScanning(scanning); double oldsum = 0.0; vector<int> lprup; vector<double> newmax; vector<long> oldeve; vector<long> neweve; for ( int i = 0; ( maxScan() < 0 || i < maxScan() ) && readEvent(); ++i ) { if ( !checkPartonBin() ) Throw<LesHouchesInitError>() << "Found event in LesHouchesReader '" << name() << "' which cannot be handeled by the assigned PartonExtractor '" << partonExtractor()->name() << "'." 
<< Exception::runerror; vector<int>::iterator idit = find(lprup.begin(), lprup.end(), hepeup.IDPRUP); int id = lprup.size(); if ( idit == lprup.end() ) { lprup.push_back(hepeup.IDPRUP); newmax.push_back(0.0); neweve.push_back(0); oldeve.push_back(0); } else { id = idit - lprup.begin(); } ++neve; ++oldeve[id]; oldsum += hepeup.XWGTUP; if ( cacheFile() ) { if ( eventWeight() == 0.0 ) { ++cuteve; continue; } cacheEvent(); } ++neweve[id]; newmax[id] = max(newmax[id], abs(eventWeight())); if ( eventWeight() < 0.0 ) negw = true; } xSecWeights.resize(oldeve.size(), 1.0); for ( int i = 0, N = oldeve.size(); i < N; ++i ) if ( oldeve[i] ) xSecWeights[i] = double(neweve[i])/double(oldeve[i]); if ( maxScan() < 0 || neve > NEvents() ) NEvents(neve - cuteve); if ( lprup.size() == heprup.LPRUP.size() ) { for ( int id = 0, N = lprup.size(); id < N; ++id ) { vector<int>::iterator idit = find(heprup.LPRUP.begin(), heprup.LPRUP.end(), hepeup.IDPRUP); if ( idit == heprup.LPRUP.end() ) { Throw<LesHouchesInitError>() << "When scanning events, the LesHouschesReader '" << name() << "' found undeclared processes." << Exception::warning; heprup.NPRUP = 0; break; } int idh = idit - heprup.LPRUP.begin(); heprup.XMAXUP[idh] = newmax[id]; } } if ( heprup.NPRUP == 0 ) { // No heprup block was supplied or something went wrong. heprup.NPRUP = lprup.size(); heprup.LPRUP.resize(lprup.size()); heprup.XMAXUP.resize(lprup.size()); for ( int id = 0, N = lprup.size(); id < N; ++id ) { heprup.LPRUP[id] = lprup[id]; heprup.XMAXUP[id] = newmax[id]; } } else if ( abs(heprup.IDWTUP) != 1 ) { // Try to fix things if abs(heprup.IDWTUP) != 1. double sumxsec = 0.0; for ( int id = 0; id < heprup.NPRUP; ++id ) sumxsec += heprup.XSECUP[id]; weightScale = picobarn*neve*sumxsec/oldsum; } } if ( cacheFile() ) closeCacheFile(); if ( negw ) heprup.IDWTUP = min(-abs(heprup.IDWTUP), -1); return neve; } void BasicLesHouchesFileReader::open() { if ( filename().empty() ) throw LesHouchesFileError() << "No Les Houches file name. " << "Use 'set " << name() << ":FileName'." << Exception::runerror; cfile.open(filename()); if ( !cfile ) throw LesHouchesFileError() << "The BasicLesHouchesFileReader '" << name() << "' could not open the " << "event file called '" << theFileName << "'." << Exception::runerror; cfile.readline(); if ( !cfile.find("<LesHouchesEvents") ) return; map<string,string> attributes = StringUtils::xmlAttributes("LesHouchesEvents", cfile.getline()); LHFVersion = attributes["version"]; if ( LHFVersion.empty() ) return; bool readingHeader = false; bool readingInit = false; headerBlock = ""; // Loop over all lines until we hit the </init> tag. while ( cfile.readline() && !cfile.find("</init>") ) { if ( cfile.find("<header") ) { // We have hit the header block, so we should dump this and all // following lines to headerBlock until we hit the end of it. readingHeader = true; headerBlock = cfile.getline() + "\n"; } else if ( cfile.find("<init") ) { // We have hit the init block, so we should expect to find the // standard information in the following. But first check for // attributes. 
initAttributes = StringUtils::xmlAttributes("init", cfile.getline()); readingInit = true; cfile.readline(); if ( !( cfile >> heprup.IDBMUP.first >> heprup.IDBMUP.second >> heprup.EBMUP.first >> heprup.EBMUP.second >> heprup.PDFGUP.first >> heprup.PDFGUP.second >> heprup.PDFSUP.first >> heprup.PDFSUP.second >> heprup.IDWTUP >> heprup.NPRUP ) ) { heprup.NPRUP = -42; LHFVersion = ""; return; } heprup.resize(); for ( int i = 0; i < heprup.NPRUP; ++i ) { cfile.readline(); if ( !( cfile >> heprup.XSECUP[i] >> heprup.XERRUP[i] >> heprup.XMAXUP[i] >> heprup.LPRUP[i] ) ) { heprup.NPRUP = -42; LHFVersion = ""; return; } } } else if ( cfile.find("</header") ) { readingHeader = false; headerBlock += cfile.getline() + "\n"; } else if ( readingHeader ) { // We are in the process of reading the header block. Dump the // line to headerBlock. headerBlock += cfile.getline() + "\n"; } else if ( readingInit ) { // Here we found a comment line. Dump it to initComments. initComments += cfile.getline() + "\n"; } else { // We found some other stuff outside the standard tags. outsideBlock += cfile.getline() + "\n"; } } if ( !cfile ) { heprup.NPRUP = -42; LHFVersion = ""; return; } } bool BasicLesHouchesFileReader::readEvent() { reset(); if ( !doReadEvent() ) return false; // If we are just skipping event we do not need to reweight or do // anything fancy. if ( skipping ) return true; if ( cacheFile() && !scanning ) return true; // Reweight according to the re- and pre-weights objects in the // LesHouchesReader base class. lastweight = reweight(); if ( !reweightPDF && !cutEarly() ) return true; // We should try to reweight the PDFs or make early cuts here. fillEvent(); double x1 = incoming().first->momentum().plus()/ beams().first->momentum().plus(); if ( reweightPDF && inPDF.first && outPDF.first && inPDF.first != outPDF.first ) { if ( hepeup.XPDWUP.first <= 0.0 ) hepeup.XPDWUP.first = inPDF.first->xfx(inData.first, incoming().first->dataPtr(), sqr(hepeup.SCALUP*GeV), x1); double xf = outPDF.first->xfx(inData.first, incoming().first->dataPtr(), sqr(hepeup.SCALUP*GeV), x1); lastweight *= xf/hepeup.XPDWUP.first; hepeup.XPDWUP.first = xf; } double x2 = incoming().second->momentum().minus()/ beams().second->momentum().minus(); if ( reweightPDF && inPDF.second && outPDF.second && inPDF.second != outPDF.second ) { if ( hepeup.XPDWUP.second <= 0.0 ) hepeup.XPDWUP.second = inPDF.second->xfx(inData.second, incoming().second->dataPtr(), sqr(hepeup.SCALUP*GeV), x2); double xf = outPDF.second->xfx(inData.second, incoming().second->dataPtr(), sqr(hepeup.SCALUP*GeV), x2); lastweight *= xf/hepeup.XPDWUP.second; hepeup.XPDWUP.second = xf; } if ( cutEarly() ) { if ( !cuts().initSubProcess((incoming().first->momentum() + incoming().second->momentum()).m2(), 0.5*log(x1/x2)) ) lastweight = 0.0; tSubProPtr sub = getSubProcess(); TmpTransform<tSubProPtr> tmp(sub, Utilities::getBoostToCM(sub->incoming())); if ( !cuts().passCuts(*sub) ) lastweight = 0.0; } return true; } double BasicLesHouchesFileReader::getEvent() { if ( cacheFile() ) { if (overSampling_) { if ( !uncacheEvent() ) reopen(); } else { if ( !uncacheEvent() || stats.attempts()==NEvents() ) throw LesHouchesReopenWarning() << "More events requested than available in LesHouchesReader " << name() << Exception::runerror; } } else { if (overSampling_) { if ( !readEvent() ) reopen(); } else { if ( !readEvent() || stats.attempts()==NEvents() ) throw LesHouchesReopenWarning() << "More events requested than available in LesHouchesReader " << name() << Exception::runerror; } } 
++position; double max = maxWeights[hepeup.IDPRUP]*maxFactor; return max != 0.0? eventWeight()/max: 0.0; } void BasicLesHouchesFileReader::skip(long n) { HoldFlag<> skipflag(skipping); if(overSampling_) while ( n-- ) getEvent(); } bool BasicLesHouchesFileReader::doReadEvent() { if ( !cfile ) return false; if ( LHFVersion.empty() ) return false; if ( heprup.NPRUP < 0 ) return false; eventComments = ""; outsideBlock = ""; hepeup.NUP = 0; hepeup.XPDWUP.first = hepeup.XPDWUP.second = 0.0; // Keep reading lines until we hit the next event or the end of // the event block. Save any inbetween lines. Exit if we didn't // find an event. while ( cfile.readline() && !cfile.find("<event") ) outsideBlock += cfile.getline() + "\n"; // We found an event. First scan for attributes. eventAttributes = StringUtils::xmlAttributes("event", cfile.getline()); if ( !cfile.readline() ) return false; // The first line determines how many subsequent particle lines we // have. if ( !( cfile >> hepeup.NUP >> hepeup.IDPRUP >> hepeup.XWGTUP >> hepeup.SCALUP >> hepeup.AQEDUP >> hepeup.AQCDUP ) ) return false; hepeup.resize(); // Read all particle lines. for ( int i = 0; i < hepeup.NUP; ++i ) { if ( !cfile.readline() ) return false; if ( !( cfile >> hepeup.IDUP[i] >> hepeup.ISTUP[i] >> hepeup.MOTHUP[i].first >> hepeup.MOTHUP[i].second >> hepeup.ICOLUP[i].first >> hepeup.ICOLUP[i].second >> hepeup.PUP[i][0] >> hepeup.PUP[i][1] >> hepeup.PUP[i][2] >> hepeup.PUP[i][3] >> hepeup.PUP[i][4] >> hepeup.VTIMUP[i] >> hepeup.SPINUP[i] ) ) return false; - if(isnan(hepeup.PUP[i][0])||isnan(hepeup.PUP[i][1])|| - isnan(hepeup.PUP[i][2])||isnan(hepeup.PUP[i][3])|| - isnan(hepeup.PUP[i][4])) + if(std::isnan(hepeup.PUP[i][0])||std::isnan(hepeup.PUP[i][1])|| + std::isnan(hepeup.PUP[i][2])||std::isnan(hepeup.PUP[i][3])|| + std::isnan(hepeup.PUP[i][4])) throw Exception() << "nan's as momenta in Les Houches file " << Exception::eventerror; } // Now read any additional comments. while ( cfile.readline() && !cfile.find("</event>") ) eventComments += cfile.getline() + "\n"; if ( !cfile ) return false; return true; } void BasicLesHouchesFileReader::close() { cfile.close(); } void BasicLesHouchesFileReader::persistentOutput(PersistentOStream & os) const { os << neve << LHFVersion << outsideBlock << headerBlock << initComments << initAttributes << eventComments << eventAttributes << theFileName << overSampling_; } void BasicLesHouchesFileReader::persistentInput(PersistentIStream & is, int) { is >> neve >> LHFVersion >> outsideBlock >> headerBlock >> initComments >> initAttributes >> eventComments >> eventAttributes >> theFileName >> overSampling_; ieve = 0; } ClassDescription<BasicLesHouchesFileReader> BasicLesHouchesFileReader::initBasicLesHouchesFileReader; // Definition of the static class description member. void BasicLesHouchesFileReader::Init() { static ClassDocumentation<BasicLesHouchesFileReader> documentation ("Herwig::BasicLesHouchesFileReader is an base class to be used for objects " "which reads event files from matrix element generators. This class is " "able to read plain event files conforming to the Les Houches Event File " "accord."); static Parameter<BasicLesHouchesFileReader,string> interfaceFileName ("FileName", "The name of a file containing events conforming to the Les Houches " "protocol to be read into ThePEG. A file name ending in " "<code>.gz</code> will be read from a pipe which uses " "<code>zcat</code>. 
If a file name ends in <code>|</code> the " "preceeding string is interpreted as a command, the output of which " "will be read through a pipe.", &BasicLesHouchesFileReader::theFileName, "", false, false); static Switch<BasicLesHouchesFileReader,bool> interfaceOverSampling ("OverSampling", "Allow / Forbid reading of LH events more than once by the " "LH reader, allowing / protecting against statistical problems.", &BasicLesHouchesFileReader::overSampling_, true, false, false); static SwitchOption AllowOverSampling (interfaceOverSampling, "AllowOverSampling", "The reader will read events in the file more than once if more " "events are needed to generate the requested number than that in " "the LH file.", true); static SwitchOption ForbidOverSampling (interfaceOverSampling, "ForbidOverSampling", "The reader will NOT read events in the file more than once if more " "events are needed to generate the requested number than that in " "the LH file - instead it will stop when all have been read.", false); interfaceFileName.fileType(); interfaceFileName.rank(11); } diff --git a/Contrib/Analysis2/Histogram2/Histogram2.cc b/Contrib/Analysis2/Histogram2/Histogram2.cc --- a/Contrib/Analysis2/Histogram2/Histogram2.cc +++ b/Contrib/Analysis2/Histogram2/Histogram2.cc @@ -1,717 +1,717 @@ // -*- C++ -*- // // This is the implementation of the non-inlined, non-templated member // functions of the Histogram2 class. // #include "Histogram2.h" #include "ThePEG/Interface/ClassDocumentation.h" #ifdef ThePEG_TEMPLATES_IN_CC_FILE // #include "Histogram2.tcc" #endif #include "ThePEG/Persistency/PersistentOStream.h" #include "ThePEG/Persistency/PersistentIStream.h" #include "ThePEG/Utilities/Throw.h" using namespace Analysis2; void HistogramChannel::persistentOutput(PersistentOStream & os) const { os << _isCountingChannel << _bins << _binEntries << _outOfRange << _visible << _total << _nanEvents << _nanWeights << _finished; } void HistogramChannel::persistentInput(PersistentIStream & is) { is >> _isCountingChannel >> _bins >> _binEntries >> _outOfRange >> _visible >> _total >> _nanEvents >> _nanWeights >> _finished; } HistogramChannel& HistogramChannel::operator += (const HistogramChannel& c) { if (!c.isCountingChannel()) _isCountingChannel = false; for (unsigned int i = 0; i < _bins.size(); ++i) { _bins[i].first += c.bin(i).first; _bins[i].second += c.bin(i).second; } // uppon addition of a counting channel it remains a counting channel if (_isCountingChannel) { for (unsigned int i = 0; i < _bins.size(); ++i) { _binEntries[i] += c.binEntries()[i]; _nanWeights[i] += c.nanWeights()[i]; } _outOfRange.first += c.outOfRange().first; _outOfRange.second += c.outOfRange().second; _total += c.total(); _visible += c.visible(); _nanEvents += c.nanEvents(); } return *this; } HistogramChannel& HistogramChannel::operator -= (const HistogramChannel& c) { for (unsigned int i = 0; i < _bins.size(); ++i) { _bins[i].first -= c.bin(i).first; _bins[i].second += c.bin(i).second; } _isCountingChannel = false; return *this; } HistogramChannel& HistogramChannel::operator *= (const HistogramChannel& c) { for (unsigned int i = 0; i < _bins.size(); ++i) { _bins[i].first *= c.bin(i).first; _bins[i].second = _bins[i].second*sqr(c.bin(i).first)+c.bin(i).second*sqr(_bins[i].first); } _isCountingChannel = false; return *this; } HistogramChannel& HistogramChannel::operator *= (double factor) { for (vector<pair<double,double> >::iterator b = _bins.begin(); b != _bins.end(); ++b) { b->first *= factor; b->second *= sqr(factor); } _isCountingChannel = 
false; return *this; } HistogramChannel& HistogramChannel::operator += (double off) { for (vector<pair<double,double> >::iterator b = _bins.begin(); b != _bins.end(); ++b) { b->first += off; } _isCountingChannel = false; return *this; } HistogramChannel& HistogramChannel::operator *= (pair<double,double> factor) { for (vector<pair<double,double> >::iterator b = _bins.begin(); b != _bins.end(); ++b) { b->first *= factor.first; b->second = b->first*factor.second + b->second*sqr(factor.first); } _isCountingChannel = false; return *this; } HistogramChannel& HistogramChannel::operator /= (pair<double,double> factor) { for (vector<pair<double,double> >::iterator b = _bins.begin(); b != _bins.end(); ++b) { if (factor.first != 0.) { b->first /= factor.first; if (b->first != 0.) b->second = sqr(b->first/factor.first)*(b->second/sqr(b->first) + factor.second/sqr(factor.first)); else b->second = 0.; } else { b->first = 0.; b->second = 0.; } } _isCountingChannel = false; return *this; } HistogramChannel& HistogramChannel::operator /= (const HistogramChannel& c) { for (unsigned int i = 0; i < _bins.size(); ++i) { if (c.bin(i).first != 0. && _bins[i].first != 0.) { _bins[i].first /= c.bin(i).first; _bins[i].second = sqr(_bins[i].first/c.bin(i).first)* (_bins[i].second/sqr(_bins[i].first)+c.bin(i).second/sqr(c.bin(i).first)); } else { _bins[i].first = 0.; _bins[i].second = 0.; } } _isCountingChannel = false; return *this; } unsigned long HistogramChannel::nanWeightEvents () const { unsigned long all = 0; for (vector<unsigned long>::const_iterator n = _nanWeights.begin(); n != _nanWeights.end(); ++n) all += *n; return all; } void HistogramChannel::differential (const vector<pair<double,double> >& binning) { for (unsigned int i=0; i<binning.size(); ++i) { _bins[i].first /= (binning[i].second-binning[i].first); _bins[i].second /= (binning[i].second-binning[i].first); } } pair<double,double> HistogramChannel::binSum () const { pair<double,double> s = make_pair(0.,0.); for (vector<pair<double,double> >::const_iterator b = _bins.begin(); b != _bins.end(); ++b) { s.first += b->first; s.second += b->second; } return s; } pair<double,double> HistogramChannel::integrate (const vector<pair<double,double> >& binning) const { pair<double,double> integral = make_pair(0.,0.); for (unsigned int i = 0; i< _bins.size(); ++i) { integral.first += (binning[i].second-binning[i].first)*_bins[i].first; integral.second += sqr(binning[i].second-binning[i].first)*_bins[i].second; } return integral; } pair<double,double> HistogramChannel::average (const vector<pair<double,double> >& binning) const { pair<double,double> integral = make_pair(0.,0.); double volume = 0.; for (unsigned int i = 0; i< _bins.size(); ++i) { volume += (binning[i].second-binning[i].first); integral.first += (binning[i].second-binning[i].first)*_bins[i].first; integral.second += sqr(binning[i].second-binning[i].first)*_bins[i].second; } integral.first /= volume; integral.second /= sqr(volume); return integral; } HistogramChannel HistogramChannel::delta (const HistogramChannel& channel) const { HistogramChannel c(*this); c /= channel; c += -1.; return c; } HistogramChannel HistogramChannel::chi2 (const HistogramChannel& channel, double minfrac) const { HistogramChannel chi2 (*this); chi2 -= channel; chi2 *= chi2; for (unsigned int i = 0; i<_bins.size(); ++i) { double var = 0.; if (channel.bin(i).second/sqr(channel.bin(i).first) < minfrac) var = sqr(minfrac*channel.bin(i).first); else var = channel.bin(i).second; if (var != 0) 
chi2.bin(i,make_pair(chi2.bin(i).first/var,chi2.bin(i).second/var)); else chi2.bin(i,make_pair(0,0)); } return chi2; } Histogram2::Histogram2 (double low, double high, unsigned int bins, const string& name) { // get the bin length double length = (high-low)/(double)(bins); _range = make_pair(low,high); for (unsigned int i = 0; i<bins; ++i) { _binning.push_back(make_pair(low+i*length,low+(i+1)*length)); _binhash.insert(make_pair(low+(i+1)*length,i)); } if (!name.empty()) insertChannel(name); } Histogram2::Histogram2 (const vector<pair<double,double> >& binning, const string& name) { _range = make_pair(binning.begin()->first,binning.end()->second); _binning = binning; for (unsigned int i = 0; i<_binning.size(); ++i) { _binhash.insert(make_pair(_binning[i].second,i)); } _range = make_pair(_binning.front().first,_binning.back().second); if (!name.empty()) insertChannel(name); } Histogram2::Histogram2 (const string& dataFile, const string& dataName) { ifstream data (dataFile.c_str()); if (!data) { Throw<InitException>() << "Histogram2::Histogram2 : Building from datafile, but cannot open " << dataFile; } vector<pair<double,double> > dataCache; double low, high, dataval, errstat, errsys; double sigma2; string in; while (getline(data,in)) { in = StringUtils::stripws(in); if (in[0] == '#') continue; if (in == "") continue; istringstream theIn (in); theIn >> low >> high >> dataval >> errstat >> errsys; _binning.push_back(make_pair(low,high)); sigma2 = sqr(errstat) + sqr(errsys); dataCache.push_back(make_pair(dataval,sigma2)); } for (unsigned int i = 0; i<_binning.size(); ++i) { _binhash.insert(make_pair(_binning[i].second,i)); } _range = make_pair(_binning.front().first,_binning.back().second); HistogramChannel theData (dataCache); insertChannel(dataName,theData); } void Histogram2::book (const string& name, double event, double weight) { map<string,HistogramChannel>::iterator c = _channels.find(name); if (c != _channels.end()) { - if (isnan(event) || isinf(event)) c->second.nanEvent(); + if (!isfinite(event)) c->second.nanEvent(); else { if (event < range().first) { c->second.bookUnderflow(weight); return; } if (event > range().second) { c->second.bookOverflow(weight); return; } unsigned int bin = _binhash.upper_bound(event)->second; c->second.book(bin,weight); } } } vector<string> Histogram2::channels () const { vector<string> all; for (map<string,HistogramChannel>::const_iterator c = _channels.begin(); c != _channels.end(); ++c) all.push_back(c->first); return all; } void Histogram2::output (ostream& os, const string& name, unsigned int flags, char comment) const { bool bincenters = (flags & ChannelOutput::Bincenters) == ChannelOutput::Bincenters; bool noerrorbars = (flags & ChannelOutput::NoErrorbars) == ChannelOutput::NoErrorbars; bool nanevents = (flags & ChannelOutput::NanEvents) == ChannelOutput::NanEvents; bool statistics = (flags & ChannelOutput::Statistics) == ChannelOutput::Statistics; map<string,HistogramChannel>::const_iterator c = _channels.find(name); if (c == _channels.end()) return; os << comment << " channel = " << name << endl; if(statistics) { os << comment << " total entries = " << c->second.total() << " , visible entries = " << c->second.visible() << endl; if (c->second.isCountingChannel()) os << comment << " underflow = " << c->second.outOfRange().first << " , overflow = " << c->second.outOfRange().second << endl; if (c->second.nanEvents() > 0) os << comment << " nan events = " << c->second.nanEvents(); if (c->second.nanWeightEvents() > 0) os << " , nan weight events = " << 
c->second.nanWeightEvents() << endl; } for (unsigned int i = 0; i<_binning.size(); ++i) { if (bincenters) { os << (_binning[i].first+_binning[i].second)/2. << "\t"; } else { os << _binning[i].first << "\t" << _binning[i].second << "\t"; } os << c->second.bin(i).first; if (!noerrorbars) { os << "\t" << sqrt(c->second.bin(i).second); } if (nanevents) { os << "\t" << c->second.nanWeights()[i]; } os << endl; } } HistogramChannel HistogramChannel::profile () const { HistogramChannel temp (_bins.size(),false); for (unsigned int i = 0; i< _bins.size(); ++i) { pair<double,double> prof; // mean of weights prof.first = weightMean(i); // varaince of mean prof.second = binEntries(i) > 0 ? weightVariance(i)/binEntries(i) : 0; temp.bin(i,prof,binEntries(i)); } return temp; } void HistogramChannel::write (ostream& os, const string& name) { finish(); os << "<channel" << " name=\"" << name << "\"" << " counting=\"" << _isCountingChannel << "\""; if (_isCountingChannel) { os << " underflow=\"" << _outOfRange.first << "\"" << " overflow=\"" << _outOfRange.second << "\"" << " visible=\"" << _visible << "\"" << " total=\"" << _total << "\""; } if (_nanEvents != 0) os << " nanevents=\"" << _nanEvents << "\""; os << ">" << endl; os << "<bins>" << endl; for (unsigned int b = 0; b < _bins.size(); ++b) { os << "<bincontent sumweights=\"" << _bins[b].first << "\" sumsquaredweights=\"" << _bins[b].second << "\" entries=\"" << _binEntries[b] << "\"/>" << endl; } os << "</bins>" << endl; os << "<nanweights>" << endl; // only write out, if at least happend for one event if (nanWeightEvents() > 0) for (vector<unsigned long>::const_iterator n = _nanWeights.begin(); n != _nanWeights.end(); ++n) os << "<bincontent entries=\"" << *n << "\"/>" << endl; os << "</nanweights>" << endl; os << "</channel>" << endl; } string HistogramChannel::read (istream& is) { _finished = true; string tag; string name; map<string,string> attributes; map<string,string>::iterator atit; if (!is) return ""; // parse the channel tag tag = getNextTag(is); if (tag.find("<channel") == string::npos) return ""; attributes = StringUtils::xmlAttributes("channel",tag); atit = attributes.find("name"); if (atit == attributes.end()) return ""; name = atit->second; atit = attributes.find("counting"); if (atit == attributes.end()) return ""; fromString(atit->second,_isCountingChannel); atit = attributes.find("underflow"); if (atit == attributes.end() && _isCountingChannel) return ""; if (_isCountingChannel) fromString(atit->second,_outOfRange.first); atit = attributes.find("overflow"); if (atit == attributes.end() && _isCountingChannel) return ""; if (_isCountingChannel) fromString(atit->second,_outOfRange.second); atit = attributes.find("visible"); if (atit == attributes.end() && _isCountingChannel) return ""; if (_isCountingChannel) fromString(atit->second,_visible); atit = attributes.find("total"); if (atit == attributes.end() && _isCountingChannel) return ""; if (_isCountingChannel) fromString(atit->second,_total); atit = attributes.find("nanevents"); if (atit != attributes.end()) fromString(atit->second,_nanEvents); // read in the bin contents tag = getNextTag(is); if (tag != "<bins>") return ""; for (unsigned int i = 0; i < _bins.size(); ++i) { tag = getNextTag(is); if (tag.find("<bincontent") == string::npos) return ""; attributes = StringUtils::xmlAttributes("bincontent",tag); atit = attributes.find("sumweights"); if (atit == attributes.end()) return ""; fromString(atit->second,_bins[i].first); atit = attributes.find("sumsquaredweights"); if (atit == 
attributes.end()) return ""; fromString(atit->second,_bins[i].second); atit = attributes.find("entries"); if (atit == attributes.end()) return ""; fromString(atit->second,_binEntries[i]); } tag = getNextTag(is); if (tag != "</bins>") return ""; // read in the nan weight histogram tag = getNextTag(is); if (tag != "<nanweights>") return ""; bool nanweights = true; for (unsigned int i = 0; i < _bins.size(); ++i) { tag = getNextTag(is); if (tag == "</nanweights>") { nanweights = false; break; } if (tag.find("<bincontent") == string::npos) return ""; attributes = StringUtils::xmlAttributes("bincontent",tag); atit = attributes.find("entries"); if (atit == attributes.end()) return ""; fromString(atit->second,_nanWeights[i]); } if (nanweights) tag = getNextTag(is); if (tag != "</nanweights>") return ""; tag = getNextTag(is); if (tag != "</channel>") return ""; return name; } void Histogram2::store (const string& name) { ofstream os ((name+".h2").c_str()); if (!os) return; os << "<?xml version=\"1.0\"?>" << endl; os << "<Analysis2Histogram version=\"1.0\"" << " AnalysisName=\"" << Named::name() << "\">" << endl; os << "<!--" << endl << " WARNING" << endl << " Though this is valid XML, the Histogram2 class will" << endl << " not be able to parse arbitraty, XML-valid changes" << endl << " to this file!" << endl << "-->" << endl; os << "<xsec unit=\"nanobarn\" value=\"" << _xSec/nanobarn << "\"/>" << endl; os << "<binning>" << endl; for (vector<pair<double,double> >::const_iterator b = _binning.begin(); b != _binning.end(); ++b) { os << "<bin lower=\"" << b->first << "\" upper=\"" << b->second << "\"/>" << endl; } os << "</binning>" << endl; os << "<channels size=\"" << _channels.size() << "\">" << endl; for(map<string,HistogramChannel>::iterator c = _channels.begin(); c != _channels.end(); ++c) { c->second.write(os,c->first); } os << "</channels>" << endl; os << "</Analysis2Histogram>" << endl; } bool Histogram2::load (const string& fname) { ifstream is ((fname+".h2").c_str()); if (!is) return false; string tag = getNextTag(is); if (tag.find("<Analysis2Histogram") == string::npos) return false; string name; map<string,string> attributes = StringUtils::xmlAttributes("Analysis2Histogram",tag); map<string,string>::iterator atit = attributes.find("name"); if (atit == attributes.end()) return false; Named::name(atit->second); // get the cross section tag = getNextTag(is); if (tag.find("<xsec") == string::npos) return false; attributes = StringUtils::xmlAttributes("xsec",tag); atit = attributes.find("unit"); if (atit == attributes.end()) return false; if (atit->second != "nanobarn") return false; // switch units in a future version atit = attributes.find("value"); if (atit == attributes.end()) return false; double theXsec; fromString(atit->second,theXsec); _xSec = theXsec * nanobarn; // get the binning tag = getNextTag(is); if (tag != "<binning>") return false; tag = getNextTag(is); double binlower, binupper; while (tag != "</binning>") { if (tag.find("<bin") == string::npos && tag != "</binning>") return false; attributes = StringUtils::xmlAttributes("bin",tag); atit = attributes.find("lower"); if (atit == attributes.end()) return false; fromString(atit->second,binlower); atit = attributes.find("upper"); if (atit == attributes.end()) return false; fromString(atit->second,binupper); _binning.push_back(make_pair(binlower,binupper)); tag = getNextTag(is); } if (!_binning.size()) return false; for (unsigned int i = 0; i<_binning.size(); ++i) { _binhash.insert(make_pair(_binning[i].second,i)); } _range = 
make_pair(_binning.front().first,_binning.back().second); // get the channels tag = getNextTag(is); if (tag.find("<channels") == string::npos) return false; attributes = StringUtils::xmlAttributes("channels",tag); atit = attributes.find("size"); if (atit == attributes.end()) return false; unsigned int numchannels; fromString(atit->second,numchannels); for (unsigned int i = 0; i<numchannels; ++i) { HistogramChannel ch (_binning.size()); name = ch.read(is); if (name != "") _channels.insert(make_pair(name,ch)); } // there has to be at least one channel if (!_channels.size()) return false; tag = getNextTag(is); if (tag != "</channels>") return false; tag = getNextTag(is); if (tag != "</Analysis2Histogram>") return false; return true; } Histogram2Ptr Histogram2::loadToHistogram (const string& name) const { Histogram2Ptr histo = new_ptr(Histogram2()); bool ok = histo->load(name); if (!ok) histo = Histogram2Ptr(); return histo; } void Histogram2::combine (const string& prefix, const string& name, unsigned int numRuns, const string& dataChannel, const string& mcChannel) { vector<Histogram2Ptr> inHistos; for (unsigned int i = 0; i<numRuns; ++i) { ostringstream fname (""); fname << prefix << "." << i << "/" << name; Histogram2Ptr in = loadToHistogram (fname.str()); if (in) { inHistos.push_back(in); } } // get the total cross section CrossSection all = 0.*nanobarn; unsigned long allEvents = 0; for(vector<Histogram2Ptr>::iterator h = inHistos.begin(); h != inHistos.end(); ++h) { if ((**h).haveChannel(mcChannel)) { all += (**h).xSec() * (**h).channel(mcChannel).total(); allEvents += (**h).channel(mcChannel).total(); } } all /= allEvents; xSec (all); if (!inHistos.size()) return; vector<string> channels = inHistos[0]->channels(); _binning = inHistos[0]->binning(); _range = inHistos[0]->range(); _binhash = inHistos[0]->binhash(); unsigned int numBins = _binning.size(); for (vector<string>::iterator c = channels.begin(); c != channels.end(); ++c) { if (*c == dataChannel && inHistos[0]->haveChannel(dataChannel)) { _channels.insert(make_pair(dataChannel,HistogramChannel(inHistos[0]->channel(dataChannel)))); continue; } HistogramChannel ch (numBins); for (vector<Histogram2Ptr>::iterator h = inHistos.begin(); h != inHistos.end(); ++h) { if ((**h).haveChannel(*c)) { ch += (**h).channel(*c); } } _channels.insert(make_pair(*c,ch)); } } Histogram2::Histogram2 (vector<double> limits) { for (unsigned int i = 0; i < limits.size()-1; ++i) _binning.push_back(make_pair(limits[i],limits[i+1])); for (unsigned int i = 0; i<_binning.size(); ++i) { _binhash.insert(make_pair(_binning[i].second,i)); } _range = make_pair(_binning.front().first,_binning.back().second); insertChannel("mc"); } Histogram2::Histogram2 (vector<double> limits, vector<double> data, vector<double> dataerror) { vector<pair<double,double> > dataCache; for (unsigned int i = 0; i < limits.size(); ++i) { if (i<limits.size()-1) _binning.push_back(make_pair(limits[i],limits[i+1])); dataCache.push_back(make_pair(data[i],sqr(dataerror[i]))); } for (unsigned int i = 0; i<_binning.size(); ++i) { _binhash.insert(make_pair(_binning[i].second,i)); } _range = make_pair(_binning.front().first,_binning.back().second); insertChannel("mc"); insertChannel("data",HistogramChannel(dataCache)); } Histogram2 Histogram2::ratioWith(const Histogram2& h2) const { Histogram2 tmp (*this); tmp.channel("mc") /= h2.channel("mc"); return tmp; } Histogram2::~Histogram2() {} void Histogram2::persistentOutput(PersistentOStream & os) const { // *** ATTENTION *** os << ; // Add all member 
variable which should be written persistently here. os << _binning << _range << _binhash << _channels << ounit(_xSec,nanobarn); } void Histogram2::persistentInput(PersistentIStream & is, int) { // *** ATTENTION *** is >> ; // Add all member variable which should be read persistently here. is >> _binning >> _range >> _binhash >> _channels >> iunit(_xSec,nanobarn); } ClassDescription<Histogram2> Histogram2::initHistogram2; // Definition of the static class description member. void Histogram2::Init() { static ClassDocumentation<Histogram2> documentation ("Histogram2 provides advanced histogramming."); } diff --git a/Contrib/Analysis2/Histogram2/Histogram2.icc b/Contrib/Analysis2/Histogram2/Histogram2.icc --- a/Contrib/Analysis2/Histogram2/Histogram2.icc +++ b/Contrib/Analysis2/Histogram2/Histogram2.icc @@ -1,353 +1,348 @@ // -*- C++ -*- // (C) 2007-2009 Simon Plaetzer -- sp@particle.uni-karlsruhe.de -// workaround for OS X bug where isnan() and isinf() are hidden -// when <iostream> is included -extern "C" int isnan(double) throw(); -extern "C" int isinf(double) throw(); - namespace Analysis2 { inline PersistentOStream& operator << (PersistentOStream& os, const HistogramChannel& h) { h.persistentOutput(os); return os; } inline PersistentIStream& operator >> (PersistentIStream& is, HistogramChannel& h) { h.persistentInput(is); return is; } inline string getNextTag (istream& is) { string line = ""; while (line == "") { if (!is) return ""; getline(is,line); line = StringUtils::stripws(line); if (line.length() && line[0] != '<') { line = ""; continue; } if (line.length() && line[0] == '<') { string::size_type a = line.find_first_of("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz/"); if (a != 1) { line = ""; continue; } } } return line; } inline HistogramChannel::HistogramChannel () : _isCountingChannel(true), _bins(vector<pair<double,double> >()), _binEntries(vector<unsigned long>()), _outOfRange(make_pair(0.,0.)), _visible(0), _total(0), _nanEvents(0), _nanWeights(vector<unsigned long>()), _finished(false) {} inline HistogramChannel::HistogramChannel (unsigned int bins, bool counting) : _isCountingChannel(counting), _bins(bins,make_pair(0.,0.)), _binEntries(bins,0), _outOfRange(make_pair(0.,0.)), _visible(0), _total(0), _nanEvents(0), _nanWeights(bins,0), _finished(false) {} inline HistogramChannel::HistogramChannel (const vector<pair<double,double> >& bins, double underflow, double overflow) : _isCountingChannel(false), _bins(bins), _binEntries(bins.size(),0), _outOfRange(make_pair(underflow,overflow)), _visible(0), _total(0), _nanEvents(0), _nanWeights(bins.size(),0), _finished(false) {} inline void HistogramChannel::book (unsigned int bin, double weight) { - if (isnan(weight) || isinf(weight)) { + if (!isfinite(weight)) { ++_nanWeights[bin]; return; } _bins[bin].first += weight; _bins[bin].second += sqr(weight); ++_binEntries[bin]; ++_visible; ++_total; } inline void HistogramChannel::bookUnderflow (double weight) { _outOfRange.first += weight; ++_total; } inline void HistogramChannel::bookOverflow (double weight) { _outOfRange.second += weight; ++_total; } inline void HistogramChannel::nanEvent () { ++_nanEvents; } inline HistogramChannel& HistogramChannel::operator /= (double a) { return this->operator *= (1/a); } inline bool HistogramChannel::isCountingChannel () const { return _isCountingChannel; } inline vector<pair<double,double> > HistogramChannel::bins () const { return _bins; } inline pair<double,double> HistogramChannel::bin (unsigned int bin) const { return _bins[bin]; } 
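In Histogram2.icc the patch also drops the old OS X workaround that re-declared isnan()/isinf() as extern "C", and HistogramChannel::book() now guards with !isfinite(weight) so that NaN or infinite weights are tallied in a separate counter instead of being added into the bin sums. A small, self-contained sketch of that booking guard, with hypothetical names and plain std::vector storage rather than the real channel class:

```cpp
#include <cmath>
#include <iostream>
#include <limits>
#include <vector>

// Toy channel: a non-finite weight increments a per-bin counter and is
// never accumulated, mirroring the !isfinite(weight) guard in book().
struct ToyChannel {
  std::vector<double> sumw, sumw2;
  std::vector<unsigned long> entries, nanWeights;
  explicit ToyChannel(std::size_t bins)
    : sumw(bins, 0.0), sumw2(bins, 0.0), entries(bins, 0), nanWeights(bins, 0) {}

  void book(std::size_t bin, double weight) {
    if (!std::isfinite(weight)) {        // replaces isnan(weight) || isinf(weight)
      ++nanWeights[bin];
      return;
    }
    sumw[bin]  += weight;
    sumw2[bin] += weight * weight;
    ++entries[bin];
  }
};

int main() {
  ToyChannel c(3);
  c.book(1, 0.5);
  c.book(1, std::numeric_limits<double>::infinity());  // counted, not accumulated
  std::cout << "bin 1: sum of weights = " << c.sumw[1]
            << ", non-finite weights = " << c.nanWeights[1] << "\n";
  return 0;
}
```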
inline void HistogramChannel::bin (unsigned int bin, pair<double,double> val, unsigned long entries) { _bins[bin] = val; if (entries > 0) { _visible = _visible - _binEntries[bin] + entries; _binEntries[bin] = entries; } } inline vector<unsigned long> HistogramChannel::binEntries () const { return _binEntries; } inline unsigned long HistogramChannel::binEntries (unsigned int bin) const { return _binEntries[bin]; } inline pair<double,double> HistogramChannel::outOfRange () const { return _outOfRange; } inline unsigned long HistogramChannel::visible () const { return _visible; } inline unsigned long HistogramChannel::total () const { return _total; } inline vector<unsigned long> HistogramChannel::nanWeights () const { return _nanWeights; } inline unsigned long HistogramChannel::nanEvents () const { return _nanEvents; } inline void HistogramChannel::finish () { if (_finished) return; _finished = true; } inline double HistogramChannel::binVariance (unsigned int bin) const { return _bins[bin].second; } inline double HistogramChannel::weightMean (unsigned int bin) const { return _binEntries[bin] > 0 ? _bins[bin].first/_binEntries[bin] : 0.; } inline double HistogramChannel::weightVariance (unsigned int bin) const { return _binEntries[bin] > 1 ? (_bins[bin].second-sqr(_bins[bin].first)/_binEntries[bin])/(_binEntries[bin] - 1) : 0.; } inline pair<double,double> HistogramChannel::binAverage () const { return make_pair(binSum().first/_bins.size(),binSum().second/(_bins.size()*_bins.size())); } inline HistogramChannel operator + (const HistogramChannel& a, const HistogramChannel& b) { HistogramChannel c(a); c += b; return c; } inline HistogramChannel operator - (const HistogramChannel& a, const HistogramChannel& b) { HistogramChannel c(a); c -= b; return c; } inline HistogramChannel operator * (const HistogramChannel& a, const HistogramChannel& b) { HistogramChannel c(a); c *= b; return c; } inline HistogramChannel operator / (const HistogramChannel& a, const HistogramChannel& b) { HistogramChannel c(a); c /= b; return c; } inline Histogram2::Histogram2 () : _binning(), _binhash(), _channels(), _xSec(0.*nanobarn) { } inline void Histogram2::setName (const string& name) { Named::name(name); } inline bool Histogram2::haveChannel (const string& name) const { return _channels.find(name) != _channels.end(); } inline HistogramChannel& Histogram2::channel (const string& name) { return _channels.find(name)->second; } inline HistogramChannel Histogram2::channel (const string& name) const { map<string,HistogramChannel>::const_iterator c = _channels.find(name); if (c != _channels.end()) return c->second; return HistogramChannel (0); } inline void Histogram2::insertChannel (const string& name, const HistogramChannel& c) { _channels.insert(make_pair(name,c)); } inline void Histogram2::insertChannel (const string& name) { _channels.insert(make_pair(name,HistogramChannel(_binning.size()))); } inline HistogramChannel Histogram2::removeChannel (const string& name) { map<string,HistogramChannel>::iterator c = _channels.find(name); if (c != _channels.end()) { HistogramChannel rem (c->second); _channels.erase(c); return rem; } return HistogramChannel(0); } inline const vector<pair<double,double> >& Histogram2::binning () const { return _binning; } inline const map<double,unsigned int>& Histogram2::binhash () const { return _binhash; } inline pair<double,double> Histogram2::range () const { return _range; } inline CrossSection Histogram2::xSec () const { return _xSec; } inline void Histogram2::xSec (CrossSection xs) { 
_xSec = xs; } inline void Histogram2::finish (const string& name) { map<string,HistogramChannel>::iterator c = _channels.find(name); if (c != _channels.end()) { c->second.finish(); } } inline void Histogram2::differential (const string& name) { map<string,HistogramChannel>::iterator c = _channels.find(name); if (c != _channels.end()) { c->second.differential(_binning); } } inline pair<double,double> Histogram2::integrate (const string& name) const { pair<double,double> integral = make_pair(0.,0.); map<string,HistogramChannel>::const_iterator c = _channels.find(name); if (c != _channels.end()) { integral = c->second.integrate(_binning); } return integral; } inline void Histogram2::normalise (const string& name) { map<string,HistogramChannel>::iterator c = _channels.find(name); if (c != _channels.end()) { c->second /= integrate(name); } } inline void Histogram2::rescale (const string& name, double x) { map<string,HistogramChannel>::iterator c = _channels.find(name); if (c != _channels.end()) { c->second *= x; } } inline void Histogram2::rescale (double x) { for (map<string,HistogramChannel>::iterator c = _channels.begin(); c != _channels.end(); ++c) { c->second *= x; } } inline void Histogram2::normalise (const string& name, const string& data) { pair<double,double> norm = integrate(data); map<string,HistogramChannel>::iterator c = _channels.find(name); if (norm.first == 0. || c == _channels.end()) return; c->second /= c->second.integrate(_binning); c->second *= norm; } inline pair<double,double> Histogram2::chi2perDOF (const string& hyp, const string& dat, double minfrac) const { map<string,HistogramChannel>::const_iterator hypothesis = _channels.find(hyp); map<string,HistogramChannel>::const_iterator data = _channels.find(dat); pair<double,double> chi2 = make_pair(0.,0.); if (hypothesis != _channels.end() && data != _channels.end()) { chi2 = hypothesis->second.chi2(data->second,minfrac).average(_binning); } return chi2; } inline void Histogram2::operator+=(double event) { book("mc",event); } inline void Histogram2::addWeighted(double event, double weight) { book("mc",event,weight); } inline unsigned int Histogram2::numberOfBins() const { return _binning.size(); } inline void Histogram2::normaliseToData() { normalise("mc","data"); } inline void Histogram2::normaliseToCrossSection(const string& name) { normalise(name); channel(name) *= _xSec/nanobarn; } inline void Histogram2::chiSquared(double & chisq, unsigned int & ndegrees, double minfrac) const { ndegrees = _binning.size(); chisq = chi2perDOF("mc","data",minfrac).first; } inline IBPtr Histogram2::clone() const { return new_ptr(*this); } inline IBPtr Histogram2::fullclone() const { return new_ptr(*this); } // If needed, insert default implementations of virtual function defined // in the InterfacedBase class here (using ThePEG-interfaced-impl in Emacs). } diff --git a/Contrib/FxFx/FxFxFileReader.cc b/Contrib/FxFx/FxFxFileReader.cc --- a/Contrib/FxFx/FxFxFileReader.cc +++ b/Contrib/FxFx/FxFxFileReader.cc @@ -1,793 +1,793 @@ // -*- C++ -*- // // FxFxFileReader.cc is a part of ThePEG - Toolkit for HEP Event Generation // Copyright (C) 1999-2011 Leif Lonnblad // // ThePEG is licenced under version 2 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. // // // This is the implementation of the non-inlined, non-templated member // functions of the FxFxFileReader class. 
// #include "FxFxFileReader.h" #include "ThePEG/Interface/ClassDocumentation.h" #include "ThePEG/Interface/Reference.h" #include "ThePEG/Interface/Switch.h" #include "ThePEG/Interface/Parameter.h" #include "ThePEG/Utilities/Throw.h" #include "ThePEG/PDT/DecayMode.h" #include "ThePEG/Persistency/PersistentOStream.h" #include "ThePEG/Persistency/PersistentIStream.h" #include <boost/algorithm/string.hpp> #include <boost/lexical_cast.hpp> #include <sstream> #include <iostream> using namespace ThePEG; FxFxFileReader:: FxFxFileReader(const FxFxFileReader & x) : FxFxReader(x), neve(x.neve), ieve(0), LHFVersion(x.LHFVersion), outsideBlock(x.outsideBlock), headerBlock(x.headerBlock), initComments(x.initComments), initAttributes(x.initAttributes), eventComments(x.eventComments), eventAttributes(x.eventAttributes), theFileName(x.theFileName), theQNumbers(x.theQNumbers), theDecayer(x.theDecayer) {} FxFxFileReader::~FxFxFileReader() {} IBPtr FxFxFileReader::clone() const { return new_ptr(*this); } IBPtr FxFxFileReader::fullclone() const { return new_ptr(*this); } bool FxFxFileReader::preInitialize() const { return true; } void FxFxFileReader::doinit() { FxFxReader::doinit(); // are we using QNUMBERS if(!theQNumbers) return; // parse the header block and create // any new particles needed in QNUMBERS blocks string block = headerBlock; string line = ""; bool readingSLHA = false; int (*pf)(int) = tolower; unsigned int newNumber(0); do { line = StringUtils::car(block,"\r\n"); block = StringUtils::cdr(block,"\r\n"); if(line[0]=='#') continue; // are we reading the SLHA block if(readingSLHA) { // reached the end of slha block ? if(line.find("</slha") != string::npos) { readingSLHA = false; break; } // remove trailing comment from line vector<string> split = StringUtils::split(line,"#"); // check for a qnumbers block transform(split[0].begin(), split[0].end(), split[0].begin(), pf); // if not contine if(split[0].find("block qnumbers")==string::npos) continue; // get name from comment string name; if(split.size()>=2) { name = StringUtils::stripws(split[1]); } else { ++newNumber; ostringstream tname; tname << "NP" << newNumber; name = tname.str(); } // extract the PDG code split = StringUtils::split(split[0]," "); istringstream is(split[2]); long PDGCode(0); is >> PDGCode; // get the charge, spin, colour and whether an antiparticle int charge(0),spin(0),colour(0),anti(0); for(unsigned int ix=0;ix<4;++ix) { line = StringUtils::car(block,"\r\n"); block = StringUtils::cdr(block,"\r\n"); int dummy[2]; istringstream is(line); is >> dummy[0] >> dummy[1]; switch (dummy[0]) { case 1: charge = dummy[1]; break; case 2: spin = dummy[1]; break; case 3: colour = dummy[1]; break; case 4: anti = dummy[1]; break; default: assert(false); } } // check if particles already exist PDPair newParticle; newParticle.first = getParticleData(PDGCode); if(newParticle.first) Throw<SetupException>() << "Particle with PDG code " << PDGCode << " whose creation was requested in a QNUMBERS Block" << " already exists. Retaining the original particle" << Exception::warning; if(anti) { newParticle.second = getParticleData(-PDGCode); if(newParticle.second) Throw<SetupException>() << "Anti-particle with PDG code " << -PDGCode << " whose creation was requested in a QNUMBERS Block" << " already exists. 
Retaining the original particle" << Exception::warning; if(( newParticle.first && !newParticle.second ) || ( newParticle.second && !newParticle.first ) ) Throw<SetupException>() << "Either particle or anti-particle with PDG code " << PDGCode << " whose creation was requested in a QNUMBERS Block" << " already exists, but not both the particle and antiparticle. " << " Something dodgy here stopping" << Exception::runerror; } // already exists continue if(newParticle.first) continue; // create the particles // particle with no anti particle if( anti == 0 ) { // construct the name if(name=="") { ostringstream temp; temp << PDGCode; name = temp.str(); } // create the ParticleData object newParticle.first = ParticleData::Create(PDGCode,name); } // particle anti-particle pair else { // construct the names string nameAnti; if(name=="") { ostringstream temp; temp << PDGCode; name = temp.str(); ostringstream temp2; temp << -PDGCode; nameAnti = temp2.str(); } else { nameAnti=name; for(string::iterator it=nameAnti.begin();it!=nameAnti.end();++it) { if(*it=='+') nameAnti.replace(it,it+1,"-"); else if(*it=='-') nameAnti.replace(it,it+1,"+"); } if(nameAnti==name) nameAnti += "bar"; } // create the ParticleData objects newParticle = ParticleData::Create(PDGCode,name,nameAnti); } // set the particle properties if(colour==1) colour = 0; newParticle.first->iColour(PDT::Colour(colour)); newParticle.first->iSpin (PDT::Spin (spin )); newParticle.first->iCharge(PDT::Charge(charge)); // register it generator()->preinitRegister(newParticle.first, "/Herwig/Particles/"+newParticle.first->PDGName()); // set the antiparticle properties if(newParticle.second) { if(colour==3||colour==6) colour *= -1; charge = -charge; newParticle.second->iColour(PDT::Colour(colour)); newParticle.second->iSpin (PDT::Spin (spin )); newParticle.second->iCharge(PDT::Charge(charge)); // register it generator()->preinitRegister(newParticle.second, "/Herwig/Particles/"+newParticle.second->PDGName()); } } // start of SLHA block ? else if(line.find("<slha") != string::npos) { readingSLHA = true; } } while(line!=""); // now set any masses/decay modes block = headerBlock; line=""; readingSLHA=false; bool ok=true; do { line = StringUtils::car(block,"\r\n"); block = StringUtils::cdr(block,"\r\n"); // are we reading the SLHA block if(readingSLHA) { // reached the end? if(line.find("</slha") == 0 ) { readingSLHA = false; break; } // make lower case transform(line.begin(),line.end(),line.begin(), pf); // found the mass block ? if(line.find("block mass")!=string::npos) { // read it line = StringUtils::car(block,"\r\n"); // check not at end while(line[0] != 'D' && line[0] != 'B' && line[0] != 'd' && line[0] != 'b' && line != "") { // skip comment lines if(line[0] == '#') { block = StringUtils::cdr(block,"\r\n"); line = StringUtils::car(block,"\r\n"); continue; } // get the mass and PGD code istringstream temp(line); long id; double mass; temp >> id >> mass; // skip resetting masses on SM particles // as it can cause problems later on in event generation if(abs(id)<=6 || (abs(id)>=11 && abs(id)<=16) || abs(id)==23 || abs(id)==24) { // Throw<SetupException>() << "Standard model mass for PID " // << id // << " will not be changed." 
// << Exception::warning; block = StringUtils::cdr(block,"\r\n"); line = StringUtils::car(block,"\r\n"); continue; } // magnitude of mass for susy models mass = abs(mass); // set the mass tPDPtr particle = getParticleData(id); if(!particle) throw SetupException() << "FxFxFileReader::doinit() - Particle with PDG code not" << id << " not found." << Exception::runerror; const InterfaceBase * ifb = BaseRepository::FindInterface(particle, "NominalMass"); ostringstream os; os << mass; ifb->exec(*particle, "set", os.str()); // read the next line block = StringUtils::cdr(block,"\r\n"); line = StringUtils::car(block,"\r\n"); }; } // found a decay block else if(line.find("decay") == 0) { // get PGD code and width istringstream iss(line); string dummy; long parent(0); Energy width(ZERO); iss >> dummy >> parent >> iunit(width, GeV); // get the ParticleData object PDPtr inpart = getParticleData(parent); if(!inpart) { throw SetupException() << "FxFxFileReader::doinit() - A ParticleData object with the PDG code " << parent << " does not exist. " << Exception::runerror; return; } if ( abs(inpart->id()) == 6 || abs(inpart->id()) == 15 || abs(inpart->id()) == 23 || abs(inpart->id()) == 24 || abs(inpart->id()) == 25 ) { Throw<SetupException>() << "\n" "************************************************************************\n" "* Your LHE file changes the width of " << inpart->PDGName() << ".\n" "* This can cause serious problems in the event generation!\n" "************************************************************************\n" "\n" << Exception::warning; } else if (inpart->width() > ZERO && width <= ZERO) { Throw<SetupException>() << "\n" "************************************************************************\n" "* Your LHE file zeroes the non-zero width of " << inpart->PDGName() << ".\n" "* If " << inpart->PDGName() << " is a decaying SM particle,\n" "* this can cause serious problems in the event generation!\n" "************************************************************************\n" "\n" << Exception::warning; } // set the width inpart->width(width); if( width > ZERO ) { inpart->cTau(hbarc/width); inpart->widthCut(5.*width); inpart->stable(false); } // construct prefix for DecayModes string prefix(inpart->name() + "->"), tag(prefix),line(""); unsigned int nmode(0); // read any decay modes line = StringUtils::car(block,"\r\n"); while(line[0] != 'D' && line[0] != 'B' && line[0] != 'd' && line[0] != 'b' && line[0] != '<' && line != "") { // skip comments if(line[0] == '#') { block = StringUtils::cdr(block,"\r\n"); line = StringUtils::car(block,"\r\n"); continue; } // read decay mode and construct the tag istringstream is(line); double brat(0.); unsigned int nda(0),npr(0); is >> brat >> nda; while( true ) { long t; is >> t; if( is.fail() ) break; if( t == abs(parent) ) throw SetupException() << "An error occurred while read a decay of the " << inpart->PDGName() << ". One of its products has the same PDG code " << "as the parent particle in FxFxFileReader::doinit()." << " Please check the Les Houches file.\n" << Exception::runerror; tcPDPtr p = getParticleData(t); if( !p ) throw SetupException() << "FxFxFileReader::doinit() -" << " An unknown PDG code has been encounterd " << "while reading a decay mode. 
ID: " << t << Exception::runerror; ++npr; tag += p->name() + ","; } if( npr != nda ) throw SetupException() << "FxFxFileReader::doinit() - While reading a decay of the " << inpart->PDGName() << " from an SLHA file, an inconsistency " << "between the number of decay products and the value in " << "the 'NDA' column was found. Please check if the spectrum " << "file is correct.\n" << Exception::warning; // create the DecayMode if( npr > 1 ) { if( nmode==0 ) { generator()->preinitInterface(inpart, "VariableRatio" , "set","false"); if(inpart->massGenerator()) { ok = false; Throw<SetupException>() << inpart->PDGName() << " already has a MassGenerator set" << " this is incompatible with using QNUMBERS " << "Use\n" << "set " << inpart->fullName() << ":Mass_generator NULL\n" << "to fix this." << Exception::warning; } if(inpart->widthGenerator()) { ok = false; Throw<SetupException>() << inpart->PDGName() << " already has a WidthGenerator set" << " this is incompatible with using QNUMBERS " << "Use\n" << "set " << inpart->fullName() << ":Width_generator NULL\n" << "to fix this." << Exception::warning; } unsigned int ntemp=0; for(DecaySet::const_iterator dit = inpart->decayModes().begin(); dit != inpart->decayModes().end(); ++dit ) { if((**dit).on()) ++ntemp; } if(ntemp!=0) { ok = false; Throw<SetupException>() << inpart->PDGName() << " already has DecayModes" << " this is incompatible with using QNUMBERS " << "Use\n" << "do " << inpart->fullName() << ":SelectDecayModes none\n" << " to fix this." << Exception::warning; } } inpart->stable(false); tag.replace(tag.size() - 1, 1, ";"); DMPtr dm = generator()->findDecayMode(tag); if(!theDecayer) Throw<SetupException>() << "FxFxFileReader::doinit() Decayer must be set using the " << "FxFxFileReader:Decayer" << " must be set to allow the creation of new" << " decay modes." << Exception::runerror; if(!dm) { dm = generator()->preinitCreateDecayMode(tag); if(!dm) Throw<SetupException>() << "FxFxFileReader::doinit() - Needed to create " << "new decaymode but one could not be created for the tag " << tag << Exception::warning; } generator()->preinitInterface(dm, "Decayer", "set", theDecayer->fullName()); ostringstream br; br << setprecision(13) << brat; generator()->preinitInterface(dm, "BranchingRatio", "set", br.str()); generator()->preinitInterface(dm, "OnOff", "set", "On"); if(dm->CC()) { generator()->preinitInterface(dm->CC(), "BranchingRatio", "set", br.str()); generator()->preinitInterface(dm->CC(), "OnOff", "set", "On"); } ++nmode; } tag=prefix; // read the next line block = StringUtils::cdr(block,"\r\n"); line = StringUtils::car(block,"\r\n"); }; if(nmode>0) { inpart->update(); if(inpart->CC()) inpart->CC()->update(); } } } // start of SLHA block ? else if(line.find("<slha") != string::npos) { readingSLHA = true; } } while(line!=""); if(!ok) throw SetupException() << "Problem reading QNUMBERS blocks in FxFxFileReader::doinit()" << Exception::runerror; } void FxFxFileReader::initialize(FxFxEventHandler & eh) { FxFxReader::initialize(eh); if ( LHFVersion.empty() ) Throw<FxFxFileError>() << "The file associated with '" << name() << "' does not contain a " << "proper formatted Les Houches event file. The events may not be " << "properly sampled." << Exception::warning; } //vector<string> FxFxFileReader::optWeightNamesFunc() { return optionalWeightsNames; } vector<string> FxFxFileReader::optWeightsNamesFunc() { return optionalWeightsNames; } void FxFxFileReader::open() { if ( filename().empty() ) throw FxFxFileError() << "No Les Houches file name. 
" << "Use 'set " << name() << ":FileName'." << Exception::runerror; cfile.open(filename()); if ( !cfile ) throw FxFxFileError() << "The FxFxFileReader '" << name() << "' could not open the " << "event file called '" << theFileName << "'." << Exception::runerror; cfile.readline(); if ( !cfile.find("<LesHouchesEvents") ) return; map<string,string> attributes = StringUtils::xmlAttributes("LesHouchesEvents", cfile.getline()); LHFVersion = attributes["version"]; //cout << LHFVersion << endl; if ( LHFVersion.empty() ) return; bool readingHeader = false; bool readingInit = false; headerBlock = ""; char (cwgtinfo_weights_info[250][15]); string hs; int cwgtinfo_nn(0); while ( cfile.readline() ) { if(cfile.find("<initrwgt>")) { break; } } cfile.readline(); string scalename = ""; if(cfile.find("<weightgroup type='scale_variation'")) { while ( cfile.readline() && !cfile.find("</weightgroup>") ) { hs = cfile.getline(); std::string xmuR = hs.substr(hs.find("muR")+4,hs.length()); xmuR = xmuR.substr(0,xmuR.find("muF")-1); std::string xmuF = hs.substr(hs.find("muF")+4,hs.length()); xmuF = xmuF.substr(0,xmuF.find("</w")-1); double muR = atof(xmuR.c_str()); double muF = atof(xmuF.c_str()); istringstream isc(hs); int ws = 0; do { string sub; isc >> sub; if(ws==1) { boost::erase_all(sub, ">"); scalename = sub; } ++ws; } while (isc); // cout << scaleinfo.first << "\t" << scaleinfo.second << endl; std::string xmuRs = boost::lexical_cast<std::string>(muR); std::string xmuFs = boost::lexical_cast<std::string>(muF); string scinfo = "SC " + xmuRs + " " + xmuFs; scalemap[scalename] = scinfo.c_str(); boost::erase_all(scalename, "id="); boost::erase_all(scalename, "'"); optionalWeightsNames.push_back(scalename); } } cfile.readline(); // cout << cfile.getline() << endl; string pdfname = ""; if(cfile.find("<weightgroup type='PDF_variation'")) { while ( cfile.readline() && !cfile.find("</weightgroup>") ) { hs = cfile.getline(); std::string PDF = hs.substr(hs.find("pdfset")+8,hs.length()); PDF = PDF.substr(0,PDF.find("</w")-1); double iPDF = atof(PDF.c_str()); //store the plot label istringstream isp(hs); int wp = 0; do { string sub; isp >> sub; if(wp==1) { boost::erase_all(sub, ">"); pdfname = sub; } ++wp; } while (isp); // cout << pdfinfo.first << "\t" << pdfinfo.second << endl; string scinfo = "PDF " + PDF; scalename = pdfname; scalemap[scalename] = scinfo.c_str(); boost::erase_all(pdfname, "id="); boost::erase_all(pdfname, "'"); optionalWeightsNames.push_back(pdfname); } } /* for(int f = 0; f < optionalWeightsNames.size(); f++) { cout << "optionalWeightsNames = " << optionalWeightsNames[f] << endl; }*/ // Loop over all lines until we hit the </init> tag. while ( cfile.readline() && !cfile.find("</init>") ) { if ( cfile.find("<header") ) { // We have hit the header block, so we should dump this and all // following lines to headerBlock until we hit the end of it. readingHeader = true; headerBlock = cfile.getline() + "\n"; } else if ( cfile.find("<init ") || cfile.find("<init>") ) { // We have hit the init block, so we should expect to find the // standard information in the following. But first check for // attributes. 
initAttributes = StringUtils::xmlAttributes("init", cfile.getline()); readingInit = true; cfile.readline(); if ( !( cfile >> heprup.IDBMUP.first >> heprup.IDBMUP.second >> heprup.EBMUP.first >> heprup.EBMUP.second >> heprup.PDFGUP.first >> heprup.PDFGUP.second >> heprup.PDFSUP.first >> heprup.PDFSUP.second >> heprup.IDWTUP >> heprup.NPRUP ) ) { heprup.NPRUP = -42; LHFVersion = ""; return; } heprup.resize(); for ( int i = 0; i < heprup.NPRUP; ++i ) { cfile.readline(); if ( !( cfile >> heprup.XSECUP[i] >> heprup.XERRUP[i] >> heprup.XMAXUP[i] >> heprup.LPRUP[i] ) ) { heprup.NPRUP = -42; LHFVersion = ""; return; } } } else if ( cfile.find("</header") ) { readingHeader = false; headerBlock += cfile.getline() + "\n"; } else if ( readingHeader ) { // We are in the process of reading the header block. Dump the // line to headerBlock. headerBlock += cfile.getline() + "\n"; } else if ( readingInit ) { // Here we found a comment line. Dump it to initComments. initComments += cfile.getline() + "\n"; } else { // We found some other stuff outside the standard tags. outsideBlock += cfile.getline() + "\n"; } } if ( !cfile ) { heprup.NPRUP = -42; LHFVersion = ""; return; } } bool FxFxFileReader::doReadEvent() { if ( !cfile ) return false; if ( LHFVersion.empty() ) return false; if ( heprup.NPRUP < 0 ) return false; eventComments = ""; outsideBlock = ""; hepeup.NUP = 0; hepeup.XPDWUP.first = hepeup.XPDWUP.second = 0.0; optionalWeights.clear(); optionalWeightsTemp.clear(); // Keep reading lines until we hit the next event or the end of // the event block. Save any inbetween lines. Exit if we didn't // find an event. while ( cfile.readline() && !cfile.find("<event") ) outsideBlock += cfile.getline() + "\n"; // We found an event. First scan for attributes. eventAttributes = StringUtils::xmlAttributes("event", cfile.getline()); istringstream ievat(cfile.getline()); int we(0), npLO(-10), npNLO(-10); do { string sub; ievat >> sub; if(we==2) { npLO = atoi(sub.c_str()); } if(we==5) { npNLO = atoi(sub.c_str()); } ++we; } while (ievat); //cout << "npLO, npNLO = " << npLO << ", " << npNLO << endl; optionalnpLO = npLO; optionalnpNLO = npNLO; std::stringstream npstringstream; npstringstream << "np " << npLO << " " << npNLO; std::string npstrings = npstringstream.str(); // cout << npstrings.c_str() << endl; optionalWeights[npstrings.c_str()] = -999; if ( !cfile.readline() ) return false; // The first line determines how many subsequent particle lines we // have. if ( !( cfile >> hepeup.NUP >> hepeup.IDPRUP >> hepeup.XWGTUP >> hepeup.SCALUP >> hepeup.AQEDUP >> hepeup.AQCDUP ) ) return false; hepeup.resize(); // Read all particle lines. 
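// [Editorial sketch, not part of the patch] The loop just below reads one line per
// particle of the LHE <event> block and, with this patch, uses the qualified
// std::isnan from <cmath> to reject events whose momenta are NaN. A minimal,
// self-contained illustration of that check; the sample values are made up and only
// the five PUP momentum fields are shown (a real particle line carries the full
// IDUP/ISTUP/MOTHUP/ICOLUP/PUP/VTIMUP/SPINUP set read below).
#include <cmath>
#include <sstream>
#include <iostream>

int main() {
  std::istringstream line("1.5 -2.0 740.3 740.5 0.0");  // px py pz e m (illustrative)
  double pup[5];
  for (double & p : pup) line >> p;
  bool bad = false;
  for (double p : pup) bad = bad || std::isnan(p);      // qualified std::isnan, as in the patch
  std::cout << (bad ? "nan momenta: reject event\n" : "momenta ok\n");
}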
for ( int i = 0; i < hepeup.NUP; ++i ) { if ( !cfile.readline() ) return false; if ( !( cfile >> hepeup.IDUP[i] >> hepeup.ISTUP[i] >> hepeup.MOTHUP[i].first >> hepeup.MOTHUP[i].second >> hepeup.ICOLUP[i].first >> hepeup.ICOLUP[i].second >> hepeup.PUP[i][0] >> hepeup.PUP[i][1] >> hepeup.PUP[i][2] >> hepeup.PUP[i][3] >> hepeup.PUP[i][4] >> hepeup.VTIMUP[i] >> hepeup.SPINUP[i] ) ) return false; - if(isnan(hepeup.PUP[i][0])||isnan(hepeup.PUP[i][1])|| - isnan(hepeup.PUP[i][2])||isnan(hepeup.PUP[i][3])|| - isnan(hepeup.PUP[i][4])) + if(std::isnan(hepeup.PUP[i][0])||std::isnan(hepeup.PUP[i][1])|| + std::isnan(hepeup.PUP[i][2])||std::isnan(hepeup.PUP[i][3])|| + std::isnan(hepeup.PUP[i][4])) throw Exception() << "nan's as momenta in Les Houches file " << Exception::eventerror; if(hepeup.MOTHUP[i].first -1==i || hepeup.MOTHUP[i].second-1==i) { throw Exception() << "Particle has itself as a mother in Les Houches " << "file, this is not allowed\n" << Exception::eventerror; } } // Now read any additional comments and named weights. // read until the end of rwgt is found while ( cfile.readline() && !cfile.find("</rwgt>")) { if(!cfile.find("<wgt")) { continue; } istringstream iss(cfile.getline()); int wi = 0; double weightValue(0); string weightName = ""; do { string sub; iss >> sub; if(wi==1) { boost::erase_all(sub, ">"); weightName = sub; } if(wi==2) weightValue = atof(sub.c_str()); ++wi; } while (iss); // store the optional weights found in the temporary map optionalWeightsTemp[weightName] = weightValue; } // loop over the optional weights and add the extra information (pdf or scale) for (map<string,double>::const_iterator it=optionalWeightsTemp.begin(); it!=optionalWeightsTemp.end(); ++it){ //std::cout << it->first << " => " << it->second << '\n'; for (map<string,string>::const_iterator it2=scalemap.begin(); it2!=scalemap.end(); ++it2){ //find the scale id in the scale information and add this information if(it->first==it2->first) { string info = it2->second + " " + it->first; boost::erase_all(info, "'"); boost::erase_all(info, "id="); //set the optional weights optionalWeights[info] = it->second; } } } if ( !cfile ) return false; return true; } void FxFxFileReader::close() { cfile.close(); } void FxFxFileReader::persistentOutput(PersistentOStream & os) const { os << neve << LHFVersion << outsideBlock << headerBlock << initComments << initAttributes << eventComments << eventAttributes << theFileName << theQNumbers << theDecayer; } void FxFxFileReader::persistentInput(PersistentIStream & is, int) { is >> neve >> LHFVersion >> outsideBlock >> headerBlock >> initComments >> initAttributes >> eventComments >> eventAttributes >> theFileName >> theQNumbers >> theDecayer; ieve = 0; } ClassDescription<FxFxFileReader> FxFxFileReader::initFxFxFileReader; // Definition of the static class description member. void FxFxFileReader::Init() { static ClassDocumentation<FxFxFileReader> documentation ("ThePEG::FxFxFileReader is an base class to be used for objects " "which reads event files from matrix element generators. This class is " "able to read plain event files conforming to the Les Houches Event File " "accord."); static Parameter<FxFxFileReader,string> interfaceFileName ("FileName", "The name of a file containing events conforming to the Les Houches " "protocol to be read into ThePEG. A file name ending in " "<code>.gz</code> will be read from a pipe which uses " "<code>zcat</code>. 
If a file name ends in <code>|</code> the " "preceeding string is interpreted as a command, the output of which " "will be read through a pipe.", &FxFxFileReader::theFileName, "", false, false); interfaceFileName.fileType(); interfaceFileName.rank(11); static Switch<FxFxFileReader,bool> interfaceQNumbers ("QNumbers", "Whether or not to read search for and read a QNUMBERS" " block in the header of the file.", &FxFxFileReader::theQNumbers, false, false, false); static SwitchOption interfaceQNumbersYes (interfaceQNumbers, "Yes", "Use QNUMBERS", true); static SwitchOption interfaceQNumbersNo (interfaceQNumbers, "No", "Don't use QNUMBERS", false); static Reference<FxFxFileReader,Decayer> interfaceDecayer ("Decayer", "Decayer to use for any decays read from the QNUMBERS Blocks", &FxFxFileReader::theDecayer, false, false, true, true, false); } diff --git a/Contrib/FxFx/FxFxLHReader.cc b/Contrib/FxFx/FxFxLHReader.cc --- a/Contrib/FxFx/FxFxLHReader.cc +++ b/Contrib/FxFx/FxFxLHReader.cc @@ -1,466 +1,466 @@ // -*- C++ -*- // // FxFxLHReader.cc is a part of Herwig - A multi-purpose // Monte Carlo event generator. // Copyright (C) 2002-2011 The Herwig Collaboration // // Herwig is licenced under version 2 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. // // // This is the implementation of the non-inlined, non-templated member // functions of the FxFxLHReader class. // #include "FxFxLHReader.h" #include "ThePEG/Interface/ClassDocumentation.h" #include "ThePEG/Interface/Reference.h" #include "ThePEG/Interface/Switch.h" #include "ThePEG/Interface/Parameter.h" #include "ThePEG/Utilities/Throw.h" #include "ThePEG/PDT/DecayMode.h" #include "ThePEG/Persistency/PersistentOStream.h" #include "ThePEG/Persistency/PersistentIStream.h" #include "ThePEG/PDF/PartonExtractor.h" #include "ThePEG/PDF/NoPDF.h" #include "ThePEG/Cuts/Cuts.h" #include "ThePEG/EventRecord/TmpTransform.h" #include "ThePEG/Utilities/UtilityBase.h" using namespace Herwig; FxFxLHReader:: FxFxLHReader(const FxFxLHReader & x) : LesHouchesReader(x), neve(x.neve), ieve(0), LHFVersion(x.LHFVersion), outsideBlock(x.outsideBlock), headerBlock(x.headerBlock), initComments(x.initComments), initAttributes(x.initAttributes), eventComments(x.eventComments), eventAttributes(x.eventAttributes), theFileName(x.theFileName),overSampling_(x.overSampling_) {} FxFxLHReader::~FxFxLHReader() {} IBPtr FxFxLHReader::clone() const { return new_ptr(*this); } IBPtr FxFxLHReader::fullclone() const { return new_ptr(*this); } bool FxFxLHReader::preInitialize() const { return true; } void FxFxLHReader::doinit() { LesHouchesReader::doinit(); } void FxFxLHReader::initialize(LesHouchesEventHandler & eh) { LesHouchesReader::initialize(eh); if ( LHFVersion.empty() ) Throw<LesHouchesFileError>() << "The file associated with '" << name() << "' does not contain a " << "proper formatted Les Houches event file. The events may not be " << "properly sampled." << Exception::warning; } long FxFxLHReader::scan() { open(); // Shall we write the events to a cache file for fast reading? If so // we write to a temporary file if the caches events should be // randomized. if ( cacheFileName().length() ) openWriteCacheFile(); // Keep track of the number of events scanned. long neve = 0; long cuteve = 0; bool negw = false; // If the open() has not already gotten information about subprocesses // and cross sections we have to scan through the events. 
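// [Editorial sketch, not part of the patch] The scan() loop just below counts, for each
// process ID, how many events were attempted (oldeve) and how many are kept when
// caching with a non-zero weight (neweve), and converts them into per-process factors
// xSecWeights = accepted/attempted. A stripped-down version of that bookkeeping with
// made-up counts:
#include <vector>
#include <iostream>

int main() {
  std::vector<long> oldeve = {1000, 500};   // attempted events per process (illustrative)
  std::vector<long> neweve = { 950, 480};   // events kept after the weight cut (illustrative)
  std::vector<double> xSecWeights(oldeve.size(), 1.0);
  for (std::size_t i = 0; i < oldeve.size(); ++i)
    if (oldeve[i]) xSecWeights[i] = double(neweve[i]) / double(oldeve[i]);
  for (double w : xSecWeights) std::cout << w << "\n";   // 0.95 and 0.96
}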
if ( !heprup.NPRUP || cacheFile() || abs(heprup.IDWTUP) != 1 ) { // why scan if IDWTUP != 1? HoldFlag<> isScanning(scanning); double oldsum = 0.0; vector<int> lprup; vector<double> newmax; vector<long> oldeve; vector<long> neweve; for ( int i = 0; ( maxScan() < 0 || i < maxScan() ) && readEvent(); ++i ) { if ( !checkPartonBin() ) Throw<LesHouchesInitError>() << "Found event in LesHouchesReader '" << name() << "' which cannot be handeled by the assigned PartonExtractor '" << partonExtractor()->name() << "'." << Exception::runerror; vector<int>::iterator idit = find(lprup.begin(), lprup.end(), hepeup.IDPRUP); int id = lprup.size(); if ( idit == lprup.end() ) { lprup.push_back(hepeup.IDPRUP); newmax.push_back(0.0); neweve.push_back(0); oldeve.push_back(0); } else { id = idit - lprup.begin(); } ++neve; ++oldeve[id]; oldsum += hepeup.XWGTUP; if ( cacheFile() ) { if ( eventWeight() == 0.0 ) { ++cuteve; continue; } cacheEvent(); } ++neweve[id]; newmax[id] = max(newmax[id], abs(eventWeight())); if ( eventWeight() < 0.0 ) negw = true; } xSecWeights.resize(oldeve.size(), 1.0); for ( int i = 0, N = oldeve.size(); i < N; ++i ) if ( oldeve[i] ) xSecWeights[i] = double(neweve[i])/double(oldeve[i]); if ( maxScan() < 0 || neve > NEvents() ) NEvents(neve - cuteve); if ( lprup.size() == heprup.LPRUP.size() ) { for ( int id = 0, N = lprup.size(); id < N; ++id ) { vector<int>::iterator idit = find(heprup.LPRUP.begin(), heprup.LPRUP.end(), hepeup.IDPRUP); if ( idit == heprup.LPRUP.end() ) { Throw<LesHouchesInitError>() << "When scanning events, the LesHouschesReader '" << name() << "' found undeclared processes." << Exception::warning; heprup.NPRUP = 0; break; } int idh = idit - heprup.LPRUP.begin(); heprup.XMAXUP[idh] = newmax[id]; } } if ( heprup.NPRUP == 0 ) { // No heprup block was supplied or something went wrong. heprup.NPRUP = lprup.size(); heprup.LPRUP.resize(lprup.size()); heprup.XMAXUP.resize(lprup.size()); for ( int id = 0, N = lprup.size(); id < N; ++id ) { heprup.LPRUP[id] = lprup[id]; heprup.XMAXUP[id] = newmax[id]; } } else if ( abs(heprup.IDWTUP) != 1 ) { // Try to fix things if abs(heprup.IDWTUP) != 1. double sumxsec = 0.0; for ( int id = 0; id < heprup.NPRUP; ++id ) sumxsec += heprup.XSECUP[id]; weightScale = picobarn*neve*sumxsec/oldsum; } } if ( cacheFile() ) closeCacheFile(); if ( negw ) heprup.IDWTUP = min(-abs(heprup.IDWTUP), -1); return neve; } void FxFxLHReader::open() { if ( filename().empty() ) throw LesHouchesFileError() << "No Les Houches file name. " << "Use 'set " << name() << ":FileName'." << Exception::runerror; cfile.open(filename()); if ( !cfile ) throw LesHouchesFileError() << "The FxFxLHReader '" << name() << "' could not open the " << "event file called '" << theFileName << "'." << Exception::runerror; cfile.readline(); if ( !cfile.find("<LesHouchesEvents") ) return; map<string,string> attributes = StringUtils::xmlAttributes("LesHouchesEvents", cfile.getline()); LHFVersion = attributes["version"]; if ( LHFVersion.empty() ) return; bool readingHeader = false; bool readingInit = false; headerBlock = ""; // Loop over all lines until we hit the </init> tag. while ( cfile.readline() && !cfile.find("</init>") ) { if ( cfile.find("<header") ) { // We have hit the header block, so we should dump this and all // following lines to headerBlock until we hit the end of it. readingHeader = true; headerBlock = cfile.getline() + "\n"; } else if ( cfile.find("<init") ) { // We have hit the init block, so we should expect to find the // standard information in the following. 
But first check for // attributes. initAttributes = StringUtils::xmlAttributes("init", cfile.getline()); readingInit = true; cfile.readline(); if ( !( cfile >> heprup.IDBMUP.first >> heprup.IDBMUP.second >> heprup.EBMUP.first >> heprup.EBMUP.second >> heprup.PDFGUP.first >> heprup.PDFGUP.second >> heprup.PDFSUP.first >> heprup.PDFSUP.second >> heprup.IDWTUP >> heprup.NPRUP ) ) { heprup.NPRUP = -42; LHFVersion = ""; return; } heprup.resize(); for ( int i = 0; i < heprup.NPRUP; ++i ) { cfile.readline(); if ( !( cfile >> heprup.XSECUP[i] >> heprup.XERRUP[i] >> heprup.XMAXUP[i] >> heprup.LPRUP[i] ) ) { heprup.NPRUP = -42; LHFVersion = ""; return; } } } else if ( cfile.find("</header") ) { readingHeader = false; headerBlock += cfile.getline() + "\n"; } else if ( readingHeader ) { // We are in the process of reading the header block. Dump the // line to headerBlock. headerBlock += cfile.getline() + "\n"; } else if ( readingInit ) { // Here we found a comment line. Dump it to initComments. initComments += cfile.getline() + "\n"; } else { // We found some other stuff outside the standard tags. outsideBlock += cfile.getline() + "\n"; } } if ( !cfile ) { heprup.NPRUP = -42; LHFVersion = ""; return; } } bool FxFxLHReader::readEvent() { reset(); if ( !doReadEvent() ) return false; // If we are just skipping event we do not need to reweight or do // anything fancy. if ( skipping ) return true; if ( cacheFile() && !scanning ) return true; // Reweight according to the re- and pre-weights objects in the // LesHouchesReader base class. lastweight = reweight(); if ( !reweightPDF && !cutEarly() ) return true; // We should try to reweight the PDFs or make early cuts here. fillEvent(); double x1 = incoming().first->momentum().plus()/ beams().first->momentum().plus(); if ( reweightPDF && inPDF.first && outPDF.first && inPDF.first != outPDF.first ) { if ( hepeup.XPDWUP.first <= 0.0 ) hepeup.XPDWUP.first = inPDF.first->xfx(inData.first, incoming().first->dataPtr(), sqr(hepeup.SCALUP*GeV), x1); double xf = outPDF.first->xfx(inData.first, incoming().first->dataPtr(), sqr(hepeup.SCALUP*GeV), x1); lastweight *= xf/hepeup.XPDWUP.first; hepeup.XPDWUP.first = xf; } double x2 = incoming().second->momentum().minus()/ beams().second->momentum().minus(); if ( reweightPDF && inPDF.second && outPDF.second && inPDF.second != outPDF.second ) { if ( hepeup.XPDWUP.second <= 0.0 ) hepeup.XPDWUP.second = inPDF.second->xfx(inData.second, incoming().second->dataPtr(), sqr(hepeup.SCALUP*GeV), x2); double xf = outPDF.second->xfx(inData.second, incoming().second->dataPtr(), sqr(hepeup.SCALUP*GeV), x2); lastweight *= xf/hepeup.XPDWUP.second; hepeup.XPDWUP.second = xf; } if ( cutEarly() ) { if ( !cuts().initSubProcess((incoming().first->momentum() + incoming().second->momentum()).m2(), 0.5*log(x1/x2)) ) lastweight = 0.0; tSubProPtr sub = getSubProcess(); TmpTransform<tSubProPtr> tmp(sub, Utilities::getBoostToCM(sub->incoming())); if ( !cuts().passCuts(*sub) ) lastweight = 0.0; } return true; } double FxFxLHReader::getEvent() { if ( cacheFile() ) { if (overSampling_) { if ( !uncacheEvent() ) reopen(); } else { if ( !uncacheEvent() || stats.attempts()==NEvents() ) throw LesHouchesReopenWarning() << "More events requested than available in LesHouchesReader " << name() << Exception::runerror; } } else { if (overSampling_) { if ( !readEvent() ) reopen(); } else { if ( !readEvent() || stats.attempts()==NEvents() ) throw LesHouchesReopenWarning() << "More events requested than available in LesHouchesReader " << name() << Exception::runerror; } } 
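// [Editorial sketch, not part of the patch] The statement just below returns
// eventWeight()/max, with max taken from the stored maximum weight for the process.
// As a hedged illustration only: a ratio of this kind is typically consumed by a
// hit-or-miss unweighting step, accepting the event with probability |w|/w_max.
// The function name and the std::mt19937 generator here are stand-ins, not
// ThePEG's UseRandom interface.
#include <cmath>
#include <random>
#include <iostream>

bool acceptEvent(double weight, double maxWeight, std::mt19937 & rng) {
  if (maxWeight == 0.0) return false;               // mirrors the max != 0.0 guard below
  std::uniform_real_distribution<double> flat(0.0, 1.0);
  return flat(rng) < std::abs(weight) / maxWeight;  // keep with probability |w|/w_max
}

int main() {
  std::mt19937 rng(42);
  std::cout << std::boolalpha << acceptEvent(0.3, 1.2, rng) << "\n";
}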
++position; double max = maxWeights[hepeup.IDPRUP]*maxFactor; return max != 0.0? eventWeight()/max: 0.0; } void FxFxLHReader::skip(long n) { HoldFlag<> skipflag(skipping); if(overSampling_) while ( n-- ) getEvent(); } bool FxFxLHReader::doReadEvent() { if ( !cfile ) return false; if ( LHFVersion.empty() ) return false; if ( heprup.NPRUP < 0 ) return false; eventComments = ""; outsideBlock = ""; hepeup.NUP = 0; hepeup.XPDWUP.first = hepeup.XPDWUP.second = 0.0; // Keep reading lines until we hit the next event or the end of // the event block. Save any inbetween lines. Exit if we didn't // find an event. while ( cfile.readline() && !cfile.find("<event") ) outsideBlock += cfile.getline() + "\n"; // We found an event. First scan for attributes. eventAttributes = StringUtils::xmlAttributes("event", cfile.getline()); if ( !cfile.readline() ) return false; // The first line determines how many subsequent particle lines we // have. if ( !( cfile >> hepeup.NUP >> hepeup.IDPRUP >> hepeup.XWGTUP >> hepeup.SCALUP >> hepeup.AQEDUP >> hepeup.AQCDUP ) ) return false; hepeup.resize(); // Read all particle lines. for ( int i = 0; i < hepeup.NUP; ++i ) { if ( !cfile.readline() ) return false; if ( !( cfile >> hepeup.IDUP[i] >> hepeup.ISTUP[i] >> hepeup.MOTHUP[i].first >> hepeup.MOTHUP[i].second >> hepeup.ICOLUP[i].first >> hepeup.ICOLUP[i].second >> hepeup.PUP[i][0] >> hepeup.PUP[i][1] >> hepeup.PUP[i][2] >> hepeup.PUP[i][3] >> hepeup.PUP[i][4] >> hepeup.VTIMUP[i] >> hepeup.SPINUP[i] ) ) return false; - if(isnan(hepeup.PUP[i][0])||isnan(hepeup.PUP[i][1])|| - isnan(hepeup.PUP[i][2])||isnan(hepeup.PUP[i][3])|| - isnan(hepeup.PUP[i][4])) + if(std::isnan(hepeup.PUP[i][0])||std::isnan(hepeup.PUP[i][1])|| + std::isnan(hepeup.PUP[i][2])||std::isnan(hepeup.PUP[i][3])|| + std::isnan(hepeup.PUP[i][4])) throw Exception() << "nan's as momenta in Les Houches file " << Exception::eventerror; } // Now read any additional comments. while ( cfile.readline() && !cfile.find("</event>") ) eventComments += cfile.getline() + "\n"; if ( !cfile ) return false; return true; } void FxFxLHReader::close() { cfile.close(); } void FxFxLHReader::persistentOutput(PersistentOStream & os) const { os << neve << LHFVersion << outsideBlock << headerBlock << initComments << initAttributes << eventComments << eventAttributes << theFileName << overSampling_; } void FxFxLHReader::persistentInput(PersistentIStream & is, int) { is >> neve >> LHFVersion >> outsideBlock >> headerBlock >> initComments >> initAttributes >> eventComments >> eventAttributes >> theFileName >> overSampling_; ieve = 0; } ClassDescription<FxFxLHReader> FxFxLHReader::initFxFxLHReader; // Definition of the static class description member. void FxFxLHReader::Init() { static ClassDocumentation<FxFxLHReader> documentation ("Herwig::FxFxLHReader is an base class to be used for objects " "which reads event files from matrix element generators. This class is " "able to read plain event files conforming to the Les Houches Event File " "accord."); static Parameter<FxFxLHReader,string> interfaceFileName ("FileName", "The name of a file containing events conforming to the Les Houches " "protocol to be read into ThePEG. A file name ending in " "<code>.gz</code> will be read from a pipe which uses " "<code>zcat</code>. 
If a file name ends in <code>|</code> the " "preceeding string is interpreted as a command, the output of which " "will be read through a pipe.", &FxFxLHReader::theFileName, "", false, false); static Switch<FxFxLHReader,bool> interfaceOverSampling ("OverSampling", "Allow / Forbid reading of LH events more than once by the " "LH reader, allowing / protecting against statistical problems.", &FxFxLHReader::overSampling_, true, false, false); static SwitchOption AllowOverSampling (interfaceOverSampling, "AllowOverSampling", "The reader will read events in the file more than once if more " "events are needed to generate the requested number than that in " "the LH file.", true); static SwitchOption ForbidOverSampling (interfaceOverSampling, "ForbidOverSampling", "The reader will NOT read events in the file more than once if more " "events are needed to generate the requested number than that in " "the LH file - instead it will stop when all have been read.", false); interfaceFileName.fileType(); interfaceFileName.rank(11); } diff --git a/Decay/Perturbative/SMTopDecayer.cc b/Decay/Perturbative/SMTopDecayer.cc --- a/Decay/Perturbative/SMTopDecayer.cc +++ b/Decay/Perturbative/SMTopDecayer.cc @@ -1,1141 +1,1141 @@ // -*- C++ -*- // // SMTopDecayer.cc is a part of Herwig - A multi-purpose Monte Carlo event generator // Copyright (C) 2002-2011 The Herwig Collaboration // // Herwig is licenced under version 2 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. // // // This is the implementation of the non-inlined, non-templated member // functions of the SMTopDecayer class. // #include "SMTopDecayer.h" #include "ThePEG/Interface/ClassDocumentation.h" #include "ThePEG/Interface/ParVector.h" #include "ThePEG/Interface/Switch.h" #include "ThePEG/Interface/Reference.h" #include "ThePEG/Persistency/PersistentOStream.h" #include "ThePEG/Persistency/PersistentIStream.h" #include "ThePEG/PDT/DecayMode.h" #include "Herwig/Decay/DecayVertex.h" #include "ThePEG/Helicity/WaveFunction/VectorWaveFunction.h" #include "Herwig/PDT/ThreeBodyAllOn1IntegralCalculator.h" #include "Herwig/Shower/RealEmissionProcess.h" #include "Herwig/Shower/QTilde/Base/ShowerProgenitor.h" #include "Herwig/Shower/QTilde/Base/ShowerParticle.h" #include "Herwig/Shower/QTilde/Base/Branching.h" #include "Herwig/Decay/GeneralDecayMatrixElement.h" using namespace Herwig; using namespace ThePEG::Helicity; SMTopDecayer::SMTopDecayer() : _wquarkwgt(6,0.),_wleptonwgt(3,0.), _xg_sampling(1.5), _initialenhance(1.), _finalenhance(2.3), _useMEforT2(true) { _wleptonwgt[0] = 0.302583; _wleptonwgt[1] = 0.301024; _wleptonwgt[2] = 0.299548; _wquarkwgt[0] = 0.851719; _wquarkwgt[1] = 0.0450162; _wquarkwgt[2] = 0.0456962; _wquarkwgt[3] = 0.859839; _wquarkwgt[4] = 3.9704e-06; _wquarkwgt[5] = 0.000489657; generateIntermediates(true); } bool SMTopDecayer::accept(tcPDPtr parent, const tPDVector & children) const { if(abs(parent->id()) != ParticleID::t) return false; int id0(0),id1(0),id2(0); for(tPDVector::const_iterator it = children.begin(); it != children.end();++it) { int id=(**it).id(),absid(abs(id)); if(absid==ParticleID::b&&double(id)/double(parent->id())>0) { id0=id; } else { switch (absid) { case ParticleID::nu_e: case ParticleID::nu_mu: case ParticleID::nu_tau: id1 = id; break; case ParticleID::eminus: case ParticleID::muminus: case ParticleID::tauminus: id2 = id; break; case ParticleID::b: case ParticleID::d: case ParticleID::s: id1 = id; break; case ParticleID::u: case ParticleID::c: id2=id; 
break; default : break; } } } if(id0==0||id1==0||id2==0) return false; if(double(id1)/double(id2)>0) return false; return true; } ParticleVector SMTopDecayer::decay(const Particle & parent, const tPDVector & children) const { int id1(0),id2(0); for(tPDVector::const_iterator it = children.begin(); it != children.end();++it) { int id=(**it).id(),absid=abs(id); if(absid == ParticleID::b && double(id)/double(parent.id())>0) continue; //leptons if(absid > 10 && absid%2==0) id1=absid; if(absid > 10 && absid%2==1) id2=absid; //quarks if(absid < 10 && absid%2==0) id2=absid; if(absid < 10 && absid%2==1) id1=absid; } unsigned int imode(0); if(id2 >=11 && id2<=16) imode = (id1-12)/2; else imode = id1+1+id2/2; bool cc = parent.id() == ParticleID::tbar; ParticleVector out(generate(true,cc,imode,parent)); //arrange colour flow PPtr pparent=const_ptr_cast<PPtr>(&parent); out[1]->incomingColour(pparent,out[1]->id()<0); ParticleVector products = out[0]->children(); if(products[0]->hasColour()) products[0]->colourNeighbour(products[1],true); else if(products[0]->hasAntiColour()) products[0]->colourNeighbour(products[1],false); return out; } void SMTopDecayer::persistentOutput(PersistentOStream & os) const { os << _wvertex << _wquarkwgt << _wleptonwgt << _wplus << _alpha << _initialenhance << _finalenhance << _xg_sampling << _useMEforT2; } void SMTopDecayer::persistentInput(PersistentIStream & is, int) { is >> _wvertex >> _wquarkwgt >> _wleptonwgt >> _wplus >> _alpha >> _initialenhance >> _finalenhance >> _xg_sampling >> _useMEforT2; } ClassDescription<SMTopDecayer> SMTopDecayer::initSMTopDecayer; // Definition of the static class description member. void SMTopDecayer::Init() { static ClassDocumentation<SMTopDecayer> documentation ("This is the implementation of the SMTopDecayer which " "decays top quarks into bottom quarks and either leptons " "or quark-antiquark pairs including the matrix element for top decay", "The matrix element correction for top decay \\cite{Hamilton:2006ms}.", "%\\cite{Hamilton:2006ms}\n" "\\bibitem{Hamilton:2006ms}\n" " K.~Hamilton and P.~Richardson,\n" " ``A simulation of QCD radiation in top quark decays,''\n" " JHEP {\\bf 0702}, 069 (2007)\n" " [arXiv:hep-ph/0612236].\n" " %%CITATION = JHEPA,0702,069;%%\n"); static ParVector<SMTopDecayer,double> interfaceQuarkWeights ("QuarkWeights", "Maximum weights for the hadronic decays", &SMTopDecayer::_wquarkwgt, 6, 1.0, 0.0, 10.0, false, false, Interface::limited); static ParVector<SMTopDecayer,double> interfaceLeptonWeights ("LeptonWeights", "Maximum weights for the semi-leptonic decays", &SMTopDecayer::_wleptonwgt, 3, 1.0, 0.0, 10.0, false, false, Interface::limited); static Parameter<SMTopDecayer,double> interfaceEnhancementFactor ("InitialEnhancementFactor", "The enhancement factor for initial-state radiation in the shower to ensure" " the weight for the matrix element correction is less than one.", &SMTopDecayer::_initialenhance, 1.0, 1.0, 10000.0, false, false, Interface::limited); static Parameter<SMTopDecayer,double> interfaceFinalEnhancementFactor ("FinalEnhancementFactor", "The enhancement factor for final-state radiation in the shower to ensure" " the weight for the matrix element correction is less than one", &SMTopDecayer::_finalenhance, 1.6, 1.0, 1000.0, false, false, Interface::limited); static Parameter<SMTopDecayer,double> interfaceSamplingTopHardMEC ("SamplingTopHardMEC", "The importance sampling power for choosing an initial xg, " "to sample xg according to xg^-_xg_sampling", &SMTopDecayer::_xg_sampling, 1.5, 1.2, 2.0, 
false, false, Interface::limited); static Switch<SMTopDecayer,bool> interfaceUseMEForT2 ("UseMEForT2", "Use the matrix element correction, if available to fill the T2" " region for the decay shower and don't fill using the shower", &SMTopDecayer::_useMEforT2, true, false, false); static SwitchOption interfaceUseMEForT2Shower (interfaceUseMEForT2, "Shower", "Use the shower to fill the T2 region", false); static SwitchOption interfaceUseMEForT2ME (interfaceUseMEForT2, "ME", "Use the Matrix element to fill the T2 region", true); static Reference<SMTopDecayer,ShowerAlpha> interfaceCoupling ("Coupling", "Pointer to the object to calculate the coupling for the correction", &SMTopDecayer::_alpha, false, false, true, false, false); } double SMTopDecayer::me2(const int, const Particle & inpart, const ParticleVector & decay, MEOption meopt) const { if(!ME()) ME(new_ptr(GeneralDecayMatrixElement(PDT::Spin1Half,PDT::Spin1Half, PDT::Spin1Half,PDT::Spin1Half))); // spinors etc for the decaying particle if(meopt==Initialize) { // spinors and rho if(inpart.id()>0) SpinorWaveFunction ::calculateWaveFunctions(_inHalf,_rho, const_ptr_cast<tPPtr>(&inpart), incoming); else SpinorBarWaveFunction::calculateWaveFunctions(_inHalfBar,_rho, const_ptr_cast<tPPtr>(&inpart), incoming); } // setup spin info when needed if(meopt==Terminate) { // for the decaying particle if(inpart.id()>0) { SpinorWaveFunction:: constructSpinInfo(_inHalf,const_ptr_cast<tPPtr>(&inpart),incoming,true); SpinorBarWaveFunction::constructSpinInfo(_inHalfBar,decay[0],outgoing,true); SpinorWaveFunction ::constructSpinInfo(_outHalf ,decay[1],outgoing,true); SpinorBarWaveFunction::constructSpinInfo(_outHalfBar,decay[2],outgoing,true); } else { SpinorBarWaveFunction:: constructSpinInfo(_inHalfBar,const_ptr_cast<tPPtr>(&inpart),incoming,true); SpinorWaveFunction::constructSpinInfo(_inHalf,decay[0],outgoing,true); SpinorBarWaveFunction::constructSpinInfo(_outHalfBar,decay[1],outgoing,true); SpinorWaveFunction ::constructSpinInfo(_outHalf ,decay[2],outgoing,true); } } if ( ( decay[1]->momentum() + decay[2]->momentum() ).m() < decay[1]->data().constituentMass() + decay[2]->data().constituentMass() ) return 0.0; // spinors for the decay product if(inpart.id()>0) { SpinorBarWaveFunction::calculateWaveFunctions(_inHalfBar ,decay[0],outgoing); SpinorWaveFunction ::calculateWaveFunctions(_outHalf ,decay[1],outgoing); SpinorBarWaveFunction::calculateWaveFunctions(_outHalfBar,decay[2],outgoing); } else { SpinorWaveFunction ::calculateWaveFunctions(_inHalf ,decay[0],outgoing); SpinorBarWaveFunction::calculateWaveFunctions(_outHalfBar,decay[1],outgoing); SpinorWaveFunction ::calculateWaveFunctions(_outHalf ,decay[2],outgoing); } Energy2 scale(sqr(inpart.mass())); if(inpart.id() == ParticleID::t) { //Define intermediate vector wave-function for Wplus tcPDPtr Wplus(getParticleData(ParticleID::Wplus)); VectorWaveFunction inter; unsigned int thel,bhel,fhel,afhel; for(thel = 0;thel<2;++thel){ for(bhel = 0;bhel<2;++bhel){ inter = _wvertex->evaluate(scale,1,Wplus,_inHalf[thel], _inHalfBar[bhel]); for(afhel=0;afhel<2;++afhel){ for(fhel=0;fhel<2;++fhel){ (*ME())(thel,bhel,afhel,fhel) = _wvertex->evaluate(scale,_outHalf[afhel], _outHalfBar[fhel],inter); } } } } } else if(inpart.id() == ParticleID::tbar) { VectorWaveFunction inter; tcPDPtr Wminus(getParticleData(ParticleID::Wminus)); unsigned int tbhel,bbhel,afhel,fhel; for(tbhel = 0;tbhel<2;++tbhel){ for(bbhel = 0;bbhel<2;++bbhel){ inter = _wvertex-> evaluate(scale,1,Wminus,_inHalf[bbhel],_inHalfBar[tbhel]); 
for(afhel=0;afhel<2;++afhel){ for(fhel=0;fhel<2;++fhel){ (*ME())(tbhel,bbhel,fhel,afhel) = _wvertex->evaluate(scale,_outHalf[afhel], _outHalfBar[fhel],inter); } } } } } double output = (ME()->contract(_rho)).real(); if(abs(decay[1]->id())<=6) output *=3.; return output; } void SMTopDecayer::doinit() { DecayIntegrator::doinit(); //get vertices from SM object tcHwSMPtr hwsm = dynamic_ptr_cast<tcHwSMPtr>(standardModel()); if(!hwsm) throw InitException() << "Must have Herwig::StandardModel in " << "SMTopDecayer::doinit()"; _wvertex = hwsm->vertexFFW(); //initialise _wvertex->init(); //set up decay modes _wplus = getParticleData(ParticleID::Wplus); DecayPhaseSpaceModePtr mode; DecayPhaseSpaceChannelPtr Wchannel; tPDVector extpart(4); vector<double> wgt(1,1.0); extpart[0] = getParticleData(ParticleID::t); extpart[1] = getParticleData(ParticleID::b); //lepton modes for(int i=11; i<17;i+=2) { extpart[2] = getParticleData(-i); extpart[3] = getParticleData(i+1); mode = new_ptr(DecayPhaseSpaceMode(extpart,this)); Wchannel = new_ptr(DecayPhaseSpaceChannel(mode)); Wchannel->addIntermediate(extpart[0],0,0.0,-1,1); Wchannel->addIntermediate(_wplus,0,0.0,2,3); Wchannel->init(); mode->addChannel(Wchannel); addMode(mode,_wleptonwgt[(i-11)/2],wgt); } //quark modes unsigned int iz=0; for(int ix=1;ix<6;ix+=2) { for(int iy=2;iy<6;iy+=2) { // check that the combination of particles is allowed if(_wvertex->allowed(-ix,iy,ParticleID::Wminus)) { extpart[2] = getParticleData(-ix); extpart[3] = getParticleData( iy); mode = new_ptr(DecayPhaseSpaceMode(extpart,this)); Wchannel = new_ptr(DecayPhaseSpaceChannel(mode)); Wchannel->addIntermediate(extpart[0],0,0.0,-1,1); Wchannel->addIntermediate(_wplus,0,0.0,2,3); Wchannel->init(); mode->addChannel(Wchannel); addMode(mode,_wquarkwgt[iz],wgt); ++iz; } else { throw InitException() << "SMTopDecayer::doinit() the W vertex" << "cannot handle all the quark modes" << Exception::abortnow; } } } } void SMTopDecayer::dataBaseOutput(ofstream & os,bool header) const { if(header) os << "update decayers set parameters=\""; // parameters for the DecayIntegrator base class for(unsigned int ix=0;ix<_wquarkwgt.size();++ix) { os << "newdef " << name() << ":QuarkWeights " << ix << " " << _wquarkwgt[ix] << "\n"; } for(unsigned int ix=0;ix<_wleptonwgt.size();++ix) { os << "newdef " << name() << ":LeptonWeights " << ix << " " << _wleptonwgt[ix] << "\n"; } DecayIntegrator::dataBaseOutput(os,false); if(header) os << "\n\" where BINARY ThePEGName=\"" << fullName() << "\";" << endl; } void SMTopDecayer::doinitrun() { DecayIntegrator::doinitrun(); if(initialize()) { for(unsigned int ix=0;ix<numberModes();++ix) { if(ix<3) _wleptonwgt[ix ] = mode(ix)->maxWeight(); else _wquarkwgt [ix-3] = mode(ix)->maxWeight(); } } } WidthCalculatorBasePtr SMTopDecayer::threeBodyMEIntegrator(const DecayMode & dm) const { // identify W decay products int sign = dm.parent()->id() > 0 ? 
1 : -1; int iferm(0),ianti(0); for(ParticleMSet::const_iterator pit=dm.products().begin(); pit!=dm.products().end();++pit) { int id = (**pit).id(); if(id*sign != ParticleID::b) { if (id*sign > 0 ) iferm = id*sign; else ianti = id*sign; } } assert(iferm!=0&&ianti!=0); // work out which mode we are doing int imode(-1); for(unsigned int ix=0;ix<numberModes();++ix) { if(mode(ix)->externalParticles(2)->id() == ianti && mode(ix)->externalParticles(3)->id() == iferm ) { imode = ix; break; } } assert(imode>=0); // get the masses we need Energy m[3] = {mode(imode)->externalParticles(1)->mass(), mode(imode)->externalParticles(3)->mass(), mode(imode)->externalParticles(2)->mass()}; return new_ptr(ThreeBodyAllOn1IntegralCalculator<SMTopDecayer> (3,_wplus->mass(),_wplus->width(),0.0,*this,imode,m[0],m[1],m[2])); } InvEnergy SMTopDecayer::threeBodydGammads(const int imode, const Energy2 mt2, const Energy2 mffb2, const Energy mb, const Energy mf, const Energy mfb) const { Energy mffb(sqrt(mffb2)); Energy mw(_wplus->mass()); Energy2 mw2(sqr(mw)),gw2(sqr(_wplus->width())); Energy mt(sqrt(mt2)); Energy Eb = 0.5*(mt2-mffb2-sqr(mb))/mffb; Energy Ef = 0.5*(mffb2-sqr(mfb)+sqr(mf))/mffb; Energy Ebm = sqrt(sqr(Eb)-sqr(mb)); Energy Efm = sqrt(sqr(Ef)-sqr(mf)); Energy2 upp = sqr(Eb+Ef)-sqr(Ebm-Efm); Energy2 low = sqr(Eb+Ef)-sqr(Ebm+Efm); InvEnergy width=(dGammaIntegrand(mffb2,upp,mt,mb,mf,mfb,mw)- dGammaIntegrand(mffb2,low,mt,mb,mf,mfb,mw)) /32./mt2/mt/8/pow(Constants::pi,3)/(sqr(mffb2-mw2)+mw2*gw2); // couplings width *= 0.25*sqr(4.*Constants::pi*generator()->standardModel()->alphaEM(mt2)/ generator()->standardModel()->sin2ThetaW()); width *= generator()->standardModel()->CKM(*mode(imode)->externalParticles(0), *mode(imode)->externalParticles(1)); if(abs(mode(imode)->externalParticles(2)->id())<=6) { width *=3.; if(abs(mode(imode)->externalParticles(2)->id())%2==0) width *=generator()->standardModel()->CKM(*mode(imode)->externalParticles(2), *mode(imode)->externalParticles(3)); else width *=generator()->standardModel()->CKM(*mode(imode)->externalParticles(3), *mode(imode)->externalParticles(2)); } // final spin average - assert(!isnan(width.rawValue())); + assert(!std::isnan(width.rawValue())); return 0.5*width; } Energy6 SMTopDecayer::dGammaIntegrand(Energy2 mffb2, Energy2 mbf2, Energy mt, Energy mb, Energy mf, Energy mfb, Energy mw) const { Energy2 mt2(sqr(mt)) ,mb2(sqr(mb)) ,mf2(sqr(mf )),mfb2(sqr(mfb )),mw2(sqr(mw )); Energy4 mt4(sqr(mt2)),mb4(sqr(mb2)),mf4(sqr(mf2)),mfb4(sqr(mfb2)),mw4(sqr(mw2)); return -mbf2 * ( + 6 * mb2 * mf2 * mfb2 * mffb2 + 6 * mb2 * mt2 * mfb2 * mffb2 + 6 * mb2 * mt2 * mf2 * mffb2 + 12 * mb2 * mt2 * mf2 * mfb2 - 3 * mb2 * mfb4 * mffb2 + 3 * mb2 * mf2 * mffb2 * mffb2 - 3 * mb2 * mf4 * mffb2 - 6 * mb2 * mt2 * mfb4 - 6 * mb2 * mt2 * mf4 - 3 * mb4 * mfb2 * mffb2 - 3 * mb4 * mf2 * mffb2 - 6 * mb4 * mf2 * mfb2 + 3 * mt4 * mf4 + 3 * mb4 * mfb4 + 3 * mb4 * mf4 + 3 * mt4 * mfb4 + 3 * mb2 * mfb2 * mffb2 * mffb2 + 3 * mt2 * mfb2 * mffb2 * mffb2 - 3 * mt2 * mfb4 * mffb2 + 3 * mt2 * mf2 * mffb2 * mffb2 - 3 * mt2 * mf4 * mffb2 - 3 * mt4 * mfb2 * mffb2 - 3 * mt4 * mf2 * mffb2 - 6 * mt4 * mf2 * mfb2 + 6 * mt2 * mf2 * mfb2 * mffb2 + 12 * mt2 * mf2 * mw4 + 12 * mb2 * mfb2 * mw4 + 12 * mb2 * mt2 * mw4 + 6 * mw2 * mt2 * mfb2 * mbf2 - 12 * mw2 * mt2 * mf2 * mffb2 - 6 * mw2 * mt2 * mf2 * mbf2 - 12 * mw2 * mt2 * mf2 * mfb2 - 12 * mw2 * mb2 * mfb2 * mffb2 - 6 * mw2 * mb2 * mfb2 * mbf2 + 6 * mw2 * mb2 * mf2 * mbf2 - 12 * mw2 * mb2 * mf2 * mfb2 - 12 * mw2 * mb2 * mt2 * mfb2 - 12 * mw2 * mb2 * mt2 * mf2 + 12 * 
mf2 * mfb2 * mw4 + 4 * mbf2 * mbf2 * mw4 - 6 * mfb2 * mbf2 * mw4 - 6 * mf2 * mbf2 * mw4 - 6 * mt2 * mbf2 * mw4 - 6 * mb2 * mbf2 * mw4 + 12 * mw2 * mt2 * mf4 + 12 * mw2 * mt4 * mf2 + 12 * mw2 * mb2 * mfb4 + 12 * mw2 * mb4 * mfb2) /mw4 / 3.; } void SMTopDecayer::initializeMECorrection(RealEmissionProcessPtr born, double & initial, double & final) { // check the outgoing particles PPtr part[2]; for(unsigned int ix=0;ix<born->bornOutgoing().size();++ix) { part[ix]= born->bornOutgoing()[ix]; } // check the final-state particles and get the masses if(abs(part[0]->id())==ParticleID::Wplus&&abs(part[1]->id())==ParticleID::b) { _ma=part[0]->mass(); _mc=part[1]->mass(); } else if(abs(part[1]->id())==ParticleID::Wplus&&abs(part[0]->id())==ParticleID::b) { _ma=part[1]->mass(); _mc=part[0]->mass(); } else { return; } // set the top mass _mt=born->bornIncoming()[0]->mass(); // set the gluon mass _mg=getParticleData(ParticleID::g)->constituentMass(); // set the radiation enhancement factors initial = _initialenhance; final = _finalenhance; // reduced mass parameters _a=sqr(_ma/_mt); _g=sqr(_mg/_mt); _c=sqr(_mc/_mt); double lambda = sqrt(1.+sqr(_a)+sqr(_c)-2.*_a-2.*_c-2.*_a*_c); _ktb = 0.5*(3.-_a+_c+lambda); _ktc = 0.5*(1.-_a+3.*_c+lambda); useMe(); } RealEmissionProcessPtr SMTopDecayer::applyHardMatrixElementCorrection(RealEmissionProcessPtr born) { // Get b and a and put them in particle vector ba in that order... ParticleVector ba; for(unsigned int ix=0;ix<born->bornOutgoing().size();++ix) ba.push_back(born->bornOutgoing()[ix]); if(abs(ba[0]->id())!=5) swap(ba[0],ba[1]); assert(born->bornIncoming().size()==1); // Now decide if we get an emission into the dead region. // If there is an emission newfs stores momenta for a,c,g // according to NLO decay matrix element. vector<Lorentz5Momentum> newfs = applyHard(ba,_ktb,_ktc); // If there was no gluon emitted return. if(newfs.size()!=3) return RealEmissionProcessPtr(); // Sanity checks to ensure energy greater than mass etc :) bool check = true; tcPDPtr gluondata=getParticleData(ParticleID::g); if (newfs[0].e()<ba[0]->data().constituentMass()) check = false; if (newfs[1].e()<ba[1]->mass()) check = false; if (newfs[2].e()<gluondata->constituentMass()) check = false; // Return if insane: if (!check) return RealEmissionProcessPtr(); // // Set masses in 5-vectors: newfs[0].setMass(ba[0]->mass()); newfs[1].setMass(ba[1]->mass()); newfs[2].setMass(ZERO); // The next part of this routine sets the colour structure. // To do this for decays we assume that the gluon comes from c! 
// First create new particle objects for c, a and gluon: PPtr newg = gluondata->produceParticle(newfs[2]); PPtr newc = ba[0]->data().produceParticle(newfs[0]); PPtr newa = ba[1]->data().produceParticle(newfs[1]); born->spectator(0); born->emitted(3); // decaying particle born->incoming().push_back(born->bornIncoming()[0]->dataPtr()-> produceParticle(born->bornIncoming()[0]->momentum())); // colour flow newg->incomingColour(born->incoming()[0],ba[0]->id()<0); newg->colourConnect(newc ,ba[0]->id()<0); if(born->bornOutgoing()[0]->id()==newc->id()) { born->outgoing().push_back(newc); born->outgoing().push_back(newa); born->emitter(1); } else { born->outgoing().push_back(newa); born->outgoing().push_back(newc); born->emitter(2); } born->outgoing().push_back(newg); // boost for the W LorentzRotation trans(ba[1]->momentum().findBoostToCM()); trans.boost(newfs[1].boostVector()); born->transformation(trans); if(!inTheDeadRegion(_xg,_xa,_ktb,_ktc)) { generator()->log() << "SMTopDecayer::applyHardMatrixElementCorrection()\n" << "Just found a point that escaped from the dead region!\n" << " _xg: " << _xg << " _xa: " << _xa << " newfs.size(): " << newfs.size() << endl; } born->interaction(ShowerInteraction::QCD); return born; } vector<Lorentz5Momentum> SMTopDecayer:: applyHard(const ParticleVector &p,double ktb, double ktc) { // ********************************* // // First we see if we get a dead // // region event: _xa,_xg // // ********************************* // vector<Lorentz5Momentum> fs; // Return if there is no (NLO) gluon emission: double weight = getHard(ktb,ktc); if(weight>1.) { generator()->log() << "Weight greater than 1 for hard emission in " << "SMTopDecayer::applyHard xg = " << _xg << " xa = " << _xa << "\n"; weight=1.; } // Accept/Reject if (weight<UseRandom::rnd()||p.size()!= 2) return fs; // Drop events if getHard returned a negative weight // as in events that, somehow have escaped from the dead region // or, worse, the allowed region. if(weight<0.) return fs; // Calculate xc by momentum conservation: _xc = 2.-_xa-_xg; // ************************************ // // Now we get the boosts & rotations to // // go from lab to top rest frame with // // a in the +z direction. // // ************************************ // Lorentz5Momentum pa_lab,pb_lab,pc_lab,pg_lab; // Calculate momentum of b: pb_lab = p[0]->momentum() + p[1]->momentum(); // Define/assign momenta of c,a and the gluon: if(abs(p[0]->id())==5) { pc_lab = p[0]->momentum(); pa_lab = p[1]->momentum(); } else { pc_lab = p[1]->momentum(); pa_lab = p[0]->momentum(); } // Calculate the boost to the b rest frame: SpinOneLorentzRotation rot0(pb_lab.findBoostToCM()); // Calculate the rotation matrix to position a along the +z direction // in the rest frame of b and does a random rotation about z: SpinOneLorentzRotation rot1 = rotateToZ(rot0*pa_lab); // Calculate the boost from the b rest frame back to the lab: // and the inverse of the random rotation about the z-axis and the // rotation required to align a with +z: SpinOneLorentzRotation invrot = rot0.inverse()*rot1.inverse(); // ************************************ // // Now we construct the momenta in the // // b rest frame using _xa,_xg. // // First we construct b, then c and g, // // finally we generate a by momentum // // conservation. // // ************************************ // Lorentz5Momentum pa_brf, pb_brf(_mt), pc_brf, pg_brf; // First we set the top quark to being on-shell and at rest. 
// Second we set the energies of c and g, pc_brf.setE(0.5*_mt*(2.-_xa-_xg)); pg_brf.setE(0.5*_mt*_xg); // then their masses, pc_brf.setMass(_mc); pg_brf.setMass(ZERO); // Now set the z-component of c and g. For pg we simply start from // _xa and _xg, while for pc we assume it is equal to minus the sum // of the z-components of a (assumed to point in the +z direction) and g. double root=sqrt(_xa*_xa-4.*_a); pg_brf.setZ(_mt*(1.-_xa-_xg+0.5*_xa*_xg-_c+_a)/root); pc_brf.setZ(-1.*( pg_brf.z()+_mt*0.5*root)); // Now set the y-component of c and g's momenta pc_brf.setY(ZERO); pg_brf.setY(ZERO); // Now set the x-component of c and g's momenta pg_brf.setX(sqrt(sqr(pg_brf.t())-sqr(pg_brf.z()))); pc_brf.setX(-pg_brf.x()); // Momenta b,c,g are now set. Now we obtain a from momentum conservation, pa_brf = pb_brf-pc_brf-pg_brf; pa_brf.setMass(pa_brf.m()); pa_brf.rescaleEnergy(); // ************************************ // // Now we orient the momenta and boost // // them back to the original lab frame. // // ************************************ // // As in herwig6507 we assume that, in the rest frame // of b, we have aligned the W boson momentum in the // +Z direction by rot1*rot0*pa_lab, therefore // we obtain the new pa_lab by applying: // invrot*pa_brf. pa_lab = invrot*pa_brf; pb_lab = invrot*pb_brf; pc_lab = invrot*pc_brf; pg_lab = invrot*pg_brf; fs.push_back(pc_lab); fs.push_back(pa_lab); fs.push_back(pg_lab); return fs; } double SMTopDecayer::getHard(double ktb, double ktc) { // zero the variables _xg = 0.; _xa = 0.; _xc = 0.; // Get a phase space point in the dead region: double volume_factor = deadRegionxgxa(ktb,ktc); // if outside region return -1 if(volume_factor<0) return volume_factor; // Compute the weight for this phase space point: double weight = volume_factor*me(_xa,_xg)*(1.+_a-_c-_xa); // Alpha_S and colour factors - this hard wired Alpha_S needs removing. weight *= (4./3.)/Constants::pi *(_alpha->value(_mt*_mt*_xg*(1.-_xa+_a-_c) /(2.-_xg-_xa-_c))); return weight; } bool SMTopDecayer::softMatrixElementVeto(ShowerProgenitorPtr initial, ShowerParticlePtr parent,Branching br) { // check if we need to apply the full correction long id[2]={abs(initial->progenitor()->id()),abs(parent->id())}; // the initial-state correction if(id[0]==ParticleID::t&&id[1]==ParticleID::t) { Energy pt=br.kinematics->pT(); // check if hardest so far // if not just need to remove effect of enhancement bool veto(false); // if not hardest so far if(pt<initial->highestpT()) veto=!UseRandom::rndbool(1./_initialenhance); // if hardest so far do calculation else { // values of kappa and z double z(br.kinematics->z()),kappa(sqr(br.kinematics->scale()/_mt)); // parameters for the translation double w(1.-(1.-z)*(kappa-1.)),u(1.+_a-_c-(1.-z)*kappa),v(sqr(u)-4.*_a*w*z); // veto if outside phase space if(v<0.) veto=true; // otherwise calculate the weight else { v = sqrt(v); double xa((0.5*(u+v)/w+0.5*(u-v)/z)),xg((1.-z)*kappa); double f(me(xa,xg)), J(0.5*(u+v)/sqr(w)-0.5*(u-v)/sqr(z)+_a*sqr(w-z)/(v*w*z)); double wgt(f*J*2./kappa/(1.+sqr(z)-2.*z/kappa)/_initialenhance); // This next `if' prevents the hardest emission from the // top shower ever entering the so-called T2 region of the // phase space if that region is to be populated by the hard MEC. if(_useMEforT2&&xg>xgbcut(_ktb)) wgt = 0.; if(wgt>1.) 
{ generator()->log() << "Violation of maximum for initial-state " << " soft veto in " << "SMTopDecayer::softMatrixElementVeto" << "xg = " << xg << " xa = " << xa << "weight = " << wgt << "\n"; wgt=1.; } // compute veto from weight veto = !UseRandom::rndbool(wgt); } // if not vetoed reset max if(!veto) initial->highestpT(pt); } // if vetoing reset the scale if(veto) parent->vetoEmission(br.type,br.kinematics->scale()); // return the veto return veto; } // final-state correction else if(id[0]==ParticleID::b&&id[1]==ParticleID::b) { Energy pt=br.kinematics->pT(); // check if hardest so far // if not just need to remove effect of enhancement bool veto(false); // if not hardest so far if(pt<initial->highestpT()) return !UseRandom::rndbool(1./_finalenhance); // if hardest so far do calculation // values of kappa and z double z(br.kinematics->z()),kappa(sqr(br.kinematics->scale()/_mt)); // momentum fractions double xa(1.+_a-_c-z*(1.-z)*kappa),r(0.5*(1.+_c/(1.+_a-xa))),root(sqr(xa)-4.*_a); if(root<0.) { generator()->log() << "Imaginary root for final-state veto in " << "SMTopDecayer::softMatrixElementVeto" << "\nz = " << z << "\nkappa = " << kappa << "\nxa = " << xa << "\nroot^2= " << root; parent->vetoEmission(br.type,br.kinematics->scale()); return true; } root=sqrt(root); double xg((2.-xa)*(1.-r)-(z-r)*root); // xfact (below) is supposed to equal xg/(1-z). double xfact(z*kappa/2./(z*(1.-z)*kappa+_c)*(2.-xa-root)+root); // calculate the full result double f(me(xa,xg)); // jacobian double J(z*root); double wgt(f*J*2.*kappa/(1.+sqr(z)-2.*_c/kappa/z)/sqr(xfact)/_finalenhance); if(wgt>1.) { generator()->log() << "Violation of maximum for final-state soft veto in " << "SMTopDecayer::softMatrixElementVeto" << "xg = " << xg << " xa = " << xa << "weight = " << wgt << "\n"; wgt=1.; } // compute veto from weight veto = !UseRandom::rndbool(wgt); // if vetoing reset the scale if(veto) parent->vetoEmission(br.type,br.kinematics->scale()); // return the veto return veto; } // otherwise don't veto else return !UseRandom::rndbool(1./_finalenhance); } double SMTopDecayer::me(double xw,double xg) { double prop(1.+_a-_c-xw),xg2(sqr(xg)); double lambda=sqrt(1.+_a*_a+_c*_c-2.*_a-2.*_c-2.*_a*_c); double denom=(1.-2*_a*_a+_a+_c*_a+_c*_c-2.*_c); double wgt=-_c*xg2/prop+(1.-_a+_c)*xg-(prop*(1 - xg)+xg2) +(0.5*(1.+2.*_a+_c)*sqr(prop-xg)*xg+2.*_a*prop*xg2)/denom; return wgt/(lambda*prop); } // This function is auxiliary to the xab function. double SMTopDecayer::xgbr(int toggle) { return 1.+toggle*sqrt(_a)-_c*(1.-toggle*sqrt(_a))/(1.-_a); } // This function is auxiliary to the xab function. double SMTopDecayer::ktr(double xgb, int toggle) { return 2.*xgb/ (xgb+toggle*sqrt((1.-1./_a) *(xgb-xgbr( 1)) *(xgb-xgbr(-1)))); } // Function xab determines xa (2*W energy fraction) for a given value // of xg (2*gluon energy fraction) and kappa tilde (q tilde squared over // m_top squared). Hence this function allows you to draw 1: the total // phase space volume in the xa vs xg plane 2: for a given value of // kappa tilde (i.e. starting evolution scale) the associated contour // in the xa vs xg plane (and hence the regions that either shower can // populate). This calculation is done assuming the emission came from // the top quark i.e. kappa tilde here is the q tilde squared of the TOP // quark divided by m_top squared. double SMTopDecayer::xab(double xgb, double kt, int toggle) { double xab; if(toggle==2) { // This applies for g==0.&&kt==ktr(a,c,0.,xgb,1). 
xab = -2.*_a*(xgb-2.)/(1.+_a-_c-xgb); } else if(toggle==1) { // This applies for kt==1&&g==0. double lambda = sqrt(sqr(xgb-1.+_a+_c)-4.*_a*_c); xab = (0.5/(kt-xgb))*(kt*(1.+_a-_c-xgb)-lambda) + (0.5/(kt+xgb*(1.-kt)))*(kt*(1.+_a-_c-xgb)+lambda); } else { // This is the form of xab FOR _g=0. double ktmktrpktmktrm = kt*kt - 4.*_a*(kt-1.)*xgb*xgb / (sqr(1.-_a-_c-xgb)-4.*_a*_c); if(fabs(kt-(2.*xgb-2.*_g)/(xgb-sqrt(xgb*xgb-4.*_g)))/kt>1.e-6) { double lambda = sqrt((sqr(1.-_a-_c-xgb)-4.*_a*_c)*ktmktrpktmktrm); xab = (0.5/(kt-xgb))*(kt*(1.+_a-_c-xgb)-lambda) + (0.5/(kt+xgb*(1.-kt)))*(kt*(1.+_a-_c-xgb)+lambda); } else { // This is the value of xa as a function of xb when kt->infinity. // Where we take any kt > (2.*xgb-2.*_g)/(xgb-sqrt(xgb*xgb-4.*_g)) // as being effectively infinite. This kt value is actually the // maximum allowed value kt can have if the phase space is calculated // without the approximation of _g=0 (massless gluon). This formula // for xab below is then valid for _g=0 AND kt=infinity only. xab = ( 2.*_c+_a*(xgb-2.) + 3.*xgb - xgb*(_c+xgb+sqrt(_a*_a-2.*(_c-xgb+1.)*_a+sqr(_c+xgb-1.))) - 2. )/2./(xgb-1.); } } - if(isnan(xab)) { + if(std::isnan(xab)) { double ktmktrpktmktrm = ( sqr(xgb*kt-2.*(xgb-_g)) -kt*kt*(1.-1./_a)*(xgb-xgbr( 1)-_g/(1.+sqrt(_a))) *(xgb-xgbr(-1)-_g/(1.-sqrt(_a))) )/ (xgb*xgb-(1.-1./_a)*(xgb-xgbr( 1)-_g/(1.+sqrt(_a))) *(xgb-xgbr(-1)-_g/(1.-sqrt(_a))) ); double lambda = sqrt((xgb-1.+sqr(sqrt(_a)+sqrt(_c-_g))) *(xgb-1.+sqr(sqrt(_a)-sqrt(_c-_g)))* ktmktrpktmktrm); xab = (0.5/(kt-xgb+_g))*(kt*(1.+_a-_c+_g-xgb)-lambda) + (0.5/(kt+xgb*(1.-kt)-_g))*(kt*(1.+_a-_c+_g-xgb)+lambda); - if(isnan(xab)) + if(std::isnan(xab)) throw Exception() << "TopMECorrection::xab complex x_a value.\n" << " xgb = " << xgb << "\n" << " xab = " << xab << "\n" << " toggle = " << toggle << "\n" << " ktmktrpktmktrm = " << ktmktrpktmktrm << Exception::eventerror; } return xab; } // xgbcut is the point along the xg axis where the upper bound on the // top quark (i.e. b) emission phase space goes back on itself in the // xa vs xg plane i.e. roughly mid-way along the xg axis in // the xa vs xg Dalitz plot. double SMTopDecayer::xgbcut(double kt) { double lambda2 = 1.+_a*_a+_c*_c-2.*_a-2.*_c-2.*_a*_c; double num1 = kt*kt*(1.-_a-_c); double num2 = 2.*kt*sqrt(_a*(kt*kt*_c+lambda2*(kt-1.))); return (num1-num2)/(kt*kt-4.*_a*(kt-1.)); } double SMTopDecayer::xaccut(double kt) { return 1.+_a-_c-0.25*kt; } double SMTopDecayer::z(double xac, double kt, int toggle1, int toggle2) { double z = -1.0; if(toggle2==0) { z = (kt+toggle1*sqrt(kt*(kt-4.*(1.+_a-_c-xac))))/(2.*kt); } else if(toggle2==1) { z = ((1.+_a+_c-xac)+toggle1*(1.+_a-_c-xac)) /(2.*(1.+_a-xac)); } else if(toggle2==2) { z = 0.5; } else { throw Exception() << "Cannot determine z in SMTopDecayer::z()" << Exception::eventerror; } return z; } double SMTopDecayer::xgc(double xac, double kt, int toggle1, int toggle2) { double tiny(1.e-6); double xaToMinBoundary(xac*xac-4.*_a); if(xaToMinBoundary<0) { if(fabs(xaToMinBoundary/(1.-_a)/(1.-_a))<tiny) xaToMinBoundary *= -1.; else throw Exception() << "SMTopDecayer::xgc xa not in phase space!" << Exception::eventerror; } return (2.-xac)*(1.-0.5*(1.+_c/(1.+_a-xac))) -(z(xac,kt,toggle1,toggle2)-0.5*(1.+_c/(1.+_a-xac))) *sqrt(xaToMinBoundary); } double SMTopDecayer::xginvc0(double xg , double kt) { // The function xg(kappa_tilde_c,xa) surely, enough, draws a // line of constant kappa_tilde_c in the xg, xa Dalitz plot. 
// Such a function can therefore draw the upper and lower // edges of the phase space for emission from c (the b-quark). // However, to sample the soft part of the dead zone effectively // we want to generate a value of xg first and THEN distribute // xa in the associated allowed part of the dead zone. Hence, the // function we want, to define the dead zone in xa for a given // xg, is the inverse of xg(kappa_tilde_c,xa). The full expression // for xg(kappa_tilde_c,xa) is complicated and, sure enough, // does not invert. Therefore we try to overestimate the size // of the dead zone initially, rejecting events which do not // fall exactly inside it afterwards, with the immediate aim // of getting an approximate version of xg(kappa_tilde_c,xa) // that can be inverted. We do this by simply setting c=0 i.e. // the b-quark mass to zero (and the gluon mass of course), in // the full expression xg(...). The result of inverting this // function is the output of this routine (a value of xa) hence // the name xginvc0. xginvc0 is calculated to be, // xginvc0 = (1./3.)*(1.+a+pow((U+sqrt(4.*V*V*V+U*U))/2.,1./3.) // -V*pow(2./(U+sqrt(4.*V*V*V+U*U)),1./3.) // ) // U = 2.*a*a*a - 66.*a*a + 9.*a*kt*xg + 18.*a*kt // - 66.*a + 27.*kt*xg*xg - 45.*kt*xg +18.*kt +2. ; // V = -1.-a*a-14.*a-3.kt*xg+3.*kt; // This function, as with many functions in this ME correction, // is plagued by cuts that have to handled carefully in numerical // implementation. We have analysed the cuts and hence we implement // it in the following way, with a series of 'if' statements. // // A useful -definition- to know in deriving the v<0 terms is // that tanh^-1(z) = 0.5*(log(1.+z)-log(1.-z)). double u,v,output; u = 2.*_a*_a*_a-66.*_a*_a +9.*xg*kt*_a+18.*kt*_a -66.*_a+27.*xg*xg*kt -45.*xg*kt+18.*kt+2.; v = -_a*_a-14.*_a-3.*xg*kt+3.*kt-1.; double u2=u*u,v3=v*v*v; if(v<0.) { if(u>0.&&(4.*v3+u2)<0.) output = cos( atan(sqrt(-4.*v3-u2)/u)/3.); else if(u>0.&&(4.*v3+u2)>0.) 
output = cosh(atanh(sqrt( 4.*v3+u2)/u)/3.); else output = cos(( atan(sqrt(-4.*v3-u2)/u) +Constants::pi)/3.); output *= 2.*sqrt(-v); } else { output = sinh(log((u+sqrt(4.*v3+u2))/(2.*sqrt(v3)))/3.); output *= 2.*sqrt(v); } - if(isnan(output)||isinf(output)) { + if(!isfinite(output)) { throw Exception() << "TopMECorrection::xginvc0:\n" << "possible numerical instability detected.\n" << "\n v = " << v << " u = " << u << "\n4.*v3+u2 = " << 4.*v3+u2 << "\n_a = " << _a << " ma = " << sqrt(_a*_mt*_mt/GeV2) << "\n_c = " << _c << " mc = " << sqrt(_c*_mt*_mt/GeV2) << "\n_g = " << _g << " mg = " << sqrt(_g*_mt*_mt/GeV2) << Exception::eventerror; } return ( 1.+_a +output)/3.; } double SMTopDecayer::approxDeadMaxxa(double xg,double ktb,double ktc) { double maxxa(0.); double x = min(xginvc0(xg,ktc), xab(xg,(2.*xg-2.*_g)/(xg-sqrt(xg*xg-4.*_g)),0)); double y(-9999999999.); if(xg>2.*sqrt(_g)&&xg<=xgbcut(ktb)) { y = max(xab(xg,ktb,0),xab(xg,1.,1)); } else if(xg>=xgbcut(ktb)&&xg<=1.-sqr(sqrt(_a)+sqrt(_c))) { y = max(xab(xg,ktr(xg,1),2),xab(xg,1.,1)); } if(xg>2.*sqrt(_g)&&xg<=1.-sqr(sqrt(_a)+sqrt(_c))) { if(x>=y) { maxxa = x ; } else { maxxa = -9999999.; } } else { maxxa = -9999999.; } return maxxa; } double SMTopDecayer::approxDeadMinxa(double xg,double ktb,double ktc) { double minxa(0.); double x = min(xginvc0(xg,ktc), xab(xg,(2.*xg-2.*_g)/(xg-sqrt(xg*xg-4.*_g)),0)); double y(-9999999999.); if(xg>2.*sqrt(_g)&&xg<=xgbcut(ktb)) { y = max(xab(xg,ktb,0),xab(xg,1.,1)); } else if(xg>=xgbcut(ktb)&&xg<=1.-sqr(sqrt(_a)+sqrt(_c))) { if(_useMEforT2) y = xab(xg,1.,1); else y = max(xab(xg,ktr(xg,1),2),xab(xg,1.,1)); } if(xg>2.*sqrt(_g)&&xg<=1.-sqr(sqrt(_a)+sqrt(_c))) { if(x>=y) { minxa = y ; } else { minxa = 9999999.; } } else { minxa = 9999999.; } return minxa; } // This function returns true if the phase space point (xg,xa) is in the // kinematically allowed phase space. bool SMTopDecayer::inTheAllowedRegion(double xg , double xa) { bool output(true); if(xg<2.*sqrt(_g)||xg>1.-sqr(sqrt(_a)+sqrt(_c))) output = false; if(xa<xab(xg,1.,1)) output = false; if(xa>xab(xg,(2.*xg-2.*_g)/(xg-sqrt(xg*xg-4.*_g)),0)) output = false; return output; } // This function returns true if the phase space point (xg,xa) is in the // approximate (overestimated) dead region. bool SMTopDecayer::inTheApproxDeadRegion(double xg , double xa, double ktb, double ktc) { bool output(true); if(!inTheAllowedRegion(xg,xa)) output = false; if(xa<approxDeadMinxa(xg,ktb,ktc)) output = false; if(xa>approxDeadMaxxa(xg,ktb,ktc)) output = false; return output; } // This function returns true if the phase space point (xg,xa) is in the // dead region. bool SMTopDecayer::inTheDeadRegion(double xg , double xa, double ktb, double ktc) { bool output(true); if(!inTheApproxDeadRegion(xg,xa,ktb,ktc)) output = false; if(xa>xaccut(ktc)) { if(xg<xgc(max(xaccut(ktc),2.*sqrt(_a)),ktc, 1,2)&& xg>xgc(xa,ktc, 1,0)) { output = false; } if(xg>xgc(max(xaccut(ktc),2.*sqrt(_a)),ktc,-1,2)&& xg<xgc(xa,ktc,-1,0)) { output = false; } } return output; } // This function attempts to generate a phase space point in the dead // region and returns the associated phase space volume factor needed for // the associated event weight. double SMTopDecayer::deadRegionxgxa(double ktb,double ktc) { _xg=0.; _xa=0.; // Here we set limits on xg and generate a value inside the bounds. double xgmin(2.*sqrt(_g)),xgmax(1.-sqr(sqrt(_a)+sqrt(_c))); // Generate _xg. if(_xg_sampling==2.) { _xg=xgmin*xgmax/(xgmin+UseRandom::rnd()*(xgmax-xgmin)); } else { _xg=xgmin*xgmax/pow(( pow(xgmin,_xg_sampling-1.) 
+ UseRandom::rnd()*(pow(xgmax,_xg_sampling-1.) -pow(xgmin,_xg_sampling-1.)) ),1./(_xg_sampling-1.)); } // Here we set the bounds on _xa for given _xg. if(_xg<xgmin||xgmin>xgmax) throw Exception() << "TopMECorrection::deadRegionxgxa:\n" << "upper xg bound is less than the lower xg bound.\n" << "\n_xg = " << _xg << "\n2.*sqrt(_g) = " << 2.*sqrt(_g) << "\n_a = " << _a << " ma = " << sqrt(_a*_mt*_mt/GeV2) << "\n_c = " << _c << " mc = " << sqrt(_c*_mt*_mt/GeV2) << "\n_g = " << _g << " mg = " << sqrt(_g*_mt*_mt/GeV2) << Exception::eventerror; double xamin(approxDeadMinxa(_xg,ktb,ktc)); double xamax(approxDeadMaxxa(_xg,ktb,ktc)); // Are the bounds sensible? If not return. if(xamax<=xamin) return -1.; _xa=1.+_a-(1.+_a-xamax)*pow((1.+_a-xamin)/(1.+_a-xamax),UseRandom::rnd()); // If outside the allowed region return -1. if(!inTheDeadRegion(_xg,_xa,ktb,ktc)) return -1.; // The integration volume for the weight double xg_vol,xa_vol; if(_xg_sampling==2.) { xg_vol = (xgmax-xgmin) / (xgmax*xgmin); } else { xg_vol = (pow(xgmax,_xg_sampling-1.)-pow(xgmin,_xg_sampling-1.)) / ((_xg_sampling-1.)*pow(xgmax*xgmin,_xg_sampling-1.)); } xa_vol = log((1.+_a-xamin)/(1.+_a-xamax)); // Here we return the integral volume factor multiplied by the part of the // weight left over which is not included in the BRACES function, i.e. // the part of _xg^-2 which is not absorbed in the integration measure. return xg_vol*xa_vol*pow(_xg,_xg_sampling-2.); } LorentzRotation SMTopDecayer::rotateToZ(Lorentz5Momentum v) { // compute the rotation matrix LorentzRotation trans; // rotate so in z-y plane trans.rotateZ(-atan2(v.y(),v.x())); // rotate so along Z trans.rotateY(-acos(v.z()/v.vect().mag())); // generate random rotation double c,s,cs; do { c = 2.*UseRandom::rnd()-1.; s = 2.*UseRandom::rnd()-1.; cs = c*c+s*s; } while(cs>1.||cs==0.); double cost=(c*c-s*s)/cs,sint=2.*c*s/cs; // apply random azimuthal rotation trans.rotateZ(atan2(sint,cost)); return trans; } diff --git a/Decay/Radiation/FFDipole.cc b/Decay/Radiation/FFDipole.cc --- a/Decay/Radiation/FFDipole.cc +++ b/Decay/Radiation/FFDipole.cc @@ -1,947 +1,947 @@ // -*- C++ -*- // // FFDipole.cc is a part of Herwig - A multi-purpose Monte Carlo event generator // Copyright (C) 2002-2011 The Herwig Collaboration // // Herwig is licenced under version 2 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. // // // This is the implementation of the non-inlined, non-templated member // functions of the FFDipole class. 
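// A schematic of the strategy implemented in generatePhotons()/makePhotons()
// below; nbar, exactDipoleWeight_i, crudeDipoleWeight_i, yfsFormFactor,
// jacobianWeight, meCorrection and maxwgt are illustrative placeholders here,
// not members of this class:
//
//   unsigned int n = UseRandom::rndPoisson(nbar);     // crude photon multiplicity
//   double wgt = exp(nbar);                           // undo the crude YFS factor
//   for(unsigned int i=0;i<n;++i) {
//     // generate photon i from the crude dipole distribution, then
//     wgt *= exactDipoleWeight_i/crudeDipoleWeight_i; // per-photon reweighting
//   }
//   wgt *= yfsFormFactor*jacobianWeight*meCorrection; // remaining weight factors
//   // accept if wgt >= maxwgt*UseRandom::rnd(), otherwise regenerate
//
// The code below follows this pattern, with the frame bookkeeping and the
// optional photon energy cut-offs made explicit.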
// #include "FFDipole.h" #include "ThePEG/PDT/EnumParticles.h" #include "ThePEG/EventRecord/Particle.h" #include "ThePEG/Interface/Parameter.h" #include "ThePEG/Interface/Switch.h" #include "ThePEG/Utilities/Debug.h" #include "ThePEG/Interface/ClassDocumentation.h" #include "ThePEG/Persistency/PersistentOStream.h" #include "ThePEG/Persistency/PersistentIStream.h" #include "YFSFormFactors.h" #include "Herwig/Decay/DecayPhaseSpaceMode.h" #include "Herwig/Decay/DecayIntegrator.h" using namespace Herwig; void FFDipole::persistentOutput(PersistentOStream & os) const { os << ounit(_emin,GeV) << ounit(_eminrest,GeV) << ounit(_eminlab,GeV) << _maxwgt << _weightOutput << _mode << _maxtry << _energyopt << _betaopt << _dipoleopt; } void FFDipole::persistentInput(PersistentIStream & is, int) { is >> iunit(_emin,GeV) >> iunit(_eminrest,GeV) >> iunit(_eminlab,GeV) >> _maxwgt >> _weightOutput >> _mode >> _maxtry >> _energyopt >> _betaopt >> _dipoleopt; } FFDipole::~FFDipole() {} ClassDescription<FFDipole> FFDipole::initFFDipole; // Definition of the static class description member. void FFDipole::Init() { static ClassDocumentation<FFDipole> documentation ("The FFDipole class implements the final-final dipole for the SOPTHY algorithm"); static Switch<FFDipole,unsigned int> interfaceUnWeight ("UnWeight", "Control the type of unweighting to perform, only one should be used the" " other options are for debugging purposes.", &FFDipole::_mode, 1, false, false); static SwitchOption interfaceUnWeightNoUnweighting (interfaceUnWeight, "NoUnweighting", "Perform no unweighting", 0); static SwitchOption interfaceUnWeightAllWeights (interfaceUnWeight, "AllWeights", "Include all the weights", 1); static SwitchOption interfaceUnWeightNoJacobian (interfaceUnWeight, "NoJacobian", "Only include the dipole and YFS weights", 2); static SwitchOption interfaceUnWeightDipole (interfaceUnWeight, "Dipole", "Only include the dipole weight", 3); static SwitchOption interfaceUnWeightYFS (interfaceUnWeight, "YFS", "Only include the YFS weight", 4); static SwitchOption interfaceUnWeightNLO (interfaceUnWeight, "NLO", "Weight to get the stict NLO rate", 5); static Parameter<FFDipole,unsigned int> interfaceMaximumTries ("MaximumTries", "Maximum number of attempts to unweight", &FFDipole::_maxtry, 500, 10, 100000, false, false, Interface::limited); static Parameter<FFDipole,Energy> interfaceMinimumEnergyBoosted ("MinimumEnergyBoosted", "The minimum energy of the photons in the boosted frame in which" " they are generated.", &FFDipole::_emin, MeV, 1.e-6*MeV, ZERO, 100.0*MeV, false, false, Interface::limited); static Parameter<FFDipole,Energy> interfaceMinimumEnergyRest ("MinimumEnergyRest", "The minimum energy of the photons in the rest frame of the decaying particle", &FFDipole::_eminrest, MeV, 100.0*MeV, 1.0*MeV, 10000.0*MeV, false, false, Interface::limited); static Parameter<FFDipole,Energy> interfaceMinimumEnergyLab ("MinimumEnergyLab", "The minimum energy of the photons in the lab frame", &FFDipole::_eminlab, MeV, 100.0*MeV, 1.0*MeV, 10000.0*MeV, false, false, Interface::limited); static Parameter<FFDipole,double> interfaceMaximumWeight ("MaximumWeight", "The maximum weight for unweighting", &FFDipole::_maxwgt, 7.0, 0.0, 100.0, false, false, Interface::limited); static Switch<FFDipole,unsigned int> interfaceEnergyCutOff ("EnergyCutOff", "The type of cut-off on the photon energy to apply", &FFDipole::_energyopt, 1, false, false); static SwitchOption interfaceEnergyCutOffBoostedFrame (interfaceEnergyCutOff, "BoostedFrame", "Only apply 
cut-off in boosted frame", 0); static SwitchOption interfaceEnergyCutOffRestFrame (interfaceEnergyCutOff, "RestFrame", "Apply cut-off in rest frame", 1); static SwitchOption interfaceEnergyCutOff2 (interfaceEnergyCutOff, "LabFrame", "Apply cut-off in lab frame", 2); static Switch<FFDipole,unsigned int> interfaceBetaOption ("BetaOption", "Option for the inclusive of the higher beta coefficients", &FFDipole::_betaopt, 4, false, false); static SwitchOption interfaceBetaOptionNone (interfaceBetaOption, "None", "No higher betas included", 0); static SwitchOption interfaceBetaOptionCollinear (interfaceBetaOption, "Collinear", "Include the collinear approx", 1); static SwitchOption interfaceBetaOptionCollinearVirtA (interfaceBetaOption, "CollinearVirtualA", "Include the collinear approx with virtual corrections", 2); static SwitchOption interfaceBetaOptionCollinearVirtB (interfaceBetaOption, "CollinearVirtualB", "Include the collinear approx with virtual corrections", 3); static SwitchOption interfaceBetaOptionExact (interfaceBetaOption, "Exact", "Include the exact higher order terms if available", 4); static Switch<FFDipole,unsigned int> interfaceDipoleOption ("DipoleOption", "Option for generating the primary dipole distribution", &FFDipole::_dipoleopt, 0, false, false); static SwitchOption interfaceDipoleOptionNoMass (interfaceDipoleOption, "NoMass", "Don't include the mass terms in the primary distribution", 0); static SwitchOption interfaceDipoleOptionMass (interfaceDipoleOption, "Mass", "Include the mass terms in the primary distribution", 1); static Switch<FFDipole,bool> interfaceWeightOutput ("WeightOutput", "Whether or not to output the average weight for testing", &FFDipole::_weightOutput, false, false, false); static SwitchOption interfaceWeightOutputNo (interfaceWeightOutput, "No", "Don't output the average", false); static SwitchOption interfaceWeightOutputYes (interfaceWeightOutput, "Yes", "Output the average", true); } void FFDipole::printDebugInfo(const Particle & p, const ParticleVector & children, double wgt) const { generator()->log() << "Input masses " << p.mass()/GeV << " -> " << children[0]->mass()/GeV << " " << children[1]->mass()/GeV << '\n'; generator()->log() << "Momenta\n"; generator()->log() << "parent " << p.momentum()/GeV << '\n'; for(unsigned int ix=0;ix<2;++ix) generator()->log() << "charged " << ix << " " << _qnewlab[ix]/GeV << " " << children[ix]->momentum()/GeV << '\n'; for(unsigned int ix=0;ix<_multiplicity;++ix) { generator()->log() << "photons " << ix << " " << "phocut " << _photcut[ix] << ' ' << _llab[ix]/GeV << '\n'; } generator()->log() << "wgt : " << wgt << '\n'; generator()->log() << "_mewgt : " << _mewgt << '\n'; generator()->log() << "_jacobianwgt: " << _jacobianwgt << '\n'; generator()->log() << "_yfswgt : " << _yfswgt << '\n'; generator()->log() << "_dipolewgt : " << _dipolewgt << '\n'; generator()->log() << "dipoleopt : " << _dipoleopt << '\n'; } ParticleVector FFDipole::generatePhotons(const Particle & p, ParticleVector children, tDecayIntegratorPtr decayer) { _parent = const_ptr_cast<tPPtr>(&p); // set the decayer _decayer=decayer; // set parameters which won't change in the event loop // masses of the particles _m[0] = p.mass(); _m[1] = children[0]->mass(); _m[2] = children[1]->mass(); // set the maximum photon energy (exact - no approximations here). 
_emax=(0.5*(_m[0]-sqr(_m[1]+_m[2])/_m[0]))*_m[0]/(_m[1]+_m[2]); // check masses non-zero for(unsigned int ix=0;ix<2;++ix) { if(children[ix]->mass()<1e-4*GeV) { ostringstream message; message << "FFDipole::generatePhotons() trying to generate QED radiation from " << children[ix]->dataPtr()->PDGName() << "\n with mass " << children[ix]->mass()/GeV << "which is much smaller than the mass of the electron.\n" << "This is probably due to reading events from a LHEF,\nskipping radiation in this case.\n"; generator()->logWarning( Exception(message.str(), Exception::warning)); return children; } } // momenta before radiation in lab for(unsigned int ix=0;ix<2;++ix) _qlab[ix]=children[ix]->momentum(); // get the charges of the particles in units of the positron charge _charge=children[0]->dataPtr()->iCharge()*children[1]->dataPtr()->iCharge()/9.; // boost the momenta to the rest frame Boost boostv(-p.momentum().boostVector()); // boost the particles to the parent rest frame // and set the initial momenta of the charged particles // in the dipole rest frame: currently this is the same // as the boson rest frame... for(unsigned int ix=0;ix<2;++ix) { children[ix]->deepBoost(boostv); _qdrf[ix]=children[ix]->momentum(); _qprf[ix]=children[ix]->momentum(); } _parent->boost(boostv); // perform the unweighting double wgt; unsigned int ntry(0); do { ++ntry; wgt = makePhotons(-boostv,children); // Error checks - if ( isnan(wgt) ) { + if ( std::isnan(wgt) ) { generator()->log() << "Infinite weight for decay " << p.PDGName() << " " << children[0]->PDGName() << " " << children[1]->PDGName() << '\n'; wgt = 0.0; } else if ( wgt < 0.0 && _mode != 5 ) { generator()->log() << "Negative weight for decay " << p.PDGName() << " " << children[0]->PDGName() << " " << children[1]->PDGName() << "in FFDipole: Weight = " << wgt << '\n'; if ( Debug::level ) printDebugInfo(p,children,wgt); } else if ( wgt > _maxwgt ) { generator()->log() << "Weight "<< wgt<<" exceeds maximum for decay " << p.PDGName() << ' ' << children[0]->PDGName() << " " << children[1]->PDGName() << " in FFDipole:\nresetting maximum weight.\n" << "Old Maximum = " << _maxwgt; _maxwgt = min(1.1 * wgt, 10.0); generator()->log() << " New Maximum = " << wgt << '\n'; if ( Debug::level && _mode!=5 ) printDebugInfo(p,children,wgt); } // End of error checks _wgtsum += wgt; _wgtsq += sqr(wgt); ++_nweight; } while ( wgt<(_maxwgt*UseRandom::rnd()) && ntry<_maxtry ); if(ntry>=_maxtry) { generator()->log() << "FFDipole failed to generate QED radiation for the decay " << p.PDGName() << " -> " << children[0]->PDGName() << " " << children[1]->PDGName() << '\n'; _parent->boost(-boostv); for(unsigned int ix=0;ix<2;++ix) children[ix]->deepBoost(-boostv); return children; } // produce products after radiation if needed if(_multiplicity>0) { // change the momenta of the children, they are currently // in original rest frame for(unsigned int ix=0;ix<2;++ix) { // unit vector along direction Boost br = children[ix]->momentum().vect().unit(); // calculate the boost vector using expression accurate for beta->1 double beta(sqrt((_qdrf[ix].e()+_m[ix+1])*(_qdrf[ix].e()-_m[ix+1]))/ _qdrf[ix].e()); double ombeta(sqr(_m[ix+1]/_qdrf[ix].e())/(1.+beta)); double betap(sqrt((_qnewdrf[ix].e()+_m[ix+1])*(_qnewdrf[ix].e()-_m[ix+1])) /_qnewdrf[ix].e()); double ombetap(sqr(_m[ix+1]/_qnewdrf[ix].e())/(1.+betap)); // boost to get correct momentum in dipole rest frame double bv = -(ombetap-ombeta)/(beta*ombetap + ombeta); br *= bv; children[ix]->deepBoost(br); // boost to the parent rest frame 
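// A short momentum-conservation sketch of why the boost below is built from the
// total photon momentum: in the dipole rest frame the two charged children are
// back to back, so their summed 3-momentum vanishes and
// parent = child1 + child2 + photons implies p_parent(drf) = _bigLdrf.
// Promoting _bigLdrf to a five-momentum with the parent mass _m[0] and calling
// findBoostToCM() therefore gives the boost from the dipole rest frame to the
// parent rest frame, after which -boostv takes everything back to the lab.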
Lorentz5Momentum pnew(_bigLdrf); pnew.setMass(_m[0]); pnew.rescaleEnergy(); br = pnew.findBoostToCM(); children[ix]->deepBoost(br); // boost back to the lab children[ix]->deepBoost(-boostv); } // add the photons to the event record tcPDPtr photon=getParticleData(ParticleID::gamma); for(unsigned int ix=0;ix<_multiplicity;++ix) { // add if not removed because energy too low if(!_photcut[ix]) { PPtr newphoton=new_ptr(Particle(photon)); newphoton->set5Momentum(_llab[ix]); children.push_back(newphoton); } } _parent->boost(-boostv); //printDebugInfo(p, children, wgt); return children; } // otherwise just return the original particles else { for(unsigned int ix=0;ix<2;++ix) children[ix]->deepBoost(-boostv); _parent->boost(-boostv); return children; } } // member which generates the photons double FFDipole::makePhotons(const Boost & boostv, const ParticleVector & children) { // set the initial parameters // number of photons (zero) _multiplicity=0; // zero size of photon vectors _ldrf.clear(); _lprf.clear(); _llab.clear(); // zero size of angle storage _sinphot.clear(); _cosphot.clear(); _photcut.clear(); _photonwgt.clear(); // zero total momenta of the photons _bigLdrf=Lorentz5Momentum(); _bigLprf=Lorentz5Momentum(); // set the initial values of the reweighting factors to one _dipolewgt = 1.0; _yfswgt = 1.0; _jacobianwgt = 1.0; _mewgt = 1.0; // calculate the velocities of the charged particles (crude/overvalued) double beta1(sqrt((_qdrf[0].e()+_m[1])*(_qdrf[0].e()-_m[1]))/_qdrf[0].e()); double beta2(sqrt((_qdrf[1].e()+_m[2])*(_qdrf[1].e()-_m[2]))/_qdrf[1].e()); // calculate 1-beta to avoid numerical problems double ombeta1(sqr(_m[1]/_qdrf[0].e())/(1.+beta1)); double ombeta2(sqr(_m[2]/_qdrf[1].e())/(1.+beta2)); // calculate the average photon multiplicity double aver(YFSFormFactors::nbarFF(beta1,ombeta1,beta2,ombeta2,_charge, _emax,_emin,_dipoleopt==1)); // calculate the number of photons using the poisson _multiplicity = _mode !=5 ? UseRandom::rndPoisson(aver) : 1; // calculate the first part of the YFS factor // (N.B. 
crude form factor is just exp(-aver) to get a poisson) _yfswgt *= exp(aver); // if photons produced if(_multiplicity>0) { _photonwgt.resize(_multiplicity); // generate the photon momenta with respect to q1 // keeping track of the weight for(unsigned int ix=0;ix<_multiplicity;++ix) _photonwgt[ix] = photon(beta1,ombeta1,beta2,ombeta2); // rotate the photons so in dipole rest frame rather // than angle measured w.r.t q1 first work out the rotation SpinOneLorentzRotation rotation; rotation.setRotateZ(-_qdrf[0].phi()); rotation.rotateY(_qdrf[0].theta()); rotation.rotateZ(_qdrf[0].phi()); // rotate the total _bigLdrf *= rotation; // rotate the photons for(unsigned int ix=0;ix<_multiplicity;++ix) _ldrf[ix]*=rotation; // boost the momenta without any removal of low energy photons // resize arrays _photcut.resize(_multiplicity,false); _lprf.resize(_multiplicity); _llab.resize(_multiplicity); // perform the boost if(!boostMomenta(boostv)){return 0.;} // apply the cut on the photon energy if needed unsigned int nremoved(removePhotons()); // redo the boost if we have removed photons if(nremoved!=0){if(!boostMomenta(boostv)){return 0.;}} // form factor part of the removal term to remove existing cut if(_energyopt!=0) _dipolewgt *= YFSFormFactors::exponentialYFSFF(beta1,ombeta1,beta2,ombeta2, _qdrf[0].e(),_qdrf[1].e(), _m[1],_m[2],_m[0]*_m[0], _charge,_emin); // calculate the new dipole weight // calculate velocities and 1-velocites beta1=sqrt((_qnewdrf[0].e()+_m[1])*(_qnewdrf[0].e()-_m[1]))/_qnewdrf[0].e(); beta2=sqrt((_qnewdrf[1].e()+_m[2])*(_qnewdrf[1].e()-_m[2]))/_qnewdrf[1].e(); ombeta1=sqr(_m[1]/_qnewdrf[0].e())/(1.+beta1); ombeta2=sqr(_m[2]/_qnewdrf[1].e())/(1.+beta2); for(unsigned int ix=0;ix<_multiplicity;++ix) { if(!_photcut[ix]) _dipolewgt *= exactDipoleWeight(beta1,ombeta1,beta2,ombeta2,ix)/ _photonwgt[ix]; } // calculate the weight for the photon removal Energy2 s((_qnewdrf[0]+_qnewdrf[1]).m2()); // calculate the second part of the yfs form factor // this is different for the different photon removal options // option with no removal if(_energyopt==0) { _yfswgt *= YFSFormFactors::exponentialYFSFF(beta1,ombeta1,beta2,ombeta2, _qnewdrf[0].e(),_qnewdrf[1].e(), _m[1],_m[2],s,_charge,_emin); } // weight for option with cut in the rest frame else if(_energyopt==1) { // yfs piece double nbeta1(sqrt( (_qnewprf[0].e()+_m[1])*(_qnewprf[0].e()-_m[1])) /_qnewprf[0].e()); double nbeta2(sqrt( (_qnewprf[1].e()+_m[2])*(_qnewprf[1].e()-_m[2])) /_qnewprf[1].e()); double nomb1 (sqr(_m[1]/_qnewprf[0].e())/(1.+nbeta1)); double nomb2 (sqr(_m[2]/_qnewprf[1].e())/(1.+nbeta2)); _yfswgt *= YFSFormFactors::exponentialYFSFF(nbeta1,nomb1,nbeta2,nomb2, _qnewprf[0].e(),_qnewprf[1].e(), _m[1],_m[2],s,_charge,_eminrest); // dipole piece // Find the momenta of the particles of original particles in new rest frame Lorentz5Momentum pnew(_bigLdrf.x(),_bigLdrf.y(), _bigLdrf.z(),_bigLdrf.e(),_m[0]); pnew.rescaleEnergy(); SpinOneLorentzRotation boost(pnew.findBoostToCM()); Lorentz5Momentum q1=boost*_qdrf[0]; Lorentz5Momentum q2=boost*_qdrf[1]; // use this to calculate the form factor nbeta1=sqrt( (q1.e()+_m[1])*(q1.e()-_m[1]))/q1.e(); nbeta2=sqrt( (q2.e()+_m[2])*(q2.e()-_m[2]))/q2.e(); nomb1 =sqr(_m[1]/q1.e())/(1.+nbeta1); nomb2 =sqr(_m[2]/q2.e())/(1.+nbeta2); _dipolewgt /=YFSFormFactors::exponentialYFSFF(nbeta1,nomb1,nbeta2,nomb2, q1.e(),q2.e(), _m[1],_m[2],_m[0]*_m[0], _charge,_eminrest); } // weight for option with cut in the rest frame else if(_energyopt==2) { // yfs piece double nbeta1(sqrt( 
(_qnewlab[0].e()+_m[1])*(_qnewlab[0].e()-_m[1])) /_qnewlab[0].e()); double nbeta2(sqrt( (_qnewlab[1].e()+_m[2])*(_qnewlab[1].e()-_m[2])) /_qnewlab[1].e()); double nomb1 (sqr(_m[1]/_qnewlab[0].e())/(1.+nbeta1)); double nomb2 (sqr(_m[2]/_qnewlab[1].e())/(1.+nbeta2)); _yfswgt *= YFSFormFactors::exponentialYFSFF(nbeta1,nomb1,nbeta2,nomb2, _qnewlab[0].e(),_qnewlab[1].e(), _m[1],_m[2],s,_charge,_eminlab); // dipole piece // Find the momenta of the particles of original particles in new rest frame Lorentz5Momentum pnew(_bigLdrf.x(),_bigLdrf.y(), _bigLdrf.z(),_bigLdrf.e(),_m[0]); pnew.rescaleEnergy(); SpinOneLorentzRotation boost(pnew.findBoostToCM()); Lorentz5Momentum q1=boost*_qdrf[0]; Lorentz5Momentum q2=boost*_qdrf[1]; // then boost to the lab boost.setBoost(boostv); q1 *=boost; q2 *=boost; // use this to calculate the form factor nbeta1=sqrt( (q1.e()+_m[1])*(q1.e()-_m[1])) /q1.e(); nbeta2=sqrt( (q2.e()+_m[2])*(q2.e()-_m[2])) /q2.e(); nomb1 =sqr(_m[1]/q1.e())/(1.+nbeta1); nomb2 =sqr(_m[2]/q2.e())/(1.+nbeta2); _dipolewgt /=YFSFormFactors::exponentialYFSFF(nbeta1,nomb1,nbeta2,nomb2, q1.e(),q2.e(),_m[1],_m[2], _m[0]*_m[0],_charge,_eminlab); } // Calculating jacobian weight _jacobianwgt = jacobianWeight(); // Calculate the weight for the corrections _mewgt = meWeight(children); } // otherwise copy momenta else { for(unsigned int ix=0;ix<2;++ix) { _qnewdrf[ix]=_qdrf[ix]; _qnewprf[ix]=_qprf[ix]; _qnewlab[ix]=_qlab[ix]; } _jacobianwgt = 1.0; _yfswgt*=YFSFormFactors::exponentialYFSFF(beta1,ombeta1,beta2,ombeta2, _qdrf[0].e(),_qdrf[1].e(), _m[1],_m[2],_m[0]*_m[0], _charge,_emin); _dipolewgt = 1.0; } double wgt; if(_mode!=5) { // virtual corrections _mewgt += virtualWeight(children); // calculate the weight depending on the option if(_mode==0) wgt = _maxwgt; else if(_mode==1) wgt = _mewgt*_yfswgt*_jacobianwgt*_dipolewgt; else if(_mode==2) wgt = _jacobianwgt*_yfswgt*_dipolewgt; else if(_mode==3) wgt = _yfswgt*_dipolewgt; else wgt = _yfswgt; } // special to test NLO results else { double beta1 = sqrt((_qdrf[0].e()+_m[1])*(_qdrf[0].e()-_m[1]))/_qdrf[0].e(); double beta2 = sqrt((_qdrf[1].e()+_m[2])*(_qdrf[1].e()-_m[2]))/_qdrf[1].e(); double ombeta1 = sqr(_m[1]/_qdrf[0].e())/(1.+beta1); double ombeta2 = sqr(_m[2]/_qdrf[1].e())/(1.+beta2); double yfs = YFSFormFactors::YFSFF(beta1,ombeta1,beta2,ombeta2, _qdrf[0].e(),_qdrf[1].e(), _m[1],_m[2],_m[0]*_m[0], _charge,_emin); double nbar = YFSFormFactors::nbarFF(beta1,ombeta1,beta2,ombeta2,_charge, _emax,_emin,_dipoleopt==1); wgt = 1.+virtualWeight(children)+yfs+nbar*_dipolewgt*_mewgt*_jacobianwgt; } return wgt; } double FFDipole::photon(double beta1,double ombeta1, double beta2,double ombeta2) { // generate the polar angle double r1,r2,costh,sinth,opbc,ombc; // relative weights for the two terms double Pp(log((1+beta2)/ombeta2)); double Pm(log((1+beta1)/ombeta1)); Pp/=(Pp+Pm); // generate the angle double wgt=1.; do { r1=UseRandom::rnd(); r2=UseRandom::rnd(); // 1/(1+bc) branch if(r1<=Pp) { opbc = pow(1.+beta2,r2)*pow(ombeta2,1.-r2); costh = -1./beta2*(1.-opbc); ombc = 1.-beta1*costh; sinth = sqrt(opbc*(2.-opbc)-(1.+beta2)*ombeta2*sqr(costh)); } // 1/(1-bc) branch else { ombc = pow(1.+beta1,1.-r2)*pow(ombeta1,r2); costh = 1./beta1*(1.-ombc); opbc = 1.+beta2*costh; sinth = sqrt(ombc*(2.-ombc)-(1.+beta1)*ombeta1*sqr(costh)); } // wgt for rejection if(_dipoleopt==1) wgt = 1.-0.5/(1.+beta1*beta2)*(ombeta1*(1.+beta1)*opbc/ombc+ ombeta2*(1.+beta2)*ombc/opbc); } while(UseRandom::rnd()>wgt); // generate the polar angle randomly in -pi->+pi double 
phi(-pi+UseRandom::rnd()*2.*pi); // generate the ln(energy) uniformly in ln(_emin)->ln(_emax) Energy en(pow(_emax/_emin,UseRandom::rnd())*_emin); // calculate the weight (omit the pre and energy factors // which would cancel later anyway) if(_dipoleopt==0) wgt = 0.5*(1.+beta1*beta2)/opbc/ombc; else wgt = 0.25*(2.*(1.+beta1*beta2)/opbc/ombc -ombeta1*(1.+beta1)/sqr(ombc) -ombeta2*(1.+beta2)/sqr(opbc)); // store the angles _cosphot.push_back(costh); _sinphot.push_back(sinth); // store the four vector for the photon _ldrf.push_back(Lorentz5Momentum(en*sinth*cos(phi), en*sinth*sin(phi), en*costh,en, ZERO)); // add the photon momentum to the total _bigLdrf+=_ldrf.back(); // return the weight return wgt; } double FFDipole::meWeight(const ParticleVector & children) { if(_multiplicity==0) return 1.; // option which does nothing if(_betaopt==0) { return 1.; } // collinear approx else if(_betaopt <= 3) { return collinearWeight(children); } else if (_betaopt == 4 ) { if(_decayer&&_decayer->hasRealEmissionME()) { double outwgt=1.; // values of beta etc to evaluate the dipole double beta1(sqrt( (_qnewdrf[0].e()+_m[1])*(_qnewdrf[0].e()-_m[1]))/ _qnewdrf[0].e()); double beta2(sqrt( (_qnewdrf[1].e()+_m[2])*(_qnewdrf[1].e()-_m[2]))/ _qnewdrf[1].e()); double ombeta1(sqr(_m[1]/_qnewdrf[0].e())/(1.+beta1)); double ombeta2(sqr(_m[2]/_qnewdrf[1].e())/(1.+beta2)); // storage of the weights ParticleVector ptemp; for(unsigned int ix=0;ix<children.size();++ix) ptemp.push_back(new_ptr(Particle(children[ix]->dataPtr()))); ptemp.push_back(new_ptr(Particle(getParticleData(ParticleID::gamma)))); for(unsigned int i=0;i<_multiplicity;++i) { PPtr new_parent = new_ptr(Particle(*_parent)); if(_photcut[i]) continue; // compute the angle terms // if cos is greater than zero use result accurate as cos->1 double opbc,ombc; if(_cosphot[i]>0) { opbc=1.+beta2*_cosphot[i]; ombc=ombeta1+beta1*sqr(_sinphot[i])/(1.+_cosphot[i]); } // if cos is less than zero use result accurate as cos->-1 else { opbc=ombeta2+beta2*sqr(_sinphot[i])/(1.-_cosphot[i]); ombc=1.-beta1*_cosphot[i]; } // dipole factor for denominator double dipole = 2./opbc/ombc*(1.+beta1*beta2 -0.5*ombeta1*(1.+beta1)*opbc/ombc -0.5*ombeta2*(1.+beta2)*ombc/opbc); // energy and momentum of the photon Energy L0(_ldrf[i].e()),modL(_ldrf[i].rho()); // 3-momenta of charged particles Energy modq(_qdrf[0].rho()); // calculate the energy of the fermion pair Energy newE12(-L0+sqrt(sqr(_m[0])+sqr(modL))); // 3-momentum rescaling factor (NOT energy rescaling). double kappa(Kinematics::pstarTwoBodyDecay(newE12,_m[1],_m[2])/modq); // calculate the rescaled momenta Lorentz5Momentum porig[3]; for(unsigned int ix=0;ix<2;++ix) { porig[ix] = kappa*_qdrf[ix]; porig[ix].setMass(_m[ix+1]); porig[ix].rescaleEnergy(); } porig[2] = _ldrf[i]; // calculate the momentum of the decaying particle in dipole rest frame Lorentz5Momentum pnew(_ldrf[i].x(),_ldrf[i].y(), _ldrf[i].z(),_ldrf[i].e(),_m[0]); pnew.rescaleEnergy(); // Find the momenta of the particles in the rest frame of the parent... // First get the boost from the parent particle Boost boost = pnew.findBoostToCM(); LorentzRotation rot1(-boost, pnew.e()/pnew.mass()); // check the photon energy Lorentz5Momentum ptest = _ldrf[i]; ptest.boost(boost); if(_energyopt==1&&ptest.e()<_eminrest) continue; new_parent->transform(rot1); // rotation to put the emitter along the z axis // first particle emits unsigned int iemit = _cosphot[i]>0. ? 
0 : 1; LorentzRotation rot2; rot2.setRotateZ(-porig[iemit].phi()); rot2.rotateY(porig[iemit].theta()); rot2.rotateZ(porig[iemit].phi()); rot2.invert(); // Boost the momenta of the charged particles for(unsigned int ix=0;ix<3;++ix) { porig[ix].transform(rot2); ptemp[ix]->set5Momentum(porig[ix]); } new_parent->transform(rot2); if(_cosphot[i]>0.) { outwgt -= _decayer-> realEmissionME(_decayer->imode(),*new_parent,ptemp, 0,_cosphot[i],_sinphot[i],rot1,rot2)/ (_charge/sqr(_ldrf[i].e())*dipole); } else { outwgt -= _decayer-> realEmissionME(_decayer->imode(),*new_parent,ptemp, 1,-_cosphot[i],_sinphot[i],rot1,rot2)/ (_charge/sqr(_ldrf[i].e())*dipole); } rot1.invert(); rot2.invert(); new_parent->transform(rot2); new_parent->transform(rot1); } return outwgt; } else return collinearWeight(children); } return 1.; } double FFDipole::collinearWeight(const ParticleVector & children) { double outwgt=1.; // spins of the decay products PDT::Spin spin1(children[0]->dataPtr()->iSpin()); PDT::Spin spin2(children[1]->dataPtr()->iSpin()); // values of beta etc to evaluate the dipole double beta1(sqrt( (_qnewdrf[0].e()+_m[1])*(_qnewdrf[0].e()-_m[1]))/ _qnewdrf[0].e()); double beta2(sqrt( (_qnewdrf[1].e()+_m[2])*(_qnewdrf[1].e()-_m[2]))/ _qnewdrf[1].e()); double ombeta1(sqr(_m[1]/_qnewdrf[0].e())/(1.+beta1)); double ombeta2(sqr(_m[2]/_qnewdrf[1].e())/(1.+beta2)); // storage of the weights double twgt,dipole; double opbc,ombc; // compute the collinear approx for(unsigned int i=0;i<_multiplicity;++i) { if(_photcut[i]) continue; // compute the angle terms // if cos is greater than zero use result accurate as cos->1 if(_cosphot[i]>0) { opbc=1.+beta2*_cosphot[i]; ombc=ombeta1+beta1*sqr(_sinphot[i])/(1.+_cosphot[i]); } // if cos is less than zero use result accurate as cos->-1 else { opbc=ombeta2+beta2*sqr(_sinphot[i])/(1.-_cosphot[i]); ombc=1.-beta1*_cosphot[i]; } // dipole factor for denominator dipole = 2.*(1.+beta1*beta2 -0.5*ombeta1*(1.+beta1)*opbc/ombc -0.5*ombeta2*(1.+beta2)*ombc/opbc); twgt=0.; // correction for the first particle double ratio(_ldrf[i].e()/_qnewdrf[0].e()); if(spin1==PDT::Spin0) twgt += 0.; else if(spin1==PDT::Spin1Half) twgt += opbc*ratio/(1.+(1.+beta1*beta2)/ratio/opbc); else twgt += 2.*sqr(opbc*ratio) * (+1./(1+beta1*beta2+_ldrf[i].e()/_qnewdrf[1].e()*ombc) +(1.+beta1*beta2)/sqr(1.+beta1*beta2 +_ldrf[i].e()/_qnewdrf[0].e()*opbc)); // correction for the second particle ratio =_ldrf[i].e()/_qnewdrf[1].e(); if(spin2==PDT::Spin0) twgt += 0.; else if(spin2==PDT::Spin1Half) twgt += ombc*ratio/(1.+(1.+beta1*beta2)/ratio/ombc); else twgt += 2.*sqr(ombc*ratio) * (1./(1. + beta1*beta2 + _ldrf[i].e()/_qnewdrf[0].e()*opbc) + (1.+beta1*beta2) / sqr(1. + beta1*beta2 + _ldrf[i].e()/_qnewdrf[1].e()*ombc)); twgt/=dipole; outwgt+=twgt; } return outwgt; } bool FFDipole::boostMomenta(const Boost & boostv) { // total energy and momentum of photons Energy L0(_bigLdrf.e()),modL(_bigLdrf.rho()); // 3-momenta of charged particles Energy modq(_qdrf[0].rho()); // calculate the energy of the fermion pair Energy newE12(-L0+sqrt(_m[0]*_m[0]+modL*modL)); // check this is allowed if(newE12<_m[1]+_m[2]){return false;} // 3-momentum rescaling factor (NOT energy rescaling). 
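// A sketch of where this rescaling comes from (assuming pstarTwoBodyDecay(W,m1,m2)
// returns the usual two-body momentum sqrt((W^2-(m1+m2)^2)*(W^2-(m1-m2)^2))/(2W)):
// in the dipole rest frame the parent carries 3-momentum _bigLdrf, so the charged
// pair has energy newE12 = sqrt(M^2+|L|^2)-L0 and zero total 3-momentum, making
// newE12 also the pair's invariant mass after radiation.  Before radiation the
// back-to-back momenta had magnitude modq = pstar(M,m1,m2); afterwards they must
// have magnitude pstar(newE12,m1,m2), hence the common factor
//   kappa = pstar(newE12,m1,m2)/modq
// applied to both momenta below, with the energies then recomputed from the
// unchanged masses via rescaleEnergy().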
double kappa(Kinematics::pstarTwoBodyDecay(newE12,_m[1],_m[2])/modq); // calculate the rescaled momenta for(unsigned int ix=0;ix<2;++ix) { _qnewdrf[ix] = kappa*_qdrf[ix]; _qnewdrf[ix].setMass(_m[ix+1]); _qnewdrf[ix].rescaleEnergy(); } // calculate the momentum of the decaying particle in dipole rest frame Lorentz5Momentum pnew(_bigLdrf.x(),_bigLdrf.y(), _bigLdrf.z(),_bigLdrf.e(),_m[0]); pnew.rescaleEnergy(); // Find the momenta of the particles in the rest frame // of the parent... // First get the boost from the parent particle SpinOneLorentzRotation boost(pnew.findBoostToCM()); // Boost the momenta of the charged particles for(unsigned int ix=0;ix<2;++ix) _qnewprf[ix]=boost*_qnewdrf[ix]; // Boost the total photon momentum _bigLprf=boost*_bigLdrf; // Boost the individual photon momenta for(unsigned int ix=0;ix<_multiplicity;++ix){_lprf[ix]=boost*_ldrf[ix];} // Now boost from the parent rest frame to the lab frame boost.setBoost(boostv); // Boosting charged particles for(unsigned int ix=0;ix<2;++ix){_qnewlab[ix]=boost*_qnewprf[ix];} // Boosting total photon momentum _bigLlab=boost*_bigLprf; // Boosting individual photon momenta for(unsigned int ix=0;ix<_multiplicity;++ix){_llab[ix]=boost*_lprf[ix];} return true; } unsigned int FFDipole::removePhotons() { unsigned int nremoved(0); // apply the cut in the rest frame if(_energyopt==1) { for(unsigned int ix=0;ix<_multiplicity;++ix) { if(_lprf[ix].e()<_eminrest) { ++nremoved; _photcut[ix]=true; _bigLdrf-=_ldrf[ix]; _ldrf[ix]=Lorentz5Momentum(); } } } // apply the cut in the lab frame else if(_energyopt==2) { for(unsigned int ix=0;ix<_multiplicity;++ix) { if(_llab[ix].e()<_eminlab) { ++nremoved; _photcut[ix]=true; _bigLdrf-=_ldrf[ix]; _ldrf[ix]=Lorentz5Momentum(); } } } // correction factor for dipoles if needed if(_dipoleopt==0&&nremoved!=0) { // calculate the velocities of the charged particles (crude/overvalued) double beta1(sqrt((_qdrf[0].e()+_m[1])*(_qdrf[0].e()-_m[1]))/_qdrf[0].e()); double beta2(sqrt((_qdrf[1].e()+_m[2])*(_qdrf[1].e()-_m[2]))/_qdrf[1].e()); // calculate 1-beta to avoid numerical problems double ombeta1(sqr(_m[1]/_qdrf[0].e())/(1.+beta1)); double ombeta2(sqr(_m[2]/_qdrf[1].e())/(1.+beta2)); // calculate the weights for(unsigned int ix=0;ix<_multiplicity;++ix) { if(_photcut[ix]) _dipolewgt *= exactDipoleWeight(beta1,ombeta1,beta2,ombeta2,ix)/_photonwgt[ix]; } } // return number of remove photons return nremoved; } double FFDipole::virtualWeight(const ParticleVector & children) { double output = 0.; // Virtual corrections for beta_0: // These should be zero for the scalar case as there is no // collinear singularity going by the dipoles above... // Use mass of decaying particle... if(_betaopt==2) { if((children[0]->dataPtr()->iSpin())==2&& (children[1]->dataPtr()->iSpin())==2 ) { output += (1.0*YFSFormFactors::_alpha/pi) * log(sqr(_m[0]/_m[1])); } } // OR Use invariant mass of final state children... 
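// Rough size of the term added above (illustrative numbers only): it is the
// collinear logarithm log(M^2/m1^2) scaled by alpha/pi, so for a Z -> e+e- type
// decay with M ~ 91 GeV and m1 ~ 0.511 MeV one finds roughly 2.3e-3 * 24 ~ 0.06.
// The initial-final dipole in IFDipole.cc below keeps only 0.5*alpha/pi times the
// same logarithm, presumably because only one of its final-state legs is charged.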
else if(_betaopt==3) { if((children[0]->dataPtr()->iSpin())==2&& (children[1]->dataPtr()->iSpin())==2 ) { output += (1.0*YFSFormFactors::_alpha/pi) * log((_qnewprf[0]+_qnewprf[1]).m2()/sqr(_m[1])); } } else if (_betaopt==4) { if(_decayer&&_decayer->hasOneLoopME()) { output += _decayer->oneLoopVirtualME(_decayer->imode(),*_parent, children); } else { output += (1.0*YFSFormFactors::_alpha/pi) * log(sqr(_m[0]/_m[1])); } } return output; } void FFDipole::dofinish() { Interfaced::dofinish(); if(_weightOutput) { _wgtsum /= double(_nweight); _wgtsq /= double(_nweight); _wgtsq = max(_wgtsq - sqr(_wgtsum),0.); _wgtsq /= double(_nweight); _wgtsq = sqrt(_wgtsq); generator()->log() << "The average weight for QED Radiation in " << fullName() << " was " << _wgtsum << " +/- " << _wgtsq << '\n'; } } diff --git a/Decay/Radiation/IFDipole.cc b/Decay/Radiation/IFDipole.cc --- a/Decay/Radiation/IFDipole.cc +++ b/Decay/Radiation/IFDipole.cc @@ -1,706 +1,706 @@ // -*- C++ -*- // // IFDipole.cc is a part of Herwig - A multi-purpose Monte Carlo event generator // Copyright (C) 2002-2011 The Herwig Collaboration // // Herwig is licenced under version 2 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. // // // This is the implementation of the non-inlined, non-templated member // functions of the IFDipole class. // #include "IFDipole.h" #include "ThePEG/PDT/EnumParticles.h" #include "ThePEG/EventRecord/Particle.h" #include "ThePEG/Interface/Parameter.h" #include "ThePEG/Interface/Switch.h" #include "ThePEG/Interface/ClassDocumentation.h" #include "ThePEG/Persistency/PersistentOStream.h" #include "ThePEG/Persistency/PersistentIStream.h" #include "ThePEG/Helicity/WaveFunction/SpinorWaveFunction.h" using namespace ThePEG::Helicity; using namespace Herwig; void IFDipole::persistentOutput(PersistentOStream & os) const { os << _alpha << ounit(_emin,GeV) << _maxwgt << _mode << _maxtry << _energyopt << _betaopt; } void IFDipole::persistentInput(PersistentIStream & is, int) { is >> _alpha >> iunit(_emin,GeV) >> _maxwgt >> _mode >> _maxtry >> _energyopt >> _betaopt; } ClassDescription<IFDipole> IFDipole::initIFDipole; // Definition of the static class description member. 
void IFDipole::Init() { static ClassDocumentation<IFDipole> documentation ("The IFDipole class implements the initial-final dipole for the SOPTHY algorithm"); static Switch<IFDipole,unsigned int> interfaceUnWeight ("UnWeight", "Control the type of unweighting to perform, only one should be used the" " other options are for debugging purposes.", &IFDipole::_mode, 1, false, false); static SwitchOption interfaceUnWeightNoUnweighting (interfaceUnWeight, "NoUnweighting", "Perform no unweighting", 0); static SwitchOption interfaceUnWeightAllWeights (interfaceUnWeight, "AllWeights", "Include all the weights", 1); static SwitchOption interfaceUnWeightNoJacobian (interfaceUnWeight, "NoJacobian", "Only include the dipole and YFS weights", 2); static SwitchOption interfaceUnWeightDipole (interfaceUnWeight, "Dipole", "Only include the dipole weight", 3); static SwitchOption interfaceUnWeightYFS (interfaceUnWeight, "YFS", "Only include the YFS weight", 4); static Parameter<IFDipole,unsigned int> interfaceMaximumTries ("MaximumTries", "Maximum number of attempts to unweight", &IFDipole::_maxtry, 500, 10, 100000, false, false, Interface::limited); static Parameter<IFDipole,Energy> interfaceMinimumEnergyRest ("MinimumEnergyRest", "The minimum energy of the photons in the rest frame of the decaying particle", &IFDipole::_emin, MeV, 1.*MeV, ZERO, 10000.0*MeV, false, false, Interface::limited); static Parameter<IFDipole,double> interfaceMaximumWeight ("MaximumWeight", "The maximum weight for unweighting", &IFDipole::_maxwgt, 2.0, 0.0, 100.0, false, false, Interface::limited); static Switch<IFDipole,unsigned int> interfaceEnergyCutOff ("EnergyCutOff", "The type of cut-off on the photon energy to apply", &IFDipole::_energyopt, 1, false, false); static SwitchOption interfaceEnergyCutOffRestFrame (interfaceEnergyCutOff, "RestFrame", "Apply cut-off in rest frame", 1); static SwitchOption interfaceEnergyCutOff2 (interfaceEnergyCutOff, "LabFrame", "Apply cut-off in lab frame", 2); static Switch<IFDipole,unsigned int> interfaceBetaOption ("BetaOption", "Option for the inclusive of the higher beta coefficients", &IFDipole::_betaopt, 1, false, false); static SwitchOption interfaceBetaOptionNone (interfaceBetaOption, "None", "No higher betas included", 0); static SwitchOption interfaceBetaOptionCollinear (interfaceBetaOption, "Collinear", "Include the collinear approx", 1); static SwitchOption interfaceBetaOptionCollinearVirtA (interfaceBetaOption, "CollinearVirtualA", "Include the collinear approx with virtual corrections", 2); static SwitchOption interfaceBetaOptionCollinearVirtB (interfaceBetaOption, "CollinearVirtualB", "Include the collinear approx with virtual corrections", 3); static SwitchOption interfaceBetaOptionExact (interfaceBetaOption, "Exact", "Include the exact higher order terms if available", 4); } ParticleVector IFDipole::generatePhotons(const Particle & p,ParticleVector children) { // set parameters which won't change in the event loop // masses of the particles _m[0] = p.mass(); _m[1] = children[0]->mass(); _m[2] = children[1]->mass(); // momenta before radiation in lab for(unsigned int ix=0;ix<2;++ix){_qlab[ix]=children[ix]->momentum();} // get the charges of the particles in units of the positron charge // chrg1 is the charge of the parent and chrg2 is the charge of the // charged child. 
Also we create a map between the arguments of // _q???[X] _m[X] etc so that // _q???[_map[0]] and _m[_map[0]] are the momenta and masses of // the charged child while // _q???[_map[1]] and _m[_map[1]] are the momenta and masses of // the neutral child. _chrg1 = p.dataPtr()->iCharge()/3.0; if(children[1]->dataPtr()->iCharge()/3.0==0.0) { _chrg2 = children[0]->dataPtr()->iCharge()/3.0; _map[0] = 0; _map[1] = 1; } else if(children[0]->dataPtr()->iCharge()/3.0==0.0) { _chrg2 = children[1]->dataPtr()->iCharge()/3.0; _map[0] = 1; _map[1] = 0; } // check the radiating particle is not massless // if(children[1]->mass()< if(children[_map[0]]->mass()<1e-4*GeV) { ostringstream message; message << "IFDipole::generatePhotons() trying to generate QED radiation from " << children[_map[0]]->dataPtr()->PDGName() << "\n with mass " << children[_map[0]]->mass()/GeV << "which is much smaller than the mass of the electron.\n" << "This is probably due to reading events from a LHEF,\nskipping radiation in this case.\n"; generator()->logWarning( Exception(message.str(), Exception::warning)); return children; } // boost the momenta to the rest frame Boost boostv(p.momentum().boostVector()); // boost the particles to the parent rest frame // and set the initial momenta of the charged particles // in the dipole rest frame: currently this is the same // as the boson rest frame... for(unsigned int ix=0;ix<2;++ix) { // KMH - 08/11/05 - This used to be boostv instead of -boostv // -boostv is the boost from the lab to the parent rest frame // whereas boostv goes the other way!!! children[ix]->deepBoost(-boostv); _qprf[ix]=children[ix]->momentum(); } // perform the unweighting double wgt; unsigned int ntry(0); do { wgt =makePhotons(boostv,children); ++ntry; // Record warnings about large and weird weights in the .log file. - if(wgt>_maxwgt||wgt<0.0||isnan(wgt)) { + if(wgt>_maxwgt||wgt<0.0||std::isnan(wgt)) { generator()->log() << "IFDipole.cc:\n"; if(wgt>_maxwgt) { generator()->log() << "Weight exceeds maximum for decay!\n"; } if(wgt<0.0) { generator()->log() << "Weight is negative! \n"; } - if(isnan(wgt)) { + if(std::isnan(wgt)) { generator()->log() << "Weight is NAN! 
\n"; wgt = 0.; } generator()->log() << p.PDGName() << " " << children[0]->PDGName() << " " << children[1]->PDGName() << endl << " Current Maximum = " << _maxwgt << endl << " Current Weight = " << wgt << endl; generator()->log() << "Photon Multiplicity : " << _multiplicity << endl << "Original Parent rest frame momenta: " << endl << "charged child: " << ounit(_qprf[_map[0]],GeV) << endl << "neutral child: " << ounit(_qprf[_map[1]],GeV) << endl << "Parent rest frame momenta: " << endl << "charged child: " << ounit(_qnewprf[_map[0]],GeV)<< endl << "neutral child: " << ounit(_qnewprf[_map[1]],GeV)<< endl << "photons : " << ounit(_bigLprf,GeV) << endl << "Weights : " << endl << "_dipolewgt : " << _dipolewgt << endl << "_yfswgt : " << _yfswgt << endl << "_jacobianwgt : " << _jacobianwgt << endl << "_mewgt : " << _mewgt << endl; for(unsigned int ct=0;ct<_multiplicity;ct++) { generator()->log() << "_cosphot[" << ct << "]: " << _cosphot[ct] << endl; generator()->log() << "_sinphot[" << ct << "]: " << _sinphot[ct] << endl; } if(wgt>_maxwgt) { if(wgt<15.0) { generator()->log() << "Resetting maximum weight" << endl << " New Maximum = " << wgt << endl; _maxwgt=wgt; } else { generator()->log() << "Maximum weight set to limit (15)" << endl; _maxwgt=15.0; } } } } while (wgt<(_maxwgt*UseRandom::rnd()) && ntry<_maxtry); if(ntry>=_maxtry) { generator()->log() << "IFDipole Failed to generate QED radiation for the decay " << p.PDGName() << " -> " << children[0]->PDGName() << " " << children[1]->PDGName() << endl; return children; } // produce products after radiation if needed if(_multiplicity>0) { // change the momenta of the children, they are currently // in parent rest frame for(unsigned int ix=0;ix<2;++ix) { LorentzRotation boost(solveBoost(_qnewprf[ix],children[ix]->momentum())); children[ix]->deepTransform(boost); // boost back to the lab // KMH - 08/11/05 - This used to be -boostv instead of boostv // -boostv is the boost from the lab to the parent rest frame // whereas boostv goes the other way!!! children[ix]->deepBoost(boostv); } // add the photons to the event record tcPDPtr photon=getParticleData(ParticleID::gamma); for(unsigned int ix=0;ix<_multiplicity;++ix) { PPtr newphoton=new_ptr(Particle(photon)); newphoton->set5Momentum(_llab[ix]); children.push_back(newphoton); } return children; } // otherwise just return the orginial particles // boosted back to lab else { for(unsigned int ix=0;ix<children.size();++ix) children[ix]->deepBoost(boostv); return children; } } // member which generates the photons double IFDipole::makePhotons(Boost boostv,ParticleVector children) { // set the initial parameters // number of photons (zero) _multiplicity=0; // zero size of photon vectors _lprf.clear(); _llab.clear(); // zero size of angle storage _sinphot.clear(); _cosphot.clear(); // zero total momenta of the photons _bigLprf=Lorentz5Momentum(); // set the initial values of the reweighting factors to one _dipolewgt = 1.0; _yfswgt = 1.0; _jacobianwgt = 1.0; _mewgt = 1.0; // set the maximum photon energy (exact - no approximations here). 
double boost_factor = 1.0; _emax=(0.5*(_m[0]-sqr(_m[1]+_m[2])/_m[0]))*boost_factor; // calculate the velocities of the children (crude/overvalued) double beta1(sqrt( (_qprf[_map[0]].e()+_m[_map[0]+1]) *(_qprf[_map[0]].e()-_m[_map[0]+1]) ) /_qprf[_map[0]].e()); double beta2(sqrt( (_qprf[_map[1]].e()+_m[_map[1]+1]) *(_qprf[_map[1]].e()-_m[_map[1]+1]) ) /_qprf[_map[1]].e()); // calculate 1-beta to avoid numerical problems double ombeta1(sqr(_m[_map[0]+1]/_qprf[_map[0]].e())/(1.+beta1)); double ombeta2(sqr(_m[_map[1]+1]/_qprf[_map[1]].e())/(1.+beta2)); // calculate the average photon multiplicity double aver(nbar(beta1,ombeta1)); // calculate the number of photons using the poisson _multiplicity = UseRandom::rndPoisson(aver); // calculate the first part of the YFS factor _yfswgt/=crudeYFSFormFactor(beta1,ombeta1); // generate the photon momenta with respect to q1 // keeping track of the weight double dipoles(1.); for(unsigned int ix=0;ix<_multiplicity;++ix) { dipoles *= photon(beta1,ombeta1); } // calculate contributions to the dipole weights so far _dipolewgt /=dipoles; // now do the momentum reshuffling Lorentz5Momentum pmom(ZERO,ZERO,ZERO,_m[0],_m[0]); if(_multiplicity>0) { // total energy and momentum of photons Energy L0(_bigLprf.e()),modL(_bigLprf.rho()); // squared invariant mass of final state fermions... Energy2 m122 = sqr(_m[0]-L0)-sqr(modL); if(m122<sqr(_m[1]+_m[2])) return 0.; // 3-momenta of charged particles Energy modq(_qprf[_map[0]].rho()); // total photon momentum perpendicular to charged child... Energy LT(_bigLprf.perp()); // kallen function... Energy4 kallen = ( m122 - sqr(_m[1]+_m[2]) ) * ( m122 - sqr(_m[1]-_m[2]) ); // discriminant of rho... Energy4 droot = kallen-4.*sqr(_m[_map[0]+1]*LT); if(droot<ZERO) return 0.; double disc = (_m[0]-L0) * sqrt(droot) / (2.*modq*(m122+LT*LT)); // calculate the energy rescaling factor double rho = disc-_bigLprf.z() * (m122+sqr(_m[_map[0]+1])-sqr(_m[_map[1]+1])) / (2.*modq*(m122+LT*LT)); // calculate the rescaled charged child momentum _qnewprf[_map[0]]=rho*_qprf[_map[0]]; _qnewprf[_map[0]].setMass(_m[_map[0]+1]); _qnewprf[_map[0]].rescaleEnergy(); // rotate the photons so in parent rest frame rather // than angle measured w.r.t q1 first work out the rotation SpinOneLorentzRotation rotation; rotation.setRotateZ(-_qprf[_map[0]].phi()); rotation.rotateY(_qprf[_map[0]].theta()); rotation.rotateZ(_qprf[_map[0]].phi()); // rotate the total _bigLprf*=rotation; // rotate the photons for(unsigned int ix=0;ix<_multiplicity;++ix){_lprf[ix]*=rotation;} // calculate the rescaled neutral child momentum _qnewprf[_map[1]]=pmom-_qnewprf[_map[0]]-_bigLprf; _qnewprf[_map[1]].setMass(_m[_map[1]+1]); _qnewprf[_map[1]].rescaleEnergy(); // calculate the new dipole weight // Note this (weight) is Lorentz invariant // calculate velocities and 1-velocites beta1=sqrt( (_qnewprf[_map[0]].e()+_m[_map[0]+1]) *(_qnewprf[_map[0]].e()-_m[_map[0]+1])) /_qnewprf[_map[0]].e(); beta2=sqrt( (_qnewprf[_map[1]].e()+_m[_map[1]+1]) *(_qnewprf[_map[1]].e()-_m[_map[1]+1])) /_qnewprf[_map[1]].e(); ombeta1=sqr(_m[_map[0]+1]/_qnewprf[_map[0]].e())/(1.+beta1); ombeta2=sqr(_m[_map[1]+1]/_qnewprf[_map[1]].e())/(1.+beta2); for(unsigned int ix=0;ix<_multiplicity;++ix) {_dipolewgt*=exactDipoleWeight(beta1,ombeta1,ix);} // calculate the second part of the yfs form factor _yfswgt*=exactYFSFormFactor(beta1,ombeta1,beta2,ombeta2); // Now boost from the parent rest frame to the lab frame SpinOneLorentzRotation boost(boostv); // Boosting charged particles for(unsigned int 
ix=0;ix<2;++ix){_qnewlab[ix]=boost*_qnewprf[ix];} // Boosting total photon momentum _bigLlab=boost*_bigLprf; // Boosting individual photon momenta for(unsigned int ix=0;ix<_multiplicity;++ix) {_llab.push_back(boost*_lprf[ix]);} // Calculating jacobian weight _jacobianwgt = jacobianWeight(); // Calculating beta^1 weight _mewgt = meWeight(children); // Apply phase space vetos... if(kallen<(4.*sqr(_m[_map[0]+1]*LT))||m122<sqr(_m[1]+_m[2])||rho<0.0) { // generator()->log() << "Outside Phase Space" << endl; // generator()->log() << "Photon Multiplicity: " // << _multiplicity << endl // << "Original Parent rest frame momenta: " << endl // << "charged child: " << _qprf[_map[0]] << endl // << "neutral child: " << _qprf[_map[1]] << endl // << "rescaling : " << rho << endl // << "Parent rest frame momenta: " << endl // << "charged child: " << _qnewprf[_map[0]] << endl // << "neutral child: " << _qnewprf[_map[1]] << endl // << "photons : " << _bigLprf << endl // << endl; _dipolewgt = 0.0 ; _yfswgt = 0.0 ; _jacobianwgt = 0.0 ; _mewgt = 0.0 ; } _qprf[_map[0]].rescaleEnergy(); _qprf[_map[1]].rescaleEnergy(); _qnewprf[_map[0]].rescaleEnergy(); _qnewprf[_map[1]].rescaleEnergy(); if( ((abs(_m[0]-_bigLprf.e()-_qnewprf[0].e()-_qnewprf[1].e())>0.00001*MeV)|| (abs( _bigLprf.x()+_qnewprf[0].x()+_qnewprf[1].x())>0.00001*MeV)|| (abs( _bigLprf.y()+_qnewprf[0].y()+_qnewprf[1].y())>0.00001*MeV)|| (abs( _bigLprf.z()+_qnewprf[0].z()+_qnewprf[1].z())>0.00001*MeV)) &&(_dipolewgt*_jacobianwgt*_yfswgt*_mewgt>0.0)) { Lorentz5Momentum ptotal = _bigLprf+_qnewprf[0]+_qnewprf[1]; ptotal.setE(ptotal.e()-_m[0]); generator()->log() << "Warning! Energy Not Conserved! tol = 0.00001 MeV" << "\nwgt = " << _dipolewgt*_yfswgt*_jacobianwgt*_mewgt << "\nrho = " << rho << "\nmultiplicity = " << _multiplicity << "\n_qprf[_map[0]] = " << _qprf[_map[0]]/GeV << "\n_qprf[_map[1]] = " << _qprf[_map[1]]/GeV << "\n_qnewprf[_map[0]] = " << _qnewprf[_map[0]]/GeV << " " << _qnewprf[_map[0]].m()/GeV << " " << _m[_map[0]+1]/GeV << "\n_qnewprf[_map[1]] = " << _qnewprf[_map[1]]/GeV << " " << _qnewprf[_map[1]].m()/GeV << " " << _m[_map[1]+1]/GeV << "\n_bigLprf = " << _bigLprf/GeV << "\n_bigLprf.m2() = " << _bigLprf.m2()/GeV2 << "\n_total out -in = " << ptotal/GeV << "\nRejecting Event. " << "\n"; _dipolewgt = 0.0 ; _yfswgt = 0.0 ; _jacobianwgt = 0.0 ; _mewgt = 0.0 ; } } // otherwise copy momenta else { for(unsigned int ix=0;ix<2;++ix) { _qnewprf[ix]=_qprf[ix]; _qnewlab[ix]=_qlab[ix]; } _jacobianwgt = 1.0; // calculate the second part of the yfs form factor _yfswgt*=exactYFSFormFactor(beta1,ombeta1,beta2,ombeta2); _dipolewgt = 1.0; } // Virtual corrections for beta_0: // These should be zero for the scalar case as there is no // collinear singularity going by the dipoles above... // Use mass of decaying particle... if(_betaopt==2) { if((children[_map[0]]->dataPtr()->iSpin())==2) { _mewgt += (0.5*_alpha/pi) * log(sqr(_m[0] /_m[_map[0]+1]) ); } } // OR Use invariant mass of final state children... 
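// Illustrative note on the size of the _betaopt==2 term above: for a spin-1/2
// charged child it is the leading collinear logarithm (alpha/2pi)*log(M^2/m^2).
// A rough standalone estimate (the numbers are hypothetical, tau/mu-like masses
// in GeV, and are not taken from the code):
//
//   double alpha = 1.0/137.036;
//   double corr  = 0.5*alpha/M_PI * std::log( (1.777*1.777)/(0.1057*0.1057) );
//   // corr ~ 0.0066, i.e. a correction at the sub-per-cent level
//
// The _betaopt==3 branch that follows uses the invariant mass of the two
// children in place of the parent mass _m[0].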
if(_betaopt==3) { if((children[_map[0]]->dataPtr()->iSpin())==2) { _mewgt += (0.5*_alpha/pi) * log((_qnewprf[0]+_qnewprf[1]).m2() /sqr(_m[_map[0]+1]) ); } } // calculate the weight depending on the option double wgt; if(_mode==0){wgt=_maxwgt;} else if(_mode==1){wgt=_mewgt*_jacobianwgt*_yfswgt*_dipolewgt;} else if(_mode==2){wgt=_jacobianwgt*_yfswgt*_dipolewgt;} else if(_mode==3){wgt=_yfswgt*_dipolewgt;} else {wgt=_yfswgt;} return wgt; } double IFDipole::photon(double beta1,double ombeta1) { // generate the azimuthal angle randomly in -pi->+pi double phi(-pi+UseRandom::rnd()*2.*pi); // generate the polar angle double r(UseRandom::rnd()); double costh,sinth,ombc; ombc = pow(1.+beta1,1.-r)*pow(ombeta1,r); costh = 1./beta1*(1.-ombc); sinth = sqrt(ombc*(2.-ombc)-(1.+beta1)*ombeta1*sqr(costh)); // generate the ln(energy) uniformly in ln(_emin)->ln(_emax) Energy energy = pow(_emax/_emin,UseRandom::rnd())*_emin; // calculate the weight (omit the pre and energy factors // which would cancel later anyway) double wgt = 2./ombc; // store the angles _cosphot.push_back(costh); _sinphot.push_back(sinth); // store the four vector for the photon _lprf.push_back(Lorentz5Momentum(energy*sinth*cos(phi),energy*sinth*sin(phi), energy*costh,energy,ZERO)); // add the photon momentum to the total _bigLprf+=_lprf.back(); // return the weight return wgt; } double IFDipole::meWeight(ParticleVector children) { unsigned int spin = children[_map[0]]->dataPtr()->iSpin(); double mewgt = 1.0; double beta1=sqrt( (_qnewprf[_map[0]].e()+_m[_map[0]+1]) *(_qnewprf[_map[0]].e()-_m[_map[0]+1])) /_qnewprf[_map[0]].e(); double ombeta1=sqr(_m[_map[0]+1]/_qnewprf[_map[0]].e())/(1.+beta1); // option which does nothing if(_betaopt==0){mewgt=1.;} // collinear approx else if(_betaopt==1||_betaopt==2||_betaopt==3) { double ombc; InvEnergy2 dipole; for(unsigned int i=0;i<_multiplicity;++i) { double opbc; if(_cosphot[i]<0.0) { opbc=ombeta1+beta1*sqr(_sinphot[i])/(1.-_cosphot[i]); } // if cos is greater than zero use result accurate as cos->-1 else { opbc=1.+beta1*_cosphot[i]; } // if cos is greater than zero use result accurate as cos->1 if(_cosphot[i]>0.0) { ombc=ombeta1+beta1*sqr(_sinphot[i])/(1.+_cosphot[i]); } // if cos is less than zero use result accurate as cos->-1 else { ombc=1.-beta1*_cosphot[i]; } if(((_qnewprf[_map[0]].z()>ZERO)&&(_qprf[_map[0]].z()<ZERO))|| ((_qnewprf[_map[0]].z()<ZERO)&&(_qprf[_map[0]].z()>ZERO))) { dipole = sqr(beta1*_sinphot[i]/(opbc*_lprf[i].e())); } else { dipole = sqr(beta1*_sinphot[i]/(ombc*_lprf[i].e())); } // here "dipole" is the exact dipole function divided by alpha/4pi^2. 
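// Illustrative note on the opbc/ombc branches above: they avoid the numerical
// cancellation in 1 -/+ beta*cos(theta) for an ultra-relativistic charged child.
// With 1 - beta = ombeta1 and sin^2 = (1-cos)(1+cos),
//   1 - beta*cos = ombeta1 + beta*(1-cos) = ombeta1 + beta*sin^2/(1+cos)
//   1 + beta*cos = ombeta1 + beta*(1+cos) = ombeta1 + beta*sin^2/(1-cos)
// so each quantity is evaluated with the rewriting that stays accurate in its
// dangerous limit (cos -> +1 for ombc, cos -> -1 for opbc). A standalone sketch
// of the pattern (the numbers are purely illustrative):
//
//   double beta = 1.0 - 1e-12, ombeta = 1e-12;
//   double c = 1.0 - 1e-8, s2 = (1.0 - c)*(1.0 + c);
//   double naive  = 1.0 - beta*c;                 // loses digits to cancellation
//   double stable = ombeta + beta*s2/(1.0 + c);   // numerically safe rewriting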
if(spin==2) { Energy magpi= sqrt( sqr(_qnewprf[_map[0]].x()) + sqr(_qnewprf[_map[0]].y()) + sqr(_qnewprf[_map[0]].z()) ); mewgt += sqr(_lprf[i].e())*_qnewprf[_map[0]].e()*ombc / (sqr(magpi*_sinphot[i])*(_qnewprf[_map[0]].e()+_lprf[i].e())); } else if(spin==3) { Energy2 pik = _qnewprf[_map[0]].e()*_lprf[i].e() - _qnewprf[_map[0]].x()*_lprf[i].x() - _qnewprf[_map[0]].y()*_lprf[i].y() - _qnewprf[_map[0]].z()*_lprf[i].z(); Energy2 pjk = _m[0]*_lprf[i].e(); Energy2 pipj = _m[0]*_qnewprf[_map[0]].e(); mewgt += (2.*pjk*pipj/(pik*sqr(pipj+pjk)) +2.*pjk/(pik*(pipj+pik)) )/dipole; } else { mewgt = 1.0; } } } return mewgt; } double IFDipole::exactYFSFormFactor(double beta1,double ombeta1, double beta2,double ombeta2) { double Y = 0.0 ; double b = beta1 ; double omb = ombeta1; double c = beta2 ; double omc = ombeta2; double arg1 = -omc/(2.*c); double arg2 = -omb*omc/(2.*(b+c)); double arg3 = 2.*b/(1.+b); if(_m[_map[1]+1]!=ZERO) { Y = _chrg1*_chrg2*(_alpha/(2.*pi))*( log(_m[0]*_m[_map[1]+1]/sqr(2.*_emin)) +log(_m[_map[0]+1]*_m[_map[1]+1]/sqr(2.*_emin)) -(1./b )*log((1.+b)/omb)*log(sqr(_m[_map[1]+1]/(2.*_emin))) -(1./b )*log(omb/(1.+b)) -(0.5/b )*sqr(log(omb/(1.+b))) +((b+c )/(b*omc))*log((b+c )/(b*omc)) -((c+b*c)/(b*omc))*log((c+b*c)/(b*omc)) +((b+c )/(b+b*c))*log((b+c )/(b+b*c)) -((c*omb)/(b+b*c))*log((c*omb)/(b+b*c)) +(0.5/b)*( sqr(log( (b+c)/(b*omc)))-sqr(log((c+b*c)/(b*omc))) + sqr(log((c*omb)/(b+b*c)))-sqr(log((b+ c)/(b+b*c))) ) +(2./b )*( real(Math::Li2(arg1)) - real(Math::Li2(arg2)) - real(Math::Li2(arg3)) ) +(1./b )*log((b+c)/(b+b*c))*log((1.+c)/(2.*c)) -(1./b )*log((c*omb)/(b*(1.+c)))*log((1.+b)*(1.+c)/(2.*(b+c))) -(1./b )*log((2.*c/b)*((b+c)/(omc*(1.+c))))*log((b+c)/(c*omb)) ); } else if(_m[_map[1]+1]==ZERO) { Y = _chrg1*_chrg2*(_alpha/(2.*pi))*( log(sqr(_m[0]/(2.*_emin))) +log(sqr(_m[_map[0]+1]/(2.*_emin))) -(1./b )*log((1.+b)/omb) *log((sqr(_m[0])-sqr(_m[_map[0]+1]))/sqr(2.*_emin)) -0.5*log(omb*(1.+b)/sqr(2.*b)) +((1.+b)/(2.*b))*log((1.+b)/(2.*b)) -( omb/(2.*b))*log( omb/(2.*b)) -(1./b )*log((1.-b)/(1.+b)) +1. +(0.5/b)*sqr(log( omb/(2.*b))) -(0.5/b)*sqr(log((1.+b)/(2.*b))) -(0.5/b)*sqr(log((1.-b)/(1.+b))) -(2. /b)*real(Math::Li2(arg3)) ); } return exp(Y); } double IFDipole::jacobianWeight() { // calculate the velocities of the children (crude/overvalued) Energy mag1old = sqrt( (_qprf[_map[0]].e() +_m[_map[0]+1]) *(_qprf[_map[0]].e() -_m[_map[0]+1]) ); Energy mag1new = sqrt( (_qnewprf[_map[0]].e()+_m[_map[0]+1]) *(_qnewprf[_map[0]].e()-_m[_map[0]+1]) ); Energy magL = sqrt( sqr(_bigLprf.x()) + sqr(_bigLprf.y()) + sqr(_bigLprf.z()) ); // 14/12/05 - KMH - This was another mistake. This is supposed to be // the angel between _qnewprf[_map[0]] and _bigLprf instead of // between _qnewprf[0] and _bigLprf. Stupid. Hopefully this weight // is correct now. 
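// The commented-out block immediately below is the superseded version referred
// to in the note above (it used _qnewprf[0] where _qnewprf[_map[0]] was meant);
// the live code that follows is simply the cosine of the opening angle between
// the rescaled charged-child momentum and the total photon momentum,
//   cos1L = (q . L) / (|q| |L|)   with q = _qnewprf[_map[0]], L = _bigLprf.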
// double cos1L = (_qnewprf[0].x()*_bigLprf.x() // +_qnewprf[0].y()*_bigLprf.y() // +_qnewprf[0].z()*_bigLprf.z() // ) // /(mag1new*magL); double cos1L = (_qnewprf[_map[0]].x()*_bigLprf.x() +_qnewprf[_map[0]].y()*_bigLprf.y() +_qnewprf[_map[0]].z()*_bigLprf.z() ) /(mag1new*magL); return abs( (_m[0]*sqr(mag1new)/mag1old) / ( mag1new*(_m[0]-_bigLprf.e()) +_qnewprf[_map[0]].e()*magL*cos1L ) ); } LorentzRotation IFDipole::solveBoost(const Lorentz5Momentum & q, const Lorentz5Momentum & p ) const { Energy modp = p.vect().mag(); Energy modq = q.vect().mag(); double betam = (p.e()*modp-q.e()*modq)/(sqr(modq)+sqr(modp)+p.mass2()); Boost beta = -betam*q.vect().unit(); ThreeVector<Energy2> ax = p.vect().cross( q.vect() ); double delta = p.vect().angle( q.vect() ); LorentzRotation R; using Constants::pi; if ( ax.mag2()/GeV2/MeV2 > 1e-16 ) { R.rotate( delta, unitVector(ax) ).boost( beta ); } else { if(p.mass()>ZERO) { R.boost(p.findBoostToCM(),p.e()/p.mass()); R.boost(q.boostVector(),q.e()/q.mass()); } else { if(modp>modq) beta = -betam*p.vect().unit(); R.boost( beta ); } } return R; } void IFDipole::doinit() { Interfaced::doinit(); // get the value of alpha from the Standard Model object _alpha=generator()->standardModel()->alphaEM(); } diff --git a/Decay/Radiation/YFSFormFactors.cc b/Decay/Radiation/YFSFormFactors.cc --- a/Decay/Radiation/YFSFormFactors.cc +++ b/Decay/Radiation/YFSFormFactors.cc @@ -1,280 +1,280 @@ // -*- C++ -*- // // YFSFormFactors.cc is a part of Herwig - A multi-purpose Monte Carlo event generator // Copyright (C) 2002-2011 The Herwig Collaboration // // Herwig is licenced under version 2 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. // // // This is the implementation of the non-inlined, non-templated member // functions of the YFSFormFactors class.
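// Illustrative note on how the functions below are meant to combine (a sketch
// inferred from the expressions in this file; argument names are placeholders):
// the virtual form factors (ReBIF, ReBFF) and the real-emission integrals below
// the energy cut-off (BtildeIF, BtildeFF) are separately regulated by the
// fictitious photon mass, but their sum is independent of it. For an
// initial-final dipole, schematically,
//
//   double B  = YFSFormFactors::ReBIF   (m0, m1, t, chg, true, mgamma);
//   double Bt = YFSFormFactors::BtildeIF(b0, omb0, b1, omb1, e0, e1,
//                                        m0, m1, t, chg, emin, true, mgamma);
//   // In B + Bt the (Anu-1)*log(mgamma^2/(m0*m1)) piece of B cancels against
//   // the (Anu-1)*2*log(2*emin/mgamma) piece of Bt, leaving (up to the common
//   // -alpha*charge/pi prefactor) a dependence on the photon energy cut-off
//   // only: (Anu-1)*log(4*emin^2/(m0*m1)).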
// #include "YFSFormFactors.h" #include "ThePEG/Interface/ClassDocumentation.h" #include <cassert> using namespace Herwig; using Constants::pi; using Herwig::Math::ReLi2; const double YFSFormFactors::_alpha=1./137.03599911; const Energy YFSFormFactors::_mgamma=1e-10*MeV; const Energy2 YFSFormFactors::_tcut=1.e-11*GeV2; const Energy YFSFormFactors::_ecut=1e-6*GeV; double YFSFormFactors::ReBIF(Energy m0 ,Energy m1 , Energy2 t , double charge ,bool includegamma, Energy mgamma) { // mass squared for speed Energy2 m02(m0*m0),m12(m1*m1),nu(0.5*(m02+m12-t)),mprod(m0*m1); double Anu,vfinite; double output; // t>0 if(t>_tcut) { // parameters Energy2 lambda(sqrt((nu-mprod)*(nu+mprod))); double eta(0.5*m12*t/lambda/(lambda+nu-m12)),zeta((lambda+nu)*eta/m12); // simple A functions for virtual piece InvEnergy2 A; if(lambda>1e-6*GeV2){A=(log((lambda+nu)/mprod)/lambda);} else{A=1./mprod;} double A1((m02-m12)/t*log(m0/m1)-2.*sqr(lambda)/t*A-2.); InvEnergy2 A3(A*log(2.*lambda/mprod) +1./lambda* (+0.25*(log((lambda+nu)/m02)+2.*log((lambda-nu+m02)/t ))* log((lambda+nu)/m02) +0.25*(log((lambda+nu)/m12)-2.*log((lambda+nu-m12)/m12))* log((lambda+nu)/m12) +0.5*(log(eta)*log(1.+eta)-log(zeta)*log(1.+zeta)) +ReLi2(-eta)-ReLi2(-zeta))); Anu=nu*A; vfinite=0.5*A1-nu*A3; } // t==0 else { // virtual part of the dipole Anu = (m02+m12)/(m02-m12)*log(m0/m1); vfinite=0.5*(Anu-1.); } if(includegamma){output=-_alpha*charge/pi*((Anu-1.)*log(sqr(mgamma)/mprod)+vfinite);} else {output=-_alpha*charge/pi*((Anu-1.)*log(MeV2/mprod)+vfinite);} - // assert(!isnan(output) && !isinf(output)); + // assert(isfinite(output)); return output; } double YFSFormFactors::ReBFF(Energy m1,Energy m2,Energy2 s,double charge, bool includegamma,Energy mgamma) { // masses etc Energy2 m12(m1*m1),m22(m2*m2),mu(0.5*(s-m12-m22)),mprod(m1*m2); // parameters double ratio(m1*m2/mu),rho(sqrt((1.-ratio)*(1.+ratio))); Energy2 prod(mu*(1.+rho)); // the finite piece double vfinite(mu*rho/s*log(prod/mprod)+0.5*(m12-m22)/s*log(m1/m2) +1./rho*(pi*pi-0.5*log(prod/m12)*log(prod/m22) -0.5*sqr(log((m12+prod)/(m22+prod))) -ReLi2(2.*mu*rho/(m12+prod)) -ReLi2(2.*mu*rho/(m22+prod)))-1.); // the cut-off piece double Anu(log(prod/mprod)/rho),output; if(includegamma){output=-_alpha*charge/pi*((Anu-1.)*log(sqr(mgamma)/mprod)+vfinite);} else {output=-_alpha*charge/pi*((Anu-1.)*log(MeV2/mprod)+vfinite);} - // assert(!isnan(output) && !isinf(output)); + // assert(isfinite(output)); return output; } double YFSFormFactors::BtildeIF(double beta0 ,double ombeta0 , double beta1 ,double ombeta1 , Energy en0 ,Energy en1 , Energy m0 ,Energy m1 , Energy2 t ,double charge , Energy emin ,bool includegamma, Energy mgamma) { // coefficient of the divergent piece Energy2 mprod(m0*m1),nu(0.5*(m0*m0+m1*m1-t)); double Anu; if(nu-mprod>1e-12*GeV2) { Energy2 lambda(sqrt((nu-mprod)*(nu+mprod))); Anu=nu/lambda*log((lambda+nu)/mprod); } else {Anu=1.;} // finite piece double rfinite(-0.5*A4single(beta0,ombeta0)-0.5*A4single(beta1,ombeta1) +nu*A4IF(beta0,ombeta0,beta1,ombeta1,en0,en1,m0,m1,t)); - // assert(!isnan(rfinite) && !isinf(rfinite)); + // assert(isfinite(rfinite)); // return the answer double output; if(includegamma) { output=-_alpha*charge/pi*((Anu-1.)*2.*log(2.*emin/mgamma)+rfinite); } else { output=-_alpha*charge/pi*((Anu-1.)*2.*log(2.*emin/MeV)+rfinite); } - // assert(!isnan(output) && !isinf(output)); + // assert(isfinite(output)); return output; } double YFSFormFactors::BtildeFF(double beta1 ,double ombeta1 , double beta2 ,double ombeta2 , Energy en1 ,Energy en2 , Energy m1 ,Energy m2 , 
Energy2 s ,double charge , Energy emin ,bool includegamma, Energy mgamma) { // masses etc Energy2 m12(m1*m1),m22(m2*m2),mu(0.5*(s-m12-m22)),mprod(m1*m2); // parameters double ratio(m1*m2/mu),rho(sqrt((1.-ratio)*(1.+ratio))); Energy2 prod(mu*(1.+rho)); // finite piece double rfinite(-0.5*A4single(beta1,ombeta1)-0.5*A4single(beta2,ombeta2) +mu*A4FFFull(en1,en2,beta1,beta2,m1,m2,s)); double Anu(log(prod/mprod)/rho); // return the answer double output; if(includegamma){output=-_alpha*charge/pi*((Anu-1.)*2.*log(2.*emin/mgamma)+rfinite);} else {output=-_alpha*charge/pi*((Anu-1.)*2.*log(2.*emin/MeV)+rfinite);} - // assert(!isnan(output) && !isinf(output)); + // assert(isfinite(output)); return output; } InvEnergy2 YFSFormFactors::A4FFFull(Energy inen1 ,Energy inen2, double beta1,double beta2, Energy inm1 ,Energy inm2,Energy2 s ) { Energy en1(inen1),en2(inen2),m1(inm1),m2(inm2); // order the particles so en1>en2 if(inen1*beta1<inen2*beta2) { en1=inen2; en2=inen1; m1=inm2; m2=inm1; } Energy Delta(en1-en2); Energy Omega(en1+en2),delta(m1-m2),omega(m1+m2); Energy2 Q2(s-2.*(m1*m1+m2*m2)); Energy root(sqrt(Delta*Delta+Q2)); Energy eta[2]={sqrt((en2-m2)*(en2+m2)),sqrt((en1-m1)*(en1+m1))+root}; if(0.5*(s-m1*m1-m2*m2)>en1*en2){eta[0]=-eta[0];} Energy2 root2(sqrt((Q2+omega*omega)*(Q2+delta*delta))); double Y[2]; // various limits Energy y[4]; y[0]=0.5*(root-Omega+(omega*delta+root2)/(root+Delta)); y[1]=y[0]-root2/(root+Delta); y[2]=0.5*(root+Omega+(omega*delta+root2)/(root-Delta)); y[3]=y[2]-root2/(root-Delta); // the Y function at both limits for(unsigned int ix=0;ix<2;++ix) {Y[ix]=Zij(eta[ix],y[0],y[3])+Zij(eta[ix],y[1],y[0]) +Zij(eta[ix],y[2],y[1])-Zij(eta[ix],y[2],y[3]) +0.5*Xijkl(eta[ix],y[0],y[1],y[2],y[3])*Xijkl(eta[ix],y[1],y[2],y[0],y[3]);} // the answer // the Z function at both limits double output(0.); if(abs(Delta)>_ecut) { output=log(abs((root-Delta)/(root+Delta)))*(+Xijkl(eta[1],y[0],y[3],y[1],y[2]) -Xijkl(eta[0],y[0],y[3],y[1],y[2])); } return 1./root2*(output+Y[1]-Y[0]); } InvEnergy2 YFSFormFactors::A4IF(double beta0 ,double ombeta0 , double beta1 ,double ombeta1 , Energy en0 ,Energy en1 , Energy m0 ,Energy m1 , Energy2 t) { // this is the general function so pick the special case if(t>_tcut){ // rest frame of decaying particle t!=0 if(abs(en0-m0)<_ecut){return A4IFRest(m0,m1,beta1,ombeta1,en1);} // rest frame of decay product t!=0 else if(abs(en1-m1)<_ecut){return A4IFRest(m1,m0,beta0,ombeta0,en0);} // general frame t!=0 else {return A4IFFull(beta0,beta1,en0,en1,m0,m1,t);} } else { // rest frame of decaying particle t=0 if(abs(en0-m0)<_ecut){return A4IFRestZero(m0,m1);} // rest frame of decay products t=0 else if(abs(en1-m1)<_ecut){return A4IFRestZero(m1,m0);} // general frame t=0 else{return A4IFZero(beta0,beta1,ombeta1,en0,en1,m0,m1);} } } InvEnergy2 YFSFormFactors::A4IFZero(double beta0, double beta1, double ombeta1, Energy en0, Energy en1 , Energy m0 , Energy m1) { Energy Delta = en0-en1; Energy2 mu2 = (m0-m1)*(m0+m1); long double z[2]={ beta1*en1/Delta, beta0*en0/Delta-1. 
}; long double y[3],xi[3]; y[0]=en1/Delta; y[1]=y[0]-0.5*mu2/sqr(Delta); y[2]=-y[0]+2.*m1*m1/mu2; for(unsigned int ix = 0; ix < 3; ++ix) { if ( ix == 0 ) xi[0] = -ombeta1*y[0] / (z[1] - y[0] ); else xi[ix] = (z[0] - y[ix]) / (z[1] - y[ix]); } long double U[2]; for(unsigned int ix=0;ix<2;++ix) { // U[ix] = 0.5*sqr(log(abs((z[ix]-y[0])*(z[ix]-y[1])/(z[ix]-y[2])))) // +log(abs(z[ix]-y[0]))*log(abs(z[ix]-y[0])/sqr(z[ix]-y[1])) // +2.*ReLi2((y[1]-y[0])/(z[ix]-y[0])) // +2.*ReLi2((y[2]-y[1])/(z[ix]-y[1])); const long double a = ix==0 ? -ombeta1*y[0] : z[ix]-y[0]; const long double b = z[ix]-y[1]; const long double c = z[ix]-y[2]; const long double A = abs(a*b/c); const long double B = abs(a); const long double C = B/sqr(b); const long double D = (y[1]-y[0])/a; const long double E = (y[2]-y[1])/b; U[ix] = 0.5*sqr(log(A)) + log(B)*log(C) + 2.*ReLi2(D) + 2.*ReLi2(E); } return 1./mu2*(log(2.*sqr(Delta)/mu2)*log(abs(xi[1]*xi[2]/xi[0]))+U[1]-U[0]); } InvEnergy2 YFSFormFactors::A4IFRest(Energy m0 ,Energy m1, double beta1, double ombeta1, Energy E1) { Energy Mfact0 = m0-E1*ombeta1; Energy Mfact1 = m0-E1*(1.+beta1); Energy2 Mfact2 = m0*E1*(1.+beta1)-m1*m1; Energy2 Mfact3 = m0*E1*ombeta1-m1*m1; Energy2 qprod(m0*E1*beta1); return 0.5/qprod*(+log(abs(Mfact0/Mfact1))*log(E1*(1.+beta1)/m0) -2.*log(abs(2.*beta1*E1*Mfact0/m0/m1))*log(E1*(1.+beta1)/m1) +2.*ReLi2(E1/m0*ombeta1)-2.*ReLi2(E1/m0*(1.+beta1)) +ReLi2(-0.5*Mfact1/beta1/E1)-ReLi2( 0.5*Mfact0/beta1/E1) +ReLi2( 0.5*Mfact2/qprod )-ReLi2(-0.5*Mfact3/qprod)); } InvEnergy2 YFSFormFactors::A4IFFull(Velocity beta0,Velocity beta1, Energy en0 ,Energy en1 , Energy m0 ,Energy m1 , Energy2 t) { Energy Delta(en0-en1),Omega(en0+en1),delta(m0-m1),omega(m0+m1); Energy T(sqrt(sqr(Delta)-t)),V(Delta+T); Energy2 kappa(sqrt((sqr(omega)-t)*(sqr(delta)-t))); long double y[4]={-0.5/T*(T+Omega-(omega*delta+kappa)*V/t), -0.5/T*(T+Omega-(omega*delta-kappa)*V/t), -0.5/T*(T-Omega+(omega*delta+kappa)/V), -0.5/T*(T-Omega+(omega*delta-kappa)/V)}; long double z[2]={beta1*en1/T,beta0*en0/T-1.}; double Y[2],lfact(log(abs(V*V/t))); for(unsigned int ix=0;ix<2;++ix) { Y[ix] = lfact*Xijkl(z[ix],y[0],y[3],y[1],y[2]) +Zij(z[ix],y[0],y[3]) +Zij(z[ix],y[1],y[0]) +Zij(z[ix],y[2],y[1]) -Zij(z[ix],y[2],y[3]) +0.5*Xijkl(z[ix],y[0],y[1],y[2],y[3])*Xijkl(z[ix],y[1],y[2],y[0],y[3]); } return (Y[1]-Y[0])/kappa; } diff --git a/MatrixElement/Matchbox/Base/MatchboxMEBase.cc b/MatrixElement/Matchbox/Base/MatchboxMEBase.cc --- a/MatrixElement/Matchbox/Base/MatchboxMEBase.cc +++ b/MatrixElement/Matchbox/Base/MatchboxMEBase.cc @@ -1,1722 +1,1721 @@ // -*- C++ -*- // // MatchboxMEBase.cc is a part of Herwig - A multi-purpose Monte Carlo event generator // Copyright (C) 2002-2012 The Herwig Collaboration // // Herwig is licenced under version 2 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. // // // This is the implementation of the non-inlined, non-templated member // functions of the MatchboxMEBase class. 
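// Illustrative note on the normalisation used further down in dSigHatDR() (a
// sketch of the convention only; the numbers are hypothetical): each
// contribution is assembled as
//   sqr(hbarc)/(2*lastSHat()) * jacobian() * lastMEPDFWeight() * |M|^2,
// where 1/(2*sHat) is the partonic flux factor and sqr(hbarc) converts the
// natural-unit result (GeV^-2) into an area:
//
//   // hbarc^2 ~ 0.3894 GeV^2 mb, so |M|^2/(2*sHat) = 1e-6 GeV^-2 corresponds
//   // to roughly 3.9e-7 mb ~ 0.39 nb before Jacobian and PDF weights.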
// #include "MatchboxMEBase.h" #include "ThePEG/Utilities/DescribeClass.h" #include "ThePEG/Interface/ClassDocumentation.h" #include "ThePEG/Interface/Parameter.h" #include "ThePEG/Interface/Reference.h" #include "ThePEG/Interface/RefVector.h" #include "ThePEG/Interface/Switch.h" #include "ThePEG/Persistency/PersistentOStream.h" #include "ThePEG/Persistency/PersistentIStream.h" #include "ThePEG/PDF/PDF.h" #include "ThePEG/PDT/PDT.h" #include "ThePEG/StandardModel/StandardModelBase.h" #include "ThePEG/Cuts/Cuts.h" #include "ThePEG/Handlers/StdXCombGroup.h" #include "ThePEG/EventRecord/SubProcess.h" #include "Herwig/MatrixElement/Matchbox/Dipoles/SubtractionDipole.h" #include "Herwig/MatrixElement/Matchbox/Utility/DiagramDrawer.h" #include "Herwig/MatrixElement/Matchbox/MatchboxFactory.h" #include "Herwig/Utilities/RunDirectories.h" #include "Herwig/MatrixElement/ProductionMatrixElement.h" #include "Herwig/MatrixElement/HardVertex.h" #include <boost/foreach.hpp> #include <cctype> #include <iterator> using std::ostream_iterator; using namespace Herwig; MatchboxMEBase::MatchboxMEBase() : MEBase(), theOneLoop(false), theOneLoopNoBorn(false), theOneLoopNoLoops(false), theNoCorrelations(false), theHavePDFs(false,false), checkedPDFs(false), theDiagramWeightVerboseDown(10000000000000.), theDiagramWeightVerboseUp(0.) {} MatchboxMEBase::~MatchboxMEBase() {} Ptr<MatchboxFactory>::tptr MatchboxMEBase::factory() const { return theFactory; } void MatchboxMEBase::factory(Ptr<MatchboxFactory>::tptr f) { theFactory = f; } Ptr<Tree2toNGenerator>::tptr MatchboxMEBase::diagramGenerator() const { return factory()->diagramGenerator(); } Ptr<ProcessData>::tptr MatchboxMEBase::processData() const { return factory()->processData(); } unsigned int MatchboxMEBase::getNLight() const { return factory()->nLight(); } vector<int> MatchboxMEBase::getNLightJetVec() const { return factory()->nLightJetVec(); } vector<int> MatchboxMEBase::getNHeavyJetVec() const { return factory()->nHeavyJetVec(); } vector<int> MatchboxMEBase::getNLightProtonVec() const { return factory()->nLightProtonVec(); } double MatchboxMEBase::factorizationScaleFactor() const { return factory()->factorizationScaleFactor(); } double MatchboxMEBase::renormalizationScaleFactor() const { return factory()->renormalizationScaleFactor(); } bool MatchboxMEBase::fixedCouplings() const { return factory()->fixedCouplings(); } bool MatchboxMEBase::fixedQEDCouplings() const { return factory()->fixedQEDCouplings(); } bool MatchboxMEBase::checkPoles() const { return factory()->checkPoles(); } bool MatchboxMEBase::verbose() const { return factory()->verbose(); } bool MatchboxMEBase::initVerbose() const { return factory()->initVerbose(); } void MatchboxMEBase::getDiagrams() const { if ( diagramGenerator() && processData() ) { vector<Ptr<Tree2toNDiagram>::ptr> diags; vector<Ptr<Tree2toNDiagram>::ptr>& res = processData()->diagramMap()[subProcess().legs]; if ( res.empty() ) { res = diagramGenerator()->generate(subProcess().legs,orderInAlphaS(),orderInAlphaEW()); } copy(res.begin(),res.end(),back_inserter(diags)); processData()->fillMassGenerators(subProcess().legs); if ( diags.empty() ) return; for ( vector<Ptr<Tree2toNDiagram>::ptr>::iterator d = diags.begin(); d != diags.end(); ++d ) { add(*d); } return; } throw Exception() << "MatchboxMEBase::getDiagrams() expects a Tree2toNGenerator and ProcessData object.\n" << "Please check your setup." 
<< Exception::runerror; } Selector<MEBase::DiagramIndex> MatchboxMEBase::diagrams(const DiagramVector & diags) const { if ( phasespace() ) { return phasespace()->selectDiagrams(diags); } throw Exception() << "MatchboxMEBase::diagrams() expects a MatchboxPhasespace object.\n" << "Please check your setup." << Exception::runerror; return Selector<MEBase::DiagramIndex>(); } Selector<const ColourLines *> MatchboxMEBase::colourGeometries(tcDiagPtr diag) const { if ( matchboxAmplitude() ) { if ( matchboxAmplitude()->haveColourFlows() ) { if ( matchboxAmplitude()->treeAmplitudes() ) matchboxAmplitude()->prepareAmplitudes(this); return matchboxAmplitude()->colourGeometries(diag); } } Ptr<Tree2toNDiagram>::tcptr tdiag = dynamic_ptr_cast<Ptr<Tree2toNDiagram>::tcptr>(diag); assert(diag && processData()); vector<ColourLines*>& flows = processData()->colourFlowMap()[tdiag]; if ( flows.empty() ) { list<list<list<pair<int,bool> > > > cflows = ColourBasis::colourFlows(tdiag); for ( list<list<list<pair<int,bool> > > >::const_iterator fit = cflows.begin(); fit != cflows.end(); ++fit ) { flows.push_back(new ColourLines(ColourBasis::cfstring(*fit))); } } Selector<const ColourLines *> res; for ( vector<ColourLines*>::const_iterator f = flows.begin(); f != flows.end(); ++f ) res.insert(1.0,*f); return res; } void MatchboxMEBase::constructVertex(tSubProPtr sub, const ColourLines* cl) { if ( !canFillRhoMatrix() || !factory()->spinCorrelations() ) return; assert(matchboxAmplitude()); assert(matchboxAmplitude()->colourBasis()); // get the colour structure for the selected colour flow size_t cStructure = matchboxAmplitude()->colourBasis()->tensorIdFromFlow(lastXComb().lastDiagram(),cl); // hard process for processing the spin info tPVector hard; hard.push_back(sub->incoming().first); hard.push_back(sub->incoming().second); vector<PDT::Spin> out; for ( size_t k = 0; k < sub->outgoing().size(); ++k ) { out.push_back(sub->outgoing()[k]->data().iSpin()); hard.push_back(sub->outgoing()[k]); } // calculate dummy wave functions to fill the spin info static vector<VectorWaveFunction> dummyPolarizations; static vector<SpinorWaveFunction> dummySpinors; static vector<SpinorBarWaveFunction> dummyBarSpinors; for ( size_t k = 0; k < hard.size(); ++k ) { if ( hard[k]->data().iSpin() == PDT::Spin1Half ) { if ( hard[k]->id() > 0 && k > 1 ) { SpinorBarWaveFunction(dummyBarSpinors,hard[k], outgoing, true); } else if ( hard[k]->id() < 0 && k > 1 ) { SpinorWaveFunction(dummySpinors,hard[k], outgoing, true); } else if ( hard[k]->id() > 0 && k < 2 ) { SpinorWaveFunction(dummySpinors,hard[k], incoming, false); } else if ( hard[k]->id() < 0 && k < 2 ) { SpinorBarWaveFunction(dummyBarSpinors,hard[k], incoming, false); } } else if ( hard[k]->data().iSpin() == PDT::Spin1 ) { VectorWaveFunction(dummyPolarizations,hard[k], k > 1 ? outgoing : incoming, k > 1 ? true : false, hard[k]->data().hardProcessMass() == ZERO); } else if (hard[k]->data().iSpin() == PDT::Spin0 ) { ScalarWaveFunction(hard[k],k > 1 ? outgoing : incoming, k > 1 ? 
true : false); } else assert(false); } // fill the production matrix element ProductionMatrixElement pMe(mePartonData()[0]->iSpin(), mePartonData()[1]->iSpin(), out); for ( map<vector<int>,CVector>::const_iterator lamp = lastLargeNAmplitudes().begin(); lamp != lastLargeNAmplitudes().end(); ++lamp ) { vector<unsigned int> pMeHelicities = matchboxAmplitude()->physicalHelicities(lamp->first); pMe(pMeHelicities) = lamp->second[cStructure]; } // set the spin information HardVertexPtr hardvertex = new_ptr(HardVertex()); hardvertex->ME(pMe); if ( sub->incoming().first->spinInfo() ) sub->incoming().first->spinInfo()->productionVertex(hardvertex); if ( sub->incoming().second->spinInfo() ) sub->incoming().second->spinInfo()->productionVertex(hardvertex); for ( ParticleVector::const_iterator p = sub->outgoing().begin(); p != sub->outgoing().end(); ++p ) { if ( (**p).spinInfo() ) (**p).spinInfo()->productionVertex(hardvertex); } } unsigned int MatchboxMEBase::orderInAlphaS() const { return subProcess().orderInAlphaS; } unsigned int MatchboxMEBase::orderInAlphaEW() const { return subProcess().orderInAlphaEW; } void MatchboxMEBase::setXComb(tStdXCombPtr xc) { MEBase::setXComb(xc); lastMatchboxXComb(xc); if ( phasespace() ) phasespace()->setXComb(xc); if ( scaleChoice() ) scaleChoice()->setXComb(xc); if ( matchboxAmplitude() ) matchboxAmplitude()->setXComb(xc); } double MatchboxMEBase::generateIncomingPartons(const double* r1, const double* r2) { // shamelessly stolen from PartonExtractor.cc Energy2 shmax = lastCuts().sHatMax(); Energy2 shmin = lastCuts().sHatMin(); Energy2 sh = shmin*pow(shmax/shmin, *r1); double ymax = lastCuts().yHatMax(); double ymin = lastCuts().yHatMin(); double km = log(shmax/shmin); ymax = min(ymax, log(lastCuts().x1Max()*sqrt(lastS()/sh))); ymin = max(ymin, -log(lastCuts().x2Max()*sqrt(lastS()/sh))); double y = ymin + (*r2)*(ymax - ymin); double x1 = exp(-0.5*log(lastS()/sh) + y); double x2 = exp(-0.5*log(lastS()/sh) - y); Lorentz5Momentum P1 = lastParticles().first->momentum(); LorentzMomentum p1 = lightCone((P1.rho() + P1.e())*x1, Energy()); p1.rotateY(P1.theta()); p1.rotateZ(P1.phi()); meMomenta()[0] = p1; Lorentz5Momentum P2 = lastParticles().second->momentum(); LorentzMomentum p2 = lightCone((P2.rho() + P2.e())*x2, Energy()); p2.rotateY(P2.theta()); p2.rotateZ(P2.phi()); meMomenta()[1] = p2; lastXCombPtr()->lastX1X2(make_pair(x1,x2)); lastXCombPtr()->lastSHat((meMomenta()[0]+meMomenta()[1]).m2()); return km*(ymax - ymin); } bool MatchboxMEBase::generateKinematics(const double * r) { if ( phasespace() ) { jacobian(phasespace()->generateKinematics(r,meMomenta())); if ( jacobian() == 0.0 ) return false; setScale(); logGenerateKinematics(r); assert(lastMatchboxXComb()); if ( nDimAmplitude() > 0 ) { amplitudeRandomNumbers().resize(nDimAmplitude()); copy(r + nDimPhasespace(), r + nDimPhasespace() + nDimAmplitude(), amplitudeRandomNumbers().begin()); } if ( nDimInsertions() > 0 ) { insertionRandomNumbers().resize(nDimInsertions()); copy(r + nDimPhasespace() + nDimAmplitude(), r + nDimPhasespace() + nDimAmplitude() + nDimInsertions(), insertionRandomNumbers().begin()); } return true; } throw Exception() << "MatchboxMEBase::generateKinematics() expects a MatchboxPhasespace object.\n" << "Please check your setup." 
<< Exception::runerror; return false; } int MatchboxMEBase::nDim() const { if ( lastMatchboxXComb() ) return nDimPhasespace() + nDimAmplitude() + nDimInsertions(); int ampAdd = 0; if ( matchboxAmplitude() ) { ampAdd = matchboxAmplitude()->nDimAdditional(); } int insertionAdd = 0; for ( vector<Ptr<MatchboxInsertionOperator>::ptr>::const_iterator v = virtuals().begin(); v != virtuals().end(); ++v ) { insertionAdd = max(insertionAdd,(**v).nDimAdditional()); } return nDimBorn() + ampAdd + insertionAdd; } int MatchboxMEBase::nDimBorn() const { if ( lastMatchboxXComb() ) return nDimPhasespace(); if ( phasespace() ) return phasespace()->nDim(diagrams().front()->partons()); throw Exception() << "MatchboxMEBase::nDim() expects a MatchboxPhasespace object.\n" << "Please check your setup." << Exception::runerror; return 0; } void MatchboxMEBase::setScale() const { if ( haveX1X2() ) { lastXCombPtr()->lastSHat((meMomenta()[0]+meMomenta()[1]).m2()); } Energy2 fcscale = factorizationScale(); Energy2 fscale = fcscale*sqr(factorizationScaleFactor()); Energy2 rscale = renormalizationScale()*sqr(renormalizationScaleFactor()); Energy2 ewrscale = renormalizationScaleQED(); lastXCombPtr()->lastScale(fscale); lastXCombPtr()->lastCentralScale(fcscale); lastXCombPtr()->lastShowerScale(showerScale()); lastMatchboxXComb()->lastRenormalizationScale(rscale); if ( !fixedCouplings() ) { if ( rscale > lastCuts().scaleMin() ) lastXCombPtr()->lastAlphaS(SM().alphaS(rscale)); else lastXCombPtr()->lastAlphaS(SM().alphaS(lastCuts().scaleMin())); } else { lastXCombPtr()->lastAlphaS(SM().alphaS()); } if ( !fixedQEDCouplings() ) { lastXCombPtr()->lastAlphaEM(SM().alphaEMME(ewrscale)); } else { lastXCombPtr()->lastAlphaEM(SM().alphaEMMZ()); } logSetScale(); } Energy2 MatchboxMEBase::factorizationScale() const { if ( scaleChoice() ) { return scaleChoice()->factorizationScale(); } throw Exception() << "MatchboxMEBase::factorizationScale() expects a MatchboxScaleChoice object.\n" << "Please check your setup." << Exception::runerror; return ZERO; } Energy2 MatchboxMEBase::renormalizationScale() const { if ( scaleChoice() ) { return scaleChoice()->renormalizationScale(); } throw Exception() << "MatchboxMEBase::renormalizationScale() expects a MatchboxScaleChoice object.\n" << "Please check your setup." << Exception::runerror; return ZERO; } Energy2 MatchboxMEBase::renormalizationScaleQED() const { if ( scaleChoice() ) { return scaleChoice()->renormalizationScaleQED(); } return renormalizationScale(); } Energy2 MatchboxMEBase::showerScale() const { if ( scaleChoice() ) { return scaleChoice()->showerScale(); } throw Exception() << "MatchboxMEBase::showerScale() expects a MatchboxScaleChoice object.\n" << "Please check your setup." 
<< Exception::runerror; return ZERO; } void MatchboxMEBase::setVetoScales(tSubProPtr) const {} bool MatchboxMEBase::havePDFWeight1() const { if ( checkedPDFs ) return theHavePDFs.first; theHavePDFs.first = factory()->isIncoming(mePartonData()[0]) && lastXCombPtr()->partonBins().first->pdf(); theHavePDFs.second = factory()->isIncoming(mePartonData()[1]) && lastXCombPtr()->partonBins().second->pdf(); checkedPDFs = true; return theHavePDFs.first; } bool MatchboxMEBase::havePDFWeight2() const { if ( checkedPDFs ) return theHavePDFs.second; theHavePDFs.first = factory()->isIncoming(mePartonData()[0]) && lastXCombPtr()->partonBins().first->pdf(); theHavePDFs.second = factory()->isIncoming(mePartonData()[1]) && lastXCombPtr()->partonBins().second->pdf(); checkedPDFs = true; return theHavePDFs.second; } void MatchboxMEBase::getPDFWeight(Energy2 factorizationScale) const { if ( !havePDFWeight1() && !havePDFWeight2() ) { lastMEPDFWeight(1.0); logPDFWeight(); return; } double w = 1.; if ( havePDFWeight1() ) w *= pdf1(factorizationScale); if ( havePDFWeight2() ) w *= pdf2(factorizationScale); lastMEPDFWeight(w); logPDFWeight(); } double MatchboxMEBase::pdf1(Energy2 fscale, double xEx, double xFactor) const { assert(lastXCombPtr()->partonBins().first->pdf()); if ( xEx < 1. && lastX1()*xFactor >= xEx ) { return ( ( 1. - lastX1()*xFactor ) / ( 1. - xEx ) ) * lastXCombPtr()->partonBins().first->pdf()->xfx(lastParticles().first->dataPtr(), lastPartons().first->dataPtr(), fscale == ZERO ? lastScale() : fscale, xEx)/xEx; } return lastXCombPtr()->partonBins().first->pdf()->xfx(lastParticles().first->dataPtr(), lastPartons().first->dataPtr(), fscale == ZERO ? lastScale() : fscale, lastX1()*xFactor)/lastX1()/xFactor; } double MatchboxMEBase::pdf2(Energy2 fscale, double xEx, double xFactor) const { assert(lastXCombPtr()->partonBins().second->pdf()); if ( xEx < 1. && lastX2()*xFactor >= xEx ) { return ( ( 1. - lastX2()*xFactor ) / ( 1. - xEx ) ) * lastXCombPtr()->partonBins().second->pdf()->xfx(lastParticles().second->dataPtr(), lastPartons().second->dataPtr(), fscale == ZERO ? lastScale() : fscale, xEx)/xEx; } return lastXCombPtr()->partonBins().second->pdf()->xfx(lastParticles().second->dataPtr(), lastPartons().second->dataPtr(), fscale == ZERO ? lastScale() : fscale, lastX2()*xFactor)/lastX2()/xFactor; } double MatchboxMEBase::me2() const { if ( matchboxAmplitude() ) { if ( matchboxAmplitude()->treeAmplitudes() ) matchboxAmplitude()->prepareAmplitudes(this); double res = matchboxAmplitude()->me2()* me2Norm(); return res; } throw Exception() << "MatchboxMEBase::me2() expects a MatchboxAmplitude object.\n" << "Please check your setup." << Exception::runerror; return 0.; } double MatchboxMEBase::largeNME2(Ptr<ColourBasis>::tptr largeNBasis) const { if ( matchboxAmplitude() ) { if ( matchboxAmplitude()->treeAmplitudes() ) { largeNBasis->prepare(mePartonData(),false); matchboxAmplitude()->prepareAmplitudes(this); } double res = matchboxAmplitude()->largeNME2(largeNBasis)* me2Norm(); return res; } throw Exception() << "MatchboxMEBase::largeNME2() expects a MatchboxAmplitude object.\n" << "Please check your setup." 
<< Exception::runerror; return 0.; } double MatchboxMEBase::finalStateSymmetry() const { if ( symmetryFactor() > 0.0 ) return symmetryFactor(); double sFactor = 1.; map<long,int> counts; cPDVector checkData; copy(mePartonData().begin()+2,mePartonData().end(),back_inserter(checkData)); cPDVector::iterator p = checkData.begin(); while ( !checkData.empty() ) { if ( counts.find((**p).id()) != counts.end() ) { counts[(**p).id()] += 1; } else { counts[(**p).id()] = 1; } checkData.erase(p); p = checkData.begin(); continue; } for ( map<long,int>::const_iterator c = counts.begin(); c != counts.end(); ++c ) { if ( c->second == 1 ) continue; if ( c->second == 2 ) sFactor /= 2.; else if ( c->second == 3 ) sFactor /= 6.; else if ( c->second == 4 ) sFactor /= 24.; } symmetryFactor(sFactor); return symmetryFactor(); } double MatchboxMEBase::me2Norm(unsigned int addAlphaS) const { // assume that we always have incoming // spin-1/2 or massless spin-1 particles double fac = 1./4.; if ( hasInitialAverage() ) fac = 1.; double couplings = 1.0; if ( (orderInAlphaS() > 0 || addAlphaS != 0) && !hasRunningAlphaS() ) { fac *= pow(lastAlphaS()/SM().alphaS(),double(orderInAlphaS()+addAlphaS)); couplings *= pow(lastAlphaS(),double(orderInAlphaS()+addAlphaS)); } if ( orderInAlphaEW() > 0 && !hasRunningAlphaEW() ) { fac *= pow(lastAlphaEM()/SM().alphaEMMZ(),double(orderInAlphaEW())); couplings *= pow(lastAlphaEM(),double(orderInAlphaEW())); } lastMECouplings(couplings); if ( !hasInitialAverage() ) { if ( mePartonData()[0]->iColour() == PDT::Colour3 || mePartonData()[0]->iColour() == PDT::Colour3bar ) fac /= SM().Nc(); else if ( mePartonData()[0]->iColour() == PDT::Colour8 ) fac /= (SM().Nc()*SM().Nc()-1.); if ( mePartonData()[1]->iColour() == PDT::Colour3 || mePartonData()[1]->iColour() == PDT::Colour3bar ) fac /= SM().Nc(); else if ( mePartonData()[1]->iColour() == PDT::Colour8 ) fac /= (SM().Nc()*SM().Nc()-1.); } return !hasFinalStateSymmetry() ? finalStateSymmetry()*fac : fac; } CrossSection MatchboxMEBase::dSigHatDR() const { getPDFWeight(); if ( !lastXCombPtr()->willPassCuts() ) { lastMECrossSection(ZERO); return lastMECrossSection(); } double xme2 = me2(); if (factory()->verboseDia()){ double diagweightsum = 0.0; for ( vector<Ptr<DiagramBase>::ptr>::const_iterator d = diagrams().begin(); d != diagrams().end(); ++d ) { diagweightsum += phasespace()->diagramWeight(dynamic_cast<const Tree2toNDiagram&>(**d)); } double piWeight = pow(2.*Constants::pi,(int)(3*(meMomenta().size()-2)-4)); double units = pow(lastSHat() / GeV2, mePartonData().size() - 4.); bookMEoverDiaWeight(log(xme2/(diagweightsum*piWeight*units)));// } if ( xme2 == 0. 
&& !oneLoopNoBorn() ) { lastMECrossSection(ZERO); return lastMECrossSection(); } double vme2 = 0.; if ( oneLoop() && !oneLoopNoLoops() ) vme2 = oneLoopInterference(); CrossSection res = ZERO; if ( !oneLoopNoBorn() ) res += (sqr(hbarc)/(2.*lastSHat())) * jacobian()* lastMEPDFWeight() * xme2; if ( oneLoop() && !oneLoopNoLoops() ) res += (sqr(hbarc)/(2.*lastSHat())) * jacobian()* lastMEPDFWeight() * vme2; if ( !onlyOneLoop() ) { for ( vector<Ptr<MatchboxInsertionOperator>::ptr>::const_iterator v = virtuals().begin(); v != virtuals().end(); ++v ) { (**v).setXComb(lastXCombPtr()); res += (**v).dSigHatDR(); } if ( checkPoles() && oneLoop() ) logPoles(); } double weight = 0.0; bool applied = false; for ( vector<Ptr<MatchboxReweightBase>::ptr>::const_iterator rw = theReweights.begin(); rw != theReweights.end(); ++rw ) { (**rw).setXComb(lastXCombPtr()); if ( !(**rw).apply() ) continue; weight += (**rw).evaluate(); applied = true; } if ( applied ) res *= weight; lastMECrossSection(res); return lastMECrossSection(); } double MatchboxMEBase::oneLoopInterference() const { if ( matchboxAmplitude() ) { if ( matchboxAmplitude()->oneLoopAmplitudes() ) matchboxAmplitude()->prepareOneLoopAmplitudes(this); double res = matchboxAmplitude()->oneLoopInterference()* me2Norm(1); return res; } throw Exception() << "MatchboxMEBase::oneLoopInterference() expects a MatchboxAmplitude object.\n" << "Please check your setup." << Exception::runerror; return 0.; } MatchboxMEBase::AccuracyHistogram::AccuracyHistogram(double low, double up, unsigned int nbins) : lower(low), upper(up), sameSign(0), oppositeSign(0), nans(0), overflow(0), underflow(0) { double step = (up-low)/nbins; for ( unsigned int k = 1; k <= nbins; ++k ) bins[lower + k*step] = 0.0; } void MatchboxMEBase::AccuracyHistogram::book(double a, double b) { - if ( isnan(a) || isnan(b) || - isinf(a) || isinf(b) ) { + if ( ! (isfinite(a) && isfinite(b)) ) { ++nans; return; } if ( a*b >= 0. ) ++sameSign; if ( a*b < 0. ) ++oppositeSign; double r = 1.; if ( abs(a) != 0.0 ) r = abs(1.-abs(b/a)); else if ( abs(b) != 0.0 ) r = abs(b); if ( log10(r) < lower || r == 0.0 ) { ++underflow; return; } if ( log10(r) > upper ) { ++overflow; return; } map<double,double>::iterator bin = bins.upper_bound(log10(r)); if ( bin == bins.end() ) return; bin->second += 1.; } void MatchboxMEBase::AccuracyHistogram::dump(const std::string& folder, const std::string& prefix, const cPDVector& proc) const { ostringstream fname(""); for ( cPDVector::const_iterator p = proc.begin(); p != proc.end(); ++p ) fname << (**p).PDGName(); ofstream out((folder+"/"+prefix+fname.str()+".dat").c_str()); out << "# same sign : " << sameSign << " opposite sign : " << oppositeSign << " nans : " << nans << " overflow : " << overflow << " underflow : " << underflow << "\n"; for ( map<double,double>::const_iterator b = bins.begin(); b != bins.end(); ++b ) { map<double,double>::const_iterator bp = b; --bp; if ( b->second != 0. 
) { if ( b != bins.begin() ) out << bp->first; else out << lower; out << " " << b->first << " " << b->second << "\n" << flush; } } ofstream gpout((folder+"/"+prefix+fname.str()+".gp").c_str()); gpout << "set terminal png\n" << "set xlabel 'accuracy of pole cancellation [decimal places]'\n" << "set ylabel 'counts\n" << "set xrange [-20:0]\n" << "set output '" << prefix << fname.str() << ".png'\n" << "plot '" << prefix << fname.str() << ".dat' using (0.5*($1+$2)):3 with linespoints pt 7 ps 1 not"; } void MatchboxMEBase::AccuracyHistogram::persistentOutput(PersistentOStream& os) const { os << lower << upper << bins << sameSign << oppositeSign << nans << overflow << underflow; } void MatchboxMEBase::AccuracyHistogram::persistentInput(PersistentIStream& is) { is >> lower >> upper >> bins >> sameSign >> oppositeSign >> nans >> overflow >> underflow; } void MatchboxMEBase::logPoles() const { double res2me = oneLoopDoublePole(); double res1me = oneLoopSinglePole(); double res2i = 0.; double res1i = 0.; for ( vector<Ptr<MatchboxInsertionOperator>::ptr>::const_iterator v = virtuals().begin(); v != virtuals().end(); ++v ) { res2i += (**v).oneLoopDoublePole(); res1i += (**v).oneLoopSinglePole(); } if (res2me != 0.0 || res2i != 0.0) epsilonSquarePoleHistograms[mePartonData()].book(res2me,res2i); if (res1me != 0.0 || res1i != 0.0) epsilonPoleHistograms[mePartonData()].book(res1me,res1i); } bool MatchboxMEBase::haveOneLoop() const { if ( matchboxAmplitude() ) return matchboxAmplitude()->haveOneLoop(); return false; } bool MatchboxMEBase::onlyOneLoop() const { if ( matchboxAmplitude() ) return matchboxAmplitude()->onlyOneLoop(); return false; } bool MatchboxMEBase::isDRbar() const { if ( matchboxAmplitude() ) return matchboxAmplitude()->isDRbar(); return false; } bool MatchboxMEBase::isDR() const { if ( matchboxAmplitude() ) return matchboxAmplitude()->isDR(); return false; } bool MatchboxMEBase::isCS() const { if ( matchboxAmplitude() ) return matchboxAmplitude()->isCS(); return false; } bool MatchboxMEBase::isBDK() const { if ( matchboxAmplitude() ) return matchboxAmplitude()->isBDK(); return false; } bool MatchboxMEBase::isExpanded() const { if ( matchboxAmplitude() ) return matchboxAmplitude()->isExpanded(); return false; } Energy2 MatchboxMEBase::mu2() const { if ( matchboxAmplitude() ) return matchboxAmplitude()->mu2(); return 0*GeV2; } double MatchboxMEBase::oneLoopDoublePole() const { if ( matchboxAmplitude() ) { return matchboxAmplitude()->oneLoopDoublePole()* me2Norm(1); } return 0.; } double MatchboxMEBase::oneLoopSinglePole() const { if ( matchboxAmplitude() ) { return matchboxAmplitude()->oneLoopSinglePole()* me2Norm(1); } return 0.; } vector<Ptr<SubtractionDipole>::ptr> MatchboxMEBase::getDipoles(const vector<Ptr<SubtractionDipole>::ptr>& dipoles, const vector<Ptr<MatchboxMEBase>::ptr>& borns) const { vector<Ptr<SubtractionDipole>::ptr> res; // keep track of the dipoles we already did set up set<pair<pair<pair<int,int>,int>,pair<Ptr<MatchboxMEBase>::tptr,Ptr<SubtractionDipole>::tptr> > > done; cPDVector rep = diagrams().front()->partons(); int nreal = rep.size(); // now loop over configs for ( int emitter = 0; emitter < nreal; ++emitter ) { list<Ptr<SubtractionDipole>::ptr> matchDipoles; for ( vector<Ptr<SubtractionDipole>::ptr>::const_iterator d = dipoles.begin(); d != dipoles.end(); ++d ) { if ( !(**d).canHandleEmitter(rep,emitter) ) continue; matchDipoles.push_back(*d); } if ( matchDipoles.empty() ) continue; for ( int emission = 2; emission < nreal; ++emission ) { if ( emission == 
emitter ) continue; list<Ptr<SubtractionDipole>::ptr> matchDipoles2; for ( list<Ptr<SubtractionDipole>::ptr>::const_iterator d = matchDipoles.begin(); d != matchDipoles.end(); ++d ) { if ( !(**d).canHandleSplitting(rep,emitter,emission) ) continue; matchDipoles2.push_back(*d); } if ( matchDipoles2.empty() ) continue; map<Ptr<DiagramBase>::ptr,SubtractionDipole::MergeInfo> mergeInfo; for ( DiagramVector::const_iterator d = diagrams().begin(); d != diagrams().end(); ++d ) { Ptr<Tree2toNDiagram>::ptr check = new_ptr(Tree2toNDiagram(*dynamic_ptr_cast<Ptr<Tree2toNDiagram>::ptr>(*d))); map<int,int> theMergeLegs; for ( unsigned int i = 0; i < check->external().size(); ++i ) theMergeLegs[i] = -1; int theEmitter = check->mergeEmission(emitter,emission,theMergeLegs); // no underlying Born if ( theEmitter == -1 ) continue; SubtractionDipole::MergeInfo info; info.diagram = check; info.emitter = theEmitter; info.mergeLegs = theMergeLegs; mergeInfo[*d] = info; } if ( mergeInfo.empty() ) continue; for ( int spectator = 0; spectator < nreal; ++spectator ) { if ( spectator == emitter || spectator == emission ) continue; list<Ptr<SubtractionDipole>::ptr> matchDipoles3; for ( list<Ptr<SubtractionDipole>::ptr>::const_iterator d = matchDipoles2.begin(); d != matchDipoles2.end(); ++d ) { if ( !(**d).canHandleSpectator(rep,spectator) ) continue; matchDipoles3.push_back(*d); } if ( matchDipoles3.empty() ) continue; if ( noDipole(emitter,emission,spectator) ) continue; for ( list<Ptr<SubtractionDipole>::ptr>::const_iterator d = matchDipoles3.begin(); d != matchDipoles3.end(); ++d ) { if ( !(**d).canHandle(rep,emitter,emission,spectator) ) continue; for ( vector<Ptr<MatchboxMEBase>::ptr>::const_iterator b = borns.begin(); b != borns.end(); ++b ) { if ( (**b).onlyOneLoop() ) continue; if ( done.find(make_pair(make_pair(make_pair(emitter,emission),spectator),make_pair(*b,*d))) != done.end() ) continue; // now get to work (**d).clearBookkeeping(); (**d).factory(factory()); (**d).realEmitter(emitter); (**d).realEmission(emission); (**d).realSpectator(spectator); (**d).realEmissionME(const_cast<MatchboxMEBase*>(this)); (**d).underlyingBornME(*b); (**d).setupBookkeeping(mergeInfo); if ( !((**d).empty()) ) { res.push_back((**d).cloneMe()); Ptr<SubtractionDipole>::tptr nDipole = res.back(); done.insert(make_pair(make_pair(make_pair(emitter,emission),spectator),make_pair(*b,*d))); if ( nDipole->isSymmetric() ) done.insert(make_pair(make_pair(make_pair(emission,emitter),spectator),make_pair(*b,*d))); ostringstream dname; dname << fullName() << "." << (**b).name() << "." << (**d).name() << ".[(" << emitter << "," << emission << ")," << spectator << "]"; if ( ! (generator()->preinitRegister(nDipole,dname.str()) ) ) throw Exception() << "MatchboxMEBase::getDipoles(): Dipole " << dname.str() << " already existing." 
<< Exception::runerror; if ( !factory()->reweighters().empty() ) { for ( vector<ReweightPtr>::const_iterator rw = factory()->reweighters().begin(); rw != factory()->reweighters().end(); ++rw ) nDipole->addReweighter(*rw); } if ( !factory()->preweighters().empty() ) { for ( vector<ReweightPtr>::const_iterator rw = factory()->preweighters().begin(); rw != factory()->preweighters().end(); ++rw ) nDipole->addPreweighter(*rw); } nDipole->cloneDependencies(dname.str()); } } } } } } vector<Ptr<SubtractionDipole>::tptr> partners; copy(res.begin(),res.end(),back_inserter(partners)); for ( vector<Ptr<SubtractionDipole>::ptr>::iterator d = res.begin(); d != res.end(); ++d ) (**d).partnerDipoles(partners); return res; } double MatchboxMEBase::colourCorrelatedME2(pair<int,int> ij) const { if ( matchboxAmplitude() ) { if ( matchboxAmplitude()->treeAmplitudes() ) matchboxAmplitude()->prepareAmplitudes(this); double res = matchboxAmplitude()->colourCorrelatedME2(ij)* me2Norm(); return res; } throw Exception() << "MatchboxMEBase::colourCorrelatedME2() expects a MatchboxAmplitude object.\n" << "Please check your setup." << Exception::runerror; return 0.; } double MatchboxMEBase::largeNColourCorrelatedME2(pair<int,int> ij, Ptr<ColourBasis>::tptr largeNBasis) const { if ( matchboxAmplitude() ) { if ( matchboxAmplitude()->treeAmplitudes() ) { largeNBasis->prepare(mePartonData(),false); matchboxAmplitude()->prepareAmplitudes(this); } double res = matchboxAmplitude()->largeNColourCorrelatedME2(ij,largeNBasis)* me2Norm(); return res; } throw Exception() << "MatchboxMEBase::largeNColourCorrelatedME2() expects a MatchboxAmplitude object.\n" << "Please check your setup." << Exception::runerror; return 0.; } double MatchboxMEBase::spinColourCorrelatedME2(pair<int,int> ij, const SpinCorrelationTensor& c) const { if ( matchboxAmplitude() ) { if ( matchboxAmplitude()->treeAmplitudes() ) matchboxAmplitude()->prepareAmplitudes(this); double res = matchboxAmplitude()->spinColourCorrelatedME2(ij,c)* me2Norm(); return res; } throw Exception() << "MatchboxMEBase::spinColourCorrelatedME2() expects a MatchboxAmplitude object.\n" << "Please check your setup." << Exception::runerror; return 0.; } double MatchboxMEBase::spinCorrelatedME2(pair<int,int> ij, const SpinCorrelationTensor& c) const { if ( matchboxAmplitude() ) { if ( matchboxAmplitude()->treeAmplitudes() ) matchboxAmplitude()->prepareAmplitudes(this); double res = matchboxAmplitude()->spinCorrelatedME2(ij,c)* me2Norm(); return res; } throw Exception() << "MatchboxMEBase::spinCorrelatedME2() expects a MatchboxAmplitude object.\n" << "Please check your setup." << Exception::runerror; return 0.; } void MatchboxMEBase::flushCaches() { MEBase::flushCaches(); if ( matchboxAmplitude() ) matchboxAmplitude()->flushCaches(); for ( vector<Ptr<MatchboxReweightBase>::ptr>::iterator r = reweights().begin(); r != reweights().end(); ++r ) { (**r).flushCaches(); } for ( vector<Ptr<MatchboxInsertionOperator>::ptr>::const_iterator v = virtuals().begin(); v != virtuals().end(); ++v ) { (**v).flushCaches(); } } void MatchboxMEBase::print(ostream& os) const { os << "--- MatchboxMEBase setup -------------------------------------------------------\n"; os << " '" << name() << "' for subprocess:\n"; os << " "; for ( PDVector::const_iterator pp = subProcess().legs.begin(); pp != subProcess().legs.end(); ++pp ) { os << (**pp).PDGName() << " "; if ( pp == subProcess().legs.begin() + 1 ) os << "-> "; } os << "\n"; os << " including " << (oneLoop() ? 
"" : "no ") << "virtual corrections"; if ( oneLoopNoBorn() ) os << " without Born contributions"; if ( oneLoopNoLoops() ) os << " without loop contributions"; os << "\n"; if ( oneLoop() && !onlyOneLoop() ) { os << " using insertion operators\n"; for ( vector<Ptr<MatchboxInsertionOperator>::ptr>::const_iterator v = virtuals().begin(); v != virtuals().end(); ++v ) { os << " '" << (**v).name() << "' with " << ((**v).isDR() ? "" : "C") << "DR/"; if ( (**v).isCS() ) os << "CS"; if ( (**v).isBDK() ) os << "BDK"; if ( (**v).isExpanded() ) os << "expanded"; os << " conventions\n"; } } os << "--------------------------------------------------------------------------------\n"; os << flush; } void MatchboxMEBase::printLastEvent(ostream& os) const { os << "--- MatchboxMEBase last event information --------------------------------------\n"; os << " for matrix element '" << name() << "'\n"; os << " process considered:\n "; int in = 0; for ( cPDVector::const_iterator p = mePartonData().begin(); p != mePartonData().end(); ++p ) { os << (**p).PDGName() << " "; if ( ++in == 2 ) os << " -> "; } os << " kinematic environment as set by the XComb " << lastXCombPtr() << ":\n" << " sqrt(shat)/GeV = " << sqrt(lastSHat()/GeV2) << " x1 = " << lastX1() << " x2 = " << lastX2() << " alphaS = " << lastAlphaS() << "\n"; os << " momenta/GeV generated from random numbers\n "; copy(lastXComb().lastRandomNumbers().begin(), lastXComb().lastRandomNumbers().end(),ostream_iterator<double>(os," ")); os << ":\n "; for ( vector<Lorentz5Momentum>::const_iterator p = meMomenta().begin(); p != meMomenta().end(); ++p ) { os << (*p/GeV) << "\n "; } os << "last cross section/nb calculated was:\n " << (lastMECrossSection()/nanobarn) << " (pdf weight " << lastMEPDFWeight() << ")\n"; os << "--------------------------------------------------------------------------------\n"; os << flush; } void MatchboxMEBase::logGenerateKinematics(const double * r) const { if ( !verbose() ) return; generator()->log() << "'" << name() << "' generated kinematics\nfrom " << nDim() << " random numbers:\n"; copy(r,r+nDim(),ostream_iterator<double>(generator()->log()," ")); generator()->log() << "\n"; generator()->log() << "storing phase space information in XComb " << lastXCombPtr() << "\n"; generator()->log() << "generated phase space point (in GeV):\n"; vector<Lorentz5Momentum>::const_iterator pit = meMomenta().begin(); cPDVector::const_iterator dit = mePartonData().begin(); for ( ; pit != meMomenta().end() ; ++pit, ++dit ) generator()->log() << (**dit).PDGName() << " : " << (*pit/GeV) << "\n"; generator()->log() << "with x1 = " << lastX1() << " x2 = " << lastX2() << "\n" << "and Jacobian = " << jacobian() << " sHat/GeV2 = " << (lastSHat()/GeV2) << "\n" << flush; } void MatchboxMEBase::logSetScale() const { if ( !verbose() ) return; generator()->log() << "'" << name() << "' set scales using XComb " << lastXCombPtr() << ":\n" << "scale/GeV2 = " << (scale()/GeV2) << " xi_R = " << renormalizationScaleFactor() << " xi_F = " << factorizationScaleFactor() << "\n" << "alpha_s = " << lastAlphaS() << "\n" << flush; } void MatchboxMEBase::logPDFWeight() const { if ( !verbose() ) return; generator()->log() << "'" << name() << "' calculated pdf weight = " << lastMEPDFWeight() << " from XComb " << lastXCombPtr() << "\n" << "x1 = " << lastX1() << " (" << (mePartonData()[0]->coloured() ? "" : "not ") << "used) " << "x2 = " << lastX2() << " (" << (mePartonData()[1]->coloured() ? 
"" : "not ") << "used)\n" << flush; } void MatchboxMEBase::logME2() const { if ( !verbose() ) return; generator()->log() << "'" << name() << "' evaluated me2 using XComb " << lastXCombPtr() << "\n" << "and phase space point (in GeV):\n"; vector<Lorentz5Momentum>::const_iterator pit = meMomenta().begin(); cPDVector::const_iterator dit = mePartonData().begin(); for ( ; pit != meMomenta().end() ; ++pit, ++dit ) generator()->log() << (**dit).PDGName() << " : " << (*pit/GeV) << "\n"; generator()->log() << "with x1 = " << lastX1() << " x2 = " << lastX2() << "\n" << "sHat/GeV2 = " << (lastSHat()/GeV2) << "\n" << flush; } void MatchboxMEBase::logDSigHatDR() const { if ( !verbose() ) return; generator()->log() << "'" << name() << "' evaluated cross section using XComb " << lastXCombPtr() << "\n" << "Jacobian = " << jacobian() << " sHat/GeV2 = " << (lastSHat()/GeV2) << " dsig/nb = " << (lastMECrossSection()/nanobarn) << "\n" << flush; } void MatchboxMEBase::cloneDependencies(const std::string& prefix) { if ( phasespace() ) { Ptr<MatchboxPhasespace>::ptr myPhasespace = phasespace()->cloneMe(); ostringstream pname; pname << (prefix == "" ? fullName() : prefix) << "/" << myPhasespace->name(); if ( ! (generator()->preinitRegister(myPhasespace,pname.str()) ) ) throw Exception() << "MatchboxMEBase::cloneDependencies(): Phasespace generator " << pname.str() << " already existing." << Exception::runerror; myPhasespace->cloneDependencies(pname.str()); phasespace(myPhasespace); } theAmplitude = dynamic_ptr_cast<Ptr<MatchboxAmplitude>::ptr>(amplitude()); if ( matchboxAmplitude() ) { Ptr<MatchboxAmplitude>::ptr myAmplitude = matchboxAmplitude()->cloneMe(); ostringstream pname; pname << (prefix == "" ? fullName() : prefix) << "/" << myAmplitude->name(); if ( ! (generator()->preinitRegister(myAmplitude,pname.str()) ) ) throw Exception() << "MatchboxMEBase::cloneDependencies(): Amplitude " << pname.str() << " already existing." << Exception::runerror; myAmplitude->cloneDependencies(pname.str()); matchboxAmplitude(myAmplitude); amplitude(myAmplitude); matchboxAmplitude()->orderInGs(orderInAlphaS()); matchboxAmplitude()->orderInGem(orderInAlphaEW()); } if ( scaleChoice() ) { Ptr<MatchboxScaleChoice>::ptr myScaleChoice = scaleChoice()->cloneMe(); ostringstream pname; pname << (prefix == "" ? fullName() : prefix) << "/" << myScaleChoice->name(); if ( ! (generator()->preinitRegister(myScaleChoice,pname.str()) ) ) throw Exception() << "MatchboxMEBase::cloneDependencies(): Scale choice " << pname.str() << " already existing." << Exception::runerror; scaleChoice(myScaleChoice); } for ( vector<Ptr<MatchboxReweightBase>::ptr>::iterator rw = theReweights.begin(); rw != theReweights.end(); ++rw ) { Ptr<MatchboxReweightBase>::ptr myReweight = (**rw).cloneMe(); ostringstream pname; pname << (prefix == "" ? fullName() : prefix) << "/" << (**rw).name(); if ( ! (generator()->preinitRegister(myReweight,pname.str()) ) ) throw Exception() << "MatchboxMEBase::cloneDependencies(): Reweight " << pname.str() << " already existing." << Exception::runerror; myReweight->cloneDependencies(pname.str()); *rw = myReweight; } for ( vector<Ptr<MatchboxInsertionOperator>::ptr>::iterator v = virtuals().begin(); v != virtuals().end(); ++v ) { Ptr<MatchboxInsertionOperator>::ptr myIOP = (**v).cloneMe(); ostringstream pname; pname << (prefix == "" ? fullName() : prefix) << "/" << (**v).name(); if ( ! 
(generator()->preinitRegister(myIOP,pname.str()) ) )
      throw Exception() << "MatchboxMEBase::cloneDependencies(): Insertion operator "
                        << pname.str() << " already existing." << Exception::runerror;
    *v = myIOP;
  }
}

void MatchboxMEBase::prepareXComb(MatchboxXCombData& xc) const {

  // FIXME: we need to pass on the partons from the XComb here,
  // rather than assuming one subprocess per matrix element
  if ( phasespace() )
    xc.nDimPhasespace(phasespace()->nDim(diagrams().front()->partons()));

  if ( matchboxAmplitude() ) {
    xc.nDimAmplitude(matchboxAmplitude()->nDimAdditional());
    if ( matchboxAmplitude()->colourBasis() ) {
      size_t cdim = matchboxAmplitude()->colourBasis()->prepare(diagrams(),noCorrelations());
      xc.colourBasisDim(cdim);
    }
    if ( matchboxAmplitude()->isExternal() ) {
      xc.externalId(matchboxAmplitude()->externalId(diagrams().front()->partons()));
    }
  }

  int insertionAdd = 0;
  for ( vector<Ptr<MatchboxInsertionOperator>::ptr>::const_iterator v = virtuals().begin();
        v != virtuals().end(); ++v ) {
    insertionAdd = max(insertionAdd,(**v).nDimAdditional());
  }
  xc.nDimInsertions(insertionAdd);

  xc.nLight(getNLight());
  for (size_t inlv=0; inlv<getNLightJetVec().size(); ++inlv)
    xc.nLightJetVec(getNLightJetVec()[inlv]);
  for (size_t inhv=0; inhv<getNHeavyJetVec().size(); ++inhv)
    xc.nHeavyJetVec(getNHeavyJetVec()[inhv]);
  for (size_t inlpv=0; inlpv<getNLightProtonVec().size(); ++inlpv)
    xc.nLightProtonVec(getNLightProtonVec()[inlpv]);

  xc.olpId(olpProcess());

  if ( initVerbose() ) {
    ostringstream fname_strm;
    // only allow alphanumeric characters and a few explicit replacements in the
    // filename; everything else is mapped to '_'
    BOOST_FOREACH (const char c, name()) {
      switch (c) {
      case '+' : fname_strm << "+"; break;
      case '-' : fname_strm << "-"; break;
      case '~' : fname_strm << "_tilde"; break;
      case ']' : break;
      case ',' : fname_strm << "__"; break;
      default : fname_strm << (isalnum(c) ?
c : '_'); break; } } fname_strm << ".diagrams"; const string fname = fname_strm.str(); ifstream test(fname.c_str()); if ( !test ) { test.close(); ofstream out(fname.c_str()); for ( vector<Ptr<DiagramBase>::ptr>::const_iterator d = diagrams().begin(); d != diagrams().end(); ++d ) { DiagramDrawer::drawDiag(out,dynamic_cast<const Tree2toNDiagram&>(**d)); out << "\n"; } } } } StdXCombPtr MatchboxMEBase::makeXComb(Energy newMaxEnergy, const cPDPair & inc, tEHPtr newEventHandler,tSubHdlPtr newSubProcessHandler, tPExtrPtr newExtractor, tCascHdlPtr newCKKW, const PBPair & newPartonBins, tCutsPtr newCuts, const DiagramVector & newDiagrams, bool mir, const PartonPairVec&, tStdXCombPtr newHead, tMEPtr newME) { if ( !newME ) newME = this; Ptr<MatchboxXComb>::ptr xc = new_ptr(MatchboxXComb(newMaxEnergy, inc, newEventHandler, newSubProcessHandler, newExtractor, newCKKW, newPartonBins, newCuts, newME, newDiagrams, mir, newHead)); prepareXComb(*xc); return xc; } StdXCombPtr MatchboxMEBase::makeXComb(tStdXCombPtr newHead, const PBPair & newPartonBins, const DiagramVector & newDiagrams, tMEPtr newME) { if ( !newME ) newME = this; Ptr<MatchboxXComb>::ptr xc = new_ptr(MatchboxXComb(newHead, newPartonBins, newME, newDiagrams)); prepareXComb(*xc); return xc; } void MatchboxMEBase::persistentOutput(PersistentOStream & os) const { os << theLastXComb << theFactory << thePhasespace << theAmplitude << theScaleChoice << theVirtuals << theReweights << theSubprocess << theOneLoop << theOneLoopNoBorn << theOneLoopNoLoops << epsilonSquarePoleHistograms << epsilonPoleHistograms << theOLPProcess << theNoCorrelations << theHavePDFs << checkedPDFs<<theDiagramWeightVerboseDown<<theDiagramWeightVerboseUp; } void MatchboxMEBase::persistentInput(PersistentIStream & is, int) { is >> theLastXComb >> theFactory >> thePhasespace >> theAmplitude >> theScaleChoice >> theVirtuals >> theReweights >> theSubprocess >> theOneLoop >> theOneLoopNoBorn >> theOneLoopNoLoops >> epsilonSquarePoleHistograms >> epsilonPoleHistograms >> theOLPProcess >> theNoCorrelations >> theHavePDFs >> checkedPDFs>>theDiagramWeightVerboseDown>>theDiagramWeightVerboseUp; lastMatchboxXComb(theLastXComb); } void MatchboxMEBase::Init() { static ClassDocumentation<MatchboxMEBase> documentation ("MatchboxMEBase is the base class for matrix elements " "in the context of the matchbox NLO interface."); } IBPtr MatchboxMEBase::clone() const { return new_ptr(*this); } IBPtr MatchboxMEBase::fullclone() const { return new_ptr(*this); } void MatchboxMEBase::doinit() { MEBase::doinit(); if ( !theAmplitude ) theAmplitude = dynamic_ptr_cast<Ptr<MatchboxAmplitude>::ptr>(amplitude()); if ( matchboxAmplitude() ) matchboxAmplitude()->init(); if ( phasespace() ) { phasespace()->init(); matchboxAmplitude()->checkReshuffling(phasespace()); } if ( scaleChoice() ) { scaleChoice()->init(); } for ( vector<Ptr<MatchboxReweightBase>::ptr>::iterator rw = theReweights.begin(); rw != theReweights.end(); ++rw ) { (**rw).init(); } for ( vector<Ptr<MatchboxInsertionOperator>::ptr>::iterator v = virtuals().begin(); v != virtuals().end(); ++v ) { (**v).init(); } } void MatchboxMEBase::bookMEoverDiaWeight(double x) const { if (MEoverDiaWeight.size()==0){ theDiagramWeightVerboseDown=min(theDiagramWeightVerboseDown,x*0.9); theDiagramWeightVerboseUp=max(theDiagramWeightVerboseUp,x*1.1); } map<double,double>::iterator bx =MEoverDiaWeight.upper_bound(x); if ( bx == MEoverDiaWeight.end() ) { return; } bx->second += 1.; Nevents++; if (int(Nevents)%1000==0){ ofstream 
out((RunDirectories::runStorage()+"/"+name()+"-MeoDiaW.dat").c_str()); int i=0; double m=0.; for ( map<double,double>::const_iterator bx = MEoverDiaWeight.begin();bx != MEoverDiaWeight.end(); ++bx,i++ ) { out << " " << bx->first<<" "<<( bx->second/double(Nevents))<<"\n "; m=max(m,bx->second/double(Nevents)); } out.close(); ofstream gpout((RunDirectories::runStorage()+"/"+name()+"-MeoDiaW.gp").c_str()); gpout << "set terminal epslatex color solid\n" << "set output '" << name()<<"-MeoDiaW"<< "-plot.tex'\n" << "#set logscale x\n" << "set xrange [" << theDiagramWeightVerboseDown << ":" << theDiagramWeightVerboseUp << "]\n" << "set yrange [0.:"<<(m*0.95)<<"]\n" << "set xlabel '$log(ME/\\sum DiaW)$'\n" << "set size 0.7,0.7\n" << "plot 1 w lines lc rgbcolor \"#DDDDDD\" notitle, '" << name()<<"-MeoDiaW" << ".dat' with histeps lc rgbcolor \"#00AACC\" t '$"<<name()<<"$'"; gpout.close(); } } void MatchboxMEBase::doinitrun() { MEBase::doinitrun(); if ( matchboxAmplitude() ) matchboxAmplitude()->initrun(); if ( phasespace() ) { phasespace()->initrun(); } if ( scaleChoice() ) { scaleChoice()->initrun(); } for ( vector<Ptr<MatchboxReweightBase>::ptr>::iterator rw = theReweights.begin(); rw != theReweights.end(); ++rw ) { (**rw).initrun(); } for ( vector<Ptr<MatchboxInsertionOperator>::ptr>::iterator v = virtuals().begin(); v != virtuals().end(); ++v ) { (**v).initrun(); } if ( factory()->verboseDia() ) { for ( int k = 0; k < factory()->diagramWeightVerboseNBins() ; ++k ) { MEoverDiaWeight[theDiagramWeightVerboseDown+ double(k)*(theDiagramWeightVerboseUp- theDiagramWeightVerboseDown) /double(factory()->diagramWeightVerboseNBins()) ] = 0.; } Nevents=0.; ofstream out("DiagramWeights.sh"); out<<"P=$(pwd)" <<"\ncd "<<RunDirectories::runStorage() <<"\nrm -f DiagramWeights.tex" <<"\n echo \"\\documentclass{article}\" >> DiagramWeights.tex" <<"\n echo \"\\usepackage{amsmath,amsfonts,amssymb,graphicx,color}\" >> DiagramWeights.tex" <<"\n echo \"\\usepackage[left=2cm,right=2cm,top=2cm,bottom=2cm]{geometry}\" >> DiagramWeights.tex" <<"\n echo \"\\begin{document}\" >> DiagramWeights.tex" <<"\n echo \"\\setlength{\\parindent}{0cm}\" >> DiagramWeights.tex" <<"\n\n for i in $(ls *.gp | sed s/'\\.gp'//g) ; " <<"\n do" <<"\n echo \"\\input{\"\"$i\"-plot\"}\" >> DiagramWeights.tex" <<"\n done" <<"\n echo \"\\end{document}\" >> DiagramWeights.tex " <<"\n for i in *.gp ; do " <<"\n gnuplot $i " <<"\n done " <<"\n pdflatex DiagramWeights.tex \ncp DiagramWeights.pdf $P"; out.close(); } } void MatchboxMEBase::dofinish() { MEBase::dofinish(); for ( map<cPDVector,AccuracyHistogram>::const_iterator b = epsilonSquarePoleHistograms.begin(); b != epsilonSquarePoleHistograms.end(); ++b ) { b->second.dump(factory()->poleData(),"epsilonSquarePoles-",b->first); } for ( map<cPDVector,AccuracyHistogram>::const_iterator b = epsilonPoleHistograms.begin(); b != epsilonPoleHistograms.end(); ++b ) { b->second.dump(factory()->poleData(),"epsilonPoles-",b->first); } } // *** Attention *** The following static variable is needed for the type // description system in ThePEG. Please check that the template arguments // are correct (the class and its base class), and that the constructor // arguments are correct (the class name and the name of the dynamically // loadable library where the class implementation can be found). 
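// Illustration only (not part of the original source): a concrete matrix element
// derived from MatchboxMEBase would be registered with ThePEG's type description
// system in the same way as the line below; the class name and library name used
// here are hypothetical.
//
//   DescribeClass<MyMatchboxME,MatchboxMEBase>
//     describeHerwigMyMatchboxME("Herwig::MyMatchboxME", "MyMatchboxME.so");
//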
DescribeClass<MatchboxMEBase,MEBase> describeHerwigMatchboxMEBase("Herwig::MatchboxMEBase", "Herwig.so");

diff --git a/MatrixElement/Matchbox/Builtin/Amplitudes/MatchboxCurrents.cc b/MatrixElement/Matchbox/Builtin/Amplitudes/MatchboxCurrents.cc
--- a/MatrixElement/Matchbox/Builtin/Amplitudes/MatchboxCurrents.cc
+++ b/MatrixElement/Matchbox/Builtin/Amplitudes/MatchboxCurrents.cc
@@ -1,2861 +1,2861 @@
// -*- C++ -*-
//
// MatchboxCurrents.cc is a part of Herwig - A multi-purpose Monte Carlo event generator
// Copyright (C) 2002-2012 The Herwig Collaboration
//
// Herwig is licenced under version 2 of the GPL, see COPYING for details.
// Please respect the MCnet academic guidelines, see GUIDELINES for details.
//
#include "MatchboxCurrents.h"
#include "Herwig/Utilities/Maths.h"

using namespace Herwig;
using namespace Herwig::Math;
using Constants::pi;

namespace {

const static LorentzVector<Complex> czero(0.,0.,0.,0.);

inline Complex csqr(const Complex & a) { return a * a; }

inline double theta(const double x) { if ( x >= 0. ) return 1.; return 0.; }

inline double sign(const double x) { if ( x >= 0. ) return 1.; return -1.; }

// quick'n'dirty fix to template troubles
Complex operator * (const Complex& a, const double b) { return Complex(a.real() * b,a.imag() * b); }
Complex operator * (const double b, const Complex& a) { return Complex(a.real() * b,a.imag() * b); }
Complex operator+(const Complex& a, const double b) { return Complex(a.real()+b,a.imag()); }
Complex operator+(const double b, const Complex& a) { return Complex(a.real()+b,a.imag()); }
Complex operator-(const Complex& a, const double b) { return Complex(a.real()-b,a.imag()); }
Complex operator-(const double b, const Complex& a) { return Complex(b-a.real(),-a.imag()); }
// end fix, needs to be looked at in ThePEG/Config/

}

// Project the massive lepton pair onto two massless momenta, p -> a*p + b*pbar,
// with coefficients built from Delta = (pl*plbar)^2 - pl^2 plbar^2, so that the
// massless spinor-helicity machinery below can be used; the original masses are
// passed on separately.
void MatchboxCurrents::setupLeptons(const int l, const Lorentz5Momentum& pl, const int lbar, const Lorentz5Momentum& plbar) {
  const Energy4 Delta = (sqr(pl*plbar) - (pl*pl)*(plbar*plbar));
  const Energy2 prod = pl*plbar;
  // Variable to contain the sign of pl*plbar
  double sgn;
  if (prod < ZERO ) {sgn = -1;} else if (prod > ZERO) {sgn = 1;} else {sgn = 0;}
  InvEnergy2 fact = 0.5/(sgn*sqrt(Delta));
  Lorentz5Momentum lmassless = ( double(fact*(sgn*sqrt(Delta) + prod))*pl - double(fact*( pl*pl))*plbar );
  Lorentz5Momentum lbarmassless = ( double(fact*(sgn*sqrt(Delta) + prod))*plbar - double(fact*(plbar*plbar))*pl );
  lmassless.setMass(ZERO); lmassless.rescaleEnergy();
  lbarmassless.setMass(ZERO); lbarmassless.rescaleEnergy();
  if ( pl.t() < ZERO ) lmassless.setT(-lmassless.t());
  if ( plbar.t() < ZERO ) lbarmassless.setT(-lbarmassless.t());
  momentum(l,lmassless,true,pl.mass());
  momentum(lbar,lbarmassless,true,plbar.mass());
}

// The same massless projection for a quark-antiquark pair.
void MatchboxCurrents::setupQuarks(const int q, const Lorentz5Momentum& pq, const int qbar, const Lorentz5Momentum& pqbar) {
  const Energy4 Delta = (sqr(pq*pqbar) - (pq*pq)*(pqbar*pqbar));
  const Energy2 prod = pq*pqbar;
  // Variable to contain the sign of pq*pqbar
  double sgn;
  if (prod < ZERO) {sgn = -1;} else if (prod > ZERO) {sgn = 1;} else {sgn = 0;}
  InvEnergy2 fact = 0.5/(sgn*sqrt(Delta));
  Lorentz5Momentum qmassless = ( double(fact*(sgn*sqrt(Delta) + prod))*pq - double(fact*(pq*pq))*pqbar );
  Lorentz5Momentum qbarmassless = ( double(fact*(sgn*sqrt(Delta) + prod))*pqbar - double(fact*(pqbar*pqbar))*pq );
  qmassless.setMass(ZERO); qmassless.rescaleEnergy();
  qbarmassless.setMass(ZERO); qbarmassless.rescaleEnergy();
  if ( pq.t() < ZERO ) qmassless.setT(-qmassless.t());
  if ( pqbar.t() < ZERO )
qbarmassless.setT(-qbarmassless.t()); momentum(q,qmassless,true,pq.mass()); momentum(qbar,qbarmassless,true,pqbar.mass()); } const LorentzVector<Complex>& MatchboxCurrents::llbarLeftCurrent(const int l, const int lHel, const int lbar, const int lbarHel) { if ( getCurrent(hash<0>(1,1,l,lHel,lbar,lbarHel)) ) { if ( lHel == 1 && lbarHel == 1 ) cacheCurrent(Complex(0.,1.) * minusCurrent(l,lbar)); if ( lHel == 1 && lbarHel == -1 ) cacheCurrent((Complex(0.,2.) * mass(lbar)/plusProduct(l,lbar)) * momentum(l)); if ( lHel == -1 && lbarHel == 1 ) cacheCurrent((Complex(0.,-2.) * mass(l)/minusProduct(l,lbar)) * momentum(lbar)); if ( lHel == -1 && lbarHel == -1 ) cacheCurrent((Complex(0.,1.) * mass(l) * mass(lbar)/invariant(l,lbar)) * minusCurrent(lbar,l)); } return cachedCurrent(); } const LorentzVector<Complex>& MatchboxCurrents::llbarRightCurrent(const int l, const int lHel, const int lbar, const int lbarHel) { if ( getCurrent(hash<0>(2,1,l,lHel,lbar,lbarHel)) ) { if ( lHel == 1 && lbarHel == 1 ) cacheCurrent((Complex(0.,1.) * mass(l) * mass(lbar)/invariant(l,lbar)) * minusCurrent(l,lbar)); if ( lHel == 1 && lbarHel == -1 ) cacheCurrent((Complex(0.,-2.) * mass(l)/plusProduct(l,lbar)) * momentum(lbar)); if ( lHel == -1 && lbarHel == 1 ) cacheCurrent((Complex(0.,2.) * mass(lbar)/minusProduct(l,lbar)) * momentum(l)); if ( lHel == -1 && lbarHel == -1 ) cacheCurrent(Complex(0.,1.) * minusCurrent(lbar,l)); } return cachedCurrent(); } const LorentzVector<Complex>& MatchboxCurrents::qqbarLeftCurrent(const int q, const int qHel, const int qbar, const int qbarHel) { if ( getCurrent(hash<1>(1,1,q,qHel,qbar,qbarHel)) ) { if ( qHel == 1 && qbarHel == 1 ) cacheCurrent(Complex(0.,1.) * minusCurrent(q,qbar)); if ( qHel == 1 && qbarHel == -1 ) cacheCurrent((Complex(0.,2.) * mass(qbar)/plusProduct(q,qbar)) * momentum(q)); if ( qHel == -1 && qbarHel == 1 ) cacheCurrent((Complex(0.,-2.) * mass(q)/minusProduct(q,qbar)) * momentum(qbar)); if ( qHel == -1 && qbarHel == -1 ) cacheCurrent((Complex(0.,1.) * mass(q) * mass(qbar)/invariant(q,qbar)) * minusCurrent(qbar,q)); } #ifdef CHECK_MatchboxCurrents checkCurrent("qqbarLeftCurrent",cachedCurrent(),momentum(q)+momentum(qbar)); #endif return cachedCurrent(); } const LorentzVector<Complex>& MatchboxCurrents::qqbarRightCurrent(const int q, const int qHel, const int qbar, const int qbarHel) { if ( getCurrent(hash<1>(2,1,q,qHel,qbar,qbarHel)) ) { if ( qHel == 1 && qbarHel == 1 ) cacheCurrent((Complex(0.,1.) * mass(q) * mass(qbar)/invariant(q,qbar)) * minusCurrent(q,qbar)); if ( qHel == 1 && qbarHel == -1 ) cacheCurrent((Complex(0.,-2.) * mass(q)/plusProduct(q,qbar)) * momentum(qbar)); if ( qHel == -1 && qbarHel == 1 ) cacheCurrent((Complex(0.,2.) * mass(qbar)/minusProduct(q,qbar)) * momentum(q)); if ( qHel == -1 && qbarHel == -1 ) cacheCurrent(Complex(0.,1.) 
* minusCurrent(qbar,q)); } #ifdef CHECK_MatchboxCurrents checkCurrent("qqbarRightCurrent",cachedCurrent(),momentum(q)+momentum(qbar)); #endif return cachedCurrent(); } const LorentzVector<Complex>& MatchboxCurrents::qqbargLeftCurrent(const int q, const int qHel, const int qbar, const int qbarHel, const int g, const int gHel) { if ( gHel == 1 ) { if ( getCurrent(hash<2>(1,1,q,qHel,qbar,qbarHel,g,gHel)) ) { // Invariant products from propagator denominators const Complex den_i = invariant(q,g) + (sqr(mass(q))/invariant(q,qbar))*invariant(qbar,g); const Complex den_j = invariant(qbar,g) + (sqr(mass(qbar))/invariant(q,qbar))*invariant(q,g); // 2*factor from the spinor definition of the negative helicity gluon // Note that the gluon is outgoing so the polarisation vector of the hel=+1 gluon is conjugated to give the hel=-1 vector const Complex cminus = sqrt(2.0) / minusProduct(g,q); if ( qHel == 1 && qbarHel == 1 ) cacheCurrent( Complex(0.,1.)*cminus*( ((sqr(mass(q))*plusProduct(qbar,g)/(plusProduct(qbar,q)*den_i)) - (minusProduct(qbar,q)*plusProduct(g,qbar)/den_j))*minusCurrent(q, qbar) - (minusProduct(g,q)*plusProduct(g,qbar)/den_j)*minusCurrent(q,g) ) ); if ( qHel == 1 && qbarHel == -1 ) cacheCurrent( Complex(0.,1.)*cminus*(-mass(qbar)/plusProduct(qbar,q)) * ( ((sqr(mass(q))*plusProduct(qbar,g)/(plusProduct(qbar,q)*den_i)) - (plusProduct(qbar,g)*minusProduct(q,qbar)/den_j))*2*momentum(q) + (invariant(q,g)/den_j)*minusCurrent(q,g) ) ); if ( qHel == -1 && qbarHel == 1 ) cacheCurrent( Complex(0.,1.)*cminus*(mass(q)/minusProduct(qbar,q)) * ( ((sqr(mass(q))*plusProduct(g,qbar)/(plusProduct(q,qbar)*den_i)) - (plusProduct(g,qbar)*minusProduct(qbar,q)/den_j))*2*momentum(qbar) - (minusProduct(g,q)*plusProduct(g,qbar)/den_j)*minusCurrent(qbar,g) ) ); if ( qHel == -1 && qbarHel == -1 ) cacheCurrent( Complex(0.,1.)*cminus*(mass(qbar)*mass(q)/(invariant(q,qbar))) * ( ((sqr(mass(q))*plusProduct(g,qbar)/(plusProduct(q,qbar)*den_i)) - (minusProduct(q,qbar)*plusProduct(qbar,g)/den_j))*minusCurrent(qbar,q) + (invariant(q,g)/den_j)*minusCurrent(qbar,g) ) ); } #ifdef CHECK_MatchboxCurrents checkCurrent("qqbargLeftCurrent",cachedCurrent(),momentum(q)+momentum(qbar)+momentum(g)); #endif return cachedCurrent(); } if ( gHel == -1 ) { if ( getCurrent(hash<2>(1,1,q,qHel,qbar,qbarHel,g,gHel)) ) { // Invariant products from propagator denominators const Complex den_i = invariant(q,g) + (sqr(mass(q))/invariant(q,qbar))*invariant(qbar,g); const Complex den_j = invariant(qbar,g) + (sqr(mass(qbar))/invariant(q,qbar))*invariant(q,g); // 2*factor from the spinor definition of the positive helicity gluon const Complex cplus = sqrt(2.0) / plusProduct(q,g); if ( qHel == 1 && qbarHel == 1 ) cacheCurrent( Complex(0.,1.)*cplus*( ((sqr(mass(q))*minusProduct(g,qbar)/(minusProduct(q,qbar)*den_i)) - (minusProduct(qbar,g)*plusProduct(q,qbar)/den_j))*minusCurrent(q, qbar) - (invariant(q,g)/den_i)*minusCurrent(g,qbar) ) ); if ( qHel == 1 && qbarHel == -1 ) cacheCurrent( Complex(0.,1.)*cplus*(-mass(qbar)/plusProduct(qbar,q)) * ( ((sqr(mass(q))*minusProduct(g,qbar)/(minusProduct(q,qbar)*den_i)) - (plusProduct(qbar,q)*minusProduct(g,qbar)/den_j))*2*momentum(q) - (invariant(q,g)/den_i)*minusCurrent(g,q) ) ); if ( qHel == -1 && qbarHel == 1 ) cacheCurrent( Complex(0.,1.)*cplus*(mass(q)/minusProduct(qbar,q)) * ( ((sqr(mass(q))*minusProduct(qbar,g)/(minusProduct(qbar,q)*den_i)) - (minusProduct(qbar,g)*plusProduct(q,qbar)/den_j))*2*momentum(qbar) + (minusProduct(qbar,g)*plusProduct(q,g)/den_i)*minusCurrent(g,qbar) ) ); if ( qHel == -1 && qbarHel 
== -1 ) cacheCurrent( Complex(0.,1.)*cplus*(mass(qbar)*mass(q)/(invariant(q,qbar))) * ( ((sqr(mass(q))*minusProduct(qbar,g)/(minusProduct(qbar,q)*den_i)) - (plusProduct(qbar,q)*minusProduct(g,qbar)/den_j))*minusCurrent(qbar, q) + (minusProduct(qbar,g)*plusProduct(q,g)/den_i)*minusCurrent(g,q) ) ); } #ifdef CHECK_MatchboxCurrents checkCurrent("qqbargLeftCurrent",cachedCurrent(),momentum(q)+momentum(qbar)+momentum(g)); #endif return cachedCurrent(); } return czero; } const LorentzVector<Complex>& MatchboxCurrents::qqbargRightCurrent(const int q, const int qHel, const int qbar, const int qbarHel, const int g, const int gHel) { if ( gHel == 1 ) { if ( getCurrent(hash<2>(2,1,q,qHel,qbar,qbarHel,g,gHel)) ) { // Invariant products from propagator denominators const Complex den_i = invariant(q,g) + (sqr(mass(q))/invariant(q,qbar))*invariant(qbar,g); const Complex den_j = invariant(qbar,g) + (sqr(mass(qbar))/invariant(q,qbar))*invariant(q,g); // 2*factor from the spinor definition of the positive helicity gluon const Complex cminus = sqrt(2.0) / minusProduct(g,q); if ( qHel == 1 && qbarHel == 1 ) cacheCurrent( Complex(0.,1.)*cminus*(mass(qbar)*mass(q)/(invariant(q,qbar))) * ( ((sqr(mass(q))*plusProduct(qbar,g)/(plusProduct(qbar,q)*den_i)) - (minusProduct(qbar,q)*plusProduct(g,qbar)/den_j))*plusCurrent(qbar, q) + (plusProduct(qbar,g)*minusProduct(q,g)/den_i)*plusCurrent(g,q) ) ); if ( qHel == 1 && qbarHel == -1 ) cacheCurrent( Complex(0.,1.)*cminus*(mass(q)/plusProduct(qbar,q)) * ( ((sqr(mass(q))*plusProduct(qbar,g)/(plusProduct(qbar,q)*den_i)) - (plusProduct(qbar,g)*minusProduct(q,qbar)/den_j))*2*momentum(qbar) + (plusProduct(qbar,g)*minusProduct(q,g)/den_i)*plusCurrent(g,qbar) ) ); if ( qHel == -1 && qbarHel == 1 ) cacheCurrent( Complex(0.,1.)*cminus*(-mass(qbar)/minusProduct(qbar,q)) * ( ((sqr(mass(q))*plusProduct(g,qbar)/(plusProduct(q,qbar)*den_i)) - (minusProduct(qbar,q)*plusProduct(g,qbar)/den_j))*2*momentum(q) - (invariant(q,g)/den_i)*plusCurrent(g,q) ) ); if ( qHel == -1 && qbarHel == -1 ) cacheCurrent( Complex(0.,1.)*cminus*( ((sqr(mass(q))*plusProduct(g,qbar)/(plusProduct(q,qbar)*den_i)) - (plusProduct(qbar,g)*minusProduct(q,qbar)/den_j))*plusCurrent(q, qbar) - (invariant(q,g)/den_i)*plusCurrent(g,qbar) ) ); } #ifdef CHECK_MatchboxCurrents checkCurrent("qqbargRightCurrent",cachedCurrent(),momentum(q)+momentum(qbar)+momentum(g)); #endif return cachedCurrent(); } if ( gHel == -1 ) { if ( getCurrent(hash<2>(2,1,q,qHel,qbar,qbarHel,g,gHel)) ) { // Invariant products from propagator denominators const Complex den_i = invariant(q,g) + (sqr(mass(q))/invariant(q,qbar))*invariant(qbar,g); const Complex den_j = invariant(qbar,g) + (sqr(mass(qbar))/invariant(q,qbar))*invariant(q,g); // 2*factor from the spinor definition of the positive helicity gluon const Complex cplus = sqrt(2.0) / plusProduct(q,g); if ( qHel == 1 && qbarHel == 1 ) cacheCurrent( Complex(0.,1.)*cplus*(mass(qbar)*mass(q)/(invariant(q,qbar))) * ( ((sqr(mass(q))*minusProduct(g,qbar)/(minusProduct(q,qbar)*den_i)) - (plusProduct(q,qbar)*minusProduct(qbar,g)/den_j))*plusCurrent(qbar, q) + (invariant(q,g)/den_j)*plusCurrent(qbar,g) ) ); if ( qHel == 1 && qbarHel == -1 ) cacheCurrent( Complex(0.,1.)*cplus*(mass(q)/plusProduct(qbar,q)) * ( ((sqr(mass(q))*minusProduct(g,qbar)/(minusProduct(q,qbar)*den_i)) - (minusProduct(g,qbar)*plusProduct(qbar,q)/den_j))*2*momentum(qbar) - (plusProduct(g,q)*minusProduct(g,qbar)/den_j)*plusCurrent(qbar,g) ) ); if ( qHel == -1 && qbarHel == 1 ) cacheCurrent( 
Complex(0.,1.)*cplus*(-mass(qbar)/minusProduct(qbar,q)) * ( ((sqr(mass(q))*minusProduct(qbar,g)/(minusProduct(qbar,q)*den_i)) - (minusProduct(qbar,g)*plusProduct(q,qbar)/den_j))*2*momentum(q) + (invariant(q,g)/den_j)*plusCurrent(q,g) ) ); if ( qHel == -1 && qbarHel == -1 ) cacheCurrent( Complex(0.,1.)*cplus*( ((sqr(mass(q))*minusProduct(qbar,g)/(minusProduct(qbar,q)*den_i)) - (plusProduct(qbar,q)*minusProduct(g,qbar)/den_j))*plusCurrent(q, qbar) - (plusProduct(g,q)*minusProduct(g,qbar)/den_j)*plusCurrent(q,g) ) ); } #ifdef CHECK_MatchboxCurrents checkCurrent("qqbargRightCurrent",cachedCurrent(),momentum(q)+momentum(qbar)+momentum(g)); #endif return cachedCurrent(); } return czero; } LorentzVector<Complex> MatchboxCurrents::qqbarggGeneralLeftCurrent(const int i, const int, const int j, const int, const int k, const int g1Hel, const int l, const int g2Hel, const int n) { const double ik = invariant(i,k); const double il = invariant(i,l); const double jk = invariant(j,k); const double jl = invariant(j,l); const double kl = invariant(k,l); const Complex plusP_ik = plusProduct(i,k); const Complex plusP_il = plusProduct(i,l); const Complex plusP_in = plusProduct(i,n); const Complex plusP_jk = plusProduct(j,k); const Complex plusP_jl = plusProduct(j,l); const Complex plusP_jn = plusProduct(j,n); const Complex plusP_kl = plusProduct(k,l); const Complex plusP_kn = plusProduct(k,n); const Complex plusP_ln = plusProduct(l,n); const Complex minusP_ik = minusProduct(i,k); const Complex minusP_il = minusProduct(i,l); const Complex minusP_in = minusProduct(i,n); const Complex minusP_jk = minusProduct(j,k); const Complex minusP_jl = minusProduct(j,l); const Complex minusP_jn = minusProduct(j,n); const Complex minusP_kl = minusProduct(k,l); const Complex minusP_kn = minusProduct(k,n); const Complex minusP_ln = minusProduct(l,n); const LorentzVector<Complex> & minusC_ij = minusCurrent(i,j); const LorentzVector<Complex> & minusC_ik = minusCurrent(i,k); const LorentzVector<Complex> & minusC_il = minusCurrent(i,l); const LorentzVector<Complex> & minusC_kj = minusCurrent(k,j); const LorentzVector<Complex> & minusC_kl = minusCurrent(k,l); const LorentzVector<Complex> & minusC_lj = minusCurrent(l,j); if ( g1Hel == 1 && g2Hel == 1 ) { return (Complex(0,-2) * plusP_jl * plusP_kl * minusC_ik)/ (jl * (jk + jl + kl)) - (Complex(0,2) * plusP_jl * plusP_kl * minusC_ik)/ (kl * (jk + jl + kl)) - (Complex(0,2) * plusP_jk * plusP_kl * minusC_il)/ (kl * (jk + jl + kl)) + (Complex(0,2) * plusP_il * plusP_kl * minusC_ij * minusP_in)/ (kl * (ik + il + kl) * minusP_kn) - (Complex(0,2) * plusP_ik * plusP_jl * minusC_il * minusP_in)/ (ik * jl * minusP_kn) + (Complex(0,2) * sqr(plusP_kl) * minusC_kj * minusP_in)/ (kl * (ik + il + kl) * minusP_kn) - (Complex(0,2) * plusP_jl * plusP_kl * minusC_ij * minusP_jn)/ (jl * (jk + jl + kl) * minusP_kn) - (Complex(0,2) * plusP_jl * plusP_kl * minusC_ij * minusP_jn)/ (kl * (jk + jl + kl) * minusP_kn) + (Complex(0,2) * plusP_jk * plusP_jl * minusC_il * minusP_jn)/ (jl * (jk + jl + kl) * minusP_kn) + (Complex(0,2) * plusP_ik * plusP_kl * minusC_ij * minusP_in)/ (kl * (ik + il + kl) * minusP_ln) - (Complex(0,2) * sqr(plusP_kl) * minusC_lj * minusP_in)/ (kl * (ik + il + kl) * minusP_ln) - (Complex(0,2) * plusP_jk * plusP_kl * minusC_ij * minusP_jn)/ (kl * (jk + jl + kl) * minusP_ln) + (Complex(0,2) * plusP_jk * plusP_jl * minusC_ik * minusP_jn)/ (jl * (jk + jl + kl) * minusP_ln) + (Complex(0,2) * plusP_ik * plusP_il * minusC_ij * sqr(minusP_in))/ (ik * (ik + il + kl) * minusP_kn * minusP_ln) + 
(Complex(0,2) * plusP_ik * plusP_kl * minusC_kj * sqr(minusP_in))/ (ik * (ik + il + kl) * minusP_kn * minusP_ln) - (Complex(0,2) * plusP_ik * plusP_jl * minusC_ij * minusP_in * minusP_jn)/ (ik * jl * minusP_kn * minusP_ln) + (Complex(0,2) * plusP_jk * plusP_jl * minusC_ij * sqr(minusP_jn))/ (jl * (jk + jl + kl) * minusP_kn * minusP_ln) - (Complex(0,2) * plusP_jk * plusP_kl * minusC_ik * minusP_kn)/ (kl * (jk + jl + kl) * minusP_ln) - (Complex(0,2) * plusP_jl * plusP_kl * minusC_il * minusP_ln)/ (jl * (jk + jl + kl) * minusP_kn) - (Complex(0,2) * plusP_jl * plusP_kl * minusC_il * minusP_ln)/ (kl * (jk + jl + kl) * minusP_kn); } if ( g1Hel == 1 && g2Hel == -1 ) { return (Complex(0,-2) * plusP_jk * plusP_jn * minusC_ik * minusP_jl)/ (jl * (jk + jl + kl) * plusP_ln) + (Complex(0,2) * plusP_jk * plusP_kn * minusC_ik * minusP_kl)/ (kl * (jk + jl + kl) * plusP_ln) - (Complex(0,2) * plusP_ik * plusP_in * minusC_ij * minusP_il * minusP_in)/ (ik * (ik + il + kl) * plusP_ln * minusP_kn) - (Complex(0,2) * plusP_ik * plusP_kn * minusC_kj * minusP_il * minusP_in)/ (ik * (ik + il + kl) * plusP_ln * minusP_kn) - (Complex(0,2) * plusP_ik * minusC_lj * minusP_il * minusP_in)/ (ik * (ik + il + kl) * minusP_kn) + (Complex(0,2) * plusP_ik * plusP_jn * minusC_ij * minusP_in * minusP_jl)/ (ik * jl * plusP_ln * minusP_kn) - (Complex(0,2) * plusP_jk * plusP_jn * minusC_ij * minusP_jl * minusP_jn)/ (jl * (jk + jl + kl) * plusP_ln * minusP_kn) - (Complex(0,2) * plusP_ik * plusP_kn * minusC_ij * minusP_in * minusP_kl)/ (kl * (ik + il + kl) * plusP_ln * minusP_kn) + (Complex(0,2) * plusP_kl * plusP_kn * minusC_lj * minusP_in * minusP_kl)/ (kl * (ik + il + kl) * plusP_ln * minusP_kn) + (Complex(0,2) * plusP_jk * plusP_kn * minusC_ij * minusP_jn * minusP_kl)/ (kl * (jk + jl + kl) * plusP_ln * minusP_kn) - (Complex(0,1) * plusP_ik * plusP_kn * minusC_ij * minusP_ik * minusP_ln)/ (kl * (ik + il + kl) * plusP_ln * minusP_kn) + (Complex(0,1) * plusP_kl * plusP_kn * minusC_lj * minusP_ik * minusP_ln)/ (kl * (ik + il + kl) * plusP_ln * minusP_kn) - (Complex(0,2) * plusP_in * plusP_kl * minusC_ij * minusP_il * minusP_ln)/ (kl * (ik + il + kl) * plusP_ln * minusP_kn) + (Complex(0,1) * plusP_il * plusP_kn * minusC_ij * minusP_il * minusP_ln)/ (kl * (ik + il + kl) * plusP_ln * minusP_kn) - (Complex(0,1) * plusP_kl * plusP_kn * minusC_kj * minusP_il * minusP_ln)/ (kl * (ik + il + kl) * plusP_ln * minusP_kn) - (Complex(0,2) * plusP_kl * minusC_lj * minusP_il * minusP_ln)/ (kl * (ik + il + kl) * minusP_kn) + (Complex(0,1) * plusP_jk * plusP_kn * minusC_ij * minusP_jk * minusP_ln)/ (kl * (jk + jl + kl) * plusP_ln * minusP_kn) + (Complex(0,2) * plusP_jn * plusP_kl * minusC_ij * minusP_jl * minusP_ln)/ (kl * (jk + jl + kl) * plusP_ln * minusP_kn) - (Complex(0,1) * plusP_jl * plusP_kn * minusC_ij * minusP_jl * minusP_ln)/ (kl * (jk + jl + kl) * plusP_ln * minusP_kn) - (Complex(0,2) * plusP_jk * plusP_jn * minusC_il * minusP_jl * minusP_ln)/ (jl * (jk + jl + kl) * plusP_ln * minusP_kn) + (Complex(0,2) * plusP_jn * plusP_kl * minusC_ik * minusP_kl * minusP_ln)/ (kl * (jk + jl + kl) * plusP_ln * minusP_kn) - (Complex(0,1) * plusP_jl * plusP_kn * minusC_ik * minusP_kl * minusP_ln)/ (kl * (jk + jl + kl) * plusP_ln * minusP_kn) + (Complex(0,1) * plusP_jk * plusP_kn * minusC_il * minusP_kl * minusP_ln)/ (kl * (jk + jl + kl) * plusP_ln * minusP_kn); } if ( g1Hel == -1 && g2Hel == 1 ) { return (Complex(0,2) * plusP_in * plusP_jl * minusC_il * minusP_ik)/ (ik * jl * plusP_kn) + (Complex(0,2) * plusP_jl * minusC_kl * minusP_ik)/(ik * jl) - 
(Complex(0,2) * plusP_jl * plusP_ln * minusC_ij * minusP_jk)/ (jl * (jk + jl + kl) * plusP_kn) + (Complex(0,2) * plusP_jl * plusP_ln * minusC_il * minusP_kl)/ (jl * (jk + jl + kl) * plusP_kn) + (Complex(0,2) * plusP_jl * plusP_ln * minusC_il * minusP_kl)/ (kl * (jk + jl + kl) * plusP_kn) - (Complex(0,2) * plusP_il * plusP_in * minusC_ij * minusP_ik * minusP_in)/ (ik * (ik + il + kl) * plusP_kn * minusP_ln) - (Complex(0,2) * plusP_in * plusP_kl * minusC_kj * minusP_ik * minusP_in)/ (ik * (ik + il + kl) * plusP_kn * minusP_ln) + (Complex(0,2) * plusP_in * plusP_jl * minusC_ij * minusP_ik * minusP_jn)/ (ik * jl * plusP_kn * minusP_ln) + (Complex(0,2) * plusP_jl * minusC_kj * minusP_ik * minusP_jn)/ (ik * jl * minusP_ln) - (Complex(0,2) * plusP_jl * plusP_jn * minusC_ij * minusP_jk * minusP_jn)/ (jl * (jk + jl + kl) * plusP_kn * minusP_ln) - (Complex(0,2) * plusP_il * plusP_ln * minusC_ij * minusP_in * minusP_kl)/ (kl * (ik + il + kl) * plusP_kn * minusP_ln) - (Complex(0,2) * plusP_kl * plusP_ln * minusC_kj * minusP_in * minusP_kl)/ (kl * (ik + il + kl) * plusP_kn * minusP_ln) + (Complex(0,2) * plusP_jl * plusP_ln * minusC_ij * minusP_jn * minusP_kl)/ (kl * (jk + jl + kl) * plusP_kn * minusP_ln) + (Complex(0,2) * plusP_jl * plusP_jn * minusC_il * minusP_jn * minusP_kl)/ (jl * (jk + jl + kl) * plusP_kn * minusP_ln) - (Complex(0,2) * plusP_il * minusC_ij * minusP_ik * minusP_kn)/ (ik * (ik + il + kl) * minusP_ln) - (Complex(0,2) * plusP_in * plusP_kl * minusC_ij * minusP_ik * minusP_kn)/ (kl * (ik + il + kl) * plusP_kn * minusP_ln) - (Complex(0,1) * plusP_ik * plusP_ln * minusC_ij * minusP_ik * minusP_kn)/ (kl * (ik + il + kl) * plusP_kn * minusP_ln) - (Complex(0,2) * plusP_kl * minusC_kj * minusP_ik * minusP_kn)/ (ik * (ik + il + kl) * minusP_ln) - (Complex(0,2) * plusP_kl * minusC_kj * minusP_ik * minusP_kn)/ (kl * (ik + il + kl) * minusP_ln) - (Complex(0,1) * plusP_kl * plusP_ln * minusC_lj * minusP_ik * minusP_kn)/ (kl * (ik + il + kl) * plusP_kn * minusP_ln) + (Complex(0,1) * plusP_il * plusP_ln * minusC_ij * minusP_il * minusP_kn)/ (kl * (ik + il + kl) * plusP_kn * minusP_ln) + (Complex(0,1) * plusP_kl * plusP_ln * minusC_kj * minusP_il * minusP_kn)/ (kl * (ik + il + kl) * plusP_kn * minusP_ln) + (Complex(0,2) * plusP_jn * plusP_kl * minusC_ij * minusP_jk * minusP_kn)/ (kl * (jk + jl + kl) * plusP_kn * minusP_ln) + (Complex(0,1) * plusP_jk * plusP_ln * minusC_ij * minusP_jk * minusP_kn)/ (kl * (jk + jl + kl) * plusP_kn * minusP_ln) - (Complex(0,1) * plusP_jl * plusP_ln * minusC_ij * minusP_jl * minusP_kn)/ (kl * (jk + jl + kl) * plusP_kn * minusP_ln) + (Complex(0,1) * plusP_jl * plusP_ln * minusC_ik * minusP_kl * minusP_kn)/ (kl * (jk + jl + kl) * plusP_kn * minusP_ln) - (Complex(0,2) * plusP_jn * plusP_kl * minusC_il * minusP_kl * minusP_kn)/ (kl * (jk + jl + kl) * plusP_kn * minusP_ln) - (Complex(0,1) * plusP_jk * plusP_ln * minusC_il * minusP_kl * minusP_kn)/ (kl * (jk + jl + kl) * plusP_kn * minusP_ln); } if ( g1Hel == -1 && g2Hel == -1 ) { return (Complex(0,2) * sqr(plusP_in) * minusC_ij * minusP_ik * minusP_il)/ (ik * (ik + il + kl) * plusP_kn * plusP_ln) + (Complex(0,2) * plusP_in * minusC_kj * minusP_ik * minusP_il)/ (ik * (ik + il + kl) * plusP_ln) + (Complex(0,2) * plusP_in * minusC_lj * minusP_ik * minusP_il)/ (ik * (ik + il + kl) * plusP_kn) - (Complex(0,2) * plusP_in * plusP_jn * minusC_ij * minusP_ik * minusP_jl)/ (ik * jl * plusP_kn * plusP_ln) - (Complex(0,2) * plusP_jn * minusC_kj * minusP_ik * minusP_jl)/ (ik * jl * plusP_ln) + (Complex(0,2) * sqr(plusP_jn) * minusC_ij * 
minusP_jk * minusP_jl)/ (jl * (jk + jl + kl) * plusP_kn * plusP_ln) + (Complex(0,2) * plusP_in * minusC_ij * minusP_ik * minusP_kl)/ (ik * (ik + il + kl) * plusP_ln) + (Complex(0,2) * plusP_in * minusC_ij * minusP_ik * minusP_kl)/ (kl * (ik + il + kl) * plusP_ln) + (Complex(0,2) * plusP_kn * minusC_kj * minusP_ik * minusP_kl)/ (ik * (ik + il + kl) * plusP_ln) + (Complex(0,2) * plusP_kn * minusC_kj * minusP_ik * minusP_kl)/ (kl * (ik + il + kl) * plusP_ln) + (Complex(0,2) * minusC_lj * minusP_ik * minusP_kl)/ (ik * (ik + il + kl)) + (Complex(0,2) * minusC_lj * minusP_ik * minusP_kl)/ (kl * (ik + il + kl)) + (Complex(0,2) * plusP_in * minusC_ij * minusP_il * minusP_kl)/ (kl * (ik + il + kl) * plusP_kn) + (Complex(0,2) * minusC_kj * minusP_il * minusP_kl)/ (kl * (ik + il + kl)) + (Complex(0,2) * plusP_ln * minusC_lj * minusP_il * minusP_kl)/ (kl * (ik + il + kl) * plusP_kn) - (Complex(0,2) * plusP_jn * minusC_ij * minusP_jk * minusP_kl)/ (kl * (jk + jl + kl) * plusP_ln) - (Complex(0,2) * plusP_jn * minusC_ij * minusP_jl * minusP_kl)/ (kl * (jk + jl + kl) * plusP_kn) - (Complex(0,2) * sqr(plusP_jn) * minusC_il * minusP_jl * minusP_kl)/ (jl * (jk + jl + kl) * plusP_kn * plusP_ln) - (Complex(0,2) * plusP_jn * minusC_ik * sqr(minusP_kl))/ (kl * (jk + jl + kl) * plusP_kn) + (Complex(0,2) * plusP_jn * minusC_il * sqr(minusP_kl))/ (kl * (jk + jl + kl) * plusP_ln); } return czero; } LorentzVector<Complex> MatchboxCurrents::qqbarggFixedLeftCurrent(const int i, const int, const int j, const int, const int k, const int g1Hel, const int l, const int g2Hel) { const double ik = invariant(i,k); const double il = invariant(i,l); const double jk = invariant(j,k); const double jl = invariant(j,l); const double kl = invariant(k,l); const Complex plusP_ij = plusProduct(i,j); const Complex plusP_ik = plusProduct(i,k); const Complex plusP_il = plusProduct(i,l); const Complex plusP_jk = plusProduct(j,k); const Complex plusP_jl = plusProduct(j,l); const Complex plusP_kl = plusProduct(k,l); const Complex minusP_ij = minusProduct(i,j); const Complex minusP_ik = minusProduct(i,k); const Complex minusP_il = minusProduct(i,l); const Complex minusP_jk = minusProduct(j,k); const Complex minusP_jl = minusProduct(j,l); const Complex minusP_kl = minusProduct(k,l); const LorentzVector<Complex> & minusC_ij = minusCurrent(i,j); const LorentzVector<Complex> & minusC_ik = minusCurrent(i,k); const LorentzVector<Complex> & minusC_il = minusCurrent(i,l); const LorentzVector<Complex> & minusC_kj = minusCurrent(k,j); const LorentzVector<Complex> & minusC_kl = minusCurrent(k,l); const LorentzVector<Complex> & minusC_lj = minusCurrent(l,j); if ( g1Hel == 1 && g2Hel == 1 ) { return (Complex(0,-2) * plusP_jl * plusP_kl * minusC_ik)/ (jl * (jk + jl + kl)) - (Complex(0,2) * plusP_jl * plusP_kl * minusC_ik)/ (kl * (jk + jl + kl)) - (Complex(0,2) * plusP_jk * plusP_kl * minusC_il)/ (kl * (jk + jl + kl)) - (Complex(0,2) * plusP_jl * plusP_kl * minusC_ij * minusP_ij)/ (jl * (jk + jl + kl) * minusP_ik) - (Complex(0,2) * plusP_jl * plusP_kl * minusC_ij * minusP_ij)/ (kl * (jk + jl + kl) * minusP_ik) + (Complex(0,2) * plusP_jk * plusP_jl * minusC_il * minusP_ij)/ (jl * (jk + jl + kl) * minusP_ik) - (Complex(0,2) * plusP_jk * plusP_kl * minusC_ij * minusP_ij)/ (kl * (jk + jl + kl) * minusP_il) + (Complex(0,2) * plusP_jk * plusP_jl * minusC_ik * minusP_ij)/ (jl * (jk + jl + kl) * minusP_il) + (Complex(0,2) * plusP_jk * plusP_jl * minusC_ij * sqr(minusP_ij))/ (jl * (jk + jl + kl) * minusP_ik * minusP_il) - (Complex(0,2) * plusP_jk * plusP_kl * 
minusC_ik * minusP_ik)/ (kl * (jk + jl + kl) * minusP_il) - (Complex(0,2) * plusP_jl * plusP_kl * minusC_il * minusP_il)/ (jl * (jk + jl + kl) * minusP_ik) - (Complex(0,2) * plusP_jl * plusP_kl * minusC_il * minusP_il)/ (kl * (jk + jl + kl) * minusP_ik); } if ( g1Hel == 1 && g2Hel == -1 ) { return (Complex(0,-1) * sqr(plusP_ik) * minusC_ij * minusP_il)/ (kl * (ik + il + kl) * plusP_il) + (Complex(0,1) * plusP_ik * plusP_kl * minusC_lj * minusP_il)/ (kl * (ik + il + kl) * plusP_il) + (Complex(0,1) * plusP_ik * minusC_ij * sqr(minusP_il))/ (kl * (ik + il + kl) * minusP_ik) - (Complex(0,1) * plusP_ik * plusP_kl * minusC_kj * sqr(minusP_il))/ (kl * (ik + il + kl) * plusP_il * minusP_ik) - (Complex(0,2) * plusP_kl * minusC_lj * sqr(minusP_il))/ (kl * (ik + il + kl) * minusP_ik) + (Complex(0,1) * plusP_ik * plusP_jk * minusC_ij * minusP_il * minusP_jk)/ (kl * (jk + jl + kl) * plusP_il * minusP_ik) - (Complex(0,2) * plusP_ij * plusP_jk * minusC_ik * minusP_jl)/ (jl * (jk + jl + kl) * plusP_il) - (Complex(0,2) * plusP_ij * plusP_jk * minusC_ij * minusP_ij * minusP_jl)/ (jl * (jk + jl + kl) * plusP_il * minusP_ik) - (Complex(0,1) * plusP_ik * plusP_jl * minusC_ij * minusP_il * minusP_jl)/ (kl * (jk + jl + kl) * plusP_il * minusP_ik) + (Complex(0,2) * plusP_ij * plusP_kl * minusC_ij * minusP_il * minusP_jl)/ (kl * (jk + jl + kl) * plusP_il * minusP_ik) - (Complex(0,2) * plusP_ij * plusP_jk * minusC_il * minusP_il * minusP_jl)/ (jl * (jk + jl + kl) * plusP_il * minusP_ik) + (Complex(0,2) * plusP_ik * plusP_jk * minusC_ik * minusP_kl)/ (kl * (jk + jl + kl) * plusP_il) + (Complex(0,2) * plusP_ik * plusP_jk * minusC_ij * minusP_ij * minusP_kl)/ (kl * (jk + jl + kl) * plusP_il * minusP_ik) - (Complex(0,1) * plusP_ik * plusP_jl * minusC_ik * minusP_il * minusP_kl)/ (kl * (jk + jl + kl) * plusP_il * minusP_ik) + (Complex(0,2) * plusP_ij * plusP_kl * minusC_ik * minusP_il * minusP_kl)/ (kl * (jk + jl + kl) * plusP_il * minusP_ik) + (Complex(0,1) * plusP_ik * plusP_jk * minusC_il * minusP_il * minusP_kl)/ (kl * (jk + jl + kl) * plusP_il * minusP_ik); } if ( g1Hel == -1 && g2Hel == 1 ) { return (Complex(0,1) * sqr(plusP_il) * minusC_ij * minusP_ik)/ (kl * (ik + il + kl) * plusP_ik) + (Complex(0,1) * plusP_il * plusP_kl * minusC_kj * minusP_ik)/ (kl * (ik + il + kl) * plusP_ik) + (Complex(0,2) * plusP_jl * minusC_kl * minusP_ik)/(ik * jl) + (Complex(0,2) * plusP_jl * minusC_kj * minusP_ij * minusP_ik)/ (ik * jl * minusP_il) - (Complex(0,2) * plusP_il * minusC_ij * sqr(minusP_ik))/ (ik * (ik + il + kl) * minusP_il) - (Complex(0,1) * plusP_il * minusC_ij * sqr(minusP_ik))/ (kl * (ik + il + kl) * minusP_il) - (Complex(0,2) * plusP_kl * minusC_kj * sqr(minusP_ik))/ (ik * (ik + il + kl) * minusP_il) - (Complex(0,2) * plusP_kl * minusC_kj * sqr(minusP_ik))/ (kl * (ik + il + kl) * minusP_il) - (Complex(0,1) * plusP_il * plusP_kl * minusC_lj * sqr(minusP_ik))/ (kl * (ik + il + kl) * plusP_ik * minusP_il) - (Complex(0,2) * plusP_il * plusP_jl * minusC_ij * minusP_jk)/ (jl * (jk + jl + kl) * plusP_ik) - (Complex(0,2) * plusP_ij * plusP_jl * minusC_ij * minusP_ij * minusP_jk)/ (jl * (jk + jl + kl) * plusP_ik * minusP_il) + (Complex(0,1) * plusP_il * plusP_jk * minusC_ij * minusP_ik * minusP_jk)/ (kl * (jk + jl + kl) * plusP_ik * minusP_il) + (Complex(0,2) * plusP_ij * plusP_kl * minusC_ij * minusP_ik * minusP_jk)/ (kl * (jk + jl + kl) * plusP_ik * minusP_il) - (Complex(0,1) * plusP_il * plusP_jl * minusC_ij * minusP_ik * minusP_jl)/ (kl * (jk + jl + kl) * plusP_ik * minusP_il) + (Complex(0,2) * plusP_il * plusP_jl * 
minusC_il * minusP_kl)/ (jl * (jk + jl + kl) * plusP_ik) + (Complex(0,2) * plusP_il * plusP_jl * minusC_il * minusP_kl)/ (kl * (jk + jl + kl) * plusP_ik) + (Complex(0,2) * plusP_il * plusP_jl * minusC_ij * minusP_ij * minusP_kl)/ (kl * (jk + jl + kl) * plusP_ik * minusP_il) + (Complex(0,2) * plusP_ij * plusP_jl * minusC_il * minusP_ij * minusP_kl)/ (jl * (jk + jl + kl) * plusP_ik * minusP_il) + (Complex(0,1) * plusP_il * plusP_jl * minusC_ik * minusP_ik * minusP_kl)/ (kl * (jk + jl + kl) * plusP_ik * minusP_il) - (Complex(0,1) * plusP_il * plusP_jk * minusC_il * minusP_ik * minusP_kl)/ (kl * (jk + jl + kl) * plusP_ik * minusP_il) - (Complex(0,2) * plusP_ij * plusP_kl * minusC_il * minusP_ik * minusP_kl)/ (kl * (jk + jl + kl) * plusP_ik * minusP_il); } if ( g1Hel == -1 && g2Hel == -1 ) { return (Complex(0,-2) * plusP_ij * minusC_kj * minusP_ik * minusP_jl)/ (ik * jl * plusP_il) + (Complex(0,2) * sqr(plusP_ij) * minusC_ij * minusP_jk * minusP_jl)/ (jl * (jk + jl + kl) * plusP_ik * plusP_il) + (Complex(0,2) * plusP_ik * minusC_kj * minusP_ik * minusP_kl)/ (ik * (ik + il + kl) * plusP_il) + (Complex(0,2) * plusP_ik * minusC_kj * minusP_ik * minusP_kl)/ (kl * (ik + il + kl) * plusP_il) + (Complex(0,2) * minusC_lj * minusP_ik * minusP_kl)/ (ik * (ik + il + kl)) + (Complex(0,2) * minusC_lj * minusP_ik * minusP_kl)/ (kl * (ik + il + kl)) + (Complex(0,2) * minusC_kj * minusP_il * minusP_kl)/ (kl * (ik + il + kl)) + (Complex(0,2) * plusP_il * minusC_lj * minusP_il * minusP_kl)/ (kl * (ik + il + kl) * plusP_ik) - (Complex(0,2) * plusP_ij * minusC_ij * minusP_jk * minusP_kl)/ (kl * (jk + jl + kl) * plusP_il) - (Complex(0,2) * plusP_ij * minusC_ij * minusP_jl * minusP_kl)/ (kl * (jk + jl + kl) * plusP_ik) - (Complex(0,2) * sqr(plusP_ij) * minusC_il * minusP_jl * minusP_kl)/ (jl * (jk + jl + kl) * plusP_ik * plusP_il) - (Complex(0,2) * plusP_ij * minusC_ik * sqr(minusP_kl))/ (kl * (jk + jl + kl) * plusP_ik) + (Complex(0,2) * plusP_ij * minusC_il * sqr(minusP_kl))/ (kl * (jk + jl + kl) * plusP_il); } return czero; } LorentzVector<Complex> MatchboxCurrents::qqbarggGeneralRightCurrent(const int i, const int, const int j, const int, const int k, const int g1Hel, const int l, const int g2Hel, const int n) { const double ik = invariant(i,k); const double il = invariant(i,l); const double jk = invariant(j,k); const double jl = invariant(j,l); const double kl = invariant(k,l); const Complex plusP_ik = plusProduct(i,k); const Complex plusP_il = plusProduct(i,l); const Complex plusP_in = plusProduct(i,n); const Complex plusP_jk = plusProduct(j,k); const Complex plusP_jl = plusProduct(j,l); const Complex plusP_jn = plusProduct(j,n); const Complex plusP_kl = plusProduct(k,l); const Complex plusP_kn = plusProduct(k,n); const Complex plusP_ln = plusProduct(l,n); const Complex minusP_ik = minusProduct(i,k); const Complex minusP_il = minusProduct(i,l); const Complex minusP_in = minusProduct(i,n); const Complex minusP_jk = minusProduct(j,k); const Complex minusP_jl = minusProduct(j,l); const Complex minusP_jn = minusProduct(j,n); const Complex minusP_kl = minusProduct(k,l); const Complex minusP_kn = minusProduct(k,n); const Complex minusP_ln = minusProduct(l,n); const LorentzVector<Complex> & minusC_ji = minusCurrent(j,i); const LorentzVector<Complex> & minusC_jk = minusCurrent(j,k); const LorentzVector<Complex> & minusC_jl = minusCurrent(j,l); const LorentzVector<Complex> & minusC_ki = minusCurrent(k,i); const LorentzVector<Complex> & minusC_li = minusCurrent(l,i); const LorentzVector<Complex> & minusC_lk = 
minusCurrent(l,k); if ( g1Hel == 1 && g2Hel == 1 ) { return (Complex(0,2) * plusP_il * plusP_kl * minusC_jk)/ (kl * (ik + il + kl)) + (Complex(0,2) * plusP_ik * plusP_kl * minusC_jl)/ (ik * (ik + il + kl)) + (Complex(0,2) * plusP_ik * plusP_kl * minusC_jl)/ (kl * (ik + il + kl)) + (Complex(0,2) * plusP_il * plusP_kl * minusC_ji * minusP_in)/ (kl * (ik + il + kl) * minusP_kn) + (Complex(0,2) * plusP_ik * plusP_il * minusC_jl * minusP_in)/ (ik * (ik + il + kl) * minusP_kn) - (Complex(0,2) * plusP_jl * plusP_kl * minusC_ji * minusP_jn)/ (kl * (jk + jl + kl) * minusP_kn) - (Complex(0,2) * sqr(plusP_kl) * minusC_ki * minusP_jn)/ (kl * (jk + jl + kl) * minusP_kn) + (Complex(0,2) * plusP_ik * plusP_kl * minusC_ji * minusP_in)/ (ik * (ik + il + kl) * minusP_ln) + (Complex(0,2) * plusP_ik * plusP_kl * minusC_ji * minusP_in)/ (kl * (ik + il + kl) * minusP_ln) + (Complex(0,2) * plusP_ik * plusP_il * minusC_jk * minusP_in)/ (ik * (ik + il + kl) * minusP_ln) - (Complex(0,2) * plusP_jk * plusP_kl * minusC_ji * minusP_jn)/ (kl * (jk + jl + kl) * minusP_ln) - (Complex(0,2) * plusP_ik * plusP_jl * minusC_jk * minusP_jn)/ (ik * jl * minusP_ln) + (Complex(0,2) * sqr(plusP_kl) * minusC_li * minusP_jn)/ (kl * (jk + jl + kl) * minusP_ln) + (Complex(0,2) * plusP_ik * plusP_il * minusC_ji * sqr(minusP_in))/ (ik * (ik + il + kl) * minusP_kn * minusP_ln) - (Complex(0,2) * plusP_ik * plusP_jl * minusC_ji * minusP_in * minusP_jn)/ (ik * jl * minusP_kn * minusP_ln) + (Complex(0,2) * plusP_jk * plusP_jl * minusC_ji * sqr(minusP_jn))/ (jl * (jk + jl + kl) * minusP_kn * minusP_ln) - (Complex(0,2) * plusP_jl * plusP_kl * minusC_li * sqr(minusP_jn))/ (jl * (jk + jl + kl) * minusP_kn * minusP_ln) + (Complex(0,2) * plusP_ik * plusP_kl * minusC_jk * minusP_kn)/ (ik * (ik + il + kl) * minusP_ln) + (Complex(0,2) * plusP_ik * plusP_kl * minusC_jk * minusP_kn)/ (kl * (ik + il + kl) * minusP_ln) + (Complex(0,2) * plusP_il * plusP_kl * minusC_jl * minusP_ln)/ (kl * (ik + il + kl) * minusP_kn); } if ( g1Hel == 1 && g2Hel == -1 ) { return (Complex(0,-2) * plusP_ik * plusP_kn * minusC_ji * minusP_il)/ (ik * (ik + il + kl) * plusP_ln) + (Complex(0,2) * plusP_ik * plusP_jn * minusC_jk * minusP_jl)/ (ik * jl * plusP_ln) + (Complex(0,2) * plusP_ik * minusC_lk * minusP_jl)/(ik * jl) - (Complex(0,2) * plusP_ik * plusP_kn * minusC_jk * minusP_kl)/ (ik * (ik + il + kl) * plusP_ln) - (Complex(0,2) * plusP_ik * plusP_kn * minusC_jk * minusP_kl)/ (kl * (ik + il + kl) * plusP_ln) - (Complex(0,2) * plusP_ik * plusP_in * minusC_ji * minusP_il * minusP_in)/ (ik * (ik + il + kl) * plusP_ln * minusP_kn) + (Complex(0,2) * plusP_ik * plusP_jn * minusC_ji * minusP_in * minusP_jl)/ (ik * jl * plusP_ln * minusP_kn) + (Complex(0,2) * plusP_ik * minusC_li * minusP_in * minusP_jl)/ (ik * jl * minusP_kn) - (Complex(0,2) * plusP_jk * plusP_jn * minusC_ji * minusP_jl * minusP_jn)/ (jl * (jk + jl + kl) * plusP_ln * minusP_kn) + (Complex(0,2) * plusP_jn * plusP_kl * minusC_li * minusP_jl * minusP_jn)/ (jl * (jk + jl + kl) * plusP_ln * minusP_kn) - (Complex(0,2) * plusP_ik * plusP_kn * minusC_ji * minusP_in * minusP_kl)/ (kl * (ik + il + kl) * plusP_ln * minusP_kn) - (Complex(0,2) * plusP_ik * plusP_in * minusC_jk * minusP_in * minusP_kl)/ (ik * (ik + il + kl) * plusP_ln * minusP_kn) + (Complex(0,2) * plusP_jk * plusP_kn * minusC_ji * minusP_jn * minusP_kl)/ (kl * (jk + jl + kl) * plusP_ln * minusP_kn) - (Complex(0,2) * plusP_kl * plusP_kn * minusC_li * minusP_jn * minusP_kl)/ (kl * (jk + jl + kl) * plusP_ln * minusP_kn) - (Complex(0,1) * plusP_ik * plusP_kn * 
minusC_ji * minusP_ik * minusP_ln)/ (kl * (ik + il + kl) * plusP_ln * minusP_kn) - (Complex(0,2) * plusP_in * plusP_kl * minusC_ji * minusP_il * minusP_ln)/ (kl * (ik + il + kl) * plusP_ln * minusP_kn) + (Complex(0,1) * plusP_il * plusP_kn * minusC_ji * minusP_il * minusP_ln)/ (kl * (ik + il + kl) * plusP_ln * minusP_kn) + (Complex(0,1) * plusP_jk * plusP_kn * minusC_ji * minusP_jk * minusP_ln)/ (kl * (jk + jl + kl) * plusP_ln * minusP_kn) - (Complex(0,1) * plusP_kl * plusP_kn * minusC_li * minusP_jk * minusP_ln)/ (kl * (jk + jl + kl) * plusP_ln * minusP_kn) - (Complex(0,2) * plusP_jk * minusC_ji * minusP_jl * minusP_ln)/ (jl * (jk + jl + kl) * minusP_kn) + (Complex(0,2) * plusP_jn * plusP_kl * minusC_ji * minusP_jl * minusP_ln)/ (kl * (jk + jl + kl) * plusP_ln * minusP_kn) - (Complex(0,1) * plusP_jl * plusP_kn * minusC_ji * minusP_jl * minusP_ln)/ (kl * (jk + jl + kl) * plusP_ln * minusP_kn) + (Complex(0,1) * plusP_kl * plusP_kn * minusC_ki * minusP_jl * minusP_ln)/ (kl * (jk + jl + kl) * plusP_ln * minusP_kn) + (Complex(0,2) * plusP_kl * minusC_li * minusP_jl * minusP_ln)/ (jl * (jk + jl + kl) * minusP_kn) + (Complex(0,2) * plusP_kl * minusC_li * minusP_jl * minusP_ln)/ (kl * (jk + jl + kl) * minusP_kn) - (Complex(0,2) * plusP_in * plusP_kl * minusC_jk * minusP_kl * minusP_ln)/ (kl * (ik + il + kl) * plusP_ln * minusP_kn) + (Complex(0,1) * plusP_il * plusP_kn * minusC_jk * minusP_kl * minusP_ln)/ (kl * (ik + il + kl) * plusP_ln * minusP_kn) - (Complex(0,1) * plusP_ik * plusP_kn * minusC_jl * minusP_kl * minusP_ln)/ (kl * (ik + il + kl) * plusP_ln * minusP_kn); } if ( g1Hel == -1 && g2Hel == 1 ) { return (Complex(0,-2) * plusP_il * plusP_in * minusC_jl * minusP_ik)/ (ik * (ik + il + kl) * plusP_kn) - (Complex(0,2) * plusP_il * plusP_ln * minusC_jl * minusP_kl)/ (kl * (ik + il + kl) * plusP_kn) - (Complex(0,2) * plusP_il * plusP_in * minusC_ji * minusP_ik * minusP_in)/ (ik * (ik + il + kl) * plusP_kn * minusP_ln) + (Complex(0,2) * plusP_in * plusP_jl * minusC_ji * minusP_ik * minusP_jn)/ (ik * jl * plusP_kn * minusP_ln) - (Complex(0,2) * plusP_jl * plusP_jn * minusC_ji * minusP_jk * minusP_jn)/ (jl * (jk + jl + kl) * plusP_kn * minusP_ln) - (Complex(0,2) * plusP_jl * minusC_ki * minusP_jk * minusP_jn)/ (jl * (jk + jl + kl) * minusP_ln) - (Complex(0,2) * plusP_jl * plusP_ln * minusC_li * minusP_jk * minusP_jn)/ (jl * (jk + jl + kl) * plusP_kn * minusP_ln) - (Complex(0,2) * plusP_il * plusP_ln * minusC_ji * minusP_in * minusP_kl)/ (kl * (ik + il + kl) * plusP_kn * minusP_ln) + (Complex(0,2) * plusP_jl * plusP_ln * minusC_ji * minusP_jn * minusP_kl)/ (kl * (jk + jl + kl) * plusP_kn * minusP_ln) + (Complex(0,2) * plusP_kl * plusP_ln * minusC_ki * minusP_jn * minusP_kl)/ (kl * (jk + jl + kl) * plusP_kn * minusP_ln) - (Complex(0,2) * plusP_in * plusP_kl * minusC_ji * minusP_ik * minusP_kn)/ (kl * (ik + il + kl) * plusP_kn * minusP_ln) - (Complex(0,1) * plusP_ik * plusP_ln * minusC_ji * minusP_ik * minusP_kn)/ (kl * (ik + il + kl) * plusP_kn * minusP_ln) - (Complex(0,2) * plusP_il * plusP_in * minusC_jk * minusP_ik * minusP_kn)/ (ik * (ik + il + kl) * plusP_kn * minusP_ln) + (Complex(0,1) * plusP_il * plusP_ln * minusC_ji * minusP_il * minusP_kn)/ (kl * (ik + il + kl) * plusP_kn * minusP_ln) + (Complex(0,2) * plusP_jn * plusP_kl * minusC_ji * minusP_jk * minusP_kn)/ (kl * (jk + jl + kl) * plusP_kn * minusP_ln) + (Complex(0,1) * plusP_jk * plusP_ln * minusC_ji * minusP_jk * minusP_kn)/ (kl * (jk + jl + kl) * plusP_kn * minusP_ln) + (Complex(0,2) * plusP_kl * minusC_ki * minusP_jk * minusP_kn)/ (kl 
* (jk + jl + kl) * minusP_ln) + (Complex(0,1) * plusP_kl * plusP_ln * minusC_li * minusP_jk * minusP_kn)/ (kl * (jk + jl + kl) * plusP_kn * minusP_ln) - (Complex(0,1) * plusP_jl * plusP_ln * minusC_ji * minusP_jl * minusP_kn)/ (kl * (jk + jl + kl) * plusP_kn * minusP_ln) - (Complex(0,1) * plusP_kl * plusP_ln * minusC_ki * minusP_jl * minusP_kn)/ (kl * (jk + jl + kl) * plusP_kn * minusP_ln) - (Complex(0,1) * plusP_il * plusP_ln * minusC_jk * minusP_kl * minusP_kn)/ (kl * (ik + il + kl) * plusP_kn * minusP_ln) + (Complex(0,2) * plusP_in * plusP_kl * minusC_jl * minusP_kl * minusP_kn)/ (kl * (ik + il + kl) * plusP_kn * minusP_ln) + (Complex(0,1) * plusP_ik * plusP_ln * minusC_jl * minusP_kl * minusP_kn)/ (kl * (ik + il + kl) * plusP_kn * minusP_ln); } if ( g1Hel == -1 && g2Hel == -1 ) { return (Complex(0,2) * sqr(plusP_in) * minusC_ji * minusP_ik * minusP_il)/ (ik * (ik + il + kl) * plusP_kn * plusP_ln) - (Complex(0,2) * plusP_in * plusP_jn * minusC_ji * minusP_ik * minusP_jl)/ (ik * jl * plusP_kn * plusP_ln) - (Complex(0,2) * plusP_in * minusC_li * minusP_ik * minusP_jl)/ (ik * jl * plusP_kn) + (Complex(0,2) * sqr(plusP_jn) * minusC_ji * minusP_jk * minusP_jl)/ (jl * (jk + jl + kl) * plusP_kn * plusP_ln) + (Complex(0,2) * plusP_jn * minusC_ki * minusP_jk * minusP_jl)/ (jl * (jk + jl + kl) * plusP_ln) + (Complex(0,2) * plusP_jn * minusC_li * minusP_jk * minusP_jl)/ (jl * (jk + jl + kl) * plusP_kn) + (Complex(0,2) * plusP_in * minusC_ji * minusP_ik * minusP_kl)/ (kl * (ik + il + kl) * plusP_ln) + (Complex(0,2) * sqr(plusP_in) * minusC_jk * minusP_ik * minusP_kl)/ (ik * (ik + il + kl) * plusP_kn * plusP_ln) + (Complex(0,2) * plusP_in * minusC_ji * minusP_il * minusP_kl)/ (kl * (ik + il + kl) * plusP_kn) - (Complex(0,2) * plusP_jn * minusC_ji * minusP_jk * minusP_kl)/ (kl * (jk + jl + kl) * plusP_ln) - (Complex(0,2) * plusP_kn * minusC_ki * minusP_jk * minusP_kl)/ (kl * (jk + jl + kl) * plusP_ln) - (Complex(0,2) * minusC_li * minusP_jk * minusP_kl)/ (kl * (jk + jl + kl)) - (Complex(0,2) * plusP_jn * minusC_ji * minusP_jl * minusP_kl)/ (jl * (jk + jl + kl) * plusP_kn) - (Complex(0,2) * plusP_jn * minusC_ji * minusP_jl * minusP_kl)/ (kl * (jk + jl + kl) * plusP_kn) - (Complex(0,2) * minusC_ki * minusP_jl * minusP_kl)/ (jl * (jk + jl + kl)) - (Complex(0,2) * minusC_ki * minusP_jl * minusP_kl)/ (kl * (jk + jl + kl)) - (Complex(0,2) * plusP_ln * minusC_li * minusP_jl * minusP_kl)/ (jl * (jk + jl + kl) * plusP_kn) - (Complex(0,2) * plusP_ln * minusC_li * minusP_jl * minusP_kl)/ (kl * (jk + jl + kl) * plusP_kn) + (Complex(0,2) * plusP_in * minusC_jk * sqr(minusP_kl))/ (kl * (ik + il + kl) * plusP_kn) - (Complex(0,2) * plusP_in * minusC_jl * sqr(minusP_kl))/ (kl * (ik + il + kl) * plusP_ln); } return czero; } LorentzVector<Complex> MatchboxCurrents::qqbarggFixedRightCurrent(const int i, const int, const int j, const int, const int k, const int g1Hel, const int l, const int g2Hel) { const double ik = invariant(i,k); const double il = invariant(i,l); const double jk = invariant(j,k); const double jl = invariant(j,l); const double kl = invariant(k,l); const Complex plusP_ij = plusProduct(i,j); const Complex plusP_ik = plusProduct(i,k); const Complex plusP_il = plusProduct(i,l); const Complex plusP_jk = plusProduct(j,k); const Complex plusP_jl = plusProduct(j,l); const Complex plusP_kl = plusProduct(k,l); const Complex minusP_ij = minusProduct(i,j); const Complex minusP_ik = minusProduct(i,k); const Complex minusP_il = minusProduct(i,l); const Complex minusP_jk = minusProduct(j,k); const Complex minusP_jl = 
minusProduct(j,l); const Complex minusP_kl = minusProduct(k,l); const LorentzVector<Complex> & minusC_ji = minusCurrent(j,i); const LorentzVector<Complex> & minusC_jk = minusCurrent(j,k); const LorentzVector<Complex> & minusC_jl = minusCurrent(j,l); const LorentzVector<Complex> & minusC_ki = minusCurrent(k,i); const LorentzVector<Complex> & minusC_li = minusCurrent(l,i); const LorentzVector<Complex> & minusC_lk = minusCurrent(l,k); if ( g1Hel == 1 && g2Hel == 1 ) { return (Complex(0,2) * plusP_il * plusP_kl * minusC_jk)/ (kl * (ik + il + kl)) + (Complex(0,2) * plusP_ik * plusP_kl * minusC_jl)/ (ik * (ik + il + kl)) + (Complex(0,2) * plusP_ik * plusP_kl * minusC_jl)/ (kl * (ik + il + kl)) - (Complex(0,2) * plusP_jl * plusP_kl * minusC_ji * minusP_ij)/ (kl * (jk + jl + kl) * minusP_ik) - (Complex(0,2) * sqr(plusP_kl) * minusC_ki * minusP_ij)/ (kl * (jk + jl + kl) * minusP_ik) - (Complex(0,2) * plusP_jk * plusP_kl * minusC_ji * minusP_ij)/ (kl * (jk + jl + kl) * minusP_il) - (Complex(0,2) * plusP_ik * plusP_jl * minusC_jk * minusP_ij)/ (ik * jl * minusP_il) + (Complex(0,2) * sqr(plusP_kl) * minusC_li * minusP_ij)/ (kl * (jk + jl + kl) * minusP_il) + (Complex(0,2) * plusP_jk * plusP_jl * minusC_ji * sqr(minusP_ij))/ (jl * (jk + jl + kl) * minusP_ik * minusP_il) - (Complex(0,2) * plusP_jl * plusP_kl * minusC_li * sqr(minusP_ij))/ (jl * (jk + jl + kl) * minusP_ik * minusP_il) + (Complex(0,2) * plusP_ik * plusP_kl * minusC_jk * minusP_ik)/ (ik * (ik + il + kl) * minusP_il) + (Complex(0,2) * plusP_ik * plusP_kl * minusC_jk * minusP_ik)/ (kl * (ik + il + kl) * minusP_il) + (Complex(0,2) * plusP_il * plusP_kl * minusC_jl * minusP_il)/ (kl * (ik + il + kl) * minusP_ik); } if ( g1Hel == 1 && g2Hel == -1 ) { return (Complex(0,-2) * sqr(plusP_ik) * minusC_ji * minusP_il)/ (ik * (ik + il + kl) * plusP_il) - (Complex(0,1) * sqr(plusP_ik) * minusC_ji * minusP_il)/ (kl * (ik + il + kl) * plusP_il) + (Complex(0,1) * plusP_ik * minusC_ji * sqr(minusP_il))/ (kl * (ik + il + kl) * minusP_ik) + (Complex(0,1) * plusP_ik * plusP_jk * minusC_ji * minusP_il * minusP_jk)/ (kl * (jk + jl + kl) * plusP_il * minusP_ik) - (Complex(0,1) * plusP_ik * plusP_kl * minusC_li * minusP_il * minusP_jk)/ (kl * (jk + jl + kl) * plusP_il * minusP_ik) + (Complex(0,2) * plusP_ij * plusP_ik * minusC_jk * minusP_jl)/ (ik * jl * plusP_il) + (Complex(0,2) * plusP_ik * minusC_lk * minusP_jl)/(ik * jl) - (Complex(0,2) * plusP_ij * plusP_jk * minusC_ji * minusP_ij * minusP_jl)/ (jl * (jk + jl + kl) * plusP_il * minusP_ik) + (Complex(0,2) * plusP_ij * plusP_kl * minusC_li * minusP_ij * minusP_jl)/ (jl * (jk + jl + kl) * plusP_il * minusP_ik) - (Complex(0,2) * plusP_jk * minusC_ji * minusP_il * minusP_jl)/ (jl * (jk + jl + kl) * minusP_ik) - (Complex(0,1) * plusP_ik * plusP_jl * minusC_ji * minusP_il * minusP_jl)/ (kl * (jk + jl + kl) * plusP_il * minusP_ik) + (Complex(0,2) * plusP_ij * plusP_kl * minusC_ji * minusP_il * minusP_jl)/ (kl * (jk + jl + kl) * plusP_il * minusP_ik) + (Complex(0,1) * plusP_ik * plusP_kl * minusC_ki * minusP_il * minusP_jl)/ (kl * (jk + jl + kl) * plusP_il * minusP_ik) + (Complex(0,2) * plusP_kl * minusC_li * minusP_il * minusP_jl)/ (jl * (jk + jl + kl) * minusP_ik) + (Complex(0,2) * plusP_kl * minusC_li * minusP_il * minusP_jl)/ (kl * (jk + jl + kl) * minusP_ik) - (Complex(0,2) * sqr(plusP_ik) * minusC_jk * minusP_kl)/ (ik * (ik + il + kl) * plusP_il) - (Complex(0,2) * sqr(plusP_ik) * minusC_jk * minusP_kl)/ (kl * (ik + il + kl) * plusP_il) + (Complex(0,2) * plusP_ik * plusP_jk * minusC_ji * minusP_ij * minusP_kl)/ 
(kl * (jk + jl + kl) * plusP_il * minusP_ik) - (Complex(0,2) * plusP_ik * plusP_kl * minusC_li * minusP_ij * minusP_kl)/ (kl * (jk + jl + kl) * plusP_il * minusP_ik) + (Complex(0,1) * plusP_ik * minusC_jk * minusP_il * minusP_kl)/ (kl * (ik + il + kl) * minusP_ik) - (Complex(0,1) * sqr(plusP_ik) * minusC_jl * minusP_il * minusP_kl)/ (kl * (ik + il + kl) * plusP_il * minusP_ik); } if ( g1Hel == -1 && g2Hel == 1 ) { return (Complex(0,1) * sqr(plusP_il) * minusC_ji * minusP_ik)/ (kl * (ik + il + kl) * plusP_ik) - (Complex(0,1) * plusP_il * minusC_ji * sqr(minusP_ik))/ (kl * (ik + il + kl) * minusP_il) - (Complex(0,2) * plusP_ij * plusP_jl * minusC_ji * minusP_ij * minusP_jk)/ (jl * (jk + jl + kl) * plusP_ik * minusP_il) - (Complex(0,2) * plusP_jl * minusC_ki * minusP_ij * minusP_jk)/ (jl * (jk + jl + kl) * minusP_il) - (Complex(0,2) * plusP_il * plusP_jl * minusC_li * minusP_ij * minusP_jk)/ (jl * (jk + jl + kl) * plusP_ik * minusP_il) + (Complex(0,1) * plusP_il * plusP_jk * minusC_ji * minusP_ik * minusP_jk)/ (kl * (jk + jl + kl) * plusP_ik * minusP_il) + (Complex(0,2) * plusP_ij * plusP_kl * minusC_ji * minusP_ik * minusP_jk)/ (kl * (jk + jl + kl) * plusP_ik * minusP_il) + (Complex(0,2) * plusP_kl * minusC_ki * minusP_ik * minusP_jk)/ (kl * (jk + jl + kl) * minusP_il) + (Complex(0,1) * plusP_il * plusP_kl * minusC_li * minusP_ik * minusP_jk)/ (kl * (jk + jl + kl) * plusP_ik * minusP_il) - (Complex(0,1) * plusP_il * plusP_jl * minusC_ji * minusP_ik * minusP_jl)/ (kl * (jk + jl + kl) * plusP_ik * minusP_il) - (Complex(0,1) * plusP_il * plusP_kl * minusC_ki * minusP_ik * minusP_jl)/ (kl * (jk + jl + kl) * plusP_ik * minusP_il) - (Complex(0,2) * sqr(plusP_il) * minusC_jl * minusP_kl)/ (kl * (ik + il + kl) * plusP_ik) + (Complex(0,2) * plusP_il * plusP_jl * minusC_ji * minusP_ij * minusP_kl)/ (kl * (jk + jl + kl) * plusP_ik * minusP_il) + (Complex(0,2) * plusP_il * plusP_kl * minusC_ki * minusP_ij * minusP_kl)/ (kl * (jk + jl + kl) * plusP_ik * minusP_il) - (Complex(0,1) * sqr(plusP_il) * minusC_jk * minusP_ik * minusP_kl)/ (kl * (ik + il + kl) * plusP_ik * minusP_il) + (Complex(0,1) * plusP_il * minusC_jl * minusP_ik * minusP_kl)/ (kl * (ik + il + kl) * minusP_il); } if ( g1Hel == -1 && g2Hel == -1 ) { return (Complex(0,2) * sqr(plusP_ij) * minusC_ji * minusP_jk * minusP_jl)/ (jl * (jk + jl + kl) * plusP_ik * plusP_il) + (Complex(0,2) * plusP_ij * minusC_ki * minusP_jk * minusP_jl)/ (jl * (jk + jl + kl) * plusP_il) + (Complex(0,2) * plusP_ij * minusC_li * minusP_jk * minusP_jl)/ (jl * (jk + jl + kl) * plusP_ik) - (Complex(0,2) * plusP_ij * minusC_ji * minusP_jk * minusP_kl)/ (kl * (jk + jl + kl) * plusP_il) - (Complex(0,2) * plusP_ik * minusC_ki * minusP_jk * minusP_kl)/ (kl * (jk + jl + kl) * plusP_il) - (Complex(0,2) * minusC_li * minusP_jk * minusP_kl)/ (kl * (jk + jl + kl)) - (Complex(0,2) * plusP_ij * minusC_ji * minusP_jl * minusP_kl)/ (jl * (jk + jl + kl) * plusP_ik) - (Complex(0,2) * plusP_ij * minusC_ji * minusP_jl * minusP_kl)/ (kl * (jk + jl + kl) * plusP_ik) - (Complex(0,2) * minusC_ki * minusP_jl * minusP_kl)/ (jl * (jk + jl + kl)) - (Complex(0,2) * minusC_ki * minusP_jl * minusP_kl)/ (kl * (jk + jl + kl)) - (Complex(0,2) * plusP_il * minusC_li * minusP_jl * minusP_kl)/ (jl * (jk + jl + kl) * plusP_ik) - (Complex(0,2) * plusP_il * minusC_li * minusP_jl * minusP_kl)/ (kl * (jk + jl + kl) * plusP_ik); } return czero; } const LorentzVector<Complex>& MatchboxCurrents::qqbarggLeftCurrent(const int q, const int qHel, const int qbar, const int qbarHel, const int g1, const int g1Hel, const 
int g2, const int g2Hel) { if ( qHel != 1 || qbarHel != 1 ) return czero; if ( getCurrent(hash<3>(1,1,q,qHel,qbar,qbarHel,g1,g1Hel,g2,g2Hel)) ) { #ifdef CHECK_MatchboxCurrents LorentzVector<Complex> ni = qqbarggGeneralLeftCurrent(q,qHel,qbar,qbarHel,g1,g1Hel,g2,g2Hel,q); LorentzVector<Complex> nj = qqbarggGeneralLeftCurrent(q,qHel,qbar,qbarHel,g1,g1Hel,g2,g2Hel,qbar); LorentzVector<Complex> nl = qqbarggGeneralLeftCurrent(q,qHel,qbar,qbarHel,g1,g1Hel,g2,g2Hel,0); LorentzVector<Complex> nlbar = qqbarggGeneralLeftCurrent(q,qHel,qbar,qbarHel,g1,g1Hel,g2,g2Hel,1); LorentzVector<Complex> fixed = qqbarggFixedLeftCurrent(q,qHel,qbar,qbarHel,g1,g1Hel,g2,g2Hel); LorentzVector<Complex> x1 = fixed - ni; LorentzVector<Complex> x2 = fixed - nj; LorentzVector<Complex> x3 = fixed - nl; LorentzVector<Complex> x4 = fixed - nlbar; double c1 = real(x1.t() * conj(x1.t())) + real(x1.x() * conj(x1.x())) + real(x1.y() * conj(x1.y())) + real(x1.z() * conj(x1.z())); double c2 = real(x2.t() * conj(x2.t())) + real(x2.x() * conj(x2.x())) + real(x2.y() * conj(x2.y())) + real(x2.z() * conj(x2.z())); double c3 = real(x3.t() * conj(x3.t())) + real(x3.x() * conj(x3.x())) + real(x3.y() * conj(x3.y())) + real(x3.z() * conj(x3.z())); double c4 = real(x4.t() * conj(x4.t())) + real(x4.x() * conj(x4.x())) + real(x4.y() * conj(x4.y())) + real(x4.z() * conj(x4.z())); ostream& ncheck = checkStream("qqbarggLeftCurrentNChoice"); ncheck << (c1 != 0. ? log10(abs(c1)) : 0.) << " " << (c2 != 0. ? log10(abs(c2)) : 0.) << " " << (c3 != 0. ? log10(abs(c3)) : 0.) << " " << (c4 != 0. ? log10(abs(c4)) : 0.) << " " << "\n" << flush; #endif cacheCurrent(qqbarggFixedLeftCurrent(q,qHel,qbar,qbarHel,g1,g1Hel,g2,g2Hel)); } #ifdef CHECK_MatchboxCurrents checkCurrent("qqbarggLeftCurrent",cachedCurrent(),momentum(q)+momentum(qbar)+momentum(g1)+momentum(g2)); #endif return cachedCurrent(); } const LorentzVector<Complex>& MatchboxCurrents::qqbarggRightCurrent(const int q, const int qHel, const int qbar, const int qbarHel, const int g1, const int g1Hel, const int g2, const int g2Hel) { if ( qHel != -1 || qbarHel != -1 ) return czero; if ( getCurrent(hash<3>(2,1,q,qHel,qbar,qbarHel,g1,g1Hel,g2,g2Hel)) ) { #ifdef CHECK_MatchboxCurrents LorentzVector<Complex> ni = qqbarggGeneralRightCurrent(q,qHel,qbar,qbarHel,g1,g1Hel,g2,g2Hel,q); LorentzVector<Complex> nj = qqbarggGeneralRightCurrent(q,qHel,qbar,qbarHel,g1,g1Hel,g2,g2Hel,qbar); LorentzVector<Complex> nl = qqbarggGeneralRightCurrent(q,qHel,qbar,qbarHel,g1,g1Hel,g2,g2Hel,0); LorentzVector<Complex> nlbar = qqbarggGeneralRightCurrent(q,qHel,qbar,qbarHel,g1,g1Hel,g2,g2Hel,1); LorentzVector<Complex> fixed = qqbarggFixedRightCurrent(q,qHel,qbar,qbarHel,g1,g1Hel,g2,g2Hel); LorentzVector<Complex> x1 = fixed - ni; LorentzVector<Complex> x2 = fixed - nj; LorentzVector<Complex> x3 = fixed - nl; LorentzVector<Complex> x4 = fixed - nlbar; double c1 = real(x1.t() * conj(x1.t())) + real(x1.x() * conj(x1.x())) + real(x1.y() * conj(x1.y())) + real(x1.z() * conj(x1.z())); double c2 = real(x2.t() * conj(x2.t())) + real(x2.x() * conj(x2.x())) + real(x2.y() * conj(x2.y())) + real(x2.z() * conj(x2.z())); double c3 = real(x3.t() * conj(x3.t())) + real(x3.x() * conj(x3.x())) + real(x3.y() * conj(x3.y())) + real(x3.z() * conj(x3.z())); double c4 = real(x4.t() * conj(x4.t())) + real(x4.x() * conj(x4.x())) + real(x4.y() * conj(x4.y())) + real(x4.z() * conj(x4.z())); ostream& ncheck = checkStream("qqbarggRightCurrentNChoice"); ncheck << (c1 != 0. ? log10(abs(c1)) : 0.) << " " << (c2 != 0. ? log10(abs(c2)) : 0.) << " " << (c3 != 0. ? 
log10(abs(c3)) : 0.) << " " << (c4 != 0. ? log10(abs(c4)) : 0.) << " " << "\n" << flush; #endif cacheCurrent(qqbarggFixedRightCurrent(q,qHel,qbar,qbarHel,g1,g1Hel,g2,g2Hel)); } #ifdef CHECK_MatchboxCurrents checkCurrent("qqbarggRightCurrent",cachedCurrent(),momentum(q)+momentum(qbar)+momentum(g1)+momentum(g2)); #endif return cachedCurrent(); } const LorentzVector<Complex>& MatchboxCurrents::qqbarqqbarLeftCurrent(const int q, const int qHel, const int qbar, const int qbarHel, const int k, const int kHel, const int kbar, const int kbarHel) { if ( qHel != 1 || qbarHel != 1 || abs(kHel+kbarHel) != 2 ) return czero; const int i = q; const int j = qbar; const int l = kbar; const double ik = invariant(i,k); const double il = invariant(i,l); const double jk = invariant(j,k); const double jl = invariant(j,l); const double kl = invariant(k,l); const Complex plusP_ik = plusProduct(i,k); const Complex plusP_il = plusProduct(i,l); const Complex plusP_kj = plusProduct(k,j); const Complex plusP_kl = plusProduct(k,l); const Complex plusP_lj = plusProduct(l,j); const Complex plusP_lk = plusProduct(l,k); const Complex minusP_ik = minusProduct(i,k); const Complex minusP_il = minusProduct(i,l); const Complex minusP_jk = minusProduct(j,k); const Complex minusP_jl = minusProduct(j,l); const Complex minusP_ki = minusProduct(k,i); const Complex minusP_kl = minusProduct(k,l); const Complex minusP_li = minusProduct(l,i); const Complex minusP_lk = minusProduct(l,k); const LorentzVector<Complex> & minusC_ij = minusCurrent(i,j); const LorentzVector<Complex> & minusC_ik = minusCurrent(i,k); const LorentzVector<Complex> & minusC_il = minusCurrent(i,l); const LorentzVector<Complex> & minusC_kj = minusCurrent(k,j); const LorentzVector<Complex> & minusC_lj = minusCurrent(l,j); if ( kHel == 1 && kbarHel == 1 ) { if ( getCurrent(hash<4>(1,1,q,qHel,qbar,qbarHel,k,kHel,kbar,kbarHel)) ) { cacheCurrent((Complex(0.,-2.)/kl)* ((minusP_ki * plusP_il * minusC_ij+ minusP_ik * plusP_lk * minusC_kj)/ (kl+il+ik)- (minusP_jk * plusP_lj * minusC_ij+ minusP_lk * plusP_lj * minusC_il)/ (kl+jl+jk))); } #ifdef CHECK_MatchboxCurrents checkCurrent("qqbarqqbarLeftCurrent",cachedCurrent(),momentum(q)+momentum(qbar)+momentum(k)+momentum(kbar)); #endif return cachedCurrent(); } if ( kHel == -1 && kbarHel == -1 ) { if ( getCurrent(hash<4>(1,1,q,qHel,qbar,qbarHel,k,kHel,kbar,kbarHel)) ) { cacheCurrent((Complex(0.,-2.)/kl)* ((minusP_li * plusP_ik * minusC_ij+ minusP_il * plusP_kl * minusC_lj)/ (kl+il+ik)- (minusP_jl * plusP_kj * minusC_ij+ minusP_kl * plusP_kj * minusC_ik)/ (kl+jl+jk))); } #ifdef CHECK_MatchboxCurrents checkCurrent("qqbarqqbarLeftCurrent",cachedCurrent(),momentum(q)+momentum(qbar)+momentum(k)+momentum(kbar)); #endif return cachedCurrent(); } return czero; } const LorentzVector<Complex>& MatchboxCurrents::qqbarqqbarRightCurrent(const int q, const int qHel, const int qbar, const int qbarHel, const int k, const int kHel, const int kbar, const int kbarHel) { if ( qHel != -1 || qbarHel != -1 || abs(kHel+kbarHel) != 2 ) return czero; const int i = q; const int j = qbar; const int l = kbar; const double ik = invariant(i,k); const double il = invariant(i,l); const double jk = invariant(j,k); const double jl = invariant(j,l); const double kl = invariant(k,l); const Complex plusP_ik = plusProduct(i,k); const Complex plusP_il = plusProduct(i,l); const Complex plusP_ki = plusProduct(k,i); const Complex plusP_kj = plusProduct(k,j); const Complex plusP_kl = plusProduct(k,l); const Complex plusP_li = plusProduct(l,i); const Complex plusP_lj = 
plusProduct(l,j); const Complex plusP_lk = plusProduct(l,k); const Complex minusP_jk = minusProduct(j,k); const Complex minusP_jl = minusProduct(j,l); const Complex minusP_ki = minusProduct(k,i); const Complex minusP_kl = minusProduct(k,l); const Complex minusP_li = minusProduct(l,i); const Complex minusP_lk = minusProduct(l,k); const LorentzVector<Complex> & minusC_ji = minusCurrent(j,i); const LorentzVector<Complex> & minusC_jk = minusCurrent(j,k); const LorentzVector<Complex> & minusC_jl = minusCurrent(j,l); const LorentzVector<Complex> & minusC_ki = minusCurrent(k,i); const LorentzVector<Complex> & minusC_li = minusCurrent(l,i); if ( kHel == 1 && kbarHel == 1 ) { if ( getCurrent(hash<4>(2,1,q,qHel,qbar,qbarHel,k,kHel,kbar,kbarHel)) ) { cacheCurrent((Complex(0.,-2.)/kl)* ((minusP_ki * plusP_il * minusC_ji+ minusP_lk * plusP_li * minusC_jl)/ (kl+il+ik)- (minusP_jk * plusP_lj * minusC_ji+ minusP_jk * plusP_lk * minusC_ki)/ (kl+jl+jk))); } #ifdef CHECK_MatchboxCurrents checkCurrent("qqbarqqbarRightCurrent",cachedCurrent(),momentum(q)+momentum(qbar)+momentum(k)+momentum(kbar)); #endif return cachedCurrent(); } if ( kHel == -1 && kbarHel == -1 ) { if ( getCurrent(hash<4>(2,1,q,qHel,qbar,qbarHel,k,kHel,kbar,kbarHel)) ) { cacheCurrent((Complex(0.,-2.)/kl)* ((minusP_li * plusP_ik * minusC_ji+ minusP_kl * plusP_ki * minusC_jk)/ (kl+il+ik)- (minusP_jl * plusP_kj * minusC_ji+ minusP_jl * plusP_kl * minusC_li)/ (kl+jl+jk))); } #ifdef CHECK_MatchboxCurrents checkCurrent("qqbarqqbarRightCurrent",cachedCurrent(),momentum(q)+momentum(qbar)+momentum(k)+momentum(kbar)); #endif return cachedCurrent(); } return czero; } // Definition of sqrt to enable calculation of the sqrt of a negative double inline Complex sqrt1 (double a) { if (a > 0.) { return Complex(sqrt(a), 0.) ;} else if (a < 0.) { return Complex(0., sqrt(abs(a))) ;} else { return Complex(0., 0.); } } // Definition of sqrt to enable calculation of the sqrt of Complex arguments inline Complex sqrt1 (Complex a) { const double real_part = sqrt(abs(a))*cos(0.5*arg(a)); const double imag_part = sqrt(abs(a))*sin(0.5*arg(a)); return Complex(real_part, imag_part) ; } // Definition of log to enable continuation of the log of a negative double inline Complex log1 (double a) { if (a < 0.) { return Complex(log(abs(a)), Constants::pi) ;} else { return Complex(log(a), 0.) ;} } // Definition of log to enable continuation of the log of a Complex argument with a negative real part inline Complex log1 (Complex a) { return Complex(log(abs(a)), arg(a)) ; } const LorentzVector<Complex>& MatchboxCurrents::qqbarLeftOneLoopCurrent(const int q, const int qHel, const int qbar, const int qbarHel) { // Note this cannot currently handle the case of one massive quark and one massless quark assert( (mass(q) == 0 && mass(qbar) == 0) || (mass(q) != 0 && mass(qbar) != 0) ); // Massless quarks if ( mass(q) == 0 && mass(qbar) == 0 ) { if ( qHel != 1 || qbarHel != 1 ) return czero; const LorentzVector<Complex>& tree = qqbarLeftCurrent(q,qHel,qbar,qbarHel); if ( getCurrent(hash<1>(1,2,q,qHel,qbar,qbarHel)) ) { cacheCurrent( 0.5*CF*( -8. 
- 3.*log1(-1./invariant(q,qbar)) - sqr(log1(-1./invariant(q,qbar))) ) * tree ); } #ifdef CHECK_MatchboxCurrents checkCurrent("qqbarLeftOneLoopCurrent",cachedCurrent(),momentum(q)+momentum(qbar)); #endif return cachedCurrent(); } // Massive quarks else { const LorentzVector<Complex>& momQ = momentum(q) + (sqr(mass(q))/invariant(q,qbar))*momentum(qbar); const LorentzVector<Complex>& momQbar = momentum(qbar) + (sqr(mass(qbar))/invariant(q,qbar))*momentum(q); const Complex s = (momQ+momQbar).dot(momQ+momQbar); const Complex inv12 = s - sqr(mass(q)) - sqr(mass(qbar)); // Coefficient of the left-handed born-level current const Complex coeffLeftTree = -1.0*log1(1./sqr(mass(q)))-1.0*log1(1./sqr(mass(qbar)))-4.0 + 0.5*((2.*log1(sqr(mass(q))/sqr(mass(qbar)))*(0.5*inv12+sqr(mass(q))))/(1.*inv12+sqr(mass(q))+sqr(mass(qbar)))-(2.*inv12*Li2(0.5-(0.25*inv12)/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))-(0.5*sqr(mass(qbar)))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))+(2.*inv12*Li2((0.5*(0.5*inv12+sqr(mass(q))+sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))-(1.*inv12*log1(-((0.5*inv12+sqr(mass(q))-1.*sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar))))/(1.*inv12+sqr(mass(q))+sqr(mass(qbar)))))*log1(-((1.*inv12+sqr(mass(q))+sqr(mass(qbar)))/(0.5*inv12+sqr(mass(q))+sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))+(2.*inv12*log1((2*sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar))))/(1.*inv12+sqr(mass(q))+sqr(mass(qbar))))*log1((-0.5*inv12-1.*sqr(mass(qbar))+sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar))))/(0.5*inv12+sqr(mass(q))+sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar))))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))+(1.*inv12*log1((1.*inv12+sqr(mass(q))+sqr(mass(qbar)))/(0.5*inv12+sqr(mass(qbar))-1.*sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))*log1((0.5*inv12+sqr(mass(qbar))+sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar))))/(1.*inv12+sqr(mass(q))+sqr(mass(qbar)))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))+(0.5*inv12*sqr(log1(-((0.5*inv12+sqr(mass(q))-1.*sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar))))/(1.*inv12+sqr(mass(q))+sqr(mass(qbar)))))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))+(0.5*inv12*sqr(log1((1.*inv12+sqr(mass(q))+sqr(mass(qbar)))/(0.5*inv12+sqr(mass(qbar))-1.*sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))-(0.5*inv12*sqr(log1(-((1.*inv12+sqr(mass(q))+sqr(mass(qbar)))/(0.5*inv12+sqr(mass(q))+sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar))))))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))-(0.5*inv12*sqr(log1((0.5*inv12+sqr(mass(qbar))+sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar))))/(1.*inv12+sqr(mass(q))+sqr(mass(qbar))))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))+(4*log1((-1.*mass(q)*mass(qbar))/(0.5*inv12+sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))*(-0.25*(3+2*log1(1./(1.*inv12+sqr(mass(q))+sqr(mass(qbar)))))*sqr(inv12)+sqr(mass(q))*sqr(mass(qbar))-0.5*inv12*(1.+log1(1./(1.*inv12+sqr(mass(q))+sqr(mass(qbar)))))*(sqr(mass(q))+sqr(mass(qbar)))))/((1.*inv12+sqr(mass(q))+sqr(mass(qbar)))*sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar))))); // Coefficient of the right-handed born-level current const Complex coeffRightTree = 
(2*log1((-1.*mass(q)*mass(qbar))/(0.5*inv12+sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))*mass(q)*mass(qbar))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar))); const LorentzVector<Complex>& leftTree = qqbarLeftCurrent(q,qHel,qbar,qbarHel); const LorentzVector<Complex>& rightTree = qqbarRightCurrent(q,qHel,qbar,qbarHel); if ( getCurrent(hash<1>(1,2,q,qHel,qbar,qbarHel)) ) { if ( qHel == 1 && qbarHel == 1 ) { cacheCurrent( 0.5*CF*( coeffLeftTree*leftTree + coeffRightTree*rightTree) ); } if ( qHel == 1 && qbarHel == -1 ) { // Coefficients of the left and right handed products of massive spinors const LorentzVector<Complex>& coeffLeftProd = ( (mass(qbar)*(-2.*(momQ+momQbar)*(1.*inv12+sqr(mass(q))+sqr(mass(qbar)))+log1(sqr(mass(q))/sqr(mass(qbar)))*(1.*inv12*(2.*momQ+momQbar)+(3.*momQ+2.*momQbar)*sqr(mass(q))+momQ*sqr(mass(qbar)))-(2.*log1((-1.*mass(q)*mass(qbar))/(0.5*inv12+sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))*(0.5*(2.*momQ+momQbar)*sqr(inv12)-momQbar*sqr(mass(q))*sqr(mass(qbar))+0.5*inv12*((5.*momQ+2.*momQbar)*sqr(mass(q))+momQ*sqr(mass(qbar)))+(2.*momQ+momQbar)*sqr(sqr(mass(q)))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))/sqr(1.*inv12+sqr(mass(q))+sqr(mass(qbar))) ); const LorentzVector<Complex>& coeffRightProd = ( (mass(q)*(2.*(momQ+momQbar)*(1.*inv12+sqr(mass(q))+sqr(mass(qbar)))+log1(sqr(mass(q))/sqr(mass(qbar)))*(1.*inv12*(momQ+2.*momQbar)+momQbar*sqr(mass(q))+(2.*momQ+3.*momQbar)*sqr(mass(qbar)))+(2.*log1((-1.*mass(q)*mass(qbar))/(0.5*inv12+sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))*(0.5*(momQ+2.*momQbar)*sqr(inv12)-momQ*sqr(mass(q))*sqr(mass(qbar))+0.5*inv12*(momQbar*sqr(mass(q))+(2.*momQ+5.*momQbar)*sqr(mass(qbar)))+(momQ+2.*momQbar)*sqr(sqr(mass(qbar)))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))/sqr(1.*inv12+sqr(mass(q))+sqr(mass(qbar))) ); const Complex leftProd = Complex(0.,1.) * minusProduct(q,qbar); const Complex rightProd = Complex(0.,1.) * mass(q)*mass(qbar)/plusProduct(q,qbar); cacheCurrent( 0.5*CF*( coeffLeftTree*leftTree + coeffRightTree*rightTree + coeffLeftProd*leftProd + coeffRightProd*rightProd ) ); } if ( qHel == -1 && qbarHel == 1 ){ // Coefficients of the left and right handed products of massive spinors const LorentzVector<Complex>& coeffLeftProd = ( (mass(qbar)*(-2.*(momQ+momQbar)*(1.*inv12+sqr(mass(q))+sqr(mass(qbar)))+log1(sqr(mass(q))/sqr(mass(qbar)))*(1.*inv12*(2.*momQ+momQbar)+(3.*momQ+2.*momQbar)*sqr(mass(q))+momQ*sqr(mass(qbar)))-(2.*log1((-1.*mass(q)*mass(qbar))/(0.5*inv12+sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))*(0.5*(2.*momQ+momQbar)*sqr(inv12)-momQbar*sqr(mass(q))*sqr(mass(qbar))+0.5*inv12*((5.*momQ+2.*momQbar)*sqr(mass(q))+momQ*sqr(mass(qbar)))+(2.*momQ+momQbar)*sqr(sqr(mass(q)))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))/sqr(1.*inv12+sqr(mass(q))+sqr(mass(qbar))) ); const LorentzVector<Complex>& coeffRightProd = ( (mass(q)*(2.*(momQ+momQbar)*(1.*inv12+sqr(mass(q))+sqr(mass(qbar)))+log1(sqr(mass(q))/sqr(mass(qbar)))*(1.*inv12*(momQ+2.*momQbar)+momQbar*sqr(mass(q))+(2.*momQ+3.*momQbar)*sqr(mass(qbar)))+(2.*log1((-1.*mass(q)*mass(qbar))/(0.5*inv12+sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))*(0.5*(momQ+2.*momQbar)*sqr(inv12)-momQ*sqr(mass(q))*sqr(mass(qbar))+0.5*inv12*(momQbar*sqr(mass(q))+(2.*momQ+5.*momQbar)*sqr(mass(qbar)))+(momQ+2.*momQbar)*sqr(sqr(mass(qbar)))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))/sqr(1.*inv12+sqr(mass(q))+sqr(mass(qbar))) ); const Complex leftProd = Complex(0.,1.) 
* mass(q)*mass(qbar)/minusProduct(q,qbar); const Complex rightProd = Complex(0.,1.) * plusProduct(q,qbar); cacheCurrent( 0.5*CF*( coeffLeftTree*leftTree + coeffRightTree*rightTree + coeffLeftProd*leftProd + coeffRightProd*rightProd ) ); } if ( qHel == -1 && qbarHel == -1 ){ cacheCurrent( 0.5*CF*( coeffLeftTree*leftTree + coeffRightTree*rightTree ) ); } } #ifdef CHECK_MatchboxCurrents checkCurrent("qqbarLeftOneLoopCurrent",cachedCurrent(),momentum(q)+momentum(qbar)); #endif return cachedCurrent(); } } const LorentzVector<Complex>& MatchboxCurrents::qqbarRightOneLoopCurrent(const int q, const int qHel, const int qbar, const int qbarHel) { // Note this cannot currently handle the case of one massive quark and one massless quark assert( (mass(q) == 0 && mass(qbar) == 0) || (mass(q) != 0 && mass(qbar) != 0) ); // Massless quarks if ( mass(q) == 0 && mass(qbar) ==0 ) { if ( qHel != -1 || qbarHel != -1 ) return czero; const LorentzVector<Complex>& tree = qqbarRightCurrent(q,qHel,qbar,qbarHel); if ( getCurrent(hash<1>(2,2,q,qHel,qbar,qbarHel)) ) { cacheCurrent( 0.5*CF*( -8. - 3.*log1(-1./invariant(q,qbar)) - sqr(log1(-1./invariant(q,qbar))) ) * tree ); } #ifdef CHECK_MatchboxCurrents checkCurrent("qqbarRightOneLoopCurrent",cachedCurrent(),momentum(q)+momentum(qbar)); #endif return cachedCurrent(); } // Massive quarks else { const LorentzVector<Complex>& momQ = momentum(q) + (sqr(mass(q))/invariant(q,qbar))*momentum(qbar); const LorentzVector<Complex>& momQbar = momentum(qbar) + (sqr(mass(qbar))/invariant(q,qbar))*momentum(q); const Complex s = (momQ+momQbar).dot(momQ+momQbar); const Complex inv12 = s - sqr(mass(q)) - sqr(mass(qbar)); // Coefficient of the right-handed born-level current const Complex coeffRightTree = -1.0*log1(1./sqr(mass(q)))-1.0*log1(1./sqr(mass(qbar)))-4.0 + 
0.5*((2.*log1(sqr(mass(q))/sqr(mass(qbar)))*(0.5*inv12+sqr(mass(q))))/(1.*inv12+sqr(mass(q))+sqr(mass(qbar)))-(2.*inv12*Li2(0.5-(0.25*inv12)/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))-(0.5*sqr(mass(qbar)))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))+(2.*inv12*Li2((0.5*(0.5*inv12+sqr(mass(q))+sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))-(1.*inv12*log1(-((0.5*inv12+sqr(mass(q))-1.*sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar))))/(1.*inv12+sqr(mass(q))+sqr(mass(qbar)))))*log1(-((1.*inv12+sqr(mass(q))+sqr(mass(qbar)))/(0.5*inv12+sqr(mass(q))+sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))+(2.*inv12*log1((2*sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar))))/(1.*inv12+sqr(mass(q))+sqr(mass(qbar))))*log1((-0.5*inv12-1.*sqr(mass(qbar))+sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar))))/(0.5*inv12+sqr(mass(q))+sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar))))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))+(1.*inv12*log1((1.*inv12+sqr(mass(q))+sqr(mass(qbar)))/(0.5*inv12+sqr(mass(qbar))-1.*sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))*log1((0.5*inv12+sqr(mass(qbar))+sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar))))/(1.*inv12+sqr(mass(q))+sqr(mass(qbar)))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))+(0.5*inv12*sqr(log1(-((0.5*inv12+sqr(mass(q))-1.*sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar))))/(1.*inv12+sqr(mass(q))+sqr(mass(qbar)))))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))+(0.5*inv12*sqr(log1((1.*inv12+sqr(mass(q))+sqr(mass(qbar)))/(0.5*inv12+sqr(mass(qbar))-1.*sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))-(0.5*inv12*sqr(log1(-((1.*inv12+sqr(mass(q))+sqr(mass(qbar)))/(0.5*inv12+sqr(mass(q))+sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar))))))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))-(0.5*inv12*sqr(log1((0.5*inv12+sqr(mass(qbar))+sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar))))/(1.*inv12+sqr(mass(q))+sqr(mass(qbar))))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))+(4*log1((-1.*mass(q)*mass(qbar))/(0.5*inv12+sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))*(-0.25*(3+2*log1(1./(1.*inv12+sqr(mass(q))+sqr(mass(qbar)))))*sqr(inv12)+sqr(mass(q))*sqr(mass(qbar))-0.5*inv12*(1.+log1(1./(1.*inv12+sqr(mass(q))+sqr(mass(qbar)))))*(sqr(mass(q))+sqr(mass(qbar)))))/((1.*inv12+sqr(mass(q))+sqr(mass(qbar)))*sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar))))); // Coefficient of the left-handed born-level current const Complex coeffLeftTree = (2*log1((-1.*mass(q)*mass(qbar))/(0.5*inv12+sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))*mass(q)*mass(qbar))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar))); const LorentzVector<Complex>& leftTree = qqbarLeftCurrent(q,qHel,qbar,qbarHel); const LorentzVector<Complex>& rightTree = qqbarRightCurrent(q,qHel,qbar,qbarHel); if ( getCurrent(hash<1>(2,2,q,qHel,qbar,qbarHel)) ) { if ( qHel == 1 && qbarHel == 1 ) { cacheCurrent( 0.5*CF*( coeffLeftTree*leftTree + coeffRightTree*rightTree ) ); } if ( qHel == 1 && qbarHel == -1 ) { // Coefficients of the right and left handed products of massive spinors const LorentzVector<Complex>& coeffRightProd = ( 
(mass(qbar)*(-2.*(momQ+momQbar)*(1.*inv12+sqr(mass(q))+sqr(mass(qbar)))+log1(sqr(mass(q))/sqr(mass(qbar)))*(1.*inv12*(2.*momQ+momQbar)+(3.*momQ+2.*momQbar)*sqr(mass(q))+momQ*sqr(mass(qbar)))-(2.*log1((-1.*mass(q)*mass(qbar))/(0.5*inv12+sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))*(0.5*(2.*momQ+momQbar)*sqr(inv12)-momQbar*sqr(mass(q))*sqr(mass(qbar))+0.5*inv12*((5.*momQ+2.*momQbar)*sqr(mass(q))+momQ*sqr(mass(qbar)))+(2.*momQ+momQbar)*sqr(sqr(mass(q)))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))/sqr(1.*inv12+sqr(mass(q))+sqr(mass(qbar))) ); const LorentzVector<Complex>& coeffLeftProd = ( (mass(q)*(2.*(momQ+momQbar)*(1.*inv12+sqr(mass(q))+sqr(mass(qbar)))+log1(sqr(mass(q))/sqr(mass(qbar)))*(1.*inv12*(momQ+2.*momQbar)+momQbar*sqr(mass(q))+(2.*momQ+3.*momQbar)*sqr(mass(qbar)))+(2.*log1((-1.*mass(q)*mass(qbar))/(0.5*inv12+sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))*(0.5*(momQ+2.*momQbar)*sqr(inv12)-momQ*sqr(mass(q))*sqr(mass(qbar))+0.5*inv12*(momQbar*sqr(mass(q))+(2.*momQ+5.*momQbar)*sqr(mass(qbar)))+(momQ+2.*momQbar)*sqr(sqr(mass(qbar)))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))/sqr(1.*inv12+sqr(mass(q))+sqr(mass(qbar))) ); const Complex leftProd = Complex(0.,1.) * minusProduct(q,qbar); const Complex rightProd = Complex(0.,1.) * mass(q)*mass(qbar)/plusProduct(q,qbar); cacheCurrent( 0.5*CF*( coeffLeftTree*leftTree + coeffRightTree*rightTree + coeffLeftProd*leftProd + coeffRightProd*rightProd ) ); } if ( qHel == -1 && qbarHel == 1 ){ // Coefficients of the right and left handed products of massive spinors const LorentzVector<Complex>& coeffRightProd = ( (mass(qbar)*(-2.*(momQ+momQbar)*(1.*inv12+sqr(mass(q))+sqr(mass(qbar)))+log1(sqr(mass(q))/sqr(mass(qbar)))*(1.*inv12*(2.*momQ+momQbar)+(3.*momQ+2.*momQbar)*sqr(mass(q))+momQ*sqr(mass(qbar)))-(2.*log1((-1.*mass(q)*mass(qbar))/(0.5*inv12+sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))*(0.5*(2.*momQ+momQbar)*sqr(inv12)-momQbar*sqr(mass(q))*sqr(mass(qbar))+0.5*inv12*((5.*momQ+2.*momQbar)*sqr(mass(q))+momQ*sqr(mass(qbar)))+(2.*momQ+momQbar)*sqr(sqr(mass(q)))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))/sqr(1.*inv12+sqr(mass(q))+sqr(mass(qbar))) ); const LorentzVector<Complex>& coeffLeftProd = ( (mass(q)*(2.*(momQ+momQbar)*(1.*inv12+sqr(mass(q))+sqr(mass(qbar)))+log1(sqr(mass(q))/sqr(mass(qbar)))*(1.*inv12*(momQ+2.*momQbar)+momQbar*sqr(mass(q))+(2.*momQ+3.*momQbar)*sqr(mass(qbar)))+(2.*log1((-1.*mass(q)*mass(qbar))/(0.5*inv12+sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))*(0.5*(momQ+2.*momQbar)*sqr(inv12)-momQ*sqr(mass(q))*sqr(mass(qbar))+0.5*inv12*(momQbar*sqr(mass(q))+(2.*momQ+5.*momQbar)*sqr(mass(qbar)))+(momQ+2.*momQbar)*sqr(sqr(mass(qbar)))))/sqrt1(0.25*sqr(inv12)-sqr(mass(q))*sqr(mass(qbar)))))/sqr(1.*inv12+sqr(mass(q))+sqr(mass(qbar))) ); const Complex leftProd = Complex(0.,1.) * mass(q)*mass(qbar)/minusProduct(q,qbar); const Complex rightProd = Complex(0.,1.) * plusProduct(q,qbar); cacheCurrent( 0.5*CF*( coeffLeftTree*leftTree + coeffRightTree*rightTree + coeffLeftProd*leftProd + coeffRightProd*rightProd ) ); } if ( qHel == -1 && qbarHel == -1 ){ cacheCurrent( 0.5*CF*( coeffLeftTree*leftTree + coeffRightTree*rightTree ) ); } } #ifdef CHECK_MatchboxCurrents checkCurrent("qqbarRightOneLoopCurrent",cachedCurrent(),momentum(q)+momentum(qbar)); #endif return cachedCurrent(); } } // ln(s(a+i0)) inline Complex log(double s, double a) { return s < 0. ? 
Complex(log(abs(a)),-pi * theta(a)) : Complex(log(abs(a)),pi * theta(-a)); } // ln(s(a+i0)/(b+i0)) inline Complex log(double s, double a, double b) { return s < 0. ? Complex(log(abs(a/b)),-pi * theta(a/b) * sign(b-a)) : Complex(log(abs(a/b)),pi * theta(-a/b) * sign(b-a)); } // Li2(-(a+i0)/(b+i0)) inline Complex Li2(double a, double b) { if ( -a/b < 1. ) return Complex(Herwig::Math::ReLi2(-a/b),0.0); return Complex(Herwig::Math::ReLi2(-a/b),-pi * log(-a/b) * sign(b-a)); } Complex MatchboxCurrents::box6(const int i, const int j, const int k) { const double sij = invariant(i,j); const double sik = invariant(i,k); const double sjk = invariant(j,k); return -( Li2(sik+sjk,sij) + Li2(sik+sij,sjk) + 0.5 * csqr(log(1.,sij,sjk)) + sqr(pi)/6. )/8.; } void MatchboxCurrents::qqbargLoopCoefficients(const int i, const int j, const int k) { // use a dummy cache entry to check if we need to get some work done static Complex dummy; if ( getAmplitude(hash<5>(1,2,i,0,j,0,k,0)) ) { dummy = 0.; cacheAmplitude(dummy); cachedAmplitude(); } else { cachedAmplitude(); return; } qqbargLoops.resize(13); // get the transcendentals const double ij = invariant(i,j); const double ij2 = sqr(ij); const double ij3 = ij2 * ij; const double ik = invariant(i,k); const double ik2 = sqr(ik); //const double ik3 = ik2 * ik; const double jk = invariant(j,k); const double jk2 = sqr(jk); const double jk3 = jk2 * jk; const double ij_ik = ij + ik; const double ij_ik_2 = sqr(ij_ik); const double ij_jk = ij + jk; const double ij_jk_2 = sqr(ij_jk); const double ik_jk = ik + jk; const double ik_jk_2 = sqr(ik_jk); const double Q2 = ij + ik + jk; // checked for LEP that virtuals + I operator are mu2 independent //double xmu2 = 10 * GeV2/sqr(amplitudeScale()); const double xmu2 = 1.; const Complex Lijk = log(1.,-xmu2/Q2); const Complex Lij = log(1.,Q2,ij); const Complex Lik = log(1.,Q2,ik); const Complex Ljk = log(1.,Q2,jk); const Complex Box6ijk = box6(i,j,k); const Complex Box6ikj = box6(i,k,j); const Complex Box6jik = box6(j,i,k); // get the coefficients qqbargLoops[0] = ( (2 * CF * ij2) - (32 * CA * Box6ijk * ij2) + (64 * CF * Box6ijk * ij2) - (8 * CA * Box6jik * ij2) + (16 * CF * Box6jik * ij2) + (2 * CA * Lij * ij2) - (4 * CF * Lij * ij2) - (CA * Lik * ij2) - (2 * CF * Lik * ij2) - (4 * CF * Ljk * ij2) - (16 * CA * Box6ijk * ij3) / ik + (32 * CF * Box6ijk * ij3) / ik + (CA * Lij * ij3) / ik - (2 * CF * Lij * ij3) / ik + (2 * CF * ij * ik) - (16 * CA * Box6ijk * ij * ik) + (32 * CF * Box6ijk * ij * ik) - (16 * CA * Box6jik * ij * ik) + (32 * CF * Box6jik * ij * ik) + (CA * Lij * ij * ik) - (2 * CF * Lij * ij * ik) - (2 * CA * Lik * ij * ik) - (4 * CF * Lik * ij * ik) - (4 * CF * Ljk * ij * ik) - (8 * CA * Box6jik * ik2) + (16 * CF * Box6jik * ik2) - (CA * Lik * ik2) - (2 * CF * Lik * ik2) - (8 * CA * Box6jik * ij3) / jk + (16 * CF * Box6jik * ij3) / jk - (16 * CA * Box6jik * ij2 * ik) / jk + (32 * CF * Box6jik * ij2 * ik) / jk - (8 * CA * Box6jik * ij * ik2) / jk + (16 * CF * Box6jik * ij * ik2) / jk + (2 * CF * ij * jk) - (40 * CA * Box6ijk * ij * jk) + (80 * CF * Box6ijk * ij * jk) + (24 * CA * Box6ikj * ij * jk) + (2 * CA * Lij * ij * jk) - (4 * CF * Lij * ij * jk) - (CA * Lik * ij * jk) - (4 * CF * Lik * ij * jk) - (12 * CF * Ljk * ij * jk) - (8 * CA * Box6ijk * ij3 * jk) / ik2 + (16 * CF * Box6ijk * ij3 * jk) / ik2 - (32 * CA * Box6ijk * ij2 * jk) / ik + (64 * CF * Box6ijk * ij2 * jk) / ik + (CA * Lij * ij2 * jk) / ik - (2 * CF * Lij * ij2 * jk) / ik + (CA * Ljk * ij2 * jk) / ik - (2 * CF * Ljk * ij2 * jk) / ik + (2 * CF * ik * jk) - 
(16 * CA * Box6ijk * ik * jk) + (32 * CF * Box6ijk * ik * jk) + (48 * CA * Box6ikj * ik * jk) + (CA * Lij * ik * jk) - (2 * CF * Lij * ik * jk) - (2 * CA * Lik * ik * jk) - (8 * CF * Lik * ik * jk) - (CA * Ljk * ik * jk) - (8 * CF * Ljk * ik * jk) + (24 * CA * Box6ikj * ik2 * jk) / ij - (CA * Lik * ik2 * jk) / ij - (4 * CF * Lik * ik2 * jk) / ij - (8 * CA * Box6ijk * jk2) + (16 * CF * Box6ijk * jk2) + (24 * CA * Box6ikj * jk2) - (8 * CF * Ljk * jk2) - (8 * CA * Box6ijk * ij2 * jk2) / ik2 + (16 * CF * Box6ijk * ij2 * jk2) / ik2 - (16 * CA * Box6ijk * ij * jk2) / ik + (32 * CF * Box6ijk * ij * jk2) / ik + (CA * Ljk * ij * jk2) / ik - (2 * CF * Ljk * ij * jk2) / ik + (48 * CA * Box6ikj * ik * jk2) / ij - (CA * Ljk * ik * jk2) / ij - (4 * CF * Ljk * ik * jk2) / ij + (24 * CA * Box6ikj * ik2 * jk2) / ij2 ) / (ij_ik_2 * ij_jk); qqbargLoops[1] = ( (-2 * CF * ij2) + (8 * CA * Box6ijk * ij2) - (16 * CF * Box6ijk * ij2) + (32 * CA * Box6jik * ij2) - (64 * CF * Box6jik * ij2) - (2 * CA * Lij * ij2) + (4 * CF * Lij * ij2) + (4 * CF * Lik * ij2) + (CA * Ljk * ij2) + (2 * CF * Ljk * ij2) + (8 * CA * Box6ijk * ij3) / ik - (16 * CF * Box6ijk * ij3) / ik - (2 * CF * ij * ik) - (24 * CA * Box6ikj * ij * ik) + (40 * CA * Box6jik * ij * ik) - (80 * CF * Box6jik * ij * ik) - (2 * CA * Lij * ij * ik) + (4 * CF * Lij * ij * ik) + (12 * CF * Lik * ij * ik) + (CA * Ljk * ij * ik) + (4 * CF * Ljk * ij * ik) - (24 * CA * Box6ikj * ik2) + (8 * CA * Box6jik * ik2) - (16 * CF * Box6jik * ik2) + (8 * CF * Lik * ik2) + (8 * CA * Box6jik * ij3 * ik) / jk2 - (16 * CF * Box6jik * ij3 * ik) / jk2 + (8 * CA * Box6jik * ij2 * ik2) / jk2 - (16 * CF * Box6jik * ij2 * ik2) / jk2 + (16 * CA * Box6jik * ij3) / jk - (32 * CF * Box6jik * ij3) / jk - (CA * Lij * ij3) / jk + (2 * CF * Lij * ij3) / jk + (32 * CA * Box6jik * ij2 * ik) / jk - (64 * CF * Box6jik * ij2 * ik) / jk - (CA * Lij * ij2 * ik) / jk + (2 * CF * Lij * ij2 * ik) / jk - (CA * Lik * ij2 * ik) / jk + (2 * CF * Lik * ij2 * ik) / jk + (16 * CA * Box6jik * ij * ik2) / jk - (32 * CF * Box6jik * ij * ik2) / jk - (CA * Lik * ij * ik2) / jk + (2 * CF * Lik * ij * ik2) / jk - (2 * CF * ij * jk) + (16 * CA * Box6ijk * ij * jk) - (32 * CF * Box6ijk * ij * jk) + (16 * CA * Box6jik * ij * jk) - (32 * CF * Box6jik * ij * jk) - (CA * Lij * ij * jk) + (2 * CF * Lij * ij * jk) + (4 * CF * Lik * ij * jk) + (2 * CA * Ljk * ij * jk) + (4 * CF * Ljk * ij * jk) + (16 * CA * Box6ijk * ij2 * jk) / ik - (32 * CF * Box6ijk * ij2 * jk) / ik - (2 * CF * ik * jk) - (48 * CA * Box6ikj * ik * jk) + (16 * CA * Box6jik * ik * jk) - (32 * CF * Box6jik * ik * jk) - (CA * Lij * ik * jk) + (2 * CF * Lij * ik * jk) + (CA * Lik * ik * jk) + (8 * CF * Lik * ik * jk) + (2 * CA * Ljk * ik * jk) + (8 * CF * Ljk * ik * jk) - (48 * CA * Box6ikj * ik2 * jk) / ij + (CA * Lik * ik2 * jk) / ij + (4 * CF * Lik * ik2 * jk) / ij + (8 * CA * Box6ijk * jk2) - (16 * CF * Box6ijk * jk2) + (CA * Ljk * jk2) + (2 * CF * Ljk * jk2) + (8 * CA * Box6ijk * ij * jk2) / ik - (16 * CF * Box6ijk * ij * jk2) / ik - (24 * CA * Box6ikj * ik * jk2) / ij + (CA * Ljk * ik * jk2) / ij + (4 * CF * Ljk * ik * jk2) / ij - (24 * CA * Box6ikj * ik2 * jk2) / ij2 ) / (ij_ik * ij_jk_2); qqbargLoops[2] = -3 * CF * Lijk + ( (-4 * CA * Box6jik * ij3) + (8 * CF * Box6jik * ij3) + (CA * Lij * ij3) / 2. - (CF * Lij * ij3) + (CA * ij2 * ik) - (9 * CF * ij2 * ik) + (8 * CA * Box6ijk * ij2 * ik) - (16 * CF * Box6ijk * ij2 * ik) - (8 * CA * Box6ikj * ij2 * ik) - (8 * CA * Box6jik * ij2 * ik) + (16 * CF * Box6jik * ij2 * ik) + (CA * Lij * ij2 * ik) / 2. 
- (CF * Lij * ij2 * ik) + (CA * Lik * ij2 * ik) / 2. - (CF * Lik * ij2 * ik) + (CA * ij * ik2) - (9 * CF * ij * ik2) + (8 * CA * Box6ijk * ij * ik2) - (16 * CF * Box6ijk * ij * ik2) - (8 * CA * Box6ikj * ij * ik2) - (4 * CA * Box6jik * ij * ik2) + (8 * CF * Box6jik * ij * ik2) + (CA * Lik * ij * ik2) / 2. - (CF * Lik * ij * ik2) - (4 * CA * Box6jik * ij3 * ik) / jk + (8 * CF * Box6jik * ij3 * ik) / jk - (4 * CA * Box6jik * ij2 * ik2) / jk + (8 * CF * Box6jik * ij2 * ik2) / jk + (CA * ij2 * jk) - (9 * CF * ij2 * jk) + (12 * CA * Box6ijk * ij2 * jk) - (24 * CF * Box6ijk * ij2 * jk) - (8 * CA * Box6ikj * ij2 * jk) - (4 * CA * Box6jik * ij2 * jk) + (8 * CF * Box6jik * ij2 * jk) + (CA * Lik * ij2 * jk) / 2. - (CF * Lik * ij2 * jk) - (CA * Ljk * ij2 * jk) / 2. + (CF * Ljk * ij2 * jk) + (4 * CA * Box6ijk * ij3 * jk) / ik - (8 * CF * Box6ijk * ij3 * jk) / ik - (CA * Lij * ij3 * jk) / (2. * ik) + (CF * Lij * ij3 * jk) / ik + (2 * CA * ij * ik * jk) - (18 * CF * ij * ik * jk) + (16 * CA * Box6ijk * ij * ik * jk) - (32 * CF * Box6ijk * ij * ik * jk) - (28 * CA * Box6ikj * ij * ik * jk) - (4 * CA * Box6jik * ij * ik * jk) + (8 * CF * Box6jik * ij * ik * jk) + (CA * Lij * ij * ik * jk) / 2. - (CF * Lij * ij * ik * jk) + (CA * Lik * ij * ik * jk) - (CF * Lik * ij * ik * jk) - (CA * Ljk * ij * ik * jk) / 2. + (3 * CF * Ljk * ij * ik * jk) + (CA * ik2 * jk) - (9 * CF * ik2 * jk) + (8 * CA * Box6ijk * ik2 * jk) - (16 * CF * Box6ijk * ik2 * jk) - (20 * CA * Box6ikj * ik2 * jk) + (CA * Lik * ik2 * jk) / 2. + (CA * ij * jk2) - (9 * CF * ij * jk2) + (12 * CA * Box6ijk * ij * jk2) - (24 * CF * Box6ijk * ij * jk2) - (20 * CA * Box6ikj * ij * jk2) - (CA * Lij * ij * jk2) / 2. + (CF * Lij * ij * jk2) + (CA * Lik * ij * jk2) / 2. - (CA * Ljk * ij * jk2) + (4 * CF * Ljk * ij * jk2) + (4 * CA * Box6ijk * ij3 * jk2) / ik2 - (8 * CF * Box6ijk * ij3 * jk2) / ik2 + (8 * CA * Box6ijk * ij2 * jk2) / ik - (16 * CF * Box6ijk * ij2 * jk2) / ik - (CA * Lij * ij2 * jk2) / (2. * ik) + (CF * Lij * ij2 * jk2) / ik - (CA * Ljk * ij2 * jk2) / (2. * ik) + (CF * Ljk * ij2 * jk2) / ik + (CA * ik * jk2) - (9 * CF * ik * jk2) + (8 * CA * Box6ijk * ik * jk2) - (16 * CF * Box6ijk * ik * jk2) - (32 * CA * Box6ikj * ik * jk2) + (CA * Lik * ik * jk2) / 2. - (CA * Ljk * ik * jk2) / 2. + (3 * CF * Ljk * ik * jk2) - (12 * CA * Box6ikj * ik2 * jk2) / ij - (12 * CA * Box6ikj * jk3) - (CA * Ljk * jk3) / 2. + (3 * CF * Ljk * jk3) + (4 * CA * Box6ijk * ij2 * jk3) / ik2 - (8 * CF * Box6ijk * ij2 * jk3) / ik2 + (4 * CA * Box6ijk * ij * jk3) / ik - (8 * CF * Box6ijk * ij * jk3) / ik - (CA * Ljk * ij * jk3) / (2. * ik) + (CF * Ljk * ij * jk3) / ik - (12 * CA * Box6ikj * ik * jk3) / ij ) / (ij_ik * ij_jk * ik_jk); qqbargLoops[3] = 3 * CF * Lijk + ( (8 * CF * ij2) - (8 * CA * Box6ijk * ij2) + (16 * CF * Box6ijk * ij2) + (8 * CA * Box6ikj * ij2) - (8 * CA * Box6jik * ij2) + (16 * CF * Box6jik * ij2) + (CA * Lij * ij2) / 2. - (CF * Lij * ij2) + (8 * CF * ij * ik) - (8 * CA * Box6ijk * ij * ik) + (16 * CF * Box6ijk * ij * ik) + (8 * CA * Box6ikj * ij * ik) - (12 * CA * Box6jik * ij * ik) + (24 * CF * Box6jik * ij * ik) + (CA * Lij * ij * ik) / 2. - (CF * Lij * ij * ik) + (CA * Lik * ij * ik) / 2. - (CF * Lik * ij * ik) - (4 * CA * Box6jik * ik2) + (8 * CF * Box6jik * ik2) + (CA * Lik * ik2) / 2. 
- (CF * Lik * ik2) - (4 * CA * Box6jik * ij2 * ik) / jk + (8 * CF * Box6jik * ij2 * ik) / jk - (4 * CA * Box6jik * ij * ik2) / jk + (8 * CF * Box6jik * ij * ik2) / jk + (8 * CF * ij * jk) - (12 * CA * Box6ijk * ij * jk) + (24 * CF * Box6ijk * ij * jk) + (8 * CA * Box6ikj * ij * jk) - (8 * CA * Box6jik * ij * jk) + (16 * CF * Box6jik * ij * jk) + (CA * Lij * ij * jk) / 2. - (CF * Lij * ij * jk) + (CA * Ljk * ij * jk) / 2. - (CF * Ljk * ij * jk) - (4 * CA * Box6ijk * ij2 * jk) / ik + (8 * CF * Box6ijk * ij2 * jk) / ik + (8 * CF * ik * jk) - (8 * CA * Box6ijk * ik * jk) + (16 * CF * Box6ijk * ik * jk) - (4 * CA * Box6ikj * ik * jk) - (8 * CA * Box6jik * ik * jk) + (16 * CF * Box6jik * ik * jk) + (CA * Lij * ik * jk) / 2. - (CF * Lij * ik * jk) + (CA * Lik * ik * jk) / 2. + (2 * CF * Lik * ik * jk) + (CA * Ljk * ik * jk) / 2. + (2 * CF * Ljk * ik * jk) - (12 * CA * Box6ikj * ik2 * jk) / ij + (CA * Lik * ik2 * jk) / (2. * ij) + (2 * CF * Lik * ik2 * jk) / ij - (4 * CA * Box6ijk * jk2) + (8 * CF * Box6ijk * jk2) + (CA * Ljk * jk2) / 2. - (CF * Ljk * jk2) - (4 * CA * Box6ijk * ij * jk2) / ik + (8 * CF * Box6ijk * ij * jk2) / ik - (12 * CA * Box6ikj * ik * jk2) / ij + (CA * Ljk * ik * jk2) / (2. * ij) + (2 * CF * Ljk * ik * jk2) / ij - (12 * CA * Box6ikj * ik2 * jk2) / ij2 ) / (ij_ik * ij_jk); qqbargLoops[4] = -3 * CF * Lijk + ( (-8 * CF * ij2) + (8 * CA * Box6ijk * ij2) - (16 * CF * Box6ijk * ij2) - (8 * CA * Box6ikj * ij2) + (8 * CA * Box6jik * ij2) - (16 * CF * Box6jik * ij2) - (CA * Lij * ij2) / 2. + (CF * Lij * ij2) - (8 * CF * ij * ik) + (8 * CA * Box6ijk * ij * ik) - (16 * CF * Box6ijk * ij * ik) - (8 * CA * Box6ikj * ij * ik) + (12 * CA * Box6jik * ij * ik) - (24 * CF * Box6jik * ij * ik) - (CA * Lij * ij * ik) / 2. + (CF * Lij * ij * ik) - (CA * Lik * ij * ik) / 2. + (CF * Lik * ij * ik) + (4 * CA * Box6jik * ik2) - (8 * CF * Box6jik * ik2) - (CA * Lik * ik2) / 2. + (CF * Lik * ik2) + (4 * CA * Box6jik * ij2 * ik) / jk - (8 * CF * Box6jik * ij2 * ik) / jk + (4 * CA * Box6jik * ij * ik2) / jk - (8 * CF * Box6jik * ij * ik2) / jk - (8 * CF * ij * jk) + (12 * CA * Box6ijk * ij * jk) - (24 * CF * Box6ijk * ij * jk) - (8 * CA * Box6ikj * ij * jk) + (8 * CA * Box6jik * ij * jk) - (16 * CF * Box6jik * ij * jk) - (CA * Lij * ij * jk) / 2. + (CF * Lij * ij * jk) - (CA * Ljk * ij * jk) / 2. + (CF * Ljk * ij * jk) + (4 * CA * Box6ijk * ij2 * jk) / ik - (8 * CF * Box6ijk * ij2 * jk) / ik - (8 * CF * ik * jk) + (8 * CA * Box6ijk * ik * jk) - (16 * CF * Box6ijk * ik * jk) + (4 * CA * Box6ikj * ik * jk) + (8 * CA * Box6jik * ik * jk) - (16 * CF * Box6jik * ik * jk) - (CA * Lij * ik * jk) / 2. + (CF * Lij * ik * jk) - (CA * Lik * ik * jk) / 2. - (2 * CF * Lik * ik * jk) - (CA * Ljk * ik * jk) / 2. - (2 * CF * Ljk * ik * jk) + (12 * CA * Box6ikj * ik2 * jk) / ij - (CA * Lik * ik2 * jk) / (2. * ij) - (2 * CF * Lik * ik2 * jk) / ij + (4 * CA * Box6ijk * jk2) - (8 * CF * Box6ijk * jk2) - (CA * Ljk * jk2) / 2. + (CF * Ljk * jk2) + (4 * CA * Box6ijk * ij * jk2) / ik - (8 * CF * Box6ijk * ij * jk2) / ik + (12 * CA * Box6ikj * ik * jk2) / ij - (CA * Ljk * ik * jk2) / (2. * ij) - (2 * CF * Ljk * ik * jk2) / ij + (12 * CA * Box6ikj * ik2 * jk2) / ij2 ) / (ij_ik * ij_jk); qqbargLoops[5] = 3 * CF * Lijk + ( (-4 * CA * Box6jik * ij2) + (8 * CF * Box6jik * ij2) + (CA * Lij * ij2) / 2. 
- (CF * Lij * ij2) - (CA * ij * ik) + (9 * CF * ij * ik) - (8 * CA * Box6ijk * ij * ik) + (16 * CF * Box6ijk * ij * ik) + (8 * CA * Box6ikj * ij * ik) - (4 * CA * Box6jik * ij * ik) + (8 * CF * Box6jik * ij * ik) + (CA * Lij * ij * ik) / 2. - (CF * Lij * ij * ik) + (CA * Lik * ij * ik) / 2. - (CF * Lik * ij * ik) - (CA * ik2) + (9 * CF * ik2) - (8 * CA * Box6ijk * ik2) + (16 * CF * Box6ijk * ik2) + (8 * CA * Box6ikj * ik2) + (CA * Lik * ik2) / 2. - (CF * Lik * ik2) - (4 * CA * Box6jik * ij2 * ik) / jk + (8 * CF * Box6jik * ij2 * ik) / jk - (4 * CA * Box6jik * ij * ik2) / jk + (8 * CF * Box6jik * ij * ik2) / jk - (CA * ij * jk) + (9 * CF * ij * jk) - (4 * CA * Box6ijk * ij * jk) + (8 * CF * Box6ijk * ij * jk) + (8 * CA * Box6ikj * ij * jk) - (CA * Lij * ij * jk) / 2. + (CF * Lij * ij * jk) + (CA * Lik * ij * jk) / 2. - (CF * Lik * ij * jk) - (CA * Ljk * ij * jk) / 2. + (CF * Ljk * ij * jk) + (4 * CA * Box6ijk * ij2 * jk) / ik - (8 * CF * Box6ijk * ij2 * jk) / ik - (CA * Lij * ij2 * jk) / (2. * ik) + (CF * Lij * ij2 * jk) / ik - (CA * ik * jk) + (9 * CF * ik * jk) - (8 * CA * Box6ijk * ik * jk) + (16 * CF * Box6ijk * ik * jk) + (20 * CA * Box6ikj * ik * jk) + (CA * Lik * ik * jk) / 2. - (CF * Lik * ik * jk) - (CA * Ljk * ik * jk) / 2. - (2 * CF * Ljk * ik * jk) + (12 * CA * Box6ikj * ik2 * jk) / ij + (12 * CA * Box6ikj * jk2) - (CA * Ljk * jk2) / 2. - (2 * CF * Ljk * jk2) + (4 * CA * Box6ijk * ij2 * jk2) / ik2 - (8 * CF * Box6ijk * ij2 * jk2) / ik2 + (4 * CA * Box6ijk * ij * jk2) / ik - (8 * CF * Box6ijk * ij * jk2) / ik - (CA * Ljk * ij * jk2) / (2. * ik) + (CF * Ljk * ij * jk2) / ik + (12 * CA * Box6ikj * ik * jk2) / ij ) / (ij_ik * ik_jk); qqbargLoops[6] = ( (-2 * CF * ij) + (32 * CA * Box6ijk * ij) - (64 * CF * Box6ijk * ij) - (4 * CA * Lij * ij) + (8 * CF * Lij * ij) + (4 * CF * Ljk * ij) + (16 * CA * Box6ijk * ij2) / ik - (32 * CF * Box6ijk * ij2) / ik - (2 * CA * Lij * ij2) / ik + (4 * CF * Lij * ij2) / ik - (2 * CF * ik) + (16 * CA * Box6ijk * ik) - (32 * CF * Box6ijk * ik) - (2 * CA * Lij * ik) + (4 * CF * Lij * ik) + (4 * CF * Ljk * ik) + (16 * CA * Box6ijk * jk) - (32 * CF * Box6ijk * jk) - (2 * CA * Ljk * jk) + (6 * CF * Ljk * jk) + (16 * CA * Box6ijk * ij2 * jk) / ik2 - (32 * CF * Box6ijk * ij2 * jk) / ik2 + (32 * CA * Box6ijk * ij * jk) / ik - (64 * CF * Box6ijk * ij * jk) / ik - (2 * CA * Ljk * ij * jk) / ik + (4 * CF * Ljk * ij * jk) / ik ) / ij_ik_2; qqbargLoops[7] = ( (8 * CA * Box6jik * ij) - (16 * CF * Box6jik * ij) + (CA * Lij * ij) - (2 * CF * Lij * ij) + (CA * Lik * ij) + (2 * CF * Lik * ij) + (CA * Lij * ij2) / ik - (2 * CF * Lij * ij2) / ik + (8 * CA * Box6jik * ik) - (16 * CF * Box6jik * ik) + (CA * Lik * ik) + (2 * CF * Lik * ik) + (8 * CA * Box6jik * ij2) / jk - (16 * CF * Box6jik * ij2) / jk + (8 * CA * Box6jik * ij * ik) / jk - (16 * CF * Box6jik * ij * ik) / jk - (24 * CA * Box6ikj * jk) + (CA * Lij * jk) - (2 * CF * Lij * jk) + (CA * Lik * jk) + (4 * CF * Lik * jk) + (CA * Ljk * jk) + (4 * CF * Ljk * jk) - (8 * CA * Box6ijk * ij2 * jk) / ik2 + (16 * CF * Box6ijk * ij2 * jk) / ik2 - (8 * CA * Box6ijk * ij * jk) / ik + (16 * CF * Box6ijk * ij * jk) / ik + (CA * Lij * ij * jk) / ik - (2 * CF * Lij * ij * jk) / ik + (CA * Ljk * ij * jk) / ik - (2 * CF * Ljk * ij * jk) / ik - (24 * CA * Box6ikj * ik * jk) / ij + (CA * Lik * ik * jk) / ij + (4 * CF * Lik * ik * jk) / ij - (24 * CA * Box6ikj * jk2) / ij + (CA * Ljk * jk2) / ij + (4 * CF * Ljk * jk2) / ij - (8 * CA * Box6ijk * ij * jk2) / ik2 + (16 * CF * Box6ijk * ij * jk2) / ik2 - (8 * CA * Box6ijk * jk2) / ik + (16 
* CF * Box6ijk * jk2) / ik + (CA * Ljk * jk2) / ik - (2 * CF * Ljk * jk2) / ik - (24 * CA * Box6ikj * ik * jk2) / ij2 ) / (ij_ik * ij_jk); qqbargLoops[8] = ( (-8 * CA * Box6ijk * ij) + (16 * CF * Box6ijk * ij) - (CA * Lij * ij) + (2 * CF * Lij * ij) - (CA * Ljk * ij) - (2 * CF * Ljk * ij) - (8 * CA * Box6ijk * ij2) / ik + (16 * CF * Box6ijk * ij2) / ik + (24 * CA * Box6ikj * ik) - (CA * Lij * ik) + (2 * CF * Lij * ik) - (CA * Lik * ik) - (4 * CF * Lik * ik) - (CA * Ljk * ik) - (4 * CF * Ljk * ik) + (24 * CA * Box6ikj * ik2) / ij - (CA * Lik * ik2) / ij - (4 * CF * Lik * ik2) / ij + (8 * CA * Box6jik * ij2 * ik) / jk2 - (16 * CF * Box6jik * ij2 * ik) / jk2 + (8 * CA * Box6jik * ij * ik2) / jk2 - (16 * CF * Box6jik * ij * ik2) / jk2 - (CA * Lij * ij2) / jk + (2 * CF * Lij * ij2) / jk + (8 * CA * Box6jik * ij * ik) / jk - (16 * CF * Box6jik * ij * ik) / jk - (CA * Lij * ij * ik) / jk + (2 * CF * Lij * ij * ik) / jk - (CA * Lik * ij * ik) / jk + (2 * CF * Lik * ij * ik) / jk + (8 * CA * Box6jik * ik2) / jk - (16 * CF * Box6jik * ik2) / jk - (CA * Lik * ik2) / jk + (2 * CF * Lik * ik2) / jk - (8 * CA * Box6ijk * jk) + (16 * CF * Box6ijk * jk) - (CA * Ljk * jk) - (2 * CF * Ljk * jk) - (8 * CA * Box6ijk * ij * jk) / ik + (16 * CF * Box6ijk * ij * jk) / ik + (24 * CA * Box6ikj * ik * jk) / ij - (CA * Ljk * ik * jk) / ij - (4 * CF * Ljk * ik * jk) / ij + (24 * CA * Box6ikj * ik2 * jk) / ij2 ) / (ij_ik * ij_jk); qqbargLoops[9] = ( (2 * CF * ij) - (32 * CA * Box6jik * ij) + (64 * CF * Box6jik * ij) + (4 * CA * Lij * ij) - (8 * CF * Lij * ij) - (4 * CF * Lik * ij) - (16 * CA * Box6jik * ik) + (32 * CF * Box6jik * ik) + (2 * CA * Lik * ik) - (6 * CF * Lik * ik) - (16 * CA * Box6jik * ij2 * ik) / jk2 + (32 * CF * Box6jik * ij2 * ik) / jk2 - (16 * CA * Box6jik * ij2) / jk + (32 * CF * Box6jik * ij2) / jk + (2 * CA * Lij * ij2) / jk - (4 * CF * Lij * ij2) / jk - (32 * CA * Box6jik * ij * ik) / jk + (64 * CF * Box6jik * ij * ik) / jk + (2 * CA * Lik * ij * ik) / jk - (4 * CF * Lik * ij * ik) / jk + (2 * CF * jk) - (16 * CA * Box6jik * jk) + (32 * CF * Box6jik * jk) + (2 * CA * Lij * jk) - (4 * CF * Lij * jk) - (4 * CF * Lik * jk) ) / ij_jk_2; qqbargLoops[10] = ( (-8 * CA * Box6ijk * ij2 * jk) + (16 * CF * Box6ijk * ij2 * jk) + (2 * CA * Lij * ij2 * jk) - (4 * CF * Lij * ij2 * jk) - (CA * ij * ik * jk) + (2 * CF * ij * ik * jk) - (8 * CA * Box6ijk * ij * ik * jk) + (16 * CF * Box6ijk * ij * ik * jk) + (3 * CA * Lij * ij * ik * jk) - (6 * CF * Lij * ij * ik * jk) + (CA * Ljk * ij * ik * jk) - (2 * CF * Ljk * ij * ik * jk) - (CA * ik2 * jk) + (2 * CF * ik2 * jk) + (CA * Lij * ik2 * jk) - (2 * CF * Lij * ik2 * jk) + (CA * Ljk * ik2 * jk) - (CF * Ljk * ik2 * jk) - (CA * ij * jk2) + (2 * CF * ij * jk2) - (16 * CA * Box6ijk * ij * jk2) + (32 * CF * Box6ijk * ij * jk2) + (2 * CA * Lij * ij * jk2) - (4 * CF * Lij * ij * jk2) + (2 * CA * Ljk * ij * jk2) - (4 * CF * Ljk * ij * jk2) - (16 * CA * Box6ijk * ij2 * jk2) / ik + (32 * CF * Box6ijk * ij2 * jk2) / ik + (CA * Lij * ij2 * jk2) / ik - (2 * CF * Lij * ij2 * jk2) / ik - (CA * ik * jk2) + (2 * CF * ik * jk2) + (CA * Lij * ik * jk2) - (2 * CF * Lij * ik * jk2) + (2 * CA * Ljk * ik * jk2) - (2 * CF * Ljk * ik * jk2) + (CA * Ljk * jk3) - (CF * Ljk * jk3) - (8 * CA * Box6ijk * ij2 * jk3) / ik2 + (16 * CF * Box6ijk * ij2 * jk3) / ik2 - (8 * CA * Box6ijk * ij * jk3) / ik + (16 * CF * Box6ijk * ij * jk3) / ik + (CA * Ljk * ij * jk3) / ik - (2 * CF * Ljk * ij * jk3) / ik ) / (ij_ik * ik_jk_2); qqbargLoops[11] = ( (16 * CA * Box6jik * ij2 * ik) - (32 * CF * Box6jik * ij2 * 
ik) - (CA * Lij * ij2 * ik) + (2 * CF * Lij * ij2 * ik) + (8 * CA * Box6jik * ij * ik2) - (16 * CF * Box6jik * ij * ik2) - (CA * Lik * ij * ik2) + (2 * CF * Lik * ij * ik2) + (8 * CA * Box6jik * ij2 * ik2) / jk - (16 * CF * Box6jik * ij2 * ik2) / jk + (8 * CA * Box6jik * ij2 * jk) - (16 * CF * Box6jik * ij2 * jk) - (2 * CA * Lij * ij2 * jk) + (4 * CF * Lij * ij2 * jk) + (CA * ij * ik * jk) - (2 * CF * ij * ik * jk) + (16 * CA * Box6jik * ij * ik * jk) - (32 * CF * Box6jik * ij * ik * jk) - (2 * CA * Lij * ij * ik * jk) + (4 * CF * Lij * ij * ik * jk) - (2 * CA * Lik * ij * ik * jk) + (4 * CF * Lik * ij * ik * jk) - (CA * Lik * ik2 * jk) + (CF * Lik * ik2 * jk) + (CA * ij * jk2) - (2 * CF * ij * jk2) + (8 * CA * Box6jik * ij * jk2) - (16 * CF * Box6jik * ij * jk2) - (3 * CA * Lij * ij * jk2) + (6 * CF * Lij * ij * jk2) - (CA * Lik * ij * jk2) + (2 * CF * Lik * ij * jk2) + (CA * ik * jk2) - (2 * CF * ik * jk2) - (CA * Lij * ik * jk2) + (2 * CF * Lij * ik * jk2) - (2 * CA * Lik * ik * jk2) + (2 * CF * Lik * ik * jk2) + (CA * jk3) - (2 * CF * jk3) - (CA * Lij * jk3) + (2 * CF * Lij * jk3) - (CA * Lik * jk3) + (CF * Lik * jk3) ) / (ij_jk * ik_jk_2); qqbargLoops[12] = -3 * CF * Lijk + ( (CA * ij2 * ik) - (9 * CF * ij2 * ik) + (8 * CA * Box6ijk * ij2 * ik) - (16 * CF * Box6ijk * ij2 * ik) - (8 * CA * Box6ikj * ij2 * ik) + (CA * ij * ik2) - (9 * CF * ij * ik2) + (8 * CA * Box6ijk * ij * ik2) - (16 * CF * Box6ijk * ij * ik2) - (8 * CA * Box6ikj * ij * ik2) + (CA * ij2 * jk) - (9 * CF * ij2 * jk) - (8 * CA * Box6ikj * ij2 * jk) + (8 * CA * Box6jik * ij2 * jk) - (16 * CF * Box6jik * ij2 * jk) + (2 * CA * ij * ik * jk) - (18 * CF * ij * ik * jk) + (8 * CA * Box6ijk * ij * ik * jk) - (16 * CF * Box6ijk * ij * ik * jk) - (40 * CA * Box6ikj * ij * ik * jk) + (8 * CA * Box6jik * ij * ik * jk) - (16 * CF * Box6jik * ij * ik * jk) + (3 * CF * Lik * ij * ik * jk) + (3 * CF * Ljk * ij * ik * jk) + (CA * ik2 * jk) - (9 * CF * ik2 * jk) + (8 * CA * Box6ijk * ik2 * jk) - (16 * CF * Box6ijk * ik2 * jk) - (32 * CA * Box6ikj * ik2 * jk) + (3 * CF * Lik * ik2 * jk) + (CA * ij * jk2) - (9 * CF * ij * jk2) - (8 * CA * Box6ikj * ij * jk2) + (8 * CA * Box6jik * ij * jk2) - (16 * CF * Box6jik * ij * jk2) + (CA * ik * jk2) - (9 * CF * ik * jk2) - (32 * CA * Box6ikj * ik * jk2) + (8 * CA * Box6jik * ik * jk2) - (16 * CF * Box6jik * ik * jk2) + (3 * CF * Ljk * ik * jk2) - (24 * CA * Box6ikj * ik2 * jk2) / ij ) / (ij_ik * ij_jk * ik_jk); /* // idendities implied by gauge invariance and current conservation; checked analytically and numerically Complex c1 = qqbargLoops[0] + qqbargLoops[6] + qqbargLoops[7]; Complex c2 = qqbargLoops[1] + qqbargLoops[8] + qqbargLoops[9]; Complex c3 = qqbargLoops[3] + qqbargLoops[4]; Complex c4 = qqbargLoops[2] + qqbargLoops[5] + qqbargLoops[10] + qqbargLoops[11]; Complex c5 = 2. * qqbargLoops[3]/ik + 2. * qqbargLoops[5]/jk + qqbargLoops[6] * (1.+ij/ik) + qqbargLoops[8] * (jk+ij)/ik + 2. * qqbargLoops[10] * (1./ik+1./jk) + 2. * qqbargLoops[12] * (1./ik+1./jk); Complex c6 = 2. * qqbargLoops[4]/jk + 2. * qqbargLoops[5]/jk + qqbargLoops[7] * (ik+ij)/jk + qqbargLoops[9] * (1.+ij/jk) + 2. * qqbargLoops[11] * (ik/jk2+1./jk); Complex c7 = 0.5 * qqbargLoops[0] * (ij+ik) + 0.5 * qqbargLoops[1] * (ij+jk) + qqbargLoops[2] * (1.+ik/jk) - qqbargLoops[12] * (1.+ik/jk); double x1 = c1 != 0. ? log(abs(real(c1 * conj(c1)))) : 0.; double x2 = c2 != 0. ? log(abs(real(c2 * conj(c2)))) : 0.; double x3 = c3 != 0. ? log(abs(real(c3 * conj(c3)))) : 0.; double x4 = c4 != 0. ? 
log(abs(real(c4 * conj(c4)))) : 0.; double x5 = c5 != 0. ? log(abs(real(c5 * conj(c5)))) : 0.; double x6 = c6 != 0. ? log(abs(real(c6 * conj(c6)))) : 0.; double x7 = c7 != 0. ? log(abs(real(c7 * conj(c7)))) : 0.; cerr << x1 << " " << x2 << " " << x3 << " " << x4 << " " << x5 << " " << x6 << " " << x7 << "\n"; */ } LorentzVector<Complex> MatchboxCurrents::qqbargGeneralLeftLoopCurrent(const int i, const int, const int j, const int, const int k, const int gHel, const int n) { qqbargLoopCoefficients(i,j,k); const double ik = invariant(i,k); const double jk = invariant(j,k); const Complex plusP_ik = plusProduct(i,k); const Complex plusP_in = plusProduct(i,n); const Complex plusP_jk = plusProduct(j,k); const Complex plusP_jn = plusProduct(j,n); const Complex plusP_kn = plusProduct(k,n); const Complex minusP_ik = minusProduct(i,k); const Complex minusP_in = minusProduct(i,n); const Complex minusP_jk = minusProduct(j,k); const Complex minusP_jn = minusProduct(j,n); const Complex minusP_kn = minusProduct(k,n); const LorentzVector<Complex> & minusC_ij = minusCurrent(i,j); const LorentzVector<Complex> & minusC_nk = minusCurrent(n,k); const LorentzVector<Complex> & minusC_kj = minusCurrent(k,j); const LorentzVector<Complex> & minusC_kn = minusCurrent(k,n); Complex c1 = qqbargLoops[0]; Complex c2 = qqbargLoops[1]; Complex c3 = qqbargLoops[2]; Complex c4 = qqbargLoops[3]; Complex c5 = qqbargLoops[4]; Complex c6 = qqbargLoops[5]; Complex c7 = qqbargLoops[6]; Complex c8 = qqbargLoops[7]; Complex c9 = qqbargLoops[8]; Complex c10 = qqbargLoops[9]; Complex c11 = qqbargLoops[10]; Complex c12 = qqbargLoops[11]; Complex c13 = qqbargLoops[12]; if ( gHel == 1 ) { return (sqrt(2) * c6 * plusP_jk * minusC_nk * minusP_ik)/(jk * minusP_kn) + (sqrt(2) * c1 * plusP_jk * momentum(i) * minusP_in)/minusP_kn + (sqrt(2) * c2 * plusP_jk * momentum(j) * minusP_in)/minusP_kn + (2 * sqrt(2) * c3 * plusP_jk * momentum(k) * minusP_in)/(jk * minusP_kn) + (sqrt(2) * c4 * plusP_ik * minusC_ij * minusP_in)/(ik * minusP_kn) - (sqrt(2) * c7 * plusP_ik * plusP_jk * momentum(i) * minusP_ik * minusP_in)/(ik * minusP_kn) - (sqrt(2) * c9 * plusP_ik * plusP_jk * momentum(j) * minusP_ik * minusP_in)/(ik * minusP_kn) - (2 * sqrt(2) * c11 * plusP_ik * plusP_jk * momentum(k) * minusP_ik * minusP_in)/(ik * jk * minusP_kn) + (sqrt(2) * c5 * plusP_jk * minusC_ij * minusP_jn)/(jk * minusP_kn) - (sqrt(2) * c8 * sqr(plusP_jk) * momentum(i) * minusP_ik * minusP_jn)/(jk * minusP_kn) - (sqrt(2) * c10 * sqr(plusP_jk) * momentum(j) * minusP_ik * minusP_jn)/(jk * minusP_kn) - (2 * sqrt(2) * c12 * sqr(plusP_jk) * momentum(k) * minusP_ik * minusP_jn)/(sqr(jk) * minusP_kn); } if ( gHel == -1 ) { return -((sqrt(2) * c1 * plusP_jn * momentum(i) * minusP_ik)/plusP_kn) - (sqrt(2) * c2 * plusP_jn * momentum(j) * minusP_ik)/plusP_kn - (2 * sqrt(2) * c3 * plusP_jn * momentum(k) * minusP_ik)/(jk * plusP_kn) - (sqrt(2) * c4 * plusP_in * minusC_ij * minusP_ik)/(ik * plusP_kn) + (sqrt(2) * c13 * minusC_kj * minusP_ik)/ik + (sqrt(2) * c13 * minusC_kj * minusP_ik)/jk - (sqrt(2) * c6 * plusP_jk * minusC_kn * minusP_ik)/(jk * plusP_kn) + (sqrt(2) * c7 * plusP_in * plusP_jk * momentum(i) * sqr(minusP_ik))/(ik * plusP_kn) + (sqrt(2) * c9 * plusP_in * plusP_jk * momentum(j) * sqr(minusP_ik))/(ik * plusP_kn) + (2 * sqrt(2) * c11 * plusP_in * plusP_jk * momentum(k) * sqr(minusP_ik))/(ik * jk * plusP_kn) - (sqrt(2) * c5 * plusP_jn * minusC_ij * minusP_jk)/(jk * plusP_kn) + (sqrt(2) * c8 * plusP_jk * plusP_jn * momentum(i) * minusP_ik * minusP_jk)/(jk * plusP_kn) + (sqrt(2) * c10 * 
plusP_jk * plusP_jn * momentum(j) * minusP_ik * minusP_jk)/(jk * plusP_kn) + (2 * sqrt(2) * c12 * plusP_jk * plusP_jn * momentum(k) * minusP_ik * minusP_jk)/(sqr(jk) * plusP_kn); } return czero; } LorentzVector<Complex> MatchboxCurrents::qqbargFixedLeftLoopCurrent(const int i, const int, const int j, const int, const int k, const int gHel) { qqbargLoopCoefficients(i,j,k); const double ik = invariant(i,k); const double jk = invariant(j,k); const Complex plusP_ij = plusProduct(i,j); const Complex plusP_jk = plusProduct(j,k); const Complex minusP_ij = minusProduct(i,j); const Complex minusP_ik = minusProduct(i,k); const LorentzVector<Complex> & minusC_ij = minusCurrent(i,j); const LorentzVector<Complex> & minusC_ik = minusCurrent(i,k); const LorentzVector<Complex> & minusC_kj = minusCurrent(k,j); //Complex c1 = qqbargLoops[0]; Complex c2 = qqbargLoops[1]; Complex c3 = qqbargLoops[2]; Complex c4 = qqbargLoops[3]; Complex c5 = qqbargLoops[4]; Complex c6 = qqbargLoops[5]; Complex c7 = qqbargLoops[6]; Complex c8 = qqbargLoops[7]; Complex c9 = qqbargLoops[8]; Complex c10 = qqbargLoops[9]; Complex c11 = qqbargLoops[10]; Complex c12 = qqbargLoops[11]; Complex c13 = qqbargLoops[12]; if ( gHel == 1 ) { return -((sqrt(2) * c6 * plusP_jk * minusC_ik)/jk) - (sqrt(2) * c8 * sqr(plusP_jk) * momentum(i) * minusP_ij)/jk - (sqrt(2) * c10 * sqr(plusP_jk) * momentum(j) * minusP_ij)/jk - (2 * sqrt(2) * c12 * sqr(plusP_jk) * momentum(k) * minusP_ij)/sqr(jk) + (sqrt(2) * c5 * plusP_jk * minusC_ij * minusP_ij)/(jk * minusP_ik); } if ( gHel == -1 ) { return (sqrt(2) * c4 * plusP_ij * minusC_ij * minusP_ik)/(ik * plusP_jk) + (sqrt(2) * c13 * minusC_kj * minusP_ik)/ik + (sqrt(2) * c13 * minusC_kj * minusP_ik)/jk + (sqrt(2) * c6 * minusC_kj * minusP_ik)/jk - (sqrt(2) * c7 * plusP_ij * momentum(i)* sqr(minusP_ik))/ik - (sqrt(2) * c9 * plusP_ij * momentum(j) * sqr(minusP_ik))/ik - (2 * sqrt(2) * c11 * plusP_ij * momentum(k) * sqr(minusP_ik))/(ik * jk); } return czero; } LorentzVector<Complex> MatchboxCurrents::qqbargGeneralRightLoopCurrent(const int i, const int, const int j, const int, const int k, const int gHel, const int n) { qqbargLoopCoefficients(i,j,k); const double ik = invariant(i,k); const double jk = invariant(j,k); const Complex plusP_ik = plusProduct(i,k); const Complex plusP_in = plusProduct(i,n); const Complex plusP_jk = plusProduct(j,k); const Complex plusP_jn = plusProduct(j,n); const Complex plusP_kn = plusProduct(k,n); const Complex minusP_ik = minusProduct(i,k); const Complex minusP_in = minusProduct(i,n); const Complex minusP_jk = minusProduct(j,k); const Complex minusP_jn = minusProduct(j,n); const Complex minusP_kn = minusProduct(k,n); const LorentzVector<Complex> & minusC_ji = minusCurrent(j,i); const LorentzVector<Complex> & minusC_jk = minusCurrent(j,k); const LorentzVector<Complex> & minusC_nk = minusCurrent(n,k); const LorentzVector<Complex> & minusC_kn = minusCurrent(k,n); Complex c1 = qqbargLoops[0]; Complex c2 = qqbargLoops[1]; Complex c3 = qqbargLoops[2]; Complex c4 = qqbargLoops[3]; Complex c5 = qqbargLoops[4]; Complex c6 = qqbargLoops[5]; Complex c7 = qqbargLoops[6]; Complex c8 = qqbargLoops[7]; Complex c9 = qqbargLoops[8]; Complex c10 = qqbargLoops[9]; Complex c11 = qqbargLoops[10]; Complex c12 = qqbargLoops[11]; Complex c13 = qqbargLoops[12]; if ( gHel == 1 ) { return -((sqrt(2) * c13 * plusP_ik * minusC_jk)/ik) - (sqrt(2) * c13 * plusP_ik * minusC_jk)/jk + (sqrt(2) * c4 * plusP_ik * minusC_ji * minusP_in)/(ik * minusP_kn) + (sqrt(2) * c6 * plusP_ik * minusC_nk * minusP_jk)/(jk * 
minusP_kn) - (sqrt(2) * c7 * sqr(plusP_ik) * momentum(i) * minusP_in * minusP_jk)/(ik * minusP_kn) - (sqrt(2) * c9 * sqr(plusP_ik) * momentum(j) * minusP_in * minusP_jk)/(ik * minusP_kn) - (2 * sqrt(2) * c11 * sqr(plusP_ik) * momentum(k) * minusP_in * minusP_jk)/(ik * jk * minusP_kn) + (sqrt(2) * c1 * plusP_ik * momentum(i) * minusP_jn)/minusP_kn + (sqrt(2) * c2 * plusP_ik * momentum(j) * minusP_jn)/minusP_kn + (2 * sqrt(2) * c3 * plusP_ik * momentum(k) * minusP_jn)/(jk * minusP_kn) + (sqrt(2) * c5 * plusP_jk * minusC_ji * minusP_jn)/(jk * minusP_kn) - (sqrt(2) * c8 * plusP_ik * plusP_jk * momentum(i) * minusP_jk * minusP_jn)/(jk * minusP_kn) - (sqrt(2) * c10 * plusP_ik * plusP_jk * momentum(j) * minusP_jk * minusP_jn)/(jk * minusP_kn) - (2 * sqrt(2) * c12 * plusP_ik * plusP_jk * momentum(k) * minusP_jk * minusP_jn)/(sqr(jk) * minusP_kn); } if ( gHel == -1 ) { return -((sqrt(2) * c4 * plusP_in * minusC_ji * minusP_ik)/(ik * plusP_kn)) - (sqrt(2) * c1 * plusP_in * momentum(i) * minusP_jk)/plusP_kn - (sqrt(2) * c2 * plusP_in * momentum(j) * minusP_jk)/plusP_kn - (2 * sqrt(2) * c3 * plusP_in * momentum(k) * minusP_jk)/(jk * plusP_kn) - (sqrt(2) * c5 * plusP_jn * minusC_ji * minusP_jk)/(jk * plusP_kn) - (sqrt(2) * c6 * plusP_ik * minusC_kn * minusP_jk)/(jk * plusP_kn) + (sqrt(2) * c7 * plusP_ik * plusP_in * momentum(i) * minusP_ik * minusP_jk)/(ik * plusP_kn) + (sqrt(2) * c9 * plusP_ik * plusP_in * momentum(j) * minusP_ik * minusP_jk)/(ik * plusP_kn) + (2 * sqrt(2) * c11 * plusP_ik * plusP_in * momentum(k) * minusP_ik * minusP_jk)/ (ik * jk * plusP_kn) + (sqrt(2) * c8 * plusP_ik * plusP_jn * momentum(i) * sqr(minusP_jk))/(jk * plusP_kn) + (sqrt(2) * c10 * plusP_ik * plusP_jn * momentum(j) * sqr(minusP_jk))/(jk * plusP_kn) + (2 * sqrt(2) * c12 * plusP_ik * plusP_jn * momentum(k) * sqr(minusP_jk))/(sqr(jk) * plusP_kn); } return czero; } LorentzVector<Complex> MatchboxCurrents::qqbargFixedRightLoopCurrent(const int i, const int, const int j, const int, const int k, const int gHel) { qqbargLoopCoefficients(i,j,k); const double ik = invariant(i,k); const double jk = invariant(j,k); const Complex plusP_ij = plusProduct(i,j); const Complex plusP_ik = plusProduct(i,k); const Complex minusP_ij = minusProduct(i,j); const Complex minusP_jk = minusProduct(j,k); const LorentzVector<Complex> & minusC_ji = minusCurrent(j,i); const LorentzVector<Complex> & minusC_jk = minusCurrent(j,k); const LorentzVector<Complex> & minusC_ki = minusCurrent(k,i); //Complex c1 = qqbargLoops[0]; Complex c2 = qqbargLoops[1]; Complex c3 = qqbargLoops[2]; Complex c4 = qqbargLoops[3]; Complex c5 = qqbargLoops[4]; Complex c6 = qqbargLoops[5]; Complex c7 = qqbargLoops[6]; Complex c8 = qqbargLoops[7]; Complex c9 = qqbargLoops[8]; Complex c10 = qqbargLoops[9]; Complex c11 = qqbargLoops[10]; Complex c12 = qqbargLoops[11]; Complex c13 = qqbargLoops[12]; if ( gHel == 1 ) { return -((sqrt(2) * c13 * plusP_ik * minusC_jk)/ik) - (sqrt(2) * c13 * plusP_ik * minusC_jk)/jk - (sqrt(2) * c6 * plusP_ik * minusC_jk)/jk + (sqrt(2) * c7 * sqr(plusP_ik) * momentum(i) * minusP_ij)/ik + (sqrt(2) * c9 * sqr(plusP_ik) * momentum(j) * minusP_ij)/ik + (2 * sqrt(2) * c11 * sqr(plusP_ik) * momentum(k) * minusP_ij)/(ik * jk) - (sqrt(2) * c4 * plusP_ik * minusC_ji * minusP_ij)/(ik * minusP_jk); } if ( gHel == -1 ) { return -((sqrt(2) * c5 * plusP_ij * minusC_ji * minusP_jk)/(jk * plusP_ik)) + (sqrt(2) * c6 * minusC_ki * minusP_jk)/jk + (sqrt(2) * c8 * plusP_ij * momentum(i) * sqr(minusP_jk))/jk + (sqrt(2) * c10 * plusP_ij * momentum(j) * sqr(minusP_jk))/jk + 
(2 * sqrt(2) * c12 * plusP_ij * momentum(k) * sqr(minusP_jk))/sqr(jk); } return czero; } const LorentzVector<Complex>& MatchboxCurrents::qqbargLeftOneLoopCurrent(const int q, const int qHel, const int qbar, const int qbarHel, const int g1, const int g1Hel) { if ( qHel != 1 || qbarHel != 1 ) return czero; if ( getCurrent(hash<2>(1,2,q,qHel,qbar,qbarHel,g1,g1Hel)) ) { #ifdef CHECK_MatchboxCurrents LorentzVector<Complex> ni = Complex(0.,0.5) * qqbargGeneralLeftLoopCurrent(q,qHel,qbar,qbarHel,g1,g1Hel,q); LorentzVector<Complex> nj = Complex(0.,0.5) * qqbargGeneralLeftLoopCurrent(q,qHel,qbar,qbarHel,g1,g1Hel,qbar); LorentzVector<Complex> nl = Complex(0.,0.5) * qqbargGeneralLeftLoopCurrent(q,qHel,qbar,qbarHel,g1,g1Hel,0); LorentzVector<Complex> nlbar = Complex(0.,0.5) * qqbargGeneralLeftLoopCurrent(q,qHel,qbar,qbarHel,g1,g1Hel,1); LorentzVector<Complex> fixed = Complex(0.,0.5) * qqbargFixedLeftLoopCurrent(q,qHel,qbar,qbarHel,g1,g1Hel); LorentzVector<Complex> x1 = fixed - ni; LorentzVector<Complex> x2 = fixed - nj; LorentzVector<Complex> x3 = fixed - nl; LorentzVector<Complex> x4 = fixed - nlbar; double c1 = real(x1.t() * conj(x1.t())) + real(x1.x() * conj(x1.x())) + real(x1.y() * conj(x1.y())) + real(x1.z() * conj(x1.z())); double c2 = real(x2.t() * conj(x2.t())) + real(x2.x() * conj(x2.x())) + real(x2.y() * conj(x2.y())) + real(x2.z() * conj(x2.z())); double c3 = real(x3.t() * conj(x3.t())) + real(x3.x() * conj(x3.x())) + real(x3.y() * conj(x3.y())) + real(x3.z() * conj(x3.z())); double c4 = real(x4.t() * conj(x4.t())) + real(x4.x() * conj(x4.x())) + real(x4.y() * conj(x4.y())) + real(x4.z() * conj(x4.z())); ostream& ncheck = checkStream("qqbargLeftLoopCurrentNChoice"); ncheck << (c1 != 0. ? log10(abs(c1)) : 0.) << " " << (c2 != 0. ? log10(abs(c2)) : 0.) << " " << (c3 != 0. ? log10(abs(c3)) : 0.) << " " << (c4 != 0. ? log10(abs(c4)) : 0.) 
<< " " << "\n" << flush; #endif cacheCurrent(Complex(0.,0.5) * qqbargFixedLeftLoopCurrent(q,qHel,qbar,qbarHel,g1,g1Hel)); } #ifdef CHECK_MatchboxCurrents checkCurrent("qqbargLeftLoopCurrent",cachedCurrent(),momentum(q)+momentum(qbar)+momentum(g1)); #endif return cachedCurrent(); } const LorentzVector<Complex>& MatchboxCurrents::qqbargRightOneLoopCurrent(const int q, const int qHel, const int qbar, const int qbarHel, const int g1, const int g1Hel) { if ( qHel != -1 || qbarHel != -1 ) return czero; if ( getCurrent(hash<2>(2,2,q,qHel,qbar,qbarHel,g1,g1Hel)) ) { #ifdef CHECK_MatchboxCurrents LorentzVector<Complex> ni = Complex(0.,0.5) * qqbargGeneralRightLoopCurrent(q,qHel,qbar,qbarHel,g1,g1Hel,q); LorentzVector<Complex> nj = Complex(0.,0.5) * qqbargGeneralRightLoopCurrent(q,qHel,qbar,qbarHel,g1,g1Hel,qbar); LorentzVector<Complex> nl = Complex(0.,0.5) * qqbargGeneralRightLoopCurrent(q,qHel,qbar,qbarHel,g1,g1Hel,0); LorentzVector<Complex> nlbar = Complex(0.,0.5) * qqbargGeneralRightLoopCurrent(q,qHel,qbar,qbarHel,g1,g1Hel,1); LorentzVector<Complex> fixed = Complex(0.,0.5) * qqbargFixedRightLoopCurrent(q,qHel,qbar,qbarHel,g1,g1Hel); LorentzVector<Complex> x1 = fixed - ni; LorentzVector<Complex> x2 = fixed - nj; LorentzVector<Complex> x3 = fixed - nl; LorentzVector<Complex> x4 = fixed - nlbar; double c1 = real(x1.t() * conj(x1.t())) + real(x1.x() * conj(x1.x())) + real(x1.y() * conj(x1.y())) + real(x1.z() * conj(x1.z())); double c2 = real(x2.t() * conj(x2.t())) + real(x2.x() * conj(x2.x())) + real(x2.y() * conj(x2.y())) + real(x2.z() * conj(x2.z())); double c3 = real(x3.t() * conj(x3.t())) + real(x3.x() * conj(x3.x())) + real(x3.y() * conj(x3.y())) + real(x3.z() * conj(x3.z())); double c4 = real(x4.t() * conj(x4.t())) + real(x4.x() * conj(x4.x())) + real(x4.y() * conj(x4.y())) + real(x4.z() * conj(x4.z())); ostream& ncheck = checkStream("qqbargRightLoopCurrentNChoice"); ncheck << (c1 != 0. ? log10(abs(c1)) : 0.) << " " << (c2 != 0. ? log10(abs(c2)) : 0.) << " " << (c3 != 0. ? log10(abs(c3)) : 0.) << " " << (c4 != 0. ? log10(abs(c4)) : 0.) << " " << "\n" << flush; #endif cacheCurrent(Complex(0.,0.5) * qqbargFixedRightLoopCurrent(q,qHel,qbar,qbarHel,g1,g1Hel)); } #ifdef CHECK_MatchboxCurrents checkCurrent("qqbargRightLoopCurrent",cachedCurrent(),momentum(q)+momentum(qbar)+momentum(g1)); #endif return cachedCurrent(); } #ifdef CHECK_MatchboxCurrents map<string,ofstream * >& MatchboxCurrents::checkStreams() { static map<string,ofstream * > theMap; return theMap; } ostream& MatchboxCurrents::checkStream(const string& id) { map<string,ofstream * >::iterator ret = checkStreams().find(id); if ( ret == checkStreams().end() ) { checkStreams()[id] = new ofstream(id.c_str()); ret = checkStreams().find(id); } return *(ret->second); } void MatchboxCurrents::checkCurrent(const string& id, const LorentzVector<Complex>& current, const LorentzVector<double>& q) { Complex c = current.dot(q); double ac = abs(real(conj(c) * c)); - if ( isnan(ac) || isinf(ac) ) { + if ( ! isfinite(ac) ) { cerr << "ooops ... nan encountered in current conservation\n" << flush; return; } checkStream(id) << (ac > 0. ? log10(ac) : 0.) 
<< "\n"; } #endif // CHECK_MatchboxCurrents diff --git a/MatrixElement/Matchbox/Utility/ColourBasis.cc b/MatrixElement/Matchbox/Utility/ColourBasis.cc --- a/MatrixElement/Matchbox/Utility/ColourBasis.cc +++ b/MatrixElement/Matchbox/Utility/ColourBasis.cc @@ -1,1278 +1,1278 @@ // -*- C++ -*- // // ColourBasis.cc is a part of Herwig - A multi-purpose Monte Carlo event generator // Copyright (C) 2002-2012 The Herwig Collaboration // // Herwig is licenced under version 2 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. // // // This is the implementation of the non-inlined, non-templated member // functions of the ColourBasis class. // #include "ColourBasis.h" #include "ThePEG/Interface/ClassDocumentation.h" #include "ThePEG/Interface/Parameter.h" #include "ThePEG/Interface/Switch.h" #include "ThePEG/EventRecord/Particle.h" #include "ThePEG/Repository/UseRandom.h" #include "ThePEG/Repository/EventGenerator.h" #include "ThePEG/Utilities/DescribeClass.h" #include "Herwig/MatrixElement/Matchbox/MatchboxFactory.h" #include "ThePEG/Persistency/PersistentOStream.h" #include "ThePEG/Persistency/PersistentIStream.h" #include <boost/numeric/ublas/io.hpp> #include <boost/numeric/ublas/matrix_proxy.hpp> #include <iterator> using std::ostream_iterator; #include "DiagramDrawer.h" using namespace Herwig; using boost::numeric::ublas::trans; // default gcc on SLC6 confuses this with std::conj, // use explicit namespacing in the code instead // // using boost::numeric::ublas::conj; using boost::numeric::ublas::row; using boost::numeric::ublas::column; using boost::numeric::ublas::prod; Ptr<MatchboxFactory>::tptr ColourBasis::factory() const { return theFactory; } void ColourBasis::factory(Ptr<MatchboxFactory>::tptr f) { theFactory = f; } ColourBasis::ColourBasis() : theLargeN(false), didRead(false), didWrite(false), theSearchPath("") {} ColourBasis::~ColourBasis() { for ( map<Ptr<Tree2toNDiagram>::tcptr,vector<ColourLines*> >::iterator cl = theColourLineMap.begin(); cl != theColourLineMap.end(); ++cl ) { for ( vector<ColourLines*>::iterator c = cl->second.begin(); c != cl->second.end(); ++c ) { if ( *c ) delete *c; } } theColourLineMap.clear(); } void ColourBasis::clear() { theLargeN = false; theNormalOrderedLegs.clear(); theIndexMap.clear(); theScalarProducts.clear(); theCharges.clear(); theChargeNonZeros.clear(); theCorrelators.clear(); theFlowMap.clear(); theColourLineMap.clear(); theOrderingStringIdentifiers.clear(); theOrderingIdentifiers.clear(); didRead = false; didWrite = false; tmp.clear(); } // If needed, insert default implementations of virtual function defined // in the InterfacedBase class here (using ThePEG-interfaced-impl in Emacs). bool ColourBasis::colourConnected(const cPDVector& sub, const vector<PDT::Colour>& basis, const pair<int,bool>& i, const pair<int,bool>& j, size_t a) const { // translate process to basis ids map<cPDVector,map<size_t,size_t> >::const_iterator trans = indexMap().find(sub); assert(trans != indexMap().end()); int idColoured = i.second ? j.first : i.first; idColoured = trans->second.find(idColoured)->second; int idAntiColoured = i.second ? 
i.first : j.first; idAntiColoured = trans->second.find(idAntiColoured)->second; return colourConnected(basis,idColoured,idAntiColoured,a); } const string& ColourBasis::orderingString(const cPDVector& sub, const map<size_t,size_t>& colourToAmplitude, size_t tensorId) { map<size_t,string>& tensors = theOrderingStringIdentifiers[sub]; if ( !tensors.empty() ) { assert(tensors.find(tensorId) != tensors.end()); return tensors[tensorId]; } const set<vector<size_t> >& xordering = ordering(sub,colourToAmplitude,tensorId); ostringstream os; os << "["; for ( set<vector<size_t> >::const_iterator t = xordering.begin(); t != xordering.end(); ++t ) { os << "["; for ( vector<size_t>::const_iterator s = t->begin(); s != t->end(); ++s ) { os << *s << (s != --t->end() ? "," : ""); } os << "]" << (t != --xordering.end() ? "," : ""); } os << "]"; tensors[tensorId] = os.str(); return tensors[tensorId]; } const set<vector<size_t> >& ColourBasis::ordering(const cPDVector& sub, const map<size_t,size_t>& colourToAmplitude, size_t tensorId, size_t shift) { map<size_t,set<vector<size_t> > >& tensors = theOrderingIdentifiers[sub]; if ( !tensors.empty() ) { assert(tensors.find(tensorId) != tensors.end()); return tensors[tensorId]; } const vector<PDT::Colour>& basisId = normalOrderedLegs(sub); map<size_t,vector<vector<size_t> > > labels = basisList(basisId); for ( map<size_t,vector<vector<size_t> > >::const_iterator t = labels.begin(); t != labels.end(); ++t ) { set<vector<size_t> > xordering; for ( vector<vector<size_t> >::const_iterator s = t->second.begin(); s != t->second.end(); ++s ) { vector<size_t> crossed; for ( vector<size_t>::const_iterator l = s->begin(); l != s->end(); ++l ) { map<size_t,size_t>::const_iterator trans = colourToAmplitude.find(*l); assert(trans != colourToAmplitude.end()); crossed.push_back(trans->second + shift); } xordering.insert(crossed); } tensors[t->first] = xordering; } assert(tensors.find(tensorId) != tensors.end()); return tensors[tensorId]; } vector<PDT::Colour> ColourBasis::normalOrderMap(const cPDVector& sub) { vector<PDT::Colour> allLegs = projectColour(sub); vector<PDT::Colour> legs = normalOrder(allLegs); if ( allLegs[0] == PDT::Colour3 ) allLegs[0] = PDT::Colour3bar; else if ( allLegs[0] == PDT::Colour3bar ) allLegs[0] = PDT::Colour3; if ( allLegs[1] == PDT::Colour3 ) allLegs[1] = PDT::Colour3bar; else if ( allLegs[1] == PDT::Colour3bar ) allLegs[1] = PDT::Colour3; if ( theIndexMap.find(sub) == theIndexMap.end() ) { map<size_t,size_t> trans; vector<PDT::Colour> checkLegs = legs; size_t n = checkLegs.size(); for ( size_t i = 0; i < allLegs.size(); ++i ) { size_t j = 0; while ( checkLegs[j] != allLegs[i] ) { ++j; if ( j == n ) break; } if ( j == n ) continue; trans[i] = j; checkLegs[j] = PDT::ColourUndefined; } theIndexMap[sub] = trans; } return legs; } const vector<PDT::Colour>& ColourBasis::normalOrderedLegs(const cPDVector& sub) const { static vector<PDT::Colour> empty; map<cPDVector,vector<PDT::Colour> >::const_iterator n = theNormalOrderedLegs.find(sub); if ( n != theNormalOrderedLegs.end() ) return n->second; return empty; } size_t ColourBasis::prepare(const cPDVector& sub, bool noCorrelations) { vector<PDT::Colour> legs = normalOrderMap(sub); bool doPrepare = false; if ( theNormalOrderedLegs.find(sub) == theNormalOrderedLegs.end() ) theNormalOrderedLegs[sub] = legs; if ( theScalarProducts.find(legs) == theScalarProducts.end() ) doPrepare = true; if ( doPrepare ) doPrepare = !readBasis(legs); size_t dim = doPrepare ? 
prepareBasis(legs) : theScalarProducts[legs].size1(); if ( theCharges.find(legs) != theCharges.end() ) return dim; if ( !doPrepare && noCorrelations ) return dim; symmetric_matrix<double,upper>& sp = theScalarProducts.insert(make_pair(legs,symmetric_matrix<double,upper>(dim,dim))).first->second; for ( size_t a = 0; a < dim; ++a ) for ( size_t b = a; b < dim; ++b ) sp(a,b) = scalarProduct(a,b,legs); if ( noCorrelations ) return dim; vector<PDT::Colour> legsPlus = legs; legsPlus.push_back(PDT::Colour8); legsPlus = normalOrder(legsPlus); bool doPreparePlus = theScalarProducts.find(legsPlus) == theScalarProducts.end(); size_t dimPlus = doPreparePlus ? prepareBasis(legsPlus) : theScalarProducts[legsPlus].size1(); symmetric_matrix<double,upper>& spPlus = doPreparePlus ? theScalarProducts.insert(make_pair(legsPlus,symmetric_matrix<double,upper>(dimPlus,dimPlus))).first->second : theScalarProducts[legsPlus]; if ( doPreparePlus ) { for ( size_t a = 0; a < dimPlus; ++a ) for ( size_t b = a; b < dimPlus; ++b ) spPlus(a,b) = scalarProduct(a,b,legsPlus); } typedef map<size_t,compressed_matrix<double> > cMap; cMap& cm = theCharges.insert(make_pair(legs,cMap())).first->second; typedef map<size_t,vector<pair<size_t,size_t> > > ccMap; ccMap& ccm = theChargeNonZeros.insert(make_pair(legs,ccMap())).first->second; tmp.resize(dimPlus,dim); for ( size_t i = 0; i < legs.size(); ++i ) { size_t nonZero = 0; vector<pair<size_t,size_t> > nonZeros; for ( size_t a = 0; a < dimPlus; ++a ) for ( size_t b = 0; b < dim; ++b ) { tmp(a,b) = tMatrixElement(i,a,b,legsPlus,legs); if ( tmp(a,b) != 0. ) { ++nonZero; nonZeros.push_back(make_pair(a,b)); } } ccm.insert(make_pair(i,nonZeros)); compressed_matrix<double>& tm = cm.insert(make_pair(i,compressed_matrix<double>(dimPlus,dim,nonZero))).first->second; for ( size_t a = 0; a < dimPlus; ++a ) for ( size_t b = 0; b < dim; ++b ) { if ( tmp(a,b) != 0. 
) tm(a,b) = tmp(a,b); } } map<pair<size_t,size_t>,symmetric_matrix<double,upper> >& xm = theCorrelators[legs]; for ( size_t i = 0; i < legs.size(); ++i ) for ( size_t j = i+1; j < legs.size(); ++j ) { symmetric_matrix<double,upper>& mm = xm.insert(make_pair(make_pair(i,j),symmetric_matrix<double,upper>(dim,dim))).first->second; chargeProduct(cm[i],ccm[i],spPlus,cm[j],ccm[j],mm); } return dim; } void ColourBasis::chargeProduct(const compressed_matrix<double>& ti, const vector<pair<size_t,size_t> >& tiNonZero, const symmetric_matrix<double,upper>& X, const compressed_matrix<double>& tj, const vector<pair<size_t,size_t> >& tjNonZero, symmetric_matrix<double,upper>& result) const { for ( size_t i = 0; i < result.size1(); ++i ) for ( size_t j = i; j < result.size1(); ++j ) result(i,j) = 0.; for ( vector<pair<size_t,size_t> >::const_iterator i = tiNonZero.begin(); i != tiNonZero.end(); ++i ) for ( vector<pair<size_t,size_t> >::const_iterator j = tjNonZero.begin(); j != tjNonZero.end(); ++j ) { if ( j->second < i->second ) continue; result(i->second,j->second) += ti(i->first,i->second)*tj(j->first,j->second)*X(i->first,j->first); } } void ColourBasis::chargeProductAdd(const compressed_matrix<double>& ti, const vector<pair<size_t,size_t> >& tiNonZero, const matrix<Complex>& X, const compressed_matrix<double>& tj, const vector<pair<size_t,size_t> >& tjNonZero, matrix<Complex>& result, double factor) const { for ( vector<pair<size_t,size_t> >::const_iterator i = tiNonZero.begin(); i != tiNonZero.end(); ++i ) for ( vector<pair<size_t,size_t> >::const_iterator j = tjNonZero.begin(); j != tjNonZero.end(); ++j ) { result(i->first,j->first) += factor* ti(i->first,i->second)*tj(j->first,j->second)*X(i->second,j->second); } } string ColourBasis::cfstring(const list<list<pair<int,bool> > >& flow) { ostringstream out(""); for ( list<list<pair<int,bool> > >::const_iterator line = flow.begin(); line != flow.end(); ++line ) { for ( list<pair<int,bool> >::const_iterator node = line->begin(); node != line->end(); ++node ) { out << (node->second ? 
"-" : "") << (node->first+1) << " "; } if ( line != --(flow.end()) ) out << ", "; } return out.str(); } vector<string> ColourBasis::makeFlows(Ptr<Tree2toNDiagram>::tcptr diag, size_t dim) const { vector<string> res(dim); list<list<list<pair<int,bool> > > > fdata = colourFlows(diag); cPDVector ext; tcPDVector dext = diag->external(); copy(dext.begin(),dext.end(),back_inserter(ext)); vector<PDT::Colour> colouredLegs = normalOrder(projectColour(ext)); for ( list<list<list<pair<int,bool> > > >::const_iterator flow = fdata.begin(); flow != fdata.end(); ++flow ) { for ( size_t i = 0; i < dim; ++i ) { bool matches = true; for ( list<list<pair<int,bool> > >::const_iterator line = flow->begin(); line != flow->end(); ++line ) { pair<int,bool> front(diag->externalId(line->front().first),line->front().second); if ( front.first < 2 ) front.second = !front.second; pair<int,bool> back(diag->externalId(line->back().first),line->back().second); if ( back.first < 2 ) back.second = !back.second; if ( !colourConnected(ext,colouredLegs,front,back,i) ) { matches = false; break; } } if ( matches ) { assert(res[i] == "" && "only support colour bases with unique mapping to large-N colour flows"); res[i] = cfstring(*flow); } } } bool gotone = false; for ( vector<string>::const_iterator f = res.begin(); f != res.end(); ++f ) { if ( *f != "" ) { gotone = true; break; } } if ( !gotone ) { generator()->log() << "warning no color flow found for diagram\n"; DiagramDrawer::drawDiag(generator()->log(),*diag); } return res; } size_t ColourBasis::prepare(const MEBase::DiagramVector& diags, bool noCorrelations) { size_t dim = 0; for ( MEBase::DiagramVector::const_iterator d = diags.begin(); d != diags.end(); ++d ) { Ptr<Tree2toNDiagram>::tcptr dd = dynamic_ptr_cast<Ptr<Tree2toNDiagram>::ptr>(*d); assert(dd); dim = prepare(dd->partons(),noCorrelations); if ( !haveColourFlows() || theFlowMap.find(dd) != theFlowMap.end() ) continue; theFlowMap[dd] = makeFlows(dd,dim); } return dim; } bool matchEnd(int a, pair<int,bool> b, Ptr<Tree2toNDiagram>::tcptr diag) { if ( a != b.first ) return false; if ( b.first != diag->nSpace()-1 ) { return !b.second ? diag->allPartons()[b.first]->hasColour() : diag->allPartons()[b.first]->hasAntiColour(); } else { return !b.second ? diag->allPartons()[b.first]->hasAntiColour() : diag->allPartons()[b.first]->hasColour(); } return false; } bool findPath(pair<int,bool> a, pair<int,bool> b, Ptr<Tree2toNDiagram>::tcptr diag, list<pair<int,bool> >& path, bool backward) { assert(a.first==0 ? !backward : true); if ( path.empty() ) path.push_back(a); if ( !backward ) { if ( diag->children(a.first).first == -1 ) return matchEnd(a.first,b,diag); pair<int,int> children = diag->children(a.first); bool cc = (children.first == diag->nSpace()-1); if ( diag->allPartons()[children.first]->coloured() ) if ( !cc ? (!a.second ? diag->allPartons()[children.first]->hasColour() : diag->allPartons()[children.first]->hasAntiColour()) : (!a.second ? diag->allPartons()[children.first]->hasAntiColour() : diag->allPartons()[children.first]->hasColour()) ) { pair<int,bool> next(children.first,a.second); path.push_back(next); if ( !findPath(next,b,diag,path,false) ) { path.pop_back(); } else return true; } cc = (children.second == diag->nSpace()-1); if ( diag->allPartons()[children.second]->coloured() ) if ( !cc ? (!a.second ? diag->allPartons()[children.second]->hasColour() : diag->allPartons()[children.second]->hasAntiColour()) : (!a.second ? 
diag->allPartons()[children.second]->hasAntiColour() : diag->allPartons()[children.second]->hasColour()) ) { pair<int,bool> next(children.second,a.second); path.push_back(next); if ( !findPath(next,b,diag,path,false) ) { path.pop_back(); } else return true; } if ( path.size() == 1 ) path.pop_back(); return false; } else { int parent = diag->parent(a.first); pair<int,int> neighbours = diag->children(parent); int neighbour = a.first == neighbours.first ? neighbours.second : neighbours.first; if ( matchEnd(parent,b,diag) ) { path.push_back(b); return true; } if ( matchEnd(neighbour,b,diag) ) { path.push_back(b); return true; } if ( diag->allPartons()[neighbour]->coloured() ) if ( a.second ? diag->allPartons()[neighbour]->hasColour() : diag->allPartons()[neighbour]->hasAntiColour() ) { pair<int,bool> next(neighbour,!a.second); path.push_back(next); if ( !findPath(next,b,diag,path,false) ) { path.pop_back(); } else return true; } if ( parent == 0 ) { if ( path.size() == 1 ) path.pop_back(); return false; } if ( diag->allPartons()[parent]->coloured() ) if ( !a.second ? diag->allPartons()[parent]->hasColour() : diag->allPartons()[parent]->hasAntiColour() ) { pair<int,bool> next(parent,a.second); path.push_back(next); if ( !findPath(next,b,diag,path,true) ) { path.pop_back(); } else return true; } if ( path.size() == 1 ) path.pop_back(); return false; } return false; } list<pair<int,bool> > ColourBasis::colouredPath(pair<int,bool> a, pair<int,bool> b, Ptr<Tree2toNDiagram>::tcptr diag) { list<pair<int,bool> > res; if ( a.first == b.first ) return res; bool aIn = (a.first < 2); bool bIn = (b.first < 2); if ( (aIn && bIn) || (!aIn && !bIn) ) if ( (a.second && b.second) || (!a.second && !b.second) ) return res; if ( (aIn && !bIn) || (!aIn && bIn) ) if ( (!a.second && b.second) || (a.second && !b.second) ) return res; if ( a.first > b.first ) swap(a,b); a.first = diag->diagramId(a.first); b.first = diag->diagramId(b.first); if ( a.first == diag->nSpace()-1 ) a.second = !a.second; if ( b.first == diag->nSpace()-1 ) b.second = !b.second; if ( !findPath(a,b,diag,res,a.first != 0) ) return res; if ( b.first == diag->nSpace()-1 ) { res.back().second = !res.back().second; } if ( a.first == diag->nSpace()-1 ) { res.front().second = !res.front().second; } return res; } list<list<list<pair<int,bool> > > > ColourBasis::colourFlows(Ptr<Tree2toNDiagram>::tcptr diag) { vector<pair<int,bool> > connectSource; vector<pair<int,bool> > connectSink; for ( size_t i = 0; i != diag->partons().size(); ++i ) { if ( i < 2 && diag->partons()[i]->hasAntiColour() ) connectSource.push_back(make_pair(i,true)); if ( i < 2 && diag->partons()[i]->hasColour() ) connectSink.push_back(make_pair(i,false)); if ( i > 1 && diag->partons()[i]->hasColour() ) connectSource.push_back(make_pair(i,false)); if ( i > 1 && diag->partons()[i]->hasAntiColour() ) connectSink.push_back(make_pair(i,true)); } assert(connectSource.size() == connectSink.size()); list<list<list<pair<int,bool> > > > ret; do { vector<pair<int,bool> >::iterator source = connectSource.begin(); vector<pair<int,bool> >::iterator sink = connectSink.begin(); list<list<pair<int,bool> > > res; for ( ; source != connectSource.end(); ++source, ++sink ) { if ( source->first == sink->first ) { res.clear(); break; } list<pair<int,bool> > line = colouredPath(*source,*sink,diag); if ( line.empty() ) { res.clear(); break; } res.push_back(line); } if ( !res.empty() ) { // check, if all dressed properly vector<pair<int,int> > dressed((*diag).allPartons().size(),make_pair(0,0)); for ( size_t p = 
0; p < diag->allPartons().size(); ++p ) { if ( diag->allPartons()[p]->hasColour() && !diag->allPartons()[p]->hasAntiColour() ) dressed[p].first = 1; if ( diag->allPartons()[p]->hasAntiColour() && !diag->allPartons()[p]->hasColour() ) dressed[p].second = 1; if ( diag->allPartons()[p]->hasAntiColour() && diag->allPartons()[p]->hasColour() ) { dressed[p].first = 1; dressed[p].second = 1; } } for ( list<list<pair<int,bool> > >::const_iterator l = res.begin(); l != res.end(); ++l ) { for ( list<pair<int,bool> >::const_iterator n = l->begin(); n != l->end(); ++n ) { if ( !(n->second) ) dressed[n->first].first -= 1; else dressed[n->first].second -= 1; } } for ( vector<pair<int,int> >::const_iterator d = dressed.begin(); d != dressed.end(); ++d ) { if ( d->first != 0 || d->second != 0 ) { res.clear(); break; } } if ( !res.empty() ) ret.push_back(res); } } while ( std::next_permutation(connectSink.begin(),connectSink.end()) ); return ret; } void ColourBasis::updateColourLines(Ptr<Tree2toNDiagram>::tcptr dd) { map<Ptr<Tree2toNDiagram>::tcptr,vector<string> >::const_iterator cl = theFlowMap.find(dd); assert(cl != theFlowMap.end()); vector<ColourLines*> clines(cl->second.size()); for ( size_t k = 0; k < cl->second.size(); ++k ) { if ( cl->second[k] == "" ) { clines[k] = 0; continue; } clines[k] = new ColourLines(cl->second[k]); } theColourLineMap[cl->first] = clines; } map<Ptr<Tree2toNDiagram>::tcptr,vector<ColourLines*> >& ColourBasis::colourLineMap() { if ( !theColourLineMap.empty() ) return theColourLineMap; for ( map<Ptr<Tree2toNDiagram>::tcptr,vector<string> >::const_iterator cl = theFlowMap.begin(); cl != theFlowMap.end(); ++cl ) { vector<ColourLines*> clines(cl->second.size()); for ( size_t k = 0; k < cl->second.size(); ++k ) { if ( cl->second[k] == "" ) { clines[k] = 0; continue; } clines[k] = new ColourLines(cl->second[k]); } theColourLineMap[cl->first] = clines; } return theColourLineMap; } Selector<const ColourLines *> ColourBasis::colourGeometries(tcDiagPtr diag, const map<vector<int>,CVector>& amps) { Ptr<Tree2toNDiagram>::tcptr dd = dynamic_ptr_cast<Ptr<Tree2toNDiagram>::tcptr>(diag); assert(dd && theFlowMap.find(dd) != theFlowMap.end()); map<Ptr<Tree2toNDiagram>::tcptr,vector<ColourLines*> >::const_iterator colit = colourLineMap().find(dd); if ( colit == colourLineMap().end() ) { updateColourLines(dd); colit = colourLineMap().find(dd); } const vector<ColourLines*>& cl = colit->second; Selector<const ColourLines *> sel; size_t dim = amps.begin()->second.size(); assert(dim == cl.size()); double w = 0.; for ( size_t i = 0; i < dim; ++i ) { if ( !cl[i] ) continue; w = 0.; for ( map<vector<int>,CVector>::const_iterator a = amps.begin(); a != amps.end(); ++a ) w += real(conj((a->second)(i))*((a->second)(i))); if ( w > 0. 
) sel.insert(w,cl[i]); } assert(!sel.empty()); return sel; } size_t ColourBasis::tensorIdFromFlow(tcDiagPtr diag, const ColourLines * flow) { Ptr<Tree2toNDiagram>::tcptr dd = dynamic_ptr_cast<Ptr<Tree2toNDiagram>::tcptr>(diag); assert(dd && theFlowMap.find(dd) != theFlowMap.end()); map<Ptr<Tree2toNDiagram>::tcptr,vector<ColourLines*> >::const_iterator colit = colourLineMap().find(dd); if ( colit == colourLineMap().end() ) { updateColourLines(dd); colit = colourLineMap().find(dd); } const vector<ColourLines*>& cl = colit->second; size_t res = 0; for ( ; res < cl.size(); ++res ) { if ( flow == cl[res] ) break; } assert(res < cl.size()); return res; } const symmetric_matrix<double,upper>& ColourBasis::scalarProducts(const cPDVector& sub) const { map<cPDVector,vector<PDT::Colour> >::const_iterator lit = theNormalOrderedLegs.find(sub); assert(lit != theNormalOrderedLegs.end()); ScalarProductMap::const_iterator spit = theScalarProducts.find(lit->second); assert(spit != theScalarProducts.end()); return spit->second; } const compressed_matrix<double>& ColourBasis::charge(const cPDVector& sub, size_t iIn) const { map<cPDVector,vector<PDT::Colour> >::const_iterator lit = theNormalOrderedLegs.find(sub); assert(lit != theNormalOrderedLegs.end()); ChargeMap::const_iterator ct = theCharges.find(lit->second); assert(ct != theCharges.end()); map<cPDVector,map<size_t,size_t> >::const_iterator trans = theIndexMap.find(sub); assert(trans != theIndexMap.end()); size_t i = trans->second.find(iIn)->second; map<size_t,compressed_matrix<double> >::const_iterator cit = ct->second.find(i); assert(cit != ct->second.end()); return cit->second; } const vector<pair<size_t,size_t> >& ColourBasis::chargeNonZero(const cPDVector& sub, size_t iIn) const { map<cPDVector,vector<PDT::Colour> >::const_iterator lit = theNormalOrderedLegs.find(sub); assert(lit != theNormalOrderedLegs.end()); ChargeNonZeroMap::const_iterator ct = theChargeNonZeros.find(lit->second); assert(ct != theChargeNonZeros.end()); map<cPDVector,map<size_t,size_t> >::const_iterator trans = theIndexMap.find(sub); assert(trans != theIndexMap.end()); size_t i = trans->second.find(iIn)->second; map<size_t,vector<pair<size_t,size_t> > >::const_iterator cit = ct->second.find(i); assert(cit != ct->second.end()); return cit->second; } const symmetric_matrix<double,upper>& ColourBasis::correlator(const cPDVector& sub, const pair<size_t,size_t>& ijIn) const { map<cPDVector,vector<PDT::Colour> >::const_iterator lit = theNormalOrderedLegs.find(sub); assert(lit != theNormalOrderedLegs.end()); CorrelatorMap::const_iterator cit = theCorrelators.find(lit->second); assert(cit != theCorrelators.end()); map<cPDVector,map<size_t,size_t> >::const_iterator trans = theIndexMap.find(sub); assert(trans != theIndexMap.end()); pair<size_t,size_t> ij(trans->second.find(ijIn.first)->second, trans->second.find(ijIn.second)->second); if ( ij.first > ij.second ) swap(ij.first,ij.second); map<pair<size_t,size_t>,symmetric_matrix<double,upper> >::const_iterator cijit = cit->second.find(ij); assert(cijit != cit->second.end()); return cijit->second; } double ColourBasis::me2(const cPDVector& sub, const map<vector<int>,CVector>& amps) const { const symmetric_matrix<double,upper>& sp = scalarProducts(sub); double res = 0.; for ( map<vector<int>,CVector>::const_iterator a = amps.begin(); a != amps.end(); ++a ) { res += real(inner_prod(boost::numeric::ublas::conj(a->second),prod(sp,a->second))); } return res; } double ColourBasis::interference(const cPDVector& sub, const map<vector<int>,CVector>& 
amps1, const map<vector<int>,CVector>& amps2) const { const symmetric_matrix<double,upper>& sp = scalarProducts(sub); double res = 0.; map<vector<int>,CVector>::const_iterator a = amps1.begin(); map<vector<int>,CVector>::const_iterator b = amps2.begin(); for ( ; a != amps1.end(); ++a, ++b ) { assert(a->first == b->first); res += 2.*real(inner_prod(boost::numeric::ublas::conj(a->second),prod(sp,b->second))); } - assert(!isnan(res)); + assert(!std::isnan(res)); return res; } double ColourBasis::colourCorrelatedME2(const pair<size_t,size_t>& ij, const cPDVector& sub, const map<vector<int>,CVector>& amps) const { const symmetric_matrix<double,upper>& cij = correlator(sub,ij); double res = 0.; for ( map<vector<int>,CVector>::const_iterator a = amps.begin(); a != amps.end(); ++a ) { res += real(inner_prod(boost::numeric::ublas::conj(a->second),prod(cij,a->second))); } return res; } Complex ColourBasis::interference(const cPDVector& sub, const CVector& left, const CVector& right) const { const symmetric_matrix<double,upper>& sp = scalarProducts(sub); return inner_prod(boost::numeric::ublas::conj(left),prod(sp,right)); } Complex ColourBasis::colourCorrelatedInterference(const pair<size_t,size_t>& ij, const cPDVector& sub, const CVector& left, const CVector& right) const { const symmetric_matrix<double,upper>& cij = correlator(sub,ij); return inner_prod(boost::numeric::ublas::conj(left),prod(cij,right)); } double ColourBasis::me2(const cPDVector& sub, const matrix<Complex>& amp) const { const symmetric_matrix<double,upper>& sp = scalarProducts(sub); double tr = 0; size_t n = amp.size1(); for ( size_t i = 0; i < n; ++i ) { tr += real(inner_prod(row(sp,i),column(amp,i))); } return tr; } double ColourBasis::colourCorrelatedME2(const pair<size_t,size_t>& ij, const cPDVector& sub, const matrix<Complex>& amp) const { const symmetric_matrix<double,upper>& cij = correlator(sub,ij); double tr = 0; size_t n = amp.size1(); for ( size_t i = 0; i < n; ++i ) { tr += real(inner_prod(row(cij,i),column(amp,i))); } return tr; } struct pickColour { PDT::Colour operator()(tcPDPtr p) const { return p->iColour(); } }; vector<PDT::Colour> ColourBasis::projectColour(const cPDVector& sub) const { vector<PDT::Colour> res(sub.size()); transform(sub.begin(),sub.end(),res.begin(),pickColour()); return res; } vector<PDT::Colour> ColourBasis::normalOrder(const vector<PDT::Colour>& legs) const { vector<PDT::Colour> crosslegs = legs; if ( crosslegs[0] == PDT::Colour3 ) crosslegs[0] = PDT::Colour3bar; else if ( crosslegs[0] == PDT::Colour3bar ) crosslegs[0] = PDT::Colour3; if ( crosslegs[1] == PDT::Colour3 ) crosslegs[1] = PDT::Colour3bar; else if ( crosslegs[1] == PDT::Colour3bar ) crosslegs[1] = PDT::Colour3; int n3 = count_if(crosslegs.begin(),crosslegs.end(),matchRep(PDT::Colour3)); int n8 = count_if(crosslegs.begin(),crosslegs.end(),matchRep(PDT::Colour8)); vector<PDT::Colour> ordered(2*n3+n8,PDT::Colour8); int i = 0; while ( i < 2*n3 ) { ordered[i] = PDT::Colour3; ordered[i+1] = PDT::Colour3bar; i+=2; } return ordered; } string ColourBasis::file(const vector<PDT::Colour>& sub) const { string res = name() + "-"; for ( vector<PDT::Colour>::const_iterator lit = sub.begin(); lit != sub.end(); ++lit ) { if ( *lit == PDT::Colour3 ) res += "3"; if ( *lit == PDT::Colour3bar ) res += "3bar"; if ( *lit == PDT::Colour8 ) res += "8"; } if ( largeN() ) res += "largeN"; return res; } void ColourBasis::writeBasis(const string& prefix) const { if ( didWrite ) return; set<vector<PDT::Colour> > legs; for ( map<cPDVector,vector<PDT::Colour> 
>::const_iterator lit = theNormalOrderedLegs.begin(); lit != theNormalOrderedLegs.end(); ++lit ) { legs.insert(lit->second); } string searchPath = theSearchPath; if ( searchPath != "" ) if ( *(--searchPath.end()) != '/' ) searchPath += "/"; for ( set<vector<PDT::Colour> >::const_iterator known = legs.begin(); known != legs.end(); ++known ) { string fname = searchPath + prefix + file(*known) + ".cdat"; ifstream check(fname.c_str()); if ( check ) continue; ofstream out(fname.c_str()); if ( !out ) throw Exception() << "ColourBasis: Failed to open " << fname << " for storing colour basis information." << Exception::runerror; out << setprecision(18); const symmetric_matrix<double,upper>& sp = theScalarProducts.find(*known)->second; write(sp,out); if ( theCharges.find(*known) != theCharges.end() ) { out << "#charges\n"; const map<size_t,compressed_matrix<double> >& tm = theCharges.find(*known)->second; const map<size_t,vector<pair<size_t,size_t> > >& tc = theChargeNonZeros.find(*known)->second; map<size_t,vector<pair<size_t,size_t> > >::const_iterator kc = tc.begin(); for ( map<size_t,compressed_matrix<double> >::const_iterator k = tm.begin(); k != tm.end(); ++k, ++kc ) { out << k->first << "\n"; write(k->second,out,kc->second); } const map<pair<size_t,size_t>,symmetric_matrix<double,upper> >& cm = theCorrelators.find(*known)->second; for ( map<pair<size_t,size_t>,symmetric_matrix<double,upper> >::const_iterator k = cm.begin(); k != cm.end(); ++k ) { out << k->first.first << "\n" << k->first.second << "\n"; write(k->second,out); } } else { out << "#nocharges\n"; } out << flush; } didWrite = true; } bool ColourBasis::readBasis(const vector<PDT::Colour>& legs) { string searchPath = theSearchPath; if ( searchPath != "" ) if ( *(--searchPath.end()) != '/' ) searchPath += "/"; string fname = searchPath + file(legs) + ".cdat"; ifstream in(fname.c_str()); if ( !in ) return false; read(theScalarProducts[legs],in); string tag; in >> tag; if ( tag != "#nocharges" ) { for ( size_t k = 0; k < legs.size(); ++k ) { size_t i; in >> i; read(theCharges[legs][i],in,theChargeNonZeros[legs][i]); } for ( size_t k = 0; k < legs.size()*(legs.size()-1)/2; ++k ) { size_t i,j; in >> i >> j; read(theCorrelators[legs][make_pair(i,j)],in); } } readBasisDetails(legs); return true; } void ColourBasis::readBasis() { if ( didRead ) return; string searchPath = theSearchPath; if ( searchPath != "" ) if ( *(--searchPath.end()) != '/' ) searchPath += "/"; set<vector<PDT::Colour> > legs; for ( map<cPDVector,vector<PDT::Colour> >::const_iterator lit = theNormalOrderedLegs.begin(); lit != theNormalOrderedLegs.end(); ++lit ) legs.insert(lit->second); for ( set<vector<PDT::Colour> >::const_iterator known = legs.begin(); known != legs.end(); ++known ) { if ( theScalarProducts.find(*known) != theScalarProducts.end() ) continue; string fname = searchPath + file(*known) + ".cdat"; if ( !readBasis(*known) ) throw Exception() << "ColourBasis: Failed to open " << fname << " for reading colour basis information." 
<< Exception::runerror; } didRead = true; } void ColourBasis::write(const symmetric_matrix<double,upper>& m, ostream& os) const { os << m.size1() << "\n"; for ( size_t i = 0; i < m.size1(); ++i ) for ( size_t j = i; j < m.size1(); ++j ) os << m(i,j) << "\n"; os << flush; } void ColourBasis::read(symmetric_matrix<double,upper>& m, istream& is) { size_t s; is >> s; m.resize(s); for ( size_t i = 0; i < m.size1(); ++i ) for ( size_t j = i; j < m.size1(); ++j ) is >> m(i,j); } void ColourBasis::write(const compressed_matrix<double>& m, ostream& os, const vector<pair<size_t,size_t> >& nonZeros) const { os << nonZeros.size() << "\n" << m.size1() << "\n" << m.size2() << "\n"; for ( vector<pair<size_t,size_t> >::const_iterator nz = nonZeros.begin(); nz != nonZeros.end(); ++nz ) os << nz->first << "\n" << nz->second << "\n" << m(nz->first,nz->second) << "\n"; os << flush; } void ColourBasis::read(compressed_matrix<double>& m, istream& is, vector<pair<size_t,size_t> >& nonZeros) { size_t nonZero, size1, size2; is >> nonZero >> size1 >> size2; nonZeros.resize(nonZero); m = compressed_matrix<double>(size1,size2,nonZero); for ( size_t k = 0; k < nonZero; ++k ) { size_t i,j; double val; is >> i >> j >> val; nonZeros[k] = make_pair(i,j); m(i,j) = val; } } void ColourBasis::doinit() { HandlerBase::doinit(); if ( theSearchPath.empty() && factory() ) theSearchPath = factory()->buildStorage(); readBasis(); } void ColourBasis::dofinish() { HandlerBase::dofinish(); writeBasis(); } void ColourBasis::doinitrun() { HandlerBase::doinitrun(); if ( theSearchPath.empty() && factory() ) theSearchPath = factory()->buildStorage(); readBasis(); } void ColourBasis::persistentOutput(PersistentOStream & os) const { os << theLargeN << theNormalOrderedLegs << theIndexMap << theFlowMap << theOrderingStringIdentifiers << theOrderingIdentifiers << theFactory << theSearchPath; writeBasis(); } void ColourBasis::persistentInput(PersistentIStream & is, int) { is >> theLargeN >> theNormalOrderedLegs >> theIndexMap >> theFlowMap >> theOrderingStringIdentifiers >> theOrderingIdentifiers >> theFactory >> theSearchPath; } // *** Attention *** The following static variable is needed for the type // description system in ThePEG. Please check that the template arguments // are correct (the class and its base class), and that the constructor // arguments are correct (the class name and the name of the dynamically // loadable library where the class implementation can be found). DescribeAbstractClass<ColourBasis,HandlerBase> describeColourBasis("Herwig::ColourBasis", "Herwig.so"); void ColourBasis::Init() { static ClassDocumentation<ColourBasis> documentation ("ColourBasis is an interface to a colour basis " "implementation."); static Switch<ColourBasis,bool> interfaceLargeN ("LargeN", "Switch on or off large-N evaluation.", &ColourBasis::theLargeN, false, false, false); static SwitchOption interfaceLargeNOn (interfaceLargeN, "On", "Work in N=infinity", true); static SwitchOption interfaceLargeNOff (interfaceLargeN, "Off", "Work in N=3", false); } diff --git a/MatrixElement/Powheg/MEPP2VVPowheg.cc b/MatrixElement/Powheg/MEPP2VVPowheg.cc --- a/MatrixElement/Powheg/MEPP2VVPowheg.cc +++ b/MatrixElement/Powheg/MEPP2VVPowheg.cc @@ -1,5278 +1,5278 @@ // -*- C++ -*- // // MEPP2VVPowheg.cc is a part of Herwig - A multi-purpose Monte Carlo event generator // Copyright (C) 2002-2011 The Herwig Collaboration // // Herwig is licenced under version 2 of the GPL, see COPYING for details. 
// Please respect the MCnet academic guidelines, see GUIDELINES for details. // // // This is the implementation of the non-inlined, non-templated member // functions of the MEPP2VVPowheg class. // #include "MEPP2VVPowheg.h" #include "ThePEG/Interface/Switch.h" #include "ThePEG/Interface/Reference.h" #include "ThePEG/Interface/Parameter.h" #include "ThePEG/Interface/ClassDocumentation.h" #include "ThePEG/Persistency/PersistentOStream.h" #include "ThePEG/Persistency/PersistentIStream.h" #include "ThePEG/PDT/EnumParticles.h" #include "ThePEG/MatrixElement/Tree2toNDiagram.h" #include "ThePEG/Handlers/StandardXComb.h" #include "Herwig/Models/StandardModel/StandardModel.h" #include "Herwig/MatrixElement/HardVertex.h" #include "Herwig/Decay/GeneralDecayMatrixElement.h" #include "Herwig/Shower/QTilde/Base/ShowerProgenitor.h" #include "Herwig/Shower/QTilde/Base/Branching.h" #include "Herwig/Shower/RealEmissionProcess.h" using namespace Herwig; MEPP2VVPowheg::MEPP2VVPowheg() : tiny(1.e-10), CF_(4./3.), TR_(0.5), NC_(3.), contrib_(1), channels_(0), nlo_alphaS_opt_(0) , fixed_alphaS_(0.1180346226), removebr_(1), scaleopt_(1), mu_F_(100.*GeV), mu_UV_(100.*GeV), ckm_(3,vector<Complex>(3,0.0)), helicityConservation_(true), realMESpinCorrelations_(true), power_(2.0), preqqbar_(3.7),preqg_(16.0),pregqbar_(11.0), b0_((11.-2./3.*5.)/4./Constants::pi), LambdaQCD_(91.118*GeV*exp(-1./2./((11.-2./3.*5.)/4./Constants::pi)/0.118)), min_pT_(2.*GeV){ massOption(vector<unsigned int>(2,1)); } void MEPP2VVPowheg::persistentOutput(PersistentOStream & os) const { os << contrib_ << channels_ << nlo_alphaS_opt_ << fixed_alphaS_ << removebr_ << scaleopt_ << ounit(mu_F_,GeV) << ounit(mu_UV_,GeV) << ckm_ << helicityConservation_ << FFPvertex_ << FFWvertex_ << FFZvertex_ << WWWvertex_ << FFGvertex_ << realMESpinCorrelations_ << showerAlphaS_ << power_ << preqqbar_ << preqg_ << pregqbar_ << prefactor_ << b0_ << ounit(LambdaQCD_,GeV) << ounit( min_pT_,GeV ); } void MEPP2VVPowheg::persistentInput(PersistentIStream & is, int) { is >> contrib_ >> channels_ >> nlo_alphaS_opt_ >> fixed_alphaS_ >> removebr_ >> scaleopt_ >> iunit(mu_F_,GeV) >> iunit(mu_UV_,GeV) >> ckm_ >> helicityConservation_ >> FFPvertex_ >> FFWvertex_ >> FFZvertex_ >> WWWvertex_ >> FFGvertex_ >> realMESpinCorrelations_ >> showerAlphaS_ >> power_ >> preqqbar_ >> preqg_ >> pregqbar_ >> prefactor_ >> b0_ >> iunit(LambdaQCD_,GeV) >> iunit( min_pT_, GeV ); } ClassDescription<MEPP2VVPowheg> MEPP2VVPowheg::initMEPP2VVPowheg; // Definition of the static class description member. 
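// -------------------------------------------------------------------------
// Editorial sketch (not part of the original Herwig sources or of this
// patch): the constructor above hard-codes b0_ = (11 - 2/3*5)/(4 pi) and
// LambdaQCD_ = 91.118 GeV * exp(-1/(2 b0_ * 0.118)).  These are just the
// one-loop relation alpha_s(mu) = 1/(2 b0 log(mu/Lambda)) with n_f = 5,
// inverted at mu = m_Z with alpha_s(m_Z) ~ 0.118.  The hypothetical helpers
// below only restate that relation in plain doubles to document the magic
// numbers; the names oneLoopAlphaS / lambdaQCDFromAlphaS do not exist in
// Herwig or ThePEG.
#include <cmath>
namespace {
  // One-loop running coupling alpha_s(mu) for a given Lambda_QCD (both in GeV).
  inline double oneLoopAlphaS(double muGeV, double lambdaGeV, double nf = 5.) {
    const double pi = 3.141592653589793;
    const double b0 = (11. - 2./3.*nf)/(4.*pi);
    return 1./(2.*b0*std::log(muGeV/lambdaGeV));
  }
  // Lambda_QCD reproducing a given alpha_s(m_Z), as used for LambdaQCD_ above.
  inline double lambdaQCDFromAlphaS(double mZGeV, double alphaSMZ, double nf = 5.) {
    const double pi = 3.141592653589793;
    const double b0 = (11. - 2./3.*nf)/(4.*pi);
    return mZGeV*std::exp(-1./(2.*b0*alphaSMZ));
  }
  // Consistency check of the hard-coded numbers:
  // oneLoopAlphaS(91.118, lambdaQCDFromAlphaS(91.118, 0.118)) == 0.118.
}
// -------------------------------------------------------------------------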
void MEPP2VVPowheg::Init() { static ClassDocumentation<MEPP2VVPowheg> documentation ("The MEPP2VVPowheg class implements the NLO matrix elements for the production of " "pairs of electroweak vector bosons.", "The calculation of $W^+W^-$, $W^\\pm Z^0$ and $Z^0Z^0$ production" " in hadron collisions at next-to-leading order in the POWHEG scheme" " is described in \\cite{Hamilton:2010mb}.", "\\bibitem{Hamilton:2010mb}\n" " K.~Hamilton,\n" "%``A positive-weight next-to-leading order simulation of weak boson pair\n" "%production,''\n" "JHEP {\\bf 1101} (2011) 009\n" "[arXiv:1009.5391 [hep-ph]].\n" "%%CITATION = JHEPA,1101,009;%%\n"); static Switch<MEPP2VVPowheg,unsigned int> interfaceContribution ("Contribution", "Which contributions to the cross section to include", &MEPP2VVPowheg::contrib_, 1, false, false); static SwitchOption interfaceContributionLeadingOrder (interfaceContribution, "LeadingOrder", "Just generate the leading order cross section", 0); static SwitchOption interfaceContributionPositiveNLO (interfaceContribution, "PositiveNLO", "Generate the positive contribution to the full NLO cross section", 1); static SwitchOption interfaceContributionNegativeNLO (interfaceContribution, "NegativeNLO", "Generate the negative contribution to the full NLO cross section", 2); static Switch<MEPP2VVPowheg,unsigned int> interfaceChannels ("Channels", "Which channels to include in the cross section", &MEPP2VVPowheg::channels_, 0, false, false); static SwitchOption interfaceChannelsAll (interfaceChannels, "All", "All channels required for the full NLO cross section: qqb, qg, gqb", 0); static SwitchOption interfaceChannelsAnnihilation (interfaceChannels, "Annihilation", "Only include the qqb annihilation channel, omitting qg and gqb channels", 1); static SwitchOption interfaceChannelsCompton (interfaceChannels, "Compton", "Only include the qg and gqb compton channels, omitting all qqb processes", 2); static Switch<MEPP2VVPowheg,unsigned int> interfaceNLOalphaSopt ("NLOalphaSopt", "An option allowing you to supply a fixed value of alpha_S " "through the FixedNLOalphaS interface.", &MEPP2VVPowheg::nlo_alphaS_opt_, 0, false, false); static SwitchOption interfaceNLOalphaSoptRunningAlphaS (interfaceNLOalphaSopt, "RunningAlphaS", "Use the usual running QCD coupling evaluated at scale mu_UV2()", 0); static SwitchOption interfaceNLOalphaSoptFixedAlphaS (interfaceNLOalphaSopt, "FixedAlphaS", "Use a constant QCD coupling for comparison/debugging purposes", 1); static Parameter<MEPP2VVPowheg,double> interfaceFixedNLOalphaS ("FixedNLOalphaS", "The value of alphaS to use for the nlo weight if nlo_alphaS_opt_=1", &MEPP2VVPowheg::fixed_alphaS_, 0.1180346226, 0., 1.0, false, false, Interface::limited); static Switch<MEPP2VVPowheg,unsigned int> interfaceremovebr ("removebr", "Whether to multiply the event weights by the MCFM branching ratios", &MEPP2VVPowheg::removebr_, 1, false, false); static SwitchOption interfaceProductionCrossSection (interfaceremovebr, "true", "Do not multiply in the branching ratios (default running)", 1); static SwitchOption interfaceIncludeBRs (interfaceremovebr, "false", "Multiply by MCFM branching ratios for comparison/debugging purposes", 0); static Switch<MEPP2VVPowheg,unsigned int> interfaceScaleOption ("ScaleOption", "Option for running / fixing EW and QCD factorization & renormalization scales", &MEPP2VVPowheg::scaleopt_, 1, false, false); static SwitchOption interfaceDynamic (interfaceScaleOption, "Dynamic", "QCD factorization & renormalization scales are sqr(pV1+pV2). 
" "EW scale is (mV1^2+mV2^2)/2 (similar to MCatNLO)", 1); static SwitchOption interfaceFixed (interfaceScaleOption, "Fixed", "QCD factorization fixed to value by FactorizationScaleValue." "EW and QCD renormalization scales fixed by RenormalizationScaleValue.", 2); static Parameter<MEPP2VVPowheg,Energy> interfaceFactorizationScaleValue ("FactorizationScaleValue", "Value to use for the QCD factorization scale if fixed scales" "have been requested with the ScaleOption interface.", &MEPP2VVPowheg::mu_F_, GeV, 100.0*GeV, 50.0*GeV, 500.0*GeV, true, false, Interface::limited); static Parameter<MEPP2VVPowheg,Energy> interfaceRenormalizationScaleValue ("RenormalizationScaleValue", "Value to use for the EW and QCD renormalization scales if fixed " "scales have been requested with the ScaleOption interface.", &MEPP2VVPowheg::mu_UV_, GeV, 100.0*GeV, 50.0*GeV, 500.0*GeV, true, false, Interface::limited); static Switch<MEPP2VVPowheg,bool> interfaceSpinCorrelations ("SpinCorrelations", "Flag to select leading order spin correlations or a " "calculation taking into account the real NLO effects", &MEPP2VVPowheg::realMESpinCorrelations_, 1, false, false); static SwitchOption interfaceSpinCorrelationsLeadingOrder (interfaceSpinCorrelations, "LeadingOrder", "Decay bosons using a leading order 2->2 calculation of the " "production spin density matrix", 0); static SwitchOption interfaceSpinCorrelationsRealNLO (interfaceSpinCorrelations, "RealNLO", "Decay bosons using a production spin density matrix which " "takes into account the effects of real radiation", 1); static Reference<MEPP2VVPowheg,ShowerAlpha> interfaceCoupling ("Coupling", "The object calculating the strong coupling constant", &MEPP2VVPowheg::showerAlphaS_, false, false, true, false, false); static Parameter<MEPP2VVPowheg,double> interfacePower ("Power", "The power for the sampling of the matrix elements", &MEPP2VVPowheg::power_, 2.0, 1.0, 10.0, false, false, Interface::limited); static Parameter<MEPP2VVPowheg,double> interfacePrefactorqqbar ("Prefactorqqbar", "The prefactor for the sampling of the q qbar channel", &MEPP2VVPowheg::preqqbar_, 5.0, 0.0, 1000.0, false, false, Interface::limited); static Parameter<MEPP2VVPowheg,double> interfacePrefactorqg ("Prefactorqg", "The prefactor for the sampling of the q g channel", &MEPP2VVPowheg::preqg_, 3.0, 0.0, 1000.0, false, false, Interface::limited); static Parameter<MEPP2VVPowheg,double> interfacePrefactorgqbar ("Prefactorgqbar", "The prefactor for the sampling of the g qbar channel", &MEPP2VVPowheg::pregqbar_, 3.0, 0.0, 1000.0, false, false, Interface::limited); static Parameter<MEPP2VVPowheg, Energy> interfacepTMin ("minPt", "The pT cut on hardest emision generation" "2*(1-Beta)*exp(-sqr(intrinsicpT/RMS))/sqr(RMS)", &MEPP2VVPowheg::min_pT_, GeV, 2.*GeV, ZERO, 100000.0*GeV, false, false, Interface::limited); } Energy2 MEPP2VVPowheg::scale() const { // N.B. This scale is the electroweak scale! // It is used in the evaluation of the LO code // in the MEPP2VV base class. This means it // should appear in the denominator of the // NLOweight here and all other LO parts like // the function for the lumi ratio (Lhat). It // should also be used for evaluating any EW // parameters / vertices in the numerator. // The scaleopt_ == 1 "running" option is // chosen to be like the MC@NLO one (it ought // to be more like sHat instead?). return scaleopt_ == 1 ? // 0.5*(meMomenta()[2].m2()+meMomenta()[3].m2()) : sqr(mu_UV_); sHat() : sqr(mu_UV_); } Energy2 MEPP2VVPowheg::mu_F2() const { return scaleopt_ == 1 ? 
// ((H_.k1r()).m2()+k1r_perp2_lab_+(H_.k2r()).m2()+k2r_perp2_lab_)/2. : sqr(mu_F_); sHat() : sqr(mu_F_); } Energy2 MEPP2VVPowheg::mu_UV2() const { return scaleopt_ == 1 ? // ((H_.k1r()).m2()+k1r_perp2_lab_+(H_.k2r()).m2()+k2r_perp2_lab_)/2. : sqr(mu_UV_); sHat() : sqr(mu_UV_); } void MEPP2VVPowheg::doinit() { MEPP2VV::doinit(); // get the vertices we need // get a pointer to the standard model object in the run static const tcHwSMPtr hwsm = dynamic_ptr_cast<tcHwSMPtr>(standardModel()); if (!hwsm) throw InitException() << "missing hwsm pointer in MEPP2VVPowheg::doinit()" << Exception::abortnow; // get pointers to all required Vertex objects FFPvertex_ = hwsm->vertexFFP(); FFZvertex_ = hwsm->vertexFFZ(); WWWvertex_ = hwsm->vertexWWW(); FFWvertex_ = hwsm->vertexFFW(); FFGvertex_ = hwsm->vertexFFG(); // get the ckm object Ptr<StandardCKM>::pointer theCKM=dynamic_ptr_cast<Ptr<StandardCKM>::pointer>(SM().CKM()); if(!theCKM) throw InitException() << "MEPP2VVPowheg::doinit() " << "the CKM object must be the Herwig one" << Exception::runerror; unsigned int ix,iy; // get the CKM matrix (unsquared for interference) vector< vector<Complex > > CKM(theCKM->getUnsquaredMatrix(SM().families())); for(ix=0;ix<3;++ix){for(iy=0;iy<3;++iy){ckm_[ix][iy]=CKM[ix][iy];}} // insert the different prefactors in the vector for easy look up prefactor_.push_back(preqqbar_); prefactor_.push_back(preqg_); prefactor_.push_back(pregqbar_); } int MEPP2VVPowheg::nDim() const { int output = MEPP2VV::nDim(); // See related comment in MEPP2VVPowheg::generateKinematics! if(contrib_>0) output += 2; return output; } bool MEPP2VVPowheg::generateKinematics(const double * r) { // N.B. A fix was made to make theta2 a radiative // variable in r4532. Originally theta2 was take to // be the azimuthal angle coming from the generation // of the Born kinematics inherited from MEPP2VV i.e. // before the change theta2 was a random number between // 0 and 2pi. On changing theta2 was set to be // theta2 = (*(r+3)) * 2.*Constants::pi; // and nDim returned if(contrib_>0) output += 3; // In the months following it was noticed that agreement // with MCFM was per mille at Tevatron energies but got // close to 1 percent for LHC energies (for all VV // processes). After searching back up the svn branch // running 2M events each time, the change was spotted // to occur on r4532. Changing: // if(contrib_>0) output += 3; // in ::nDim() and also, // xt = (*(r +nDim() -3)); // y = (*(r +nDim() -2)) * 2. - 1.; // theta2 = (*(r +nDim() -1)) * 2.*Constants::pi; // did not fix the problem. The following code gives the // same good level of agreement at LHC and TVT: double xt( -999.); double y( -999.); double theta2( -999.); if(contrib_>0) { // Generate the radiative integration variables: xt = (*(r +nDim() -2)); y = (*(r +nDim() -1)) * 2. 
- 1.; // KH 19th August - next line changed for phi in 0->pi not 0->2pi // theta2 = UseRandom::rnd() * 2.*Constants::pi; theta2 = UseRandom::rnd() *Constants::pi; } // Continue with lo matrix element code: bool output(MEPP2VV::generateKinematics(r)); // Work out the kinematics for the leading order / virtual process // and also get the leading order luminosity function: getKinematics(xt,y,theta2); return output; } double MEPP2VVPowheg::me2() const { double output(0.0); useMe(); output = MEPP2VV::me2(); double mcfm_brs(1.); if(!removebr_) { switch(MEPP2VV::process()) { case 1: // W+(->e+,nu_e) W-(->e-,nu_ebar) (MCFM: 61 [nproc]) mcfm_brs *= 0.109338816; mcfm_brs *= 0.109338816; break; case 2: // W+/-(mu+,nu_mu / mu-,nu_mubar) Z(nu_e,nu_ebar) // (MCFM: 72+77 [nproc]) mcfm_brs *= 0.109338816; mcfm_brs *= 0.06839002; break; case 3: // Z(mu-,mu+) Z(e-,e+) (MCFM: 86 [nproc]) mcfm_brs *= 0.034616433; mcfm_brs *= 0.034616433; mcfm_brs *= 2.; // as identical particle factor 1/2 is now obsolete. break; case 4: // W+(mu+,nu_mu) Z(nu_e,nu_ebar) (MCFM: 72 [nproc]) mcfm_brs *= 0.109338816; mcfm_brs *= 0.06839002; break; case 5: // W-(mu-,nu_mubar) Z(nu_e,nu_ebar) (MCFM: 77 [nproc]) mcfm_brs *= 0.109338816; mcfm_brs *= 0.06839002; break; } } // Store the value of the leading order squared matrix element: lo_me2_ = output; output *= NLOweight(); output *= mcfm_brs; return output; } void MEPP2VVPowheg::getKinematics(double xt, double y, double theta2) { // In this member we want to get the lo_lumi_ as this is a // common denominator in the NLO weight. We want also the // bornVVKinematics object and all of the realVVKinematics // objects needed for the NLO weight. // Check if the W- is first in W+W- production. Already confirmed // mePartonData()[0] is a quark, and mePartonData()[1] is an antiquark. // We assume mePartonData[2] and mePartonData[3] are, respectively, // W+/- Z, W+/- W-/+, or Z Z. bool wminus_first(false); if((mePartonData()[2]->id()==-24)&&(mePartonData()[3]->id()==24)) wminus_first=true; // Now get all data on the LO process needed for the NLO computation: // The +z hadron in the lab: hadron_A_=dynamic_ptr_cast<Ptr<BeamParticleData>::transient_const_pointer> (lastParticles().first->dataPtr()); // The -z hadron in the lab: hadron_B_=dynamic_ptr_cast<Ptr<BeamParticleData>::transient_const_pointer> (lastParticles().second->dataPtr()); // Leading order momentum fractions: double xa(lastX1()); // The +z momentum fraction in the lab. double xb(lastX2()); // The -z momentum fraction in the lab. // Particle data for incoming +z & -z QCD particles respectively: ab_ = lastPartons().first ->dataPtr(); // The +z momentum parton in the lab. bb_ = lastPartons().second->dataPtr(); // The -z momentum parton in the lab. // We checked TVT & LHC for all VV channels with 10K events: // lastParticles().first ->momentum().z() is always positive // lastParticles().second->momentum().z() is always negative // lastParticles().first ->momentum().z()*xa=lastPartons().first ->momentum().z() 1 in 10^6 // lastParticles().second->momentum().z()*xb=lastPartons().second->momentum().z() 1 in 10^6 // Set the quark and antiquark data pointers. quark_ = mePartonData()[0]; antiquark_ = mePartonData()[1]; if(quark_->id()<0) swap(quark_,antiquark_); // Now in _our_ calculation we basically define the +z axis as being // given by the direction of the incoming quark for q+qb & q+g processes // and the incoming gluon for g+qbar processes. 
So now we might need to // flip the values of hadron_A_, hadron_B_, ab_, bb_, xa, xb accordingly: if(ab_->id()!=quark_->id()) { swap(hadron_A_,hadron_B_); swap(ab_,bb_); swap(xa,xb); } // So hadron_A_ is the thing containing a quark (ab_) with momentum frac xa, // hadron_B_ is the thing containing an antiquark (bb_) with momentum frac xb. // Now get the partonic flux for the Born process: lo_lumi_ = hadron_A_->pdf()->xfx(hadron_A_,ab_,scale(),xa)/xa * hadron_B_->pdf()->xfx(hadron_B_,bb_,scale(),xb)/xb; // For W+W- events make sure k1 corresponds to the W+ momentum: if(MEPP2VV::process()==1&&wminus_first) swap(meMomenta()[2],meMomenta()[3]); // Create the object containing all 2->2 __kinematic__ information: B_ = bornVVKinematics(meMomenta(),xa,xb); // We checked that meMomenta()[0] (quark) is in the +z direction and meMomenta()[1] // is in the -z direction (antiquark). // Revert momentum swap in case meMomenta and mePartonData correlation // needs preserving for other things. if(MEPP2VV::process()==1&&wminus_first) swap(meMomenta()[2],meMomenta()[3]); // Check the Born kinematics objects is internally consistent: // B_.sanityCheck(); // If we are going beyond leading order then lets calculate all of // the necessary real emission kinematics. if(contrib_>0) { // Soft limit of the 2->3 real emission kinematics: S_ = realVVKinematics(B_, 1., y, theta2); // Soft-collinear limit of the 2->3 kinematics (emission in +z direction): SCp_ = realVVKinematics(B_, 1., 1., theta2); // Soft-collinear limit of the 2->3 kinematics (emission in -z direction): SCm_ = realVVKinematics(B_, 1.,-1., theta2); // Collinear limit of the 2->3 kinematics (emission in +z direction): Cp_ = realVVKinematics(B_, xt, 1., theta2); // Collinear limit of the 2->3 kinematics (emission in -z direction): Cm_ = realVVKinematics(B_, xt,-1., theta2); // The resolved 2->3 real emission kinematics: H_ = realVVKinematics(B_, xt, y, theta2); // Borrowed from VVhardGenerator (lab momenta of k1,k2): Energy pT(sqrt(H_.pT2_in_lab())); LorentzRotation yzRotation; yzRotation.setRotateX(-atan2(pT/GeV,sqrt(B_.sb())/GeV)); LorentzRotation boostFrompTisZero; boostFrompTisZero.setBoostY(-pT/sqrt(B_.sb()+pT*pT)); LorentzRotation boostFromYisZero; boostFromYisZero.setBoostZ(tanh(B_.Yb())); k1r_perp2_lab_ = (boostFromYisZero*boostFrompTisZero*yzRotation*(H_.k1r())).perp2(); k2r_perp2_lab_ = (boostFromYisZero*boostFrompTisZero*yzRotation*(H_.k2r())).perp2(); // Check all the real kinematics objects are internally consistent: // S_.sanityCheck(); // SCp_.sanityCheck(); // SCm_.sanityCheck(); // Cp_.sanityCheck(); // Cm_.sanityCheck(); // H_.sanityCheck(); } return; } double MEPP2VVPowheg::NLOweight() const { // If only leading order is required return 1: if(contrib_==0) return lo_me()/lo_me2_; // Calculate alpha_S and alpha_S/(2*pi). alphaS_ = nlo_alphaS_opt_==1 ? fixed_alphaS_ : SM().alphaS(mu_UV2()); double alsOn2pi(alphaS_/2./pi); // Particle data objects for the new plus and minus colliding partons. tcPDPtr gluon; gluon = getParticleData(ParticleID::g); // Get the all couplings. 
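  // Sketch of the conventions the next few assignments implement (this
  // comment is illustrative only): with e^2 = 4*pi*alphaEM and
  // sW^2 = sin^2(thetaW), cW^2 = 1 - sW^2,
  //   gW    = e/sW
  //   g_L^f = gW/(2*cW) * 2*(T3_f - Q_f*sW^2) ,  g_R^f = -gW/(2*cW) * 2*Q_f*sW^2
  //   eZ    = gW*cW  (WWZ coupling),  eZ2 = eZ^2 ,
  // so e.g. guL = gW/(2*cW)*(1-4/3*sW^2) and gdR = gW/(2*cW)*(+2/3*sW^2).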
  gW_ = sqrt(4.0*pi*SM().alphaEM(scale())/SM().sin2ThetaW());
  sin2ThetaW_ = SM().sin2ThetaW();
  double cosThetaW(sqrt(1.-sin2ThetaW_));
  guL_ = gW_/2./cosThetaW*( 1.-4./3.*sin2ThetaW_);
  gdL_ = gW_/2./cosThetaW*(-1.+2./3.*sin2ThetaW_);
  guR_ = gW_/2./cosThetaW*(   -4./3.*sin2ThetaW_);
  gdR_ = gW_/2./cosThetaW*(   +2./3.*sin2ThetaW_);
  eZ_  = gW_*cosThetaW;
  eZ2_ = sqr(eZ_);

  // MCFM has gwsq = 0.4389585130009 -> gw = 0.662539442600115
  // Here we have gW_ = 0.662888
  // MCFM has xw = 0.22224653300000 -> sqrt(xw) = 0.471430306
  // Here we have 0.222247
  // MCFM has esq = 0.097557007645279 -> e = 0.31234117187024679
  // Here we have 4.0*pi*SM().alphaEM(sqr(100.*GeV)) = 0.0976596

  // If the process is W-Z instead of W+Z we must transform these
  // couplings as follows, according to NPB 383(1992)3-44 Eq.3.23
  if(mePartonData()[2]->id()==-24&&mePartonData()[3]->id()==23) {
    swap(guL_,gdL_);
    eZ_ *= -1.;
  }

  // Get the CKM entry. Note that this code was debugged
  // considerably; the call to CKM(particle,particle)
  // did not appear to work, so we extract the elements
  // as follows below. The right numbers now appear
  // to be associated with the right quarks.
  double Kij(-999.);
  // W+Z / W-Z
  if(abs(mePartonData()[2]->id())==24&&mePartonData()[3]->id()==23) {
    int up_id(-999),dn_id(-999);
    if(abs(quark_->id())%2==0&&abs(antiquark_->id())%2==1) {
      up_id = abs(quark_->id());
      dn_id = abs(antiquark_->id());
    }
    else if(abs(quark_->id())%2==1&&abs(antiquark_->id())%2==0) {
      up_id = abs(antiquark_->id());
      dn_id = abs(quark_->id());
    }
    else {
      cout << "MEPP2VVPowheg:" << endl;
      cout << "WZ needs an up and a down type quark as incoming!" << endl;
    }
    up_id /= 2;
    up_id -= 1;
    dn_id -= 1;
    dn_id /= 2;
    Kij = sqrt(SM().CKM(up_id,dn_id));
  }
  // W+W-
  else if(abs(mePartonData()[2]->id())==24&&abs(mePartonData()[3]->id())==24) {
    if(abs(quark_->id())%2==0&&abs(antiquark_->id())%2==0) {
      int up_ida(abs(quark_->id())/2-1);
      int up_idb(abs(antiquark_->id())/2-1);
      Kij = sqrt(std::norm( CKM(up_ida,0)*CKM(up_idb,0)
                          + CKM(up_ida,1)*CKM(up_idb,1)
                          + CKM(up_ida,2)*CKM(up_idb,2)));
    }
    else if(abs(quark_->id())%2==1&&abs(antiquark_->id())%2==1) {
      int dn_ida((abs(quark_->id())-1)/2);
      int dn_idb((abs(antiquark_->id())-1)/2);
      Kij = sqrt(std::norm( CKM(0,dn_ida)*CKM(0,dn_idb)
                          + CKM(1,dn_ida)*CKM(1,dn_idb)
                          + CKM(2,dn_ida)*CKM(2,dn_idb)));
    }
    else {
      cout << "MEPP2VVPowheg:" << endl;
      cout << "WW needs 2 down-type / 2 up-type!" << endl;
    }
  }
  // ZZ
  else if(mePartonData()[2]->id()==23&&mePartonData()[3]->id()==23) {
    Kij = 2.*sqrt(2.)/gW_;
  }
  else {
    cout << "MEPP2VVPowheg: incompatible final state particles!" << endl;
  }
  Fij2_ = sqr(gW_/2./sqrt(2.)*Kij);

  // Get the leading order matrix element (this is necessary!)
  M_Born_ = M_Born_WZ(B_);

//   // Get the regular part of the virtual correction (only needed for sanityCheck()!)
//   M_V_regular_ = M_V_regular(S_);
//   // Get the q + qbar real emission matrix element (only needed for sanityCheck()!)
// t_u_M_R_qqb_ = t_u_M_R_qqb(H_); // Calculate the integrand double wgt(0.); double wqqb(0.); double wgqb(0.); double wqg(0.); double wqqbvirt(0.); double wqqbcollin(0.); double wqqbreal(0.); double wqgcollin(0.); double wqgreal(0.); double wgqbcollin(0.); double wgqbreal(0.); if(channels_==0||channels_==1) { // q+qb wqqbvirt = Vtilde_universal(S_) + M_V_regular(S_)/lo_me2_; wqqbcollin = alsOn2pi*( Ctilde_Ltilde_qq_on_x(quark_,antiquark_,Cp_) + Ctilde_Ltilde_qq_on_x(quark_,antiquark_,Cm_) ); wqqbreal = alsOn2pi*Rtilde_Ltilde_qqb_on_x(quark_,antiquark_); wqqb = wqqbvirt + wqqbcollin + wqqbreal; } if(channels_==0||channels_==2) { // q+g wqgcollin = alsOn2pi*Ctilde_Ltilde_gq_on_x(quark_,gluon,Cm_); wqgreal = alsOn2pi*Rtilde_Ltilde_qg_on_x(quark_,gluon); wqg = wqgreal + wqgcollin; // g+qb wgqbcollin = alsOn2pi*Ctilde_Ltilde_gq_on_x(gluon,antiquark_,Cp_); wgqbreal = alsOn2pi*Rtilde_Ltilde_gqb_on_x(gluon,antiquark_); wgqb = wgqbreal+wgqbcollin; } // total contribution wgt = 1.+(wqqb+wgqb+wqg); // If restricting to qg, gqb channels then subtract the LO contribution: if(channels_==2) wgt -= 1.; - if(isnan(wgt)||isinf(wgt)) { + if(!isfinite(wgt)) { cout << "MEPP2VVPowheg:: NLO weight " << "is bad: wgt = " << wgt << endl; cout << "MEPP2VVPowheg sanityCheck invoked!" << endl; cout << ab_->PDGName() << ", " << bb_->PDGName() << ", " << mePartonData()[2]->PDGName() << ", " << mePartonData()[3]->PDGName() << endl; cout << "lo_me2_ - M_Born_ (rel) = " << lo_me2_-M_Born_ << " (" << (lo_me2_-M_Born_)/M_Born_ << ")\n"; cout << "lo_me2_, M_Born_ " << lo_me2_ << ", " << M_Born_ << endl; cout << "xr = " << H_.xr() << " 1-xr = " << 1.-H_.xr() << " y = " << H_.y() << endl; cout << "tkr = " << H_.tkr()/GeV2 << " ukr = " << H_.ukr()/GeV2 << endl; cout << "root(sb) = " << sqrt(B_.sb())/GeV << endl; cout << "sb+tb+ub = " << B_.sb()/GeV2 << " + " << B_.tb()/GeV2 << " + " << B_.ub()/GeV2 << endl; cout << "sqrt(k12) " << sqrt(H_.k12r())/GeV << endl; cout << "sqrt(k22) " << sqrt(H_.k22r())/GeV << endl; cout << "sqr(Kij) " << Kij*Kij << endl; cout << "wqqbvirt " << wqqbvirt << endl; cout << "wqqbcollin " << wqqbcollin << endl; cout << "wqqbreal " << wqqbreal << endl; cout << "wqqb " << wqqb << endl; cout << "wqgcollin " << wqgcollin << endl; cout << "wqgreal " << wqgreal << endl; cout << "wqg " << wqg << endl; cout << "wgqbcollin " << wgqbcollin << endl; cout << "wgqbreal " << wgqbreal << endl; cout << "wgqb " << wgqb << endl; cout << "wgt " << wgt << endl; throw Exception() << "MEPP2VVPowheg:: NLO weight " << "is bad: " << wgt << Exception::eventerror; } return contrib_==1 ? max(0.,wgt) : max(0.,-wgt); } double MEPP2VVPowheg::Lhat_ab(tcPDPtr a, tcPDPtr b, realVVKinematics Kinematics) const { if(!(abs(a->id())<=6||a->id()==21)||!(abs(b->id())<=6||b->id()==21)) cout << "MEPP2VVPowheg::Lhat_ab: Error," << "particle a = " << a->PDGName() << ", " << "particle b = " << b->PDGName() << endl; double nlo_lumi(-999.); double x1(Kinematics.x1r()),x2(Kinematics.x2r()); nlo_lumi = (hadron_A_->pdf()->xfx(hadron_A_,a,mu_F2(),x1)/x1) * (hadron_B_->pdf()->xfx(hadron_B_,b,mu_F2(),x2)/x2); return nlo_lumi / lo_lumi_; } double MEPP2VVPowheg::Vtilde_universal(realVVKinematics S) const { double xbar_y = S.xbar(); double y = S.y(); double eta1b(S.bornVariables().eta1b()); double eta2b(S.bornVariables().eta2b()); Energy2 sb(S.s2r()); return alphaS_/2./pi*CF_ * ( log(sb/mu_F2()) * (3. + 4.*log(eta1b)+4.*log(eta2b)) + 8.*sqr(log(eta1b)) +8.*sqr(log(eta2b)) - 2.*sqr(pi)/3. 
) + alphaS_/2./pi*CF_ * ( 8./(1.+y)*log(sqrt(1.-xbar_y)/eta2b) + 8./(1.-y)*log(sqrt(1.-xbar_y)/eta1b) ); } double MEPP2VVPowheg::Ctilde_Ltilde_qq_on_x(tcPDPtr a, tcPDPtr b, realVVKinematics C) const { if(C.y()!= 1.&&C.y()!=-1.) cout << "\nCtilde_qq::y value not allowed."; if(C.y()== 1.&&!(abs(a->id())>0&&abs(a->id())<7)) cout << "\nCtilde_qq::for Cqq^plus a must be a quark! id = " << a->id() << "\n"; if(C.y()==-1.&&!(abs(b->id())>0&&abs(b->id())<7)) cout << "\nCtilde_qq::for Cqq^minus b must be a quark! id = " << b->id() << "\n"; double xt = C.xt(); double x = C.xr(); double etab = C.y() == 1. ? C.bornVariables().eta1b() : C.bornVariables().eta2b() ; Energy2 sb(C.s2r()); if(fabs(1.-xt)<=tiny||fabs(1.-H_.xr())<=tiny) return 0.; return ( ( (1./(1.-xt))*log(sb/mu_F2()/x)+4.*log(etab)/(1.-xt) + 2.*log(1.-xt)/(1.-xt) )*CF_*(1.+sqr(x)) + sqr(etab)*CF_*(1.-x) )*Lhat_ab(a,b,C) / x - ( ( (1./(1.-xt))*log(sb/mu_F2() )+4.*log(etab)/(1.-xt) + 2.*log(1.-xt)/(1.-xt) )*CF_*2. )*Lhat_ab(a,b,S_); } double MEPP2VVPowheg::Ctilde_Ltilde_gq_on_x(tcPDPtr a, tcPDPtr b, realVVKinematics C) const { if(C.y()!= 1.&&C.y()!=-1.) cout << "\nCtilde_gq::y value not allowed."; if(C.y()== 1.&&a->id()!=21) cout << "\nCtilde_gq::for Cgq^plus a must be a gluon! id = " << a->id() << "\n"; if(C.y()==-1.&&b->id()!=21) cout << "\nCtilde_gq::for Cgq^minus b must be a gluon! id = " << b->id() << "\n"; double xt = C.xt(); double x = C.xr(); double etab = C.y() == 1. ? C.bornVariables().eta1b() : C.bornVariables().eta2b() ; Energy2 sb(C.s2r()); return ( ( (1./(1.-xt))*log(sb/mu_F2()/x)+4.*log(etab)/(1.-xt) + 2.*log(1.-xt)/(1.-xt) )*(1.-x)*TR_*(sqr(x)+sqr(1.-x)) + sqr(etab)*TR_*2.*x*(1.-x) )*Lhat_ab(a,b,C) / x; } double MEPP2VVPowheg::Rtilde_Ltilde_qqb_on_x(tcPDPtr a , tcPDPtr b) const { if(!(abs(a->id())<=6||a->id()==21)||!(abs(b->id())<=6||b->id()==21)) cout << "MEPP2VVPowheg::Rtilde_Ltilde_qqb_on_x: Error," << "particle a = " << a->PDGName() << ", " << "particle b = " << b->PDGName() << endl; double xt(H_.xt()); double y(H_.y()); Energy2 s(H_.sr()); Energy2 sCp(Cp_.sr()); Energy2 sCm(Cm_.sr()); Energy2 t_u_M_R_qqb_H (t_u_M_R_qqb(H_ )); Energy2 t_u_M_R_qqb_Cp(t_u_M_R_qqb(Cp_)); Energy2 t_u_M_R_qqb_Cm(t_u_M_R_qqb(Cm_)); // Energy2 t_u_M_R_qqb_H (t_u_M_R_qqb_hel_amp(H_)); // Energy2 t_u_M_R_qqb_Cp(8.*pi*alphaS_*Cp_.sr()/Cp_.xr() // *CF_*(1.+sqr(Cp_.xr()))*lo_me2_); // Energy2 t_u_M_R_qqb_Cm(8.*pi*alphaS_*Cm_.sr()/Cm_.xr() // *CF_*(1.+sqr(Cm_.xr()))*lo_me2_); int config(0); if(fabs(1.-xt)<=tiny||fabs(1.-H_.xr())<=tiny) return 0.; if(fabs(1.-y )<=tiny) { t_u_M_R_qqb_H = t_u_M_R_qqb_Cp ; config = 1; } if(fabs(1.+y )<=tiny) { t_u_M_R_qqb_H = t_u_M_R_qqb_Cm ; config = -1; } if(fabs(H_.tkr()/s)<=tiny) { t_u_M_R_qqb_H = t_u_M_R_qqb_Cp ; config = 1; } if(fabs(H_.ukr()/s)<=tiny) { t_u_M_R_qqb_H = t_u_M_R_qqb_Cm ; config = -1; } if(config== 0) return ( ( (t_u_M_R_qqb_H*Lhat_ab(a,b,H_)/s - t_u_M_R_qqb_Cp*Lhat_ab(a,b,Cp_)/sCp) )*2./(1.-y)/(1.-xt) + ( (t_u_M_R_qqb_H*Lhat_ab(a,b,H_)/s - t_u_M_R_qqb_Cm*Lhat_ab(a,b,Cm_)/sCm) )*2./(1.+y)/(1.-xt) ) / lo_me2_ / 8. / pi / alphaS_; else if(config== 1) return ( (t_u_M_R_qqb_H*Lhat_ab(a,b,H_)/s - t_u_M_R_qqb_Cm*Lhat_ab(a,b,Cm_)/sCm) )*2./(1.+y)/(1.-xt) / lo_me2_ / 8. / pi / alphaS_; else if(config==-1) return ( (t_u_M_R_qqb_H*Lhat_ab(a,b,H_)/s - t_u_M_R_qqb_Cp*Lhat_ab(a,b,Cp_)/sCp) )*2./(1.-y)/(1.-xt) / lo_me2_ / 8. / pi / alphaS_; else throw Exception() << "MEPP2VVPowheg::Rtilde_Ltilde_qqb_on_x\n" << "The configuration is not identified as hard / soft / fwd collinear or bwd collinear." 
<< "config = " << config << "\n" << "xt = " << xt << " 1.-xt = " << 1.-xt << "\n" << "y = " << y << " 1.-y = " << 1.-y << "\n" << Exception::eventerror; } double MEPP2VVPowheg::Rtilde_Ltilde_gqb_on_x(tcPDPtr a , tcPDPtr b) const { if(!(abs(a->id())<=6||a->id()==21)||!(abs(b->id())<=6||b->id()==21)) cout << "MEPP2VVPowheg::Rtilde_Ltilde_gqb_on_x: Error," << "particle a = " << a->PDGName() << ", " << "particle b = " << b->PDGName() << endl; double xt(H_.xt()); double y(H_.y()); Energy2 s(H_.sr()); Energy2 sCp(Cp_.sr()); Energy2 sCm(Cm_.sr()); Energy2 t_u_M_R_gqb_H (t_u_M_R_gqb(H_ )); Energy2 t_u_M_R_gqb_Cp(t_u_M_R_gqb(Cp_)); Energy2 t_u_M_R_gqb_Cm(t_u_M_R_gqb(Cm_)); // Energy2 t_u_M_R_gqb_H (t_u_M_R_gqb_hel_amp(H_)); // Energy2 t_u_M_R_gqb_Cp(8.*pi*alphaS_*Cp_.sr()/Cp_.xr()*(1.-Cp_.xr()) // *TR_*(sqr(Cp_.xr())+sqr(1.-Cp_.xr()))*lo_me2_); // Energy2 t_u_M_R_gqb_Cm(t_u_M_R_gqb(Cm_)); // // Energy2 t_u_M_R_gqb_Cm(t_u_M_R_gqb_hel_amp(Cm_)); int config(0); if(fabs(1.-xt)<=tiny||fabs(1.-H_.xr())<=tiny) return 0.; if(fabs(1.-y )<=tiny) { t_u_M_R_gqb_H = t_u_M_R_gqb_Cp ; config = 1; } if(fabs(1.+y )<=tiny) { t_u_M_R_gqb_H = t_u_M_R_gqb_Cm ; config = -1; } if(fabs(H_.tkr()/s)<=tiny) { t_u_M_R_gqb_H = t_u_M_R_gqb_Cp ; config = 1; } if(fabs(H_.ukr()/s)<=tiny) { t_u_M_R_gqb_H = t_u_M_R_gqb_Cm ; config = -1; } if(config== 0) return ( ( (t_u_M_R_gqb_H*Lhat_ab(a,b,H_)/s - t_u_M_R_gqb_Cp*Lhat_ab(a,b,Cp_)/sCp) )*2./(1.-y)/(1.-xt) + ( (t_u_M_R_gqb_H*Lhat_ab(a,b,H_)/s - t_u_M_R_gqb_Cm*Lhat_ab(a,b,Cm_)/sCm) )*2./(1.+y)/(1.-xt) ) / lo_me2_ / 8. / pi / alphaS_; else if(config== 1) return ( (t_u_M_R_gqb_H*Lhat_ab(a,b,H_)/s - t_u_M_R_gqb_Cm*Lhat_ab(a,b,Cm_)/sCm) )*2./(1.+y)/(1.-xt) / lo_me2_ / 8. / pi / alphaS_; else if(config==-1) return ( (t_u_M_R_gqb_H*Lhat_ab(a,b,H_)/s - t_u_M_R_gqb_Cp*Lhat_ab(a,b,Cp_)/sCp) )*2./(1.-y)/(1.-xt) / lo_me2_ / 8. / pi / alphaS_; else throw Exception() << "MEPP2VVPowheg::Rtilde_Ltilde_gqb_on_x\n" << "The configuration is not identified as hard / soft / fwd collinear or bwd collinear." << "config = " << config << "\n" << "xt = " << xt << " 1.-xt = " << 1.-xt << "\n" << "y = " << y << " 1.-y = " << 1.-y << "\n" << Exception::eventerror; } double MEPP2VVPowheg::Rtilde_Ltilde_qg_on_x(tcPDPtr a , tcPDPtr b) const { if(!(abs(a->id())<=6||a->id()==21)||!(abs(b->id())<=6||b->id()==21)) cout << "MEPP2VVPowheg::Rtilde_Ltilde_qg_on_x: Error," << "particle a = " << a->PDGName() << ", " << "particle b = " << b->PDGName() << endl; double xt(H_.xt()); double y(H_.y()); Energy2 s(H_.sr()); Energy2 sCp(Cp_.sr()); Energy2 sCm(Cm_.sr()); Energy2 t_u_M_R_qg_H (t_u_M_R_qg(H_ )); Energy2 t_u_M_R_qg_Cp(t_u_M_R_qg(Cp_)); Energy2 t_u_M_R_qg_Cm(t_u_M_R_qg(Cm_)); // Energy2 t_u_M_R_qg_H (t_u_M_R_qg_hel_amp(H_)); // Energy2 t_u_M_R_qg_Cp(t_u_M_R_qg(Cp_)); // // Energy2 t_u_M_R_qg_Cp(t_u_M_R_qg_hel_amp(Cp_)); // Energy2 t_u_M_R_qg_Cm(8.*pi*alphaS_*Cm_.sr()/Cm_.xr()*(1.-Cm_.xr()) // *TR_*(sqr(Cm_.xr())+sqr(1.-Cm_.xr()))*lo_me2_); int config(0); if(fabs(1.-xt)<=tiny||fabs(1.-H_.xr())<=tiny) return 0.; if(fabs(1.-y )<=tiny) { t_u_M_R_qg_H = t_u_M_R_qg_Cp ; config = 1; } if(fabs(1.+y )<=tiny) { t_u_M_R_qg_H = t_u_M_R_qg_Cm ; config = -1; } if(fabs(H_.tkr()/s)<=tiny) { t_u_M_R_qg_H = t_u_M_R_qg_Cp ; config = 1; } if(fabs(H_.ukr()/s)<=tiny) { t_u_M_R_qg_H = t_u_M_R_qg_Cm ; config = -1; } if(config== 0) return ( ( (t_u_M_R_qg_H*Lhat_ab(a,b,H_)/s - t_u_M_R_qg_Cp*Lhat_ab(a,b,Cp_)/sCp) )*2./(1.-y)/(1.-xt) + ( (t_u_M_R_qg_H*Lhat_ab(a,b,H_)/s - t_u_M_R_qg_Cm*Lhat_ab(a,b,Cm_)/sCm) )*2./(1.+y)/(1.-xt) ) / lo_me2_ / 8. 
/ pi / alphaS_; else if(config== 1) return ( (t_u_M_R_qg_H*Lhat_ab(a,b,H_)/s - t_u_M_R_qg_Cm*Lhat_ab(a,b,Cm_)/sCm) )*2./(1.+y)/(1.-xt) / lo_me2_ / 8. / pi / alphaS_; else if(config==-1) return ( (t_u_M_R_qg_H*Lhat_ab(a,b,H_)/s - t_u_M_R_qg_Cp*Lhat_ab(a,b,Cp_)/sCp) )*2./(1.-y)/(1.-xt) / lo_me2_ / 8. / pi / alphaS_; else throw Exception() << "MEPP2VVPowheg::Rtilde_Ltilde_qg_on_x\n" << "The configuration is not identified as hard / soft / fwd collinear or bwd collinear." << "config = " << config << "\n" << "xt = " << xt << " 1.-xt = " << 1.-xt << "\n" << "y = " << y << " 1.-y = " << 1.-y << "\n" << Exception::eventerror; } /***************************************************************************/ // The following three functions are identically \tilde{I}_{4,t}, // \tilde{I}_{3,WZ} and \tilde{I}_{3,W} given in Eqs. B.8,B.9,B.10 // of NPB 383(1992)3-44, respectively. They are related to / derived // from the loop integrals in Eqs. A.3, A.5 and A.8 of the same paper. InvEnergy4 TildeI4t(Energy2 s, Energy2 t, Energy2 mW2, Energy2 mZ2); InvEnergy2 TildeI3WZ(Energy2 s, Energy2 mW2, Energy2 mZ2, double beta); InvEnergy2 TildeI3W(Energy2 s, Energy2 t, Energy2 mW2); /***************************************************************************/ // The following six functions are identically I_{dd}^{(1)}, I_{ud}^{(1)}, // I_{uu}^{(1)}, F_{u}^{(1)}, F_{d}^{(1)}, H^{(1)} from Eqs. B.4, B.5, B.3, // B.3, B.6, B.7 of NPB 383(1992)3-44, respectively. They make up the // one-loop matrix element. Ixx functions correspond to the graphs // with no TGC, Fx functions are due to non-TGC graphs interfering // with TGC graphs, while the H function is due purely to TGC graphs. double Idd1(Energy2 s,Energy2 t,Energy2 u,Energy2 mW2,Energy2 mZ2,double beta); double Iud1(Energy2 s,Energy2 t,Energy2 u,Energy2 mW2,Energy2 mZ2,double beta); double Iuu1(Energy2 s,Energy2 t,Energy2 u,Energy2 mW2,Energy2 mZ2,double beta); Energy2 Fu1(Energy2 s,Energy2 t,Energy2 u,Energy2 mW2,Energy2 mZ2,double beta); Energy2 Fd1(Energy2 s,Energy2 t,Energy2 u,Energy2 mW2,Energy2 mZ2,double beta); Energy4 H1 (Energy2 s,Energy2 t,Energy2 u,Energy2 mW2,Energy2 mZ2); /***************************************************************************/ // M_V_Regular is the regular part of the one-loop matrix element // exactly as defined in Eqs. B.1 and B.2 of of NPB 383(1992)3-44. double MEPP2VVPowheg::M_V_regular(realVVKinematics S) const { Energy2 s(S.bornVariables().sb()); Energy2 t(S.bornVariables().tb()); Energy2 u(S.bornVariables().ub()); Energy2 mW2(S.k12r()); // N.B. the diboson masses are preserved in getting Energy2 mZ2(S.k22r()); // the 2->2 from the 2->3 kinematics. double beta(S.betaxr()); // N.B. for x=1 \beta_x=\beta in NPB 383(1992)3-44. double cosThetaW(sqrt(1.-sin2ThetaW_)); double eZ2(eZ2_); double eZ(eZ_); double gdL(gdL_); double guL(guL_); double gdR(gdR_); double guR(guR_); // W+W- if(abs(mePartonData()[2]->id())==24&&abs(mePartonData()[3]->id())==24) { double e2(sqr(gW_)*sin2ThetaW_); if(abs(quark_->id())%2==0&&abs(antiquark_->id())%2==0) { // N.B. OLD eZ used to calculate new eZ2 *then* new eZ is set! 
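  // Rough guide to the redefinitions below (a sketch, nothing here changes
  // the algebra): for q qbar -> W+ W- the s-channel gamma/Z exchange is
  // absorbed into effective eZ and eZ2 couplings multiplying the 1/(s-mW2)
  // factors of the WZ-style formulae, while the charged-current t-channel
  // coupling gW/sqrt(2) is put into gdL (up-type incoming quarks) or guL
  // (down-type incoming quarks), the other being set to zero.  For flavour
  // off-diagonal q qbar' there is no s-channel piece, hence eZ = eZ2 = 0.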
if(quark_->id()==-antiquark_->id()) { eZ2 = 1./2.*sqr(s-mW2)/Fij2_ * (e2*e2/s/s*(sqr( 2./3.+eZ*(guL+guR)/2./e2*s/(s-mW2/sqr(cosThetaW))) +sqr( eZ*(guL-guR)/2./e2*s/(s-mW2/sqr(cosThetaW)))) ); eZ = -1./2./Fij2_/(gW_*gW_/4./sqrt(Fij2_))*(s-mW2) * (gW_*gW_*e2/4./s *( 2./3.+2.*eZ*guL/2./e2*s/(s-mW2/sqr(cosThetaW)))); } else { eZ2 =0.; eZ =0.; } gdL = gW_/sqrt(2.); guL = 0.; } else if(abs(quark_->id())%2==1&&abs(antiquark_->id())%2==1) { // N.B. OLD eZ used to calculate new eZ2 *then* new eZ is set! if(quark_->id()==-antiquark_->id()) { eZ2 = 1./2.*sqr(s-mW2)/Fij2_ * (e2*e2/s/s*(sqr(-1./3.+eZ*(gdL+gdR)/2./e2*s/(s-mW2/sqr(cosThetaW))) +sqr( eZ*(gdL-gdR)/2./e2*s/(s-mW2/sqr(cosThetaW)))) ); eZ = -1./2./Fij2_/(gW_*gW_/4./sqrt(Fij2_))*(s-mW2) * (gW_*gW_*e2/4./s *(-1./3.+2.*eZ*gdL/2./e2*s/(s-mW2/sqr(cosThetaW)))); } else { eZ2 =0.; eZ =0.; } guL = gW_/sqrt(2.); gdL = 0.; } } // ZZ else if(mePartonData()[2]->id()==23&&mePartonData()[3]->id()==23) { eZ = 0.; eZ2 = 0.; double gV2,gA2; gV2 = sqr(guL/2.-gW_/2./cosThetaW*2./3.*sin2ThetaW_); gA2 = sqr(guL/2.+gW_/2./cosThetaW*2./3.*sin2ThetaW_); guL = sqrt(gV2*gV2+gA2*gA2+6.*gA2*gV2)/2.; gV2 = sqr(gdL/2.+gW_/2./cosThetaW*1./3.*sin2ThetaW_); gA2 = sqr(gdL/2.-gW_/2./cosThetaW*1./3.*sin2ThetaW_); gdL = sqrt(gV2*gV2+gA2*gA2+6.*gA2*gV2)/2.; if(abs(quark_->id())%2==0&&abs(antiquark_->id())%2==0) gdL = guL; else if(abs(quark_->id())%2==1&&abs(antiquark_->id())%2==1) guL = gdL; else { cout << "MEPP2VVPowheg:" << endl; cout << "ZZ needs 2 down-type / 2 up-type!" << endl; } } return 4.*pi*alphaS_*Fij2_*CF_*(1./sqr(4.*pi))/NC_ * ( gdL*gdL*Idd1(s,t,u,mW2,mZ2,beta) + gdL*guL*Iud1(s,t,u,mW2,mZ2,beta) + guL*guL*Iuu1(s,t,u,mW2,mZ2,beta) - eZ/(s-mW2) * ( gdL*Fd1(s,t,u,mW2,mZ2,beta) - guL*Fu1(s,t,u,mW2,mZ2,beta) ) + eZ2/sqr(s-mW2) * H1(s,t,u,mW2,mZ2) ); } /***************************************************************************/ InvEnergy4 TildeI4t(Energy2 s, Energy2 t, Energy2 mW2, Energy2 mZ2) { double sqrBrackets; sqrBrackets = ( sqr(log(-t/mW2))/2.+log(-t/mW2)*log(-t/mZ2)/2. - 2.*log(-t/mW2)*log((mW2-t)/mW2)-2.*ReLi2(t/mW2) ); swap(mW2,mZ2); sqrBrackets+= ( sqr(log(-t/mW2))/2.+log(-t/mW2)*log(-t/mZ2)/2. - 2.*log(-t/mW2)*log((mW2-t)/mW2)-2.*ReLi2(t/mW2) ); swap(mW2,mZ2); return sqrBrackets/s/t; } InvEnergy2 TildeI3WZ(Energy2 s, Energy2 mW2, Energy2 mZ2, double beta) { Energy2 sig(mZ2+mW2); Energy2 del(mZ2-mW2); double sqrBrackets ; sqrBrackets = ( ReLi2(2.*mW2/(sig-del*(del/s+beta))) + ReLi2((1.-del/s+beta)/2.) + sqr(log((1.-del/s+beta)/2.))/2. + log((1.-del/s-beta)/2.)*log((1.+del/s-beta)/2.) ); beta *= -1; sqrBrackets -= ( ReLi2(2.*mW2/(sig-del*(del/s+beta))) + ReLi2((1.-del/s+beta)/2.) + sqr(log((1.-del/s+beta)/2.))/2. + log((1.-del/s-beta)/2.)*log((1.+del/s-beta)/2.) ); beta *= -1; swap(mW2,mZ2); del *= -1.; sqrBrackets += ( ReLi2(2.*mW2/(sig-del*(del/s+beta))) + ReLi2((1.-del/s+beta)/2.) + sqr(log((1.-del/s+beta)/2.))/2. + log((1.-del/s-beta)/2.)*log((1.+del/s-beta)/2.) ); swap(mW2,mZ2); del *= -1.; beta *= -1; swap(mW2,mZ2); del *= -1.; sqrBrackets -= ( ReLi2(2.*mW2/(sig-del*(del/s+beta))) + ReLi2((1.-del/s+beta)/2.) + sqr(log((1.-del/s+beta)/2.))/2. + log((1.-del/s-beta)/2.)*log((1.+del/s-beta)/2.) 
); beta *= -1; swap(mW2,mZ2); del *= -1.; return sqrBrackets/s/beta; } InvEnergy2 TildeI3W(Energy2 s, Energy2 t, Energy2 mW2) { return 1./(mW2-t)*(sqr(log(mW2/s))/2.-sqr(log(-t/s))/2.-sqr(pi)/2.); } /***************************************************************************/ double Idd1(Energy2 s, Energy2 t, Energy2 u, Energy2 mW2, Energy2 mZ2, double beta) { Energy2 sig(mZ2+mW2); Energy2 del(mZ2-mW2); double Val(0.); Val += 2.*(22.*t*t+t*(19.*s-18.*sig)+18.*mW2*mZ2)/t/t - 8.*(u*t+2*s*sig)/mW2/mZ2 - 2.*sqr(t-u)/t/s/sqr(beta); Val += +( 2.*(8.*t*t+4.*t*(s-3.*sig)+4*sqr(sig)-5.*s*sig+s*s)/t/s/sqr(beta) + 4.*(t*(3.*u+s)-3.*mW2*mZ2)/t/t + 6.*(t+u)*sqr(t-u)/t/s/s/sqr(sqr(beta)) )*log(-t/s); Val += +( ( 8.*t*t*(-2.*s+del)+8.*t*(-s*s+3.*s*sig-2.*del*sig) - 2.*(s-sig)*(s*s-4.*s*sig+3.*del*sig) )/t/s/s/beta/beta + 16.*s*(t-mZ2)/(t*(u+s)-mW2*mZ2) + 2.*(4.*t*t+t*(10.*s-3.*mZ2-9.*mW2)+12.*mW2*mZ2)/t/t -6.*(s-del)*(t+u)*sqr(t-u)/t/s/s/s/sqr(sqr(beta)) )*log(-t/mW2); Val += ( - ( 4.*t*t*(2.*sig-3.*s) - 4.*t*(s-sig)*(2.*s-3.*sig) - 2.*(s-2.*sig)*sqr(s-sig) )/t/s/beta/beta + ( 4.*sig*t-3.*s*s+4.*s*sig - 4.*(mW2*mW2+mZ2*mZ2) )/t - 3.*sqr(t*t-u*u)/t/s/s/sqr(sqr(beta)) )*TildeI3WZ(s,mW2,mZ2,beta); Val += +( 4.*(t*u+2.*s*sig)/3./mW2/mZ2 - 4.*(t-2.*u)/3./t )*pi*pi; Val += -( 4.*s*(t*u-2.*mW2*mZ2)/t )*TildeI4t(s,t,mW2,mZ2); Val += ( 8.*(t-mW2)*(u*t-2.*mW2*mZ2)/t/t )*TildeI3W(s,t,mW2); swap(mW2,mZ2); del *= -1; Val += 2.*(22.*t*t+t*(19.*s-18.*sig)+18.*mW2*mZ2)/t/t - 8.*(u*t+2*s*sig)/mW2/mZ2 - 2.*sqr(t-u)/t/s/sqr(beta); Val += +( 2.*(8.*t*t+4.*t*(s-3.*sig)+4*sqr(sig)-5.*s*sig+s*s)/t/s/sqr(beta) + 4.*(t*(3.*u+s)-3.*mW2*mZ2)/t/t + 6.*(t+u)*sqr(t-u)/t/s/s/sqr(sqr(beta)) )*log(-t/s); Val += +( ( 8.*t*t*(-2.*s+del)+8.*t*(-s*s+3.*s*sig-2.*del*sig) - 2.*(s-sig)*(s*s-4.*s*sig+3.*del*sig) )/t/s/s/beta/beta + 16.*s*(t-mZ2)/(t*(u+s)-mW2*mZ2) + 2.*(4.*t*t+t*(10.*s-3.*mZ2-9.*mW2)+12.*mW2*mZ2)/t/t -6.*(s-del)*(t+u)*sqr(t-u)/t/s/s/s/sqr(sqr(beta)) )*log(-t/mW2); Val += ( - ( 4.*t*t*(2.*sig-3.*s) - 4.*t*(s-sig)*(2.*s-3.*sig) - 2.*(s-2.*sig)*sqr(s-sig) )/t/s/beta/beta + ( 4.*sig*t-3.*s*s+4.*s*sig - 4.*(mW2*mW2+mZ2*mZ2) )/t - 3.*sqr(t*t-u*u)/t/s/s/sqr(sqr(beta)) )*TildeI3WZ(s,mW2,mZ2,beta); Val += +( 4.*(t*u+2.*s*sig)/3./mW2/mZ2 - 4.*(t-2.*u)/3./t )*pi*pi; Val += -( 4.*s*(t*u-2.*mW2*mZ2)/t )*TildeI4t(s,t,mW2,mZ2); Val += ( 8.*(t-mW2)*(u*t-2.*mW2*mZ2)/t/t )*TildeI3W(s,t,mW2); swap(mW2,mZ2); del *= -1; return Val; } /***************************************************************************/ double Iud1(Energy2 s, Energy2 t, Energy2 u, Energy2 mW2, Energy2 mZ2, double beta) { Energy2 sig(mZ2+mW2); Energy2 del(mZ2-mW2); double Val(0.); Val += 2.*(4.*t*t+t*(9.*s-4.*sig)-18.*s*sig)/t/u + 8.*(t*u+2.*s*sig)/mW2/mZ2 + 4.*s*s*(2.*t-sig)/u/(mW2*mZ2-t*(u+s)) - 2.*sqr(t-u)/u/s/sqr(beta); Val += ( 2.*(8.*t*t-4.*t*(s+3.*sig)-(s-sig)*(3.*s+4.*sig))/u/s/sqr(beta) + 6.*(t+u)*sqr(t-u)/u/s/s/sqr(sqr(beta)) - 12.*s*(t-sig)/t/u )*log(-t/s); Val += ( (2./u/s/s/sqr(beta))*( 4.*t*t*(-2.*s+del) + 4.*t*(s*s+s*(mZ2+5.*mW2)-2.*sig*del) + (s-sig)*(3.*s*s+8.*mW2*s-3.*sig*del) ) + (2.*t*(18.*s+3.*mW2+mZ2)-24.*s*sig)/t/u - 8.*s*(2.*t*t-t*(3.*s+4.*mZ2+2.*mW2)+2.*mZ2*(s+sig)) /u/(mW2*mZ2-t*(u+s)) - 8.*s*s*t*(2.*t-sig)*(t-mZ2)/u/sqr(mW2*mZ2-t*(u+s)) + 6.*(s-del)*(s-sig)*sqr(t-u)/u/s/s/s/sqr(sqr(beta)) )*log(-t/mW2); Val += ( -2.*(2.*t*t*(2.*sig-3.*s)+6.*sig*t*(s-sig)+sqr(s-sig)*(s+2.*sig)) /u/s/sqr(beta) +3.*s*(4.*t-4.*sig-s)/u -3.*sqr(s-sig)*sqr(t-u)/u/s/s/sqr(sqr(beta)) )*TildeI3WZ(s,mW2,mZ2,beta); Val += ( 4.*(u+4.*s)/3./u - 4.*(u*t+2.*s*sig)/3./mW2/mZ2 
)*pi*pi; Val += -( 16.*s*(t-sig)*(t-mW2)/t/u )*TildeI3W(s,t,mW2); Val += ( 8.*s*s*(t-sig)/u )*TildeI4t(s,t,mW2,mZ2); swap(t,u); Val += 2.*(4.*t*t+t*(9.*s-4.*sig)-18.*s*sig)/t/u + 8.*(t*u+2.*s*sig)/mW2/mZ2 + 4.*s*s*(2.*t-sig)/u/(mW2*mZ2-t*(u+s)) - 2.*sqr(t-u)/u/s/sqr(beta); Val += ( 2.*(8.*t*t-4.*t*(s+3.*sig)-(s-sig)*(3.*s+4.*sig))/u/s/sqr(beta) + 6.*(t+u)*sqr(t-u)/u/s/s/sqr(sqr(beta)) - 12.*s*(t-sig)/t/u )*log(-t/s); Val += ( (2./u/s/s/sqr(beta))*( 4.*t*t*(-2.*s+del) + 4.*t*(s*s+s*(mZ2+5.*mW2)-2.*sig*del) + (s-sig)*(3.*s*s+8.*mW2*s-3.*sig*del) ) + (2.*t*(18.*s+3.*mW2+mZ2)-24.*s*sig)/t/u - 8.*s*(2.*t*t-t*(3.*s+4.*mZ2+2.*mW2)+2.*mZ2*(s+sig)) /u/(mW2*mZ2-t*(u+s)) - 8.*s*s*t*(2.*t-sig)*(t-mZ2)/u/sqr(mW2*mZ2-t*(u+s)) + 6.*(s-del)*(s-sig)*sqr(t-u)/u/s/s/s/sqr(sqr(beta)) )*log(-t/mW2); Val += ( -2.*(2.*t*t*(2.*sig-3.*s)+6.*sig*t*(s-sig)+sqr(s-sig)*(s+2.*sig)) /u/s/sqr(beta) +3.*s*(4.*t-4.*sig-s)/u -3.*sqr(s-sig)*sqr(t-u)/u/s/s/sqr(sqr(beta)) )*TildeI3WZ(s,mW2,mZ2,beta); Val += ( 4.*(u+4.*s)/3./u - 4.*(u*t+2.*s*sig)/3./mW2/mZ2 )*pi*pi; Val += -( 16.*s*(t-sig)*(t-mW2)/t/u )*TildeI3W(s,t,mW2); Val += ( 8.*s*s*(t-sig)/u )*TildeI4t(s,t,mW2,mZ2); swap(t,u); swap(mW2,mZ2); del *= -1; Val += 2.*(4.*t*t+t*(9.*s-4.*sig)-18.*s*sig)/t/u + 8.*(t*u+2.*s*sig)/mW2/mZ2 + 4.*s*s*(2.*t-sig)/u/(mW2*mZ2-t*(u+s)) - 2.*sqr(t-u)/u/s/sqr(beta); Val += ( 2.*(8.*t*t-4.*t*(s+3.*sig)-(s-sig)*(3.*s+4.*sig))/u/s/sqr(beta) + 6.*(t+u)*sqr(t-u)/u/s/s/sqr(sqr(beta)) - 12.*s*(t-sig)/t/u )*log(-t/s); Val += ( (2./u/s/s/sqr(beta))*( 4.*t*t*(-2.*s+del) + 4.*t*(s*s+s*(mZ2+5.*mW2)-2.*sig*del) + (s-sig)*(3.*s*s+8.*mW2*s-3.*sig*del) ) + (2.*t*(18.*s+3.*mW2+mZ2)-24.*s*sig)/t/u - 8.*s*(2.*t*t-t*(3.*s+4.*mZ2+2.*mW2)+2.*mZ2*(s+sig)) /u/(mW2*mZ2-t*(u+s)) - 8.*s*s*t*(2.*t-sig)*(t-mZ2)/u/sqr(mW2*mZ2-t*(u+s)) + 6.*(s-del)*(s-sig)*sqr(t-u)/u/s/s/s/sqr(sqr(beta)) )*log(-t/mW2); Val += ( -2.*(2.*t*t*(2.*sig-3.*s)+6.*sig*t*(s-sig)+sqr(s-sig)*(s+2.*sig)) /u/s/sqr(beta) +3.*s*(4.*t-4.*sig-s)/u -3.*sqr(s-sig)*sqr(t-u)/u/s/s/sqr(sqr(beta)) )*TildeI3WZ(s,mW2,mZ2,beta); Val += ( 4.*(u+4.*s)/3./u - 4.*(u*t+2.*s*sig)/3./mW2/mZ2 )*pi*pi; Val += -( 16.*s*(t-sig)*(t-mW2)/t/u )*TildeI3W(s,t,mW2); Val += ( 8.*s*s*(t-sig)/u )*TildeI4t(s,t,mW2,mZ2); swap(mW2,mZ2); del *= -1; swap(t,u); swap(mW2,mZ2); del *= -1; Val += 2.*(4.*t*t+t*(9.*s-4.*sig)-18.*s*sig)/t/u + 8.*(t*u+2.*s*sig)/mW2/mZ2 + 4.*s*s*(2.*t-sig)/u/(mW2*mZ2-t*(u+s)) - 2.*sqr(t-u)/u/s/sqr(beta); Val += ( 2.*(8.*t*t-4.*t*(s+3.*sig)-(s-sig)*(3.*s+4.*sig))/u/s/sqr(beta) + 6.*(t+u)*sqr(t-u)/u/s/s/sqr(sqr(beta)) - 12.*s*(t-sig)/t/u )*log(-t/s); Val += ( (2./u/s/s/sqr(beta))*( 4.*t*t*(-2.*s+del) + 4.*t*(s*s+s*(mZ2+5.*mW2)-2.*sig*del) + (s-sig)*(3.*s*s+8.*mW2*s-3.*sig*del) ) + (2.*t*(18.*s+3.*mW2+mZ2)-24.*s*sig)/t/u - 8.*s*(2.*t*t-t*(3.*s+4.*mZ2+2.*mW2)+2.*mZ2*(s+sig)) /u/(mW2*mZ2-t*(u+s)) - 8.*s*s*t*(2.*t-sig)*(t-mZ2)/u/sqr(mW2*mZ2-t*(u+s)) + 6.*(s-del)*(s-sig)*sqr(t-u)/u/s/s/s/sqr(sqr(beta)) )*log(-t/mW2); Val += ( -2.*(2.*t*t*(2.*sig-3.*s)+6.*sig*t*(s-sig)+sqr(s-sig)*(s+2.*sig)) /u/s/sqr(beta) +3.*s*(4.*t-4.*sig-s)/u -3.*sqr(s-sig)*sqr(t-u)/u/s/s/sqr(sqr(beta)) )*TildeI3WZ(s,mW2,mZ2,beta); Val += ( 4.*(u+4.*s)/3./u - 4.*(u*t+2.*s*sig)/3./mW2/mZ2 )*pi*pi; Val += -( 16.*s*(t-sig)*(t-mW2)/t/u )*TildeI3W(s,t,mW2); Val += ( 8.*s*s*(t-sig)/u )*TildeI4t(s,t,mW2,mZ2); swap(t,u); swap(mW2,mZ2); del *= -1; return Val; } /***************************************************************************/ double Iuu1(Energy2 s, Energy2 t, Energy2 u, Energy2 mW2, Energy2 mZ2, double beta) { double Val(Idd1(s,u,t,mW2,mZ2,beta)); 
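  // Iuu1 is just Idd1 with t and u interchanged, as in NPB 383(1992)3-44.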
return Val; } /***************************************************************************/ Energy2 Fd1 (Energy2 s, Energy2 t, Energy2 u, Energy2 mW2, Energy2 mZ2, double beta) { Energy2 sig(mZ2+mW2); Energy2 del(mZ2-mW2); Energy2 Val(0.*GeV2); Val += 4.*(17.*t*t+t*(11.*s-13.*sig)+17.*(s*sig+mW2*mZ2))/t + 16.*(s-sig)*(t*u+2.*s*sig)/mW2/mZ2 + 4*s*s*(2.*t-sig)/(t*(u+s)-mW2*mZ2); Val += ( 8.*(t-u)/sqr(beta) - 4.*(3.*t*t-t*(s+3.*sig)+3.*(s*sig+mW2*mZ2))/t )*log(-t/s); Val += ( 8.*(t*t-t*(2.*s+3.*mW2+mZ2)+3.*(s*sig+mW2*mZ2))/t + 8.*s*(t*(3.*s+2.*sig)-2.*mZ2*(s+sig))/(t*(u+s)-mW2*mZ2) + 8.*s*s*t*(2.*t-sig)*(t-mZ2)/sqr(t*(u+s)-mW2*mZ2) - 8.*(s-del)*(t-u)/s/sqr(beta) )*log(-t/mW2); Val += ( 4.*(s-sig)*(t-u)/sqr(beta) + 4.*(sig-3.*s)*t + 4.*(4.*s*sig-mZ2*mZ2-mW2*mW2) )*TildeI3WZ(s,mW2,mZ2,beta); Val += -( 8.*(3.*t*t+2.*t*(2.*s-sig)+2.*(s*sig+mW2*mZ2))/3./t + 8.*(s-sig)*(t*u+2.*s*sig)/3./mW2/mZ2 )*pi*pi; Val += ( 4.*(s*t*t-s*(s+sig)*t+2.*s*(s*sig+mW2*mZ2)) )*TildeI4t(s,t,mW2,mZ2); Val += -( 8.*(t-mW2)*(t*t-t*(s+sig)+2.*(s*sig+mW2*mZ2))/t )*TildeI3W(s,t,mW2); swap(mW2,mZ2); del *= -1; Val += 4.*(17.*t*t+t*(11.*s-13.*sig)+17.*(s*sig+mW2*mZ2))/t + 16.*(s-sig)*(t*u+2.*s*sig)/mW2/mZ2 + 4*s*s*(2.*t-sig)/(t*(u+s)-mW2*mZ2); Val += ( 8.*(t-u)/sqr(beta) - 4.*(3.*t*t-t*(s+3.*sig)+3.*(s*sig+mW2*mZ2))/t )*log(-t/s); Val += ( 8.*(t*t-t*(2.*s+3.*mW2+mZ2)+3.*(s*sig+mW2*mZ2))/t + 8.*s*(t*(3.*s+2.*sig)-2.*mZ2*(s+sig))/(t*(u+s)-mW2*mZ2) + 8.*s*s*t*(2.*t-sig)*(t-mZ2)/sqr(t*(u+s)-mW2*mZ2) - 8.*(s-del)*(t-u)/s/sqr(beta) )*log(-t/mW2); Val += ( 4.*(s-sig)*(t-u)/sqr(beta) + 4.*(sig-3.*s)*t + 4.*(4.*s*sig-mZ2*mZ2-mW2*mW2) )*TildeI3WZ(s,mW2,mZ2,beta); Val += -( 8.*(3.*t*t+2.*t*(2.*s-sig)+2.*(s*sig+mW2*mZ2))/3./t + 8.*(s-sig)*(t*u+2.*s*sig)/3./mW2/mZ2 )*pi*pi; Val += ( 4.*(s*t*t-s*(s+sig)*t+2.*s*(s*sig+mW2*mZ2)) )*TildeI4t(s,t,mW2,mZ2); Val += -( 8.*(t-mW2)*(t*t-t*(s+sig)+2.*(s*sig+mW2*mZ2))/t )*TildeI3W(s,t,mW2); swap(mW2,mZ2); del *= -1; return Val; } /***************************************************************************/ Energy2 Fu1 (Energy2 s, Energy2 t, Energy2 u, Energy2 mW2, Energy2 mZ2, double beta) { Energy2 Val(Fd1(s,u,t,mW2,mZ2,beta)); return Val; } /***************************************************************************/ Energy4 H1 (Energy2 s, Energy2 t, Energy2 u, Energy2 mW2, Energy2 mZ2) { Energy2 sig(mZ2+mW2); Energy4 Val(0.*GeV2*GeV2); Val = 8.*t*t+8.*t*(s-sig)+s*s+6.*s*sig+mZ2*mZ2+10.*mW2*mZ2+mW2*mW2 - sqr(s-sig)*(t*u+2.*s*sig)/mW2/mZ2; Val *= ( 16.-8.*pi*pi/3.); return Val; } Energy2 t_u_Rdd(Energy2 s , Energy2 tk , Energy2 uk , Energy2 q1 , Energy2 q2, Energy2 mW2, Energy2 mZ2); Energy2 t_u_Rud(Energy2 s , Energy2 tk , Energy2 uk , Energy2 q1 , Energy2 q2, Energy2 q1h, Energy2 q2h, Energy2 mW2, Energy2 mZ2); Energy2 t_u_Ruu(Energy2 s , Energy2 tk , Energy2 uk, Energy2 q1h, Energy2 q2h, Energy2 mW2, Energy2 mZ2); Energy4 t_u_RZds(Energy2 s ,Energy2 tk , Energy2 uk , Energy2 q1, Energy2 q2, Energy2 s2,Energy2 mW2, Energy2 mZ2); Energy4 t_u_RZda(Energy2 s , Energy2 tk , Energy2 uk , Energy2 q1, Energy2 q2, Energy2 s2, Energy2 mW2, Energy2 mZ2); Energy4 t_u_RZd(Energy2 s , Energy2 tk , Energy2 uk , Energy2 q1 , Energy2 q2 , Energy2 s2 , Energy2 mW2, Energy2 mZ2); Energy4 t_u_RZu(Energy2 s , Energy2 tk , Energy2 uk , Energy2 q1h, Energy2 q2h, Energy2 s2 , Energy2 mW2, Energy2 mZ2); Energy6 t_u_RZs(Energy2 s , Energy2 tk , Energy2 uk , Energy2 q1, Energy2 q2, Energy2 s2, Energy2 mW2, Energy2 mZ2); Energy6 t_u_RZa(Energy2 s , Energy2 tk , Energy2 uk , Energy2 q1, Energy2 q2, Energy2 s2, Energy2 mW2, 
Energy2 mZ2); Energy6 t_u_RZ(Energy2 s , Energy2 tk , Energy2 uk , Energy2 q1, Energy2 q2, Energy2 s2 , Energy2 mW2, Energy2 mZ2); /***************************************************************************/ // t_u_M_R_qqb is the real emission q + qb -> n + g matrix element // exactly as defined in Eqs. C.1 of NPB 383(1992)3-44, multiplied by // tk * uk! Energy2 MEPP2VVPowheg::t_u_M_R_qqb(realVVKinematics R) const { // First the Born variables: Energy2 s2(R.s2r()); Energy2 mW2(R.k12r()); Energy2 mZ2(R.k22r()); // Then the rest: Energy2 s(R.sr()); Energy2 tk(R.tkr()); Energy2 uk(R.ukr()); Energy2 q1(R.q1r()); Energy2 q2(R.q2r()); Energy2 q1h(R.q1hatr()); Energy2 q2h(R.q2hatr()); double cosThetaW(sqrt(1.-sin2ThetaW_)); double eZ2(eZ2_); double eZ(eZ_); double gdL(gdL_); double guL(guL_); double gdR(gdR_); double guR(guR_); // W+W- if(abs(mePartonData()[2]->id())==24&&abs(mePartonData()[3]->id())==24) { double e2(sqr(gW_)*sin2ThetaW_); if(abs(quark_->id())%2==0&&abs(antiquark_->id())%2==0) { // N.B. OLD eZ used to calculate new eZ2 *then* new eZ is set! if(quark_->id()==-antiquark_->id()) { eZ2 = 1./2.*sqr(s2-mW2)/Fij2_ * (e2*e2/s2/s2*(sqr( 2./3.+eZ*(guL+guR)/2./e2*s2/(s2-mW2/sqr(cosThetaW))) +sqr( eZ*(guL-guR)/2./e2*s2/(s2-mW2/sqr(cosThetaW)))) ); eZ = -1./2./Fij2_/(gW_*gW_/4./sqrt(Fij2_))*(s2-mW2) * (gW_*gW_*e2/4./s2 *( 2./3.+2.*eZ*guL/2./e2*s2/(s2-mW2/sqr(cosThetaW)))); } else { eZ2 =0.; eZ =0.; } gdL = gW_/sqrt(2.); guL = 0.; } else if(abs(quark_->id())%2==1&&abs(antiquark_->id())%2==1) { // N.B. OLD eZ used to calculate new eZ2 *then* new eZ is set! if(quark_->id()==-antiquark_->id()) { eZ2 = 1./2.*sqr(s2-mW2)/Fij2_ * (e2*e2/s2/s2*(sqr(-1./3.+eZ*(gdL+gdR)/2./e2*s2/(s2-mW2/sqr(cosThetaW))) +sqr( eZ*(gdL-gdR)/2./e2*s2/(s2-mW2/sqr(cosThetaW)))) ); eZ = -1./2./Fij2_/(gW_*gW_/4./sqrt(Fij2_))*(s2-mW2) * (gW_*gW_*e2/4./s2 *(-1./3.+2.*eZ*gdL/2./e2*s2/(s2-mW2/sqr(cosThetaW)))); } else { eZ2 =0.; eZ =0.; } guL = gW_/sqrt(2.); gdL = 0.; } } // ZZ else if(mePartonData()[2]->id()==23&&mePartonData()[3]->id()==23) { eZ = 0.; eZ2 = 0.; double gV2,gA2; gV2 = sqr(guL/2.-gW_/2./cosThetaW*2./3.*sin2ThetaW_); gA2 = sqr(guL/2.+gW_/2./cosThetaW*2./3.*sin2ThetaW_); guL = sqrt(gV2*gV2+gA2*gA2+6.*gA2*gV2)/2.; gV2 = sqr(gdL/2.+gW_/2./cosThetaW*1./3.*sin2ThetaW_); gA2 = sqr(gdL/2.-gW_/2./cosThetaW*1./3.*sin2ThetaW_); gdL = sqrt(gV2*gV2+gA2*gA2+6.*gA2*gV2)/2.; if(abs(quark_->id())%2==0&&abs(antiquark_->id())%2==0) gdL = guL; else if(abs(quark_->id())%2==1&&abs(antiquark_->id())%2==1) guL = gdL; else { cout << "MEPP2VVPowheg:" << endl; cout << "ZZ needs 2 down-type / 2 up-type!" 
<< endl; } } return -2.*pi*alphaS_*Fij2_*CF_/NC_ * ( gdL*gdL*t_u_Rdd(s,tk,uk,q1,q2,mW2,mZ2) + 2.*gdL*guL*t_u_Rud(s,tk,uk,q1,q2,q1h,q2h,mW2,mZ2) + guL*guL*t_u_Ruu(s,tk,uk,q1h,q2h,mW2,mZ2) - 2.*eZ/(s2-mW2) * ( gdL * t_u_RZd(s,tk,uk,q1 ,q2 ,s2,mW2,mZ2) - guL * t_u_RZu(s,tk,uk,q1h,q2h,s2,mW2,mZ2) ) + eZ2/sqr(s2-mW2) *t_u_RZ(s,tk,uk,q1,q2,s2,mW2,mZ2) ); } Energy2 t_u_Rdd(Energy2 s ,Energy2 tk ,Energy2 uk ,Energy2 q1,Energy2 q2, Energy2 mW2, Energy2 mZ2) { Energy2 Val(0.*GeV2); Val += 4.*(q2*(uk+2.*s+q2)+q1*(s+q1))/mW2/mZ2*uk + 16.*(uk+s)/q2*uk - 4.*(2.*uk+4.*s+q2)/mW2*uk - 4.*(2.*uk+5.*s+q2+2.*q1-mW2)/mZ2*uk + 4.*q1*s*(s+q1)/mW2/mZ2 + 16.*s*(s+q2-mZ2-mW2)/q1 - 4.*s*(4.*s+q2+q1)/mW2 + 16.*mW2*mZ2*s/q1/q2 + 4.*s + 16.*mZ2*(tk-2.*mW2)/q1/q2/q2*tk*uk + 16.*(2.*mZ2+mW2-tk)/q1/q2*tk*uk + 16.*mW2*(s-mZ2-mW2)/q1/q2*uk + 16.*mZ2*(q1-2.*mW2)/q2/q2*uk + 32.*mW2*mW2*mZ2/q1/q2/q2*uk + 16.*mW2/q1*uk + 4.*uk + 8./q2*tk*uk + 4.*q1/mW2/mZ2*tk*uk - 24./q1*tk*uk - 4./mW2*tk*uk; swap(mW2,mZ2); swap(q1,q2); swap(tk,uk); Val += 4.*(q2*(uk+2.*s+q2)+q1*(s+q1))/mW2/mZ2*uk + 16.*(uk+s)/q2*uk - 4.*(2.*uk+4.*s+q2)/mW2*uk - 4.*(2.*uk+5.*s+q2+2.*q1-mW2)/mZ2*uk + 4.*q1*s*(s+q1)/mW2/mZ2 + 16.*s*(s+q2-mZ2-mW2)/q1 - 4.*s*(4.*s+q2+q1)/mW2 + 16.*mW2*mZ2*s/q1/q2 + 4.*s + 16.*mZ2*(tk-2.*mW2)/q1/q2/q2*tk*uk + 16.*(2.*mZ2+mW2-tk)/q1/q2*tk*uk + 16.*mW2*(s-mZ2-mW2)/q1/q2*uk + 16.*mZ2*(q1-2.*mW2)/q2/q2*uk + 32.*mW2*mW2*mZ2/q1/q2/q2*uk + 16.*mW2/q1*uk + 4.*uk + 8./q2*tk*uk + 4.*q1/mW2/mZ2*tk*uk - 24./q1*tk*uk - 4./mW2*tk*uk; swap(mW2,mZ2); swap(q1,q2); swap(tk,uk); return Val; } Energy2 t_u_Rud(Energy2 s ,Energy2 tk ,Energy2 uk ,Energy2 q1,Energy2 q2, Energy2 q1h,Energy2 q2h,Energy2 mW2, Energy2 mZ2) { Energy2 Val(0.*GeV2); Val += (uk*s*(uk+3.*s+q1h)+s*s*(s+mZ2)-(s+uk)*(2.*mZ2*s+3.*mW2*s+mW2*q1h) ) * 8./q1/q2h/q2*uk - (uk*(uk+3.*s+q1h-mW2)-(q2+s)*(q2-s)+s*(q2-mW2)+q1h*(q2-mW2)+mW2*q2 ) * 4.*s/mZ2/q1/q2h*uk - 4.*((s+uk+q2h-2.*mZ2)*(s+q1h-mZ2)-mZ2*q1)/mW2/q2*uk + 4.*(2.*s*uk+2.*mW2*uk+5.*s*s+2.*q1h*s-2.*mZ2*s)/q1/q2h*uk + 4.*(2.*s*uk-s*s-2.*q1h*s+2.*mW2*s+2.*mW2*q1h)/q1/q2h/q2*tk*uk + ((2.*uk+s)*(s+q1h)+s*(q2+q2h)+2.*q2*(s+q2h)-q1*s+q1*q2+q1h*q2h ) /mW2/mZ2*uk + 8.*s*(uk-q1h+mZ2)/q1/q2*uk + 4.*s*(-uk+s-q2+q1+q1h)/mZ2/q2h*uk + 4.*s*(-uk-q2+q1h)/mZ2/q1*uk + 8.*(mZ2*uk-s*s+mW2*s-2.*mZ2*q1-2.*mZ2*q1h)/q2h/q2*uk + 2.*(-uk-9.*s-4.*q2-5.*q2h-3.*q1-4.*q1h+8.*mZ2)/mW2*uk + 2.*(-4.*uk+3.*s+5.*q1+4.*q1h)/q2h*uk + 2.*(s*tk+q2*tk+s*s-q2*q2+q1h*q2)/mW2/mZ2*tk - 8.*s*(tk+s+q1h)/mW2/q2*tk + 2.*(-tk+3.*s+q2-q1h)/mW2*tk - 8.*s*s*s/q1h/q2 - 2.*s*q2*(s+q2)/mW2/mZ2 + 2.*s*(2.*s+q2)/mZ2 + 2.*s*(2.*s+q2)/mW2 - 16.*s*s/q1h - 2.*s - 16.*s*s/q1h/q2*tk - 8.*s/q2*tk - 16.*s/q1h*tk + 6.*s/mZ2*tk + 4.*s/q1*uk + 4.*s/mZ2*uk + 12.*uk + 4.*s*(tk+q1h-mW2)/mZ2/q1/q2h*tk*uk + 2.*(s+4.*q1+5.*q1h-4.*mZ2)/q2*uk - 4.*s*s*s/q1h/q1/q2h/q2*tk*uk - 4.*s*s/q1h/q2h/q2*tk*uk - 4.*s*s/q1h/q1/q2*tk*uk + 8.*s*s/mW2/q1h/q2*tk*uk - 4.*s*s/q1h/q1/q2h*tk*uk + 4.*(s+mZ2)/mW2/q2*tk*uk - 4.*s/q1h/q2h*tk*uk - 4.*s/q1h/q1*tk*uk + 12.*s/mW2/q1h*tk*uk - (s+4.*q2)/mW2/mZ2*tk*uk - 4.*(s+2.*mZ2)/q2h/q2*tk*uk - 4.*(3.*s+2.*q1h)/q1/q2*tk*uk - 8.*mW2/q1/q2h*tk*uk + 8./q2h*tk*uk + 8./q1*tk*uk; swap(mW2,mZ2); swap(q1,q2); swap(tk,uk); swap(q1h,q2h); // Note this swap is done in accordance with MC@NLO. // It is not in NPB 383(1992)3-44 Eq.C.4! 
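  // The same Eq. C.4 expression is added three more times below, each time
  // with a different set of variable swaps (undone again afterwards), so that
  // Val accumulates the four crossing-related contributions to R_ud.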
Val += (uk*s*(uk+3.*s+q1h)+s*s*(s+mZ2)-(s+uk)*(2.*mZ2*s+3.*mW2*s+mW2*q1h) ) * 8./q1/q2h/q2*uk - (uk*(uk+3.*s+q1h-mW2)-(q2+s)*(q2-s)+s*(q2-mW2)+q1h*(q2-mW2)+mW2*q2 ) * 4.*s/mZ2/q1/q2h*uk - 4.*((s+uk+q2h-2.*mZ2)*(s+q1h-mZ2)-mZ2*q1)/mW2/q2*uk + 4.*(2.*s*uk+2.*mW2*uk+5.*s*s+2.*q1h*s-2.*mZ2*s)/q1/q2h*uk + 4.*(2.*s*uk-s*s-2.*q1h*s+2.*mW2*s+2.*mW2*q1h)/q1/q2h/q2*tk*uk + ((2.*uk+s)*(s+q1h)+s*(q2+q2h)+2.*q2*(s+q2h)-q1*s+q1*q2+q1h*q2h ) /mW2/mZ2*uk + 8.*s*(uk-q1h+mZ2)/q1/q2*uk + 4.*s*(-uk+s-q2+q1+q1h)/mZ2/q2h*uk + 4.*s*(-uk-q2+q1h)/mZ2/q1*uk + 8.*(mZ2*uk-s*s+mW2*s-2.*mZ2*q1-2.*mZ2*q1h)/q2h/q2*uk + 2.*(-uk-9.*s-4.*q2-5.*q2h-3.*q1-4.*q1h+8.*mZ2)/mW2*uk + 2.*(-4.*uk+3.*s+5.*q1+4.*q1h)/q2h*uk + 2.*(s*tk+q2*tk+s*s-q2*q2+q1h*q2)/mW2/mZ2*tk - 8.*s*(tk+s+q1h)/mW2/q2*tk + 2.*(-tk+3.*s+q2-q1h)/mW2*tk - 8.*s*s*s/q1h/q2 - 2.*s*q2*(s+q2)/mW2/mZ2 + 2.*s*(2.*s+q2)/mZ2 + 2.*s*(2.*s+q2)/mW2 - 16.*s*s/q1h - 2.*s - 16.*s*s/q1h/q2*tk - 8.*s/q2*tk - 16.*s/q1h*tk + 6.*s/mZ2*tk + 4.*s/q1*uk + 4.*s/mZ2*uk + 12.*uk + 4.*s*(tk+q1h-mW2)/mZ2/q1/q2h*tk*uk + 2.*(s+4.*q1+5.*q1h-4.*mZ2)/q2*uk - 4.*s*s*s/q1h/q1/q2h/q2*tk*uk - 4.*s*s/q1h/q2h/q2*tk*uk - 4.*s*s/q1h/q1/q2*tk*uk + 8.*s*s/mW2/q1h/q2*tk*uk - 4.*s*s/q1h/q1/q2h*tk*uk + 4.*(s+mZ2)/mW2/q2*tk*uk - 4.*s/q1h/q2h*tk*uk - 4.*s/q1h/q1*tk*uk + 12.*s/mW2/q1h*tk*uk - (s+4.*q2)/mW2/mZ2*tk*uk - 4.*(s+2.*mZ2)/q2h/q2*tk*uk - 4.*(3.*s+2.*q1h)/q1/q2*tk*uk - 8.*mW2/q1/q2h*tk*uk + 8./q2h*tk*uk + 8./q1*tk*uk; swap(mW2,mZ2); swap(q1,q2); swap(tk,uk); swap(q1h,q2h); // Note this swap is done in accordance with MC@NLO. // It is not in NPB 383(1992)3-44 Eq.C.4! swap(tk,uk); swap(q1,q2h); swap(q2,q1h); Val += (uk*s*(uk+3.*s+q1h)+s*s*(s+mZ2)-(s+uk)*(2.*mZ2*s+3.*mW2*s+mW2*q1h) ) * 8./q1/q2h/q2*uk - (uk*(uk+3.*s+q1h-mW2)-(q2+s)*(q2-s)+s*(q2-mW2)+q1h*(q2-mW2)+mW2*q2 ) * 4.*s/mZ2/q1/q2h*uk - 4.*((s+uk+q2h-2.*mZ2)*(s+q1h-mZ2)-mZ2*q1)/mW2/q2*uk + 4.*(2.*s*uk+2.*mW2*uk+5.*s*s+2.*q1h*s-2.*mZ2*s)/q1/q2h*uk + 4.*(2.*s*uk-s*s-2.*q1h*s+2.*mW2*s+2.*mW2*q1h)/q1/q2h/q2*tk*uk + ((2.*uk+s)*(s+q1h)+s*(q2+q2h)+2.*q2*(s+q2h)-q1*s+q1*q2+q1h*q2h ) /mW2/mZ2*uk + 8.*s*(uk-q1h+mZ2)/q1/q2*uk + 4.*s*(-uk+s-q2+q1+q1h)/mZ2/q2h*uk + 4.*s*(-uk-q2+q1h)/mZ2/q1*uk + 8.*(mZ2*uk-s*s+mW2*s-2.*mZ2*q1-2.*mZ2*q1h)/q2h/q2*uk + 2.*(-uk-9.*s-4.*q2-5.*q2h-3.*q1-4.*q1h+8.*mZ2)/mW2*uk + 2.*(-4.*uk+3.*s+5.*q1+4.*q1h)/q2h*uk + 2.*(s*tk+q2*tk+s*s-q2*q2+q1h*q2)/mW2/mZ2*tk - 8.*s*(tk+s+q1h)/mW2/q2*tk + 2.*(-tk+3.*s+q2-q1h)/mW2*tk - 8.*s*s*s/q1h/q2 - 2.*s*q2*(s+q2)/mW2/mZ2 + 2.*s*(2.*s+q2)/mZ2 + 2.*s*(2.*s+q2)/mW2 - 16.*s*s/q1h - 2.*s - 16.*s*s/q1h/q2*tk - 8.*s/q2*tk - 16.*s/q1h*tk + 6.*s/mZ2*tk + 4.*s/q1*uk + 4.*s/mZ2*uk + 12.*uk + 4.*s*(tk+q1h-mW2)/mZ2/q1/q2h*tk*uk + 2.*(s+4.*q1+5.*q1h-4.*mZ2)/q2*uk - 4.*s*s*s/q1h/q1/q2h/q2*tk*uk - 4.*s*s/q1h/q2h/q2*tk*uk - 4.*s*s/q1h/q1/q2*tk*uk + 8.*s*s/mW2/q1h/q2*tk*uk - 4.*s*s/q1h/q1/q2h*tk*uk + 4.*(s+mZ2)/mW2/q2*tk*uk - 4.*s/q1h/q2h*tk*uk - 4.*s/q1h/q1*tk*uk + 12.*s/mW2/q1h*tk*uk - (s+4.*q2)/mW2/mZ2*tk*uk - 4.*(s+2.*mZ2)/q2h/q2*tk*uk - 4.*(3.*s+2.*q1h)/q1/q2*tk*uk - 8.*mW2/q1/q2h*tk*uk + 8./q2h*tk*uk + 8./q1*tk*uk; swap(tk,uk); swap(q1,q2h); swap(q2,q1h); swap(mW2,mZ2); swap(q1,q1h); swap(q2,q2h); Val += (uk*s*(uk+3.*s+q1h)+s*s*(s+mZ2)-(s+uk)*(2.*mZ2*s+3.*mW2*s+mW2*q1h) ) * 8./q1/q2h/q2*uk - (uk*(uk+3.*s+q1h-mW2)-(q2+s)*(q2-s)+s*(q2-mW2)+q1h*(q2-mW2)+mW2*q2 ) * 4.*s/mZ2/q1/q2h*uk - 4.*((s+uk+q2h-2.*mZ2)*(s+q1h-mZ2)-mZ2*q1)/mW2/q2*uk + 4.*(2.*s*uk+2.*mW2*uk+5.*s*s+2.*q1h*s-2.*mZ2*s)/q1/q2h*uk + 4.*(2.*s*uk-s*s-2.*q1h*s+2.*mW2*s+2.*mW2*q1h)/q1/q2h/q2*tk*uk + 
((2.*uk+s)*(s+q1h)+s*(q2+q2h)+2.*q2*(s+q2h)-q1*s+q1*q2+q1h*q2h ) /mW2/mZ2*uk + 8.*s*(uk-q1h+mZ2)/q1/q2*uk + 4.*s*(-uk+s-q2+q1+q1h)/mZ2/q2h*uk + 4.*s*(-uk-q2+q1h)/mZ2/q1*uk + 8.*(mZ2*uk-s*s+mW2*s-2.*mZ2*q1-2.*mZ2*q1h)/q2h/q2*uk + 2.*(-uk-9.*s-4.*q2-5.*q2h-3.*q1-4.*q1h+8.*mZ2)/mW2*uk + 2.*(-4.*uk+3.*s+5.*q1+4.*q1h)/q2h*uk + 2.*(s*tk+q2*tk+s*s-q2*q2+q1h*q2)/mW2/mZ2*tk - 8.*s*(tk+s+q1h)/mW2/q2*tk + 2.*(-tk+3.*s+q2-q1h)/mW2*tk - 8.*s*s*s/q1h/q2 - 2.*s*q2*(s+q2)/mW2/mZ2 + 2.*s*(2.*s+q2)/mZ2 + 2.*s*(2.*s+q2)/mW2 - 16.*s*s/q1h - 2.*s - 16.*s*s/q1h/q2*tk - 8.*s/q2*tk - 16.*s/q1h*tk + 6.*s/mZ2*tk + 4.*s/q1*uk + 4.*s/mZ2*uk + 12.*uk + 4.*s*(tk+q1h-mW2)/mZ2/q1/q2h*tk*uk + 2.*(s+4.*q1+5.*q1h-4.*mZ2)/q2*uk - 4.*s*s*s/q1h/q1/q2h/q2*tk*uk - 4.*s*s/q1h/q2h/q2*tk*uk - 4.*s*s/q1h/q1/q2*tk*uk + 8.*s*s/mW2/q1h/q2*tk*uk - 4.*s*s/q1h/q1/q2h*tk*uk + 4.*(s+mZ2)/mW2/q2*tk*uk - 4.*s/q1h/q2h*tk*uk - 4.*s/q1h/q1*tk*uk + 12.*s/mW2/q1h*tk*uk - (s+4.*q2)/mW2/mZ2*tk*uk - 4.*(s+2.*mZ2)/q2h/q2*tk*uk - 4.*(3.*s+2.*q1h)/q1/q2*tk*uk - 8.*mW2/q1/q2h*tk*uk + 8./q2h*tk*uk + 8./q1*tk*uk; swap(mW2,mZ2); swap(q1,q1h); swap(q2,q2h); return Val; } Energy2 t_u_Ruu(Energy2 s ,Energy2 tk ,Energy2 uk ,Energy2 q1h,Energy2 q2h, Energy2 mW2, Energy2 mZ2) { return t_u_Rdd(s,tk,uk,q1h,q2h,mZ2,mW2); } Energy4 t_u_RZds(Energy2 s ,Energy2 tk ,Energy2 uk ,Energy2 q1,Energy2 q2, Energy2 s2, Energy2 mW2, Energy2 mZ2) { Energy4 Val(0.*GeV2*GeV2); Energy2 sig(mZ2+mW2); Val += ( q1*q2*(5./2.*s*s+5.*s*tk+3.*tk*tk)+(tk*uk*uk+q1*q1*q2)*(tk+s) + q1*(tk*tk*uk+s*uk*uk-s*s*tk+s*s*uk)+q1*q1*q1*(uk+s)-q1*q1*s*s2 ) * 8./q1/q2 - ( tk*tk*(4.*uk+s+q1-2.*q2)+tk*(sqr(q1+q2)-q1*s-3.*q2*s-2.*q1*q1) - q1*s*(4.*s-2.*q1-q2)+tk*uk*(q1+3.*s) ) * 4.*sig/q1/q2 - 4.*sig*sig*(s*(2.*s+q1)+tk*(uk+5./2.*tk+5.*s+q1+q2) )/mW2/mZ2 + 2.*sig*s2*(4.*sqr(s+tk)+tk*(uk+s+4.*q1+2.*q2)+2.*q1*(2.*s+q1) )/mW2/mZ2 + 4.*sig*sig*(s2+s-q1+q2)/q1/q2*tk - 16.*mW2*mZ2*(tk*uk/2.+q2*tk-q1*s)/q1/q2 - 4.*s2*s2*q1*(tk+s+q1)/mW2/mZ2 + sig*sig*sig*(uk+tk)/mW2/mZ2 + 4.*mW2*mZ2*sig*(uk+tk)/q1/q2; swap(mW2,mZ2); swap(q1,q2); swap(tk,uk); Val += ( q1*q2*(5./2.*s*s+5.*s*tk+3.*tk*tk)+(tk*uk*uk+q1*q1*q2)*(tk+s) + q1*(tk*tk*uk+s*uk*uk-s*s*tk+s*s*uk)+q1*q1*q1*(uk+s)-q1*q1*s*s2 ) * 8./q1/q2 - ( tk*tk*(4.*uk+s+q1-2.*q2)+tk*(sqr(q1+q2)-q1*s-3.*q2*s-2.*q1*q1) - q1*s*(4.*s-2.*q1-q2)+tk*uk*(q1+3.*s) ) * 4.*sig/q1/q2 - 4.*sig*sig*(s*(2.*s+q1)+tk*(uk+5./2.*tk+5.*s+q1+q2) )/mW2/mZ2 + 2.*sig*s2*(4.*sqr(s+tk)+tk*(uk+s+4.*q1+2.*q2)+2.*q1*(2.*s+q1) )/mW2/mZ2 + 4.*sig*sig*(s2+s-q1+q2)/q1/q2*tk - 16.*mW2*mZ2*(tk*uk/2.+q2*tk-q1*s)/q1/q2 - 4.*s2*s2*q1*(tk+s+q1)/mW2/mZ2 + sig*sig*sig*(uk+tk)/mW2/mZ2 + 4.*mW2*mZ2*sig*(uk+tk)/q1/q2; swap(mW2,mZ2); swap(q1,q2); swap(tk,uk); return Val; } Energy4 t_u_RZda(Energy2 s ,Energy2 tk ,Energy2 uk ,Energy2 q1,Energy2 q2, Energy2 s2, Energy2 mW2, Energy2 mZ2) { Energy4 Val(0.*GeV2*GeV2); Val += 4.*mZ2*(2.*uk*uk-s*tk+q1*(uk-tk-s+q1+0.5*q2)+q2*(s-3.*q2) ) /q1/q2*tk - 4.*mZ2*mZ2*(q1-tk-2.*s-q2)/q1/q2*tk - 2.*mZ2*(tk+2.*s+2.*q2)/mW2*tk - 2.*s2*(s+2.*q2)/mZ2*tk + 8.*mW2*mZ2*mZ2/q1/q2*tk + 2.*mZ2*mZ2/mW2*tk; swap(mW2,mZ2); // N.B. Here we subtract! Val -= 4.*mZ2*(2.*uk*uk-s*tk+q1*(uk-tk-s+q1+0.5*q2)+q2*(s-3.*q2) ) /q1/q2*tk - 4.*mZ2*mZ2*(q1-tk-2.*s-q2)/q1/q2*tk - 2.*mZ2*(tk+2.*s+2.*q2)/mW2*tk - 2.*s2*(s+2.*q2)/mZ2*tk + 8.*mW2*mZ2*mZ2/q1/q2*tk + 2.*mZ2*mZ2/mW2*tk; swap(mW2,mZ2); swap(q1,q2); // N.B. Here we subtract! 
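  // Sign pattern in t_u_RZda: the direct term enters with a +, the
  // mW2 <-> mZ2 swapped term and the (q1 <-> q2, tk <-> uk) crossed term
  // each with a -, and the term with both sets of swaps applied with a +.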
swap(tk,uk); Val -= 4.*mZ2*(2.*uk*uk-s*tk+q1*(uk-tk-s+q1+0.5*q2)+q2*(s-3.*q2) ) /q1/q2*tk - 4.*mZ2*mZ2*(q1-tk-2.*s-q2)/q1/q2*tk - 2.*mZ2*(tk+2.*s+2.*q2)/mW2*tk - 2.*s2*(s+2.*q2)/mZ2*tk + 8.*mW2*mZ2*mZ2/q1/q2*tk + 2.*mZ2*mZ2/mW2*tk; swap(q1,q2); swap(tk,uk); swap(mW2,mZ2); // N.B. Here we add! swap(q1,q2); swap(tk,uk); Val += 4.*mZ2*(2.*uk*uk-s*tk+q1*(uk-tk-s+q1+0.5*q2)+q2*(s-3.*q2) ) /q1/q2*tk - 4.*mZ2*mZ2*(q1-tk-2.*s-q2)/q1/q2*tk - 2.*mZ2*(tk+2.*s+2.*q2)/mW2*tk - 2.*s2*(s+2.*q2)/mZ2*tk + 8.*mW2*mZ2*mZ2/q1/q2*tk + 2.*mZ2*mZ2/mW2*tk; swap(mW2,mZ2); swap(q1,q2); swap(tk,uk); return Val; } Energy4 t_u_RZd(Energy2 s , Energy2 tk , Energy2 uk , Energy2 q1 , Energy2 q2 , Energy2 s2, Energy2 mW2, Energy2 mZ2) { Energy4 Val(0.*GeV2*GeV2); Val = t_u_RZds(s,tk,uk,q1,q2,s2,mW2,mZ2) + t_u_RZda(s,tk,uk,q1,q2,s2,mW2,mZ2); return Val; } Energy4 t_u_RZu(Energy2 s , Energy2 tk , Energy2 uk , Energy2 q1h, Energy2 q2h, Energy2 s2, Energy2 mW2, Energy2 mZ2) { Energy4 Val(0.*GeV2*GeV2); Val = t_u_RZd(s,tk,uk,q1h,q2h,s2,mZ2,mW2); return Val; } Energy6 t_u_RZs(Energy2 s,Energy2 tk,Energy2 uk,Energy2 q1,Energy2 q2, Energy2 s2,Energy2 mW2,Energy2 mZ2) { Energy6 Val(0.*GeV2*GeV2*GeV2); Energy2 sig(mZ2+mW2); Val += 2.*sig*sig*s2*( tk*(3.*uk+9.*tk+19.*s+6.*q1+4.*q2)+8.*s*s+6.*q1*s + 2.*q1*q1 )/mW2/mZ2 - 2.*sig*sig*sig*(tk*(3.*uk+6.*tk+11.*s+2.*q1+2.*q2)+2.*s*(2.*s+q1)) / mW2/mZ2 - 2.*sig*s2*s2*(tk*(uk+4.*tk+9.*s+6.*q1+2.*q2)+4.*sqr(s+q1)-2.*q1*s) /mW2/mZ2 - 16.*sig*(2.*tk*(uk/2.-tk-s+q1+q2)-s*(3.*s/2.-2.*q1)) + 8.*s2*(s*(s/2.+tk)+4.*q1*(tk+s+q1)) + 4.*s2*s2*s2*q1*(tk+s+q1)/mW2/mZ2 + 8.*sig*sig*(2.*tk+s/2.) + 2.*sig*sig*sig*sig*tk/mW2/mZ2 + 32.*mW2*mZ2*s; swap(mW2,mZ2); swap(q1,q2); swap(tk,uk); Val += 2.*sig*sig*s2*( tk*(3.*uk+9.*tk+19.*s+6.*q1+4.*q2)+8.*s*s+6.*q1*s + 2.*q1*q1 )/mW2/mZ2 - 2.*sig*sig*sig*(tk*(3.*uk+6.*tk+11.*s+2.*q1+2.*q2)+2.*s*(2.*s+q1)) / mW2/mZ2 - 2.*sig*s2*s2*(tk*(uk+4.*tk+9.*s+6.*q1+2.*q2)+4.*sqr(s+q1)-2.*q1*s) /mW2/mZ2 - 16.*sig*(2.*tk*(uk/2.-tk-s+q1+q2)-s*(3.*s/2.-2.*q1)) + 8.*s2*(s*(s/2.+tk)+4.*q1*(tk+s+q1)) + 4.*s2*s2*s2*q1*(tk+s+q1)/mW2/mZ2 + 8.*sig*sig*(2.*tk+s/2.) 
+ 2.*sig*sig*sig*sig*tk/mW2/mZ2 + 32.*mW2*mZ2*s; swap(mW2,mZ2); swap(q1,q2); swap(tk,uk); return Val; } Energy6 t_u_RZa(Energy2 s,Energy2 tk,Energy2 uk,Energy2 q1,Energy2 q2, Energy2 s2,Energy2 mW2,Energy2 mZ2) { Energy6 Val(0.*GeV2*GeV2*GeV2); Val += - 2.*mZ2*(2.*tk+11.*s+18.*q2)*tk - 2.*mZ2*mZ2*(2.*tk+3.*s+2.*q2)/mW2*tk + 2.*mZ2*s2*(tk+3.*s+4.*q2)/mW2*tk - 2.*s2*s2*(s+2.*q2)/mW2*tk + 2.*mZ2*mZ2*mZ2/mW2*tk + 20.*mZ2*mZ2*tk; swap(mW2,mZ2); Val -= - 2.*mZ2*(2.*tk+11.*s+18.*q2)*tk - 2.*mZ2*mZ2*(2.*tk+3.*s+2.*q2)/mW2*tk + 2.*mZ2*s2*(tk+3.*s+4.*q2)/mW2*tk - 2.*s2*s2*(s+2.*q2)/mW2*tk + 2.*mZ2*mZ2*mZ2/mW2*tk + 20.*mZ2*mZ2*tk; swap(mW2,mZ2); swap(q1,q2); swap(tk,uk); Val -= - 2.*mZ2*(2.*tk+11.*s+18.*q2)*tk - 2.*mZ2*mZ2*(2.*tk+3.*s+2.*q2)/mW2*tk + 2.*mZ2*s2*(tk+3.*s+4.*q2)/mW2*tk - 2.*s2*s2*(s+2.*q2)/mW2*tk + 2.*mZ2*mZ2*mZ2/mW2*tk + 20.*mZ2*mZ2*tk; swap(q1,q2); swap(tk,uk); swap(mW2,mZ2); swap(q1,q2); swap(tk,uk); Val += - 2.*mZ2*(2.*tk+11.*s+18.*q2)*tk - 2.*mZ2*mZ2*(2.*tk+3.*s+2.*q2)/mW2*tk + 2.*mZ2*s2*(tk+3.*s+4.*q2)/mW2*tk - 2.*s2*s2*(s+2.*q2)/mW2*tk + 2.*mZ2*mZ2*mZ2/mW2*tk + 20.*mZ2*mZ2*tk; swap(mW2,mZ2); swap(q1,q2); swap(tk,uk); return Val; } Energy6 t_u_RZ(Energy2 s , Energy2 tk , Energy2 uk , Energy2 q1, Energy2 q2, Energy2 s2, Energy2 mW2, Energy2 mZ2) { Energy6 Val(0.*GeV2*GeV2*GeV2); Val = t_u_RZs(s,tk,uk,q1,q2,s2,mW2,mZ2) + t_u_RZa(s,tk,uk,q1,q2,s2,mW2,mZ2); return Val; } /***************************************************************************/ // t_u_M_R_qg is the real emission q + qb -> n + g matrix element // exactly as defined in Eqs. C.9 of NPB 383(1992)3-44, multiplied by // tk * uk! Energy2 MEPP2VVPowheg::t_u_M_R_qg(realVVKinematics R) const { // First the Born variables: Energy2 s2(R.s2r()); Energy2 mW2(R.k12r()); Energy2 mZ2(R.k22r()); // Then the rest: Energy2 s(R.sr()); Energy2 tk(R.tkr()); Energy2 uk(R.ukr()); Energy2 q1(R.q1r()); Energy2 q2(R.q2r()); Energy2 q1h(R.q1hatr()); Energy2 q2h(R.q2hatr()); Energy2 w1(R.w1r()); Energy2 w2(R.w2r()); double cosThetaW(sqrt(1.-sin2ThetaW_)); double eZ2(eZ2_); double eZ(eZ_); double gdL(gdL_); double guL(guL_); double gdR(gdR_); double guR(guR_); // W+W- if(abs(mePartonData()[2]->id())==24&&abs(mePartonData()[3]->id())==24) { double e2(sqr(gW_)*sin2ThetaW_); if(abs(quark_->id())%2==0&&abs(antiquark_->id())%2==0) { // N.B. OLD eZ used to calculate new eZ2 *then* new eZ is set! if(quark_->id()==-antiquark_->id()) { eZ2 = 1./2.*sqr(s2-mW2)/Fij2_ * (e2*e2/s2/s2*(sqr( 2./3.+eZ*(guL+guR)/2./e2*s2/(s2-mW2/sqr(cosThetaW))) +sqr( eZ*(guL-guR)/2./e2*s2/(s2-mW2/sqr(cosThetaW)))) ); eZ = -1./2./Fij2_/(gW_*gW_/4./sqrt(Fij2_))*(s2-mW2) * (gW_*gW_*e2/4./s2 *( 2./3.+2.*eZ*guL/2./e2*s2/(s2-mW2/sqr(cosThetaW)))); } else { eZ2 =0.; eZ =0.; } gdL = gW_/sqrt(2.); guL = 0.; } else if(abs(quark_->id())%2==1&&abs(antiquark_->id())%2==1) { // N.B. OLD eZ used to calculate new eZ2 *then* new eZ is set! 
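  // (Same effective-coupling redefinition as in M_V_regular and t_u_M_R_qqb
  //  above; the q g matrix element is then obtained from the q qbar one by
  //  crossing, cf. the swaps and the -tk/s * TR_/CF_ factor applied at the
  //  end of this function.)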
if(quark_->id()==-antiquark_->id()) { eZ2 = 1./2.*sqr(s2-mW2)/Fij2_ * (e2*e2/s2/s2*(sqr(-1./3.+eZ*(gdL+gdR)/2./e2*s2/(s2-mW2/sqr(cosThetaW))) +sqr( eZ*(gdL-gdR)/2./e2*s2/(s2-mW2/sqr(cosThetaW)))) ); eZ = -1./2./Fij2_/(gW_*gW_/4./sqrt(Fij2_))*(s2-mW2) * (gW_*gW_*e2/4./s2 *(-1./3.+2.*eZ*gdL/2./e2*s2/(s2-mW2/sqr(cosThetaW)))); } else { eZ2 =0.; eZ =0.; } guL = gW_/sqrt(2.); gdL = 0.; } } // ZZ else if(mePartonData()[2]->id()==23&&mePartonData()[3]->id()==23) { eZ = 0.; eZ2 = 0.; double gV2,gA2; gV2 = sqr(guL/2.-gW_/2./cosThetaW*2./3.*sin2ThetaW_); gA2 = sqr(guL/2.+gW_/2./cosThetaW*2./3.*sin2ThetaW_); guL = sqrt(gV2*gV2+gA2*gA2+6.*gA2*gV2)/2.; gV2 = sqr(gdL/2.+gW_/2./cosThetaW*1./3.*sin2ThetaW_); gA2 = sqr(gdL/2.-gW_/2./cosThetaW*1./3.*sin2ThetaW_); gdL = sqrt(gV2*gV2+gA2*gA2+6.*gA2*gV2)/2.; if(abs(quark_->id())%2==0&&abs(antiquark_->id())%2==0) gdL = guL; else if(abs(quark_->id())%2==1&&abs(antiquark_->id())%2==1) guL = gdL; else { cout << "MEPP2VVPowheg:" << endl; cout << "ZZ needs 2 down-type / 2 up-type!" << endl; } } Energy2 Val(0.*GeV2); swap(s,tk); swap(q2,w2); swap(q2h,w1); Val = -2.*pi*alphaS_*Fij2_*CF_/NC_ * ( gdL*gdL*t_u_Rdd(s,tk,uk,q1,q2,mW2,mZ2) + 2.*gdL*guL*t_u_Rud(s,tk,uk,q1,q2,q1h,q2h,mW2,mZ2) + guL*guL*t_u_Ruu(s,tk,uk,q1h,q2h,mW2,mZ2) - 2.*eZ/(s2-mW2) * ( gdL * t_u_RZd(s,tk,uk,q1 ,q2 ,s2,mW2,mZ2) - guL * t_u_RZu(s,tk,uk,q1h,q2h,s2,mW2,mZ2) ) + eZ2/sqr(s2-mW2) *t_u_RZ(s,tk,uk,q1,q2,s2,mW2,mZ2) ); swap(s,tk); swap(q2,w2); swap(q2h,w1); Val *= -tk/s * TR_/CF_; return Val; } /***************************************************************************/ // t_u_M_R_gqb is the real emission g + qb -> n + q matrix element // exactly as defined in Eqs. C.9 of NPB 383(1992)3-44, multiplied by // tk * uk! Energy2 MEPP2VVPowheg::t_u_M_R_gqb(realVVKinematics R) const { // First the Born variables: Energy2 s2(R.s2r()); Energy2 mW2(R.k12r()); Energy2 mZ2(R.k22r()); // Then the rest: Energy2 s(R.sr()); Energy2 tk(R.tkr()); Energy2 uk(R.ukr()); Energy2 q1(R.q1r()); Energy2 q2(R.q2r()); Energy2 q1h(R.q1hatr()); Energy2 q2h(R.q2hatr()); Energy2 w1(R.w1r()); Energy2 w2(R.w2r()); double cosThetaW(sqrt(1.-sin2ThetaW_)); double eZ2(eZ2_); double eZ(eZ_); double gdL(gdL_); double guL(guL_); double gdR(gdR_); double guR(guR_); // W+W- if(abs(mePartonData()[2]->id())==24&&abs(mePartonData()[3]->id())==24) { double e2(sqr(gW_)*sin2ThetaW_); if(abs(quark_->id())%2==0&&abs(antiquark_->id())%2==0) { // N.B. OLD eZ used to calculate new eZ2 *then* new eZ is set! if(quark_->id()==-antiquark_->id()) { eZ2 = 1./2.*sqr(s2-mW2)/Fij2_ * (e2*e2/s2/s2*(sqr( 2./3.+eZ*(guL+guR)/2./e2*s2/(s2-mW2/sqr(cosThetaW))) +sqr( eZ*(guL-guR)/2./e2*s2/(s2-mW2/sqr(cosThetaW)))) ); eZ = -1./2./Fij2_/(gW_*gW_/4./sqrt(Fij2_))*(s2-mW2) * (gW_*gW_*e2/4./s2 *( 2./3.+2.*eZ*guL/2./e2*s2/(s2-mW2/sqr(cosThetaW)))); } else { eZ2 =0.; eZ =0.; } gdL = gW_/sqrt(2.); guL = 0.; } else if(abs(quark_->id())%2==1&&abs(antiquark_->id())%2==1) { // N.B. OLD eZ used to calculate new eZ2 *then* new eZ is set! 
if(quark_->id()==-antiquark_->id()) { eZ2 = 1./2.*sqr(s2-mW2)/Fij2_ * (e2*e2/s2/s2*(sqr(-1./3.+eZ*(gdL+gdR)/2./e2*s2/(s2-mW2/sqr(cosThetaW))) +sqr( eZ*(gdL-gdR)/2./e2*s2/(s2-mW2/sqr(cosThetaW)))) ); eZ = -1./2./Fij2_/(gW_*gW_/4./sqrt(Fij2_))*(s2-mW2) * (gW_*gW_*e2/4./s2 *(-1./3.+2.*eZ*gdL/2./e2*s2/(s2-mW2/sqr(cosThetaW)))); } else { eZ2 =0.; eZ =0.; } guL = gW_/sqrt(2.); gdL = 0.; } } // ZZ else if(mePartonData()[2]->id()==23&&mePartonData()[3]->id()==23) { eZ = 0.; eZ2 = 0.; double gV2,gA2; gV2 = sqr(guL/2.-gW_/2./cosThetaW*2./3.*sin2ThetaW_); gA2 = sqr(guL/2.+gW_/2./cosThetaW*2./3.*sin2ThetaW_); guL = sqrt(gV2*gV2+gA2*gA2+6.*gA2*gV2)/2.; gV2 = sqr(gdL/2.+gW_/2./cosThetaW*1./3.*sin2ThetaW_); gA2 = sqr(gdL/2.-gW_/2./cosThetaW*1./3.*sin2ThetaW_); gdL = sqrt(gV2*gV2+gA2*gA2+6.*gA2*gV2)/2.; if(abs(quark_->id())%2==0&&abs(antiquark_->id())%2==0) gdL = guL; else if(abs(quark_->id())%2==1&&abs(antiquark_->id())%2==1) guL = gdL; else { cout << "MEPP2VVPowheg:" << endl; cout << "ZZ needs 2 down-type / 2 up-type!" << endl; } } Energy2 Val(0.*GeV2); swap(s,uk); swap(q1,w1); swap(q1h,w2); Val = -2.*pi*alphaS_*Fij2_*CF_/NC_ * ( gdL*gdL*t_u_Rdd(s,tk,uk,q1,q2,mW2,mZ2) + 2.*gdL*guL*t_u_Rud(s,tk,uk,q1,q2,q1h,q2h,mW2,mZ2) + guL*guL*t_u_Ruu(s,tk,uk,q1h,q2h,mW2,mZ2) - 2.*eZ/(s2-mW2) * ( gdL * t_u_RZd(s,tk,uk,q1 ,q2 ,s2,mW2,mZ2) - guL * t_u_RZu(s,tk,uk,q1h,q2h,s2,mW2,mZ2) ) + eZ2/sqr(s2-mW2) *t_u_RZ(s,tk,uk,q1,q2,s2,mW2,mZ2) ); swap(s,uk); swap(q1,w1); swap(q1h,w2); Val *= -uk/s * TR_/CF_; return Val; } /***************************************************************************/ // The following six functions are I_{dd}^{(0)}, I_{ud}^{(0)}, // I_{uu}^{(0)}, F_{u}^{(0)}, F_{d}^{(0)}, H^{(0)} from Eqs. 3.9 - 3.14 // They make up the Born matrix element. Ixx functions correspond to the // graphs with no TGC, Fx functions are due to non-TGC graphs interfering // with TGC graphs, while the H function is due purely to TGC graphs. double Idd0(Energy2 s,Energy2 t,Energy2 u,Energy2 mW2,Energy2 mZ2); double Iud0(Energy2 s,Energy2 t,Energy2 u,Energy2 mW2,Energy2 mZ2); double Iuu0(Energy2 s,Energy2 t,Energy2 u,Energy2 mW2,Energy2 mZ2); Energy2 Fu0(Energy2 s,Energy2 t,Energy2 u,Energy2 mW2,Energy2 mZ2); Energy2 Fd0(Energy2 s,Energy2 t,Energy2 u,Energy2 mW2,Energy2 mZ2); Energy4 H0 (Energy2 s,Energy2 t,Energy2 u,Energy2 mW2,Energy2 mZ2); /***************************************************************************/ // M_Born_WZ is the Born matrix element exactly as defined in Eqs. 3.3-3.14 // of of NPB 383(1992)3-44 (with a spin*colour averaging factor 1./4./NC_/NC_). double MEPP2VVPowheg::M_Born_WZ(bornVVKinematics B) const { Energy2 s(B.sb()); Energy2 t(B.tb()); Energy2 u(B.ub()); Energy2 mW2(B.k12b()); // N.B. the diboson masses are preserved in getting Energy2 mZ2(B.k22b()); // the 2->2 from the 2->3 kinematics. double cosThetaW(sqrt(1.-sin2ThetaW_)); double eZ2(eZ2_); double eZ(eZ_); double gdL(gdL_); double guL(guL_); double gdR(gdR_); double guR(guR_); // W+W- if(abs(mePartonData()[2]->id())==24&&abs(mePartonData()[3]->id())==24) { double e2(sqr(gW_)*sin2ThetaW_); if(abs(quark_->id())%2==0&&abs(antiquark_->id())%2==0) { // N.B. OLD eZ used to calculate new eZ2 *then* new eZ is set! 
if(quark_->id()==-antiquark_->id()) { eZ2 = 1./2.*sqr(s-mW2)/Fij2_ * (e2*e2/s/s*(sqr( 2./3.+eZ*(guL+guR)/2./e2*s/(s-mW2/sqr(cosThetaW))) +sqr( eZ*(guL-guR)/2./e2*s/(s-mW2/sqr(cosThetaW)))) ); eZ = -1./2./Fij2_/(gW_*gW_/4./sqrt(Fij2_))*(s-mW2) * (gW_*gW_*e2/4./s *( 2./3.+2.*eZ*guL/2./e2*s/(s-mW2/sqr(cosThetaW)))); } else { eZ2 =0.; eZ =0.; } gdL = gW_/sqrt(2.); guL = 0.; } else if(abs(quark_->id())%2==1&&abs(antiquark_->id())%2==1) { // N.B. OLD eZ used to calculate new eZ2 *then* new eZ is set! if(quark_->id()==-antiquark_->id()) { eZ2 = 1./2.*sqr(s-mW2)/Fij2_ * (e2*e2/s/s*(sqr(-1./3.+eZ*(gdL+gdR)/2./e2*s/(s-mW2/sqr(cosThetaW))) +sqr( eZ*(gdL-gdR)/2./e2*s/(s-mW2/sqr(cosThetaW)))) ); eZ = -1./2./Fij2_/(gW_*gW_/4./sqrt(Fij2_))*(s-mW2) * (gW_*gW_*e2/4./s *(-1./3.+2.*eZ*gdL/2./e2*s/(s-mW2/sqr(cosThetaW)))); } else { eZ2 =0.; eZ =0.; } guL = gW_/sqrt(2.); gdL = 0.; } } // ZZ else if(mePartonData()[2]->id()==23&&mePartonData()[3]->id()==23) { eZ = 0.; eZ2 = 0.; double gV2,gA2; gV2 = sqr(guL/2.-gW_/2./cosThetaW*2./3.*sin2ThetaW_); gA2 = sqr(guL/2.+gW_/2./cosThetaW*2./3.*sin2ThetaW_); guL = sqrt(gV2*gV2+gA2*gA2+6.*gA2*gV2)/2.; gV2 = sqr(gdL/2.+gW_/2./cosThetaW*1./3.*sin2ThetaW_); gA2 = sqr(gdL/2.-gW_/2./cosThetaW*1./3.*sin2ThetaW_); gdL = sqrt(gV2*gV2+gA2*gA2+6.*gA2*gV2)/2.; if(abs(quark_->id())%2==0&&abs(antiquark_->id())%2==0) gdL = guL; else if(abs(quark_->id())%2==1&&abs(antiquark_->id())%2==1) guL = gdL; else { cout << "MEPP2VVPowheg:" << endl; cout << "ZZ needs 2 down-type / 2 up-type!" << endl; } } return Fij2_/2./NC_ * ( gdL*gdL*Idd0(s,t,u,mW2,mZ2) + 2.*gdL*guL*Iud0(s,t,u,mW2,mZ2) + guL*guL*Iuu0(s,t,u,mW2,mZ2) - 2.*eZ/(s-mW2) * ( gdL*Fd0(s,t,u,mW2,mZ2) - guL*Fu0(s,t,u,mW2,mZ2) ) + eZ2/sqr(s-mW2) * H0(s,t,u,mW2,mZ2) ); } /***************************************************************************/ double Idd0(Energy2 s, Energy2 t, Energy2 u, Energy2 mW2, Energy2 mZ2) { return 8.*((u*t/mW2/mZ2-1.)/4.+s/2.*(mW2+mZ2)/mW2/mZ2) + 8.*(u/t-mW2*mZ2/t/t); } /***************************************************************************/ double Iud0(Energy2 s, Energy2 t, Energy2 u, Energy2 mW2, Energy2 mZ2) { return - 8.*((u*t/mW2/mZ2-1.)/4.+s/2.*(mW2+mZ2)/mW2/mZ2) + 8.*s/t/u*(mW2+mZ2); } /***************************************************************************/ double Iuu0(Energy2 s, Energy2 t, Energy2 u, Energy2 mW2, Energy2 mZ2) { return Idd0(s,u,t,mW2,mZ2); } /***************************************************************************/ Energy2 Fd0 (Energy2 s, Energy2 t, Energy2 u, Energy2 mW2, Energy2 mZ2) { return - 8.*s*( (u*t/mW2/mZ2-1.)*(1.-(mW2+mZ2)/s-4.*mW2*mZ2/s/t)/4. 
+ (mW2+mZ2)/2./mW2/mZ2*(s-mW2-mZ2+2.*mW2*mZ2/t) ); } /***************************************************************************/ Energy2 Fu0 (Energy2 s, Energy2 t, Energy2 u, Energy2 mW2, Energy2 mZ2) { return Fd0(s,u,t,mW2,mZ2); } /***************************************************************************/ Energy4 H0 (Energy2 s, Energy2 t, Energy2 u, Energy2 mW2, Energy2 mZ2) { return 8.*s*s*(u*t/mW2/mZ2-1.)*( 1./4.-(mW2+mZ2)/2./s + (sqr(mW2+mZ2)+8.*mW2*mZ2)/4./s/s ) + 8.*s*s*(mW2+mZ2)/mW2/mZ2*(s/2.-mW2-mZ2+sqr(mW2-mZ2)/2./s); } /***************************************************************************/ bool MEPP2VVPowheg::sanityCheck() const { bool alarm(false); Energy2 prefacs(8.*pi*alphaS_*S_.sr() /S_.xr() ); Energy2 prefacsp(8.*pi*alphaS_*SCp_.sr() /SCp_.xr() ); Energy2 prefacsm(8.*pi*alphaS_*SCm_.sr() /SCm_.xr() ); Energy2 prefacp(8.*pi*alphaS_*Cp_.sr()/Cp_.xr()); Energy2 prefacm(8.*pi*alphaS_*Cm_.sr()/Cm_.xr()); double xp(Cp_.xr()); double xm(Cm_.xr()); double M_B_WW(M_Born_WW(B_)); double M_B_ZZ(M_Born_ZZ(B_)); double M_V_reg_WW(M_V_regular_WW(S_)); double M_V_reg_ZZ(M_V_regular_ZZ(S_)); Energy2 t_u_qqb_WW(t_u_M_R_qqb_WW(H_)); Energy2 t_u_qqb_ZZ(t_u_M_R_qqb_ZZ(H_)); // Check that the native leading order Herwig matrix // element is equivalent to the WZ leading order matrix // element in NPB 383 (1992) 3-44, with the relevant WZ->WW // WZ->ZZ transformation applied (M_Born_). // if(fabs((lo_me2_ - M_Born_)/M_Born_)>1.e-2) { // alarm=true; // cout << "lo_me2_ - M_Born_ (%) = " // << lo_me2_ - M_Born_ << " (" // << (lo_me2_ - M_Born_)/M_Born_*100. << ")\n"; // } // Check that the transformation from NPB 383 (1992) 3-44 WZ // matrix elements to WW matrix elements actually works, by // comparing them to the explicit WW matrix elements in // NPB 410 (1993) 280-324. if(abs(mePartonData()[2]->id())==24&&abs(mePartonData()[3]->id())==24) { if(fabs((M_Born_ -M_B_WW )/M_B_WW )>1.e-6) { alarm=true; cout << "WZ->WW transformation error!\n"; cout << "M_Born_ - M_B_WW (rel) = " << M_Born_ - M_B_WW << " (" << (M_Born_ - M_B_WW)/M_B_WW << ")\n"; cout << "M_Born_ = " << M_Born_ << endl; cout << "M_B_WW = " << M_B_WW << endl; } if(fabs((M_V_regular_-M_V_reg_WW)/M_V_reg_WW)>1.e-6) { alarm=true; cout << "WZ->WW transformation error!\n"; cout << "M_V_regular_ - M_V_reg_WW (rel) = " << M_V_regular_ - M_V_reg_WW << " (" << (M_V_regular_ - M_V_reg_WW)/M_V_reg_WW << ")\n"; cout << "M_V_regular_ = " << M_V_regular_ << endl; cout << "M_V_reg_WW = " << M_V_reg_WW << endl; } if(fabs((t_u_M_R_qqb_-t_u_qqb_WW)/t_u_qqb_WW)>1.e-6) { alarm=true; cout << "WZ->WW transformation error!\n"; cout << "t_u_M_R_qqb_ - t_u_qqb_WW (rel) = " << (t_u_M_R_qqb_ - t_u_qqb_WW)/GeV2 << " (" << (t_u_M_R_qqb_ - t_u_qqb_WW)/t_u_qqb_WW << ")\n"; cout << "t_u_M_R_qqb_ = " << t_u_M_R_qqb_/GeV2 << endl; cout << "t_u_qqb_WW = " << t_u_qqb_WW /GeV2 << endl; } } // Check that the transformation from NPB 383 (1992) 3-44 WZ // matrix elements to ZZ matrix elements actually works, by // comparing them to the explicit ZZ matrix elements in // NPB 357 (1991) 409-438. 
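  // As in the WW case above, the three checks that follow require the Born,
  // virtual and real (q + qbar) matrix elements obtained from the transformed
  // WZ expressions to agree with the dedicated ZZ results to a relative
  // precision of 1.e-6; any larger discrepancy raises the alarm flag.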
  if(abs(mePartonData()[2]->id())==23&&abs(mePartonData()[3]->id())==23) {
    if(fabs((M_Born_ -M_B_ZZ )/M_B_ZZ )>1.e-6) {
      alarm=true;
      cout << "WZ->ZZ transformation error!\n";
      cout << "M_Born_ - M_B_ZZ (rel) = "
           <<  M_Born_ - M_B_ZZ << " ("
           << (M_Born_ - M_B_ZZ)/M_B_ZZ << ")\n";
      cout << "M_Born_ = " << M_Born_ << endl;
      cout << "M_B_ZZ = " << M_B_ZZ << endl;
    }
    if(fabs((M_V_regular_-M_V_reg_ZZ)/M_V_reg_ZZ)>1.e-6) {
      alarm=true;
      cout << "WZ->ZZ transformation error!\n";
      cout << "M_V_regular_ - M_V_reg_ZZ (rel) = "
           <<  M_V_regular_ - M_V_reg_ZZ << " ("
           << (M_V_regular_ - M_V_reg_ZZ)/M_V_reg_ZZ << ")\n";
      cout << "M_V_regular_ = " << M_V_regular_ << endl;
      cout << "M_V_reg_ZZ = " << M_V_reg_ZZ << endl;
    }
    if(fabs((t_u_M_R_qqb_-t_u_qqb_ZZ)/t_u_qqb_ZZ)>1.e-6) {
      alarm=true;
      cout << "WZ->ZZ transformation error!\n";
      cout << "t_u_M_R_qqb_ - t_u_qqb_ZZ (rel) = "
           << (t_u_M_R_qqb_ - t_u_qqb_ZZ)/GeV2 << " ("
           << (t_u_M_R_qqb_ - t_u_qqb_ZZ)/t_u_qqb_ZZ << ")\n";
      cout << "t_u_M_R_qqb_ = " << t_u_M_R_qqb_/GeV2 << endl;
      cout << "t_u_qqb_ZZ = " << t_u_qqb_ZZ /GeV2 << endl;
    }
  }

  // Check the soft limit of the q + qbar matrix element.
  Energy2 absDiff_qqbs = t_u_M_R_qqb(S_) - prefacs*2.*CF_*M_Born_;
  double relDiff_qqbs = absDiff_qqbs / t_u_M_R_qqb(S_);
  if(fabs(relDiff_qqbs)>1.e-6) {
    alarm=true;
    cout << "\n";
    cout << "t_u_M_R_qqb(S_) " << t_u_M_R_qqb(S_) /GeV2 << endl;
    cout << "t_u_M_R_qqb(S_)-8*pi*alphaS*sHat/x*2*Cab*M_Born_ (rel):\n"
         << absDiff_qqbs / GeV2 << " (" << relDiff_qqbs << ")\n";
  }

  // Check the positive soft-collinear limit of the q + qbar matrix element.
  Energy2 absDiff_qqbsp = t_u_M_R_qqb(SCp_) - prefacsp*2.*CF_*M_Born_;
  double relDiff_qqbsp = absDiff_qqbsp / t_u_M_R_qqb(SCp_);
  if(fabs(relDiff_qqbsp)>1.e-6) {
    alarm=true;
    cout << "\n";
    cout << "t_u_M_R_qqb(SCp_) " << t_u_M_R_qqb(SCp_)/GeV2 << endl;
    cout << "t_u_M_R_qqb(SCp_)-8*pi*alphaS*sHat/x*2*Cab*M_Born_ (rel):\n"
         << absDiff_qqbsp / GeV2 << " (" << relDiff_qqbsp << ")\n";
  }

  // Check the negative soft-collinear limit of the q + qbar matrix element.
  Energy2 absDiff_qqbsm = t_u_M_R_qqb(SCm_) - prefacsm*2.*CF_*M_Born_;
  double relDiff_qqbsm = absDiff_qqbsm / t_u_M_R_qqb(SCm_);
  if(fabs(relDiff_qqbsm)>1.e-6) {
    alarm=true;
    cout << "\n";
    cout << "t_u_M_R_qqb(SCm_) " << t_u_M_R_qqb(SCm_)/GeV2 << endl;
    cout << "t_u_M_R_qqb(SCm_)-8*pi*alphaS*sHat/x*2*Cab*M_Born_ (rel):\n"
         << absDiff_qqbsm / GeV2 << " (" << relDiff_qqbsm << ")\n";
  }

  // Check the positive collinear limit of the q + qbar matrix element.
  Energy2 absDiff_qqbp = t_u_M_R_qqb(Cp_) - prefacp*CF_*(1.+xp*xp)*M_Born_;
  double relDiff_qqbp = absDiff_qqbp / t_u_M_R_qqb(Cp_);
  if(fabs(relDiff_qqbp)>1.e-6) {
    alarm=true;
    cout << "\n";
    cout << "t_u_M_R_qqb(Cp_) " << t_u_M_R_qqb(Cp_) /GeV2 << endl;
    cout << "t_u_M_R_qqb(Cp_)-8*pi*alphaS*sHat/x*(1-x)*Pqq*M_Born_ (rel):\n"
         << absDiff_qqbp / GeV2 << " (" << relDiff_qqbp << ")\n";
  }

  // Check the negative collinear limit of the q + qbar matrix element.
  Energy2 absDiff_qqbm = t_u_M_R_qqb(Cm_) - prefacm*CF_*(1.+xm*xm)*M_Born_;
  double relDiff_qqbm = absDiff_qqbm / t_u_M_R_qqb(Cm_);
  if(fabs(relDiff_qqbm)>1.e-6) {
    alarm=true;
    cout << "\n";
    cout << "t_u_M_R_qqb(Cm_) " << t_u_M_R_qqb(Cm_) /GeV2 << endl;
    cout << "t_u_M_R_qqb(Cm_)-8*pi*alphaS*sHat/x*(1-x)*Pqq*M_Born_ (rel):\n"
         << absDiff_qqbm / GeV2 << " (" << relDiff_qqbm << ")\n";
  }

  // Check the positive collinear limit of the g + qbar matrix element.
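  // In this limit the real matrix element should factorise onto the Born
  // times the g -> q qbar splitting kernel TR*(x^2+(1-x)^2) (labelled Pgq in
  // the printout below), with the 8*pi*alphaS*sHat/x*(1-x) prefactor quoted
  // in the same printout; the comparison below encodes exactly this.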
  Energy2 absDiff_gqbp = t_u_M_R_gqb(Cp_)
                       - prefacp*(1.-xp)*TR_*(xp*xp+sqr(1.-xp))*M_Born_;
  double relDiff_gqbp = absDiff_gqbp / t_u_M_R_gqb(Cp_);
  if(fabs(relDiff_gqbp)>1.e-6) {
    alarm=true;
    cout << "\n";
    cout << "t_u_M_R_gqb(Cp_) " << t_u_M_R_gqb(Cp_) /GeV2 << endl;
    cout << "t_u_M_R_gqb(Cp_)-8*pi*alphaS*sHat/x*(1-x)*Pgq*M_Born_ (rel):\n"
         << absDiff_gqbp / GeV2 << " (" << relDiff_gqbp << ")\n";
  }

  // Check the negative collinear limit of the q + g matrix element.
  Energy2 absDiff_qgm = t_u_M_R_qg(Cm_)
                      - prefacm*(1.-xm)*TR_*(xm*xm+sqr(1.-xm))*M_Born_;
  double relDiff_qgm = absDiff_qgm / t_u_M_R_qg(Cm_);
  if(fabs(relDiff_qgm)>1.e-6) {
    alarm=true;
    cout << "\n";
    cout << "t_u_M_R_qg(Cm_) " << t_u_M_R_qg(Cm_) /GeV2 << endl;
    cout << "t_u_M_R_qg(Cm_)-8*pi*alphaS*sHat/x*(1-x)*Pgq*M_Born_ (rel):\n"
         << absDiff_qgm / GeV2 << " (" << relDiff_qgm << ")\n";
  }

  return alarm;
}

/***************************************************************************/
// M_Born_ZZ is the Born matrix element exactly as defined in Eqs. 2.18-2.19
// of NPB 357(1991)409-438.
double MEPP2VVPowheg::M_Born_ZZ(bornVVKinematics B) const {
  Energy2 s(B.sb());
  Energy2 t(B.tb());
  Energy2 u(B.ub());
  Energy2 mZ2(B.k22b()); // N.B. the diboson mass is preserved in getting
                         // the 2->2 from the 2->3 kinematics.
  double cosThetaW(sqrt(1.-sin2ThetaW_));
  double gV2,gA2,gX,gY,gZ;
  gV2 = sqr(guL_/2.-gW_/2./cosThetaW*2./3.*sin2ThetaW_);
  gA2 = sqr(guL_/2.+gW_/2./cosThetaW*2./3.*sin2ThetaW_);
  gX  = sqrt(gV2*gV2+gA2*gA2+6.*gA2*gV2)/2.;
  gV2 = sqr(gdL_/2.+gW_/2./cosThetaW*1./3.*sin2ThetaW_);
  gA2 = sqr(gdL_/2.-gW_/2./cosThetaW*1./3.*sin2ThetaW_);
  gY  = sqrt(gV2*gV2+gA2*gA2+6.*gA2*gV2)/2.;
  gZ  = gX;
  if(abs(quark_->id())%2==1&&abs(antiquark_->id())%2==1) gZ = gY;
  return 1./NC_*sqr(gZ*2.)*(t/u+u/t+4.*mZ2*s/t/u-mZ2*mZ2*(1./t/t+1./u/u));
}

/***************************************************************************/
// M_V_regular_ZZ is the one-loop ZZ matrix element exactly as defined in
// Eqs. B.1 & B.2 of NPB 357(1991)409-438.
double MEPP2VVPowheg::M_V_regular_ZZ(realVVKinematics S) const {
  Energy2 s(S.bornVariables().sb());
  Energy2 t(S.bornVariables().tb());
  Energy2 u(S.bornVariables().ub());
  Energy2 mZ2(S.k22r()); // N.B. the diboson mass is preserved in getting
                         // the 2->2 from the 2->3 kinematics.
  double beta(S.betaxr()); // N.B. for x=1 \beta_x=\beta in NPB 383(1992)3-44.
  double cosThetaW(sqrt(1.-sin2ThetaW_));
  double gV2,gA2,gX,gY,gZ;
  gV2 = sqr(guL_/2.-gW_/2./cosThetaW*2./3.*sin2ThetaW_);
  gA2 = sqr(guL_/2.+gW_/2./cosThetaW*2./3.*sin2ThetaW_);
  gX  = sqrt(gV2*gV2+gA2*gA2+6.*gA2*gV2)/2.;
  gV2 = sqr(gdL_/2.+gW_/2./cosThetaW*1./3.*sin2ThetaW_);
  gA2 = sqr(gdL_/2.-gW_/2./cosThetaW*1./3.*sin2ThetaW_);
  gY  = sqrt(gV2*gV2+gA2*gA2+6.*gA2*gV2)/2.;
  gZ  = gX;
  if(abs(quark_->id())%2==1&&abs(antiquark_->id())%2==1) gZ = gY;
  double M_V_reg(0.);
  M_V_reg = 2.*s*sqr(gZ*2.)*4.*pi*alphaS_*CF_/NC_/sqr(4.*pi)/2.
*( 2.*sqr(t+mZ2)/sqr(beta)/s/t/u + 4.*s/(t-mZ2)/u - ( 16.*t*t*t+(28.*s-68.*mZ2)*t*t+(18.*s*s-36.*mZ2*s+88.*mZ2*mZ2)*t + 18.*mZ2*mZ2*s-36.*mZ2*mZ2*mZ2 )/t/t/s/u + ( 12.*s/(t-mZ2)/u-4.*mZ2*s/sqr(t-mZ2)/u+2.*(t+4.*s)/s/u - 6.*(s*s+mZ2*mZ2)/s/t/u+6.*mZ2*mZ2*(2.*mZ2-s)/t/t/s/u )*log(-t/mZ2) + ( - ( 5.*t*t*t+(8.*s-18.*mZ2)*t*t+(6.*s*s+25.*mZ2*mZ2)*t + 6.*mZ2*mZ2*s-12.*mZ2*mZ2*mZ2 )/t/t/s/u - 12.*mZ2*sqr(t+mZ2)/sqr(sqr(beta))/s/s/t/u + ( 3.*t*t-26.*mZ2*t-25.*mZ2*mZ2)/sqr(beta)/s/t/u )*log(s/mZ2) + ( (-2.*t*t+8.*mZ2*t-2.*s*s-12.*mZ2*mZ2)/u + 4.*mZ2*mZ2*(2.*mZ2-s)/t/u) / (s*t) * ( 2.*sqr(log(-t/mZ2))-4.*log(-t/mZ2)*log((mZ2-t)/mZ2)-4.*ReLi2(t/mZ2)) + ( 4.*(t*t-5.*mZ2*t+s*s+10.*mZ2*mZ2)/s/u + 4.*mZ2*(-s*s+2.*mZ2*s-10.*mZ2*mZ2)/s/t/u + 8.*mZ2*mZ2*mZ2*(2.*mZ2-s)/t/t/s/u ) / (t-mZ2) * (pi*pi/2.+log(-t/mZ2)*log(-t/s)-1./2.*sqr(log(-t/mZ2))) + ( ( (2.*s-3.*mZ2)*t*t+(6.*mZ2*mZ2-8.*mZ2*s)*t+2.*s*s*s-4.*mZ2*s*s + 12.*mZ2*mZ2*s-3.*mZ2*mZ2*mZ2 ) /s/t/u + 12.*mZ2*mZ2*sqr(t+mZ2)/sqr(sqr(beta))/s/s/t/u - (mZ2*t*t-30.*mZ2*mZ2*t-27.*mZ2*mZ2*mZ2)/beta/beta/s/t/u ) / (beta*s) * (pi*pi/3.+sqr(log((1.-beta)/(1.+beta)))+4.*ReLi2(-(1.-beta)/(1.+beta))) + (4.*(t+4.*s-4.*mZ2)/3./s/u+4.*sqr(s-2.*mZ2)/3./s/t/u)*pi*pi ); swap(t,u); M_V_reg += 2.*s*sqr(gZ*2.)*4.*pi*alphaS_*CF_/NC_/sqr(4.*pi)/2. *( 2.*sqr(t+mZ2)/sqr(beta)/s/t/u + 4.*s/(t-mZ2)/u - ( 16.*t*t*t+(28.*s-68.*mZ2)*t*t+(18.*s*s-36.*mZ2*s+88.*mZ2*mZ2)*t + 18.*mZ2*mZ2*s-36.*mZ2*mZ2*mZ2 )/t/t/s/u + ( 12.*s/(t-mZ2)/u-4.*mZ2*s/sqr(t-mZ2)/u+2.*(t+4.*s)/s/u - 6.*(s*s+mZ2*mZ2)/s/t/u+6.*mZ2*mZ2*(2.*mZ2-s)/t/t/s/u )*log(-t/mZ2) + ( - ( 5.*t*t*t+(8.*s-18.*mZ2)*t*t+(6.*s*s+25.*mZ2*mZ2)*t + 6.*mZ2*mZ2*s-12.*mZ2*mZ2*mZ2 )/t/t/s/u - 12.*mZ2*sqr(t+mZ2)/sqr(sqr(beta))/s/s/t/u + ( 3.*t*t-26.*mZ2*t-25.*mZ2*mZ2)/sqr(beta)/s/t/u )*log(s/mZ2) + ( (-2.*t*t+8.*mZ2*t-2.*s*s-12.*mZ2*mZ2)/u + 4.*mZ2*mZ2*(2.*mZ2-s)/t/u) / (s*t) * ( 2.*sqr(log(-t/mZ2))-4.*log(-t/mZ2)*log((mZ2-t)/mZ2)-4.*ReLi2(t/mZ2)) + ( 4.*(t*t-5.*mZ2*t+s*s+10.*mZ2*mZ2)/s/u + 4.*mZ2*(-s*s+2.*mZ2*s-10.*mZ2*mZ2)/s/t/u + 8.*mZ2*mZ2*mZ2*(2.*mZ2-s)/t/t/s/u ) / (t-mZ2) * (pi*pi/2.+log(-t/mZ2)*log(-t/s)-1./2.*sqr(log(-t/mZ2))) + ( ( (2.*s-3.*mZ2)*t*t+(6.*mZ2*mZ2-8.*mZ2*s)*t+2.*s*s*s-4.*mZ2*s*s + 12.*mZ2*mZ2*s-3.*mZ2*mZ2*mZ2 ) /s/t/u + 12.*mZ2*mZ2*sqr(t+mZ2)/sqr(sqr(beta))/s/s/t/u - (mZ2*t*t-30.*mZ2*mZ2*t-27.*mZ2*mZ2*mZ2)/beta/beta/s/t/u ) / (beta*s) * (pi*pi/3.+sqr(log((1.-beta)/(1.+beta)))+4.*ReLi2(-(1.-beta)/(1.+beta))) + (4.*(t+4.*s-4.*mZ2)/3./s/u+4.*sqr(s-2.*mZ2)/3./s/t/u)*pi*pi ); return M_V_reg; } /***************************************************************************/ // t_u_M_R_qqb_ZZ is the real emission q + qb -> n + g matrix element // exactly as defined in Eqs. C.1 of NPB 357(1991)409-438, multiplied by // tk * uk! Energy2 MEPP2VVPowheg::t_u_M_R_qqb_ZZ(realVVKinematics R) const { // First the Born variables: Energy2 mZ2(R.k22r()); // Then the rest: Energy2 s(R.sr()); Energy2 tk(R.tkr()); Energy2 uk(R.ukr()); Energy2 q1(R.q1r()); Energy2 q2(R.q2r()); Energy2 q1h(R.q1hatr()); Energy2 q2h(R.q2hatr()); double cosThetaW(sqrt(1.-sin2ThetaW_)); double gV2,gA2,gX,gY,gZ; gV2 = sqr(guL_/2.-gW_/2./cosThetaW*2./3.*sin2ThetaW_); gA2 = sqr(guL_/2.+gW_/2./cosThetaW*2./3.*sin2ThetaW_); gX = sqrt(gV2*gV2+gA2*gA2+6.*gA2*gV2)/2.; gV2 = sqr(gdL_/2.+gW_/2./cosThetaW*1./3.*sin2ThetaW_); gA2 = sqr(gdL_/2.-gW_/2./cosThetaW*1./3.*sin2ThetaW_); gY = sqrt(gV2*gV2+gA2*gA2+6.*gA2*gV2)/2.; gZ = gX; if(abs(quark_->id())%2==1&&abs(antiquark_->id())%2==1) gZ = gY; Energy2 t_u_qqb(0.*GeV2); t_u_qqb = (2.*s)*sqr(gZ*2.)*4.*pi*alphaS_*CF_/NC_/2. 
* ( - ( tk*uk*uk+2.*s*uk*uk-tk*tk*uk - 2.*s*tk*uk+mZ2*(tk*tk-uk*uk+2.*s*uk-2.*s*tk-2.*s*s) )/q1h/q1/q2h/s*tk + 2.*(tk*uk*uk-mZ2*uk*(s+3.*tk)+mZ2*mZ2*(2.*uk-s))/q1/q2/s + ( tk*uk*(uk+s)-mZ2*(uk*uk+3.*tk*uk+3.*s*uk+s*tk) + 2.*mZ2*mZ2*(uk+tk+2.*s) )/q1h/q1/q2/s*tk + ( tk*(uk*uk+tk*uk-s*s)+mZ2*(4.*s*uk-3.*tk*uk-tk*tk+4.*s*s) )/q1h/q2/s - ( tk*uk+s*uk-s*tk-s*s+2.*mZ2*(s-tk) ) /q1h/q1/s*tk + q2*(tk*uk-s*uk-2.*s*tk-2.*s*s)/q1/q2h/s + 2.*(tk*uk-tk*tk-s*tk-s*s+mZ2*(2.*s-uk))/q1/s - 2.*mZ2*(uk*uk-2.*mZ2*uk+2.*mZ2*mZ2)/q1/q1/q2/s*tk + (2.*s*uk+tk*tk+3.*s*tk+2*s*s)/q1h/s + q1*(uk+s)*(uk+tk)/q1h/q2h/s + (tk*uk+s*uk+3.*s*tk+2.*s*s-mZ2*(uk+tk+2.*s))/q1h/q2h/s*uk + (uk-tk)/2./q1h/q2h/s*(q1*(uk+s)/q2/tk-q2*(tk+s)/q1/uk)*tk*uk + (tk-2.*mZ2)*(uk-2.*mZ2)/q1h/q1/q2h/q2*tk*uk - (q1*q1+q2*q2)/q1/q2 - 2.*mZ2*(q2-2.*mZ2)/q1/q1/s*tk ); swap(tk ,uk ); swap(q1 ,q2 ); swap(q1h,q2h); t_u_qqb += (2.*s)*sqr(gZ*2.)*4.*pi*alphaS_*CF_/NC_/2. * ( - ( tk*uk*uk+2.*s*uk*uk-tk*tk*uk - 2.*s*tk*uk+mZ2*(tk*tk-uk*uk+2.*s*uk-2.*s*tk-2.*s*s) )/q1h/q1/q2h/s*tk + 2.*(tk*uk*uk-mZ2*uk*(s+3.*tk)+mZ2*mZ2*(2.*uk-s))/q1/q2/s + ( tk*uk*(uk+s)-mZ2*(uk*uk+3.*tk*uk+3.*s*uk+s*tk) + 2.*mZ2*mZ2*(uk+tk+2.*s) )/q1h/q1/q2/s*tk + ( tk*(uk*uk+tk*uk-s*s)+mZ2*(4.*s*uk-3.*tk*uk-tk*tk+4.*s*s) )/q1h/q2/s - ( tk*uk+s*uk-s*tk-s*s+2.*mZ2*(s-tk) ) /q1h/q1/s*tk + q2*(tk*uk-s*uk-2.*s*tk-2.*s*s)/q1/q2h/s + 2.*(tk*uk-tk*tk-s*tk-s*s+mZ2*(2.*s-uk))/q1/s - 2.*mZ2*(uk*uk-2.*mZ2*uk+2.*mZ2*mZ2)/q1/q1/q2/s*tk + (2.*s*uk+tk*tk+3.*s*tk+2*s*s)/q1h/s + q1*(uk+s)*(uk+tk)/q1h/q2h/s + (tk*uk+s*uk+3.*s*tk+2.*s*s-mZ2*(uk+tk+2.*s))/q1h/q2h/s*uk + (uk-tk)/2./q1h/q2h/s*(q1*(uk+s)/q2/tk-q2*(tk+s)/q1/uk)*tk*uk + (tk-2.*mZ2)*(uk-2.*mZ2)/q1h/q1/q2h/q2*tk*uk - (q1*q1+q2*q2)/q1/q2 - 2.*mZ2*(q2-2.*mZ2)/q1/q1/s*tk ); swap(tk ,uk ); swap(q1 ,q2 ); swap(q1h,q2h); swap(q1 ,q1h); swap(q2 ,q2h); t_u_qqb += (2.*s)*sqr(gZ*2.)*4.*pi*alphaS_*CF_/NC_/2. * ( - ( tk*uk*uk+2.*s*uk*uk-tk*tk*uk - 2.*s*tk*uk+mZ2*(tk*tk-uk*uk+2.*s*uk-2.*s*tk-2.*s*s) )/q1h/q1/q2h/s*tk + 2.*(tk*uk*uk-mZ2*uk*(s+3.*tk)+mZ2*mZ2*(2.*uk-s))/q1/q2/s + ( tk*uk*(uk+s)-mZ2*(uk*uk+3.*tk*uk+3.*s*uk+s*tk) + 2.*mZ2*mZ2*(uk+tk+2.*s) )/q1h/q1/q2/s*tk + ( tk*(uk*uk+tk*uk-s*s)+mZ2*(4.*s*uk-3.*tk*uk-tk*tk+4.*s*s) )/q1h/q2/s - ( tk*uk+s*uk-s*tk-s*s+2.*mZ2*(s-tk) ) /q1h/q1/s*tk + q2*(tk*uk-s*uk-2.*s*tk-2.*s*s)/q1/q2h/s + 2.*(tk*uk-tk*tk-s*tk-s*s+mZ2*(2.*s-uk))/q1/s - 2.*mZ2*(uk*uk-2.*mZ2*uk+2.*mZ2*mZ2)/q1/q1/q2/s*tk + (2.*s*uk+tk*tk+3.*s*tk+2*s*s)/q1h/s + q1*(uk+s)*(uk+tk)/q1h/q2h/s + (tk*uk+s*uk+3.*s*tk+2.*s*s-mZ2*(uk+tk+2.*s))/q1h/q2h/s*uk + (uk-tk)/2./q1h/q2h/s*(q1*(uk+s)/q2/tk-q2*(tk+s)/q1/uk)*tk*uk + (tk-2.*mZ2)*(uk-2.*mZ2)/q1h/q1/q2h/q2*tk*uk - (q1*q1+q2*q2)/q1/q2 - 2.*mZ2*(q2-2.*mZ2)/q1/q1/s*tk ); swap(q1 ,q1h); swap(q2 ,q2h); swap(tk ,uk ); swap(q1 ,q2h); swap(q2 ,q1h); t_u_qqb += (2.*s)*sqr(gZ*2.)*4.*pi*alphaS_*CF_/NC_/2. 
* ( - ( tk*uk*uk+2.*s*uk*uk-tk*tk*uk - 2.*s*tk*uk+mZ2*(tk*tk-uk*uk+2.*s*uk-2.*s*tk-2.*s*s) )/q1h/q1/q2h/s*tk + 2.*(tk*uk*uk-mZ2*uk*(s+3.*tk)+mZ2*mZ2*(2.*uk-s))/q1/q2/s + ( tk*uk*(uk+s)-mZ2*(uk*uk+3.*tk*uk+3.*s*uk+s*tk) + 2.*mZ2*mZ2*(uk+tk+2.*s) )/q1h/q1/q2/s*tk + ( tk*(uk*uk+tk*uk-s*s)+mZ2*(4.*s*uk-3.*tk*uk-tk*tk+4.*s*s) )/q1h/q2/s - ( tk*uk+s*uk-s*tk-s*s+2.*mZ2*(s-tk) ) /q1h/q1/s*tk + q2*(tk*uk-s*uk-2.*s*tk-2.*s*s)/q1/q2h/s + 2.*(tk*uk-tk*tk-s*tk-s*s+mZ2*(2.*s-uk))/q1/s - 2.*mZ2*(uk*uk-2.*mZ2*uk+2.*mZ2*mZ2)/q1/q1/q2/s*tk + (2.*s*uk+tk*tk+3.*s*tk+2*s*s)/q1h/s + q1*(uk+s)*(uk+tk)/q1h/q2h/s + (tk*uk+s*uk+3.*s*tk+2.*s*s-mZ2*(uk+tk+2.*s))/q1h/q2h/s*uk + (uk-tk)/2./q1h/q2h/s*(q1*(uk+s)/q2/tk-q2*(tk+s)/q1/uk)*tk*uk + (tk-2.*mZ2)*(uk-2.*mZ2)/q1h/q1/q2h/q2*tk*uk - (q1*q1+q2*q2)/q1/q2 - 2.*mZ2*(q2-2.*mZ2)/q1/q1/s*tk ); swap(tk ,uk ); swap(q1 ,q2h); swap(q2 ,q1h); return t_u_qqb; } /***************************************************************************/ // M_B_WW is the Born matrix element exactly as defined in Eqs. 3.2-3.8 // of of NPB 410(1993)280-384. double MEPP2VVPowheg::M_Born_WW(bornVVKinematics B) const { Energy2 s(B.sb()); Energy2 t(B.tb()); Energy2 u(B.ub()); Energy2 mW2(B.k12b()); // N.B. the diboson masses are preserved in getting bool up_type = abs(quark_->id())%2==0 ? true : false; double Qi = up_type ? 2./3. : -1./3. ; double giL = up_type ? guL_/2. : gdL_/2.; double giR = up_type ? guR_/2. : gdR_/2.; double e2 = sqr(gW_)*sin2ThetaW_; double cos2ThetaW(1.-sin2ThetaW_); double ctt_i(gW_*gW_*gW_*gW_/16.); InvEnergy2 cts_i(gW_*gW_*e2/4./s *(Qi+2.*eZ_*giL/e2*s/(s-mW2/cos2ThetaW))); InvEnergy4 css_i(e2*e2/s/s*(sqr(Qi+eZ_*(giL+giR)/e2*s/(s-mW2/cos2ThetaW)) +sqr( eZ_*(giL-giR)/e2*s/(s-mW2/cos2ThetaW))) ); ctt_i *= 8.*Fij2_/gW_/gW_; cts_i *= sqrt(8.*Fij2_/gW_/gW_); if(quark_->id()!=-antiquark_->id()) { cts_i = 0./GeV2; css_i = 0./GeV2/GeV2; } if(!up_type) swap(t,u); double signf = up_type ? 1. : -1.; return 1./4./NC_ * ( ctt_i*( 16.*(u*t/mW2/mW2-1.)*(1./4.+mW2*mW2/t/t)+16.*s/mW2) - cts_i*( 16.*(u*t/mW2/mW2-1.)*(s/4.-mW2/2.-mW2*mW2/t) + 16.*s*(s/mW2-2.+2.*mW2/t) ) *signf + css_i*( 8.*(u*t/mW2/mW2-1.)*(s*s/4.-s*mW2+3.*mW2*mW2) + 8.*s*s*(s/mW2-4.) ) ); } /***************************************************************************/ // M_V_regular_WW is the regular part of the one-loop WW matrix element // exactly as defined in Eqs. C.1 - C.7 of of NPB 410(1993)280-324 *** // modulo a factor 1/(2s) ***, which is a flux factor that those authors // absorb in the matrix element. double MEPP2VVPowheg::M_V_regular_WW(realVVKinematics S) const { Energy2 s(S.bornVariables().sb()); Energy2 t(S.bornVariables().tb()); Energy2 u(S.bornVariables().ub()); Energy2 mW2(S.k12r()); // N.B. the diboson masses are preserved in getting double beta(S.betaxr()); // N.B. for x=1 \beta_x=\beta in NPB 383(1992)3-44. bool up_type = abs(quark_->id())%2==0 ? true : false; double Qi = up_type ? 2./3. : -1./3.; double giL = up_type ? guL_/2. : gdL_/2.; double giR = up_type ? guR_/2. : gdR_/2.; double e2 = sqr(gW_)*sin2ThetaW_; double cos2ThetaW(1.-sin2ThetaW_); double ctt_i(gW_*gW_*gW_*gW_/16.); InvEnergy2 cts_i(gW_*gW_*e2/4./s *(Qi+2.*eZ_*giL/e2*s/(s-mW2/cos2ThetaW))); InvEnergy4 css_i(e2*e2/s/s*(sqr(Qi+eZ_*(giL+giR)/e2*s/(s-mW2/cos2ThetaW)) +sqr( eZ_*(giL-giR)/e2*s/(s-mW2/cos2ThetaW))) ); ctt_i *= 8.*Fij2_/gW_/gW_; cts_i *= sqrt(8.*Fij2_/gW_/gW_); if(quark_->id()!=-antiquark_->id()) { cts_i = 0./GeV2; css_i = 0./GeV2/GeV2; } if(!up_type) swap(t,u); double signf = up_type ? 1. 
: -1.; InvEnergy4 TildeI4 = ( 2.*sqr(log(-t/mW2))-4.*log((mW2-t)/mW2)*log(-t/mW2) - 4.*ReLi2(t/mW2) )/s/t; InvEnergy2 TildeI3t = 1./(mW2-t) *(sqr(log(mW2/s))/2.-sqr(log(-t/s))/2.-pi*pi/2.); InvEnergy2 TildeI3l = 1./s/beta*( 4.*ReLi2((beta-1.)/(beta+1.)) + sqr(log((1.-beta)/(1.+beta))) + pi*pi/3.); double Fup1_st(0.); Fup1_st = 4.*(80.*t*t+73.*s*t-140.*mW2*t+72.*mW2*mW2)/t/t - 4.*sqr(4.*t+s)/s/beta/beta/t - 128.*(t+2.*s)/mW2 + 64.*t*(t+s)/mW2/mW2 - (32.*(t*t-3.*s*t-3.*mW2*mW2)/t/t+128.*s/(t-mW2))*log(-t/mW2) + ( 8.*(6.*t*t+8.*s*t-19.*mW2*t+12.*mW2*mW2)/t/t - (32.*t*t-128.*s*t-26.*s*s)/s/beta/beta/t + 6.*sqr(4.*t+s)/s/sqr(sqr(beta))/t )*log(s/mW2) + 32.*s*(2.*mW2*mW2/t-u)*TildeI4 - 64.*(t-mW2)*(2.*mW2*mW2/t/t-u/t)*TildeI3t + ( (16.*t*(4.*mW2-u)-49.*s*s+72.*mW2*s-48.*mW2*mW2)/2./t + 2.*(8.*t*t-14.*s*t-3.*s*s)/beta/beta/t - 3.*sqr(4.*t+s)/2./sqr(sqr(beta))/t )*TildeI3l + 32./3.*( 2.*(t+2.*s)/mW2 - (3.*t+2.*s-4.*mW2)/t - t*(t+s)/mW2/mW2 )*pi*pi; Energy2 Jup1_st(0.*GeV2); Jup1_st = -128.*(t*t+2.*s*t+2.*s*s)/mW2 - 16.*(t*t-21.*s*t-26.*mW2*t+34.*mW2*s+17.*mW2*mW2)/t + 64.*s*t*(t+s)/mW2/mW2 +32.*s*s/(t-mW2) + ( 16.*(t-5.*s+2.*mW2)-48.*mW2*(2.*s+mW2)/t + 64.*s*(2.*t+s)/(t-mW2) - 32.*s*s*t/sqr(t-mW2) )*log(-t/mW2) + ( 16.*(4.*t+s)/beta/beta - 16.*(3.*t-2.*s) + 48.*mW2*(2.*t-2.*s-mW2)/t )*log(s/mW2) + 16.*s*(t*(2.*s+u)-2.*mW2*(2.*s+mW2))*TildeI4 + 32.*(t-mW2)*(2.*mW2*(2.*s+mW2)/t-2.*s-u)*TildeI3t + ( 32.*s*t-12.*s*s+32.*mW2*mW2 - 16.*mW2*(2.*t+7.*s)-4.*s*(4.*t+s)/beta/beta )*TildeI3l + 32./3.*( 2.*(t*t+2.*s*t+2.*s*s)/mW2 - s*t*(t+s)/mW2/mW2-2.*mW2*(2.*t-2.*s-mW2)/t-t-4.*s )*pi*pi; Energy4 Kup1_st(0.*GeV2*GeV2); Kup1_st = 16.*( 12.*t*t+20.*s*t-24.*mW2*t+17.*s*s-4.*mW2*s+12.*mW2*mW2 + s*s*t*(t+s)/mW2/mW2-2.*s*(2.*t*t+3.*s*t+2.*s*s)/mW2) *(2.-pi*pi/3.); return pi*alphaS_*CF_/NC_/(sqr(4.*pi)) * ( ctt_i*Fup1_st - cts_i*Jup1_st*signf + css_i*Kup1_st ); } /***************************************************************************/ // t_u_M_R_qqb is the real emission q + qb -> n + g matrix element // exactly as defined in Eqs. C.1 of NPB 383(1992)3-44, multiplied by // tk * uk! Energy2 MEPP2VVPowheg::t_u_M_R_qqb_WW(realVVKinematics R) const { // First the Born variables: Energy2 s2(R.s2r()); Energy2 mW2(R.k12r()); // Then the rest: Energy2 s(R.sr()); Energy2 tk(R.tkr()); Energy2 uk(R.ukr()); Energy2 q1(R.q1r()); Energy2 q2(R.q2r()); Energy2 q1h(R.q1hatr()); Energy2 q2h(R.q2hatr()); bool up_type = abs(quark_->id())%2==0 ? true : false; double Qi = up_type ? 2./3. : -1./3.; double giL = up_type ? guL_/2. : gdL_/2.; double giR = up_type ? guR_/2. : gdR_/2.; double e2 = sqr(gW_)*sin2ThetaW_; double cos2ThetaW(1.-sin2ThetaW_); double ctt_i(gW_*gW_*gW_*gW_/16.); InvEnergy2 cts_i(gW_*gW_*e2/4./s2*(Qi+2.*eZ_*giL/e2*s2/(s2-mW2/cos2ThetaW))); InvEnergy4 css_i(e2*e2/s2/s2*(sqr(Qi+eZ_*(giL+giR)/e2*s2/(s2-mW2/cos2ThetaW)) +sqr( eZ_*(giL-giR)/e2*s2/(s2-mW2/cos2ThetaW))) ); ctt_i *= 8.*Fij2_/gW_/gW_; cts_i *= sqrt(8.*Fij2_/gW_/gW_); if(quark_->id()!=-antiquark_->id()) { cts_i = 0./GeV2; css_i = 0./GeV2/GeV2; } if(!up_type) { swap(q1,q1h); swap(q2,q2h); } double signf = up_type ? 1. 
: -1.; Energy2 t_u_Xup(0.*GeV2); Energy4 t_u_Yup(0.*GeV2*GeV2); Energy6 t_u_Zup(0.*GeV2*GeV2*GeV2); t_u_Xup = 32.*mW2*(tk*uk+3.*q2*uk+q2*s+q1*q2)/q1/q2/q2*tk + 32.*mW2*q1/q2/q2*uk - 64.*mW2*s/q2 - 32.*tk*(uk-q2)/q1/q2*tk + 64.*mW2*mW2*mW2/q1/q1/q2*tk - 16.*(2.*tk-2.*s-q2)/q2*uk + 16.*s*(2.*s+2.*q1+q2/2.)/q2 - 8.*(4.*tk+uk+9.*s+2.*q2+2.*q1)/mW2*tk - 16.*s*(2.*s+q1)/mW2 - 64.*mW2*mW2*(tk*uk+q2*tk+q1*uk-q2*s/2.)/q1/q2/q2 + 8.*s2*q1*(tk+s+q1)/mW2/mW2; swap(tk,uk); swap(q1,q2); t_u_Xup += 32.*mW2*(tk*uk+3.*q2*uk+q2*s+q1*q2)/q1/q2/q2*tk + 32.*mW2*q1/q2/q2*uk - 64.*mW2*s/q2 - 32.*tk*(uk-q2)/q1/q2*tk + 64.*mW2*mW2*mW2/q1/q1/q2*tk - 16.*(2.*tk-2.*s-q2)/q2*uk + 16.*s*(2.*s+2.*q1+q2/2.)/q2 - 8.*(4.*tk+uk+9.*s+2.*q2+2.*q1)/mW2*tk - 16.*s*(2.*s+q1)/mW2 - 64.*mW2*mW2*(tk*uk+q2*tk+q1*uk-q2*s/2.)/q1/q2/q2 + 8.*s2*q1*(tk+s+q1)/mW2/mW2; swap(tk,uk); swap(q1,q2); t_u_Yup = - 16.*tk*(uk*(uk+s+q1)+q2*(s-2.*q1))/q1/q2*tk - 32.*mW2*mW2*s/q2 - 32.*mW2*mW2*mW2/q1/q2*tk + 16.*(2.*q2*uk+s*s+q1*s+5.*q2*s+q1*q2+2.*q2*q2)/q2*tk - 16.*(q2*q2+s*s-q2*s)/q1*tk + 16.*s*(q1*s+3./2.*q2*s+q1*q2-q1*q1)/q2 + 16.*mW2*tk*(4.*uk+s+q1-2.*q2)/q1/q2*tk + 16.*mW2*(3.*s*uk+q1*uk-q1*s-3.*q2*s-q1*q1+q2*q2)/q1/q2*tk + 16.*mW2*s*(q2-4.*s+2.*q1)/q2 - 8.*s2*(4.*tk+uk+9.*s+4.*q1+2.*q2)/mW2*tk - 16.*s2*(2.*s*s+2.*q1*s+q1*q1)/mW2 - 32.*mW2*mW2*(tk+uk/2.+2.*s-q1)/q1/q2*tk + 8.*s2*s2*q1*(tk+s+q1)/mW2/mW2; swap(tk,uk); swap(q1,q2); t_u_Yup += - 16.*tk*(uk*(uk+s+q1)+q2*(s-2.*q1))/q1/q2*tk - 32.*mW2*mW2*s/q2 - 32.*mW2*mW2*mW2/q1/q2*tk + 16.*(2.*q2*uk+s*s+q1*s+5.*q2*s+q1*q2+2.*q2*q2)/q2*tk - 16.*(q2*q2+s*s-q2*s)/q1*tk + 16.*s*(q1*s+3./2.*q2*s+q1*q2-q1*q1)/q2 + 16.*mW2*tk*(4.*uk+s+q1-2.*q2)/q1/q2*tk + 16.*mW2*(3.*s*uk+q1*uk-q1*s-3.*q2*s-q1*q1+q2*q2)/q1/q2*tk + 16.*mW2*s*(q2-4.*s+2.*q1)/q2 - 8.*s2*(4.*tk+uk+9.*s+4.*q1+2.*q2)/mW2*tk - 16.*s2*(2.*s*s+2.*q1*s+q1*q1)/mW2 - 32.*mW2*mW2*(tk+uk/2.+2.*s-q1)/q1/q2*tk + 8.*s2*s2*q1*(tk+s+q1)/mW2/mW2; swap(tk,uk); swap(q1,q2); t_u_Zup = 8.*s2*(9.*tk+3.*uk+20.*s+10.*q1+4.*q2)*tk + 8.*s2*(17./2.*s*s+10.*q1*s+6.*q1*q1) - 4.*s2*s2*(4.*tk+uk+9.*s+6.*q1+2.*q2)/mW2*tk - 8.*s2*s2*(2.*s*s+3.*q1*s+2.*q1*q1)/mW2 - 16.*mW2*(2.*tk+5.*uk+7.*s+6.*q1+6.*q2)*tk - 16.*mW2*s*(s+6.*q1) + 4.*s2*s2*s2*q1*(tk+s+q1)/mW2/mW2 + 48.*mW2*mW2*s2; swap(tk,uk); swap(q1,q2); t_u_Zup += 8.*s2*(9.*tk+3.*uk+20.*s+10.*q1+4.*q2)*tk + 8.*s2*(17./2.*s*s+10.*q1*s+6.*q1*q1) - 4.*s2*s2*(4.*tk+uk+9.*s+6.*q1+2.*q2)/mW2*tk - 8.*s2*s2*(2.*s*s+3.*q1*s+2.*q1*q1)/mW2 - 16.*mW2*(2.*tk+5.*uk+7.*s+6.*q1+6.*q2)*tk - 16.*mW2*s*(s+6.*q1) + 4.*s2*s2*s2*q1*(tk+s+q1)/mW2/mW2 + 48.*mW2*mW2*s2; swap(tk,uk); swap(q1,q2); return -pi*alphaS_*CF_/NC_ * ( ctt_i*t_u_Xup - cts_i*t_u_Yup*signf + css_i*t_u_Zup ); } /***************************************************************************/ // The game here is to get this helicity amplitude squared to return all the // same values as t_u_M_R_qqb above, TIMES a further factor tk*uk! 
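// The amplitude below is assembled diagram by diagram from explicit ThePEG
// helicity wavefunctions, squared and summed over the external helicities,
// divided by the spin and colour averaging factor quoted near the end of the
// function, and finally multiplied by tk*uk (via UnitRemoval::InvE2) so that
// it can be compared directly with the analytic t_u_M_R_qqb result, which
// carries that factor by convention.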
Energy2 MEPP2VVPowheg::t_u_M_R_qqb_hel_amp(realVVKinematics R) const { using namespace ThePEG::Helicity; // qqb_hel_amps_.reset(ProductionMatrixElement(PDT::Spin1Half,PDT::Spin1Half, // PDT::Spin1,PDT::Spin1, // PDT::Spin1)); double sum_hel_amps_sqr(0.); tcPDPtr p1data(quark_); tcPDPtr p2data(antiquark_); tcPDPtr k1data(mePartonData()[2]); tcPDPtr k2data(mePartonData()[3]); tcPDPtr kdata(getParticleData(ParticleID::g)); if(k1data->id()==-24&&k2data->id()==24) swap(k1data,k2data); SpinorWaveFunction qSpinor(R.p1r(),p1data,incoming); SpinorBarWaveFunction qbSpinor(R.p2r(),p2data,incoming); vector<SpinorWaveFunction> q; vector<SpinorBarWaveFunction> qb; for(unsigned int ix=0;ix<2;ix++) { qSpinor.reset(ix); qbSpinor.reset(ix); q.push_back(qSpinor); qb.push_back(qbSpinor); } VectorWaveFunction v1Polarization(R.k1r(),k1data,outgoing); VectorWaveFunction v2Polarization(R.k2r(),k2data,outgoing); vector<VectorWaveFunction> v1; vector<VectorWaveFunction> v2; for(unsigned int ix=0;ix<3;ix++) { v1Polarization.reset(ix); v2Polarization.reset(ix); v1.push_back(v1Polarization); v2.push_back(v2Polarization); } VectorWaveFunction gPolarization(R.kr(),kdata,outgoing); vector<VectorWaveFunction> g; for(unsigned int ix=0;ix<3;ix+=2) { gPolarization.reset(ix); g.push_back(gPolarization); } AbstractFFVVertexPtr ffg = FFGvertex_; AbstractFFVVertexPtr ffv1 = k1data->id()==23 ? FFZvertex_ : FFWvertex_; AbstractFFVVertexPtr ffv2 = k2data->id()==23 ? FFZvertex_ : FFWvertex_; // Collecting information for intermediate fermions vector<tcPDPtr> tc; if(abs(k1data->id())==24&&abs(k2data->id())==24) { if(abs(p1data->id())%2==0) for(int ix=0;ix<3;++ix) tc.push_back(getParticleData(1+2*ix)); else for(int ix=0;ix<3;++ix) tc.push_back(getParticleData(2+2*ix)); } else if(k1data->id()==23&&k2data->id()==23) tc.push_back(p1data); else if(abs(k1data->id())==24&&k2data->id()==23) tc.push_back(p2data); // Loop over helicities summing the relevant diagrams for(unsigned int p1hel=0;p1hel<2;++p1hel) { for(unsigned int p2hel=0;p2hel<2;++p2hel) { for(unsigned int khel=0;khel<2;++khel) { SpinorWaveFunction p1_k = ffg->evaluate(mu_UV2(),5,p1data,q[p1hel],g[khel]); SpinorBarWaveFunction p2_k = ffg->evaluate(mu_UV2(),5,p2data,qb[p2hel],g[khel]); for(unsigned int k1hel=0;k1hel<3;++k1hel) { for(unsigned int k2hel=0;k2hel<3;++k2hel) { // If helicity is exactly conserved (massless quarks) skip if p1hel=p2hel // but if the production ME is required first fill it with (0.,0.). if((p1hel==p2hel)&&helicityConservation_) { // if(getMatrix) { // if(khel==0) // qqb_hel_amps_(p1hel,p2hel,k1hel,k2hel,0) = Complex(0.,0.); // else // qqb_hel_amps_(p1hel,p2hel,k1hel,k2hel,2) = Complex(0.,0.); // } continue; } vector<Complex> diagrams; // Get all t-channel diagram contributions tcPDPtr intermediate_t; for(unsigned int ix=0;ix<tc.size();ix++) { intermediate_t = (!(k1data->id()==24&&k2data->id()==-24)) ? 
p2data : tc[ix]; SpinorWaveFunction p1_v1 = ffv1->evaluate(scale(),5,intermediate_t,q[p1hel],v1[k1hel]); SpinorBarWaveFunction p2_v2 = ffv2->evaluate(scale(),5,intermediate_t,qb[p2hel],v2[k2hel]); // First calculate all the off-shell fermion currents // Now calculate the 6 t-channel diagrams // q+qb->g+v1+v2, q+qb->v1+g+v2, q+qb->v1+v2+g if(!((k1data->id()==24&&k2data->id()==-24)&&(abs(p1data->id())%2==1))) { diagrams.push_back(ffv1->evaluate(scale(),p1_k,p2_v2,v1[k1hel])); diagrams.push_back(ffg->evaluate(mu_UV2(),p1_v1,p2_v2,g[khel])); diagrams.push_back(ffv2->evaluate(scale(),p1_v1,p2_k,v2[k2hel])); } intermediate_t = (!(k1data->id()==24&&k2data->id()==-24)) ? p1data : tc[ix]; SpinorWaveFunction p1_v2 = ffv2->evaluate(scale(),5,intermediate_t,q[p1hel],v2[k2hel]); SpinorBarWaveFunction p2_v1 = ffv1->evaluate(scale(),5,intermediate_t,qb[p2hel],v1[k1hel]); // q+qb->g+v2+v1, q+qb->v2+g+v1, q+qb->v2+v1+g if(!((k1data->id()==24&&k2data->id()==-24)&&(abs(p1data->id())%2==0))) { diagrams.push_back(ffv2->evaluate(scale(),p1_k,p2_v1,v2[k2hel])); diagrams.push_back(ffg->evaluate(mu_UV2(),p1_v2,p2_v1,g[khel])); diagrams.push_back(ffv1->evaluate(scale(),p1_v2,p2_k,v1[k1hel])); } } // Note: choosing 3 as the second argument in WWWvertex_->evaluate() // sets option 3 in thepeg/Helicity/Vertex/VertexBase.cc , which // means the propagator does not contain a width factor (which is // good re. gauge invariance). // If W+Z / W-Z calculate the two V+jet-like s-channel diagrams if(abs(k1data->id())==24&&k2data->id()==23) { // The off-shell s-channel boson current VectorWaveFunction k1_k2 = WWWvertex_->evaluate(scale(),3,k1data->CC(),v2[k2hel],v1[k1hel]); // q+qb->g+v1*->g+v1+v2, q+qb->v1*+g->v1+v2+g diagrams.push_back(ffv1->evaluate(scale(),p1_k,qb[p2hel],k1_k2)); diagrams.push_back(ffv1->evaluate(scale(),q[p1hel],p2_k,k1_k2)); } // If W+W- calculate the four V+jet-like s-channel diagrams if((k1data->id()==24&&k2data->id()==-24)&&(p1data->id()==-p2data->id())) { // The off-shell s-channel boson current VectorWaveFunction k1_k2; // q+qb->g+Z0*->g+v1+v2,q+qb->Z0*+g->v1+v2+g, tcPDPtr Z0 = getParticleData(ParticleID::Z0); k1_k2 = WWWvertex_->evaluate(scale(),3,Z0,v2[k2hel],v1[k1hel]); diagrams.push_back(FFZvertex_->evaluate(scale(),p1_k,qb[p2hel],k1_k2)); diagrams.push_back(FFZvertex_->evaluate(scale(),q[p1hel],p2_k,k1_k2)); // q+qb->g+gamma*->g+v1+v2,q+qb->gamma*+g->v1+v2+g, tcPDPtr gamma = getParticleData(ParticleID::gamma); k1_k2 = WWWvertex_->evaluate(scale(),3,gamma,v2[k2hel],v1[k1hel]); diagrams.push_back(FFPvertex_->evaluate(scale(),p1_k,qb[p2hel],k1_k2)); diagrams.push_back(FFPvertex_->evaluate(scale(),q[p1hel],p2_k,k1_k2)); } // Add up all diagrams to get the total amplitude: Complex hel_amp(0.); for(unsigned int ix=0;ix<diagrams.size();ix++) hel_amp += diagrams[ix]; // If we need to fill the production ME we do it here: // if(getMatrix) { // if(khel==0) // qqb_hel_amps_(p1hel,p2hel,k1hel,k2hel,0) = hel_amp; // else // qqb_hel_amps_(p1hel,p2hel,k1hel,k2hel,2) = hel_amp; // } sum_hel_amps_sqr += norm(hel_amp); } } } } } // Fill up the remaining bits of the production ME, corresponding // to longitudinal gluon polarization, with (0.,0.). 
// if(getMatrix) { // for(unsigned int p1hel=0;p1hel<2;++p1hel) { // for(unsigned int p2hel=0;p2hel<2;++p2hel) { // for(unsigned int k1hel=0;k1hel<3;++k1hel) { // for(unsigned int k2hel=0;k2hel<3;++k2hel) { // qqb_hel_amps_(p1hel,p2hel,k1hel,k2hel,1) = Complex(0.,0.); // } // } // } // } // } // Spin and colour averaging factors = 1/4 * CF * 1/3 = 1/9 sum_hel_amps_sqr /= 9.; // Symmetry factor for identical Z bosons in the final state if(k1data->id()==23&&k2data->id()==23) sum_hel_amps_sqr /= 2.; return sum_hel_amps_sqr*R.tkr()*R.ukr()*UnitRemoval::InvE2; } /***************************************************************************/ // The game here is to get this helicity amplitude squared to return all the // same values as t_u_M_R_qg above, TIMES a further factor tk*uk! Energy2 MEPP2VVPowheg::t_u_M_R_qg_hel_amp(realVVKinematics R) const { using namespace ThePEG::Helicity; // qg_hel_amps_.reset(ProductionMatrixElement(PDT::Spin1Half,PDT::Spin1, // PDT::Spin1,PDT::Spin1, // PDT::Spin1Half)); double sum_hel_amps_sqr(0.); tcPDPtr p1data(quark_); tcPDPtr p2data(getParticleData(ParticleID::g)); tcPDPtr k1data(mePartonData()[2]); tcPDPtr k2data(mePartonData()[3]); tcPDPtr kdata (antiquark_->CC()); if(k1data->id()==-24&&k2data->id()==24) swap(k1data,k2data); SpinorWaveFunction qinSpinor(R.p1r(),p1data,incoming); SpinorBarWaveFunction qoutSpinor(R.kr(),kdata,outgoing); vector<SpinorWaveFunction> qin; vector<SpinorBarWaveFunction> qout; for(unsigned int ix=0;ix<2;ix++) { qinSpinor.reset(ix); qoutSpinor.reset(ix); qin.push_back(qinSpinor); qout.push_back(qoutSpinor); } VectorWaveFunction v1Polarization(R.k1r(),k1data,outgoing); VectorWaveFunction v2Polarization(R.k2r(),k2data,outgoing); vector<VectorWaveFunction> v1; vector<VectorWaveFunction> v2; for(unsigned int ix=0;ix<3;ix++) { v1Polarization.reset(ix); v2Polarization.reset(ix); v1.push_back(v1Polarization); v2.push_back(v2Polarization); } VectorWaveFunction gPolarization(R.p2r(),p2data,incoming); vector<VectorWaveFunction> g; for(unsigned int ix=0;ix<3;ix+=2) { gPolarization.reset(ix); g.push_back(gPolarization); } AbstractFFVVertexPtr ffg = FFGvertex_; AbstractFFVVertexPtr ffv1 = k1data->id()==23 ? FFZvertex_ : FFWvertex_; AbstractFFVVertexPtr ffv2 = k2data->id()==23 ? FFZvertex_ : FFWvertex_; // Collecting information for intermediate fermions vector<tcPDPtr> tc; if(abs(k1data->id())==24&&abs(k2data->id())==24) { if(abs(p1data->id())%2==0) for(int ix=0;ix<3;++ix) tc.push_back(getParticleData(1+2*ix)); else for(int ix=0;ix<3;++ix) tc.push_back(getParticleData(2+2*ix)); } else if(k1data->id()==23&&k2data->id()==23) tc.push_back(p1data); else if(abs(k1data->id())==24&&k2data->id()==23) tc.push_back(kdata->CC()); // Loop over helicities summing the relevant diagrams for(unsigned int p1hel=0;p1hel<2;++p1hel) { for(unsigned int p2hel=0;p2hel<2;++p2hel) { for(unsigned int khel=0;khel<2;++khel) { SpinorWaveFunction p1_p2 = ffg->evaluate(mu_UV2(),5,p1data,qin[p1hel],g[p2hel]); SpinorBarWaveFunction p2_k = ffg->evaluate(mu_UV2(),5,kdata->CC(),qout[khel],g[p2hel]); for(unsigned int k1hel=0;k1hel<3;++k1hel) { for(unsigned int k2hel=0;k2hel<3;++k2hel) { // If helicity is exactly conserved (massless quarks) skip if p1hel!=khel // but if the production ME is required first fill it with (0.,0.). 
if((p1hel!=khel)&&helicityConservation_) { // if(getMatrix) { // if(p2hel==0) // qg_hel_amps_(p1hel,0,k1hel,k2hel,khel) = Complex(0.,0.); // else // qg_hel_amps_(p1hel,2,k1hel,k2hel,khel) = Complex(0.,0.); // } continue; } vector<Complex> diagrams; // Get all t-channel diagram contributions tcPDPtr intermediate_q; for(unsigned int ix=0;ix<tc.size();ix++) { intermediate_q = (!(k1data->id()==24&&k2data->id()==-24)) ? antiquark_ : tc[ix]; SpinorWaveFunction p1_v1 = ffv1->evaluate(scale(),5,intermediate_q,qin[p1hel],v1[k1hel]); SpinorBarWaveFunction k_v2 = ffv2->evaluate(scale(),5,intermediate_q,qout[khel],v2[k2hel]); // First calculate all the off-shell fermion currents // Now calculate the 6 abelian diagrams // q+g->v1+v2+q with 2 t-channel propagators, 1 s- and 1 t-channel and 2 t-channel ones. if(!((k1data->id()==24&&k2data->id()==-24)&&(abs(p1data->id())%2==1))) { diagrams.push_back(ffv2->evaluate(scale(),p1_v1,p2_k,v2[k2hel])); diagrams.push_back(ffg->evaluate(mu_UV2(),p1_v1,k_v2,g[p2hel])); diagrams.push_back(ffv1->evaluate(scale(),p1_p2,k_v2,v1[k1hel])); } intermediate_q = (!(k1data->id()==24&&k2data->id()==-24)) ? p1data : tc[ix]; SpinorWaveFunction p1_v2 = ffv2->evaluate(scale(),5,intermediate_q,qin[p1hel],v2[k2hel]); SpinorBarWaveFunction k_v1 = ffv1->evaluate(scale(),5,intermediate_q,qout[khel],v1[k1hel]); // q+g->v2+v1+q, with 2 t-channel propagators, 1 s- and 1 t-channel and 2 t-channel ones. if(!((k1data->id()==24&&k2data->id()==-24)&&(abs(p1data->id())%2==0))) { diagrams.push_back(ffv1->evaluate(scale(),p1_v2,p2_k,v1[k1hel])); diagrams.push_back(ffg->evaluate(mu_UV2(),p1_v2,k_v1,g[p2hel])); diagrams.push_back(ffv2->evaluate(scale(),p1_p2,k_v1,v2[k2hel])); } } // If W+Z / W-Z calculate the two V+jet-like s-channel diagrams if(abs(k1data->id())==24&&k2data->id()==23) { // The off-shell s-channel boson current VectorWaveFunction k1_k2 = WWWvertex_->evaluate(scale(),3,k1data->CC(),v2[k2hel],v1[k1hel]); // q+qb->g+v1*->g+v1+v2, q+qb->v1*+g->v1+v2+g diagrams.push_back(ffv1->evaluate(scale(),p1_p2,qout[khel],k1_k2)); diagrams.push_back(ffv1->evaluate(scale(),qin[p1hel],p2_k,k1_k2)); } // If W+W- calculate the four V+jet-like s-channel diagrams if((k1data->id()==24&&k2data->id()==-24)&&(p1data->id()==kdata->id())) { // The off-shell s-channel boson current VectorWaveFunction k1_k2; // q+qb->g+Z0*->g+v1+v2,q+qb->Z0*+g->v1+v2+g, tcPDPtr Z0 = getParticleData(ParticleID::Z0); k1_k2 = WWWvertex_->evaluate(scale(),3,Z0,v2[k2hel],v1[k1hel]); diagrams.push_back(FFZvertex_->evaluate(scale(),p1_p2,qout[khel],k1_k2)); diagrams.push_back(FFZvertex_->evaluate(scale(),qin[p1hel],p2_k,k1_k2)); // q+qb->g+gamma*->g+v1+v2,q+qb->gamma*+g->v1+v2+g, tcPDPtr gamma = getParticleData(ParticleID::gamma); k1_k2 = WWWvertex_->evaluate(scale(),3,gamma,v2[k2hel],v1[k1hel]); diagrams.push_back(FFPvertex_->evaluate(scale(),p1_p2,qout[khel],k1_k2)); diagrams.push_back(FFPvertex_->evaluate(scale(),qin[p1hel],p2_k,k1_k2)); } // Add up all diagrams to get the total amplitude: Complex hel_amp(0.); for(unsigned int ix=0;ix<diagrams.size();ix++) hel_amp += diagrams[ix]; // If we need to fill the production ME we do it here: // if(getMatrix) { // if(p2hel==0) // qg_hel_amps_(p1hel,0,k1hel,k2hel,khel) = hel_amp; // else // qg_hel_amps_(p1hel,2,k1hel,k2hel,khel) = hel_amp; // } sum_hel_amps_sqr += norm(hel_amp); } } } } } // Fill up the remaining bits of the production ME, corresponding // to longitudinal gluon polarization, with (0.,0.). 
// if(getMatrix) { // for(unsigned int p1hel=0;p1hel<2;++p1hel) { // for(unsigned int k1hel=0;k1hel<3;++k1hel) { // for(unsigned int k2hel=0;k2hel<3;++k2hel) { // for(unsigned int khel=0;khel<2;++khel) { // qg_hel_amps_(p1hel,1,k1hel,k2hel,khel) = Complex(0.,0.); // } // } // } // } // } // Spin and colour averaging factors = 1/4 * TR * 1/3 = 1/24 sum_hel_amps_sqr /= 24.; // Symmetry factor for identical Z bosons in the final state if(k1data->id()==23&&k2data->id()==23) sum_hel_amps_sqr /= 2.; return sum_hel_amps_sqr*R.tkr()*R.ukr()*UnitRemoval::InvE2; } /***************************************************************************/ // The game here is to get this helicity amplitude squared to return all the // same values as t_u_M_R_gqb above, TIMES a further factor tk*uk! Energy2 MEPP2VVPowheg::t_u_M_R_gqb_hel_amp(realVVKinematics R) const { using namespace ThePEG::Helicity; // gqb_hel_amps_.reset(ProductionMatrixElement(PDT::Spin1,PDT::Spin1Half, // PDT::Spin1,PDT::Spin1, // PDT::Spin1Half)); double sum_hel_amps_sqr(0.); tcPDPtr p1data(getParticleData(ParticleID::g)); tcPDPtr p2data(antiquark_); tcPDPtr k1data(mePartonData()[2]); tcPDPtr k2data(mePartonData()[3]); tcPDPtr kdata (quark_->CC()); if(k1data->id()==-24&&k2data->id()==24) swap(k1data,k2data); SpinorBarWaveFunction qbinSpinor(R.p2r(),p2data,incoming); SpinorWaveFunction qboutSpinor(R.kr(),kdata,outgoing); vector<SpinorBarWaveFunction> qbin; vector<SpinorWaveFunction> qbout; for(unsigned int ix=0;ix<2;ix++) { qbinSpinor.reset(ix); qboutSpinor.reset(ix); qbin.push_back(qbinSpinor); qbout.push_back(qboutSpinor); } VectorWaveFunction v1Polarization(R.k1r(),k1data,outgoing); VectorWaveFunction v2Polarization(R.k2r(),k2data,outgoing); vector<VectorWaveFunction> v1; vector<VectorWaveFunction> v2; for(unsigned int ix=0;ix<3;ix++) { v1Polarization.reset(ix); v2Polarization.reset(ix); v1.push_back(v1Polarization); v2.push_back(v2Polarization); } VectorWaveFunction gPolarization(R.p1r(),p1data,incoming); vector<VectorWaveFunction> g; for(unsigned int ix=0;ix<3;ix+=2) { gPolarization.reset(ix); g.push_back(gPolarization); } AbstractFFVVertexPtr ffg = FFGvertex_; AbstractFFVVertexPtr ffv1 = k1data->id()==23 ? FFZvertex_ : FFWvertex_; AbstractFFVVertexPtr ffv2 = k2data->id()==23 ? FFZvertex_ : FFWvertex_; // Collecting information for intermediate fermions vector<tcPDPtr> tc; if(abs(k1data->id())==24&&abs(k2data->id())==24) { if(abs(p2data->id())%2==0) for(int ix=0;ix<3;++ix) tc.push_back(getParticleData(1+2*ix)); else for(int ix=0;ix<3;++ix) tc.push_back(getParticleData(2+2*ix)); } else if(k1data->id()==23&&k2data->id()==23) tc.push_back(p2data); else if(abs(k1data->id())==24&&k2data->id()==23) tc.push_back(kdata->CC()); // Loop over helicities summing the relevant diagrams for(unsigned int p1hel=0;p1hel<2;++p1hel) { for(unsigned int p2hel=0;p2hel<2;++p2hel) { for(unsigned int khel=0;khel<2;++khel) { SpinorBarWaveFunction p1_p2 = ffg->evaluate(mu_UV2(),5,p2data,qbin[p2hel],g[p1hel]); SpinorWaveFunction p1_k = ffg->evaluate(mu_UV2(),5,kdata->CC(),qbout[khel],g[p1hel]); for(unsigned int k1hel=0;k1hel<3;++k1hel) { for(unsigned int k2hel=0;k2hel<3;++k2hel) { // If helicity is exactly conserved (massless quarks) skip if p2hel!=khel // but if the production ME is required first fill it with (0.,0.). 
if((p2hel!=khel)&&helicityConservation_) { // if(getMatrix) { // if(p1hel==0) // gqb_hel_amps_(0,p2hel,k1hel,k2hel,khel) = Complex(0.,0.); // else // gqb_hel_amps_(2,p2hel,k1hel,k2hel,khel) = Complex(0.,0.); // } continue; } vector<Complex> diagrams; // Get all t-channel diagram contributions tcPDPtr intermediate_q; for(unsigned int ix=0;ix<tc.size();ix++) { intermediate_q = (!(k1data->id()==24&&k2data->id()==-24)) ? quark_ : tc[ix]; SpinorBarWaveFunction p2_v1 = ffv1->evaluate(scale(),5,intermediate_q,qbin[p2hel],v1[k1hel]); SpinorWaveFunction k_v2 = ffv2->evaluate(scale(),5,intermediate_q,qbout[khel],v2[k2hel]); // First calculate all the off-shell fermion currents // Now calculate the 6 abelian diagrams q+g->v1+v2+q // with 2 t-channel propagators, 1 s- and 1 t-channel // and 2 t-channel ones. if(!((k1data->id()==24&&k2data->id()==-24)&&(abs(p2data->id())%2==0))) { diagrams.push_back(ffv2->evaluate(scale(),p1_k,p2_v1,v2[k2hel])); diagrams.push_back(ffg->evaluate(mu_UV2(),k_v2,p2_v1,g[p1hel])); diagrams.push_back(ffv1->evaluate(scale(),k_v2,p1_p2,v1[k1hel])); } intermediate_q = (!(k1data->id()==24&&k2data->id()==-24)) ? p2data : tc[ix]; SpinorBarWaveFunction p2_v2 = ffv2->evaluate(scale(),5,intermediate_q,qbin[p2hel],v2[k2hel]); SpinorWaveFunction k_v1 = ffv1->evaluate(scale(),5,intermediate_q,qbout[khel],v1[k1hel]); // q+g->v2+v1+q, with 2 t-channel propagators, 1 s- and 1 t-channel and 2 t-channel ones. if(!((k1data->id()==24&&k2data->id()==-24)&&(abs(p2data->id())%2==1))) { diagrams.push_back(ffv1->evaluate(scale(),p1_k,p2_v2,v1[k1hel])); diagrams.push_back(ffg->evaluate(mu_UV2(),k_v1,p2_v2,g[p1hel])); diagrams.push_back(ffv2->evaluate(scale(),k_v1,p1_p2,v2[k2hel])); } } // If W+Z / W-Z calculate the two V+jet-like s-channel diagrams if(abs(k1data->id())==24&&k2data->id()==23) { // The off-shell s-channel boson current VectorWaveFunction k1_k2 = WWWvertex_->evaluate(scale(),3,k1data->CC(),v2[k2hel],v1[k1hel]); // q+qb->g+v1*->g+v1+v2, q+qb->v1*+g->v1+v2+g diagrams.push_back(ffv1->evaluate(scale(),qbout[khel],p1_p2,k1_k2)); diagrams.push_back(ffv1->evaluate(scale(),p1_k,qbin[p2hel],k1_k2)); } // If W+W- calculate the four V+jet-like s-channel diagrams if((k1data->id()==24&&k2data->id()==-24)&&(p2data->id()==kdata->id())) { // The off-shell s-channel boson current VectorWaveFunction k1_k2; // q+qb->g+Z0*->g+v1+v2,q+qb->Z0*+g->v1+v2+g, tcPDPtr Z0 = getParticleData(ParticleID::Z0); k1_k2 = WWWvertex_->evaluate(scale(),3,Z0,v2[k2hel],v1[k1hel]); diagrams.push_back(FFZvertex_->evaluate(scale(),qbout[khel],p1_p2,k1_k2)); diagrams.push_back(FFZvertex_->evaluate(scale(),p1_k,qbin[p2hel],k1_k2)); // q+qb->g+gamma*->g+v1+v2,q+qb->gamma*+g->v1+v2+g, tcPDPtr gamma = getParticleData(ParticleID::gamma); k1_k2 = WWWvertex_->evaluate(scale(),3,gamma,v2[k2hel],v1[k1hel]); diagrams.push_back(FFPvertex_->evaluate(scale(),qbout[khel],p1_p2,k1_k2)); diagrams.push_back(FFPvertex_->evaluate(scale(),p1_k,qbin[p2hel],k1_k2)); } // Add up all diagrams to get the total amplitude: Complex hel_amp(0.); for(unsigned int ix=0;ix<diagrams.size();ix++) hel_amp += diagrams[ix]; // If we need to fill the production ME we do it here: // if(getMatrix) { // if(p1hel==0) // gqb_hel_amps_(0,p2hel,k1hel,k2hel,khel) = hel_amp; // else // gqb_hel_amps_(2,p2hel,k1hel,k2hel,khel) = hel_amp; // } sum_hel_amps_sqr += norm(hel_amp); } } } } } // Fill up the remaining bits of the production ME, corresponding // to longitudinal gluon polarization, with (0.,0.). 
// if(getMatrix) { // for(unsigned int p2hel=0;p2hel<2;++p2hel) { // for(unsigned int k1hel=0;k1hel<3;++k1hel) { // for(unsigned int k2hel=0;k2hel<3;++k2hel) { // for(unsigned int khel=0;khel<2;++khel) { // gqb_hel_amps_(1,p2hel,k1hel,k2hel,khel) = Complex(0.,0.); // } // } // } // } // } // Spin and colour averaging factors = 1/4 * TR * 1/3 = 1/24 sum_hel_amps_sqr /= 24.; // Symmetry factor for identical Z bosons in the final state if(k1data->id()==23&&k2data->id()==23) sum_hel_amps_sqr /= 2.; return sum_hel_amps_sqr*R.tkr()*R.ukr()*UnitRemoval::InvE2; } double MEPP2VVPowheg::lo_me() const { using namespace ThePEG::Helicity; double sum_hel_amps_sqr(0.); tcPDPtr p1data(quark_); tcPDPtr p2data(antiquark_); tcPDPtr k1data(mePartonData()[2]); tcPDPtr k2data(mePartonData()[3]); if(k1data->id()==-24&&k2data->id()==24) swap(k1data,k2data); // Should never actually occur. SpinorWaveFunction qSpinor(B_.p1b(),p1data,incoming); SpinorBarWaveFunction qbSpinor(B_.p2b(),p2data,incoming); vector<SpinorWaveFunction> q; vector<SpinorBarWaveFunction> qb; for(unsigned int ix=0;ix<2;ix++) { qSpinor.reset(ix); qbSpinor.reset(ix); q.push_back(qSpinor); qb.push_back(qbSpinor); } VectorWaveFunction v1Polarization(B_.k1b(),k1data,outgoing); VectorWaveFunction v2Polarization(B_.k2b(),k2data,outgoing); vector<VectorWaveFunction> v1; vector<VectorWaveFunction> v2; for(unsigned int ix=0;ix<3;ix++) { v1Polarization.reset(ix); v2Polarization.reset(ix); v1.push_back(v1Polarization); v2.push_back(v2Polarization); } AbstractFFVVertexPtr ffv1 = k1data->id()==23 ? FFZvertex_ : FFWvertex_; AbstractFFVVertexPtr ffv2 = k2data->id()==23 ? FFZvertex_ : FFWvertex_; // Collecting information for intermediate fermions vector<tcPDPtr> tc; if(abs(k1data->id())==24&&abs(k2data->id())==24) { if(abs(p1data->id())%2==0) for(int ix=0;ix<3;++ix) tc.push_back(getParticleData(1+2*ix)); else for(int ix=0;ix<3;++ix) tc.push_back(getParticleData(2+2*ix)); } else if(k1data->id()==23&&k2data->id()==23) tc.push_back(p1data); else if(abs(k1data->id())==24&&k2data->id()==23) tc.push_back(p2data); // Loop over helicities summing the relevant diagrams for(unsigned int p1hel=0;p1hel<2;++p1hel) { for(unsigned int p2hel=0;p2hel<2;++p2hel) { if((p1hel==p2hel)&&helicityConservation_) continue; for(unsigned int k1hel=0;k1hel<3;++k1hel) { for(unsigned int k2hel=0;k2hel<3;++k2hel) { vector<Complex> diagrams; // Get all t-channel diagram contributions tcPDPtr intermediate_t; for(unsigned int ix=0;ix<tc.size();ix++) { intermediate_t = (!(k1data->id()==24&&k2data->id()==-24)) ? p2data : tc[ix]; SpinorWaveFunction p1_v1 = ffv1->evaluate(scale(),5,intermediate_t,q[p1hel],v1[k1hel]); // First calculate all the off-shell fermion currents // Now calculate the 6 t-channel diagrams // q+qb->v1+v2 if(!((k1data->id()==24&&k2data->id()==-24)&&(abs(p1data->id())%2==1))) diagrams.push_back(ffv2->evaluate(scale(),p1_v1,qb[p2hel],v2[k2hel])); intermediate_t = (!(k1data->id()==24&&k2data->id()==-24)) ? 
p1data : tc[ix]; SpinorWaveFunction p1_v2 = ffv2->evaluate(scale(),5,intermediate_t,q[p1hel],v2[k2hel]); // q+qb->v2+v1 if(!((k1data->id()==24&&k2data->id()==-24)&&(abs(p1data->id())%2==0))) diagrams.push_back(ffv1->evaluate(scale(),p1_v2,qb[p2hel],v1[k1hel])); } // If W+Z / W-Z calculate the two V+jet-like s-channel diagrams if(abs(k1data->id())==24&&k2data->id()==23) { // The off-shell s-channel boson current VectorWaveFunction k1_k2 = WWWvertex_->evaluate(scale(),3,k1data->CC(),v2[k2hel],v1[k1hel]); // q+qb->v1*->v1+v2 diagrams.push_back(ffv1->evaluate(scale(),q[p1hel],qb[p2hel],k1_k2)); } // If W+W- calculate the four V+jet-like s-channel diagrams if((k1data->id()==24&&k2data->id()==-24)&&(p1data->id()==-p2data->id())) { // The off-shell s-channel boson current VectorWaveFunction k1_k2; // q+qb->Z0*->v1+v2 tcPDPtr Z0 = getParticleData(ParticleID::Z0); k1_k2 = WWWvertex_->evaluate(scale(),3,Z0,v2[k2hel],v1[k1hel]); diagrams.push_back(FFZvertex_->evaluate(scale(),q[p1hel],qb[p2hel],k1_k2)); // q+qb->gamma*->v1+v2 tcPDPtr gamma = getParticleData(ParticleID::gamma); k1_k2 = WWWvertex_->evaluate(scale(),3,gamma,v2[k2hel],v1[k1hel]); diagrams.push_back(FFPvertex_->evaluate(scale(),q[p1hel],qb[p2hel],k1_k2)); } // Add up all diagrams to get the total amplitude: Complex hel_amp(0.); for(unsigned int ix=0;ix<diagrams.size();ix++) hel_amp += diagrams[ix]; // If we need to fill the production ME we do it here: // if(getMatrix) lo_hel_amps_(p1hel,p2hel,k1hel,k2hel) = hel_amp; sum_hel_amps_sqr += norm(hel_amp); } } } } // Spin and colour averaging factors = 1/4 * 1/3 = 1/12 sum_hel_amps_sqr /= 12.; // Symmetry factor for identical Z bosons in the final state if(k1data->id()==23&&k2data->id()==23) sum_hel_amps_sqr /= 2.; return sum_hel_amps_sqr; } RealEmissionProcessPtr MEPP2VVPowheg::generateHardest(RealEmissionProcessPtr born, ShowerInteraction::Type inter) { if(inter==ShowerInteraction::QED) return RealEmissionProcessPtr(); // Now we want to set these data vectors according to the particles we've // received from the current 2->2 hard collision: vector<PPtr> particlesToShower; for(unsigned int ix=0;ix<born->bornIncoming().size();++ix) particlesToShower.push_back(born->bornIncoming()[ix]); qProgenitor_ = particlesToShower[0]; qbProgenitor_ = particlesToShower[1]; showerQuark_ = particlesToShower[0]; showerAntiquark_ = particlesToShower[1]; qHadron_ = dynamic_ptr_cast<tcBeamPtr>(born->hadrons()[0]->dataPtr()); qbHadron_ = dynamic_ptr_cast<tcBeamPtr>(born->hadrons()[1]->dataPtr()); if(showerQuark_->id()<0) { swap(qProgenitor_,qbProgenitor_); swap(showerQuark_,showerAntiquark_); swap(qHadron_,qbHadron_); } // In _our_ calculation we basically define the +z axis as being given // by the direction of the incoming quark for q+qb & q+g processes and // the incoming gluon for g+qbar processes. So now we might need to flip // the beams, bjorken x values, colliding partons accordingly: flipped_ = showerQuark_->momentum().z()<ZERO ? 
true : false; vector<unsigned int> cmap; // undecayed gauge bosons if(born->bornOutgoing().size()==2) { for(unsigned int ix=0;ix<born->bornOutgoing().size();++ix) { particlesToShower.push_back(born->bornOutgoing()[ix]); } V1_ = particlesToShower[2]; V2_ = particlesToShower[3]; } else if(born->bornOutgoing().size()==4) { // If the vector bosons have decayed already then we may want to // to get the children_ (and any associated photons) to correct // spin correlations: children_.clear(); map<ColinePtr,ColinePtr> clines; for(unsigned int ix=0;ix<born->bornOutgoing().size();++ix) { tPPtr original = born->bornOutgoing()[ix]; PPtr copy = original->dataPtr()->produceParticle(original->momentum()); children_.push_back(copy); cmap.push_back(ix); // sort out colour if(original->colourLine()) { map<ColinePtr,ColinePtr>::iterator cit = clines.find(original->colourLine()); if(cit!=clines.end()) { cit->second->addColoured(copy); } else { ColinePtr newline = new_ptr(ColourLine()); clines[original->colourLine()] = newline; newline->addColoured(copy); } } // and anticolour else if(original->antiColourLine()) { map<ColinePtr,ColinePtr>::iterator cit = clines.find(original->antiColourLine()); if(cit!=clines.end()) { cit->second->addAntiColoured(copy); } else { ColinePtr newline = new_ptr(ColourLine()); clines[original->antiColourLine()] = newline; newline->addAntiColoured(copy); } } } assert(children_.size()==4); PPtr V[2]; for(unsigned int ix=0;ix<2;++ix) { int charge = children_[0+2*ix]->dataPtr()->iCharge()+ children_[1+2*ix]->dataPtr()->iCharge(); Lorentz5Momentum psum =children_[0+2*ix]->momentum()+ children_[1+2*ix]->momentum(); psum.rescaleMass(); if(charge==-3) V[ix] = getParticleData(ParticleID::Wminus)->produceParticle(psum); else if(charge==0) V[ix] = getParticleData(ParticleID::Z0)->produceParticle(psum); else if(charge==3) V[ix] = getParticleData(ParticleID::Wplus)->produceParticle(psum); else assert(false); } V1_ = V[0]; V2_ = V[1]; if(children_[0]->id()<0) { swap(children_[0],children_[1]); swap(cmap[0],cmap[1]); } if(children_[2]->id()<0) { swap(children_[2],children_[3]); swap(cmap[2],cmap[3]); } } else assert(false); gluon_ = getParticleData(ParticleID::g)->produceParticle(); // Abort the run if V1_ and V2_ are not just pointers to different gauge bosons if(!V1_||!V2_) throw Exception() << "MEPP2VVPowheg::generateHardest()\n" << "one or both of the gauge boson pointers is null." << Exception::abortnow; if(!(abs(V1_->id())==24||V1_->id()==23)||!(abs(V2_->id())==24||V2_->id()==23)) throw Exception() << "MEPP2VVPowheg::generateHardest()\nmisidentified gauge bosons" << "V1_ = " << V1_->PDGName() << "\n" << "V2_ = " << V2_->PDGName() << "\n" << Exception::abortnow; // Order the gauge bosons in the same way as in the NLO calculation // (the same way as in the NLO matrix element): // W+(->e+,nu_e) W-(->e-,nu_ebar) (MCFM: 61 [nproc]) bool order = false; if((V1_->id()==-24&&V2_->id()== 24) || // W+/-(mu+,nu_mu / mu-,nu_mubar) Z(nu_e,nu_ebar) (MCFM: 72+77 [nproc]) (V1_->id()== 23&&abs(V2_->id())== 24) ) { swap(V1_,V2_); order = true; swap(cmap[0],cmap[2]); swap(cmap[1],cmap[3]); swap(children_[0],children_[2]); swap(children_[1],children_[3]); } // *** N.B. 
*** // We should not have to do a swap in the ZZ case, even if the different // (off-shell) masses of the Z's are taken into account by generating // the Born variables using the WZ LO/NLO matrix element(s), because // those transformed matrix elements are, according to mathematica, // symmetric in the momenta (and therefore also the masses) of the 2 Z's. // Now we want to construct a bornVVKinematics object. The // constructor for that needs all 4 momenta, q, qbar, V1_, V2_ // in that order, as well as the Bjorken xq and xqbar. // Get the momenta first: vector<Lorentz5Momentum> theBornMomenta; theBornMomenta.push_back(showerQuark_->momentum()); theBornMomenta.push_back(showerAntiquark_->momentum()); theBornMomenta.push_back(V1_->momentum()); theBornMomenta.push_back(V2_->momentum()); // N.B. if the showerQuark_ travels in the -z direction the born kinematics object // will detect this and rotate all particles by pi about the y axis! // Leading order momentum fractions: tcPPtr qHadron = generator()->currentEvent()->primaryCollision()->incoming().first; tcPPtr qbHadron = generator()->currentEvent()->primaryCollision()->incoming().second; assert(qHadron->children().size()>0&&qbHadron->children().size()>0); if(qHadron->children()[0]->id()<0) swap(qHadron,qbHadron); // quark and antiquark momentum fractions respectively double xa = showerQuark_ ->momentum().z()/qHadron ->momentum().z(); double xb = showerAntiquark_->momentum().z()/qbHadron->momentum().z(); // Create the object containing all 2->2 __kinematic__ information: B_ = bornVVKinematics(theBornMomenta,xa,xb); // lo_me_ is the colour & spin averaged n-body matrix element squared: lo_me_ = lo_me(true); // Attempt to generate some radiative variables and their kinematics: vector<Lorentz5Momentum> theRealMomenta; channel_ = 999; if(!getEvent(theRealMomenta,channel_)) { born->pT()[ShowerInteraction::QCD] = min_pT_; return born; } // Set the maximum pT for subsequent emissions: born->pT()[ShowerInteraction::QCD] = pT_ < min_pT_ ? 
min_pT_ : pT_; // Determine whether the quark or antiquark emitted: fermionNumberOfMother_=0; if((channel_==0&&theRealMomenta[0].z()/theRealMomenta[4].z()>=ZERO)|| channel_==2) fermionNumberOfMother_ = 1; else if((channel_==0&&theRealMomenta[0].z()/theRealMomenta[4].z()<ZERO)|| channel_==1) fermionNumberOfMother_ = -1; assert(fermionNumberOfMother_!=0); // If the quark in the original tree was travelling in the -z direction // then we need to unflip the event (flips are automatically carried out // when the original quark travels in the in -z direction when the // bornVVKinematics object is created): if(flipped_) for(unsigned int ix=0;ix<theRealMomenta.size();ix++) theRealMomenta[ix].rotateY(-Constants::pi); // Randomly rotate the event about the beam axis: double randomPhi(UseRandom::rnd()*2.*Constants::pi); for(unsigned int ix=0;ix<theRealMomenta.size();ix++) theRealMomenta[ix].rotateZ(randomPhi); // Warn if momentum conservation is not obeyed: Lorentz5Momentum inMinusOut(theRealMomenta[0]+theRealMomenta[1] -theRealMomenta[2]-theRealMomenta[3] -theRealMomenta[4]); if(inMinusOut.t()>0.1*GeV||inMinusOut.x()>0.1*GeV|| inMinusOut.y()>0.1*GeV||inMinusOut.z()>0.1*GeV) cout << "MEPP2VVPowheg::generateHardest\n" << "Momentum imbalance in V1 V2 rest frame\n" << "P_in minus P_out = " << inMinusOut/GeV << endl; // From the radiative kinematics we now have to form ShowerParticle objects: PPtr p1,p2,k; PPtr k1 = V1_->dataPtr()->produceParticle(theRealMomenta[2]); PPtr k2 = V2_->dataPtr()->produceParticle(theRealMomenta[3]); // q+qbar -> V1+V2+g if(channel_==0) { p1 = showerQuark_ ->dataPtr()->produceParticle(theRealMomenta[0]); p2 = showerAntiquark_->dataPtr()->produceParticle(theRealMomenta[1]); k = gluon_ ->dataPtr()->produceParticle(theRealMomenta[4]); k->incomingColour(p1); k->incomingColour(p2,true); } // q+g -> V1+V2+q else if(channel_==1) { p1 = showerQuark_ ->dataPtr() ->produceParticle(theRealMomenta[0]); p2 = gluon_ ->dataPtr() ->produceParticle(theRealMomenta[1]); k = showerAntiquark_->dataPtr()->CC()->produceParticle(theRealMomenta[4]); k->incomingColour(p2); p2->colourConnect(p1); } // g+qbar -> V1+V2+qbar else { p1 = gluon_ ->dataPtr() ->produceParticle(theRealMomenta[0]); p2 = showerAntiquark_->dataPtr() ->produceParticle(theRealMomenta[1]); k = showerQuark_ ->dataPtr()->CC()->produceParticle(theRealMomenta[4]); k->incomingColour(p1,true); p1->colourConnect(p2,true); } Lorentz5Momentum pmother,pspect; if(fermionNumberOfMother_==1) { pmother = theRealMomenta[0]-theRealMomenta[4]; pspect = theRealMomenta[1]; } else { pmother = theRealMomenta[1]-theRealMomenta[4]; pspect = theRealMomenta[0]; } unsigned int iemit = fermionNumberOfMother_==1 ? 0 : 1; unsigned int ispect = fermionNumberOfMother_==1 ? 
1 : 0; // fill the output if(showerQuark_ !=born->bornIncoming()[0]) { born->incoming().push_back(p2); born->incoming().push_back(p1); swap(iemit,ispect); } else { born->incoming().push_back(p1); born->incoming().push_back(p2); } born->emitter (iemit); born->spectator(ispect); pair<double,double> xnew; for(unsigned int ix=0;ix<born->incoming().size();++ix) { double x = born->incoming()[ix]->momentum().rho()/born->hadrons()[ix]->momentum().rho(); if(ix==0) xnew.first = x; else if (ix==1) xnew.second = x; } born->x(xnew); born->interaction(ShowerInteraction::QCD); // if gauge bosons not decayed, we're done if(born->bornOutgoing().size()==2) { born->emitted(4); if(!order) { born->outgoing().push_back(k1); born->outgoing().push_back(k2); } else { born->outgoing().push_back(k2); born->outgoing().push_back(k1); } born->outgoing().push_back(k); return born; } // Recalculate the hard vertex for this event: // For spin correlations, if an emission occurs go calculate the relevant // combination of amplitudes for the ProductionMatrixElement. if(realMESpinCorrelations_) { // Here we reset the realVVKinematics n+1 momenta to be those // of the lab frame in order to calculate the spin correlations. // Note that these momenta are not used for anything else after // this. R_.p1r(theRealMomenta[0]); R_.p2r(theRealMomenta[1]); R_.k1r(theRealMomenta[2]); R_.k2r(theRealMomenta[3]); R_.kr (theRealMomenta[4]); if(channel_==0) t_u_M_R_qqb_hel_amp(R_,true); else if(channel_==1) t_u_M_R_qg_hel_amp (R_,true); else if(channel_==2) t_u_M_R_gqb_hel_amp(R_,true); recalculateVertex(); } born->emitted(6); for(unsigned int ix=0;ix<children_.size();++ix) born->outgoing().push_back(children_[cmap[ix]]); born->outgoing().push_back(k); // return the result return born; } double MEPP2VVPowheg::getResult(int channel, realVVKinematics R, Energy pT) { // This routine should return the integrand of the exact Sudakov form factor, // defined precisely as // KH 19th August - next 2 lines changed for phi in 0->pi not 0->2pi // \Delta(pT) = exp[ - \int_{pT}^{pTmax} dpT dYk d\phi/pi * getResult(...) ] // (Where phi is in 0->pi NOT 0->2*pi !) 
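  // In practice getEvent() uses this integrand in a veto algorithm: trial pT
  // values are generated from the crude overestimate
  // prefactor_[j]*alpha_1loop(pT)/pT, and each trial is accepted with
  // probability getResult(...)/overestimate, so that the accepted emissions
  // are distributed according to the Sudakov form factor defined above.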
// Get pi for the prefactor: using Constants::pi; // Get the VV invariant mass^2: Energy2 p2 = B_.sb(); // Get the momentum fractions for the n+1 body event: double x1 = R.x1r(); double x2 = R.x2r(); // Reject the event if the x1 and x2 values are outside the phase space: if(x1<0.||x1>1.||x2<0.||x2>1.||x1*x2<p2/sqr(generator()->maximumCMEnergy())) return 0.; // Get the momentum fractions for the n body event: double x1b = B_.x1b(); double x2b = B_.x2b(); // Get the mandelstam variables needed to calculate the n+1 body matrix element: Energy2 s = R.sr() ; Energy2 t_u_MR_o_MB; double lo_lumi, nlo_lumi; // The luminosity function for the leading order n-body process: lo_lumi = qHadron_ ->pdf()->xfx(qHadron_ ,showerQuark_ ->dataPtr(),PDFScale_,x1b)* qbHadron_->pdf()->xfx(qbHadron_,showerAntiquark_->dataPtr(),PDFScale_,x2b); // Now we calculate the luminosity functions (product of the two pdfs) for the // real emission process(es) and also their matrix elements: // q + qbar -> V + V + g if(channel==0) { nlo_lumi = qHadron_ ->pdf()->xfx(qHadron_ ,showerQuark_ ->dataPtr() ,PDFScale_,x1) * qbHadron_->pdf()->xfx(qbHadron_,showerAntiquark_->dataPtr() ,PDFScale_,x2); t_u_MR_o_MB = t_u_M_R_qqb_hel_amp(R,false)/lo_me_; } // q + g -> V + V + q else if(channel==1) { nlo_lumi = qHadron_ ->pdf()->xfx(qHadron_ ,showerQuark_->dataPtr() ,PDFScale_,x1) * qbHadron_->pdf()->xfx(qbHadron_,getParticleData(ParticleID::g),PDFScale_,x2); t_u_MR_o_MB = t_u_M_R_qg_hel_amp(R,false)/lo_me_; } // g + qbar -> V + V + qbar else { nlo_lumi = qHadron_ ->pdf()->xfx(qHadron_ ,getParticleData(ParticleID::g),PDFScale_,x1) * qbHadron_->pdf()->xfx(qbHadron_,showerAntiquark_->dataPtr() ,PDFScale_,x2); t_u_MR_o_MB = t_u_M_R_gqb_hel_amp(R,false)/lo_me_; } // Multiply ratio of the real emission matrix elements to the Born matrix element // by the ratio of the pdfs for the real emission and born processes to get theWeight if(lo_lumi<=0.||nlo_lumi<=0.) return 0.; else return t_u_MR_o_MB * ( nlo_lumi/lo_lumi * p2/s ) * sqr(p2/s)/8./pi/pi / pT / p2 * GeV; } bool MEPP2VVPowheg::getEvent(vector<Lorentz5Momentum> & theRealMomenta, unsigned int & channel) { // Invariant mass of the colliding hadrons: Energy2 S = sqr(generator()->maximumCMEnergy()); // Born variables which are preserved (mass and rapidity of the diboson system): Energy2 p2 = B_.sb(); double Yb = B_.Yb(); // Born variables which are not preserved but are needed (the momentum fractions): double x1b(B_.x1b()), x2b(B_.x2b()); double x12b(x1b*x1b), x22b(x2b*x2b); // Maximum jet pT (half of the hadronic C.O.M. energy. N.B. this is overestimated a lot): Energy starting_pT = sqrt(S)/2.; // Initialize the pT_ *integration limit* i.e. 
the pT of the generated emission: pT_ = ZERO; // The pT *integration variable*: Energy pT; // The x_1 & x_2 momentum fractions corresponding to incoming momenta p1 & p2: double x1_(-999.), x2_(-999.); double x1 (-999.), x2 (-999.); // The jet rapidity *integration variable* and its limits: double Yk, minYk(-8.0), maxYk(8.0); // The theta2 integration variable (the azimuthal angle of the gluon w.r.t // V1 in the V1 & V2 rest frame: double theta2; // The realVVKinematics object corresponding to the current integration // set of integration variables: realVVKinematics R; // The veto algorithm rejection weight and a corresponding flag: double rejectionWeight; bool rejectEmission ; // Initialize the flag indicating the selected radiation channel: channel=999; // Some product of constants used for the crude distribution: double a(0.); for(int j=0;j<3;j++) { pT=starting_pT; a =(maxYk-minYk)*prefactor_[j]/2./b0_; do { // Generate next pT according to exp[- \int^{pTold}_{pT} dpT a*(power-1)/(pT^power)] // pT = GeV/pow( pow(GeV/pT,power_-1.) - log(UseRandom::rnd())/a // , 1./(power_-1.) ); // Generate next pT according to exp[- \int^{pTold}_{pT} dpT alpha1loop*prefactor/pT ] pT = LambdaQCD_*exp( 0.5*exp( log(log(sqr(pT/LambdaQCD_)))+log(UseRandom::rnd())/a ) ); // Generate rapidity of the jet: Yk = minYk + UseRandom::rnd()*(maxYk - minYk); // Generate the theta2 radiative variable: // KH 19th August - next line changed for phi in 0->pi not 0->2pi // theta2 = UseRandom::rnd() * 2.*Constants::pi; theta2 = UseRandom::rnd() * Constants::pi; // eT of the diboson system: Energy eT = sqrt(pT*pT+p2); // Calculate the eT and then solve for x_{\oplus} & x_{\ominus}: x1 = (pT*exp( Yk)+eT*exp( Yb))/sqrt(S); x2 = (pT*exp(-Yk)+eT*exp(-Yb))/sqrt(S); // Calculate the xr radiative variable: double xr(p2/(x1*x2*S)); // Then use this to calculate the y radiative variable: double y(-((xr+1.)/(xr-1.))*(xr*sqr(x1/x1b)-1.)/(xr*sqr(x1/x1b)+1.)); // The y value above should equal the one commented out below this line: // double y( ((xr+1.)/(xr-1.))*(xr*sqr(x2/x2b)-1.)/(xr*sqr(x2/x2b)+1.)); // Now we get the lower limit on the x integration, xbar: double omy(1.-y), opy(1.+y); double xbar1 = 2.*opy*x12b/(sqrt(sqr(1.+x12b)*sqr(omy)+16.*y*x12b)+omy*(1.-x1b)*(1.+x1b)); double xbar2 = 2.*omy*x22b/(sqrt(sqr(1.+x22b)*sqr(opy)-16.*y*x22b)+opy*(1.-x2b)*(1.+x2b)); double xbar = max(xbar1,xbar2); // Now we can calculate xtilde: double xt = (xr-xbar)/(1.-xbar); // Finally we can make the realVVKinematics object: R = realVVKinematics(B_,xt,y,theta2); // The next thing we have to do is set the QCD, EW and PDF scales using R: setTheScales(pT); // ... and so calculate rejection weight: rejectionWeight = getResult(j,R,pT); // If generating according to exp[- \int^{pTold}_{pT} dpT a*(power-1)/(pT^power)] // rejectionWeight/= showerAlphaS_->overestimateValue()*prefactor_[j]*pow(GeV/pT,power_); // If generating according to exp[- \int^{pTold}_{pT} dpT alpha1loop*prefactor/pT ] rejectionWeight/= 1./b0_/log(sqr(pT/LambdaQCD_))*prefactor_[j]*GeV/pT; rejectEmission = UseRandom::rnd()>rejectionWeight; // The event is a no-emission event if pT goes past min_pT_ - basically set to // outside the histogram bounds (hopefully histogram objects just ignore it then). if(pT<min_pT_) { pT=ZERO; rejectEmission = false; } if(rejectionWeight>1.) 
{ ostringstream stream; stream << "MEPP2VVPowheg::getEvent weight for channel " << j << " is greater than one: " << rejectionWeight << endl; generator()->logWarning( Exception(stream.str(), Exception::warning) ); } } while(rejectEmission); // set pT of emission etc if(pT>pT_) { channel = j; pT_ = pT; Yk_ = Yk; R_ = R ; x1_ = x1; x2_ = x2; } } // Was this an (overall) no emission event? if(pT_<min_pT_) { pT_ = ZERO; channel = 3; } if(channel==3) return false; if(channel>3) throw Exception() << "MEPP2VVPowheg::getEvent() channel = " << channel << " pT = " << pT/GeV << " pT_ = " << pT_/GeV << Exception::abortnow; // Work out the momenta in the lab frame, reserving the mass and rapidity // of the VV system: LorentzRotation yzRotation; yzRotation.setRotateX(-atan2(pT_/GeV,sqrt(p2)/GeV)); LorentzRotation boostFrompTisZero; boostFrompTisZero.setBoostY(-pT_/sqrt(p2+pT_*pT_)); LorentzRotation boostFromYisZero; boostFromYisZero.setBoostZ(tanh(Yb)); theRealMomenta.resize(5); theRealMomenta[0] = Lorentz5Momentum(ZERO,ZERO, x1_*sqrt(S)/2., x1_*sqrt(S)/2.,ZERO); theRealMomenta[1] = Lorentz5Momentum(ZERO,ZERO,-x2_*sqrt(S)/2., x2_*sqrt(S)/2.,ZERO); theRealMomenta[2] = boostFromYisZero*boostFrompTisZero*yzRotation*(R_.k1r()); theRealMomenta[3] = boostFromYisZero*boostFrompTisZero*yzRotation*(R_.k2r()); theRealMomenta[4] = Lorentz5Momentum(ZERO, pT_, pT_*sinh(Yk_), pT_*cosh(Yk_),ZERO); return true; } void MEPP2VVPowheg::setTheScales(Energy pT) { // Work out the scales we want to use in the matrix elements and the pdfs: // Scale for alpha_S: pT^2 of the diboson system. QCDScale_ = max(pT*pT,sqr(min_pT_)); // Scale for real emission PDF: // pT^2+mVV^2 - as mcfm does in the case of a single W/Z boson). // Energy2 PDFScale_ = max(R.pT2_in_lab(),sqr(min_pT_))+R.s2r(); // pT^2 - as advocated by Nason & Ridolfi for ZZ production & Alioli et al for gg->h: PDFScale_ = max(pT*pT,sqr(min_pT_)); // Scale of electroweak vertices: mVV^2 the invariant mass of the diboson system. // EWScale_ = B_.sb(); // ... And this choice is more like what can be seen in mcatnlo_vbmain.f (weird). EWScale_ = 0.5*(B_.k12b()+B_.k22b()); return; } /***************************************************************************/ // This is identical to the code in the Powheg matrix element. It should // equal t_u_M_R_qqb in there, which is supposed to be the real emission ME // times tk*uk. 
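// (Returning |M_R|^2 * tk * uk rather than |M_R|^2 itself keeps the returned
// quantity finite in the soft and collinear regions, where tk and/or uk vanish.)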
Energy2 MEPP2VVPowheg::t_u_M_R_qqb_hel_amp(realVVKinematics R, bool getMatrix) const { using namespace ThePEG::Helicity; ProductionMatrixElement qqb_hel_amps(PDT::Spin1Half,PDT::Spin1Half, PDT::Spin1 ,PDT::Spin1 , PDT::Spin1); double sum_hel_amps_sqr(0.); tcPDPtr p1data(showerQuark_->dataPtr()); tcPDPtr p2data(showerAntiquark_->dataPtr()); tcPDPtr k1data(V1_->dataPtr()); tcPDPtr k2data(V2_->dataPtr()); tcPDPtr kdata(getParticleData(ParticleID::g)); if(k1data->id()==-24&&k2data->id()==24) swap(k1data,k2data); SpinorWaveFunction qSpinor(R.p1r(),p1data,incoming); SpinorBarWaveFunction qbSpinor(R.p2r(),p2data,incoming); vector<SpinorWaveFunction> q; vector<SpinorBarWaveFunction> qb; for(unsigned int ix=0;ix<2;ix++) { qSpinor.reset(ix); qbSpinor.reset(ix); q.push_back(qSpinor); qb.push_back(qbSpinor); } VectorWaveFunction v1Polarization(R.k1r(),k1data,outgoing); VectorWaveFunction v2Polarization(R.k2r(),k2data,outgoing); vector<VectorWaveFunction> v1; vector<VectorWaveFunction> v2; for(unsigned int ix=0;ix<3;ix++) { v1Polarization.reset(ix); v2Polarization.reset(ix); v1.push_back(v1Polarization); v2.push_back(v2Polarization); } VectorWaveFunction gPolarization(R.kr(),kdata,outgoing); vector<VectorWaveFunction> g; for(unsigned int ix=0;ix<3;ix+=2) { gPolarization.reset(ix); g.push_back(gPolarization); } AbstractFFVVertexPtr ffg = FFGvertex_; AbstractFFVVertexPtr ffv1 = k1data->id()==23 ? FFZvertex_ : FFWvertex_; AbstractFFVVertexPtr ffv2 = k2data->id()==23 ? FFZvertex_ : FFWvertex_; // Collecting information for intermediate fermions vector<tcPDPtr> tc; if(abs(k1data->id())==24&&abs(k2data->id())==24) { if(abs(p1data->id())%2==0) for(int ix=0;ix<3;++ix) tc.push_back(getParticleData(1+2*ix)); else for(int ix=0;ix<3;++ix) tc.push_back(getParticleData(2+2*ix)); } else if(k1data->id()==23&&k2data->id()==23) tc.push_back(p1data); else if(abs(k1data->id())==24&&k2data->id()==23) tc.push_back(p2data); // Loop over helicities summing the relevant diagrams for(unsigned int p1hel=0;p1hel<2;++p1hel) { for(unsigned int p2hel=0;p2hel<2;++p2hel) { for(unsigned int khel=0;khel<2;++khel) { SpinorWaveFunction p1_k = ffg->evaluate(QCDScale_,5,p1data,q[p1hel],g[khel]); SpinorBarWaveFunction p2_k = ffg->evaluate(QCDScale_,5,p2data,qb[p2hel],g[khel]); for(unsigned int k1hel=0;k1hel<3;++k1hel) { for(unsigned int k2hel=0;k2hel<3;++k2hel) { // If helicity is exactly conserved (massless quarks) skip if p1hel=p2hel // but if the production ME is required first fill it with (0.,0.). if((p1hel==p2hel)&&helicityConservation_) { if(getMatrix) { if(khel==0) qqb_hel_amps(p1hel,p2hel,k1hel,k2hel,0) = Complex(0.,0.); else qqb_hel_amps(p1hel,p2hel,k1hel,k2hel,2) = Complex(0.,0.); } continue; } vector<Complex> diagrams; // Get all t-channel diagram contributions tcPDPtr intermediate_t; for(unsigned int ix=0;ix<tc.size();ix++) { intermediate_t = (!(k1data->id()==24&&k2data->id()==-24)) ? p2data : tc[ix]; // Note: choosing 5 as the second argument ffvX_->evaluate() sets // option 5 in thepeg/Helicity/Vertex/VertexBase.cc, which makes // the (fermion) propagator denominator massless: 1/p^2. 
// If W+Z / W-Z calculate the two V+jet-like s-channel diagrams SpinorWaveFunction p1_v1 = ffv1->evaluate(EWScale_,5,intermediate_t,q[p1hel],v1[k1hel]); SpinorBarWaveFunction p2_v2 = ffv2->evaluate(EWScale_,5,intermediate_t,qb[p2hel],v2[k2hel]); // First calculate all the off-shell fermion currents // Now calculate the 6 t-channel diagrams // q+qb->g+v1+v2, q+qb->v1+g+v2, q+qb->v1+v2+g if(!((k1data->id()==24&&k2data->id()==-24)&&(abs(p1data->id())%2==1))) { diagrams.push_back(ffv1->evaluate(EWScale_,p1_k,p2_v2,v1[k1hel])); diagrams.push_back(ffg->evaluate(QCDScale_,p1_v1,p2_v2,g[khel])); diagrams.push_back(ffv2->evaluate(EWScale_,p1_v1,p2_k,v2[k2hel])); } intermediate_t = (!(k1data->id()==24&&k2data->id()==-24)) ? p1data : tc[ix]; SpinorWaveFunction p1_v2 = ffv2->evaluate(EWScale_,5,intermediate_t,q[p1hel],v2[k2hel]); SpinorBarWaveFunction p2_v1 = ffv1->evaluate(EWScale_,5,intermediate_t,qb[p2hel],v1[k1hel]); // q+qb->g+v2+v1, q+qb->v2+g+v1, q+qb->v2+v1+g if(!((k1data->id()==24&&k2data->id()==-24)&&(abs(p1data->id())%2==0))) { diagrams.push_back(ffv2->evaluate(EWScale_,p1_k,p2_v1,v2[k2hel])); diagrams.push_back(ffg->evaluate(QCDScale_,p1_v2,p2_v1,g[khel])); diagrams.push_back(ffv1->evaluate(EWScale_,p1_v2,p2_k,v1[k1hel])); } } // Note: choosing 3 as the second argument in WWWvertex_->evaluate() // sets option 3 in thepeg/Helicity/Vertex/VertexBase.cc , which // means the propagator does not contain a width factor (which is // good re. gauge invariance). // If W+Z / W-Z calculate the two V+jet-like s-channel diagrams if(abs(k1data->id())==24&&k2data->id()==23) { // The off-shell s-channel boson current VectorWaveFunction k1_k2 = WWWvertex_->evaluate(EWScale_,3,k1data->CC(),v2[k2hel],v1[k1hel]); // q+qb->g+v1*->g+v1+v2, q+qb->v1*+g->v1+v2+g diagrams.push_back(ffv1->evaluate(EWScale_,p1_k,qb[p2hel],k1_k2)); diagrams.push_back(ffv1->evaluate(EWScale_,q[p1hel],p2_k,k1_k2)); } // If W+W- calculate the four V+jet-like s-channel diagrams if((k1data->id()==24&&k2data->id()==-24)&&(p1data->id()==-p2data->id())) { // The off-shell s-channel boson current VectorWaveFunction k1_k2; // q+qb->g+Z0*->g+v1+v2,q+qb->Z0*+g->v1+v2+g, tcPDPtr Z0 = getParticleData(ParticleID::Z0); k1_k2 = WWWvertex_->evaluate(EWScale_,3,Z0,v2[k2hel],v1[k1hel]); diagrams.push_back(FFZvertex_->evaluate(EWScale_,p1_k,qb[p2hel],k1_k2)); diagrams.push_back(FFZvertex_->evaluate(EWScale_,q[p1hel],p2_k,k1_k2)); // q+qb->g+gamma*->g+v1+v2,q+qb->gamma*+g->v1+v2+g, tcPDPtr gamma = getParticleData(ParticleID::gamma); k1_k2 = WWWvertex_->evaluate(EWScale_,3,gamma,v2[k2hel],v1[k1hel]); diagrams.push_back(FFPvertex_->evaluate(EWScale_,p1_k,qb[p2hel],k1_k2)); diagrams.push_back(FFPvertex_->evaluate(EWScale_,q[p1hel],p2_k,k1_k2)); } // Add up all diagrams to get the total amplitude: Complex hel_amp(0.); for(unsigned int ix=0;ix<diagrams.size();ix++) hel_amp += diagrams[ix]; // If we need to fill the production ME we do it here: if(getMatrix) { if(khel==0) qqb_hel_amps(p1hel,p2hel,k1hel,k2hel,0) = hel_amp; else qqb_hel_amps(p1hel,p2hel,k1hel,k2hel,2) = hel_amp; } sum_hel_amps_sqr += norm(hel_amp); } } } } } // Fill up the remaining bits of the production ME, corresponding // to longitudinal gluon polarization, with (0.,0.). 
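  // (The ProductionMatrixElement was booked with PDT::Spin1 for the gluon, so
  // it carries three helicity indices for it; only the transverse entries 0
  // and 2 are physical for the massless gluon, and the middle index is simply
  // zeroed below.)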
if(getMatrix) { for(unsigned int p1hel=0;p1hel<2;++p1hel) { for(unsigned int p2hel=0;p2hel<2;++p2hel) { for(unsigned int k1hel=0;k1hel<3;++k1hel) { for(unsigned int k2hel=0;k2hel<3;++k2hel) { qqb_hel_amps(p1hel,p2hel,k1hel,k2hel,1) = Complex(0.,0.); } } } } } // Calculate the production density matrix: if(getMatrix) { for(unsigned int k1hel=0;k1hel<3;++k1hel) { for(unsigned int k1helpr=0;k1helpr<3;++k1helpr) { for(unsigned int k2hel=0;k2hel<3;++k2hel) { for(unsigned int k2helpr=0;k2helpr<3;++k2helpr) { Complex theElement(0.,0.); // For each k1hel, k1helpr, k2hel, k2helpr sum over fermion and gluon spins... for(unsigned int p1hel=0;p1hel<2;++p1hel) { for(unsigned int p2hel=0;p2hel<2;++p2hel) { for(unsigned int khel=0;khel<3;khel+=2) { theElement += qqb_hel_amps(p1hel,p2hel,k1hel ,k2hel ,khel) *conj(qqb_hel_amps(p1hel,p2hel,k1helpr,k2helpr,khel)); } } } // ...and then set the production matrix element to the sum: productionMatrix_[k1hel][k1helpr][k2hel][k2helpr] = theElement; } } } } } // Spin and colour averaging factors = 1/4 * CF * 1/3 = 1/9 sum_hel_amps_sqr /= 9.; // Symmetry factor for identical Z bosons in the final state if(k1data->id()==23&&k2data->id()==23) sum_hel_amps_sqr /= 2.; return sum_hel_amps_sqr*R.tkr()*R.ukr()*UnitRemoval::InvE2; } /***************************************************************************/ // This is identical to the code in the Powheg matrix element. It should // equal t_u_M_R_qg in there, which is supposed to be the real emission ME // times tk*uk. Energy2 MEPP2VVPowheg::t_u_M_R_qg_hel_amp(realVVKinematics R, bool getMatrix) const { using namespace ThePEG::Helicity; ProductionMatrixElement qg_hel_amps(PDT::Spin1Half,PDT::Spin1, PDT::Spin1,PDT::Spin1, PDT::Spin1Half); double sum_hel_amps_sqr(0.); tcPDPtr p1data(showerQuark_->dataPtr()); tcPDPtr p2data(getParticleData(ParticleID::g)); tcPDPtr k1data(V1_->dataPtr()); tcPDPtr k2data(V2_->dataPtr()); tcPDPtr kdata (showerAntiquark_->dataPtr()->CC()); if(k1data->id()==-24&&k2data->id()==24) swap(k1data,k2data); SpinorWaveFunction qinSpinor(R.p1r(),p1data,incoming); SpinorBarWaveFunction qoutSpinor(R.kr(),kdata,outgoing); vector<SpinorWaveFunction> qin; vector<SpinorBarWaveFunction> qout; for(unsigned int ix=0;ix<2;ix++) { qinSpinor.reset(ix); qoutSpinor.reset(ix); qin.push_back(qinSpinor); qout.push_back(qoutSpinor); } VectorWaveFunction v1Polarization(R.k1r(),k1data,outgoing); VectorWaveFunction v2Polarization(R.k2r(),k2data,outgoing); vector<VectorWaveFunction> v1; vector<VectorWaveFunction> v2; for(unsigned int ix=0;ix<3;ix++) { v1Polarization.reset(ix); v2Polarization.reset(ix); v1.push_back(v1Polarization); v2.push_back(v2Polarization); } VectorWaveFunction gPolarization(R.p2r(),p2data,incoming); vector<VectorWaveFunction> g; for(unsigned int ix=0;ix<3;ix+=2) { gPolarization.reset(ix); g.push_back(gPolarization); } AbstractFFVVertexPtr ffg = FFGvertex_; AbstractFFVVertexPtr ffv1 = k1data->id()==23 ? FFZvertex_ : FFWvertex_; AbstractFFVVertexPtr ffv2 = k2data->id()==23 ? 
FFZvertex_ : FFWvertex_; // Collecting information for intermediate fermions vector<tcPDPtr> tc; if(abs(k1data->id())==24&&abs(k2data->id())==24) { if(abs(p1data->id())%2==0) for(int ix=0;ix<3;++ix) tc.push_back(getParticleData(1+2*ix)); else for(int ix=0;ix<3;++ix) tc.push_back(getParticleData(2+2*ix)); } else if(k1data->id()==23&&k2data->id()==23) tc.push_back(p1data); else if(abs(k1data->id())==24&&k2data->id()==23) tc.push_back(kdata->CC()); // Loop over helicities summing the relevant diagrams for(unsigned int p1hel=0;p1hel<2;++p1hel) { for(unsigned int p2hel=0;p2hel<2;++p2hel) { for(unsigned int khel=0;khel<2;++khel) { SpinorWaveFunction p1_p2 = ffg->evaluate(QCDScale_,5,p1data,qin[p1hel],g[p2hel]); SpinorBarWaveFunction p2_k = ffg->evaluate(QCDScale_,5,kdata->CC(),qout[khel],g[p2hel]); for(unsigned int k1hel=0;k1hel<3;++k1hel) { for(unsigned int k2hel=0;k2hel<3;++k2hel) { // If helicity is exactly conserved (massless quarks) skip if p1hel!=khel // but if the production ME is required first fill it with (0.,0.). if((p1hel!=khel)&&helicityConservation_) { if(getMatrix) { if(p2hel==0) qg_hel_amps(p1hel,0,k1hel,k2hel,khel) = Complex(0.,0.); else qg_hel_amps(p1hel,2,k1hel,k2hel,khel) = Complex(0.,0.); } continue; } vector<Complex> diagrams; // Get all t-channel diagram contributions tcPDPtr intermediate_q; for(unsigned int ix=0;ix<tc.size();ix++) { intermediate_q = (!(k1data->id()==24&&k2data->id()==-24)) ? showerAntiquark_->dataPtr() : tc[ix]; SpinorWaveFunction p1_v1 = ffv1->evaluate(EWScale_,5,intermediate_q,qin[p1hel],v1[k1hel]); SpinorBarWaveFunction k_v2 = ffv2->evaluate(EWScale_,5,intermediate_q,qout[khel],v2[k2hel]); // First calculate all the off-shell fermion currents // Now calculate the 6 abelian diagrams // q+g->v1+v2+q with 2 t-channel propagators, 1 s- and 1 t-channel and 2 t-channel ones. if(!((k1data->id()==24&&k2data->id()==-24)&&(abs(p1data->id())%2==1))) { diagrams.push_back(ffv2->evaluate(EWScale_,p1_v1,p2_k,v2[k2hel])); diagrams.push_back(ffg->evaluate(QCDScale_,p1_v1,k_v2,g[p2hel])); diagrams.push_back(ffv1->evaluate(EWScale_,p1_p2,k_v2,v1[k1hel])); } intermediate_q = (!(k1data->id()==24&&k2data->id()==-24)) ? p1data : tc[ix]; SpinorWaveFunction p1_v2 = ffv2->evaluate(EWScale_,5,intermediate_q,qin[p1hel],v2[k2hel]); SpinorBarWaveFunction k_v1 = ffv1->evaluate(EWScale_,5,intermediate_q,qout[khel],v1[k1hel]); // q+g->v2+v1+q, with 2 t-channel propagators, 1 s- and 1 t-channel and 2 t-channel ones. if(!((k1data->id()==24&&k2data->id()==-24)&&(abs(p1data->id())%2==0))) { diagrams.push_back(ffv1->evaluate(EWScale_,p1_v2,p2_k,v1[k1hel])); diagrams.push_back(ffg->evaluate(QCDScale_,p1_v2,k_v1,g[p2hel])); diagrams.push_back(ffv2->evaluate(EWScale_,p1_p2,k_v1,v2[k2hel])); } } // Note: choosing 3 as the second argument in WWWvertex_->evaluate() // sets option 3 in thepeg/Helicity/Vertex/VertexBase.cc , which // means the propagator does not contain a width factor (which is // good re. gauge invariance). 
// If W+Z / W-Z calculate the two V+jet-like s-channel diagrams if(abs(k1data->id())==24&&k2data->id()==23) { // The off-shell s-channel boson current VectorWaveFunction k1_k2 = WWWvertex_->evaluate(EWScale_,3,k1data->CC(),v2[k2hel],v1[k1hel]); // q+qb->g+v1*->g+v1+v2, q+qb->v1*+g->v1+v2+g diagrams.push_back(ffv1->evaluate(EWScale_,p1_p2,qout[khel],k1_k2)); diagrams.push_back(ffv1->evaluate(EWScale_,qin[p1hel],p2_k,k1_k2)); } // If W+W- calculate the four V+jet-like s-channel diagrams if((k1data->id()==24&&k2data->id()==-24)&&(p1data->id()==kdata->id())) { // The off-shell s-channel boson current VectorWaveFunction k1_k2; // q+qb->g+Z0*->g+v1+v2,q+qb->Z0*+g->v1+v2+g, tcPDPtr Z0 = getParticleData(ParticleID::Z0); k1_k2 = WWWvertex_->evaluate(EWScale_,3,Z0,v2[k2hel],v1[k1hel]); diagrams.push_back(FFZvertex_->evaluate(EWScale_,p1_p2,qout[khel],k1_k2)); diagrams.push_back(FFZvertex_->evaluate(EWScale_,qin[p1hel],p2_k,k1_k2)); // q+qb->g+gamma*->g+v1+v2,q+qb->gamma*+g->v1+v2+g, tcPDPtr gamma = getParticleData(ParticleID::gamma); k1_k2 = WWWvertex_->evaluate(EWScale_,3,gamma,v2[k2hel],v1[k1hel]); diagrams.push_back(FFPvertex_->evaluate(EWScale_,p1_p2,qout[khel],k1_k2)); diagrams.push_back(FFPvertex_->evaluate(EWScale_,qin[p1hel],p2_k,k1_k2)); } // Add up all diagrams to get the total amplitude: Complex hel_amp(0.); for(unsigned int ix=0;ix<diagrams.size();ix++) hel_amp += diagrams[ix]; // If we need to fill the production ME we do it here: if(getMatrix) { if(p2hel==0) qg_hel_amps(p1hel,0,k1hel,k2hel,khel) = hel_amp; else qg_hel_amps(p1hel,2,k1hel,k2hel,khel) = hel_amp; } sum_hel_amps_sqr += norm(hel_amp); } } } } } // Fill up the remaining bits of the production ME, corresponding // to longitudinal gluon polarization, with (0.,0.). if(getMatrix) { for(unsigned int p1hel=0;p1hel<2;++p1hel) { for(unsigned int k1hel=0;k1hel<3;++k1hel) { for(unsigned int k2hel=0;k2hel<3;++k2hel) { for(unsigned int khel=0;khel<2;++khel) { qg_hel_amps(p1hel,1,k1hel,k2hel,khel) = Complex(0.,0.); } } } } } // Calculate the production density matrix: if(getMatrix) { for(unsigned int k1hel=0;k1hel<3;++k1hel) { for(unsigned int k1helpr=0;k1helpr<3;++k1helpr) { for(unsigned int k2hel=0;k2hel<3;++k2hel) { for(unsigned int k2helpr=0;k2helpr<3;++k2helpr) { Complex theElement(0.,0.); // For each k1hel, k1helpr, k2hel, k2helpr sum over fermion and gluon spins... for(unsigned int p1hel=0;p1hel<2;++p1hel) { for(unsigned int p2hel=0;p2hel<3;p2hel+=2) { for(unsigned int khel=0;khel<2;++khel) { theElement += qg_hel_amps(p1hel,p2hel,k1hel ,k2hel ,khel) *conj(qg_hel_amps(p1hel,p2hel,k1helpr,k2helpr,khel)); } } } // ...and then set the production matrix element to the sum: productionMatrix_[k1hel][k1helpr][k2hel][k2helpr] = theElement; } } } } } // Spin and colour averaging factors = 1/4 * TR * 1/3 = 1/24 sum_hel_amps_sqr /= 24.; // Symmetry factor for identical Z bosons in the final state if(k1data->id()==23&&k2data->id()==23) sum_hel_amps_sqr /= 2.; return sum_hel_amps_sqr*R.tkr()*R.ukr()*UnitRemoval::InvE2; } /***************************************************************************/ // This is identical to the code in the Powheg matrix element. It should // equal t_u_M_R_gqb in there, which is supposed to be the real emission ME // times tk*uk. 
Energy2 MEPP2VVPowheg::t_u_M_R_gqb_hel_amp(realVVKinematics R, bool getMatrix) const { using namespace ThePEG::Helicity; ProductionMatrixElement gqb_hel_amps(PDT::Spin1,PDT::Spin1Half, PDT::Spin1,PDT::Spin1, PDT::Spin1Half); double sum_hel_amps_sqr(0.); tcPDPtr p1data(getParticleData(ParticleID::g)); tcPDPtr p2data(showerAntiquark_->dataPtr()); tcPDPtr k1data(V1_->dataPtr()); tcPDPtr k2data(V2_->dataPtr()); tcPDPtr kdata (showerQuark_->dataPtr()->CC()); if(k1data->id()==-24&&k2data->id()==24) swap(k1data,k2data); SpinorBarWaveFunction qbinSpinor(R.p2r(),p2data,incoming); SpinorWaveFunction qboutSpinor(R.kr(),kdata,outgoing); vector<SpinorBarWaveFunction> qbin; vector<SpinorWaveFunction> qbout; for(unsigned int ix=0;ix<2;ix++) { qbinSpinor.reset(ix); qboutSpinor.reset(ix); qbin.push_back(qbinSpinor); qbout.push_back(qboutSpinor); } VectorWaveFunction v1Polarization(R.k1r(),k1data,outgoing); VectorWaveFunction v2Polarization(R.k2r(),k2data,outgoing); vector<VectorWaveFunction> v1; vector<VectorWaveFunction> v2; for(unsigned int ix=0;ix<3;ix++) { v1Polarization.reset(ix); v2Polarization.reset(ix); v1.push_back(v1Polarization); v2.push_back(v2Polarization); } VectorWaveFunction gPolarization(R.p1r(),p1data,incoming); vector<VectorWaveFunction> g; for(unsigned int ix=0;ix<3;ix+=2) { gPolarization.reset(ix); g.push_back(gPolarization); } AbstractFFVVertexPtr ffg = FFGvertex_; AbstractFFVVertexPtr ffv1 = k1data->id()==23 ? FFZvertex_ : FFWvertex_; AbstractFFVVertexPtr ffv2 = k2data->id()==23 ? FFZvertex_ : FFWvertex_; // Collecting information for intermediate fermions vector<tcPDPtr> tc; if(abs(k1data->id())==24&&abs(k2data->id())==24) { if(abs(p2data->id())%2==0) for(int ix=0;ix<3;++ix) tc.push_back(getParticleData(1+2*ix)); else for(int ix=0;ix<3;++ix) tc.push_back(getParticleData(2+2*ix)); } else if(k1data->id()==23&&k2data->id()==23) tc.push_back(p2data); else if(abs(k1data->id())==24&&k2data->id()==23) tc.push_back(kdata->CC()); // Loop over helicities summing the relevant diagrams for(unsigned int p1hel=0;p1hel<2;++p1hel) { for(unsigned int p2hel=0;p2hel<2;++p2hel) { for(unsigned int khel=0;khel<2;++khel) { SpinorBarWaveFunction p1_p2 = ffg->evaluate(QCDScale_,5,p2data,qbin[p2hel],g[p1hel]); SpinorWaveFunction p1_k = ffg->evaluate(QCDScale_,5,kdata->CC(),qbout[khel],g[p1hel]); for(unsigned int k1hel=0;k1hel<3;++k1hel) { for(unsigned int k2hel=0;k2hel<3;++k2hel) { // If helicity is exactly conserved (massless quarks) skip if p2hel!=khel // but if the production ME is required first fill it with (0.,0.). if((p2hel!=khel)&&helicityConservation_) { if(getMatrix) { if(p1hel==0) gqb_hel_amps(0,p2hel,k1hel,k2hel,khel) = Complex(0.,0.); else gqb_hel_amps(2,p2hel,k1hel,k2hel,khel) = Complex(0.,0.); } continue; } vector<Complex> diagrams; // Get all t-channel diagram contributions tcPDPtr intermediate_q; for(unsigned int ix=0;ix<tc.size();ix++) { intermediate_q = (!(k1data->id()==24&&k2data->id()==-24)) ? showerQuark_->dataPtr() : tc[ix]; SpinorBarWaveFunction p2_v1 = ffv1->evaluate(EWScale_,5,intermediate_q,qbin[p2hel],v1[k1hel]); SpinorWaveFunction k_v2 = ffv2->evaluate(EWScale_,5,intermediate_q,qbout[khel],v2[k2hel]); // First calculate all the off-shell fermion currents // Now calculate the 6 abelian diagrams q+g->v1+v2+q // with 2 t-channel propagators, 1 s- and 1 t-channel // and 2 t-channel ones. 
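              // (In this g+qbar channel p1 is the incoming gluon and p2 the
              // incoming antiquark, so these are the g+qbar -> v1+v2+qbar diagrams.)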
if(!((k1data->id()==24&&k2data->id()==-24)&&(abs(p2data->id())%2==0))) { diagrams.push_back(ffv2->evaluate(EWScale_,p1_k,p2_v1,v2[k2hel])); diagrams.push_back(ffg->evaluate(QCDScale_,k_v2,p2_v1,g[p1hel])); diagrams.push_back(ffv1->evaluate(EWScale_,k_v2,p1_p2,v1[k1hel])); } intermediate_q = (!(k1data->id()==24&&k2data->id()==-24)) ? p2data : tc[ix]; SpinorBarWaveFunction p2_v2 = ffv2->evaluate(EWScale_,5,intermediate_q,qbin[p2hel],v2[k2hel]); SpinorWaveFunction k_v1 = ffv1->evaluate(EWScale_,5,intermediate_q,qbout[khel],v1[k1hel]); // q+g->v2+v1+q, with 2 t-channel propagators, 1 s- and 1 t-channel and 2 t-channel ones. if(!((k1data->id()==24&&k2data->id()==-24)&&(abs(p2data->id())%2==1))) { diagrams.push_back(ffv1->evaluate(EWScale_,p1_k,p2_v2,v1[k1hel])); diagrams.push_back(ffg->evaluate(QCDScale_,k_v1,p2_v2,g[p1hel])); diagrams.push_back(ffv2->evaluate(EWScale_,k_v1,p1_p2,v2[k2hel])); } } // Note: choosing 3 as the second argument in WWWvertex_->evaluate() // sets option 3 in thepeg/Helicity/Vertex/VertexBase.cc , which // means the propagator does not contain a width factor (which is // good re. gauge invariance). // If W+Z / W-Z calculate the two V+jet-like s-channel diagrams if(abs(k1data->id())==24&&k2data->id()==23) { // The off-shell s-channel boson current VectorWaveFunction k1_k2 = WWWvertex_->evaluate(EWScale_,3,k1data->CC(),v2[k2hel],v1[k1hel]); // q+qb->g+v1*->g+v1+v2, q+qb->v1*+g->v1+v2+g diagrams.push_back(ffv1->evaluate(EWScale_,qbout[khel],p1_p2,k1_k2)); diagrams.push_back(ffv1->evaluate(EWScale_,p1_k,qbin[p2hel],k1_k2)); } // If W+W- calculate the four V+jet-like s-channel diagrams if((k1data->id()==24&&k2data->id()==-24)&&(p2data->id()==kdata->id())) { // The off-shell s-channel boson current VectorWaveFunction k1_k2; // q+qb->g+Z0*->g+v1+v2,q+qb->Z0*+g->v1+v2+g, tcPDPtr Z0 = getParticleData(ParticleID::Z0); k1_k2 = WWWvertex_->evaluate(EWScale_,3,Z0,v2[k2hel],v1[k1hel]); diagrams.push_back(FFZvertex_->evaluate(EWScale_,qbout[khel],p1_p2,k1_k2)); diagrams.push_back(FFZvertex_->evaluate(EWScale_,p1_k,qbin[p2hel],k1_k2)); // q+qb->g+gamma*->g+v1+v2,q+qb->gamma*+g->v1+v2+g, tcPDPtr gamma = getParticleData(ParticleID::gamma); k1_k2 = WWWvertex_->evaluate(EWScale_,3,gamma,v2[k2hel],v1[k1hel]); diagrams.push_back(FFPvertex_->evaluate(EWScale_,qbout[khel],p1_p2,k1_k2)); diagrams.push_back(FFPvertex_->evaluate(EWScale_,p1_k,qbin[p2hel],k1_k2)); } // Add up all diagrams to get the total amplitude: Complex hel_amp(0.); for(unsigned int ix=0;ix<diagrams.size();ix++) hel_amp += diagrams[ix]; // If we need to fill the production ME we do it here: if(getMatrix) { if(p1hel==0) gqb_hel_amps(0,p2hel,k1hel,k2hel,khel) = hel_amp; else gqb_hel_amps(2,p2hel,k1hel,k2hel,khel) = hel_amp; } sum_hel_amps_sqr += norm(hel_amp); } } } } } // Fill up the remaining bits of the production ME, corresponding // to longitudinal gluon polarization, with (0.,0.). if(getMatrix) { for(unsigned int p2hel=0;p2hel<2;++p2hel) { for(unsigned int k1hel=0;k1hel<3;++k1hel) { for(unsigned int k2hel=0;k2hel<3;++k2hel) { for(unsigned int khel=0;khel<2;++khel) { gqb_hel_amps(1,p2hel,k1hel,k2hel,khel) = Complex(0.,0.); } } } } } // Calculate the production density matrix: if(getMatrix) { for(unsigned int k1hel=0;k1hel<3;++k1hel) { for(unsigned int k1helpr=0;k1helpr<3;++k1helpr) { for(unsigned int k2hel=0;k2hel<3;++k2hel) { for(unsigned int k2helpr=0;k2helpr<3;++k2helpr) { Complex theElement(0.,0.); // For each k1hel, k1helpr, k2hel, k2helpr sum over fermion and gluon spins... 
for(unsigned int p1hel=0;p1hel<3;p1hel+=2) { for(unsigned int p2hel=0;p2hel<2;++p2hel) { for(unsigned int khel=0;khel<2;++khel) { theElement += gqb_hel_amps(p1hel,p2hel,k1hel ,k2hel ,khel) *conj(gqb_hel_amps(p1hel,p2hel,k1helpr,k2helpr,khel)); } } } // ...and then set the production matrix element to the sum: productionMatrix_[k1hel][k1helpr][k2hel][k2helpr] = theElement; } } } } } // Spin and colour averaging factors = 1/4 * TR * 1/3 = 1/24 sum_hel_amps_sqr /= 24.; // Symmetry factor for identical Z bosons in the final state if(k1data->id()==23&&k2data->id()==23) sum_hel_amps_sqr /= 2.; return sum_hel_amps_sqr*R.tkr()*R.ukr()*UnitRemoval::InvE2; } /***************************************************************************/ // This returns exactly the same value as lo_me2_ when you put it in MEPP2VVPowheg.cc double MEPP2VVPowheg::lo_me(bool getMatrix) const { using namespace ThePEG::Helicity; ProductionMatrixElement lo_hel_amps(PDT::Spin1Half,PDT::Spin1Half, PDT::Spin1 ,PDT::Spin1); double sum_hel_amps_sqr(0.); tcPDPtr p1data(showerQuark_->dataPtr()); tcPDPtr p2data(showerAntiquark_->dataPtr()); tcPDPtr k1data(V1_->dataPtr()); tcPDPtr k2data(V2_->dataPtr()); if(k1data->id()==-24&&k2data->id()==24) swap(k1data,k2data); // Should never actually occur. // If you want to reproduce the spin correlations of MEPP2VV // you should evaluate this ME using the lab frame momenta // instead of the bornVVKinematics ones (partonic C.O.M. frame). SpinorWaveFunction qSpinor; SpinorBarWaveFunction qbSpinor; if(!getMatrix) { qSpinor=SpinorWaveFunction(B_.p1b(),p1data,incoming); qbSpinor=SpinorBarWaveFunction(B_.p2b(),p2data,incoming); } else { qSpinor=SpinorWaveFunction(showerQuark_->momentum(),p1data,incoming); qbSpinor=SpinorBarWaveFunction(showerAntiquark_->momentum(),p2data,incoming); } vector<SpinorWaveFunction> q; vector<SpinorBarWaveFunction> qb; for(unsigned int ix=0;ix<2;ix++) { qSpinor.reset(ix); qbSpinor.reset(ix); q.push_back(qSpinor); qb.push_back(qbSpinor); } // If you want to reproduce the spin correlations of MEPP2VV // you should evaluate this ME using the lab frame momenta // instead of the bornVVKinematics ones (partonic C.O.M. frame). VectorWaveFunction v1Polarization; VectorWaveFunction v2Polarization; if(!getMatrix) { v1Polarization=VectorWaveFunction(B_.k1b(),k1data,outgoing); v2Polarization=VectorWaveFunction(B_.k2b(),k2data,outgoing); } else { v1Polarization=VectorWaveFunction(V1_->momentum(),k1data,outgoing); v2Polarization=VectorWaveFunction(V2_->momentum(),k2data,outgoing); } vector<VectorWaveFunction> v1; vector<VectorWaveFunction> v2; for(unsigned int ix=0;ix<3;ix++) { v1Polarization.reset(ix); v2Polarization.reset(ix); v1.push_back(v1Polarization); v2.push_back(v2Polarization); } AbstractFFVVertexPtr ffv1 = k1data->id()==23 ? FFZvertex_ : FFWvertex_; AbstractFFVVertexPtr ffv2 = k2data->id()==23 ? 
FFZvertex_ : FFWvertex_; // Collecting information for intermediate fermions vector<tcPDPtr> tc; if(abs(k1data->id())==24&&abs(k2data->id())==24) { if(abs(p1data->id())%2==0) for(int ix=0;ix<3;++ix) tc.push_back(getParticleData(1+2*ix)); else for(int ix=0;ix<3;++ix) tc.push_back(getParticleData(2+2*ix)); } else if(k1data->id()==23&&k2data->id()==23) tc.push_back(p1data); else if(abs(k1data->id())==24&&k2data->id()==23) tc.push_back(p2data); // Loop over helicities summing the relevant diagrams for(unsigned int p1hel=0;p1hel<2;++p1hel) { for(unsigned int p2hel=0;p2hel<2;++p2hel) { for(unsigned int k1hel=0;k1hel<3;++k1hel) { for(unsigned int k2hel=0;k2hel<3;++k2hel) { if((p1hel==p2hel)&&helicityConservation_) { lo_hel_amps(p1hel,p2hel,k1hel,k2hel) = Complex(0.,0.); continue; } vector<Complex> diagrams; // Get all t-channel diagram contributions tcPDPtr intermediate_t; for(unsigned int ix=0;ix<tc.size();ix++) { intermediate_t = (!(k1data->id()==24&&k2data->id()==-24)) ? p2data : tc[ix]; SpinorWaveFunction p1_v1 = ffv1->evaluate(EWScale_,5,intermediate_t,q[p1hel],v1[k1hel]); // First calculate all the off-shell fermion currents // Now calculate the 6 t-channel diagrams // q+qb->v1+v2 if(!((k1data->id()==24&&k2data->id()==-24)&&(abs(p1data->id())%2==1))) diagrams.push_back(ffv2->evaluate(EWScale_,p1_v1,qb[p2hel],v2[k2hel])); intermediate_t = (!(k1data->id()==24&&k2data->id()==-24)) ? p1data : tc[ix]; SpinorWaveFunction p1_v2 = ffv2->evaluate(EWScale_,5,intermediate_t,q[p1hel],v2[k2hel]); // q+qb->v2+v1 if(!((k1data->id()==24&&k2data->id()==-24)&&(abs(p1data->id())%2==0))) diagrams.push_back(ffv1->evaluate(EWScale_,p1_v2,qb[p2hel],v1[k1hel])); } // Note: choosing 3 as the second argument in WWWvertex_->evaluate() // sets option 3 in thepeg/Helicity/Vertex/VertexBase.cc , which // means the propagator does not contain a width factor (which is // good re. gauge invariance). // If W+Z / W-Z calculate the two V+jet-like s-channel diagrams if(abs(k1data->id())==24&&k2data->id()==23) { // The off-shell s-channel boson current VectorWaveFunction k1_k2 = WWWvertex_->evaluate(EWScale_,3,k1data->CC(),v2[k2hel],v1[k1hel]); // q+qb->v1*->v1+v2 diagrams.push_back(ffv1->evaluate(EWScale_,q[p1hel],qb[p2hel],k1_k2)); } // If W+W- calculate the four V+jet-like s-channel diagrams if((k1data->id()==24&&k2data->id()==-24)&&(p1data->id()==-p2data->id())) { // The off-shell s-channel boson current VectorWaveFunction k1_k2; // q+qb->Z0*->v1+v2 tcPDPtr Z0 = getParticleData(ParticleID::Z0); k1_k2 = WWWvertex_->evaluate(EWScale_,3,Z0,v2[k2hel],v1[k1hel]); diagrams.push_back(FFZvertex_->evaluate(EWScale_,q[p1hel],qb[p2hel],k1_k2)); // q+qb->gamma*->v1+v2 tcPDPtr gamma = getParticleData(ParticleID::gamma); k1_k2 = WWWvertex_->evaluate(EWScale_,3,gamma,v2[k2hel],v1[k1hel]); diagrams.push_back(FFPvertex_->evaluate(EWScale_,q[p1hel],qb[p2hel],k1_k2)); } // Add up all diagrams to get the total amplitude: Complex hel_amp(0.); for(unsigned int ix=0;ix<diagrams.size();ix++) hel_amp += diagrams[ix]; // If we need to fill the production ME we do it here: if(getMatrix) lo_hel_amps(p1hel,p2hel,k1hel,k2hel) = hel_amp; sum_hel_amps_sqr += norm(hel_amp); } } } } // Calculate the production density matrix: if(getMatrix) { for(unsigned int k1hel=0;k1hel<3;++k1hel) { for(unsigned int k1helpr=0;k1helpr<3;++k1helpr) { for(unsigned int k2hel=0;k2hel<3;++k2hel) { for(unsigned int k2helpr=0;k2helpr<3;++k2helpr) { Complex theElement(0.,0.); // For each k1hel, k1helpr, k2hel, k2helpr sum over the fermion spins... 
for(unsigned int p1hel=0;p1hel<2;++p1hel) { for(unsigned int p2hel=0;p2hel<2;++p2hel) { if((p1hel==p2hel)&&helicityConservation_) continue; theElement += lo_hel_amps(p1hel,p2hel,k1hel ,k2hel ) *conj(lo_hel_amps(p1hel,p2hel,k1helpr,k2helpr)); } } // ...and then set the production matrix element to the sum: productionMatrix_[k1hel][k1helpr][k2hel][k2helpr] = theElement; } } } } } // Spin and colour averaging factors = 1/4 * 1/3 = 1/12 sum_hel_amps_sqr /= 12.; // Symmetry factor for identical Z bosons in the final state if(k1data->id()==23&&k2data->id()==23) sum_hel_amps_sqr /= 2.; return sum_hel_amps_sqr; } /***************************************************************************/ // This member selects a [2-body] decay mode and assigns children to the // vector bosons with momenta which are isotropic in their rest frames. bool MEPP2VVPowheg::isotropicDecayer() { using namespace ThePEG::Helicity; // Generate the children's momenta isotropically in // the rest frames of V1 and V2: double cth,phi; // First V1's children: cth = UseRandom::rnd()*2.-1.; phi = UseRandom::rnd()*2.*Constants::pi; Energy m1(V1_->momentum().m()); Energy m3(children_[0]->data().constituentMass()); Energy m4(children_[1]->data().constituentMass()); Energy p34(triangleFn(sqr(m1),sqr(m3),sqr(m4)) /2./m1); - if(isnan(p34.rawValue())||cth>1.||cth<-1.) return false; + if(std::isnan(p34.rawValue())||cth>1.||cth<-1.) return false; Energy pT34(p34*sqrt(1.-cth)*sqrt(1.+cth)); Lorentz5Momentum k3(pT34*sin(phi),pT34*cos(phi),p34 *cth, sqrt(p34*p34+sqr(m3)),m3); Lorentz5Momentum k4(-k3); k4.setE(sqrt(p34*p34+sqr(m4))); k4.setTau(m4); Boost boostToV1RF(R_.k1r().boostVector()); k3.boost(boostToV1RF); k3.rescaleRho(); k4.boost(boostToV1RF); k4.rescaleRho(); // Second V2's children: cth = UseRandom::rnd()*2.-1.; phi = UseRandom::rnd()*2.*Constants::pi; Energy m2(V2_->momentum().m()); Energy m5(children_[2]->data().constituentMass()); Energy m6(children_[3]->data().constituentMass()); Energy p56(triangleFn(sqr(m2),sqr(m5),sqr(m6)) /2./m2); - if(isnan(p56.rawValue())||cth>1.||cth<-1.) return false; + if(std::isnan(p56.rawValue())||cth>1.||cth<-1.) return false; Energy pT56(p56*sqrt(1.-cth)*sqrt(1.+cth)); Lorentz5Momentum k5(pT56*sin(phi),pT56*cos(phi),p56*cth, sqrt(p56*p56+sqr(m5)),m5); Lorentz5Momentum k6(-k5); k6.setE(sqrt(p56*p56+sqr(m6))); k6.setTau(m6); Boost boostToV2RF(R_.k2r().boostVector()); k5.boost(boostToV2RF); k5.rescaleRho(); k6.boost(boostToV2RF); k6.rescaleRho(); // Assign the momenta to the children: children_[0]->set5Momentum(k3); children_[1]->set5Momentum(k4); children_[2]->set5Momentum(k5); children_[3]->set5Momentum(k6); return true; } // Override 2->2 production matrix here: void MEPP2VVPowheg::recalculateVertex() { // Zero the squared amplitude; this equals sum_hel_amps_sqr if all // is working as it should: Complex productionMatrix2(0.,0.); for(unsigned int k1hel=0;k1hel<3;++k1hel) for(unsigned int k2hel=0;k2hel<3;++k2hel) productionMatrix2 += productionMatrix_[k1hel][k1hel][k2hel][k2hel]; // Get the vector wavefunctions: VectorWaveFunction v1Polarization; VectorWaveFunction v2Polarization; v1Polarization=VectorWaveFunction(R_.k1r(),V1_->dataPtr(),outgoing); v2Polarization=VectorWaveFunction(R_.k2r(),V2_->dataPtr(),outgoing); vector<VectorWaveFunction> v1; vector<VectorWaveFunction> v2; for(unsigned int ix=0;ix<3;ix++) { v1Polarization.reset(ix); v2Polarization.reset(ix); v1.push_back(v1Polarization); v2.push_back(v2Polarization); } AbstractFFVVertexPtr ffv1 = V1_->id()==23 ? 
FFZvertex_ : FFWvertex_; AbstractFFVVertexPtr ffv2 = V2_->id()==23 ? FFZvertex_ : FFWvertex_; bool vetoed(true); while(vetoed) { // Decay the bosons isotropically in their rest frames: isotropicDecayer(); // Get the spinor wavefunctions: SpinorWaveFunction k3Spinor(children_[0]->momentum(),children_[0]->dataPtr(),outgoing); SpinorBarWaveFunction k4Spinor(children_[1]->momentum(),children_[1]->dataPtr(),outgoing); SpinorWaveFunction k5Spinor(children_[2]->momentum(),children_[2]->dataPtr(),outgoing); SpinorBarWaveFunction k6Spinor(children_[3]->momentum(),children_[3]->dataPtr(),outgoing); vector<SpinorWaveFunction> k3,k5; vector<SpinorBarWaveFunction> k4,k6; for(unsigned int ix=0;ix<2;ix++) { k3Spinor.reset(ix); k4Spinor.reset(ix); k3.push_back(k3Spinor); k4.push_back(k4Spinor); k5Spinor.reset(ix); k6Spinor.reset(ix); k5.push_back(k5Spinor); k6.push_back(k6Spinor); } DecayMEPtr decayAmps(new_ptr(GeneralDecayMatrixElement(PDT::Spin1,PDT::Spin1Half,PDT::Spin1Half))); for(unsigned int k1hel=0;k1hel<3;++k1hel) { for(unsigned int k3hel=0;k3hel<2;++k3hel) { for(unsigned int k4hel=0;k4hel<2;++k4hel) { (*decayAmps)(k1hel,k3hel,k4hel) = ffv1->evaluate(EWScale_,k3[k3hel],k4[k4hel],v1[k1hel]); } } } Complex V1decayMatrix[3][3]; for(unsigned int k1hel=0;k1hel<3;++k1hel) { for(unsigned int k1helpr=0;k1helpr<3;++k1helpr) { Complex theElement(0.,0.); for(unsigned int k3hel=0;k3hel<2;++k3hel) { for(unsigned int k4hel=0;k4hel<2;++k4hel) { theElement += (*decayAmps)(k1hel,k3hel,k4hel) *conj((*decayAmps)(k1helpr,k3hel,k4hel)); } } V1decayMatrix[k1hel][k1helpr] = theElement; } } Complex V1decayMatrix2(0.,0.); for(unsigned int k1hel=0;k1hel<3;++k1hel) V1decayMatrix2 += V1decayMatrix[k1hel][k1hel]; for(unsigned int k2hel=0;k2hel<3;++k2hel) { for(unsigned int k5hel=0;k5hel<2;++k5hel) { for(unsigned int k6hel=0;k6hel<2;++k6hel) { (*decayAmps)(k2hel,k5hel,k6hel) = ffv2->evaluate(EWScale_,k5[k5hel],k6[k6hel],v2[k2hel]); } } } Complex V2decayMatrix[3][3]; for(unsigned int k2hel=0;k2hel<3;++k2hel) { for(unsigned int k2helpr=0;k2helpr<3;++k2helpr) { Complex theElement(0.,0.); for(unsigned int k5hel=0;k5hel<2;++k5hel) { for(unsigned int k6hel=0;k6hel<2;++k6hel) { theElement += (*decayAmps)(k2hel,k5hel,k6hel) *conj((*decayAmps)(k2helpr,k5hel,k6hel)); } } V2decayMatrix[k2hel][k2helpr] = theElement; } } Complex V2decayMatrix2(0.,0.); for(unsigned int k2hel=0;k2hel<3;++k2hel) V2decayMatrix2 += V2decayMatrix[k2hel][k2hel]; // Contract the production matrix and the decay matrices: Complex meTimesV1V2denominators(0.,0.); for(unsigned int k1hel=0;k1hel<3;++k1hel) { for(unsigned int k1helpr=0;k1helpr<3;++k1helpr) { for(unsigned int k2hel=0;k2hel<3;++k2hel) { for(unsigned int k2helpr=0;k2helpr<3;++k2helpr) { meTimesV1V2denominators += productionMatrix_[k1hel][k1helpr][k2hel][k2helpr] *V1decayMatrix[k1hel][k1helpr] *V2decayMatrix[k2hel][k2helpr]; } } } } if(imag(meTimesV1V2denominators)/real(meTimesV1V2denominators)>1.e-7) cout << "MEPP2VVPowheg warning\n" << "the matrix element's imaginary part is large " << meTimesV1V2denominators << endl; if(imag(productionMatrix2)/real(productionMatrix2)>1.e-7) cout << "MEPP2VVPowheg warning\n" << "the production matrix element's imaginary part is large " << productionMatrix2 << endl; if(imag(V1decayMatrix2)/real(V1decayMatrix2)>1.e-7) cout << "MEPP2VVPowheg warning\n" << "the V1 decay matrix element's imaginary part is large " << V1decayMatrix2 << endl; if(imag(V2decayMatrix2)/real(V2decayMatrix2)>1.e-7) cout << "MEPP2VVPowheg warning\n" << "the V2 decay matrix element's imaginary part is 
large " << V2decayMatrix2 << endl; // Need branching ratio at least in here I would think ---> double decayWeight( real(meTimesV1V2denominators) / real(productionMatrix2*V1decayMatrix2*V2decayMatrix2)); if(decayWeight>1.) cout << "MEPP2VVPowheg::recalculateVertex decayWeight > 1., decayWeight = " << decayWeight << endl; if(decayWeight<0.) cout << "MEPP2VVPowheg::recalculateVertex decayWeight < 0., decayWeight = " << decayWeight << endl; if(UseRandom::rnd()<decayWeight) vetoed = false; else vetoed = true; } return; } Energy2 MEPP2VVPowheg::triangleFn(Energy2 m12,Energy2 m22, Energy2 m32) { Energy4 lambda2(m12*m12+m22*m22+m32*m32-2.*m12*m22-2.*m12*m32-2.*m22*m32); if(lambda2>=ZERO) { return sqrt(lambda2); } else { generator()->log() << "MEPP2VVPowheg::triangleFn " << "kinematic instability, imaginary triangle function\n"; return -999999.*GeV2; } } diff --git a/MatrixElement/Powheg/MEPP2WHPowheg.cc b/MatrixElement/Powheg/MEPP2WHPowheg.cc --- a/MatrixElement/Powheg/MEPP2WHPowheg.cc +++ b/MatrixElement/Powheg/MEPP2WHPowheg.cc @@ -1,389 +1,389 @@ // -*- C++ -*- // // This is the implementation of the non-inlined, non-templated member // functions of the MEPP2WHPowheg class. // #include "MEPP2WHPowheg.h" #include "ThePEG/PDT/DecayMode.h" #include "ThePEG/Interface/Parameter.h" #include "ThePEG/Interface/Switch.h" #include "ThePEG/Interface/ClassDocumentation.h" #include "ThePEG/Persistency/PersistentOStream.h" #include "ThePEG/Persistency/PersistentIStream.h" #include "ThePEG/Handlers/StandardXComb.h" #include "ThePEG/PDT/EnumParticles.h" #include "ThePEG/MatrixElement/Tree2toNDiagram.h" #include "ThePEG/Repository/EventGenerator.h" using namespace Herwig; MEPP2WHPowheg::MEPP2WHPowheg() : _gluon(), TR_(0.5), CF_(4./3.), _contrib(1) ,_nlo_alphaS_opt(0), _fixed_alphaS(0.115895), _a(0.5) ,_p(0.7) , _eps(1.0e-8), _scaleopt(1), _fixedScale(100.*GeV), _scaleFact(1.) {} ClassDescription<MEPP2WHPowheg> MEPP2WHPowheg::initMEPP2WHPowheg; // Definition of the static class description member. 
void MEPP2WHPowheg::persistentOutput(PersistentOStream & os) const { os << _contrib << _nlo_alphaS_opt << _fixed_alphaS << _a << _p << _gluon << _scaleopt << ounit(_fixedScale,GeV) << _scaleFact; } void MEPP2WHPowheg::persistentInput(PersistentIStream & is, int) { is >> _contrib >> _nlo_alphaS_opt >> _fixed_alphaS >> _a >> _p >> _gluon >> _scaleopt >> iunit(_fixedScale,GeV) >> _scaleFact; } void MEPP2WHPowheg::Init() { static ClassDocumentation<MEPP2WHPowheg> documentation ("The MEPP2WHPowheg class implements the matrix element for the Bjorken" " process q qbar -> WH", "The PP$\\to$W Higgs POWHEG matrix element is described in \\cite{Hamilton:2009za}.", "%\\cite{Hamilton:2009za}\n" "\\bibitem{Hamilton:2009za}\n" " K.~Hamilton, P.~Richardson and J.~Tully,\n" " ``A Positive-Weight Next-to-Leading Order Monte Carlo Simulation for Higgs\n" " Boson Production,''\n" " JHEP {\\bf 0904} (2009) 116\n" " [arXiv:0903.4345 [hep-ph]].\n" " %%CITATION = JHEPA,0904,116;%%\n" ); static Switch<MEPP2WHPowheg,unsigned int> interfaceContribution ("Contribution", "Which contributions to the cross section to include", &MEPP2WHPowheg::_contrib, 1, false, false); static SwitchOption interfaceContributionLeadingOrder (interfaceContribution, "LeadingOrder", "Just generate the leading order cross section", 0); static SwitchOption interfaceContributionPositiveNLO (interfaceContribution, "PositiveNLO", "Generate the positive contribution to the full NLO cross section", 1); static SwitchOption interfaceContributionNegativeNLO (interfaceContribution, "NegativeNLO", "Generate the negative contribution to the full NLO cross section", 2); static Switch<MEPP2WHPowheg,unsigned int> interfaceNLOalphaSopt ("NLOalphaSopt", "Whether to use a fixed or a running QCD coupling for the NLO weight", &MEPP2WHPowheg::_nlo_alphaS_opt, 0, false, false); static SwitchOption interfaceNLOalphaSoptRunningAlphaS (interfaceNLOalphaSopt, "RunningAlphaS", "Use the usual running QCD coupling evaluated at scale scale()", 0); static SwitchOption interfaceNLOalphaSoptFixedAlphaS (interfaceNLOalphaSopt, "FixedAlphaS", "Use a constant QCD coupling for comparison/debugging purposes", 1); static Parameter<MEPP2WHPowheg,double> interfaceFixedNLOalphaS ("FixedNLOalphaS", "The value of alphaS to use for the nlo weight if _nlo_alphaS_opt=1", &MEPP2WHPowheg::_fixed_alphaS, 0.115895, 0., 1.0, false, false, Interface::limited); static Parameter<MEPP2WHPowheg,double> interfaceCorrectionCoefficient ("CorrectionCoefficient", "The magnitude of the correction term to reduce the negative contribution", &MEPP2WHPowheg::_a, 0.5, -10., 10.0, false, false, Interface::limited); static Parameter<MEPP2WHPowheg,double> interfaceCorrectionPower ("CorrectionPower", "The power of the correction term to reduce the negative contribution", &MEPP2WHPowheg::_p, 0.7, 0.0, 1.0, false, false, Interface::limited); static Switch<MEPP2WHPowheg,unsigned int> interfaceFactorizationScaleOption ("FactorizationScaleOption", "Option for the scale to be used", &MEPP2WHPowheg::_scaleopt, 1, false, false); static SwitchOption interfaceScaleOptionFixed (interfaceFactorizationScaleOption, "Fixed", "Use a fixed scale", 0); static SwitchOption interfaceScaleOptionsHat (interfaceFactorizationScaleOption, "Dynamic", "Use the mass of the vector boson-Higgs boson system", 1); static Parameter<MEPP2WHPowheg,Energy> interfaceFactorizationScaleValue ("FactorizationScaleValue", "The fixed scale to use if required", &MEPP2WHPowheg::_fixedScale, GeV, 100.0*GeV, 10.0*GeV, 1000.0*GeV, false, false, 
Interface::limited); static Parameter<MEPP2WHPowheg,double> interfaceScaleFactor ("ScaleFactor", "The factor used before sHat if using a running scale", &MEPP2WHPowheg::_scaleFact, 1.0, 0.0, 10.0, false, false, Interface::limited); } void MEPP2WHPowheg::doinit() { // gluon ParticleData object _gluon = getParticleData(ParticleID::g); MEPP2WH::doinit(); } Energy2 MEPP2WHPowheg::scale() const { return _scaleopt == 0 ? sqr(_fixedScale) : _scaleFact*sHat(); } int MEPP2WHPowheg::nDim() const { return 7; } bool MEPP2WHPowheg::generateKinematics(const double * r) { _xt=*(r+5); _v =*(r+6); return MEPP2WH::generateKinematics(r); } CrossSection MEPP2WHPowheg::dSigHatDR() const { // Get Born momentum fractions xbar_a and xbar_b: _xb_a = lastX1(); _xb_b = lastX2(); return MEPP2WH::dSigHatDR()*NLOweight(); } double MEPP2WHPowheg::NLOweight() const { // If only leading order is required return 1: if(_contrib==0) return 1.; useMe(); // Get particle data for QCD particles: _parton_a=mePartonData()[0]; _parton_b=mePartonData()[1]; // get BeamParticleData objects for PDF's _hadron_A=dynamic_ptr_cast<Ptr<BeamParticleData>::transient_const_pointer> (lastParticles().first->dataPtr()); _hadron_B=dynamic_ptr_cast<Ptr<BeamParticleData>::transient_const_pointer> (lastParticles().second->dataPtr()); // If necessary swap the particle data vectors so that _xb_a, // mePartonData[0], beam[0] relate to the inbound quark: if(!(lastPartons().first ->dataPtr()==_parton_a&& lastPartons().second->dataPtr()==_parton_b)) { swap(_xb_a ,_xb_b); swap(_hadron_A,_hadron_B); } // calculate the PDF's for the Born process _oldq = _hadron_A->pdf()->xfx(_hadron_A,_parton_a,scale(),_xb_a)/_xb_a; _oldqbar = _hadron_B->pdf()->xfx(_hadron_B,_parton_b,scale(),_xb_b)/_xb_b; // Calculate alpha_S _alphaS2Pi = _nlo_alphaS_opt==1 ? _fixed_alphaS : SM().alphaS(scale()); _alphaS2Pi /= 2.*Constants::pi; // Calculate the invariant mass of the dilepton pair _mll2 = sHat(); _mu2 = scale(); // Calculate the integrand // q qbar contribution double wqqvirt = Vtilde_qq(); double wqqcollin = Ctilde_qq(x(_xt,1.),1.) + Ctilde_qq(x(_xt,0.),0.); double wqqreal = Ftilde_qq(_xt,_v); double wqq = wqqvirt+wqqcollin+wqqreal; // q g contribution double wqgcollin = Ctilde_qg(x(_xt,0.),0.); double wqgreal = Ftilde_qg(_xt,_v); double wqg = wqgreal+wqgcollin; // g qbar contribution double wgqbarcollin = Ctilde_gq(x(_xt,1.),1.); double wgqbarreal = Ftilde_gq(_xt,_v); double wgqbar = wgqbarreal+wgqbarcollin; // total double wgt = 1.+(wqq+wqg+wgqbar); // KMH - 06/08 - This seems to give wrong NLO results for // associated Higgs so I'm omitting it. // //trick to try and reduce neg wgt contribution // if(_xt<1.-_eps) // wgt += _a*(1./pow(1.-_xt,_p)-(1.-pow(_eps,1.-_p))/(1.-_p)/(1.-_eps)); // return the answer - assert(!isinf(wgt)&&!isnan(wgt)); + assert(isfinite(wgt)); return _contrib==1 ? max(0.,wgt) : max(0.,-wgt); } double MEPP2WHPowheg::x(double xt, double v) const { double x0(xbar(v)); return x0+(1.-x0)*xt; } double MEPP2WHPowheg::x_a(double x, double v) const { if(x==1.) return _xb_a; if(v==0.) return _xb_a; if(v==1.) return _xb_a/x; return (_xb_a/sqrt(x))*sqrt((1.-(1.-x)*(1.-v))/(1.-(1.-x)*v)); } double MEPP2WHPowheg::x_b(double x, double v) const { if(x==1.) return _xb_b; if(v==0.) return _xb_b/x; if(v==1.) return _xb_b; return (_xb_b/sqrt(x))*sqrt((1.-(1.-x)*v)/(1.-(1.-x)*(1.-v))); } double MEPP2WHPowheg::xbar(double v) const { double xba2(sqr(_xb_a)), xbb2(sqr(_xb_b)), omv(-999.); double xbar1(-999.), xbar2(-999.); if(v==1.) return _xb_a; if(v==0.) 
return _xb_b; omv = 1.-v; xbar1=4.* v*xba2/ (sqrt(sqr(1.+xba2)*4.*sqr(omv)+16.*(1.-2.*omv)*xba2)+2.*omv*(1.-_xb_a)*(1.+_xb_a)); xbar2=4.*omv*xbb2/ (sqrt(sqr(1.+xbb2)*4.*sqr( v)+16.*(1.-2.* v)*xbb2)+2.* v*(1.-_xb_b)*(1.+_xb_b)); return max(xbar1,xbar2); } double MEPP2WHPowheg::Ltilde_qq(double x, double v) const { if(x==1.) return 1.; double xa(x_a(x,v)),xb(x_b(x,v)); double newq = (_hadron_A->pdf()->xfx(_hadron_A,_parton_a,scale(), xa)/ xa); double newqbar = (_hadron_B->pdf()->xfx(_hadron_B,_parton_b,scale(), xb)/ xb); return( newq * newqbar / _oldq / _oldqbar ); } double MEPP2WHPowheg::Ltilde_qg(double x, double v) const { double xa(x_a(x,v)),xb(x_b(x,v)); double newq = (_hadron_A->pdf()->xfx(_hadron_A,_parton_a,scale(), xa)/ xa); double newg2 = (_hadron_B->pdf()->xfx(_hadron_B,_gluon ,scale(), xb)/ xb); return( newq * newg2 / _oldq / _oldqbar ); } double MEPP2WHPowheg::Ltilde_gq(double x, double v) const { double xa(x_a(x,v)),xb(x_b(x,v)); double newg1 = (_hadron_A->pdf()->xfx(_hadron_A,_gluon ,scale(), xa)/ xa); double newqbar = (_hadron_B->pdf()->xfx(_hadron_B,_parton_b,scale(), xb)/ xb); return( newg1 * newqbar / _oldq / _oldqbar ); } double MEPP2WHPowheg::Vtilde_qq() const { return _alphaS2Pi*CF_*(-3.*log(_mu2/_mll2)+(2.*sqr(Constants::pi)/3.)-8.); } double MEPP2WHPowheg::Ccalbar_qg(double x) const { return (sqr(x)+sqr(1.-x))*(log(_mll2/(_mu2*x))+2.*log(1.-x))+2.*x*(1.-x); } double MEPP2WHPowheg::Ctilde_qg(double x, double v) const { return _alphaS2Pi*TR_ * ((1.-xbar(v))/x) * Ccalbar_qg(x)*Ltilde_qg(x,v); } double MEPP2WHPowheg::Ctilde_gq(double x, double v) const { return _alphaS2Pi*TR_ * ((1.-xbar(v))/x) * Ccalbar_qg(x)*Ltilde_gq(x,v); } double MEPP2WHPowheg::Ctilde_qq(double x, double v) const { double wgt = ((1.-x)/x+(1.+x*x)/(1.-x)/x*(2.*log(1.-x)-log(x)))*Ltilde_qq(x,v) - 4.*log(1.-x)/(1.-x) + 2./(1.-xbar(v))*log(1.-xbar(v))*log(1.-xbar(v)) + (2./(1.-xbar(v))*log(1.-xbar(v))-2./(1.-x)+(1.+x*x)/x/(1.-x)*Ltilde_qq(x,v)) *log(_mll2/_mu2); return _alphaS2Pi*CF_*(1.-xbar(v))*wgt; } double MEPP2WHPowheg::Fcal_qq(double x, double v) const { return (sqr(1.-x)*(1.-2.*v*(1.-v))+2.*x)/x*Ltilde_qq(x,v); } double MEPP2WHPowheg::Fcal_qg(double x, double v) const { return ((1.-xbar(v))/x)* (2.*x*(1.-x)*v+sqr((1.-x)*v)+sqr(x)+sqr(1.-x))*Ltilde_qg(x,v); } double MEPP2WHPowheg::Fcal_gq(double x, double v) const { return ((1.-xbar(v))/x)* (2.*x*(1.-x)*(1.-v)+sqr((1.-x)*(1.-v))+sqr(x)+sqr(1.-x))*Ltilde_gq(x,v); } double MEPP2WHPowheg::Ftilde_qg(double xt, double v) const { return _alphaS2Pi*TR_* ( Fcal_qg(x(xt,v),v) - Fcal_qg(x(xt,0.),0.) )/v; } double MEPP2WHPowheg::Ftilde_gq(double xt, double v) const { return _alphaS2Pi*TR_* ( Fcal_gq(x(xt,v),v) - Fcal_gq(x(xt,1.),1.) )/(1.-v); } double MEPP2WHPowheg::Ftilde_qq(double xt, double v) const { double eps(1e-10); // is emission into regular or singular region? if(xt>=0. && xt<1.-eps && v>eps && v<1.-eps) { // x<1, v>0, v<1 (regular emission, neither soft or collinear): return _alphaS2Pi*CF_* (( ( Fcal_qq(x(xt, v), v) - Fcal_qq(x(xt,1.),1.) ) / (1.-v)+ ( Fcal_qq(x(xt, v), v) - Fcal_qq(x(xt,0.),0.) ) / v )/(1.-xt) + ( log(1.-xbar(v)) - log(1.-_xb_a))*2./(1.-v) + ( log(1.-xbar(v)) - log(1.-_xb_b))*2./v); } else { // make sure emission is actually in the allowed phase space: if(!(v>=0. && v<=1. && xt>=0. && xt<=1.)) { ostringstream s; s << "MEPP2WHPowheg::Ftilde_qq : \n" << "xt(" << xt << ") and / or v(" << v << ") not in the phase space."; generator()->logWarning(Exception(s.str(),Exception::warning)); return 0.; } // is emission soft singular? 
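// [Editor's note] The branches that follow evaluate the subtracted q qbar
// real-emission term directly in its limits: xt >= 1-eps is the soft limit
// x -> 1 (with v <= eps and v >= 1-eps additionally collinear to beam b and
// beam a respectively); for xt < 1 only the purely collinear v -> 0 and
// v -> 1 limits remain to be treated.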
if(xt>=1.-eps) { // x=1: if(v<=eps) { // x==1, v=0 (soft and collinear with particle b): return _alphaS2Pi*CF_* ( ( log(1.-xbar(v)) - log(1.-_xb_a))*2./(1.-v) ); } else if(v>=1.-eps) { // x==1, v=1 (soft and collinear with particle a): return _alphaS2Pi*CF_* ( ( log(1.-xbar(v)) - log(1.-_xb_b))*2./v ); } else { // x==1, 0<v<1 (soft wide angle emission): return _alphaS2Pi*CF_* ( ( log(1.-xbar(v)) - log(1.-_xb_a))*2./(1.-v) + ( log(1.-xbar(v)) - log(1.-_xb_b))*2./v ); } } else { // x<1: if(v<=eps) { // x<1 but v=0 (collinear with particle b, but not soft): return _alphaS2Pi*CF_* ( ( ( Fcal_qq(x(xt, v), v) - Fcal_qq(x(xt,1.),1.) ) / (1.-v) )/(1.-xt) + ( log(1.-xbar(v)) - log(1.-_xb_a))*2./(1.-v) ); } else if(v>=1.-eps) { // x<1 but v=1 (collinear with particle a, but not soft): return _alphaS2Pi*CF_* ( ( ( Fcal_qq(x(xt, v), v) - Fcal_qq(x(xt,0.),0.) ) / v )/(1.-xt) + ( log(1.-xbar(v)) - log(1.-_xb_b))*2./v ); } } } return 0.; } diff --git a/MatrixElement/Powheg/MEPP2ZHPowheg.cc b/MatrixElement/Powheg/MEPP2ZHPowheg.cc --- a/MatrixElement/Powheg/MEPP2ZHPowheg.cc +++ b/MatrixElement/Powheg/MEPP2ZHPowheg.cc @@ -1,388 +1,388 @@ // -*- C++ -*- // // This is the implementation of the non-inlined, non-templated member // functions of the MEPP2ZHPowheg class. // #include "MEPP2ZHPowheg.h" #include "ThePEG/Interface/Parameter.h" #include "ThePEG/Interface/ClassDocumentation.h" #include "ThePEG/Interface/Switch.h" #include "ThePEG/Persistency/PersistentOStream.h" #include "ThePEG/Persistency/PersistentIStream.h" #include "ThePEG/Handlers/StandardXComb.h" #include "ThePEG/PDT/EnumParticles.h" #include "ThePEG/MatrixElement/Tree2toNDiagram.h" #include "ThePEG/PDT/DecayMode.h" using namespace Herwig; MEPP2ZHPowheg::MEPP2ZHPowheg() : _gluon(), TR_(0.5), CF_(4./3.), _contrib(1) ,_nlo_alphaS_opt(0), _fixed_alphaS(0.115895), _a(0.5) ,_p(0.7) , _eps(1.0e-8), _scaleopt(1), _fixedScale(100.*GeV), _scaleFact(1.) {} void MEPP2ZHPowheg::persistentOutput(PersistentOStream & os) const { os << _contrib << _nlo_alphaS_opt << _fixed_alphaS << _a << _p << _gluon << _scaleopt << ounit(_fixedScale,GeV) << _scaleFact; } void MEPP2ZHPowheg::persistentInput(PersistentIStream & is, int) { is >> _contrib >> _nlo_alphaS_opt >> _fixed_alphaS >> _a >> _p >> _gluon >> _scaleopt >> iunit(_fixedScale,GeV) >> _scaleFact; } ClassDescription<MEPP2ZHPowheg> MEPP2ZHPowheg::initMEPP2ZHPowheg; // Definition of the static class description member. 
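// [Editor's addition, illustration only -- not part of the original patch]
// The functional change in these Powheg classes is the finiteness guard on the
// NLO weight: std::isfinite(x) is true exactly when x is neither NaN nor
// +/-infinity, so one call replaces the previous paired isnan/isinf tests.
// Minimal sketch of the idiom (hypothetical helper name; shown with its own
// includes so it is self-contained):
#include <cmath>
#include <cassert>
namespace {
  inline void sketchCheckWeightIsFinite(double wgt) {
    // fires for NaN and for +/-inf, passes for any ordinary finite value
    assert(std::isfinite(wgt));
  }
}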
void MEPP2ZHPowheg::Init() { static ClassDocumentation<MEPP2ZHPowheg> documentation ("The MEPP2ZHPowheg class implements the matrix element for q qbar -> Z H", "The PP$\\to$Z Higgs POWHEG matrix element is described in \\cite{Hamilton:2009za}.", "\\bibitem{Hamilton:2009za}\n" " K.~Hamilton, P.~Richardson and J.~Tully,\n" " %``A Positive-Weight Next-to-Leading Order Monte Carlo Simulation for Higgs\n" " %Boson Production,''\n" " JHEP {\\bf 0904} (2009) 116\n" " [arXiv:0903.4345 [hep-ph]].\n" " %%CITATION = JHEPA,0904,116;%%\n" ); static Switch<MEPP2ZHPowheg,unsigned int> interfaceContribution ("Contribution", "Which contributions to the cross section to include", &MEPP2ZHPowheg::_contrib, 1, false, false); static SwitchOption interfaceContributionLeadingOrder (interfaceContribution, "LeadingOrder", "Just generate the leading order cross section", 0); static SwitchOption interfaceContributionPositiveNLO (interfaceContribution, "PositiveNLO", "Generate the positive contribution to the full NLO cross section", 1); static SwitchOption interfaceContributionNegativeNLO (interfaceContribution, "NegativeNLO", "Generate the negative contribution to the full NLO cross section", 2); static Switch<MEPP2ZHPowheg,unsigned int> interfaceNLOalphaSopt ("NLOalphaSopt", "Whether to use a fixed or a running QCD coupling for the NLO weight", &MEPP2ZHPowheg::_nlo_alphaS_opt, 0, false, false); static SwitchOption interfaceNLOalphaSoptRunningAlphaS (interfaceNLOalphaSopt, "RunningAlphaS", "Use the usual running QCD coupling evaluated at scale scale()", 0); static SwitchOption interfaceNLOalphaSoptFixedAlphaS (interfaceNLOalphaSopt, "FixedAlphaS", "Use a constant QCD coupling for comparison/debugging purposes", 1); static Parameter<MEPP2ZHPowheg,double> interfaceFixedNLOalphaS ("FixedNLOalphaS", "The value of alphaS to use for the nlo weight if _nlo_alphaS_opt=1", &MEPP2ZHPowheg::_fixed_alphaS, 0.115895, 0., 1.0, false, false, Interface::limited); static Parameter<MEPP2ZHPowheg,double> interfaceCorrectionCoefficient ("CorrectionCoefficient", "The magnitude of the correction term to reduce the negative contribution", &MEPP2ZHPowheg::_a, 0.5, -10., 10.0, false, false, Interface::limited); static Parameter<MEPP2ZHPowheg,double> interfaceCorrectionPower ("CorrectionPower", "The power of the correction term to reduce the negative contribution", &MEPP2ZHPowheg::_p, 0.7, 0.0, 1.0, false, false, Interface::limited); static Switch<MEPP2ZHPowheg,unsigned int> interfaceFactorizationScaleOption ("FactorizationScaleOption", "Option for the scale to be used", &MEPP2ZHPowheg::_scaleopt, 1, false, false); static SwitchOption interfaceScaleOptionFixed (interfaceFactorizationScaleOption, "Fixed", "Use a fixed scale", 0); static SwitchOption interfaceScaleOptionsHat (interfaceFactorizationScaleOption, "Dynamic", "Use the mass of the vector boson-Higgs boson system", 1); static Parameter<MEPP2ZHPowheg,Energy> interfaceFactorizationScaleValue ("FactorizationScaleValue", "The fixed scale to use if required", &MEPP2ZHPowheg::_fixedScale, GeV, 100.0*GeV, 10.0*GeV, 1000.0*GeV, false, false, Interface::limited); static Parameter<MEPP2ZHPowheg,double> interfaceScaleFactor ("ScaleFactor", "The factor used before sHat if using a running scale", &MEPP2ZHPowheg::_scaleFact, 1.0, 0.0, 10.0, false, false, Interface::limited); } void MEPP2ZHPowheg::doinit() { // gluon ParticleData object _gluon = getParticleData(ParticleID::g); MEPP2ZH::doinit(); } Energy2 MEPP2ZHPowheg::scale() const { return _scaleopt == 0 ? 
sqr(_fixedScale) : _scaleFact*sHat(); } int MEPP2ZHPowheg::nDim() const { return 7; } bool MEPP2ZHPowheg::generateKinematics(const double * r) { _xt=*(r+5); _v =*(r+6); return MEPP2ZH::generateKinematics(r); } CrossSection MEPP2ZHPowheg::dSigHatDR() const { // Get Born momentum fractions xbar_a and xbar_b: _xb_a = lastX1(); _xb_b = lastX2(); return MEPP2ZH::dSigHatDR()*NLOweight(); } double MEPP2ZHPowheg::NLOweight() const { // If only leading order is required return 1: if(_contrib==0) return 1.; useMe(); // Get particle data for QCD particles: _parton_a=mePartonData()[0]; _parton_b=mePartonData()[1]; // get BeamParticleData objects for PDF's _hadron_A=dynamic_ptr_cast<Ptr<BeamParticleData>::transient_const_pointer> (lastParticles().first->dataPtr()); _hadron_B=dynamic_ptr_cast<Ptr<BeamParticleData>::transient_const_pointer> (lastParticles().second->dataPtr()); // If necessary swap the particle data vectors so that _xb_a, // mePartonData[0], beam[0] relate to the inbound quark: if(!(lastPartons().first ->dataPtr()==_parton_a&& lastPartons().second->dataPtr()==_parton_b)) { swap(_xb_a ,_xb_b); swap(_hadron_A,_hadron_B); } // calculate the PDF's for the Born process _oldq = _hadron_A->pdf()->xfx(_hadron_A,_parton_a,scale(),_xb_a)/_xb_a; _oldqbar = _hadron_B->pdf()->xfx(_hadron_B,_parton_b,scale(),_xb_b)/_xb_b; // Calculate alpha_S _alphaS2Pi = _nlo_alphaS_opt==1 ? _fixed_alphaS : SM().alphaS(scale()); _alphaS2Pi /= 2.*Constants::pi; // Calculate the invariant mass of the dilepton pair _mll2 = sHat(); _mu2 = scale(); // Calculate the integrand // q qbar contribution double wqqvirt = Vtilde_qq(); double wqqcollin = Ctilde_qq(x(_xt,1.),1.) + Ctilde_qq(x(_xt,0.),0.); double wqqreal = Ftilde_qq(_xt,_v); double wqq = wqqvirt+wqqcollin+wqqreal; // q g contribution double wqgcollin = Ctilde_qg(x(_xt,0.),0.); double wqgreal = Ftilde_qg(_xt,_v); double wqg = wqgreal+wqgcollin; // g qbar contribution double wgqbarcollin = Ctilde_gq(x(_xt,1.),1.); double wgqbarreal = Ftilde_gq(_xt,_v); double wgqbar = wgqbarreal+wgqbarcollin; // total double wgt = 1.+(wqq+wqg+wgqbar); // KMH - 06/08 - This seems to give wrong NLO results for // associated Higgs so I'm omitting it. // //trick to try and reduce neg wgt contribution // if(_xt<1.-_eps) // wgt += _a*(1./pow(1.-_xt,_p)-(1.-pow(_eps,1.-_p))/(1.-_p)/(1.-_eps)); // return the answer - assert(!isinf(wgt)&&!isnan(wgt)); + assert(isfinite(wgt)); return _contrib==1 ? max(0.,wgt) : max(0.,-wgt); } double MEPP2ZHPowheg::x(double xt, double v) const { double x0(xbar(v)); return x0+(1.-x0)*xt; } double MEPP2ZHPowheg::x_a(double x, double v) const { if(x==1.) return _xb_a; if(v==0.) return _xb_a; if(v==1.) return _xb_a/x; return (_xb_a/sqrt(x))*sqrt((1.-(1.-x)*(1.-v))/(1.-(1.-x)*v)); } double MEPP2ZHPowheg::x_b(double x, double v) const { if(x==1.) return _xb_b; if(v==0.) return _xb_b/x; if(v==1.) return _xb_b; return (_xb_b/sqrt(x))*sqrt((1.-(1.-x)*v)/(1.-(1.-x)*(1.-v))); } double MEPP2ZHPowheg::xbar(double v) const { double xba2(sqr(_xb_a)), xbb2(sqr(_xb_b)), omv(-999.); double xbar1(-999.), xbar2(-999.); if(v==1.) return _xb_a; if(v==0.) return _xb_b; omv = 1.-v; xbar1=4.* v*xba2/ (sqrt(sqr(1.+xba2)*4.*sqr(omv)+16.*(1.-2.*omv)*xba2)+2.*omv*(1.-_xb_a)*(1.+_xb_a)); xbar2=4.*omv*xbb2/ (sqrt(sqr(1.+xbb2)*4.*sqr( v)+16.*(1.-2.* v)*xbb2)+2.* v*(1.-_xb_b)*(1.+_xb_b)); return max(xbar1,xbar2); } double MEPP2ZHPowheg::Ltilde_qq(double x, double v) const { if(x==1.) 
return 1.; double xa(x_a(x,v)),xb(x_b(x,v)); double newq = (_hadron_A->pdf()->xfx(_hadron_A,_parton_a,scale(), xa)/ xa); double newqbar = (_hadron_B->pdf()->xfx(_hadron_B,_parton_b,scale(), xb)/ xb); return( newq * newqbar / _oldq / _oldqbar ); } double MEPP2ZHPowheg::Ltilde_qg(double x, double v) const { double xa(x_a(x,v)),xb(x_b(x,v)); double newq = (_hadron_A->pdf()->xfx(_hadron_A,_parton_a,scale(), xa)/ xa); double newg2 = (_hadron_B->pdf()->xfx(_hadron_B,_gluon ,scale(), xb)/ xb); return( newq * newg2 / _oldq / _oldqbar ); } double MEPP2ZHPowheg::Ltilde_gq(double x, double v) const { double xa(x_a(x,v)),xb(x_b(x,v)); double newg1 = (_hadron_A->pdf()->xfx(_hadron_A,_gluon ,scale(), xa)/ xa); double newqbar = (_hadron_B->pdf()->xfx(_hadron_B,_parton_b,scale(), xb)/ xb); return( newg1 * newqbar / _oldq / _oldqbar ); } double MEPP2ZHPowheg::Vtilde_qq() const { return _alphaS2Pi*CF_*(-3.*log(_mu2/_mll2)+(2.*sqr(Constants::pi)/3.)-8.); } double MEPP2ZHPowheg::Ccalbar_qg(double x) const { return (sqr(x)+sqr(1.-x))*(log(_mll2/(_mu2*x))+2.*log(1.-x))+2.*x*(1.-x); } double MEPP2ZHPowheg::Ctilde_qg(double x, double v) const { return _alphaS2Pi*TR_ * ((1.-xbar(v))/x) * Ccalbar_qg(x)*Ltilde_qg(x,v); } double MEPP2ZHPowheg::Ctilde_gq(double x, double v) const { return _alphaS2Pi*TR_ * ((1.-xbar(v))/x) * Ccalbar_qg(x)*Ltilde_gq(x,v); } double MEPP2ZHPowheg::Ctilde_qq(double x, double v) const { double wgt = ((1.-x)/x+(1.+x*x)/(1.-x)/x*(2.*log(1.-x)-log(x)))*Ltilde_qq(x,v) - 4.*log(1.-x)/(1.-x) + 2./(1.-xbar(v))*log(1.-xbar(v))*log(1.-xbar(v)) + (2./(1.-xbar(v))*log(1.-xbar(v))-2./(1.-x)+(1.+x*x)/x/(1.-x)*Ltilde_qq(x,v)) *log(_mll2/_mu2); return _alphaS2Pi*CF_*(1.-xbar(v))*wgt; } double MEPP2ZHPowheg::Fcal_qq(double x, double v) const { return (sqr(1.-x)*(1.-2.*v*(1.-v))+2.*x)/x*Ltilde_qq(x,v); } double MEPP2ZHPowheg::Fcal_qg(double x, double v) const { return ((1.-xbar(v))/x)* (2.*x*(1.-x)*v+sqr((1.-x)*v)+sqr(x)+sqr(1.-x))*Ltilde_qg(x,v); } double MEPP2ZHPowheg::Fcal_gq(double x, double v) const { return ((1.-xbar(v))/x)* (2.*x*(1.-x)*(1.-v)+sqr((1.-x)*(1.-v))+sqr(x)+sqr(1.-x))*Ltilde_gq(x,v); } double MEPP2ZHPowheg::Ftilde_qg(double xt, double v) const { return _alphaS2Pi*TR_* ( Fcal_qg(x(xt,v),v) - Fcal_qg(x(xt,0.),0.) )/v; } double MEPP2ZHPowheg::Ftilde_gq(double xt, double v) const { return _alphaS2Pi*TR_* ( Fcal_gq(x(xt,v),v) - Fcal_gq(x(xt,1.),1.) )/(1.-v); } double MEPP2ZHPowheg::Ftilde_qq(double xt, double v) const { double eps(1e-10); // is emission into regular or singular region? if(xt>=0. && xt<1.-eps && v>eps && v<1.-eps) { // x<1, v>0, v<1 (regular emission, neither soft or collinear): return _alphaS2Pi*CF_* (( ( Fcal_qq(x(xt, v), v) - Fcal_qq(x(xt,1.),1.) ) / (1.-v)+ ( Fcal_qq(x(xt, v), v) - Fcal_qq(x(xt,0.),0.) ) / v )/(1.-xt) + ( log(1.-xbar(v)) - log(1.-_xb_a))*2./(1.-v) + ( log(1.-xbar(v)) - log(1.-_xb_b))*2./v); } else { // make sure emission is actually in the allowed phase space: if(!(v>=0. && v<=1. && xt>=0. && xt<=1.)) { ostringstream s; s << "MEPP2ZHPowheg::Ftilde_qq : \n" << "xt(" << xt << ") and / or v(" << v << ") not in the phase space."; generator()->logWarning(Exception(s.str(),Exception::warning)); return 0.; } // is emission soft singular? 
if(xt>=1.-eps) { // x=1: if(v<=eps) { // x==1, v=0 (soft and collinear with particle b): return _alphaS2Pi*CF_* ( ( log(1.-xbar(v)) - log(1.-_xb_a))*2./(1.-v) ); } else if(v>=1.-eps) { // x==1, v=1 (soft and collinear with particle a): return _alphaS2Pi*CF_* ( ( log(1.-xbar(v)) - log(1.-_xb_b))*2./v ); } else { // x==1, 0<v<1 (soft wide angle emission): return _alphaS2Pi*CF_* ( ( log(1.-xbar(v)) - log(1.-_xb_a))*2./(1.-v) + ( log(1.-xbar(v)) - log(1.-_xb_b))*2./v ); } } else { // x<1: if(v<=eps) { // x<1 but v=0 (collinear with particle b, but not soft): return _alphaS2Pi*CF_* ( ( ( Fcal_qq(x(xt, v), v) - Fcal_qq(x(xt,1.),1.) ) / (1.-v) )/(1.-xt) + ( log(1.-xbar(v)) - log(1.-_xb_a))*2./(1.-v) ); } else if(v>=1.-eps) { // x<1 but v=1 (collinear with particle a, but not soft): return _alphaS2Pi*CF_* ( ( ( Fcal_qq(x(xt, v), v) - Fcal_qq(x(xt,0.),0.) ) / v )/(1.-xt) + ( log(1.-xbar(v)) - log(1.-_xb_b))*2./v ); } } } return 0.; } diff --git a/MatrixElement/Powheg/MEqq2W2ffPowheg.cc b/MatrixElement/Powheg/MEqq2W2ffPowheg.cc --- a/MatrixElement/Powheg/MEqq2W2ffPowheg.cc +++ b/MatrixElement/Powheg/MEqq2W2ffPowheg.cc @@ -1,389 +1,389 @@ // -*- C++ -*- // // MEqq2W2ffPowheg.cc is a part of Herwig - A multi-purpose Monte Carlo event generator // Copyright (C) 2002-2011 The Herwig Collaboration // // Herwig is licenced under version 2 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. // // // This is the implementation of the non-inlined, non-templated member // functions of the MEqq2W2ffPowheg class. // #include "MEqq2W2ffPowheg.h" #include "ThePEG/Interface/ClassDocumentation.h" #include "ThePEG/Interface/Switch.h" #include "ThePEG/Interface/Parameter.h" #include "ThePEG/Persistency/PersistentOStream.h" #include "ThePEG/Persistency/PersistentIStream.h" #include "ThePEG/Handlers/StandardXComb.h" #include "ThePEG/PDT/EnumParticles.h" #include "Herwig/Models/StandardModel/StandardModel.h" #include "ThePEG/Repository/EventGenerator.h" #include "Herwig/Utilities/Maths.h" using namespace Herwig; using Herwig::Math::ReLi2; MEqq2W2ffPowheg::MEqq2W2ffPowheg() : _gluon(), TR_(0.5), CF_(4./3.), _contrib(1) ,_nlo_alphaS_opt(0), _fixed_alphaS(0.115895), _a(0.5) ,_p(0.7) , _eps(1.0e-8), _scaleopt(1), _fixedScale(100.*GeV), _scaleFact(1.) { massOption(vector<unsigned int>(2,1)); } void MEqq2W2ffPowheg::doinit() { // gluon ParticleData object _gluon = getParticleData(ParticleID::g); MEqq2W2ff::doinit(); } Energy2 MEqq2W2ffPowheg::scale() const { return _scaleopt == 0 ? sqr(_fixedScale) : _scaleFact*sHat(); } void MEqq2W2ffPowheg::persistentOutput(PersistentOStream & os) const { os << _contrib << _nlo_alphaS_opt << _fixed_alphaS << _a << _p << _gluon << _scaleopt << ounit(_fixedScale,GeV) << _scaleFact; } void MEqq2W2ffPowheg::persistentInput(PersistentIStream & is, int) { is >> _contrib >> _nlo_alphaS_opt >> _fixed_alphaS >> _a >> _p >> _gluon >> _scaleopt >> iunit(_fixedScale,GeV) >> _scaleFact; } ClassDescription<MEqq2W2ffPowheg> MEqq2W2ffPowheg::initMEqq2W2ffPowheg; // Definition of the static class description member. 
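// [Editor's addition, illustration only -- not part of the original patch]
// Schematic of how NLOweight() further down assembles the weight and how the
// Contribution switch splits it: the Born term contributes 1, the q qbar,
// q g and g qbar channels add their virtual + collinear + real pieces, and the
// result is separated into its positive and negative parts so each can be
// generated with positive weights. (This class additionally adds the small
// correction term controlled by CorrectionCoefficient/CorrectionPower before
// the split; that is omitted here.) Names below are illustrative stand-ins,
// not the class's data members.
#include <cmath>
#include <cassert>
#include <algorithm>
namespace {
  inline double sketchPowhegWeight(double wqq, double wqg, double wgqbar,
                                   unsigned int contrib) {
    double wgt = 1. + (wqq + wqg + wgqbar);   // Born + NLO channels
    assert(std::isfinite(wgt));               // same guard as in the patch
    // contrib==1 keeps the positive part, contrib==2 minus the negative part
    return contrib == 1 ? std::max(0., wgt) : std::max(0., -wgt);
  }
}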
void MEqq2W2ffPowheg::Init() { static ClassDocumentation<MEqq2W2ffPowheg> documentation ("The MEqq2W2ffPowheg class implements the matrix element for" "q qbar to Standard Model fermions via W exchange using helicity amplitude" "techniques including the NLO correction in the POWHEG formalism", "The qq$\\to$W$\\to$ff POWHEG matrix element is described in \\cite{Hamilton:2008pd}.", "%\\cite{Hamilton:2008pd}\n" "\\bibitem{Hamilton:2008pd}\n" " K.~Hamilton, P.~Richardson and J.~Tully,\n" " ``A Positive-Weight Next-to-Leading Order Monte Carlo Simulation of Drell-Yan\n" " Vector Boson Production,''\n" " JHEP {\\bf 0810} (2008) 015\n" " [arXiv:0806.0290 [hep-ph]].\n" " %%CITATION = JHEPA,0810,015;%%\n"); static Switch<MEqq2W2ffPowheg,unsigned int> interfaceContribution ("Contribution", "Which contributions to the cross section to include", &MEqq2W2ffPowheg::_contrib, 1, false, false); static SwitchOption interfaceContributionLeadingOrder (interfaceContribution, "LeadingOrder", "Just generate the leading order cross section", 0); static SwitchOption interfaceContributionPositiveNLO (interfaceContribution, "PositiveNLO", "Generate the positive contribution to the full NLO cross section", 1); static SwitchOption interfaceContributionNegativeNLO (interfaceContribution, "NegativeNLO", "Generate the negative contribution to the full NLO cross section", 2); static Switch<MEqq2W2ffPowheg,unsigned int> interfaceNLOalphaSopt ("NLOalphaSopt", "Whether to use a fixed or a running QCD coupling for the NLO weight", &MEqq2W2ffPowheg::_nlo_alphaS_opt, 0, false, false); static SwitchOption interfaceNLOalphaSoptRunningAlphaS (interfaceNLOalphaSopt, "RunningAlphaS", "Use the usual running QCD coupling evaluated at scale scale()", 0); static SwitchOption interfaceNLOalphaSoptFixedAlphaS (interfaceNLOalphaSopt, "FixedAlphaS", "Use a constant QCD coupling for comparison/debugging purposes", 1); static Parameter<MEqq2W2ffPowheg,double> interfaceFixedNLOalphaS ("FixedNLOalphaS", "The value of alphaS to use for the nlo weight if _nlo_alphaS_opt=1", &MEqq2W2ffPowheg::_fixed_alphaS, 0.115895, 0., 1.0, false, false, Interface::limited); static Parameter<MEqq2W2ffPowheg,double> interfaceCorrectionCoefficient ("CorrectionCoefficient", "The magnitude of the correction term to reduce the negative contribution", &MEqq2W2ffPowheg::_a, 0.5, -10., 10.0, false, false, Interface::limited); static Parameter<MEqq2W2ffPowheg,double> interfaceCorrectionPower ("CorrectionPower", "The power of the correction term to reduce the negative contribution", &MEqq2W2ffPowheg::_p, 0.7, 0.0, 1.0, false, false, Interface::limited); static Switch<MEqq2W2ffPowheg,unsigned int> interfaceScaleOption ("ScaleOption", "Option for the scale to be used", &MEqq2W2ffPowheg::_scaleopt, 1, false, false); static SwitchOption interfaceScaleOptionFixed (interfaceScaleOption, "Fixed", "Use a fixed scale", 0); static SwitchOption interfaceScaleOptionsHat (interfaceScaleOption, "Dynamic", "Use the off-shell vector boson mass as the scale", 1); static Parameter<MEqq2W2ffPowheg,Energy> interfaceFixedScale ("FixedScale", "The fixed scale to use if required", &MEqq2W2ffPowheg::_fixedScale, GeV, 100.0*GeV, 10.0*GeV, 1000.0*GeV, false, false, Interface::limited); static Parameter<MEqq2W2ffPowheg,double> interfaceScaleFactor ("ScaleFactor", "The factor used before sHat if using a running scale", &MEqq2W2ffPowheg::_scaleFact, 1.0, 0.0, 10.0, false, false, Interface::limited); } int MEqq2W2ffPowheg::nDim() const { return 3; } bool MEqq2W2ffPowheg::generateKinematics(const 
double * r) { _xt=*(r+1); _v =*(r+2); return MEqq2W2ff::generateKinematics(r); } CrossSection MEqq2W2ffPowheg::dSigHatDR() const { // Get Born momentum fractions xbar_a and xbar_b: _xb_a = lastX1(); _xb_b = lastX2(); return MEqq2W2ff::dSigHatDR()*NLOweight(); } double MEqq2W2ffPowheg::NLOweight() const { // If only leading order is required return 1: if(_contrib==0) return 1.; useMe(); // Get particle data for QCD particles: _parton_a=mePartonData()[0]; _parton_b=mePartonData()[1]; // get BeamParticleData objects for PDF's _hadron_A=dynamic_ptr_cast<Ptr<BeamParticleData>::transient_const_pointer> (lastParticles().first->dataPtr()); _hadron_B=dynamic_ptr_cast<Ptr<BeamParticleData>::transient_const_pointer> (lastParticles().second->dataPtr()); // If necessary swap the particle data vectors so that _xb_a, // mePartonData[0], beam[0] relate to the inbound quark: if(!(lastPartons().first ->dataPtr()==_parton_a&& lastPartons().second->dataPtr()==_parton_b)) { swap(_xb_a ,_xb_b); swap(_hadron_A,_hadron_B); } // calculate the PDF's for the Born process _oldq = _hadron_A->pdf()->xfx(_hadron_A,_parton_a,scale(),_xb_a)/_xb_a; _oldqbar = _hadron_B->pdf()->xfx(_hadron_B,_parton_b,scale(),_xb_b)/_xb_b; // Calculate alpha_S _alphaS2Pi = _nlo_alphaS_opt==1 ? _fixed_alphaS : SM().alphaS(scale()); _alphaS2Pi /= 2.*Constants::pi; // Calculate the invariant mass of the dilepton pair _mll2 = sHat(); _mu2 = scale(); // Calculate the integrand // q qbar contribution double wqqvirt = Vtilde_qq(); double wqqcollin = Ctilde_qq(x(_xt,1.),1.) + Ctilde_qq(x(_xt,0.),0.); double wqqreal = Ftilde_qq(_xt,_v); double wqq = wqqvirt+wqqcollin+wqqreal; // q g contribution double wqgcollin = Ctilde_qg(x(_xt,0.),0.); double wqgreal = Ftilde_qg(_xt,_v); double wqg = wqgreal+wqgcollin; // g qbar contribution double wgqbarcollin = Ctilde_gq(x(_xt,1.),1.); double wgqbarreal = Ftilde_gq(_xt,_v); double wgqbar = wgqbarreal+wgqbarcollin; // total double wgt = 1.+(wqq+wqg+wgqbar); //trick to try and reduce neg wgt contribution if(_xt<1.-_eps) wgt += _a*(1./pow(1.-_xt,_p)-(1.-pow(_eps,1.-_p))/(1.-_p)/(1.-_eps)); // return the answer - assert(!isinf(wgt)&&!isnan(wgt)); + assert(isfinite(wgt)); return _contrib==1 ? max(0.,wgt) : max(0.,-wgt); } double MEqq2W2ffPowheg::x(double xt, double v) const { double x0(xbar(v)); return x0+(1.-x0)*xt; } double MEqq2W2ffPowheg::x_a(double x, double v) const { if(x==1.) return _xb_a; if(v==0.) return _xb_a; if(v==1.) return _xb_a/x; return (_xb_a/sqrt(x))*sqrt((1.-(1.-x)*(1.-v))/(1.-(1.-x)*v)); } double MEqq2W2ffPowheg::x_b(double x, double v) const { if(x==1.) return _xb_b; if(v==0.) return _xb_b/x; if(v==1.) return _xb_b; return (_xb_b/sqrt(x))*sqrt((1.-(1.-x)*v)/(1.-(1.-x)*(1.-v))); } double MEqq2W2ffPowheg::xbar(double v) const { double xba2(sqr(_xb_a)), xbb2(sqr(_xb_b)), omv(-999.); double xbar1(-999.), xbar2(-999.); if(v==1.) return _xb_a; if(v==0.) return _xb_b; omv = 1.-v; xbar1=4.* v*xba2/ (sqrt(sqr(1.+xba2)*4.*sqr(omv)+16.*(1.-2.*omv)*xba2)+2.*omv*(1.-_xb_a)*(1.+_xb_a)); xbar2=4.*omv*xbb2/ (sqrt(sqr(1.+xbb2)*4.*sqr( v)+16.*(1.-2.* v)*xbb2)+2.* v*(1.-_xb_b)*(1.+_xb_b)); return max(xbar1,xbar2); } double MEqq2W2ffPowheg::Ltilde_qq(double x, double v) const { if(x==1.) 
return 1.; double xa(x_a(x,v)),xb(x_b(x,v)); double newq = (_hadron_A->pdf()->xfx(_hadron_A,_parton_a,scale(), xa)/ xa); double newqbar = (_hadron_B->pdf()->xfx(_hadron_B,_parton_b,scale(), xb)/ xb); return( newq * newqbar / _oldq / _oldqbar ); } double MEqq2W2ffPowheg::Ltilde_qg(double x, double v) const { double xa(x_a(x,v)),xb(x_b(x,v)); double newq = (_hadron_A->pdf()->xfx(_hadron_A,_parton_a,scale(), xa)/ xa); double newg2 = (_hadron_B->pdf()->xfx(_hadron_B,_gluon ,scale(), xb)/ xb); return( newq * newg2 / _oldq / _oldqbar ); } double MEqq2W2ffPowheg::Ltilde_gq(double x, double v) const { double xa(x_a(x,v)),xb(x_b(x,v)); double newg1 = (_hadron_A->pdf()->xfx(_hadron_A,_gluon ,scale(), xa)/ xa); double newqbar = (_hadron_B->pdf()->xfx(_hadron_B,_parton_b,scale(), xb)/ xb); return( newg1 * newqbar / _oldq / _oldqbar ); } double MEqq2W2ffPowheg::Vtilde_qq() const { return _alphaS2Pi*CF_*(-3.*log(_mu2/_mll2)+(2.*sqr(Constants::pi)/3.)-8.); } double MEqq2W2ffPowheg::Ccalbar_qg(double x) const { return (sqr(x)+sqr(1.-x))*(log(_mll2/(_mu2*x))+2.*log(1.-x))+2.*x*(1.-x); } double MEqq2W2ffPowheg::Ctilde_qg(double x, double v) const { return _alphaS2Pi*TR_ * ((1.-xbar(v))/x) * Ccalbar_qg(x)*Ltilde_qg(x,v); } double MEqq2W2ffPowheg::Ctilde_gq(double x, double v) const { return _alphaS2Pi*TR_ * ((1.-xbar(v))/x) * Ccalbar_qg(x)*Ltilde_gq(x,v); } double MEqq2W2ffPowheg::Ctilde_qq(double x, double v) const { double wgt = ((1.-x)/x+(1.+x*x)/(1.-x)/x*(2.*log(1.-x)-log(x)))*Ltilde_qq(x,v) - 4.*log(1.-x)/(1.-x) + 2./(1.-xbar(v))*log(1.-xbar(v))*log(1.-xbar(v)) + (2./(1.-xbar(v))*log(1.-xbar(v))-2./(1.-x)+(1.+x*x)/x/(1.-x)*Ltilde_qq(x,v)) *log(_mll2/_mu2); return _alphaS2Pi*CF_*(1.-xbar(v))*wgt; } double MEqq2W2ffPowheg::Fcal_qq(double x, double v) const { return (sqr(1.-x)*(1.-2.*v*(1.-v))+2.*x)/x*Ltilde_qq(x,v); } double MEqq2W2ffPowheg::Fcal_qg(double x, double v) const { return ((1.-xbar(v))/x)* (2.*x*(1.-x)*v+sqr((1.-x)*v)+sqr(x)+sqr(1.-x))*Ltilde_qg(x,v); } double MEqq2W2ffPowheg::Fcal_gq(double x, double v) const { return ((1.-xbar(v))/x)* (2.*x*(1.-x)*(1.-v)+sqr((1.-x)*(1.-v))+sqr(x)+sqr(1.-x))*Ltilde_gq(x,v); } double MEqq2W2ffPowheg::Ftilde_qg(double xt, double v) const { return _alphaS2Pi*TR_* ( Fcal_qg(x(xt,v),v) - Fcal_qg(x(xt,0.),0.) )/v; } double MEqq2W2ffPowheg::Ftilde_gq(double xt, double v) const { return _alphaS2Pi*TR_* ( Fcal_gq(x(xt,v),v) - Fcal_gq(x(xt,1.),1.) )/(1.-v); } double MEqq2W2ffPowheg::Ftilde_qq(double xt, double v) const { double eps(1e-10); // is emission into regular or singular region? if(xt>=0. && xt<1.-eps && v>eps && v<1.-eps) { // x<1, v>0, v<1 (regular emission, neither soft or collinear): return _alphaS2Pi*CF_* (( ( Fcal_qq(x(xt, v), v) - Fcal_qq(x(xt,1.),1.) ) / (1.-v)+ ( Fcal_qq(x(xt, v), v) - Fcal_qq(x(xt,0.),0.) ) / v )/(1.-xt) + ( log(1.-xbar(v)) - log(1.-_xb_a))*2./(1.-v) + ( log(1.-xbar(v)) - log(1.-_xb_b))*2./v); } else { // make sure emission is actually in the allowed phase space: if(!(v>=0. && v<=1. && xt>=0. && xt<=1.)) { ostringstream s; s << "MEqq2W2ffPowheg::Ftilde_qq : \n" << "xt(" << xt << ") and / or v(" << v << ") not in the phase space."; generator()->logWarning(Exception(s.str(),Exception::warning)); return 0.; } // is emission soft singular? 
if(xt>=1.-eps) { // x=1: if(v<=eps) { // x==1, v=0 (soft and collinear with particle b): return _alphaS2Pi*CF_* ( ( log(1.-xbar(v)) - log(1.-_xb_a))*2./(1.-v) ); } else if(v>=1.-eps) { // x==1, v=1 (soft and collinear with particle a): return _alphaS2Pi*CF_* ( ( log(1.-xbar(v)) - log(1.-_xb_b))*2./v ); } else { // x==1, 0<v<1 (soft wide angle emission): return _alphaS2Pi*CF_* ( ( log(1.-xbar(v)) - log(1.-_xb_a))*2./(1.-v) + ( log(1.-xbar(v)) - log(1.-_xb_b))*2./v ); } } else { // x<1: if(v<=eps) { // x<1 but v=0 (collinear with particle b, but not soft): return _alphaS2Pi*CF_* ( ( ( Fcal_qq(x(xt, v), v) - Fcal_qq(x(xt,1.),1.) ) / (1.-v) )/(1.-xt) + ( log(1.-xbar(v)) - log(1.-_xb_a))*2./(1.-v) ); } else if(v>=1.-eps) { // x<1 but v=1 (collinear with particle a, but not soft): return _alphaS2Pi*CF_* ( ( ( Fcal_qq(x(xt, v), v) - Fcal_qq(x(xt,0.),0.) ) / v )/(1.-xt) + ( log(1.-xbar(v)) - log(1.-_xb_b))*2./v ); } } } return 0.; } diff --git a/MatrixElement/Powheg/MEqq2gZ2ffPowheg.cc b/MatrixElement/Powheg/MEqq2gZ2ffPowheg.cc --- a/MatrixElement/Powheg/MEqq2gZ2ffPowheg.cc +++ b/MatrixElement/Powheg/MEqq2gZ2ffPowheg.cc @@ -1,394 +1,394 @@ // -*- C++ -*- // // MEqq2gZ2ffPowheg.cc is a part of Herwig - A multi-purpose Monte Carlo event generator // Copyright (C) 2002-2011 The Herwig Collaboration // // Herwig is licenced under version 2 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. // // // This is the implementation of the non-inlined, non-templated member // functions of the MEqq2gZ2ffPowheg class. // #include "MEqq2gZ2ffPowheg.h" #include "ThePEG/Interface/ClassDocumentation.h" #include "ThePEG/Interface/Switch.h" #include "ThePEG/Interface/Parameter.h" #include "ThePEG/Persistency/PersistentOStream.h" #include "ThePEG/Persistency/PersistentIStream.h" #include "ThePEG/Handlers/StandardXComb.h" #include "ThePEG/PDT/EnumParticles.h" #include "Herwig/Models/StandardModel/StandardModel.h" #include "ThePEG/Repository/EventGenerator.h" #include "Herwig/Utilities/Maths.h" using namespace Herwig; using Herwig::Math::ReLi2; MEqq2gZ2ffPowheg::MEqq2gZ2ffPowheg() : _gluon(), TR_(0.5), CF_(4./3.), _contrib(1) ,_nlo_alphaS_opt(0), _fixed_alphaS(0.115895), _a(0.5) ,_p(0.7) , _eps(1.0e-8), _scaleopt(1), _fixedScale(100.*GeV), _scaleFact(1.) { massOption(vector<unsigned int>(2,1)); } void MEqq2gZ2ffPowheg::doinit() { // gluon ParticleData object _gluon = getParticleData(ParticleID::g); MEqq2gZ2ff::doinit(); } Energy2 MEqq2gZ2ffPowheg::scale() const { return _scaleopt == 0 ? sqr(_fixedScale) : _scaleFact*sHat(); } void MEqq2gZ2ffPowheg::persistentOutput(PersistentOStream & os) const { os << _contrib << _nlo_alphaS_opt << _fixed_alphaS << _a << _p << _gluon << _scaleopt << ounit(_fixedScale,GeV) << _scaleFact; } void MEqq2gZ2ffPowheg::persistentInput(PersistentIStream & is, int) { is >> _contrib >> _nlo_alphaS_opt >> _fixed_alphaS >> _a >> _p >> _gluon >> _scaleopt >> iunit(_fixedScale,GeV) >> _scaleFact; } ClassDescription<MEqq2gZ2ffPowheg> MEqq2gZ2ffPowheg::initMEqq2gZ2ffPowheg; // Definition of the static class description member. 
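// [Editor's addition, illustration only -- not part of the original patch]
// nDim() below asks for two extra random numbers when an NLO contribution is
// requested; generateKinematics() stores them as the radiation variables
// (_xt, _v), and x(xt,v) further down stretches xt onto [xbar(v), 1]:
//   x = x0 + (1 - x0) * xt   with   x0 = xbar(v),
// so xt = 1 is the soft limit x = 1 and xt = 0 sits on the phase-space
// boundary x = xbar(v). Minimal sketch of that map (hypothetical name):
namespace {
  inline double sketchRadiationX(double xt, double x0) {
    return x0 + (1. - x0) * xt;   // linear map of xt in [0,1] onto [x0, 1]
  }
}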
void MEqq2gZ2ffPowheg::Init() { static ClassDocumentation<MEqq2gZ2ffPowheg> documentation ("The MEqq2gZ2ffPowheg class implements the matrix element for" "q qbar to Standard Model fermions via Z and photon exchange using" " helicity amplitude techniques including the NLO correction in" " the POWHEG formalism", "The qq$\\to\\gamma/Z\\to$ff POWHEG matrix element is described in \\cite{Hamilton:2008pd}.", "%\\cite{Hamilton:2008pd}\n" "\\bibitem{Hamilton:2008pd}\n" " K.~Hamilton, P.~Richardson and J.~Tully,\n" " %``A Positive-Weight Next-to-Leading Order Monte Carlo Simulation of Drell-Yan\n" " %Vector Boson Production,''\n" " JHEP {\\bf 0810} (2008) 015\n" " [arXiv:0806.0290 [hep-ph]].\n" " %%CITATION = JHEPA,0810,015;%%\n" ); static Switch<MEqq2gZ2ffPowheg,unsigned int> interfaceContribution ("Contribution", "Which contributions to the cross section to include", &MEqq2gZ2ffPowheg::_contrib, 1, false, false); static SwitchOption interfaceContributionLeadingOrder (interfaceContribution, "LeadingOrder", "Just generate the leading order cross section", 0); static SwitchOption interfaceContributionPositiveNLO (interfaceContribution, "PositiveNLO", "Generate the positive contribution to the full NLO cross section", 1); static SwitchOption interfaceContributionNegativeNLO (interfaceContribution, "NegativeNLO", "Generate the negative contribution to the full NLO cross section", 2); static Switch<MEqq2gZ2ffPowheg,unsigned int> interfaceNLOalphaSopt ("NLOalphaSopt", "Whether to use a fixed or a running QCD coupling for the NLO weight", &MEqq2gZ2ffPowheg::_nlo_alphaS_opt, 0, false, false); static SwitchOption interfaceNLOalphaSoptRunningAlphaS (interfaceNLOalphaSopt, "RunningAlphaS", "Use the usual running QCD coupling evaluated at scale scale()", 0); static SwitchOption interfaceNLOalphaSoptFixedAlphaS (interfaceNLOalphaSopt, "FixedAlphaS", "Use a constant QCD coupling for comparison/debugging purposes", 1); static Parameter<MEqq2gZ2ffPowheg,double> interfaceFixedNLOalphaS ("FixedNLOalphaS", "The value of alphaS to use for the nlo weight if _nlo_alphaS_opt=1", &MEqq2gZ2ffPowheg::_fixed_alphaS, 0.115895, 0., 1.0, false, false, Interface::limited); static Parameter<MEqq2gZ2ffPowheg,double> interfaceCorrectionCoefficient ("CorrectionCoefficient", "The magnitude of the correction term to reduce the negative contribution", &MEqq2gZ2ffPowheg::_a, 0.5, -10., 10.0, false, false, Interface::limited); static Parameter<MEqq2gZ2ffPowheg,double> interfaceCorrectionPower ("CorrectionPower", "The power of the correction term to reduce the negative contribution", &MEqq2gZ2ffPowheg::_p, 0.7, 0.0, 1.0, false, false, Interface::limited); static Switch<MEqq2gZ2ffPowheg,unsigned int> interfaceScaleOption ("ScaleOption", "Option for the scale to be used", &MEqq2gZ2ffPowheg::_scaleopt, 1, false, false); static SwitchOption interfaceScaleOptionFixed (interfaceScaleOption, "Fixed", "Use a fixed scale", 0); static SwitchOption interfaceScaleOptionsHat (interfaceScaleOption, "Dynamic", "Use the off-shell vector boson mass as the scale", 1); static Parameter<MEqq2gZ2ffPowheg,Energy> interfaceFixedScale ("FixedScale", "The fixed scale to use if required", &MEqq2gZ2ffPowheg::_fixedScale, GeV, 100.0*GeV, 10.0*GeV, 1000.0*GeV, false, false, Interface::limited); static Parameter<MEqq2gZ2ffPowheg,double> interfaceScaleFactor ("ScaleFactor", "The factor used before sHat if using a running scale", &MEqq2gZ2ffPowheg::_scaleFact, 1.0, 0.0, 10.0, false, false, Interface::limited); } int MEqq2gZ2ffPowheg::nDim() const { return 
HwMEBase::nDim() + ( _contrib>=1 ? 2 : 0 ); } bool MEqq2gZ2ffPowheg::generateKinematics(const double * r) { if(_contrib>=1) { _xt=*(r+1); _v =*(r+2); } return MEqq2gZ2ff::generateKinematics(r); } CrossSection MEqq2gZ2ffPowheg::dSigHatDR() const { // Get Born momentum fractions xbar_a and xbar_b: _xb_a = lastX1(); _xb_b = lastX2(); return MEqq2gZ2ff::dSigHatDR()*NLOweight(); } double MEqq2gZ2ffPowheg::NLOweight() const { // If only leading order is required return 1: if(_contrib==0) return 1.; useMe(); // Get particle data for QCD particles: _parton_a=mePartonData()[0]; _parton_b=mePartonData()[1]; // get BeamParticleData objects for PDF's _hadron_A=dynamic_ptr_cast<Ptr<BeamParticleData>::transient_const_pointer> (lastParticles().first->dataPtr()); _hadron_B=dynamic_ptr_cast<Ptr<BeamParticleData>::transient_const_pointer> (lastParticles().second->dataPtr()); // If necessary swap the particle data vectors so that _xb_a, // mePartonData[0], beam[0] relate to the inbound quark: if(!(lastPartons().first ->dataPtr()==_parton_a&& lastPartons().second->dataPtr()==_parton_b)) { swap(_xb_a ,_xb_b); swap(_hadron_A,_hadron_B); } // calculate the PDF's for the Born process _oldq = _hadron_A->pdf()->xfx(_hadron_A,_parton_a,scale(),_xb_a)/_xb_a; _oldqbar = _hadron_B->pdf()->xfx(_hadron_B,_parton_b,scale(),_xb_b)/_xb_b; // Calculate alpha_S _alphaS2Pi = _nlo_alphaS_opt==1 ? _fixed_alphaS : SM().alphaS(scale()); _alphaS2Pi /= 2.*Constants::pi; // Calculate the invariant mass of the dilepton pair _mll2 = sHat(); _mu2 = scale(); // Calculate the integrand // q qbar contribution double wqqvirt = Vtilde_qq(); double wqqcollin = Ctilde_qq(x(_xt,1.),1.) + Ctilde_qq(x(_xt,0.),0.); double wqqreal = Ftilde_qq(_xt,_v); double wqq = wqqvirt+wqqcollin+wqqreal; // q g contribution double wqgcollin = Ctilde_qg(x(_xt,0.),0.); double wqgreal = Ftilde_qg(_xt,_v); double wqg = wqgreal+wqgcollin; // g qbar contribution double wgqbarcollin = Ctilde_gq(x(_xt,1.),1.); double wgqbarreal = Ftilde_gq(_xt,_v); double wgqbar = wgqbarreal+wgqbarcollin; // total double wgt = 1.+(wqq+wqg+wgqbar); //trick to try and reduce neg wgt contribution if(_xt<1.-_eps) wgt += _a*(1./pow(1.-_xt,_p)-(1.-pow(_eps,1.-_p))/(1.-_p)/(1.-_eps)); - assert(!isinf(wgt)&&!isnan(wgt)); + assert(isfinite(wgt)); // return the answer return _contrib==1 ? max(0.,wgt) : max(0.,-wgt); } double MEqq2gZ2ffPowheg::x(double xt, double v) const { double x0(xbar(v)); return x0+(1.-x0)*xt; } double MEqq2gZ2ffPowheg::x_a(double x, double v) const { if(x==1.) return _xb_a; if(v==0.) return _xb_a; if(v==1.) return _xb_a/x; return (_xb_a/sqrt(x))*sqrt((1.-(1.-x)*(1.-v))/(1.-(1.-x)*v)); } double MEqq2gZ2ffPowheg::x_b(double x, double v) const { if(x==1.) return _xb_b; if(v==0.) return _xb_b/x; if(v==1.) return _xb_b; return (_xb_b/sqrt(x))*sqrt((1.-(1.-x)*v)/(1.-(1.-x)*(1.-v))); } double MEqq2gZ2ffPowheg::xbar(double v) const { double xba2(sqr(_xb_a)), xbb2(sqr(_xb_b)), omv(-999.); double xbar1(-999.), xbar2(-999.); if(v==1.) return _xb_a; if(v==0.) return _xb_b; omv = 1.-v; xbar1=4.* v*xba2/ (sqrt(sqr(1.+xba2)*4.*sqr(omv)+16.*(1.-2.*omv)*xba2)+2.*omv*(1.-_xb_a)*(1.+_xb_a)); xbar2=4.*omv*xbb2/ (sqrt(sqr(1.+xbb2)*4.*sqr( v)+16.*(1.-2.* v)*xbb2)+2.* v*(1.-_xb_b)*(1.+_xb_b)); return max(xbar1,xbar2); } double MEqq2gZ2ffPowheg::Ltilde_qq(double x, double v) const { if(x==1.) 
return 1.; double xa(x_a(x,v)),xb(x_b(x,v)); double newq = (_hadron_A->pdf()->xfx(_hadron_A,_parton_a,scale(), xa)/ xa); double newqbar = (_hadron_B->pdf()->xfx(_hadron_B,_parton_b,scale(), xb)/ xb); return( newq * newqbar / _oldq / _oldqbar ); } double MEqq2gZ2ffPowheg::Ltilde_qg(double x, double v) const { double xa(x_a(x,v)),xb(x_b(x,v)); double newq = (_hadron_A->pdf()->xfx(_hadron_A,_parton_a,scale(), xa)/ xa); double newg2 = (_hadron_B->pdf()->xfx(_hadron_B,_gluon ,scale(), xb)/ xb); return( newq * newg2 / _oldq / _oldqbar ); } double MEqq2gZ2ffPowheg::Ltilde_gq(double x, double v) const { double xa(x_a(x,v)),xb(x_b(x,v)); double newg1 = (_hadron_A->pdf()->xfx(_hadron_A,_gluon ,scale(), xa)/ xa); double newqbar = (_hadron_B->pdf()->xfx(_hadron_B,_parton_b,scale(), xb)/ xb); return( newg1 * newqbar / _oldq / _oldqbar ); } double MEqq2gZ2ffPowheg::Vtilde_qq() const { return _alphaS2Pi*CF_*(-3.*log(_mu2/_mll2)+(2.*sqr(Constants::pi)/3.)-8.); } double MEqq2gZ2ffPowheg::Ccalbar_qg(double x) const { return (sqr(x)+sqr(1.-x))*(log(_mll2/(_mu2*x))+2.*log(1.-x))+2.*x*(1.-x); } double MEqq2gZ2ffPowheg::Ctilde_qg(double x, double v) const { return _alphaS2Pi*TR_ * ((1.-xbar(v))/x) * Ccalbar_qg(x)*Ltilde_qg(x,v); } double MEqq2gZ2ffPowheg::Ctilde_gq(double x, double v) const { return _alphaS2Pi*TR_ * ((1.-xbar(v))/x) * Ccalbar_qg(x)*Ltilde_gq(x,v); } double MEqq2gZ2ffPowheg::Ctilde_qq(double x, double v) const { double wgt = ((1.-x)/x+(1.+x*x)/(1.-x)/x*(2.*log(1.-x)-log(x)))*Ltilde_qq(x,v) - 4.*log(1.-x)/(1.-x) + 2./(1.-xbar(v))*log(1.-xbar(v))*log(1.-xbar(v)) + (2./(1.-xbar(v))*log(1.-xbar(v))-2./(1.-x)+(1.+x*x)/x/(1.-x)*Ltilde_qq(x,v)) *log(_mll2/_mu2); return _alphaS2Pi*CF_*(1.-xbar(v))*wgt; } double MEqq2gZ2ffPowheg::Fcal_qq(double x, double v) const { return (sqr(1.-x)*(1.-2.*v*(1.-v))+2.*x)/x*Ltilde_qq(x,v); } double MEqq2gZ2ffPowheg::Fcal_qg(double x, double v) const { return ((1.-xbar(v))/x)* (2.*x*(1.-x)*v+sqr((1.-x)*v)+sqr(x)+sqr(1.-x))*Ltilde_qg(x,v); } double MEqq2gZ2ffPowheg::Fcal_gq(double x, double v) const { return ((1.-xbar(v))/x)* (2.*x*(1.-x)*(1.-v)+sqr((1.-x)*(1.-v))+sqr(x)+sqr(1.-x))*Ltilde_gq(x,v); } double MEqq2gZ2ffPowheg::Ftilde_qg(double xt, double v) const { return _alphaS2Pi*TR_* ( Fcal_qg(x(xt,v),v) - Fcal_qg(x(xt,0.),0.) )/v; } double MEqq2gZ2ffPowheg::Ftilde_gq(double xt, double v) const { return _alphaS2Pi*TR_* ( Fcal_gq(x(xt,v),v) - Fcal_gq(x(xt,1.),1.) )/(1.-v); } double MEqq2gZ2ffPowheg::Ftilde_qq(double xt, double v) const { double eps(1e-10); // is emission into regular or singular region? if(xt>=0. && xt<1.-eps && v>eps && v<1.-eps) { // x<1, v>0, v<1 (regular emission, neither soft or collinear): return _alphaS2Pi*CF_* (( ( Fcal_qq(x(xt, v), v) - Fcal_qq(x(xt,1.),1.) ) / (1.-v)+ ( Fcal_qq(x(xt, v), v) - Fcal_qq(x(xt,0.),0.) ) / v )/(1.-xt) + ( log(1.-xbar(v)) - log(1.-_xb_a))*2./(1.-v) + ( log(1.-xbar(v)) - log(1.-_xb_b))*2./v); } else { // make sure emission is actually in the allowed phase space: if(!(v>=0. && v<=1. && xt>=0. && xt<=1.)) { ostringstream s; s << "MEqq2gZ2ffPowheg::Ftilde_qq : \n" << "xt(" << xt << ") and / or v(" << v << ") not in the phase space."; generator()->logWarning(Exception(s.str(),Exception::warning)); return 0.; } // is emission soft singular? 
if(xt>=1.-eps) { // x=1: if(v<=eps) { // x==1, v=0 (soft and collinear with particle b): return _alphaS2Pi*CF_* ( ( log(1.-xbar(v)) - log(1.-_xb_a))*2./(1.-v) ); } else if(v>=1.-eps) { // x==1, v=1 (soft and collinear with particle a): return _alphaS2Pi*CF_* ( ( log(1.-xbar(v)) - log(1.-_xb_b))*2./v ); } else { // x==1, 0<v<1 (soft wide angle emission): return _alphaS2Pi*CF_* ( ( log(1.-xbar(v)) - log(1.-_xb_a))*2./(1.-v) + ( log(1.-xbar(v)) - log(1.-_xb_b))*2./v ); } } else { // x<1: if(v<=eps) { // x<1 but v=0 (collinear with particle b, but not soft): return _alphaS2Pi*CF_* ( ( ( Fcal_qq(x(xt, v), v) - Fcal_qq(x(xt,1.),1.) ) / (1.-v) )/(1.-xt) + ( log(1.-xbar(v)) - log(1.-_xb_a))*2./(1.-v) ); } else if(v>=1.-eps) { // x<1 but v=1 (collinear with particle a, but not soft): return _alphaS2Pi*CF_* ( ( ( Fcal_qq(x(xt, v), v) - Fcal_qq(x(xt,0.),0.) ) / v )/(1.-xt) + ( log(1.-xbar(v)) - log(1.-_xb_b))*2./v ); } } } return 0.; } diff --git a/PDF/MRST.cc b/PDF/MRST.cc --- a/PDF/MRST.cc +++ b/PDF/MRST.cc @@ -1,836 +1,836 @@ // -*- C++ -*- // // MRST.cc is a part of Herwig - A multi-purpose Monte Carlo event generator // Copyright (C) 2002-2011 The Herwig Collaboration // // Herwig is licenced under version 2 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. // #include "MRST.h" #include <ThePEG/PDT/ParticleData.h> #include <ThePEG/PDT/EnumParticles.h> #include <ThePEG/Persistency/PersistentOStream.h> #include <ThePEG/Persistency/PersistentIStream.h> #include <ThePEG/Repository/EventGenerator.h> #include <ThePEG/Interface/ClassDocumentation.h> #include <ThePEG/Interface/Parameter.h> #include <ThePEG/Interface/Switch.h> #include <istream> #include <iostream> #include <sstream> #include <string> using namespace ThePEG; using namespace Herwig; /** * Minimum value of \f$x\f$ */ const double MRST::xmin=1E-5; /** * Maximum value of \f$x\f$ */ const double MRST::xmax=1.0; /** * Minimum value of \f$q^2\f$. */ const Energy2 MRST::qsqmin = 1.25 * GeV2; /** * Maximum value of \f$q^2\f$. */ const Energy2 MRST::qsqmax = 1E7 * GeV2; /** * Mass squared of the charm quark */ const Energy2 MRST::mc2 = 2.045 * GeV2; /** * Mass squared of the bottom quark */ const Energy2 MRST::mb2 = 18.5 * GeV2; ClassDescription<MRST> MRST::initMRST; MRST::MRST() : _inter(2), _xswitch(0.9), data(np+1,vector<vector<double> > (nx+1,vector<double> (nq+1,0.0))), fdata(np+1,vector<vector<double> > (nx+1,vector<double> (nq+1,0.0))) { if ( ! initialized ) { for ( int jj=1; jj < ntenth; ++jj ) { lxxb[jj] = log10(xx[jj]/xx[ntenth]) + xx[ntenth]; } lxxb[ntenth] = xx[ntenth]; for ( int n=1; n<=nx; n++ ) lxx[n] = log10(xx[n]); for ( int n=1; n<=nq; n++ ) lqq[n] = log10(qq[n]); initialized = true; } } bool MRST::canHandleParticle(tcPDPtr particle) const { // Return true if this PDF can handle the extraction of parton from the // given particle ie. if the particle is a proton or neutron. return ( abs(particle->id()) == ParticleID::pplus || abs(particle->id()) == ParticleID::n0 ); } cPDVector MRST::partons(tcPDPtr p) const { // Return the parton types which are described by these parton // densities. 
cPDVector ret; if ( canHandleParticle(p) ) { ret.push_back(getParticleData(ParticleID::g)); for ( int i = 1; i <= 5; ++i ) { ret.push_back(getParticleData(i)); ret.push_back(getParticleData(-i)); } } return ret; } double MRST::xfx(tcPDPtr particle, tcPDPtr parton, Energy2 partonScale, double x, double, Energy2) const { return pdfValue(x, partonScale, particle, parton,Total); } double MRST::xfvx(tcPDPtr particle, tcPDPtr parton, Energy2 partonScale, double x, double, Energy2) const { return pdfValue(x, partonScale, particle, parton,Valence); } double MRST::xfsx(tcPDPtr particle, tcPDPtr parton, Energy2 partonScale, double x, double, Energy2) const { return pdfValue(x, partonScale, particle, parton,Sea); } double MRST::pdfValue(double x, Energy2 q2, tcPDPtr particle, tcPDPtr parton, PDFType type) const { - assert(!isnan(x) && !isinf(x)); + assert(isfinite(x)); // reset x to min or max if outside range if(x<xmin) x=xmin; else if(x>xmax) x=xmax; // reset q2 to min or max if outside range if(q2<qsqmin) q2=qsqmin; else if(q2>qsqmax) q2=qsqmax; // c++ interpolation double output(0.); if(_inter==0||(_inter==2&&x<_xswitch)) { // interpolation is in logx, log qsq: double xxx=log10(x); double qsq=log10(q2/GeV2); // bin position int n=locate(lxx,nx,xxx); int m=locate(lqq,nq,qsq); // fraction along the bin double t=(xxx-lxx[n])/(lxx[n+1]-lxx[n]); double u=(qsq-lqq[m])/(lqq[m+1]-lqq[m]); bool anti = particle->id() < 0; bool neutron = abs(particle->id()) == ParticleID::n0; if (type==Valence) { switch(parton->id()) { case ParticleID::u: output= (neutron? (anti? 0.0: lookup(dnValence,n,m,u,t)): (anti? 0.0: lookup(upValence,n,m,u,t))); break; case ParticleID::ubar: output= (neutron? (anti? lookup(dnValence,n,m,u,t): 0.0): (anti? lookup(upValence,n,m,u,t): 0.0)); break; case ParticleID::d: output= (neutron? (anti? 0.0: lookup(upValence,n,m,u,t)): (anti? 0.0: lookup(dnValence,n,m,u,t))); break; case ParticleID::dbar: output= (neutron? (anti? lookup(upValence,n,m,u,t): 0.0): (anti? lookup(dnValence,n,m,u,t): 0.0)); break; } } else if(type==Sea) { switch(parton->id()) { case ParticleID::b: case ParticleID::bbar: output= lookup(bot,n,m,u,t); break; case ParticleID::c: case ParticleID::cbar: output= lookup(chm,n,m,u,t); break; case ParticleID::s: case ParticleID::sbar: output= lookup(str,n,m,u,t); break; case ParticleID::u: case ParticleID::ubar: output= (neutron? lookup(dnSea,n,m,u,t) : lookup(upSea,n,m,u,t)); break; case ParticleID::d: case ParticleID::dbar: output= (neutron? lookup(upSea,n,m,u,t) : lookup(dnSea,n,m,u,t)); break; case ParticleID::g: output= lookup(glu,n,m,u,t); break; } } else if(type==Total) { switch(parton->id()) { case ParticleID::b: case ParticleID::bbar: output= lookup(bot,n,m,u,t); break; case ParticleID::c: case ParticleID::cbar: output= lookup(chm,n,m,u,t); break; case ParticleID::s: case ParticleID::sbar: output= lookup(str,n,m,u,t); break; case ParticleID::u: output= (neutron? (lookup(dnSea,n,m,u,t) + (anti? 0.0: lookup(dnValence,n,m,u,t))) : (lookup(upSea,n,m,u,t) + (anti? 0.0: lookup(upValence,n,m,u,t)))); break; case ParticleID::ubar: output= (neutron? (lookup(dnSea,n,m,u,t) + (anti? lookup(dnValence,n,m,u,t): 0.0)) : (lookup(upSea,n,m,u,t) + (anti? lookup(upValence,n,m,u,t): 0.0))); break; case ParticleID::d: output= (neutron? (lookup(upSea,n,m,u,t) + (anti? 0.0: lookup(upValence,n,m,u,t))) : (lookup(dnSea,n,m,u,t) + (anti? 0.0: lookup(dnValence,n,m,u,t)))); break; case ParticleID::dbar: output= (neutron? (lookup(upSea,n,m,u,t) + (anti? 
lookup(upValence,n,m,u,t): 0.0)) : (lookup(dnSea,n,m,u,t) + (anti? lookup(dnValence,n,m,u,t): 0.0))); break; case ParticleID::g: output= lookup(glu,n,m,u,t); break; } } } else { double xxx=x; if(x<lxxb[ntenth]) xxx = log10(x/lxxb[ntenth])+lxxb[ntenth]; int nn=0; do ++nn; while(xxx>lxxb[nn+1]); double a=(xxx-lxxb[nn])/(lxxb[nn+1]-lxxb[nn]); double qsq=q2/GeV2; int mm=0; do ++mm; while(qsq>qq[mm+1]); double b=(qsq-qq[mm])/(qq[mm+1]-qq[mm]); double g[np+1]; for(int ii=1;ii<=np;++ii) { g[ii]= (1.-a)*(1.-b)*fdata[ii][nn ][mm] + (1.-a)*b*fdata[ii][nn ][mm+1] + a*(1.-b)*fdata[ii][nn+1][mm] + a*b*fdata[ii][nn+1][mm+1]; if(nn<ntenth&&!(ii==5||ii==7)) { double fac=(1.-b)*fdata[ii][ntenth][mm]+b*fdata[ii][ntenth][mm+1]; g[ii] = fac*pow(10.,g[ii]-fac); } g[ii] *= pow(1.-x,n0[ii]); } bool anti = particle->id() < 0; bool neutron = abs(particle->id()) == ParticleID::n0; if (type==Valence) { switch(parton->id()) { case ParticleID::u: output= (neutron? (anti? 0.0: g[2]): (anti? 0.0: g[1])); break; case ParticleID::ubar: output= (neutron? (anti? g[2]: 0.0): (anti? g[1]: 0.0)); break; case ParticleID::d: output= (neutron? (anti? 0.0: g[1]): (anti? 0.0: g[2])); break; case ParticleID::dbar: output= (neutron? (anti? g[1]: 0.0): (anti? g[2]: 0.0)); break; } } else if(type==Sea) { switch(parton->id()) { case ParticleID::b: case ParticleID::bbar: output= g[7]; break; case ParticleID::c: case ParticleID::cbar: output= g[5]; break; case ParticleID::s: case ParticleID::sbar: output= g[6]; break; case ParticleID::u: case ParticleID::ubar: output= (neutron ? g[8] : g[4] ); break; case ParticleID::d: case ParticleID::dbar: output= (neutron? g[4] : g[8] ); break; case ParticleID::g: output= g[3]; break; } } else if(type==Total) { switch(parton->id()) { case ParticleID::b: case ParticleID::bbar: output= g[7]; break; case ParticleID::c: case ParticleID::cbar: output= g[5]; break; case ParticleID::s: case ParticleID::sbar: output= g[6]; break; case ParticleID::u: output= (neutron? (g[8] + (anti? 0.0: g[2])) : (g[4] + (anti? 0.0: g[1]))); break; case ParticleID::ubar: output= (neutron? (g[8] + (anti? g[2]: 0.0)) : (g[4] + (anti? g[1]: 0.0))); break; case ParticleID::d: output= (neutron? (g[4] + (anti? 0.0: g[1])) : (g[8] + (anti? 0.0: g[2]))); break; case ParticleID::dbar: output= (neutron? (g[4] + (anti? g[1]: 0.0)) : (g[8] + (anti? 
g[2]: 0.0))); break; case ParticleID::g: output= g[3]; break; } } } output = max(output,0.); - assert(!isnan(output)); + assert(!std::isnan(output)); return output; } void MRST::persistentOutput(PersistentOStream &out) const { out << _file << data << fdata << _inter << _xswitch; } void MRST::persistentInput(PersistentIStream & in, int) { in >> _file >> data >> fdata >> _inter >> _xswitch; initialize(false); } void MRST::Init() { static ClassDocumentation<MRST> documentation ("Implementation of the MRST PDFs", "Implementation of the MRST LO* / LO** PDFs \\cite{Sherstnev:2007nd}.", " %\\cite{Sherstnev:2007nd}\n" "\\bibitem{Sherstnev:2007nd}\n" " A.~Sherstnev and R.~S.~Thorne,\n" " ``Parton Distributions for LO Generators,''\n" " Eur.\\ Phys.\\ J.\\ C {\\bf 55} (2008) 553\n" " [arXiv:0711.2473 [hep-ph]].\n" " %%CITATION = EPHJA,C55,553;%%\n" ); static Switch<MRST,unsigned int> interfaceInterpolation ("Interpolation", "Whether to use cubic or linear (C++ or FORTRAN) interpolation", &MRST::_inter, 2, false, false); static SwitchOption interfaceInterpolationCubic (interfaceInterpolation, "Cubic", "Use cubic interpolation", 0); static SwitchOption interfaceInterpolationLinear (interfaceInterpolation, "Linear", "Use Linear Interpolation", 1);; static SwitchOption interfaceInterpolationMixed (interfaceInterpolation, "Mixed", "Use cubic below xswitch and linear interpolation above", 2); static Parameter<MRST,double> interfaceXSwitch ("XSwitch", "Value of x to switch from cubic to linear interpolation", &MRST::_xswitch, 0.9, 0.0, 1.0, false, false, Interface::limited); } void MRST::doinitrun() { cerr << "Warning: Herwig::MRST is obsolete and will be removed in Herwig 7.1.\n" << " Please switch to using a PDF set provided by LHAPDF.\n"; PDFBase::doinitrun(); #ifdef MRST_TESTING unsigned int intersave=_inter; tPDPtr proton=getParticleData(ParticleID::pplus); for(unsigned int itype=0;itype<8;++itype) { tPDPtr parton; string name; if(itype==0) { name="u.top"; parton=getParticleData(ParticleID::u); } else if(itype==1) { name="d.top"; parton=getParticleData(ParticleID::d); } else if(itype==2) { name="ubar.top"; parton=getParticleData(ParticleID::ubar); } else if(itype==3) { name="dbar.top"; parton=getParticleData(ParticleID::dbar); } else if(itype==4) { name="s.top"; parton=getParticleData(ParticleID::s); } else if(itype==5) { name="c.top"; parton=getParticleData(ParticleID::c); } else if(itype==6) { name="b.top"; parton=getParticleData(ParticleID::b); } else if(itype==7) { name="g.top"; parton=getParticleData(ParticleID::g); } ofstream output(name.c_str()); Energy qmin=2.0*GeV,qmax=3000.0*GeV; int nq=10; Energy qstep=(qmax-qmin)/nq; for(Energy q=qmin+qstep;q<=qmax;q+=qstep) { double nx=500; double xmin=1e-5,xmax=1.; double xstep=(log(xmax)-log(xmin))/nx; output << "NEW FRAME" << endl; output << "SET FONT DUPLEX\n"; output << "SET SCALE Y LOG\n"; output << "SET LIMITS X " << xmin << " " << xmax << endl; if(itype==0) output << "TITLE TOP \" up distribution for q=" << q/GeV << "\"\n"; else if(itype==1) output << "TITLE TOP \" down distribution for q=" << q/GeV << "\"\n"; else if(itype==2) output << "TITLE TOP \" ubar distribution for q=" << q/GeV << "\"\n"; else if(itype==3) output << "TITLE TOP \" dbar distribution for q=" << q/GeV << "\"\n"; else if(itype==4) output << "TITLE TOP \" strange distribution for q=" << q/GeV << "\"\n"; else if(itype==5) output << "TITLE TOP \" charm distribution for q=" << q/GeV << "\"\n"; else if(itype==6) output << "TITLE TOP \" bottom distribution for q=" << q/GeV << "\"\n"; 
else if(itype==7) output << "TITLE TOP \" gluon distribution for q=" << q/GeV << "\"\n"; _inter=0; for(double xl=log(xmin)+xstep;xl<=log(xmax);xl+=xstep) { double x=exp(xl); double val=xfl(proton,parton,q*q,-xl); if(val>1e5) val=1e5; output << x << '\t' << val << '\n'; } output << "JOIN RED" << endl; _inter=1; for(double xl=log(xmin)+xstep;xl<=log(xmax);xl+=xstep) { double x=exp(xl); double val=xfl(proton,parton,q*q,-xl); if(val>1e5) val=1e5; output << x << '\t' << val << '\n'; } output << "JOIN BLUE" << endl; _inter=2; for(double xl=log(xmin)+xstep;xl<=log(xmax);xl+=xstep) { double x=exp(xl); double val=xfl(proton,parton,q*q,-xl); if(val>1e5) val=1e5; output << x << '\t' << val << '\n'; } output << "JOIN GREEN" << endl; } } _inter=intersave; #endif } void MRST::readSetup(istream &is) { _file = dynamic_cast<istringstream*>(&is)->str(); initialize(); } void MRST::initialize(bool reread) { useMe(); // int i,n,m,k,l,j; // counters double dx,dq; int wt[][16] = {{ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0}, {-3, 0, 0, 3, 0, 0, 0, 0,-2, 0, 0,-1, 0, 0, 0, 0}, { 2, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0}, { 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}, { 0, 0, 0, 0,-3, 0, 0, 3, 0, 0, 0, 0,-2, 0, 0,-1}, { 0, 0, 0, 0, 2, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 1}, {-3, 3, 0, 0,-2,-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0, 0, 0, 0,-3, 3, 0, 0,-2,-1, 0, 0}, { 9,-9, 9,-9, 6, 3,-3,-6, 6,-6,-3, 3, 4, 2, 1, 2}, {-6, 6,-6, 6,-4,-2, 2, 4,-3, 3, 3,-3,-2,-1,-1,-2}, { 2,-2, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0, 0, 0, 0, 2,-2, 0, 0, 1, 1, 0, 0}, {-6, 6,-6, 6,-3,-3, 3, 3,-4, 4, 2,-2,-2,-2,-1,-1}, { 4,-4, 4,-4, 2, 2,-2,-2, 2,-2,-2, 2, 1, 1, 1, 1}}; // Variables used for initialising c_ij array: double f1[np+1][nx+1][nq+1];//derivative wrt.x double f2[np+1][nx+1][nq+1];//derivative wrt.qq double f12[np+1][nx+1][nq+1];//cross derivative double xxd,d1d2,cl[16],x[16],d1,d2,y[5],y1[5],y2[5],y12[5]; if(reread) { ifstream datafile(_file.c_str()); if(!datafile) throw Exception() << "Could not open file '" << _file << "' in MRST::initialize()" << Exception::runerror; for(int nn=1; nn<nx; nn++) { for(int mm=1; mm<=nq; mm++) { datafile >> data[1][nn][mm]; datafile >> data[2][nn][mm]; datafile >> data[3][nn][mm]; datafile >> data[4][nn][mm]; datafile >> data[5][nn][mm]; datafile >> data[7][nn][mm]; datafile >> data[6][nn][mm]; datafile >> data[8][nn][mm]; if(datafile.eof()) throw Exception() << "Error while reading " << _file << " too few data points in file" << "in MRST::initialize()" << Exception::runerror; for(int ii=1;ii<=np;++ii) { fdata[ii][nn][mm] = _inter==0 ? 0. : data[ii][nn][mm]/pow(1.-xx[nn],n0[ii]); } } } for (int n=1; n<=8; ++n) { for(int mm=1; mm<=nq; ++mm) { data[n][nx][mm]=0.0; } } double dtemp; datafile >> dtemp; if(!datafile.eof()) throw Exception() << "Error reading end of " << _file << " too many data points in file" << "in MRST::initialize()" << Exception::runerror; datafile.close(); // calculate the FORTRAN interpolation for(int jj=1;jj<=ntenth-1;++jj) { for(int ii=1;ii<=np;++ii) { if(ii==5||ii==7) continue; for(int kk=1;kk<=nq;++kk) { fdata[ii][jj][kk] = _inter==0 ? 0. 
: log10( fdata[ii][jj][kk] / fdata[ii][ntenth][kk] ) + fdata[ii][ntenth][kk]; } } } for (int n=1; n<=np; ++n) { for(int mm=1; mm<=nq; ++mm) { fdata[n][nx][mm]=0.0; } } } // Now calculate the derivatives used for bicubic interpolation for (int i=1;i<=np;i++) { // Start by calculating the first x derivatives // along the first x value dx=lxx[2]-lxx[1]; for (int m=1;m<=nq;m++) f1[i][1][m]=(data[i][2][m]-data[i][1][m])/dx; // The along the rest (up to the last) for (int k=2;k<nx;k++) { for (int m=1;m<=nq;m++) { f1[i][k][m]=polderivative(lxx[k-1],lxx[k],lxx[k+1], data[i][k-1][m], data[i][k][m], data[i][k+1][m]); } } // Then for the last column dx=lxx[nx]-lxx[nx-1]; for (int m=1;m<=nq;m++) f1[i][nx][m]=(data[i][nx][m]-data[i][nx-1][m])/dx; if ((i!=5)&&(i!=7)) { // then calculate the qq derivatives // Along the first qq value dq=lqq[2]-lqq[1]; for (int k=1;k<=nx;k++) f2[i][k][1]=(data[i][k][2]-data[i][k][1])/dq; // The rest up to the last qq value for (int m=2;m<nq;m++) { for (int k=1;k<=nx;k++) f2[i][k][m]=polderivative(lqq[m-1],lqq[m],lqq[m+1], data[i][k][m-1], data[i][k][m], data[i][k][m+1]); } // then for the last row dq=lqq[nq]-lqq[nq-1]; for (int k=1;k<=nx;k++) f2[i][k][nq]=(data[i][k][nq]-data[i][k][nq-1])/dq; // Now, calculate the cross derivatives. // Calculate these as x-derivatives of the y-derivatives // ?? Could be improved by taking the average between dxdy and dydx ?? // Start by calculating the first x derivatives // along the first x value dx=lxx[2]-lxx[1]; for (int m=1;m<=nq;m++) f12[i][1][m]=(f2[i][2][m]-f2[i][1][m])/dx; // The along the rest (up to the last) for (int k=2;k<nx;k++) { for (int m=1;m<=nq;m++) f12[i][k][m]=polderivative(lxx[k-1],lxx[k],lxx[k+1], f2[i][k-1][m],f2[i][k][m],f2[i][k+1][m]); } // Then for the last column dx=lxx[nx]-lxx[nx-1]; for (int m=1;m<=nq;m++) f12[i][nx][m]=(f2[i][nx][m]-f2[i][nx-1][m])/dx; } if (i==5) { // zero all elements below the charm threshold for (int m=1;m<nqc0;m++) for (int k=1;k<=nx;k++) f2[i][k][m]=0.0; // then calculate the qq derivatives // Along the first qq value above the threshold (m=ncq0) dq=lqq[nqc0+1]-lqq[nqc0]; for (int k=1;k<=nx;k++) f2[i][k][nqc0]=(data[i][k][nqc0+1]-data[i][k][nqc0])/dq; // The rest up to the last qq value for (int m=nqc0+1;m<nq;m++) { for (int k=1;k<=nx;k++) f2[i][k][m]=polderivative(lqq[m-1],lqq[m],lqq[m+1], data[i][k][m-1], data[i][k][m], data[i][k][m+1]); } // then for the last row dq=lqq[nq]-lqq[nq-1]; for (int k=1;k<=nx;k++) f2[i][k][nq]=(data[i][k][nq]-data[i][k][nq-1])/dq; // Now, calculate the cross derivatives. // Calculate these as x-derivatives of the y-derivatives // ?? Could be improved by taking the average between dxdy and dydx ?? 
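      // The cross derivatives f12 = d/dx ( df/dqq ) are built from the qq derivatives f2
      // computed just above: a one-sided difference on the first and last rows in x, and
      // the three-point polderivative fit for the interior x values, mirroring the
      // treatment of f1 and f2 earlier in this routine.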
dx=lxx[2]-lxx[1]; for (int m=1;m<=nq;m++) f12[i][1][m]=(f2[i][2][m]-f2[i][1][m])/dx; // The along the rest (up to the last) for (int k=2;k<nx;k++) { for (int m=1;m<=nq;m++) f12[i][k][m]=polderivative(lxx[k-1],lxx[k],lxx[k+1], f2[i][k-1][m],f2[i][k][m],f2[i][k+1][m]); } // Then for the last column dx=lxx[nx]-lxx[nx-1]; for (int m=1;m<=nq;m++) f12[i][nx][m]=(f2[i][nx][m]-f2[i][nx-1][m])/dx; } if (i==7) { // zero all elements below the bottom threshold for (int m=1;m<nqb0;m++) for (int k=1;k<=nx;k++) f2[i][k][m]=0.0; // then calculate the qq derivatives // Along the first qq value above the threshold (m=nqb0) dq=lqq[nqb0+1]-lqq[nqb0]; for (int k=1;k<=nx;k++) f2[i][k][nqb0]=(data[i][k][nqb0+1]-data[i][k][nqb0])/dq; // The rest up to the last qq value for (int m=nqb0+1;m<nq;m++) { for (int k=1;k<=nx;k++) f2[i][k][m]=polderivative(lqq[m-1],lqq[m],lqq[m+1], data[i][k][m-1], data[i][k][m], data[i][k][m+1]); } // then for the last row dq=lqq[nq]-lqq[nq-1]; for (int k=1;k<=nx;k++) f2[i][k][nq]=(data[i][k][nq]-data[i][k][nq-1])/dq; // Now, calculate the cross derivatives. // Calculate these as x-derivatives of the y-derivatives // ?? Could be improved by taking the average between dxdy and dydx ?? dx=lxx[2]-lxx[1]; for (int m=1;m<=nq;m++) f12[i][1][m]=(f2[i][2][m]-f2[i][1][m])/dx; // The along the rest (up to the last) for (int k=2;k<nx;k++) { for (int m=1;m<=nq;m++) f12[i][k][m]=polderivative(lxx[k-1],lxx[k],lxx[k+1], f2[i][k-1][m],f2[i][k][m],f2[i][k+1][m]); } // Then for the last column dx=lxx[nx]-lxx[nx-1]; for (int m=1;m<=nq;m++) f12[i][nx][m]=(f2[i][nx][m]-f2[i][nx-1][m])/dx; } // Now calculate the coefficients c_ij for(int n=1;n<=nx-1;n++) { for(int m=1;m<=nq-1;m++) { d1=lxx[n+1]-lxx[n]; d2=lqq[m+1]-lqq[m]; d1d2=d1*d2; // Iterate around the grid and store the values of f, f_x, f_y and f_xy y[1]=data[i][n][m]; y[2]=data[i][n+1][m]; y[3]=data[i][n+1][m+1]; y[4]=data[i][n][m+1]; y1[1]=f1[i][n][m]; y1[2]=f1[i][n+1][m]; y1[3]=f1[i][n+1][m+1]; y1[4]=f1[i][n][m+1]; y2[1]=f2[i][n][m]; y2[2]=f2[i][n+1][m]; y2[3]=f2[i][n+1][m+1]; y2[4]=f2[i][n][m+1]; y12[1]=f12[i][n][m]; y12[2]=f12[i][n+1][m]; y12[3]=f12[i][n+1][m+1]; y12[4]=f12[i][n][m+1]; for (int k=1;k<=4;k++) { x[k-1]=y[k]; x[k+3]=y1[k]*d1; x[k+7]=y2[k]*d2; x[k+11]=y12[k]*d1d2; } for (int l=0;l<=15;l++) { xxd=0.0; for (int k=0;k<=15;k++) xxd+= wt[l][k]*x[k]; cl[l]=xxd; } int l=0; for (int k=1;k<=4;k++) for (int j=1;j<=4;j++) c[i][n][m][k][j]=cl[l++]; } //m } //n } // i } double MRST::xx[] = { 0.0, 1E-5, 2E-5, 4E-5, 6E-5, 8E-5, 1E-4, 2E-4, 4E-4, 6E-4, 8E-4, 1E-3, 2E-3, 4E-3, 6E-3, 8E-3, 1E-2, 1.4E-2, 2E-2, 3E-2, 4E-2, 6E-2, 8E-2, .1, .125, 0.15, .175, .2, .225, 0.25, .275, .3, .325, 0.35, .375, .4, .425, 0.45, .475, .5, .525, 0.55, .575, .6, .65, .7, .75, .8, .9, 1. }; double MRST::lxx[] = { 0.0, 1E-5, 2E-5, 4E-5, 6E-5, 8E-5, 1E-4, 2E-4, 4E-4, 6E-4, 8E-4, 1E-3, 2E-3, 4E-3, 6E-3, 8E-3, 1E-2, 1.4E-2, 2E-2, 3E-2, 4E-2, 6E-2, 8E-2, .1, .125, 0.15, .175, .2, .225, 0.25, .275, .3, .325, 0.35, .375, .4, .425, 0.45, .475, .5, .525, 0.55, .575, .6, .65, .7, .75, .8, .9, 1. }; double MRST::lxxb[] = { 0.0, 1E-5, 2E-5, 4E-5, 6E-5, 8E-5, 1E-4, 2E-4, 4E-4, 6E-4, 8E-4, 1E-3, 2E-3, 4E-3, 6E-3, 8E-3, 1E-2, 1.4E-2, 2E-2, 3E-2, 4E-2, 6E-2, 8E-2, .1, .125, 0.15, .175, .2, .225, 0.25, .275, .3, .325, 0.35, .375, .4, .425, 0.45, .475, .5, .525, 0.55, .575, .6, .65, .7, .75, .8, .9, 1. 
}; double MRST::qq[] = { 0.0, 1.25, 1.5, 2., 2.5, 3.2, 4., 5., 6.4, 8., 10., 12., 18., 26., 40., 64., 1E2, 1.6E2, 2.4E2, 4E2, 6.4E2, 1E3, 1.8E3, 3.2E3, 5.6E3, 1E4, 1.8E4, 3.2E4, 5.6E4, 1E5, 1.8E5, 3.2E5, 5.6E5, 1E6, 1.8E6, 3.2E6, 5.6E6, 1E7 }; double MRST::lqq[] = { 0.0, 1.25, 1.5, 2., 2.5, 3.2, 4., 5., 6.4, 8., 10., 12., 18., 26., 40., 64., 1E2, 1.6E2, 2.4E2, 4E2, 6.4E2, 1E3, 1.8E3, 3.2E3, 5.6E3, 1E4, 1.8E4, 3.2E4, 5.6E4, 1E5, 1.8E5, 3.2E5, 5.6E5, 1E6, 1.8E6, 3.2E6, 5.6E6, 1E7 }; double MRST::n0[] = {0,3,4,5,9,9,9,9,9}; bool MRST::initialized = false; diff --git a/PDT/ThreeBodyAllOnCalculator.tcc b/PDT/ThreeBodyAllOnCalculator.tcc --- a/PDT/ThreeBodyAllOnCalculator.tcc +++ b/PDT/ThreeBodyAllOnCalculator.tcc @@ -1,198 +1,198 @@ // -*- C++ -*- // // ThreeBodyAllOnCalculator.tcc is a part of Herwig - A multi-purpose Monte Carlo event generator // Copyright (C) 2002-2011 The Herwig Collaboration // // Herwig is licenced under version 2 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. // // // This is the implementation of the non-inlined templated member // functions of the ThreeBodyAllOnCalculator class. // using namespace Herwig; // shift the variables for the outer integrand and give limits for the inner one template <class T> void ThreeBodyAllOnCalculator<T>::outerVariables(double x, Energy2 & low, Energy2 & upp) const { // first convert the value of x into the value of souter if(_mapping[_thechannel]==0) { _souter = _channelmass[_thechannel]*(_channelmass[_thechannel]+ _channelwidth[_thechannel]*tan(x)); } else if(_mapping[_thechannel]==1) { _souter = sqr(_channelmass[_thechannel])*(1.+1./x); } else { _souter = UnitRemoval::E2 * pow(x,1./(_channelpower[_thechannel]+1.)); } // now the limits of the inner integral Energy ea(ZERO),eb(ZERO); Energy rs=sqrt(_souter); Energy2 eam2(ZERO),ebm2(ZERO); switch(_channeltype[_thechannel]) { case 1: ea = 0.5*(_souter-_m2[1]+_m2[2])/rs; eam2 = sqr(ea)-_m2[2]; eb = 0.5*(_m2[0]-_souter-_m2[3])/rs; ebm2 = sqr(eb)-_m2[3]; break; case 2: ea = 0.5*(_souter-_m2[1]+_m2[3])/rs; eam2 = sqr(ea)-_m2[3]; eb = 0.5*(_m2[0]-_souter-_m2[2])/rs; ebm2 = sqr(eb)-_m2[2]; break; case 3: ea = 0.5*(_souter-_m2[2]+_m2[3])/rs; eam2 = sqr(ea)-_m2[3]; eb = 0.5*(_m2[0]-_souter-_m2[1])/rs; ebm2 = sqr(eb)-_m2[1]; break; default: assert(false); } Energy eam = sqrt(max(ZERO,eam2)); Energy ebm = sqrt(max(ZERO,ebm2)); Energy2 sum = sqr(ea+eb); // calculate the limits low = sum - sqr(eam+ebm); upp = sum - sqr(eam-ebm); } template <class T> Energy2 ThreeBodyAllOnCalculator<T>::operator ()(Energy2 y) const { - assert(!isnan(y.rawValue())); + assert(!std::isnan(y.rawValue())); // set up the values of the s variables Energy2 s12(ZERO),s23(ZERO),s13(ZERO), m2sum(_m2[0]+_m2[1]+_m2[2]+_m2[3]); switch(_channeltype[_thechannel]) { case 1: s12 = _souter; s23 = y; s13 = m2sum-s12-s23; break; case 2: s23 = y; s13 = _souter; s12 = m2sum-s23-s13; break; case 3: s23 = _souter; s13 = y; s12 = m2sum-s23-s13; break; } // compute the jacobian // computer the denominator for the jacobian InvEnergy2 jacdem = ZERO; Energy2 sjac(ZERO); Energy2 rm2,rw2; for(unsigned int ix=0,N=_channeltype.size(); ix<N; ++ix) { switch(_channeltype[ix]) { case 1: sjac = s12; break; case 2: sjac = s13; break; case 3: sjac = s23; break; } - assert(!isnan(sjac.rawValue())); + assert(!std::isnan(sjac.rawValue())); InvEnergy2 term; if(_mapping[ix]==0) { rm2 = sqr(_channelmass[ix]); rw2 = sqr(_channelwidth[ix]); Energy4 tmp = sqr(sjac-rm2) + rw2*rm2; term = 
_channelweights[ix]*_channelmass[ix]*_channelwidth[ix]/tmp; } else if(_mapping[ix]==1) { term = _channelweights[ix]*sqr(_channelmass[ix]/(sjac-sqr(_channelmass[ix]))); } else if(_mapping[ix]==2) { term = UnitRemoval::InvE2 * _channelweights[ix]*(_channelpower[ix]+1.)* pow(sjac*UnitRemoval::InvE2, _channelpower[ix]); } else assert(false); jacdem += term; } // now computer the matrix element return _theME.threeBodyMatrixElement(_mode,_m2[0],s12,s13, s23,_m[1],_m[2],_m[3])/jacdem; } // calculate the width for a given mass template <class T> Energy ThreeBodyAllOnCalculator<T>::partialWidth(Energy2 q2) const { Outer outer(this,_relerr); _m[0] = sqrt(q2); _m2[0]=q2; // check the decay is kinematically allowed if(_m[0]<_m[1]+_m[2]+_m[3]) return ZERO; // set up for the different channels unsigned int N = _channeltype.size(); vector<double> rupp(N,0.),rlow(N,0.); for(unsigned int ix=0; ix<N; ++ix) { Energy2 upp(ZERO),low(ZERO); // work out the kinematic limits switch(_channeltype[ix]) { case 1: upp = sqr(_m[0]-_m[3]); low = sqr(_m[1]+_m[2]); break; case 2: upp = sqr(_m[0]-_m[2]); low = sqr(_m[1]+_m[3]); break; case 3: upp = sqr(_m[0]-_m[1]); low = sqr(_m[2]+_m[3]); break; default: assert(false); } // transform them if(_channelmass[ix] > ZERO) { if(_channelwidth[ix] > 1e-8*MeV) { rupp[ix] = atan2((upp-_channelmass[ix]*_channelmass[ix]), _channelmass[ix]*_channelwidth[ix]); rlow[ix] = atan2((low-_channelmass[ix]*_channelmass[ix]), _channelmass[ix]*_channelwidth[ix]); _mapping[ix] = 0; if(rupp[ix]/rlow[ix]>0.&&_channelwidth[ix]/_channelmass[ix]<1e-6) { _mapping[ix] = 1; Energy2 m2=sqr(_channelmass[ix]); rupp[ix] = m2/(low-m2); rlow[ix] = m2/(upp-m2); } } else { _mapping[ix] = 1; Energy2 m2=sqr(_channelmass[ix]); rupp[ix] = m2/(low-m2); rlow[ix] = m2/(upp-m2); } } else { _mapping[ix] = 2; rupp[ix] = pow(upp*UnitRemoval::InvE2, _channelpower[ix]+1.); rlow[ix] = pow(low*UnitRemoval::InvE2, _channelpower[ix]+1.); } } // perform the integrals for all the different channels Energy4 sum(ZERO); for(unsigned int ix=0,N=_channeltype.size(); ix<N; ++ix) { // perform the integral using GSLIntegrator class _thechannel=ix; GSLIntegrator intb(1e-35,_relerr,1000); sum += _channelweights[ix] * intb.value(outer,rlow[ix],rupp[ix]); } // final factors Energy3 fact = pow<3,1>(Constants::twopi * _m[0]); return sum/fact/32.; } diff --git a/Sampling/BinSampler.cc b/Sampling/BinSampler.cc --- a/Sampling/BinSampler.cc +++ b/Sampling/BinSampler.cc @@ -1,728 +1,728 @@ // -*- C++ -*- // // BinSampler.cc is a part of Herwig - A multi-purpose Monte Carlo event generator // Copyright (C) 2002-2012 The Herwig Collaboration // // Herwig is licenced under version 2 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. // // // This is the implementation of the non-inlined, non-templated member // functions of the BinSampler class. 
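//
// BinSampler performs flat Monte Carlo integration of a single XComb bin. When unweighted
// events are requested, generate() applies a hit-or-miss step against the reference weight
// assigned by the sampler. Schematically, for a point with raw weight w, reference weight
// wref and unweighting parameter kappa (a sketch of the logic used below, not an
// additional member of the class):
//
//   double p = std::min(std::abs(w), kappa*wref)/(kappa*wref);
//   if ( p < 1 && UseRandom::rnd() > p ) w = 0.;                        // reject
//   else w = (w >= 0. ? 1. : -1.)*std::max(std::abs(w), kappa*wref);    // keep
//
// Points below kappa*wref are thus either dropped or promoted to +-kappa*wref, which
// leaves the expectation value of the returned weight unchanged.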
// #include "BinSampler.h" #include "ThePEG/Interface/ClassDocumentation.h" #include "ThePEG/EventRecord/Particle.h" #include "ThePEG/Repository/UseRandom.h" #include "ThePEG/Repository/EventGenerator.h" #include "ThePEG/Utilities/DescribeClass.h" #include "ThePEG/Repository/Repository.h" #include "ThePEG/Interface/Parameter.h" #include "ThePEG/Interface/Switch.h" #include "ThePEG/Persistency/PersistentOStream.h" #include "ThePEG/Persistency/PersistentIStream.h" #include "ThePEG/Handlers/StandardEventHandler.h" #include "ThePEG/Handlers/StandardXComb.h" #include <boost/progress.hpp> #include "GeneralSampler.h" using namespace Herwig; BinSampler::BinSampler() : MultiIterationStatistics(), theBias(1.), theWeighted(false), theInitialPoints(1000000), theNIterations(1), theEnhancementFactor(1.0), theNonZeroInPresampling(false), theHalfPoints(false), theMaxNewMax(30), theReferenceWeight(1.0), theBin(-1), theInitialized(false), theRemapperPoints(0), theRemapChannelDimension(false), theLuminosityMapperBins(0), theGeneralMapperBins(0), theRemapperMinSelection(0.00001), theIntegrated(false), theRemappersFilled(false), theHasGrids(false), theKappa(1.){} BinSampler::~BinSampler() {} IBPtr BinSampler::clone() const { return new_ptr(*this); } IBPtr BinSampler::fullclone() const { return new_ptr(*this); } void BinSampler::sampler(Ptr<GeneralSampler>::tptr s) { theSampler = s; } Ptr<GeneralSampler>::tptr BinSampler::sampler() const { return theSampler; } string BinSampler::process() const { ostringstream os(""); const StandardEventHandler& eh = *theEventHandler; const StandardXComb& xc = *eh.xCombs()[theBin]; os << xc.matrixElement()->name() << " : "; os << xc.mePartonData()[0]->PDGName() << " " << xc.mePartonData()[1]->PDGName() << " -> "; for ( cPDVector::const_iterator pid = xc.mePartonData().begin() + 2; pid != xc.mePartonData().end(); ++pid ) os << (**pid).PDGName() << " "; return os.str(); } string BinSampler::shortprocess() const { ostringstream os(""); const StandardEventHandler& eh = *theEventHandler; const StandardXComb& xc = *eh.xCombs()[theBin]; os << xc.mePartonData()[0]->id() << " " << xc.mePartonData()[1]->id() << " : "; for ( cPDVector::const_iterator pid = xc.mePartonData().begin() + 2; pid != xc.mePartonData().end(); ++pid ) os << (**pid).id() << " "; return os.str(); } string BinSampler::id() const { ostringstream os(""); const StandardEventHandler& eh = *theEventHandler; const StandardXComb& xc = *eh.xCombs()[theBin]; string name = xc.matrixElement()->name(); string::size_type i = name.find_first_of("["); string nameFirst = name.substr(0,i); i = name.find_first_of("]"); string nameSecond = name.substr(i+1); os << nameFirst << nameSecond << ":"; for ( cPDVector::const_iterator pid = xc.mePartonData().begin(); pid != xc.mePartonData().end(); ++pid ) os << (**pid).id() << (pid != (--xc.mePartonData().end()) ? "," : ""); return os.str(); } double BinSampler::evaluate(vector<double> p, bool remap) { double w = 1.0; if ( remap && !remappers.empty() ) { for ( size_t k = 0; k < p.size(); ++k ) { map<size_t,Remapper>::const_iterator r = remappers.find(k); if ( r != remappers.end() ) { pair<double,double> f = r->second.generate(p[k]); p[k] = f.first; w /= f.second; } } } try { w *= eventHandler()->dSigDR(p) / nanobarn; } catch (Veto&) { w = 0.0; } catch (...) 
{ throw; } if (randomNumberString()!="") for ( size_t k = 0; k < p.size(); ++k ) { RandomNumberHistograms[RandomNumberIndex(id(),k)].first.book(p[k],w); RandomNumberHistograms[RandomNumberIndex(id(),k)].second+=w; } return w; } double BinSampler::generate() { double w = 1.; for ( size_t k = 0; k < lastPoint().size(); ++k ) { lastPoint()[k] = UseRandom::rnd(); } try { w = evaluate(lastPoint()); } catch (Veto&) { w = 0.0; } catch (...) { throw; } if ( !weighted() && initialized() ) { double p = min(abs(w),kappa()*referenceWeight())/(kappa()*referenceWeight()); double sign = w >= 0. ? 1. : -1.; if ( p < 1 && UseRandom::rnd() > p ) w = 0.; else w = sign*max(abs(w),referenceWeight()*kappa()); } select(w); if ( w != 0.0 ) accept(); assert(kappa()==1.||sampler()->almostUnweighted()); return w; } void BinSampler::fillRemappers(bool progress) { if ( remappers.empty() ) return; unsigned long nanPoints = 0; boost::progress_display* progressBar = 0; if ( progress ) { Repository::clog() << "warming up " << process(); progressBar = new boost::progress_display(theRemapperPoints,Repository::clog()); } unsigned long countzero =0; for ( unsigned long k = 0; k < theRemapperPoints; ++k,++countzero ) { if (countzero>=theRemapperPoints)break; double w = 1.; for ( size_t j = 0; j < lastPoint().size(); ++j ) { lastPoint()[j] = UseRandom::rnd(); } try { w = evaluate(lastPoint(),false); } catch (Veto&) { w = 0.0; } catch (...) { throw; } - if ( isnan(w) || isinf(w) ) + if ( ! isfinite(w) ) ++nanPoints; if ( theNonZeroInPresampling && w==0. ){ k--; continue; } if ( w != 0.0 ) { countzero=0; for ( map<size_t,Remapper>::iterator r = remappers.begin(); r != remappers.end(); ++r ) r->second.fill(lastPoint()[r->first],w); } if ( progressBar ) ++(*progressBar); } if ( progressBar ) { delete progressBar; } if ( nanPoints ) { Repository::clog() << "Warning: " << nanPoints << " out of " << theRemapperPoints << " points with nan or inf " << "weight encountered while filling remappers.\n" << flush; } } void BinSampler::saveIntegrationData() const { XML::Element stats = MultiIterationStatistics::toXML(); stats.appendAttribute("process",id()); sampler()->grids().append(stats); } void BinSampler::readIntegrationData() { if ( theIntegrated ) return; bool haveStats = false; list<XML::Element>::iterator sit = sampler()->grids().children().begin(); for ( ; sit != sampler()->grids().children().end(); ++sit ) { if ( sit->type() != XML::ElementTypes::Element ) continue; if ( sit->name() != "MultiIterationStatistics" ) continue; string proc; sit->getFromAttribute("process",proc); if ( proc == id() ) { haveStats = true; break; } } if ( haveStats ) { MultiIterationStatistics::fromXML(*sit); sampler()->grids().erase(sit); theIntegrated = true; } else { throw Exception() << "\n--------------------------------------------------------------------------------\n\n" << "Expected integration data.\n\n" << "* When using the build setup make sure the integrate command has been run.\n\n" << "* Check the [EventGenerator].log file for further information.\n\n" << "* Make sure that the Herwig folder can be found and that it contains a HerwigGrids.xml file.\n\n" << "* If you have split the integration jobs, make sure that each integration job was finished.\n" << " Afterwards delete the global HerwigGrids.xml file in the Herwig subfolder\n" << " to automatically create an updated version of the global HerwigGrids.xml file.\n\n" << "--------------------------------------------------------------------------------\n" << Exception::abortnow; } } void 
BinSampler::saveRemappers() const { if ( remappers.empty() ) return; XML::Element maps(XML::ElementTypes::Element,"Remappers"); maps.appendAttribute("process",id()); for ( map<size_t,Remapper>::const_iterator r = remappers.begin(); r != remappers.end(); ++r ) { XML::Element rmap = r->second.toXML(); rmap.appendAttribute("dimension",r->first); maps.append(rmap); } sampler()->grids().append(maps); } void BinSampler::setupRemappers(bool progress) { if ( !theRemapperPoints ) return; if ( theRemappersFilled ) return; lastPoint().resize(dimension()); bool haveGrid = false; list<XML::Element>::iterator git = sampler()->grids().children().begin(); for ( ; git != sampler()->grids().children().end(); ++git ) { if ( git->type() != XML::ElementTypes::Element ) continue; if ( git->name() != "Remappers" ) continue; string proc; git->getFromAttribute("process",proc); if ( proc == id() ) { haveGrid = true; break; } } if ( haveGrid ) { for ( list<XML::Element>::iterator cit = git->children().begin(); cit != git->children().end(); ++cit ) { if ( cit->type() != XML::ElementTypes::Element ) continue; if ( cit->name() != "Remapper" ) continue; size_t dimension = 0; cit->getFromAttribute("dimension",dimension); remappers[dimension].fromXML(*cit); } sampler()->grids().erase(git); } if ( !haveGrid ) { const StandardEventHandler& eh = *eventHandler(); const StandardXComb& xc = *eh.xCombs()[bin()]; const pair<int,int>& pdims = xc.partonDimensions(); set<int> remapped; if ( theRemapChannelDimension && xc.diagrams().size() > 1 && dimension() > pdims.first + pdims.second ) { remappers[pdims.first] = Remapper(xc.diagrams().size(),theRemapperMinSelection,false); remapped.insert(pdims.first); } if ( theLuminosityMapperBins > 1 && dimension() >= pdims.first + pdims.second ) { for ( int n = 0; n < pdims.first; ++n ) { remappers[n] = Remapper(theLuminosityMapperBins,theRemapperMinSelection,true); remapped.insert(n); } for ( int n = dimension() - pdims.second; n < dimension(); ++n ) { remappers[n] = Remapper(theLuminosityMapperBins,theRemapperMinSelection,true); remapped.insert(n); } } if ( theGeneralMapperBins > 1 ) { for ( int n = 0; n < dimension(); n++ ) { if ( remapped.find(n) == remapped.end() ) { remappers[n] = Remapper(theGeneralMapperBins,theRemapperMinSelection,true); remapped.insert(n); } } } fillRemappers(progress); for ( map<size_t,Remapper>::iterator r = remappers.begin(); r != remappers.end(); ++r ) { r->second.finalize(); } } theRemappersFilled = true; } void BinSampler::runIteration(unsigned long points, bool progress) { boost::progress_display* progressBar = 0; if ( progress ) { Repository::clog() << "integrating " << process() << " , iteration " << (iterations().size() + 1); progressBar = new boost::progress_display(points,Repository::clog()); } double w=0.; double maxweight=0; int numlastmax=0; unsigned long countzero =0; int newmax=0; for ( unsigned long k = 0; k < points; ++k,++countzero ) { if (countzero>=points)break; w=abs(generate()); if(theNonZeroInPresampling && w==0.0){ k--; continue; } if (w!=0.0) countzero =0; numlastmax++; if (theHalfPoints&&maxweight<w&& numlastmax<(int)(points/2.)){ if(++newmax>theMaxNewMax){ throw Exception() << "\n--------------------------------------------------------------------------------\n\n" << "To many new Maxima.\n\n" << "* With the option:\n\n" << "* set Sampler:BinSampler:HalfPoints Yes\n\n" << "* for every new maximum weight found until the half of the persampling points\n" << "* the counter is set to zero. 
We count the number of new maxima.\n" << "* You have reached: "<<newmax<<"\n" << "* Did you apply reasonable cuts to the process?\n" << "* You can set the maximum allowed new maxima by:" << "* set Sampler:BinSampler:MaxNewMax N\n\n" << "--------------------------------------------------------------------------------\n" << Exception::abortnow; } maxweight=w; k=0; numlastmax=0; } if ( progress ) { ++(*progressBar); } } if ( progress ) { Repository::clog() << "integrated ( " << averageWeight() << " +/- " << sqrt(averageWeightVariance()) << " ) nb\nepsilon = " << (abs(maxWeight()) != 0. ? averageAbsWeight()/abs(maxWeight()) : 0.); if ( !iterations().empty() ) Repository::clog() << " chi2 = " << chi2(); Repository::clog() << "\n"; Repository::clog() << "--------------------------------------------------------------------------------\n"; } if ( progressBar ) delete progressBar; } void BinSampler::initialize(bool progress) { lastPoint().resize(dimension()); if (randomNumberString()!="") for(size_t i=0;i<lastPoint().size();i++){ RandomNumberHistograms[RandomNumberIndex(id(),i)] = make_pair( RandomNumberHistogram(),0.); } if ( initialized() ) return; if ( !sampler()->grids().children().empty() ) { nIterations(1); } if ( !integrated() ) { unsigned long points = initialPoints(); for ( unsigned long k = 0; k < nIterations(); ++k ) { runIteration(points,progress); if ( k < nIterations() - 1 ) { points = (unsigned long)(points*enhancementFactor()); adapt(); nextIteration(); } } } isInitialized(); } void BinSampler::finalize(bool){ if (theRandomNumbers!="") for ( map<RandomNumberIndex,pair<RandomNumberHistogram,double> >:: const_iterator b = RandomNumberHistograms.begin(); b != RandomNumberHistograms.end(); ++b ) { b->second.first.dump(randomNumberString(), b->first.first,shortprocess(),b->first.second); } } BinSampler::RandomNumberHistogram:: RandomNumberHistogram(double low, double up, unsigned int nbins) : lower(low) { nbins = nbins + 1; double c = up / (nbins-1.); for ( unsigned int k = 1; k < nbins; ++k ) { bins[low+c*k] = 0.; binsw1[low+c*k] = 0.; } } void BinSampler::RandomNumberHistogram:: dump(const std::string& folder,const std::string& prefix, const std::string& process, const int NR) const { ostringstream fname(""); std::string prefix2; std::string prefix3=prefix; std::remove_copy(prefix.begin(), prefix.end(), std::back_inserter(prefix2), '.'); prefix3=prefix2;prefix2.clear(); std::remove_copy(prefix3.begin(), prefix3.end(), std::back_inserter(prefix2), ':'); prefix3=prefix2;prefix2.clear(); std::remove_copy(prefix3.begin(), prefix3.end(), std::back_inserter(prefix2), ','); fname << "RN-"<< NR ; ofstream out((folder+"/"+prefix2+fname.str()+".dat").c_str()); double sumofweights=0.; for ( map<double,double >::const_iterator b = bins.begin();b != bins.end(); ++b ) sumofweights+=b->second; double sumofweights2=0.; for ( map<double,double >::const_iterator b = binsw1.begin();b != binsw1.end(); ++b ) sumofweights2+=b->second; map<double,double >::const_iterator b2 = binsw1.begin(); if ( sumofweights == 0 ) { cerr << "Not enough statistic accumulated for " << process << " skipping random number diagnostic.\n" << flush; return; } for ( map<double,double >::const_iterator b = bins.begin(); b != bins.end(); ++b, ++b2) { out << " " << b->first << " " << b->second/sumofweights*100. << " " << b2->second/sumofweights2*100. 
<< "\n" << flush; } double xmin = -0.01; double xmax = 1.01; ofstream gpout((folder+"/"+prefix2+fname.str()+".gp").c_str()); gpout << "set terminal epslatex color solid\n" << "set output '" << prefix2+fname.str() << "-plot.tex'\n" << "set xrange [" << xmin << ":" << xmax << "]\n"; gpout << "set xlabel 'rn "<<NR <<"' \n"; gpout << "set size 0.5,0.6\n"; gpout << "plot '" << prefix2+fname.str() << ".dat' u ($1):($2) w boxes lc rgbcolor \"blue\" t '{\\tiny "<<process <<" }',"; gpout << " '" << prefix2+fname.str(); gpout << ".dat' u ($1):($3) w boxes lc rgbcolor \"red\" t '';"; gpout << "reset\n"; } // If needed, insert default implementations of virtual function defined // in the InterfacedBase class here (using ThePEG-interfaced-impl in Emacs). void BinSampler::persistentOutput(PersistentOStream & os) const { MultiIterationStatistics::put(os); os << theBias << theWeighted << theInitialPoints << theNIterations << theEnhancementFactor << theNonZeroInPresampling << theHalfPoints << theMaxNewMax << theReferenceWeight << theBin << theInitialized << theLastPoint << theEventHandler << theSampler << theRandomNumbers << theRemapperPoints << theRemapChannelDimension << theLuminosityMapperBins << theGeneralMapperBins << theKappa; } void BinSampler::persistentInput(PersistentIStream & is, int) { MultiIterationStatistics::get(is); is >> theBias >> theWeighted >> theInitialPoints >> theNIterations >> theEnhancementFactor >> theNonZeroInPresampling >> theHalfPoints >> theMaxNewMax >> theReferenceWeight >> theBin >> theInitialized >> theLastPoint >> theEventHandler >> theSampler >> theRandomNumbers >> theRemapperPoints >> theRemapChannelDimension >> theLuminosityMapperBins >> theGeneralMapperBins >> theKappa; } // *** Attention *** The following static variable is needed for the type // description system in ThePEG. Please check that the template arguments // are correct (the class and its base class), and that the constructor // arguments are correct (the class name and the name of the dynamically // loadable library where the class implementation can be found). DescribeClass<BinSampler,MultiIterationStatistics> describeHerwigBinSampler("Herwig::BinSampler", "HwSampling.so"); void BinSampler::Init() { static ClassDocumentation<BinSampler> documentation ("BinSampler samples XCombs bins. 
This default implementation performs flat MC integration.");
  static Parameter<BinSampler,unsigned long> interfaceInitialPoints
    ("InitialPoints",
     "The number of points to use for initial integration.",
     &BinSampler::theInitialPoints, 1000000, 1, 0,
     false, false, Interface::lowerlim);
  static Parameter<BinSampler,size_t> interfaceNIterations
    ("NIterations",
     "The number of iterations to perform initially.",
     &BinSampler::theNIterations, 1, 1, 0,
     false, false, Interface::lowerlim);
  static Parameter<BinSampler,double> interfaceEnhancementFactor
    ("EnhancementFactor",
     "The enhancement factor for the number of points in the next iteration.",
     &BinSampler::theEnhancementFactor, 2.0, 1.0, 0,
     false, false, Interface::lowerlim);
  static Switch<BinSampler,bool> interfaceNonZeroInPresampling
    ("NonZeroInPresampling",
     "Switch on to count only non-zero weights in presampling.",
     &BinSampler::theNonZeroInPresampling, true, false, false);
  static SwitchOption interfaceNonZeroInPresamplingYes
    (interfaceNonZeroInPresampling, "Yes", "", true);
  static SwitchOption interfaceNonZeroInPresamplingNo
    (interfaceNonZeroInPresampling, "No", "", false);
  static Switch<BinSampler,bool> interfaceHalfPoints
    ("HalfPoints",
     "Switch on to reset the counter of points if a new maximum was found in the first 1/2 of the points.",
     &BinSampler::theHalfPoints, true, false, false);
  static SwitchOption interfaceHalfPointsYes
    (interfaceHalfPoints, "Yes", "", true);
  static SwitchOption interfaceHalfPointsNo
    (interfaceHalfPoints, "No", "", false);
  static Parameter<BinSampler,int> interfaceMaxNewMax
    ("MaxNewMax",
     "The maximum number of allowed new maxima in combination with the HalfPoints option.",
     &BinSampler::theMaxNewMax, 30, 1, 0,
     false, false, Interface::lowerlim);
  static Parameter<BinSampler,string> interfaceRandomNumbers
    ("RandomNumbers",
     "Prefix for distributions of the random numbers.",
     &BinSampler::theRandomNumbers, "", false, false);
  static Parameter<BinSampler,unsigned long> interfaceRemapperPoints
    ("RemapperPoints",
     "The number of points to be used for filling remappers.",
     &BinSampler::theRemapperPoints, 10000, 0, 0,
     false, false, Interface::lowerlim);
  static Switch<BinSampler,bool> interfaceRemapChannelDimension
    ("RemapChannelDimension",
     "Switch on remapping of the channel dimension.",
     &BinSampler::theRemapChannelDimension, true, false, false);
  static SwitchOption interfaceRemapChannelDimensionYes
    (interfaceRemapChannelDimension, "Yes", "", true);
  static SwitchOption interfaceRemapChannelDimensionNo
    (interfaceRemapChannelDimension, "No", "", false);
  static Parameter<BinSampler,unsigned long> interfaceLuminosityMapperBins
    ("LuminosityMapperBins",
     "The number of bins to be used for remapping parton luminosities.",
     &BinSampler::theLuminosityMapperBins, 0, 0, 0,
     false, false, Interface::lowerlim);
  static Parameter<BinSampler,unsigned long> interfaceGeneralMapperBins
    ("GeneralMapperBins",
     "The number of bins to be used for remapping other phase space dimensions.",
     &BinSampler::theGeneralMapperBins, 0, 0, 0,
     false, false, Interface::lowerlim);
  static Parameter<BinSampler,double> interfaceRemapperMinSelection
    ("RemapperMinSelection",
     "The minimum bin selection probability for remappers.",
     &BinSampler::theRemapperMinSelection, 0.00001, 0.0, 1.0,
     false, false, Interface::limited);
  static Parameter<BinSampler,double> interfaceKappa
    ("Kappa",
     "In AlmostUnweighted mode unweight to Kappa * ReferenceWeight.",
     &BinSampler::theKappa, 1., 0.000001, 1.0,
     false, false, Interface::limited);
}
diff --git a/Sampling/CellGrids/SimpleCellGrid.h
b/Sampling/CellGrids/SimpleCellGrid.h --- a/Sampling/CellGrids/SimpleCellGrid.h +++ b/Sampling/CellGrids/SimpleCellGrid.h @@ -1,376 +1,376 @@ // -*- C++ -*- // // SimpleCellGrid.hpp is a part of ExSample // Copyright (C) 2012-2013 Simon Platzer // // ExSample is licenced under version 2 of the GPL, see COPYING for details. // #ifndef EXSAMPLE_SimpleCellGrid_hpp_included #define EXSAMPLE_SimpleCellGrid_hpp_included #include "CellGrid.h" #include <cmath> namespace ExSample { /** * \brief A simple cell grid providing basic adaption and sampling * \author Simon Platzer */ class SimpleCellGrid : public CellGrid { public: /** * Default constructor */ SimpleCellGrid() : CellGrid() {} /** * Construct given boundaries and a weight */ SimpleCellGrid(const std::vector<double>& newLowerLeft, const std::vector<double>& newUpperRight, bool keepWeightInformation = true, double newWeight = 0.0); /** * Produce a new instance of a cell grid */ virtual CellGrid* makeInstance() const; /** * Produce a new instance of a cell grid */ virtual CellGrid* makeInstance(const std::vector<double>& newLowerLeft, const std::vector<double>& newUpperRight, double newWeight = 0.0) const; /** * Split this cell grid in the given dimension and coordinate, if * it is a leaf */ virtual void split(std::size_t newSplitDimension, double newSplitCoordinate); virtual void splitter(size_t dim, int rat); public: /** * Return the first child */ const SimpleCellGrid& firstChild() const { return dynamic_cast<const SimpleCellGrid&>(CellGrid::firstChild()); } /** * Access the first child */ SimpleCellGrid& firstChild() { return dynamic_cast<SimpleCellGrid&>(CellGrid::firstChild()); } /** * Return the second child */ const SimpleCellGrid& secondChild() const { return dynamic_cast<const SimpleCellGrid&>(CellGrid::secondChild()); } /** * Access the second child */ SimpleCellGrid& secondChild() { return dynamic_cast<SimpleCellGrid&>(CellGrid::secondChild()); } public: /** * A simple counter to store information used for adaption */ struct Counter { /** * Default constructor */ Counter() : nPoints(0.0), sumOfWeights(0.0), sumOfSquaredWeights(0.0), maxWeight(0.0) {} /** * The number of points */ double nPoints; /** * The sum of weights */ double sumOfWeights; /** * The sum of squared weights */ double sumOfSquaredWeights; /** * The maximum weight */ double maxWeight; /** * Book a point */ void book(double weight) { nPoints += 1.0; sumOfWeights += std::abs(weight); sumOfSquaredWeights += sqr(weight); maxWeight = std::max(std::abs(weight),maxWeight); } /** * Return the average weight */ double averageWeight() const { return nPoints != 0.0 ? sumOfWeights/nPoints : 0.0; } /** * Return the variance of the weights */ double varianceOfAverage() const { return nPoints > 1.0 ? 
fabs(sumOfSquaredWeights/nPoints - sqr(sumOfWeights/nPoints))/(nPoints-1) : 0.0; } }; /** * Return weight information for adaption steps */ const std::vector<std::pair<Counter,Counter> >& weightInformation() const { return theWeightInformation; } /** * Access weight information for adaption steps */ std::vector<std::pair<Counter,Counter> >& weightInformation() { return theWeightInformation; } /** * Update the weight information for the given point */ virtual void updateWeightInformation(const std::vector<double>& p, double w); /** * Adjust the reference weight */ void adjustReferenceWeight(double w) { theReferenceWeight = std::max(theReferenceWeight,std::abs(w)); } /** * Return the reference weight */ double getReferenceWeight() const { return theReferenceWeight; } /** * Perform a default adaption step, splitting along the dimension * which shows up the largest difference in average weights; if * this exceeds gain, perform the split. */ virtual void adapt(double gain, double epsilon, std::set<SimpleCellGrid*>& newCells); /** * Update the weights of the cells from information accumulated so * far */ virtual void setWeights(); public: /** * Sample a point flat in this cell */ template<class RndGenerator> void sampleFlatPoint(std::vector<double>& p, RndGenerator& rnd) const { assert(p.size() == lowerLeft().size()); for ( size_t k = 0; k < p.size(); ++k ) { p[k] = lowerLeft()[k] + rnd.rnd()*(upperRight()[k]-lowerLeft()[k]); } } /** * Sample a point flat in this cell, keeping parameters fixed */ template<class RndGenerator> void sampleFlatPoint(std::vector<double>& p, const std::vector<bool>& parameterFlags, RndGenerator& rnd) const { assert(p.size() == lowerLeft().size()); for ( size_t k = 0; k < p.size(); ++k ) { if ( parameterFlags[k] ) continue; p[k] = lowerLeft()[k] + rnd.rnd()*(upperRight()[k]-lowerLeft()[k]); } } /** * Explore the cell grid, given a number of points to be sampled * in each cell; the weights of the cell will contain the maximum * weight encountered. If newCells is non-empty explore only these * cells, otherwise explore all cells. */ template<class RndGenerator, class Function> void explore(std::size_t nPoints, RndGenerator& rnd, Function& f, std::set<SimpleCellGrid*>& newCells, std::ostream& warn) { unsigned long nanPoints = 0; if ( !isLeaf() ) { firstChild().explore(nPoints,rnd,f,newCells,warn); secondChild().explore(nPoints,rnd,f,newCells,warn); return; } if ( !newCells.empty() ) { if ( newCells.find(this) == newCells.end() ) return; } std::vector<double> point(lowerLeft().size()); for ( std::size_t k = 0; k < nPoints; ++k ) { sampleFlatPoint(point,rnd); double w = f.evaluate(point); - if ( isnan(w) || isinf(w) ) { + if ( ! 
isfinite(w) ) { ++nanPoints; continue; } updateWeightInformation(point,std::abs(w)); } if ( nanPoints ) { warn << "Warning: " << nanPoints << " out of " << nPoints << " points with nan or inf weight encountered while " << "exploring a cell.\n" << std::flush; } } /** * Select a cell */ template<class RndGenerator> SimpleCellGrid* selectCell(RndGenerator& rnd) { if ( isLeaf() ) return this; if ( firstChild().active() && secondChild().active() ) { double p = firstChild().integral()/integral(); if ( rnd.rnd() <= p ) return firstChild().selectCell(rnd); else return secondChild().selectCell(rnd); } if ( firstChild().active() && !secondChild().active() ) return firstChild().selectCell(rnd); else return secondChild().selectCell(rnd); } /** * Sample a point and return its weight */ template<class RndGenerator, class Function> double sample(RndGenerator& rnd, Function& f, std::vector<double>& p, bool unweight, bool adjustReference) { SimpleCellGrid* selected = selectCell(rnd); selected->sampleFlatPoint(p,rnd); double w = f.evaluate(p); selected->updateWeightInformation(p,w); double xw = integral()*w/selected->weight(); if ( adjustReference ) { selected->adjustReferenceWeight(xw); } if ( unweight ) { double r = selected->getReferenceWeight(); if ( r == 0. ) return xw; double p = std::min(std::abs(xw),r)/r; double sign = xw >= 0. ? 1. : -1.; if ( p < 1 && rnd.rnd() > p ) xw = 0.; else xw = sign*std::max(std::abs(xw),r); } return xw; } /** * Sample a point and return its weight */ template<class RndGenerator, class Function> std::pair<double,double> generate(RndGenerator& rnd, Function& f, std::vector<double>& p) { SimpleCellGrid* selected = selectCell(rnd); selected->sampleFlatPoint(p,rnd); double w = f.evaluate(p); selected->updateWeightInformation(p,w); return std::make_pair(w,selected->weight()); } /** * Sample a point and return its weight */ template<class RndGenerator, class Function> std::pair<double,double> generate(RndGenerator& rnd, Function& f, std::vector<double>& p, const std::vector<bool>& parameterFlags) { SimpleCellGrid* selected = selectCell(rnd); selected->sampleFlatPoint(p,parameterFlags,rnd); double w = f.evaluate(p); selected->updateWeightInformation(p,w); return std::make_pair(w,selected->weight()); } public: /** * Fill CellGrid data from an XML element */ virtual void fromXML(const XML::Element&); /** * Return an XML element for the data of this CellGrid */ virtual XML::Element toXML() const; private: /** * Weight information for adaption steps */ std::vector<std::pair<Counter,Counter> > theWeightInformation; /** * The reference weight to be used for unweighting */ double theReferenceWeight; }; } #endif // EXSAMPLE_SimpleCellGrid_hpp_included diff --git a/Sampling/GeneralSampler.cc b/Sampling/GeneralSampler.cc --- a/Sampling/GeneralSampler.cc +++ b/Sampling/GeneralSampler.cc @@ -1,1036 +1,1036 @@ // -*- C++ -*- // // GeneralSampler.cc is a part of Herwig - A multi-purpose Monte Carlo event generator // Copyright (C) 2002-2012 The Herwig Collaboration // // Herwig is licenced under version 2 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. // // // This is the implementation of the non-inlined, non-templated member // functions of the GeneralSampler class. 
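//
// GeneralSampler drives one BinSampler per subprocess bin. The samplers are stored in a
// map keyed by their cumulative selection probability, so generate() picks a bin with
// samplers().upper_bound(UseRandom::rnd())->second, i.e. with probability proportional to
// its bias. updateSamplers() sets this bias to the bin's average absolute weight (or to a
// flat value if FlatSubprocesses is enabled), with MinSelection as a lower bound. In
// unweighted mode a selected point is then retained with probability
// referenceWeight/bias/theMaxWeight (in weighted mode the weight is simply multiplied by
// this factor), where the reference weight is either the global maximum weight over all
// bins (GlobalMaximumWeight) or the bin's own maximum weight, in both cases enhanced by
// the MaxEnhancement factor.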
// #include "GeneralSampler.h" #include "ThePEG/Interface/ClassDocumentation.h" #include "ThePEG/EventRecord/Particle.h" #include "ThePEG/Repository/UseRandom.h" #include "ThePEG/Repository/EventGenerator.h" #include "ThePEG/Repository/Repository.h" #include "ThePEG/Utilities/DescribeClass.h" #include "ThePEG/Utilities/LoopGuard.h" #include "ThePEG/Interface/Reference.h" #include "ThePEG/Interface/Switch.h" #include "ThePEG/Interface/Parameter.h" #include "ThePEG/Persistency/PersistentOStream.h" #include "ThePEG/Persistency/PersistentIStream.h" #include "ThePEG/Handlers/StandardEventHandler.h" #include "ThePEG/Handlers/StandardXComb.h" #include "Herwig/Utilities/RunDirectories.h" #include "Herwig/Utilities/XML/ElementIO.h" #include <boost/progress.hpp> #include <boost/filesystem.hpp> #include <cstdlib> #include <sstream> using namespace Herwig; GeneralSampler::GeneralSampler() : theVerbose(false), theIntegratedXSec(ZERO), theIntegratedXSecErr(ZERO), theUpdateAfter(1), crossSectionCalls(0), gotCrossSections(false), theSumWeights(0.), theSumWeights2(0.), theAttempts(0), theAccepts(0), theMaxWeight(0.0), theAddUpSamplers(false), theGlobalMaximumWeight(true), theFlatSubprocesses(false), isSampling(false), theMinSelection(0.01), runCombinationData(false), theAlmostUnweighted(false), maximumExceeds(0), maximumExceededBy(0.), correctWeights(0.),theMaxEnhancement(1.05), didReadGrids(false), theParallelIntegration(false), theIntegratePerJob(0), theIntegrationJobs(0), theIntegrationJobsCreated(0), justAfterIntegrate(false), theWriteGridsOnFinish(false) {} GeneralSampler::~GeneralSampler() {} IBPtr GeneralSampler::clone() const { return new_ptr(*this); } IBPtr GeneralSampler::fullclone() const { return new_ptr(*this); } double sign(double x) { return x >= 0. ? 1. : -1.; } void GeneralSampler::initialize() { if ( theParallelIntegration && runLevel() == ReadMode ) throw Exception() << "\n--------------------------------------------------------------------------------\n\n" << "Parallel integration is only supported in the build/integrate/run mode\n\n" << "--------------------------------------------------------------------------------\n" << Exception::abortnow; if ( runLevel() == ReadMode || runLevel() == IntegrationMode ) { assert(theSamplers.empty()); if ( !theGrids.children().empty() ) Repository::clog() << "--------------------------------------------------------------------------------\n\n" << "Using an existing grid. Please consider re-running the grid adaption\n" << "when there have been significant changes to parameters, cuts, etc.\n\n" << "--------------------------------------------------------------------------------\n" << flush; } if ( theParallelIntegration ) { if ( !theIntegratePerJob && !theIntegrationJobs ) throw Exception() << "Please specify the number of subprocesses per integration job or the " << "number of integration jobs to be created." 
<< Exception::abortnow; if ( theIntegrationJobs ) { unsigned int nintegrate = eventHandler()->nBins()/theIntegrationJobs; if ( eventHandler()->nBins() % theIntegrationJobs != 0 ) ++nintegrate; theIntegratePerJob = max(theIntegratePerJob,nintegrate); } unsigned int jobCount = 0; ofstream* jobList = 0; generator()->log() << "--------------------------------------------------------------------------------\n" << "preparing integration jobs ...\n" << flush; vector<int> randomized; vector<int> pickfrom; for ( int b = 0; b < eventHandler()->nBins(); ++b ) pickfrom.push_back(b); //set<int> check; while ( !pickfrom.empty() ) { size_t idx = UseRandom::irnd(pickfrom.size()); randomized.push_back(pickfrom[idx]); pickfrom.erase(pickfrom.begin() + idx); } int b = 0; for ( vector<int>::const_iterator bx = randomized.begin(); bx != randomized.end(); ++bx, ++b ) { if ( b == 0 || b % theIntegratePerJob == 0 ) { if ( jobList ) { jobList->close(); delete jobList; jobList = 0; } ostringstream name; string prefix = RunDirectories::buildStorage(); if ( prefix.empty() ) prefix = "./"; else if ( *prefix.rbegin() != '/' ) prefix += "/"; name << prefix << "integrationJob" << jobCount; ++jobCount; string fname = name.str(); jobList = new ofstream(fname.c_str()); if ( !*jobList ) { delete jobList; throw Exception() << "Failed to write integration job list" << Exception::abortnow; } } *jobList << *bx << " "; } theIntegrationJobsCreated = jobCount; generator()->log() << "--------------------------------------------------------------------------------\n\n" << "Wrote " << jobCount << " integration jobs\n" << "Please submit integration jobs with the\nintegrate --jobid=x\ncommand for job ids " << "from 0 to " << (jobCount-1) << "\n\n" << "e.g.:\n\n" << " for i in $(seq 0 "<< (jobCount-1) <<");do Herwig integrate --jobid=$i "<<generator()->runName()<<".run & done\n\n" << "--------------------------------------------------------------------------------\n" << flush; if ( jobList ) { jobList->close(); delete jobList; jobList = 0; } theParallelIntegration = false; return; } if ( runLevel() == BuildMode ) return; if ( !samplers().empty() ) return; if ( binSampler()->adaptsOnTheFly() ) { if ( !theAddUpSamplers ) { Repository::clog() << "Warning: On-the-fly adapting samplers require cross section calculation from " << "adding up individual samplers. The AddUpSamplers flag will be switched on."; } theAddUpSamplers = true; } if ( !weighted() && !binSampler()->canUnweight() ) throw Exception() << "Unweighted events requested from weighted bin sampler object."; if ( theFlatSubprocesses && !theGlobalMaximumWeight ) { Repository::clog() << "Warning: Can only use a global maximum weight when selecting subprocesses " << "uniformly. 
The GlobalMaximumWeight flag will be switched on."; theGlobalMaximumWeight = true; } set<int> binsToIntegrate; if ( integrationList() != "" ) { string prefix = RunDirectories::buildStorage(); if ( prefix.empty() ) prefix = "./"; else if ( *prefix.rbegin() != '/' ) prefix += "/"; string fname = prefix + integrationList(); ifstream jobList(fname.c_str()); if ( jobList ) { int b = 0; while ( jobList >> b ) binsToIntegrate.insert(b); } else { Repository::clog() << "Job list '" << integrationList() << "' not found.\n" << "Assuming empty integration job\n" << flush; return; } } if ( binsToIntegrate.empty() ) { for ( int b = 0; b < eventHandler()->nBins(); ++b ) binsToIntegrate.insert(b); } boost::progress_display* progressBar = 0; if ( !theVerbose && !justAfterIntegrate ) { Repository::clog() << "integrating subprocesses"; progressBar = new boost::progress_display(binsToIntegrate.size(),Repository::clog()); } int count=0; for ( set<int>::const_iterator bit = binsToIntegrate.begin(); bit != binsToIntegrate.end(); ++bit ) { count++; if(theVerbose&& (runLevel() == ReadMode || runLevel() == IntegrationMode)) cout<<"\nIntegrate "<< count <<" of "<<binsToIntegrate.size() <<":\n"<<flush; Ptr<BinSampler>::ptr s = theBinSampler->cloneMe(); s->eventHandler(eventHandler()); s->sampler(this); s->bin(*bit); lastSampler(s); s->doWeighted(eventHandler()->weighted()); s->setupRemappers(theVerbose); if ( justAfterIntegrate ) s->readIntegrationData(); s->initialize(theVerbose); samplers()[*bit] = s; if ( !theVerbose && !justAfterIntegrate ) ++(*progressBar); if ( s->nanPoints() && theVerbose ) { Repository::clog() << "warning: " << s->nanPoints() << " of " << s->allPoints() << " points with nan or inf weight.\n" << flush; } } if ( progressBar ) { delete progressBar; progressBar = 0; } if ( runLevel() == IntegrationMode ) { theGrids = XML::Element(XML::ElementTypes::Element,"Grids"); for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin(); s != samplers().end(); ++s ) { s->second->saveGrid(); s->second->saveRemappers(); s->second->saveIntegrationData(); } writeGrids(); return; } if ( theVerbose ) { bool oldAdd = theAddUpSamplers; theAddUpSamplers = true; try { Repository::clog() << "estimated total cross section is ( " << integratedXSec()/nanobarn << " +/- " << integratedXSecErr()/nanobarn << " ) nb\n" << flush; } catch (...) { theAddUpSamplers = oldAdd; throw; } theAddUpSamplers = oldAdd; } updateSamplers(); if ( samplers().empty() ) { throw Exception() << "No processes with non-zero cross section present." << Exception::abortnow; } if ( !justAfterIntegrate ) { theGrids = XML::Element(XML::ElementTypes::Element,"Grids"); for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin(); s != samplers().end(); ++s ) { s->second->saveGrid(); s->second->saveRemappers(); } writeGrids(); } } double GeneralSampler::generate() { long excptTries = 0; gotCrossSections = false; lastSampler(samplers().upper_bound(UseRandom::rnd())->second); double weight = 0.; while ( true ) { try { weight = 1.0; double p = lastSampler()->referenceWeight()/lastSampler()->bias()/theMaxWeight; if ( weighted() ) weight *= p; else if ( p < UseRandom::rnd() ){ weight = 0.0; // The lastSampler was picked according to the bias of the process. 
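        // A rejection at this stage is part of the normal unweighting rather than a
        // failure, so the counter is decremented here to cancel the ++excptTries applied
        // further down for zero-weight points; only genuine failures count towards the
        // event handler's maxLoop() limit.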
--excptTries; } if ( weight != 0.0 ) weight *= lastSampler()->generate()/lastSampler()->referenceWeight(); } catch(BinSampler::NextIteration) { updateSamplers(); lastSampler(samplers().upper_bound(UseRandom::rnd())->second); if ( ++excptTries == eventHandler()->maxLoop() ) break; continue; } catch (...) { throw; } - if ( isnan(lastSampler()->lastWeight()) || isinf(lastSampler()->lastWeight()) ) { + if ( ! isfinite(lastSampler()->lastWeight()) ) { lastSampler() = samplers().upper_bound(UseRandom::rnd())->second; if ( ++excptTries == eventHandler()->maxLoop() ) break; continue; } theAttempts += 1; if ( abs(weight) == 0.0 ) { lastSampler(samplers().upper_bound(UseRandom::rnd())->second); if ( ++excptTries == eventHandler()->maxLoop() ) break; continue; } if ( !eventHandler()->weighted() && !theAlmostUnweighted ) { if ( abs(weight) > 1. ) { ++maximumExceeds; maximumExceededBy += abs(weight)-1.; } correctWeights+=weight; if ( weight > 0.0 ) weight = 1.; else weight = -1.; } break; } theAccepts += 1; if ( excptTries == eventHandler()->maxLoop() ) throw Exception() << "GeneralSampler::generate() : Maximum number of tries to re-run event " << "selection reached. Aborting now." << Exception::runerror; lastPoint() = lastSampler()->lastPoint(); lastSampler()->accept(); theSumWeights += weight; theSumWeights2 += sqr(weight); return weight; } void GeneralSampler::rejectLast() { if ( !lastSampler() ) return; double w = 0.0; if ( weighted() ) w = lastSampler()->lastWeight()/lastSampler()->bias()/theMaxWeight; else w = lastSampler()->lastWeight()/lastSampler()->referenceWeight(); lastSampler()->reject(); theSumWeights -= w; theSumWeights2 -= sqr(w); theAttempts -= 1; theAccepts -= 1; } void GeneralSampler::updateSamplers() { map<double,Ptr<BinSampler>::ptr> checkedSamplers; for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin(); s != samplers().end(); ++s ) { if ( s->second->averageAbsWeight() == 0.0 ) { generator()->log() << "Warning: no phase space points with non-zero cross section\n" << "could be obtained for the process: " << s->second->process() << "\n" << "This process will not be considered. Try increasing InitialPoints.\n" << flush; if ( s->second->nanPoints() ) { generator()->log() << "Warning: " << s->second->nanPoints() << " of " << s->second->allPoints() << " points with nan or inf weight\n" << "in " << s->second->process() << "\n" << flush; } continue; } checkedSamplers.insert(*s); } theSamplers = checkedSamplers; if ( samplers().empty() ) return; double allMax = 0.0; double sumbias = 0.; for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin(); s != samplers().end(); ++s ) { double bias = 1.; if ( !theFlatSubprocesses ) bias *= s->second->averageAbsWeight(); s->second->bias(bias); sumbias += bias; allMax = max(allMax,s->second->maxWeight()*theMaxEnhancement); } double nsumbias = 0.0; bool needAdjust = false; for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin(); s != samplers().end(); ++s ) { needAdjust |= s->second->bias()/sumbias < theMinSelection; s->second->bias(max(s->second->bias()/sumbias,theMinSelection)); nsumbias += s->second->bias(); } if ( nsumbias == 0.0 ) { samplers().clear(); return; } if ( needAdjust ) { for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin(); s != samplers().end(); ++s ) { s->second->bias(s->second->bias()/nsumbias); } } theMaxWeight = 0.0; for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin(); s != samplers().end(); ++s ) { double wref = theGlobalMaximumWeight ? 
allMax : s->second->maxWeight()*theMaxEnhancement; s->second->referenceWeight(wref); theMaxWeight = max(theMaxWeight,wref/s->second->bias()); if ( (isSampling && s->second == lastSampler()) || !isSampling ) s->second->nextIteration(); } map<double,Ptr<BinSampler>::ptr> newSamplers; double current = 0.; for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin(); s != samplers().end(); ++s ) { if ( s->second->bias() == 0.0 ) continue; current += s->second->bias(); newSamplers[current] = s->second; } samplers() = newSamplers; } void GeneralSampler::currentCrossSections() const { if ( !theAddUpSamplers ) { double n = attempts(); if ( n > 1 ) { theIntegratedXSec = sumWeights()*maxXSec()/attempts(); double sw = sumWeights(); double sw2 = sumWeights2(); theIntegratedXSecErr = maxXSec()*sqrt(abs(sw2/n-sqr(sw/n))/(n-1)); } else { theIntegratedXSec = ZERO; theIntegratedXSecErr = ZERO; } return; } if ( gotCrossSections ) return; if ( crossSectionCalls > 0 ) { if ( ++crossSectionCalls == theUpdateAfter ) { crossSectionCalls = 0; } else return; } ++crossSectionCalls; gotCrossSections = true; theIntegratedXSec = ZERO; double var = 0.0; for ( map<double,Ptr<BinSampler>::ptr>::const_iterator s = samplers().begin(); s != samplers().end(); ++s ) { theIntegratedXSec += s->second->integratedXSec(); var += sqr(s->second->integratedXSecErr()/nanobarn); } theIntegratedXSecErr = sqrt(var)*nanobarn; } void GeneralSampler::prepare() { readGrids(); } // If needed, insert default implementations of virtual function defined // in the InterfacedBase class here (using ThePEG-interfaced-impl in Emacs). void GeneralSampler::doinit() { if ( RunDirectories::empty() ) RunDirectories::pushRunId(generator()->runName()); if ( integratePerJob() || integrationJobs() ) { theParallelIntegration = true; theIntegratePerJob = integratePerJob(); theIntegrationJobs = integrationJobs(); } readGrids(); if ( theGrids.children().empty() && runLevel() == RunMode ) generator()->log() << "\n--------------------------------------------------------------------------------\n\n" << "Warning: No grid file could be found at the start of this run.\n\n" << "* For a read/run setup intented to be used with --setupfile please consider\n" << " using the build/integrate/run setup.\n" << "* For a build/integrate/run setup to be used with --setupfile please ensure\n" << " that the same setupfile is provided to both, the integrate and run steps.\n\n" << "--------------------------------------------------------------------------------\n" << flush; if ( samplers().empty() && runLevel() == RunMode ) justAfterIntegrate = true; SamplerBase::doinit(); } void GeneralSampler::dofinish() { set<string> compensating; for ( map<double,Ptr<BinSampler>::ptr>::const_iterator s = samplers().begin(); s != samplers().end(); ++s ) { if ( s->second->compensating() ) { compensating.insert(s->second->process()); } if ( s->second->nanPoints() ) { generator()->log() << "warning: " << s->second->nanPoints() << " of " << s->second->allPoints() << " points with nan or inf weight\n" << "in " << s->second->process() << "\n" << flush; } s->second->finalize(theVerbose); } if ( theVerbose ) { if ( !compensating.empty() ) { generator()->log() << "warning: sampling for the following processes is still compensating:\n"; for ( set<string>::const_iterator c = compensating.begin(); c != compensating.end(); ++c ) generator()->log() << *c << "\n"; } generator()->log() << "final integrated cross section is ( " << integratedXSec()/nanobarn << " +/- " << integratedXSecErr()/nanobarn << " 
) nb\n" << flush;
  }
  if ( !compensating.empty() ) {
    generator()->log() << "Warning: Some samplers are still in compensating mode.\n" << flush;
  }
  if ( maximumExceeds != 0 ) {
    //generator()->log() << maximumExceeds << " of " << theAttempts
    //                   << " attempted points exceeded the guessed maximum weight\n"
    //                   << "with an average relative deviation of "
    //                   << maximumExceededBy/maximumExceeds << "\n\n" << flush;
    generator()->log()
      <<"\n\n\nNote: In this run "<<maximumExceeds<<" of the "<<theAccepts<<" accepted events\n"
      <<"were found with a weight W larger than the expected Wmax.\n";
    generator()->log()
      <<"This corresponds to a cross section difference between:\n"
      <<" UnitWeights: "<< theMaxWeight*theSumWeights/theAttempts<<"nb\n"
      <<" AlmostUnweighted: "<< theMaxWeight*correctWeights/theAttempts<< "nb\n"
      << " use 'set Sampler:AlmostUnweighted On' to switch to non-unit weights.\n\n";
    generator()->log()
      <<"The maximum weight determined in the read/integrate step has been enhanced by \n"
      << " set /Herwig/Samplers/Sampler:MaxEnhancement "<< theMaxEnhancement
      << ".\nIf the rate of exceedances ("<<(double)maximumExceeds*100/(double)theAccepts
      << "%) or the change of the cross section is large,\nyou can try to:\n\n"
      << "Enhance the number of points used in the read/integrate step\n"
      << " set /Herwig/Samplers/Sampler:BinSampler:InitialPoints ...\n\n"
      << "and/or enhance the reference weight found in the read/integrate step\n"
      << " set /Herwig/Samplers/Sampler:MaxEnhancement 1.x\n\n"
      << "If this does not help (and your process is well defined by cuts)\n"
      << "don't hesitate to contact herwig@projects.hepforge.org.\n\n";
  }
  if ( runCombinationData ) {
    string dataName = RunDirectories::runStorage();
    if ( dataName.empty() )
      dataName = "./";
    else if ( *dataName.rbegin() != '/' )
      dataName += "/";
    dataName += "HerwigSampling.dat";
    ofstream data(dataName.c_str());
    double runXSec = theMaxWeight*theSumWeights/theAttempts;
    double runXSecErr = sqr(theMaxWeight)*(1./theAttempts)*(1./(theAttempts-1.))*
      abs(theSumWeights2 - sqr(theSumWeights)/theAttempts);
    data << setprecision(17);
    data << "CrossSectionCombined " << (integratedXSec()/nanobarn)
         << " +/- " << (integratedXSecErr()/nanobarn) << "\n"
         << "CrossSectionRun " << runXSec << " +/- " << sqrt(runXSecErr) << "\n"
         << "PointsAttempted " << theAttempts << "\n"
         << "PointsAccepted " << theAccepts << "\n"
         << "SumWeights " << theSumWeights*theMaxWeight << "\n"
         << "SumWeights2 " << theSumWeights2*sqr(theMaxWeight) << "\n"
         << flush;
  }
  theGrids = XML::Element(XML::ElementTypes::Element,"Grids");
  for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin();
        s != samplers().end(); ++s ) {
    s->second->saveGrid();
    s->second->saveRemappers();
    if ( justAfterIntegrate )
      s->second->saveIntegrationData();
  }
  if ( theWriteGridsOnFinish )
    writeGrids();
  SamplerBase::dofinish();
}

void GeneralSampler::doinitrun() {
  readGrids();
  if ( theGrids.children().empty() && !didReadGrids )
    generator()->log()
      << "\n--------------------------------------------------------------------------------\n\n"
      << "Warning: No grid file could be found at the start of this run.\n\n"
      << "* For a read/run setup intended to be used with --setupfile please consider\n"
      << " using the build/integrate/run setup.\n"
      << "* For a build/integrate/run setup to be used with --setupfile please ensure\n"
      << " that the same setupfile is provided to both the integrate and run steps.\n\n"
      << "--------------------------------------------------------------------------------\n"
      << flush;
  if ( samplers().empty() ) {
    justAfterIntegrate = true;
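    // An empty sampler map at this point means the run follows directly on an integrate
    // step: initialize() will rebuild the samplers and, since justAfterIntegrate is set,
    // read their integration data and remappers back from the HerwigGrids.xml file.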
if ( !hasSetupFile() ) initialize(); } else { for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin(); s != samplers().end(); ++s ) { s->second->setupRemappers(theVerbose); if ( justAfterIntegrate ) s->second->readIntegrationData(); s->second->initialize(theVerbose); } } isSampling = true; SamplerBase::doinitrun(); } void GeneralSampler::rebind(const TranslationMap & trans) { for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin(); s != samplers().end(); ++s ) s->second = trans.translate(s->second); SamplerBase::rebind(trans); } IVector GeneralSampler::getReferences() { IVector ret = SamplerBase::getReferences(); for ( map<double,Ptr<BinSampler>::ptr>::iterator s = samplers().begin(); s != samplers().end(); ++s ) ret.push_back(s->second); return ret; } void GeneralSampler::writeGrids() const { if ( theGrids.children().empty() ) return; string dataName = RunDirectories::runStorage(); if ( dataName.empty() ) dataName = "./"; else if ( *dataName.rbegin() != '/' ) dataName += "/"; dataName += "HerwigGrids.xml"; ofstream out(dataName.c_str()); XML::ElementIO::put(theGrids,out); } void GeneralSampler::readGrids() { // return if grids were already read if ( didReadGrids ) return; // check for a global HerwigGrids.xml file or combine integration jobs into a global HerwigGrids.xml file // Show messages of the integration job combination only in the first run (if no global HerwigGrids.xml file is found in one of the directories) // or in case of an error // Check whether a global HerwigGrids.xml file was found and keep messages in a stringstream buffer beforehand bool globalHerwigGridsFileFound = false; bool integrationJobCombinationSuccessful = true; std::stringstream messageBuffer; RunDirectories directories; while ( directories && !didReadGrids ) { string dataName = directories.nextRunStorage(); if ( dataName.empty() ) dataName = "./"; else if ( *dataName.rbegin() != '/' ) dataName += "/"; string directoryName = dataName; dataName += "HerwigGrids.xml"; ifstream in(dataName.c_str()); if ( in ) { theGrids = XML::ElementIO::get(in); didReadGrids = true; // Set to true if a global HerwigGrids.xml file was found in any of the directories globalHerwigGridsFileFound = true; } else { // Check if the integration job was split and try to merge the single integration jobs together // integrationJobsCreated() == 0 indicates that parallel integration has not been // requested, while the parallel integration parameters may well yield a single job if(integrationJobsCreated() >= 1 && runLevel() == RunMode) { messageBuffer << "\n\n* Global HerwigGrids.xml file does not exist yet" << "\n and the integration was split into " << integrationJobsCreated() << " integration jobs."
<< "\n Trying to combine single integration jobs to a global HerwigGrids.xml file" << "\n using the following directory " << directoryName << "."; theGrids = XML::Element(XML::ElementTypes::Element,"Grids"); integrationJobCombinationSuccessful = true; for(unsigned int currentProcessedIntegrationJobNum = 0; currentProcessedIntegrationJobNum < integrationJobsCreated(); ++currentProcessedIntegrationJobNum) { ostringstream currentProcessedIntegrationJob; currentProcessedIntegrationJob << directoryName << "integrationJob" << currentProcessedIntegrationJobNum << "/HerwigGrids.xml"; if(boost::filesystem::exists(boost::filesystem::path(currentProcessedIntegrationJob.str()))) { ifstream localGridFileIN(currentProcessedIntegrationJob.str().c_str()); if(localGridFileIN) { theGrids = theGrids + XML::ElementIO::get(localGridFileIN); messageBuffer << "\n* Added integration job " << currentProcessedIntegrationJobNum << " to global HerwigGrids.xml file."; } else { integrationJobCombinationSuccessful = false; messageBuffer << "\n* Could not open/add integration job " << currentProcessedIntegrationJobNum << " to global HerwigGrids.xml file."; } } else { integrationJobCombinationSuccessful = false; messageBuffer << "\n* Could not find integration job " << currentProcessedIntegrationJob.str(); } } if(integrationJobCombinationSuccessful) { string globalGridFile = directoryName + "HerwigGrids.xml"; ofstream globalGridFileOF(globalGridFile.c_str()); XML::ElementIO::put(theGrids,globalGridFileOF); messageBuffer << "\n* Global HerwigGrids.xml file was created, the integration jobs 0 to " << integrationJobsCreated()-1 << " were combined." << "\n* If previous warnings in regards to the HerwigGrids.xml file occured, these can be safely ignored." << "\n* Note: This message will occur only in the first run and will be suppressed in further runs.\n" << flush; didReadGrids = true; } else { messageBuffer << "\n* Global HerwigGrids.xml file could not be created due to failed combination of integration jobs." << "\n Please check the above-mentioned missing/failed integration jobs which are needed for the combination." << "\n* Note: It can be that the HerwigGrids.xml file is searched and can be found in further directories." 
<< "\n In this case you can ignore this warning message.\n" << flush; } } } } // Show messages if global HerwigGrids.xml file was not found or first combination run if (!globalHerwigGridsFileFound && (theVerbose || !integrationJobCombinationSuccessful)) BaseRepository::cout() << messageBuffer.str() << "\n" << flush; if ( !didReadGrids ) theGrids = XML::Element(XML::ElementTypes::Element,"Grids"); } void GeneralSampler::persistentOutput(PersistentOStream & os) const { os << theVerbose << theBinSampler << theSamplers << theLastSampler << theUpdateAfter << crossSectionCalls << gotCrossSections << ounit(theIntegratedXSec,nanobarn) << ounit(theIntegratedXSecErr,nanobarn) << theSumWeights << theSumWeights2 << theAttempts << theAccepts << theMaxWeight << theAddUpSamplers << theGlobalMaximumWeight << theFlatSubprocesses << isSampling << theMinSelection << runCombinationData << theAlmostUnweighted << maximumExceeds << maximumExceededBy << correctWeights << theMaxEnhancement << theParallelIntegration << theIntegratePerJob << theIntegrationJobs << theIntegrationJobsCreated << theWriteGridsOnFinish; } void GeneralSampler::persistentInput(PersistentIStream & is, int) { is >> theVerbose >> theBinSampler >> theSamplers >> theLastSampler >> theUpdateAfter >> crossSectionCalls >> gotCrossSections >> iunit(theIntegratedXSec,nanobarn) >> iunit(theIntegratedXSecErr,nanobarn) >> theSumWeights >> theSumWeights2 >> theAttempts >> theAccepts >> theMaxWeight >> theAddUpSamplers >> theGlobalMaximumWeight >> theFlatSubprocesses >> isSampling >> theMinSelection >> runCombinationData >> theAlmostUnweighted >> maximumExceeds >> maximumExceededBy >> correctWeights >> theMaxEnhancement >> theParallelIntegration >> theIntegratePerJob >> theIntegrationJobs >> theIntegrationJobsCreated >> theWriteGridsOnFinish; } // *** Attention *** The following static variable is needed for the type // description system in ThePEG. Please check that the template arguments // are correct (the class and its base class), and that the constructor // arguments are correct (the class name and the name of the dynamically // loadable library where the class implementation can be found). 
DescribeClass<GeneralSampler,SamplerBase> describeHerwigGeneralSampler("Herwig::GeneralSampler", "HwSampling.so"); void GeneralSampler::Init() { static ClassDocumentation<GeneralSampler> documentation ("A GeneralSampler class"); static Reference<GeneralSampler,BinSampler> interfaceBinSampler ("BinSampler", "The bin sampler to be used.", &GeneralSampler::theBinSampler, false, false, true, false, false); static Parameter<GeneralSampler,size_t> interfaceUpdateAfter ("UpdateAfter", "Update cross sections every number of events.", &GeneralSampler::theUpdateAfter, 1, 1, 0, false, false, Interface::lowerlim); static Switch<GeneralSampler,bool> interfaceVerbose ("Verbose", "", &GeneralSampler::theVerbose, false, false, false); static SwitchOption interfaceVerboseOn (interfaceVerbose, "On", "", true); static SwitchOption interfaceVerboseOff (interfaceVerbose, "Off", "", false); static Switch<GeneralSampler,bool> interfaceAddUpSamplers ("AddUpSamplers", "Calculate cross sections from adding up individual samplers.", &GeneralSampler::theAddUpSamplers, false, false, false); static SwitchOption interfaceAddUpSamplersOn (interfaceAddUpSamplers, "On", "", true); static SwitchOption interfaceAddUpSamplersOff (interfaceAddUpSamplers, "Off", "", false); static Switch<GeneralSampler,bool> interfaceGlobalMaximumWeight ("GlobalMaximumWeight", "Use a global maximum weight instead of partial unweighting.", &GeneralSampler::theGlobalMaximumWeight, true, false, false); static SwitchOption interfaceGlobalMaximumWeightOn (interfaceGlobalMaximumWeight, "On", "", true); static SwitchOption interfaceGlobalMaximumWeightOff (interfaceGlobalMaximumWeight, "Off", "", false); static Parameter<GeneralSampler,double> interfaceMaxEnhancement ("MaxEnhancement", "Enhance the maximum reference weight found in the read step.", &GeneralSampler::theMaxEnhancement, 1.1, 1.0, 1.5, false, false, Interface::limited); static Switch<GeneralSampler,bool> interfaceFlatSubprocesses ("FlatSubprocesses", "[debug] Perform a flat subprocess selection.", &GeneralSampler::theFlatSubprocesses, false, false, false); static SwitchOption interfaceFlatSubprocessesOn (interfaceFlatSubprocesses, "On", "", true); static SwitchOption interfaceFlatSubprocessesOff (interfaceFlatSubprocesses, "Off", "", false); static Parameter<GeneralSampler,double> interfaceMinSelection ("MinSelection", "A minimum subprocess selection probability.", &GeneralSampler::theMinSelection, 0.01, 0.0, 1.0, false, false, Interface::limited); static Switch<GeneralSampler,bool> interfaceRunCombinationData ("RunCombinationData", "", &GeneralSampler::runCombinationData, false, false, false); static SwitchOption interfaceRunCombinationDataOn (interfaceRunCombinationData, "On", "", true); static SwitchOption interfaceRunCombinationDataOff (interfaceRunCombinationData, "Off", "", false); static Switch<GeneralSampler,bool> interfaceAlmostUnweighted ("AlmostUnweighted", "", &GeneralSampler::theAlmostUnweighted, false, false, false); static SwitchOption interfaceAlmostUnweightedOn (interfaceAlmostUnweighted, "On", "", true); static SwitchOption interfaceAlmostUnweightedOff (interfaceAlmostUnweighted, "Off", "", false); static Switch<GeneralSampler,bool> interfaceParallelIntegration ("ParallelIntegration", "Prepare parallel jobs for integration.", &GeneralSampler::theParallelIntegration, false, false, false); static SwitchOption interfaceParallelIntegrationYes (interfaceParallelIntegration, "Yes", "", true); static SwitchOption interfaceParallelIntegrationNo (interfaceParallelIntegration, 
"No", "", false); static Parameter<GeneralSampler,unsigned int> interfaceIntegratePerJob ("IntegratePerJob", "The number of subprocesses to integrate per job.", &GeneralSampler::theIntegratePerJob, 0, 0, 0, false, false, Interface::lowerlim); static Parameter<GeneralSampler,unsigned int> interfaceIntegrationJobs ("IntegrationJobs", "The maximum number of integration jobs to create.", &GeneralSampler::theIntegrationJobs, 0, 0, 0, false, false, Interface::lowerlim); static Parameter<GeneralSampler,unsigned int> interfaceIntegrationJobsCreated ("IntegrationJobsCreated", "The number of integration jobs which were actually created.", &GeneralSampler::theIntegrationJobsCreated, 1, 1, 0, false, false, Interface::lowerlim); static Switch<GeneralSampler,bool> interfaceWriteGridsOnFinish ("WriteGridsOnFinish", "Write grids on finishing a run.", &GeneralSampler::theWriteGridsOnFinish, false, false, false); static SwitchOption interfaceWriteGridsOnFinishYes (interfaceWriteGridsOnFinish, "Yes", "", true); static SwitchOption interfaceWriteGridsOnFinishNo (interfaceWriteGridsOnFinish, "No", "", false); } diff --git a/Sampling/GeneralStatistics.h b/Sampling/GeneralStatistics.h --- a/Sampling/GeneralStatistics.h +++ b/Sampling/GeneralStatistics.h @@ -1,314 +1,314 @@ // -*- C++ -*- // // GeneralStatictis.h is a part of Herwig - A multi-purpose Monte Carlo event generator // Copyright (C) 2002-2012 The Herwig Collaboration // // Herwig is licenced under version 2 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. // #ifndef Herwig_GeneralStatistics_H #define Herwig_GeneralStatistics_H // // This is the declaration of the GeneralStatistics class. // #include "ThePEG/Persistency/PersistentOStream.h" #include "ThePEG/Persistency/PersistentIStream.h" #include "Herwig/Utilities/XML/Element.h" namespace Herwig { using namespace ThePEG; /** * \ingroup Matchbox * \author Simon Platzer * * \brief General Monte Carlo statistics. * */ class GeneralStatistics { public: /** @name Standard constructors and destructors. */ //@{ /** * The default constructor. */ GeneralStatistics() : theMaxWeight(0.), theMinWeight(Constants::MaxDouble), theSumWeights(0.), theSumSquaredWeights(0.), theSumAbsWeights(0.), theSelectedPoints(0), theAcceptedPoints(0), theNanPoints(0), theAllPoints(0), theLastWeight(0.) {} /** * The destructor. */ virtual ~GeneralStatistics(); //@} public: /** * Return the last calculated chi^2. */ virtual double chi2() const { return 0.; } /** * Reset these statistics. */ void reset() { *this = GeneralStatistics(); } public: /** * Return the last weight encountered. */ double lastWeight() const { return theLastWeight; } /** * Return the maximum absolute weight */ double maxWeight() const { return theMaxWeight; } /** * Return the minimum absolute weight */ double minWeight() const { return theMinWeight; } /** * Set the maximum absolute weight */ void maxWeight(double w) { theMaxWeight = w; } /** * Set the minimum absolute weight */ void minWeight(double w) { theMinWeight = w; } /** * Return the sum of weights */ double sumWeights() const { return theSumWeights; } /** * Return the sum of squared weights */ double sumSquaredWeights() const { return theSumSquaredWeights; } /** * Return the sum of absolute weights */ double sumAbsWeights() const { return theSumAbsWeights; } /** * Return the number of selected points. */ unsigned long selectedPoints() const { return theSelectedPoints; } /** * Return the nnumber of accepted points. 
*/ unsigned long acceptedPoints() const { return theAcceptedPoints; } /** * Return the number of points where a nan or inf weight has been * encountered. */ unsigned long nanPoints() const { return theNanPoints; } /** * Return the number of all points. */ unsigned long allPoints() const { return theAllPoints; } /** * Return the average weight. */ virtual double averageWeight() const { return selectedPoints() > 0 ? sumWeights()/selectedPoints() : 0.; } /** * Return the average absolute weight. */ virtual double averageAbsWeight() const { return selectedPoints() > 0 ? sumAbsWeights()/selectedPoints() : 0.; } /** * Return the variance of weights. */ double weightVariance() const { return selectedPoints() > 1 ? abs(sumSquaredWeights() - sqr(sumWeights())/selectedPoints())/(selectedPoints()-1) : 0.; } /** * Return the variance of absolute weights. */ double absWeightVariance() const { return selectedPoints() > 1 ? abs(sumSquaredWeights() - sqr(sumAbsWeights())/selectedPoints())/(selectedPoints()-1) : 0.; } /** * Return the variance of the average weight. */ virtual double averageWeightVariance() const { return selectedPoints() > 1 ? weightVariance()/selectedPoints() : 0.; } /** * Return the variance of the average absolute weight. */ virtual double averageAbsWeightVariance() const { return selectedPoints() > 1 ? absWeightVariance()/selectedPoints() : 0; } /** * Select an event */ virtual void select(double weight, bool doIntegral = true) { - if ( isnan(weight) || isinf(weight) ) { + if ( ! isfinite(weight) ) { theLastWeight = weight; theNanPoints += 1; theAllPoints += 1; return; } theLastWeight = weight; theMaxWeight = max(theMaxWeight,abs(weight)); theMinWeight = min(theMinWeight,abs(weight)); if ( !doIntegral ) return; theSumWeights += weight; theSumSquaredWeights += sqr(weight); theSumAbsWeights += abs(weight); theSelectedPoints += 1; theAllPoints += 1; } /** * Accept an event. */ virtual void accept() { theAcceptedPoints += 1; } /** * Reject an event. */ virtual void reject() { - if ( isnan(lastWeight()) || isinf(lastWeight()) ) { + if ( ! isfinite(lastWeight()) ) { theNanPoints -= 1; theAllPoints -= 1; return; } theSumWeights -= lastWeight(); theSumSquaredWeights -= sqr(lastWeight()); theSumAbsWeights -= abs(lastWeight()); theSelectedPoints -= 1; theAcceptedPoints -= 1; theAllPoints -= 1; } public: /** @name Functions used by the persistent I/O system. */ //@{ /** * Function used to write out object persistently. * @param os the persistent output stream written to. */ void put(PersistentOStream & os) const; /** * Function used to read in object persistently. * @param is the persistent input stream read from. * @param version the version number of the object when written. */ void get(PersistentIStream & is); //@} /** * Fill statistics data from an XML element */ void fromXML(const XML::Element&); /** * Return an XML element for the data of this statistics */ XML::Element toXML() const; private: /** * The maximum weight encountered. */ double theMaxWeight; /** * The minimum weight encountered. */ double theMinWeight; /** * The sum of weights. */ double theSumWeights; /** * The sum of weights squared. */ double theSumSquaredWeights; /** * The sum of absolute values of weights */ double theSumAbsWeights; /** * The number of selected points */ unsigned long theSelectedPoints; /** * The number of accepted points */ unsigned long theAcceptedPoints; /** * The number of points where a nan or inf weight was encountered. */ unsigned long theNanPoints; /** * The number of all points.
*/ unsigned long theAllPoints; /** * The last weight encountered */ double theLastWeight; }; inline PersistentOStream& operator<<(PersistentOStream& os, const GeneralStatistics& s) { s.put(os); return os; } inline PersistentIStream& operator>>(PersistentIStream& is, GeneralStatistics& s) { s.get(is); return is; } } #endif /* Herwig_GeneralStatistics_H */ diff --git a/Sampling/MonacoSampler.cc b/Sampling/MonacoSampler.cc --- a/Sampling/MonacoSampler.cc +++ b/Sampling/MonacoSampler.cc @@ -1,399 +1,399 @@ // -*- C++ -*- // // MonacoSampler.cc is a part of Herwig - A multi-purpose Monte Carlo event generator // Copyright (C) 2002-2012 The Herwig Collaboration // // Herwig is licenced under version 2 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. // // // This is the implementation of the non-inlined, non-templated member // functions of the MonacoSampler class. // #include "MonacoSampler.h" #include "ThePEG/Interface/ClassDocumentation.h" #include "ThePEG/EventRecord/Particle.h" #include "ThePEG/Repository/UseRandom.h" #include "ThePEG/Repository/EventGenerator.h" #include "ThePEG/Utilities/DescribeClass.h" #include "ThePEG/Repository/Repository.h" #include "ThePEG/Interface/Parameter.h" #include "ThePEG/Persistency/PersistentOStream.h" #include "ThePEG/Persistency/PersistentIStream.h" #include "ThePEG/Handlers/StandardEventHandler.h" #include "ThePEG/Handlers/StandardXComb.h" #include <boost/progress.hpp> #include "MonacoSampler.h" #include "Herwig/Sampling/GeneralSampler.h" using namespace Herwig; MonacoSampler::MonacoSampler() : BinSampler(), theAlpha(0.875), theGridDivisions(48), theIterationPoints(0) {} MonacoSampler::~MonacoSampler() {} IBPtr MonacoSampler::clone() const { return new_ptr(*this); } IBPtr MonacoSampler::fullclone() const { return new_ptr(*this); } double MonacoSampler::generate() { double w = 1.; // cout<<"\npoint: "; std::valarray<int> upperb(dimension()); for ( int k = 0; k < dimension(); ++k ) { double div = (1 - UseRandom::rnd()) * theGridDivisions; upperb[k] = static_cast<int>(div); double gupper, glower; if ( upperb[k] <= 0 ) { upperb[k] = 0; glower = 0.; gupper = theGrid(k,0); } else if (upperb[k] >= static_cast<int>(theGridDivisions)) { upperb[k] = theGridDivisions-1; glower = theGrid(k,theGridDivisions-2); gupper = theGrid(k,theGridDivisions-1); } else { glower = theGrid(k,upperb[k]-1); gupper = theGrid(k,upperb[k]); } double gdiff = gupper - glower; lastPoint()[k] = glower + (div-upperb[k])*gdiff; w *= gdiff * theGridDivisions; } // cout<<lastPoint()[k]<<" "; try { w *= eventHandler()->dSigDR(lastPoint()) / nanobarn; } catch (Veto&) { w = 0.0; } catch (...) { throw; } // only store numbers double wgt = w; - if ( isnan(wgt) || isinf(wgt) ) wgt = 0; + if ( ! isfinite(wgt) ) wgt = 0; // save results for later grid optimization theIterationPoints++; for ( int k = 0; k < dimension(); ++k ) { theGridData(k,upperb[k]) += wgt*wgt; } if (randomNumberString()!="") for ( size_t k = 0; k < lastPoint().size(); ++k ) { RandomNumberHistograms[RandomNumberIndex(id(),k)].first.book(lastPoint()[k],wgt); RandomNumberHistograms[RandomNumberIndex(id(),k)].second+=wgt; } if ( !weighted() && initialized() ) { double p = min(abs(w),kappa()*referenceWeight())/(kappa()*referenceWeight()); double sign = w >= 0. ? 1. 
: -1.; if ( p < 1 && UseRandom::rnd() > p ) w = 0.; else w = sign*max(abs(w),kappa()*referenceWeight()); } select(w); assert(kappa()==1.||sampler()->almostUnweighted()); if ( w != 0.0 ) accept(); return w; } void MonacoSampler::saveGrid() const { XML::Element grid = toXML(); grid.appendAttribute("process",id()); sampler()->grids().append(grid); } void MonacoSampler::initialize(bool progress) { //read in grid bool haveGrid = false; list<XML::Element>::iterator git = sampler()->grids().children().begin(); for ( ; git != sampler()->grids().children().end(); ++git ) { if ( git->type() != XML::ElementTypes::Element ) continue; if ( git->name() != "Monaco" ) continue; string proc; git->getFromAttribute("process",proc); if ( proc == id() ) { haveGrid = true; break; } } if ( haveGrid ) { fromXML(*git); sampler()->grids().erase(git); didReadGrids(); } else { // flat grid theGrid.resize(dimension(),theGridDivisions); for (int k = 0; k < dimension(); k++) for (size_t l = 0; l < theGridDivisions; l++) theGrid(k,l) = (l+1)/static_cast<double>(theGridDivisions); theGridData = boost::numeric::ublas::zero_matrix<double>(dimension(),theGridDivisions); theIterationPoints = 0; } lastPoint().resize(dimension()); if (randomNumberString()!="") for(size_t i=0;i<lastPoint().size();i++){ RandomNumberHistograms[RandomNumberIndex(id(),i)] = make_pair( RandomNumberHistogram(),0.); } if ( initialized() ) { if ( !hasGrids() ) throw Exception() << "MonacoSampler: Require existing grid when starting to run.\n" << "Did you miss setting --setupfile?" << Exception::abortnow; return; } if ( haveGrid ) { if ( !integrated() ) { runIteration(initialPoints(),progress); adapt(); } isInitialized(); return; } // if ( !sampler()->grids().children().empty() ) { // nIterations(1); // } unsigned long points = initialPoints(); for ( unsigned long k = 0; k < nIterations(); ++k ) { runIteration(points,progress); if ( k < nIterations() - 1 ) { points = (unsigned long)(points*enhancementFactor()); adapt(); nextIteration(); } } adapt(); didReadGrids(); isInitialized(); } void MonacoSampler::adapt() { int dim = dimension(); // refine grid std::valarray<double> gridcumul(dim); for (int k=0; k<dim; ++k) { double gridold = theGridData(k,0); double gridnew = theGridData(k,1); theGridData(k,0) = (gridold + gridnew) / 2.0; gridcumul[k] = theGridData(k,0); for (size_t l=1; l<theGridDivisions-1; ++l) { theGridData(k,l) = gridold + gridnew; gridold = gridnew; gridnew = theGridData(k,l+1); theGridData(k,l) = (theGridData(k,l) + gridnew) / 3.0; gridcumul[k] += theGridData(k,l); } theGridData(k,theGridDivisions-1) = (gridnew + gridold) / 2.0; gridcumul[k] += theGridData(k,theGridDivisions-1); } for (int k=0; k<dim; ++k) { double rc = 0.; std::valarray<double> ri(theGridDivisions); for (size_t l=0; l<theGridDivisions; ++l) { ri[l] = 0.; if ((theGridData(k,l) >= 0) && (gridcumul[k] != 0)) { theGridData(k,l) = max( 1.0e-30, theGridData(k,l) ); double gpart = gridcumul[k] / theGridData(k,l); ri[l] = pow( (gpart - 1.0) / (gpart * log( gpart )), theAlpha); } else { ri[l] = pow( 1. 
/ log( 1e30 ), theAlpha); } rc += ri[l]; } rc /= theGridDivisions; double gridold = 0, gridnew = 0.; double deltar = 0.; unsigned int m = 0; std::valarray<double> theGridRowNew(theGridDivisions); for (size_t l = 0; l < theGridDivisions; ++l) { deltar += ri[l]; gridold = gridnew; gridnew = theGrid(k,l); for (; deltar > rc; m++) { deltar -= rc; theGridRowNew[m] = gridnew - (gridnew - gridold) * deltar / ri[l]; } } for (size_t l = 0; l < theGridDivisions-1; ++l) { theGrid(k,l) = theGridRowNew[l]; } theGrid(k,theGridDivisions-1) = 1.0; } theGridData = boost::numeric::ublas::zero_matrix<double>(dimension(),theGridDivisions); theIterationPoints = 0; } void MonacoSampler::finalize(bool) { // save grid adapt(); XML::Element grid = MonacoSampler::toXML(); grid.appendAttribute("process",id()); sampler()->grids().append(grid); if (randomNumberString()!="") for ( map<RandomNumberIndex,pair<RandomNumberHistogram,double> >:: const_iterator b = RandomNumberHistograms.begin(); b != RandomNumberHistograms.end(); ++b ) { b->second.first.dump(randomNumberString(), b->first.first,shortprocess(),b->first.second); } } void MonacoSampler::fromXML(const XML::Element& grid) { int dim = 0; grid.getFromAttribute("Dimension",dim); if ( dim != dimension() ) { throw std::runtime_error("[MonacoSampler] Number of dimensions in grid file does not match expectation."); } size_t griddivisions = 0; grid.getFromAttribute("GridDivisions",griddivisions); boost::numeric::ublas::matrix<double> tmpgrid(dim,griddivisions); pair<multimap<pair<int,string>,list<XML::Element>::iterator>::const_iterator,multimap<pair<int,string>,list<XML::Element>::iterator>::const_iterator> cit; cit = grid.findAll(XML::ElementTypes::Element,"GridVector"); if ( cit.first->second == grid.children().end() ) throw std::runtime_error("[MonacoSampler] Expected a GridVector element."); for (multimap<pair<int,string>,list<XML::Element>::iterator>::const_iterator iit=cit.first; iit!=cit.second; ++iit) { const XML::Element& gridvector = *iit->second; int k = 0; gridvector.getFromAttribute("Index",k); if ( k >= dim ) { throw std::runtime_error("[MonacoSampler] Index of grid dimension larger than grid size."); } else { list<XML::Element>::const_iterator git; git = gridvector.findFirst(XML::ElementTypes::ParsedCharacterData,""); if ( git == gridvector.children().end() ) throw std::runtime_error("[MonacoSampler] Expected grid data."); istringstream bdata(git->content()); for ( size_t l = 0; l < griddivisions; ++l ) { bdata >> tmpgrid(k,l); } } } // store back into main variable // if griddivisions do not match, rebin preserving bin density theGrid.resize(dim,theGridDivisions); theIterationPoints = 0; double divratio = griddivisions / static_cast<double>(theGridDivisions); for (int k = 0; k < dim; k++) { double xold = 0, xnew = 0, deltar = 0; size_t l = 0; for (size_t m = 0; m < griddivisions; m++) { deltar += 1; xold = xnew; xnew = tmpgrid(k,m); for (; deltar > divratio; l++) { deltar -= divratio; theGrid(k,l) = xnew - (xnew - xold) * deltar; } } theGrid(k,theGridDivisions-1) = 1.0; } theGridData = boost::numeric::ublas::zero_matrix<double>(dimension(),theGridDivisions); } XML::Element MonacoSampler::toXML() const { XML::Element grid(XML::ElementTypes::Element,"Monaco"); grid.appendAttribute("Dimension",dimension()); grid.appendAttribute("GridDivisions",theGridDivisions); for ( int k = 0; k < dimension(); ++k ) { XML::Element gridvector(XML::ElementTypes::Element,"GridVector"); gridvector.appendAttribute("Index",k); ostringstream bdata; bdata << setprecision(17); 
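// write the grid boundaries of this dimension with 17 significant digits,
// which is enough for an IEEE double to be restored exactly when fromXML()
// reads the grid back in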
for ( size_t l = 0; l < theGridDivisions; ++l ) bdata << theGrid(k,l) << " "; XML::Element belem(XML::ElementTypes::ParsedCharacterData,bdata.str()); gridvector.append(belem); grid.append(gridvector); } return grid; } // If needed, insert default implementations of virtual function defined // in the InterfacedBase class here (using ThePEG-interfaced-impl in Emacs). void MonacoSampler::persistentOutput(PersistentOStream & os) const { BinSampler::put(os); os << theAlpha << theGridDivisions; } void MonacoSampler::persistentInput(PersistentIStream & is, int) { BinSampler::get(is); is >> theAlpha >> theGridDivisions; } // *** Attention *** The following static variable is needed for the type // description system in ThePEG. Please check that the template arguments // are correct (the class and its base class), and that the constructor // arguments are correct (the class name and the name of the dynamically // loadable library where the class implementation can be found). DescribeClass<MonacoSampler,BinSampler> describeHerwigMonacoSampler("Herwig::MonacoSampler", "HwSampling.so"); void MonacoSampler::Init() { static ClassDocumentation<MonacoSampler> documentation ("MonacoSampler samples XCombs bins. This implementation performs weighted MC integration using Monaco, an adapted Vegas algorithm."); static Parameter<MonacoSampler,double> interfaceAlpha ("Alpha", "Rate of grid modification (0 for no modification).", &MonacoSampler::theAlpha, 0.875, 0.0, 0, false, false, Interface::lowerlim); static Parameter<MonacoSampler,size_t> interfaceGridDivisions ("GridDivisions", "The number of divisions per grid dimension.", &MonacoSampler::theGridDivisions, 48, 1, 0, false, false, Interface::lowerlim); } diff --git a/Sampling/exsample/exponential_generator.icc b/Sampling/exsample/exponential_generator.icc --- a/Sampling/exsample/exponential_generator.icc +++ b/Sampling/exsample/exponential_generator.icc @@ -1,386 +1,385 @@ // -*- C++ -*- // // exponential_generator.icc is part of ExSample -- A Library for Sampling Sudakov-Type Distributions // // Copyright (C) 2008-2011 Simon Platzer -- simon.plaetzer@desy.de // // ExSample is licenced under version 2 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. 
// // namespace exsample { template<class Function, class Random> void exponential_generator<Function,Random>::initialize() { adaption_info_.dimension = function_->dimension(); adaption_info_.lower_left = function_->support().first; adaption_info_.upper_right = function_->support().second; if (adaption_info_.adapt.empty()) adaption_info_.adapt = std::vector<bool>(adaption_info_.dimension,true); evolution_variable_ = function_->evolution_variable(); evolution_cutoff_ = function_->evolution_cutoff(); sample_variables_ = function_->variable_flags(); sample_other_variables_ = sample_variables_; sample_other_variables_[evolution_variable_] = false; last_point_.resize(adaption_info_.dimension); parametric_selector_ = parametric_selector(&last_point_,sample_other_variables_); exponent_selector_ = parametric_selector(&last_point_,sample_variables_); missing_accessor_ = parametric_missing_accessor(&last_parameter_bin_); parametric_sampler_ = parametric_sampling_selector<rnd_generator<Random> > (&last_point_,&last_parameter_bin_,sample_other_variables_,rnd_gen_); if (initialized_) return; splits_ = 0; for ( std::size_t k = 0; k < adaption_info_.dimension; ++k ) { if ( sample_other_variables_[k] ) continue; parameter_splits_[k].push_back(adaption_info_.lower_left[k]); parameter_splits_[k].push_back(adaption_info_.upper_right[k]); } root_cell_ = binary_tree<cell>(cell(adaption_info_.lower_left, adaption_info_.upper_right, sample_other_variables_, adaption_info_)); root_cell_.value().info().explore(rnd_gen_,adaption_info_,function_,detuning_); root_cell_.value().integral(root_cell_.value().info().volume() * root_cell_.value().info().overestimate()); last_exponent_integrand_.resize(1); check_events_ = adaption_info_.presampling_points; initialized_ = true; } template<class Function, class Random> bool exponential_generator<Function,Random>::split () { if (adaption_info_.freeze_grid <= accepts_) return false; if (compensating_) return false; if (!(*last_cell_).info().bad(adaption_info_)) return false; bool dosplit = false; std::pair<std::size_t,double> sp = (*last_cell_).info().get_split(adaption_info_,dosplit); if (!dosplit) return false; if (!adaption_info_.adapt[sp.first]) return false; if (splits_ == parameter_hash_bits/2) return false; ++splits_; last_cell_.node().split((*last_cell_).split(sp,rnd_gen_,function_,adaption_info_, sample_other_variables_,detuning_)); if ( !sample_other_variables_[sp.first] ) { if ( std::find(parameter_splits_[sp.first].begin(),parameter_splits_[sp.first].end(),sp.second) == parameter_splits_[sp.first].end() ) { parameter_splits_[sp.first].push_back(sp.second); std::sort(parameter_splits_[sp.first].begin(),parameter_splits_[sp.first].end()); if ( sp.first == evolution_variable_ ) { last_exponent_integrand_.push_back(0.); } } } did_split_ = true; last_point_ = function_->parameter_point(); root_cell_.tree_accumulate(parametric_selector_,integral_accessor_,std::plus<double>()); exponents_.clear(); get_exponent(); return true; } template<class Function, class Random> void exponential_generator<Function,Random>::get_exponent () { last_parameter_bin_.reset(); root_cell_.subtree_hash (exponent_selector_,last_parameter_bin_); last_exponent_ = exponents_.find(last_parameter_bin_); if (last_exponent_ != exponents_.end()) return; exponents_[last_parameter_bin_] = linear_interpolator(); last_exponent_ = exponents_.find(last_parameter_bin_); double old_evo = last_point_[evolution_variable_]; std::vector<double>::iterator exp_it = last_exponent_integrand_.begin(); for 
(std::vector<double>::iterator esp = parameter_splits_[evolution_variable_].begin(); esp < boost::prior(parameter_splits_[evolution_variable_].end()); ++esp, ++exp_it) { last_point_[evolution_variable_] = (*esp + *boost::next(esp))/2.; *exp_it = root_cell_.accumulate(parametric_selector_,integral_accessor_,std::plus<double>()); } exp_it = boost::prior(last_exponent_integrand_.end()); double total = 0.; for (std::vector<double>::iterator esp = boost::prior(parameter_splits_[evolution_variable_].end()); esp > parameter_splits_[evolution_variable_].begin(); --esp, --exp_it) { last_exponent_->second.set_interpolation(*esp,total); total += (*exp_it) * ((*esp) - (*boost::prior(esp))); } last_exponent_->second.set_interpolation(parameter_splits_[evolution_variable_].front(),total); last_point_[evolution_variable_] = old_evo; } template<class Function, class Random> std::set<std::vector<double> > exponential_generator<Function,Random>::parameter_points() { std::set<std::vector<double> > res; std::vector<double> pt(adaption_info_.dimension,0.); recursive_parameter_points(res,pt,0); return res; } template<class Function, class Random> void exponential_generator<Function,Random>:: recursive_parameter_points(std::set<std::vector<double> >& res, std::vector<double>& pt, size_t current) { if ( current == adaption_info_.dimension ) { res.insert(pt); return; } if ( sample_variables_[current] ) { recursive_parameter_points(res,pt,current+1); return; } for ( std::vector<double>::const_iterator sp = parameter_splits_[current].begin(); sp != boost::prior(parameter_splits_[current].end()); ++sp ) { pt[current] = (*sp + *boost::next(sp))/2.; recursive_parameter_points(res,pt,current+1); } } template<class Function, class Random> void exponential_generator<Function,Random>::compensate() { if (!did_split_ || !docompensate_) { assert(did_split_ || last_cell_ == root_cell_.begin()); exponents_.clear(); last_cell_->info().overestimate(last_value_,last_point_,detuning_); last_cell_->integral(last_cell_->info().volume() * last_cell_->info().overestimate()); last_point_ = function_->parameter_point(); get_exponent(); return; } std::vector<double> themaxpoint = last_point_; std::set<std::vector<double> > id_points = parameter_points(); for ( std::set<std::vector<double> >::const_iterator id = id_points.begin(); id != id_points.end(); ++id ) { last_point_ = *id; get_exponent(); } std::map<bit_container<parameter_hash_bits>,linear_interpolator > old_exponents = exponents_; double old_oe = last_cell_->info().overestimate(); last_cell_->info().overestimate(last_value_,themaxpoint,detuning_); last_cell_->integral(last_cell_->info().volume() * last_cell_->info().overestimate()); exponents_.clear(); for ( std::set<std::vector<double> >::const_iterator id = id_points.begin(); id != id_points.end(); ++id ) { last_point_ = *id; get_exponent(); std::map<bit_container<parameter_hash_bits>,linear_interpolator >::iterator old_exp = old_exponents.find(last_parameter_bin_); std::map<bit_container<parameter_hash_bits>,linear_interpolator >::iterator new_exp = exponents_.find(last_parameter_bin_); assert(old_exp != old_exponents.end() && new_exp != exponents_.end()); double old_norm = 1. - std::exp(-(old_exp->second)(adaption_info_.lower_left[evolution_variable_])); double new_norm = 1. 
- std::exp(-(new_exp->second)(adaption_info_.lower_left[evolution_variable_])); for (binary_tree<cell>::iterator it = root_cell_.begin(); it != root_cell_.end(); ++it) { if ( !it->info().contains_parameter(last_point_,sample_variables_) ) continue; double old_int = 0.; double new_int = 0.; for ( std::vector<double>::const_iterator sp = parameter_splits_[evolution_variable_].begin(); sp != boost::prior(parameter_splits_[evolution_variable_].end()); ++sp ) { if ( *sp >= it->info().lower_left()[evolution_variable_] && *sp < it->info().upper_right()[evolution_variable_] ) { double xl = *sp; double xxl = *boost::next(sp); double old_al = (old_exp->second.interpolation()[xxl] - old_exp->second.interpolation()[xl]) / (xxl-xl); double old_bl = (xxl * old_exp->second.interpolation()[xl] - xl * old_exp->second.interpolation()[xxl]) / (xxl-xl); double new_al = (new_exp->second.interpolation()[xxl] - new_exp->second.interpolation()[xl]) / (xxl-xl); double new_bl = (xxl * new_exp->second.interpolation()[xl] - xl * new_exp->second.interpolation()[xxl]) / (xxl-xl); if ( std::abs(old_al) > std::numeric_limits<double>::epsilon() ) { old_int += (exp(-(old_al*xl+old_bl)) - exp(-(old_al*xxl+old_bl)))/old_al; } else { old_int += (xxl-xl)*exp(-old_bl); } if ( std::abs(new_al) > std::numeric_limits<double>::epsilon() ) { new_int += (exp(-(new_al*xl+new_bl)) - exp(-(new_al*xxl+new_bl)))/new_al; } else { new_int += (xxl-xl)*exp(-new_bl); } } } double scaling; if (it != last_cell_) { if (old_int > std::numeric_limits<double>::epsilon() && new_int > std::numeric_limits<double>::epsilon()) scaling = ((old_norm * new_int) / (new_norm * old_int)) - 1.; else scaling = 0.; } else { if (old_int > std::numeric_limits<double>::epsilon() && new_int > std::numeric_limits<double>::epsilon()) scaling = ((last_value_ * old_norm * new_int) / (old_oe * new_norm * old_int)) - 1.; else scaling = 0.; } it->info().parametric_missing(last_parameter_bin_, it->info().parametric_missing(last_parameter_bin_) + static_cast<int>(round(scaling * it->info().attempted()))); if (it->info().parametric_missing(last_parameter_bin_) != 0) { compensating_ = true; } } } last_point_ = function_->parameter_point(); } template<class Function, class Random> double exponential_generator<Function,Random>::generate () { if (compensating_) { compensating_ = false; for (binary_tree<cell>::iterator it = root_cell_.begin(); it != root_cell_.end(); ++it) if (it->info().parametric_compensating()) { compensating_ = true; break; } parametric_sampler_.compensate(compensating_); } last_point_ = function_->parameter_point(); if (last_point_[evolution_variable_] < evolution_cutoff_) { return 0.; } unsigned long n_hit_miss = 0; unsigned long n_select = 0; double minus_log_r; root_cell_.tree_accumulate(parametric_selector_,integral_accessor_,std::plus<double>()); get_exponent(); while (true) { n_select = 0; minus_log_r = -std::log(rnd_gen_()) + last_exponent_->second(last_point_[evolution_variable_]); if (!last_exponent_->second.invertible(minus_log_r)) { return 0.; } try { last_point_[evolution_variable_] = last_exponent_->second.unique_inverse(minus_log_r); } catch (constant_interpolation& c) { last_point_[evolution_variable_] = rnd_gen_(c.range.first,c.range.second); } - assert(!std::isnan(last_point_[evolution_variable_]) && - !std::isinf(last_point_[evolution_variable_])); + assert(isfinite(last_point_[evolution_variable_])); if (last_point_[evolution_variable_] < evolution_cutoff_) { return 0.; } ++attempts_; if (compensating_) { 
root_cell_.tree_accumulate(missing_accessor_,std::plus<int>()); } if (parameter_splits_[evolution_variable_].size() > 2) root_cell_.tree_accumulate(parametric_selector_,integral_accessor_,std::plus<double>()); if (did_split_) while ((last_cell_ = root_cell_.select(parametric_sampler_)) == root_cell_.end()) { root_cell_.tree_accumulate(missing_accessor_,std::plus<int>()); if(++n_select > adaption_info_.maxtry) throw selection_maxtry(); } else last_cell_ = root_cell_.begin(); last_cell_->info().select(rnd_gen_,last_point_,sample_other_variables_); last_value_ = function_->evaluate(last_point_); assert(last_value_ >= 0.); last_cell_->info().selected(last_point_,last_value_,adaption_info_); if (last_value_ > last_cell_->info().overestimate()) { if ( std::abs(last_value_)/last_cell_->info().overestimate() > 2. ) { last_value_ = last_cell_->info().overestimate()* (1.+exp(2.*(2.-std::abs(last_value_)/last_cell_->info().overestimate()))); } compensate(); throw exponential_regenerate(); } if (last_cell_->info().attempted() % check_events_ == 0) { if (split()) { throw exponential_regenerate(); } } if (last_value_/last_cell_->info().overestimate() > rnd_gen_()) { function_->accept(last_point_,last_value_,last_cell_->info().overestimate()); break; } if ( last_value_ != 0.0 ) { function_->veto(last_point_,last_value_,last_cell_->info().overestimate()); } if(++n_hit_miss > adaption_info_.maxtry) throw hit_and_miss_maxtry(); } if (last_value_ == 0.) return 0.; ++accepts_; ++check_events_; last_cell_->info().accept(); return 1.; } template<class Function, class Random> template<class OStream> void exponential_generator<Function,Random>::put (OStream& os) const { os << check_events_; ostream_traits<OStream>::separator(os); adaption_info_.put(os); root_cell_.put(os); os << did_split_; ostream_traits<OStream>::separator(os); os << initialized_; ostream_traits<OStream>::separator(os); os << evolution_variable_; ostream_traits<OStream>::separator(os); os << evolution_cutoff_; ostream_traits<OStream>::separator(os); os << sample_variables_; ostream_traits<OStream>::separator(os); os << sample_other_variables_; ostream_traits<OStream>::separator(os); os << parameter_splits_; ostream_traits<OStream>::separator(os); // last_cell_ is selected new so we ignore it here os << last_point_; ostream_traits<OStream>::separator(os); os << last_value_; ostream_traits<OStream>::separator(os); last_parameter_bin_.put(os); os << exponents_.size(); ostream_traits<OStream>::separator(os); for ( std::map<bit_container<parameter_hash_bits>,linear_interpolator >::const_iterator ex = exponents_.begin(); ex != exponents_.end() ; ++ex ) { ex->first.put(os); ex->second.put(os); } os << last_exponent_integrand_; ostream_traits<OStream>::separator(os); os << compensating_; ostream_traits<OStream>::separator(os); os << attempts_; ostream_traits<OStream>::separator(os); os << accepts_; ostream_traits<OStream>::separator(os); os << splits_; ostream_traits<OStream>::separator(os); os << docompensate_; ostream_traits<OStream>::separator(os); } template<class Function, class Random> template<class IStream> void exponential_generator<Function,Random>::get (IStream& is) { is >> check_events_; adaption_info_.get(is); root_cell_.get(is); is >> did_split_ >> initialized_ >> evolution_variable_ >> evolution_cutoff_ >> sample_variables_ >> sample_other_variables_ >> parameter_splits_; // last_cell_ is selected new so we ignore it here is >> last_point_ >> last_value_; last_parameter_bin_.get(is); size_t dim; is >> dim; for ( size_t k = 0; k < dim ; 
++k ) { bit_container<parameter_hash_bits> key; key.get(is); exponents_[key].get(is); } is >> last_exponent_integrand_; last_exponent_ = exponents_.find(last_parameter_bin_); is >> compensating_ >> attempts_ >> accepts_ >> splits_ >> docompensate_; } } diff --git a/Shower/Dipole/Base/DipoleSplittingGenerator.cc b/Shower/Dipole/Base/DipoleSplittingGenerator.cc --- a/Shower/Dipole/Base/DipoleSplittingGenerator.cc +++ b/Shower/Dipole/Base/DipoleSplittingGenerator.cc @@ -1,606 +1,606 @@ // -*- C++ -*- // // DipoleSplittingGenerator.cc is a part of Herwig - A multi-purpose Monte Carlo event generator // Copyright (C) 2002-2007 The Herwig Collaboration // // Herwig is licenced under version 2 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. // // // This is the implementation of the non-inlined, non-templated member // functions of the DipoleSplittingGenerator class. // #include <config.h> #include "DipoleSplittingGenerator.h" #include "ThePEG/Interface/ClassDocumentation.h" #include "ThePEG/Interface/Reference.h" #include "ThePEG/Repository/EventGenerator.h" #include "ThePEG/Persistency/PersistentOStream.h" #include "ThePEG/Persistency/PersistentIStream.h" #include "Herwig/Shower/Dipole/DipoleShowerHandler.h" using namespace Herwig; DipoleSplittingGenerator::DipoleSplittingGenerator() : HandlerBase(), theExponentialGenerator(0), prepared(false), presampling(false), theDoCompensate(false), theSplittingWeight(1.) { if ( ShowerHandler::currentHandler() ) setGenerator(ShowerHandler::currentHandler()->generator()); } DipoleSplittingGenerator::~DipoleSplittingGenerator() { if ( theExponentialGenerator ) { delete theExponentialGenerator; theExponentialGenerator = 0; } } IBPtr DipoleSplittingGenerator::clone() const { return new_ptr(*this); } IBPtr DipoleSplittingGenerator::fullclone() const { return new_ptr(*this); } void DipoleSplittingGenerator::wrap(Ptr<DipoleSplittingGenerator>::ptr other) { assert(!prepared); theOtherGenerator = other; } void DipoleSplittingGenerator::resetVariations() { for ( map<string,double>::iterator w = currentWeights.begin(); w != currentWeights.end(); ++w ) w->second = 1.; } void DipoleSplittingGenerator::veto(const vector<double>&, double p, double r) { double factor = 1.; if ( splittingReweight() ) { if ( ( ShowerHandler::currentHandler()->firstInteraction() && splittingReweight()->firstInteraction() ) || ( !ShowerHandler::currentHandler()->firstInteraction() && splittingReweight()->secondaryInteractions() ) ) { factor = splittingReweight()->evaluate(generatedSplitting); theSplittingWeight *= (r-factor*p)/(r-p); } } splittingKernel()->veto(generatedSplitting, factor*p, r, currentWeights); } void DipoleSplittingGenerator::accept(const vector<double>&, double p, double r) { double factor = 1.; if ( splittingReweight() ) { if ( ( ShowerHandler::currentHandler()->firstInteraction() && splittingReweight()->firstInteraction() ) || ( !ShowerHandler::currentHandler()->firstInteraction() && splittingReweight()->secondaryInteractions() ) ) { factor = splittingReweight()->evaluate(generatedSplitting); theSplittingWeight *= factor; } } splittingKernel()->accept(generatedSplitting, factor*p, r, currentWeights); } void DipoleSplittingGenerator::prepare(const DipoleSplittingInfo& sp) { generatedSplitting = sp; generatedSplitting.splittingKinematics(splittingKernel()->splittingKinematics()); generatedSplitting.splittingParameters().resize(splittingKernel()->nDimAdditional()); if ( wrapping() ) { 
generatedSplitting.emitterData(theSplittingKernel->emitter(generatedSplitting.index())); generatedSplitting.spectatorData(theSplittingKernel->spectator(generatedSplitting.index())); generatedSplitting.emissionData(theSplittingKernel->emission(generatedSplitting.index())); parameters.resize(theOtherGenerator->nDim()); prepared = true; return; } generatedSplitting.emitterData(splittingKernel()->emitter(generatedSplitting.index())); generatedSplitting.spectatorData(splittingKernel()->spectator(generatedSplitting.index())); generatedSplitting.emissionData(splittingKernel()->emission(generatedSplitting.index())); presampledSplitting = generatedSplitting; prepared = true; parameters.resize(nDim()); theExponentialGenerator = new exsample::exponential_generator<DipoleSplittingGenerator,UseRandom>(); theExponentialGenerator->sampling_parameters().maxtry = maxtry(); theExponentialGenerator->sampling_parameters().presampling_points = presamplingPoints(); theExponentialGenerator->sampling_parameters().freeze_grid = freezeGrid(); theExponentialGenerator->detuning(detuning()); theExponentialGenerator->docompensate(theDoCompensate); theExponentialGenerator->function(this); theExponentialGenerator->initialize(); } void DipoleSplittingGenerator::fixParameters(const DipoleSplittingInfo& sp, Energy optHardPt) { assert(generator()); assert(!presampling); assert(prepared); assert(sp.index() == generatedSplitting.index()); generatedSplitting.scale(sp.scale()); parameters[3] = sp.scale()/generator()->maximumCMEnergy(); generatedSplitting.hardPt(sp.hardPt()); parameters[0] = splittingKinematics()->ptToRandom(optHardPt == ZERO ? generatedSplitting.hardPt() : min(generatedSplitting.hardPt(),optHardPt), sp.scale(), sp.emitterX(), sp.spectatorX(), generatedSplitting.index(), *splittingKernel()); size_t shift = 4; if ( generatedSplitting.index().emitterPDF().pdf() && generatedSplitting.index().spectatorPDF().pdf() ) { generatedSplitting.emitterX(sp.emitterX()); generatedSplitting.spectatorX(sp.spectatorX()); parameters[4] = sp.emitterX(); parameters[5] = sp.spectatorX(); shift += 2; } if ( generatedSplitting.index().emitterPDF().pdf() && !generatedSplitting.index().spectatorPDF().pdf() ) { generatedSplitting.emitterX(sp.emitterX()); parameters[4] = sp.emitterX(); ++shift; } if ( !generatedSplitting.index().emitterPDF().pdf() && generatedSplitting.index().spectatorPDF().pdf() ) { generatedSplitting.spectatorX(sp.spectatorX()); parameters[4] = sp.spectatorX(); ++shift; } if ( splittingKernel()->nDimAdditional() ) copy(sp.lastSplittingParameters().begin(),sp.lastSplittingParameters().end(),parameters.begin()+shift); if ( sp.emitter() ) generatedSplitting.emitter(sp.emitter()); if ( sp.spectator() ) generatedSplitting.spectator(sp.spectator()); } int DipoleSplittingGenerator::nDim() const { assert(!wrapping()); assert(prepared); int ret = 4; // 0 pt, 1 z, 2 phi, 3 scale, 4/5 xs + parameters if ( generatedSplitting.index().emitterPDF().pdf() ) { ++ret; } if ( generatedSplitting.index().spectatorPDF().pdf() ) { ++ret; } ret += splittingKernel()->nDimAdditional(); return ret; } const vector<bool>& DipoleSplittingGenerator::sampleFlags() { assert(!wrapping()); if ( !theFlags.empty() ) return theFlags; theFlags.resize(nDim(),false); theFlags[0] = true; theFlags[1] = true; theFlags[2] = true; // 0 pt, 1 z, 2 phi return theFlags; } const pair<vector<double>,vector<double> >& DipoleSplittingGenerator::support() { assert(!wrapping()); if ( !theSupport.first.empty() ) return theSupport; vector<double> lower(nDim(),0.); 
vector<double> upper(nDim(),1.); pair<double,double> kSupport = generatedSplitting.splittingKinematics()->kappaSupport(generatedSplitting); pair<double,double> xSupport = generatedSplitting.splittingKinematics()->xiSupport(generatedSplitting); lower[0] = kSupport.first; lower[1] = xSupport.first; upper[0] = kSupport.second; upper[1] = xSupport.second; theSupport.first = lower; theSupport.second = upper; return theSupport; } void DipoleSplittingGenerator::startPresampling() { assert(!wrapping()); splittingKernel()->startPresampling(generatedSplitting.index()); presampling = true; } void DipoleSplittingGenerator::stopPresampling() { assert(!wrapping()); splittingKernel()->stopPresampling(generatedSplitting.index()); presampling = false; } bool DipoleSplittingGenerator::haveOverestimate() const { assert(!wrapping()); assert(prepared); return generatedSplitting.splittingKinematics()->haveOverestimate() && splittingKernel()->haveOverestimate(generatedSplitting); } bool DipoleSplittingGenerator::overestimate(const vector<double>& point) { assert(!wrapping()); assert(prepared); assert(!presampling); assert(haveOverestimate()); if ( ! generatedSplitting.splittingKinematics()->generateSplitting(point[0],point[1],point[2], generatedSplitting, *splittingKernel()) ) return 0.; generatedSplitting.splittingKinematics()->prepareSplitting(generatedSplitting); return ( generatedSplitting.splittingKinematics()->jacobianOverestimate() * splittingKernel()->overestimate(generatedSplitting) ); } double DipoleSplittingGenerator::invertOverestimateIntegral(double value) const { assert(!wrapping()); assert(prepared); assert(!presampling); assert(haveOverestimate()); return splittingKernel()->invertOverestimateIntegral(generatedSplitting,value); } double DipoleSplittingGenerator::evaluate(const vector<double>& point) { assert(!wrapping()); assert(prepared); assert(generator()); DipoleSplittingInfo& split = ( !presampling ? generatedSplitting : presampledSplitting ); split.continuesEvolving(); size_t shift = 4; if ( presampling ) { split.scale(point[3] * generator()->maximumCMEnergy()); if ( split.index().emitterPDF().pdf() && split.index().spectatorPDF().pdf() ) { split.emitterX(point[4]); split.spectatorX(point[5]); shift += 2; } if ( split.index().emitterPDF().pdf() && !split.index().spectatorPDF().pdf() ) { split.emitterX(point[4]); ++shift; } if ( !split.index().emitterPDF().pdf() && split.index().spectatorPDF().pdf() ) { split.spectatorX(point[4]); ++shift; } if ( splittingKernel()->nDimAdditional() ) copy(point.begin()+shift,point.end(),split.splittingParameters().begin()); split.hardPt(split.splittingKinematics()->ptMax(split.scale(), split.emitterX(), split.spectatorX(), split.index(), *splittingKernel())); } if ( ! 
split.splittingKinematics()->generateSplitting(point[0],point[1],point[2],split,*splittingKernel()) ) { split.lastValue(0.); return 0.; } split.splittingKinematics()->prepareSplitting(split); if ( split.stoppedEvolving() ) { split.lastValue(0.); return 0.; } if ( !presampling ) splittingKernel()->clearAlphaPDFCache(); double kernel = splittingKernel()->evaluate(split); double jac = split.splittingKinematics()->jacobian(); // multiply in the profile scales when relevant assert(ShowerHandler::currentHandler()); if ( ShowerHandler::currentHandler()->firstInteraction() && ShowerHandler::currentHandler()->profileScales() && !presampling ) { Energy hard = ShowerHandler::currentHandler()->hardScale(); if ( hard > ZERO ) kernel *= ShowerHandler::currentHandler()->profileScales()->hardScaleProfile(hard,split.lastPt()); } split.lastValue( abs(jac) * kernel ); - if ( isnan(split.lastValue()) || isinf(split.lastValue()) ) { + if ( ! isfinite(split.lastValue()) ) { generator()->log() << "DipoleSplittingGenerator:evaluate(): problematic splitting kernel encountered for " << splittingKernel()->name() << "\n" << flush; split.lastValue(0.0); } if ( kernel < 0. ) return 0.; return split.lastValue(); } void DipoleSplittingGenerator::doGenerate(map<string,double>& variations, Energy optCutoff) { assert(!wrapping()); double res = 0.; Energy startPt = generatedSplitting.hardPt(); double optKappaCutoff = 0.0; if ( optCutoff > splittingKinematics()->IRCutoff() ) { optKappaCutoff = splittingKinematics()->ptToRandom(optCutoff, generatedSplitting.scale(), generatedSplitting.emitterX(), generatedSplitting.spectatorX(), generatedSplitting.index(), *splittingKernel()); } resetVariations(); theSplittingWeight = 1.; while (true) { try { if ( optKappaCutoff == 0.0 ) { res = theExponentialGenerator->generate(); } else { res = theExponentialGenerator->generate(optKappaCutoff); } } catch (exsample::exponential_regenerate&) { resetVariations(); theSplittingWeight = 1.; generatedSplitting.hardPt(startPt); continue; } catch (exsample::hit_and_miss_maxtry&) { throw DipoleShowerHandler::RedoShower(); } catch (exsample::selection_maxtry&) { throw DipoleShowerHandler::RedoShower(); } break; } for ( map<string,double>::const_iterator w = currentWeights.begin(); w != currentWeights.end(); ++w ) { map<string,double>::iterator v = variations.find(w->first); if ( v != variations.end() ) v->second *= w->second; else variations[w->first] = w->second; } if ( res == 0. ) { generatedSplitting.lastPt(0.0*GeV); generatedSplitting.didStopEvolving(); } else { generatedSplitting.continuesEvolving(); if ( theMCCheck ) theMCCheck->book(generatedSplitting.emitterX(), generatedSplitting.spectatorX(), generatedSplitting.scale(), startPt, generatedSplitting.lastPt(), generatedSplitting.lastZ(), 1.); } } Energy DipoleSplittingGenerator::generate(const DipoleSplittingInfo& split, map<string,double>& variations, Energy optHardPt, Energy optCutoff) { fixParameters(split,optHardPt); if ( wrapping() ) { return theOtherGenerator->generateWrapped(generatedSplitting,variations,optHardPt,optCutoff); } doGenerate(variations,optCutoff); return generatedSplitting.lastPt(); } Energy DipoleSplittingGenerator::generateWrapped(DipoleSplittingInfo& split, map<string,double>& variations, Energy optHardPt, Energy optCutoff) { assert(!wrapping()); DipoleSplittingInfo backup = generatedSplitting; generatedSplitting = split; fixParameters(split,optHardPt); try { doGenerate(variations,optCutoff); } catch (...) 
{ split = generatedSplitting; generatedSplitting = backup; throw; } Energy pt = generatedSplitting.lastPt(); split = generatedSplitting; generatedSplitting = backup; return pt; } void DipoleSplittingGenerator::completeSplitting(DipoleSplittingInfo& sp) const { pair<bool,bool> conf = sp.configuration(); sp = generatedSplitting; sp.configuration(conf); } Ptr<DipoleSplittingKernel>::tptr DipoleSplittingGenerator::splittingKernel() const { if ( wrapping() ) return theOtherGenerator->splittingKernel(); return theSplittingKernel; } Ptr<DipoleSplittingReweight>::tptr DipoleSplittingGenerator::splittingReweight() const { if ( wrapping() ) return theOtherGenerator->splittingReweight(); return theSplittingReweight; } Ptr<DipoleSplittingKinematics>::tptr DipoleSplittingGenerator::splittingKinematics() const { if ( wrapping() ) return theOtherGenerator->splittingKinematics(); return theSplittingKernel->splittingKinematics(); } void DipoleSplittingGenerator::splittingKernel(Ptr<DipoleSplittingKernel>::tptr sp) { theSplittingKernel = sp; if ( theSplittingKernel->mcCheck() ) theMCCheck = theSplittingKernel->mcCheck(); } void DipoleSplittingGenerator::splittingReweight(Ptr<DipoleSplittingReweight>::tptr sp) { theSplittingReweight = sp; } void DipoleSplittingGenerator::debugGenerator(ostream& os) const { os << "--- DipoleSplittingGenerator ---------------------------------------------------\n"; os << " generating splittings using\n" << " splittingKernel = " << splittingKernel()->name() << " splittingKinematics = " << generatedSplitting.splittingKinematics()->name() << "\n" << " to sample splittings of type:\n"; os << generatedSplitting; os << "--------------------------------------------------------------------------------\n"; } void DipoleSplittingGenerator::debugLastEvent(ostream& os) const { os << "--- DipoleSplittingGenerator ---------------------------------------------------\n"; os << " last generated event:\n"; os << generatedSplitting; os << "--------------------------------------------------------------------------------\n"; } // If needed, insert default implementations of virtual function defined // in the InterfacedBase class here (using ThePEG-interfaced-impl in Emacs). void DipoleSplittingGenerator::persistentOutput(PersistentOStream & os) const { os << theOtherGenerator << theSplittingKernel << theSplittingReweight << theMCCheck << theDoCompensate; } void DipoleSplittingGenerator::persistentInput(PersistentIStream & is, int) { is >> theOtherGenerator >> theSplittingKernel >> theSplittingReweight >> theMCCheck >> theDoCompensate; } ClassDescription<DipoleSplittingGenerator> DipoleSplittingGenerator::initDipoleSplittingGenerator; // Definition of the static class description member. 
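// Init() below registers the repository interfaces of this generator: the
// DipoleSplittingKernel to sample from, an optional DipoleSplittingReweight and the optional MCCheck debug object.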
void DipoleSplittingGenerator::Init() { static ClassDocumentation<DipoleSplittingGenerator> documentation ("DipoleSplittingGenerator is used by the dipole shower " "to sample splittings from a given dipole splitting kernel."); static Reference<DipoleSplittingGenerator,DipoleSplittingKernel> interfaceSplittingKernel ("SplittingKernel", "Set the splitting kernel to sample from.", &DipoleSplittingGenerator::theSplittingKernel, false, false, true, false, false); static Reference<DipoleSplittingGenerator,DipoleSplittingReweight> interfaceSplittingReweight ("SplittingReweight", "Set the splitting reweight.", &DipoleSplittingGenerator::theSplittingReweight, false, false, true, true, false); static Reference<DipoleSplittingGenerator,DipoleMCCheck> interfaceMCCheck ("MCCheck", "[debug option] MCCheck", &DipoleSplittingGenerator::theMCCheck, false, false, true, true, false); interfaceMCCheck.rank(-1); } diff --git a/Shower/Dipole/DipoleShowerHandler.cc b/Shower/Dipole/DipoleShowerHandler.cc --- a/Shower/Dipole/DipoleShowerHandler.cc +++ b/Shower/Dipole/DipoleShowerHandler.cc @@ -1,999 +1,999 @@ // -*- C++ -*- // // DipoleShowerHandler.cc is a part of Herwig - A multi-purpose Monte Carlo event generator // Copyright (C) 2002-2007 The Herwig Collaboration // // Herwig is licenced under version 2 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. // // // This is the implementation of the non-inlined, non-templated member // functions of the DipoleShowerHandler class. // #include <config.h> #include "DipoleShowerHandler.h" #include "ThePEG/Interface/ClassDocumentation.h" #include "ThePEG/Interface/Reference.h" #include "ThePEG/Interface/RefVector.h" #include "ThePEG/Interface/Parameter.h" #include "ThePEG/Interface/Switch.h" #include "ThePEG/Persistency/PersistentOStream.h" #include "ThePEG/Persistency/PersistentIStream.h" #include "Herwig/PDF/MPIPDF.h" #include "Herwig/PDF/MinBiasPDF.h" #include "Herwig/PDF/HwRemDecayer.h" #include "Herwig/Shower/Dipole/Utility/DipolePartonSplitter.h" #include "Herwig/MatrixElement/Matchbox/Base/SubtractedME.h" #include "Herwig/MatrixElement/Matchbox/MatchboxFactory.h" using namespace Herwig; bool DipoleShowerHandler::firstWarn = true; DipoleShowerHandler::DipoleShowerHandler() : ShowerHandler(), chainOrderVetoScales(true), nEmissions(0), discardNoEmissions(false), firstMCatNLOEmission(false), realignmentScheme(0), verbosity(0), printEvent(0), nTries(0), didRadiate(false), didRealign(false), theRenormalizationScaleFreeze(1.*GeV), theFactorizationScaleFreeze(2.*GeV), theDoCompensate(false), theFreezeGrid(500000), theDetuning(1.0), maxPt(ZERO), muPt(ZERO) {} DipoleShowerHandler::~DipoleShowerHandler() {} IBPtr DipoleShowerHandler::clone() const { return new_ptr(*this); } IBPtr DipoleShowerHandler::fullclone() const { return new_ptr(*this); } tPPair DipoleShowerHandler::cascade(tSubProPtr sub, XCPtr, Energy optHardPt, Energy optCutoff) { useMe(); prepareCascade(sub); resetWeights(); if ( !doFSR() && ! 
doISR() ) return sub->incoming(); eventRecord().clear(); eventRecord().prepare(sub,dynamic_ptr_cast<tStdXCombPtr>(lastXCombPtr()),pdfs()); if ( eventRecord().outgoing().empty() && !doISR() ) return sub->incoming(); if ( !eventRecord().incoming().first->coloured() && !eventRecord().incoming().second->coloured() && !doFSR() ) return sub->incoming(); nTries = 0; while ( true ) { try { didRadiate = false; didRealign = false; if ( eventRecord().truncatedShower() ) { throw Exception() << "Inconsistent hard emission set-up in DipoleShowerHandler::cascade. " << "No truncated shower needed with DipoleShowerHandler. Add " << "'set MEMatching:TruncatedShower No' to input file." << Exception::runerror; } hardScales(lastXCombPtr()->lastShowerScale()); if ( verbosity > 1 ) { generator()->log() << "DipoleShowerHandler starting off:\n"; eventRecord().debugLastEvent(generator()->log()); generator()->log() << flush; } unsigned int nEmitted = 0; if ( firstMCatNLOEmission ) { if ( !eventRecord().isMCatNLOHEvent() ) nEmissions = 1; else nEmissions = 0; } if ( !firstMCatNLOEmission ) { doCascade(nEmitted,optHardPt,optCutoff); if ( discardNoEmissions ) { if ( !didRadiate ) throw Veto(); if ( nEmissions ) if ( nEmissions < nEmitted ) throw Veto(); } } else { if ( nEmissions == 1 ) doCascade(nEmitted,optHardPt,optCutoff); } if ( intrinsicPtGenerator ) { if ( eventRecord().incoming().first->coloured() && eventRecord().incoming().second->coloured() ) { SpinOneLorentzRotation rot = intrinsicPtGenerator->kick(eventRecord().incoming(), eventRecord().intermediates()); eventRecord().transform(rot); } } didRealign = realign(); constituentReshuffle(); break; } catch (RedoShower&) { resetWeights(); if ( ++nTries > maxtry() ) throw ShowerTriesVeto(maxtry()); eventRecord().clear(); eventRecord().prepare(sub,dynamic_ptr_cast<tStdXCombPtr>(lastXCombPtr()),pdfs()); continue; } catch (...) 
{ throw; } } return eventRecord().fillEventRecord(newStep(),firstInteraction(),didRealign); } void DipoleShowerHandler::constituentReshuffle() { if ( constituentReshuffler ) { constituentReshuffler->reshuffle(eventRecord().outgoing(), eventRecord().incoming(), eventRecord().intermediates()); } } void DipoleShowerHandler::hardScales(Energy2 muf) { maxPt = generator()->maximumCMEnergy(); if ( restrictPhasespace() ) { if ( !hardScaleIsMuF() || !firstInteraction() ) { if ( !eventRecord().outgoing().empty() ) { for ( PList::const_iterator p = eventRecord().outgoing().begin(); p != eventRecord().outgoing().end(); ++p ) maxPt = min(maxPt,(**p).momentum().mt()); } else { assert(!eventRecord().hard().empty()); Lorentz5Momentum phard(ZERO,ZERO,ZERO,ZERO); for ( PList::const_iterator p = eventRecord().hard().begin(); p != eventRecord().hard().end(); ++p ) phard += (**p).momentum(); Energy mhard = phard.m(); maxPt = mhard; } maxPt *= hardScaleFactor(); } else { maxPt = hardScaleFactor()*sqrt(muf); } muPt = maxPt; } else { muPt = hardScaleFactor()*sqrt(muf); } for ( list<DipoleChain>::iterator ch = eventRecord().chains().begin(); ch != eventRecord().chains().end(); ++ch ) { Energy minVetoScale = -1.*GeV; for ( list<Dipole>::iterator dip = ch->dipoles().begin(); dip != ch->dipoles().end(); ++dip ) { // max scale per config Energy maxFirst = 0.0*GeV; Energy maxSecond = 0.0*GeV; for ( vector<Ptr<DipoleSplittingKernel>::ptr>::iterator k = kernels.begin(); k != kernels.end(); ++k ) { pair<bool,bool> conf = make_pair(true,false); if ( (**k).canHandle(dip->index(conf)) ) { Energy scale = evolutionOrdering()->hardScale(dip->emitter(conf),dip->spectator(conf), dip->emitterX(conf),dip->spectatorX(conf), **k,dip->index(conf)); maxFirst = max(maxFirst,scale); } conf = make_pair(false,true); if ( (**k).canHandle(dip->index(conf)) ) { Energy scale = evolutionOrdering()->hardScale(dip->emitter(conf),dip->spectator(conf), dip->emitterX(conf),dip->spectatorX(conf), **k,dip->index(conf)); maxSecond = max(maxSecond,scale); } } if ( dip->leftParticle()->vetoScale() >= ZERO ) { maxFirst = min(maxFirst,sqrt(dip->leftParticle()->vetoScale())); if ( minVetoScale >= ZERO ) minVetoScale = min(minVetoScale,sqrt(dip->leftParticle()->vetoScale())); else minVetoScale = sqrt(dip->leftParticle()->vetoScale()); } if ( dip->rightParticle()->vetoScale() >= ZERO ) { maxSecond = min(maxSecond,sqrt(dip->rightParticle()->vetoScale())); if ( minVetoScale >= ZERO ) minVetoScale = min(minVetoScale,sqrt(dip->rightParticle()->vetoScale())); else minVetoScale = sqrt(dip->rightParticle()->vetoScale()); } maxFirst = min(maxPt,maxFirst); dip->emitterScale(make_pair(true,false),maxFirst); maxSecond = min(maxPt,maxSecond); dip->emitterScale(make_pair(false,true),maxSecond); } if ( !evolutionOrdering()->independentDipoles() && chainOrderVetoScales && minVetoScale >= ZERO ) { for ( list<Dipole>::iterator dip = ch->dipoles().begin(); dip != ch->dipoles().end(); ++dip ) { dip->leftScale(min(dip->leftScale(),minVetoScale)); dip->rightScale(min(dip->rightScale(),minVetoScale)); } } } } Energy DipoleShowerHandler::getWinner(DipoleSplittingInfo& winner, const Dipole& dip, pair<bool,bool> conf, Energy optHardPt, Energy optCutoff) { return getWinner(winner,dip.index(conf), dip.emitterX(conf),dip.spectatorX(conf), conf,dip.emitter(conf),dip.spectator(conf), dip.emitterScale(conf),optHardPt,optCutoff); } Energy DipoleShowerHandler::getWinner(SubleadingSplittingInfo& winner, Energy optHardPt, Energy optCutoff) { return getWinner(winner,winner.index(), 
winner.emitterX(),winner.spectatorX(), winner.configuration(), winner.emitter(),winner.spectator(), winner.startScale(),optHardPt,optCutoff); }
Energy DipoleShowerHandler::getWinner(DipoleSplittingInfo& winner, const DipoleIndex& index, double emitterX, double spectatorX, pair<bool,bool> conf, tPPtr emitter, tPPtr spectator, Energy startScale, Energy optHardPt, Energy optCutoff) { if ( !index.initialStateEmitter() && !doFSR() ) { winner.didStopEvolving(); return 0.0*GeV; } if ( index.initialStateEmitter() && !doISR() ) { winner.didStopEvolving(); return 0.0*GeV; } DipoleSplittingInfo candidate; candidate.index(index); candidate.configuration(conf); candidate.emitterX(emitterX); candidate.spectatorX(spectatorX); candidate.emitter(emitter); candidate.spectator(spectator); if ( generators().find(candidate.index()) == generators().end() ) getGenerators(candidate.index(),theSplittingReweight);
// // NOTE -- needs proper fixing at some point // // For some very strange reason, equal_range gives back // key ranges it hasn't been asked for. This particularly // happens e.g. for FI dipoles of the same kind, but different // PDF (hard vs MPI PDF). I can't see a reason for this, // as DipoleIndex properly implements comparison for equality // and (lexicographic) ordering; for the time being, we // use equal_range, extended by an explicit check for whether // the key is indeed what we wanted. See line after (*) comment // below. //
pair<GeneratorMap::iterator,GeneratorMap::iterator> gens = generators().equal_range(candidate.index()); Energy winnerScale = 0.0*GeV; GeneratorMap::iterator winnerGen = generators().end(); for ( GeneratorMap::iterator gen = gens.first; gen != gens.second; ++gen ) { // (*) see NOTE above if ( !(gen->first == candidate.index()) ) continue; if ( startScale <= gen->second->splittingKinematics()->IRCutoff() ) continue; Energy dScale = gen->second->splittingKinematics()->dipoleScale(emitter->momentum(), spectator->momentum()); // in very exceptional cases happening in DIS - if ( isnan( dScale.rawValue() ) ) + if ( std::isnan( dScale.rawValue() ) ) throw RedoShower(); candidate.scale(dScale); candidate.continuesEvolving(); Energy hardScale = evolutionOrdering()->maxPt(startScale,candidate,*(gen->second->splittingKernel())); Energy maxPossible = gen->second->splittingKinematics()->ptMax(candidate.scale(), candidate.emitterX(), candidate.spectatorX(), candidate.index(), *gen->second->splittingKernel()); Energy ircutoff = optCutoff < gen->second->splittingKinematics()->IRCutoff() ?
gen->second->splittingKinematics()->IRCutoff() : optCutoff; if ( maxPossible <= ircutoff ) { continue; } if ( maxPossible >= hardScale ) candidate.hardPt(hardScale); else { hardScale = maxPossible; candidate.hardPt(maxPossible); } gen->second->generate(candidate,currentWeights(),optHardPt,optCutoff); Energy nextScale = evolutionOrdering()->evolutionScale(gen->second->lastSplitting(),*(gen->second->splittingKernel())); if ( nextScale > winnerScale ) { winner.fill(candidate); gen->second->completeSplitting(winner); winnerGen = gen; winnerScale = nextScale; } reweight(reweight() * gen->second->splittingWeight()); } if ( winnerGen == generators().end() ) { winner.didStopEvolving(); return 0.0*GeV; } if ( winner.stoppedEvolving() ) return 0.0*GeV; return winnerScale; } void DipoleShowerHandler::doCascade(unsigned int& emDone, Energy optHardPt, Energy optCutoff) { if ( nEmissions ) if ( emDone == nEmissions ) return; DipoleSplittingInfo winner; DipoleSplittingInfo dipoleWinner; while ( eventRecord().haveChain() ) { if ( verbosity > 2 ) { generator()->log() << "DipoleShowerHandler selecting splittings for the chain:\n" << eventRecord().currentChain() << flush; } list<Dipole>::iterator winnerDip = eventRecord().currentChain().dipoles().end(); Energy winnerScale = 0.0*GeV; Energy nextLeftScale = 0.0*GeV; Energy nextRightScale = 0.0*GeV; for ( list<Dipole>::iterator dip = eventRecord().currentChain().dipoles().begin(); dip != eventRecord().currentChain().dipoles().end(); ++dip ) { nextLeftScale = getWinner(dipoleWinner,*dip,make_pair(true,false),optHardPt,optCutoff); if ( nextLeftScale > winnerScale ) { winnerScale = nextLeftScale; winner = dipoleWinner; winnerDip = dip; } nextRightScale = getWinner(dipoleWinner,*dip,make_pair(false,true),optHardPt,optCutoff); if ( nextRightScale > winnerScale ) { winnerScale = nextRightScale; winner = dipoleWinner; winnerDip = dip; } if ( evolutionOrdering()->independentDipoles() ) { Energy dipScale = max(nextLeftScale,nextRightScale); if ( dip->leftScale() > dipScale ) dip->leftScale(dipScale); if ( dip->rightScale() > dipScale ) dip->rightScale(dipScale); } } if ( verbosity > 1 ) { if ( winnerDip != eventRecord().currentChain().dipoles().end() ) generator()->log() << "DipoleShowerHandler selected the splitting:\n" << winner << " for the dipole\n" << (*winnerDip) << flush; else generator()->log() << "DipoleShowerHandler could not select a splitting above the IR cutoff\n" << flush; } // pop the chain if no dipole did radiate if ( winnerDip == eventRecord().currentChain().dipoles().end() ) { eventRecord().popChain(); if ( theEventReweight && eventRecord().chains().empty() ) if ( (theEventReweight->firstInteraction() && firstInteraction()) || (theEventReweight->secondaryInteractions() && !firstInteraction()) ) { double w = theEventReweight->weightCascade(eventRecord().incoming(), eventRecord().outgoing(), eventRecord().hard(),theGlobalAlphaS); reweight(reweight()*w); } continue; } // otherwise perform the splitting didRadiate = true; eventRecord().isMCatNLOSEvent(false); eventRecord().isMCatNLOHEvent(false); pair<list<Dipole>::iterator,list<Dipole>::iterator> children; DipoleChain* firstChain = 0; DipoleChain* secondChain = 0; eventRecord().split(winnerDip,winner,children,firstChain,secondChain); assert(firstChain && secondChain); evolutionOrdering()->setEvolutionScale(winnerScale,winner,*firstChain,children); if ( !secondChain->dipoles().empty() ) evolutionOrdering()->setEvolutionScale(winnerScale,winner,*secondChain,children); if ( verbosity > 1 ) { 
generator()->log() << "DipoleShowerHandler did split the last selected dipole into:\n" << (*children.first) << (*children.second) << flush; } if ( verbosity > 2 ) { generator()->log() << "After splitting the last selected dipole, " << "DipoleShowerHandler encountered the following chains:\n" << (*firstChain) << (*secondChain) << flush; } if ( theEventReweight ) if ( (theEventReweight->firstInteraction() && firstInteraction()) || (theEventReweight->secondaryInteractions() && !firstInteraction()) ) { double w = theEventReweight->weight(eventRecord().incoming(), eventRecord().outgoing(), eventRecord().hard(),theGlobalAlphaS); reweight(reweight()*w); } if ( nEmissions ) if ( ++emDone == nEmissions ) return; } } bool DipoleShowerHandler::realign() { if ( !didRadiate && !intrinsicPtGenerator ) return false; if ( eventRecord().incoming().first->coloured() || eventRecord().incoming().second->coloured() ) { if ( eventRecord().incoming().first->momentum().perp2()/GeV2 < 1e-10 && eventRecord().incoming().second->momentum().perp2()/GeV2 < 1e-10 ) return false; pair<Lorentz5Momentum,Lorentz5Momentum> inMomenta (eventRecord().incoming().first->momentum(), eventRecord().incoming().second->momentum()); SpinOneLorentzRotation transform((inMomenta.first+inMomenta.second).findBoostToCM()); Axis dir = (transform * inMomenta.first).vect().unit(); Axis rot (-dir.y(),dir.x(),0); double theta = dir.theta(); if ( lastParticles().first->momentum().z() < ZERO ) theta = -theta; transform.rotate(-theta,rot); inMomenta.first = transform*inMomenta.first; inMomenta.second = transform*inMomenta.second; assert(inMomenta.first.z() > ZERO && inMomenta.second.z() < ZERO); Energy2 sHat = (eventRecord().incoming().first->momentum() + eventRecord().incoming().second->momentum()).m2(); pair<Energy,Energy> masses(eventRecord().incoming().first->mass(), eventRecord().incoming().second->mass()); pair<Energy,Energy> qs; if ( !eventRecord().incoming().first->coloured() ) { assert(masses.second == ZERO); qs.first = eventRecord().incoming().first->momentum().z(); qs.second = (sHat-sqr(masses.first))/(2.*(qs.first+sqrt(sqr(masses.first)+sqr(qs.first)))); } else if ( !eventRecord().incoming().second->coloured() ) { assert(masses.first == ZERO); qs.second = eventRecord().incoming().second->momentum().z(); qs.first = (sHat-sqr(masses.second))/(2.*(qs.second+sqrt(sqr(masses.second)+sqr(qs.second)))); } else { assert(masses.first == ZERO && masses.second == ZERO); if ( realignmentScheme == 0 ) { double yX = eventRecord().pX().rapidity(); double yInt = (transform*eventRecord().pX()).rapidity(); double dy = yX-yInt; qs.first = (sqrt(sHat)/2.)*exp(dy); qs.second = (sqrt(sHat)/2.)*exp(-dy); } else if ( realignmentScheme == 1 ) { Energy sS = sqrt((lastParticles().first->momentum() + lastParticles().second->momentum()).m2()); qs.first = eventRecord().fractions().first * sS / 2.; qs.second = eventRecord().fractions().second * sS / 2.; } } double beta = (qs.first-qs.second) / ( sqrt(sqr(masses.first)+sqr(qs.first)) + sqrt(sqr(masses.second)+sqr(qs.second)) ); transform.boostZ(beta); Lorentz5Momentum tmp; if ( eventRecord().incoming().first->coloured() ) { tmp = eventRecord().incoming().first->momentum(); tmp = transform * tmp; eventRecord().incoming().first->set5Momentum(tmp); } if ( eventRecord().incoming().second->coloured() ) { tmp = eventRecord().incoming().second->momentum(); tmp = transform * tmp; eventRecord().incoming().second->set5Momentum(tmp); } eventRecord().transform(transform); return true; } return false; } void 
DipoleShowerHandler::resetAlphaS(Ptr<AlphaSBase>::tptr as) { for ( vector<Ptr<DipoleSplittingKernel>::ptr>::iterator k = kernels.begin(); k != kernels.end(); ++k ) { if ( !(**k).alphaS() ) (**k).alphaS(as); (**k).renormalizationScaleFreeze(theRenormalizationScaleFreeze); (**k).factorizationScaleFreeze(theFactorizationScaleFreeze); } // clear the generators to be rebuilt // actually, there shouldn't be any generators // when this happens. generators().clear(); }
void DipoleShowerHandler::resetReweight(Ptr<DipoleSplittingReweight>::tptr rw) { for ( GeneratorMap::iterator k = generators().begin(); k != generators().end(); ++k ) k->second->splittingReweight(rw); }
void DipoleShowerHandler::getGenerators(const DipoleIndex& ind, Ptr<DipoleSplittingReweight>::tptr rw) { bool gotone = false; for ( vector<Ptr<DipoleSplittingKernel>::ptr>::iterator k = kernels.begin(); k != kernels.end(); ++k ) { if ( (**k).canHandle(ind) ) { if ( verbosity > 0 ) { generator()->log() << "DipoleShowerHandler encountered the dipole configuration\n" << ind << " in event number " << eventHandler()->currentEvent()->number() << "\nwhich can be handled by the splitting kernel '" << (**k).name() << "'.\n" << flush; } gotone = true; Ptr<DipoleSplittingGenerator>::ptr nGenerator = new_ptr(DipoleSplittingGenerator()); nGenerator->doCompensate(theDoCompensate); nGenerator->splittingKernel(*k); if ( renormalizationScaleFactor() != 1. ) nGenerator->splittingKernel()->renormalizationScaleFactor(renormalizationScaleFactor()); if ( factorizationScaleFactor() != 1. ) nGenerator->splittingKernel()->factorizationScaleFactor(factorizationScaleFactor()); if ( !nGenerator->splittingReweight() ) nGenerator->splittingReweight(rw); nGenerator->splittingKernel()->freezeGrid(theFreezeGrid); nGenerator->splittingKernel()->detuning(theDetuning); GeneratorMap::const_iterator equivalent = generators().end(); for ( GeneratorMap::const_iterator eq = generators().begin(); eq != generators().end(); ++eq ) { if ( !eq->second->wrapping() ) if ( (**k).canHandleEquivalent(ind,*(eq->second->splittingKernel()),eq->first) ) { equivalent = eq; if ( verbosity > 0 ) { generator()->log() << "The dipole configuration " << ind << " can equivalently be handled by the existing\n" << "generator for configuration " << eq->first << " using the kernel '" << eq->second->splittingKernel()->name() << "'\n" << flush; } break; } } if ( equivalent != generators().end() ) { nGenerator->wrap(equivalent->second); } DipoleSplittingInfo dummy; dummy.index(ind); nGenerator->prepare(dummy); generators().insert(make_pair(ind,nGenerator)); } } if ( !gotone ) { generator()->logWarning(Exception() << "DipoleShowerHandler could not " << "find a splitting kernel which is able " << "to handle splittings off the dipole " << ind << ".\n" << "Please check the input files." << Exception::warning); } }
// If needed, insert default implementations of virtual function defined // in the InterfacedBase class here (using ThePEG-interfaced-impl in Emacs).
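// doinit() below propagates a globally chosen strong coupling, if one was set through
// the GlobalAlphaS interface, to all splitting kernels which do not carry their own.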
void DipoleShowerHandler::doinit() { ShowerHandler::doinit(); if ( theGlobalAlphaS ) resetAlphaS(theGlobalAlphaS); }
void DipoleShowerHandler::dofinish() { ShowerHandler::dofinish(); }
void DipoleShowerHandler::doinitrun() { ShowerHandler::doinitrun(); }
void DipoleShowerHandler::persistentOutput(PersistentOStream & os) const { os << kernels << theEvolutionOrdering << constituentReshuffler << intrinsicPtGenerator << theGlobalAlphaS << chainOrderVetoScales << nEmissions << discardNoEmissions << firstMCatNLOEmission << realignmentScheme << verbosity << printEvent << ounit(theRenormalizationScaleFreeze,GeV) << ounit(theFactorizationScaleFreeze,GeV) << theShowerApproximation << theDoCompensate << theFreezeGrid << theDetuning << theEventReweight << theSplittingReweight << ounit(maxPt,GeV) << ounit(muPt,GeV); }
void DipoleShowerHandler::persistentInput(PersistentIStream & is, int) { is >> kernels >> theEvolutionOrdering >> constituentReshuffler >> intrinsicPtGenerator >> theGlobalAlphaS >> chainOrderVetoScales >> nEmissions >> discardNoEmissions >> firstMCatNLOEmission >> realignmentScheme >> verbosity >> printEvent >> iunit(theRenormalizationScaleFreeze,GeV) >> iunit(theFactorizationScaleFreeze,GeV) >> theShowerApproximation >> theDoCompensate >> theFreezeGrid >> theDetuning >> theEventReweight >> theSplittingReweight >> iunit(maxPt,GeV) >> iunit(muPt,GeV); }
ClassDescription<DipoleShowerHandler> DipoleShowerHandler::initDipoleShowerHandler; // Definition of the static class description member.
void DipoleShowerHandler::Init() { static ClassDocumentation<DipoleShowerHandler> documentation ("The DipoleShowerHandler class manages the showering using " "the dipole shower algorithm.", "The shower evolution was performed using the algorithm described in " "\\cite{Platzer:2009jq} and \\cite{Platzer:2011bc}.", "%\\cite{Platzer:2009jq}\n" "\\bibitem{Platzer:2009jq}\n" "S.~Platzer and S.~Gieseke,\n" "``Coherent Parton Showers with Local Recoils,''\n" " JHEP {\\bf 1101}, 024 (2011)\n" "arXiv:0909.5593 [hep-ph].\n" "%%CITATION = ARXIV:0909.5593;%%\n" "%\\cite{Platzer:2011bc}\n" "\\bibitem{Platzer:2011bc}\n" "S.~Platzer and S.~Gieseke,\n" "``Dipole Showers and Automated NLO Matching in Herwig,''\n" "arXiv:1109.6256 [hep-ph].\n" "%%CITATION = ARXIV:1109.6256;%%");
static RefVector<DipoleShowerHandler,DipoleSplittingKernel> interfaceKernels ("Kernels", "Set the splitting kernels to be used by the dipole shower.", &DipoleShowerHandler::kernels, -1, false, false, true, false, false);
static Reference<DipoleShowerHandler,DipoleEvolutionOrdering> interfaceEvolutionOrdering ("EvolutionOrdering", "Set the evolution ordering to be used.", &DipoleShowerHandler::theEvolutionOrdering, false, false, true, false, false);
static Reference<DipoleShowerHandler,ConstituentReshuffler> interfaceConstituentReshuffler ("ConstituentReshuffler", "The object to be used to reshuffle partons to their constituent mass shells.", &DipoleShowerHandler::constituentReshuffler, false, false, true, true, false);
static Reference<DipoleShowerHandler,IntrinsicPtGenerator> interfaceIntrinsicPtGenerator ("IntrinsicPtGenerator", "Set the object in charge of generating intrinsic pt for incoming partons.", &DipoleShowerHandler::intrinsicPtGenerator, false, false, true, true, false);
static Reference<DipoleShowerHandler,AlphaSBase> interfaceGlobalAlphaS ("GlobalAlphaS", "Set a global strong coupling for all splitting kernels.", &DipoleShowerHandler::theGlobalAlphaS, false, false, true, true, false);
static Switch<DipoleShowerHandler,int>
interfaceRealignmentScheme ("RealignmentScheme", "The realignment scheme to use.", &DipoleShowerHandler::realignmentScheme, 0, false, false); static SwitchOption interfaceRealignmentSchemePreserveRapidity (interfaceRealignmentScheme, "PreserveRapidity", "Preserve the rapidity of non-coloured outgoing system.", 0); static SwitchOption interfaceRealignmentSchemeEvolutionFractions (interfaceRealignmentScheme, "EvolutionFractions", "Use momentum fractions as generated by the evolution.", 1); static SwitchOption interfaceRealignmentSchemeCollisionFrame (interfaceRealignmentScheme, "CollisionFrame", "Determine realignment from collision frame.", 2); static Switch<DipoleShowerHandler,bool> interfaceChainOrderVetoScales ("ChainOrderVetoScales", "[experimental] Switch on or off the chain ordering for veto scales.", &DipoleShowerHandler::chainOrderVetoScales, true, false, false); static SwitchOption interfaceChainOrderVetoScalesOn (interfaceChainOrderVetoScales, "On", "Switch on chain ordering for veto scales.", true); static SwitchOption interfaceChainOrderVetoScalesOff (interfaceChainOrderVetoScales, "Off", "Switch off chain ordering for veto scales.", false); interfaceChainOrderVetoScales.rank(-1); static Parameter<DipoleShowerHandler,unsigned int> interfaceNEmissions ("NEmissions", "[debug option] Limit the number of emissions to be generated. Zero does not limit the number of emissions.", &DipoleShowerHandler::nEmissions, 0, 0, 0, false, false, Interface::lowerlim); interfaceNEmissions.rank(-1); static Switch<DipoleShowerHandler,bool> interfaceDiscardNoEmissions ("DiscardNoEmissions", "[debug option] Discard events without radiation.", &DipoleShowerHandler::discardNoEmissions, false, false, false); static SwitchOption interfaceDiscardNoEmissionsOn (interfaceDiscardNoEmissions, "On", "Discard events without radiation.", true); static SwitchOption interfaceDiscardNoEmissionsOff (interfaceDiscardNoEmissions, "Off", "Do not discard events without radiation.", false); interfaceDiscardNoEmissions.rank(-1); static Switch<DipoleShowerHandler,bool> interfaceFirstMCatNLOEmission ("FirstMCatNLOEmission", "[debug option] Only perform the first MC@NLO emission.", &DipoleShowerHandler::firstMCatNLOEmission, false, false, false); static SwitchOption interfaceFirstMCatNLOEmissionOn (interfaceFirstMCatNLOEmission, "On", "", true); static SwitchOption interfaceFirstMCatNLOEmissionOff (interfaceFirstMCatNLOEmission, "Off", "", false); interfaceFirstMCatNLOEmission.rank(-1); static Parameter<DipoleShowerHandler,int> interfaceVerbosity ("Verbosity", "[debug option] Set the level of debug information provided.", &DipoleShowerHandler::verbosity, 0, 0, 0, false, false, Interface::lowerlim); interfaceVerbosity.rank(-1); static Parameter<DipoleShowerHandler,int> interfacePrintEvent ("PrintEvent", "[debug option] The number of events for which debugging information should be provided.", &DipoleShowerHandler::printEvent, 0, 0, 0, false, false, Interface::lowerlim); interfacePrintEvent.rank(-1); static Parameter<DipoleShowerHandler,Energy> interfaceRenormalizationScaleFreeze ("RenormalizationScaleFreeze", "The freezing scale for the renormalization scale.", &DipoleShowerHandler::theRenormalizationScaleFreeze, GeV, 1.0*GeV, 0.0*GeV, 0*GeV, false, false, Interface::lowerlim); static Parameter<DipoleShowerHandler,Energy> interfaceFactorizationScaleFreeze ("FactorizationScaleFreeze", "The freezing scale for the factorization scale.", &DipoleShowerHandler::theFactorizationScaleFreeze, GeV, 2.0*GeV, 0.0*GeV, 0*GeV, false, false, 
Interface::lowerlim); static Switch<DipoleShowerHandler,bool> interfaceDoCompensate ("DoCompensate", "", &DipoleShowerHandler::theDoCompensate, false, false, false); static SwitchOption interfaceDoCompensateYes (interfaceDoCompensate, "Yes", "", true); static SwitchOption interfaceDoCompensateNo (interfaceDoCompensate, "No", "", false); static Parameter<DipoleShowerHandler,unsigned long> interfaceFreezeGrid ("FreezeGrid", "", &DipoleShowerHandler::theFreezeGrid, 500000, 1, 0, false, false, Interface::lowerlim); static Parameter<DipoleShowerHandler,double> interfaceDetuning ("Detuning", "A value to detune the overestimate kernel.", &DipoleShowerHandler::theDetuning, 1.0, 1.0, 0, false, false, Interface::lowerlim); static Reference<DipoleShowerHandler,DipoleEventReweight> interfaceEventReweight ("EventReweight", "", &DipoleShowerHandler::theEventReweight, false, false, true, true, false); static Reference<DipoleShowerHandler,DipoleSplittingReweight> interfaceSplittingReweight ("SplittingReweight", "Set the splitting reweight.", &DipoleShowerHandler::theSplittingReweight, false, false, true, true, false); } diff --git a/Shower/QTilde/Default/QTildeReconstructor.cc b/Shower/QTilde/Default/QTildeReconstructor.cc --- a/Shower/QTilde/Default/QTildeReconstructor.cc +++ b/Shower/QTilde/Default/QTildeReconstructor.cc @@ -1,2923 +1,2923 @@ // -*- C++ -*- // // QTildeReconstructor.cc is a part of Herwig - A multi-purpose Monte Carlo event generator // Copyright (C) 2002-2011 The Herwig Collaboration // // Herwig is licenced under version 2 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. // // // This is the implementation of the non-inlined, non-templated member // functions of the QTildeReconstructor class. 
// #include "QTildeReconstructor.h" #include "ThePEG/PDT/EnumParticles.h" #include "ThePEG/Repository/EventGenerator.h" #include "ThePEG/EventRecord/Event.h" #include "ThePEG/Interface/Parameter.h" #include "ThePEG/Interface/Switch.h" #include "ThePEG/Interface/ClassDocumentation.h" #include "ThePEG/Interface/RefVector.h" #include "Herwig/Shower/QTilde/Base/PartnerFinder.h" #include "ThePEG/Persistency/PersistentOStream.h" #include "ThePEG/Persistency/PersistentIStream.h" #include "Herwig/Shower/QTilde/SplittingFunctions/SplittingFunction.h" #include "ThePEG/Repository/UseRandom.h" #include "ThePEG/EventRecord/ColourLine.h" #include "ThePEG/Utilities/DescribeClass.h" #include "Herwig/Shower/QTilde/QTildeShowerHandler.h" #include <cassert>
using namespace Herwig;
DescribeClass<QTildeReconstructor,KinematicsReconstructor> describeQTildeReconstructor("Herwig::QTildeReconstructor", "HwShower.so");
namespace { /** * Struct to order the jets in off-shellness */ struct JetOrdering { bool operator() (const JetKinStruct & j1, const JetKinStruct & j2) { Energy diff1 = j1.q.m()-j1.p.m(); Energy diff2 = j2.q.m()-j2.p.m(); if(diff1!=diff2) { return diff1>diff2; } else if( j1.q.e() != j2.q.e() ) return j1.q.e()>j2.q.e(); else return j1.parent->uniqueId>j2.parent->uniqueId; } }; }
void QTildeReconstructor::persistentOutput(PersistentOStream & os) const { os << _reconopt << _initialBoost << ounit(_minQ,GeV) << _noRescale << _noRescaleVector << _finalStateReconOption << _initialStateReconOption; }
void QTildeReconstructor::persistentInput(PersistentIStream & is, int) { is >> _reconopt >> _initialBoost >> iunit(_minQ,GeV) >> _noRescale >> _noRescaleVector >> _finalStateReconOption >> _initialStateReconOption; }
void QTildeReconstructor::Init() { static ClassDocumentation<QTildeReconstructor> documentation ( "This class is responsible for the kinematics reconstruction of the showering,", " including the kinematics reshuffling necessary to compensate for the recoil" " of the emissions." );
static Switch<QTildeReconstructor,unsigned int> interfaceReconstructionOption ("ReconstructionOption", "Option for the kinematics reconstruction", &QTildeReconstructor::_reconopt, 0, false, false);
static SwitchOption interfaceReconstructionOptionGeneral (interfaceReconstructionOption, "General", "Use the general solution which ignores the colour structure for all processes", 0);
static SwitchOption interfaceReconstructionOptionColour (interfaceReconstructionOption, "Colour", "Use the colour structure of the process to determine the reconstruction procedure.", 1);
static SwitchOption interfaceReconstructionOptionColour2 (interfaceReconstructionOption, "Colour2", "Make the most use possible of the colour structure of the process to determine the reconstruction procedure. " "Start with FF, then IF then II colour connections", 2);
static SwitchOption interfaceReconstructionOptionColour3 (interfaceReconstructionOption, "Colour3", "Make the most use possible of the colour structure of the process to determine the reconstruction procedure. " "Do the colour connections in order of the pT's emitted in the shower starting with the hardest." " The colour partner is fully reconstructed at the same time.", 3);
static SwitchOption interfaceReconstructionOptionColour4 (interfaceReconstructionOption, "Colour4", "Make the most use possible of the colour structure of the process to determine the reconstruction procedure. " "Do the colour connections in order of the pT's emitted in the shower starting with the hardest, while leaving" " the colour partner on mass-shell", 4);
static Parameter<QTildeReconstructor,Energy> interfaceMinimumQ2 ("MinimumQ2", "The minimum Q2 for the reconstruction of initial-final systems", &QTildeReconstructor::_minQ, GeV, 0.001*GeV, 1e-6*GeV, 10.0*GeV, false, false, Interface::limited);
static RefVector<QTildeReconstructor,ParticleData> interfaceNoRescale ("NoRescale", "Particles which shouldn't be rescaled to be on shell by the shower", &QTildeReconstructor::_noRescaleVector, -1, false, false, true, false, false);
static Switch<QTildeReconstructor,unsigned int> interfaceInitialInitialBoostOption ("InitialInitialBoostOption", "Option for how the boost from the system before ISR to that after ISR is applied.", &QTildeReconstructor::_initialBoost, 0, false, false);
static SwitchOption interfaceInitialInitialBoostOptionOneBoost (interfaceInitialInitialBoostOption, "OneBoost", "Apply one boost from old CMS to new CMS", 0);
static SwitchOption interfaceInitialInitialBoostOptionLongTransBoost (interfaceInitialInitialBoostOption, "LongTransBoost", "First apply a longitudinal and then a transverse boost", 1);
static Switch<QTildeReconstructor,unsigned int> interfaceFinalStateReconOption ("FinalStateReconOption", "Option for how to reconstruct the momenta of the final-state system", &QTildeReconstructor::_finalStateReconOption, 0, false, false);
static SwitchOption interfaceFinalStateReconOptionDefault (interfaceFinalStateReconOption, "Default", "All the momenta are rescaled in the rest frame", 0);
static SwitchOption interfaceFinalStateReconOptionMostOffShell (interfaceFinalStateReconOption, "MostOffShell", "All particles put on the new-mass shell and then the most off-shell and" " recoiling system are rescaled to ensure 4-momentum is conserved.", 1);
static SwitchOption interfaceFinalStateReconOptionRecursive (interfaceFinalStateReconOption, "Recursive", "Recursively put on shell by putting the most off-shell particle which" " hasn't been rescaled on-shell by rescaling the particles and the recoiling system.", 2);
static SwitchOption interfaceFinalStateReconOptionRestMostOffShell (interfaceFinalStateReconOption, "RestMostOffShell", "The most off-shell is put on shell by rescaling it and the recoiling system," " the recoiling system is then put on-shell in its rest frame.", 3);
static SwitchOption interfaceFinalStateReconOptionRestRecursive (interfaceFinalStateReconOption, "RestRecursive", "As 3 but recursively treating the currently most off-shell," " only makes a difference if more than 3 partons.", 4);
static Switch<QTildeReconstructor,unsigned int> interfaceInitialStateReconOption ("InitialStateReconOption", "Option for the reconstruction of initial state radiation", &QTildeReconstructor::_initialStateReconOption, 0, false, false);
static SwitchOption interfaceInitialStateReconOptionRapidity (interfaceInitialStateReconOption, "Rapidity", "Preserve shat and rapidity", 0);
static SwitchOption interfaceInitialStateReconOptionLongitudinal (interfaceInitialStateReconOption, "Longitudinal", "Preserve longitudinal momentum", 1);
static SwitchOption interfaceInitialStateReconOptionSofterFraction (interfaceInitialStateReconOption, "SofterFraction", "Preserve the momentum fraction of the parton which has emitted softer.", 2); }
void QTildeReconstructor::doinit() { KinematicsReconstructor::doinit(); _noRescale = set<cPDPtr>(_noRescaleVector.begin(),_noRescaleVector.end()); }
bool QTildeReconstructor:: reconstructTimeLikeJet(const tShowerParticlePtr particleJetParent) const { assert(particleJetParent); bool emitted=true; // if this is not a fixed point in the reconstruction if( !particleJetParent->children().empty() ) { // if not a reconstruction fixpoint, dig deeper for all children: for ( ParticleVector::const_iterator cit = particleJetParent->children().begin(); cit != particleJetParent->children().end(); ++cit ) reconstructTimeLikeJet(dynamic_ptr_cast<ShowerParticlePtr>(*cit)); } // it is a reconstruction fixpoint, ie kinematical data has to be available else { // check if the parent was part of the shower ShowerParticlePtr jetGrandParent; if(!particleJetParent->parents().empty()) jetGrandParent= dynamic_ptr_cast<ShowerParticlePtr> (particleJetParent->parents()[0]); // update if so if (jetGrandParent) { if (jetGrandParent->showerKinematics()) { if(particleJetParent->id()==_progenitor->id()&& !_progenitor->data().stable()) { jetGrandParent->showerKinematics()->reconstructLast(particleJetParent, _progenitor->mass()); } else { jetGrandParent->showerKinematics()->reconstructLast(particleJetParent); } } } // otherwise else { Energy dm = particleJetParent->data().constituentMass(); if (abs(dm-particleJetParent->momentum().m())>0.001*MeV &&particleJetParent->dataPtr()->stable() &&particleJetParent->id()!=ParticleID::gamma &&_noRescale.find(particleJetParent->dataPtr())==_noRescale.end()) { Lorentz5Momentum dum = particleJetParent->momentum(); dum.setMass(dm); dum.rescaleEnergy(); particleJetParent->set5Momentum(dum); } else { emitted=false; } } } // recursion has reached an endpoint once, ie we can reconstruct the // kinematics from the children.
if( !particleJetParent->children().empty() ) particleJetParent->showerKinematics() ->reconstructParent( particleJetParent, particleJetParent->children() ); return emitted; } bool QTildeReconstructor:: reconstructHardJets(ShowerTreePtr hard, const map<tShowerProgenitorPtr, pair<Energy,double> > & intrinsic, ShowerInteraction::Type type, bool switchRecon) const { _currentTree = hard; _intrinsic=intrinsic; // extract the particles from the ShowerTree vector<ShowerProgenitorPtr> ShowerHardJets=hard->extractProgenitors(); for(unsigned int ix=0;ix<ShowerHardJets.size();++ix) { _boosts[ShowerHardJets[ix]->progenitor()] = vector<LorentzRotation>(); } for(map<tShowerTreePtr,pair<tShowerProgenitorPtr,tShowerParticlePtr> >::const_iterator tit = _currentTree->treelinks().begin(); tit != _currentTree->treelinks().end();++tit) { _treeBoosts[tit->first] = vector<LorentzRotation>(); } try { // old recon method, using new member functions if(_reconopt == 0 || switchRecon ) { reconstructGeneralSystem(ShowerHardJets); } // reconstruction based on coloured systems else if( _reconopt == 1) { reconstructColourSinglets(ShowerHardJets,type); } // reconstruction of FF, then IF, then II else if( _reconopt == 2) { reconstructFinalFirst(ShowerHardJets); } // reconstruction based on coloured systems else if( _reconopt == 3 || _reconopt == 4) { reconstructColourPartner(ShowerHardJets); } else assert(false); } catch(KinematicsReconstructionVeto) { _progenitor=tShowerParticlePtr(); _intrinsic.clear(); for(map<tPPtr,vector<LorentzRotation> >::const_iterator bit=_boosts.begin();bit!=_boosts.end();++bit) { for(vector<LorentzRotation>::const_reverse_iterator rit=bit->second.rbegin();rit!=bit->second.rend();++rit) { LorentzRotation rot = rit->inverse(); bit->first->transform(rot); } } _boosts.clear(); for(map<tShowerTreePtr,vector<LorentzRotation> >::const_iterator bit=_treeBoosts.begin();bit!=_treeBoosts.end();++bit) { for(vector<LorentzRotation>::const_reverse_iterator rit=bit->second.rbegin();rit!=bit->second.rend();++rit) { LorentzRotation rot = rit->inverse(); bit->first->transform(rot,false); } } _currentTree = tShowerTreePtr(); _treeBoosts.clear(); return false; } catch (Exception & ex) { _progenitor=tShowerParticlePtr(); _intrinsic.clear(); _currentTree = tShowerTreePtr(); _boosts.clear(); _treeBoosts.clear(); throw ex; } _progenitor=tShowerParticlePtr(); _intrinsic.clear(); // ensure x<1 for(map<ShowerProgenitorPtr,ShowerParticlePtr>::const_iterator cit=hard->incomingLines().begin();cit!=hard->incomingLines().end();++cit) { tPPtr parent = cit->first->progenitor(); while (!parent->parents().empty()) { parent = parent->parents()[0]; } tPPtr hadron; if ( cit->first->original()->parents().empty() ) { hadron = cit->first->original(); } else { hadron = cit->first->original()->parents()[0]; } if( ! 
(hadron->id() == parent->id() && hadron->children().size() <= 1) && parent->momentum().rho() > hadron->momentum().rho()) { _progenitor=tShowerParticlePtr(); _intrinsic.clear(); for(map<tPPtr,vector<LorentzRotation> >::const_iterator bit=_boosts.begin();bit!=_boosts.end();++bit) { for(vector<LorentzRotation>::const_reverse_iterator rit=bit->second.rbegin();rit!=bit->second.rend();++rit) { LorentzRotation rot = rit->inverse(); bit->first->transform(rot); } } _boosts.clear(); for(map<tShowerTreePtr,vector<LorentzRotation> >::const_iterator bit=_treeBoosts.begin();bit!=_treeBoosts.end();++bit) { for(vector<LorentzRotation>::const_reverse_iterator rit=bit->second.rbegin();rit!=bit->second.rend();++rit) { LorentzRotation rot = rit->inverse(); bit->first->transform(rot,false); } } _currentTree = tShowerTreePtr(); _treeBoosts.clear(); return false; } } _boosts.clear(); _treeBoosts.clear(); _currentTree = tShowerTreePtr(); return true; } double QTildeReconstructor::solveKfactor(const Energy & root_s, const JetKinVect & jets) const { Energy2 s = sqr(root_s); // must be at least two jets if ( jets.size() < 2) throw KinematicsReconstructionVeto(); // sum of jet masses must be less than roots if(momConsEq( 0.0, root_s, jets )>ZERO) throw KinematicsReconstructionVeto(); // if two jets simple solution if ( jets.size() == 2 ) { static const Energy2 eps = 1.0e-4 * MeV2; if ( sqr(jets[0].p.x()+jets[1].p.x()) < eps && sqr(jets[0].p.y()+jets[1].p.y()) < eps && sqr(jets[0].p.z()+jets[1].p.z()) < eps ) { Energy test = (jets[0].p+jets[1].p).vect().mag(); if(test > 1.0e-4 * MeV) throw KinematicsReconstructionVeto(); if ( jets[0].p.vect().mag2() < eps ) throw KinematicsReconstructionVeto(); Energy2 m1sq(jets[0].q.m2()),m2sq(jets[1].q.m2()); return sqrt( ( sqr(s - m1sq - m2sq) - 4.*m1sq*m2sq ) /(4.*s*jets[0].p.vect().mag2()) ); } else throw KinematicsReconstructionVeto(); } // i.e. jets.size() > 2, numerically // check convergence, if it's a problem maybe use Newton iteration? 
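// bracket the root of momConsEq by repeatedly doubling k2, then bisect until the
// relative width of the bracket falls below 1e-10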
else { double k1 = 0.,k2 = 1.,k = 0.; if ( momConsEq( k1, root_s, jets ) < ZERO ) { while ( momConsEq( k2, root_s, jets ) < ZERO ) { k1 = k2; k2 *= 2; } while ( fabs( (k1 - k2)/(k1 + k2) ) > 1.e-10 ) { if( momConsEq( k2, root_s, jets ) == ZERO ) { return k2; } else { k = (k1+k2)/2.; if ( momConsEq( k, root_s, jets ) > ZERO ) { k2 = k; } else { k1 = k; } } } return k1; } else throw KinematicsReconstructionVeto(); } throw KinematicsReconstructionVeto(); } bool QTildeReconstructor:: reconstructSpaceLikeJet( const tShowerParticlePtr p) const { bool emitted = true; tShowerParticlePtr child; tShowerParticlePtr parent; if(!p->parents().empty()) parent = dynamic_ptr_cast<ShowerParticlePtr>(p->parents()[0]); if(parent) { emitted=true; reconstructSpaceLikeJet(parent); } // if branching reconstruct time-like child if(p->children().size()==2) child = dynamic_ptr_cast<ShowerParticlePtr>(p->children()[1]); if(p->perturbative()==0 && child) { dynamic_ptr_cast<ShowerParticlePtr>(p->children()[0])-> showerKinematics()->reconstructParent(p,p->children()); if(!child->children().empty()) { _progenitor=child; reconstructTimeLikeJet(child); // calculate the momentum of the particle Lorentz5Momentum pnew=p->momentum()-child->momentum(); pnew.rescaleMass(); p->children()[0]->set5Momentum(pnew); } } return emitted; } Boost QTildeReconstructor:: solveBoostBeta( const double k, const Lorentz5Momentum & newq, const Lorentz5Momentum & oldp ) { // try something different, purely numerical first: // a) boost to rest frame of newq, b) boost with kp/E Energy q = newq.vect().mag(); Energy2 qs = sqr(q); Energy2 Q2 = newq.m2(); Energy kp = k*(oldp.vect().mag()); Energy2 kps = sqr(kp); // usually we take the minus sign, since this boost will be smaller. // we only require |k \vec p| = |\vec q'| which leaves the sign of // the boost open but the 'minus' solution gives a smaller boost // parameter, i.e. the result should be closest to the previous // result. this is to be changed if we would get many momentum // conservation violations at the end of the shower from a hard // process. double betam = (q*sqrt(qs + Q2) - kp*sqrt(kps + Q2))/(kps + qs + Q2); // move directly to 'return' Boost beta = -betam*(k/kp)*oldp.vect(); // note that (k/kp)*oldp.vect() = oldp.vect()/oldp.vect().mag() but cheaper. // leave this out if it's running properly! 
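// only apply the boost if the 'minus' solution yields a non-negative boost parameter;
// otherwise no boost is applied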
if ( betam >= 0 ) return beta; else return Boost(0., 0., 0.); } bool QTildeReconstructor:: reconstructDecayJets(ShowerTreePtr decay, ShowerInteraction::Type) const { _currentTree = decay; // extract the particles from the ShowerTree vector<ShowerProgenitorPtr> ShowerHardJets=decay->extractProgenitors(); for(unsigned int ix=0;ix<ShowerHardJets.size();++ix) { _boosts[ShowerHardJets[ix]->progenitor()] = vector<LorentzRotation>(); } for(map<tShowerTreePtr,pair<tShowerProgenitorPtr,tShowerParticlePtr> >::const_iterator tit = _currentTree->treelinks().begin(); tit != _currentTree->treelinks().end();++tit) { _treeBoosts[tit->first] = vector<LorentzRotation>(); } try { bool radiated[2]={false,false}; // find the decaying particle and check if particles radiated ShowerProgenitorPtr initial; for(unsigned int ix=0;ix<ShowerHardJets.size();++ix) { // only consider initial-state jets if(ShowerHardJets[ix]->progenitor()->isFinalState()) { radiated[1] |=ShowerHardJets[ix]->hasEmitted(); } else { initial=ShowerHardJets[ix]; radiated[0]|=ShowerHardJets[ix]->hasEmitted(); } } // find boost to the rest frame if needed Boost boosttorest=-initial->progenitor()->momentum().boostVector(); double gammarest = initial->progenitor()->momentum().e()/ initial->progenitor()->momentum().mass(); // check if need to boost to rest frame bool gottaBoost = (boosttorest.mag() > 1e-12); // if initial state radiation reconstruct the jet and set up the basis vectors Lorentz5Momentum pjet; Lorentz5Momentum nvect; // find the partner ShowerParticlePtr partner = initial->progenitor()->partner(); Lorentz5Momentum ppartner[2]; if(partner) ppartner[0]=partner->momentum(); // get the n reference vector if(partner) { if(initial->progenitor()->showerKinematics()) { nvect = initial->progenitor()->showerBasis()->getBasis()[1]; } else { Lorentz5Momentum ppartner=initial->progenitor()->partner()->momentum(); if(gottaBoost) ppartner.boost(boosttorest,gammarest); nvect = Lorentz5Momentum( ZERO,0.5*initial->progenitor()->mass()* ppartner.vect().unit()); nvect.boost(-boosttorest,gammarest); } } // if ISR if(radiated[0]) { // reconstruct the decay jet reconstructDecayJet(initial->progenitor()); // momentum of decaying particle after ISR pjet=initial->progenitor()->momentum() -decay->incomingLines().begin()->second->momentum(); pjet.rescaleMass(); } // boost initial state jet and basis vector if needed if(gottaBoost) { pjet.boost(boosttorest,gammarest); nvect.boost(boosttorest,gammarest); ppartner[0].boost(boosttorest,gammarest); } // loop over the final-state particles and do the reconstruction JetKinVect possiblepartners; JetKinVect jetKinematics; bool atLeastOnce = radiated[0]; LorentzRotation restboost(boosttorest,gammarest); Energy inmass(ZERO); for(unsigned int ix=0;ix<ShowerHardJets.size();++ix) { // only consider final-state jets if(!ShowerHardJets[ix]->progenitor()->isFinalState()) { inmass=ShowerHardJets[ix]->progenitor()->mass(); continue; } // do the reconstruction JetKinStruct tempJetKin; tempJetKin.parent = ShowerHardJets[ix]->progenitor(); if(ShowerHardJets.size()==2) { Lorentz5Momentum dum=ShowerHardJets[ix]->progenitor()->momentum(); dum.setMass(inmass); dum.rescaleRho(); tempJetKin.parent->set5Momentum(dum); } tempJetKin.p = ShowerHardJets[ix]->progenitor()->momentum(); if(gottaBoost) tempJetKin.p.boost(boosttorest,gammarest); _progenitor=tempJetKin.parent; if(ShowerHardJets[ix]->reconstructed()==ShowerProgenitor::notReconstructed) { atLeastOnce |= reconstructTimeLikeJet(tempJetKin.parent); 
ShowerHardJets[ix]->reconstructed(ShowerProgenitor::done); } if(gottaBoost) deepTransform(tempJetKin.parent,restboost); tempJetKin.q = ShowerHardJets[ix]->progenitor()->momentum(); jetKinematics.push_back(tempJetKin); } if(partner) ppartner[1]=partner->momentum(); // calculate the rescaling parameters double k1,k2; Lorentz5Momentum qt; if(!solveDecayKFactor(initial->progenitor()->mass(),nvect,pjet, jetKinematics,partner,ppartner,k1,k2,qt)) { for(map<tPPtr,vector<LorentzRotation> >::const_iterator bit=_boosts.begin();bit!=_boosts.end();++bit) { for(vector<LorentzRotation>::const_reverse_iterator rit=bit->second.rbegin();rit!=bit->second.rend();++rit) { LorentzRotation rot = rit->inverse(); bit->first->transform(rot); } } _boosts.clear(); for(map<tShowerTreePtr,vector<LorentzRotation> >::const_iterator bit=_treeBoosts.begin();bit!=_treeBoosts.end();++bit) { for(vector<LorentzRotation>::const_reverse_iterator rit=bit->second.rbegin();rit!=bit->second.rend();++rit) { LorentzRotation rot = rit->inverse(); bit->first->transform(rot,false); } } _treeBoosts.clear(); _currentTree = tShowerTreePtr(); return false; } // apply boosts and rescalings to final-state jets for(JetKinVect::iterator it = jetKinematics.begin(); it != jetKinematics.end(); ++it) { LorentzRotation Trafo = LorentzRotation(); if(it->parent!=partner) { // boost for rescaling if(atLeastOnce) { map<tShowerTreePtr,pair<tShowerProgenitorPtr, tShowerParticlePtr> >::const_iterator tit; for(tit = _currentTree->treelinks().begin(); tit != _currentTree->treelinks().end();++tit) { if(tit->second.first && tit->second.second==it->parent) break; } if(it->parent->children().empty()&&!it->parent->spinInfo() && tit==_currentTree->treelinks().end()) { Lorentz5Momentum pnew(k2*it->p.vect(), sqrt(sqr(k2*it->p.vect().mag())+it->q.mass2()), it->q.mass()); it->parent->set5Momentum(pnew); } else { // rescaling boost can't ever work in this case if(k2<0. 
&& it->q.mass()==ZERO) throw KinematicsReconstructionVeto(); Trafo = solveBoost(k2, it->q, it->p); } } if(gottaBoost) Trafo.boost(-boosttorest,gammarest); if(atLeastOnce || gottaBoost) deepTransform(it->parent,Trafo); } else { Lorentz5Momentum pnew=ppartner[0]; pnew *=k1; pnew-=qt; pnew.setMass(ppartner[1].mass()); pnew.rescaleEnergy(); LorentzRotation Trafo=solveBoost(1.,ppartner[1],pnew); if(gottaBoost) Trafo.boost(-boosttorest,gammarest); deepTransform(partner,Trafo); } } } catch(KinematicsReconstructionVeto) { for(map<tPPtr,vector<LorentzRotation> >::const_iterator bit=_boosts.begin();bit!=_boosts.end();++bit) { for(vector<LorentzRotation>::const_reverse_iterator rit=bit->second.rbegin();rit!=bit->second.rend();++rit) { LorentzRotation rot = rit->inverse(); bit->first->transform(rot); } } _boosts.clear(); for(map<tShowerTreePtr,vector<LorentzRotation> >::const_iterator bit=_treeBoosts.begin();bit!=_treeBoosts.end();++bit) { for(vector<LorentzRotation>::const_reverse_iterator rit=bit->second.rbegin();rit!=bit->second.rend();++rit) { LorentzRotation rot = rit->inverse(); bit->first->transform(rot,false); } } _treeBoosts.clear(); _currentTree = tShowerTreePtr(); return false; } catch (Exception & ex) { _currentTree = tShowerTreePtr(); _boosts.clear(); _treeBoosts.clear(); throw ex; } _boosts.clear(); _treeBoosts.clear(); _currentTree = tShowerTreePtr(); return true; } bool QTildeReconstructor:: reconstructDecayJet( const tShowerParticlePtr p) const { if(p->children().empty()) return false; tShowerParticlePtr child; // if branching reconstruct time-like child child = dynamic_ptr_cast<ShowerParticlePtr>(p->children()[1]); if(child) { _progenitor=child; reconstructTimeLikeJet(child); // calculate the momentum of the particle Lorentz5Momentum pnew=p->momentum()-child->momentum(); pnew.rescaleMass(); p->children()[0]->set5Momentum(pnew); child=dynamic_ptr_cast<ShowerParticlePtr>(p->children()[0]); reconstructDecayJet(child); return true; } return false; } bool QTildeReconstructor:: solveDecayKFactor(Energy mb, const Lorentz5Momentum & n, const Lorentz5Momentum & pjet, const JetKinVect & jetKinematics, ShowerParticlePtr partner, Lorentz5Momentum ppartner[2], double & k1, double & k2, Lorentz5Momentum & qt) const { Energy2 pjn = partner ? pjet.vect()*n.vect() : ZERO; Energy2 pcn = partner ? ppartner[0].vect()*n.vect() : 1.*MeV2; Energy2 nmag = n.vect().mag2(); Lorentz5Momentum pn = partner ? 
(pjn/nmag)*n : Lorentz5Momentum(); qt=pjet-pn; qt.setE(ZERO); Energy2 pt2=qt.vect().mag2(); Energy Ejet = pjet.e(); // magnitudes of the momenta for fast access vector<Energy2> pmag; Energy total(Ejet); for(unsigned int ix=0;ix<jetKinematics.size();++ix) { pmag.push_back(jetKinematics[ix].p.vect().mag2()); total+=jetKinematics[ix].q.mass(); } // return if no possible solution if(total>mb) return false; Energy2 pcmag=ppartner[0].vect().mag2(); // used newton-raphson to get the rescaling static const Energy eps=1e-8*GeV; long double d1(1.),d2(1.); Energy roots, ea, ec, ds; unsigned int ix=0; do { ++ix; d2 = d1 + pjn/pcn; roots = Ejet; ds = ZERO; for(unsigned int iy=0;iy<jetKinematics.size();++iy) { if(jetKinematics[iy].parent==partner) continue; ea = sqrt(sqr(d2)*pmag[iy]+jetKinematics[iy].q.mass2()); roots += ea; ds += d2/ea*pmag[iy]; } if(partner) { ec = sqrt(sqr(d1)*pcmag + pt2 + ppartner[1].mass2()); roots += ec; ds += d1/ec*pcmag; } d1 += (mb-roots)/ds; d2 = d1 + pjn/pcn; } while(abs(mb-roots)>eps && ix<100); k1=d1; k2=d2; // return true if N-R succeed, otherwise false return ix<100; } bool QTildeReconstructor:: deconstructDecayJets(HardTreePtr decay,ShowerInteraction::Type) const { // extract the momenta of the particles vector<Lorentz5Momentum> pin; vector<Lorentz5Momentum> pout; // on-shell masses of the decay products vector<Energy> mon; Energy mbar(-GeV); // the hard branchings of the particles set<HardBranchingPtr>::iterator cit; set<HardBranchingPtr> branchings=decay->branchings(); // properties of the incoming particle bool ISR = false; HardBranchingPtr initial; Lorentz5Momentum qisr; // find the incoming particle, both before and after // any ISR for(cit=branchings.begin();cit!=branchings.end();++cit){ if((*cit)->status()==HardBranching::Incoming|| (*cit)->status()==HardBranching::Decay) { // search back up isr if needed HardBranchingPtr branch = *cit; while(branch->parent()) branch=branch->parent(); initial=branch; // momentum or original parent pin.push_back(branch->branchingParticle()->momentum()); // ISR? 
ISR = !branch->branchingParticle()->children().empty(); // ISR momentum qisr = pin.back()-(**cit).branchingParticle()->momentum(); qisr.rescaleMass(); } } assert(pin.size()==1); // compute boost to rest frame Boost boostv=-pin[0].boostVector(); // partner for ISR ShowerParticlePtr partner; Lorentz5Momentum ppartner; if(initial->branchingParticle()->partner()) { partner=initial->branchingParticle()->partner(); ppartner=partner->momentum(); } // momentum of the decay products for(cit=branchings.begin();cit!=branchings.end();++cit) { if((*cit)->status()!=HardBranching::Outgoing) continue; // find the mass of the particle // including special treatment for off-shell resonances // to preserve off-shell mass Energy mass; if(!(**cit).branchingParticle()->dataPtr()->stable()) { HardBranchingPtr branch=*cit; while(!branch->children().empty()) { for(unsigned int ix=0;ix<branch->children().size();++ix) { if(branch->children()[ix]->branchingParticle()->id()== (**cit).branchingParticle()->id()) { branch = branch->children()[ix]; continue; } } }; mass = branch->branchingParticle()->mass(); } else { mass = (**cit).branchingParticle()->dataPtr()->mass(); } // if not evolution partner of decaying particle if((*cit)->branchingParticle()!=partner) { pout.push_back((*cit)->branchingParticle()->momentum()); mon.push_back(mass); } // evolution partner of decaying particle else { mbar = mass; } } // boost all the momenta to the rest frame of the decaying particle for(unsigned int ix=0;ix<pout.size();++ix) pout[ix].boost(boostv); if(initial->branchingParticle()->partner()) { ppartner.boost(boostv); qisr.boost(boostv); } // compute the rescaling factors double k1,k2; if(!ISR) { if(partner) { pout.push_back(ppartner); mon.push_back(mbar); } k1=k2=inverseRescalingFactor(pout,mon,pin[0].mass()); if(partner) { pout.pop_back(); mon.pop_back(); } } else { if(!inverseDecayRescalingFactor(pout,mon,pin[0].mass(), ppartner,mbar,k1,k2)) return false; } // now calculate the p reference vectors unsigned int ifinal=0; for(cit=branchings.begin();cit!=branchings.end();++cit) { if((**cit).status()!=HardBranching::Outgoing) continue; // for partners other than colour partner of decaying particle if((*cit)->branchingParticle()!=partner) { Lorentz5Momentum pvect = (*cit)->branchingParticle()->momentum(); pvect.boost(boostv); pvect /= k1; pvect.setMass(mon[ifinal]); ++ifinal; pvect.rescaleEnergy(); pvect.boost(-boostv); (*cit)->pVector(pvect); (*cit)->showerMomentum(pvect); } // for colour partner of decaying particle else { Lorentz5Momentum pvect = (*cit)->branchingParticle()->momentum(); pvect.boost(boostv); Lorentz5Momentum qtotal; for(unsigned int ix=0;ix<pout.size();++ix) qtotal+=pout[ix]; Lorentz5Momentum qperp = qisr-(qisr.vect()*qtotal.vect())/(qtotal.vect().mag2())*qtotal; pvect +=qperp; pvect /=k2; pvect.setMass(mbar); pvect.rescaleEnergy(); pvect.boost(-boostv); (*cit)->pVector(pvect); (*cit)->showerMomentum(pvect); } } // For initial-state if needed if(initial) { tShowerParticlePtr newPartner=initial->branchingParticle()->partner(); if(newPartner) { tHardBranchingPtr branch; for( set<HardBranchingPtr>::iterator clt = branchings.begin(); clt != branchings.end(); ++clt ) { if((**clt).branchingParticle()==newPartner) { initial->colourPartner(*clt); branch=*clt; break; } } Lorentz5Momentum pvect = initial->branchingParticle()->momentum(); initial->pVector(pvect); Lorentz5Momentum ptemp = branch->pVector(); ptemp.boost(boostv); Lorentz5Momentum nvect = Lorentz5Momentum( ZERO, 0.5*initial->branchingParticle()->mass()* 
ptemp.vect().unit()); nvect.boost(-boostv); initial->nVector(nvect); } } // calculate the reference vectors, then for outgoing particles for(cit=branchings.begin();cit!=branchings.end();++cit){ if((**cit).status()!=HardBranching::Outgoing) continue; // find the partner branchings tShowerParticlePtr newPartner=(*cit)->branchingParticle()->partner(); if(!newPartner) continue; tHardBranchingPtr branch; for( set<HardBranchingPtr>::iterator clt = branchings.begin(); clt != branchings.end(); ++clt ) { if(cit==clt) continue; if((**clt).branchingParticle()==newPartner) { (**cit).colourPartner(*clt); branch=*clt; break; } } if((**decay->incoming().begin()).branchingParticle()==newPartner) { (**cit).colourPartner(*decay->incoming().begin()); branch = *decay->incoming().begin(); } // final-state colour partner if(branch->status()==HardBranching::Outgoing) { Boost boost=((*cit)->pVector()+branch->pVector()).findBoostToCM(); Lorentz5Momentum pcm = branch->pVector(); pcm.boost(boost); Lorentz5Momentum nvect = Lorentz5Momentum(ZERO,pcm.vect()); nvect.boost( -boost); (*cit)->nVector(nvect); } // initial-state colour partner else { Boost boost=branch->pVector().findBoostToCM(); Lorentz5Momentum pcm = (*cit)->pVector(); pcm.boost(boost); Lorentz5Momentum nvect = Lorentz5Momentum( ZERO, -pcm.vect()); nvect.boost( -boost); (*cit)->nVector(nvect); } } // now compute the new momenta // and calculate the shower variables for(cit=branchings.begin();cit!=branchings.end();++cit) { if((**cit).status()!=HardBranching::Outgoing) continue; LorentzRotation B=LorentzRotation(-boostv); LorentzRotation A=LorentzRotation(boostv),R; if((*cit)->branchingParticle()==partner) { Lorentz5Momentum qnew; Energy2 dot=(*cit)->pVector()*(*cit)->nVector(); double beta = 0.5*((*cit)->branchingParticle()->momentum().m2() -sqr((*cit)->pVector().mass()))/dot; qnew=(*cit)->pVector()+beta*(*cit)->nVector(); qnew.rescaleMass(); // compute the boost R=B*solveBoost(A*qnew,A*(*cit)->branchingParticle()->momentum())*A; } else { Lorentz5Momentum qnew; if((*cit)->branchingParticle()->partner()) { Energy2 dot=(*cit)->pVector()*(*cit)->nVector(); double beta = 0.5*((*cit)->branchingParticle()->momentum().m2() -sqr((*cit)->pVector().mass()))/dot; qnew=(*cit)->pVector()+beta*(*cit)->nVector(); qnew.rescaleMass(); } else { qnew = (*cit)->pVector(); } // compute the boost R=B*solveBoost(A*qnew,A*(*cit)->branchingParticle()->momentum())*A; } // reconstruct the momenta (*cit)->setMomenta(R,1.0,Lorentz5Momentum()); } if(initial) { initial->setMomenta(LorentzRotation(),1.0,Lorentz5Momentum()); } return true; } double QTildeReconstructor:: inverseRescalingFactor(vector<Lorentz5Momentum> pout, vector<Energy> mon, Energy roots) const { double lambda=1.; if(pout.size()==2) { double mu_q1(pout[0].m()/roots), mu_q2(pout[1].m()/roots); double mu_p1(mon[0]/roots) , mu_p2(mon[1]/roots); lambda = ((1.+mu_q1+mu_q2)*(1.-mu_q1-mu_q2)*(mu_q1-1.-mu_q2)*(mu_q2-1.-mu_q1))/ ((1.+mu_p1+mu_p2)*(1.-mu_p1-mu_p2)*(mu_p1-1.-mu_p2)*(mu_p2-1.-mu_p1)); if(lambda<0.) 
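// for two outgoing particles lambda^2 is the ratio of Kallen functions
// lambda(1,mu_q1^2,mu_q2^2)/lambda(1,mu_p1^2,mu_p2^2), with
// lambda(a,b,c) = a^2+b^2+c^2-2ab-2bc-2ca, i.e. the squared ratio of the
// two-body momenta evaluated with the current and with the target masses;
// a negative value signals a kinematically impossible configuration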
throw Exception() << "Rescaling factor is imaginary in QTildeReconstructor::" << "inverseRescalingFactor lambda^2= " << lambda << Exception::eventerror; lambda = sqrt(lambda); } else { unsigned int ntry=0; // compute magnitudes once for speed vector<Energy2> pmag; for(unsigned int ix=0;ix<pout.size();++ix) { pmag.push_back(pout[ix].vect().mag2()); } // Newton-Raphson for the rescaling vector<Energy> root(pout.size()); do { // compute new energies Energy sum(ZERO); for(unsigned int ix=0;ix<pout.size();++ix) { root[ix] = sqrt(pmag[ix]/sqr(lambda)+sqr(mon[ix])); sum+=root[ix]; } // if accuracy reached exit if(abs(sum/roots-1.)<1e-10) break; // use Newton-Raphson to compute new guess for lambda Energy numer(ZERO),denom(ZERO); for(unsigned int ix=0;ix<pout.size();++ix) { numer +=root[ix]; denom +=pmag[ix]/root[ix]; } numer-=roots; double fact = 1.+sqr(lambda)*numer/denom; if(fact<0.) fact=0.5; lambda *=fact; ++ntry; } while(ntry<100); } - if(isnan(lambda)) + if(std::isnan(lambda)) throw Exception() << "Rescaling factor is nan in QTildeReconstructor::" << "inverseRescalingFactor " << Exception::eventerror; return lambda; } bool QTildeReconstructor:: deconstructGeneralSystem(HardTreePtr tree, ShowerInteraction::Type type) const { // extract incoming and outgoing particles ColourSingletShower in,out; for(set<HardBranchingPtr>::const_iterator it=tree->branchings().begin(); it!=tree->branchings().end();++it) { if((**it).status()==HardBranching::Incoming) in .jets.push_back(*it); else out.jets.push_back(*it); } LorentzRotation toRest,fromRest; bool applyBoost(false); // do the initial-state reconstruction deconstructInitialInitialSystem(applyBoost,toRest,fromRest, tree,in.jets,type); // do the final-state reconstruction deconstructFinalStateSystem(toRest,fromRest,tree, out.jets,type); // only at this point that we can be sure all the reference vectors // are correct for(set<HardBranchingPtr>::const_iterator it=tree->branchings().begin(); it!=tree->branchings().end();++it) { if((**it).status()==HardBranching::Incoming) continue; if((**it).branchingParticle()->coloured()) (**it).setMomenta(LorentzRotation(),1.,Lorentz5Momentum(),false); } for(set<HardBranchingPtr>::const_iterator it=tree->incoming().begin(); it!=tree->incoming().end();++it) { (**it).setMomenta(LorentzRotation(),1.,Lorentz5Momentum(),false); } return true; } bool QTildeReconstructor::deconstructHardJets(HardTreePtr tree, ShowerInteraction::Type type) const { // inverse of old recon method if(_reconopt == 0) { return deconstructGeneralSystem(tree,type); } else if(_reconopt == 1) { return deconstructColourSinglets(tree,type); } else if(_reconopt == 2) { throw Exception() << "Inverse reconstruction is not currently supported for ReconstructionOption Colour2 " << "in QTildeReconstructor::deconstructHardJets(). 
Please use one of the other options\n" << Exception::runerror; } else if(_reconopt == 3 || _reconopt == 4 ) { return deconstructColourPartner(tree,type); } else assert(false); } bool QTildeReconstructor:: deconstructColourSinglets(HardTreePtr tree, ShowerInteraction::Type type) const { // identify the colour singlet systems unsigned int nnun(0),nnii(0),nnif(0),nnf(0),nni(0); vector<ColourSingletShower> systems(identifySystems(tree->branchings(),nnun,nnii,nnif,nnf,nni)); // now decide what to do LorentzRotation toRest,fromRest; bool applyBoost(false); bool general(false); // initial-initial connection and final-state colour singlet systems // Drell-Yan type if(nnun==0&&nnii==1&&nnif==0&&nnf>0&&nni==0) { // reconstruct initial-initial system for(unsigned int ix=0;ix<systems.size();++ix) { if(systems[ix].type==II) deconstructInitialInitialSystem(applyBoost,toRest,fromRest,tree, systems[ix].jets,type); } if(type!=ShowerInteraction::QCD) { combineFinalState(systems); general=false; } } // DIS and VBF type else if(nnun==0&&nnii==0&&((nnif==1&&nnf>0&&nni==1)|| (nnif==2&& nni==0))) { for(unsigned int ix=0;ix<systems.size();++ix) { if(systems[ix].type==IF) deconstructInitialFinalSystem(tree,systems[ix].jets,type); } } // e+e- type else if(nnun==0&&nnii==0&&nnif==0&&nnf>0&&nni==2) { // only FS needed // but need to boost to rest frame if QED ISR Lorentz5Momentum ptotal; for(unsigned int ix=0;ix<systems.size();++ix) { if(systems[ix].type==I) ptotal += systems[ix].jets[0]->branchingParticle()->momentum(); } toRest = LorentzRotation(ptotal.findBoostToCM()); fromRest = toRest; fromRest.invert(); if(type!=ShowerInteraction::QCD) { combineFinalState(systems); general=false; } } // general type else { general = true; } // final-state systems except for general recon if(!general) { for(unsigned int ix=0;ix<systems.size();++ix) { if(systems[ix].type==F) deconstructFinalStateSystem(toRest,fromRest,tree, systems[ix].jets,type); } // only at this point that we can be sure all the reference vectors // are correct for(set<HardBranchingPtr>::const_iterator it=tree->branchings().begin(); it!=tree->branchings().end();++it) { if((**it).status()==HardBranching::Incoming) continue; if((**it).branchingParticle()->coloured()) (**it).setMomenta(LorentzRotation(),1.,Lorentz5Momentum(),false); } for(set<HardBranchingPtr>::const_iterator it=tree->incoming().begin(); it!=tree->incoming().end();++it) { (**it).setMomenta(LorentzRotation(),1.,Lorentz5Momentum(),false); } return true; } else { return deconstructGeneralSystem(tree,type); } return true; } bool QTildeReconstructor:: deconstructColourPartner(HardTreePtr tree, ShowerInteraction::Type type) const { Lorentz5Momentum ptotal; HardBranchingPtr emitter; ColourSingletShower incomingShower,outgoingShower; for(set<HardBranchingPtr>::const_iterator it=tree->branchings().begin(); it!=tree->branchings().end();++it) { if((**it).status()==HardBranching::Incoming) { incomingShower.jets.push_back(*it); ptotal += (*it)->branchingParticle()->momentum(); // check for emitting particle if((**it).parent() ) { if(!emitter) emitter = *it; else throw Exception() << "Only one emitting particle allowed in " << "QTildeReconstructor::deconstructColourPartner()" << Exception::runerror; } } else if ((**it).status()==HardBranching::Outgoing) { outgoingShower.jets.push_back(*it); // check for emitting particle if(!(**it).children().empty() ) { if(!emitter) emitter = *it; else throw Exception() << "Only one emitting particle allowed in " << "QTildeReconstructor::deconstructColourPartner()" << 
Exception::runerror; } } } assert(emitter); assert(emitter->colourPartner()); ColourSingletShower system; system.jets.push_back(emitter); system.jets.push_back(emitter->colourPartner()); LorentzRotation toRest,fromRest; bool applyBoost(false); // identify the colour singlet system if(emitter->status() == HardBranching::Outgoing && emitter->colourPartner()->status() == HardBranching::Outgoing ) { system.type=F; // need to boost to rest frame if QED ISR if( !incomingShower.jets[0]->branchingParticle()->coloured() && !incomingShower.jets[1]->branchingParticle()->coloured() ) { Boost boost = ptotal.findBoostToCM(); toRest = LorentzRotation( boost); fromRest = LorentzRotation(-boost); } else findInitialBoost(ptotal,ptotal,toRest,fromRest); deconstructFinalStateSystem(toRest,fromRest,tree, system.jets,type); } else if (emitter->status() == HardBranching::Incoming && emitter->colourPartner()->status() == HardBranching::Incoming) { system.type=II; deconstructInitialInitialSystem(applyBoost,toRest,fromRest,tree,system.jets,type); // make sure the recoil gets applied deconstructFinalStateSystem(toRest,fromRest,tree, outgoingShower.jets,type); } else if ((emitter->status() == HardBranching::Outgoing && emitter->colourPartner()->status() == HardBranching::Incoming ) || (emitter->status() == HardBranching::Incoming && emitter->colourPartner()->status() == HardBranching::Outgoing)) { system.type=IF; // ensure incoming first if(system.jets[0]->status() == HardBranching::Outgoing) swap(system.jets[0],system.jets[1]); deconstructInitialFinalSystem(tree,system.jets,type); } else { throw Exception() << "Unknown type of system in " << "QTildeReconstructor::deconstructColourPartner()" << Exception::runerror; } // only at this point that we can be sure all the reference vectors // are correct for(set<HardBranchingPtr>::const_iterator it=tree->branchings().begin(); it!=tree->branchings().end();++it) { if((**it).status()==HardBranching::Incoming) continue; if((**it).branchingParticle()->coloured()) (**it).setMomenta(LorentzRotation(),1.,Lorentz5Momentum(),false); } for(set<HardBranchingPtr>::const_iterator it=tree->incoming().begin(); it!=tree->incoming().end();++it) { (**it).setMomenta(LorentzRotation(),1.,Lorentz5Momentum(),false); } for(set<HardBranchingPtr>::const_iterator it=tree->branchings().begin(); it!=tree->branchings().end();++it) { if((**it).status()!=HardBranching::Incoming) continue; if(*it==system.jets[0] || *it==system.jets[1]) continue; if((**it).branchingParticle()->momentum().z()>ZERO) { (**it).z((**it).branchingParticle()->momentum().plus()/(**it).beam()->momentum().plus()); } else { (**it).z((**it).branchingParticle()->momentum().minus()/(**it).beam()->momentum().minus()); } } return true; } void QTildeReconstructor:: reconstructInitialFinalSystem(vector<ShowerProgenitorPtr> jets) const { Lorentz5Momentum pin[2],pout[2],pbeam; for(unsigned int ix=0;ix<jets.size();++ix) { // final-state parton if(jets[ix]->progenitor()->isFinalState()) { pout[0] +=jets[ix]->progenitor()->momentum(); _progenitor = jets[ix]->progenitor(); if(jets[ix]->reconstructed()==ShowerProgenitor::notReconstructed) { reconstructTimeLikeJet(jets[ix]->progenitor()); jets[ix]->reconstructed(ShowerProgenitor::done); } } // initial-state parton else { pin[0] +=jets[ix]->progenitor()->momentum(); if(jets[ix]->progenitor()->showerKinematics()) { pbeam = jets[ix]->progenitor()->showerBasis()->getBasis()[0]; } else { if ( jets[ix]->original()->parents().empty() ) { pbeam = jets[ix]->progenitor()->momentum(); } else { pbeam =
jets[ix]->original()->parents()[0]->momentum(); } } if(jets[ix]->reconstructed()==ShowerProgenitor::notReconstructed) { reconstructSpaceLikeJet(jets[ix]->progenitor()); jets[ix]->reconstructed(ShowerProgenitor::done); } assert(!jets[ix]->original()->parents().empty()); } } // add intrinsic pt if needed addIntrinsicPt(jets); // momenta after showering for(unsigned int ix=0;ix<jets.size();++ix) { if(jets[ix]->progenitor()->isFinalState()) pout[1] += jets[ix]->progenitor()->momentum(); else pin[1] += jets[ix]->progenitor()->momentum(); } // work out the boost to the Breit frame Lorentz5Momentum pa = pout[0]-pin[0]; Axis axis(pa.vect().unit()); LorentzRotation rot; double sinth(sqrt(sqr(axis.x())+sqr(axis.y()))); if ( sinth > 1.e-9 ) rot.setRotate(-acos(axis.z()),Axis(-axis.y()/sinth,axis.x()/sinth,0.)); rot.rotateX(Constants::pi); rot.boostZ( pa.e()/pa.vect().mag()); Lorentz5Momentum ptemp=rot*pbeam; Boost trans = -1./ptemp.e()*ptemp.vect(); trans.setZ(0.); if ( trans.mag2() - 1. >= 0. ) throw KinematicsReconstructionVeto(); rot.boost(trans); pa *=rot; // project and calculate rescaling // reference vectors Lorentz5Momentum n1(ZERO,ZERO,-pa.z(),-pa.z()); Lorentz5Momentum n2(ZERO,ZERO, pa.z(),-pa.z()); Energy2 n1n2 = n1*n2; // decompose the momenta Lorentz5Momentum qbp=rot*pin[1],qcp=rot*pout[1]; qbp.rescaleMass(); qcp.rescaleMass(); double a[2],b[2]; a[0] = n2*qbp/n1n2; b[0] = n1*qbp/n1n2; Lorentz5Momentum qperp = qbp-a[0]*n1-b[0]*n2; b[1] = 0.5; a[1] = 0.5*(qcp.m2()-qperp.m2())/n1n2/b[1]; double kb; if(a[0]!=0.) { double A(0.5*a[0]),B(b[0]*a[0]-a[1]*b[1]-0.25),C(-0.5*b[0]); if(sqr(B)-4.*A*C<0.) throw KinematicsReconstructionVeto(); kb = 0.5*(-B+sqrt(sqr(B)-4.*A*C))/A; } else { kb = 0.5*b[0]/(b[0]*a[0]-a[1]*b[1]-0.25); } // changed to improve stability if(kb==0.) throw KinematicsReconstructionVeto(); if ( a[1]>b[1] && abs(a[1]) < 1e-12 ) throw KinematicsReconstructionVeto(); if ( a[1]<=b[1] && abs(0.5+b[0]/kb) < 1e-12 ) throw KinematicsReconstructionVeto(); double kc = (a[1]>b[1]) ? (a[0]*kb-0.5)/a[1] : b[1]/(0.5+b[0]/kb); if(kc==0.) throw KinematicsReconstructionVeto(); Lorentz5Momentum pnew[2] = { a[0]*kb*n1+b[0]/kb*n2+qperp, a[1]*kc*n1+b[1]/kc*n2+qperp}; LorentzRotation rotinv=rot.inverse(); for(unsigned int ix=0;ix<jets.size();++ix) { if(jets[ix]->progenitor()->isFinalState()) { deepTransform(jets[ix]->progenitor(),rot); deepTransform(jets[ix]->progenitor(),solveBoost(pnew[1],qcp)); Energy delta = jets[ix]->progenitor()->momentum().m()-jets[ix]->progenitor()->momentum().mass(); if ( abs(delta) > MeV ) throw KinematicsReconstructionVeto(); deepTransform(jets[ix]->progenitor(),rotinv); } else { tPPtr parent; boostChain(jets[ix]->progenitor(),rot,parent); boostChain(jets[ix]->progenitor(),solveBoostZ(pnew[0],qbp),parent); // check the first boost worked, and if not apply small correction to // fix energy/momentum conservation // this is a kludge but it reduces momentum non-conservation dramatically Lorentz5Momentum pdiff = pnew[0]-jets[ix]->progenitor()->momentum(); Energy2 delta = sqr(pdiff.x())+sqr(pdiff.y())+sqr(pdiff.z())+sqr(pdiff.t()); unsigned int ntry=0; while(delta>1e-6*GeV2 && ntry<5 ) { ntry +=1; boostChain(jets[ix]->progenitor(),solveBoostZ(pnew[0],jets[ix]->progenitor()->momentum()),parent); pdiff = pnew[0]-jets[ix]->progenitor()->momentum(); delta = sqr(pdiff.x())+sqr(pdiff.y())+sqr(pdiff.z())+sqr(pdiff.t()); } // apply test in breit-frame Lorentz5Momentum ptest1 = parent->momentum(); Lorentz5Momentum ptest2 = rot*pbeam; if(ptest1.z()/ptest2.z()<0. || ptest1.z()/ptest2.z()>1.) 
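// veto the reconstruction if the longitudinal boost has reversed the beam
// direction in the Breit frame or rescaled the beam beyond its original
// momentum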
throw KinematicsReconstructionVeto(); boostChain(jets[ix]->progenitor(),rotinv,parent); } } } bool QTildeReconstructor::addIntrinsicPt(vector<ShowerProgenitorPtr> jets) const { bool added=false; // add the intrinsic pt if needed for(unsigned int ix=0;ix<jets.size();++ix) { // only for initial-state particles which haven't radiated if(jets[ix]->progenitor()->isFinalState()|| jets[ix]->hasEmitted()|| jets[ix]->reconstructed()==ShowerProgenitor::dontReconstruct) continue; if(_intrinsic.find(jets[ix])==_intrinsic.end()) continue; pair<Energy,double> pt=_intrinsic[jets[ix]]; Energy etemp = jets[ix]->original()->parents()[0]->momentum().z(); Lorentz5Momentum p_basis(ZERO, ZERO, etemp, abs(etemp)), n_basis(ZERO, ZERO,-etemp, abs(etemp)); double alpha = jets[ix]->progenitor()->x(); double beta = 0.5*(sqr(jets[ix]->progenitor()->data().mass())+ sqr(pt.first))/alpha/(p_basis*n_basis); Lorentz5Momentum pnew=alpha*p_basis+beta*n_basis; pnew.setX(pt.first*cos(pt.second)); pnew.setY(pt.first*sin(pt.second)); pnew.rescaleMass(); jets[ix]->progenitor()->set5Momentum(pnew); added = true; } return added; } LorentzRotation QTildeReconstructor:: solveBoost(const double k, const Lorentz5Momentum & newq, const Lorentz5Momentum & oldp ) const { Energy q = newq.vect().mag(); Energy2 qs = sqr(q); Energy2 Q2 = newq.mass2(); Energy kp = k*(oldp.vect().mag()); Energy2 kps = sqr(kp); double betam = (q*newq.e() - kp*sqrt(kps + Q2))/(kps + qs + Q2); if ( abs(betam) - 1. >= 0. ) throw KinematicsReconstructionVeto(); Boost beta = -betam*(k/kp)*oldp.vect(); double gamma = 0.; if(Q2/sqr(oldp.e())>1e-4) { if(betam<0.5) { gamma = 1./sqrt(1.-sqr(betam)); } else { gamma = ( kps+ qs + Q2)/ sqrt(2.*kps*qs + kps*Q2 + qs*Q2 + sqr(Q2) + 2.*q*newq.e()*kp*sqrt(kps + Q2)); } } else { if(k>0) { gamma = 4.*kps*qs/sqr(kps +qs) + 2.*sqr(kps-qs)*Q2/pow<3,1>(kps +qs) - 0.25*( sqr(kps) + 14.*kps*qs + sqr(qs))*sqr(kps-qs)/(pow<4,1>(kps +qs)*kps*qs)*sqr(Q2); } else { gamma = 0.25*sqr(Q2)/(kps*qs)*(1. - 0.5*(kps+qs)/(kps*qs)*Q2); } if(gamma<=0.) throw KinematicsReconstructionVeto(); gamma = 1./sqrt(gamma); } // note that (k/kp)*oldp.vect() = oldp.vect()/oldp.vect().mag() but cheaper. ThreeVector<Energy2> ax = newq.vect().cross( oldp.vect() ); double delta = newq.vect().angle( oldp.vect() ); LorentzRotation R; using Constants::pi; Energy2 scale1 = sqr(newq.x())+ sqr(newq.y())+sqr(newq.z()); Energy2 scale2 = sqr(oldp.x())+ sqr(oldp.y())+sqr(oldp.z()); if ( ax.mag2()/scale1/scale2 > 1e-28 ) { R.rotate( delta, unitVector(ax) ).boost( beta , gamma ); } else if(abs(delta-pi)/pi < 0.001) { double phi=2.*pi*UseRandom::rnd(); Axis axis(cos(phi),sin(phi),0.); axis.rotateUz(newq.vect().unit()); R.rotate(delta,axis).boost( beta , gamma ); } else { R.boost( beta , gamma ); } return R; } LorentzRotation QTildeReconstructor::solveBoost(const Lorentz5Momentum & q, const Lorentz5Momentum & p ) const { Energy modp = p.vect().mag(); Energy modq = q.vect().mag(); double betam = (p.e()*modp-q.e()*modq)/(sqr(modq)+sqr(modp)+p.mass2()); if ( abs(betam)-1. >= 0. ) throw KinematicsReconstructionVeto(); Boost beta = -betam*q.vect().unit(); ThreeVector<Energy2> ax = p.vect().cross( q.vect() ); double delta = p.vect().angle( q.vect() ); LorentzRotation R; using Constants::pi; if ( beta.mag2() - 1. >= 0. 
) throw KinematicsReconstructionVeto(); if ( ax.mag2()/GeV2/MeV2 > 1e-16 ) { R.rotate( delta, unitVector(ax) ).boost( beta ); } else { R.boost( beta ); } return R; } LorentzRotation QTildeReconstructor::solveBoostZ(const Lorentz5Momentum & q, const Lorentz5Momentum & p ) const { static const double eps = 1e-6; LorentzRotation R; double beta; Energy2 mt2 = p.mass()<ZERO ? -sqr(p.mass())+sqr(p.x())+sqr(p.y()) : sqr(p.mass())+sqr(p.x())+sqr(p.y()) ; double ratio = mt2/(sqr(p.t())+sqr(q.t())); if(abs(ratio)>eps) { double erat = (q.t()+q.z())/(p.t()+p.z()); Energy2 den = mt2*(erat+1./erat); Energy2 num = (q.z()-p.z())*(q.t()+p.t()) + (p.z()+q.z())*(p.t()-q.t()); beta = num/den; if ( abs(beta) - 1. >= 0. ) throw KinematicsReconstructionVeto(); R.boostZ(beta); } else { double er = sqr(p.t()/q.t()); double x = ratio+0.125*(er+10.+1./er)*sqr(ratio); beta = -(p.t()-q.t())*(p.t()+q.t())/(sqr(p.t())+sqr(q.t()))*(1.+x); double gamma = (4.*sqr(p.t()*q.t()) +sqr(p.t()-q.t())*sqr(p.t()+q.t())* (-2.*x+sqr(x)))/sqr(sqr(p.t())+sqr(q.t())); if ( abs(beta) - 1. >= 0. ) throw KinematicsReconstructionVeto(); gamma = 1./sqrt(gamma); R.boost(0.,0.,beta,gamma); } Lorentz5Momentum ptest = R*p; if(ptest.z()/q.z() < 0. || ptest.t()/q.t() < 0. ) { throw KinematicsReconstructionVeto(); } return R; } void QTildeReconstructor:: reconstructFinalStateSystem(bool applyBoost, const LorentzRotation & toRest, const LorentzRotation & fromRest, vector<ShowerProgenitorPtr> jets) const { LorentzRotation trans = applyBoost? toRest : LorentzRotation(); // special for case of individual particle if(jets.size()==1) { deepTransform(jets[0]->progenitor(),trans); deepTransform(jets[0]->progenitor(),fromRest); return; } bool radiated(false); // find the hard process centre-of-mass energy Lorentz5Momentum pcm; // check if radiated and calculate total momentum for(unsigned int ix=0;ix<jets.size();++ix) { radiated |=jets[ix]->hasEmitted(); pcm += jets[ix]->progenitor()->momentum(); } if(applyBoost) pcm *= trans; // check if in CMF frame Boost beta_cm = pcm.findBoostToCM(); bool gottaBoost(false); if(beta_cm.mag() > 1e-12) { gottaBoost = true; trans.boost(beta_cm); } // collection of pointers to initial hard particle and jet momenta // for final boosts JetKinVect jetKinematics; vector<ShowerProgenitorPtr>::const_iterator cit; for(cit = jets.begin(); cit != jets.end(); cit++) { JetKinStruct tempJetKin; tempJetKin.parent = (*cit)->progenitor(); if(applyBoost || gottaBoost) { deepTransform(tempJetKin.parent,trans); } tempJetKin.p = (*cit)->progenitor()->momentum(); _progenitor=tempJetKin.parent; if((**cit).reconstructed()==ShowerProgenitor::notReconstructed) { radiated |= reconstructTimeLikeJet((*cit)->progenitor()); (**cit).reconstructed(ShowerProgenitor::done); } else { radiated |= !(*cit)->progenitor()->children().empty(); } tempJetKin.q = (*cit)->progenitor()->momentum(); jetKinematics.push_back(tempJetKin); } // default option rescale everything with the same factor if( _finalStateReconOption == 0 || jetKinematics.size() <= 2 ) { // find the rescaling factor double k = 0.0; if(radiated) { k = solveKfactor(pcm.m(), jetKinematics); // perform the rescaling and boosts for(JetKinVect::iterator it = jetKinematics.begin(); it != jetKinematics.end(); ++it) { LorentzRotation Trafo = solveBoost(k, it->q, it->p); deepTransform(it->parent,Trafo); } } } // different treatment of most off-shell else if ( _finalStateReconOption <= 4 ) { // sort the jets by virtuality std::sort(jetKinematics.begin(),jetKinematics.end(),JetOrdering()); // Bryan's procedures 
from FORTRAN if( _finalStateReconOption <=2 ) { // loop over the off-shell partons, _finalStateReconOption==1 only first ==2 all JetKinVect::const_iterator jend = _finalStateReconOption==1 ? jetKinematics.begin()+1 : jetKinematics.end(); for(JetKinVect::const_iterator jit=jetKinematics.begin(); jit!=jend;++jit) { // calculate the 4-momentum of the recoiling system Lorentz5Momentum psum; bool done = true; for(JetKinVect::const_iterator it=jetKinematics.begin();it!=jetKinematics.end();++it) { if(it==jit) { done = false; continue; } // first option put on-shell and sum 4-momenta if( _finalStateReconOption == 1 ) { LorentzRotation Trafo = solveBoost(1., it->q, it->p); deepTransform(it->parent,Trafo); psum += it->parent->momentum(); } // second option, sum momenta else { // already rescaled if(done) psum += it->parent->momentum(); // still needs to be rescaled else psum += it->p; } } // set the mass psum.rescaleMass(); // calculate the 3-momentum rescaling factor Energy2 s(pcm.m2()); Energy2 m1sq(jit->q.m2()),m2sq(psum.m2()); Energy4 num = sqr(s - m1sq - m2sq) - 4.*m1sq*m2sq; if(num<ZERO) throw KinematicsReconstructionVeto(); double k = sqrt( num / (4.*s*jit->p.vect().mag2()) ); // boost the off-shell parton LorentzRotation B1 = solveBoost(k, jit->q, jit->p); deepTransform(jit->parent,B1); // boost everything else to rescale LorentzRotation B2 = solveBoost(k, psum, psum); for(JetKinVect::iterator it=jetKinematics.begin();it!=jetKinematics.end();++it) { if(it==jit) continue; deepTransform(it->parent,B2); it->p *= B2; it->q *= B2; } } } // Peter's C++ procedures else { reconstructFinalFinalOffShell(jetKinematics,pcm.m2(), _finalStateReconOption == 4); } } else assert(false); // apply the final boosts if(gottaBoost || applyBoost) { LorentzRotation finalBoosts; if(gottaBoost) finalBoosts.boost(-beta_cm); if(applyBoost) finalBoosts.transform(fromRest); for(JetKinVect::iterator it = jetKinematics.begin(); it != jetKinematics.end(); ++it) { deepTransform(it->parent,finalBoosts); } } } void QTildeReconstructor:: reconstructInitialInitialSystem(bool & applyBoost, LorentzRotation & toRest, LorentzRotation & fromRest, vector<ShowerProgenitorPtr> jets) const { bool radiated = false; Lorentz5Momentum pcm; // check whether particles radiated and calculate total momentum for( unsigned int ix = 0; ix < jets.size(); ++ix ) { radiated |= jets[ix]->hasEmitted(); pcm += jets[ix]->progenitor()->momentum(); if(jets[ix]->original()->parents().empty()) return; } pcm.rescaleMass(); // check if intrinsic pt to be added radiated |= !_intrinsic.empty(); // if no radiation return if(!radiated) { for(unsigned int ix=0;ix<jets.size();++ix) { if(jets[ix]->reconstructed()==ShowerProgenitor::notReconstructed) jets[ix]->reconstructed(ShowerProgenitor::done); } return; } // initial state shuffling applyBoost=false; vector<Lorentz5Momentum> p, pq, p_in; vector<Energy> pts; for(unsigned int ix=0;ix<jets.size();++ix) { // add momentum to vector p_in.push_back(jets[ix]->progenitor()->momentum()); // reconstruct the jet if(jets[ix]->reconstructed()==ShowerProgenitor::notReconstructed) { radiated |= reconstructSpaceLikeJet(jets[ix]->progenitor()); jets[ix]->reconstructed(ShowerProgenitor::done); } assert(!jets[ix]->original()->parents().empty()); Energy etemp = jets[ix]->original()->parents()[0]->momentum().z(); Lorentz5Momentum ptemp = Lorentz5Momentum(ZERO, ZERO, etemp, abs(etemp)); pq.push_back(ptemp); pts.push_back(jets[ix]->highestpT()); } // add the intrinsic pt if needed radiated |=addIntrinsicPt(jets); for(unsigned int 
ix=0;ix<jets.size();++ix) { p.push_back(jets[ix]->progenitor()->momentum()); } double x1 = p_in[0].z()/pq[0].z(); double x2 = p_in[1].z()/pq[1].z(); vector<double> beta=initialStateRescaling(x1,x2,p_in[0]+p_in[1],p,pq,pts); // if not needed don't apply the boosts if(!(radiated && p.size() == 2 && pq.size() == 2)) return; applyBoost=true; // apply the boosts Lorentz5Momentum newcmf; for(unsigned int ix=0;ix<jets.size();++ix) { tPPtr toBoost = jets[ix]->progenitor(); Boost betaboost(0, 0, beta[ix]); tPPtr parent; boostChain(toBoost, LorentzRotation(0.,0.,beta[ix]),parent); if(parent->momentum().e()/pq[ix].e()>1.|| parent->momentum().z()/pq[ix].z()>1.) throw KinematicsReconstructionVeto(); newcmf+=toBoost->momentum(); } if(newcmf.m()<ZERO||newcmf.e()<ZERO) throw KinematicsReconstructionVeto(); findInitialBoost(pcm,newcmf,toRest,fromRest); } void QTildeReconstructor:: deconstructInitialInitialSystem(bool & applyBoost, LorentzRotation & toRest, LorentzRotation & fromRest, HardTreePtr tree, vector<HardBranchingPtr> jets, ShowerInteraction::Type) const { assert(jets.size()==2); // put beam with +z first if(jets[0]->beam()->momentum().z()<ZERO) swap(jets[0],jets[1]); // get the momenta of the particles vector<Lorentz5Momentum> pin,pq; for(unsigned int ix=0;ix<jets.size();++ix) { pin.push_back(jets[ix]->branchingParticle()->momentum()); Energy etemp = jets[ix]->beam()->momentum().z(); pq.push_back(Lorentz5Momentum(ZERO, ZERO,etemp, abs(etemp))); } // calculate the rescaling double x[2]; Lorentz5Momentum pcm=pin[0]+pin[1]; assert(pcm.mass2()>ZERO); pcm.rescaleMass(); vector<double> boost = inverseInitialStateRescaling(x[0],x[1],pcm,pin,pq); set<HardBranchingPtr>::const_iterator cjt=tree->incoming().begin(); HardBranchingPtr incoming[2]; incoming[0] = *cjt; ++cjt; incoming[1] = *cjt; if((*tree->incoming().begin())->beam()->momentum().z()/pq[0].z()<0.)
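// pq[0] corresponds to the +z beam by construction, so if the first
// incoming branching belongs to the -z beam swap the two to keep
// incoming[ix] aligned with pq[ix]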
swap(incoming[0],incoming[1]); // apply the boost the the particles unsigned int iswap[2]={1,0}; for(unsigned int ix=0;ix<2;++ix) { LorentzRotation R(0.,0.,-boost[ix]); incoming[ix]->pVector(pq[ix]); incoming[ix]->nVector(pq[iswap[ix]]); incoming[ix]->setMomenta(R,1.,Lorentz5Momentum()); jets[ix]->showerMomentum(x[ix]*jets[ix]->pVector()); } // and calculate the boosts applyBoost=true; // do one boost if(_initialBoost==0) { toRest = LorentzRotation(-pcm.boostVector()); } else if(_initialBoost==1) { // first the transverse boost Energy pT = sqrt(sqr(pcm.x())+sqr(pcm.y())); double beta = -pT/pcm.t(); toRest=LorentzRotation(Boost(beta*pcm.x()/pT,beta*pcm.y()/pT,0.)); // the longitudinal beta = pcm.z()/sqrt(pcm.m2()+sqr(pcm.z())); toRest.boost(Boost(0.,0.,-beta)); } else assert(false); fromRest = LorentzRotation((jets[0]->showerMomentum()+ jets[1]->showerMomentum()).boostVector()); } void QTildeReconstructor:: deconstructFinalStateSystem(const LorentzRotation & toRest, const LorentzRotation & fromRest, HardTreePtr tree, vector<HardBranchingPtr> jets, ShowerInteraction::Type type) const { LorentzRotation trans = toRest; if(jets.size()==1) { Lorentz5Momentum pnew = toRest*(jets[0]->branchingParticle()->momentum()); pnew *= fromRest; jets[0]-> original(pnew); jets[0]->showerMomentum(pnew); // find the colour partners ShowerParticleVector particles; vector<Lorentz5Momentum> ptemp; set<HardBranchingPtr>::const_iterator cjt; for(cjt=tree->branchings().begin();cjt!=tree->branchings().end();++cjt) { ptemp.push_back((**cjt).branchingParticle()->momentum()); (**cjt).branchingParticle()->set5Momentum((**cjt).showerMomentum()); particles.push_back((**cjt).branchingParticle()); } dynamic_ptr_cast<tcQTildeShowerHandlerPtr>(ShowerHandler::currentHandler())->showerModel()->partnerFinder() ->setInitialEvolutionScales(particles,false,type,false); // calculate the reference vectors unsigned int iloc(0); set<HardBranchingPtr>::iterator clt; for(cjt=tree->branchings().begin();cjt!=tree->branchings().end();++cjt) { // reset the momentum (**cjt).branchingParticle()->set5Momentum(ptemp[iloc]); ++iloc; // sort out the partners tShowerParticlePtr partner = (*cjt)->branchingParticle()->partner(); if(!partner) continue; for(clt=tree->branchings().begin();clt!=tree->branchings().end();++clt) { if((**clt).branchingParticle()==partner) { (**cjt).colourPartner(*clt); break; } } tHardBranchingPtr branch; for(clt=tree->branchings().begin();clt!=tree->branchings().end();++clt) { if(clt==cjt) continue; if((*clt)->branchingParticle()==partner) { branch=*clt; break; } } } return; } vector<HardBranchingPtr>::iterator cit; vector<Lorentz5Momentum> pout; vector<Energy> mon; Lorentz5Momentum pin; for(cit=jets.begin();cit!=jets.end();++cit) { pout.push_back((*cit)->branchingParticle()->momentum()); mon.push_back(findMass(*cit)); pin+=pout.back(); } // boost all the momenta to the rest frame of the decaying particle pin.rescaleMass(); pin *=trans; Boost beta_cm = pin.findBoostToCM(); bool gottaBoost(false); if(beta_cm.mag() > 1e-12) { gottaBoost = true; trans.boost(beta_cm); pin.boost(beta_cm); } for(unsigned int ix=0;ix<pout.size();++ix) { pout[ix].transform(trans); } // rescaling factor double lambda=inverseRescalingFactor(pout,mon,pin.mass()); if (lambda< 1.e-10) throw KinematicsReconstructionVeto(); // now calculate the p reference vectors for(unsigned int ix=0;ix<jets.size();++ix) { Lorentz5Momentum pvect = jets[ix]->branchingParticle()->momentum(); pvect.transform(trans); pvect /= lambda; pvect.setMass(mon[ix]); 
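// with the three-momentum scaled by 1/lambda and the on-shell mass set,
// rescaleEnergy() recomputes the energy so that the reference vector is
// exactly on the mass shell before being transformed back with fromRest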
pvect.rescaleEnergy(); if(gottaBoost) pvect.boost(-beta_cm); pvect.transform(fromRest); jets[ix]->pVector(pvect); jets[ix]->showerMomentum(pvect); } // find the colour partners ShowerParticleVector particles; vector<Lorentz5Momentum> ptemp; set<HardBranchingPtr>::const_iterator cjt; for(cjt=tree->branchings().begin();cjt!=tree->branchings().end();++cjt) { ptemp.push_back((**cjt).branchingParticle()->momentum()); (**cjt).branchingParticle()->set5Momentum((**cjt).showerMomentum()); particles.push_back((**cjt).branchingParticle()); } dynamic_ptr_cast<tcQTildeShowerHandlerPtr>(ShowerHandler::currentHandler())->showerModel()->partnerFinder() ->setInitialEvolutionScales(particles,false,type,false); // calculate the reference vectors unsigned int iloc(0); set<HardBranchingPtr>::iterator clt; for(cjt=tree->branchings().begin();cjt!=tree->branchings().end();++cjt) { // reset the momentum (**cjt).branchingParticle()->set5Momentum(ptemp[iloc]); ++iloc; } for(cjt=tree->branchings().begin();cjt!=tree->branchings().end();++cjt) { // sort out the partners tShowerParticlePtr partner = (*cjt)->branchingParticle()->partner(); if(!partner) continue; for(clt=tree->branchings().begin();clt!=tree->branchings().end();++clt) { if((**clt).branchingParticle()==partner) { (**cjt).colourPartner(*clt); break; } } tHardBranchingPtr branch; for(clt=tree->branchings().begin();clt!=tree->branchings().end();++clt) { if(clt==cjt) continue; if((*clt)->branchingParticle()==partner) { branch=*clt; break; } } // compute the reference vectors // both incoming, should all ready be done if((**cjt).status()==HardBranching::Incoming && (**clt).status()==HardBranching::Incoming) { continue; } // both outgoing else if((**cjt).status()!=HardBranching::Incoming&& branch->status()==HardBranching::Outgoing) { Boost boost=((*cjt)->pVector()+branch->pVector()).findBoostToCM(); Lorentz5Momentum pcm = branch->pVector(); pcm.boost(boost); Lorentz5Momentum nvect = Lorentz5Momentum(ZERO,pcm.vect()); nvect.boost( -boost); (**cjt).nVector(nvect); } else if((**cjt).status()==HardBranching::Incoming) { Lorentz5Momentum pa = -(**cjt).showerMomentum()+branch->showerMomentum(); Lorentz5Momentum pb = (**cjt).showerMomentum(); Axis axis(pa.vect().unit()); LorentzRotation rot; double sinth(sqrt(sqr(axis.x())+sqr(axis.y()))); rot.setRotate(-acos(axis.z()),Axis(-axis.y()/sinth,axis.x()/sinth,0.)); rot.rotateX(Constants::pi); rot.boostZ( pa.e()/pa.vect().mag()); pb*=rot; Boost trans = -1./pb.e()*pb.vect(); trans.setZ(0.); rot.boost(trans); Energy scale=(**cjt).beam()->momentum().e(); Lorentz5Momentum pbasis(ZERO,(**cjt).beam()->momentum().vect().unit()*scale); Lorentz5Momentum pcm = rot*pbasis; rot.invert(); (**cjt).nVector(rot*Lorentz5Momentum(ZERO,-pcm.vect())); tHardBranchingPtr branch2 = *cjt;; while (branch2->parent()) { branch2=branch2->parent(); branch2->nVector(rot*Lorentz5Momentum(ZERO,-pcm.vect())); } } else if(branch->status()==HardBranching::Incoming) { (**cjt).nVector(Lorentz5Momentum(ZERO,branch->showerMomentum().vect())); } } // now compute the new momenta for(cjt=tree->branchings().begin();cjt!=tree->branchings().end();++cjt) { if(!(*cjt)->branchingParticle()->isFinalState()) continue; Lorentz5Momentum qnew; if((*cjt)->branchingParticle()->partner()) { Energy2 dot=(*cjt)->pVector()*(*cjt)->nVector(); double beta = 0.5*((*cjt)->branchingParticle()->momentum().m2() -sqr((*cjt)->pVector().mass()))/dot; qnew=(*cjt)->pVector()+beta*(*cjt)->nVector(); qnew.rescaleMass(); } else { qnew = (*cjt)->pVector(); } // qnew is the unshuffled momentum in the 
rest frame of the p basis vectors, // for the simple case Z->q qbar g this was checked against analytic formulae. // compute the boost LorentzRotation R=solveBoost(qnew, toRest*(*cjt)->branchingParticle()->momentum())*toRest; (*cjt)->setMomenta(R,1.0,Lorentz5Momentum()); } } Energy QTildeReconstructor::momConsEq(double k, const Energy & root_s, const JetKinVect & jets) const { static const Energy2 eps=1e-8*GeV2; Energy dum = ZERO; for(JetKinVect::const_iterator it = jets.begin(); it != jets.end(); ++it) { Energy2 dum2 = (it->q).m2() + sqr(k)*(it->p).vect().mag2(); if(dum2 < ZERO) { if(dum2 < -eps) throw KinematicsReconstructionVeto(); dum2 = ZERO; } dum += sqrt(dum2); } return dum - root_s; } void QTildeReconstructor::boostChain(tPPtr p, const LorentzRotation &bv, tPPtr & parent) const { if(!p->parents().empty()) boostChain(p->parents()[0], bv,parent); else parent=p; p->transform(bv); if(p->children().size()==2) { if(dynamic_ptr_cast<ShowerParticlePtr>(p->children()[1])) deepTransform(p->children()[1],bv); } } namespace { bool sortJets(ShowerProgenitorPtr j1, ShowerProgenitorPtr j2) { return j1->highestpT()>j2->highestpT(); } } void QTildeReconstructor:: reconstructGeneralSystem(vector<ShowerProgenitorPtr> & ShowerHardJets) const { // find initial- and final-state systems ColourSingletSystem in,out; for(unsigned int ix=0;ix<ShowerHardJets.size();++ix) { if(ShowerHardJets[ix]->progenitor()->isFinalState()) out.jets.push_back(ShowerHardJets[ix]); else in.jets.push_back(ShowerHardJets[ix]); } // reconstruct initial-initial system LorentzRotation toRest,fromRest; bool applyBoost(false); // reconstruct initial-initial system reconstructInitialInitialSystem(applyBoost,toRest,fromRest,in.jets); // reconstruct the final-state systems reconstructFinalStateSystem(applyBoost,toRest,fromRest,out.jets); } void QTildeReconstructor:: reconstructFinalFirst(vector<ShowerProgenitorPtr> & ShowerHardJets) const { static const Energy2 minQ2 = 1e-4*GeV2; map<ShowerProgenitorPtr,bool> used; for(unsigned int ix=0;ix<ShowerHardJets.size();++ix) { used[ShowerHardJets[ix]] = false; } // first to the final-state reconstruction of any systems which need it set<ShowerProgenitorPtr> outgoing; // first find any particles with final state partners for(unsigned int ix=0;ix<ShowerHardJets.size();++ix) { if(ShowerHardJets[ix]->progenitor()->isFinalState()&& ShowerHardJets[ix]->progenitor()->partner()&& ShowerHardJets[ix]->progenitor()->partner()->isFinalState()) outgoing.insert(ShowerHardJets[ix]); } // then find the colour partners if(!outgoing.empty()) { set<ShowerProgenitorPtr> partners; for(set<ShowerProgenitorPtr>::const_iterator it=outgoing.begin();it!=outgoing.end();++it) { for(unsigned int ix=0;ix<ShowerHardJets.size();++ix) { if((**it).progenitor()->partner()==ShowerHardJets[ix]->progenitor()) { partners.insert(ShowerHardJets[ix]); break; } } } outgoing.insert(partners.begin(),partners.end()); } // do the final-state reconstruction if needed if(!outgoing.empty()) { assert(outgoing.size()!=1); LorentzRotation toRest,fromRest; vector<ShowerProgenitorPtr> outgoingJets(outgoing.begin(),outgoing.end()); reconstructFinalStateSystem(false,toRest,fromRest,outgoingJets); } // Now do any initial-final systems which are needed vector<ColourSingletSystem> IFSystems; // find the systems N.B. 
can have duplicates // find initial-state with FS partners or FS with IS partners for(unsigned int ix=0;ix<ShowerHardJets.size();++ix) { if(!ShowerHardJets[ix]->progenitor()->isFinalState()&& ShowerHardJets[ix]->progenitor()->partner()&& ShowerHardJets[ix]->progenitor()->partner()->isFinalState()) { IFSystems.push_back(ColourSingletSystem(IF,ShowerHardJets[ix])); } else if(ShowerHardJets[ix]->progenitor()->isFinalState()&& ShowerHardJets[ix]->progenitor()->partner()&& !ShowerHardJets[ix]->progenitor()->partner()->isFinalState()) { IFSystems.push_back(ColourSingletSystem(IF,ShowerHardJets[ix])); } } // then add the partners for(unsigned int is=0;is<IFSystems.size();++is) { for(unsigned int ix=0;ix<ShowerHardJets.size();++ix) { if(IFSystems[is].jets[0]->progenitor()->partner()==ShowerHardJets[ix]->progenitor()) { IFSystems[is].jets.push_back(ShowerHardJets[ix]); } } // ensure incoming first if(IFSystems[is].jets[0]->progenitor()->isFinalState()) swap(IFSystems[is].jets[0],IFSystems[is].jets[1]); } if(!IFSystems.empty()) { unsigned int istart = UseRandom::irnd(IFSystems.size()); unsigned int istop=IFSystems.size(); for(unsigned int is=istart;is<=istop;++is) { if(is==IFSystems.size()) { if(istart!=0) { istop = istart-1; is=0; } else break; } // skip duplicates if(used[IFSystems[is].jets[0]] && used[IFSystems[is].jets[1]] ) continue; if(IFSystems[is].jets[0]->original()&&IFSystems[is].jets[0]->original()->parents().empty()) continue; Lorentz5Momentum psum; for(unsigned int ix=0;ix<IFSystems[is].jets.size();++ix) { if(IFSystems[is].jets[ix]->progenitor()->isFinalState()) psum += IFSystems[is].jets[ix]->progenitor()->momentum(); else psum -= IFSystems[is].jets[ix]->progenitor()->momentum(); } if(-psum.m2()>minQ2) { reconstructInitialFinalSystem(IFSystems[is].jets); for(unsigned int ix=0;ix<IFSystems[is].jets.size();++ix) { used[IFSystems[is].jets[ix]] = true; } } } } // now we finally need to handle the initial state system ColourSingletSystem in,out; for(unsigned int ix=0;ix<ShowerHardJets.size();++ix) { if(ShowerHardJets[ix]->progenitor()->isFinalState()) out.jets.push_back(ShowerHardJets[ix]); else in.jets.push_back(ShowerHardJets[ix]); } // reconstruct initial-initial system bool doRecon = false; for(unsigned int ix=0;ix<in.jets.size();++ix) { if(!used[in.jets[ix]]) { doRecon = true; break; } } LorentzRotation toRest,fromRest; bool applyBoost(false); if(doRecon) { reconstructInitialInitialSystem(applyBoost,toRest,fromRest,in.jets); } // reconstruct the final-state systems if(!doRecon) { for(unsigned int ix=0;ix<out.jets.size();++ix) { if(!used[out.jets[ix]]) { doRecon = true; break; } } } if(doRecon) { reconstructFinalStateSystem(applyBoost,toRest,fromRest,out.jets); } } void QTildeReconstructor:: reconstructColourPartner(vector<ShowerProgenitorPtr> & ShowerHardJets) const { static const Energy2 minQ2 = 1e-4*GeV2; // sort the vector by hardness of emission std::sort(ShowerHardJets.begin(),ShowerHardJets.end(),sortJets); // map between particles and progenitors for easy lookup map<ShowerParticlePtr,ShowerProgenitorPtr> progenitorMap; for(unsigned int ix=0;ix<ShowerHardJets.size();++ix) { progenitorMap[ShowerHardJets[ix]->progenitor()] = ShowerHardJets[ix]; } // check that the IF systems can be reconstructed bool canReconstruct = true; for(unsigned int ix=0;ix<ShowerHardJets.size();++ix) { tShowerParticlePtr progenitor = ShowerHardJets[ix]->progenitor(); tShowerParticlePtr partner = progenitor->partner(); if(!partner) continue; else if((progenitor->isFinalState() && !partner->isFinalState()) || 
(!progenitor->isFinalState() && partner->isFinalState()) ) { vector<ShowerProgenitorPtr> jets(2); jets[0] = ShowerHardJets[ix]; jets[1] = progenitorMap[partner]; Lorentz5Momentum psum; for(unsigned int iy=0;iy<jets.size();++iy) { if(jets[iy]->progenitor()->isFinalState()) psum += jets[iy]->progenitor()->momentum(); else psum -= jets[iy]->progenitor()->momentum(); } if(-psum.m2()<minQ2) { canReconstruct = false; break; } } } if(!canReconstruct) { reconstructGeneralSystem(ShowerHardJets); return; } map<ShowerProgenitorPtr,bool> used; for(unsigned int ix=0;ix<ShowerHardJets.size();++ix) { used[ShowerHardJets[ix]] = false; } for(unsigned int ix=0;ix<ShowerHardJets.size();++ix) { // skip jets which have already been handled if(ShowerHardJets[ix]->reconstructed()==ShowerProgenitor::done) continue; // already reconstructed if(used[ShowerHardJets[ix]]) continue; // no partner continue tShowerParticlePtr progenitor = ShowerHardJets[ix]->progenitor(); tShowerParticlePtr partner = progenitor->partner(); if(!partner) { // check if there's a daughter tree which also needs boosting Lorentz5Momentum porig = progenitor->momentum(); map<tShowerTreePtr,pair<tShowerProgenitorPtr,tShowerParticlePtr> >::const_iterator tit; for(tit = _currentTree->treelinks().begin(); tit != _currentTree->treelinks().end();++tit) { // if there is, boost it if(tit->second.first && tit->second.second==progenitor) { Lorentz5Momentum pnew = tit->first->incomingLines().begin() ->first->progenitor()->momentum(); pnew *= tit->first->transform(); Lorentz5Momentum pdiff = porig-pnew; Energy2 test = sqr(pdiff.x()) + sqr(pdiff.y()) + sqr(pdiff.z()) + sqr(pdiff.t()); LorentzRotation rot; if(test>1e-6*GeV2) rot = solveBoost(porig,pnew); tit->first->transform(rot,false); _treeBoosts[tit->first].push_back(rot); } } ShowerHardJets[ix]->reconstructed(ShowerProgenitor::done); continue; } // do the reconstruction // final-final if(progenitor->isFinalState() && partner->isFinalState() ) { LorentzRotation toRest,fromRest; vector<ShowerProgenitorPtr> jets(2); jets[0] = ShowerHardJets[ix]; jets[1] = progenitorMap[partner]; if(_reconopt==4 && jets[1]->reconstructed()==ShowerProgenitor::notReconstructed) jets[1]->reconstructed(ShowerProgenitor::dontReconstruct); reconstructFinalStateSystem(false,toRest,fromRest,jets); if(_reconopt==4 && jets[1]->reconstructed()==ShowerProgenitor::dontReconstruct) jets[1]->reconstructed(ShowerProgenitor::notReconstructed); used[jets[0]] = true; if(_reconopt==3) used[jets[1]] = true; } // initial-final else if((progenitor->isFinalState() && !partner->isFinalState()) || (!progenitor->isFinalState() && partner->isFinalState()) ) { vector<ShowerProgenitorPtr> jets(2); jets[0] = ShowerHardJets[ix]; jets[1] = progenitorMap[partner]; if(jets[0]->progenitor()->isFinalState()) swap(jets[0],jets[1]); if(jets[0]->original()&&jets[0]->original()->parents().empty()) continue; Lorentz5Momentum psum; for(unsigned int iy=0;iy<jets.size();++iy) { if(jets[iy]->progenitor()->isFinalState()) psum += jets[iy]->progenitor()->momentum(); else psum -= jets[iy]->progenitor()->momentum(); } if(_reconopt==4 && progenitorMap[partner]->reconstructed()==ShowerProgenitor::notReconstructed) progenitorMap[partner]->reconstructed(ShowerProgenitor::dontReconstruct); reconstructInitialFinalSystem(jets); if(_reconopt==4 && progenitorMap[partner]->reconstructed()==ShowerProgenitor::dontReconstruct) progenitorMap[partner]->reconstructed(ShowerProgenitor::notReconstructed); used[ShowerHardJets[ix]] = true; if(_reconopt==3) used[progenitorMap[partner]] = true; 
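// with ReconstructionOption 3 the colour partner is marked as handled here,
// whereas with option 4 it is left unmarked and is reconstructed in its own
// right later in the loop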
} // initial-initial else if(!progenitor->isFinalState() && !partner->isFinalState() ) { ColourSingletSystem in,out; in.jets.push_back(ShowerHardJets[ix]); in.jets.push_back(progenitorMap[partner]); for(unsigned int iy=0;iy<ShowerHardJets.size();++iy) { if(ShowerHardJets[iy]->progenitor()->isFinalState()) out.jets.push_back(ShowerHardJets[iy]); } LorentzRotation toRest,fromRest; bool applyBoost(false); if(_reconopt==4 && in.jets[1]->reconstructed()==ShowerProgenitor::notReconstructed) in.jets[1]->reconstructed(ShowerProgenitor::dontReconstruct); reconstructInitialInitialSystem(applyBoost,toRest,fromRest,in.jets); if(_reconopt==4 && in.jets[1]->reconstructed()==ShowerProgenitor::dontReconstruct) in.jets[1]->reconstructed(ShowerProgenitor::notReconstructed); used[in.jets[0]] = true; if(_reconopt==3) used[in.jets[1]] = true; for(unsigned int iy=0;iy<out.jets.size();++iy) { if(out.jets[iy]->reconstructed()==ShowerProgenitor::notReconstructed) out.jets[iy]->reconstructed(ShowerProgenitor::dontReconstruct); } // reconstruct the final-state systems LorentzRotation finalBoosts; finalBoosts.transform( toRest); finalBoosts.transform(fromRest); for(unsigned int iy=0;iy<out.jets.size();++iy) { deepTransform(out.jets[iy]->progenitor(),finalBoosts); } for(unsigned int iy=0;iy<out.jets.size();++iy) { if(out.jets[iy]->reconstructed()==ShowerProgenitor::dontReconstruct) out.jets[iy]->reconstructed(ShowerProgenitor::notReconstructed); } } } } bool QTildeReconstructor:: inverseDecayRescalingFactor(vector<Lorentz5Momentum> pout, vector<Energy> mon,Energy roots, Lorentz5Momentum ppartner, Energy mbar, double & k1, double & k2) const { ThreeVector<Energy> qtotal; vector<Energy2> pmag; for(unsigned int ix=0;ix<pout.size();++ix) { pmag.push_back(pout[ix].vect().mag2()); qtotal+=pout[ix].vect(); } Energy2 dot1 = qtotal*ppartner.vect(); Energy2 qmag2=qtotal.mag2(); double a = -dot1/qmag2; static const Energy eps=1e-10*GeV; unsigned int itry(0); Energy numer(ZERO),denom(ZERO); k1=1.; do { ++itry; numer=denom=0.*GeV; double k12=sqr(k1); for(unsigned int ix=0;ix<pout.size();++ix) { Energy en = sqrt(pmag[ix]/k12+sqr(mon[ix])); numer += en; denom += pmag[ix]/en; } Energy en = sqrt(qmag2/k12+sqr(mbar)); numer += en-roots; denom += qmag2/en; k1 += numer/denom*k12*k1; if(abs(k1)>1e10) return false; } while (abs(numer)>eps&&itry<100); k1 = abs(k1); k2 = a*k1; return itry<100; } void QTildeReconstructor:: deconstructInitialFinalSystem(HardTreePtr tree,vector<HardBranchingPtr> jets, ShowerInteraction::Type type) const { HardBranchingPtr incoming; Lorentz5Momentum pin[2],pout[2],pbeam; HardBranchingPtr initial; Energy mc(ZERO); for(unsigned int ix=0;ix<jets.size();++ix) { // final-state parton if(jets[ix]->status()==HardBranching::Outgoing) { pout[0] += jets[ix]->branchingParticle()->momentum(); mc = jets[ix]->branchingParticle()->thePEGBase() ? jets[ix]->branchingParticle()->thePEGBase()->mass() : jets[ix]->branchingParticle()->dataPtr()->mass(); } // initial-state parton else { pin[0] += jets[ix]->branchingParticle()->momentum(); initial = jets[ix]; pbeam = jets[ix]->beam()->momentum(); Energy scale=pbeam.t(); pbeam = Lorentz5Momentum(ZERO,pbeam.vect().unit()*scale); incoming = jets[ix]; while(incoming->parent()) incoming = incoming->parent(); } } if(jets.size()>2) { pout[0].rescaleMass(); mc = pout[0].mass(); } // work out the boost to the Breit frame Lorentz5Momentum pa = pout[0]-pin[0]; Axis axis(pa.vect().unit()); LorentzRotation rot; double sinth(sqrt(sqr(axis.x())+sqr(axis.y()))); if(axis.perp2()>0.) 
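// only construct the Breit-frame rotation when pa has a transverse component;
// this avoids the division by sinth=0 when pa already lies along the z axis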
{ rot.setRotate(-acos(axis.z()),Axis(-axis.y()/sinth,axis.x()/sinth,0.)); rot.rotateX(Constants::pi); rot.boostZ( pa.e()/pa.vect().mag()); } // transverse part Lorentz5Momentum paxis=rot*pbeam; Boost trans = -1./paxis.e()*paxis.vect(); trans.setZ(0.); rot.boost(trans); pa *= rot; // reference vectors Lorentz5Momentum n1(ZERO,ZERO,-pa.z(),-pa.z()); Lorentz5Momentum n2(ZERO,ZERO, pa.z(),-pa.z()); Energy2 n1n2 = n1*n2; // decompose the momenta Lorentz5Momentum qbp=rot*pin[0],qcp= rot*pout[0]; double a[2],b[2]; a[0] = n2*qbp/n1n2; b[0] = n1*qbp/n1n2; a[1] = n2*qcp/n1n2; b[1] = n1*qcp/n1n2; Lorentz5Momentum qperp = qbp-a[0]*n1-b[0]*n2; // before reshuffling Energy Q = abs(pa.z()); double c = sqr(mc/Q); Lorentz5Momentum pb(ZERO,ZERO,0.5*Q*(1.+c),0.5*Q*(1.+c)); Lorentz5Momentum pc(ZERO,ZERO,0.5*Q*(c-1.),0.5*Q*(1.+c)); double anew[2],bnew[2]; anew[0] = pb*n2/n1n2; bnew[0] = 0.5*(qbp.m2()-qperp.m2())/n1n2/anew[0]; bnew[1] = pc*n1/n1n2; anew[1] = 0.5*qcp.m2()/bnew[1]/n1n2; Lorentz5Momentum qnewb = (anew[0]*n1+bnew[0]*n2+qperp); Lorentz5Momentum qnewc = (anew[1]*n1+bnew[1]*n2); // initial-state boost LorentzRotation rotinv=rot.inverse(); LorentzRotation transb=rotinv*solveBoostZ(qnewb,qbp)*rot; // final-state boost LorentzRotation transc=rotinv*solveBoost(qnewc,qcp)*rot; // this will need changing for more than one outgoing particle // set the pvectors for(unsigned int ix=0;ix<jets.size();++ix) { if(jets[ix]->status()==HardBranching::Incoming) { jets[ix]->pVector(pbeam); jets[ix]->showerMomentum(rotinv*pb); incoming->pVector(jets[ix]->pVector()); } else { jets[ix]->pVector(rotinv*pc); jets[ix]->showerMomentum(jets[ix]->pVector()); } } // find the colour partners ShowerParticleVector particles; vector<Lorentz5Momentum> ptemp; set<HardBranchingPtr>::const_iterator cjt; for(cjt=tree->branchings().begin();cjt!=tree->branchings().end();++cjt) { ptemp.push_back((**cjt).branchingParticle()->momentum()); (**cjt).branchingParticle()->set5Momentum((**cjt).showerMomentum()); particles.push_back((**cjt).branchingParticle()); } dynamic_ptr_cast<tcQTildeShowerHandlerPtr>(ShowerHandler::currentHandler())->showerModel()->partnerFinder() ->setInitialEvolutionScales(particles,false,type,false); unsigned int iloc(0); for(cjt=tree->branchings().begin();cjt!=tree->branchings().end();++cjt) { // reset the momentum (**cjt).branchingParticle()->set5Momentum(ptemp[iloc]); ++iloc; } for(vector<HardBranchingPtr>::const_iterator cjt=jets.begin(); cjt!=jets.end();++cjt) { // sort out the partners tShowerParticlePtr partner = (*cjt)->branchingParticle()->partner(); if(!partner) continue; tHardBranchingPtr branch; for(set<HardBranchingPtr>::const_iterator clt=tree->branchings().begin();clt!=tree->branchings().end();++clt) { if((**clt).branchingParticle()==partner) { (**cjt).colourPartner(*clt); branch=*clt; break; } } // compute the reference vectors // both incoming, should all ready be done if((**cjt).status()==HardBranching::Incoming && branch->status()==HardBranching::Incoming) { Energy etemp = (*cjt)->beam()->momentum().z(); Lorentz5Momentum nvect(ZERO, ZERO,-etemp, abs(etemp)); tHardBranchingPtr branch2 = *cjt; (**cjt).nVector(nvect); while (branch2->parent()) { branch2=branch2->parent(); branch2->nVector(nvect); } } // both outgoing else if((**cjt).status()==HardBranching::Outgoing&& branch->status()==HardBranching::Outgoing) { Boost boost=((*cjt)->pVector()+branch->pVector()).findBoostToCM(); Lorentz5Momentum pcm = branch->pVector(); pcm.boost(boost); Lorentz5Momentum nvect = Lorentz5Momentum(ZERO,pcm.vect()); nvect.boost( 
-boost); (**cjt).nVector(nvect); } else if((**cjt).status()==HardBranching::Incoming) { Lorentz5Momentum pa = -(**cjt).showerMomentum()+branch->showerMomentum(); Lorentz5Momentum pb = (**cjt).showerMomentum(); Axis axis(pa.vect().unit()); LorentzRotation rot; double sinth(sqrt(sqr(axis.x())+sqr(axis.y()))); if(axis.perp2()>1e-20) { rot.setRotate(-acos(axis.z()),Axis(-axis.y()/sinth,axis.x()/sinth,0.)); rot.rotateX(Constants::pi); } if(abs(1.-pa.e()/pa.vect().mag())>1e-6) rot.boostZ( pa.e()/pa.vect().mag()); pb*=rot; Boost trans = -1./pb.e()*pb.vect(); trans.setZ(0.); rot.boost(trans); Energy scale=(**cjt).beam()->momentum().t(); Lorentz5Momentum pbasis(ZERO,(**cjt).beam()->momentum().vect().unit()*scale); Lorentz5Momentum pcm = rot*pbasis; rot.invert(); Lorentz5Momentum nvect = rot*Lorentz5Momentum(ZERO,-pcm.vect()); (**cjt).nVector(nvect); tHardBranchingPtr branch2 = *cjt; while (branch2->parent()) { branch2=branch2->parent(); branch2->nVector(nvect); } } else if(branch->status()==HardBranching::Incoming) { Lorentz5Momentum nvect=Lorentz5Momentum(ZERO,branch->showerMomentum().vect()); (**cjt).nVector(nvect); } } // now compute the new momenta for(vector<HardBranchingPtr>::const_iterator cjt=jets.begin(); cjt!=jets.end();++cjt) { if((**cjt).status()==HardBranching::Outgoing) { (**cjt).setMomenta(transc,1.,Lorentz5Momentum()); } } incoming->setMomenta(transb,1.,Lorentz5Momentum()); } void QTildeReconstructor::deepTransform(PPtr particle, const LorentzRotation & r, bool match, PPtr original) const { if(_boosts.find(particle)!=_boosts.end()) { _boosts[particle].push_back(r); } Lorentz5Momentum porig = particle->momentum(); if(!original) original = particle; for ( int i = 0, N = particle->children().size(); i < N; ++i ) { deepTransform(particle->children()[i],r, particle->children()[i]->id()==original->id()&&match,original); } particle->transform(r); // transform the p and n vectors ShowerParticlePtr sparticle = dynamic_ptr_cast<ShowerParticlePtr>(particle); if(sparticle && sparticle->showerBasis()) { sparticle->showerBasis()->transform(r); } if ( particle->next() ) deepTransform(particle->next(),r,match,original); if(!match) return; if(!particle->children().empty()) return; // force the mass shell if(particle->dataPtr()->stable()) { Lorentz5Momentum ptemp = particle->momentum(); ptemp.rescaleEnergy(); particle->set5Momentum(ptemp); } // check if there's a daughter tree which also needs boosting map<tShowerTreePtr,pair<tShowerProgenitorPtr,tShowerParticlePtr> >::const_iterator tit; for(tit = _currentTree->treelinks().begin(); tit != _currentTree->treelinks().end();++tit) { // if there is, boost it if(tit->second.first && tit->second.second==original) { Lorentz5Momentum pnew = tit->first->incomingLines().begin() ->first->progenitor()->momentum(); pnew *= tit->first->transform(); Lorentz5Momentum pdiff = porig-pnew; Energy2 test = sqr(pdiff.x()) + sqr(pdiff.y()) + sqr(pdiff.z()) + sqr(pdiff.t()); LorentzRotation rot; if(test>1e-6*GeV2) rot = solveBoost(porig,pnew); tit->first->transform(r*rot,false); _treeBoosts[tit->first].push_back(r*rot); } } } void QTildeReconstructor::reconstructFinalFinalOffShell(JetKinVect orderedJets, Energy2 s, bool recursive) const { JetKinVect::iterator jit; jit = orderedJets.begin(); ++jit; // 4-momentum of recoiling system Lorentz5Momentum psum; for( ; jit!=orderedJets.end(); ++jit) psum += jit->p; psum.rescaleMass(); // calculate the 3-momentum rescaling factor Energy2 m1sq(orderedJets.begin()->q.m2()),m2sq(psum.m2()); Energy4 num = sqr(s - m1sq - m2sq) - 
4.*m1sq*m2sq; if(num<ZERO) throw KinematicsReconstructionVeto(); double k = sqrt( num / (4.*s*orderedJets.begin()->p.vect().mag2()) ); // boost the most off-shell LorentzRotation B1 = solveBoost(k, orderedJets.begin()->q, orderedJets.begin()->p); deepTransform(orderedJets.begin()->parent,B1); // boost everything else // first to rescale LorentzRotation B2 = solveBoost(k, psum, psum); // and then to rest frame of new system Lorentz5Momentum pnew = B2*psum; pnew.rescaleMass(); B2.transform(pnew.findBoostToCM()); // apply transform (calling routine ensures at least 3 elements) jit = orderedJets.begin(); ++jit; for(;jit!=orderedJets.end();++jit) { deepTransform(jit->parent,B2); jit->p *= B2; jit->q *= B2; } JetKinVect newJets(orderedJets.begin()+1,orderedJets.end()); // final reconstruction if(newJets.size()==2 || !recursive ) { // rescaling factor double k = solveKfactor(psum.m(), newJets); // rescale jets in the new CMF for(JetKinVect::iterator it = newJets.begin(); it != newJets.end(); ++it) { LorentzRotation Trafo = solveBoost(k, it->q, it->p); deepTransform(it->parent,Trafo); } } // recursive else { std::sort(newJets.begin(),newJets.end(),JetOrdering()); reconstructFinalFinalOffShell(newJets,psum.m2(),recursive); } // finally boost back from new CMF LorentzRotation back(-pnew.findBoostToCM()); for(JetKinVect::iterator it = newJets.begin(); it != newJets.end(); ++it) { deepTransform(it->parent,back); } } Energy QTildeReconstructor::findMass(HardBranchingPtr branch) const { // KH - 230909 - If the particle has no children then it will // not have showered and so it should be "on-shell" so we can // get it's mass from it's momentum. This means that the // inverseRescalingFactor doesn't give any nans or do things // it shouldn't if it gets e.g. two Z bosons generated with // off-shell masses. This is for sure not the best solution. // PR 1/1/10 modification to previous soln // PR 28/8/14 change to procedure and factorize into a function if(branch->children().empty()) { return branch->branchingParticle()->mass(); } else if(!branch->children().empty() && !branch->branchingParticle()->dataPtr()->stable() ) { for(unsigned int ix=0;ix<branch->children().size();++ix) { if(branch->branchingParticle()->id()== branch->children()[ix]->branchingParticle()->id()) return findMass(branch->children()[ix]); } } return branch->branchingParticle()->dataPtr()->mass(); } vector<double> QTildeReconstructor::inverseInitialStateRescaling(double & x1, double & x2, const Lorentz5Momentum & pold, const vector<Lorentz5Momentum> & p, const vector<Lorentz5Momentum> & pq) const { // hadronic CMS Energy2 s = (pq[0] +pq[1] ).m2(); // partonic CMS Energy MDY = pold.m(); // find alpha, beta and pt Energy2 p12=pq[0]*pq[1]; double a[2],b[2]; Lorentz5Momentum pt[2]; for(unsigned int ix=0;ix<2;++ix) { a[ix] = p[ix]*pq[1]/p12; b [ix] = p[ix]*pq[0]/p12; pt[ix] = p[ix]-a[ix]*pq[0]-b[ix]*pq[1]; } // compute kappa // we always want to preserve the mass of the system double k1(1.),k2(1.); if(_initialStateReconOption==0) { double rap=pold.rapidity(); x2 = MDY/sqrt(s*exp(2.*rap)); x1 = sqr(MDY)/s/x2; k1=a[0]/x1; k2=b[1]/x2; } // longitudinal momentum else if(_initialStateReconOption==1) { double A = 1.; double C = -sqr(MDY)/s; double B = 2.*pold.z()/sqrt(s); if(abs(B)>1e-10) { double discrim = 1.-4.*A*C/sqr(B); if(discrim < 0.) throw KinematicsReconstructionVeto(); x1 = B>0. ? 0.5*B/A*(1.+sqrt(discrim)) : 0.5*B/A*(1.-sqrt(discrim)); } else { x1 = -C/A; if( x1 <= 0.) 
throw KinematicsReconstructionVeto(); x1 = sqrt(x1); } x2 = sqr(MDY)/s/x1; k1=a[0]/x1; k2=b[1]/x2; } // preserve mass and don't scale the softer system // to reproduce the dipole kinematics else if(_initialStateReconOption==2) { // in this case kp = k1 or k2 depending on who's the harder guy k1 = a[0]*b[1]*s/sqr(MDY); if ( pt[0].perp2() < pt[1].perp2() ) swap(k1,k2); x1 = a[0]/k1; x2 = b[1]/k2; } else assert(false); // decompose the momenta double anew[2] = {a[0]/k1,a[1]*k2}; double bnew[2] = {b[0]*k1,b[1]/k2}; vector<double> boost(2); for(unsigned int ix=0;ix<2;++ix) { boost[ix] = getBeta(a [ix]+b [ix], a[ix] -b [ix], anew[ix]+bnew[ix], anew[ix]-bnew[ix]); } return boost; } vector<double> QTildeReconstructor::initialStateRescaling(double x1, double x2, const Lorentz5Momentum & pold, const vector<Lorentz5Momentum> & p, const vector<Lorentz5Momentum> & pq, const vector<Energy>& highestpts) const { Energy2 S = (pq[0]+pq[1]).m2(); // find alphas and betas in terms of desired basis Energy2 p12 = pq[0]*pq[1]; double a[2] = {p[0]*pq[1]/p12,p[1]*pq[1]/p12}; double b[2] = {p[0]*pq[0]/p12,p[1]*pq[0]/p12}; Lorentz5Momentum p1p = p[0] - a[0]*pq[0] - b[0]*pq[1]; Lorentz5Momentum p2p = p[1] - a[1]*pq[0] - b[1]*pq[1]; // compute kappa // we always want to preserve the mass of the system Energy MDY = pold.m(); Energy2 A = a[0]*b[1]*S; Energy2 B = Energy2(sqr(MDY)) - (a[0]*b[0]+a[1]*b[1])*S - (p1p+p2p).m2(); Energy2 C = a[1]*b[0]*S; double rad = 1.-4.*A*C/sqr(B); if(rad < 0.) throw KinematicsReconstructionVeto(); double kp = B/(2.*A)*(1.+sqrt(rad)); // now compute k1 // conserve rapidity double k1(0.); double k2(0.); if(_initialStateReconOption==0) { rad = kp*(b[0]+kp*b[1])/(kp*a[0]+a[1]); rad *= pq[0].z()<ZERO ? exp(-2.*pold.rapidity()) : exp(2.*pold.rapidity()); if(rad <= 0.) throw KinematicsReconstructionVeto(); k1 = sqrt(rad); k2 = kp/k1; } // conserve longitudinal momentum else if(_initialStateReconOption==1) { double a2 = (a[0]+a[1]/kp); double b2 = -x2+x1; double c2 = -(b[1]*kp+b[0]); if(abs(b2)>1e-10) { double discrim = 1.-4.*a2*c2/sqr(b2); if(discrim < 0.) throw KinematicsReconstructionVeto(); k1 = b2>0. ? 0.5*b2/a2*(1.+sqrt(discrim)) : 0.5*b2/a2*(1.-sqrt(discrim)); } else { k1 = -c2/a2; if( k1 <= 0.) 
throw KinematicsReconstructionVeto(); k1 = sqrt(k1); } k2 = kp/k1; } // preserve mass and don't scale the softer system // to reproduce the dipole kinematics else if(_initialStateReconOption==2) { // in this case kp = k1 or k2 depending on who's the harder guy k1 = kp; k2 = 1.; if ( highestpts[0] < highestpts[1] ) swap(k1,k2); } else assert(false); // calculate the boosts vector<double> beta(2); beta[0] = getBeta((a[0]+b[0]), (a[0]-b[0]), (k1*a[0]+b[0]/k1), (k1*a[0]-b[0]/k1)); beta[1] = getBeta((a[1]+b[1]), (a[1]-b[1]), (a[1]/k2+k2*b[1]), (a[1]/k2-k2*b[1])); if (pq[0].z() > ZERO) { beta[0] = -beta[0]; beta[1] = -beta[1]; } return beta; } void QTildeReconstructor:: reconstructColourSinglets(vector<ShowerProgenitorPtr> & ShowerHardJets, ShowerInteraction::Type type) const { // identify and catagorize the colour singlet systems unsigned int nnun(0),nnii(0),nnif(0),nnf(0),nni(0); vector<ColourSingletSystem> systems(identifySystems(set<ShowerProgenitorPtr>(ShowerHardJets.begin(),ShowerHardJets.end()), nnun,nnii,nnif,nnf,nni)); // now decide what to do // initial-initial connection and final-state colour singlet systems LorentzRotation toRest,fromRest; bool applyBoost(false),general(false); // Drell-Yan type if(nnun==0&&nnii==1&&nnif==0&&nnf>0&&nni==0) { // reconstruct initial-initial system for(unsigned int ix=0;ix<systems.size();++ix) { if(systems[ix].type==II) reconstructInitialInitialSystem(applyBoost,toRest,fromRest, systems[ix].jets); } if(type!=ShowerInteraction::QCD) { combineFinalState(systems); general=false; } } // DIS and VBF type else if(nnun==0&&nnii==0&&((nnif==1&&nnf>0&&nni==1)|| (nnif==2&& nni==0))) { // check these systems can be reconstructed for(unsigned int ix=0;ix<systems.size();++ix) { // compute q^2 if(systems[ix].type!=IF) continue; Lorentz5Momentum q; for(unsigned int iy=0;iy<systems[ix].jets.size();++iy) { if(systems[ix].jets[iy]->progenitor()->isFinalState()) q += systems[ix].jets[iy]->progenitor()->momentum(); else q -= systems[ix].jets[iy]->progenitor()->momentum(); } q.rescaleMass(); // check above cut if(abs(q.m())>=_minQ) continue; if(nnif==1&&nni==1) { throw KinematicsReconstructionVeto(); } else { general = true; break; } } if(!general) { for(unsigned int ix=0;ix<systems.size();++ix) { if(systems[ix].type==IF) reconstructInitialFinalSystem(systems[ix].jets); } } } // e+e- type else if(nnun==0&&nnii==0&&nnif==0&&nnf>0&&nni==2) { general = type!=ShowerInteraction::QCD; } // general type else { general = true; } // final-state systems except for general recon if(!general) { for(unsigned int ix=0;ix<systems.size();++ix) { if(systems[ix].type==F) reconstructFinalStateSystem(applyBoost,toRest,fromRest, systems[ix].jets); } } else { reconstructGeneralSystem(ShowerHardJets); } } void QTildeReconstructor::findInitialBoost(const Lorentz5Momentum & pold, const Lorentz5Momentum & pnew, LorentzRotation & toRest, LorentzRotation & fromRest) const { // do one boost if(_initialBoost==0) { toRest = LorentzRotation(pold.findBoostToCM()); fromRest = LorentzRotation(pnew.boostVector()); } else if(_initialBoost==1) { // boost to rest frame // first transverse toRest = Boost(-pold.x()/pold.t(),-pold.y()/pold.t(),0.); // then longitudinal double beta = pold.z()/sqrt(pold.m2()+sqr(pold.z())); toRest.boost((Boost(0.,0.,-beta))); // boost from rest frame // first apply longitudinal boost beta = pnew.z()/sqrt(pnew.m2()+sqr(pnew.z())); fromRest=LorentzRotation(Boost(0.,0.,beta)); // then transverse one fromRest.boost(Boost(pnew.x()/pnew.t(), pnew.y()/pnew.t(),0.)); } else assert(false); } 
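// The momentum rescalings above are compact and easy to misread, so the short,
// self-contained sketch below spells out the three formulae they rely on: the
// Kallen-function form of the two-body momentum used in
// reconstructFinalFinalOffShell(), the rapidity- and mass-preserving momentum
// fractions of the _initialStateReconOption==0 branch, and the longitudinal
// boost velocity used in findInitialBoost() for _initialBoost==1.  Plain
// doubles replace ThePEG's dimensionful types; the helper names kallen(),
// rescaleFactor(), momentumFractions() and longitudinalBeta() are illustrative
// only and are not Herwig API.
#include <cassert>
#include <cmath>
#include <cstdio>

namespace sketch {

// Kallen (triangle) function lambda(a,b,c) = a^2 + b^2 + c^2 - 2ab - 2bc - 2ca.
// In reconstructFinalFinalOffShell() the quantity
//   num = sqr(s - m1sq - m2sq) - 4.*m1sq*m2sq
// is exactly lambda(s, m1sq, m2sq).
double kallen(double a, double b, double c) {
  return a*a + b*b + c*c - 2.*(a*b + b*c + c*a);
}

// Rescaling factor k with |p_new| = k*|p_old|: the on-shell two-body momentum
// lambda^(1/2)(s, m1^2, m2^2)/(2*sqrt(s)) divided by the current |p_old|,
// i.e. k = sqrt( lambda / (4*s*|p_old|^2) ) as in the code above.
double rescaleFactor(double s, double m1sq, double m2sq, double p2old) {
  const double num = kallen(s, m1sq, m2sq);
  assert(num >= 0.);  // the reconstructor vetoes the event in this case
  return std::sqrt(num/(4.*s*p2old));
}

// Momentum fractions that preserve both the partonic invariant mass M and the
// rapidity y of the system: requiring x1*x2*s = M^2 and y = 0.5*log(x1/x2)
// gives x2 = M/sqrt(s*exp(2y)) and x1 = M^2/(s*x2), which is the
// _initialStateReconOption==0 branch of inverseInitialStateRescaling().
void momentumFractions(double M, double s, double y, double& x1, double& x2) {
  x2 = M/std::sqrt(s*std::exp(2.*y));
  x1 = M*M/(s*x2);
}

// Longitudinal boost velocity used in findInitialBoost() for _initialBoost==1:
// after the transverse boost the system has mass m and longitudinal momentum
// pz, so its energy is sqrt(m^2 + pz^2) and beta = pz/E.
double longitudinalBeta(double m2, double pz) {
  return pz/std::sqrt(m2 + pz*pz);
}

} // namespace sketch

int main() {
  // jet of mass 10 recoiling against an 80 mass system at sqrt(s) = 200,
  // with a current jet 3-momentum of 50 (arbitrary energy units)
  const double k = sketch::rescaleFactor(200.*200., 10.*10., 80.*80., 50.*50.);
  // Z-like system (M = 91.2) at rapidity 1.5 for sqrt(s) = 13000
  double x1, x2;
  sketch::momentumFractions(91.2, 13000.*13000., 1.5, x1, x2);
  const double beta = sketch::longitudinalBeta(91.2*91.2, 250.);
  std::printf("k = %.3f  x1 = %.3e  x2 = %.3e  beta = %.3f\n", k, x1, x2, beta);
  return 0;
}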
diff --git a/Utilities/Histogram.h b/Utilities/Histogram.h --- a/Utilities/Histogram.h +++ b/Utilities/Histogram.h @@ -1,440 +1,440 @@ // -*- C++ -*- // // Histogram.h is a part of Herwig - A multi-purpose Monte Carlo event generator // Copyright (C) 2002-2011 The Herwig Collaboration // // Herwig is licenced under version 2 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. // #ifndef HERWIG_Histogram_H #define HERWIG_Histogram_H // // This is the declaration of the Histogram class. // #include "Histogram.fh" #include "ThePEG/Interface/Interfaced.h" #include "Statistic.h" #include <string> namespace Herwig { using namespace ThePEG; /** * Options for histogram output. * They can be combined using the '|' operator, e.g. 'Frame | Ylog' */ namespace HistogramOptions { const unsigned int None = 0; /**< No options */ const unsigned int Frame = 1; /**< Plot on new frame */ const unsigned int Errorbars = 1 << 1; /**< Plot error bars */ const unsigned int Xlog = 1 << 2; /**< log scale for x-axis */ const unsigned int Ylog = 1 << 3; /**< log scale for y-axis */ const unsigned int Smooth = 1 << 4; /**< smooth the line */ const unsigned int Rawcount = 1 << 5; /**< don't normalize to unit area */ } /** * The Histogram class is a simple histogram for the Analysis handlers. * * @see \ref HistogramInterfaces "The interfaces" * defined for Histogram. */ class Histogram: public Interfaced { public: /** @name Standard constructors and destructors. */ //@{ /** * The default constructor. * @param lower The lower limit of the histogram * @param upper The upper limit of the histogram * @param nbin Number of bins */ Histogram(double lower=0., double upper=0., unsigned int nbin=0) : _globalStats(), _havedata(false), _bins(nbin+2),_prefactor(1.),_total(0.) { if (upper<lower) swap(upper,lower); _bins[0].limit=-1.e100; double limit(lower); double width((upper-lower)/nbin); for(unsigned int ix=1; ix <= nbin; ++ix) { _bins[ix].limit=limit; limit += width; } _bins.back().limit=limit; } /** * Constructor for variable width bins * @param limits The lower limits for the bins followed by the upper limit of the last bin */ Histogram(vector<double> limits) : _globalStats(), _havedata(false), _bins(limits.size()+1), _prefactor(1.),_total(0.) { _bins[0].limit=-1.e100; for (size_t i=1; i<=limits.size(); ++i) _bins[i].limit=limits[i-1]; } /** * Constructor with data included * @param limits The lower limits for the bins followed by the upper limit of the last bin * @param data The data * @param dataerror The errors on the data */ Histogram(vector<double> limits, vector<double> data, vector<double> dataerror) : _globalStats(), _havedata(true), _bins(limits.size()+1), _prefactor(1.),_total(0.) { _bins[0].limit=-1.e100; for (size_t i=1; i<=limits.size(); ++i) _bins[i].limit=limits[i-1]; // no data goes into _bins[0] or _bins.back()! 
for (size_t i=1; i<=min(limits.size()-1,data.size()); ++i) _bins[i].data=data[i-1]; for (size_t i=1; i<=min(limits.size()-1,dataerror.size()); ++i) _bins[i].dataerror=dataerror[i-1]; } //@} public: /** * Operator to add a point to the histogrma with unit weight */ void operator += (double input) { addWeighted(input,1.0); } /** * Function to add a weighted point to the histogram */ void addWeighted(double input, double weight) { - if(isnan(input)) return; + if(std::isnan(input)) return; unsigned int ibin; for(ibin=1; ibin<_bins.size(); ++ibin) { if(input<_bins[ibin].limit) break; } _bins[ibin-1].contents += weight; _bins[ibin-1].contentsSq += sqr(weight); _globalStats += weight * input; _total += weight; } /** * Number of bins (not counting the overflow) */ unsigned int numberOfBins() const { return _bins.size()-2; } /** * Get the prefactor */ double prefactor() const { return _prefactor; } /** * Set the prefactor */ void prefactor(double in ) { _prefactor=in; } /** * Access to the statistics on the total entry of the histogram */ const Statistic & globalStatistics() const { return _globalStats; } /** * Normalise the distributions to the data */ void normaliseToData(); /** * Normalise the distributions to the total cross section */ void normaliseToCrossSection(); /** * Return the chi squared * @param chisq The chi squared * @param ndegrees The number of points * @param minfrac The minimum fractional error on the data point */ void chiSquared(double & chisq, unsigned int & ndegrees, double minfrac=0.) const; /** * @brief Output as file ready for usage with flat2aida and other Rivet tools * @param out The output stream * @param histogramname The histogram name identifying the histogram. Required * for comparisons (e.g. with rivet-mkhtml or with * compare-histos) * @param analysisname The analysis name * @param title The title for the top of the plot in LaTeX format * @param xlabel The x label in LaTeX format * @param ylabel The y label in LaTeX format * @param rawcount Don't normalise to unit area. * @param multiplicator Factor the histogram is multiplied with. * N.B. Experimental data is not output. */ void rivetOutput(ostream & out, string histogramname = string("default"), string analysisname = string("default"), string title = string(), string xlabel = string(), string ylabel = string(), bool rawcount = false, double multiplicator = 1.0) const; /** * Output as a topdrawer file. The histogram is normalised to unit area * @param out The output stream * @param flags A bitmask of flags from HistogramOptions, e.g. Frame|Ylog * @param colour The colour for the line * @param title The title for the top of the plot * @param titlecase topdraw format for the title * @param left Left axis lable * @param leftcase topdraw format for left axis label * @param bottom Bottom axis lable * @param bottomcase Bottom axis lable ofr topdraw * N.B. in td smoothing only works for histograms with uniform binning. */ void topdrawOutput(ostream & out, unsigned int flags = 0, string colour = string("BLACK"), string title = string(), string titlecase = string(), string left = string(), string leftcase = string(), string bottom = string(), string bottomcase = string() ) const; void topdrawMCatNLO(ostream & out, unsigned int flags =0 , string colour = string("BLACK"), string title = string() ) const; /** * Output as a topdrawer file. A bin by bin average is taken. 
* @param out The output stream * @param frame output on a new graph * @param errorbars output data points with error bars * @param xlog log scale on x axis * @param ylog log scale on y axis * @param colour The colour for the line * @param title The title for the top of the plot * @param titlecase topdraw format for the title * @param left Left axis lable * @param leftcase topdraw format for left axis label * @param bottom Bottom axis lable * @param bottomcase Bottom axis lable ofr topdraw */ void topdrawOutputAverage(ostream & out, bool frame, bool errorbars, bool xlog, bool ylog, string colour=string("BLACK"), string title=string(), string titlecase =string(), string left=string(), string leftcase =string(), string bottom=string(), string bottomcase =string()) const; /** * get the number of visible entries (all entries without those in the * under- and overflow bins) in the histogram. This assumes integer * entries, ie it gives wrong results for weighted histograms. */ unsigned int visibleEntries() const; /** * Compute the normalisation of the data. */ double dataNorm() const; /** * Output into a simple ascii file, easily readable by gnuplot. */ void simpleOutput(ostream & out, bool errorbars, bool normdata=false); /** * Dump bin data into a vector */ vector<double> dumpBins() const; /** * Returns a new histogram containing bin-by-bin ratios of two histograms */ Histogram ratioWith(const Histogram & h2) const; /** * @brief Returns limits for bins with exponentially increasing widths. * For usage with the variable-bin-width Histogram constructor. * @param xmin Lower limit of the first bin, needs to be > 0 * @param nbins Number of bins * @param base The base, needs to be > 1 */ static vector<double> LogBins(double xmin, unsigned nbins, double base = 10.0); public: /** * The standard Init function used to initialize the interfaces. * Called exactly once for each class by the class description system * before the main function starts or * when this class is dynamically loaded. */ static void Init(); protected: /** @name Clone Methods. */ //@{ /** * Make a simple clone of this object. * @return a pointer to the new object. */ virtual IBPtr clone() const; /** Make a clone of this object, possibly modifying the cloned object * to make it sane. * @return a pointer to the new object. */ virtual IBPtr fullclone() const; //@} private: /** * The static object used to initialize the description of this class. * Indicates that this is a concrete class with persistent data. */ static NoPIOClassDescription<Histogram> initHistogram; /** * The assignment operator is private and must never be called. * In fact, it should not even be implemented. */ Histogram & operator=(const Histogram &); private: /** * Global statistics of all data that went into the histogram. */ Statistic _globalStats; /** * Set to true if there is experimental data available */ bool _havedata; /** * One bin of the histogram. limit is the _lower_ bound of the bin. */ struct Bin { /** * Default constructor */ Bin() : contents(0.0), contentsSq(0.0), limit(0.0), data(0.0), dataerror(0.0), points(0) {} /** * Contents of the bin */ double contents; /** * Contents squared for the error */ double contentsSq; /** * The limit for the bin */ double limit; /** * The experimental value for the bin */ double data; /** * The error on the experimental value for the bin */ double dataerror; /** * The number of points in the bin */ long points; }; /** * The histogram bins. 
_bins[0] is the underflow, _bins.back() the overflow */ vector<Bin> _bins; /** * Prefactor that the output is multiplied by */ double _prefactor; /** * Total weight of all entries (sum of the fill weights) */ double _total; public: /** * The vector of bins */ vector<Bin> bins() const { return _bins; } }; } #include "ThePEG/Utilities/ClassTraits.h" namespace ThePEG { /** @cond TRAITSPECIALIZATIONS */ /** This template specialization informs ThePEG about the * base classes of Histogram. */ template <> struct BaseClassTrait<Herwig::Histogram,1> { /** Typedef of the first base class of Histogram. */ typedef Herwig::Statistic NthBase; }; /** This template specialization informs ThePEG about the name of * the Histogram class and the shared object where it is defined. */ template <> struct ClassTraits<Herwig::Histogram> : public ClassTraitsBase<Herwig::Histogram> { /** Return a platform-independent class name */ static string className() { return "Herwig::Histogram"; } }; /** @endcond */ } #endif /* HERWIG_Histogram_H */
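// A minimal usage sketch for the Histogram class declared above.  It only
// exercises members that appear in this header (the fixed-bin constructor,
// operator+=, addWeighted() and topdrawOutput()); the function name, the
// include path and the output file name are illustrative assumptions rather
// than part of Herwig.
#include "Herwig/Utilities/Histogram.h"
#include <fstream>

void fillAndWriteExample() {
  using namespace Herwig;

  // 50 uniform bins on [0,200); under- and overflow bins are kept internally
  Histogram hpt(0., 200., 50);

  // unit-weight fill via operator+=, weighted fill via addWeighted()
  hpt += 35.7;
  hpt.addWeighted(120.3, 0.25);   // NaN inputs are silently discarded

  // topdrawer output on a new frame with a logarithmic y axis;
  // the remaining label arguments keep their default (empty) values
  std::ofstream out("pt.top");
  hpt.topdrawOutput(out,
                    HistogramOptions::Frame | HistogramOptions::Ylog,
                    "BLACK",                  // line colour
                    "Transverse momentum");   // plot title
}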