diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -1,27 +1,28 @@
 168ae2110e964d62fbc1331a1c2e095952a67748 release-2-5-2
 3abb4fa42e20e332796c2572334c2d77204cd0e0 release-2-4-2
 4796ca080aafd5daa3b7349b015cb1df944428a2 release-2-5-0
 76da042f056eb153981b4d005d5474ffb90a5e88 release-2-4-1
 81a684a558413c69df314365eabf09893ffd43d8 release-2-6-0
 bd75cd00d99f4bdbaed992daf98f0a73c0f91e9b release-2-4-0
 ff6ecc8d49ce10299303b050394bd5cb5837f1c3 release-2-5-1
 d0389f5453b2c210923e1adc7b872b18269de668 release-2-6-1
 f8998033021185942533b824607285feb3fbd2dc release-2-6-1a
 cead23e428b9aacaf2d709e722624e54f844498b release-2-6-1b
 191db4655439045f912cb21bd905e729d59ec7bc release-2-6-2
 edb538156e9c3d64bb842934b4cebf0126aeb9ea release-2-6-3
 eb4a104591859ecac18746b1ad54d6aa0c2a5d1a release-2-7-0
 568971ac5b3c1d044c9259f2280a8304fc5a62e9 trunk-before-QED
 6e3edb6cfeb4ee48687eb4eb3d016026fc59d602 trunk-after-QED
 633abb80b571aa23088957df60e9b0000bbb8a22 release-2-7-1
 1bdde095d2346c15ee548e5406a96f0fc6d6e0f1 beforeHQ
 a0f9fb821396092bdbeee532bcb0bd624f58335b before_MB_merge
 270c1e6b34aa7f758f1d9868c4d3e1ec4bf4e709 herwig-7-0-0
 6e0f198c1c2603ecd1a0b6cfe40105cda4bd58c5 herwig-7-0-1
 566c1de845a8070559cda45b1bdb40afa18cb2cc herwig-7-0-2
 f5c4aa956880f2def763ebd57de7b5bfa55cb1db herwig-7-0-3
 65282dedfc2e4bec184e68678dbf4c553c968f38 herwig-7-0-4
 541e7790b65ed423c86780bf66ec30e6b99b5a18 herwig-7-1-0
 dd35a1c12d57c047169e8c5fb18644972d49c6ac herwig-7-1-1
 0d651b079756b63713e32a1341d81e4dfc7eeb7b herwig-7-1-2
 4b97934bc41c861c4be04f563ffa68a94a982560 herwig-7-1-3
+97aca5398cfa1f3273804f03fa96fa0fa23eca61 herwig-7-1-4
diff --git a/AUTHORS b/AUTHORS
--- a/AUTHORS
+++ b/AUTHORS
@@ -1,56 +1,56 @@
 ================================================================================
-Herwig 7.1.3
+Herwig 7.1.4
 ================================================================================
 Please contact for any queries.
 --------------------------------------------------------------------------------
 Herwig is actively developed by:
 --------------------------------------------------------------------------------
 Johannes Bellm
 Stefan Gieseke
 David Grellscheid
 Patrick Kirchgaeßer
 Graeme Nail
 Andreas Papaefstathiou
 Simon Plätzer
 Radek Podskubka
 Michael Rauch
 Christian Reuschle
 Peter Richardson
 Mike Seymour
 Andrzej Siödmok
 Stephen Webster
 --------------------------------------------------------------------------------
 Former authors are:
 --------------------------------------------------------------------------------
 Ken Arnold
 Manuel Bähr
 Luca d'Errico
 Martyn Gigg
 Nadine Fischer
 Keith Hamilton
 Marco A. Harrendorf
 Seyi Latunde-Dada
 Frashër Loshaj
 Daniel Rauch
 Alberto Ribon
 Christian Röhr
 Pavel Růžička
 Peter Schichtel
 Alex Schofield
 Thomas Schuh
 Alexander Sherstnev
 Philip Stephens
 Martin Stoll
 Louise Suter
 Jon Tully
 Bryan Webber
 Alix Wilcock
 David Winn
 Benedikt Zimmermann
diff --git a/MatrixElement/DIS/Makefile.am b/MatrixElement/DIS/Makefile.am
--- a/MatrixElement/DIS/Makefile.am
+++ b/MatrixElement/DIS/Makefile.am
@@ -1,6 +1,6 @@
 pkglib_LTLIBRARIES = HwMEDIS.la
 HwMEDIS_la_SOURCES = \
 DISBase.h DISBase.cc \
 MENeutralCurrentDIS.cc MENeutralCurrentDIS.h \
 MEChargedCurrentDIS.cc MEChargedCurrentDIS.h
-HwMEDIS_la_LDFLAGS = $(AM_LDFLAGS) -module -version-info 7:0:0
+HwMEDIS_la_LDFLAGS = $(AM_LDFLAGS) -module -version-info 7:1:0
diff --git a/MatrixElement/Hadron/Makefile.am b/MatrixElement/Hadron/Makefile.am
--- a/MatrixElement/Hadron/Makefile.am
+++ b/MatrixElement/Hadron/Makefile.am
@@ -1,28 +1,28 @@
 pkglib_LTLIBRARIES = HwMEHadron.la
 HwMEHadron_la_SOURCES = \
 MEqq2gZ2ff.cc MEqq2gZ2ff.h \
 MEqq2W2ff.cc MEqq2W2ff.h \
 MEPP2GammaJet.h MEPP2GammaJet.cc\
 MEQCD2to2.h MEQCD2to2.cc\
 MEPP2HiggsJet.h MEPP2HiggsJet.cc\
 MEPP2GammaGamma.h MEPP2GammaGamma.cc \
 MEPP2QQ.h MEPP2QQ.cc \
 MEPP2QQHiggs.h MEPP2QQHiggs.cc \
 MEPP2Higgs.h MEPP2Higgs.cc\
 MEPP2WH.h MEPP2WH.cc \
 MEPP2ZH.h MEPP2ZH.cc \
 MEPP2WJet.cc MEPP2WJet.h \
 MEPP2ZJet.cc MEPP2ZJet.h \
 MEPP2VV.cc MEPP2VV.h \
 MEPP2VGamma.cc MEPP2VGamma.h \
 MEPP2HiggsVBF.cc MEPP2HiggsVBF.h \
 MEPP2SingleTop.cc MEPP2SingleTop.h \
 MEMinBias.h MEMinBias.cc \
 MEDiffraction.h MEDiffraction.cc
-HwMEHadron_la_LDFLAGS = $(AM_LDFLAGS) -module -version-info 8:1:0
+HwMEHadron_la_LDFLAGS = $(AM_LDFLAGS) -module -version-info 8:2:0
 pkglib_LTLIBRARIES += HwMEHadronFast.la
 HwMEHadronFast_la_SOURCES = \
 MEQCD2to2Fast.h MEQCD2to2Fast.cc
 HwMEHadronFast_la_LDFLAGS = $(AM_LDFLAGS) -module -version-info 6:0:0
diff --git a/MatrixElement/Lepton/MEee2Higgs2SM.cc b/MatrixElement/Lepton/MEee2Higgs2SM.cc
--- a/MatrixElement/Lepton/MEee2Higgs2SM.cc
+++ b/MatrixElement/Lepton/MEee2Higgs2SM.cc
@@ -1,354 +1,354 @@
 // -*- C++ -*-
 //
 // MEee2Higgs2SM.cc is a part of Herwig - A multi-purpose Monte Carlo event generator
 // Copyright (C) 2002-2017 The Herwig Collaboration
 //
 // Herwig is licenced under version 3 of the GPL, see COPYING for details.
 // Please respect the MCnet academic guidelines, see GUIDELINES for details.
 //
 //
 // This is the implementation of the non-inlined, non-templated member
 // functions of the MEee2Higgs2SM class.
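[Editorial note on the recurring Makefile.am hunks above: the only change in each of them is the libtool `-version-info` triple, which encodes `current:revision:age`. Bumping only the middle (revision) field, e.g. 7:0:0 -> 7:1:0, is the conventional libtool way to mark a binary-compatible, implementation-only update, which is what a point release such as 7.1.4 implies. A minimal Automake sketch of that convention; `HwExample.la` and its sources are placeholders for illustration, not part of this patch:]

# Illustrative only: HwExample.la is a placeholder library name.
pkglib_LTLIBRARIES = HwExample.la
HwExample_la_SOURCES = Example.h Example.cc
# -version-info is current:revision:age.
# Same interface, new implementation -> bump only "revision"
# (the pattern used for HwMEDIS.la, HwMEHadron.la and HwMELepton.la in this patch).
HwExample_la_LDFLAGS = $(AM_LDFLAGS) -module -version-info 7:1:0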
// #include "MEee2Higgs2SM.h" #include "ThePEG/Utilities/DescribeClass.h" #include "ThePEG/Interface/ClassDocumentation.h" #include "ThePEG/Interface/Switch.h" #include "ThePEG/Persistency/PersistentOStream.h" #include "ThePEG/Persistency/PersistentIStream.h" #include "ThePEG/Handlers/StandardXComb.h" #include "Herwig/MatrixElement/HardVertex.h" #include "ThePEG/PDT/EnumParticles.h" using namespace Herwig; void MEee2Higgs2SM::doinit() { ME2to2Base::doinit(); h0_ = getParticleData(ThePEG::ParticleID::h0); tcHwSMPtr hwsm=ThePEG::dynamic_ptr_cast(standardModel()); // do the initialisation if(hwsm) { FFHVertex_ = hwsm->vertexFFH(); HGGVertex_ = hwsm->vertexHGG(); } else { throw InitException() << "Must have Herwig StandardModel object in " << "MEee2Higgs2SM::doinit()" << Exception::runerror; } } Energy2 MEee2Higgs2SM::scale() const { return sHat(); } unsigned int MEee2Higgs2SM::orderInAlphaS() const { return 0; } unsigned int MEee2Higgs2SM::orderInAlphaEW() const { return 2; } void MEee2Higgs2SM::getDiagrams() const { // specific the diagrams tcPDPtr ep = getParticleData(ParticleID::eplus ); tcPDPtr em = getParticleData(ParticleID::eminus); // outgoing quarks for(int i=1;i<=5;++i) { if(allowed_==0 || allowed_==1 || allowed_==i+2) { tcPDPtr lm = getParticleData(i); tcPDPtr lp = lm->CC(); add(new_ptr((Tree2toNDiagram(2), em, ep, 1, h0_, 3, lm, 3, lp, -1))); } } // outgoing leptons for( int i =11;i<=16;i+=2) { if(allowed_==0 || allowed_==2 || allowed_==(i+7)/2) { tcPDPtr lm = getParticleData(i); tcPDPtr lp = lm->CC(); add(new_ptr((Tree2toNDiagram(2), em, ep, 1, h0_, 3, lm, 3, lp, -1))); } } if(allowed_==0 || allowed_==12) { tcPDPtr g = getParticleData(ParticleID::g); add(new_ptr((Tree2toNDiagram(2), em, ep, 1, h0_, 3, g, 3, g, -1))); } } double MEee2Higgs2SM::me2() const { double aver=0.; // get the order right int ielectron(0),ipositron(1),ilp(2),ilm(3); if(mePartonData()[0]->id()!=11) swap(ielectron,ipositron); if(mePartonData()[2]->id()>mePartonData()[3]->id()) swap(ilp,ilm); // the arrays for the wavefunction to be passed to the matrix element vector fin; vector ain; for(unsigned int ihel=0;ihel<2;++ihel) { fin .push_back(SpinorWaveFunction(meMomenta()[ielectron], mePartonData()[ielectron],ihel,incoming)); ain .push_back(SpinorBarWaveFunction(meMomenta()[ipositron], mePartonData()[ipositron],ihel,incoming)); } // H -> f fbar if(mePartonData()[2]->id()!=ParticleID::g) { vector aout; vector fout; for(unsigned int ihel=0;ihel<2;++ihel) { fout.push_back(SpinorBarWaveFunction(meMomenta()[ilm], mePartonData()[ilm],ihel,outgoing)); aout.push_back(SpinorWaveFunction(meMomenta()[ilp], mePartonData()[ilp],ihel,outgoing)); } HelicityME(fin,ain,fout,aout,aver); if(mePartonData()[ilm]->id()<=6) aver*=3.; } else { vector g1,g2; for(unsigned int ihel=0;ihel<2;++ihel) { g1.push_back(VectorWaveFunction(meMomenta()[2],mePartonData()[2], 2*ihel,outgoing)); g2.push_back(VectorWaveFunction(meMomenta()[3],mePartonData()[3], 2*ihel,outgoing)); } ggME(fin,ain,g1,g2,aver); aver *= 8.; } return aver; } // the helicity amplitude matrix element ProductionMatrixElement MEee2Higgs2SM::HelicityME(vector fin, vector ain, vector fout, vector aout, double & aver) const { // the particles should be in the order // for the incoming // 0 incoming fermion (u spinor) // 1 incoming antifermion (vbar spinor) // for the outgoing // 0 outgoing fermion (ubar spinor) // 1 outgoing antifermion (v spinor) // me to be returned ProductionMatrixElement output(PDT::Spin1Half,PDT::Spin1Half, PDT::Spin1Half,PDT::Spin1Half); // wavefunctions for 
the intermediate particles ScalarWaveFunction interh; // temporary storage of the different diagrams Complex diag; aver=0.; // sum over helicities to get the matrix element unsigned int inhel1,inhel2,outhel1,outhel2; for(inhel1=0;inhel1<2;++inhel1) { for(inhel2=0;inhel2<2;++inhel2) { interh = FFHVertex_->evaluate(sHat(),1,h0_,fin[inhel1],ain[inhel2]); for(outhel1=0;outhel1<2;++outhel1) { for(outhel2=0;outhel2<2;++outhel2) { diag = FFHVertex_->evaluate(sHat(),aout[outhel2], fout[outhel1],interh); output(inhel1,inhel2,outhel1,outhel2)=diag; aver +=real(diag*conj(diag)); } } } } return output; } Selector MEee2Higgs2SM::diagrams(const DiagramVector &) const { Selector sel;sel.insert(1.0, 0); return sel; } Selector MEee2Higgs2SM::colourGeometries(tcDiagPtr diag) const { static ColourLines neutral ( " " ); static ColourLines quarks ( "-5 4"); static ColourLines gluons ( "-5 4, 5 -4"); Selector sel; int id = abs((diag->partons()[2])->id()); if (id<=6 ) sel.insert(1.0, &quarks); - if (id==21) + else if (id==21) sel.insert(1.0, &gluons); else sel.insert(1.0, &neutral); return sel; } void MEee2Higgs2SM::persistentOutput(PersistentOStream & os) const { os << FFHVertex_ << HGGVertex_ << h0_ << allowed_; } void MEee2Higgs2SM::persistentInput(PersistentIStream & is, int) { is >> FFHVertex_ >> HGGVertex_ >> h0_ >> allowed_; } // The following static variable is needed for the type // description system in ThePEG. DescribeClass describeHerwigMEee2Higgs2SM("Herwig::MEee2Higgs2SM", "HwMELepton.so"); void MEee2Higgs2SM::Init() { static ClassDocumentation documentation ("The MEee2Higgs2SM class implements the matrix element for e+e- to" " SM particle via Higgs exchnage and is designed to be a process to test" " things involving scalar particles. "); static Switch interfaceallowed ("Allowed", "Allowed outgoing particles", &MEee2Higgs2SM::allowed_, 0, false, false); static SwitchOption interfaceallowedAll (interfaceallowed, "All", "All SM particles allowed", 0); static SwitchOption interfaceallowed1 (interfaceallowed, "Quarks", "Only the quarks allowed", 1); static SwitchOption interfaceallowed2 (interfaceallowed, "Leptons", "Only the leptons allowed", 2); static SwitchOption interfacealloweddown (interfaceallowed, "Down", "Only d dbar allowed", 3); static SwitchOption interfaceallowedup (interfaceallowed, "Up", "Only u ubar allowed", 4); static SwitchOption interfaceallowedstrange (interfaceallowed, "Strange", "Only s sbar allowed", 5); static SwitchOption interfaceallowedcharm (interfaceallowed, "Charm", "Only c cbar allowed", 6); static SwitchOption interfaceallowedbottom (interfaceallowed, "Bottom", "Only b bbar allowed", 7); static SwitchOption interfaceallowedtop (interfaceallowed, "Top", "Only t tbar allowed", 8); static SwitchOption interfaceallowedelectron (interfaceallowed, "Electron", "Only e+e- allowed", 9); static SwitchOption interfaceallowedMuon (interfaceallowed, "Muon", "Only mu+mu- allowed", 10); static SwitchOption interfaceallowedTau (interfaceallowed, "Tau", "Only tau+tau- allowed", 11); static SwitchOption interfaceallowedGluon (interfaceallowed, "Gluon", "Only gg allowed", 12); } void MEee2Higgs2SM::constructVertex(tSubProPtr sub) { // extract the particles in the hard process ParticleVector hard; hard.push_back(sub->incoming().first);hard.push_back(sub->incoming().second); hard.push_back(sub->outgoing()[0]);hard.push_back(sub->outgoing()[1]); if(hard[0]->id()id()) swap(hard[0],hard[1]); if(hard[2]->id()id()) swap(hard[2],hard[3]); vector fin; vector ain; SpinorWaveFunction( fin 
,hard[0],incoming,false,true); SpinorBarWaveFunction(ain ,hard[1],incoming,false,true); double me; ProductionMatrixElement prodme; if(hard[2]->id()!=ParticleID::g) { vector aout; vector fout; SpinorBarWaveFunction(fout,hard[2],outgoing,true ,true); SpinorWaveFunction( aout,hard[3],outgoing,true ,true); // calculate the matrix element prodme=HelicityME(fin,ain,fout,aout,me); } else { vector g1,g2; VectorWaveFunction(g1,hard[2],outgoing,true,true); VectorWaveFunction(g2,hard[3],outgoing,true,true); g1[1]=g1[2]; g2[1]=g2[2]; prodme=ggME(fin,ain,g1,g2,me); } // construct the vertex HardVertexPtr hardvertex=new_ptr(HardVertex()); // set the matrix element for the vertex hardvertex->ME(prodme); // set the pointers and to and from the vertex for(unsigned int ix=0;ix<4;++ix) { (hard[ix]->spinInfo())->productionVertex(hardvertex); } } void MEee2Higgs2SM::rebind(const TranslationMap & trans) { // dummy = trans.translate(dummy); FFHVertex_ = trans.translate(FFHVertex_); HGGVertex_ = trans.translate(HGGVertex_); h0_ = trans.translate(h0_); ME2to2Base::rebind(trans); } IVector MEee2Higgs2SM::getReferences() { IVector ret = ME2to2Base::getReferences(); ret.push_back(FFHVertex_); ret.push_back(HGGVertex_); ret.push_back(h0_); return ret; } // the helicity amplitude matrix element ProductionMatrixElement MEee2Higgs2SM::ggME(vector fin, vector ain, vector g1, vector g2, double & aver) const { ProductionMatrixElement output(PDT::Spin1Half,PDT::Spin1Half, PDT::Spin1,PDT::Spin1); // wavefunctions for the intermediate particles ScalarWaveFunction interh; // temporary storage of the different diagrams Complex diag; aver=0.; // sum over helicities to get the matrix element unsigned int inhel1,inhel2,outhel1,outhel2; for(inhel1=0;inhel1<2;++inhel1) { for(inhel2=0;inhel2<2;++inhel2) { interh = FFHVertex_->evaluate(sHat(),1,h0_,fin[inhel1],ain[inhel2]); for(outhel1=0;outhel1<2;++outhel1) { for(outhel2=0;outhel2<2;++outhel2) { diag = HGGVertex_->evaluate(sHat(),g1[outhel1],g2[outhel2],interh); output(inhel1,inhel2,2*outhel1,2*outhel2)=diag; aver +=real(diag*conj(diag)); } } } } return output; } diff --git a/MatrixElement/Lepton/Makefile.am b/MatrixElement/Lepton/Makefile.am --- a/MatrixElement/Lepton/Makefile.am +++ b/MatrixElement/Lepton/Makefile.am @@ -1,10 +1,10 @@ pkglib_LTLIBRARIES = HwMELepton.la HwMELepton_la_SOURCES = \ MEee2gZ2qq.h MEee2gZ2qq.cc\ MEee2gZ2ll.h MEee2gZ2ll.cc\ MEee2ZH.h MEee2ZH.cc\ MEee2HiggsVBF.h MEee2HiggsVBF.cc \ MEee2VV.h MEee2VV.cc \ MEee2VectorMeson.h MEee2VectorMeson.cc \ MEee2Higgs2SM.h MEee2Higgs2SM.cc -HwMELepton_la_LDFLAGS = $(AM_LDFLAGS) -module -version-info 7:0:0 +HwMELepton_la_LDFLAGS = $(AM_LDFLAGS) -module -version-info 7:1:0 diff --git a/MatrixElement/Matchbox/Phasespace/PhasespaceHelpers.cc b/MatrixElement/Matchbox/Phasespace/PhasespaceHelpers.cc --- a/MatrixElement/Matchbox/Phasespace/PhasespaceHelpers.cc +++ b/MatrixElement/Matchbox/Phasespace/PhasespaceHelpers.cc @@ -1,602 +1,563 @@ // -*- C++ -*- // // PhasespaceHelpers.cc is a part of Herwig - A multi-purpose Monte Carlo event generator // Copyright (C) 2002-2017 The Herwig Collaboration // // Herwig is licenced under version 3 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. 
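[Editorial note on the MEee2Higgs2SM.cc hunk above: its only functional change is in colourGeometries(), where the second `if` becomes `else if`. With the old control flow the trailing `else` belonged to the independent `if (id==21)`, so a q qbar final state (|id| <= 6) inserted both the `quarks` colour structure and the empty `neutral` one into the Selector with equal weight. A minimal stand-alone sketch of the two control flows, using a plain std::vector of labels as a stand-in for ThePEG's weighted Selector (that stand-in is an assumption of the sketch, not Herwig's API):]

// Compile with any C++11 compiler; demonstrates why the "else if" matters.
#include <iostream>
#include <string>
#include <vector>

// Old logic: independent "if" followed by an "if/else" pair.
std::vector<std::string> before(int id) {
  std::vector<std::string> sel;
  if (id <= 6)  sel.push_back("quarks");
  if (id == 21) sel.push_back("gluons");
  else          sel.push_back("neutral");
  return sel;   // for id <= 6 this holds BOTH "quarks" and "neutral"
}

// New logic: a single if / else if / else chain, exactly one entry.
std::vector<std::string> after(int id) {
  std::vector<std::string> sel;
  if      (id <= 6)  sel.push_back("quarks");
  else if (id == 21) sel.push_back("gluons");
  else               sel.push_back("neutral");
  return sel;
}

int main() {
  for (int id : {1, 11, 21})   // quark, lepton, gluon final states
    std::cout << "id=" << id
              << "  old entries: " << before(id).size()
              << "  new entries: " << after(id).size() << '\n';
}

[Gluon and lepton final states behave identically before and after the change; only the quark case picked up the spurious colourless entry, which the else-if chain removes.]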
// #include "PhasespaceHelpers.h" #include "Herwig/MatrixElement/Matchbox/Phasespace/RandomHelpers.h" using namespace Herwig; using namespace Herwig::PhasespaceHelpers; using namespace Herwig::RandomHelpers; Energy PhasespaceInfo::generateMass(tcPDPtr data, - const pair& range) { + const pair& range) { double xlow = sqr(range.first)/sHat; if ( range.first < ZERO ) xlow = -xlow; double xup = sqr(range.second)/sHat; if ( range.second < ZERO ) xup = -xup; double mu = sqr(data->hardProcessMass())/sHat; double gamma = sqr(data->hardProcessWidth())/sHat; if ( gamma < 1e-14 ) gamma = 0.0; if ( M0 != ZERO ) x0 = M0/sqrtSHat; if ( Mc != ZERO ) xc = Mc/sqrtSHat; double r = rnd(); pair event; if ( gamma == 0. ) { if ( mu < xlow || mu > xup ) { if ( abs(xlow-mu) < xc ) - xlow = mu; + xlow = mu; if ( abs(xup-mu) < xc ) - xup = mu; + xup = mu; } if ( mu < xlow || mu > xup ) { event = - generate(inverse(mu,xlow,xup),r); + generate(inverse(mu,xlow,xup),r); } else { pair pLeft(xlow,xlow < mu-x0 ? mu-x0 : xlow); pair pRight(xup > mu+x0 ? mu+x0 : xup,xup); pair fLeft(pLeft.second < mu-x0 ? mu-x0 : pLeft.second,mu-xc); - if ( fLeft.first >= fLeft.second ) - fLeft.first = fLeft.second; + if ( fLeft.first >= fLeft.second ) fLeft.first = fLeft.second; pair fRight(mu+xc,pRight.first > mu+x0 ? mu+x0 : pRight.first); - if ( fRight.first >= fRight.second ) - fRight.second = fRight.first; + if ( fRight.first >= fRight.second ) fRight.second = fRight.first; - if ( pLeft.first != pLeft.second && - fLeft.first != fLeft.second && - fRight.first != fRight.second && - pRight.first != pRight.second ) { - event = - generate((piecewise(), - inverse(mu,pLeft.first,pLeft.second), - match(flat(fLeft.first,fLeft.second))) + - match((piecewise(), - flat(fRight.first,fRight.second), - match(inverse(mu,pRight.first,pRight.second)))), - r); - } else if ( pLeft.first == pLeft.second && - fLeft.first != fLeft.second && - fRight.first != fRight.second && - pRight.first != pRight.second ) { - event = - generate(flat(fLeft.first,fLeft.second) + - match((piecewise(), - flat(fRight.first,fRight.second), - match(inverse(mu,pRight.first,pRight.second)))), - r); - } else if ( pLeft.first != pLeft.second && - fLeft.first != fLeft.second && - fRight.first != fRight.second && - pRight.first == pRight.second ) { - event = - generate((piecewise(), - inverse(mu,pLeft.first,pLeft.second), - match(flat(fLeft.first,fLeft.second))) + - match(flat(fRight.first,fRight.second)), - r); - } else if ( pLeft.first == pLeft.second && - fLeft.first == fLeft.second && - fRight.first != fRight.second && - pRight.first != pRight.second ) { - event = - generate((piecewise(), - flat(fRight.first,fRight.second), - match(inverse(mu,pRight.first,pRight.second))), - r); - } else if ( pLeft.first != pLeft.second && - fLeft.first != fLeft.second && - fRight.first == fRight.second && - pRight.first == pRight.second ) { - event = - generate((piecewise(), - inverse(mu,pLeft.first,pLeft.second), - match(flat(fLeft.first,fLeft.second))), - r); - } else if ( pLeft.first == pLeft.second && - fLeft.first != fLeft.second && - fRight.first != fRight.second && - pRight.first == pRight.second ) { - event = - generate(flat(fLeft.first,fLeft.second) + - match(flat(fRight.first,fRight.second)), - r); - } else if ( pLeft.first == pLeft.second && - fLeft.first == fLeft.second && - fRight.first != fRight.second && - pRight.first == pRight.second ) { - event = - generate(flat(fRight.first,fRight.second), - r); - } else if ( pLeft.first == pLeft.second && - fLeft.first != fLeft.second && - 
fRight.first == fRight.second && - pRight.first == pRight.second ) { - event = - generate(flat(fLeft.first,fLeft.second), - r); - } else if ( pLeft.first == pLeft.second && - fLeft.first == fLeft.second && - fRight.first == fRight.second && - pRight.first == pRight.second ) { - throw Veto(); + + const bool pL= abs( pLeft.first - pLeft.second ) < 1e-14; + const bool fL= abs( fLeft.first - fLeft.second ) < 1e-14; + const bool fR= abs( fRight.first - fRight.second ) < 1e-14; + const bool pR= abs( pRight.first - pRight.second ) < 1e-14; + + + if ( !pL && !fL && !fR && !pR ) { + event = + generate((piecewise(), + inverse(mu,pLeft.first,pLeft.second), + match(flat(fLeft.first,fLeft.second))) + + match((piecewise(), + flat(fRight.first,fRight.second), + match(inverse(mu,pRight.first,pRight.second)))), + r); + } else if ( pL && !fL && !fR && !pR ) { + event = + generate(flat(fLeft.first,fLeft.second) + + match((piecewise(), + flat(fRight.first,fRight.second), + match(inverse(mu,pRight.first,pRight.second)))), r); + } else if ( !pL && !fL && !fR && pR ) { + event = + generate((piecewise(), + inverse(mu,pLeft.first,pLeft.second), + match(flat(fLeft.first,fLeft.second))) + + match(flat(fRight.first,fRight.second)), + r); + } else if ( pL && fL && !fR && !pR ) { + event = + generate((piecewise(),flat(fRight.first,fRight.second), + match(inverse(mu,pRight.first,pRight.second))), r); + } else if ( !pL && !fL && fR && pR ) { + event = + generate((piecewise(), + inverse(mu,pLeft.first,pLeft.second), + match(flat(fLeft.first,fLeft.second))),r); + } else if ( pL && !fL && !fR && pR ) { + event = generate(flat(fLeft.first,fLeft.second) + + match(flat(fRight.first,fRight.second)),r); + } else if ( pL && fL && !fR && pR ) { + event = generate(flat(fRight.first,fRight.second),r); + } else if ( pL && !fL && fR && pR ) { + event = generate(flat(fLeft.first,fLeft.second),r); + } else if ( pL && fL && fR && pR ) { + throw Veto(); } else assert(false); } } else { event = generate(breitWigner(mu,gamma,xlow,xup),r); } - if ( abs(event.first) < xc ) + if ( abs(event.first) < xc ) throw Veto(); weight *= event.second; Energy res = sqrt(abs(event.first)*sHat); if ( event.first < 0. 
) res = -res; return res; } Lorentz5Momentum PhasespaceInfo::generateKt(const Lorentz5Momentum& p1, - const Lorentz5Momentum& p2, - Energy pt) { + const Lorentz5Momentum& p2, + Energy pt) { double phi = 2.*Constants::pi*rnd(); weight *= 2.*Constants::pi; Lorentz5Momentum P = p1 + p2; Energy2 Q2 = abs(P.m2()); - Lorentz5Momentum Q = - Lorentz5Momentum(ZERO,ZERO,ZERO,sqrt(Q2),sqrt(Q2)); + Lorentz5Momentum Q = Lorentz5Momentum(ZERO,ZERO,ZERO,sqrt(Q2),sqrt(Q2)); bool boost = abs((P-Q).vect().mag2()/GeV2) > 1e-8 || abs((P-Q).t()/GeV) > 1e-4; boost &= (P*Q-Q.mass2())/GeV2 > 1e-8; Lorentz5Momentum inFrame1; if ( boost ) inFrame1 = p1 + ((P*p1-Q*p1)/(P*Q-Q.mass2()))*(P-Q); else inFrame1 = p1; Energy ptx = inFrame1.x(); Energy pty = inFrame1.y(); Energy q = 2.*inFrame1.z(); Energy Qp = sqrt(4.*(sqr(ptx)+sqr(pty))+sqr(q)); Energy Qy = sqrt(4.*sqr(pty)+sqr(q)); double cPhi = cos(phi); double sPhi = sqrt(1.-sqr(cPhi)); if ( phi > Constants::pi ) sPhi = -sPhi; Lorentz5Momentum kt; kt.setT(ZERO); kt.setX(pt*Qy*cPhi/Qp); kt.setY(-pt*(4*ptx*pty*cPhi/Qp+q*sPhi)/Qy); kt.setZ(2.*pt*(-ptx*q*cPhi/Qp + pty*sPhi)/Qy); if ( boost ) kt = kt + ((P*kt-Q*kt)/(P*Q-Q.mass2()))*(P-Q); kt.setMass(-pt); kt.rescaleRho(); return kt; } void PhasespaceTree::setup(const Tree2toNDiagram& diag, - int pos) { + int pos) { doMirror = false; - pair dchildren = - diag.children(pos); + pair dchildren = diag.children(pos); data = diag.allPartons()[pos]; spacelike = pos < diag.nSpace(); if ( pos == 0 ) externalId = 0; if ( dchildren.first == -1 ) { externalId = diag.externalId(pos); leafs.insert(externalId); return; } children.push_back(PhasespaceTree()); children.back().setup(diag,dchildren.first); children.push_back(PhasespaceTree()); children.back().setup(diag,dchildren.second); if ( !children[0].children.empty() && - children[1].children.empty() && - !spacelike ) + children[1].children.empty() && + !spacelike ) swap(children[0],children[1]); if ( spacelike && - !children[0].spacelike ) + !children[0].spacelike ) swap(children[0],children[1]); copy(children[0].leafs.begin(),children[0].leafs.end(), inserter(leafs,leafs.begin())); copy(children[1].leafs.begin(),children[1].leafs.end(), inserter(leafs,leafs.begin())); } void PhasespaceTree::setupMirrored(const Tree2toNDiagram& diag, - int pos) { + int pos) { doMirror = true; spacelike = pos < diag.nSpace(); pair dchildren; if (pos != 0 && spacelike) - dchildren = - make_pair(diag.parent(pos), diag.children(diag.parent(pos)).second); - else if ( !spacelike ) - dchildren = - diag.children(pos); - else - dchildren = - make_pair(-1,-1); + dchildren = {diag.parent(pos), diag.children(diag.parent(pos)).second}; + else if ( !spacelike ) dchildren = diag.children(pos); + else dchildren = {-1,-1}; data = diag.allPartons()[pos]; if ( pos == diag.nSpace() - 1 ) externalId = 1; if ( dchildren.first == -1 ) { externalId = diag.externalId(pos); leafs.insert(externalId); return; } children.push_back(PhasespaceTree()); children.back().setupMirrored(diag,dchildren.first); children.push_back(PhasespaceTree()); children.back().setupMirrored(diag,dchildren.second); if ( !children[0].children.empty() && - children[1].children.empty() && - !spacelike ) + children[1].children.empty() && + !spacelike ) swap(children[0],children[1]); if ( spacelike && - !children[0].spacelike ) { + !children[0].spacelike ) { assert (false); } copy(children[0].leafs.begin(),children[0].leafs.end(), inserter(leafs,leafs.begin())); copy(children[1].leafs.begin(),children[1].leafs.end(), inserter(leafs,leafs.begin())); } void 
PhasespaceTree::print(int in) { for (int i = 0; i != in; i++) cerr << " "; cerr << " |- " << data->PDGName() << " " << externalId << "\n" << flush; if ( !children.empty() ) { children[1].print(in+1); children[0].print(in+int(!spacelike)); } else { - cerr << "\n"; + cerr << "\n"; } } void PhasespaceTree::init(const vector& meMomenta) { if ( children.empty() ) { massRange.first = meMomenta[externalId].mass(); massRange.second = massRange.first; if ( !doMirror && externalId == 1 ) momentum = meMomenta[1]; if ( doMirror && externalId == 0 ) momentum = meMomenta[0]; momentum.setMass(meMomenta[externalId].mass()); return; } children[0].init(meMomenta); children[1].init(meMomenta); if ( !children[0].spacelike && - !children[1].spacelike ) { - massRange.first = - children[0].massRange.first + - children[1].massRange.first; + !children[1].spacelike ) { + massRange.first = + children[0].massRange.first + + children[1].massRange.first; } } void PhasespaceTree::generateKinematics(PhasespaceInfo& info, - vector& meMomenta) { + vector& meMomenta) { if ( !doMirror && externalId == 0 ) { init(meMomenta); momentum = meMomenta[0]; } else if ( doMirror && externalId == 1) { init(meMomenta); momentum = meMomenta[1]; } if ( children.empty() ) { if ( ( !doMirror && externalId != 1 ) - || ( doMirror && externalId !=0 ) ) + || ( doMirror && externalId !=0 ) ) meMomenta[externalId] = momentum; return; } - // s-channel + // s-channel if ( ( !doMirror && externalId == 0 && - children[0].externalId == 1 ) - || ( doMirror && externalId == 1 && - children[0].externalId == 0 ) ) { - children[1].momentum = meMomenta[0] + meMomenta[1]; - children[1].momentum.setMass(info.sqrtSHat); - children[1].momentum.rescaleEnergy(); - children[1].generateKinematics(info,meMomenta); - return; - } + children[0].externalId == 1 ) + || ( doMirror && externalId == 1 && + children[0].externalId == 0 ) ) { + children[1].momentum = meMomenta[0] + meMomenta[1]; + children[1].momentum.setMass(info.sqrtSHat); + children[1].momentum.rescaleEnergy(); + children[1].generateKinematics(info,meMomenta); + return; + } if ( !spacelike ) { Energy mij = momentum.mass(); Energy mi,mj; - - // work out the mass for the first child + + // work out the mass for the first child if ( !children[0].children.empty() ) { Energy sumOthers = ZERO; for ( size_t k = 2; k < meMomenta.size(); ++k ) - if ( children[1].leafs.find(k) != children[1].leafs.end() ) - sumOthers += meMomenta[k].mass(); + if ( children[1].leafs.find(k) != children[1].leafs.end() ) + sumOthers += meMomenta[k].mass(); children[0].massRange.second = momentum.mass() - sumOthers; if ( children[0].massRange.second < children[0].massRange.first ) - throw Veto(); + throw Veto(); if ( children[0].massRange.second > momentum.mass() ) - throw Veto(); + throw Veto(); mi = info.generateMass(children[0].data,children[0].massRange); children[0].momentum.setMass(mi); } else { mi = children[0].momentum.mass(); } - // work out the mass for the second child + // work out the mass for the second child if ( !children[1].children.empty() ) { children[1].massRange.second = momentum.mass()-children[0].momentum.mass(); if ( children[1].massRange.second < children[1].massRange.first ) - throw Veto(); + throw Veto(); mj = info.generateMass(children[1].data,children[1].massRange); children[1].momentum.setMass(mj); } else { mj = children[1].momentum.mass(); } Energy2 mij2 = sqr(mij); Energy2 mi2 = sqr(mi); Energy2 mj2 = sqr(mj); - // perform the decay + // perform the decay Energy4 lambda2 = sqr(mij2-mi2-mj2)-4.*mi2*mj2; if 
( lambda2 <= ZERO ) throw Veto(); Energy2 lambda = sqrt(lambda2); double phi = 2.*Constants::pi*info.rnd(); double cosPhi = cos(phi); double sinPhi = sqrt(1.-sqr(cosPhi)); if ( phi > Constants::pi ) sinPhi = -sinPhi; info.weight *= Constants::pi*lambda/(2.*mij2); double cosTheta = 2.*info.rnd() - 1.; double sinTheta = sqrt(1.-sqr(cosTheta)); Energy p = lambda/(2.*mij); children[0].momentum.setX(p*cosPhi*sinTheta); children[0].momentum.setY(p*sinPhi*sinTheta); children[0].momentum.setZ(p*cosTheta); children[0].momentum.rescaleEnergy(); if ( momentum.m2() <= ZERO ) throw Veto(); Boost out = momentum.boostVector(); if ( out.mag2() > Constants::epsilon ) { children[0].momentum.boost(out); } children[1].momentum = momentum - children[0].momentum; children[1].momentum.setMass(mj); children[1].momentum.rescaleEnergy(); - // go on with next branchings + // go on with next branchings children[0].generateKinematics(info,meMomenta); children[1].generateKinematics(info,meMomenta); return; } - // get the minimum mass of the `W' system + // get the minimum mass of the `W' system Energy Wmin = ZERO; PhasespaceTree* current = &children[0]; while ( !(current->children.empty()) ) { Wmin += current->children[1].massRange.first; current = &(current->children[0]); } - // get the CM energy avaialble + // get the CM energy avaialble Energy2 s = (momentum+meMomenta[int(!doMirror)]).m2(); if ( s <= ZERO ) throw Veto(); - // generate a mass for the timelike child + // generate a mass for the timelike child Energy mi; if ( !children[1].children.empty() ) { children[1].massRange.second = sqrt(s)-Wmin; if ( children[1].massRange.second < children[1].massRange.first ) throw Veto(); mi = info.generateMass(children[1].data,children[1].massRange); children[1].momentum.setMass(mi); } else { mi = children[1].momentum.mass(); } Energy2 mi2 = sqr(mi); + + // wether or not this is the last 2->2 scatter + bool lastScatter = children[0].children[0].children.empty(); - // wether or not this is the last 2->2 scatter - bool lastScatter - = children[0].children[0].children.empty(); - - // `W' mass relevant for the other boundaries + // `W' mass relevant for the other boundaries Energy MW = Wmin; - // generate a mass for second outgoing leg, if needed + // generate a mass for second outgoing leg, if needed if ( lastScatter ) if ( !children[0].children[1].children.empty() ) { - // get the maximum `W' mass + // get the maximum `W' mass Energy Wmax = sqrt(s)-children[1].momentum.mass(); children[0].children[1].massRange.second = Wmax; if ( children[0].children[1].massRange.second < - children[0].children[1].massRange.first ) - throw Veto(); + children[0].children[1].massRange.first ) + throw Veto(); MW = info.generateMass(children[0].children[1].data, - children[0].children[1].massRange); + children[0].children[1].massRange); children[0].children[1].momentum.setMass(MW); } Energy2 MW2 = sqr(MW); Energy ma = momentum.mass(); Energy2 ma2 = sqr(ma); if ( ma < ZERO ) ma2 = -ma2; Energy mb = meMomenta[int(!doMirror)].mass(); Energy2 mb2 = sqr(mb); - // pick the ys variable + // pick the ys variable Energy2 ys = ZERO; if ( !lastScatter ) { ys = info.rnd()*(sqr(sqrt(s)-mi)-MW2); info.weight *= (sqr(sqrt(s)-mi)-MW2)/info.sHat; } Energy4 lambda2 = sqr(s-ma2-mb2)-4.*ma2*mb2; if ( lambda2 <= ZERO ) { throw Veto(); } Energy2 lambda = sqrt(lambda2); info.weight *= info.sHat/(4.*lambda); - // get the boundaries on the momentum transfer + // get the boundaries on the momentum transfer Energy4 rho2 = sqr(s-ys-MW2-mi2)-4.*mi2*(ys+MW2); if ( rho2 < ZERO 
) throw Veto(); Energy2 rho = sqrt(rho2); - Energy4 tau2 = - ys*(ma2-mb2+s) - - sqr(s)+s*(ma2+mb2+mi2+MW2)-(mi2-MW2)*(ma2-mb2); + Energy4 tau2 = + ys*(ma2-mb2+s) + - sqr(s)+s*(ma2+mb2+mi2+MW2)-(mi2-MW2)*(ma2-mb2); pair tBounds - ((tau2-rho*lambda)/(2.*s),(tau2+rho*lambda)/(2.*s)); + ((tau2-rho*lambda)/(2.*s),(tau2+rho*lambda)/(2.*s)); children[0].massRange.first = sqrt(abs(tBounds.first)); if ( tBounds.first < ZERO ) children[0].massRange.first = -children[0].massRange.first; children[0].massRange.second = sqrt(abs(tBounds.second)); if ( tBounds.second < ZERO ) children[0].massRange.second = -children[0].massRange.second; - // generate a momentum transfer + // generate a momentum transfer Energy mai = info.generateMass(children[0].data,children[0].massRange); children[0].momentum.setMass(mai); Energy2 t = sqr(mai); if ( mai < ZERO ) t = -t; Energy2 u = -s -t + ys + ma2 + mb2 + mi2 + MW2; Energy2 st = s - ma2 - mb2; Energy2 tt = t - mi2 - ma2; Energy2 ut = u - mi2 - mb2; - // get the timelike momentum + // get the timelike momentum double xa = (-st*ut+2.*mb2*tt)/lambda2; double xb = (-st*tt+2.*ma2*ut)/lambda2; Energy2 pt2 = (st*tt*ut-ma2*sqr(ut)-mb2*sqr(tt)-mi2*sqr(st)+4.*ma2*mb2*mi2)/lambda2; if ( pt2 < ZERO ) throw Veto(); Energy pt = sqrt(pt2); - children[1].momentum = - xa*momentum + xb*meMomenta[int(!doMirror)] + info.generateKt(momentum,meMomenta[int(!doMirror)],pt); + children[1].momentum = + xa*momentum + xb*meMomenta[int(!doMirror)] + + info.generateKt(momentum,meMomenta[int(!doMirror)],pt); children[1].momentum.setMass(mi); children[1].momentum.rescaleEnergy(); children[0].momentum = - momentum - children[1].momentum; + momentum - children[1].momentum; children[0].momentum.setMass(mai); bool changeSign = false; if ( children[0].momentum.t() < ZERO && mai > ZERO ) changeSign = true; if ( mai < ZERO ) children[0].momentum.rescaleRho(); else children[0].momentum.rescaleEnergy(); if ( changeSign ) children[0].momentum.setT(-children[0].momentum.t()); children[1].generateKinematics(info,meMomenta); if ( !lastScatter ) { children[0].generateKinematics(info,meMomenta); } else { children[0].children[1].momentum = - meMomenta[int(!doMirror)] + children[0].momentum; + meMomenta[int(!doMirror)] + children[0].momentum; children[0].children[1].momentum.setMass(MW); children[0].children[1].momentum.rescaleEnergy(); children[0].children[1].generateKinematics(info,meMomenta); } } void PhasespaceTree::put(PersistentOStream& os) const { os << children.size(); if ( !children.empty() ) { children[0].put(os); children[1].put(os); } os << data << externalId << leafs << spacelike << doMirror; } void PhasespaceTree::get(PersistentIStream& is) { size_t nc; is >> nc; assert(nc == 0 || nc == 2); if ( nc == 2 ) { children.resize(2,PhasespaceTree()); children[0].get(is); children[1].get(is); } is >> data >> externalId >> leafs >> spacelike >> doMirror; } diff --git a/Models/Feynrules/python/ufo2peg/general_lorentz.py b/Models/Feynrules/python/ufo2peg/general_lorentz.py --- a/Models/Feynrules/python/ufo2peg/general_lorentz.py +++ b/Models/Feynrules/python/ufo2peg/general_lorentz.py @@ -1,3018 +1,3029 @@ import copy from .helpers import SkipThisVertex,def_from_model from .converter import py2cpp import string,re from string import Template epsValue=[[[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]], [[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]], [[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]], 
[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]], [[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]], [[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]], [[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]], [[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]],[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]]] epsValue[0][1][2][3] = -1. epsValue[0][1][3][2] = 1. epsValue[0][2][1][3] = 1. epsValue[0][2][3][1] = -1. epsValue[0][3][1][2] = -1. epsValue[0][3][2][1] = 1. epsValue[1][0][2][3] = 1. epsValue[1][0][3][2] = -1. epsValue[1][2][0][3] = -1. epsValue[1][2][3][0] = 1. epsValue[1][3][0][2] = 1. epsValue[1][3][2][0] = -1. epsValue[2][0][1][3] = -1. epsValue[2][0][3][1] = 1. epsValue[2][1][0][3] = 1. epsValue[2][1][3][0] = -1. epsValue[2][3][0][1] = -1. epsValue[2][3][1][0] = 1. epsValue[3][0][1][2] = 1. epsValue[3][0][2][1] = -1. epsValue[3][1][0][2] = -1. epsValue[3][1][2][0] = 1. epsValue[3][2][0][1] = 1. epsValue[3][2][1][0] = -1. # self contracted tensor propagator tPropA=[[],[],[],[]] tPropA[0].append(Template("-2. / 3. * (M${iloc}2 + 2 * P${iloc}t ** 2) * (M${iloc}2 -p2)*OM${iloc}**2")) tPropA[0].append(Template("-4. / 3. * P${iloc}t * P${iloc}x * (M${iloc}2 -p2)*OM${iloc}**2")) tPropA[0].append(Template("-4. / 3. * P${iloc}t * P${iloc}y * (M${iloc}2 -p2)*OM${iloc}**2")) tPropA[0].append(Template("-4. / 3. * P${iloc}t * P${iloc}z * (M${iloc}2 -p2)*OM${iloc}**2")) tPropA[1].append(Template("-4. / 3. * P${iloc}t * P${iloc}x * (M${iloc}2 -p2)*OM${iloc}**2")) tPropA[1].append(Template(" 2. / 3. * (M${iloc}2 - 2 * P${iloc}x ** 2) * (M${iloc}2 -p2)*OM${iloc}**2")) tPropA[1].append(Template("-4. / 3. * P${iloc}x * P${iloc}y * (M${iloc}2 -p2)*OM${iloc}**2")) tPropA[1].append(Template("-4. / 3. * P${iloc}x * P${iloc}z * (M${iloc}2 -p2)*OM${iloc}**2")) tPropA[2].append(Template("-4. / 3. * P${iloc}t * P${iloc}y * (M${iloc}2 -p2)*OM${iloc}**2")) tPropA[2].append(Template("-4. / 3. * P${iloc}x * P${iloc}y * (M${iloc}2 -p2)*OM${iloc}**2")) tPropA[2].append(Template(" 2. / 3. * (M${iloc}2 - 2 * P${iloc}y ** 2) * (M${iloc}2 -p2)*OM${iloc}**2")) tPropA[2].append(Template("-4. / 3. * P${iloc}y * P${iloc}z * (M${iloc}2 -p2)*OM${iloc}**2")) tPropA[3].append(Template("-4. / 3. * P${iloc}t * P${iloc}z * (M${iloc}2 -p2)*OM${iloc}**2")) tPropA[3].append(Template("-4. / 3. * P${iloc}x * P${iloc}z * (M${iloc}2 -p2)*OM${iloc}**2")) tPropA[3].append(Template("-4. / 3. * P${iloc}y * P${iloc}z * (M${iloc}2 -p2)*OM${iloc}**2")) tPropA[3].append(Template(" 2. / 3. * (M${iloc}2 - 2 * P${iloc}z ** 2) * (M${iloc}2 -p2)*OM${iloc}**2")) # tensor propagator 1 index contracted tPropB=[[[],[],[],[]],[[],[],[],[]],[[],[],[],[]],[[],[],[],[]]] tPropB[0][0].append(Template("4. / 3. * (${V}t - ${dot}*OM${iloc} * P${iloc}t) * (1. - OM${iloc} * P${iloc}t ** 2)")) tPropB[0][0].append(Template("-2 * (${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}t * P${iloc}x - 2. / 3. * (1. - OM${iloc} * P${iloc}t ** 2) * (${V}x - ${dot}*OM${iloc} * P${iloc}x)")) tPropB[0][0].append(Template(" -2 * (${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}t * P${iloc}y - 2. / 3. * (1. - OM${iloc} * P${iloc}t ** 2) * (${V}y - ${dot}*OM${iloc} * P${iloc}y)")) tPropB[0][0].append(Template(" -2 * (${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}t * P${iloc}z - 2. / 3. * (1. 
- OM${iloc} * P${iloc}t ** 2) * (${V}z - ${dot}*OM${iloc} * P${iloc}z)")) tPropB[0][1].append(Template(" -(${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}t * P${iloc}x / 3. + (1. - OM${iloc} * P${iloc}t ** 2) * (${V}x - ${dot}*OM${iloc} * P${iloc}x)")) tPropB[0][1].append(Template(" (${V}t - ${dot}*OM${iloc} * P${iloc}t) * (-1. - OM${iloc} * P${iloc}x ** 2) - OM${iloc} * P${iloc}t * P${iloc}x * (${V}x - ${dot}*OM${iloc} * P${iloc}x) / 3.")) tPropB[0][1].append(Template("-(${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}x * P${iloc}y - OM${iloc} * P${iloc}t * P${iloc}y * (${V}x - ${dot}*OM${iloc} * P${iloc}x) + 2. / 3.*OM${iloc} * P${iloc}t * P${iloc}x * (${V}y - ${dot}*OM${iloc} * P${iloc}y)")) tPropB[0][1].append(Template("-(${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}x * P${iloc}z - OM${iloc} * P${iloc}t * P${iloc}z * (${V}x - ${dot}*OM${iloc} * P${iloc}x) + 2. / 3.*OM${iloc} * P${iloc}t * P${iloc}x * (${V}z - ${dot}*OM${iloc} * P${iloc}z)")) tPropB[0][2].append(Template(" -(${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}t * P${iloc}y / 3. + (1. - OM${iloc} * P${iloc}t ** 2) * (${V}y - ${dot}*OM${iloc} * P${iloc}y)")) tPropB[0][2].append(Template("-(${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}x * P${iloc}y - OM${iloc} * P${iloc}t * P${iloc}x * (${V}y - ${dot}*OM${iloc} * P${iloc}y) + 2. / 3.*OM${iloc} * P${iloc}t * P${iloc}y * (${V}x - ${dot}*OM${iloc} * P${iloc}x)")) tPropB[0][2].append(Template(" (${V}t - ${dot}*OM${iloc} * P${iloc}t) * (-1. - OM${iloc} * P${iloc}y ** 2) - OM${iloc} * P${iloc}t * P${iloc}y * (${V}y - ${dot}*OM${iloc} * P${iloc}y) / 3.")) tPropB[0][2].append(Template("-(${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}y * P${iloc}z - OM${iloc} * P${iloc}t * P${iloc}z * (${V}y - ${dot}*OM${iloc} * P${iloc}y) + 2. / 3.*OM${iloc} * P${iloc}t * P${iloc}y * (${V}z - ${dot}*OM${iloc} * P${iloc}z)")) tPropB[0][3].append(Template(" -(${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}t * P${iloc}z / 3. + (1. - OM${iloc} * P${iloc}t ** 2) * (${V}z - ${dot}*OM${iloc} * P${iloc}z)")) tPropB[0][3].append(Template("-(${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}x * P${iloc}z - OM${iloc} * P${iloc}t * P${iloc}x * (${V}z - ${dot}*OM${iloc} * P${iloc}z) + 2. / 3.*OM${iloc} * P${iloc}t * P${iloc}z * (${V}x - ${dot}*OM${iloc} * P${iloc}x)")) tPropB[0][3].append(Template(" -(${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}y * P${iloc}z - OM${iloc} * P${iloc}t * P${iloc}y * (${V}z - ${dot}*OM${iloc} * P${iloc}z) + 2. / 3.*OM${iloc} * P${iloc}t * P${iloc}z * (${V}y - ${dot}*OM${iloc} * P${iloc}y)")) tPropB[0][3].append(Template("(${V}t - ${dot}*OM${iloc} * P${iloc}t) * (-1. - OM${iloc} * P${iloc}z ** 2) - OM${iloc} * P${iloc}t * P${iloc}z * (${V}z - ${dot}*OM${iloc} * P${iloc}z) / 3.")) tPropB[1][0].append(Template("-(${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}t * P${iloc}x / 3. + (1 - OM${iloc} * P${iloc}t ** 2) * (${V}x - ${dot}*OM${iloc} * P${iloc}x)")) tPropB[1][0].append(Template(" (${V}t - ${dot}*OM${iloc} * P${iloc}t) * (-1 - OM${iloc} * P${iloc}x ** 2) - OM${iloc} * P${iloc}t * P${iloc}x * (${V}x - ${dot}*OM${iloc} * P${iloc}x) / 3.")) tPropB[1][0].append(Template(" -(${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}x * P${iloc}y - OM${iloc} * P${iloc}t * P${iloc}y * (${V}x - ${dot}*OM${iloc} * P${iloc}x) + 2. 
/ 3.*OM${iloc} * P${iloc}t * P${iloc}x * (${V}y - ${dot}*OM${iloc} * P${iloc}y)")) tPropB[1][0].append(Template(" -(${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}x * P${iloc}z - OM${iloc} * P${iloc}t * P${iloc}z * (${V}x - ${dot}*OM${iloc} * P${iloc}x) + 2. / 3.*OM${iloc} * P${iloc}t * P${iloc}x * (${V}z - ${dot}*OM${iloc} * P${iloc}z)")) tPropB[1][1].append(Template(" -2*OM${iloc} * P${iloc}t * P${iloc}x * (${V}x - ${dot}*OM${iloc} * P${iloc}x) - 2. / 3. * (${V}t - ${dot}*OM${iloc} * P${iloc}t) * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropB[1][1].append(Template(" 4. / 3. * (${V}x - ${dot}*OM${iloc} * P${iloc}x) * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropB[1][1].append(Template(" -2 * (${V}x - ${dot}*OM${iloc} * P${iloc}x)*OM${iloc} * P${iloc}x * P${iloc}y - 2. / 3. * (-1 - OM${iloc} * P${iloc}x ** 2) * (${V}y - ${dot}*OM${iloc} * P${iloc}y)")) tPropB[1][1].append(Template(" -2 * (${V}x - ${dot}*OM${iloc} * P${iloc}x)*OM${iloc} * P${iloc}x * P${iloc}z - 2. / 3. * (-1 - OM${iloc} * P${iloc}x ** 2) * (${V}z - ${dot}*OM${iloc} * P${iloc}z)")) tPropB[1][2].append(Template(" -OM${iloc} * P${iloc}t * P${iloc}y * (${V}x - ${dot}*OM${iloc} * P${iloc}x) - OM${iloc} * P${iloc}t * P${iloc}x * (${V}y - ${dot}*OM${iloc} * P${iloc}y) + 2. / 3. * (${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}x * P${iloc}y")) tPropB[1][2].append(Template(" -(${V}x - ${dot}*OM${iloc} * P${iloc}x)*OM${iloc} * P${iloc}x * P${iloc}y / 3. + (-1 - OM${iloc} * P${iloc}x ** 2) * (${V}y - ${dot}*OM${iloc} * P${iloc}y)")) tPropB[1][2].append(Template(" (${V}x - ${dot}*OM${iloc} * P${iloc}x) * (-1 - OM${iloc} * P${iloc}y ** 2) - OM${iloc} * P${iloc}x * P${iloc}y * (${V}y - ${dot}*OM${iloc} * P${iloc}y) / 3.")) tPropB[1][2].append(Template("-(${V}x - ${dot}*OM${iloc} * P${iloc}x)*OM${iloc} * P${iloc}y * P${iloc}z - OM${iloc} * P${iloc}x * P${iloc}z * (${V}y - ${dot}*OM${iloc} * P${iloc}y) + 2. / 3.*OM${iloc} * P${iloc}x * P${iloc}y * (${V}z - ${dot}*OM${iloc} * P${iloc}z)")) tPropB[1][3].append(Template("-OM${iloc} * P${iloc}t * P${iloc}z * (${V}x - ${dot}*OM${iloc} * P${iloc}x) - OM${iloc} * P${iloc}t * P${iloc}x * (${V}z - ${dot}*OM${iloc} * P${iloc}z) + 2. / 3. * (${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}x * P${iloc}z")) tPropB[1][3].append(Template(" -(${V}x - ${dot}*OM${iloc} * P${iloc}x)*OM${iloc} * P${iloc}x * P${iloc}z / 3. + (-1 - OM${iloc} * P${iloc}x ** 2) * (${V}z - ${dot}*OM${iloc} * P${iloc}z)")) tPropB[1][3].append(Template(" -(${V}x - ${dot}*OM${iloc} * P${iloc}x)*OM${iloc} * P${iloc}y * P${iloc}z - OM${iloc} * P${iloc}x * P${iloc}y * (${V}z - ${dot}*OM${iloc} * P${iloc}z) + 2. / 3.*OM${iloc} * P${iloc}x * P${iloc}z * (${V}y - ${dot}*OM${iloc} * P${iloc}y)")) tPropB[1][3].append(Template("(${V}x - ${dot}*OM${iloc} * P${iloc}x) * (-1 - OM${iloc} * P${iloc}z ** 2) - OM${iloc} * P${iloc}x * P${iloc}z * (${V}z - ${dot}*OM${iloc} * P${iloc}z) / 3.")) tPropB[2][0].append(Template(" -(${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}t * P${iloc}y / 3. + (1 - OM${iloc} * P${iloc}t ** 2) * (${V}y - ${dot}*OM${iloc} * P${iloc}y)")) tPropB[2][0].append(Template(" -(${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}x * P${iloc}y - OM${iloc} * P${iloc}t * P${iloc}x * (${V}y - ${dot}*OM${iloc} * P${iloc}y) + 2. 
/ 3.*OM${iloc} * P${iloc}t * P${iloc}y * (${V}x - ${dot}*OM${iloc} * P${iloc}x)")) tPropB[2][0].append(Template(" (${V}t - ${dot}*OM${iloc} * P${iloc}t) * (-1 - OM${iloc} * P${iloc}y ** 2) - OM${iloc} * P${iloc}t * P${iloc}y * (${V}y - ${dot}*OM${iloc} * P${iloc}y) / 3.")) tPropB[2][0].append(Template(" -(${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}y * P${iloc}z - OM${iloc} * P${iloc}t * P${iloc}z * (${V}y - ${dot}*OM${iloc} * P${iloc}y) + 2. / 3.*OM${iloc} * P${iloc}t * P${iloc}y * (${V}z - ${dot}*OM${iloc} * P${iloc}z)")) tPropB[2][1].append(Template("-OM${iloc} * P${iloc}t * P${iloc}y * (${V}x - ${dot}*OM${iloc} * P${iloc}x) - OM${iloc} * P${iloc}t * P${iloc}x * (${V}y - ${dot}*OM${iloc} * P${iloc}y) + 2. / 3. * (${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}x * P${iloc}y")) tPropB[2][1].append(Template("-(${V}x - ${dot}*OM${iloc} * P${iloc}x)*OM${iloc} * P${iloc}x * P${iloc}y / 3. + (-1 - OM${iloc} * P${iloc}x ** 2) * (${V}y - ${dot}*OM${iloc} * P${iloc}y)")) tPropB[2][1].append(Template(" (${V}x - ${dot}*OM${iloc} * P${iloc}x) * (-1 - OM${iloc} * P${iloc}y ** 2) - OM${iloc} * P${iloc}x * P${iloc}y * (${V}y - ${dot}*OM${iloc} * P${iloc}y) / 3.")) tPropB[2][1].append(Template("-(${V}x - ${dot}*OM${iloc} * P${iloc}x)*OM${iloc} * P${iloc}y * P${iloc}z - OM${iloc} * P${iloc}x * P${iloc}z * (${V}y - ${dot}*OM${iloc} * P${iloc}y) + 2. / 3.*OM${iloc} * P${iloc}x * P${iloc}y * (${V}z - ${dot}*OM${iloc} * P${iloc}z)")) tPropB[2][2].append(Template(" -2*OM${iloc} * P${iloc}t * P${iloc}y * (${V}y - ${dot}*OM${iloc} * P${iloc}y) - 2. / 3. * (${V}t - ${dot}*OM${iloc} * P${iloc}t) * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropB[2][2].append(Template(" -2*OM${iloc} * P${iloc}x * P${iloc}y * (${V}y - ${dot}*OM${iloc} * P${iloc}y) - 2. / 3. * (${V}x - ${dot}*OM${iloc} * P${iloc}x) * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropB[2][2].append(Template("4. / 3. * (${V}y - ${dot}*OM${iloc} * P${iloc}y) * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropB[2][2].append(Template(" -2 * (${V}y - ${dot}*OM${iloc} * P${iloc}y)*OM${iloc} * P${iloc}y * P${iloc}z - 2. / 3. * (-1 - OM${iloc} * P${iloc}y ** 2) * (${V}z - ${dot}*OM${iloc} * P${iloc}z)")) tPropB[2][3].append(Template(" -OM${iloc} * P${iloc}t * P${iloc}z * (${V}y - ${dot}*OM${iloc} * P${iloc}y) - OM${iloc} * P${iloc}t * P${iloc}y * (${V}z - ${dot}*OM${iloc} * P${iloc}z) + 2. / 3. * (${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}y * P${iloc}z")) tPropB[2][3].append(Template(" -OM${iloc} * P${iloc}x * P${iloc}z * (${V}y - ${dot}*OM${iloc} * P${iloc}y) - OM${iloc} * P${iloc}x * P${iloc}y * (${V}z - ${dot}*OM${iloc} * P${iloc}z) + 2. / 3. * (${V}x - ${dot}*OM${iloc} * P${iloc}x)*OM${iloc} * P${iloc}y * P${iloc}z")) tPropB[2][3].append(Template(" -(${V}y - ${dot}*OM${iloc} * P${iloc}y)*OM${iloc} * P${iloc}y * P${iloc}z / 3. + (-1 - OM${iloc} * P${iloc}y ** 2) * (${V}z - ${dot}*OM${iloc} * P${iloc}z)")) tPropB[2][3].append(Template(" (${V}y - ${dot}*OM${iloc} * P${iloc}y) * (-1 - OM${iloc} * P${iloc}z ** 2) - OM${iloc} * P${iloc}y * P${iloc}z * (${V}z - ${dot}*OM${iloc} * P${iloc}z) / 3.")) tPropB[3][0].append(Template(" -(${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}t * P${iloc}z / 3. + (1 - OM${iloc} * P${iloc}t ** 2) * (${V}z - ${dot}*OM${iloc} * P${iloc}z)")) tPropB[3][0].append(Template(" -(${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}x * P${iloc}z - OM${iloc} * P${iloc}t * P${iloc}x * (${V}z - ${dot}*OM${iloc} * P${iloc}z) + 2. 
/ 3.*OM${iloc} * P${iloc}t * P${iloc}z * (${V}x - ${dot}*OM${iloc} * P${iloc}x)")) tPropB[3][0].append(Template("-(${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}y * P${iloc}z - OM${iloc} * P${iloc}t * P${iloc}y * (${V}z - ${dot}*OM${iloc} * P${iloc}z) + 2. / 3.*OM${iloc} * P${iloc}t * P${iloc}z * (${V}y - ${dot}*OM${iloc} * P${iloc}y)")) tPropB[3][0].append(Template(" (${V}t - ${dot}*OM${iloc} * P${iloc}t) * (-1 - OM${iloc} * P${iloc}z ** 2) - OM${iloc} * P${iloc}t * P${iloc}z * (${V}z - ${dot}*OM${iloc} * P${iloc}z) / 3.")) tPropB[3][1].append(Template("-OM${iloc} * P${iloc}t * P${iloc}z * (${V}x - ${dot}*OM${iloc} * P${iloc}x) - OM${iloc} * P${iloc}t * P${iloc}x * (${V}z - ${dot}*OM${iloc} * P${iloc}z) + 2. / 3. * (${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}x * P${iloc}z")) tPropB[3][1].append(Template(" -(${V}x - ${dot}*OM${iloc} * P${iloc}x)*OM${iloc} * P${iloc}x * P${iloc}z / 3. + (-1 - OM${iloc} * P${iloc}x ** 2) * (${V}z - ${dot}*OM${iloc} * P${iloc}z)")) tPropB[3][1].append(Template(" -(${V}x - ${dot}*OM${iloc} * P${iloc}x)*OM${iloc} * P${iloc}y * P${iloc}z - OM${iloc} * P${iloc}x * P${iloc}y * (${V}z - ${dot}*OM${iloc} * P${iloc}z) + 2. / 3.*OM${iloc} * P${iloc}x * P${iloc}z * (${V}y - ${dot}*OM${iloc} * P${iloc}y)")) tPropB[3][1].append(Template(" (${V}x - ${dot}*OM${iloc} * P${iloc}x) * (-1 - OM${iloc} * P${iloc}z ** 2) - OM${iloc} * P${iloc}x * P${iloc}z * (${V}z - ${dot}*OM${iloc} * P${iloc}z) / 3.")) tPropB[3][2].append(Template(" -OM${iloc} * P${iloc}t * P${iloc}z * (${V}y - ${dot}*OM${iloc} * P${iloc}y) - OM${iloc} * P${iloc}t * P${iloc}y * (${V}z - ${dot}*OM${iloc} * P${iloc}z) + 2. / 3. * (${V}t - ${dot}*OM${iloc} * P${iloc}t)*OM${iloc} * P${iloc}y * P${iloc}z")) tPropB[3][2].append(Template("-OM${iloc} * P${iloc}x * P${iloc}z * (${V}y - ${dot}*OM${iloc} * P${iloc}y) - OM${iloc} * P${iloc}x * P${iloc}y * (${V}z - ${dot}*OM${iloc} * P${iloc}z) + 2. / 3. * (${V}x - ${dot}*OM${iloc} * P${iloc}x)*OM${iloc} * P${iloc}y * P${iloc}z")) tPropB[3][2].append(Template(" -(${V}y - ${dot}*OM${iloc} * P${iloc}y)*OM${iloc} * P${iloc}y * P${iloc}z / 3. + (-1 - OM${iloc} * P${iloc}y ** 2) * (${V}z - ${dot}*OM${iloc} * P${iloc}z)")) tPropB[3][2].append(Template(" (${V}y - ${dot}*OM${iloc} * P${iloc}y) * (-1 - OM${iloc} * P${iloc}z ** 2) - OM${iloc} * P${iloc}y * P${iloc}z * (${V}z - ${dot}*OM${iloc} * P${iloc}z) / 3.")) tPropB[3][3].append(Template(" -2*OM${iloc} * P${iloc}t * P${iloc}z * (${V}z - ${dot}*OM${iloc} * P${iloc}z) - 2. / 3. * (${V}t - ${dot}*OM${iloc} * P${iloc}t) * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropB[3][3].append(Template("-2*OM${iloc} * P${iloc}x * P${iloc}z * (${V}z - ${dot}*OM${iloc} * P${iloc}z) - 2. / 3. * (${V}x - ${dot}*OM${iloc} * P${iloc}x) * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropB[3][3].append(Template("-2*OM${iloc} * P${iloc}y * P${iloc}z * (${V}z - ${dot}*OM${iloc} * P${iloc}z) - 2. / 3. * (${V}y - ${dot}*OM${iloc} * P${iloc}y) * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropB[3][3].append(Template("4. / 3. * (${V}z - ${dot}*OM${iloc} * P${iloc}z) * (-1 - OM${iloc} * P${iloc}z ** 2)")) # tensor propagator, no contracted indices tPropC=[[[[],[],[],[]],[[],[],[],[]],[[],[],[],[]],[[],[],[],[]]], [[[],[],[],[]],[[],[],[],[]],[[],[],[],[]],[[],[],[],[]]], [[[],[],[],[]],[[],[],[],[]],[[],[],[],[]],[[],[],[],[]]], [[[],[],[],[]],[[],[],[],[]],[[],[],[],[]],[[],[],[],[]]]] tPropC[0][0][0].append(Template("4./3. * (1 - OM${iloc} * P${iloc}t ** 2) ** 2")) tPropC[0][0][0].append(Template("-4./3. 
* (1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}t * P${iloc}x")) tPropC[0][0][0].append(Template("-4./3. * (1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}t * P${iloc}y")) tPropC[0][0][0].append(Template("-4./3. * (1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}t * P${iloc}z")) tPropC[0][0][1].append(Template("-4./3. * (1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}t * P${iloc}x")) tPropC[0][0][1].append(Template("2*OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x ** 2 - 2./3. * (1 - OM${iloc} * P${iloc}t ** 2) * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[0][0][1].append(Template("2*OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x * P${iloc}y + 2./3. * (1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}x * P${iloc}y")) tPropC[0][0][1].append(Template("2*OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x * P${iloc}z + 2./3. * (1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}x * P${iloc}z")) tPropC[0][0][2].append(Template("-4./3. * (1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}t * P${iloc}y")) tPropC[0][0][2].append(Template("2*OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x * P${iloc}y + 2./3. * (1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}x * P${iloc}y")) tPropC[0][0][2].append(Template("2*OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}y ** 2 - 2./3. * (1 - OM${iloc} * P${iloc}t ** 2) * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropC[0][0][2].append(Template("2*OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}y * P${iloc}z + 2./3. * (1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}y * P${iloc}z")) tPropC[0][0][3].append(Template("-4./3. * (1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}t * P${iloc}z")) tPropC[0][0][3].append(Template("2*OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x * P${iloc}z + 2./3. * (1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}x * P${iloc}z")) tPropC[0][0][3].append(Template("2*OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}y * P${iloc}z + 2./3. * (1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}y * P${iloc}z")) tPropC[0][0][3].append(Template("2*OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}z ** 2 - 2./3. * (1 - OM${iloc} * P${iloc}t ** 2) * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropC[0][1][0].append(Template("-4./3. * (1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}t * P${iloc}x")) tPropC[0][1][0].append(Template("(1 - OM${iloc} * P${iloc}t ** 2) * (-1 - OM${iloc} * P${iloc}x ** 2) + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x ** 2 /3.")) tPropC[0][1][0].append(Template("-(1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}x * P${iloc}y + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x * P${iloc}y /3.")) tPropC[0][1][0].append(Template("-(1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}x * P${iloc}z + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x * P${iloc}z /3.")) tPropC[0][1][1].append(Template("(1 - OM${iloc} * P${iloc}t ** 2) * (-1 - OM${iloc} * P${iloc}x ** 2) + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x ** 2 /3.")) tPropC[0][1][1].append(Template("-4./3.*OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[0][1][1].append(Template("OM${iloc}**2 * P${iloc}t * P${iloc}x ** 2 * P${iloc}y /3. - OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[0][1][1].append(Template("OM${iloc}**2 * P${iloc}t * P${iloc}x ** 2 * P${iloc}z /3. 
- OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[0][1][2].append(Template("-(1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}x * P${iloc}y + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x * P${iloc}y /3.")) tPropC[0][1][2].append(Template("OM${iloc}**2 * P${iloc}t * P${iloc}x ** 2 * P${iloc}y /3. - OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[0][1][2].append(Template("2*OM${iloc}**2 * P${iloc}t * P${iloc}y ** 2 * P${iloc}x + 2./3.*OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropC[0][1][2].append(Template("4./3.*OM${iloc}**2 * P${iloc}t * P${iloc}y * P${iloc}x * P${iloc}z")) tPropC[0][1][3].append(Template("-(1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}x * P${iloc}z + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x * P${iloc}z /3.")) tPropC[0][1][3].append(Template("OM${iloc}**2 * P${iloc}t * P${iloc}x ** 2 * P${iloc}z /3. - OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[0][1][3].append(Template("4./3.*OM${iloc}**2 * P${iloc}t * P${iloc}y * P${iloc}x * P${iloc}z")) tPropC[0][1][3].append(Template("2*OM${iloc}**2 * P${iloc}t * P${iloc}z ** 2 * P${iloc}x + 2./3.*OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropC[0][2][0].append(Template("-4./3. * (1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}t * P${iloc}y")) tPropC[0][2][0].append(Template("-(1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}x * P${iloc}y + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x * P${iloc}y /3.")) tPropC[0][2][0].append(Template("(1 - OM${iloc} * P${iloc}t ** 2) * (-1 - OM${iloc} * P${iloc}y ** 2) + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}y ** 2 /3.")) tPropC[0][2][0].append(Template("-(1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}y * P${iloc}z + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}y * P${iloc}z /3.")) tPropC[0][2][1].append(Template("-(1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}x * P${iloc}y + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x * P${iloc}y /3.")) tPropC[0][2][1].append(Template("2*OM${iloc}**2 * P${iloc}t * P${iloc}x ** 2 * P${iloc}y + 2./3.*OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[0][2][1].append(Template("-OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}y ** 2) + OM${iloc}**2 * P${iloc}t * P${iloc}y ** 2 * P${iloc}x /3.")) tPropC[0][2][1].append(Template("4./3.*OM${iloc}**2 * P${iloc}t * P${iloc}y * P${iloc}x * P${iloc}z")) tPropC[0][2][2].append(Template("(1 - OM${iloc} * P${iloc}t ** 2) * (-1 - OM${iloc} * P${iloc}y ** 2) + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}y ** 2 /3.")) tPropC[0][2][2].append(Template("-OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}y ** 2) + OM${iloc}**2 * P${iloc}t * P${iloc}y ** 2 * P${iloc}x /3.")) tPropC[0][2][2].append(Template("-4./3.*OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropC[0][2][2].append(Template("OM${iloc}**2 * P${iloc}t * P${iloc}y ** 2 * P${iloc}z /3. - OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropC[0][2][3].append(Template("-(1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}y * P${iloc}z + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}y * P${iloc}z /3.")) tPropC[0][2][3].append(Template("4./3.*OM${iloc}**2 * P${iloc}t * P${iloc}y * P${iloc}x * P${iloc}z")) tPropC[0][2][3].append(Template("OM${iloc}**2 * P${iloc}t * P${iloc}y ** 2 * P${iloc}z /3. 
- OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropC[0][2][3].append(Template("2*OM${iloc}**2 * P${iloc}t * P${iloc}z ** 2 * P${iloc}y + 2./3.*OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropC[0][3][0].append(Template("-4./3. * (1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}t * P${iloc}z")) tPropC[0][3][0].append(Template("-(1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}x * P${iloc}z + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x * P${iloc}z /3.")) tPropC[0][3][0].append(Template("-(1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}y * P${iloc}z + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}y * P${iloc}z /3.")) tPropC[0][3][0].append(Template("(1 - OM${iloc} * P${iloc}t ** 2) * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}z ** 2 /3.")) tPropC[0][3][1].append(Template("-(1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}x * P${iloc}z + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x * P${iloc}z /3.")) tPropC[0][3][1].append(Template("2*OM${iloc}**2 * P${iloc}t * P${iloc}x ** 2 * P${iloc}z + 2./3.*OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[0][3][1].append(Template("4./3.*OM${iloc}**2 * P${iloc}t * P${iloc}y * P${iloc}x * P${iloc}z")) tPropC[0][3][1].append(Template("-OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}t * P${iloc}z ** 2 * P${iloc}x /3.")) tPropC[0][3][2].append(Template("-(1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}y * P${iloc}z + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}y * P${iloc}z /3.")) tPropC[0][3][2].append(Template("4./3.*OM${iloc}**2 * P${iloc}t * P${iloc}y * P${iloc}x * P${iloc}z")) tPropC[0][3][2].append(Template("2*OM${iloc}**2 * P${iloc}t * P${iloc}y ** 2 * P${iloc}z + 2./3.*OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropC[0][3][2].append(Template("-OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}t * P${iloc}z ** 2 * P${iloc}y /3.")) tPropC[0][3][3].append(Template("(1 - OM${iloc} * P${iloc}t ** 2) * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}z ** 2 /3.")) tPropC[0][3][3].append(Template("-OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}t * P${iloc}z ** 2 * P${iloc}x /3.")) tPropC[0][3][3].append(Template("-OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}t * P${iloc}z ** 2 * P${iloc}y /3.")) tPropC[0][3][3].append(Template("-4./3.*OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropC[1][0][0].append(Template(" -4./3. 
* (1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}t * P${iloc}x")) tPropC[1][0][0].append(Template(" (1 - OM${iloc} * P${iloc}t ** 2) * (-1 - OM${iloc} * P${iloc}x ** 2) + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x ** 2 /3.")) tPropC[1][0][0].append(Template(" -(1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}x * P${iloc}y + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x * P${iloc}y /3.")) tPropC[1][0][0].append(Template(" -(1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}x * P${iloc}z + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x * P${iloc}z /3.")) tPropC[1][0][1].append(Template(" (1 - OM${iloc} * P${iloc}t ** 2) * (-1 - OM${iloc} * P${iloc}x ** 2) + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x ** 2 /3.")) tPropC[1][0][1].append(Template(" -4./3.*OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[1][0][1].append(Template(" OM${iloc}**2 * P${iloc}t * P${iloc}x ** 2 * P${iloc}y /3. - OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[1][0][1].append(Template(" OM${iloc}**2 * P${iloc}t * P${iloc}x ** 2 * P${iloc}z /3. - OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[1][0][2].append(Template(" -(1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}x * P${iloc}y + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x * P${iloc}y /3.")) tPropC[1][0][2].append(Template(" OM${iloc}**2 * P${iloc}t * P${iloc}x ** 2 * P${iloc}y /3. - OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[1][0][2].append(Template(" 2*OM${iloc}**2 * P${iloc}t * P${iloc}y ** 2 * P${iloc}x + 2./3.*OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropC[1][0][2].append(Template(" 4./3.*OM${iloc}**2 * P${iloc}t * P${iloc}y * P${iloc}x * P${iloc}z")) tPropC[1][0][3].append(Template(" -(1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}x * P${iloc}z + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x * P${iloc}z /3.")) tPropC[1][0][3].append(Template(" OM${iloc}**2 * P${iloc}t * P${iloc}x ** 2 * P${iloc}z /3. - OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[1][0][3].append(Template(" 4./3.*OM${iloc}**2 * P${iloc}t * P${iloc}y * P${iloc}x * P${iloc}z")) tPropC[1][0][3].append(Template(" 2*OM${iloc}**2 * P${iloc}t * P${iloc}z ** 2 * P${iloc}x + 2./3.*OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropC[1][1][0].append(Template(" 2*OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x ** 2 - 2./3. * (1 - OM${iloc} * P${iloc}t ** 2) * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[1][1][0].append(Template(" -4./3.*OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[1][1][0].append(Template(" 2*OM${iloc}**2 * P${iloc}t * P${iloc}x ** 2 * P${iloc}y + 2./3.*OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[1][1][0].append(Template(" 2*OM${iloc}**2 * P${iloc}t * P${iloc}x ** 2 * P${iloc}z + 2./3.*OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[1][1][1].append(Template(" -4./3.*OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[1][1][1].append(Template(" 4./3. * (-1 - OM${iloc} * P${iloc}x ** 2) ** 2")) tPropC[1][1][1].append(Template(" -4./3. * (-1 - OM${iloc} * P${iloc}x ** 2)*OM${iloc} * P${iloc}x * P${iloc}y")) tPropC[1][1][1].append(Template(" -4./3. 
* (-1 - OM${iloc} * P${iloc}x ** 2)*OM${iloc} * P${iloc}x * P${iloc}z")) tPropC[1][1][2].append(Template(" 2*OM${iloc}**2 * P${iloc}t * P${iloc}x ** 2 * P${iloc}y + 2./3.*OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[1][1][2].append(Template(" -4./3. * (-1 - OM${iloc} * P${iloc}x ** 2)*OM${iloc} * P${iloc}x * P${iloc}y")) tPropC[1][1][2].append(Template(" 2*OM${iloc}**2 * P${iloc}x ** 2 * P${iloc}y ** 2 - 2./3. * (-1 - OM${iloc} * P${iloc}x ** 2) * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropC[1][1][2].append(Template(" 2*OM${iloc}**2 * P${iloc}x ** 2 * P${iloc}y * P${iloc}z + 2./3. * (-1 - OM${iloc} * P${iloc}x ** 2)*OM${iloc} * P${iloc}y * P${iloc}z")) tPropC[1][1][3].append(Template(" 2*OM${iloc}**2 * P${iloc}t * P${iloc}x ** 2 * P${iloc}z + 2./3.*OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[1][1][3].append(Template(" -4./3. * (-1 - OM${iloc} * P${iloc}x ** 2)*OM${iloc} * P${iloc}x * P${iloc}z")) tPropC[1][1][3].append(Template(" 2*OM${iloc}**2 * P${iloc}x ** 2 * P${iloc}y * P${iloc}z + 2./3. * (-1 - OM${iloc} * P${iloc}x ** 2)*OM${iloc} * P${iloc}y * P${iloc}z")) tPropC[1][1][3].append(Template(" 2*OM${iloc}**2 * P${iloc}x ** 2 * P${iloc}z ** 2 - 2./3. * (-1 - OM${iloc} * P${iloc}x ** 2) * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropC[1][2][0].append(Template("2*OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x * P${iloc}y + 2./3. * (1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}x * P${iloc}y")) tPropC[1][2][0].append(Template("OM${iloc}**2 * P${iloc}t * P${iloc}x ** 2 * P${iloc}y /3. - OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[1][2][0].append(Template("-OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}y ** 2) + OM${iloc}**2 * P${iloc}t * P${iloc}y ** 2 * P${iloc}x /3.")) tPropC[1][2][0].append(Template("4./3.*OM${iloc}**2 * P${iloc}t * P${iloc}y * P${iloc}x * P${iloc}z ")) tPropC[1][2][1].append(Template("OM${iloc}**2 * P${iloc}t * P${iloc}x ** 2 * P${iloc}y /3. - OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[1][2][1].append(Template(" -4./3. * (-1 - OM${iloc} * P${iloc}x ** 2)*OM${iloc} * P${iloc}x * P${iloc}y ")) tPropC[1][2][1].append(Template(" (-1 - OM${iloc} * P${iloc}x ** 2) * (-1 - OM${iloc} * P${iloc}y ** 2) + OM${iloc}**2 * P${iloc}x ** 2 * P${iloc}y ** 2 /3. ")) tPropC[1][2][1].append(Template(" -(-1 - OM${iloc} * P${iloc}x ** 2)*OM${iloc} * P${iloc}y * P${iloc}z + OM${iloc}**2 * P${iloc}x ** 2 * P${iloc}y * P${iloc}z /3.")) tPropC[1][2][2].append(Template(" -OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}y ** 2) + OM${iloc}**2 * P${iloc}t * P${iloc}y ** 2 * P${iloc}x /3.")) tPropC[1][2][2].append(Template(" (-1 - OM${iloc} * P${iloc}x ** 2) * (-1 - OM${iloc} * P${iloc}y ** 2) + OM${iloc}**2 * P${iloc}x ** 2 * P${iloc}y ** 2 /3. ")) tPropC[1][2][2].append(Template(" -4./3.*OM${iloc} * P${iloc}x * P${iloc}y * (-1 - OM${iloc} * P${iloc}y ** 2) ")) tPropC[1][2][2].append(Template(" OM${iloc}**2 * P${iloc}x * P${iloc}y ** 2 * P${iloc}z /3. - OM${iloc} * P${iloc}x * P${iloc}z * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropC[1][2][3].append(Template(" 4./3.*OM${iloc}**2 * P${iloc}t * P${iloc}y * P${iloc}x * P${iloc}z ")) tPropC[1][2][3].append(Template(" -(-1 - OM${iloc} * P${iloc}x ** 2)*OM${iloc} * P${iloc}y * P${iloc}z + OM${iloc}**2 * P${iloc}x ** 2 * P${iloc}y * P${iloc}z /3.")) tPropC[1][2][3].append(Template(" OM${iloc}**2 * P${iloc}x * P${iloc}y ** 2 * P${iloc}z /3. 
- OM${iloc} * P${iloc}x * P${iloc}z * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropC[1][2][3].append(Template(" 2*OM${iloc}**2 * P${iloc}x * P${iloc}z ** 2 * P${iloc}y + 2./3.*OM${iloc} * P${iloc}x * P${iloc}y * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropC[1][3][0].append(Template("2*OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x * P${iloc}z + 2./3. * (1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}x * P${iloc}z")) tPropC[1][3][0].append(Template("OM${iloc}**2 * P${iloc}t * P${iloc}x ** 2 * P${iloc}z /3. - OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[1][3][0].append(Template("4./3.*OM${iloc}**2 * P${iloc}t * P${iloc}y * P${iloc}x * P${iloc}z ")) tPropC[1][3][0].append(Template("-OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}t * P${iloc}z ** 2 * P${iloc}x /3.")) tPropC[1][3][1].append(Template("OM${iloc}**2 * P${iloc}t * P${iloc}x ** 2 * P${iloc}z /3. - OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[1][3][1].append(Template("-4./3. * (-1 - OM${iloc} * P${iloc}x ** 2)*OM${iloc} * P${iloc}x * P${iloc}z ")) tPropC[1][3][1].append(Template("-(-1 - OM${iloc} * P${iloc}x ** 2)*OM${iloc} * P${iloc}y * P${iloc}z + OM${iloc}**2 * P${iloc}x ** 2 * P${iloc}y * P${iloc}z /3.")) tPropC[1][3][1].append(Template("(-1 - OM${iloc} * P${iloc}x ** 2) * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}x ** 2 * P${iloc}z ** 2 /3. ")) tPropC[1][3][2].append(Template("4./3.*OM${iloc}**2 * P${iloc}t * P${iloc}y * P${iloc}x * P${iloc}z ")) tPropC[1][3][2].append(Template("-(-1 - OM${iloc} * P${iloc}x ** 2)*OM${iloc} * P${iloc}y * P${iloc}z + OM${iloc}**2 * P${iloc}x ** 2 * P${iloc}y * P${iloc}z /3.")) tPropC[1][3][2].append(Template("2*OM${iloc}**2 * P${iloc}x * P${iloc}y ** 2 * P${iloc}z + 2./3.*OM${iloc} * P${iloc}x * P${iloc}z * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropC[1][3][2].append(Template("-OM${iloc} * P${iloc}x * P${iloc}y * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}x * P${iloc}z ** 2 * P${iloc}y /3.")) tPropC[1][3][3].append(Template("-OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}t * P${iloc}z ** 2 * P${iloc}x /3.")) tPropC[1][3][3].append(Template("(-1 - OM${iloc} * P${iloc}x ** 2) * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}x ** 2 * P${iloc}z ** 2 /3. ")) tPropC[1][3][3].append(Template("-OM${iloc} * P${iloc}x * P${iloc}y * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}x * P${iloc}z ** 2 * P${iloc}y /3.")) tPropC[1][3][3].append(Template("-4./3.*OM${iloc} * P${iloc}x * P${iloc}z * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropC[2][0][0].append(Template("-4./3. * (1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}t * P${iloc}y ")) tPropC[2][0][0].append(Template("-(1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}x * P${iloc}y + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x * P${iloc}y /3. ")) tPropC[2][0][0].append(Template("(1 - OM${iloc} * P${iloc}t ** 2) * (-1 - OM${iloc} * P${iloc}y ** 2) + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}y ** 2 /3. ")) tPropC[2][0][0].append(Template("-(1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}y * P${iloc}z + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}y * P${iloc}z /3. ")) tPropC[2][0][1].append(Template("-(1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}x * P${iloc}y + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x * P${iloc}y /3. 
")) tPropC[2][0][1].append(Template("2*OM${iloc}**2 * P${iloc}t * P${iloc}x ** 2 * P${iloc}y + 2./3.*OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[2][0][1].append(Template("-OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}y ** 2) + OM${iloc}**2 * P${iloc}t * P${iloc}y ** 2 * P${iloc}x /3.")) tPropC[2][0][1].append(Template("4./3.*OM${iloc}**2 * P${iloc}t * P${iloc}y * P${iloc}x * P${iloc}z ")) tPropC[2][0][2].append(Template("(1 - OM${iloc} * P${iloc}t ** 2) * (-1 - OM${iloc} * P${iloc}y ** 2) + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}y ** 2 /3. ")) tPropC[2][0][2].append(Template("-OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}y ** 2) + OM${iloc}**2 * P${iloc}t * P${iloc}y ** 2 * P${iloc}x /3.")) tPropC[2][0][2].append(Template("-4./3.*OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}y ** 2) ")) tPropC[2][0][2].append(Template("OM${iloc}**2 * P${iloc}t * P${iloc}y ** 2 * P${iloc}z /3. - OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropC[2][0][3].append(Template("-(1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}y * P${iloc}z + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}y * P${iloc}z /3. ")) tPropC[2][0][3].append(Template("4./3.*OM${iloc}**2 * P${iloc}t * P${iloc}y * P${iloc}x * P${iloc}z ")) tPropC[2][0][3].append(Template("OM${iloc}**2 * P${iloc}t * P${iloc}y ** 2 * P${iloc}z /3. - OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropC[2][0][3].append(Template("2*OM${iloc}**2 * P${iloc}t * P${iloc}z ** 2 * P${iloc}y + 2./3.*OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropC[2][1][0].append(Template("2*OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x * P${iloc}y + 2./3. * (1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}x * P${iloc}y")) tPropC[2][1][0].append(Template("OM${iloc}**2 * P${iloc}t * P${iloc}x ** 2 * P${iloc}y /3. - OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[2][1][0].append(Template("-OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}y ** 2) + OM${iloc}**2 * P${iloc}t * P${iloc}y ** 2 * P${iloc}x /3.")) tPropC[2][1][0].append(Template("4./3.*OM${iloc}**2 * P${iloc}t * P${iloc}y * P${iloc}x * P${iloc}z ")) tPropC[2][1][1].append(Template("OM${iloc}**2 * P${iloc}t * P${iloc}x ** 2 * P${iloc}y /3. - OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[2][1][1].append(Template("-4./3. * (-1 - OM${iloc} * P${iloc}x ** 2)*OM${iloc} * P${iloc}x * P${iloc}y ")) tPropC[2][1][1].append(Template("(-1 - OM${iloc} * P${iloc}x ** 2) * (-1 - OM${iloc} * P${iloc}y ** 2) + OM${iloc}**2 * P${iloc}x ** 2 * P${iloc}y ** 2 /3. ")) tPropC[2][1][1].append(Template("-(-1 - OM${iloc} * P${iloc}x ** 2)*OM${iloc} * P${iloc}y * P${iloc}z + OM${iloc}**2 * P${iloc}x ** 2 * P${iloc}y * P${iloc}z /3.")) tPropC[2][1][2].append(Template("-OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}y ** 2) + OM${iloc}**2 * P${iloc}t * P${iloc}y ** 2 * P${iloc}x /3.")) tPropC[2][1][2].append(Template("(-1 - OM${iloc} * P${iloc}x ** 2) * (-1 - OM${iloc} * P${iloc}y ** 2) + OM${iloc}**2 * P${iloc}x ** 2 * P${iloc}y ** 2 /3. ")) tPropC[2][1][2].append(Template("-4./3.*OM${iloc} * P${iloc}x * P${iloc}y * (-1 - OM${iloc} * P${iloc}y ** 2) ")) tPropC[2][1][2].append(Template("OM${iloc}**2 * P${iloc}x * P${iloc}y ** 2 * P${iloc}z /3. 
- OM${iloc} * P${iloc}x * P${iloc}z * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropC[2][1][3].append(Template("4./3.*OM${iloc}**2 * P${iloc}t * P${iloc}y * P${iloc}x * P${iloc}z ")) tPropC[2][1][3].append(Template("-(-1 - OM${iloc} * P${iloc}x ** 2)*OM${iloc} * P${iloc}y * P${iloc}z + OM${iloc}**2 * P${iloc}x ** 2 * P${iloc}y * P${iloc}z /3.")) tPropC[2][1][3].append(Template("OM${iloc}**2 * P${iloc}x * P${iloc}y ** 2 * P${iloc}z /3. - OM${iloc} * P${iloc}x * P${iloc}z * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropC[2][1][3].append(Template("2*OM${iloc}**2 * P${iloc}x * P${iloc}z ** 2 * P${iloc}y + 2./3.*OM${iloc} * P${iloc}x * P${iloc}y * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropC[2][2][0].append(Template("2*OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}y ** 2 - 2./3. * (1 - OM${iloc} * P${iloc}t ** 2) * (-1 - OM${iloc} * P${iloc}y ** 2) ")) tPropC[2][2][0].append(Template("2*OM${iloc}**2 * P${iloc}t * P${iloc}y ** 2 * P${iloc}x + 2./3.*OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}y ** 2) ")) tPropC[2][2][0].append(Template("-4./3.*OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}y ** 2) ")) tPropC[2][2][0].append(Template("2*OM${iloc}**2 * P${iloc}t * P${iloc}y ** 2 * P${iloc}z + 2./3.*OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}y ** 2) ")) tPropC[2][2][1].append(Template("2*OM${iloc}**2 * P${iloc}t * P${iloc}y ** 2 * P${iloc}x + 2./3.*OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}y ** 2) ")) tPropC[2][2][1].append(Template("2*OM${iloc}**2 * P${iloc}x ** 2 * P${iloc}y ** 2 - 2./3. * (-1 - OM${iloc} * P${iloc}x ** 2) * (-1 - OM${iloc} * P${iloc}y ** 2) ")) tPropC[2][2][1].append(Template("-4./3.*OM${iloc} * P${iloc}x * P${iloc}y * (-1 - OM${iloc} * P${iloc}y ** 2) ")) tPropC[2][2][1].append(Template("2*OM${iloc}**2 * P${iloc}x * P${iloc}y ** 2 * P${iloc}z + 2./3.*OM${iloc} * P${iloc}x * P${iloc}z * (-1 - OM${iloc} * P${iloc}y ** 2) ")) tPropC[2][2][2].append(Template("-4./3.*OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}y ** 2) ")) tPropC[2][2][2].append(Template("-4./3.*OM${iloc} * P${iloc}x * P${iloc}y * (-1 - OM${iloc} * P${iloc}y ** 2) ")) tPropC[2][2][2].append(Template("4./3. * (-1 - OM${iloc} * P${iloc}y ** 2) ** 2 ")) tPropC[2][2][2].append(Template("-4./3. * (-1 - OM${iloc} * P${iloc}y ** 2)*OM${iloc} * P${iloc}y * P${iloc}z ")) tPropC[2][2][3].append(Template("2*OM${iloc}**2 * P${iloc}t * P${iloc}y ** 2 * P${iloc}z + 2./3.*OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}y ** 2) ")) tPropC[2][2][3].append(Template("2*OM${iloc}**2 * P${iloc}x * P${iloc}y ** 2 * P${iloc}z + 2./3.*OM${iloc} * P${iloc}x * P${iloc}z * (-1 - OM${iloc} * P${iloc}y ** 2) ")) tPropC[2][2][3].append(Template("-4./3. * (-1 - OM${iloc} * P${iloc}y ** 2)*OM${iloc} * P${iloc}y * P${iloc}z ")) tPropC[2][2][3].append(Template("2*OM${iloc}**2 * P${iloc}y ** 2 * P${iloc}z ** 2 - 2./3. * (-1 - OM${iloc} * P${iloc}y ** 2) * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropC[2][3][0].append(Template(" 2*OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}y * P${iloc}z + 2./3. * (1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}y * P${iloc}z")) tPropC[2][3][0].append(Template(" 4./3.*OM${iloc}**2 * P${iloc}t * P${iloc}y * P${iloc}x * P${iloc}z ")) tPropC[2][3][0].append(Template(" OM${iloc}**2 * P${iloc}t * P${iloc}y ** 2 * P${iloc}z /3. 
- OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropC[2][3][0].append(Template(" -OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}t * P${iloc}z ** 2 * P${iloc}y /3.")) tPropC[2][3][1].append(Template(" 4./3.*OM${iloc}**2 * P${iloc}t * P${iloc}y * P${iloc}x * P${iloc}z ")) tPropC[2][3][1].append(Template(" 2*OM${iloc}**2 * P${iloc}x ** 2 * P${iloc}y * P${iloc}z + 2./3. * (-1 - OM${iloc} * P${iloc}x ** 2)*OM${iloc} * P${iloc}y * P${iloc}z")) tPropC[2][3][1].append(Template(" OM${iloc}**2 * P${iloc}x * P${iloc}y ** 2 * P${iloc}z /3. - OM${iloc} * P${iloc}x * P${iloc}z * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropC[2][3][1].append(Template(" -OM${iloc} * P${iloc}x * P${iloc}y * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}x * P${iloc}z ** 2 * P${iloc}y /3.")) tPropC[2][3][2].append(Template(" OM${iloc}**2 * P${iloc}t * P${iloc}y ** 2 * P${iloc}z /3. - OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropC[2][3][2].append(Template(" OM${iloc}**2 * P${iloc}x * P${iloc}y ** 2 * P${iloc}z /3. - OM${iloc} * P${iloc}x * P${iloc}z * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropC[2][3][2].append(Template(" -4./3. * (-1 - OM${iloc} * P${iloc}y ** 2)*OM${iloc} * P${iloc}y * P${iloc}z ")) tPropC[2][3][2].append(Template(" (-1 - OM${iloc} * P${iloc}y ** 2) * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}y ** 2 * P${iloc}z ** 2 /3. ")) tPropC[2][3][3].append(Template(" -OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}t * P${iloc}z ** 2 * P${iloc}y /3.")) tPropC[2][3][3].append(Template(" -OM${iloc} * P${iloc}x * P${iloc}y * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}x * P${iloc}z ** 2 * P${iloc}y /3.")) tPropC[2][3][3].append(Template(" (-1 - OM${iloc} * P${iloc}y ** 2) * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}y ** 2 * P${iloc}z ** 2 /3. ")) tPropC[2][3][3].append(Template(" -4./3.*OM${iloc} * P${iloc}y * P${iloc}z * (-1 - OM${iloc} * P${iloc}z ** 2) ")) tPropC[3][0][0].append(Template(" -4./3. * (1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}t * P${iloc}z ")) tPropC[3][0][0].append(Template(" -(1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}x * P${iloc}z + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x * P${iloc}z /3. ")) tPropC[3][0][0].append(Template(" -(1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}y * P${iloc}z + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}y * P${iloc}z /3. ")) tPropC[3][0][0].append(Template(" (1 - OM${iloc} * P${iloc}t ** 2) * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}z ** 2 /3. ")) tPropC[3][0][1].append(Template(" -(1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}x * P${iloc}z + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x * P${iloc}z /3. ")) tPropC[3][0][1].append(Template(" 2*OM${iloc}**2 * P${iloc}t * P${iloc}x ** 2 * P${iloc}z + 2./3.*OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[3][0][1].append(Template(" 4./3.*OM${iloc}**2 * P${iloc}t * P${iloc}y * P${iloc}x * P${iloc}z ")) tPropC[3][0][1].append(Template(" -OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}t * P${iloc}z ** 2 * P${iloc}x /3.")) tPropC[3][0][2].append(Template(" -(1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}y * P${iloc}z + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}y * P${iloc}z /3. 
")) tPropC[3][0][2].append(Template(" 4./3.*OM${iloc}**2 * P${iloc}t * P${iloc}y * P${iloc}x * P${iloc}z ")) tPropC[3][0][2].append(Template(" 2*OM${iloc}**2 * P${iloc}t * P${iloc}y ** 2 * P${iloc}z + 2./3.*OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropC[3][0][2].append(Template(" -OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}t * P${iloc}z ** 2 * P${iloc}y /3.")) tPropC[3][0][3].append(Template(" (1 - OM${iloc} * P${iloc}t ** 2) * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}z ** 2 /3. ")) tPropC[3][0][3].append(Template(" -OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}t * P${iloc}z ** 2 * P${iloc}x /3.")) tPropC[3][0][3].append(Template(" -OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}t * P${iloc}z ** 2 * P${iloc}y /3.")) tPropC[3][0][3].append(Template(" -4./3.*OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropC[3][1][0].append(Template(" 2*OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}x * P${iloc}z + 2./3. * (1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}x * P${iloc}z")) tPropC[3][1][0].append(Template(" OM${iloc}**2 * P${iloc}t * P${iloc}x ** 2 * P${iloc}z /3. - OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[3][1][0].append(Template(" 4./3.*OM${iloc}**2 * P${iloc}t * P${iloc}y * P${iloc}x * P${iloc}z ")) tPropC[3][1][0].append(Template(" -OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}t * P${iloc}z ** 2 * P${iloc}x /3.")) tPropC[3][1][1].append(Template(" OM${iloc}**2 * P${iloc}t * P${iloc}x ** 2 * P${iloc}z /3. - OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}x ** 2)")) tPropC[3][1][1].append(Template(" -4./3. * (-1 - OM${iloc} * P${iloc}x ** 2)*OM${iloc} * P${iloc}x * P${iloc}z ")) tPropC[3][1][1].append(Template(" -(-1 - OM${iloc} * P${iloc}x ** 2)*OM${iloc} * P${iloc}y * P${iloc}z + OM${iloc}**2 * P${iloc}x ** 2 * P${iloc}y * P${iloc}z /3.")) tPropC[3][1][1].append(Template(" (-1 - OM${iloc} * P${iloc}x ** 2) * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}x ** 2 * P${iloc}z ** 2 /3. ")) tPropC[3][1][2].append(Template(" 4./3.*OM${iloc}**2 * P${iloc}t * P${iloc}y * P${iloc}x * P${iloc}z ")) tPropC[3][1][2].append(Template(" -(-1 - OM${iloc} * P${iloc}x ** 2)*OM${iloc} * P${iloc}y * P${iloc}z + OM${iloc}**2 * P${iloc}x ** 2 * P${iloc}y * P${iloc}z /3.")) tPropC[3][1][2].append(Template(" 2*OM${iloc}**2 * P${iloc}x * P${iloc}y ** 2 * P${iloc}z + 2./3.*OM${iloc} * P${iloc}x * P${iloc}z * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropC[3][1][2].append(Template(" -OM${iloc} * P${iloc}x * P${iloc}y * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}x * P${iloc}z ** 2 * P${iloc}y /3.")) tPropC[3][1][3].append(Template(" -OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}t * P${iloc}z ** 2 * P${iloc}x /3.")) tPropC[3][1][3].append(Template(" (-1 - OM${iloc} * P${iloc}x ** 2) * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}x ** 2 * P${iloc}z ** 2 /3. 
")) tPropC[3][1][3].append(Template(" -OM${iloc} * P${iloc}x * P${iloc}y * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}x * P${iloc}z ** 2 * P${iloc}y /3.")) tPropC[3][1][3].append(Template(" -4./3.*OM${iloc} * P${iloc}x * P${iloc}z * (-1 - OM${iloc} * P${iloc}z ** 2) ")) tPropC[3][2][0].append(Template(" 2*OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}y * P${iloc}z + 2./3. * (1 - OM${iloc} * P${iloc}t ** 2)*OM${iloc} * P${iloc}y * P${iloc}z")) tPropC[3][2][0].append(Template(" 4./3.*OM${iloc}**2 * P${iloc}t * P${iloc}y * P${iloc}x * P${iloc}z")) tPropC[3][2][0].append(Template(" OM${iloc}**2 * P${iloc}t * P${iloc}y ** 2 * P${iloc}z /3. - OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropC[3][2][0].append(Template(" -OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}t * P${iloc}z ** 2 * P${iloc}y /3.")) tPropC[3][2][1].append(Template(" 4./3.*OM${iloc}**2 * P${iloc}t * P${iloc}y * P${iloc}x * P${iloc}z")) tPropC[3][2][1].append(Template(" 2*OM${iloc}**2 * P${iloc}x ** 2 * P${iloc}y * P${iloc}z + 2./3. * (-1 - OM${iloc} * P${iloc}x ** 2)*OM${iloc} * P${iloc}y * P${iloc}z")) tPropC[3][2][1].append(Template(" OM${iloc}**2 * P${iloc}x * P${iloc}y ** 2 * P${iloc}z /3. - OM${iloc} * P${iloc}x * P${iloc}z * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropC[3][2][1].append(Template(" -OM${iloc} * P${iloc}x * P${iloc}y * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}x * P${iloc}z ** 2 * P${iloc}y /3.")) tPropC[3][2][2].append(Template(" OM${iloc}**2 * P${iloc}t * P${iloc}y ** 2 * P${iloc}z /3. - OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropC[3][2][2].append(Template(" OM${iloc}**2 * P${iloc}x * P${iloc}y ** 2 * P${iloc}z /3. - OM${iloc} * P${iloc}x * P${iloc}z * (-1 - OM${iloc} * P${iloc}y ** 2)")) tPropC[3][2][2].append(Template(" -4./3. * (-1 - OM${iloc} * P${iloc}y ** 2)*OM${iloc} * P${iloc}y * P${iloc}z")) tPropC[3][2][2].append(Template(" (-1 - OM${iloc} * P${iloc}y ** 2) * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}y ** 2 * P${iloc}z ** 2 /3. ")) tPropC[3][2][3].append(Template(" -OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}t * P${iloc}z ** 2 * P${iloc}y /3.")) tPropC[3][2][3].append(Template(" -OM${iloc} * P${iloc}x * P${iloc}y * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}x * P${iloc}z ** 2 * P${iloc}y /3.")) tPropC[3][2][3].append(Template(" (-1 - OM${iloc} * P${iloc}y ** 2) * (-1 - OM${iloc} * P${iloc}z ** 2) + OM${iloc}**2 * P${iloc}y ** 2 * P${iloc}z ** 2 /3. ")) tPropC[3][2][3].append(Template(" -4./3.*OM${iloc} * P${iloc}y * P${iloc}z * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropC[3][3][0].append(Template(" 2*OM${iloc}**2 * P${iloc}t ** 2 * P${iloc}z ** 2 - 2./3. 
* (1 - OM${iloc} * P${iloc}t ** 2) * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropC[3][3][0].append(Template(" 2*OM${iloc}**2 * P${iloc}t * P${iloc}z ** 2 * P${iloc}x + 2./3.*OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropC[3][3][0].append(Template(" 2*OM${iloc}**2 * P${iloc}t * P${iloc}z ** 2 * P${iloc}y + 2./3.*OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropC[3][3][0].append(Template(" -4./3.*OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}z ** 2) ")) tPropC[3][3][1].append(Template(" 2*OM${iloc}**2 * P${iloc}t * P${iloc}z ** 2 * P${iloc}x + 2./3.*OM${iloc} * P${iloc}t * P${iloc}x * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropC[3][3][1].append(Template(" 2*OM${iloc}**2 * P${iloc}x ** 2 * P${iloc}z ** 2 - 2./3. * (-1 - OM${iloc} * P${iloc}x ** 2) * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropC[3][3][1].append(Template(" 2*OM${iloc}**2 * P${iloc}x * P${iloc}z ** 2 * P${iloc}y + 2./3.*OM${iloc} * P${iloc}x * P${iloc}y * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropC[3][3][1].append(Template(" -4./3.*OM${iloc} * P${iloc}x * P${iloc}z * (-1 - OM${iloc} * P${iloc}z ** 2) ")) tPropC[3][3][2].append(Template(" 2*OM${iloc}**2 * P${iloc}t * P${iloc}z ** 2 * P${iloc}y + 2./3.*OM${iloc} * P${iloc}t * P${iloc}y * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropC[3][3][2].append(Template(" 2*OM${iloc}**2 * P${iloc}x * P${iloc}z ** 2 * P${iloc}y + 2./3.*OM${iloc} * P${iloc}x * P${iloc}y * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropC[3][3][2].append(Template(" 2*OM${iloc}**2 * P${iloc}y ** 2 * P${iloc}z ** 2 - 2./3. * (-1 - OM${iloc} * P${iloc}y ** 2) * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropC[3][3][2].append(Template(" -4./3.*OM${iloc} * P${iloc}y * P${iloc}z * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropC[3][3][3].append(Template(" -4./3.*OM${iloc} * P${iloc}t * P${iloc}z * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropC[3][3][3].append(Template(" -4./3.*OM${iloc} * P${iloc}x * P${iloc}z * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropC[3][3][3].append(Template(" -4./3.*OM${iloc} * P${iloc}y * P${iloc}z * (-1 - OM${iloc} * P${iloc}z ** 2)")) tPropC[3][3][3].append(Template(" 4./3. * (-1 - OM${iloc} * P${iloc}z ** 2) ** 2")) imap=["t","x","y","z"] RSDotProduct = Template("${s}ts${si}*${v}t-${s}xs${si}*${v}x-${s}ys${si}*${v}y-${s}zs${si}*${v}z") vTemplateT="""\ {header} {{ if({type}W{iloc}.id()=={id}) {{ return {normal}; }} else {{ return {transpose}; }} }}; """ vTemplate4="""\ {header} {{ {swap} if(id{iloc1}=={id1}) {{ if(id{iloc2}=={id2}) {{ return {res1}; }} else {{ return {res2}; }} }} else {{ if(id{iloc2}=={id2}) {{ return {res3}; }} else {{ return {res4}; }} }} }}; """ vecTemplate="""\ Energy2 p2 = P{iloc}.m2(); LorentzPolarizationVector vtemp = {res}; Complex fact = -Complex(0.,1.)*({cf})*propagator(iopt,p2,out,mass,width); if(mass.real() < ZERO) mass = (iopt==5) ? ZERO : out->mass(); complex mass2 = sqr(mass); if(mass.real()==ZERO) {{ vtemp =fact*vtemp; }} else {{ complex dot = P{iloc}*vtemp; vtemp = fact*(vtemp-dot/mass2*P{iloc}); }} return VectorWaveFunction(P{iloc},out,vtemp.x(),vtemp.y(),vtemp.z(),vtemp.t()); """ sTemplate="""\ Energy2 p2 = P{iloc}.m2(); if(mass.real() < ZERO) mass = (iopt==5) ? ZERO : out->mass(); Complex fact = Complex(0.,1.)*({cf})*propagator(iopt,p2,out,mass,width); Lorentz{offTypeA} newSpin = fact*({res}); return {offTypeB}(P{iloc},out,newSpin); """ RSTemplate="""\ if(mass.real() < ZERO) mass = (iopt==5) ? 
ZERO : out->mass(); Energy2 p2 = P{iloc}.m2(); Complex fact = Complex(0.,-1.)*({cf})*propagator(iopt,p2,out,mass,width); complex Omass = mass.real()==ZERO ? InvEnergy(ZERO) : 1./mass; Lorentz{offTypeA} newSpin = fact*({res}); return {offTypeB}(P{iloc},out,newSpin); """ scaTemplate="""\ if(mass.real() < ZERO) mass = (iopt==5) ? ZERO : out->mass(); Energy2 p2 = P{iloc}.m2(); Complex fact = Complex(0.,1.)*({cf})*propagator(iopt,p2,out,mass,width); complex output = fact*({res}); return ScalarWaveFunction(P{iloc},out,output); """ tenTemplate="""\ if(mass.real() < ZERO) mass = (iopt==5) ? ZERO : out->mass(); InvEnergy2 OM{iloc} = mass.real()==ZERO ? InvEnergy2(ZERO) : 1./sqr(mass.real()); Energy2 M{iloc}2 = sqr(mass.real()); Energy2 p2 = P{iloc}.m2(); Complex fact = Complex(0.,1.)*({cf})*propagator(iopt,p2,out,mass,width); LorentzTensor output = fact*({res}); return TensorWaveFunction(P{iloc},out,output); """ # various strings for matrixes I4 = "Matrix([[1.,0,0,0],[0,1.,0,0],[0,0,1.,0],[0,0,0,1.]])" G5 = "Matrix([[-1.,0,0,0],[0,-1.,0,0],[0,0,1.,0],[0,0,0,1.]])" PM = "Matrix([[1.,0,0,0],[0,1.,0,0],[0,0,0,0],[0,0,0,0]])" PP = "Matrix([[0,0,0,0],[0,0,0,0],[0,0,1.,0],[0,0,0,1.]])" vslash = Template("Matrix([[0,0,${v}TMZ,-${v}XMY],[0,0,-${v}XPY,${v}TPZ],[${v}TPZ,${v}XMY,0,0],[${v}XPY,${v}TMZ,0,0]])") vslashS = Template("${v}TPZ=Symbol(\"${v}TPZ\")\n${v}TMZ=Symbol(\"${v}TMZ\")\n${v}XPY=Symbol(\"${v}XPY\")\n${v}XMY=Symbol(\"${v}XMY\")\n") momCom = Template("${v}t = Symbol(\"${v}t\")\n${v}x = Symbol(\"${v}x\")\n${v}y = Symbol(\"${v}y\")\n${v}z = Symbol(\"${v}z\")\n") vslashD = Template("complex<${var}> ${v}TPZ = ${v}.t()+${v}.z();\n complex<${var}> ${v}TMZ = ${v}.t()-${v}.z();\n complex<${var}> ${v}XPY = ${v}.x()+Complex(0.,1.)*${v}.y();\n complex<${var}> ${v}XMY = ${v}.x()-Complex(0.,1.)*${v}.y();") vslashM = Template("Matrix([[$m,0,${v}TMZ,-${v}XMY],[0,$m,-${v}XPY,${v}TPZ],[${v}TPZ,${v}XMY,$m,0],[${v}XPY,${v}TMZ,0,$m]])") vslashM2 = Template("Matrix([[$m,0,-${v}TMZ,${v}XMY],[0,$m,${v}XPY,-${v}TPZ],[-${v}TPZ,-${v}XMY,$m,0],[-${v}XPY,-${v}TMZ,0,$m]])") vslashMS = Template("${v}TPZ=Symbol(\"${v}TPZ\")\n${v}TMZ=Symbol(\"${v}TMZ\")\n${v}XPY=Symbol(\"${v}XPY\")\n${v}XMY=Symbol(\"${v}XMY\")\n${m}=Symbol(\"${m}\")\nO${m}=Symbol(\"O${m}\")\n") rslash = Template("Matrix([[$m,0,${v}TMZ,-${v}XMY],[0,$m,-${v}XPY,${v}TPZ],[${v}TPZ,${v}XMY,$m,0],[${v}XPY,${v}TMZ,0,$m]])*( ($${eta}-2./3.*O${m}**2*${v}$${A}*${v}$${B})*Matrix([[1.,0,0,0],[0,1.,0,0],[0,0,1.,0],[0,0,0,1.]]) -1./3.*$${DA}*$${DB} -1./3.*O${m}*(${v}$${B}*$${DA}-${v}$${A}*$${DB}))") rslashB = Template("Matrix([[$m,0,${v}TMZ,-${v}XMY],[0,$m,-${v}XPY,${v}TPZ],[${v}TPZ,${v}XMY,$m,0],[${v}XPY,${v}TMZ,0,$m]])*( (${v2}$${A}-2./3.*O${m}**2*${v}$${A}*${dot})*Matrix([[1.,0,0,0],[0,1.,0,0],[0,0,1.,0],[0,0,0,1.]]) -1./3.*$${DA}*${DB} -1./3.*O${m}*(${dot}*$${DA}-${v}$${A}*${DB}))") rslash2 = Template("Matrix([[$m,0,-${v}TMZ,${v}XMY],[0,$m,${v}XPY,-${v}TPZ],[-${v}TPZ,-${v}XMY,$m,0],[-${v}XPY,-${v}TMZ,0,$m]])*( ($${eta}-2./3.*O${m}**2*${v}$${A}*${v}$${B})*Matrix([[1.,0,0,0],[0,1.,0,0],[0,0,1.,0],[0,0,0,1.]]) -1./3.*$${DA}*$${DB} +1./3.*O${m}*(${v}$${B}*$${DA}-${v}$${A}*$${DB}))") rslash2B = Template("Matrix([[$m,0,-${v}TMZ,${v}XMY],[0,$m,${v}XPY,-${v}TPZ],[-${v}TPZ,-${v}XMY,$m,0],[-${v}XPY,-${v}TMZ,0,$m]])*( (${v2}$${B}-2./3.*O${m}**2*${dot}*${v}$${B})*Matrix([[1.,0,0,0],[0,1.,0,0],[0,0,1.,0],[0,0,0,1.]]) -1./3.*${DA}*$${DB} +1./3.*O${m}*(${v}$${B}*${DA}-${dot}*$${DB}))") 
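# Building blocks for the symbolic Dirac algebra used below: the list `dirac`
# holds the four gamma matrices gamma^{t,x,y,z} as sympy Matrix literals, in
# the same 4x4 representation as the vslash/vslashM templates above, while CC
# and CD appear to be the charge-conjugation matrix and its inverse. These
# strings are substituted into sympy expressions when fermion and RS-fermion
# Lorentz structures are evaluated symbolically further down in this file.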
dirac=["Matrix([[0,0,1.,0],[0,0,0,1.],[1.,0,0,0],[0,1.,0,0]])","Matrix([[0,0,0,1.],[0,0,1.,0],[0,-1.,0,0],[-1.,0,0,0]])", "Matrix([[0,0,0,complex(0, -1.)],[0,0,complex(0, 1.),0],[0,complex(0, 1.),0,0],[complex(0, -1.),0,0,0]])", "Matrix([[0,0,1.,0],[0,0,0,-1.],[-1.,0,0,0],[0,1.,0,0]])"] CC = "Matrix([[0,1.,0,0],[-1.,0,0,0],[0,0,0,-1.],[0,0,1.,0]])" CD = "Matrix([[0,-1.,0,0],[1.,0,0,0],[0,0,0,1.],[0,0,-1.,0]])" evaluateTemplate = """\ {decl} {{ {momenta} {waves} {swap} {symbols} {couplings} {defns} {result} }} """ spinor = Template("Matrix([[${s}s1],[${s}s2],[${s}s3],[${s}s4]])") sbar = Template("Matrix([[${s}s1,${s}s2,${s}s3,${s}s4]])") sline = Template("${s}s1=Symbol(\"${s}s1\")\n${s}s2=Symbol(\"${s}s2\")\n${s}s3=Symbol(\"${s}s3\")\n${s}s4=Symbol(\"${s}s4\")\n") RSSpinorTemplate = Template("${type}(${outxs1},\n${outxs2},\n${outxs3},\n${outxs4},\n${outys1},\n${outys2},\n${outys3},\n${outys4},\n${outzs1},\n${outzs2},\n${outzs3},\n${outzs4},\n${outts1},\n${outts2},\n${outts3},\n${outts4})") SpinorTemplate = Template("${type}(${outs1},\n${outs2},\n${outs3},\n${outs4})") class LorentzIndex : """ A simple classs to store a Lorentz index """ type="" value=0 dimension=0 def __repr__(self): if(self.type=="V" and not isinstance(self.value,int)) : return self.value else : return "%s%s" % (self.type,self.value) def __init__(self,val) : if(isinstance(val,int)) : self.dimension=0 if(val<0) : self.type="D" self.value = val elif(val>0 and val/1000==0) : self.type="E" self.value = val elif(val>0 and val/1000==1) : self.type="T1" self.value = val%1000 elif(val>0 and val/1000==2) : self.type="T2" self.value = val%1000 else : print "Unknown value in Lorentz index:",val raise SkipThisVertex() else : print "Unknown value in Lorentz index:",val raise SkipThisVertex() def __eq__(self,other): if(not isinstance(other,LorentzIndex)) : return False return ( (self.type, self.value) == (other.type, other.value) ) def __hash__(self) : return hash((self.type,self.value)) class DiracMatrix: """A simple class to store Dirac matrices""" name ="" value="" index=0 def __init(self) : self.name="" self.value="" self.index=0 def __repr__(self) : if(self.value==0) : return "%s" % self.index else : return "%s" % self.value class LorentzStructure: """A simple class to store a Lorentz structures""" name="" value=0 lorentz=[] spin=[] def __init(self) : self.name="" self.value=0 self.lorentz=[] self.spin=[] def __repr__(self): output = self.name if((self.name=="P" or self.name=="Tensor") and self.value!=0) : output += "%s" % self.value if(self.name=="int" or self.name=="sign") : output += "=%s" % self.value elif(len(self.spin)==0) : output += "(" for val in self.lorentz : output += "%s," % val output=output.rstrip(",") output+=")" elif(len(self.lorentz)==0) : output += "(" for val in self.spin : output += "%s," % val output=output.rstrip(",") output+=")" else : output += "(" for val in self.lorentz : output += "%s," % val for val in self.spin : output += "%s," % val output=output.rstrip(",") output+=")" return output def LorentzCompare(a,b) : if(a.name=="int" and b.name=="int") : return int(abs(b.value)-abs(a.value)) elif(a.name=="int") : return -1 elif(b.name=="int") : return 1 elif(len(a.spin)==0) : if(len(b.spin)==0) : return len(b.lorentz)-len(a.lorentz) else : return -1 elif(len(b.spin)==0) : return 1 else : if(len(a.spin)==0 or len(b.spin)==0) : print 'Index problem in lorentz compare',\ a.name,b.name,a.spin,b.spin raise SkipThisVertex() if(a.spin[0]>0 or b.spin[1]>0 ) : return -1 if(a.spin[1]>0 or b.spin[0]>0 ) : return 1 
if(a.spin[1]==b.spin[0]) : return -1 if(b.spin[1]==a.spin[0]) : return 1 return 0 def extractIndices(struct) : if(struct.find("(")<0) : return [] temp=struct.split("(")[1].split(")")[0].split(",") output=[] for val in temp : output.append(int(val)) return output def parse_structure(structure,spins) : output=[] found = True while(found) : found = False # signs between terms if(structure=="+" or structure=="-") : output.append(LorentzStructure()) output[0].name="sign" output[0].value=structure[0]+"1." output[0].value=float(output[0].value) output[0].lorentz=[] output[0].spin=[] return output # simple numeric pre/post factors elif((structure[0]=="-" or structure[0]=="+") and structure[-1]==")" and structure[1]=="(") : output.append(LorentzStructure()) output[-1].name="int" output[-1].value=structure[0]+"1." output[-1].value=float(output[-1].value) output[-1].lorentz=[] output[-1].spin=[] structure=structure[2:-1] found=True elif(structure[0]=="(") : temp=structure.rsplit(")",1) structure=temp[0][1:] output.append(LorentzStructure()) output[-1].name="int" output[-1].value="1."+temp[1] output[-1].value=float(eval(output[-1].value)) output[-1].lorentz=[] output[-1].spin=[] found=True elif(structure[0:2]=="-(") : temp=structure.rsplit(")",1) structure=temp[0][2:] output.append(LorentzStructure()) output[-1].name="int" output[-1].value="-1."+temp[1] output[-1].value=float(eval(output[-1].value)) output[-1].lorentz=[] output[-1].spin=[] found=True # special handling for powers power = False if("**" in structure ) : power = True structure = structure.replace("**","^") structures = structure.split("*") if(power) : for j in range(0,len(structures)): if(structures[j].find("^")>=0) : temp = structures[j].split("^") structures[j] = temp[0] for i in range(0,int(temp[1])-1) : structures.append(temp[0]) # split up the structure for struct in structures: ind = extractIndices(struct) # different types of object # object with only spin indices if(struct.find("Identity")==0 or struct.find("Proj")==0 or struct.find("Gamma5")==0) : output.append(LorentzStructure()) output[-1].spin=ind output[-1].lorentz=[] output[-1].name=struct.split("(")[0] output[-1].value=0 if(len(struct.replace("%s(%s,%s)" % (output[-1].name,ind[0],ind[1]),""))!=0) : print "Problem handling %s structure " % output[-1].name raise SkipThisVertex() # objects with 2 lorentz indices elif(struct.find("Metric")==0) : output.append(LorentzStructure()) output[-1].lorentz=[LorentzIndex(ind[0]),LorentzIndex(ind[1])] output[-1].name=struct.split("(")[0] output[-1].value=0 output[-1].spin=[] if(len(struct.replace("%s(%s,%s)" % (output[-1].name,ind[0],ind[1]),""))!=0) : print "Problem handling %s structure " % output[-1].name raise SkipThisVertex() elif(struct.find("P(")==0) : output.append(LorentzStructure()) output[-1].lorentz=[LorentzIndex(ind[0])] output[-1].name=struct.split("(")[0] output[-1].value=ind[1] output[-1].spin=[] if(len(struct.replace("%s(%s,%s)" % (output[-1].name,ind[0],ind[1]),""))!=0) : print "Problem handling %s structure " % output[-1].name raise SkipThisVertex() # 1 lorentz and 1 spin index elif(struct.find("Gamma")==0) : output.append(LorentzStructure()) output[-1].lorentz=[LorentzIndex(ind[0])] output[-1].spin=[ind[1],ind[2]] output[-1].name=struct.split("(")[0] output[-1].value=1 if(len(struct.replace("%s(%s,%s,%s)" % (output[-1].name,ind[0],ind[1],ind[2]),""))!=0) : print "problem parsing gamma matrix",struct raise SkipThisVertex() # objects with 4 lorentz indices elif(struct.find("Epsilon")==0) : 
output.append(LorentzStructure()) output[-1].lorentz=[] for i in range(0,len(ind)) : output[-1].lorentz.append(LorentzIndex(ind[i])) output[-1].spin=[] output[-1].name=struct.split("(")[0] output[-1].value=1 if(len(struct.replace("%s(%s,%s,%s,%s)" % (output[-1].name,ind[0],ind[1],ind[2],ind[3]),""))!=0) : print 'Problem parsing epsilon',struct raise SkipThisVertex() # scalars else : try : output.append(LorentzStructure()) output[-1].value=float(struct) output[-1].name="int" output[-1].lorentz=[] output[-1].spin=[] except : if(struct.find("complex")==0) : vals = struct[0:-1].replace("complex(","").split(",") output[-1].value=complex(float(vals[0]),float(vals[1])) output[-1].name="int" output[-1].lorentz=[] output[-1].spin=[] else : print 'Problem parsing scalar',struct raise SkipThisVertex() # now do the sorting if(len(output)==1) : return output output = sorted(output,cmp=LorentzCompare) # fix indices in the RS case if(4 in spins) : for i in range(0,len(output)) : for ll in range(0,len(output[i].lorentz)) : if(spins[output[i].lorentz[ll].value-1]==4 and output[i].lorentz[ll].type=="E") : output[i].lorentz[ll].type="R" # return the answer return output def constructDotProduct(ind1,ind2,defns) : (ind1,ind2) = sorted((ind1,ind2),cmp=indSort) dimension=ind1.dimension+ind2.dimension # this product already dealt with ? if((ind1,ind2) in defns) : name = defns[(ind1,ind2)][0] # handle the product else : name = "dot%s" % (len(defns)+1) unit = computeUnit(dimension) defns[(ind1,ind2)] = [name,"complex<%s> %s = %s*%s;" % (unit,name,ind1,ind2)] return (name,dimension) def contract(parsed) : for j in range(0,len(parsed)) : if(parsed[j]=="") : continue if(parsed[j].name=="P") : # simplest case if(parsed[j].lorentz[0].type=="E" or parsed[j].lorentz[0].type=="P") : newIndex = LorentzIndex(parsed[j].value) newIndex.type="P" newIndex.dimension=1 parsed[j].name="Metric" parsed[j].lorentz.append(newIndex) parsed[j].lorentz = sorted(parsed[j].lorentz,cmp=indSort) continue ll=1 found=False for k in range(0,len(parsed)) : if(j==k or parsed[k]=="" ) : continue for i in range(0,len(parsed[k].lorentz)) : if(parsed[k].lorentz[i] == parsed[j].lorentz[0]) : parsed[k].lorentz[i].type="P" parsed[k].lorentz[i].value = parsed[j].value parsed[k].lorentz[i].dimension=1 if(parsed[k].name=="P") : parsed[k].lorentz.append(LorentzIndex(parsed[k].value)) parsed[k].lorentz[1].type="P" parsed[k].lorentz[1].dimension=1 parsed[k].name="Metric" parsed[k].value = 0 found=True break if(found) : parsed[j]="" break return [x for x in parsed if x != ""] def computeUnit(dimension) : if(isinstance(dimension,int)) : dtemp = dimension else : dtemp=dimension[1]+dimension[2] if(dtemp==0) : unit="double" elif(dtemp==1) : unit="Energy" elif(dtemp==-1) : unit="InvEnergy" elif(dtemp>0) : unit="Energy%s" % (dtemp) elif(dtemp<0) : unit="InvEnergy%s" % (dtemp) return unit def computeUnit2(dimension,vDim) : # first correct for any coupling power in vertex totalDim = int(dimension[0])+dimension[2]+vDim-4 output="" if(totalDim!=0) : if(totalDim>0) : if(totalDim==1) : output = "1./GeV" elif(totalDim==2) : output = "1./GeV2" else : output="1." for i in range(0,totalDim) : output +="/GeV" else : if(totalDim==-1) : output = "GeV" elif(totalDim==-2) : output = "GeV2" else : output="1." 
for i in range(0,-totalDim) : output +="*GeV" expr="" # now remove the remaining dimensionality removal=dimension[1]-int(dimension[0])-vDim+4 if(removal!=0) : if(removal>0) : if(removal==1) : expr = "UnitRemovalInvE" else : expr = "UnitRemovalInvE%s" % removal else : if(removal==-1) : expr = "UnitRemovalE" else : expr = "UnitRemovalE%s" % (-removal) if(output=="") : return expr elif(expr=="") : return output else : return "%s*%s" %(output,expr) # order the indices of a dot product def indSort(a,b) : if(not isinstance(a,LorentzIndex) or not isinstance(b,LorentzIndex)) : print "Trying to sort something that's not a Lorentz index",a,b raise SkipThisVertex() if(a.type==b.type) : i1=a.value i2=b.value if(i1>i2) : return 1 elif(i10) : output="+" else : output="-" parsed=[] return (output,parsed,dimension,eps) # replace integers (really lorentz scalars) for j in range(0,len(parsed)) : if(parsed[j]!="" and parsed[j].name=="int") : output *= parsed[j].value parsed[j]="" # bracket this for safety if(output!="") : output = "(%s)" % output # special for tensor indices if("T" in lorentztag) : for j in range(0,len(parsed)) : if(parsed[j]=="") :continue # check for tensor index found=False for li in parsed[j].lorentz : if(li.type[0]=="T") : index = li found=True break if(not found) : continue # workout the other index for the tensor index2 = LorentzIndex(li.value) if(index.type=="T1") : index2.type="T2" else : index2.type="T1" # special is tensor contracted with itself if(parsed[j].name=="Metric" and index2 == parsed[j].lorentz[1]) : parsed[j].name = "Tensor" parsed[j].value = index.value parsed[j].lorentz = [] if(iloc!=index.value) : name= "traceT%s" % parsed[j].value if( name in defns ) : output += "*(%s)" % defns[name][0] else : defns[name] = [name,"Complex %s = T%s.trace();" % (name,parsed[j].value)] output += "*(%s)" % defns[name][0] parsed[j]="" continue # otherwise search for the match for k in range(j+1,len(parsed)) : found = False for li in parsed[k].lorentz : if(li == index2) : found=True break if(not found) : continue if(parsed[j].name=="P") : newIndex1 = LorentzIndex(parsed[j].value) newIndex1.type="P" newIndex1.dimension=1 elif(parsed[j].name=="Metric") : for li in parsed[j].lorentz : if(li != index) : newIndex1=li break else : print 'Unknown object with tensor index, first object',parsed[j] raise SkipThisVertex() if(parsed[k].name=="P") : newIndex2 = LorentzIndex(parsed[k].value) newIndex2.type="P" newIndex2.dimension=1 elif(parsed[k].name=="Metric") : for li in parsed[k].lorentz : if(li != index) : newIndex2=li break elif(parsed[k].name=="Gamma") : # if we can't contract if(index.value==iloc or (newIndex1.type=="E" and newIndex1.value==iloc)) : newIndex2=index2 # otherwise contract else : unit=computeUnit(newIndex1.dimension) if(index.type=="T1") : name="T%s%sF" % (index.value,newIndex1) defns[name] = [name,"LorentzVector > %s = T%s.preDot(%s);" % (unit,name,index.value,newIndex1)] else : name="T%s%sS" % (index.value,newIndex1) defns[name] = [name,"LorentzVector > %s = T%s.postDot(%s);" % (unit,name,index.value,newIndex1)] parsed[j]="" gIndex=LorentzIndex(-1) gIndex.type="V" gIndex.value=name gIndex.dimension=newIndex1.dimension parsed[k].lorentz[0] = gIndex break else : print 'Unknown object with tensor index, second object',parsed[j],parsed[k] raise SkipThisVertex() if(index2.type=="T1") : newIndex1,newIndex2=newIndex2,newIndex1 parsed[j].name = "Tensor" parsed[j].value= int(index.value) parsed[j].lorentz= [newIndex1,newIndex2] if(parsed[k].name!="Gamma") : parsed[k]="" break # main 
handling of lorentz structures for j in range(0,len(parsed)) : if(parsed[j]=="") : continue if(parsed[j].name=="Metric") : # check whether or not we can contract canContract=True for ll in parsed[j].lorentz : if((ll.type=="E" and ll.value==iloc) or ll.type=="R") : canContract = False break if(not canContract) : continue # if we can do it (name,dTemp) = constructDotProduct(parsed[j].lorentz[0],parsed[j].lorentz[1],defns) output += "*(%s)" % name dimension[2] += dTemp parsed[j]="" elif(parsed[j].name=="Epsilon") : if(not eps) : eps = True # work out which, if any of the indices can be summed over summable=[] for ix in range(0,len(parsed[j].lorentz)) : if(parsed[j].lorentz[ix].type=="P" or (parsed[j].lorentz[ix].type=="E" and iloc !=parsed[j].lorentz[ix].value)) : summable.append(True) else : summable.append(False) sc = summable.count(True) # less than 3 contractable indices, leave for later if(sc<3) : continue # can contract to a vector elif(sc==3) : offLoc = -1 for i in range(0,len(summable)): if(not summable[i]) : offLoc = i break else : offLoc = 0 indices=[] dTemp=0 for ix in range(0,len(parsed[j].lorentz)) : dTemp += parsed[j].lorentz[ix].dimension if(ix!=offLoc) : indices.append(parsed[j].lorentz[ix]) # contract all the indices if(sc==4) : dimension[2] += dTemp iTemp = (parsed[j].lorentz[0],parsed[j].lorentz[1], parsed[j].lorentz[2],parsed[j].lorentz[3]) if(iTemp in defns) : output += "*(%s)" % defns[iTemp][0] parsed[j]="" else : name = "dot%s" % (len(defns)+1) unit = computeUnit(dTemp) defns[iTemp] = [name,"complex<%s> %s =-%s*epsilon(%s,%s,%s);" % (unit,name,parsed[j].lorentz[0], indices[0],indices[1],indices[2]) ] output += "*(%s)" % name parsed[j]="" # contract 3 indices leaving a vector else : iTemp = (indices[0],indices[1],indices[2]) sign = "1" if(offLoc%2!=0) : sign="-1" if(iTemp in defns) : name = defns[iTemp][0] else : name = "V%s" % (len(defns)+1) unit = computeUnit(dTemp) defns[iTemp] = [name,"LorentzVector > %s =-epsilon(%s,%s,%s);" % (unit,name, indices[0],indices[1],indices[2]) ] newIndex = LorentzIndex(int(name[1:])) newIndex.type="V" newIndex.dimension=dTemp output += "*(%s)" % (sign) oi = parsed[j].lorentz[offLoc] if(oi.type!="D") : parsed[j].name="Metric" parsed[j].spins=[] parsed[j].value=0 parsed[j].lorentz=[newIndex,oi] else : found=False for k in range(0,len(parsed)): if(k==j or parsed[k]=="") : continue for ll in range(0,len(parsed[k].lorentz)) : if(parsed[k].lorentz[ll]==oi) : found=True parsed[k].lorentz[ll]=newIndex break if(found) : break if(not found) : print "Problem contracting indices of Epsilon tensor" raise SkipThisVertex() parsed[j]="" elif(parsed[j].name=="Tensor") : # not an external tensor if(parsed[j].value!=iloc) : # now check the lorentz indices con=[] uncon=[] dtemp=0 for li in parsed[j].lorentz : if(li.type=="P" or li.type=="V") : con.append(li) dtemp+=li.dimension elif(li.type=="E") : if(li.value!=iloc) : con.append(li) else : uncon.append(li) else : print "Can't handle index ",li,"in tensor",parsed[j] raise SkipThisVertex() if(len(con)==2) : iTemp = ("T%s%s%s"% (parsed[j].value,con[0],con[1])) dimension[2]+=dtemp if(iTemp in defns) : output += "*(%s)" % defns[iTemp][0] else : unit=computeUnit(dtemp) name = "dot%s" % (len(defns)+1) defns[iTemp] = [name,"complex<%s> %s = T%s.preDot(%s)*%s;" % (unit,name,parsed[j].value,con[0],con[1])] output += "*(%s)" % name parsed[j]="" # handled in final stage else : continue elif(parsed[j].name.find("Proj")>=0 or parsed[j].name.find("Gamma")>=0 or parsed[j].name.find("Identity")>=0) : continue 
elif(parsed[j].name=="P" and parsed[j].lorentz[0].type=="R") : continue else : print 'Lorentz structure',parsed[j],'not handled' raise SkipThisVertex() # remove leading * if(output!="" and output[0]=="*") : output = output[1:] # remove any (now) empty elements parsed = [x for x in parsed if x != ""] return (output,parsed,dimension,eps) def finalContractions(output,parsed,dimension,lorentztag,iloc,defns) : if(len(parsed)==0) : return (output,dimension) elif(len(parsed)!=1) : print "Summation can't be handled",parsed raise SkipThisVertex() if(parsed[0].name=="Tensor") : # contracted with off-shell vector if(parsed[0].value!=iloc) : found = False for ll in parsed[0].lorentz : if(ll.type=="E" and ll.value==iloc) : found = True else : lo=ll if(found) : dimension[2]+= lo.dimension unit=computeUnit(lo.dimension) if(lo==parsed[0].lorentz[0]) : name="T%s%sF" % (parsed[0].value,lo) defns[name] = [name,"LorentzVector > %s = T%s.preDot(%s);" % (unit,name,parsed[0].value,lo)] else : name="T%s%sS" % (parsed[0].value,lo) defns[name] = [name,"LorentzVector > %s = T%s.postDot(%s);" % (unit,name,parsed[0].value,lo)] parsed[0]="" if(output=="") : output="1." output = "(%s)*(%s)" %(output,name) else : print "Can\'t contract tensor",lo,iloc raise SkipThisVertex() # off-shell tensor else : if(len(parsed[0].lorentz)!=0) : dimension[2]+=parsed[0].lorentz[0].dimension+parsed[0].lorentz[1].dimension tensor = tensorPropagator(parsed[0],defns) if(output=="") : output="1." output = [output,tensor,()] elif(parsed[0].name=="Metric") : found = False for ll in parsed[0].lorentz : if(ll.type=="E" and ll.value==iloc) : found = True else : lo=ll if(found) : parsed[0]="" dimension[2] += lo.dimension if(output=="") : output="1." output = "(%s)*(%s)" %(output,lo) else : print "Structure can't be handled",parsed,iloc raise SkipThisVertex() return (output,dimension) def tensorPropagator(struct,defns) : # dummy index i0 = LorentzIndex(-1000) # index for momentum of propagator ip = LorentzIndex(struct.value) ip.type="P" ip.dimension=1 # the metric tensor terms=[] if(len(struct.lorentz)==0) : (dp,dTemp) = constructDotProduct(ip,ip,defns) pre = "-1./3.*(1.-%s*OM%s)" % (dp,struct.value) terms.append((pre,i0,i0)) pre = "-2./3.*(1.-%s*OM%s)" % (dp,struct.value) terms.append(("%s*OM%s" %(pre,struct.value),ip,ip)) else : # indices of the tensor ind1 = struct.lorentz[0] ind2 = struct.lorentz[1] # the dot products we need (d1,dtemp) = constructDotProduct(ind1,ip,defns) (d2,dtemp) = constructDotProduct(ind2,ip,defns) (d3,dtemp) = constructDotProduct(ind1,ind2,defns) # various terms in the propagator terms.append(("0.5",ind1,ind2)) terms.append(("-0.5*OM%s*%s"%(struct.value,d1),ip,ind2)) terms.append(("-0.5*OM%s*%s"%(struct.value,d2),ind1,ip)) terms.append(("0.5",ind2,ind1)) terms.append(("-0.5*OM%s*%s"%(struct.value,d2),ip,ind1)) terms.append(("-0.5*OM%s*%s"%(struct.value,d1),ind2,ip)) terms.append(("-1./3.*"+d3,i0,i0)) terms.append(("1./3.*OM%s*%s*%s"%(struct.value,d1,d2),i0,i0)) terms.append(("1./3.*OM%s*%s"%(struct.value,d3),ip,ip)) terms.append(("2./3.*OM%s*OM%s*%s*%s"%(struct.value,struct.value,d1,d2),ip,ip)) # compute the output as a dict output={} for i1 in imap: for i2 in imap : val="" for term in terms: if(term[0][0]!="-") : pre = "+"+term[0] else : pre = term[0] if(term[1]==i0) : if(i1==i2) : if(i1=="t") : val += pre else : if(pre[0]=="+") : val +="-"+pre[1:] else : val +="+"+pre[1:] else : val += "%s*%s%s*%s%s" % (pre,term[1],i1,term[2],i2) output["%s%s" % (i1,i2) ] = val.replace("+1*","+").replace("-1*","-") return output def 
generateVertex(iloc,L,parsed,lorentztag,vertex,defns) : # try to import sympy and exit if required try : import sympy from sympy import Matrix,Symbol except : print 'ufo2herwig uses the python sympy module to translate general lorentz structures.' print 'This must be installed if you wish to use this option.' print 'EXITTING' quit() eps=False # parse the lorentz structures output = [1.]*len(parsed) dimension=[] for i in range(0,len(parsed)) : dimension.append([0,0,0]) for i in range (0,len(parsed)) : (output[i],parsed[i],dimension[i],eps) = finishParsing(parsed[i],dimension[i],lorentztag,iloc,defns,eps) # still need to process gamma matrix strings for fermions if(lorentztag[0] in ["F","R"] ) : return convertDirac(output,dimension,eps,iloc,L,parsed,lorentztag,vertex,defns) # return the answer else : handled=True for i in range (0,len(parsed)) : if(len(parsed[i])!=0) : handled = False break if(not handled) : for i in range (0,len(parsed)) : (output[i],dimension[i]) = finalContractions(output[i],parsed[i],dimension[i],lorentztag,iloc,defns) return (output,dimension,eps) def convertDirac(output,dimension,eps,iloc,L,parsed,lorentztag,vertex,defns) : for i in range(0,len(parsed)): # skip empty elements if(len(parsed[i])==0 or (len(parsed[i])==1 and parsed[i][0]=="")) : continue # parse the string (output[i],dimension[i],defns) = convertDiracStructure(parsed[i],output[i],dimension[i], defns,iloc,L,lorentztag,vertex) return (output,dimension,eps) # parse the gamma matrices def convertMatrix(structure,spins,unContracted,Symbols,dtemp,defns,iloc) : i1 = structure.spin[0] i2 = structure.spin[1] if(structure.name=="Identity") : output = DiracMatrix() output.value = I4 output.index=0 output.name="M" structure="" elif(structure.name=="Gamma5") : output = DiracMatrix() output.value = G5 output.index=0 output.name="M" structure="" elif(structure.name=="ProjM") : output = DiracMatrix() output.value = PM output.index=0 output.name="M" structure="" elif(structure.name=="ProjP") : output = DiracMatrix() output.value = PP output.index=0 output.name="M" structure="" elif(structure.name=="Gamma") : # gamma(mu) lorentz matrix contracted with dummy index if(structure.lorentz[0].type=="D" or structure.lorentz[0].type=="R") : if(structure.lorentz[0] not in unContracted) : unContracted[structure.lorentz[0]] = 0 output = DiracMatrix() output.value=0 output.index=structure.lorentz[0] output.name="GMU" structure="" elif(structure.lorentz[0].type == "E" and structure.lorentz[0].value == iloc ) : if(structure.lorentz[0] not in unContracted) : unContracted[structure.lorentz[0]] = 0 output = DiracMatrix() output.value=0 output.index=structure.lorentz[0] output.name="GMU" structure="" elif(structure.lorentz[0].type == "T1" or structure.lorentz[0].type == "T2") : if(structure.lorentz[0] not in unContracted) : unContracted[structure.lorentz[0]] = 0 output = DiracMatrix() output.value=0 output.index=structure.lorentz[0] output.name="GMU" structure="" else : output=DiracMatrix() output.name="M" output.value = vslash.substitute({ "v" : structure.lorentz[0]}) Symbols += vslashS.substitute({ "v" : structure.lorentz[0]}) variable = computeUnit(structure.lorentz[0].dimension) #if(structure.lorentz[0].type!="V" or # structure.lorentz[0].type=="V") : dtemp[2] += structure.lorentz[0].dimension defns["vv%s" % structure.lorentz[0] ] = \ ["vv%s" % structure.lorentz[0], vslashD.substitute({ "var" : variable, "v" : structure.lorentz[0]})] structure="" else : print 'Unknown Gamma matrix structure',structure raise SkipThisVertex() return 
(i1,i2,output,structure,Symbols) def checkRSContract(parsed,loc,dtemp) : rindex=LorentzIndex(loc) rindex.type="R" contract="" for i in range(0,len(parsed)) : if(parsed[i]=="") : continue found = False for ll in range(0,len(parsed[i].lorentz)) : if(parsed[i].lorentz[ll]==rindex) : found=True break if(not found) : continue if(parsed[i].name=="P") : dtemp[2]+=1 contract = LorentzIndex(parsed[i].value) contract.type="P" contract.dimension=1 parsed[i]="" break elif(parsed[i].name=="Metric") : for ll in parsed[i].lorentz : if(ll==rindex) : continue else : break if(ll.type=="P") : dtemp[2]+=1 contract=ll parsed[i]="" break elif(parsed[i].name=="Epsilon") : continue else : print "Unkonwn type contracted with RS spinor",parsed[i] raise SkipThisVertex() return contract def processChain(dtemp,parsed,spins,Symbols,unContracted,defns,iloc) : # piece of dimension which is common (0.5 for sbar and spinor) dtemp[0]+=1 # set up the spin indices sind = 0 lind = 0 expr=[] # now find the next thing in the matrix string ii = 0 index=0 while True : # already handled if(parsed[ii]=="") : ii+=1 continue # start of the chain elif(sind==0 and len(parsed[ii].spin)==2 and parsed[ii].spin[0]>0 ) : (sind,index,matrix,parsed[ii],Symbols) \ = convertMatrix(parsed[ii],spins,unContracted,Symbols,dtemp,defns,iloc) expr.append(matrix) # next element in the chain elif(index!=0 and len(parsed[ii].spin)==2 and parsed[ii].spin[0]==index) : (crap,index,matrix,parsed[ii],Symbols) \ = convertMatrix(parsed[ii],spins,unContracted,Symbols,dtemp,defns,iloc) expr.append(matrix) # check the index to see if we're at the end if(index>0) : lind=index break ii+=1 if(ii>=len(parsed)) : print "Can't parsed the chain of dirac matrices" print parsed raise SkipThisVertex() # start and end of the spin chains # first particle spin 1/2 if(spins[sind-1]==2) : start = DiracMatrix() endT = DiracMatrix() start.index=0 endT .index=0 # start of chain and end of transpose # off shell if(sind==iloc) : start.name="M" endT .name="M" start.value = vslashM .substitute({ "v" : "P%s" % sind, "m" : "M%s" % sind} ) Symbols += vslashMS.substitute({ "v" : "P%s" % sind, "m" : "M%s" % sind} ) endT.value = vslashM2.substitute({ "v" : "P%s" % sind, "m" : "M%s" % sind} ) defns["vvP%s" % sind ] = ["vvP%s" % sind , vslashD.substitute({ "var" : "Energy", "v" : "P%s" % sind })] dtemp[1]+=1 # onshell else : start.name="S" endT .name="S" subs = {'s' : ("sbar%s" % sind)} start.value = sbar .substitute(subs) Symbols += sline.substitute(subs) subs = {'s' : ("s%s" % sind)} endT.value = spinor.substitute(subs) Symbols += sline.substitute(subs) # spin 3/2 fermion elif spins[sind-1]==4 : # check if we can easily contract contract=checkRSContract(parsed,sind,dtemp) # off-shell if(sind==iloc) : oindex = LorentzIndex(sind) oindex.type="O" unContracted[oindex]=0 Symbols += vslashMS.substitute({ "v" : "P%s" % sind, "m" : "M%s" % sind} ) Symbols += momCom.substitute({"v" : "P%s" %sind }) defns["vvP%s" % sind ] = ["vvP%s" % sind , vslashD.substitute({ "var" : "Energy", "v" : "P%s" % sind })] dtemp[1] += 1 if(contract=="") : rindex = LorentzIndex(sind) rindex.type="R" start=DiracMatrix() start.value = Template(rslash.substitute({ "v" : "P%s" % sind, "m" : "M%s" % sind, "loc" : sind })) start.name = "RP" start.index = (oindex,rindex) endT=DiracMatrix() endT.value = Template(rslash2.substitute({ "v" : "P%s" % sind, "m" : "M%s" % sind, "loc" : sind })) endT.name = "RP" endT.index = (rindex,oindex) else : # construct dot product pi = LorentzIndex(sind) pi.type="P" pi.dimension=1 (name,dummy) = 
constructDotProduct(pi,contract,defns) Symbols += momCom.substitute({"v" : contract }) RB = vslash.substitute({ "v" : contract}) Symbols += vslashS.substitute({ "v" : contract }) Symbols += "%s = Symbol('%s')\n" % (name,name) defns["vv%s" % contract ] = ["vv%s" % contract, vslashD.substitute({ "var" : computeUnit(contract.dimension), "v" : "%s" % contract })] start=DiracMatrix() start.name="RQ" start.index = oindex start.value =Template( rslashB.substitute({ "v" : "P%s" % sind, "m" : "M%s" % sind, "loc" : sind, "DB" : RB, "dot" : name , "v2" : contract})) endT=DiracMatrix() endT.name = "RQ" endT.index = oindex endT.value = Template(rslash2B.substitute({ "v" : "P%s" % sind, "m" : "M%s" % sind, "loc" : sind , "DA" : RB, "dot" : name , "v2" : contract})) # on-shell else : start = DiracMatrix() endT = DiracMatrix() # no contraction if(contract=="" or (contract.type=="E" and contract.value==iloc) ) : if contract == "" : contract = LorentzIndex(sind) contract.type="R" # start of matrix string start.value = Template(sbar .substitute({'s' : ("Rsbar%s${L}" % sind)})) start.name="RS" start.index = contract # end of transpose string endT.value=Template(spinor.substitute({'s' : ("Rs%s${L}" % sind)})) endT.name="RS" endT.index = contract unContracted[contract]=0 # variables for sympy for LI in imap : Symbols += sline.substitute({'s' : ("Rs%s%s" % (sind,LI))}) Symbols += sline.substitute({'s' : ("Rsbar%s%s" % (sind,LI))}) else : # start of matrix string start.name="S" start.value = "Matrix([[%s,%s,%s,%s]])" % (RSDotProduct.substitute({'s' : ("Rsbar%s" % sind), 'v':contract, 'si' : 1}), RSDotProduct.substitute({'s' : ("Rsbar%s" % sind), 'v':contract, 'si' : 2}), RSDotProduct.substitute({'s' : ("Rsbar%s" % sind), 'v':contract, 'si' : 3}), RSDotProduct.substitute({'s' : ("Rsbar%s" % sind), 'v':contract, 'si' : 4})) endT.name="S" endT.value = "Matrix([[%s],[%s],[%s],[%s]])" % (RSDotProduct.substitute({'s' : ("Rs%s" % sind), 'v':contract, 'si' : 1}), RSDotProduct.substitute({'s' : ("Rs%s" % sind), 'v':contract, 'si' : 2}), RSDotProduct.substitute({'s' : ("Rs%s" % sind), 'v':contract, 'si' : 3}), RSDotProduct.substitute({'s' : ("Rs%s" % sind), 'v':contract, 'si' : 4})) Symbols += momCom.substitute({"v" : contract }) for LI in ["x","y","z","t"] : Symbols += sline.substitute({'s' : ("Rs%s%s" % (sind,LI))}) Symbols += sline.substitute({'s' : ("Rsbar%s%s" % (sind,LI))}) # last particle spin 1/2 if( spins[lind-1]==2 ) : end = DiracMatrix() startT = DiracMatrix() end .index=0 startT.index=0 # end of chain if(lind==iloc) : end.name ="M" startT.name="M" end.value = vslashM2.substitute({ "v" : "P%s" % lind, "m" : "M%s" % lind} ) startT.value = vslashM.substitute({ "v" : "P%s" % lind, "m" : "M%s" % lind} ) Symbols += vslashMS.substitute({ "v" : "P%s" % lind, "m" : "M%s" % lind} ) defns["vvP%s" % lind ] = ["vvP%s" % lind , vslashD.substitute({ "var" : "Energy", "v" : "P%s" % lind })] dtemp[1] += 1 else : startT.name="S" end .name="S" subs = {'s' : ("s%s" % lind)} end.value = spinor.substitute(subs) Symbols += sline.substitute(subs) subs = {'s' : ("sbar%s" % lind)} startT.value = sbar .substitute(subs) Symbols += sline.substitute(subs) # last particle spin 3/2 elif spins[lind-1]==4 : # check if we can easily contract contract=checkRSContract(parsed,lind,dtemp) # off-shell if(lind==iloc) : oindex = LorentzIndex(lind) oindex.type="O" unContracted[oindex]=0 Symbols += vslashMS.substitute({ "v" : "P%s" % lind, "m" : "M%s" % lind} ) Symbols += momCom.substitute({"v" : "P%s" %lind }) defns["vvP%s" % lind ] = ["vvP%s" % lind , 
vslashD.substitute({ "var" : "Energy", "v" : "P%s" % lind })] dtemp[1] += 1 if(contract=="") : rindex = LorentzIndex(lind) rindex.type="R" end=DiracMatrix() end.value = Template(rslash2.substitute({ "v" : "P%s" % lind, "m" : "M%s" % lind, "loc" : lind })) end.name = "RP" end.index = (rindex,oindex) startT=DiracMatrix() startT.value=Template(rslash.substitute({ "v" : "P%s" % lind, "m" : "M%s" % lind, "loc" : lind })) startT.name = "RP" startT.index = (oindex,rindex) else : # construct dot product pi = LorentzIndex(lind) pi.type="P" pi.dimension=1 (name,unit) = constructDotProduct(pi,contract,defns) Symbols += momCom.substitute({"v" : contract }) RB = vslash.substitute({ "v" : contract}) Symbols += vslashS.substitute({ "v" : contract }) Symbols += "%s = Symbol('%s')\n" % (name,name) defns["vv%s" % contract ] = ["vv%s" % contract, vslashD.substitute({ "var" : computeUnit(contract.dimension), "v" : "%s" % contract })] end=DiracMatrix() end.name="RQ" end.index = oindex end.value =Template( rslash2B.substitute({ "v" : "P%s" % lind, "m" : "M%s" % lind, "loc" : lind, "DA" : RB, "dot" : name , "v2" : contract})) startT=DiracMatrix() startT.name = "RQ" startT.index = oindex startT.value = Template(rslashB.substitute({ "v" : "P%s" % lind, "m" : "M%s" % lind, "loc" : lind , "DB" : RB, "dot" : name , "v2" : contract})) # on-shell else : end = DiracMatrix() startT = DiracMatrix() # no contraction if(contract=="" or (contract.type=="E" and contract.value==iloc) ) : if contract == "" : contract = LorentzIndex(lind) contract.type="R" # end of matrix string end.value = Template(spinor.substitute({'s' : ("Rs%s${L}" % lind)})) end.name = "RS" end.index = contract # start of matrix string startT.value = Template(sbar .substitute({'s' : ("Rsbar%s${L}" % lind)})) startT.name = "RS" startT.index = contract unContracted[contract]=0 # variables for sympy for LI in imap : Symbols += sline.substitute({'s' : ("Rs%s%s" % (lind,LI))}) Symbols += sline.substitute({'s' : ("Rsbar%s%s" % (lind,LI))}) # contraction else : # end of the matrix string end.name = "S" end.value = "Matrix([[%s],[%s],[%s],[%s]])" % (RSDotProduct.substitute({'s' : ("Rs%s" % lind), 'v':contract, 'si' : 1}), RSDotProduct.substitute({'s' : ("Rs%s" % lind), 'v':contract, 'si' : 2}), RSDotProduct.substitute({'s' : ("Rs%s" % lind), 'v':contract, 'si' : 3}), RSDotProduct.substitute({'s' : ("Rs%s" % lind), 'v':contract, 'si' : 4})) startT.name = "S" startT.value = "Matrix([[%s,%s,%s,%s]])" % (RSDotProduct.substitute({'s' : ("Rsbar%s" % lind), 'v':contract, 'si' : 1}), RSDotProduct.substitute({'s' : ("Rsbar%s" % lind), 'v':contract, 'si' : 2}), RSDotProduct.substitute({'s' : ("Rsbar%s" % lind), 'v':contract, 'si' : 3}), RSDotProduct.substitute({'s' : ("Rsbar%s" % lind), 'v':contract, 'si' : 4})) Symbols += momCom.substitute({"v" : contract }) for LI in ["x","y","z","t"] : Symbols += sline.substitute({'s' : ("Rs%s%s" % (lind,LI))}) Symbols += sline.substitute({'s' : ("Rsbar%s%s" % (lind,LI))}) return(sind,lind,expr,start,startT,end,endT,Symbols) def calculateDirac(expr,start,end,startT,endT,sind,lind,Symbols,iloc) : res=[] for ichain in range(0,len(start)) : # calculate the matrix string etemp="*".join(str(x) for x in expr[ichain]) temp={} exec("import sympy\nfrom sympy import Symbol,Matrix\n"+Symbols+"result="+ ( "%s*%s*%s" %(start[ichain],etemp,end[ichain]))) in temp res.append(temp["result"]) tempT={} exec("import sympy\nfrom sympy import Symbol,Matrix,Transpose\n"+Symbols+"result="+ - ( "%s*%s*Transpose(%s)*%s*%s" %(startT[0],CC,etemp,CD,endT[0]))) in 
tempT + ( "%s*%s*Transpose(%s)*%s*%s" %(startT[ichain],CC,etemp,CD,endT[ichain]))) in tempT res.append(tempT["result"]) if(len(start)==1) : - if(iloc==0 or (iloc!=sind[0] and iloc!=lind[0])) : + if(iloc==0 or (iloc!=sind[ichain] and iloc!=lind[ichain])) : sVal = {'s' : temp ["result"][0,0],'sT' : tempT["result"][0,0]} else : sVal={} for jj in range(1,5) : sVal["s%s" % jj] = temp ["result"][jj-1] sVal["sT%s" % jj] = tempT["result"][jj-1] else : sVal={} sVal["s" ] = res[0][0,0]*res[2][0,0] sVal["sT2" ] = res[0][0,0]*res[3][0,0] sVal["sT1" ] = res[1][0,0]*res[2][0,0] sVal["sT12"] = res[1][0,0]*res[3][0,0] return sVal def addToOutput(res,nchain,sign,rTemp) : # 1 spin chain if(nchain==1) : for ii in range(0,2) : if(rTemp[ii][0].shape[0]==1) : # result is scalar if(rTemp[ii][0].shape[1]==1) : if(len(res[ii])==0) : res[ii].append(sign*rTemp [ii][0][0,0]) else : res[ii][0] += sign*rTemp [ii][0][0,0] # result is a spinor elif(rTemp[ii][0].shape[1]==4) : if(len(res[ii])==0) : for j in range(0,4) : res[ii].append(sign*rTemp[ii][0][0,j]) else : for j in range(0,4) : res[ii][j] += sign*rTemp[ii][0][0,j] else : print "Size problem adding results A",sign,rTemp[ii].shape raise SkipThisVertex() # spinor elif(rTemp[ii][0].shape[0]==4 and rTemp[ii][0].shape[1]==1 ) : if(len(res[ii])==0) : for j in range(0,4) : res[ii].append(sign*rTemp[ii][0][j,0]) else : for j in range(0,4) : res[ii][j] += sign*rTemp[ii][0][j,0] else : print "Size problem adding results A",sign,rTemp[ii][0].shape raise SkipThisVertex() # 2 spin chains, should only be for a vertex else : for j1 in range(0,2) : for j2 in range (0,2) : val = sign*rTemp[j1][0]*rTemp[j2][1] if(len(res[3])==0) : res[2*j1+j2].append(val[0,0]) else : res[2*j1+j2][0] += val[0,0] def calculateDirac2(expr,start,end,startT,endT,sind,lind,Symbols,defns, iloc,unContracted,spins,lorentz) : tDot="" # output sVal={} # no of chains nchain=len(expr) # now deal with the uncontracted cases contracted={} # sort out contracted and uncontracted indices keys = unContracted.keys() for key in keys: # summed dummy index if key.type=="D" : contracted[key]=0 del unContracted[key] # RS index elif key.type =="R" : contracted[key]=0 del unContracted[key] # tensor index elif key.type == "T1" or key.type=="T2" : contracted[key]=0 del unContracted[key] # external index elif key.type == "O" : continue # uncontracted vector index elif key.type=="E" or key.type=="Q": continue else : print 'Unknown type of uncontracted index',key raise SkipThisVertex() # check the lorentz structures for lstruct in lorentz : if(lstruct.name=="Epsilon" or lstruct.name=="Vector") : for index in lstruct.lorentz : if(index.type=="E" and index.value==iloc) : unContracted[index]=0 elif(index.type=="P" or index.type=="E" or index.type=="R" or index.type=="D") : contracted[index]=0 else : print 'Unknown index',index, 'in ',lstruct raise SkipThisVertex() elif(lstruct.name=="Tensor") : if(iloc==lstruct.value) : Symbols += momCom.substitute({"v": "P%s"%lstruct.value}) Symbols += "OM%s = Symbol(\"OM%s\")\n" % (lstruct.value,lstruct.value) Symbols += "M%s2 = Symbol(\"M%s2\")\n" % (lstruct.value,lstruct.value) Symbols += "p2 = Symbol(\"p2\")\n" for ival in range(1,3) : newIndex=LorentzIndex(ival) newIndex.type="O" newIndex.dimension=0 lstruct.lorentz.append(newIndex) unContracted[newIndex]=0 # contracted with self if(len(lstruct.lorentz)==0) : pass # both indices uncontracted, deal with later elif lstruct.lorentz[0].type=="T1" and lstruct.lorentz[1].type=="T2": pass elif lstruct.lorentz[0].type=="T1": pIndex = 
LorentzIndex(lstruct.value) pIndex.dimension=1 pIndex.type="P" (tDot,dtemp) = constructDotProduct(pIndex,lstruct.lorentz[1],defns) Symbols+="%s = Symbol(\"%s\")\n" %(tDot,tDot) Symbols += momCom.substitute({"v": lstruct.lorentz[1]}) elif lstruct.lorentz[1].type=="T2" : pIndex = LorentzIndex(lstruct.value) pIndex.dimension=1 pIndex.type="P" (tDot,dtemp) = constructDotProduct(pIndex,lstruct.lorentz[0],defns) Symbols+="%s = Symbol(\"%s\")\n" %(tDot,tDot) Symbols += momCom.substitute({"v": lstruct.lorentz[0]}) # both indices still to be contracted else : contracted[lstruct.lorentz[0].type]=0 contracted[lstruct.lorentz[1].type]=0 else : print 'Unknown lorentz object in calculateDirac2',lstruct,iloc raise SkipThisVertex() # iterate over the uncontracted indices while True : # loop over the unContracted indices res = [] for i in range(0,nchain) : res.append([]) res.append([]) # loop over the contracted indices while True : # sign from metric tensor in contractions sign = 1 for key,val in contracted.iteritems() : if(val>0) : sign *=-1 eTemp =[] sTemp =[] fTemp =[] sTTemp=[] fTTemp=[] # make the necessary replacements for remaining indices for ichain in range(0,nchain) : # compute the main expression eTemp.append([]) for val in expr[ichain] : # already a matrix if(val.name=="M") : eTemp[ichain].append(val) # gamma(mu), replace with correct dirac matrix elif(val.name=="GMU") : if(val.index in contracted) : eTemp[ichain].append(dirac[contracted[val.index]]) elif(val.index in unContracted) : eTemp[ichain].append(dirac[unContracted[val.index]]) else : print 'Unknown index for gamma matrix',val raise SkipThisVertex() # unknown to be sorted out else : print 'Unknown type in expr',val raise SkipThisVertex() # start and end # start if(start[ichain].name=="S" or start[ichain].name=="M" ) : sTemp.append(start[ichain].value) elif(start[ichain].name=="RS") : if(start[ichain].index in contracted) : sTemp.append(start[ichain].value.substitute({"L" : imap[ contracted[start[ichain].index]] })) else : sTemp.append(start[ichain].value.substitute({"L" : imap[unContracted[start[ichain].index]] })) elif(start[ichain].name=="RQ") : i1 = unContracted[start[ichain].index] sTemp.append(start[ichain].value.substitute({"A" : imap[i1], "DA" : dirac[i1] })) elif(start[ichain].name=="RP") : i1 = unContracted[start[ichain].index[0]] i2 = contracted[start[ichain].index[1]] eta=0 if(i1==i2) : if(i1==0) : eta = 1 else : eta = -1 sTemp.append(start[ichain].value.substitute({"eta" : eta, "A":imap[i1] , "B":imap[i2] , "DA": dirac[i1], "DB": dirac[i2]})) else : print 'Barred spinor not a spinor',start[ichain] raise SkipThisVertex() if(startT[ichain].name=="S" or startT[ichain].name=="M" ) : sTTemp.append(startT[ichain].value) elif(startT[ichain].name=="RS") : if(startT[ichain].index in contracted) : sTTemp.append(startT[ichain].value.substitute({"L" : imap[ contracted[startT[ichain].index]] })) else : sTTemp.append(startT[ichain].value.substitute({"L" : imap[unContracted[startT[ichain].index]] })) elif(startT[ichain].name=="RQ") : i1 = unContracted[startT[ichain].index] sTTemp.append(startT[ichain].value.substitute({"A" : imap[i1], "DA" : dirac[i1] })) elif(startT[ichain].name=="RP") : i1 = unContracted[startT[ichain].index[0]] i2 = contracted[startT[ichain].index[1]] eta=0 if(i1==i2) : if(i1==0) : eta = 1 else : eta = -1 sTTemp.append(startT[ichain].value.substitute({"eta" : eta, "A":imap[i1] , "B":imap[i2] , "DA": dirac[i1], "DB": dirac[i2]})) else : print 'barred spinorT not a spinor',startT[ichain] raise SkipThisVertex() # end 
if(end[ichain].name=="S" or end[ichain].name=="M" ) : fTemp.append(end[ichain].value) elif(end[ichain].name=="RS") : if(end[ichain].index in contracted) : fTemp.append(end[ichain].value.substitute({"L" : imap[ contracted[end[ichain].index]] })) else : fTemp.append(end[ichain].value.substitute({"L" : imap[unContracted[end[ichain].index]] })) elif(end[ichain].name=="RQ") : i1 = unContracted[end[ichain].index] fTemp.append(end[ichain].value.substitute({"B" : imap[i1], "DB": dirac[i1] })) elif(end[ichain].name=="RP") : i1 = contracted[end[ichain].index[0]] i2 = unContracted[end[ichain].index[1]] eta=0 if(i1==i2) : if(i1==0) : eta = 1 else : eta = -1 fTemp.append(end[ichain].value.substitute({"eta" : eta, "A":imap[i1] , "B":imap[i2] , "DA": dirac[i1], "DB": dirac[i2]})) else : print 'spinor not a spinor',end[ichain] raise SkipThisVertex() if(endT[ichain].name=="S" or endT[ichain].name=="M" ) : fTTemp.append(endT[ichain].value) elif(endT[ichain].name=="RS") : if(endT[ichain].index in contracted) : fTTemp.append(endT[ichain].value.substitute({"L" : imap[ contracted[endT[ichain].index]] })) else : fTTemp.append(endT[ichain].value.substitute({"L" : imap[unContracted[endT[ichain].index]] })) elif(endT[ichain].name=="RQ") : i1 = unContracted[endT[ichain].index] fTTemp.append(endT[ichain].value.substitute({"B" : imap[i1], "DB": dirac[i1] })) elif(endT[ichain].name=="RP") : i1 = contracted[endT[ichain].index[0]] i2 = unContracted[endT[ichain].index[1]] eta=0 if(i1==i2) : if(i1==0) : eta = 1 else : eta = -1 fTTemp.append(endT[ichain].value.substitute({"eta" : eta, "A":imap[i1] , "B":imap[i2] , "DA": dirac[i1], "DB": dirac[i2]})) else : print 'spinorT not a spinor',endT[ichain] raise SkipThisVertex() # and none dirac lorentz structures isZero = False for li in lorentz: # uncontracted vector if(li.name=="Vector") : index = unContracted[li.lorentz[0]] Symbols += momCom.substitute({"v":li.value}) for ichain in range(0,nchain) : eTemp[ichain].append("%s%s"% (li.value,imap[index]) ) elif(li.name=="Epsilon") : value="" ival=[] for index in li.lorentz : if(index in contracted) : if(index.type=="P" or index.type=="E") : value += "*%s%s" % (index,imap[contracted[index]]) ival.append(contracted[index]) elif(index.type=="R" or index.type=="D") : ival.append(contracted[index]) else : print 'Unknown index in Epsilon Tensor',index raise SkipThisVertex() elif(index in unContracted) : ival.append(unContracted[index]) if(len(value)!=0 and value[0]=="*") : value = value[1:] eVal = epsValue[ival[0]][ival[1]][ival[2]][ival[3]] if(eVal==0) : isZero = True else : for ichain in range(0,nchain) : eTemp[ichain].append("(%s*%s)"% (eVal,value) ) elif(li.name=="Tensor") : if(li.lorentz[0] in unContracted and li.lorentz[1] in unContracted) : value='0.5*(%s)'%tPropA[unContracted[li.lorentz[0]]][unContracted[li.lorentz[0]]].substitute({"iloc" : li.value}) for ichain in range(0,nchain) : eTemp[ichain].append("(%s)"% (value) ) elif(len(li.lorentz)==4) : if li.lorentz[0].type=="T1" and li.lorentz[1].type=="T2" : value='0.5*(%s)'%tPropC[unContracted[li.lorentz[2]]][unContracted[li.lorentz[3]]][contracted[li.lorentz[0]]][contracted[li.lorentz[1]]].substitute({"iloc" : li.value}) elif li.lorentz[0].type=="T1": value='0.5*(%s)'%tPropB[unContracted[li.lorentz[2]]][unContracted[li.lorentz[3]]][contracted[li.lorentz[0]]].substitute({"iloc" : li.value, "V" : li.lorentz[1], "dot" : tDot}) elif li.lorentz[1].type=="T2" : value= '0.5*(%s)'%tPropB[unContracted[li.lorentz[2]]][unContracted[li.lorentz[3]]][contracted[li.lorentz[1]]].substitute({"iloc" 
: li.value, "V" : li.lorentz[0], "dot" : tDot}) else : print 'Both contracted tensor indices contracted' raise SkipThisVertex() for ichain in range(0,nchain) : eTemp[ichain].append("(%s)"% (value) ) else : print 'Uncontracted on-shell tensor' raise SkipThisVertex() # unknown else : print 'Unknown expression in lorentz loop',li raise SkipThisVertex() # now evaluate the result if(not isZero) : rTemp =[[],[]] for ichain in range(0,nchain) : core = "*".join(str(x) for x in eTemp[ichain]) temp={} exec("import sympy\nfrom sympy import Symbol,Matrix\n"+Symbols+"result="+ ( "(%s)*(%s)*(%s)" %(sTemp[ichain],core,fTemp[ichain]))) in temp rTemp[0].append(temp["result"]) temp={} exec("import sympy\nfrom sympy import Symbol,Matrix,Transpose\n"+Symbols+"result="+ ( "(%s)*(%s)*(Transpose(%s))*(%s)*(%s)" %(sTTemp[ichain],CC,core,CD,fTTemp[ichain]))) in temp rTemp[1].append(temp["result"]) # and add it to the output addToOutput(res,nchain,sign,rTemp) #### END OF THE CONTRACTED LOOP ##### # increment the indices being summed over keys=contracted.keys() ii = len(keys)-1 while ii >=0 : if(contracted[keys[ii]]<3) : contracted[keys[ii]]+=1 break else : contracted[keys[ii]]=0 ii-=1 nZero=0 for (key,val) in contracted.iteritems() : if(val==0) : nZero+=1 if(nZero==len(contracted)) : break ###### END OF THE UNCONTRACTED LOOP ###### # no uncontracted indices if(len(unContracted)==0) : if(len(res[0])==1) : # scalar if(len(res)==2) : sVal["s" ] = res[0] sVal["sT"] = res[1] # 4 fermion else : sVal["s" ] = res[0] sVal["sT2" ] = res[1] sVal["sT1" ] = res[2] sVal["sT12"] = res[3] # spinor elif(len(res[0])==4) : for k in range(0,4) : sVal[ "s%s" % (k+1) ] = res[0][k] sVal[ "sT%s" % (k+1) ] = res[1][k] else : print 'Sum problem',len(res),len(res[0]) raise SkipThisVertex() break # uncontracted indices else : istring = "" for (key,val) in unContracted.iteritems() : istring +=imap[val] if(len(istring)>2) : print 'Index problem',istring raise SkipThisVertex() sVal[istring] = res[0] sVal[istring+"T"] = res[1] # increment the unsummed indices keys=unContracted.keys() ii = len(keys)-1 while ii >=0 : if(unContracted[keys[ii]]<3) : unContracted[keys[ii]]+=1 break else : unContracted[keys[ii]]=0 ii-=1 nZero=0 for (key,val) in unContracted.iteritems() : if(val==0) : nZero+=1 if(nZero==len(unContracted)) : break # handle the vector case if( "tt" in sVal) : if(len(sVal)==32 and "tt" in sVal and len(sVal["tt"])==1) : for key in sVal: sVal[key] = sVal[key][0] else : print 'Tensor sum problem',len(sVal) raise SkipThisVertex() elif( "t" in sVal ) : # deal with pure vectors if(len(sVal)==8 and "t" in sVal and len(sVal["t"])==1) : pass # RS spinors elif(len(sVal)==8 and "t" in sVal and len(sVal["t"])==4) : pass else : print 'Value problem',len(sVal) raise SkipThisVertex() else : if("s" in sVal) : for key in sVal: sVal[key] = sVal[key][0] return sVal def convertDiracStructure(parsed,output,dimension,defns,iloc,L,lorentztag,vertex) : # get the spins of the particles spins = vertex.lorentz[0].spins # check if we have one or two spin chains nchain = (lorentztag.count("F")+lorentztag.count("R"))/2 # storage of the intermediate results expr =[] start =[] end =[] startT=[] endT =[] sind=[0]*nchain lind=[0]*nchain unContracted={} Symbols="" dtemp=[0,0,0] # parse the dirac matrix strings for ichain in range(0,nchain) : expr .append([]) start .append("") startT.append("") end .append("") endT .append("") sind[ichain],lind[ichain],expr[ichain],start[ichain],startT[ichain],end[ichain],endT[ichain],Symbols =\ 
processChain(dtemp,parsed,spins,Symbols,unContracted,defns,iloc) + # standard ordering of the chains + for ichain in range(0,nchain) : + for jchain in range(ichain+1,nchain) : + if(sind[jchain] s%s = sW%s.wave();" % (fIndex[i-1],fIndex[i-1])) nf+=1 else : decls.append("SpinorBarWaveFunction & sbarW%s" % (fIndex[i-1])) momenta.append([False,"Lorentz5Momentum P%s =-sbarW%s.momentum();" % (fIndex[i-1],fIndex[i-1])]) waves.append("LorentzSpinorBar sbar%s = sbarW%s.wave();" % (fIndex[i-1],fIndex[i-1])) nf+=1 elif(spin==3) : decls.append("VectorWaveFunction & vW%s" % (i)) momenta.append([False,"Lorentz5Momentum P%s =-vW%s.momentum();" % (i,i)]) waves.append("LorentzPolarizationVector E%s = vW%s.wave();" % (i,i)) elif(spin==4) : if(i%2==1) : decls.append("RSSpinorWaveFunction & RsW%s" % (i)) momenta.append([False,"Lorentz5Momentum P%s =-RsW%s.momentum();" % (i,i)]) waves.append("LorentzRSSpinor Rs%s = RsW%s.wave();" % (i,i)) nf+=1 else : decls.append("RSSpinorBarWaveFunction & RsbarW%s" % (i)) momenta.append([False,"Lorentz5Momentum P%s =-RsbarW%s.momentum();" % (i,i)]) waves.append("LorentzRSSpinorBar Rsbar%s = RsbarW%s.wave();" % (i,i)) nf+=1 elif(spin==5) : decls.append("TensorWaveFunction & tW%s" % (i)) momenta.append([False,"Lorentz5Momentum P%s =-tW%s.momentum();" % (i,i)]) waves.append("LorentzTensor T%s = tW%s.wave();" % (i,i)) else : print 'Unknown spin',spin raise SkipThisVertex() poff += "-P%s" % (i) # ensure unbarred spinor first ibar=-1 isp=-1 for i in range(0,len(decls)) : if(decls[i].find("Bar")>0 and ibar==-1) : ibar=i elif(decls[i].find("Spinor")>=0 and isp==-1) : isp=i if(isp!=-1 and ibar!=-1 and isp>ibar) : decls[ibar],decls[isp] = decls[isp],decls[ibar] # constrct the signature poff = ("Lorentz5Momentum P%s = " % iloc ) + poff sig="" if(iloc==0) : sig="%s evaluate(Energy2, const %s)" % (offType,", const ".join(decls)) # special for VVT vertex if(len(vertex.lorentz[0].spins)==3 and vertex.lorentz[0].spins.count(3)==2 and vertex.lorentz[0].spins.count(5)==1) : sig = sig[0:-1] + ", Energy vmass=-GeV)" else : sig="%s evaluate(Energy2, int iopt, tcPDPtr out, const %s, complex mass=-GeV, complex width=-GeV)" % (offType,", const ".join(decls)) momenta.append([True,poff+";"]) # special for VVT vertex if(len(vertex.lorentz[0].spins)==3 and vertex.lorentz[0].spins.count(3)==2 and vertex.lorentz[0].spins.count(5)==1 and vertex.lorentz[0].spins[iloc-1]==5) : sig=sig.replace("complex mass=-GeV","Energy vmass=-GeV, complex mass=-GeV") for i in range(0,len(momenta)) : momenta[i][0]=True return offType,nf,poff,sig def combineResult(res,nf,ispin,vertex) : # extract the vals and dimensions (vals,dim) = res # construct the output structure # vertex and off-shell scalars if(ispin<=1) : otype={'res':""} # spins elif(ispin==2) : otype={'s1':"",'s2':"",'s3':"",'s4':""} # vectors elif(ispin==3) : if( "t" in vals[0][1] ) : otype={'t':"",'x':"",'y':"",'z':""} else : otype={"res":""} # RS spinors elif(ispin==4) : otype={} for i1 in imap : for i in range(1,5) : otype["%ss%s"% (i1,i)]="" # off-shell tensors elif(ispin==5) : otype={} for i1 in imap : for i2 in imap : otype["%s%s"%(i1,i2)]="" else : print 'Unknown spin',ispin raise SkipThisVertex() expr=[otype] for i in range(0,nf-1) : expr.append(copy.copy(otype)) # dimension for checking dimCheck=dim[0] for i in range(0,len(vals)) : # simple signs if(vals[i]=="+" or vals[i]=="-") : for ii in range(0,len(expr)) : for(key,val) in expr[ii].iteritems() : expr[ii][key] = expr[ii][key]+vals[i] continue # check the dimensions if(dimCheck[0]!=dim[i][0] or 
dimCheck[1]!=dim[i][1] or dimCheck[2]!=dim[i][2]) : print "Dimension problem in result",i,dimCheck,dim,vertex print vertex.lorentz for j in range(0,len(vals)) : print j,dim[j],vals[j] raise SkipThisVertex() # simplest case if(isinstance(vals[i], basestring)) : for ii in range(0,len(expr)) : for(key,val) in expr[ii].iteritems() : expr[ii][key] = expr[ii][key]+vals[i] continue # more complex structures pre = vals[i][0] if(pre=="(1.0)") : pre="" if(not isinstance(vals[i][1],dict)) : print 'must be a dict here' raise SkipThisVertex() # tensors if("tt" in vals[i][1]) : for i1 in imap : for i2 in imap : key="%s%s"%(i1,i2) if(pre=="") : expr[0][key] += "(%s)" % vals[i][1][key] else : expr[0][key] += "%s*(%s)" % (pre,vals[i][1][key]) if(len(expr)==2) : if(pre=="") : expr[1][key] +="(%s)" % vals[i][1][key+"T"] else : expr[1][key] +="%s*(%s)" % (pre,vals[i][1][key+"T"]) # standard fermion vertex case elif(len(vals[i][1])==2 and "s" in vals[i][1] and "sT" in vals[i][1]) : if(pre=="") : expr[0]["res"] += "(%s)" % vals[i][1]["s"] expr[1]["res"] += "(%s)" % vals[i][1]["sT"] else : expr[0]["res"] += "%s*(%s)" % (pre,vals[i][1]["s"]) expr[1]["res"] += "%s*(%s)" % (pre,vals[i][1]["sT"]) # spinor case elif(len(vals[i][1])==8 and "s1" in vals[i][1]) : for jj in range(1,5) : if(pre=="") : expr[0]["s%s" % jj] += "(%s)" % vals[i][1]["s%s" % jj] expr[1]["s%s" % jj] += "(%s)" % vals[i][1]["sT%s" % jj] else : expr[0]["s%s" % jj] += "%s*(%s)" % (pre,vals[i][1]["s%s" % jj]) expr[1]["s%s" % jj] += "%s*(%s)" % (pre,vals[i][1]["sT%s" % jj]) # vector elif(len(vals[i][1])%4==0 and "t" in vals[i][1] and len(vals[i][1]["t"])==1 ) : for i1 in imap : if(pre=="") : expr[0][i1] += "(%s)" % vals[i][1][i1][0] else : expr[0][i1] += "%s*(%s)" % (pre,vals[i][1][i1][0]) if(len(expr)==2) : if(pre=="") : expr[1][i1] +="(%s)" % vals[i][1][i1+"T"][0] else : expr[1][i1] +="%s*(%s)" % (pre,vals[i][1][i1+"T"][0]) # 4 fermion vertex case elif(len(vals[i][1])==4 and "sT12" in vals[i][1]) : if(pre=="") : expr[0]["res"] += "(%s)" % vals[i][1]["s"] expr[1]["res"] += "(%s)" % vals[i][1]["sT2"] expr[2]["res"] += "(%s)" % vals[i][1]["sT1"] expr[3]["res"] += "(%s)" % vals[i][1]["sT12"] else : expr[0]["res"] += "%s*(%s)" % (pre,vals[i][1]["s"]) expr[1]["res"] += "%s*(%s)" % (pre,vals[i][1]["sT2"]) expr[2]["res"] += "(%s)" % vals[i][1]["sT1"] expr[3]["res"] += "(%s)" % vals[i][1]["sT12"] # RS spinor elif(len(vals[i][1])%4==0 and "t" in vals[i][1] and len(vals[i][1]["t"])==4 ) : for i1 in imap : for k in range(1,5) : key = "%ss%s" % (i1,k) if(pre=="") : expr[0][key] += "(%s)" % vals[i][1][i1][k-1] expr[1][key] += "(%s)" % vals[i][1][i1+"T"][k-1] else : expr[0][key] += "%s*(%s)" % (pre,vals[i][1][i1][k-1]) expr[1][key] += "%s*(%s)" % (pre,vals[i][1][i1+"T"][k-1]) else : print 'problem with type',vals[i] raise SkipThisVertex() # no of particles in the vertex vDim = len(vertex.lorentz[0].spins) # compute the unit and apply it unit = computeUnit2(dimCheck,vDim) if(unit!="") : for ii in range(0,len(expr)) : for (key,val) in expr[ii].iteritems() : expr[ii][key] = "(%s)*(%s)" % (val,unit) return expr def headerReplace(inval) : return inval.replace("virtual","").replace("ScalarWaveFunction","").replace("SpinorWaveFunction","") \ .replace("SpinorBarWaveFunction","").replace("VectorWaveFunction","").replace("TensorWaveFunction","") \ .replace("Energy2","q2").replace("int","").replace("complex","").replace("Energy","").replace("=-GeV","") \ .replace("const &","").replace("tcPDPtr","").replace(" "," ").replace("Complex","") def 
combineComponents(result,offType,RS) : for i in range(0,len(result)) : for (key,value) in result[i].iteritems() : output=py2cpp(value.strip(),True) result[i][key]=output[0] # simplest case, just a value if(len(result[0])==1 and "res" in result[0]) : for i in range(0,len(result)) : result[i] = result[i]["res"] result[i]=result[i].replace("1j","ii") return # calculate the substitutions if(not isinstance(result[0],basestring)) : subs=[] for ii in range(0,len(result)) : subs.append({}) for (key,val) in result[ii].iteritems() : subs[ii]["out%s" % key]= val # spinors if("s1" in result[0]) : stype = "LorentzSpinor" sbtype = "LorentzSpinorBar" if(offType.find("Bar")>0) : (stype,sbtype)=(sbtype,stype) subs[0]["type"] = stype result[0] = SpinorTemplate.substitute(subs[0]) subs[1]["type"] = sbtype result[1] = SpinorTemplate.substitute(subs[1]) # tensors elif("tt" in result[0]) : for ii in range(0,len(result)) : result[ii] = Template("LorentzTensor(${outxx},\n${outxy},\n${outxz},\n${outxt},\n${outyx},\n${outyy},\n${outyz},\n${outyt},\n${outzx},\n${outzy},\n${outzz},\n${outzt},\n${outtx},\n${outty},\n${outtz},\n${outtt})").substitute(subs[ii]) result[ii]=result[ii].replace("(+","(") # vectors elif("t" in result[0]) : for ii in range(0,len(result)) : result[ii] = Template("LorentzVector(${outx},\n${outy},\n${outz},\n${outt})").substitute(subs[ii]) result[ii]=result[ii].replace("(+","(") # RS spinors elif("ts1" in result[0]) : stype = "LorentzRSSpinor" sbtype = "LorentzRSSpinorBar" if(offType.find("Bar")>0) : (stype,sbtype)=(sbtype,stype) subs[0]["type"] = stype result[0] = RSSpinorTemplate.substitute(subs[0]) subs[1]["type"] = sbtype result[1] = RSSpinorTemplate.substitute(subs[1]) else : print 'Type not implemented',result raise SkipThisVertex() for i in range(0,len(result)) : result[i]=result[i].replace("1j","ii") def generateEvaluateFunction(model,vertex,iloc,values,defns,vertexEval,cf,order) : RS = "R" in vertex.lorentz[0].name FM = "F" in vertex.lorentz[0].name # extract the start and end of the spin chains if( RS or FM ) : fIndex = vertexEval[0][0][0][2] else : fIndex=0 # first construct the signature of the function decls=[] momenta=[] waves=[] offType,nf,poff,sig = constructSignature(vertex,order,iloc,decls,momenta,waves,fIndex) # combine the different terms in the result symbols=set() localCouplings=[] result=[] ispin = 0 if(iloc!=0) : ispin = vertex.lorentz[0].spins[iloc-1] # put the lorentz structures and couplings together for j in range(0,len(vertexEval)) : # get the lorentz structure piece expr = combineResult(vertexEval[j],nf,ispin,vertex) # get the coupling for this bit val, sym = py2cpp(values[j]) localCouplings.append("Complex local_C%s = %s;\n" % (j,val)) symbols |=sym # put them together vtype="Complex" if("res" in expr[0] and offType=="VectorWaveFunction") : vtype="LorentzPolarizationVector" if(len(result)==0) : for ii in range(0,len(expr)) : result.append({}) for (key,val) in expr[ii].iteritems() : result[ii][key] = " %s((local_C%s)*(%s)) " % (vtype,j,val) else : for ii in range(0,len(expr)) : for (key,val) in expr[ii].iteritems(): result[ii][key] += " + %s((local_C%s)*(%s)) " % (vtype,j,val) # for more complex types merge the spin/lorentz components combineComponents(result,offType,RS) # multiple by scalar wavefunctions scalars="" for i in range (0,len(vertex.lorentz[0].spins)) : if(vertex.lorentz[0].spins[i]==1 and i+1!=iloc) : scalars += "sca%s*" % (i+1) if(scalars!="") : for ii in range(0,len(result)) : result[ii] = "(%s)*(%s)" % (result[ii],scalars[0:-1]) # vertex, just return 
the answer if(iloc==0) : result[0] = "return (%s)*(%s);\n" % (result[0],py2cpp(cf)[0]) if(FM or RS) : for i in range(1,len(result)) : result[i] = "return (%s)*(%s);\n" % (result[i],py2cpp(cf)[0]) # off-shell particle else : # off-shell scalar if(vertex.lorentz[0].spins[iloc-1] == 1 ) : result[0] = scaTemplate.format(iloc=iloc,cf=py2cpp(cf[0])[0],res=result[0]) if(FM or RS) : result[1] = scaTemplate.format(iloc=iloc,cf=py2cpp(cf[0])[0],res=result[1]) # off-shell fermion elif(vertex.lorentz[0].spins[iloc-1] == 2 ) : result[0] = sTemplate.format(iloc=iloc,cf=py2cpp(cf[0])[0],offTypeA=offType.replace("WaveFunction",""), res=result[0].replace( "M%s" % iloc, "mass" ),offTypeB=offType) if(FM or RS) : if(offType.find("Bar")>0) : offTypeT=offType.replace("Bar","") else : offTypeT=offType.replace("Spinor","SpinorBar") result[1] = sTemplate.format(iloc=iloc,cf=py2cpp(cf[0])[0],offTypeA=offTypeT.replace("WaveFunction",""), res=result[1].replace( "M%s" % iloc, "mass" ),offTypeB=offTypeT) # off-shell vector elif(vertex.lorentz[0].spins[iloc-1] == 3 ) : result[0] = vecTemplate.format(iloc=iloc,res=result[0],cf=py2cpp(cf)[0]) if(FM or RS) : result[1] = vecTemplate.format(iloc=iloc,cf=py2cpp(cf[0])[0],res=result[1]) elif(vertex.lorentz[0].spins[iloc-1] == 4 ) : if(offType.find("Bar")>0) : offTypeT=offType.replace("Bar","") else : offTypeT=offType.replace("Spinor","SpinorBar") result[1] = RSTemplate.format(iloc=iloc,cf=py2cpp(cf[0])[0],offTypeA=offTypeT.replace("WaveFunction",""), res=result[1].replace( "M%s" % iloc, "mass" ),offTypeB=offTypeT) result[0] = RSTemplate.format(iloc=iloc,cf=py2cpp(cf[0])[0],offTypeA=offType.replace("WaveFunction",""), res=result[0].replace( "M%s" % iloc, "mass" ),offTypeB=offType) # tensors elif(vertex.lorentz[0].spins[iloc-1]) : if(RS) : print "RS spinors and tensors not handled" raise SkipThisVertex() result[0] = tenTemplate.format(iloc=iloc,cf=py2cpp(cf)[0],res=result[0]) if(FM or RS) : result[1] = tenTemplate.format(iloc=iloc,cf=py2cpp(cf)[0],res=result[1]) else : print 'Unknown spin for off-shell particle',vertex.lorentz[0].spins[iloc-1] raise SkipThisVertex() # check if momenta defns needed to clean up compile of code for (key,val) in defns.iteritems() : if( isinstance(key, basestring)) : if(key.find("vvP")==0) : momenta[int(key[3])-1][0] = True else : for vals in key : if(vals.type=="P") : momenta[vals.value-1][0] = True # cat the definitions defString="" for (key,value) in defns.iteritems() : if(value[0]=="") : continue if(value[0][0]=="V" or value[0][0]=="T") : defString+=" %s\n" %value[1] for (key,value) in defns.iteritems() : if(value[0]=="") : continue if(value[0][0]!="V" and value[0][0]!="T") : defString+=" %s\n" %value[1] if(len(result)<=2) : sorder=swapOrder(vertex,iloc,momenta,fIndex) else : sorder="" momentastring="" for i in range(0,len(momenta)) : if(momenta[i][0] and momenta[i][1]!="") : momentastring+=momenta[i][1]+"\n " # special for 4-point VVVV if(vertex.lorentz[0].spins.count(3)==4 and iloc==0) : sig=sig.replace("Energy2","Energy2,int") header="virtual %s" % sig sig=sig.replace("=-GeV","") symboldefs = [ def_from_model(model,s) for s in symbols ] function = evaluateTemplate.format(decl=sig,momenta=momentastring,defns=defString, waves="\n ".join(waves),symbols='\n '.join(symboldefs), couplings="\n ".join(localCouplings), result=result[0],swap=sorder) # special for transpose if(FM or RS) : h2=header if(not RS) : h2=header.replace("evaluate","evaluateN") function=function.replace("evaluate(","evaluateN(") headers=[] newHeader="" for ifunction in 
range(1,len(result)) : waveNew=[] momentastring="" htemp = header.split(",") irs=-1 isp=-1 # RS case if(RS) : # sort out the wavefunctions for i in range(0,len(waves)) : if(waves[i].find("Spinor")<0) : waveNew.append(waves[i]) continue if(waves[i].find("Bar")>0) : waveNew.append(waves[i].replace("Bar","").replace("bar","")) else : waveNew.append(waves[i].replace("Spinor","SpinorBar").replace(" s"," sbar").replace("Rs","Rsbar")) # sort out the momenta definitions for i in range(0,len(momenta)) : if(momenta[i][0] and momenta[i][1]!="") : if(momenta[i][1].find("barW")>0) : momentastring+=momenta[i][1].replace("barW","W")+"\n " elif(momenta[i][1].find("sW")>0) : momentastring+=momenta[i][1].replace("sW","sbarW")+"\n " else : momentastring+=momenta[i][1]+"\n " # header string for i in range(0,len(htemp)) : if(htemp[i].find("RS")>0) : if(htemp[i].find("Bar")>0) : htemp[i]=htemp[i].replace("Bar","").replace("RsbarW","RsW") else : htemp[i]=htemp[i].replace("Spinor","SpinorBar").replace("RsW","RsbarW") if(i>0) : irs=i elif(htemp[i].find("Spinor")>0) : if(htemp[i].find("Bar")>0) : htemp[i]=htemp[i].replace("Bar","").replace("sbarW","sW") else : htemp[i]=htemp[i].replace("Spinor","SpinorBar").replace("sW","sbarW") if(i>0) : isp=i if(irs>0 and isp >0) : htemp[irs],htemp[isp] = htemp[isp],htemp[irs] # fermion case else : htemp2 = header.split(",") # which fermions to exchange if(len(fIndex)==2) : isp = (fIndex[0],) ibar = (fIndex[1],) else : if(ifunction==1) : isp = (fIndex[2],) ibar = (fIndex[3],) elif(ifunction==2) : isp = (fIndex[0],) ibar = (fIndex[1],) elif(ifunction==3) : isp = (fIndex[0],fIndex[2]) ibar = (fIndex[1],fIndex[3]) # wavefunctions for i in range(0,len(waves)) : if(waves[i].find("Spinor")<0) : waveNew.append(waves[i]) continue if(waves[i].find("Bar")>0) : found=False for itest in range(0,len(ibar)) : if(waves[i].find("sbarW%s"%ibar[itest])>=0) : waveNew.append(waves[i].replace("Bar","").replace("bar","")) found=True break if(not found) : waveNew.append(waves[i]) else : found=False for itest in range(0,len(isp)) : if(waves[i].find("sW%s"%isp[itest])>=0) : waveNew.append(waves[i].replace("Spinor","SpinorBar").replace(" s"," sbar")) found=True break if(not found) : waveNew.append(waves[i]) # momenta definitions for i in range(0,len(momenta)) : if(momenta[i][0] and momenta[i][1]!="") : if(momenta[i][1].find("barW")>0) : found = False for itest in range(0,len(ibar)) : if(momenta[i][1].find("barW%s"%ibar[itest])>=0) : momentastring+=momenta[i][1].replace("barW","W")+"\n " found=True break if(not found) : momentastring+=momenta[i][1]+"\n " elif(momenta[i][1].find("sW")>0) : found=False for itest in range(0,len(isp)) : if(momenta[i][1].find("sW%s"%isp[itest])>=0) : momentastring+=momenta[i][1].replace("sW","sbarW")+"\n " found=True break if(not found) : momentastring+=momenta[i][1]+"\n " else : momentastring+=momenta[i][1]+"\n " # header for i in range(0,len(htemp)) : if(htemp[i].find("Spinor")<0) : continue if(htemp[i].find("Bar")>0) : if(i==0) : htemp[i] =htemp [i].replace("Bar","") htemp2[i]=htemp2[i].replace("Bar","") continue for itest in range(0,len(ibar)) : if(htemp[i].find("sbarW%s"%ibar[itest])>=0) : htemp[i] =htemp [i].replace("Bar","").replace("sbarW","sW") htemp2[i]=htemp2[i].replace("Bar","").replace("sbarW%s"%ibar[itest],"sW%s"%isp[itest]) break else : if(i==0) : htemp [i]=htemp [i].replace("Spinor","SpinorBar") htemp2[i]=htemp2[i].replace("Spinor","SpinorBar") continue for itest in range(0,len(isp)) : if(htemp[i].find("sW%s"%isp[itest])>=0) : htemp [i]=htemp 
[i].replace("Spinor","SpinorBar").replace("sW","sbarW") htemp2[i]=htemp2[i].replace("Spinor","SpinorBar").replace("sW%s"%isp[itest],"sbarW%s"%ibar[itest]) break # header for transposed function hnew = ','.join(htemp) hnew = hnew.replace("virtual ","").replace("=-GeV","") if(not RS) : theader = ','.join(htemp2) theader = theader.replace("virtual ","").replace("=-GeV","") if(len(result)==2) : hnew =hnew .replace("evaluate","evaluateT") theader=theader.replace("evaluate","evaluateT") else : hnew =hnew .replace("evaluate","evaluateT%s" % ifunction) theader=theader.replace("evaluate","evaluateT%s" % ifunction) if(iloc not in fIndex) : theader = headerReplace(theader) else : theader = headerReplace(h2).replace("evaluateN","evaluateT") headers.append(theader) newHeader += hnew +";\n" else : newHeader += hnew fnew = evaluateTemplate.format(decl=hnew,momenta=momentastring,defns=defString, waves="\n ".join(waveNew),symbols='\n '.join(symboldefs), couplings="\n ".join(localCouplings), result=result[ifunction],swap=sorder) function +="\n" + fnew if(FM and not RS) : if(len(result)==2) : if(iloc!=fIndex[1]) : fi=1 stype="sbar" else : fi=0 stype="s" header = vTemplateT.format(header=header.replace("Energy2,","Energy2 q2,"), normal=headerReplace(h2), transpose=theader,type=stype, iloc=fIndex[fi],id=vertex.particles[fIndex[fi]-1].pdg_code) \ +newHeader+h2.replace("virtual","") else : sorder = swapOrderFFFF(vertex,iloc,fIndex) header = vTemplate4.format(header=header.replace("Energy2,","Energy2 q2,"), iloc1=fIndex[1],iloc2=fIndex[3],swap=sorder, id1=vertex.particles[fIndex[1]-1].pdg_code, id2=vertex.particles[fIndex[3]-1].pdg_code, cf=py2cpp(cf)[0], res1=headerReplace(h2).replace("W",""), res2=headers[0].replace("W",""), res3=headers[1].replace("W",""), res4=headers[2].replace("W",""))\ +newHeader+h2.replace("virtual","") else : header=header + ";\n" + newHeader return (header,function) evaluateMultiple = """\ {decl} {{ {code} }} """ def multipleEvaluate(vertex,spin,defns) : if(spin==1) : name="scaW" elif(spin==3) : name="vW" else : print 'Evaluate multiple problem',spin raise SkipThisVertex() if(len(defns)==0) : return ("","") header = defns[0] ccdefn = header.replace("=-GeV","").replace("virtual ","").replace("Energy2","Energy2 q2") code="" spins=vertex.lorentz[0].spins iloc=1 waves=[] for i in range(0,len(spins)) : if(spins[i]==spin) : waves.append("%s%s" %(name,i+1)) for i in range(0,len(spins)) : if(spins[i]==spin) : if(iloc==1) : el="" else : el="else " call = headerReplace(defns[iloc]) if(iloc!=1) : call = call.replace(waves[0],waves[iloc-1]) pdgid = vertex.particles[i].pdg_code code += " %sif(out->id()==%s) return %s;\n" % (el,pdgid,call) iloc+=1 code+=" else assert(false);\n" return (header,evaluateMultiple.format(decl=ccdefn,code=code)) diff --git a/Models/Makefile.am b/Models/Makefile.am --- a/Models/Makefile.am +++ b/Models/Makefile.am @@ -1,181 +1,181 @@ SUBDIRS = RSModel StandardModel General Susy UED Zprime \ Transplanckian ADD Leptoquarks Sextet TTbAsymm \ LH LHTP Feynrules noinst_LTLIBRARIES = libHwStandardModel.la nodist_libHwStandardModel_la_SOURCES = \ StandardModel/SM__all.cc libHwStandardModel_la_CPPFLAGS = \ $(AM_CPPFLAGS) -I$(srcdir)/StandardModel noinst_LTLIBRARIES += libHwModelGenerator.la nodist_libHwModelGenerator_la_SOURCES = \ General/ModelGenerator__all.cc libHwModelGenerator_la_CPPFLAGS = \ $(AM_CPPFLAGS) -I$(srcdir)/General if WANT_BSM pkglib_LTLIBRARIES = \ HwRSModel.la \ HwUED.la \ HwSusy.la \ HwNMSSM.la \ HwRPV.la \ HwZprimeModel.la \ HwTransplanck.la \ HwADDModel.la \ 
HwLeptoquarkModel.la \ HwSextetModel.la \ HwTTbAModel.la \ HwLHModel.la \ HwLHTPModel.la endif ############# HwRSModel_la_LDFLAGS = \ $(AM_LDFLAGS) -module -version-info 10:0:0 HwRSModel_la_CPPFLAGS = \ $(AM_CPPFLAGS) -I$(srcdir)/RSModel nodist_HwRSModel_la_SOURCES = \ RSModel/RS__all.cc ############# HwUED_la_LDFLAGS = \ $(AM_LDFLAGS) -module -version-info 10:1:0 HwUED_la_CPPFLAGS = \ $(AM_CPPFLAGS) -I$(srcdir)/UED nodist_HwUED_la_SOURCES = \ UED/UED__all.cc ############# HwSusy_la_LDFLAGS = \ -$(AM_LDFLAGS) -module -version-info 13:1:0 +$(AM_LDFLAGS) -module -version-info 13:2:0 HwSusy_la_CPPFLAGS = \ $(AM_CPPFLAGS) -I$(srcdir)/Susy nodist_HwSusy_la_SOURCES = \ Susy/Susy__all.cc ############# HwNMSSM_la_LDFLAGS = \ $(AM_LDFLAGS) -module -version-info 5:0:0 HwNMSSM_la_CPPFLAGS = \ $(AM_CPPFLAGS) -I$(srcdir)/Susy/NMSSM nodist_HwNMSSM_la_SOURCES = \ Susy/NMSSM/NMSSM__all.cc ############# HwRPV_la_LDFLAGS = \ $(AM_LDFLAGS) -module -version-info 3:0:0 HwRPV_la_CPPFLAGS = \ $(AM_CPPFLAGS) -I$(srcdir)/Susy/RPV nodist_HwRPV_la_SOURCES = \ Susy/RPV/RPV__all.cc ############# HwZprimeModel_la_LDFLAGS = \ $(AM_LDFLAGS) -module -version-info 3:0:0 HwZprimeModel_la_SOURCES = \ Zprime/ZprimeModel.cc Zprime/ZprimeModelZPQQVertex.cc ############# HwTransplanck_la_LDFLAGS = \ $(AM_LDFLAGS) -module -version-info 4:1:0 HwTransplanck_la_SOURCES = \ Transplanckian/METRP2to2.cc ############# HwADDModel_la_LDFLAGS = \ $(AM_LDFLAGS) -module -version-info 4:0:0 HwADDModel_la_CPPFLAGS = \ $(AM_CPPFLAGS) -I$(srcdir)/ADD nodist_HwADDModel_la_SOURCES = \ ADD/ADD__all.cc ############# HwLeptoquarkModel_la_LDFLAGS = \ $(AM_LDFLAGS) -module -version-info 5:0:0 HwLeptoquarkModel_la_CPPFLAGS = \ $(AM_CPPFLAGS) -I$(srcdir)/Leptoquarks nodist_HwLeptoquarkModel_la_SOURCES = \ Leptoquarks/Leptoquark__all.cc ############# HwSextetModel_la_LDFLAGS = \ $(AM_LDFLAGS) -module -version-info 3:0:0 HwSextetModel_la_CPPFLAGS = \ $(AM_CPPFLAGS) -I$(srcdir)/Sextet nodist_HwSextetModel_la_SOURCES = \ Sextet/Sextet__all.cc ############# HwTTbAModel_la_LDFLAGS = \ $(AM_LDFLAGS) -module -version-info 3:0:0 HwTTbAModel_la_CPPFLAGS = \ $(AM_CPPFLAGS) -I$(srcdir)/TTbAsymm nodist_HwTTbAModel_la_SOURCES = \ TTbAsymm/TTbA__all.cc ############# HwLHModel_la_LDFLAGS = \ $(AM_LDFLAGS) -module -version-info 5:0:0 HwLHModel_la_CPPFLAGS = \ $(AM_CPPFLAGS) -I$(srcdir)/LH nodist_HwLHModel_la_SOURCES = \ LH/LH__all.cc ############# HwLHTPModel_la_LDFLAGS = \ $(AM_LDFLAGS) -module -version-info 5:0:0 HwLHTPModel_la_LIBADD = \ $(GSLLIBS) HwLHTPModel_la_CPPFLAGS = \ $(AM_CPPFLAGS) $(GSLINCLUDE) -I$(srcdir)/LHTP nodist_HwLHTPModel_la_SOURCES = \ LHTP/LHTP__all.cc ############# diff --git a/NEWS b/NEWS --- a/NEWS +++ b/NEWS @@ -1,1689 +1,1699 @@ Herwig News -*- outline -*- ================================================================================ +* Herwig 7.1.4 release: 2018-06-28 + +** More matrix elements and better handling of BSM physics + +** Fix for spin correlations in angular-ordered shower, effects top decays + +** Allow fixed target collisions + +** various minor fixes + * Herwig 7.1.3 release: 2018-04-05 ** Dipole Shower *** Changed default phase space limits *** g -> gg splitting function asymmetrized *** Initial retune supplied, given the visible changes to LEP observables ** Added new Baryonic colour reconnection model (arXiv 1710:10906) ** Added Schuler-Sjostrand Photon PDFs ** Handling of massless taus from external sources ** various minor fixes ** use std::array<> where possible * Herwig 7.1.2 release: 2017-11-01 ** Reduction of the 
default pt cut for QED radiation off leptons.
** Inputfile changes due to new read mode in ThePEG. ThePEG remains in current repo dir when reading input-file/snippet.
** Fix for shower scale variations in qtilde shower.
** All standard input files now use the tuned intrinsic pt.
** Remove obsolete input files for various tunes.
** Fix for Madgraph interface for NLO corrections with recent version.
** Run file size reduction for processes using madgraph/openloops.
** Fix in jacobian for massive dipole kinematics.
** General improvements for UFO model handling.

* Herwig 7.1.1 release: 2017-07-14
** Snippets are now all installed
** Fixed broken ufo2herwig output and LHC-MB.in
** UFO improvements
*** More robust SLHA file handling
*** option of creating diagonal mixing matrices, needed for ATLAS simplified models
*** Improved warnings about resetting standard model particles
*** Fixed certain cases where the wrong lorentz structure was picked in VVS vertices
** Improved error message for unhandled beam particles
** Fix for Dipole Shower chain selection
** Fixed crash in double diffractive delta resonances

* Herwig 7.1.0 release: 2017-05-19
** Major new release For a more detailed overview and further references please see the release note arXiv:1705.06919
** NLO multijet merging with the dipole shower
** A new soft model
** An interface to EvtGen
** Improved calculation of mass effects in the dipole shower
** Top decays in the dipole shower, and NLO corrections to the decay
** An implementation of the KrkNLO method for simple processes
** Major restructuring and cleanup of default input files
** C++11 is now mandatory and heavily used in the code
** Many smaller bugfixes and improvements

* Herwig 7.0.4 release: 2016-10-24
** API The high level API is now properly available as a library, providing an alternative to the Herwig main program.
** Dipole shower Added nloops() function to the AlphaS classes to return the number of loops in the running coupling
** Matchbox Improved error handling and clearer messages
** BSM models Initialize W mass correctly from SLHA file. Improved reading in of decay modes if they already exist.
** Sampling Introduced option to reduce reference weight in AlmostUnweighted mode by Kappa factor. Useful for processes where full unweighting is infeasible.
** Qtilde shower Better control of scale in splitting functions using the SplittingFunction:ScaleChoice interface.
** Tests New NLO fixed-order input files for testing Matchbox in Tests/ExternNLO.
** Input files Set diagonal CKM options consistently. Added TauTauHVertex and MuMuHVertex to MatchboxDefaults.

* Herwig 7.0.3 release: 2016-07-21
** subprocess generation filters A number of filters have been implemented to speed up process and diagram generation for the case of Standard Model like processes; this is particularly helpful for processes involving a large number of electroweak combinatorics. See the documentation for more details.
** fix to directory hierarchy A bug in the Herwig-scratch directory hierarchy when integrating with different seeds has been fixed.
** fix for relocating MadGraph generated amplitudes The directory and symbolic link structure for MadGraph-built amplitudes has been changed to allow for relocation of a run directory to a different file system hierarchy.
** z* variable added to LeptonsJetsAnalysis The builtin LeptonsJetsAnalysis has been extended to include the normalized relative rapidity z*
** detuning parameter for shower reweighting An efficiency tweak when using the shower reweighting facilities has been introduced for the veto algorithms in both the QTilde and the Dipole shower. See the documentation for more details.
** BaryonThreeQuarkModelFormFactor A scaling bug in the form factor series expansion was fixed.

* Herwig 7.0.2 release: 2016-04-29
** Event reweighting New option of calculating the weights for the effect of varying the scale used in both the default and dipole showers. The method is described in arXiv:1605.08256
** mergegrids mode A main mode for the Herwig executable has been added which merges the integration grids from parallel runs.
** NLO Diphoton production The NLO matrix elements in the POWHEG approach for diphoton production as described in JHEP 1202 (2012) 130, arXiv:1106.3939 have been included as MEPP2GammaGammaPowheg.
** BSM Hard process constructor A missing Feynman diagram with a t-channel vector boson has been added to the matrix element for vv2ss
** BSM Decay Modes calculation The behaviour of the option to disable decay modes in BSM Models has been changed so that the partial width for the ignored modes is now calculated and included in the total width, but the branching ratio is set to zero. This is more physical than the previous option where the mode was totally ignored and hence not included in the calculation of the width.
** Mass Generation The behaviour of the GenericMassGenerator has been changed so that modes which have been disabled are only included in the calculation of the total width and not in the partial width used in the numerator of the weight used to select the off-shell mass.
** Boost detection Boost could not detect the compiler version for gcc-5.3 and gcc-6.1

* Herwig 7.0.1 release: 2016-01-17
** Version number written to log file The Herwig version number is now included in addition to ThePEG's version.
** Tau lifetimes A bug with lifetimes for hard process Taus is fixed. Thanks to ATLAS for the report!
** Shower FSR retries Rare events could take a long time due to an extremely large number of FSR retries. These are now capped at a configurable number.
** Dipole shower Reweighting for non-radiating events; fixes for shower profile handling; option to downgrade large-Q expansion for alphaS
** Matchbox builtins Added massive currents for q-qbar
** ShowerAlphaQCD can now use user-defined thresholds
** Input snippets W/Z/H on-shell now split into three files; 4-flavour scheme added
** UFO converter The converter now has experimental support for writing out param cards of its current settings.
** LEPJetAnalysis loading fixed
** Contrib HJets++ has moved to a stand-alone project, FxFx has been added

* Herwig 7.0.0 (Herwig++ 3.0.0) release: 2015-12-04
** Major new release A major new release of the Monte Carlo event generator Herwig++ (version 3.0) is now available. This release marks the end of distinguishing Herwig++ and HERWIG development and therefore constitutes the first major release of version 7 of the Herwig event generator family.
   The new version features a number of significant improvements to the event simulation, including: built-in NLO hard process calculation for all Standard Model processes, with matching to both angular ordered and dipole shower modules via variants of both subtractive (MC@NLO-type) and multiplicative (Powheg-type) algorithms; QED radiation and spin correlations in the angular ordered shower; a consistent treatment of perturbative uncertainties within the hard process and parton showering, as well as vastly improved documentation.
   This version includes (for a more detailed overview and further references please see the release note arXiv:1512.01178):
** A long list of improvements and fixes for the Matchbox module
*** includes MC@NLO and Powheg matching to both showers with truncated showering
** A long list of improvements and fixes for both of the shower modules
*** includes improvements of numerics issues relevant to 100 TeV pp collisions
** NLO event simulation and Matchbox development
*** Interfaces to a number of external libraries
*** A new workflow for event generation
*** Electroweak corrections to VV production
** Parton shower development
*** QED radiation in the angular ordered shower
*** Spin correlations in the angular ordered shower
*** New scale choices in gluon branchings
** Improvements to event generation workflow
*** Re-organized and streamlined input files for the new NLO development
*** A unified treatment of shower and matching uncertainties
*** New integrator modules featuring parallel integration
** New default tunes for both shower modules
** New contrib modules
*** Electroweak Higgs plus jets production
*** FxFx merging support
*** Higgs boson pair production

* Herwig++-2.7.1 release: 2014-07-07
** New shower switches to select schemes for momentum reconstruction
*** QTildeReconstructor:FinalStateReconOption has the following options:
*** Default: All momenta are rescaled in the rest frame.
*** MostOffShell: Put all particles on their new mass shell; the most off-shell particle and the recoiling system are rescaled to ensure 4-momentum is conserved.
*** Recursive: Recursively put the most off-shell particle which hasn't yet been rescaled on-shell by rescaling the particles and the recoiling system.
*** RestMostOffShell: The most off-shell particle is put on shell by rescaling it and the recoiling system; the recoiling system is then put on-shell in its rest frame.
*** RestRecursive: As above, but recursively treat the currently most off-shell particle (only makes a difference for more than 3 partons).
** Ticket #378: Hadronization of baryon number violating clusters involving diquarks
   Fixed by only considering non-diquarks to be combined in the ClusterFinder.
** UFO converter can now parse SLHA files for parameter settings
   The UFO converter code can now use SLHA files for modifying parameters. The first pass "ufo2herwig" produces the model to be compiled. For each parameter card, run "slha2herwig" to get the matching input file.
** Fix for systems using lib64
   The repository is now initialized correctly on systems using lib64 as the library location.
** Efficiency optimization
   Better allocation of internal vector variables for a noticeable speed increase of 10-20% with LHC events.

* Herwig++-2.7.0 release: 2013-10-28
** UFO interface to Feynman rules generators
   Herwig++ now includes "ufo2herwig", a tool that automatically creates all required files to run a BSM model from a UFO directory.
   The conversion has been extensively tested against the Feynrules models MSSM, NMSSM, RS, Technicolor, and less extensively with most of the other models in the Feynrules model database. We expect that following this release there will be no further hard-coded new physics models added to Herwig++ and that future models will be included using the UFO interface.
** Shower uncertainties
   A first set of scaling parameters to estimate shower uncertainties is provided for both the angular ordered as well as the dipole shower; they are Evolver:HardScaleFactor and ShowerAlphaQCD:RenormalizationScaleFactor (see the example below).
** Rewrite of Matchbox NLO matching
   The NLO matching implementation has been rewritten and is now more flexible and consistent. Profile scales are provided for the hardest emission both for the dipole shower and matrix element correction matching.
** BLHA2 Interface and new processes
   Matchbox now features a generic BLHA2 interface to one-loop amplitude codes and now also includes W and W+jet production as well as Higgs production in gluon fusion as builtin processes.
** Improved dipole shower kinematics parametrization
   The kinematics parametrization for emissions in the dipole shower has been made more efficient.
** W and Z Powheg decays
   Decays of W and Z bosons now use the Powheg decayers by default.
** Improved treatment of beam remnants
   The handling of beam remnants has been improved in multiple contexts, leading to a much lower error rate at p+/p- collisions. An additional value "VeryHard" for ClusterFissioner:RemnantOption can be used to disable any special treatment of beam remnant clusters.
** New underlying event tune
   Herwig++ now uses tune UE-EE-5-MRST by default. Other related tunes can be obtained from the Herwig++ tunes page.
** Improvements in BSM code
   The UFO development identified many sign fixes in rarely used BSM vertices; many improvements were made to general decayers, allowing four-body decays in BSM for the first time; Powheg is enabled in General two-body decayers; and the handling of colour sextets has been improved.
** A new HiggsPair matrix element in Contrib.
** A new matrix element for single top production.
** The Higgs mass is now set to 125.9 GeV (from PDG 2013 update).
** C++-11 testing
   To help with the coming transition to C++-11, we provide the new --enable-stdcxx11 configure flag. Please try to test builds with this flag enabled and let us know any problems, but do not use this in production code yet. In future releases, this flag will be on by default.
** Other changes
*** Many new Rivet analyses have been included in the Tests directory.
*** Cleaned Shower header structure; grouped shower parameters into one struct.
*** The boolean Powheg flag in HwMEBase changed to an enumeration.

* Herwig++-2.6.3 release: 2013-02-22
** Decay vertex positioning in HepMC output
   Pseudo-vertices that Herwig++ inserts for technical reasons will now not contribute to the Lorentz positions of downstream vertices. Thanks to ATLAS for the bug report!
** Updated Rivet tests
   Herwig's library of Rivet test runs has been brought up-to-date with new analyses that were recently published by the Rivet collaboration.

* Herwig++-2.6.2 release: 2013-01-30
** Fixes for PDF and scale choices in POWHEG events
   Scale settings for MPI and the regular shower are now correct in POWHEG events. This should fix reported anomalies in POWHEG jet rates. NLO PDFs are now also set consistently in the example input files.
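   As a sketch of the shower-uncertainty scale factors mentioned under Herwig++-2.7.0 above, they
   might be varied as follows; the repository paths are assumptions and should be checked against
   the actual default input files:

      set /Herwig/Shower/Evolver:HardScaleFactor 2.0
      set /Herwig/Shower/AlphaQCD:RenormalizationScaleFactor 0.5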
** Ticket #373: Branching ratio factors in cross-section
   If any decay modes are selectively disabled, setting the following post-handler will cause all reported cross-sections to include the branching ratio factor(s) from the previous stages correctly:
      create Herwig::BranchingRatioReweighter BRreweight
      insert LHCGenerator:EventHandler:PostDecayHandlers 0 BRreweight
** Anomalous vertices now possible in MEfftoVH
** Interactive shell does not quit on error
** Better warning messages for events from inconsistent LHEF files
** Possible division by zero error fixed in BSM branching ratio calculations
** Decayer and ME changes to improve checkpointing
   The checkpointing changes in ThePEG 1.8.2 are implemented here, too. Regular dump files are consistent now.

* Herwig++-2.6.1 release: 2012-10-16
** Configure switches
   The various switches to turn off compilation of BSM models have been unified into a single '--disable-models'. A new flag '--disable-dipole' can be used to turn off the compilation of the Dipole and Matchbox codes.
** Ticket #348: Search path for repository 'read' command
   The search path for the 'read' command is configurable on the command line with the -i and -I switches. By default, the installation location is now included in the search path, so that 'Herwig++ read LEP.in' will work in an empty directory. The current working directory will always be searched first. The rarely used "Herwig++ init" command has been made consistent with 'read' and 'run' and should now be used without the '-i' flag.
** Width treatment in BSM
   The width treatment in BSM decay chains has been greatly improved and is now switched on by default in the .model files. To get the old behaviour, use
      set /Herwig/NewPhysics/NewModel:WhichOffshell Selected
** New BSM models
   Little Higgs models with and without T-parity are now available.
** Resonance photon lifetime
   A lifetime bug affecting decays of pi0 to e+e-X was fixed. The virtual photon is not part of the event record anymore.
** Ticket #371: Hard diffraction FPE
   Herwig++ 2.6.0 introduced a bug into the diffraction code which would abort any runs. This is now fixed.
** O2AlphaS
   Support for setting quark masses different from the particle data objects as introduced in ThePEG 1.8.1 has been enabled.
** Matchbox
   Several improvements and bug fixes are included for Matchbox. Amplitudes relevant to pp -> Z+jet and crossed processes at NLO are now available, and various scale choices have been added in a more flexible way. All subtraction dipoles for massive quarks are now included.
** Dipole shower
   Parameters to perform scale variations in the shower have been added to estimate uncertainties. A bug in showering off gg -> h has been fixed.
** Minor fixes
*** Two broken colour structures in GeneralHardME
*** Susy Higgs mixing matrix
*** BaryonFactorizedDecayer out-of-bounds access
*** Mass values in SimpleLHCAnalysis

* Herwig++-2.6.0 release: 2012-05-21 (tagged at SVN r7407)
** New NLO framework
   Matchbox, a flexible and very general framework for performing NLO calculations at fixed order or matched to parton showers is provided with this release.
** Dipole shower algorithm
   A first implementation of the coherent dipole shower algorithm by Plätzer and Gieseke (arXiv:0909.5593 and arXiv:1109.6256) is available.
** Alternative samplers and the ExSample library
   The ExSample library by Plätzer (arXiv:1108.6182) is shipped along with Herwig++ in an extended version.
   The extended version provides SamplerBase objects which can be used alternatively to the default ACDCSampler.
** New BSM models
*** New colour sextet diquark model
   A colour sextet diquark model has been included, as described in Richardson and Winn (arXiv:1108.6154).
*** Models reproducing the CDF t-tbar asymmetry
   Four models that can reproduce the reported t-tbar asymmetry have been included.
*** Zprime
   A simple standard model extension by one additional heavy neutral vector boson.
** Interface to AlpGen, with MLM merging
   The Contrib directory contains a new interface to the AlpGen matrix element generator. AlpGen events must be preprocessed with the provided AlpGenToLH.exe tool before they can be used. More information can be found in the Herwig++ 2.6 release note.
** HiggsVBF Powheg
   Higgs boson production by vector boson fusion is available at NLO in the POWHEG scheme, as described in d'Errico, Richardson (arXiv:1106.2983). The Powheg DIS processes were available in Herwig++-2.5.2 already.
** Statistical colour reconnection
   Alternative mechanisms to minimize the colour length Sum(m_clu) before the hadronization stage, based on Metropolis and annealing algorithms.
** Energy extrapolation of underlying-event tunes
   To describe underlying-event data at different c.m. energies, the energy-dependent parameter pT_min will now be adjusted automatically, following a power-law. The new tune parameters are the value at 7000 GeV, "MPIHandler:pTmin0", and MPIHandler:Power (see the example below).
** Ticket #239: Reporting of minimum-bias cross-section
   When simulating minimum-bias events using the MEMinBias matrix element, the correct unitarized cross section can now be reported via the standard facilities; it is no longer necessary to extract it from the .log file of the run. The corresponding functionality is enabled by inserting a MPIXSecReweighter object as a post-subprocess handler:
      create Herwig::MPIXSecReweighter MPIXSecReweighter
      insert LHCHandler:PostSubProcessHandlers 0 MPIXSecReweighter
** Dependency on 'boost'
   Herwig++ now requires the boost headers to build; if not detected in standard locations, specify with the --with-boost configure option.
** Tests directory
   The Tests directory now contains input cards for almost all Rivet analyses. A full comparison run can be initiated with 'make tests'.
** Minor changes
*** Default LHC energy now 8 TeV
   All LHC-based defaults have now been updated to use 8 TeV as the center-of-mass energy.
*** Herwig::ExtraParticleID -> ThePEG::ParticleID
   The namespace for additional particles has been unified into ThePEG::ParticleID.
*** MEee2VectorMeson
   The e+e- -> vector meson matrix element has moved from Contrib into HwMELepton.so.
*** SUSY numerics fixes
   Better handling of rare numerical instabilities.
*** YODA output for Rivet
   The built-in histogramming handler can now output data in the YODA format used by Rivet.
*** Consistency checks in SLHA file reader
   Better warnings for inconsistent SUSY LHA files.
*** Better colour flow checking for development
** Bug fixes
*** Extremely offshell W from top decay
   Numerical improvements for very off-shell W bosons coming from top decays.
*** Ticket #367: problems in using SUSY + LHE
   SUSY events from Les Houches event files are now handled better.
*** Infinite loop in remnant decayer
   The remnant decayer will now abort after 100 tries.
*** Fix to HiggsVBF LO diagrams
   The diagram structure of HiggsVBF LO matrix elements has been fixed.
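   As an illustration of the energy-extrapolation parameters mentioned under Herwig++-2.6.0 above,
   they could be set as follows; the object path and the numbers are assumptions for illustration
   only, not tuned values:

      set /Herwig/UnderlyingEvent/MPIHandler:pTmin0 3.0*GeV
      set /Herwig/UnderlyingEvent/MPIHandler:Power 0.3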
*** LEP thrust fix
   The calculation of the transverse momentum of a branching from the evolution variable in final-state radiation can now be changed. While formally a sub-leading choice, this enables a better description of the thrust distribution in e+e- collisions at small values of the thrust. Currently the default behaviour, where the cut-off masses are used in the calculation, remains the same as in previous versions.

* Herwig++-2.5.2 release: 2011-11-01 (tagged at SVN r6928)
** Optional new jet vetoing model
   The jet vetoing model by Schofield and Seymour (arXiv:1103.4811) is available via Evolver:ColourEvolutionMethod, PartnerFinder:PartnerMethod and SplittingFunction:SplittingColourMethod. The default behaviour is unchanged.
** MPI tune
   Version 3 of the MPI tunes is now the default. Please note that the pT parameter is energy-dependent and needs to be modified when an LHC run is not at 7 TeV. The latest tunes are always available at http://projects.hepforge.org/herwig/trac/wiki/MB_UE_tunes
** MPI PDFs
   MPI PDFs can now be controlled independently.
** Initialization time speedup
   A new BSMModel base class was introduced between StandardModel and the BSM model classes. Together with a restructured decay mode initialization, this offers significantly faster startup times for BSM runs. ThreeBodyDecays can now always be switched on without a large speed penalty.
** Decay mode file
   Decay mode files in the SLHA format can now be read separately in any BSM model with 'set Model:DecayFileName filename'.
** Powheg DIS
   Charged- and neutral-current DIS processes implementing the POWHEG method are now available.
** Diffraction models
   Xi cut implemented in PomeronFlux.
** Ticket #352: Colour reconnection fixed in DIS
** Ticket #353: Improved numerical stability in chargino decays
** Ticket #358: Infinite loop in top events with pT cut in shower
** Ticket #361: Problem with duplicate 2-body modes in BSM
** Tickets #362 / #363: Crashes with baryon number violating models
   Particle decays in SUSY models with RPV now work correctly in the colour 8 -> 3,3,3 case. Colour reshuffling now works for RPV clusters.
** Improved Fastjet detection
   The configure step uses fastjet-config to make sure all header file paths are seen.
** Darwin 11 / OS X Lion
   A configure bug was fixed which prevented 'make check' from succeeding on OS X Lion.
** Vertex classes
   The specification of QED / QCD orders has been moved to the vertex constructors, to allow ThePEG consistency checks. WWHH vertices in MSSM and NMSSM were fixed. Some Leptoquark and UED vertices fixed.
** Hadronization
   Cleanup of obsolete code.

* Herwig++-2.5.1 release: 2011-06-24 (tagged at SVN r6609)
** Example input files at 7 TeV
   All our example input files for LHC now have their beam energy set to 7 TeV instead of 14 TeV.
** Colour reconnection on by default
   The colour reconnection tunes are now the default setup. Version 2 of the tunes replaces the *-1 tunes, which had a problem with LEP event shapes.
** Run name tags
   Added the possibility to add a tag to the run name when running with the '-t' option. One run file can thus be run with different seeds and the results stored in different output files (see the example below).
** Floating point exceptions
   The new command line option -D enables floating point error checking.
** General improvements to WeakCurrent decays
** Remnant decayer
   Hardwired gluon mass was removed.
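   As an illustration of the run-name tags mentioned above, the same run file can be run several
   times with different tags (and, as usual, different seeds), keeping the output files separate;
   the file name and event numbers are placeholders:

      Herwig++ run LHC.run -N 50000 -t part1
      Herwig++ run LHC.run -N 50000 -t part2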
** WeakCurrentDecayConstructor
   Instead of specifying separate Particle1...Particle5 vectors for the decay modes, the new interface DecayModes can be filled with decay tags in the standard syntax.
** BSM: improvements to handling of vertex and model initialisation
** Powheg Higgs
   Option to use pT or mT as the scale in alphaS and for the factorization scale in the PDFs.
** Ticket #337: Tau polarization wrong in charged Higgs decay
** Ticket #339: Colour flows in GeneralThreeBody Decayers for 3bar -> 8 3bar 1
** Ticket #340: Crash for resonant zero-width particles
** Ticket #341: Varying scale for BSM processes
   The scale used is now ResonantProcessConstructor:ScaleFactor or TwoToTwoProcessConstructor:ScaleFactor multiplied by sHat.
** Ticket #346: Chargino decays
   Chargino decayers now automatically switch between the mesonic decays for mass differences less than 2 GeV and the normal partonic decays above 2 GeV.
** Ticket #349: Stop by default on input file errors
   The '--exitonerror' flag is now the default behaviour for the Herwig++ binary. To switch back to the old behaviour, '--noexitonerror' is required.
** Ticket #351: Four-body stop decays
** Tested with gcc-4.6

* Herwig++-2.5.0 release: 2011-02-08 (tagged at SVN r6274)
** Uses ThePEG-1.7.0
   Herwig++ 2.5.0 requires ThePEG 1.7.0 to benefit from various improvements, particularly: handling of diffractive processes; respecting LD_LIBRARY_PATH when loading dynamic libraries, including LHAPDF; improvements to repository commands for decay modes. See ThePEG's NEWS file for more details.
** POWHEG improvements
*** New POWHEG processes
   Simulation at NLO accuracy using the POWHEG method is now available for hadronic diboson production (pp to WW,WZ,ZZ), Higgs decays to heavy quarks, and e+e- to two jets or ttbar, including full mass dependence.
*** Input file changes
   The input files for setting up POWHEG process simulation have been simplified. See the example files LHC-Powheg.in and TVT-Powheg.in for the improved command list.
*** Structural changes
   The POWHEG backend in the shower code has been restructured to make future additions easier: PowhegEvolver has merged with Evolver; both the matrix element corrections and real corrections in the POWHEG scheme are implemented directly in the ME or Decayer classes.
** New processes at leading order
*** Photon initiated processes
   We have added a matrix element for dijet production in gamma hadron collisions.
*** Bottom and charm in heavy quark ME
   The option of bottom and charm quarks is now supported for heavy quark production in MEHeavyQuark.
** Colour reconnection
   The cluster hadronization model has been extended by an option to reconnect coloured constituents between clusters with a given probability. This new model is different from the colour reconnection model used in FORTRAN HERWIG, and improves the description of minimum bias and underlying event data.
** Diffractive Processes
   Both single and double diffractive processes are now supported in Herwig++. The Pomeron PDF is implemented using a fit to HERA data, and a pion PDF can be used to model reggeon flux.
** BSM physics
*** New models
   We have added new BSM models, particularly ADD-type extra dimension models and the next-to-minimal supersymmetric standard model (NMSSM). Effects of leptoquarks can also be simulated.
*** Vertex additions
   We have added flavour changing stop interactions (stop - neutralino - charm) and gravitino interactions with particular emphasis on numerical stability for very light gravitinos.
   Tri-linear Higgs and Higgs-Higgs/Vector-Vector four-vertices are available as well.
*** Input file changes
   The SUSY model can now also extract the SLHA information from the header of a Les Houches event file: replace the SLHA file name in the example input files with the LH file name.
*** Structure
   The backend structure of the HardProcessConstructor has changed, to allow easier inclusion of new process constructors. Some 2->3 BSM scattering processes involving neutral Higgs bosons are now included. The spin handling has been improved in the background.
** Shower splitting code reorganized
   The selection of spin structures has been decoupled from the choice of colour structure. This gives more flexibility in implementing new splittings. Selected splittings can be disabled in the input files.
** B mixing
   B mixing, and indirect CP violation in the B meson system, are now included.
** Looptools
   The Looptools directory has been updated to reflect T. Hahn's Looptools 2.6.
** Contrib changes
   The ROOT interface has been removed as deprecated. The MCPWNLO code has temporarily been removed from the Contrib directory as a major review of this code is required. Additionally, there are various fixes to all other codes shipped in Contrib.
** DIS improvements
   The momentum reshuffling in DIS events has been improved.
** mu and nu beams
   mu, nu_e and nu_mu and their antiparticles are now available as beam particles. They are all supported in the DIS matrix elements. mu+ mu- collisions are supported in the general matrix element code for BSM models, but not yet in the hard-coded matrix elements for lepton-lepton scattering.
** Structural changes
*** Inline code
   Inline code has been merged into the header files; .icc files were removed.
*** Silent build
   By default, Herwig++ now builds with silent build rules. To get the old behaviour, run 'make V=1'.
*** Debug level
   The debug level on the command line will now always have priority.
*** Event counter
   The event counter has been simplified.
*** Interpolator persistency
   Interpolators can now be written persistently.
** Ticket #307: Momentum violation check in BasicConsistency
   Added parameters AbsoluteMomentumTolerance and RelativeMomentumTolerance (see the example below).
** Example POWHEG input files
   The example input files for Powheg processes now set the NLO alpha_S correctly, and are run as part of 'make check'.
** Truncated shower
   A problem which led to the truncated shower not being applied in some cases has been fixed.
** Fixes to numerical problems
   Minor problems with values close to zero were fixed in several locations.
** Remove duplicated calculation of event shapes
   An accidental duplication in the calculation of event shapes was removed; they are now only calculated once per event. Several other minor issues in the event shape calculations have also been fixed.
** MRST PDFs fixed
   An initialization problem in the internal MRST PDFs was fixed.
** Vertex scale choice
   The scale in the Vertex classes can now be zero where possible.
** Treatment of -N flag
   The Herwig++ main program now correctly treats the -N flag as optional.
** Numerical stability improved
   The numerical stability in the 'RunningMass' and 'QTildeReconstructor' classes has been improved. The stability of the boosts in the SOPTHY code for the simulation of QED radiation has been improved. The accuracy of boosts in the z-direction has been improved to fix problems with extremely high p_T partons.
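   As an illustration of the BasicConsistency tolerances mentioned under Ticket #307 above, they
   could be adjusted as follows; the object path and the values are assumptions for illustration
   only:

      set /Herwig/Analysis/BasicConsistency:AbsoluteMomentumTolerance 0.00001*GeV
      set /Herwig/Analysis/BasicConsistency:RelativeMomentumTolerance 1.0e-8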
** Bugfix in initial state splittings
   A bug in the implementation of the PDF weight in initial-state qbar -> qbar g splittings has been fixed.
** Bugfix in chargino neutralino vertices
   A bug in the 'chi+- chi0 W-+' and charged Higgs-sfermion vertices has been fixed.
** Remove uninitialized variables written to repository
   A number of uninitialised variables which were written to the repository have been initialised to zero to avoid problems on some systems.
** Fix to QED radiation in hadronic collisions
   The longitudinal boost of the centre-of-mass frame in hadronic collisions is now correctly accounted for in the generation of QED radiation.
** Fix to numerical problems in two-body decays
   Numerical problems have been fixed which appeared in the rare case that the three-momenta of the decay products in two-body decays are zero in the rest frame of the decaying particle.
** A problem with forced splittings in the Remnant was fixed.
** ME correction for W+- decays applied properly
   The matrix element correction for QCD radiation in W+- decays, which was not being applied, is now correctly used.
** Top quark decays from SLHA file
   The presence of top quark decay modes in SLHA files is now handled correctly.
** Exceptional shower reconstruction kinematics
   Additional protection against problems due to the shower reconstruction leading to partons with x>1 has been added.
** Ordering of particles in BSM processes
   Changes have been made to allow arbitrary ordering of the outgoing particles in BSM processes.
** Bugfixes in tau decays
   Two bugs involving tau decays have been fixed. The wrong masses were used in the 'KPiCurrent' class for the scalar form factors, and a mistake in the selection of decay products led to tau- --> pi0 K- being generated instead of tau- --> eta K-.
** Avoid crashes in baryon number violating processes
   To avoid crashes, better protection has been introduced for the case where diquarks cannot be formed from the quarks in a baryon-number violating process. In addition, the parents of the baryon-number violating clusters have been changed to avoid problems with the conversion of the events to HepMC.
** QED radiation in W- decays
   A bug in the 'QEDRadiationHandler' class which resulted in no QED radiation being generated in W- decays has been fixed.
** A number of minor fixes to the SUSY models have been made.
** Partial width calculations in BSM models
   A fix for the direction of the incoming particle in the calculation of two-body partial widths in BSM models has been made.
** LoopTools improvements
   The LoopTools cache is now cleared more frequently to reduce the amount of memory used.
** Negative gluino masses are now correctly handled.
** A problem with mixing matrices which are not square has been fixed.
** Removed duplicate diagram
   The 'MEee2gZ2ll' class has been fixed to only include the photon exchange diagram once rather than twice as previously.
** Fix for duplicate particles in DecayConstructor
   A problem has been fixed which occurred if the same particle was included in the list of DecayConstructor:DecayParticles.
** Fixes for UED model vertices
   A number of minor problems in the vertices for the UED model have been fixed.
** Include missing symmetry factor
   The missing identical-particle symmetry factor in 'MEPP2GammaGamma' has been included.
** Fix floating point problem in top decays
   A floating point problem in the matrix element correction for top decays has been fixed.
* Herwig++-2.4.2 release: 2009-12-11 (tagged at SVN r5022)
** Ticket #292: Tau decay numerical instability
   The momentum assignment in tau decays contained numerical instabilities which have been fixed by postponing the tau decay until after the parton shower. A new interface setting DecayHandler:Excluded is available to prevent decays in the shower step; this is enabled by default for tau only (see the example below).
** Ticket #290: Missing MSSM colour structure
   The missing colour structure for gluino -> gluon neutralino was added.
** Ticket #294: Zero momentum in some decays
   Some rare phase space points led to zero momentum in two-body decays. This has been fixed.
** Ticket #295: Stability of QED radiation for lepton collider processes
   The numerical stability of QED radiation momenta was improved further.
** Ticket #296: K0 oscillation vertex was wrong
   The oscillation from K0 to K0_L/S now takes place at the production vertex of the K0.
** Ticket #289: Undefined variables in repository
   On some system configurations, undefined variables were written to the repository. These have been fixed.
** Fixed QED radiation for hadron processes
   The longitudinal boost of the centre-of-mass frame in hadronic collisions is now correctly accounted for.
** Numerical stability fixes
   Small fixes in RunningMass and QTildeReconstructor.
** Powheg example input files
   The example input files for Powheg processes now set the NLO alpha_S correctly, and are run as part of 'make check'.
** OS X builds for Snow Leopard
   Snow Leopard machines will now be recognized as a 64-bit architecture.

* Herwig++-2.4.1 release: 2009-11-19 (tagged at SVN r4932)
** Uses ThePEG-1.6.0
   Herwig++ now requires ThePEG-1.6.0 to benefit from the improved helicity code there. If you have self-written vertex classes, see ThePEG's NEWS file for conversion instructions.
** Vertex improvements
   ThePEG's new helicity code allowed major simplification of the vertex implementations for all Standard Model and BSM physics models.
** New Transplanckian scattering model
   An example configuration is in LHC-TRP.in.
** BSM ModelGenerator as branching ratio calculator
   The BSM ModelGenerator has a new switch to output the branching ratios for a given SLHA file in SLHA format, which can then be used elsewhere.
** BSM debugging: HardProcessConstructor
   New interface 'Excluded' to exclude certain particles from intermediate lines.
** Chargino-Neutralino-W vertex fixed
** Spin correlations are now switched on by default for all perturbative decays.
** Ticket #276: Scale choice in BSM models' HardProcessConstructor
   New interface 'ScaleChoice' to choose the process scale between sHat (default for colour neutral intermediates) and transverse mass (default for all other processes).
** Ticket #287: Powheg process scale choice
   The default choice is now the mass of the colour-singlet system.
** Ticket #278: QED radiation for BSM
   Soft QED radiation is now enabled in BSM decays and all perturbative decays by default.
** Ticket #279: Full 1-loop QED radiation for Z decays
   Soft QED radiation in Z decays is now fully 1-loop by default.
** Ticket #280: Redirect all files to stdout
   This is now implemented globally. The files previously ending in -UE.out and -BSMinfo.out are now appended to the log file. They now also obey the EventGenerator:UseStdout flag.
** Ticket #270: LaTeX output updated
   After each run, a LaTeX file is produced that contains the full list of citations. Please include the relevant ones in publications.
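   A hypothetical illustration of the DecayHandler:Excluded setting from Ticket #292 above; the
   handler path and the particle chosen are assumptions, and tau is already excluded by default:

      insert /Herwig/Decays/DecayHandler:Excluded 0 /Herwig/Particles/B0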
** Ticket #256: Mac OS X problems
   An initialization problem that affected only some configurations has been identified and fixed.
** Tests directory added
   This contains many .in files, to exercise most matrix elements.
** Minor fixes
*** Prevent rare x>1 partons in shower reconstruction.
*** SUSY-LHA parameter EXTPAR can be used to set tan beta
*** Improved Fastjet detection at configure time

* Herwig++-2.4.0 release: 2009-09-01 (tagged at SVN r4616)
** New matrix elements
   We have added a built-in implementation of several new matrix elements:
      PP --> WW / WZ / ZZ
      PP --> W gamma / Z gamma
      PP --> VBF Higgs
      PP --> Higgs tt / Higgs bb
      e+e- --> WW / ZZ
      gamma gamma --> ff / WW
** Base code improvements
*** Ticket #257: Remnant handling
   A problem with forced splittings in the Remnant was fixed.
*** Ticket #264: Soft matrix element correction
   A problem with emissions from antiquarks was fixed.
** PDF sets
*** New default set
   MRST LO** is the new default PDF set. LO* is also available built-in.
*** Shower PDFs can be set separately from the hard process
   Use the 'ShowerHandler:PDF' interface.
** Parameter tunes
   Shower, hadronization and underlying event parameters were retuned against LEP and Tevatron data respectively.
** BSM module improvements
*** Ticket #259: read error for some UED models
   Arbitrary ordering of outgoing lines in the process description is now possible.
*** Ticket #266: branching ratio sums
   The warning threshold for branching ratios not summing to 1 has been relaxed. It is now a user interface parameter.
*** Ticket #267: Top decay modes
   Top decay modes listed in SLHA files are now handled correctly.
** QED radiation
*** Ticket #241: Soft QED radiation is now enabled by default
*** Ticket #265: Radiation off W+ and W- is now handled correctly
** Interfaces
*** Ticket #243: Fastjet
   Fastjet is now the only supported jet finder code. All example analyses have been converted to use Fastjet.
*** KtJet and CLHEP interfaces have been removed.
*** New interfaces to AcerDet and PGS available in Contrib
*** MCPWnlo distributed in Contrib
*** HepMC and Rivet interfaces moved to ThePEG
** Ticket #239: Inelastic cross-section for MinBias
   This information is now available in the ...-UE.out files.
** Technical changes
*** Ticket #186
   Configure now looks for ThePEG in the --prefix location first.
*** Configure information
   Important configuration information is listed at the end of the 'configure' run and in the file 'config.thepeg'. Please provide this file in any bug reports.
*** New ZERO object
   The ZERO object can be used to set any dimensionful quantity to zero. This avoids explicit constructs like 0.0*GeV.
*** Exception specifiers removed
   Client code changes are needed in doinit() etc.; simply remove the exception specifier after the function name.
*** Ticket #263: Tau polarizations can be forced in TauDecayer

* Herwig++-2.3.2 release: 2009-05-08 (tagged at SVN r4249)
** SUSY enhancements
*** Ticket #245: Select inclusive / exclusive production
   Using the new 'HardProcessConstructor:Processes' switch with options 'SingleParticleInclusive', 'TwoParticleInclusive' or 'Exclusive' (see the example below).
*** Improved three-body decay generation
   Several problems were fixed, incl. tickets #249 #250 #251. Thanks to J.Tattersall and K.Rolbiecki for the stress-testing!
*** Looptools fix
   Release 2.3.1 had broken the Looptools initialization.
*** Improved warning message texts
** Ticket #237: Values of q2last can now be zero where possible.
** Ticket #240: The Herwig++ main program now correctly treats the -N flag as optional.
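   As an illustration of the process selection switch from Ticket #245 above, it could be set as
   follows; the repository path is an assumption:

      set /Herwig/NewPhysics/HardProcessConstructor:Processes Exclusive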
** Ticket #246: Extreme pT partons fixed by improving accuracy of z boosts.
** DIS
   Improved parton shower momentum reshuffling.
** Minimum Bias events
   The zero-momentum interacting particle used for bookkeeping is now labelled as a pomeron.
** User Makefile
   Makefile-UserModules does not enable -pedantic anymore. User's ROOT code will not compile otherwise.
** Build system
   Small fixes in the build system.

* Herwig++-2.3.1 release: 2009-03-31 (tagged at SVN r4140)
** Initial state showers
   The PDF veto was wrongly applied to qbar->qbar g splittings.
** User interaction
   The Makefile-UserModules now includes the Herwig version number. The -N flag to 'Herwig++ run' is optional now, as was always intended.
** Contrib
   The contrib directory is now included in the tarball. The omission was accidental.
** Numerical accuracy
   Minor problems with values close to zero were fixed in several locations.
** LEP event shapes
   An accidental duplication was removed; they are now only calculated once per event.
** MRST PDF code
   Initialization problem fixed.
** Mac OS X
   The configure script was improved to detect libraries better.
** Libtool
   Updated to version 2.2.6.

* Herwig++-2.3.0 release: 2008-12-02 (tagged at SVN r3939)
** Major release, with many new features and bug fixes
** Extension to lepton-hadron collisions
** Inclusion of several processes accurate at next-to-leading order in the POsitive Weight Hardest Emission Generator (POWHEG) scheme
** Inclusion of three-body decays and finite-width effects in BSM processes
** New procedure for reconstructing kinematics of the parton shower based on the colour structure of the hard scattering process
** New model for baryon decays including excited baryon multiplets
** Addition of a soft component to the multiple scattering model of the underlying event and the option to choose more than one hard scattering explicitly
** New matrix elements for DIS and e+e- processes
** New /Contrib directory added containing external modules that will hopefully be of use to some users but are not expected to be needed by most users and are not supported at the same level as the main Herwig++ code
** Minor changes to improve the physics simulation:
*** IncomingPhotonEvolver added to allow the simulation of partonic processes with incoming photons in hadron collisions
*** KTRapidityCut added to allow cuts on the p_T and rapidity, rather than just the p_T and pseudorapidity used in SimpleKTCut.
   This is now used by default for cuts on massive particles such as the $W^\pm$, $Z^0$ and Higgs bosons and the top quark.
*** Several changes to the decayers of B mesons, both to resolve problems with the modelling of partonic decays and to improve agreement with $\Upsilon(4s)$ data
*** Changes to allow values other than the transverse mass of the final-state particles as the maximum transverse momentum for radiation in the parton shower: either SCALUP for Les Houches events or the scale of the hard process for internally generated hard processes
*** Changed defaults for intrinsic transverse momentum in hadron collisions to 1.9 GeV, 2.1 GeV and 2.2 GeV for the Tevatron and the LHC at 10 TeV and 14 TeV, respectively
*** Pdfinfo object is now created in the HepMC interface. However, in order to support all versions of HepMC containing this feature, the PDF set is not specified as not all versions contain this information
*** New option of only decaying particles with lifetimes below a user-specified value
*** New options for the cut-off in the shower and some obsolete parameters removed
*** Added option of switching off certain decay modes in BSM models
*** Added a Matcher for the Higgs boson to allow cuts to be placed on it
*** Diffractive particles deleted from default input files; they were not previously used
** Technical changes:
*** Some AnalysisHandler classes comparing to LEP data have been renamed, e.g. MultiplicityCount becomes LEPMultiplicityCount, to avoid confusion with those supplied in /Contrib for observables at the Upsilon(4s) resonance
*** Reorganisation to remove the majority of the .icc files by moving inlined functions to headers in an effort to improve compile time
*** Restructured the decay libraries to reduce the amount of memory allocation and de-allocation, which improves run-time performance
*** The switch to turn off LoopTools has been removed because LoopTools is now used by several core modules.
   As LoopTools does not work on 64-bit platforms with g77, this build option is not supported.
*** Removed support for the obsolete version of HepMC supplied with CLHEP and improved the support for different unit options with HepMC
*** EvtGen interface has been removed until it is more stable
*** Support for ROOT has been removed; it was not previously used
*** CKKW infrastructure has been removed from the release until a concrete implementation is available
*** Default optimisation has been increased from -O2 to -O3
*** Handling of the fortran compiler has been improved, mainly due to improvements in the autotools
*** Use of FixedAllocator for Particle objects in ThePEG has been removed as it had no performance benefits
** Bugs fixed:
*** Problems with the mother/daughter relations in the hard process and diagram selection in W+- and Z0 production in association with a hard jet
*** In the general matrix element code for fermion-vector to fermion-scalar where the outgoing fermion is coloured and the scalar neutral
*** In the selection of diagrams in some associated squark gaugino processes
*** h0->mu+mu- was being generated in place of h0->tau+tau-
*** Normalisation in the Histogram class for non unit-weight events
*** Protection against negative PDF values has been improved; these can occur when using NLO PDF sets
*** Lifetime for BSM particles is now automatically calculated at the same time as the width
*** Hadrons containing a top quark are now treated like hadrons containing BSM particles in order to support this possibility
*** Several ambiguous uses of unsigned int
*** Several variables that may have been used undefined
*** Several memory leaks at initialisation
*** The configuration now aborts if no fortran compiler is found, as this is required to compile Looptools
*** Several minor floating point errors that did not affect results

* Herwig++-2.2.1 release: 2008-07-09 (tagged at SVN r3434)
** Ticket #181: BSM shower with a decay close to threshold
   Now fixed.
** Ticket #191: Split SUSY crash
   Improved error message.
** Ticket #192: using SCALUP as the pT veto in the shower
   Now implemented.
** Ticket #194: production processes of ~chi_1(2)-
   Fixed bug in the diagram creation.
** Removed unused particles
   DiffractiveParticles.in was removed; they were never produced.
** Hadronization
   Top quark clusters now possible, handled as 'exotic' clusters.
** Improved handling of decay modes
   See ThePEG-1.3.0. 'defaultparticle' command is now obsolete.
** Multi-Parton interactions
   Increased phase space sampling to have less than 1% uncertainty on average multiplicity.
** New libtool version
   gfortran is now used as default if it is available. Set FC=g77 to override this.
** Fixed several memory leaks
** Memory allocation
   Now using plain 'new' and 'delete'.

* Herwig++-2.2.0 release: 2008-04-18 (tagged at SVN r3195)
** Major release: now as stand-alone library
   Herwig++ is now a stand-alone dlopen() plugin to ThePEG. No compile-time linking to Herwig code is required. The Herwig++ binary is a simple executable steering ThePEG, which can be replaced by other frontends (such as setupThePEG / runThePEG).
** New matrix elements
      p p -> W + jet / Z + jet / W + higgs / Z + higgs
      e+ e- -> Z + higgs
** Looptools
   Updated to version 2.2.
** Ticket #141: segfault from using 'run' command
   Fixed by using default allocators in Herwig++, and the Repository::cleanup() method in ThePEG 1.2.0.
** Ticket #157: broken gsl library path on some 64-bit systems
   Paths with lib64 are correctly identified now.
** Ticket #159: p_t spectrum of ttbar pair
   Fixed identical particle multiplier in Sudakov form factor.
** Ticket #161: glibc segfault
   Rare segfault in MPI handler fixed.
** Ticket #165: rare infinite loop in four-body decay
   All 4-body decays without dedicated decayers now use the Mambo algorithm. A loop guard has been introduced to 3-body decays to avoid infinite retries.
** Ticket #166: rare infinite loop in top ME correction
   These very rare events (O(1) in 10^7) close to mass threshold now are discarded.
** Higgs width fixes
** SatPDF
   Optionally, the PDF extrapolation behaviour outside a given range can now be specified.
** gcc 4.3
   Herwig++-2.2 compiles cleanly with the new gcc 4.3 series.

* Herwig++-2.1.4 release: 2008-03-03 (tagged at SVN r3024)
** Ticket #152: Vertex positions
   All vertex positions of unphysical particles are set to zero until a fix for the previous nonsensical values can be implemented.

* Herwig++-2.1.3 release: 2008-02-25 (tagged at SVN r2957)
** Ticket #129: Baryon decays
   Fix for baryon decay modes.
** Ticket #131: HepMC
   Check if IO_GenEvent exists
** Ticket #134: Hadronization
   Smearing of hadron directions in cluster decay fixed.
** Ticket #137: HepMC
   HepMC conversion allows specification of energy and length units to be used.
** Ticket #139: Neutral kaons
   Ratio K_L / K_S corrected.
** Ticket #140 / #141: Crash on shutdown
   Event generation from the 'read' stage or an interface now shuts down cleanly. Fixes a crash bug introduced in 2.1.1 which affected external APIs to ThePEG / Herwig.
** Ticket #146: BSM models can be disabled
   To save build time, some or all of the BSM models can be disabled using the '--enable-models' configure switch.
** Reorganised .model files
   The .model files now include the model-specific particles, too.
** Re-tune
   Re-tuned hadronization parameters to LEP data.
** Other fixes in QSPAC implementation in Shower; Multi-parton interaction tuning; MRST initialization

* Herwig++-2.1.2 release: 2008-01-05 (tagged at SVN r2694)
** Ticket #127
   Thanks to a patch submitted by Fred Stober, HepMCFile now can output event files in all supported formats.
** Ticket #128
   Fixed incorrect value of pi in histogram limits.
** Other fixes in CKKW Qtilde clusterers, BSM width cut, SUSY mixing matrices.

* Herwig++-2.1.1 release: 2007-12-08 (tagged at SVN r2589)
** Bug #123
   Fixed a bug with particle lifetimes which resulted in nan for some vertex positions.
** Secondary scatters
   Fixed bug which gave intrinsic pT to secondary scatters.
** gcc abs bug detection
   configure now checks for and works around http://gcc.gnu.org/bugzilla/show_bug.cgi?id=34130
** CKKW reweighting
   Fixed wrong check for top quarks.
** MPIHandler
   Fixed call order ambiguity.

* Herwig++-2.1.0 release: 2007-11-20 (tagged at SVN r2542)
** Major new release
   Herwig++-2.1 includes significant improvements, including multi-parton interactions, BSM physics and a new hadronic decay model, tuned to LEP data. For an overview of the changes, please see the release note arXiv:0711.3137

* Herwig++-2.0.3 release: 2007-08-21 (tagged at SVN r2101)
** Bug #90
   nan in top decay ME corrections fixed.
** unlisted Colour flow fix in LightClusterDecayer
** unlisted Updated version of MultiplicityCount analysis handler.

* Herwig++-2.0.2 release: 2007-07-06 (tagged at SVN r1716)
** Bug #80
   Separation of HepMC from CLHEP is handled properly now.
** Bug #83
   Workaround for OS X header problem
** unlisted Veto on very hard emissions from Shower.
** unlisted Detailed documentation in .in files * Herwig++-2.0.1 release: 2006-12-05 (tagged at SVN r1195) ** Bug #54 ClusterFissioner vertex calculation fixed. ** Bug #57 Crash when showering W+jet events supplied by Les Houches interface. ** Bug #59 Fix for #57 applied to LHC events. ** Bug #60 Segfault when PDF is set to NoPDF. ** Bug #61 Missing weight factor for I=0 mesons ** Bug #62 Spinor vertex calculations broken when spinor rep is not default rep. ** Bug #63 Top decay never produces tau. ** Bug #69 TTbar and HiggsJet analysis handlers fixed. ** unlisted Reorganization of Hadronization module gives 30% speedup. Thanks to Vincenzo Innocente at CMS for his profiling work! ** unlisted cleaner automake files in include/ and src/ ** unlisted Hw64 hadron selection algorithm 'abortnow' fixed. ** unlisted Top/LeptonDalitzAnalysis removed (only worked with modified code). ** unlisted removed f'_0 from particle list, decays were not handled * Herwig++-2.0.0 release: 2006-09-28 (tagged at SVN r1066) ** Full simulation of hadron collisions diff --git a/PDF/SaSPhotonPDF.h b/PDF/SaSPhotonPDF.h --- a/PDF/SaSPhotonPDF.h +++ b/PDF/SaSPhotonPDF.h @@ -1,134 +1,134 @@ // -*- C++ -*- #ifndef Herwig_SaSPhotonPDF_H #define Herwig_SaSPhotonPDF_H // // This is the declaration of the SaSPhotonPDF class. // #include "ThePEG/PDF/PDFBase.h" namespace Herwig { using namespace ThePEG; /** * The SaSPhotonPDF class provides an interface to the * * @see \ref SaSPhotonPDFInterfaces "The interfaces" * defined for SaSPhotonPDF. */ class SaSPhotonPDF: public PDFBase { public: /** * The default constructor. */ SaSPhotonPDF() : iset_(2), ip_(0) {} public: /** @name Virtual functions to be overridden by sub-classes. */ //@{ /** * Return true if this PDF can handle the extraction of partons from * the given \a particle. */ virtual bool canHandleParticle(tcPDPtr particle) const; /** * Return the partons which this PDF may extract from the given * \a particle. */ virtual cPDVector partons(tcPDPtr particle) const; /** * The density. Return the pdf for the given \a parton inside the * given \a particle for the virtuality \a partonScale and - * logarithmic momentum fraction \a l \f$(l=\log(1/x)\$f. The \a + * logarithmic momentum fraction \a l \f$(l=\log(1/x)\f$. The \a * particle is assumed to have a virtuality \a particleScale. */ virtual double xfl(tcPDPtr particle, tcPDPtr parton, Energy2 partonScale, double l, Energy2 particleScale = 0.0*GeV2) const; /** * The valence density. Return the pdf for the given cvalence \a * parton inside the given \a particle for the virtuality \a * partonScale and logarithmic momentum fraction \a l - * \f$(l=\log(1/x)\$f. The \a particle is assumed to have a + * \f$(l=\log(1/x)\f$. The \a particle is assumed to have a * virtuality \a particleScale. If not overidden by a sub class this * will return zero. */ virtual double xfvl(tcPDPtr particle, tcPDPtr parton, Energy2 partonScale, double l, Energy2 particleScale = 0.0*GeV2) const; //@} public: /** @name Functions used by the persistent I/O system. */ //@{ /** * Function used to write out object persistently. * @param os the persistent output stream written to. */ void persistentOutput(PersistentOStream & os) const; /** * Function used to read in object persistently. * @param is the persistent input stream read from. * @param version the version number of the object when written. */ void persistentInput(PersistentIStream & is, int version); //@} /** * The standard Init function used to initialize the interfaces. 
* Called exactly once for each class by the class description system * before the main function starts or * when this class is dynamically loaded. */ static void Init(); protected: /** @name Clone Methods. */ //@{ /** * Make a simple clone of this object. * @return a pointer to the new object. */ virtual IBPtr clone() const; /** Make a clone of this object, possibly modifying the cloned object * to make it sane. * @return a pointer to the new object. */ virtual IBPtr fullclone() const; //@} private: /** * The assignment operator is private and must never be called. * In fact, it should not even be implemented. */ SaSPhotonPDF & operator=(const SaSPhotonPDF &); private: /** * PDF Set */ mutable int iset_; /** * Scheme used to evaluate off-shell anomalous component. */ mutable int ip_; }; } #endif /* Herwig_SaSPhotonPDF_H */ diff --git a/README b/README --- a/README +++ b/README @@ -1,14 +1,14 @@ ======== Herwig 7 ======== -This is the release of Herwig 7.1.3, a multi purpose event +This is the release of Herwig 7.1.4, a multi purpose event generator for high energy physics. The Herwig++ distribution contains an adapted version of LoopTools 2.6 . BUILD AND INSTALL ================= Please refer to https://herwig.hepforge.org/tutorials/ for detailed instructions. diff --git a/Shower/Dipole/Makefile.am b/Shower/Dipole/Makefile.am --- a/Shower/Dipole/Makefile.am +++ b/Shower/Dipole/Makefile.am @@ -1,23 +1,23 @@ SUBDIRS = Base Kernels Kinematics Utility AlphaS Merging Colorea pkglib_LTLIBRARIES = HwDipoleShower.la -HwDipoleShower_la_LDFLAGS = $(AM_LDFLAGS) -module -version-info 9:0:0 +HwDipoleShower_la_LDFLAGS = $(AM_LDFLAGS) -module -version-info 9:1:0 HwDipoleShower_la_LIBADD = \ Base/libHwDipoleShowerBase.la \ Kernels/libHwDipoleShowerKernels.la \ Kinematics/libHwDipoleShowerKinematics.la \ Utility/libHwDipoleShowerUtility.la \ Merging/libHwDipoleShowerMerging.la \ Colorea/libHwDipoleShowerColorea.la HwDipoleShower_la_SOURCES = \ DipoleShowerHandler.h DipoleShowerHandler.fh DipoleShowerHandler.cc pkglib_LTLIBRARIES += HwKrknloEventReweight.la HwKrknloEventReweight_la_SOURCES = \ KrkNLO/KrknloEventReweight.h KrkNLO/KrknloEventReweight.cc HwKrknloEventReweight_la_LDFLAGS = $(AM_LDFLAGS) -module -version-info 1:0:0 diff --git a/Shower/Dipole/Merging/MergingReweight.cc b/Shower/Dipole/Merging/MergingReweight.cc --- a/Shower/Dipole/Merging/MergingReweight.cc +++ b/Shower/Dipole/Merging/MergingReweight.cc @@ -1,117 +1,119 @@ // -*- C++ -*- // // MergingReweight.cc is a part of Herwig - A multi-purpose Monte Carlo event generator // Copyright (C) 2002-2017 The Herwig Collaboration // // Herwig is licenced under version 3 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. 
// #include "MergingReweight.h" using namespace Herwig; IBPtr MergingReweight::clone() const { return new_ptr(*this); } IBPtr MergingReweight::fullclone() const { return new_ptr(*this); } double MergingReweight::weight() const { Energy maxpt = ZERO; Energy ht = ZERO; Energy maxmjj = ZERO; - - for (auto const & out : subProcess()->outgoing()) + + const auto sub=subProcess()->head()?subProcess()->head():subProcess(); + + for (auto const & out : sub->outgoing()) if ( !onlyColoured || out->coloured() ){ - for (auto const & out2 : subProcess()->outgoing()) + for (auto const & out2 : sub->outgoing()) if (!onlyColoured || out2->coloured() ) maxmjj=max(maxmjj,(out->momentum()+out2->momentum()).m()); maxpt = max(maxpt, out->momentum().perp()); ht+=out->momentum().perp(); } if (maxpt==ZERO||ht==ZERO) { return 1.; } return pow(maxpt/scale, MaxPTPower)*pow(ht/scale, HTPower)*pow(maxmjj/scale,MaxMjjPower); } #include "ThePEG/Persistency/PersistentOStream.h" void MergingReweight::persistentOutput(PersistentOStream & os) const { os << HTPower << MaxPTPower<> HTPower >> MaxPTPower >>MaxMjjPower>> iunit(scale,GeV) >> onlyColoured; } // Definition of the static class description member. // *** Attention *** The following static variable is needed for the type // description system in ThePEG. Please check that the template arguments // are correct (the class and its base class), and that the constructor // arguments are correct (the class name and the name of the dynamically // loadable library where the class implementation can be found). #include "ThePEG/Utilities/DescribeClass.h" DescribeClass describeHerwigMergingReweight("Herwig::MergingReweight", "HwDipoleShower.so"); #include "ThePEG/Interface/ClassDocumentation.h" #include "ThePEG/Interface/Parameter.h" #include "ThePEG/Interface/Switch.h" void MergingReweight::Init() { static ClassDocumentation documentation ("There is no documentation for the ThePEG::MergingReweight class"); static Parameter interfacePower ("HTPower", "Ht power", &MergingReweight::HTPower, 4.0, -10.0, 10.0, false, false, true); static Parameter interfaceMaxPtPower ("MaxPTPower", "PT2 power", &MergingReweight::MaxPTPower, 4.0, -10.0, 10.0, false, false, true); static Parameter interfaceMaxMjjPower ("MaxMjjPower", "Mjj power", &MergingReweight::MaxMjjPower, 0.0, 0.0, 10.0, false, false, true); static Parameter interfaceScale ("Scale", "The reference scale", &MergingReweight::scale, GeV, 50.0*GeV, ZERO, ZERO, false, false, Interface::lowerlim); static Switch interfaceOnlyColoured ("OnlyColoured", "Only consider coloured particles in the SubProcess when finding the minimum transverse momentum.", &MergingReweight::onlyColoured, false, true, false); static SwitchOption interfaceOnlyColouredYes (interfaceOnlyColoured, "Yes", "Use only coloured particles.", true); static SwitchOption interfaceOnlyColouredNo (interfaceOnlyColoured, "No", "Use all particles.", false); } diff --git a/Shower/QTilde/Base/ShowerTree.cc b/Shower/QTilde/Base/ShowerTree.cc --- a/Shower/QTilde/Base/ShowerTree.cc +++ b/Shower/QTilde/Base/ShowerTree.cc @@ -1,913 +1,914 @@ // -*- C++ -*- // // ShowerTree.cc is a part of Herwig - A multi-purpose Monte Carlo event generator // Copyright (C) 2002-2017 The Herwig Collaboration // // Herwig is licenced under version 3 of the GPL, see COPYING for details. // Please respect the MCnet academic guidelines, see GUIDELINES for details. 
// #include "ThePEG/EventRecord/MultiColour.h" #include "ThePEG/Repository/EventGenerator.h" #include "ShowerTree.h" #include "Herwig/Shower/QTilde/Base/ShowerParticle.h" #include "Herwig/Shower/ShowerHandler.h" #include "ThePEG/PDT/DecayMode.h" #include "ThePEG/Handlers/EventHandler.h" #include "ThePEG/Handlers/XComb.h" #include #include "ThePEG/Repository/CurrentGenerator.h" #include "ThePEG/PDT/StandardMatchers.h" using namespace Herwig; using namespace ThePEG; bool ShowerTree::_spaceTime = false; Energy2 ShowerTree::_vmin2 = 0.1*GeV2; namespace { void findBeam(tPPtr & beam, PPtr incoming) { + if(beam==incoming) return; while(!beam->children().empty()) { bool found=false; for(unsigned int ix=0;ixchildren().size();++ix) { if(beam->children()[ix]==incoming) { found = true; break; } } if(found) break; beam = beam->children()[0]; } } } ShowerTree::ShowerTree(PerturbativeProcessPtr process) : _parent(), _hasShowered(false) { // get the incoming and outgoing particles and make copies vector original,copy; for(unsigned int ix=0;ixincoming().size();++ix) { original.push_back(process->incoming()[ix].first); copy .push_back(new_ptr(Particle(*original.back()))); } for(unsigned int ix=0;ixoutgoing().size();++ix) { original.push_back(process->outgoing()[ix].first); copy .push_back(new_ptr(Particle(*original.back()))); } // isolate the colour colourIsolate(original,copy); // hard process unsigned int itype(1); if(process->incoming().size()==2) { _wasHard = true; // set the beams and incoming particles tPPair beam = CurrentGenerator::current().currentEvent()->incoming(); findBeam(beam.first ,process->incoming()[0].first); findBeam(beam.second,process->incoming()[1].first); _incoming = make_pair(process->incoming()[0].first, process->incoming()[1].first); double x1(_incoming.first ->momentum().rho()/beam.first ->momentum().rho()); double x2(_incoming.second->momentum().rho()/beam.second->momentum().rho()); // must have two incoming particles assert(_incoming.first && _incoming.second); // set the parent tree _parent=ShowerTreePtr(); for(unsigned int ix=0;ix<2;++ix) { ShowerParticlePtr temp=new_ptr(ShowerParticle(*copy[ix],itype,false)); fixColour(temp); temp->x(ix==0 ? 
x1 : x2); _incomingLines.insert(make_pair(new_ptr(ShowerProgenitor(original[ix], copy[ix],temp)),temp)); } } // decay process else if(process->incoming().size()==1) { _wasHard = false; itype=2; // create the parent shower particle ShowerParticlePtr sparent(new_ptr(ShowerParticle(*copy[0],itype,false))); fixColour(sparent); _incomingLines.insert(make_pair(new_ptr(ShowerProgenitor(original[0],copy[0],sparent)) ,sparent)); // return if not decayed if(original.size()==1) return; } else assert(false); // create the children assert(copy.size() == original.size()); for (unsigned int ix=process->incoming().size();ixid()==progenitor->id(); } ShowerParticlePtr newpart; if(matches[0]&&matches[1]) { if(parent->showerKinematics()->z()>0.5) newpart=children[0]; else newpart=children[1]; } else if(matches[0]) newpart=children[0]; else if(matches[1]) newpart=children[1]; _outgoingLines[progenitor]=newpart; } void ShowerTree::updateInitialStateShowerProduct(ShowerProgenitorPtr progenitor, ShowerParticlePtr newParent) { _incomingLines[progenitor]=newParent; } void ShowerTree::insertHard(StepPtr pstep, bool ISR, bool) { assert(_incomingLines.size()==2); colourLines().clear(); map::const_iterator cit; // construct the map of colour lines for hard process for(cit=_incomingLines.begin();cit!=_incomingLines.end();++cit) { if(!cit->first->perturbative()) continue; mapColour(cit->first->original(),cit->first->copy()); } map::const_iterator cjt; for(cjt=_outgoingLines.begin();cjt!=_outgoingLines.end();++cjt) { if(!cjt->first->perturbative()) continue; mapColour(cjt->first->original(),cjt->first->copy()); } // initial-state radiation if(ISR) { for(cit=incomingLines().begin();cit!=incomingLines().end();++cit) { ShowerParticlePtr init=(*cit).first->progenitor(); assert(init->thePEGBase()); PPtr original = (*cit).first->original(); if(original->parents().empty()) continue; PPtr hadron= original->parents()[0]; assert(!original->children().empty()); PPtr copy=cit->first->copy(); ParticleVector intermediates=original->children(); for(unsigned int ix=0;ixabandonChild(intermediates[ix]); copy->abandonChild(intermediates[ix]); } // if not from a matrix element correction if(cit->first->perturbative()) { // break mother/daugther relations init->addChild(original); hadron->abandonChild(original); // if particle showers add shower if(cit->first->hasEmitted()) { addInitialStateShower(init,hadron,pstep,false); } // no showering for this particle else { updateColour(init,false); hadron->addChild(init); pstep->addIntermediate(init); init->setLifeLength(Lorentz5Distance()); init->setVertex(LorentzPoint()); } } // from matrix element correction else { // break mother/daugther relations hadron->abandonChild(original); copy->addChild(original); updateColour(copy,false); init->addChild(copy); pstep->addIntermediate(copy); copy->setLifeLength(Lorentz5Distance()); copy->setVertex(LorentzPoint()); // if particle showers add shower if(cit->first->hasEmitted()) { addInitialStateShower(init,hadron,pstep,false); } // no showering for this particle else { updateColour(init,false); hadron->addChild(init); pstep->addIntermediate(init); init->setLifeLength(Lorentz5Distance()); init->setVertex(LorentzPoint()); } } } } else { for(cit=incomingLines().begin();cit!=incomingLines().end();++cit) { ShowerParticlePtr init=(*cit).first->progenitor(); assert(init->thePEGBase()); PPtr original = (*cit).first->original(); if(original->parents().empty()) continue; PPtr hadron= original->parents()[0]; assert(!original->children().empty()); PPtr 
copy=cit->first->copy(); ParticleVector intermediates=original->children(); for(unsigned int ix=0;ixabandonChild(intermediates[ix]); copy->abandonChild(intermediates[ix]); } // break mother/daugther relations init->addChild(original); hadron->abandonChild(original); // no showering for this particle updateColour(init,false); hadron->addChild(init); pstep->addIntermediate(init); init->setLifeLength(Lorentz5Distance()); init->setVertex(LorentzPoint()); original->setLifeLength(Lorentz5Distance()); original->setVertex(LorentzPoint()); } } // final-state radiation for(cjt=outgoingLines().begin();cjt!=outgoingLines().end();++cjt) { ShowerParticlePtr init=(*cjt).first->progenitor(); assert(init->thePEGBase()); // ZERO the distance of original (*cjt).first->original()->setLifeLength(Lorentz5Distance()); (*cjt).first->original()->setVertex(LorentzPoint()); // if not from a matrix element correction if(cjt->first->perturbative()) { // register the shower particle as a // copy of the one from the hard process tParticleVector parents=init->parents(); for(unsigned int ix=0;ixabandonChild(init); (*cjt).first->original()->addChild(init); pstep->addDecayProduct(init); } // from a matrix element correction else { if(cjt->first->original()==_incoming.first|| cjt->first->original()==_incoming.second) { updateColour((*cjt).first->copy(),false); (*cjt).first->original()->parents()[0]-> addChild((*cjt).first->copy()); pstep->addDecayProduct((*cjt).first->copy()); (*cjt).first->copy()->addChild(init); pstep->addDecayProduct(init); } else { updateColour((*cjt).first->copy(),false); (*cjt).first->original()->addChild((*cjt).first->copy()); pstep->addDecayProduct((*cjt).first->copy()); (*cjt).first->copy()->addChild(init); pstep->addDecayProduct(init); } // ZERO the distance of copy ??? \todo change if add space-time (*cjt).first->copy()->setLifeLength(Lorentz5Distance()); (*cjt).first->copy()->setVertex(LorentzPoint()); } // copy so travels no distance init->setLifeLength(Lorentz5Distance()); init->setVertex(init->parents()[0]->decayVertex()); // sort out the colour updateColour(init,false); // insert shower products addFinalStateShower(init,pstep); } colourLines().clear(); } void ShowerTree::addFinalStateShower(PPtr p, StepPtr s) { // if endpoint assume doesn't travel if(p->children().empty()) { if(p->dataPtr()->stable()||ShowerHandler::currentHandler()->decaysInShower(p->id())) p->setLifeLength(Lorentz5Distance()); else { Energy mass = p->mass()!=ZERO ? 
p->mass() : p->dataPtr()->mass(); Length ctau = p->dataPtr()->generateLifeTime(mass, p->dataPtr()->width()); Lorentz5Distance lifeLength(ctau,p->momentum().vect()*(ctau/mass)); p->setLifeLength(lifeLength); } return; } // set the space-time distance else { p->setLifeLength(spaceTimeDistance(p)); } ParticleVector::const_iterator child; for(child=p->children().begin(); child != p->children().end(); ++child) { updateColour(*child,false); s->addDecayProduct(*child); (**child).setVertex(p->decayVertex()); addFinalStateShower(*child,s); } } void ShowerTree::addInitialStateShower(PPtr p, PPtr hadron, StepPtr s, bool addchildren) { // Each parton here should only have one parent if(!p->parents().empty()) { if(p->parents().size()!=1) throw Exception() << "Particle must only have one parent in ShowerTree" << "::addInitialStateShower" << Exception::runerror; // set the space-time distances if(addchildren) { p->setLifeLength(spaceTimeDistance(p)); p->setVertex(p->children()[0]->vertex()-p->lifeLength()); } else { p->setLifeLength(spaceTimeDistance(p)); p->setVertex(-p->lifeLength()); } // recurse addInitialStateShower(p->parents()[0],hadron,s); } else { hadron->addChild(p); s->addIntermediate(p); p->setVertex(p->children()[0]->vertex()); p->setLifeLength(Lorentz5Distance()); } updateColour(p,false); // if not adding children return if(!addchildren) return; // add children ParticleVector::const_iterator child; for(child = p->children().begin(); child != p->children().end(); ++child) { // if a final-state particle update the colour ShowerParticlePtr schild = dynamic_ptr_cast(*child); (**child).setVertex(p->decayVertex()); if(schild && schild->isFinalState()) updateColour(*child,false); // if there are grandchildren of p if(!(*child)->children().empty()) { // Add child as intermediate s->addIntermediate(*child); // If child is shower particle and final-state, add children if(schild && schild->isFinalState()) addFinalStateShower(schild,s); } else s->addDecayProduct(*child); } } void ShowerTree::update(PerturbativeProcessPtr newProcess) { // must be one incoming particle assert(_incomingLines.size()==1); colourLines().clear(); // copy the particles and isolate the colour vector original,copy; for(unsigned int ix=0;ixincoming().size();++ix) { original.push_back(newProcess->incoming()[ix].first); copy .push_back(new_ptr(Particle(*original.back()))); } for(unsigned int ix=0;ixoutgoing().size();++ix) { original.push_back(newProcess->outgoing()[ix].first); copy .push_back(new_ptr(Particle(*original.back()))); } // isolate the colour colourIsolate(original,copy); // make the new progenitor ShowerParticlePtr stemp=new_ptr(ShowerParticle(*copy[0],2,false)); fixColour(stemp); ShowerProgenitorPtr newprog=new_ptr(ShowerProgenitor(original[0],copy[0],stemp)); _incomingLines.clear(); _incomingLines.insert(make_pair(newprog,stemp)); // create the children assert(copy.size() == original.size()); for (unsigned int ix=newProcess->incoming().size();ix_treelinks.empty()) final = _parent->_treelinks[this].second; else final=_incomingLines.begin()->first->original(); // construct the map of colour lines PPtr copy=_incomingLines.begin()->first->copy(); mapColour(final,copy); // now this is the ONE instance of the particle which should have a life length // \todo change if space-time picture added // set the lifelength, need this so that still in right direction after // any ISR recoils Length ctau = copy->lifeTime(); Lorentz5Distance lifeLength(ctau,final->momentum().vect()*(ctau/final->mass())); 
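// [Aside] Both here and in addFinalStateShower above, a proper decay length
// ctau is turned into a lab-frame displacement by scaling the four-momentum
// with ctau/m, i.e. |x| = beta*gamma*ctau and c*t = gamma*ctau.  The sketch
// below reproduces that conversion with plain doubles (GeV and mm); it
// additionally assumes that the proper length is exponentially distributed
// with mean hbar*c/Gamma, which is what generateLifeTime is taken to provide
// here.
#include <array>
#include <cmath>
#include <random>

namespace sketch {

constexpr double hbarc = 0.1973269804e-12;  // GeV * mm

// Sample a proper decay length (mm) for a particle of width gammaGeV (GeV).
inline double sampleProperLength(double gammaGeV, std::mt19937 & rng) {
  std::exponential_distribution<double> expo(1.0);
  return expo(rng) * hbarc / gammaGeV;      // mean c*tau_0 = hbar*c / Gamma
}

// Lab-frame displacement (x, y, z, c*t) in mm from proper length ctau (mm),
// mass m and momentum components px, py, pz, E (all GeV).
inline std::array<double,4> labDisplacement(double ctau, double m,
                                            double px, double py,
                                            double pz, double E) {
  const double f = ctau / m;
  return {px*f, py*f, pz*f, E*f};
}

} // namespace sketch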
final->setLifeLength(lifeLength); // initial-state radiation if(ISR&&!_incomingLines.begin()->first->progenitor()->children().empty()) { ShowerParticlePtr init=_incomingLines.begin()->first->progenitor(); updateColour(init,false); final->addChild(init); pstep->addDecayProduct(init); // just a copy doesn't travel init->setLifeLength(Lorentz5Distance()); init->setVertex(final->decayVertex()); // insert shower products addFinalStateShower(init,pstep); // sort out colour final=_incomingLines.begin()->second; colourLines().clear(); mapColour(final,copy); } // get the decaying particles // make the copy tColinePair cline=make_pair(copy->colourLine(),copy->antiColourLine()); updateColour(copy,false); // sort out sinks and sources if needed if(cline.first) { if(cline.first->sourceNeighbours().first) { copy->colourLine()->setSourceNeighbours(cline.first->sourceNeighbours().first, cline.first->sourceNeighbours().second); } else if (cline.first->sinkNeighbours().first) { copy->colourLine()->setSinkNeighbours(cline.first->sinkNeighbours().first, cline.first->sinkNeighbours().second); } } if(cline.second) { if(cline.second->sourceNeighbours().first) { copy->antiColourLine()->setSourceNeighbours(cline.second->sourceNeighbours().first, cline.second->sourceNeighbours().second); } else if (cline.second->sinkNeighbours().first) { copy->antiColourLine()->setSinkNeighbours(cline.second->sinkNeighbours().first, cline.second->sinkNeighbours().second); } } // copy of the one from the hard process tParticleVector dpar=copy->parents(); for(unsigned int ix=0;ixabandonChild(copy); final->addChild(copy); pstep->addDecayProduct(copy); // just a copy does not move copy->setLifeLength(Lorentz5Distance()); copy->setVertex(final->decayVertex()); // final-state radiation map::const_iterator cit; for(cit=outgoingLines().begin();cit!=outgoingLines().end();++cit) { ShowerParticlePtr init=cit->first->progenitor(); // ZERO the distance init->setLifeLength(Lorentz5Distance()); if(!init->thePEGBase()) throw Exception() << "Final-state particle must have a ThePEGBase" << " in ShowerTree::insertDecay()" << Exception::runerror; // if not from matrix element correction if(cit->first->perturbative()) { // add the child updateColour(cit->first->copy(),false); PPtr orig=cit->first->original(); orig->setLifeLength(Lorentz5Distance()); orig->setVertex(copy->decayVertex()); copy->addChild(orig); pstep->addDecayProduct(orig); orig->addChild(cit->first->copy()); pstep->addDecayProduct(cit->first->copy()); // register the shower particle as a // copy of the one from the hard process tParticleVector parents=init->parents(); for(unsigned int ix=0;ixabandonChild(init);} (*cit).first->copy()->addChild(init); pstep->addDecayProduct(init); updateColour(init,false); } // from a matrix element correction else { if(copy->children().end()== find(copy->children().begin(),copy->children().end(), cit->first->original())) { updateColour(cit->first->original(),false); copy->addChild(cit->first->original()); pstep->addDecayProduct(cit->first->original()); } updateColour(cit->first->copy(),false); cit->first->original()->addChild(cit->first->copy()); pstep->addDecayProduct(cit->first->copy()); // register the shower particle as a // copy of the one from the hard process tParticleVector parents=init->parents(); for(unsigned int ix=0;ixabandonChild(init);} (*cit).first->copy()->addChild(init); pstep->addDecayProduct(init); updateColour(init,false); } // ZERO the distances as just copies cit->first->copy()->setLifeLength(Lorentz5Distance()); 
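// [Aside] The sink/source handling earlier in insertDecay() re-attaches the
// colour-junction neighbours of the decaying particle's original colour and
// anti-colour lines to the corresponding lines of its copy.  The sketch below
// shows the same pattern with a minimal stand-in ColourLine type; the struct
// and transferJunctions() are illustrative only and far simpler than ThePEG's
// ColourLine.
#include <memory>
#include <utility>

namespace sketch {

struct ColourLine;
using ColinePtr = std::shared_ptr<ColourLine>;

struct ColourLine {
  // partner lines of a three-line colour source or sink (junction), if any
  std::pair<ColinePtr,ColinePtr> sourceNeighbours, sinkNeighbours;
};

// Carry the junction connections of the original line over to its replacement.
inline void transferJunctions(const ColinePtr & original,
                              const ColinePtr & replacement) {
  if (!original || !replacement) return;
  if (original->sourceNeighbours.first)
    replacement->sourceNeighbours = original->sourceNeighbours;
  else if (original->sinkNeighbours.first)
    replacement->sinkNeighbours = original->sinkNeighbours;
}

} // namespace sketch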
init->setLifeLength(Lorentz5Distance()); cit->first->copy()->setVertex(copy->decayVertex()); init->setVertex(copy->decayVertex()); // insert shower products addFinalStateShower(init,pstep); } colourLines().clear(); } void ShowerTree::clear() { // reset the has showered flag _hasShowered=false; // clear the colour map colourLines().clear(); map::const_iterator cit; map::const_iterator cjt; // abandon the children of the outgoing particles for(cit=_outgoingLines.begin();cit!=_outgoingLines.end();++cit) { ShowerParticlePtr orig=cit->first->progenitor(); orig->set5Momentum(cit->first->copy()->momentum()); ParticleVector children=orig->children(); for(unsigned int ix=0;ixabandonChild(children[ix]); _outgoingLines[cit->first]=orig; cit->first->hasEmitted(false); cit->first->reconstructed(ShowerProgenitor::notReconstructed); } // forward products _forward.clear(); for(cit=_outgoingLines.begin();cit!=_outgoingLines.end();++cit) _forward.insert(cit->first->progenitor()); // if a decay if(!_wasHard) { ShowerParticlePtr orig=_incomingLines.begin()->first->progenitor(); orig->set5Momentum(_incomingLines.begin()->first->copy()->momentum()); ParticleVector children=orig->children(); for(unsigned int ix=0;ixabandonChild(children[ix]); _incomingLines.begin()->first->reconstructed(ShowerProgenitor::notReconstructed); } // if a hard process else { for(cjt=_incomingLines.begin();cjt!=_incomingLines.end();++cjt) { tPPtr parent = cjt->first->original()->parents().empty() ? tPPtr() : cjt->first->original()->parents()[0]; ShowerParticlePtr temp= new_ptr(ShowerParticle(*cjt->first->copy(), cjt->first->progenitor()->perturbative(), cjt->first->progenitor()->isFinalState())); fixColour(temp); temp->x(cjt->first->progenitor()->x()); cjt->first->hasEmitted(false); if(!(cjt->first->progenitor()==cjt->second)&&cjt->second&&parent) parent->abandonChild(cjt->second); cjt->first->progenitor(temp); _incomingLines[cjt->first]=temp; cjt->first->reconstructed(ShowerProgenitor::notReconstructed); } } clearTransforms(); } void ShowerTree::resetShowerProducts() { map::const_iterator cit; map::const_iterator cjt; _forward.clear(); for(cjt=_outgoingLines.begin();cjt!=_outgoingLines.end();++cjt) _forward.insert(cjt->second); } void ShowerTree::updateAfterShower(ShowerDecayMap & decay) { // update the links map::const_iterator mit; map >::iterator tit; for(tit=_treelinks.begin();tit!=_treelinks.end();++tit) { if(tit->second.first) { mit=_outgoingLines.find(tit->second.first); if(mit!=_outgoingLines.end()) tit->second.second=mit->second; } } // get the particles coming from those in the hard process set hard; for(mit=_outgoingLines.begin();mit!=_outgoingLines.end();++mit) hard.insert(mit->second); // find the shower particles which should be decayed in the // shower but didn't come from the hard process set::const_iterator cit; // TODO construct _forward here? 
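// [Aside] The loop that follows picks out shower particles that are unstable,
// are flagged to decay inside the shower, and did not come from the hard
// process, and books a decay ShowerTree for each of them keyed by its width.
// A standalone sketch of that selection is given below; ParticleInfo and the
// decaysInShower predicate are stand-ins, not Herwig types.
#include <map>
#include <set>
#include <vector>

namespace sketch {

struct ParticleInfo {
  long   id;
  bool   stable;
  double width;   // GeV
};

// Mimics filling the width-keyed ShowerDecayMap in the loop below.
template <typename DecaysInShower>
std::multimap<double, ParticleInfo>
selectShowerDecays(const std::vector<ParticleInfo> & forward,
                   const std::set<long> & hardIds,
                   DecaysInShower decaysInShower) {
  std::multimap<double, ParticleInfo> decays;
  for (const auto & p : forward)
    if (decaysInShower(p.id) && !p.stable && hardIds.count(p.id) == 0)
      decays.insert({p.width, p});
  return decays;
}

} // namespace sketch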
for(cit=_forward.begin();cit!=_forward.end();++cit) { if((ShowerHandler::currentHandler()->decaysInShower((**cit).id())&& !(**cit).dataPtr()->stable()) && hard.find(*cit)==hard.end()) { PerturbativeProcessPtr newProcess(new_ptr(PerturbativeProcess())); newProcess->incoming().push_back(make_pair(*cit,PerturbativeProcessPtr())); ShowerTreePtr newtree=new_ptr(ShowerTree(newProcess)); newtree->setParents(); newtree->_parent=this; Energy width=(**cit).dataPtr()->generateWidth((**cit).mass()); decay.insert(make_pair(width,newtree)); _treelinks.insert(make_pair(newtree, make_pair(tShowerProgenitorPtr(),*cit))); } } } void ShowerTree::addFinalStateBranching(ShowerParticlePtr parent, const ShowerParticleVector & children) { assert(children.size()==2); _forward.erase(parent); for(unsigned int ix=0; ix >::const_iterator tit; for(tit=_treelinks.begin();tit!=_treelinks.end();++tit) tit->first->_parent=this; } vector ShowerTree::extractProgenitors() { // extract the particles from the ShowerTree map::const_iterator mit; vector ShowerHardJets; for(mit=incomingLines().begin();mit!=incomingLines().end();++mit) ShowerHardJets.push_back((*mit).first); map::const_iterator mjt; for(mjt=outgoingLines().begin();mjt!=outgoingLines().end();++mjt) ShowerHardJets.push_back((*mjt).first); return ShowerHardJets; } void ShowerTree::transform(const LorentzRotation & boost, bool applyNow) { if(applyNow) { // now boost all the particles map::const_iterator cit; // incoming for(cit=_incomingLines.begin();cit!=_incomingLines.end();++cit) { cit->first->progenitor()->deepTransform(boost); cit->first->copy()->deepTransform(boost); } // outgoing map::const_iterator cjt; for(cjt=_outgoingLines.begin();cjt!=_outgoingLines.end();++cjt) { cjt->first->progenitor()->deepTransform(boost); cjt->first->copy()->deepTransform(boost); } } else { _transforms.transform(boost); } // child trees for(map >::const_iterator tit=_treelinks.begin();tit!=_treelinks.end();++tit) tit->first->transform(boost,applyNow); } void ShowerTree::applyTransforms() { // now boost all the particles map::const_iterator cit; // incoming for(cit=_incomingLines.begin();cit!=_incomingLines.end();++cit) { cit->first->progenitor()->deepTransform(_transforms); cit->first->copy()->deepTransform(_transforms); } // outgoing map::const_iterator cjt; for(cjt=_outgoingLines.begin();cjt!=_outgoingLines.end();++cjt) { cjt->first->progenitor()->deepTransform(_transforms); cjt->first->copy()->deepTransform(_transforms); } // child trees for(map >::const_iterator tit=_treelinks.begin();tit!=_treelinks.end();++tit) tit->first->applyTransforms(); _transforms = LorentzRotation(); } void ShowerTree::clearTransforms() { _transforms = LorentzRotation(); // // child trees // for(map >::const_iterator // tit=_treelinks.begin();tit!=_treelinks.end();++tit) // tit->first->clearTransforms(); } void ShowerTree::fixColour(tShowerParticlePtr part) { if(!part->colourInfo()->colourLines().empty()) { if(part->colourInfo()->colourLines().size()==1) { ColinePtr line=part->colourLine(); if(line) { line->removeColoured(part); line->addColoured(part); } } else { Ptr::pointer colour = dynamic_ptr_cast::pointer>(part->colourInfo()); vector lines = colour->colourLines(); for(unsigned int ix=0;ix(lines[ix]); if(line) { line->removeColoured(part); line->addColoured(part); } } } } if(!part->colourInfo()->antiColourLines().empty()) { if(part->colourInfo()->antiColourLines().size()==1) { ColinePtr line=part->antiColourLine(); if(line) { line->removeAntiColoured(part); line->addAntiColoured(part); } } else { 
Ptr::pointer colour = dynamic_ptr_cast::pointer>(part->colourInfo()); vector lines = colour->antiColourLines(); for(unsigned int ix=0;ix(lines[ix]); if(line) { line->removeAntiColoured(part); line->addAntiColoured(part); } } } } } vector ShowerTree::extractProgenitorParticles() { vector particles; // incoming particles for(map::const_iterator cit=incomingLines().begin(); cit!=incomingLines().end();++cit) particles.push_back(cit->first->progenitor()); // outgoing particles for(map::const_iterator cjt=outgoingLines().begin(); cjt!=outgoingLines().end();++cjt) particles.push_back(cjt->first->progenitor()); return particles; } Lorentz5Distance ShowerTree::spaceTimeDistance(tPPtr particle) { if(!_spaceTime) return Lorentz5Distance(); Energy2 q2 = particle->mass() > ZERO ? sqr(particle->mass()) : -sqr(particle->mass()); const tcPDPtr data = particle->dataPtr(); // calculate width imposing min value Energy conMass = max(data->constituentMass(),200*MeV); Energy width = max(data->generateWidth(particle->mass()),_vmin2/conMass); // offshellness Energy2 offShell = q2-sqr(data->constituentMass()); if(abs(offShell)<1e-10*GeV2) offShell = ZERO; InvEnergy2 fact = UseRandom::rndExp(1./sqrt((sqr(offShell)+sqr(q2*width/conMass)))); Lorentz5Distance output = (hbarc*fact)*particle->momentum(); return output; } void ShowerTree::constructTrees(ShowerTreePtr & hardTree, ShowerDecayMap & decayTrees, PerturbativeProcessPtr hard, DecayProcessMap decay) { map treeMap; // convert the hard process if(hardTree) { if(hardTree->isDecay()) hardTree->update(hard); } else { hardTree = new_ptr(ShowerTree(hard)); } treeMap.insert(make_pair(hard,hardTree)); for(DecayProcessMap::const_iterator it=decay.begin();it!=decay.end();++it) { ShowerTreePtr newTree = new_ptr(ShowerTree(it->second)); treeMap.insert(make_pair(it->second,newTree)); decayTrees.insert(make_pair(it->first,newTree)); } // set up the links between the trees for(map::const_iterator it=treeMap.begin(); it!=treeMap.end();++it) { // links to daughter trees map::const_iterator mit; for(mit=it->second->_outgoingLines.begin(); mit!=it->second->_outgoingLines.end();++mit) { unsigned int iloc=0; for(;ilocfirst->outgoing().size();++iloc) { if(it->first->outgoing()[iloc].first==mit->first->original()) break; } if(it->first->outgoing()[iloc].second) it->second->_treelinks.insert(make_pair(treeMap[it->first->outgoing()[iloc].second], make_pair(mit->first,mit->first->progenitor()))); } // links to parent trees if(it->first->incoming()[0].second) { it->second->_parent = treeMap[it->first->incoming()[0].second]; } } } namespace { Lorentz5Momentum sumMomenta(tPPtr particle) { if(particle->children().empty()) return particle->momentum(); Lorentz5Momentum output; for(unsigned int ix=0;ixchildren().size();++ix) { output += sumMomenta(particle->children()[ix]); } return output; } } void ShowerTree::checkMomenta() { vector pin; for(map::const_iterator it=_incomingLines.begin(); it!=_incomingLines.end();++it) { if(isDecay()) { tPPtr parent = it->first->progenitor(); pin.push_back(parent->momentum()); while(!parent->children().empty()) { pin.back() -= sumMomenta(parent->children()[1]); parent = parent->children()[0]; } } else { tPPtr parent = it->second; pin.push_back(parent->momentum()); while(!parent->children().empty()&&parent->children().size()==2) { pin.back() -= sumMomenta(parent->children()[1]); parent = parent->children()[0]; if(parent==it->first->progenitor()) break; } } } vector pout; for(map::const_iterator it= _outgoingLines.begin(); it!=_outgoingLines.end();++it) { 
pout.push_back(sumMomenta(it->first->progenitor())); } Lorentz5Momentum psum; for(unsigned int ix=0;ix< pin.size();++ix) { CurrentGenerator::log() << "pin " << ix << pin[ix]/GeV << "\n"; psum +=pin[ix]; } CurrentGenerator::log() << "In total " << psum/GeV << " " << psum.m()/GeV << "\n"; Lorentz5Momentum psum2; for(unsigned int ix=0;ix< pout.size();++ix) { CurrentGenerator::log() << "pout " << ix << pout[ix]/GeV << "\n"; psum2 +=pout[ix]; } CurrentGenerator::log() << "Out total " << psum2/GeV << " " << psum2.m()/GeV << "\n"; CurrentGenerator::log() << "Total " << (psum-psum2)/GeV << "\n"; } RealEmissionProcessPtr ShowerTree::perturbativeProcess() { RealEmissionProcessPtr output(new_ptr(RealEmissionProcess())); // add the incoming particles unsigned int ix=0; pair x; for(map::const_iterator it=_incomingLines.begin(); it!=_incomingLines.end();++it) { output->bornIncoming().push_back(it->first->progenitor()); if(!it->first->original()->parents().empty()) output->hadrons().push_back(it->first->original()->parents()[0]); else output->hadrons().push_back(it->first->progenitor()); if(ix==0) x.first = it->second->x(); else x.second = it->second->x(); ++ix; } output->x(x); // add the outgoing particles for(map::const_iterator it= _outgoingLines.begin(); it!=_outgoingLines.end();++it) { output->bornOutgoing().push_back(it->first->progenitor()); } return output; } void ShowerTree::setVetoes(const map & pTs, unsigned int type) { if(type==1||type==3) { for(map::const_iterator it=_incomingLines.begin(); it!=_incomingLines.end();++it) { for(map::const_iterator jt=pTs.begin();jt!=pTs.end();++jt) it->first->maximumpT(jt->second,jt->first); } } if(type==2||type==3) { for(map::const_iterator it= _outgoingLines.begin(); it!=_outgoingLines.end();++it) { for(map::const_iterator jt=pTs.begin();jt!=pTs.end();++jt) it->first->maximumpT(jt->second,jt->first); } } } diff --git a/Shower/QTilde/Makefile.am b/Shower/QTilde/Makefile.am --- a/Shower/QTilde/Makefile.am +++ b/Shower/QTilde/Makefile.am @@ -1,45 +1,45 @@ SUBDIRS = Matching pkglib_LTLIBRARIES = HwShower.la -HwShower_la_LDFLAGS = $(AM_LDFLAGS) -module -version-info 25:1:0 +HwShower_la_LDFLAGS = $(AM_LDFLAGS) -module -version-info 25:2:0 HwShower_la_SOURCES = \ Couplings/ShowerAlphaQCD.h Couplings/ShowerAlphaQCD.cc \ Couplings/ShowerAlphaQED.h Couplings/ShowerAlphaQED.cc\ QTildeShowerHandler.h QTildeShowerHandler.fh QTildeShowerHandler.cc \ SplittingFunctions/HalfHalfOneSplitFn.h SplittingFunctions/HalfHalfOneSplitFn.cc\ SplittingFunctions/OneOneOneSplitFn.h SplittingFunctions/OneOneOneSplitFn.cc\ SplittingFunctions/OneOneOneMassiveSplitFn.h SplittingFunctions/OneOneOneMassiveSplitFn.cc\ SplittingFunctions/ZeroZeroOneSplitFn.h SplittingFunctions/ZeroZeroOneSplitFn.cc\ SplittingFunctions/OneHalfHalfSplitFn.h SplittingFunctions/OneHalfHalfSplitFn.cc\ SplittingFunctions/HalfOneHalfSplitFn.h SplittingFunctions/HalfOneHalfSplitFn.cc\ SplittingFunctions/CMWOneOneOneSplitFn.h SplittingFunctions/CMWOneOneOneSplitFn.cc\ SplittingFunctions/CMWHalfHalfOneSplitFn.h SplittingFunctions/CMWHalfHalfOneSplitFn.cc\ Kinematics/Decay_QTildeShowerKinematics1to2.cc \ Kinematics/Decay_QTildeShowerKinematics1to2.h \ Kinematics/IS_QTildeShowerKinematics1to2.cc Kinematics/IS_QTildeShowerKinematics1to2.h \ Kinematics/FS_QTildeShowerKinematics1to2.cc Kinematics/FS_QTildeShowerKinematics1to2.h \ Kinematics/KinematicsReconstructor.cc \ Kinematics/KinematicsReconstructor.tcc \ Kinematics/KinematicsReconstructor.h \ Kinematics/KinematicsReconstructor.fh \ Base/HardTree.cc Base/HardTree.h 
Base/HardTree.fh \ Base/HardBranching.h Base/HardBranching.fh Base/HardBranching.cc\ Base/PartnerFinder.h Base/PartnerFinder.fh Base/PartnerFinder.cc \ Base/ShowerVeto.h Base/ShowerVeto.fh Base/ShowerVeto.cc \ Base/FullShowerVeto.h Base/FullShowerVeto.fh Base/FullShowerVeto.cc \ SplittingFunctions/SplittingGenerator.cc SplittingFunctions/SplittingGenerator.h\ SplittingFunctions/SplittingGenerator.fh \ Base/ShowerTree.h Base/ShowerTree.fh Base/ShowerTree.cc \ ShowerConfig.h ShowerConfig.cc \ Base/Branching.h \ Base/ShowerParticle.cc Base/ShowerParticle.fh Base/ShowerParticle.h \ Kinematics/ShowerKinematics.fh Kinematics/ShowerKinematics.h Kinematics/ShowerKinematics.cc \ Kinematics/ShowerBasis.fh Kinematics/ShowerBasis.h Kinematics/ShowerBasis.cc \ Base/ShowerProgenitor.fh Base/ShowerProgenitor.h \ SplittingFunctions/SudakovFormFactor.cc SplittingFunctions/SudakovFormFactor.h SplittingFunctions/SudakovFormFactor.fh \ SplittingFunctions/SudakovCutOff.cc SplittingFunctions/SudakovCutOff.h SplittingFunctions/SudakovCutOff.fh \ SplittingFunctions/PTCutOff.cc SplittingFunctions/PTCutOff.h \ SplittingFunctions/MassCutOff.cc SplittingFunctions/MassCutOff.h \ SplittingFunctions/VariableMassCutOff.cc SplittingFunctions/VariableMassCutOff.h \ SplittingFunctions/SplittingFunction.h SplittingFunctions/SplittingFunction.fh \ SplittingFunctions/SplittingFunction.cc \ Base/ShowerVertex.cc Base/ShowerVertex.fh Base/ShowerVertex.h diff --git a/Tests/Makefile.am b/Tests/Makefile.am --- a/Tests/Makefile.am +++ b/Tests/Makefile.am @@ -1,401 +1,397 @@ AM_LDFLAGS += -module -avoid-version -rpath /dummy/path/not/used EXTRA_DIST = Inputs python Rivet EXTRA_LTLIBRARIES = LeptonTest.la GammaTest.la HadronTest.la DISTest.la if WANT_LIBFASTJET EXTRA_LTLIBRARIES += HadronJetTest.la LeptonJetTest.la HadronJetTest_la_SOURCES = \ Hadron/VHTest.h Hadron/VHTest.cc\ Hadron/VTest.h Hadron/VTest.cc\ Hadron/HTest.h Hadron/HTest.cc HadronJetTest_la_CPPFLAGS = $(AM_CPPFLAGS) $(FASTJETINCLUDE) \ -I$(FASTJETPATH) HadronJetTest_la_LIBADD = $(FASTJETLIBS) LeptonJetTest_la_SOURCES = \ Lepton/TopDecay.h Lepton/TopDecay.cc LeptonJetTest_la_CPPFLAGS = $(AM_CPPFLAGS) $(FASTJETINCLUDE) \ -I$(FASTJETPATH) LeptonJetTest_la_LIBADD = $(FASTJETLIBS) endif LeptonTest_la_SOURCES = \ Lepton/VVTest.h Lepton/VVTest.cc \ Lepton/VBFTest.h Lepton/VBFTest.cc \ Lepton/VHTest.h Lepton/VHTest.cc \ Lepton/FermionTest.h Lepton/FermionTest.cc GammaTest_la_SOURCES = \ Gamma/GammaMETest.h Gamma/GammaMETest.cc \ Gamma/GammaPMETest.h Gamma/GammaPMETest.cc DISTest_la_SOURCES = \ DIS/DISTest.h DIS/DISTest.cc HadronTest_la_SOURCES = \ Hadron/HadronVVTest.h Hadron/HadronVVTest.cc\ Hadron/HadronVBFTest.h Hadron/HadronVBFTest.cc\ Hadron/WHTest.h Hadron/WHTest.cc\ Hadron/ZHTest.h Hadron/ZHTest.cc\ Hadron/VGammaTest.h Hadron/VGammaTest.cc\ Hadron/ZJetTest.h Hadron/ZJetTest.cc\ Hadron/WJetTest.h Hadron/WJetTest.cc\ Hadron/QQHTest.h Hadron/QQHTest.cc REPO = $(top_builddir)/src/HerwigDefaults.rpo HERWIG = $(top_builddir)/src/Herwig HWREAD = $(HERWIG) read --repo $(REPO) -L $(builddir)/.libs -i $(top_builddir)/src HWBUILD = $(HERWIG) build --repo $(REPO) -L $(builddir)/.libs -i $(top_builddir)/src HWINTEGRATE = $(HERWIG) integrate HWRUN = $(HERWIG) run -N $${NUMEVENTS:-10000} tests : tests-LEP tests-DIS tests-LHC tests-Gamma LEPDEPS = \ test-LEP-VV \ test-LEP-VH \ test-LEP-VBF \ test-LEP-BB \ test-LEP-Quarks \ test-LEP-Leptons if WANT_LIBFASTJET LEPDEPS += test-LEP-TopDecay endif tests-LEP : $(LEPDEPS) tests-DIS : test-DIS-Charged test-DIS-Neutral LHCDEPS = \ test-LHC-WW 
test-LHC-WZ test-LHC-ZZ \ test-LHC-ZGamma test-LHC-WGamma \ test-LHC-ZH test-LHC-WH \ test-LHC-ZJet test-LHC-WJet \ test-LHC-Z test-LHC-W \ test-LHC-ZZVBF test-LHC-VBF \ test-LHC-WWVBF \ test-LHC-bbH test-LHC-ttH \ test-LHC-GammaGamma test-LHC-GammaJet \ test-LHC-Higgs test-LHC-HiggsJet \ test-LHC-QCDFast test-LHC-QCD \ test-LHC-Top if WANT_LIBFASTJET LHCDEPS += \ test-LHC-Bottom \ test-LHC-WHJet test-LHC-ZHJet test-LHC-HJet \ test-LHC-ZShower test-LHC-WShower \ test-LHC-WHJet-Powheg test-LHC-ZHJet-Powheg test-LHC-HJet-Powheg \ test-LHC-ZShower-Powheg test-LHC-WShower-Powheg endif tests-LHC : $(LHCDEPS) tests-Gamma : test-Gamma-FF test-Gamma-WW test-Gamma-P LEPLIBS = LeptonTest.la HADLIBS = HadronTest.la if WANT_LIBFASTJET LEPLIBS += LeptonJetTest.la HADLIBS += HadronJetTest.la endif test-LEP-% : Inputs/LEP-%.in $(LEPLIBS) $(HWREAD) $< $(HWRUN) $(notdir $(subst .in,.run,$<)) test-Gamma-% : Inputs/Gamma-%.in GammaTest.la $(HWREAD) $< $(HWRUN) $(notdir $(subst .in,.run,$<)) test-DIS-% : Inputs/DIS-%.in DISTest.la $(HWREAD) $< $(HWRUN) $(notdir $(subst .in,.run,$<)) test-LHC-% : Inputs/LHC-%.in GammaTest.la $(HADLIBS) $(HWREAD) $< $(HWRUN) $(notdir $(subst .in,.run,$<)) tests-Rivet : Rivet-LEP Rivet-BFactory Rivet-DIS Rivet-Star Rivet-SppS \ Rivet-TVT-WZ Rivet-TVT-Photon Rivet-TVT-Jets \ Rivet-LHC-Jets Rivet-LHC-EW Rivet-LHC-Photon Rivet-LHC-Higgs Rivet-%.run : Rivet/%.in $(HWBUILD) -c .cache/$(subst .run,,$@) $< Rivet-Matchbox-%.yoda : Rivet-Matchbox-%.run $(HWINTEGRATE) -c .cache/$(subst .run,,$<) $< $(HWRUN) -c .cache/$(subst .run,,$<) $< Rivet-%.yoda : Rivet-%.run $(HWRUN) $< Rivet/%.in : python/make_input_files.py $(notdir $(subst .in,,$@)) Rivet-inputfiles: $(shell echo Rivet/LEP{,-Powheg,-Matchbox,-Dipole,-Dipole-MCatNLO,-Dipole-Matchbox-Powheg,-Matchbox-Powheg,-Merging}-{9.4,12,13,17,27.6,29,30.2,30.7,30.75,30,31.3,34.8,43.6,50,52,55,56,57,60.8,60,61.4,10,12.8,22,26.8,35,44,48.0,91,93.0,130,133,136,161,172,177,183,189,192,196,197,200,202,206,91-nopi}.in) \ $(shell echo Rivet/LEP{,-Dipole,-Dipole-MCatNLO,-Dipole-Matchbox-Powheg,-Powheg,-Matchbox-Powheg}-14.in) \ $(shell echo Rivet/LEP{,-Dipole}-{10.5,11.96,12.8,13.96,16.86,21.84,26.8,28.48,35.44,48.0,97.0}-gg.in) \ $(shell echo Rivet/BFactory{,-Powheg,-Matchbox,-Dipole,-Dipole-MCatNLO,-Dipole-Matchbox-Powheg,-Matchbox-Powheg}-{10.52,10.52-sym,10.54,10.45}.in) \ $(shell echo Rivet/BFactory{,-Dipole}-{Upsilon,Upsilon2,Upsilon4,Tau,10.58-res}.in) \ $(shell echo Rivet/DIS{,-NoME,-Powheg,-Matchbox,-Dipole,-Dipole-MCatNLO,-Dipole-Matchbox-Powheg,-Matchbox-Powheg,-Merging}-{e--LowQ2,e+-LowQ2,e+-HighQ2}.in) \ $(shell echo Rivet/TVT{,-Powheg,-Matchbox,-Dipole,-Dipole-MCatNLO,-Dipole-Matchbox-Powheg,-Matchbox-Powheg,-Merging}-{Run-I-Z,Run-I-W,Run-I-WZ,Run-II-Z-e,Run-II-Z-{,LowMass-,HighMass-}mu,Run-II-W}.in) \ $(shell echo Rivet/TVT{,-Dipole}-Run-II-{DiPhoton-GammaGamma,DiPhoton-GammaJet,PromptPhoton}.in) \ $(shell echo Rivet/TVT-Powheg-Run-II-{DiPhoton-GammaGamma,DiPhoton-GammaJet}.in) \ $(shell echo Rivet/TVT{,-Dipole,-Dipole-MCatNLO,-Dipole-Matchbox-Powheg,-Matchbox,-Matchbox-Powheg,-Merging}-{Run-II-Jets-{0..11},Run-I-Jets-{1..8}}.in ) \ $(shell echo Rivet/TVT{,-Dipole,-Dipole-MCatNLO,-Dipole-Matchbox-Powheg,-Matchbox,-Matchbox-Powheg,-Merging}-{630-Jets-{1..3},300-Jets-1,900-Jets-1}.in ) \ $(shell echo Rivet/TVT{,-Dipole}-{Run-I,Run-II,300,630,900}-UE.in) \ $(shell echo Rivet/LHC{,-Dipole,-Dipole-MCatNLO,-Dipole-Matchbox-Powheg,-Matchbox,-Matchbox-Powheg,-Merging}-7-DiJets-{1..7}-{A,B,C}.in ) \ $(shell echo 
Rivet/LHC{,-Dipole,-Dipole-MCatNLO,-Dipole-Matchbox-Powheg,-Matchbox,-Matchbox-Powheg,-Merging}-{7,8,13}-Jets-{0..10}.in ) \ $(shell echo Rivet/LHC{,-Dipole}-{900,2360,2760,7,8,13}-UE.in ) \ $(shell echo Rivet/LHC{,-Dipole}-{900,7,13}-UE-Long.in ) \ $(shell echo Rivet/LHC{,-Dipole,-Dipole-MCatNLO,-Dipole-Matchbox-Powheg,-Matchbox,-Matchbox-Powheg,-Merging}-7-Charm-{1..5}.in) \ $(shell echo Rivet/LHC{,-Dipole,-Dipole-MCatNLO,-Dipole-Matchbox-Powheg,-Matchbox,-Matchbox-Powheg,-Merging}-7-Bottom-{0..8}.in) \ $(shell echo Rivet/LHC{,-Dipole,-Dipole-MCatNLO,-Dipole-Matchbox-Powheg,-Matchbox,-Matchbox-Powheg,-Merging}-{7,8,13}-Top-{All,L,SL}.in) \ $(shell echo Rivet/Star{,-Dipole}-{UE,Jets-{1..4}}.in ) \ $(shell echo Rivet/SppS{,-Dipole}-{200,500,900,546}-UE.in ) \ $(shell echo Rivet/LHC{,-Matchbox,-Matchbox-Powheg,-Powheg,-Dipole,-Dipole-MCatNLO,-Dipole-Matchbox-Powheg,-Merging}-{W-{e,mu},13-Z-{e,mu},Z-HighMass{1,2}-e,{8,13}-W-mu,8-Z-Mass{1..4}-{e,mu},Z-{e,mu,mu-SOPHTY},Z-LowMass-{e,mu},Z-MedMass-e,WZ,WW-{emu,ll},ZZ-{ll,lv},{8,13}-WZ,8-ZZ-lv,8-WW-ll,W-Z-{e,mu}}.in) \ $(shell echo Rivet/LHC{,-Dipole}-7-{W,Z}Gamma-{e,mu}.in) \ $(shell echo Rivet/LHC{,-Matchbox,-Matchbox-Powheg,-Dipole,-Dipole-MCatNLO,-Dipole-Matchbox-Powheg,-Merging}-{7-W-Jet-{1..3}-e,7-Z-Jet-{0..3}-e,7-Z-Jet-0-mu}.in) \ $(shell echo Rivet/LHC{-Matchbox,-Matchbox-Powheg,-Dipole-MCatNLO,-Dipole-Matchbox-Powheg,-Merging}-{Z-b,Z-bb,8-Z-b,8-Z-bb,W-b,8-Z-jj}.in) \ $(shell echo Rivet/LHC{,-Dipole}-{7,8}-PromptPhoton-{1..4}.in) Rivet/LHC-GammaGamma-7.in \ $(shell echo Rivet/LHC{,-Powheg}-{7,8}-{DiPhoton-GammaGamma,DiPhoton-GammaJet}.in) \ $(shell echo Rivet/LHC{,-Powheg,-Matchbox,-Matchbox-Powheg,-Dipole,-Dipole-MCatNLO,-Dipole-Matchbox-Powheg,-Merging}-{ggH,VBF,WH,ZH}.in) \ $(shell echo Rivet/LHC{,-Powheg,-Matchbox,-Matchbox-Powheg,-Dipole,-Dipole-MCatNLO,-Dipole-Matchbox-Powheg,-Merging}-8-{{ggH,VBF,WH,ZH}{,-GammaGamma},ggH-WW}.in) \ $(shell echo Rivet/LHC{,-Matchbox,-Matchbox-Powheg,-Dipole,-Dipole-MCatNLO,-Dipole-Matchbox-Powheg,-Merging}-ggHJet.in) # $(shell echo Rivet/ISR-{30,44,53,62}-UE.in ) $(shell echo Rivet/SppS-{53,63}-UE.in ) Rivet-LEP : Rivet-LEP/done touch $@ Rivet-LEP/done : $(shell echo Rivet{,-Powheg,-Matchbox,-Dipole}-LEP-{9.4,12,13,17,27.6,29,30.2,30.7,30.75,30,31.3,34.8,43.6,50,52,55,56,57,60.8,60,61.4,10,12.8,22,26.8,35,44,48.0,91,93.0,130,133,136,161,172,177,183,189,192,196,197,200,202,206,91-nopi}.yoda) \ $(shell echo Rivet-LEP-{10.5,11.96,12.8,13.96,16.86,21.84,26.8,28.48,35.44,48.0,97.0}-gg.yoda) \ $(shell echo Rivet{,-Powheg,-Matchbox,-Matchbox-Powheg,-Dipole}-LEP-14.yoda) rm -rf Rivet-LEP python/merge-LEP --with-gg LEP python/merge-LEP Powheg-LEP python/merge-LEP Matchbox-LEP python/merge-LEP Dipole-LEP rivet-mkhtml -o Rivet-LEP LEP.yoda:Hw Powheg-LEP.yoda:Hw-Powheg Matchbox-LEP.yoda:Hw-Matchbox Dipole-LEP.yoda:Hw-Dipole touch $@ Rivet-BFactory : Rivet-BFactory/done touch $@ Rivet-BFactory/done: $(shell echo Rivet{,-Powheg,-Matchbox,-Dipole}-BFactory-{10.52,10.52-sym,10.54,10.45}.yoda) \ $(shell echo Rivet-BFactory-{Upsilon,Upsilon2,Upsilon4,Tau,10.58-res,10.58}.yoda) rm -rf Rivet-BFactory python/merge-BFactory BFactory python/merge-BFactory Powheg-BFactory python/merge-BFactory Matchbox-BFactory python/merge-BFactory Dipole-BFactory rivet-mkhtml -o Rivet-BFactory BFactory.yoda:Hw Powheg-BFactory.yoda:Hw-Powheg Matchbox-BFactory.yoda:Hw-Matchbox Dipole-BFactory.yoda:Hw-Dipole touch $@ Rivet-DIS : Rivet-DIS/done touch $@ Rivet-DIS/done: $(shell echo 
Rivet{-DIS,-DIS-NoME,-Powheg-DIS,-Matchbox-DIS,-Dipole-DIS}-{e--LowQ2,e+-LowQ2,e+-HighQ2}.yoda) rm -rf Rivet-DIS python/merge-DIS DIS python/merge-DIS Powheg-DIS python/merge-DIS DIS-NoME python/merge-DIS Matchbox-DIS python/merge-DIS Dipole-DIS rivet-mkhtml -o Rivet-DIS DIS.yoda:Hw Powheg-DIS.yoda:Hw-Powheg DIS-NoME.yoda:Hw-NoME Matchbox-DIS.yoda:Hw-Matchbox Dipole-DIS.yoda:Hw-Dipole touch $@ -Rivet-TVT-WZ : Rivet-TVT-WZ/done +Rivet-TVT-EW : Rivet-TVT-EW/done touch $@ -Rivet-TVT-WZ/done: $(shell echo Rivet{,-Powheg,-Matchbox,-Dipole}-TVT-{Run-I-Z,Run-I-W,Run-I-WZ,Run-II-Z-{e,{,LowMass-,HighMass-}mu},Run-II-W}.yoda) +Rivet-TVT-EW/done: $(shell echo Rivet{,-Powheg,-Matchbox,-Dipole}-TVT-{Run-I-Z,Run-I-W,Run-I-WZ,Run-II-Z-{e,{,LowMass-,HighMass-}mu},Run-II-W}.yoda) rm -rf Rivet-TVT-WZ - python/merge-TVT-EW Rivet-TVT-Run-II-W.yoda Rivet-TVT-Run-II-Z-{e,{,LowMass-,HighMass-}mu}.yoda\ - Rivet-TVT-Run-I-{W,Z,WZ}.yoda -o TVT-WZ.yoda - python/merge-TVT-EW Rivet-TVT-Powheg-Run-II-W.yoda Rivet-TVT-Powheg-Run-II-Z-{e,{,LowMass-,HighMass-}mu}.yoda\ - Rivet-TVT-Powheg-Run-I-{W,Z,WZ}.yoda -o TVT-Powheg-WZ.yoda - python/merge-TVT-EW Rivet-TVT-Matchbox-Run-II-W.yoda Rivet-TVT-Matchbox-Run-II-Z-{e,{,LowMass-,HighMass-}mu}.yoda\ - Rivet-TVT-Matchbox-Run-I-{W,Z,WZ}.yoda -o TVT-Matchbox-WZ.yoda - python/merge-TVT-EW Rivet-TVT-Dipole-Run-II-W.yoda Rivet-TVT-Dipole-Run-II-Z-{e,{,LowMass-,HighMass-}mu}.yoda\ - Rivet-TVT-Dipole-Run-I-{W,Z,WZ}.yoda -o TVT-Dipole-WZ.yoda - rivet-mkhtml -o Rivet-TVT-WZ TVT-WZ.yoda:Hw TVT-Powheg-WZ.yoda:Hw-Powheg TVT-Matchbox-WZ.yoda:Hw-Matchbox TVT-Dipole-WZ.yoda:Hw-Dipole + python/merge-TVT-EW TVT + python/merge-TVT-EW TVT-Powheg + python/merge-TVT-EW TVT-Matchbox + python/merge-TVT-EW TVT-Dipole + rivet-mkhtml -o Rivet-TVT-EW TVT-EW.yoda:Hw TVT-Powheg-EW.yoda:Hw-Powheg TVT-Matchbox-EW.yoda:Hw-Matchbox TVT-Dipole-EW.yoda:Hw-Dipole touch $@ Rivet-TVT-Photon : Rivet-TVT-Photon/done touch $@ Rivet-TVT-Photon/done: $(shell echo Rivet-TVT-Run-II-{DiPhoton-GammaGamma,DiPhoton-GammaJet,PromptPhoton}.yoda) \ $(shell echo Rivet-Powheg-TVT-Run-II-{DiPhoton-GammaGamma,DiPhoton-GammaJet}.yoda) rm -rf Rivet-TVT-Photon python/merge-TVT-Photon TVT -o TVT-Photon.yoda python/merge-TVT-Photon TVT-Powheg -o TVT-Powheg-Photon.yoda rivet-mkhtml -o Rivet-TVT-Photon TVT-Photon.yoda:Hw TVT-Powheg-Photon.yoda:Hw-Powheg touch $@ Rivet-TVT-Jets : Rivet-TVT-Jets/done touch $@ Rivet-TVT-Jets/done: $(shell echo Rivet-TVT-{Run-II-Jets-{0..11},Run-I-Jets-{1..8}}.yoda ) \ $(shell echo Rivet-TVT-{630-Jets-{1..3},300-Jets-1,900-Jets-1}.yoda ) \ $(shell echo Rivet-TVT-{Run-I,Run-II,300,630,900}-UE.yoda) rm -rf Rivet-TVT-Jets python/merge-TVT-Jets TVT rivet-mkhtml -o Rivet-TVT-Jets TVT-Jets.yoda:Hw touch $@ #python/merge-TVT-Energy TVT #rivet-merge-CDF_2012_NOTE10874 TVT-300-Energy.yoda TVT-900-Energy.yoda TVT-1960-Energy.yoda #flat2yoda RatioPlots.dat -o TVT-RatioPlots.yoda Rivet-Star : Rivet-Star/done touch $@ Rivet-Star/done : $(shell echo Rivet-Star-{UE,Jets-{1..4}}.yoda ) rm -rf Rivet-Star python/merge-Star Star ## broken DVI? 
## rivet-mkhtml -v -o Rivet-Star Star.yoda mkdir -p Rivet-Star # remove when fixed touch $@ Rivet-SppS : Rivet-SppS/done touch $@ ## $(shell echo Rivet-ISR-{30,44,53,62}-UE.yoda ) \ ## {53,63,200,500,900,546} Rivet-SppS/done : $(shell echo Rivet-SppS-{200,500,900,546}-UE.yoda ) rm -rf Rivet-SppS python/merge-SppS SppS rivet-mkhtml -o Rivet-SppS SppS.yoda touch $@ Rivet-LHC-Jets : Rivet-LHC-Jets/done touch $@ Rivet-LHC-Jets/done : \ $(shell echo Rivet-LHC-7-DiJets-{1..7}-{A,B,C}.yoda ) \ $(shell echo Rivet-LHC-{7,8,13}-Jets-{0..10}.yoda ) \ $(shell echo Rivet-LHC-{900,2360,2760,7,8,13}-UE.yoda ) \ $(shell echo Rivet-LHC-{900,7,13}-UE-Long.yoda ) \ $(shell echo Rivet-LHC-7-Charm-{1..5}.yoda ) \ $(shell echo Rivet-LHC-7-Bottom-{0..8}.yoda ) \ $(shell echo Rivet-LHC-{7,8,13}-Top-{L,SL}.yoda ) \ $(shell echo Rivet-LHC-{7,8}-Top-All.yoda ) rm -rf Rivet-LHC-Jets python/merge-LHC-Jets LHC rivet-mkhtml -o Rivet-LHC-Jets LHC-Jets.yoda:Hw touch $@ Rivet-LHC-EW : Rivet-LHC-EW/done touch $@ Rivet-LHC-EW/done: \ $(shell echo Rivet{,-Matchbox,-Powheg,-Dipole}-LHC-{13-Z-{e,mu},{8,13}-W-mu,Z-HighMass{1,2}-e,8-Z-Mass{1..4}-{e,mu},W-{e,mu},Z-{e,mu,mu-SOPHTY},Z-LowMass-{e,mu},Z-MedMass-e,WZ,WW-{emu,ll},ZZ-{ll,lv},{8,13}-WZ,8-ZZ-lv,8-WW-ll,W-Z-{e,mu}}.yoda) \ $(shell echo Rivet{,-Matchbox,-Dipole}-LHC-{7-W-Jet-{1..3}-e,7-Z-Jet-{0..3}-e,7-Z-Jet-0-mu}.yoda) \ $(shell echo Rivet{-Matchbox,-Dipole}-LHC-{Z-b,Z-bb,8-Z-b,8-Z-bb,W-b,8-Z-jj}.yoda) \ $(shell echo Rivet-LHC-7-{W,Z}Gamma-{e,mu}.yoda) rm -rf Rivet-LHC-EW; - python/merge-LHC-EW Rivet-LHC-{13-Z-{e,mu},Z-HighMass{1,2}-e,8-Z-Mass{1..4}-{e,mu},W-{e,mu},Z-{e,mu,mu-Short},Z-LowMass-{e,mu},Z-MedMass-e,W-Z-{e,mu},WW-{emu,ll},WZ,ZZ-{ll,lv},{8,13}-WZ,8-ZZ-lv,8-WW-ll}.yoda Rivet-LHC-7-{W,Z}-Jet-{1,2,3}-e.yoda Rivet-LHC-7-{W,Z}Gamma-{e,mu}.yoda -o LHC-EW.yoda; - python/merge-LHC-EW Rivet-Matchbox-LHC-{13-Z-{e,mu},Z-HighMass{1,2}-e,8-Z-Mass{1..4}-{e,mu},W-{e,mu},Z-{e,mu,mu-Short},Z-LowMass-{e,mu},Z-MedMass-e,W-Z-{e,mu},WW-{emu,ll},WZ,ZZ-{ll,lv},{8,13}-WZ,{8,13}-WZ,8-ZZ-lv,8-WW-ll}.yoda Rivet-LHC-Matchbox-7-{W,Z}-Jet-{1,2,3}-e.yoda -o LHC-Matchbox-EW.yoda; - python/merge-LHC-EW Rivet-Dipole-LHC-{13-Z-{e,mu},Z-HighMass{1,2}-e,8-Z-Mass{1..4}-{e,mu},W-{e,mu},Z-{e,mu,mu-Short},Z-LowMass-{e,mu},Z-MedMass-e,W-Z-{e,mu},WW-{emu,ll},WZ,ZZ-{ll,lv},{8,13}-WZ,8-ZZ-lv,8-WW-ll}.yoda Rivet-LHC-Dipole-7-{W,Z}-Jet-{1,2,3}-e.yoda -o LHC-Dipole-EW.yoda; - python/merge-LHC-EW Rivet-Powheg-LHC-{W-{e,mu},Z-{e,mu},Z-LowMass-{e,mu},Z-MedMass-e,W-Z-{e,mu},WW-{emu,ll},WZ,ZZ-{ll,lv},{8,13}-WZ,8-ZZ-lv,8-WW-ll}.yoda -o LHC-Powheg-EW.yoda; + python/merge-LHC-EW LHC + python/merge-LHC-EW LHC-Matchbox + python/merge-LHC-EW LHC-Dipole + python/merge-LHC-EW LHC-Powheg rivet-mkhtml -o Rivet-LHC-EW LHC-EW.yoda:Hw LHC-Powheg-EW.yoda:Hw-Powheg LHC-Matchbox-EW.yoda:Hw-Matchbox LHC-Matchbox-Z-b.yoda:Hw-Matchbox-Zb \ LHC-Matchbox-Z-bb.yoda:Hw-Matchbox-Zbb LHC-Matchbox-W-b.yoda:Hw-Matchbox-W-bb LHC-Dipole-EW.yoda:Hw-Dipole \ LHC-Dipole-Z-b.yoda:Hw-Dipole-Zb LHC-Dipole-Z-bb.yoda:Hw-Dipole-Zbb LHC-Dipole-W-b.yoda:Hw-Dipole-W-bb \ LHC-Z-mu-SOPHTY.yoda:Hw LHC-Powheg-Z-mu-SOPHTY.yoda:Hw-Powheg LHC-Matchbox-Z-mu-SOPHTY.yoda:Hw-Matchbox \ LHC-Matchbox-8-Z-b.yoda:Hw-Matchbox-Zb LHC-Matchbox-8-Z-bb.yoda:Hw-Matchbox-Zbb \ LHC-Dipole-8-Z-b.yoda:Hw-Dipole-Zb LHC-Dipole-8-Z-bb.yoda:Hw-Dipole-Zbb touch $@ Rivet-LHC-Photon : Rivet-LHC-Photon/done touch $@ Rivet-LHC-Photon/done: \ $(shell echo Rivet-LHC-{7,8}-PromptPhoton-{1..4}.yoda) \ Rivet-LHC-GammaGamma-7.yoda \ $(shell echo 
Rivet{,-Powheg}-LHC-{7,8}-{DiPhoton-GammaGamma,DiPhoton-GammaJet}.yoda) rm -rf Rivet-LHC-Photon python/merge-LHC-Photon LHC -o LHC-Photon.yoda python/merge-LHC-Photon LHC-Powheg -o LHC-Powheg-Photon.yoda rivet-mkhtml -o Rivet-LHC-Photon LHC-Photon.yoda:Hw LHC-Powheg-Photon.yoda:Hw-Powheg touch $@ Rivet-LHC-Higgs : Rivet-LHC-Higgs/done touch $@ Rivet-LHC-Higgs/done: \ $(shell echo Rivet{,-Powheg}-LHC-{ggH,VBF,WH,ZH}.yoda) \ $(shell echo Rivet{,-Powheg}-LHC-8-{{ggH,VBF,WH,ZH}{,-GammaGamma},ggH-WW}.yoda) \ Rivet-LHC-ggHJet.yoda rm -rf Rivet-LHC-Higgs rivet-mkhtml -o Rivet-LHC-Higgs LHC-Powheg-ggH.yoda:gg-Powheg LHC-ggH.yoda:gg LHC-ggHJet.yoda:HJet \ LHC-Powheg-VBF.yoda:VBF-Powheg LHC-VBF.yoda:VBF LHC-WH.yoda:WH LHC-ZH.yoda:ZH \ LHC-Powheg-WH.yoda:WH-Powheg LHC-Powheg-ZH.yoda:ZH-Powheg touch $@ clean-local: rm -f *.out *.log *.tex *.top *.run *.dump *.mult *.Bmult *.yoda Rivet/*.in rm -rf Rivet-* distclean-local: rm -rf .cache diff --git a/Tests/python/merge-LHC-EW b/Tests/python/merge-LHC-EW --- a/Tests/python/merge-LHC-EW +++ b/Tests/python/merge-LHC-EW @@ -1,393 +1,401 @@ #! /usr/bin/env python import logging import sys import os, yoda """%prog Script for merging aida files """ def fillAbove(scale,desthisto, sourcehistosbyptmin) : pthigh= 1e100 ptlow =-1e100 for pt, h in sorted(sourcehistosbyptmin.iteritems(),reverse=True): ptlow=pt if(type(desthisto)==yoda.core.Scatter2D) : for i in range(0,h.numPoints) : xMin = h.points[i].x-h.points[i].xErrs.minus if( xMin*scale >= ptlow and xMin*scale < pthigh ) : desthisto.addPoint(h.points[i]) elif(type(desthisto)==yoda.core.Profile1D) : for i in range(0,h.numBins) : if(h.bins[i].xMin*scale >= ptlow and h.bins[i].xMin*scale < pthigh ) : desthisto.bins[i] += h.bins[i] elif(type(desthisto)==yoda.core.Histo1D) : for i in range(0,h.numBins) : if(h.bins[i].xMin*scale >= ptlow and h.bins[i].xMin*scale < pthigh ) : desthisto.bins[i] += h.bins[i] else : logging.error("Can't merge %s, unknown type" % desthisto.path) sys.exit(1) pthigh=pt def mergeByMass(hpath, sqrts, scale=1.): global inhistos_mass global outhistos try: fillAbove(scale,outhistos[hpath], inhistos_mass[hpath][float(sqrts)]) except: pass def useOneMass(hpath, sqrts, ptmin): global inhistos_mass global outhistos try: ## Find best pT_min match ptmins = inhistos_mass[hpath][float(sqrts)].keys() closest_ptmin = None for ptm in ptmins: if closest_ptmin is None or \ abs(ptm-float(ptmin)) < abs(closest_ptmin-float(ptmin)): closest_ptmin = ptm if closest_ptmin != float(ptmin): logging.warning("Inexact match for requested pTmin=%s: " % ptmin + \ "using pTmin=%e instead" % closest_ptmin) outhistos[hpath] = inhistos_mass[hpath][float(sqrts)][closest_ptmin] except: pass import sys if sys.version_info[:3] < (2,4,0): print "rivet scripts require Python version >= 2.4.0... 
exiting" sys.exit(1) if __name__ == "__main__": import logging from optparse import OptionParser, OptionGroup - parser = OptionParser(usage="%prog aidafile aidafile2 [...]") - parser.add_option("-o", "--out", dest="OUTFILE", default="-") + parser = OptionParser(usage="%prog base") verbgroup = OptionGroup(parser, "Verbosity control") verbgroup.add_option("-v", "--verbose", action="store_const", const=logging.DEBUG, dest="LOGLEVEL", default=logging.INFO, help="print debug (very verbose) messages") verbgroup.add_option("-q", "--quiet", action="store_const", const=logging.WARNING, dest="LOGLEVEL", default=logging.INFO, help="be very quiet") parser.add_option_group(verbgroup) (opts, args) = parser.parse_args() logging.basicConfig(level=opts.LOGLEVEL, format="%(message)s") ## Check args if len(args) < 1: - logging.error("Must specify at least one AIDA histogram file") + logging.error("Must specify at least the name of the files") sys.exit(1) + yodafiles=["-13-Z-e","-13-Z-mu","-Z-HighMass1-e","-Z-HighMass2-e", + "-8-Z-Mass1-e","-8-Z-Mass1-mu","-8-Z-Mass2-e","-8-Z-Mass2-mu","-8-Z-Mass3-e","-8-Z-Mass3-mu","-8-Z-Mass4-e","-8-Z-Mass4-mu", + "-W-e","-W-mu","-Z-e","-Z-mu","-Z-mu-Short","-Z-LowMass-e","-Z-LowMass-mu", + "-Z-MedMass-e","-W-Z-e","-W-Z-mu", + "-WW-emu","-WW-ll","-WZ","-ZZ-ll","-ZZ-lv","-8-WZ","-13-WZ","-8-ZZ-lv","-8-WW-ll", + "-7-W-Jet-1-e","-7-W-Jet-2-e","-7-W-Jet-3-e","-7-Z-Jet-1-e","-7-Z-Jet-2-e","-7-Z-Jet-3-e", + "-7-WGamma-e","-7-WGamma-mu","-7-ZGamma-e","-7-ZGamma-mu"] ## Get histos outhistos={} inhistos_mass = {} - for file in args: + for f in yodafiles: + file='Rivet-'+args[0]+f+".yoda" if not os.access(file, os.R_OK): logging.error("%s can not be read" % file) break try: aos = yoda.read(file) except: - logging.error("%s can not be parsed as XML" % file) + logging.error("%s can not be parsed as yoda" % file) break mass=66 if(file.find("HighMass1")>=0) : mass = 116 elif(file.find("HighMass2")>=0) : mass = 400 elif(file.find("Mass1")>=0) : mass = 12 elif(file.find("Mass2")>=0) : mass = 30 elif(file.find("Mass3")>=0) : mass = 66 elif(file.find("Mass4")>=0) : mass = 116 ## Get histos from this YODA file for aopath, ao in aos.iteritems() : if(aopath.find("ATLAS_2010_S8919674")>0) : if((aopath.find("d01")>0 or aopath.find("d05")>0 or aopath.find("d07")>0) and file.find("-e")>0) : outhistos[aopath] = ao elif((aopath.find("d02")>0 or aopath.find("d06")>0 or aopath.find("d08")>0) and file.find("-mu")>0) : outhistos[aopath] = ao elif (aopath.find("ATLAS_2011_S9131140")>0) : if(aopath.find("d01")>0 and file.find("-e")>0) : outhistos[aopath] = ao elif(aopath.find("d02")>0 and file.find("-mu")>0) : outhistos[aopath] = ao elif (aopath.find("ATLAS_2011_I925932")>0) : if(aopath.find("d01")>0 and file.find("-e")>0) : outhistos[aopath] = ao elif(aopath.find("d02")>0 and file.find("-mu")>0) : outhistos[aopath] = ao elif (aopath.find("ATLAS_2011_I945498")>0) : if(aopath.find("y01")>0 and file.find("-e")>0) : outhistos[aopath] = ao elif(aopath.find("y02")>0 and file.find("-mu")>0) : outhistos[aopath] = ao elif(aopath.find("y03")>0 and file.find("-mu")>0) : outhistos[aopath] = ao elif (aopath.find("ATLAS_2013_I1217867")>0) : if(aopath.find("y01")>0 and file.find("-e")>0) : outhistos[aopath] = ao elif(aopath.find("y02")>0 and file.find("-mu")>0) : outhistos[aopath] = ao elif (aopath.find("CMS_2012_I941555")>0) : if((aopath.find("y01")>0 or aopath.find("y03")>0 ) and file.find("-mu")>0) : outhistos[aopath] = ao elif(aopath.find("y02")>0 and file.find("-e")>0) : outhistos[aopath] = ao elif 
(aopath.find("ATLAS_2014_I1300647" )>0) : if(aopath.find("y01")>0 and file.find("-e")>0) : outhistos[aopath] = ao elif((not aopath.find("y01")>0) and file.find("-mu")>0) : outhistos[aopath] = ao elif (aopath.find("ATLAS_2014_I1288706" )>0) : if(aopath.find("y02")>0 and file.find("-e")>0) : outhistos[aopath] = ao elif(aopath.find("y01")>0 and file.find("-mu")>0) : outhistos[aopath] = ao elif (aopath.find("ATLAS_2012_I1204784" )>0) : if( file.find("-e")>0 and ( aopath.find("d03")>0 or ((aopath.find("d01")>0 or aopath.find("d02")>0) and aopath.find("y01")>0))) : outhistos[aopath] = ao elif(file.find("-mu")>0 and ( aopath.find("d04")>0 or ((aopath.find("d01")>0 or aopath.find("d02")>0) and aopath.find("y02")>0))) : outhistos[aopath] = ao elif (aopath.find("ATLAS_2014_I1312627_EL") >0) : if(file.find("-e")>0) : outhistos[aopath] = ao elif (aopath.find("ATLAS_2014_I1312627_MU") >0) : if(file.find("-mu")>0) : outhistos[aopath] = ao elif (aopath.find("ATLAS_2014_I1312627") >0) : if(file.find("-e")>0) : outhistos[aopath] = ao elif (aopath.find("CMS_2013_I1224539_WJET" )>0) : if(file.find("-1-e")>0 and (aopath.find("d52")>0 or aopath.find("d53")>0 or aopath.find("d56")>0 or aopath.find("d57")>0 or aopath.find("d60")>0 or aopath.find("d61")>0 or aopath.find("d64")>0 or aopath.find("d65")>0 or aopath.find("d68")>0 or aopath.find("d69")>0 or aopath.find("d72")>0)) : outhistos[aopath] = ao elif(file.find("-2-e")>0 and (aopath.find("d54")>0 or aopath.find("d58")>0 or aopath.find("d62")>0 or aopath.find("d66")>0 or aopath.find("d70")>0 or aopath.find("d73")>0)) : outhistos[aopath] = ao elif(file.find("-3-e")>0 and (aopath.find("d55")>0 or aopath.find("d59")>0 or aopath.find("d63")>0 or aopath.find("d67")>0 or aopath.find("d71")>0 or aopath.find("d74")>0)) : outhistos[aopath] = ao elif (aopath.find("CMS_2013_I1224539_ZJET" )>0) : if(file.find("-1-e")>0 and (aopath.find("d29")>0 or aopath.find("d30")>0 or aopath.find("d33")>0 or aopath.find("d34")>0 or aopath.find("d37")>0 or aopath.find("d38")>0 or aopath.find("d41")>0 or aopath.find("d42")>0 or aopath.find("d45")>0 or aopath.find("d46")>0 or aopath.find("d49")>0)) : outhistos[aopath] = ao elif(file.find("-2-e")>0 and (aopath.find("d31")>0 or aopath.find("d35")>0 or aopath.find("d39")>0 or aopath.find("d43")>0 or aopath.find("d47")>0 or aopath.find("d50")>0)) : outhistos[aopath] = ao elif(file.find("-3-e")>0 and (aopath.find("d32")>0 or aopath.find("d36")>0 or aopath.find("d40")>0 or aopath.find("d44")>0 or aopath.find("d48")>0 or aopath.find("d51")>0)) : outhistos[aopath] = ao elif (aopath.find("CMS_2013_I1258128")>0) : if(aopath.find("d01")>0 or aopath.find("d02")>0 or aopath.find("d03")>0 or aopath.find("d04")>0) : outhistos[aopath] = ao elif (aopath.find("CMS_2013_I1209721" )>0 and file.find("-0")>0 ) : outhistos[aopath] = ao elif (aopath.find("ATLAS_2011_I928289")>0) : if(file.find("-e")>=0 and (aopath.find("y01")>=0 or aopath.find("y02")>=0)) : outhistos[aopath] = ao elif(file.find("-mu")>=0 and (aopath.find("y03")>=0 or aopath.find("y04")>=0)) : outhistos[aopath] = ao elif (aopath.find("CMS_2013_I1122847")>0) : if(file.find("-mu")>=0 and aopath.find("d01")>=0 ) : outhistos[aopath] = ao elif(file.find("-e")>=0 and (aopath.find("d02")>=0 or aopath.find("d03")>=0)) : outhistos[aopath] = ao elif (aopath.find("ATLAS_2016_I1424838")>0) : if(file.find("-mu")>=0 and aopath.find("x02")>=0 ) : outhistos[aopath] = ao elif(file.find("-e")>=0 and (aopath.find("x01")>=0)) : outhistos[aopath] = ao elif (aopath.find("CMS_2015_I1310737")>0) : if aopath in outhistos 
: outhistos[aopath] += ao else : outhistos[aopath] = ao elif (aopath.find("ATLAS_2015_I1351916")>=0) : if(aopath.find("-y01")>=0) : pathBase = aopath.replace("-y01","") hp = aos[pathBase+"-y02"] hm = aos[pathBase+"-y03"] ratio = (hp-hm)/(hp+hm) hnew = yoda.Scatter2D(aopath,ao.title) hnew.combineWith(ratio) outhistos[aopath] = hnew else : continue elif (aopath.find("ATLAS_2014_I1282447")>=0) : if((aopath.find("/ATLAS_2014_I1282447/d02-x01-y01")>=0 or aopath.find("/ATLAS_2014_I1282447/d08-x01-y01")>=0 or aopath.find("/ATLAS_2014_I1282447/d02-x01-y02")>=0 or aopath.find("/ATLAS_2014_I1282447/d02-x01-y01")>=0 or aopath.find("/ATLAS_2014_I1282447/d05-x01-y02")>=0 or aopath.find("/ATLAS_2014_I1282447/d05-x01-y03")>=0 or aopath.find("/ATLAS_2014_I1282447/d06-x01-y01")>=0 or aopath.find("/ATLAS_2014_I1282447/d06-x01-y02")>=0 or aopath.find("/ATLAS_2014_I1282447/d06-x01-y03")>=0 or aopath.find("/ATLAS_2014_I1282447/d06-x01-y04")>=0) and not (aopath.find("plus")>=0 or aopath.find("minus")>=0 or aopath.find("inc")>=0)) : continue if aopath in outhistos : outhistos[aopath] += ao else : outhistos[aopath] = ao elif (aopath.find("ATLAS_2015_I1408516")>=0) : if not inhistos_mass.has_key(aopath): inhistos_mass[aopath] = {} tmpE = inhistos_mass[aopath] sqrts=8000 if not tmpE.has_key(sqrts): tmpE[sqrts] = {} tmpP = tmpE[sqrts] if not tmpP.has_key(mass): tmpP[mass] = ao else: raise Exception("A set with mass = %s already exists" % ( mass)) elif (aopath.find("ATLAS_2013_I1234228")>=0) : if not inhistos_mass.has_key(aopath): inhistos_mass[aopath] = {} tmpE = inhistos_mass[aopath] sqrts=7000 if not tmpE.has_key(sqrts): tmpE[sqrts] = {} tmpP = tmpE[sqrts] if not tmpP.has_key(mass): tmpP[mass] = ao else: raise Exception("A set with mass = %s already exists" % ( mass)) else : outhistos[aopath] = ao for hpath,hsets in inhistos_mass.iteritems(): if(hpath!="/ATLAS_2015_I1408516_EL/d41-x01-y01" and hpath!="/ATLAS_2015_I1408516_MU/d41-x01-y02" and hpath!="/ATLAS_2013_I1234228/d01-x01-y02" ) : continue if(type(hsets.values()[0].values()[0])==yoda.core.Scatter2D) : outhistos[hpath] = yoda.core.Scatter2D(hsets.values()[0].values()[0].path, hsets.values()[0].values()[0].title) elif(type(hsets.values()[0].values()[0])==yoda.core.Profile1D) : outhistos[hpath] = yoda.core.Profile1D(hsets.values()[0].values()[0].path, hsets.values()[0].values()[0].title) for i in range(0,hsets.values()[0].values()[0].numBins) : outhistos[hpath].addBin(hsets.values()[0].values()[0].bins[i].xMin, hsets.values()[0].values()[0].bins[i].xMax) elif(type(hsets.values()[0].values()[0])==yoda.core.Histo1D) : outhistos[hpath] = yoda.core.Histo1D(hsets.values()[0].values()[0].path, hsets.values()[0].values()[0].title) for i in range(0,hsets.values()[0].values()[0].numBins) : outhistos[hpath].addBin(hsets.values()[0].values()[0].bins[i].xMin, hsets.values()[0].values()[0].bins[i].xMax) else : logging.error("Histogram %s is of unknown type" % hpath) sys.exit(1) # sort out mass bins for ATLAS Z-> e,mu at 8 TeV for ltype in ["EL","MU"] : if(ltype=="EL") : y = "y01" mergeByMass("/ATLAS_2015_I1408516_EL/d41-x01-y01", "8000") else : y = "y04" mergeByMass("/ATLAS_2015_I1408516_MU/d41-x01-y02", "8000") for d in [2,3,04,14,26,38]: useOneMass("/ATLAS_2015_I1408516_%s/d%02d-x01-%s" % (ltype,d,y), "8000", "30" ) for d in [5,6,7,8,9,10,15,17,18,19,20,21,22,27,29,30,31,32,33,34,39]: useOneMass("/ATLAS_2015_I1408516_%s/d%02d-x01-%s" % (ltype,d,y), "8000", "66" ) for d in [11,12,13,16,28,40]: useOneMass("/ATLAS_2015_I1408516_%s/d%02d-x01-%s" % (ltype,d,y), "8000", "116" ) 
for d in [23,35]: useOneMass("/ATLAS_2015_I1408516_%s/d%02d-x01-%s" % (ltype,d,y), "8000", "12" ) for d in [24,36]: useOneMass("/ATLAS_2015_I1408516_%s/d%02d-x01-%s" % (ltype,d,y), "8000", "12" ) for d in [25,37]: useOneMass("/ATLAS_2015_I1408516_%s/d%02d-x01-%s" % (ltype,d,y), "8000", "30" ) # sort out ratios for ATLAS W+c if("/ATLAS_2014_I1282447/d02-x01-y01_plus" in outhistos and "/ATLAS_2014_I1282447/d02-x01-y01_minus" in outhistos) : d02y01_plus = outhistos["/ATLAS_2014_I1282447/d02-x01-y01_plus"] d02y01_minus = outhistos["/ATLAS_2014_I1282447/d02-x01-y01_minus"] ratio_d02y01 = d02y01_plus/d02y01_minus ratio_d02y01.path = "/ATLAS_2014_I1282447/d02-x01-y01" del outhistos["/ATLAS_2014_I1282447/d02-x01-y01_plus"] del outhistos["/ATLAS_2014_I1282447/d02-x01-y01_minus"] outhistos["/ATLAS_2014_I1282447/d02-x01-y01"] = ratio_d02y01 if("/ATLAS_2014_I1282447/d02-x01-y02_plus" in outhistos and "/ATLAS_2014_I1282447/d02-x01-y02_minus" in outhistos) : d02y02_plus = outhistos["/ATLAS_2014_I1282447/d02-x01-y02_plus"] d02y02_minus = outhistos["/ATLAS_2014_I1282447/d02-x01-y02_minus"] ratio_d02y02 = d02y02_plus/d02y02_minus ratio_d02y02.path = "/ATLAS_2014_I1282447/d02-x01-y02" del outhistos["/ATLAS_2014_I1282447/d02-x01-y02_plus"] del outhistos["/ATLAS_2014_I1282447/d02-x01-y02_minus"] outhistos["/ATLAS_2014_I1282447/d02-x01-y02"] = ratio_d02y02 if("/ATLAS_2014_I1282447/d08-x01-y01_plus" in outhistos and "/ATLAS_2014_I1282447/d08-x01-y01_minus" in outhistos) : d08y01_plus = outhistos["/ATLAS_2014_I1282447/d08-x01-y01_plus"] d08y01_minus = outhistos["/ATLAS_2014_I1282447/d08-x01-y01_minus"] ratio_d08y01 = d08y01_plus/d08y01_minus ratio_d08y01.path = "/ATLAS_2014_I1282447/d08-x01-y01" del outhistos["/ATLAS_2014_I1282447/d08-x01-y01_plus"] del outhistos["/ATLAS_2014_I1282447/d08-x01-y01_minus"] outhistos["/ATLAS_2014_I1282447/d08-x01-y01"] = ratio_d08y01 if ("/ATLAS_2014_I1282447/d05-x01-y01" in outhistos and "/ATLAS_2014_I1282447/d01-x01-y02" in outhistos) : h_winc = outhistos["/ATLAS_2014_I1282447/d05-x01-y01"] h_d = outhistos["/ATLAS_2014_I1282447/d01-x01-y02"] ratio_wd = h_d/h_winc ratio_wd.path = "/ATLAS_2014_I1282447/d05-x01-y02" outhistos["/ATLAS_2014_I1282447/d05-x01-y02"] = ratio_wd if ("/ATLAS_2014_I1282447/d05-x01-y01" in outhistos and "/ATLAS_2014_I1282447/d01-x01-y03" in outhistos) : h_winc = outhistos["/ATLAS_2014_I1282447/d05-x01-y01"] h_dstar= outhistos["/ATLAS_2014_I1282447/d01-x01-y03"] ratio_wdstar = h_dstar/h_winc ratio_wdstar.path = "/ATLAS_2014_I1282447/d05-x01-y03" outhistos["/ATLAS_2014_I1282447/d05-x01-y03"] = ratio_wdstar if("/ATLAS_2014_I1282447/d06-x01-y01_winc" in outhistos and "/ATLAS_2014_I1282447/d06-x01-y02_winc" in outhistos) : h_winc_plus = outhistos["/ATLAS_2014_I1282447/d06-x01-y01_winc"] h_winc_minus = outhistos["/ATLAS_2014_I1282447/d06-x01-y02_winc"] if( "/ATLAS_2014_I1282447/d06-x01-y01_wplus" in outhistos ) : h_wd_plus = outhistos["/ATLAS_2014_I1282447/d06-x01-y01_wplus"] ratio_wd_plus = h_wd_plus/h_winc_plus ratio_wd_plus.path = "/ATLAS_2014_I1282447/d06-x01-y01" outhistos["/ATLAS_2014_I1282447/d06-x01-y01"] = ratio_wd_plus del outhistos["/ATLAS_2014_I1282447/d06-x01-y01_wplus"] if( "/ATLAS_2014_I1282447/d06-x01-y02_wminus" in outhistos ) : h_wd_minus = outhistos["/ATLAS_2014_I1282447/d06-x01-y02_wminus"] ratio_wd_minus = h_wd_minus/h_winc_minus ratio_wd_minus.path = "/ATLAS_2014_I1282447/d06-x01-y02" outhistos["/ATLAS_2014_I1282447/d06-x01-y02"] = ratio_wd_minus del outhistos["/ATLAS_2014_I1282447/d06-x01-y02_wminus"] if ( 
"/ATLAS_2014_I1282447/d06-x01-y03_wplus" in outhistos) : h_wdstar_plus = outhistos["/ATLAS_2014_I1282447/d06-x01-y03_wplus"] ratio_wdstar_plus = h_wdstar_plus/h_winc_plus ratio_wdstar_plus.path = "/ATLAS_2014_I1282447/d06-x01-y03" outhistos["/ATLAS_2014_I1282447/d06-x01-y03"] = ratio_wdstar_plus del outhistos["/ATLAS_2014_I1282447/d06-x01-y03_wplus"] if ( "/ATLAS_2014_I1282447/d06-x01-y04_wminus" in outhistos) : h_wdstar_minus = outhistos["/ATLAS_2014_I1282447/d06-x01-y04_wminus"] ratio_wdstar_minus = h_wdstar_minus/h_winc_minus ratio_wdstar_minus.path = "/ATLAS_2014_I1282447/d06-x01-y04" outhistos["/ATLAS_2014_I1282447/d06-x01-y04"] = ratio_wdstar_minus del outhistos["/ATLAS_2014_I1282447/d06-x01-y04_wminus"] del outhistos["/ATLAS_2014_I1282447/d06-x01-y01_winc"] del outhistos["/ATLAS_2014_I1282447/d06-x01-y02_winc"] mergeByMass("/ATLAS_2013_I1234228/d01-x01-y02", "7000") # Choose output file - yoda.writeYODA(outhistos,opts.OUTFILE) + name = args[0]+"-EW.yoda" + yoda.writeYODA(outhistos,name) sys.exit(0) diff --git a/Tests/python/merge-LHC-Photon b/Tests/python/merge-LHC-Photon --- a/Tests/python/merge-LHC-Photon +++ b/Tests/python/merge-LHC-Photon @@ -1,288 +1,288 @@ #! /usr/bin/env python import logging import sys import os, yoda """%prog Script for merging aida files """ def fillAbove(scale,desthisto, sourcehistosbyptmin): pthigh= 1e100 ptlow =-1e100 for pt, h in sorted(sourcehistosbyptmin.iteritems(),reverse=True): ptlow=pt if(type(desthisto)==yoda.core.Scatter2D) : for i in range(0,h.numPoints) : xMin = h.points[i].x-h.points[i].xErrs.minus if( xMin*scale >= ptlow and xMin*scale < pthigh ) : desthisto.addPoint(h.points[i]) elif(type(desthisto)==yoda.core.Profile1D) : for i in range(0,h.numBins) : if(h.bins[i].xMin*scale >= ptlow and h.bins[i].xMin*scale < pthigh ) : desthisto.bins[i] += h.bins[i] elif(type(desthisto)==yoda.core.Histo1D) : for i in range(0,h.numBins) : if(h.bins[i].xMin*scale >= ptlow and h.bins[i].xMin*scale < pthigh ) : desthisto.bins[i] += h.bins[i] else : logging.error("Can't merge %s, unknown type" % desthisto.path) sys.exit(1) pthigh=pt def mergeByPt(hpath, scale=1.): global inhistos global outhistos try: fillAbove(scale,outhistos[hpath], inhistos[hpath]) except: pass def useOnePt(hpath, ptmin): global inhistos global outhistos try: ## Find best pT_min match ptmins = inhistos[hpath].keys() closest_ptmin = None for ptm in ptmins: if closest_ptmin is None or \ abs(ptm-float(ptmin)) < abs(closest_ptmin-float(ptmin)): closest_ptmin = ptm if closest_ptmin != float(ptmin): logging.warning("Inexact match for requested pTmin=%s: " % ptmin + \ "using pTmin=%e instead" % closest_ptmin) outhistos[hpath] = inhistos[hpath][closest_ptmin] except: pass if sys.version_info[:3] < (2,4,0): print "rivet scripts require Python version >= 2.4.0... 
exiting" sys.exit(1) if __name__ == "__main__": import logging from optparse import OptionParser, OptionGroup - parser = OptionParser(usage="%prog aidafile aidafile2 [...]") - parser.add_option("-o", "--out", dest="OUTFILE", default="-") + parser = OptionParser(usage="%prog base") verbgroup = OptionGroup(parser, "Verbosity control") verbgroup.add_option("-v", "--verbose", action="store_const", const=logging.DEBUG, dest="LOGLEVEL", default=logging.INFO, help="print debug (very verbose) messages") verbgroup.add_option("-q", "--quiet", action="store_const", const=logging.WARNING, dest="LOGLEVEL", default=logging.INFO, help="be very quiet") parser.add_option_group(verbgroup) (opts, args) = parser.parse_args() logging.basicConfig(level=opts.LOGLEVEL, format="%(message)s") ## Check args if len(args) < 1: logging.error("Must specify at least the name of the files") sys.exit(1) files=["-7-PromptPhoton-1.yoda","-7-PromptPhoton-2.yoda", "-7-PromptPhoton-3.yoda","-7-PromptPhoton-4.yoda", "-8-PromptPhoton-1.yoda","-8-PromptPhoton-2.yoda", "-8-PromptPhoton-3.yoda","-8-PromptPhoton-4.yoda", "-7-DiPhoton-GammaGamma.yoda","-7-DiPhoton-GammaJet.yoda","-GammaGamma-7.yoda", "-8-DiPhoton-GammaGamma.yoda","-8-DiPhoton-GammaJet.yoda","-GammaGamma-8.yoda"] ## Get histos inhistos = {} outhistos={} for f in files: file='Rivet-'+args[0]+f if not os.access(file, os.R_OK): logging.error("%s can not be read" % file) continue try: aos = yoda.read(file) except: logging.error("%s can not be parsed as XML" % file) break if(file.find("PromptPhoton")>=0) : if(file.find("PromptPhoton-1")>0) : ptmin=0. elif(file.find("PromptPhoton-2")>0) : ptmin=35. elif(file.find("PromptPhoton-3")>0) : ptmin=90. elif(file.find("PromptPhoton-4")>0) : ptmin=170. ## Get histos from this YODA file for aopath, ao in aos.iteritems() : if not inhistos.has_key(aopath): inhistos[aopath] = {} if (aopath.find("CMS_2013_I1258128")>0) : if(aopath.find("d05")>0 or aopath.find("d06")>0 or aopath.find("d07")>0 or aopath.find("d08")>0) : inhistos[aopath][ptmin] = ao else : inhistos[aopath][ptmin] = ao else : ## Get histos from this YODA file for aopath, ao in aos.iteritems() : if(aopath.find("XSEC")>=0 or aopath.find("EVTCOUNT")>=0) : continue if ( aopath in outhistos ) : aotype = type(ao) if aotype in (yoda.Counter, yoda.Histo1D, yoda.Histo2D, yoda.Profile1D, yoda.Profile2D): outhistos[aopath] += ao else : quit() else: outhistos[aopath] = ao for hpath,hsets in inhistos.iteritems(): if( hpath.find("1263495")>0 or hpath.find("1093738")>0 or hpath.find("921594" )>0 or hpath.find("8914702")>0 or hpath.find("1244522")>0 or hpath.find("1457605")>0 or hpath.find("1632756")>0 or hpath.find("1266056")>0 ) : if(type(hsets.values()[0])==yoda.core.Scatter2D) : outhistos[hpath] = yoda.core.Scatter2D(hsets.values()[0].path, hsets.values()[0].title) elif(type(hsets.values()[0])==yoda.core.Profile1D) : outhistos[hpath] = yoda.core.Profile1D(hsets.values()[0].path, hsets.values()[0].title) for i in range(0,hsets.values()[0].numBins) : outhistos[hpath].addBin(hsets.values()[0].bins[i].xMin, hsets.values()[0].bins[i].xMax) elif(type(hsets.values()[0])==yoda.core.Histo1D) : outhistos[hpath] = yoda.core.Histo1D(hsets.values()[0].path, hsets.values()[0].title) for i in range(0,hsets.values()[0].numBins) : outhistos[hpath].addBin(hsets.values()[0].bins[i].xMin, hsets.values()[0].bins[i].xMax) else : logging.error("Histogram %s is of unknown type" % hpath) print hpath,type(hsets.values()[0]) sys.exit(1) logging.info("Processing ATLAS_2013_I1263495") 
mergeByPt("/ATLAS_2013_I1263495/d01-x01-y01") mergeByPt("/ATLAS_2013_I1263495/d01-x01-y03") useOnePt("/ATLAS_2013_I1263495/d01-x02-y01", "90" ) logging.info("Processing ATLAS_2012_I1093738") mergeByPt("/ATLAS_2012_I1093738/d01-x01-y01") mergeByPt("/ATLAS_2012_I1093738/d02-x01-y01") mergeByPt("/ATLAS_2012_I1093738/d03-x01-y01") mergeByPt("/ATLAS_2012_I1093738/d04-x01-y01") mergeByPt("/ATLAS_2012_I1093738/d05-x01-y01") mergeByPt("/ATLAS_2012_I1093738/d06-x01-y01") logging.info("Processing ATLAS_2011_I921594") mergeByPt("/ATLAS_2011_I921594/d01-x01-y01") mergeByPt("/ATLAS_2011_I921594/d01-x01-y02") mergeByPt("/ATLAS_2011_I921594/d01-x01-y04") mergeByPt("/ATLAS_2011_I921594/d01-x01-y05") logging.info("Processing ATLAS_2010_S8914702") mergeByPt("/ATLAS_2010_S8914702/d01-x01-y01") mergeByPt("/ATLAS_2010_S8914702/d01-x01-y02") mergeByPt("/ATLAS_2010_S8914702/d01-x01-y03") logging.info("Processing CMS_2013_I1258128") useOnePt("/CMS_2013_I1258128/d05-x01-y01", "35" ) useOnePt("/CMS_2013_I1258128/d06-x01-y01", "35" ) useOnePt("/CMS_2013_I1258128/d07-x01-y01", "35" ) useOnePt("/CMS_2013_I1258128/d08-x01-y01", "35" ) logging.info("Processing ATLAS_2013_I1244522") mergeByPt("/ATLAS_2013_I1244522/d01-x01-y01") mergeByPt("/ATLAS_2013_I1244522/d02-x01-y01") useOnePt("/ATLAS_2013_I1244522/d03-x01-y01", "35" ) useOnePt("/ATLAS_2013_I1244522/d04-x01-y01", "35" ) useOnePt("/ATLAS_2013_I1244522/d05-x01-y01", "35" ) useOnePt("/ATLAS_2013_I1244522/d06-x01-y01", "35" ) useOnePt("/ATLAS_2013_I1244522/d07-x01-y01", "35" ) logging.info("Processing ATLAS_2016_I1457605") mergeByPt("/ATLAS_2016_I1457605/d01-x01-y01") mergeByPt("/ATLAS_2016_I1457605/d02-x01-y01") mergeByPt("/ATLAS_2016_I1457605/d03-x01-y01") mergeByPt("/ATLAS_2016_I1457605/d04-x01-y01") logging.info("Processing ATLAS_2017_I1632756") mergeByPt("/ATLAS_2017_I1632756/d02-x01-y01") mergeByPt("/ATLAS_2017_I1632756/d03-x01-y01") mergeByPt("/ATLAS_2017_I1632756/d04-x01-y01") mergeByPt("/ATLAS_2017_I1632756/d05-x01-y01") logging.info("Processing CMS_2014_I1266056") mergeByPt("/CMS_2014_I1266056/d01-x01-y01") mergeByPt("/CMS_2014_I1266056/d01-x01-y02") mergeByPt("/CMS_2014_I1266056/d02-x01-y01") mergeByPt("/CMS_2014_I1266056/d02-x01-y02") mergeByPt("/CMS_2014_I1266056/d03-x01-y01") mergeByPt("/CMS_2014_I1266056/d03-x01-y02") mergeByPt("/CMS_2014_I1266056/d04-x01-y01") mergeByPt("/CMS_2014_I1266056/d04-x01-y02") logging.info("Processing /MC_PHOTONJETS") useOnePt("/MC_PHOTONJETS/jet_HT","0") useOnePt("/MC_PHOTONJETS/jet_eta_1","0") useOnePt("/MC_PHOTONJETS/jet_eta_2","0") useOnePt("/MC_PHOTONJETS/jet_eta_3","0") useOnePt("/MC_PHOTONJETS/jet_eta_4","0") useOnePt("/MC_PHOTONJETS/jet_eta_pmratio_1","0") useOnePt("/MC_PHOTONJETS/jet_eta_pmratio_2","0") useOnePt("/MC_PHOTONJETS/jet_eta_pmratio_3","0") useOnePt("/MC_PHOTONJETS/jet_eta_pmratio_4","0") useOnePt("/MC_PHOTONJETS/jet_mass_1","0") useOnePt("/MC_PHOTONJETS/jet_mass_2","0") useOnePt("/MC_PHOTONJETS/jet_mass_3","0") useOnePt("/MC_PHOTONJETS/jet_mass_4","0") useOnePt("/MC_PHOTONJETS/jet_multi_exclusive","0") useOnePt("/MC_PHOTONJETS/jet_multi_inclusive","0") useOnePt("/MC_PHOTONJETS/jet_multi_ratio","0") useOnePt("/MC_PHOTONJETS/jet_pT_1","0") useOnePt("/MC_PHOTONJETS/jet_pT_2","0") useOnePt("/MC_PHOTONJETS/jet_pT_3","0") useOnePt("/MC_PHOTONJETS/jet_pT_4","0") useOnePt("/MC_PHOTONJETS/jet_y_1","0") useOnePt("/MC_PHOTONJETS/jet_y_2","0") useOnePt("/MC_PHOTONJETS/jet_y_3","0") useOnePt("/MC_PHOTONJETS/jet_y_4","0") useOnePt("/MC_PHOTONJETS/jet_y_pmratio_1","0") useOnePt("/MC_PHOTONJETS/jet_y_pmratio_2","0") 
useOnePt("/MC_PHOTONJETS/jet_y_pmratio_3","0") useOnePt("/MC_PHOTONJETS/jet_y_pmratio_4","0") useOnePt("/MC_PHOTONJETS/jets_dR_12","0") useOnePt("/MC_PHOTONJETS/jets_dR_13","0") useOnePt("/MC_PHOTONJETS/jets_dR_23","0") useOnePt("/MC_PHOTONJETS/jets_deta_12","0") useOnePt("/MC_PHOTONJETS/jets_deta_13","0") useOnePt("/MC_PHOTONJETS/jets_deta_23","0") useOnePt("/MC_PHOTONJETS/jets_dphi_12","0") useOnePt("/MC_PHOTONJETS/jets_dphi_13","0") useOnePt("/MC_PHOTONJETS/jets_dphi_23","0") useOnePt("/MC_PHOTONJETS/photon_jet1_dR","0") useOnePt("/MC_PHOTONJETS/photon_jet1_deta","0") useOnePt("/MC_PHOTONJETS/photon_jet1_dphi","0") useOnePt("/MC_PHOTONJETUE/gammajet-dR","0") useOnePt("/MC_PHOTONJETUE/gammajet-dphi","0") useOnePt("/MC_PHOTONJETUE/trans-maxnchg-gamma","0") useOnePt("/MC_PHOTONJETUE/trans-maxnchg-jet","0") useOnePt("/MC_PHOTONJETUE/trans-maxptsum-gamma","0") useOnePt("/MC_PHOTONJETUE/trans-maxptsum-jet","0") useOnePt("/MC_PHOTONJETUE/trans-minnchg-gamma","0") useOnePt("/MC_PHOTONJETUE/trans-minnchg-jet","0") useOnePt("/MC_PHOTONJETUE/trans-minptsum-gamma","0") useOnePt("/MC_PHOTONJETUE/trans-minptsum-jet","0") useOnePt("/MC_PHOTONJETUE/trans-nchg-gamma","0") useOnePt("/MC_PHOTONJETUE/trans-nchg-jet","0") useOnePt("/MC_PHOTONJETUE/trans-ptavg-gamma","0") useOnePt("/MC_PHOTONJETUE/trans-ptavg-jet","0") useOnePt("/MC_PHOTONJETUE/trans-ptsum-gamma","0") useOnePt("/MC_PHOTONJETUE/trans-ptsum-jet","0") # Choose output file -yoda.writeYODA(outhistos,opts.OUTFILE) +name = args[0]+"-Photon.yoda" +yoda.writeYODA(outhistos,name) sys.exit(0) diff --git a/Tests/python/merge-TVT-EW b/Tests/python/merge-TVT-EW --- a/Tests/python/merge-TVT-EW +++ b/Tests/python/merge-TVT-EW @@ -1,69 +1,73 @@ #! /usr/bin/env python import logging import sys import os, yoda """%prog Script for merging yoda files """ import sys if sys.version_info[:3] < (2,4,0): print "rivet scripts require Python version >= 2.4.0... 
exiting" sys.exit(1) if __name__ == "__main__": import logging from optparse import OptionParser, OptionGroup - parser = OptionParser(usage="%prog aidafile aidafile2 [...]") - parser.add_option("-o", "--out", dest="OUTFILE", default="-") + parser = OptionParser(usage="%prog base") verbgroup = OptionGroup(parser, "Verbosity control") verbgroup.add_option("-v", "--verbose", action="store_const", const=logging.DEBUG, dest="LOGLEVEL", default=logging.INFO, help="print debug (very verbose) messages") verbgroup.add_option("-q", "--quiet", action="store_const", const=logging.WARNING, dest="LOGLEVEL", default=logging.INFO, help="be very quiet") parser.add_option_group(verbgroup) (opts, args) = parser.parse_args() logging.basicConfig(level=opts.LOGLEVEL, format="%(message)s") ## Check args if len(args) < 1: - logging.error("Must specify at least one AIDA histogram file") + logging.error("Must specify at least the name of the files") sys.exit(1) + + yodafiles=["-Run-II-Z-e","-Run-II-Z-mu","-Run-II-Z-LowMass-mu","-Run-II-Z-HighMass-mu","-Run-I-W","-Run-I-Z","-Run-I-WZ"] + ## Get histos outhistos={} - for file in args: + for f in yodafiles: + file='Rivet-'+args[0]+f+".yoda" if not os.access(file, os.R_OK): logging.error("%s can not be read" % file) break try: aos = yoda.read(file) except: logging.error("%s can not be parsed as yoda" % file) break ## Get histos from this YODA file for aopath, ao in aos.iteritems() : if(aopath.find("D0_2010_S8821313")>0) : if(aopath.find("d01")>0 and file.find("-e")>0) : outhistos[aopath] = ao elif(aopath.find("d02")>0 and file.find("-mu")>0) : outhistos[aopath] = ao elif(aopath.find("D0_2015_I1324946")>0) : if(file.find("LowMass")>0) : if(aopath.find("d02")>0) : outhistos[aopath] = ao elif(file.find("HighMass")>0) : if(aopath.find("d03")>0 or aopath.find("d04")>0) : outhistos[aopath] = ao else: if(aopath.find("d01")>0) : outhistos[aopath] = ao else : outhistos[aopath] = ao # output the yoda file - yoda.writeYODA(outhistos,opts.OUTFILE) + name = args[0]+"-EW.yoda" + yoda.writeYODA(outhistos,name) sys.exit(0) diff --git a/Tests/python/merge-TVT-Jets b/Tests/python/merge-TVT-Jets --- a/Tests/python/merge-TVT-Jets +++ b/Tests/python/merge-TVT-Jets @@ -1,557 +1,557 @@ #! /usr/bin/env python import logging import sys if sys.version_info[:3] < (2,4,0): print "rivet scripts require Python version >= 2.4.0... 
exiting" sys.exit(1) import os, yoda # ############################################# def fillAbove(desthisto, sourcehistosbyptmin): pthigh= 1e100 ptlow =-1e100 for pt, h in sorted(sourcehistosbyptmin.iteritems(),reverse=True): ptlow=pt if(type(desthisto)==yoda.core.Scatter2D) : for i in range(0,h.numPoints) : xMin = h.points[i].x-h.points[i].xErrs.minus if( xMin >= ptlow and xMin < pthigh ) : desthisto.addPoint(h.points[i]) elif(type(desthisto)==yoda.core.Profile1D) : for i in range(0,h.numBins) : if(h.bins[i].xMin >= ptlow and h.bins[i].xMin < pthigh ) : desthisto.bins[i] += h.bins[i] elif(type(desthisto)==yoda.core.Histo1D) : for i in range(0,h.numBins) : if(h.bins[i].xMin >= ptlow and h.bins[i].xMin < pthigh ) : desthisto.bins[i] += h.bins[i] elif(type(desthisto)==yoda.core.Counter) : desthisto += h else : logging.error("Can't merge %s, unknown type" % desthisto.path) sys.exit(1) pthigh=pt def mergeByPt(hpath, sqrts): global inhistos global outhistos try: fillAbove(outhistos[hpath], inhistos[hpath][float(sqrts)]) except: pass def useOnePt(hpath, sqrts, ptmin): global inhistos global outhistos try: ## Find best pT_min match ptmins = inhistos[hpath][float(sqrts)].keys() closest_ptmin = None for ptm in ptmins: if closest_ptmin is None or \ abs(ptm-float(ptmin)) < abs(closest_ptmin-float(ptmin)): closest_ptmin = ptm if closest_ptmin != float(ptmin): logging.warning("Inexact match for requested pTmin=%s: " % ptmin + \ "using pTmin=%e instead" % closest_ptmin) outhistos[hpath] = inhistos[hpath][float(sqrts)][closest_ptmin] except: pass # ####################################### if __name__ == "__main__": import logging from optparse import OptionParser, OptionGroup - parser = OptionParser(usage="%prog name") + parser = OptionParser(usage="%progbase") verbgroup = OptionGroup(parser, "Verbosity control") verbgroup.add_option("-v", "--verbose", action="store_const", const=logging.DEBUG, dest="LOGLEVEL", default=logging.INFO, help="print debug (very verbose) messages") verbgroup.add_option("-q", "--quiet", action="store_const", const=logging.WARNING, dest="LOGLEVEL", default=logging.INFO, help="be very quiet") parser.add_option("--with-ue", action='store_true' , dest="ue", default=True, help="Include UE analyses") parser.add_option("--without-ue", action='store_false', dest="ue", default=True, help="Don\'t include UE analyses") parser.add_option_group(verbgroup) (opts, args) = parser.parse_args() logging.basicConfig(level=opts.LOGLEVEL, format="%(message)s") ## Check args if len(args) < 1: logging.error("Must specify at least the name of the files") sys.exit(1) yodafiles=["-Run-II-Jets-0.yoda","-Run-II-Jets-1.yoda",\ "-Run-II-Jets-2.yoda",\ "-Run-II-Jets-3.yoda","-Run-II-Jets-4.yoda","-Run-II-Jets-5.yoda",\ "-Run-II-Jets-6.yoda","-Run-II-Jets-7.yoda",\ "-Run-I-Jets-1.yoda","-Run-I-Jets-2.yoda",\ "-Run-I-Jets-3.yoda","-Run-I-Jets-4.yoda","-Run-I-Jets-5.yoda",\ "-630-Jets-1.yoda" ,"-630-Jets-2.yoda" ,\ "-630-Jets-3.yoda", "-300-UE.yoda", "-900-UE.yoda" # "-RatioPlots.yoda" ] if(opts.ue) : yodafiles += ["-Run-II-UE.yoda" ,"-Run-I-UE.yoda" ,"-630-UE.yoda" ,] ## Get histos inhistos = {} outhistos={} for f in yodafiles: file='Rivet-'+args[0]+f if(file.find("Run-II-UE")>0) : sqrts=1960 ptmin=0. elif(file.find("Run-II-Jets-0")>0) : sqrts=1960 ptmin=20. elif(file.find("Run-II-Jets-1")>0) : sqrts=1960 ptmin=36. elif(file.find("Run-II-Jets-2")>0) : sqrts=1960 ptmin=55. elif(file.find("Run-II-Jets-3")>0) : sqrts=1960 ptmin=75. elif(file.find("Run-II-Jets-4")>0) : sqrts=1960 ptmin=100. 
elif(file.find("Run-II-Jets-5")>0) : sqrts=1960 ptmin=125. elif(file.find("Run-II-Jets-6")>0) : ptmin=175. sqrts=1960 elif(file.find("Run-II-Jets-7")>0) : sqrts=1960 ptmin=265. elif(file.find("300-UE")>0) : sqrts=300 ptmin=0. elif(file.find("900-UE")>0) : sqrts=900 ptmin=0. elif(file.find("630-UE")>0) : sqrts=630 ptmin=0. elif(file.find("630-Jets-1")>0) : sqrts=630 ptmin=30. elif(file.find("630-Jets-2")>0) : sqrts=630 ptmin=55. elif(file.find("630-Jets-3")>0) : sqrts=630 ptmin=90. elif(file.find("Run-I-UE")>0) : sqrts=1800 ptmin=0. elif(file.find("Run-I-Jets-1")>0) : sqrts=1800 ptmin=30. elif(file.find("Run-I-Jets-2")>0) : sqrts=1800 ptmin=55. elif(file.find("Run-I-Jets-3")>0) : sqrts=1800 ptmin=80. elif(file.find("Run-I-Jets-4")>0) : sqrts=1800 ptmin=105. elif(file.find("Run-I-Jets-5")>0) : sqrts=1800 ptmin=175. if not os.access(file, os.R_OK): logging.error("%s can not be read" % file) continue try: aos = yoda.read(file) except: logging.error("%s can not be parsed as YODA" % file) continue ## Get histos from this YODA file for aopath, ao in aos.iteritems() : # di-jet decorrelations # jet shapes if(aopath.find("5992206")>0 or aopath.find("6217184")>0 or aopath.find("LEADINGJETS")>0 or aopath.find("7662670")>0 or aopath.find("7057202")>0 or aopath.find("6450792")>0 or aopath.find("7828950")>0 or aopath.find("4751469")>0 or aopath.find("5839831")>0 or aopath.find("4563131")>0 or aopath.find("4517016")>0 or aopath.find("3618439")>0 or aopath.find("8591881")>0 or aopath.find("1388868")>0 or aopath.find("398175")>0) : if not inhistos.has_key(aopath): inhistos[aopath] = {} tmpE = inhistos[aopath] if not tmpE.has_key(sqrts): tmpE[sqrts] = {} tmpP = tmpE[sqrts] if not tmpP.has_key(ptmin): tmpP[ptmin] = ao else: raise Exception("A set with ptmin = %s already exists" % ( ptmin)) elif(aopath.find("8233977")>0 or aopath.find("NOTE_9936")>0 or aopath.find("3905616")>0 or aopath.find("3324664")>0 or aopath.find("4796047")>0 or aopath.find("1865951")>0 or aopath.find("2089246")>0 or aopath.find("3108457")>0 or aopath.find("3349578")>0 or aopath.find("3541940")>0 or aopath.find("3214044")>0 or aopath.find("2952106")>0 or aopath.find("NOTE10874")>0 or aopath.find("895662")>0 ) : if( opts.ue ) : outhistos[aopath] = ao elif (aopath.find("NOTE10874")<0) : outhistos[aopath] = ao else : if(aopath.find("/_EVTCOUNT")>=0 or aopath.find("/_XSEC" )>=0 ) : continue print aopath quit() yodafiles=["-Run-II-Jets-8.yoda","-Run-II-Jets-9.yoda","-Run-II-Jets-10.yoda","-Run-II-Jets-11.yoda",\ "-Run-I-Jets-6.yoda","-Run-I-Jets-7.yoda","-Run-I-Jets-8.yoda"] for f in yodafiles: file='Rivet-'+args[0]+f if(file.find("Run-II-Jets-8")>0) : sqrts=1960 ptmin=0.150 elif(file.find("Run-II-Jets-9")>0) : sqrts=1960 ptmin=0.400 elif(file.find("Run-II-Jets-10")>0) : sqrts=1960 ptmin=0.600 elif(file.find("Run-II-Jets-11")>0) : sqrts=1960 ptmin=1.000 elif(file.find("Run-I-Jets-6")>0) : sqrts=1800 ptmin=0.150 elif(file.find("Run-I-Jets-7")>0) : sqrts=1800 ptmin=0.5 elif(file.find("Run-I-Jets-8")>0) : sqrts=1800 ptmin=0.8 if not os.access(file, os.R_OK): logging.error("%s can not be read" % file) continue try: aos = yoda.read(file) except: logging.error("%s can not be parsed as YODA" % file) continue ## Get histos from this AIDA file for aopath, ao in aos.iteritems() : if(aopath.find("8566488")>0 or aopath.find("8320160")>0) : if not inhistos.has_key(aopath): inhistos[aopath] = {} tmpE = inhistos[aopath] if not tmpE.has_key(sqrts): tmpE[sqrts] = {} tmpP = tmpE[sqrts] if not tmpP.has_key(ptmin): tmpP[ptmin] = ao else: raise Exception("A set 
with ptmin = %s already exists" % ( ptmin)) elif(aopath.find("8093652")>0 or aopath.find("3418421")>0 or aopath.find("4266730")>0) : if not inhistos.has_key(aopath): inhistos[aopath] = {} tmpE = inhistos[aopath] if not tmpE.has_key(sqrts): tmpE[sqrts] = {} tmpP = tmpE[sqrts] if not tmpP.has_key(1000.*ptmin): tmpP[1000.*ptmin] = ao else: raise Exception("A set with ptmin = %s already exists" % ( 1000.*ptmin)) ## Make empty output histos if needed for hpath,hsets in inhistos.iteritems(): if( (hpath.find("6217184")>0 and hpath.find("d13-x01-y01")>0 ) or hpath.find("LEADINGJETS")>0 or hpath.find("7662670")>0 or hpath.find("7057202")>0 or hpath.find("6450792")>0 or hpath.find("7828950")>0 or hpath.find("8566488")>0 or hpath.find("8320160")>0 or hpath.find("8093652")>0 or hpath.find("4751469")>0 or hpath.find("5839831")>0 or hpath.find("4563131")>0 or hpath.find("4517016")>0 or hpath.find("3618439")>0 or hpath.find("4266730")>0 or hpath.find("3418421")>0 or hpath.find("8591881")>0 or hpath.find("1388868")>0) : if(type(hsets.values()[0].values()[0])==yoda.core.Counter) : outhistos[hpath] = yoda.core.Counter(hsets.values()[0].values()[0].path, hsets.values()[0].values()[0].title) elif(type(hsets.values()[0].values()[0])==yoda.core.Scatter2D) : outhistos[hpath] = yoda.core.Scatter2D(hsets.values()[0].values()[0].path, hsets.values()[0].values()[0].title) elif(type(hsets.values()[0].values()[0])==yoda.core.Profile1D) : outhistos[hpath] = yoda.core.Profile1D(hsets.values()[0].values()[0].path, hsets.values()[0].values()[0].title) for i in range(0,hsets.values()[0].values()[0].numBins) : outhistos[hpath].addBin(hsets.values()[0].values()[0].bins[i].xMin, hsets.values()[0].values()[0].bins[i].xMax) elif(type(hsets.values()[0].values()[0])==yoda.core.Histo1D) : outhistos[hpath] = yoda.core.Histo1D(hsets.values()[0].values()[0].path, hsets.values()[0].values()[0].title) for i in range(0,hsets.values()[0].values()[0].numBins) : outhistos[hpath].addBin(hsets.values()[0].values()[0].bins[i].xMin, hsets.values()[0].values()[0].bins[i].xMax) else : logging.error("Histogram %s is of unknown type" % hpath) print hpath,type(hsets.values()[0].values()[0]) sys.exit(1) ## Field analysis logging.info("Processing CDF_2001_S4751469") ## Angular distributions in different pT bins if(opts.ue) : useOnePt("/CDF_2001_S4751469/d01-x01-y01", "1800", "0") useOnePt("/CDF_2001_S4751469/d01-x01-y02", "1800", "0") useOnePt("/CDF_2001_S4751469/d02-x01-y01", "1800", "0") useOnePt("/CDF_2001_S4751469/d02-x01-y02", "1800", "0") useOnePt("/CDF_2001_S4751469/d01-x01-y03", "1800", "30") useOnePt("/CDF_2001_S4751469/d02-x01-y03", "1800", "30") ## Number, profile in pT_lead (True?) if(opts.ue) : useOnePt("/CDF_2001_S4751469/d03-x01-y01", "1800", "0") useOnePt("/CDF_2001_S4751469/d03-x01-y02", "1800", "0") useOnePt("/CDF_2001_S4751469/d03-x01-y03", "1800", "0") useOnePt("/CDF_2001_S4751469/d04-x01-y01", "1800", "30") useOnePt("/CDF_2001_S4751469/d04-x01-y02", "1800", "30") useOnePt("/CDF_2001_S4751469/d04-x01-y03", "1800", "30") ## pT sums, profile in pT_lead (True?) 
if(opts.ue) : useOnePt("/CDF_2001_S4751469/d05-x01-y01", "1800", "0") useOnePt("/CDF_2001_S4751469/d05-x01-y02", "1800", "0") useOnePt("/CDF_2001_S4751469/d05-x01-y03", "1800", "0") useOnePt("/CDF_2001_S4751469/d06-x01-y01", "1800", "30") useOnePt("/CDF_2001_S4751469/d06-x01-y02", "1800", "30") useOnePt("/CDF_2001_S4751469/d06-x01-y03", "1800", "30") ## pT distributions (use a specific pT cut run) if(opts.ue) : useOnePt("/CDF_2001_S4751469/d07-x01-y01", "1800", "0") useOnePt("/CDF_2001_S4751469/d07-x01-y02", "1800", "0") useOnePt("/CDF_2001_S4751469/d07-x01-y03", "1800", "30") ## Acosta analysis logging.info("Processing CDF_2004_S5839831") ## Mean pT, profile in ET_lead mergeByPt("/CDF_2004_S5839831/d01-x01-y01", "1800") mergeByPt("/CDF_2004_S5839831/d01-x01-y02", "1800") ## pT_max,min, profiles in ET_lead mergeByPt("/CDF_2004_S5839831/d02-x01-y01", "1800") mergeByPt("/CDF_2004_S5839831/d02-x01-y02", "1800") mergeByPt("/CDF_2004_S5839831/d02-x01-y03", "1800") ## pT distributions (want to use a specific pT cut run) useOnePt("/CDF_2004_S5839831/d03-x01-y01", "1800", "30") useOnePt("/CDF_2004_S5839831/d03-x01-y02", "1800", "80") useOnePt("/CDF_2004_S5839831/d03-x01-y03", "1800", "105") useOnePt("/CDF_2004_S5839831/d03-x01-y04", "1800", "105") useOnePt("/CDF_2004_S5839831/d03-x01-y05", "1800", "175") ## N_max,min, profiles in ET_lead mergeByPt("/CDF_2004_S5839831/d04-x01-y01", "1800") mergeByPt("/CDF_2004_S5839831/d04-x01-y02", "1800") ## Min bias dbs (want to use min bias pT cut) if(opts.ue) : useOnePt("/CDF_2004_S5839831/d05-x01-y01", "1800", "0") useOnePt("/CDF_2004_S5839831/d06-x01-y01", "1800", "0") ## Swiss Cheese, profile in ET_lead mergeByPt("/CDF_2004_S5839831/d07-x01-y01", "1800") mergeByPt("/CDF_2004_S5839831/d07-x01-y02", "1800") ## pT_max,min, profiles in ET_lead mergeByPt("/CDF_2004_S5839831/d08-x01-y01", "630") mergeByPt("/CDF_2004_S5839831/d08-x01-y02", "630") mergeByPt("/CDF_2004_S5839831/d08-x01-y03", "630") ## Swiss Cheese, profile in ET_lead mergeByPt("/CDF_2004_S5839831/d09-x01-y01", "630") mergeByPt("/CDF_2004_S5839831/d09-x01-y02", "630") ## Min bias dbs (want to use min bias pT cut) if(opts.ue) : useOnePt("/CDF_2004_S5839831/d10-x01-y01", "630", "0") useOnePt("/CDF_2004_S5839831/d11-x01-y01", "630", "0") ## CDF jet shape analysis logging.info("Processing CDF_2005_S6217184") useOnePt("/CDF_2005_S6217184/d01-x01-y01", "1960", "36" ) useOnePt("/CDF_2005_S6217184/d01-x01-y02", "1960", "36" ) useOnePt("/CDF_2005_S6217184/d01-x01-y03", "1960", "55" ) useOnePt("/CDF_2005_S6217184/d02-x01-y01", "1960", "55" ) useOnePt("/CDF_2005_S6217184/d02-x01-y02", "1960", "55" ) useOnePt("/CDF_2005_S6217184/d02-x01-y03", "1960", "75" ) useOnePt("/CDF_2005_S6217184/d03-x01-y01", "1960", "75" ) useOnePt("/CDF_2005_S6217184/d03-x01-y02", "1960", "100") useOnePt("/CDF_2005_S6217184/d03-x01-y03", "1960", "100") useOnePt("/CDF_2005_S6217184/d04-x01-y01", "1960", "125") useOnePt("/CDF_2005_S6217184/d04-x01-y02", "1960", "125") useOnePt("/CDF_2005_S6217184/d04-x01-y03", "1960", "175") useOnePt("/CDF_2005_S6217184/d05-x01-y01", "1960", "175") useOnePt("/CDF_2005_S6217184/d05-x01-y02", "1960", "175") useOnePt("/CDF_2005_S6217184/d05-x01-y03", "1960", "175") useOnePt("/CDF_2005_S6217184/d06-x01-y01", "1960", "265") useOnePt("/CDF_2005_S6217184/d06-x01-y02", "1960", "265") useOnePt("/CDF_2005_S6217184/d06-x01-y03", "1960", "265") useOnePt("/CDF_2005_S6217184/d07-x01-y01", "1960", "36" ) useOnePt("/CDF_2005_S6217184/d07-x01-y02", "1960", "36" ) useOnePt("/CDF_2005_S6217184/d07-x01-y03", "1960", "55" ) 
useOnePt("/CDF_2005_S6217184/d08-x01-y01", "1960", "55" ) useOnePt("/CDF_2005_S6217184/d08-x01-y02", "1960", "55" ) useOnePt("/CDF_2005_S6217184/d08-x01-y03", "1960", "75" ) useOnePt("/CDF_2005_S6217184/d09-x01-y01", "1960", "75" ) useOnePt("/CDF_2005_S6217184/d09-x01-y02", "1960", "100") useOnePt("/CDF_2005_S6217184/d09-x01-y03", "1960", "100") useOnePt("/CDF_2005_S6217184/d10-x01-y01", "1960", "125") useOnePt("/CDF_2005_S6217184/d10-x01-y02", "1960", "125") useOnePt("/CDF_2005_S6217184/d10-x01-y03", "1960", "175") useOnePt("/CDF_2005_S6217184/d11-x01-y01", "1960", "175") useOnePt("/CDF_2005_S6217184/d11-x01-y02", "1960", "175") useOnePt("/CDF_2005_S6217184/d11-x01-y03", "1960", "175") useOnePt("/CDF_2005_S6217184/d12-x01-y01", "1960", "265") useOnePt("/CDF_2005_S6217184/d12-x01-y02", "1960", "265") useOnePt("/CDF_2005_S6217184/d12-x01-y03", "1960", "265") mergeByPt("/CDF_2005_S6217184/d13-x01-y01", "1960") # ## CDF dijet mass spectrum mergeByPt("/CDF_2008_S8093652/d01-x01-y01", "1960") # ## Rick Field Run-II Leading Jets analysis # logging.info("Processing CDF_2008_LEADINGJETS") # ## charged particle density # mergeByPt("/CDF_2008_LEADINGJETS/d01-x01-y01", "1960") # mergeByPt("/CDF_2008_LEADINGJETS/d02-x01-y01", "1960") # mergeByPt("/CDF_2008_LEADINGJETS/d03-x01-y01", "1960") # mergeByPt("/CDF_2008_LEADINGJETS/d04-x01-y01", "1960") # ## pT sum density # mergeByPt("/CDF_2008_LEADINGJETS/d05-x01-y01", "1960") # mergeByPt("/CDF_2008_LEADINGJETS/d06-x01-y01", "1960") # mergeByPt("/CDF_2008_LEADINGJETS/d07-x01-y01", "1960") # mergeByPt("/CDF_2008_LEADINGJETS/d08-x01-y01", "1960") # ## mean pT # mergeByPt("/CDF_2008_LEADINGJETS/d09-x01-y01", "1960") ## newer version logging.info("Processing CDF_2010_S8591881_QCD") mergeByPt("/CDF_2010_S8591881_QCD/d10-x01-y01", "1960") mergeByPt("/CDF_2010_S8591881_QCD/d10-x01-y02", "1960") mergeByPt("/CDF_2010_S8591881_QCD/d10-x01-y03", "1960") mergeByPt("/CDF_2010_S8591881_QCD/d11-x01-y01", "1960") mergeByPt("/CDF_2010_S8591881_QCD/d11-x01-y02", "1960") mergeByPt("/CDF_2010_S8591881_QCD/d11-x01-y03", "1960") mergeByPt("/CDF_2010_S8591881_QCD/d12-x01-y01", "1960") mergeByPt("/CDF_2010_S8591881_QCD/d12-x01-y02", "1960") mergeByPt("/CDF_2010_S8591881_QCD/d12-x01-y03", "1960") mergeByPt("/CDF_2010_S8591881_QCD/d13-x01-y01", "1960") mergeByPt("/CDF_2010_S8591881_QCD/d13-x01-y02", "1960") mergeByPt("/CDF_2010_S8591881_QCD/d13-x01-y03", "1960") mergeByPt("/CDF_2010_S8591881_QCD/d14-x01-y01", "1960") mergeByPt("/CDF_2010_S8591881_QCD/d15-x01-y01", "1960") ## D0 dijet correlation analysis logging.info("Processing D0_2004_S5992206") useOnePt("/D0_2004_S5992206/d01-x02-y01", "1960", "75") useOnePt("/D0_2004_S5992206/d02-x02-y01", "1960", "100") useOnePt("/D0_2004_S5992206/d03-x02-y01", "1960", "125") useOnePt("/D0_2004_S5992206/d04-x02-y01", "1960", "175") ## D0 incl jet cross-section analysis logging.info("Processing D0_2008_S7662670") mergeByPt("/D0_2008_S7662670/d01-x01-y01", "1960") mergeByPt("/D0_2008_S7662670/d02-x01-y01", "1960") mergeByPt("/D0_2008_S7662670/d03-x01-y01", "1960") mergeByPt("/D0_2008_S7662670/d04-x01-y01", "1960") mergeByPt("/D0_2008_S7662670/d05-x01-y01", "1960") mergeByPt("/D0_2008_S7662670/d06-x01-y01", "1960") mergeByPt("/D0_2010_S8566488/d01-x01-y01", "1960") mergeByPt("/D0_2010_S8566488/d02-x01-y01", "1960") mergeByPt("/D0_2010_S8566488/d03-x01-y01", "1960") mergeByPt("/D0_2010_S8566488/d04-x01-y01", "1960") mergeByPt("/D0_2010_S8566488/d05-x01-y01", "1960") mergeByPt("/D0_2010_S8566488/d06-x01-y01", "1960") # CDF jet cross section 
mergeByPt("/CDF_2001_S4563131/d01-x01-y01", "1800") mergeByPt("/CDF_2001_S4517016/d01-x01-y01", "1800") mergeByPt("/CDF_2001_S4517016/d02-x01-y01", "1800") mergeByPt("/CDF_2001_S4517016/d03-x01-y01", "1800") mergeByPt("/CDF_2001_S4517016/d04-x01-y01", "1800") useOnePt("/CDF_1998_S3618439/d01-x01-y01", "1800","105") useOnePt("/CDF_1998_S3618439/d01-x01-y02", "1800","105") mergeByPt("/CDF_2008_S7828950/d01-x01-y01", "1960") mergeByPt("/CDF_2008_S7828950/d02-x01-y01", "1960") mergeByPt("/CDF_2008_S7828950/d03-x01-y01", "1960") mergeByPt("/CDF_2008_S7828950/d04-x01-y01", "1960") mergeByPt("/CDF_2008_S7828950/d05-x01-y01", "1960") mergeByPt("/CDF_2007_S7057202/d01-x01-y01", "1960") mergeByPt("/CDF_2007_S7057202/d02-x01-y01", "1960") mergeByPt("/CDF_2007_S7057202/d03-x01-y01", "1960") mergeByPt("/CDF_2007_S7057202/d04-x01-y01", "1960") mergeByPt("/CDF_2007_S7057202/d05-x01-y01", "1960") mergeByPt("/CDF_2007_S7057202/d06-x01-y01", "1960") mergeByPt("/CDF_2007_S7057202/d07-x01-y01", "1960") mergeByPt("/CDF_2006_S6450792/d01-x01-y01", "1960") mergeByPt("/CDF_2000_S4266730/d01-x01-y01", "1800") useOnePt("/CDF_1996_S3418421/d01-x01-y01","1800","150") useOnePt("/CDF_1996_S3418421/d01-x01-y02","1800","150") useOnePt("/CDF_1996_S3418421/d01-x01-y03","1800","150") useOnePt("/CDF_1996_S3418421/d01-x01-y04","1800","500") useOnePt("/CDF_1996_S3418421/d01-x01-y05","1800","500") mergeByPt("/CDF_1996_S3418421/d02-x01-y01","1800") useOnePt("/D0_2009_S8320160/d01-x01-y01", "1960", "0.15" ) useOnePt("/D0_2009_S8320160/d02-x01-y01", "1960", "0.15" ) useOnePt("/D0_2009_S8320160/d03-x01-y01", "1960", "0.4" ) useOnePt("/D0_2009_S8320160/d04-x01-y01", "1960", "0.4" ) useOnePt("/D0_2009_S8320160/d05-x01-y01", "1960", "0.6" ) useOnePt("/D0_2009_S8320160/d06-x01-y01", "1960", "0.6" ) useOnePt("/D0_2009_S8320160/d07-x01-y01", "1960", "0.6" ) useOnePt("/D0_2009_S8320160/d08-x01-y01", "1960", "0.6" ) useOnePt("/D0_2009_S8320160/d09-x01-y01", "1960", "1.0" ) useOnePt("/D0_2009_S8320160/d10-x01-y01", "1960", "1.0" ) logging.info("Processing CDF_2015_I1388868") for d in range(1,4) : if d == 1 : energy="1960" elif d ==2 : energy = "900" elif d==3 : energy = "300" for y in [1,2,3,4,6,7,8,9]: useOnePt("/CDF_2015_I1388868/d0%s-x01-y0%s" % (d,y) , energy, "0" ) # D0 jet shape logging.info("Processing D0_1995_I398175") useOnePt("/D0_1995_I398175/d01-x01-y01", "1800", "30" ) useOnePt("/D0_1995_I398175/d02-x01-y01", "1800", "55" ) useOnePt("/D0_1995_I398175/d03-x01-y01", "1800", "105" ) useOnePt("/D0_1995_I398175/d04-x01-y01", "1800", "105" ) useOnePt("/D0_1995_I398175/d05-x01-y01", "1800", "30" ) useOnePt("/D0_1995_I398175/d06-x01-y01", "1800", "55" ) # Choose output file name = args[0]+"-Jets.yoda" yoda.writeYODA(outhistos,name) sys.exit(0) diff --git a/Tests/python/merge-TVT-Photon b/Tests/python/merge-TVT-Photon --- a/Tests/python/merge-TVT-Photon +++ b/Tests/python/merge-TVT-Photon @@ -1,65 +1,65 @@ #! /usr/bin/env python import logging import sys import os, yoda """%prog Script for merging aida files """ if sys.version_info[:3] < (2,4,0): print "rivet scripts require Python version >= 2.4.0... 
exiting" sys.exit(1) if __name__ == "__main__": import logging from optparse import OptionParser, OptionGroup - parser = OptionParser(usage="%prog aidafile aidafile2 [...]") - parser.add_option("-o", "--out", dest="OUTFILE", default="-") + parser = OptionParser(usage="%prog base") verbgroup = OptionGroup(parser, "Verbosity control") verbgroup.add_option("-v", "--verbose", action="store_const", const=logging.DEBUG, dest="LOGLEVEL", default=logging.INFO, help="print debug (very verbose) messages") verbgroup.add_option("-q", "--quiet", action="store_const", const=logging.WARNING, dest="LOGLEVEL", default=logging.INFO, help="be very quiet") parser.add_option_group(verbgroup) (opts, args) = parser.parse_args() logging.basicConfig(level=opts.LOGLEVEL, format="%(message)s") ## Check args if len(args) < 1: logging.error("Must specify at least the name of the files") sys.exit(1) files=["-Run-II-PromptPhoton.yoda", "-Run-II-DiPhoton-GammaGamma.yoda","-Run-II-DiPhoton-GammaJet.yoda"] ## Get histos inhistos = {} outhistos={} for f in files: file='Rivet-'+args[0]+f if not os.access(file, os.R_OK): logging.error("%s can not be read" % file) continue try: aos = yoda.read(file) except: logging.error("%s can not be parsed as XML" % file) break ## Get histos from this YODA file for aopath, ao in aos.iteritems() : if(aopath.find("XSEC")>=0 or aopath.find("EVTCOUNT")>=0) : continue if ( aopath in outhistos ) : aotype = type(ao) if aotype in (yoda.Counter, yoda.Histo1D, yoda.Histo2D, yoda.Profile1D, yoda.Profile2D): outhistos[aopath] += ao else : quit() else: outhistos[aopath] = ao # Choose output file -yoda.writeYODA(outhistos,opts.OUTFILE) +name = args[0]+"-Photon.yoda" +yoda.writeYODA(outhistos,name) sys.exit(0) diff --git a/lib/Makefile.am b/lib/Makefile.am --- a/lib/Makefile.am +++ b/lib/Makefile.am @@ -1,49 +1,49 @@ pkglib_LTLIBRARIES = Herwig.la Herwig_la_SOURCES = Herwig_la_LIBTOOLFLAGS = --tag=CXX -Herwig_la_LDFLAGS = $(AM_LDFLAGS) -module -version-info 24:0:0 +Herwig_la_LDFLAGS = $(AM_LDFLAGS) -module -version-info 25:0:0 Herwig_la_LDFLAGS += $(THEPEGLDFLAGS) $(BOOST_SYSTEM_LDFLAGS) $(BOOST_FILESYSTEM_LDFLAGS) $(FCLIBS) Herwig_la_LIBADD = \ $(top_builddir)/Hadronization/libHwHadronization.la \ $(top_builddir)/Models/libHwStandardModel.la \ $(top_builddir)/Decay/libHwDecay.la \ $(top_builddir)/Decay/libHwFormFactor.la \ $(top_builddir)/Decay/libHwDecRad.la \ $(top_builddir)/Utilities/libHwUtils.la \ $(top_builddir)/Models/libHwModelGenerator.la \ $(top_builddir)/Decay/General/libHwGeneralDecay.la \ $(top_builddir)/MatrixElement/General/libHwGeneralME.la \ $(top_builddir)/MatrixElement/libHwME.la \ $(top_builddir)/MatrixElement/Reweighters/libHwReweighters.la \ $(top_builddir)/MatrixElement/Matchbox/libHwMatchbox.la \ $(top_builddir)/Decay/libHwWeakCurrent.la \ $(top_builddir)/Looptools/libHwLooptools.la \ $(top_builddir)/Shower/libHwShower.la \ $(THEPEGLIB) -ldl dist_noinst_SCRIPTS = fix-osx-path POSTPROCESSING = done-all-links if NEED_APPLE_FIXES POSTPROCESSING += apple-fixes endif all-local: $(POSTPROCESSING) done-all-links: Herwig.la find $(top_builddir) \( -name '*.so.*' -or -name '*.so' \) \ -not -name 'lib*' -not -path '$(top_builddir)/lib/*' \ -not -path '$(top_builddir)/.hg/*' -exec $(LN_S) -f \{\} \; $(LN_S) -f .libs/Herwig*so* . 
echo "stamp" > $@ apple-fixes: fix-osx-path done-all-links ./$< echo "stamp" > $@ clean-local: rm -f *.so *.so.* done-all-links apple-fixes diff --git a/src/DIS.in b/src/DIS.in --- a/src/DIS.in +++ b/src/DIS.in @@ -1,75 +1,77 @@ # -*- ThePEG-repository -*- ################################################## # Example generator based on DIS parameters # usage: Herwig read DIS.in ################################################## read snippets/EPCollider.in +# for fixed target +# read snippets/FixedTarget.in ################################################## # Technical parameters for this run ################################################## cd /Herwig/Generators # no pdfs for leptons set /Herwig/Shower/ShowerHandler:PDFB /Herwig/Partons/ShowerLOPDF set /Herwig/Partons/MPIExtractor:SecondPDF /Herwig/Partons/MPIPDF set /Herwig/Partons/EPExtractor:SecondPDF /Herwig/Partons/HardNLOPDF ################################################## # DIS physics parameters (override defaults here) ################################################## ################################################## # Matrix Elements for lepton-hadron collisions # (by default only neutral-current switched on) ################################################## cd /Herwig/MatrixElements/ # Neutral current DIS insert SubProcess:MatrixElements[0] MEDISNC # Charged current matrix element insert SubProcess:MatrixElements[0] MEDISCC ################################################## # NLO IN POWHEG SCHEME ################################################## ################################################### ## Need to use an NLO PDF ################################################### # set /Herwig/Particles/p+:PDF /Herwig/Partons/HardNLOPDF # set /Herwig/Particles/pbar-:PDF /Herwig/Partons/HardNLOPDF # set /Herwig/Partons/EPExtractor:SecondPDF /Herwig/Partons/HardNLOPDF # ################################################### ## Setup the POWHEG shower ################################################### #cd /Herwig/Shower #set ShowerHandler:HardEmission POWHEG ################################################### ## NLO Matrix Elements for lepton-hadron collisions ## in the POWHEG approach ################################################### # #cd /Herwig/MatrixElements/ # ## Neutral current DIS #insert SubProcess:MatrixElements[0] PowhegMEDISNC ## Charged current matrix element #insert SubProcess:MatrixElements[0] PowhegMEDISCC ################################################## ## prepare for Rivet analysis or HepMC output ## when running with parton shower ################################################## #read snippets/Rivet.in #insert /Herwig/Analysis/Rivet:Analyses 0 XXX_2015_ABC123 #read snippets/HepMC.in #set /Herwig/Analysis/HepMC:PrintEvent NNN ################################################## # Save run for later usage with 'Herwig run' ################################################## cd /Herwig/Generators saverun DIS EventGenerator diff --git a/src/snippets/FixedTarget.in b/src/snippets/FixedTarget.in new file mode 100644 --- /dev/null +++ b/src/snippets/FixedTarget.in @@ -0,0 +1,7 @@ +# -*- ThePEG-repository -*- +cd /Herwig/EventHandlers +create ThePEG::FixedTargetLuminosity FixedTargetLuminosity FixedTargetLuminosity.so +set FixedTargetLuminosity:BeamParticle /Herwig/Particles/e- +set FixedTargetLuminosity:BeamEMaxA 200.*GeV +set FixedTargetLuminosity:TargetParticle /Herwig/Particles/p+ +set EventHandler:LuminosityFunction FixedTargetLuminosity