diff --git a/CepGen/Cards/PythonHandler.cpp b/CepGen/Cards/PythonHandler.cpp index ad168d3..7b00b54 100644 --- a/CepGen/Cards/PythonHandler.cpp +++ b/CepGen/Cards/PythonHandler.cpp @@ -1,464 +1,467 @@ #include "PythonHandler.h" #include "CepGen/Core/Exception.h" #ifdef PYTHON +#include "CepGen/Core/TamingFunction.h" + #include "CepGen/Processes/GamGamLL.h" #include "CepGen/Processes/PPtoLL.h" #include "CepGen/Processes/PPtoWW.h" #include "CepGen/Hadronisers/Pythia8Hadroniser.h" #include <algorithm> #ifdef PYTHIA8 void feedPythia( CepGen::Hadroniser::Pythia8Hadroniser* py8, PyObject* hadr, const char* config ) { PyObject* ppc = CepGen::Cards::PythonHandler::getElement( hadr, config ); if ( !ppc ) return; if ( !PyTuple_Check( ppc ) ) { Py_DECREF( ppc ); return; } for ( Py_ssize_t i = 0; i < PyTuple_Size( ppc ); ++i ) { PyObject* pln = PyTuple_GetItem( ppc, i ); std::string config = CepGen::Cards::PythonHandler::decode( pln ); Py_DECREF( pln ); py8->readString( config ); } Py_DECREF( ppc ); } #endif namespace CepGen { namespace Cards { //----- specialization for CepGen input cards PythonHandler::PythonHandler( const char* file ) { setenv( "PYTHONPATH", ".:..", 1 ); std::string filename = getPythonPath( file ); const size_t fn_len = filename.length()+1; #ifdef PYTHON2 char* sfilename = new char[fn_len]; sprintf( sfilename, "%s", filename.c_str() ); #else wchar_t* sfilename = new wchar_t[fn_len]; swprintf( sfilename, fn_len, L"%s", filename.c_str() ); #endif Py_SetProgramName( sfilename ); Py_InitializeEx( 0 ); if ( !Py_IsInitialized() ) throw Exception( __PRETTY_FUNCTION__, "Failed to initialise the python parser!", FatalError ); Debugging( Form( "Initialised the Python cards parser\n\t" "Python version: %s\n\t" "Platform: %s", Py_GetVersion(), Py_GetPlatform() ) ); PyObject* fn = encode( filename.c_str() ); if ( !fn ) - throwPythonError( Form( "Failed to encode the configuration filename %s", filename.c_str() ).c_str() ); + throwPythonError( Form( "Failed to encode the configuration filename %s", filename.c_str() ) ); PyObject* cfg = PyImport_Import( fn ); Py_DECREF( fn ); if ( !cfg ) - throwPythonError( Form( "Failed to parse the configuration card %s", file ).c_str(), FatalError ); + throwPythonError( Form( "Failed to parse the configuration card %s", file ) ); PyObject* process = PyObject_GetAttrString( cfg, "process" ); if ( !process ) - throwPythonError( Form( "Failed to extract a \"process\" keyword from the configuration card %s", file ).c_str(), FatalError ); + throwPythonError( Form( "Failed to extract a \"process\" keyword from the configuration card %s", file ) ); //--- type of process to consider PyObject* pproc_name = PyDict_GetItem( process, encode( module_name_ ) ); if ( !pproc_name ) - throwPythonError( Form( "Failed to extract the process name from the configuration card %s", file ).c_str(), FatalError ); + throwPythonError( Form( "Failed to extract the process name from the configuration card %s", file ) ); const std::string proc_name = decode( pproc_name ); Py_DECREF( pproc_name ); if ( proc_name == "lpair" ) params_.setProcess( new Process::GamGamLL ); else if ( proc_name == "pptoll" ) params_.setProcess( new Process::PPtoLL ); else if ( proc_name == "pptoww" ) params_.setProcess( new Process::PPtoWW ); else FatalError( Form( "Unrecognised process: %s", proc_name.c_str() ) ); //--- process mode getParameter( process, "mode", (int&)params_.kinematics.mode ); //--- process kinematics PyObject* pin_kinematics = getElement( process, "inKinematics" ); if ( pin_kinematics ) { 
parseIncomingKinematics( pin_kinematics ); Py_DECREF( pin_kinematics ); } PyObject* pout_kinematics = getElement( process, "outKinematics" ); if ( pout_kinematics ) { parseOutgoingKinematics( pout_kinematics ); Py_DECREF( pout_kinematics ); } Py_DECREF( process ); //--- hadroniser parameters PyObject* phad = PyObject_GetAttrString( cfg, "hadroniser" ); if ( phad ) { parseHadroniser( phad ); Py_DECREF( phad ); } //--- generation parameters PyObject* pint = PyObject_GetAttrString( cfg, "integrator" ); if ( pint ) { parseIntegrator( pint ); Py_DECREF( pint ); } PyObject* pgen = PyObject_GetAttrString( cfg, "generator" ); if ( pgen ) { parseGenerator( pgen ); Py_DECREF( pgen ); } //--- taming functions PyObject* ptam = PyObject_GetAttrString( cfg, "tamingFunctions" ); if ( ptam ) { parseTamingFunctions( ptam ); Py_DECREF( ptam ); } Py_DECREF( cfg ); #ifdef PYTHON2 Py_Finalize(); #else if ( !Py_IsInitialized() ) throw Exception( __PRETTY_FUNCTION__, "Failed to unregister the python parser!", FatalError ); if ( Py_FinalizeEx() != 0 ) throw Exception( __PRETTY_FUNCTION__, "Failed to unregister the python parser!", FatalError ); #endif if ( sfilename ) delete [] sfilename; } void PythonHandler::parseIncomingKinematics( PyObject* kin ) { PyObject* ppz = getElement( kin, "pz" ); if ( ppz ) { if ( PyTuple_Check( ppz ) && PyTuple_Size( ppz ) == 2 ) { double pz0 = PyFloat_AsDouble( PyTuple_GetItem( ppz, 0 ) ); double pz1 = PyFloat_AsDouble( PyTuple_GetItem( ppz, 1 ) ); params_.kinematics.inp = { pz0, pz1 }; } Py_DECREF( ppz ); } double sqrt_s = -1.; getParameter( kin, "cmEnergy", sqrt_s ); if ( sqrt_s != -1. ) params_.kinematics.setSqrtS( sqrt_s ); getParameter( kin, "structureFunctions", (int&)params_.kinematics.structure_functions ); } void PythonHandler::parseOutgoingKinematics( PyObject* kin ) { PyObject* ppair = getElement( kin, "pair" ); if ( ppair && isInteger( ppair ) ) { ParticleCode pair = (ParticleCode)asInteger( ppair ); params_.kinematics.central_system = { pair, pair }; Py_DECREF( ppair ); } else if ( ppair && PyTuple_Check( ppair ) ) { if ( PyTuple_Size( ppair ) != 2 ) FatalError( "Invalid value for outKinematics.pair!"
); ParticleCode pair1 = (ParticleCode)asInteger( PyTuple_GetItem( ppair, 0 ) ); ParticleCode pair2 = (ParticleCode)asInteger( PyTuple_GetItem( ppair, 1 ) ); params_.kinematics.central_system = { pair1, pair2 }; Py_DECREF( ppair ); } PyObject* pcuts = getElement( kin, "cuts" ); if ( pcuts && PyDict_Check( pcuts ) ) parseParticlesCuts( pcuts ); // for LPAIR/collinear matrix elements getLimits( kin, "q2", params_.kinematics.cuts.initial[Cuts::q2] ); // for the kT factorised matrix elements getLimits( kin, "qt", params_.kinematics.cuts.initial[Cuts::qt] ); getLimits( kin, "ptdiff", params_.kinematics.cuts.central[Cuts::pt_diff] ); getLimits( kin, "rapiditydiff", params_.kinematics.cuts.central[Cuts::rapidity_diff] ); getLimits( kin, "mx", params_.kinematics.cuts.remnants[Cuts::mass] ); } void PythonHandler::parseParticlesCuts( PyObject* cuts ) { PyObject* pkey = nullptr, *pvalue = nullptr; Py_ssize_t pos = 0; while ( PyDict_Next( cuts, &pos, &pkey, &pvalue ) ) { ParticleCode pdg = (ParticleCode)asInteger( pkey ); getLimits( pvalue, "pt", params_.kinematics.cuts.central_particles[pdg][Cuts::pt_single] ); getLimits( pvalue, "energy", params_.kinematics.cuts.central_particles[pdg][Cuts::energy_single] ); getLimits( pvalue, "eta", params_.kinematics.cuts.central_particles[pdg][Cuts::eta_single] ); getLimits( pvalue, "rapidity", params_.kinematics.cuts.central_particles[pdg][Cuts::rapidity_single] ); } } void PythonHandler::parseIntegrator( PyObject* integr ) { if ( !PyDict_Check( integr ) ) throwPythonError( "Integrator object should be a dictionary!" ); PyObject* palgo = getElement( integr, module_name_ ); if ( !palgo ) throwPythonError( "Failed to retrieve the integration algorithm name!" ); std::string algo = decode( palgo ); Py_DECREF( palgo ); if ( algo == "Plain" ) params_.integrator.type = Integrator::Plain; else if ( algo == "Vegas" ) { params_.integrator.type = Integrator::Vegas; getParameter( integr, "alpha", (double&)params_.integrator.vegas.alpha ); getParameter( integr, "iterations", (unsigned long&)params_.integrator.vegas.iterations ); getParameter( integr, "mode", (int&)params_.integrator.vegas.mode ); getParameter( integr, "verbosity", (int&)params_.integrator.vegas.verbose ); } else if ( algo == "MISER" ) { params_.integrator.type = Integrator::MISER; getParameter( integr, "estimateFraction", (double&)params_.integrator.miser.estimate_frac ); getParameter( integr, "minCalls", (unsigned long&)params_.integrator.miser.min_calls ); getParameter( integr, "minCallsPerBisection", (unsigned long&)params_.integrator.miser.min_calls_per_bisection ); getParameter( integr, "alpha", (double&)params_.integrator.miser.alpha ); getParameter( integr, "dither", (double&)params_.integrator.miser.dither ); } else - throwPythonError( Form( "Invalid integration algorithm: %s", algo.c_str() ).c_str() ); + throwPythonError( Form( "Invalid integration algorithm: %s", algo.c_str() ) ); getParameter( integr, "numPoints", (int&)params_.integrator.npoints ); getParameter( integr, "numFunctionCalls", (int&)params_.integrator.ncvg ); getParameter( integr, "seed", (unsigned long&)params_.integrator.seed ); } void PythonHandler::parseGenerator( PyObject* gen ) { if ( !PyDict_Check( gen ) ) throwPythonError( "Generation information object should be a dictionary!" 
); params_.generation.enabled = true; getParameter( gen, "numEvents", (int&)params_.generation.maxgen ); getParameter( gen, "printEvery", (int&)params_.generation.gen_print_every ); } void PythonHandler::parseTamingFunctions( PyObject* tf ) { if ( !PyList_Check( tf ) ) throwPythonError( "Taming functions list should be a list!" ); for ( Py_ssize_t i = 0; i < PyList_Size( tf ); ++i ) { PyObject* pit = PyList_GetItem( tf, i ); if ( !PyDict_Check( pit ) ) - throwPythonError( Form( "Item %d is invalid", i ).c_str() ); - PyObject* pvar = getElement( pit, "variable" ), *pexpr = getElement( pit, "expression" ); - params_.taming_functions.add( decode( pvar ), decode( pexpr ) ); + throwPythonError( Form( "Item %d is invalid", (int)i ) ); + PyObject* pvar = getElement( pit, "variable" ); + PyObject* pexpr = getElement( pit, "expression" ); + if ( !params_.taming_functions ) // lazily allocate the collection on first use + params_.taming_functions.reset( new TamingFunctionsCollection ); + params_.taming_functions->add( decode( pvar ), decode( pexpr ) ); Py_DECREF( pvar ); Py_DECREF( pexpr ); } } void PythonHandler::parseHadroniser( PyObject* hadr ) { if ( !PyDict_Check( hadr ) ) throwPythonError( "Hadroniser object should be a dictionary!" ); PyObject* pname = getElement( hadr, module_name_ ); if ( !pname ) throwPythonError( "Hadroniser name is required!" ); std::string hadr_name = decode( pname ); Py_DECREF( pname ); if ( hadr_name == "pythia8" ) { #ifdef PYTHIA8 Hadroniser::Pythia8Hadroniser* pythia8 = new Hadroniser::Pythia8Hadroniser( params_ ); PyObject* pseed = getElement( hadr, "seed" ); long long seed = -1ll; if ( pseed ) { if ( isInteger( pseed ) ) seed = PyLong_AsLongLong( pseed ); Py_DECREF( pseed ); } pythia8->setSeed( seed ); feedPythia( pythia8, hadr, "pythiaPreConfiguration" ); pythia8->init(); feedPythia( pythia8, hadr, "pythiaConfiguration" ); feedPythia( pythia8, hadr, "pythiaProcessConfiguration" ); params_.setHadroniser( pythia8 ); #else InWarning( "Pythia8 is not linked to this instance... " "Ignoring this part of the configuration file." ) #endif } } //------------------------------------------------------------------ // Python API helpers //------------------------------------------------------------------ std::string PythonHandler::getPythonPath( const char* file ) { std::string s_filename = file; s_filename = s_filename.substr( 0, s_filename.find_last_of( "." ) ); std::replace( s_filename.begin(), s_filename.end(), '/', '.' ); return s_filename; } void - PythonHandler::throwPythonError( const char* message, const ExceptionType& type ) + PythonHandler::throwPythonError( const std::string& message, const ExceptionType& type ) { PyObject* ptype = nullptr, *pvalue = nullptr, *ptraceback = nullptr; PyErr_Fetch( &ptype, &pvalue, &ptraceback ); PyErr_Clear(); PyErr_NormalizeException( &ptype, &pvalue, &ptraceback ); std::ostringstream oss; oss << message; if ( ptype == nullptr ) { Py_Finalize(); throw Exception( __PRETTY_FUNCTION__, oss.str().c_str(), type ); } oss << "\n\tError: " #ifdef PYTHON2 << PyString_AsString( PyObject_Str( pvalue ) ); // deprecated in python v3+ #else << _PyUnicode_AsString( PyObject_Str( pvalue ) ); #endif Py_Finalize(); throw Exception( __PRETTY_FUNCTION__, oss.str().c_str(), type ); } const char* PythonHandler::decode( PyObject* obj ) { #ifdef PYTHON2 const char* str = PyString_AsString( obj ); // deprecated in python v3+ #else const char* str = _PyUnicode_AsString( obj ); #endif if ( !str ) throwPythonError( "Failed to decode a Python object!"
); return str; } PyObject* PythonHandler::encode( const char* str ) { PyObject* obj = PyUnicode_FromString( str ); if ( !obj ) - throwPythonError( Form( "Failed to encode the following string:\n\t%s", str ).c_str() ); + throwPythonError( Form( "Failed to encode the following string:\n\t%s", str ) ); return obj; } PyObject* PythonHandler::getElement( PyObject* obj, const char* key ) { PyObject* nink = encode( key ); PyObject* pout = PyDict_GetItem( obj, nink ); Py_DECREF( nink ); return pout; } void PythonHandler::getLimits( PyObject* obj, const char* key, Kinematics::Limits& lim ) { PyObject* pobj = getElement( obj, key ); if ( !pobj ) return; if ( !PyTuple_Check( pobj ) ) FatalError( Form( "Invalid value retrieved for %s", key ) ); if ( PyTuple_Size( pobj ) < 1 ) FatalError( Form( "Invalid number of values unpacked for %s!", key ) ); double min = PyFloat_AsDouble( PyTuple_GetItem( pobj, 0 ) ); lim.min() = min; if ( PyTuple_Size( pobj ) > 1 ) { double max = PyFloat_AsDouble( PyTuple_GetItem( pobj, 1 ) ); if ( max != -1 ) lim.max() = max; } Py_DECREF( pobj ); } void PythonHandler::getParameter( PyObject* parent, const char* key, int& out ) { PyObject* pobj = getElement( parent, key ); if ( !pobj ) return; #ifdef PYTHON2 if ( !PyInt_Check( pobj ) ) - throwPythonError( Form( "Object \"%s\" has invalid type", key ).c_str() ); + throwPythonError( Form( "Object \"%s\" has invalid type", key ) ); out = _PyInt_AsInt( pobj ); #else if ( !PyLong_Check( pobj ) ) - throwPythonError( Form( "Object \"%s\" has invalid type", key ).c_str() ); + throwPythonError( Form( "Object \"%s\" has invalid type", key ) ); out = PyLong_AsLong( pobj ); #endif Py_DECREF( pobj ); } void PythonHandler::getParameter( PyObject* parent, const char* key, unsigned long& out ) { PyObject* pobj = getElement( parent, key ); if ( !pobj ) return; if ( !PyLong_Check( pobj ) ) - throwPythonError( Form( "Object \"%s\" has invalid type", key ).c_str() ); + throwPythonError( Form( "Object \"%s\" has invalid type", key ) ); out = PyLong_AsUnsignedLong( pobj ); Py_DECREF( pobj ); } void PythonHandler::getParameter( PyObject* parent, const char* key, double& out ) { PyObject* pobj = getElement( parent, key ); if ( !pobj ) return; if ( !PyFloat_Check( pobj ) ) - throwPythonError( Form( "Object \"%s\" has invalid type", key ).c_str() ); + throwPythonError( Form( "Object \"%s\" has invalid type", key ) ); out = PyFloat_AsDouble( pobj ); Py_DECREF( pobj ); } bool PythonHandler::isInteger( PyObject* obj ) { #ifdef PYTHON2 return PyInt_Check( obj ); #else return PyLong_Check( obj ); #endif } int PythonHandler::asInteger( PyObject* obj ) { #ifdef PYTHON2 return _PyInt_AsInt( obj ); #else return PyLong_AsLong( obj ); #endif } } } #endif diff --git a/CepGen/Cards/PythonHandler.h b/CepGen/Cards/PythonHandler.h index 3d8d80e..140f00c 100644 --- a/CepGen/Cards/PythonHandler.h +++ b/CepGen/Cards/PythonHandler.h @@ -1,50 +1,51 @@ #ifndef CepGen_Cards_PythonHandler_h #define CepGen_Cards_PythonHandler_h #ifdef PYTHON #include <Python.h> +#include "CepGen/Core/Exception.h" #include "Handler.h" namespace CepGen { namespace Cards { /// CepGen Python configuration cards reader/writer class PythonHandler : public Handler { public: /// Read a standard configuration card explicit PythonHandler( const char* file ); ~PythonHandler() {} static PyObject* getElement( PyObject* obj, const char* key ); static const char* decode( PyObject* obj ); static PyObject* encode( const char* str ); private: static constexpr const char* module_name_ = "mod_name"; - static void 
throwPythonError( const char* message, const ExceptionType& type = FatalError ); + static void throwPythonError( const std::string& message, const ExceptionType& type = FatalError ); static std::string getPythonPath( const char* file ); static bool isInteger( PyObject* obj ); static int asInteger( PyObject* obj ); void getLimits( PyObject* obj, const char* key, Kinematics::Limits& lim ); void getParameter( PyObject* parent, const char* key, int& out ); void getParameter( PyObject* parent, const char* key, unsigned long& out ); void getParameter( PyObject* parent, const char* key, double& out ); void parseIncomingKinematics( PyObject* ); void parseOutgoingKinematics( PyObject* ); void parseParticlesCuts( PyObject* ); void parseIntegrator( PyObject* ); void parseGenerator( PyObject* ); void parseTamingFunctions( PyObject* ); void parseHadroniser( PyObject* ); }; } } #endif #endif diff --git a/CepGen/Core/Integrand.cpp b/CepGen/Core/Integrand.cpp index 44ee8d1..d167723 100644 --- a/CepGen/Core/Integrand.cpp +++ b/CepGen/Core/Integrand.cpp @@ -1,173 +1,180 @@ #include "CepGen/Core/Timer.h" #include "CepGen/Core/Exception.h" +#include "CepGen/Core/TamingFunction.h" #include "CepGen/Event/Event.h" #include "CepGen/Event/Particle.h" #include "CepGen/Physics/Kinematics.h" + #include "CepGen/Processes/GenericProcess.h" +#include "CepGen/Hadronisers/GenericHadroniser.h" + #include "CepGen/Parameters.h" #include <sstream> #include <fstream> namespace CepGen { double f( double* x, size_t ndim, void* params ) { std::ostringstream os; Parameters* p = static_cast<Parameters*>( params ); std::shared_ptr<Event> ev = p->process()->event(); if ( p->process()->hasEvent() ) { p->process()->clearEvent(); const Particle::Momentum p1( 0., 0., p->kinematics.inp.first ), p2( 0., 0., -p->kinematics.inp.second ); p->process()->setIncomingKinematics( p1, p2 ); // at some point introduce non head-on colliding beams? 
DebuggingInsideLoop( Form( "Function f called -- some parameters:\n\t" " pz(p1) = %5.2f pz(p2) = %5.2f\n\t" " remnant mode: %d", p->kinematics.inp.first, p->kinematics.inp.second, p->kinematics.structure_functions ) ); if ( p->integrator.first_run ) { if ( Logger::get().level >= Logger::Debug ) { std::ostringstream oss; oss << p->kinematics.mode; Debugging( Form( "Process mode considered: %s", oss.str().c_str() ) ); } Particle& op1 = ev->getOneByRole( Particle::OutgoingBeam1 ), &op2 = ev->getOneByRole( Particle::OutgoingBeam2 ); //--- add outgoing protons or remnants switch ( p->kinematics.mode ) { case Kinematics::ElectronProton: default: { InError( Form( "Process mode %d not yet handled!", (int)p->kinematics.mode ) ); } case Kinematics::ElasticElastic: break; // nothing to change in the event case Kinematics::ElasticInelastic: op2.setPdgId( DiffrProt, 1 ); break; case Kinematics::InelasticElastic: // set one of the outgoing protons to be fragmented op1.setPdgId( DiffrProt, 1 ); break; case Kinematics::InelasticInelastic: // set both the outgoing protons to be fragmented op1.setPdgId( DiffrProt, 1 ); op2.setPdgId( DiffrProt, 1 ); break; } //--- prepare the function to be integrated p->process()->prepareKinematics(); //--- add central system Particles& central_system = ev->getByRole( Particle::CentralSystem ); if ( central_system.size() == p->kinematics.central_system.size() ) { unsigned short i = 0; for ( Particles::iterator part = central_system.begin(); part != central_system.end(); ++part ) { if ( p->kinematics.central_system[i] == invalidParticle ) continue; part->setPdgId( p->kinematics.central_system[i] ); part->computeMass(); i++; } } p->process()->clearRun(); p->integrator.first_run = false; } } // event is not empty p->process()->setPoint( ndim, x ); if ( Logger::get().level >= Logger::DebugInsideLoop ) { std::ostringstream oss; for ( unsigned int i = 0; i < ndim; ++i ) { oss << x[i] << " "; } DebuggingInsideLoop( Form( "Computing dim-%d point ( %s)", ndim, oss.str().c_str() ) ); } //--- from this step on, the phase space point is supposed to be set by 'GenericProcess::setPoint()' p->process()->beforeComputeWeight(); Timer tmr; // start the timer double integrand = p->process()->computeWeight(); if ( integrand <= 0. 
) return 0.; //--- fill in the process' Event object p->process()->fillKinematics(); //--- once the kinematics variables have been populated, // can apply the collection of taming functions - double taming = 1.0; - if ( p->taming_functions.has( "m_central" ) || p->taming_functions.has( "pt_central" ) ) { - Particle::Momentum central_system; - const Particles& cm_parts = ev->getByRole( Particle::CentralSystem ); - for ( Particles::const_iterator it_p = cm_parts.begin(); it_p != cm_parts.end(); ++it_p ) - central_system += it_p->momentum(); - taming *= p->taming_functions.eval( "m_central", central_system.mass() ); - taming *= p->taming_functions.eval( "pt_central", central_system.pt() ); - } - if ( p->taming_functions.has( "q2" ) ) { - taming *= p->taming_functions.eval( "q2", -ev->getOneByRole( Particle::Parton1 ).momentum().mass() ); - taming *= p->taming_functions.eval( "q2", -ev->getOneByRole( Particle::Parton2 ).momentum().mass() ); + if ( p->taming_functions ) { + if ( p->taming_functions->has( "m_central" ) + || p->taming_functions->has( "pt_central" ) ) { + Particle::Momentum central_system; + const Particles& cm_parts = ev->getByRole( Particle::CentralSystem ); + for ( Particles::const_iterator it_p = cm_parts.begin(); it_p != cm_parts.end(); ++it_p ) + central_system += it_p->momentum(); + if ( p->taming_functions->has( "m_central" ) ) + integrand *= p->taming_functions->eval( "m_central", central_system.mass() ); + if ( p->taming_functions->has( "pt_central" ) ) + integrand *= p->taming_functions->eval( "pt_central", central_system.pt() ); + } + if ( p->taming_functions->has( "q2" ) ) { + integrand *= p->taming_functions->eval( "q2", -ev->getOneByRole( Particle::Parton1 ).momentum().mass() ); + integrand *= p->taming_functions->eval( "q2", -ev->getOneByRole( Particle::Parton2 ).momentum().mass() ); + } } - integrand *= taming; //--- full event content (+ hadronisation) if generating events if ( p->hadroniser() ) { double br = 0.; // branching fraction for all decays if ( !p->hadroniser()->hadronise( *ev, br, true ) ) //if ( !p->hadroniser()->hadronise( *ev, br, p->storage() ) ) return 0.; integrand *= br; } //--- apply cuts on final state system (after hadronisation!) // (watch out your cuts, as this might be extremely time-consuming...) 
if ( p->kinematics.cuts.central_particles.size() > 0 ) { std::map<ParticleCode,std::map<Cuts::Central,Kinematics::Limits> >::const_iterator it_c; const Particles cs = ev->getByRole( Particle::CentralSystem ); for ( Particles::const_iterator it_p = cs.begin(); it_p != cs.end(); ++it_p ) { it_c = p->kinematics.cuts.central_particles.find( it_p->pdgId() ); if ( it_c == p->kinematics.cuts.central_particles.end() ) continue; // retrieve all cuts associated to this final state particle const std::map<Cuts::Central,Kinematics::Limits>& cm = it_c->second; // apply these cuts on the given particle if ( cm.count( Cuts::pt_single ) > 0 && !cm.at( Cuts::pt_single ).passes( it_p->momentum().pt() ) ) return 0.; //std::cout << it_c->first << "\t" << it_p->momentum().pt() << "\t" << cm.at( Cuts::pt_single ).passes( it_p->momentum().pt() ) << std::endl; if ( cm.count( Cuts::energy_single ) > 0 && !cm.at( Cuts::energy_single ).passes( it_p->momentum().energy() ) ) return 0.; if ( cm.count( Cuts::eta_single ) > 0 && !cm.at( Cuts::eta_single ).passes( it_p->momentum().eta() ) ) return 0.; if ( cm.count( Cuts::rapidity_single ) > 0 && !cm.at( Cuts::rapidity_single ).passes( it_p->momentum().rapidity() ) ) return 0.; } } if ( p->storage() ) { /* std::ofstream dump( "dump.txt", std::ios_base::app ); for ( unsigned short i = 0; i < ndim; ++i ) dump << x[i] << "\t"; dump << "\n";*/ ev->time_generation = tmr.elapsed(); ev->time_total = tmr.elapsed(); p->process()->addGenerationTime( ev->time_total ); Debugging( Form( "Generation time: %5.6f sec\n\t" "Total time (gen+hadr): %5.6f sec", ev->time_generation, ev->time_total ) ); p->generation.last_event = ev; } // generating events if ( Logger::get().level >= Logger::DebugInsideLoop ) { os.str( "" ); for ( unsigned int i = 0; i < ndim; ++i ) { os << Form( "%10.8f ", x[i] ); } Debugging( Form( "f value for dim-%d point ( %s): %4.4e", ndim, os.str().c_str(), integrand ) ); } return integrand; } } diff --git a/CepGen/Core/Integrator.cpp b/CepGen/Core/Integrator.cpp index 15f76ff..cc57e07 100644 --- a/CepGen/Core/Integrator.cpp +++ b/CepGen/Core/Integrator.cpp @@ -1,356 +1,361 @@ #include "Integrator.h" + #include "CepGen/Parameters.h" + +#include "CepGen/Core/utils.h" #include "CepGen/Core/Exception.h" + #include "CepGen/Event/Event.h" #include <fstream> +#include <math.h> namespace CepGen { Integrator::Integrator( const unsigned int dim, double f_( double*, size_t, void* ), Parameters* param ) : ps_bin_( 0 ), input_params_( param ), function_( std::unique_ptr<gsl_monte_function>( new gsl_monte_function ) ) { //--- function to be integrated function_->f = f_; function_->dim = dim; function_->params = (void*)param; //--- initialise the random number generator gsl_rng_env_setup(); rng_ = gsl_rng_alloc( gsl_rng_default ); unsigned long seed = ( param->integrator.seed > 0 ) ? 
param->integrator.seed : time( nullptr ); // seed with time gsl_rng_set( rng_, seed ); input_params_->integrator.vegas.ostream = stderr; // redirect all debugging information to the error stream input_params_->integrator.vegas.iterations = 10; Debugging( Form( "Number of integration dimensions: %d\n\t" "Number of iterations [VEGAS]: %d\n\t" "Number of function calls: %d", dim, input_params_->integrator.vegas.iterations, input_params_->integrator.ncvg ) ); } Integrator::~Integrator() { if ( rng_ ) gsl_rng_free( rng_ ); } int Integrator::integrate( double& result, double& abserr ) { int res = -1; gsl_monte_plain_state* pln_state; gsl_monte_vegas_state* veg_state; gsl_monte_miser_state* mis_state; const Integrator::Type algorithm = input_params_->integrator.type; //--- integration bounds std::vector<double> x_low( function_->dim, 0. ), x_up( function_->dim, 1. ); //--- prepare integrator if ( algorithm == Plain ) pln_state = gsl_monte_plain_alloc( function_->dim ); else if ( algorithm == Vegas ) veg_state = gsl_monte_vegas_alloc( function_->dim ); else if ( algorithm == MISER ) mis_state = gsl_monte_miser_alloc( function_->dim ); if ( algorithm == Plain ) res = gsl_monte_plain_integrate( function_.get(), &x_low[0], &x_up[0], function_->dim, input_params_->integrator.ncvg, rng_, pln_state, &result, &abserr ); else if ( algorithm == Vegas ) { gsl_monte_vegas_params_set( veg_state, &input_params_->integrator.vegas ); //----- Vegas warmup (prepare the grid) if ( !grid_.grid_prepared ) { res = gsl_monte_vegas_integrate( function_.get(), &x_low[0], &x_up[0], function_->dim, 25000, rng_, veg_state, &result, &abserr ); grid_.grid_prepared = true; } Information( "Finished the Vegas warm-up" ); //----- integration unsigned short it_chisq = 0; do { res = gsl_monte_vegas_integrate( function_.get(), &x_low[0], &x_up[0], function_->dim, 0.2 * input_params_->integrator.ncvg, rng_, veg_state, &result, &abserr ); PrintMessage( Form( "\t>> at call %2d: average = %10.6f " "sigma = %10.6f chi2 = %4.3f", it_chisq+1, result, abserr, gsl_monte_vegas_chisq( veg_state ) ) ); it_chisq++; } while ( fabs( gsl_monte_vegas_chisq( veg_state )-1. ) > 0.5 ); } //----- integration else if ( algorithm == MISER ) { gsl_monte_miser_params_set( mis_state, &input_params_->integrator.miser ); res = gsl_monte_miser_integrate( function_.get(), &x_low[0], &x_up[0], function_->dim, input_params_->integrator.ncvg, rng_, mis_state, &result, &abserr ); } //--- clean integrator if ( algorithm == Plain ) gsl_monte_plain_free( pln_state ); else if ( algorithm == Vegas ) gsl_monte_vegas_free( veg_state ); else if ( algorithm == MISER ) gsl_monte_miser_free( mis_state ); return res; } bool Integrator::generateOneEvent() { if ( !grid_.gen_prepared ) setGen(); const unsigned int max = pow( mbin_, function_->dim ); std::vector<double> x( function_->dim, 0. ); //--- correction cycles if ( ps_bin_ != 0 ) { bool has_correction = false; while ( !correctionCycle( x, has_correction ) ) {} if ( has_correction ) return storeEvent( x ); } double weight = 0., y = -1.; //--- normal generation cycle //----- select a Integrator bin and reject if fmax is too small do { do { // ... 
ps_bin_ = uniform() * max; y = uniform() * grid_.f_max_global; grid_.nm[ps_bin_] += 1; } while ( y > grid_.f_max[ps_bin_] ); // Select x values in this Integrator bin int jj = ps_bin_; for ( unsigned int i = 0; i < function_->dim; ++i ) { int jjj = jj / mbin_; grid_.n[i] = jj - jjj * mbin_; x[i] = ( uniform() + grid_.n[i] ) / mbin_; jj = jjj; } // Get weight for selected x value weight = F( x ); if ( weight <= 0. ) continue; } while ( y > weight ); if ( weight <= grid_.f_max[ps_bin_] ) ps_bin_ = 0; // Init correction cycle if weight is higher than fmax or ffmax else if ( weight <= grid_.f_max_global ) { grid_.f_max_old = grid_.f_max[ps_bin_]; grid_.f_max[ps_bin_] = weight; grid_.f_max_diff = weight-grid_.f_max_old; grid_.correc = ( grid_.nm[ps_bin_] - 1. ) * grid_.f_max_diff / grid_.f_max_global - 1.; } else { grid_.f_max_old = grid_.f_max[ps_bin_]; grid_.f_max[ps_bin_] = weight; grid_.f_max_diff = weight-grid_.f_max_old; grid_.f_max_global = weight; grid_.correc = ( grid_.nm[ps_bin_] - 1. ) * grid_.f_max_diff / grid_.f_max_global * weight / grid_.f_max_global - 1.; } Debugging( Form( "Correction applied: %f, phase space bin = %d", grid_.correc, ps_bin_ ) ); // Return with an accepted event if ( weight > 0. ) return storeEvent( x ); return false; } bool Integrator::correctionCycle( std::vector<double>& x, bool& has_correction ) { Debugging( Form( "Correction cycles are started.\n\t" "bin = %d" "correc = %g" "corre2 = %g", ps_bin_, grid_.correc, grid_.correc2 ) ); if ( grid_.correc >= 1. ) grid_.correc -= 1.; if ( uniform() < grid_.correc ) { grid_.correc = -1.; std::vector<double> xtmp( function_->dim, 0. ); // Select x values in phase space bin for ( unsigned int k = 0; k < function_->dim; ++k ) xtmp[k] = ( uniform() + grid_.n[k] ) * inv_mbin_; // Compute weight for x value const double weight = F( xtmp ); // Parameter for correction of correction if ( weight > grid_.f_max[ps_bin_] ) { if ( weight > grid_.f_max2 ) grid_.f_max2 = weight; grid_.correc2 -= 1.; grid_.correc += 1.; } // Accept event if ( weight >= grid_.f_max_diff*uniform() + grid_.f_max_old ) { // FIXME!!!! //Error("Accepting event!!!"); //return storeEvent(x); x = xtmp; has_correction = true; return true; } return false; } // Correction if too big weight is found while correction // (All your bases are belong to us...) if ( grid_.f_max2 > grid_.f_max[ps_bin_] ) { grid_.f_max_old = grid_.f_max[ps_bin_]; grid_.f_max[ps_bin_] = grid_.f_max2; grid_.f_max_diff = grid_.f_max2-grid_.f_max_old; const double correc_tmp = ( grid_.nm[ps_bin_] - 1. ) * grid_.f_max_diff / grid_.f_max_global; if ( grid_.f_max2 < grid_.f_max_global ) grid_.correc = correc_tmp - grid_.correc2; else { grid_.f_max_global = grid_.f_max2; grid_.correc = correc_tmp * grid_.f_max2 / grid_.f_max_global - grid_.correc2; } grid_.correc2 = 0.; grid_.f_max2 = 0.; return false; } return true; } bool Integrator::storeEvent( const std::vector<double>& x ) { input_params_->setStorage( true ); double weight = 0.; unsigned short i = 0; do { weight = F( x ); i++; } while ( weight <= 0. && i < 10 ); if ( weight <= 0. 
) return false; input_params_->generation.ngen += 1; input_params_->setStorage( false ); if ( input_params_->generation.ngen % input_params_->generation.gen_print_every == 0 ) { Debugging( Form( "Generated events: %d", input_params_->generation.ngen ) ); input_params_->generation.last_event->dump(); } return true; } void Integrator::setGen() { Information( Form( "Preparing the grid for the generation of unweighted events: %d points", input_params_->integrator.npoints ) ); // Variables for debugging std::ostringstream os; if ( Logger::get().level >= Logger::Debug ) Debugging( Form( "Maximum weight = %d", input_params_->generation.maxgen ) ); const unsigned int max = pow( mbin_, function_->dim ); const double inv_npoin = 1./input_params_->integrator.npoints; if ( function_->dim > max_dimensions_ ) FatalError( Form( "Number of dimensions to integrate exceed the maximum number, %d", max_dimensions_ ) ); grid_.nm = std::vector<int>( max, 0 ); grid_.f_max = std::vector<double>( max, 0. ); grid_.n = std::vector<int>( function_->dim, 0 ); std::vector<double> x( function_->dim, 0. ); input_params_->generation.ngen = 0; // ... double sum = 0., sum2 = 0., sum2p = 0.; //--- main loop for ( unsigned int i = 0; i < max; ++i ) { int jj = i; for ( unsigned int j = 0; j < function_->dim; ++j ) { int jjj = jj*inv_mbin_; grid_.n[j] = jj-jjj*mbin_; jj = jjj; } double fsum = 0., fsum2 = 0.; for ( unsigned int j = 0; j < input_params_->integrator.npoints; ++j ) { for ( unsigned int k = 0; k < function_->dim; ++k ) x[k] = ( uniform() + grid_.n[k] ) * inv_mbin_; const double z = F( x ); grid_.f_max[i] = std::max( grid_.f_max[i], z ); fsum += z; fsum2 += z*z; } const double av = fsum*inv_npoin, av2 = fsum2*inv_npoin, sig2 = av2 - av*av; sum += av; sum2 += av2; sum2p += sig2; grid_.f_max_global = std::max( grid_.f_max_global, grid_.f_max[i] ); if ( Logger::get().level >= Logger::DebugInsideLoop ) { const double sig = sqrt( sig2 ); const double eff = ( grid_.f_max[i] != 0. ) ? grid_.f_max[i]/av : 1.e4; os.str(""); for ( unsigned int j = 0; j < function_->dim; ++j ) { os << grid_.n[j]; if ( j != function_->dim-1 ) os << ", "; } DebuggingInsideLoop( Form( "In iteration #%d:\n\t" "av = %f\n\t" "sig = %f\n\t" "fmax = %f\n\t" "eff = %f\n\t" "n = (%s)", i, av, sig, grid_.f_max[i], eff, os.str().c_str() ) ); } } // end of main loop sum = sum/max; sum2 = sum2/max; sum2p = sum2p/max; if ( Logger::get().level >= Logger::Debug ) { const double sig = sqrt( sum2-sum*sum ), sigp = sqrt( sum2p ); double eff1 = 0.; for ( unsigned int i = 0; i < max; ++i ) eff1 += ( grid_.f_max[i] / ( max*sum ) ); const double eff2 = grid_.f_max_global/sum; Debugging( Form( "Average function value = sum = %f\n\t" "Average function value**2 = sum2 = %f\n\t" "Overall standard deviation = sig = %f\n\t" "Average standard deviation = sigp = %f\n\t" "Maximum function value = ffmax = %f\n\t" "Average inefficiency = eff1 = %f\n\t" "Overall inefficiency = eff2 = %f\n\t", sum, sum2, sig, sigp, grid_.f_max_global, eff1, eff2 ) ); } grid_.gen_prepared = true; Information( "Grid prepared! Now launching the production." 
); } std::ostream& operator<<( std::ostream& os, const Integrator::Type& type ) { switch ( type ) { case Integrator::Plain: return os << "Plain"; case Integrator::Vegas: return os << "Vegas"; case Integrator::MISER: return os << "MISER"; } return os; } } diff --git a/CepGen/Core/Parameters.cpp b/CepGen/Core/Parameters.cpp index 3e37528..f7da5b2 100644 --- a/CepGen/Core/Parameters.cpp +++ b/CepGen/Core/Parameters.cpp @@ -1,163 +1,193 @@ #include "CepGen/Parameters.h" + #include "CepGen/Core/Exception.h" +#include "CepGen/Core/TamingFunction.h" + #include "CepGen/Processes/GenericProcess.h" +#include "CepGen/Hadronisers/GenericHadroniser.h" namespace CepGen { Parameters::Parameters() : hadroniser_max_trials( 5 ), store_( false ) {} Parameters::Parameters( Parameters& param ) : kinematics( param.kinematics ), integrator( param.integrator ), generation( param.generation ), hadroniser_max_trials( param.hadroniser_max_trials ), - taming_functions( param.taming_functions ), + taming_functions( std::move( param.taming_functions ) ), process_( std::move( param.process_ ) ), hadroniser_( std::move( param.hadroniser_ ) ), store_( param.store_ ) {} Parameters::Parameters( const Parameters& param ) : kinematics( param.kinematics ), integrator( param.integrator ), generation( param.generation ), hadroniser_max_trials( param.hadroniser_max_trials ), - taming_functions( param.taming_functions ), store_( param.store_ ) {} + Parameters::~Parameters() + {} + void Parameters::setThetaRange( float thetamin, float thetamax ) { kinematics.cuts.central[Cuts::eta_single].in( Particle::thetaToEta( thetamax ), Particle::thetaToEta( thetamin ) ); if ( Logger::get().level >= Logger::Debug ) { std::ostringstream os; os << kinematics.cuts.central[Cuts::eta_single]; Debugging( Form( "eta in range: %s => theta(min) = %g, theta(max) = %g", os.str().c_str(), thetamin, thetamax ) ); } } + Process::GenericProcess* + Parameters::process() + { + return process_.get(); + } + std::string Parameters::processName() const { if ( process_ ) return process_->name(); return "no process"; } void + Parameters::setProcess( Process::GenericProcess* proc ) + { + process_.reset( proc ); + } + + Hadroniser::GenericHadroniser* + Parameters::hadroniser() + { + return hadroniser_.get(); + } + + void + Parameters::setHadroniser( Hadroniser::GenericHadroniser* hadr ) + { + hadroniser_.reset( hadr ); + } + + void Parameters::dump( std::ostream& out, bool pretty ) const { std::ostringstream os; const int wb = 90, wt = 40; os.str( "" ); os << "Parameters dump" << std::left << std::endl << std::endl << std::setfill('_') << std::setw( wb+3 ) << "_/¯¯RUN¯INFORMATION¯¯\\_" << std::setfill( ' ' ) << std::endl << std::right << std::setw( wb ) << std::left << std::endl << std::setw( wt ) << "Process to generate"; if ( process_ ) { os << ( pretty ? boldify( process_->name().c_str() ) : process_->name() ) << std::endl << std::setw( wt ) << "" << process_->description(); } else os << ( pretty ? boldify( "no process!" ) : "no process!" ); os << std::endl << std::setw( wt ) << "Events generation? " << ( pretty ? yesno( generation.enabled ) : std::to_string( generation.enabled ) ) << std::endl << std::setw( wt ) << "Number of events to generate" << ( pretty ? boldify( generation.maxgen ) : std::to_string( generation.maxgen ) ) << std::endl << std::setw( wt ) << "Verbosity level " << Logger::get().level << std::endl; if ( hadroniser_ ) { os << std::endl << std::setfill( '-' ) << std::setw( wb+6 ) << ( pretty ? 
boldify( " Hadronisation algorithm " ) : "Hadronisation algorithm" ) << std::setfill( ' ' ) << std::endl << std::endl << std::setw( wt ) << "Name" << ( pretty ? boldify( hadroniser_->name().c_str() ) : hadroniser_->name() ) << std::endl; } os << std::endl << std::setfill( '-' ) << std::setw( wb+6 ) << ( pretty ? boldify( " Integration parameters " ) : "Integration parameters" ) << std::setfill( ' ' ) << std::endl << std::endl; std::ostringstream int_algo; int_algo << integrator.type; os << std::setw( wt ) << "Integration algorithm" << ( pretty ? boldify( int_algo.str().c_str() ) : int_algo.str() ) << std::endl //<< std::setw( wt ) << "Maximum number of iterations" << ( pretty ? boldify( integrator.itvg ) : std::to_string( integrator.itvg ) ) << std::endl << std::setw( wt ) << "Number of function calls" << integrator.ncvg << std::endl << std::setw( wt ) << "Number of points to try per bin" << integrator.npoints << std::endl << std::setw( wt ) << "Random number generator seed" << integrator.seed << std::endl << std::endl << std::setfill('_') << std::setw( wb+3 ) << "_/¯¯EVENTS¯KINEMATICS¯¯\\_" << std::setfill( ' ' ) << std::endl << std::endl << std::setfill( '-' ) << std::setw( wb+6 ) << ( pretty ? boldify( " Incoming particles " ) : "Incoming particles" ) << std::setfill( ' ' ) << std::endl << std::endl; std::ostringstream proc_mode; proc_mode << kinematics.mode; std::ostringstream ip1, ip2, op; ip1 << kinematics.inpdg.first; ip2 << kinematics.inpdg.second; for ( std::vector<ParticleCode>::const_iterator cp = kinematics.central_system.begin(); cp != kinematics.central_system.end(); ++cp ) op << ( cp != kinematics.central_system.begin() ? ", " : "" ) << *cp; std::ostringstream q2range; q2range << kinematics.cuts.initial.at( Cuts::q2 ); os << std::setw( wt ) << "Subprocess mode" << ( pretty ? boldify( proc_mode.str().c_str() ) : proc_mode.str() ) << std::endl << std::setw( wt ) << "Incoming particles" << ( pretty ? boldify( ip1.str().c_str() ) : ip1.str() ) << ", " << ( pretty ? boldify( ip2.str().c_str() ) : ip2.str() ) << std::endl << std::setw( wt ) << "Momenta (GeV/c)" << kinematics.inp.first << ", " << kinematics.inp.second << std::endl << std::setw( wt ) << "Structure functions" << kinematics.structure_functions << std::endl << std::endl << std::setfill( '-' ) << std::setw( wb+6 ) << ( pretty ? boldify( " Incoming partons " ) : "Incoming partons" ) << std::setfill( ' ' ) << std::endl << std::endl; if ( kinematics.cuts.central.size() > 0 ) { for ( std::map<Cuts::InitialState,Kinematics::Limits>::const_iterator lim = kinematics.cuts.initial.begin(); lim != kinematics.cuts.initial.end(); ++lim ) { if ( !lim->second.valid() ) continue; os << std::setw( wt ) << lim->first << lim->second << std::endl; } } os << std::endl << std::setfill( '-' ) << std::setw( wb+6 ) << ( pretty ? boldify( " Outgoing central system " ) : "Outgoing central system" ) << std::setfill( ' ' ) << std::endl << std::endl << std::setw( wt ) << "Central particles" << ( pretty ? boldify( op.str().c_str() ) : op.str() ) << std::endl; if ( kinematics.cuts.central.size() > 0 ) { for ( std::map<Cuts::Central,Kinematics::Limits>::const_iterator lim = kinematics.cuts.central.begin(); lim != kinematics.cuts.central.end(); ++lim ) { if ( !lim->second.valid() ) continue; os << std::setw( wt ) << lim->first << lim->second << std::endl; } } if ( kinematics.cuts.central_particles.size() > 0 ) { os << std::setw( wt ) << ( pretty ? 
boldify( ">>> per-particle cuts:" ) : ">>> per-particle cuts:" ) << std::endl; for ( std::map<ParticleCode,std::map<Cuts::Central,Kinematics::Limits> >::const_iterator part_lim = kinematics.cuts.central_particles.begin(); part_lim != kinematics.cuts.central_particles.end(); ++part_lim ) { os << " * " << std::setw( wt-3 ) << part_lim->first << std::endl; for ( std::map<Cuts::Central,Kinematics::Limits>::const_iterator lim = part_lim->second.begin(); lim != part_lim->second.end(); ++lim ) { if ( !lim->second.valid() ) continue; os << " - " << std::setw( wt-5 ) << lim->first << lim->second << std::endl; } } } os << std::endl; os << std::setfill( '-' ) << std::setw( wb+6 ) << ( pretty ? boldify( " Proton / remnants " ) : "Proton / remnants" ) << std::setfill( ' ' ) << std::endl; os << std::endl; for ( std::map<Cuts::Remnants,Kinematics::Limits>::const_iterator lim = kinematics.cuts.remnants.begin(); lim != kinematics.cuts.remnants.end(); ++lim ) { os << std::setw( wt ) << lim->first << lim->second << std::endl; } if ( pretty ) { Information( os.str() ); } else out << os.str(); } Parameters::IntegratorParameters::IntegratorParameters() : type( Integrator::Vegas ), ncvg( 500000 ), npoints( 100 ), first_run( true ), seed( 0 ) { const size_t ndof = 10; gsl_monte_vegas_state* veg_state = gsl_monte_vegas_alloc( ndof ); gsl_monte_vegas_params_get( veg_state, &vegas ); gsl_monte_vegas_free( veg_state ); vegas.ostream = stderr; // redirect all debugging information to the error stream gsl_monte_miser_state* mis_state = gsl_monte_miser_alloc( ndof ); gsl_monte_miser_params_get( mis_state, &miser ); gsl_monte_miser_free( mis_state ); } Parameters::Generation::Generation() : enabled( false ), maxgen( 0 ), symmetrise( false ), ngen( 0 ), gen_print_every( 10000 ) {} } diff --git a/CepGen/Parameters.h b/CepGen/Parameters.h index cf546ab..9b3247d 100644 --- a/CepGen/Parameters.h +++ b/CepGen/Parameters.h @@ -1,118 +1,115 @@ #ifndef CepGen_Parameters_h #define CepGen_Parameters_h -#include "CepGen/Core/TamingFunction.h" #include "CepGen/Core/Integrator.h" #include "CepGen/Physics/Kinematics.h" -#include "CepGen/Processes/GenericProcess.h" -#include "CepGen/Hadronisers/GenericHadroniser.h" - -#include <gsl/gsl_monte_vegas.h> -#include <gsl/gsl_monte_miser.h> #include <memory> namespace CepGen { class Event; + class TamingFunctionsCollection; + namespace Process { class GenericProcess; } + namespace Hadroniser { class GenericHadroniser; } /// List of parameters used to start and run the simulation job class Parameters { public: Parameters(); /// Copy constructor (transfers ownership to the process/hadroniser!) Parameters( Parameters& ); /// Const copy constructor (all but the process and the hadroniser) Parameters( const Parameters& ); - ~Parameters() {} + ~Parameters(); // required for unique_ptr initialisation! 
/// Set the polar angle range for the produced leptons /// \param[in] thetamin The minimal value of \f$\theta\f$ for the outgoing leptons /// \param[in] thetamax The maximal value of \f$\theta\f$ for the outgoing leptons void setThetaRange( float thetamin, float thetamax ); /// Dump the input parameters in the console void dump( std::ostream& os = Logger::get().outputStream, bool pretty = true ) const; //----- process to compute /// Process for which the cross-section will be computed and the events will be generated - Process::GenericProcess* process() { return process_.get(); } + Process::GenericProcess* process(); /// Name of the process considered std::string processName() const; /// Set the process to study - void setProcess( Process::GenericProcess* proc ) { process_.reset( proc ); } + void setProcess( Process::GenericProcess* proc ); //----- events kinematics /// Events kinematics for phase space definition Kinematics kinematics; //----- VEGAS /// Collection of integrator parameters struct IntegratorParameters { IntegratorParameters(); Integrator::Type type; /// Number of function calls to be computed for each point unsigned int ncvg; // ?? /// Number of points to "shoot" in each integration bin by the algorithm unsigned int npoints; /// Is it the first time the integrator is run? bool first_run; /// Random number generator seed unsigned long seed; gsl_monte_vegas_params vegas; gsl_monte_miser_params miser; }; /// Integrator parameters IntegratorParameters integrator; //----- events generation /// Collection of events generation parameters struct Generation { Generation(); /// Are we generating events ? (true) or are we only computing the cross-section ? (false) bool enabled; /// Maximal number of events to generate in this run unsigned int maxgen; /// Pointer to the last event produced in this run std::shared_ptr<Event> last_event; /// Do we want the events to be symmetrised with respect to the \f$z\f$-axis ? bool symmetrise; /// Number of events already generated in this run unsigned int ngen; /// Frequency at which the events are displayed to the end-user unsigned int gen_print_every; }; /// Events generation parameters Generation generation; /// Specify if the generated events are to be stored void setStorage( bool store ) { store_ = store; } /// Are the events generated in this run to be stored in the output file ? 
bool storage() const { return store_; } //----- hadronisation algorithm /// Hadronisation algorithm to use for the proton(s) fragmentation - Hadroniser::GenericHadroniser* hadroniser() { return hadroniser_.get(); } + Hadroniser::GenericHadroniser* hadroniser(); /// Set the hadronisation algorithm - void setHadroniser( Hadroniser::GenericHadroniser* hadr ) { hadroniser_.reset( hadr ); } + void setHadroniser( Hadroniser::GenericHadroniser* hadr ); /// Maximal number of trials for the hadronisation of the proton(s) remnants unsigned int hadroniser_max_trials; //----- taming functions /// Functionals to be used to account for rescattering corrections (implemented within the process) - TamingFunctionsCollection taming_functions; + std::unique_ptr<TamingFunctionsCollection> taming_functions; private: std::unique_ptr<Process::GenericProcess> process_; std::unique_ptr<Hadroniser::GenericHadroniser> hadroniser_; bool store_; }; } #endif diff --git a/CepGen/Physics/Kinematics.cpp b/CepGen/Physics/Kinematics.cpp index 474bc9c..40f22a4 100644 --- a/CepGen/Physics/Kinematics.cpp +++ b/CepGen/Physics/Kinematics.cpp @@ -1,139 +1,141 @@ #include "Kinematics.h" + #include "CepGen/Core/Exception.h" #include "CepGen/Core/utils.h" +#include "CepGen/Event/Particle.h" namespace CepGen { Kinematics::Kinematics() : inp( { 6500., 6500. } ), inpdg( { Proton, Proton } ), central_system( {} ), mode( ElasticElastic ), structure_functions( StructureFunctions::SuriYennie ) {} Kinematics::Kinematics( const Kinematics& kin ) : inp( kin.inp ), inpdg( kin.inpdg ), central_system( kin.central_system ), mode( kin.mode ), structure_functions( kin.structure_functions ), cuts( kin.cuts ) {} Kinematics::~Kinematics() {} void Kinematics::setSqrtS( double sqrts ) { const double pin = 0.5 * sqrts; inp = { pin, pin }; } double Kinematics::sqrtS() const { return ( inp.first+inp.second ); } void Kinematics::dump( std::ostream& os ) const { os << std::setfill(' '); os << "===== Central system\n"; for ( std::map<Cuts::Central,Limits>::const_iterator lim = cuts.central.begin(); lim != cuts.central.end(); ++lim ) { os << std::setw(30) << lim->first << ": " << lim->second; } os << "===== Initial state\n"; for ( std::map<Cuts::InitialState,Limits>::const_iterator lim = cuts.initial.begin(); lim != cuts.initial.end(); ++lim ) { os << std::setw(30) << lim->first << ": " << lim->second; } os << "===== Remnants\n"; for ( std::map<Cuts::Remnants,Limits>::const_iterator lim = cuts.remnants.begin(); lim != cuts.remnants.end(); ++lim ) { os << std::setw(30) << lim->first << ": " << lim->second; } } std::ostream& operator<<( std::ostream& os, const Kinematics::ProcessMode& pm ) { switch ( pm ) { case Kinematics::ElectronElectron: return os << "electron/electron"; case Kinematics::ElectronProton: return os << "electron/proton"; case Kinematics::ProtonElectron: return os << "proton/electron"; case Kinematics::ElasticElastic: return os << "elastic/elastic"; case Kinematics::InelasticElastic: return os << "inelastic/elastic"; case Kinematics::ElasticInelastic: return os << "elastic/inelastic"; case Kinematics::InelasticInelastic: return os << "inelastic/inelastic"; } return os; } std::ostream& operator<<( std::ostream& os, const Kinematics::Limits& lim ) { if ( !lim.hasMin() && !lim.hasMax() ) return os << "no cuts"; if ( !lim.hasMin() ) return os << Form( "≤ %g", lim.max() ); if ( !lim.hasMax() ) return os << Form( "≥ %g", lim.min() ); return os << Form( "%g → %g", lim.min(), lim.max() ); } void Kinematics::Limits::in( double low, double up ) { first = 
low; second = up; } double Kinematics::Limits::range() const { return ( !hasMin() || !hasMax() ) ? 0. : second-first; } bool Kinematics::Limits::hasMin() const { return first != invalid_; } bool Kinematics::Limits::hasMax() const { return second != invalid_; } bool Kinematics::Limits::passes( double val ) const { if ( hasMin() && val < min() ) return false; if ( hasMax() && val > max() ) return false; return true; } bool Kinematics::Limits::valid() const { return hasMin() || hasMax(); } double Kinematics::Limits::x( double v ) const { if ( v < 0. || v > 1. ) { InError( Form( "x must be comprised between 0 and 1 ; x value = %.5e", v ) ); } if ( !hasMin() || !hasMax() ) return invalid_; return first + ( second-first ) * v; } Kinematics::CutsList::CutsList() : initial( { { Cuts::q2, { 0., 1.e5 } } } ), central( { { Cuts::pt_single, 0. } } ), remnants( { { Cuts::mass, { 1.07, 320. } } } ) {} Kinematics::CutsList::CutsList( const CutsList& cuts ) : initial( cuts.initial ), central( cuts.central ), central_particles( cuts.central_particles ), remnants( cuts.remnants ) {} } diff --git a/CepGen/Physics/Kinematics.h b/CepGen/Physics/Kinematics.h index 80f7c53..48dcfa1 100644 --- a/CepGen/Physics/Kinematics.h +++ b/CepGen/Physics/Kinematics.h @@ -1,112 +1,112 @@ #ifndef CepGen_Physics_Kinematics_h #define CepGen_Physics_Kinematics_h #include <iomanip> #include <algorithm> #include "CepGen/Core/Logger.h" -#include "CepGen/Event/Particle.h" #include "CepGen/StructureFunctions/StructureFunctions.h" +#include "CepGen/Physics/ParticleProperties.h" #include "Cuts.h" #include <vector> #include <map> using std::cout; using std::string; namespace CepGen { /// List of kinematic constraints to apply on the process phase space. class Kinematics { public: /// Validity interval for a variable class Limits : private std::pair<double,double> { public: /// Define lower and upper limits on a quantity Limits( double min = invalid_, double max = invalid_ ) : std::pair<double,double>( min, max ) {} /// Lower limit to apply on the variable double min() const { return first; } /// Lower limit to apply on the variable double& min() { return first; } /// Upper limit to apply on the variable double max() const { return second; } /// Upper limit to apply on the variable double& max() { return second; } double x( double v ) const; /// Specify the lower and upper limits on the variable void in( double low, double up ); /// Full variable range allowed double range() const; /// Have a lower limit? bool hasMin() const; /// Have an upper limit? 
bool hasMax() const; bool passes( double val ) const; bool valid() const; /// Human-readable expression of the limits friend std::ostream& operator<<( std::ostream&, const Limits& ); private: static constexpr double invalid_ = -999.999; }; public: Kinematics(); Kinematics( const Kinematics& kin ); ~Kinematics(); /// Type of kinematics to consider for the process enum ProcessMode { ElectronProton = 0, ///< electron-proton elastic case ElasticElastic = 1, ///< proton-proton elastic case ElasticInelastic = 2, ///< proton-proton single-dissociative (or inelastic-elastic) case InelasticElastic = 3, ///< proton-proton single-dissociative (or elastic-inelastic) case InelasticInelastic = 4, ///< proton-proton double-dissociative case ProtonElectron, ElectronElectron }; /// Human-readable format of a process mode (elastic/dissociative parts) friend std::ostream& operator<<( std::ostream&, const ProcessMode& ); /// Dump all the parameters used in this process cross-section computation /// or events generation void dump( std::ostream& os = Logger::get().outputStream ) const; /// Incoming particles' momentum (in \f$\text{GeV}/c\f$) std::pair<double,double> inp; /// Set the incoming particles' momenta (if the collision is symmetric) void setSqrtS( double sqrts ); /// Process centre of mass energy double sqrtS() const; /// Beam/primary particle's PDG identifier std::pair<ParticleCode,ParticleCode> inpdg; /// PDG id of the outgoing central particles std::vector<ParticleCode> central_system; /// Type of kinematics to consider for the phase space ProcessMode mode; /// Type of structure functions to consider StructureFunctions::Type structure_functions; struct CutsList { CutsList(); CutsList( const CutsList& cuts ); /// Cuts on the initial particles kinematics std::map<Cuts::InitialState,Limits> initial; /// Cuts on the central system produced std::map<Cuts::Central,Limits> central; std::map<ParticleCode,std::map<Cuts::Central,Limits> > central_particles; /// Cuts on the beam remnants system std::map<Cuts::Remnants,Limits> remnants; }; CutsList cuts; }; } #endif diff --git a/CepGen/Processes/GenericKTProcess.cpp b/CepGen/Processes/GenericKTProcess.cpp index a60588e..0fe8162 100644 --- a/CepGen/Processes/GenericKTProcess.cpp +++ b/CepGen/Processes/GenericKTProcess.cpp @@ -1,278 +1,280 @@ #include "GenericKTProcess.h" #include "CepGen/StructureFunctions/StructureFunctionsBuilder.h" #include "CepGen/StructureFunctions/SigmaRatio.h" #include "CepGen/Core/Exception.h" namespace CepGen { namespace Process { GenericKTProcess::GenericKTProcess( const std::string& name, const std::string& description, const unsigned int& num_user_dimensions, const std::array<ParticleCode,2>& partons, const std::vector<ParticleCode>& central ) : GenericProcess( name, description+" (kT-factorisation approach)" ), log_qmin_( 0. ), log_qmax_( 0. ), qt1_( 0. ), phi_qt1_( 0. ), qt2_( 0. ), phi_qt2_( 0. ), flux1_( 0. ), flux2_( 0. 
), kNumUserDimensions( num_user_dimensions ), kIntermediateParts( partons ), kProducedParts( central ) {} void GenericKTProcess::prepareKTKinematics() { DebuggingInsideLoop("Dummy kinematics prepared!"); } void GenericKTProcess::addEventContent() { GenericProcess::setEventContent( { // incoming state { Particle::IncomingBeam1, Proton }, { Particle::IncomingBeam2, Proton }, { Particle::Parton1, kIntermediateParts[0] }, { Particle::Parton2, kIntermediateParts[1] } }, { // outgoing state { Particle::OutgoingBeam1, { Proton } }, { Particle::OutgoingBeam2, { Proton } }, { Particle::CentralSystem, kProducedParts } } ); setExtraContent(); } unsigned int GenericKTProcess::numDimensions( const Kinematics::ProcessMode& process_mode ) const { switch ( process_mode ) { default: case Kinematics::ElasticElastic: return kNumRequiredDimensions+kNumUserDimensions; case Kinematics::ElasticInelastic: case Kinematics::InelasticElastic: return kNumRequiredDimensions+kNumUserDimensions+1; case Kinematics::InelasticInelastic: return kNumRequiredDimensions+kNumUserDimensions+2; } } void GenericKTProcess::addPartonContent() { // Incoming partons qt1_ = exp( log_qmin_+( log_qmax_-log_qmin_ )*x( 0 ) ); qt2_ = exp( log_qmin_+( log_qmax_-log_qmin_ )*x( 1 ) ); phi_qt1_ = 2.*M_PI*x( 2 ); phi_qt2_ = 2.*M_PI*x( 3 ); DebuggingInsideLoop( Form( "photons transverse virtualities (qt):\n\t" " mag = %g / %f (%g < log(qt) < %g)\n\t" " phi = %g / %g", qt1_, qt2_, log_qmin_, log_qmax_, phi_qt1_, phi_qt2_ ) ); } void GenericKTProcess::setKinematics( const Kinematics& kin ) { cuts_ = kin; Kinematics::Limits qt_limits = cuts_.cuts.initial[Cuts::qt]; if ( !qt_limits.hasMin() ) - qt_limits.min() = 1.e-10; //FIXME fine-tuning needed! + qt_limits.min() = 1.e-10; if ( !qt_limits.hasMax() ) qt_limits.max() = 500.; - log_qmin_ = log( qt_limits.min() ); + log_qmin_ = std::max( log( qt_limits.min() ), -10. ); //FIXME fine-tuning needed! log_qmax_ = log( qt_limits.max() ); + if ( log_qmax_ > 10. ) + InWarning( Form( "qT range above \"reasonable\" range: maximal qT specified = 10^(%.1f)", log_qmax_ ) ); } double GenericKTProcess::computeWeight() { addPartonContent(); prepareKTKinematics(); computeOutgoingPrimaryParticlesMasses(); const double jac = computeJacobian(), integrand = computeKTFactorisedMatrixElement(), weight = jac*integrand; DebuggingInsideLoop( Form( "Jacobian = %g\n\tIntegrand = %g\n\tdW = %g", jac, integrand, weight ) ); return weight; } void GenericKTProcess::computeOutgoingPrimaryParticlesMasses() { const unsigned int op_index = kNumRequiredDimensions+kNumUserDimensions; const Kinematics::Limits remn_mx_cuts = cuts_.cuts.remnants[Cuts::mass]; switch ( cuts_.mode ) { case Kinematics::ElectronProton: default: { InError( "This kT factorisation process is intended for p-on-p collisions! Aborting!" 
    void GenericKTProcess::computeOutgoingPrimaryParticlesMasses()
    {
      const unsigned int op_index = kNumRequiredDimensions+kNumUserDimensions;
      const Kinematics::Limits remn_mx_cuts = cuts_.cuts.remnants[Cuts::mass];
      switch ( cuts_.mode ) {
        case Kinematics::ElectronProton:
        default: {
          InError( "This kT factorisation process is intended for p-on-p collisions! Aborting!" );
          exit( 0 );
        } break;
        case Kinematics::ElasticElastic: {
          MX_ = event_->getOneByRole( Particle::IncomingBeam1 ).mass();
          MY_ = event_->getOneByRole( Particle::IncomingBeam2 ).mass();
        } break;
        case Kinematics::ElasticInelastic: {
          const double mx_min = remn_mx_cuts.min(), mx_range = remn_mx_cuts.range();
          MX_ = event_->getOneByRole( Particle::IncomingBeam1 ).mass();
          MY_ = mx_min + mx_range*x( op_index );
        } break;
        case Kinematics::InelasticElastic: {
          const double mx_min = remn_mx_cuts.min(), mx_range = remn_mx_cuts.range();
          MX_ = mx_min + mx_range*x( op_index );
          MY_ = event_->getOneByRole( Particle::IncomingBeam2 ).mass();
        } break;
        case Kinematics::InelasticInelastic: {
          const double mx_min = remn_mx_cuts.min(), mx_range = remn_mx_cuts.range();
          MX_ = mx_min + mx_range*x( op_index );
          MY_ = mx_min + mx_range*x( op_index+1 );
        } break;
      }
      DebuggingInsideLoop( Form( "outgoing remnants invariant mass: %g / %g (%g < M(X/Y) < %g)",
                                 MX_, MY_, remn_mx_cuts.min(), remn_mx_cuts.max() ) );
    }

    void GenericKTProcess::computeIncomingFluxes( double x1, double q1t2, double x2, double q2t2 )
    {
      flux1_ = flux2_ = 0.;
      switch ( cuts_.mode ) {
        case Kinematics::ElasticElastic:
          flux1_ = elasticFlux( x1, q1t2 );
          flux2_ = elasticFlux( x2, q2t2 );
          break;
        case Kinematics::ElasticInelastic:
          flux1_ = elasticFlux( x1, q1t2 );
          flux2_ = inelasticFlux( x2, q2t2, MY_, cuts_.structure_functions );
          break;
        case Kinematics::InelasticElastic:
          flux1_ = inelasticFlux( x1, q1t2, MX_, cuts_.structure_functions );
          flux2_ = elasticFlux( x2, q2t2 );
          break;
        case Kinematics::InelasticInelastic:
          flux1_ = inelasticFlux( x1, q1t2, MX_, cuts_.structure_functions );
          flux2_ = inelasticFlux( x2, q2t2, MY_, cuts_.structure_functions );
          break;
        default:
          return;
      }
      flux1_ = std::max( flux1_, 1.e-20 );
      flux2_ = std::max( flux2_, 1.e-20 );
      DebuggingInsideLoop( Form( "Form factors: %g / %g", flux1_, flux2_ ) );
    }

    void GenericKTProcess::fillKinematics( bool )
    {
      fillPrimaryParticlesKinematics();
      fillCentralParticlesKinematics();
    }
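// --- illustrative sketch, not part of the patch ------------------------------
// computeOutgoingPrimaryParticlesMasses() above draws each dissociated remnant
// mass flat between the remnant-mass cuts, while the phase-space integration is
// performed in dM^2; the corresponding 2*( M_max-M_min )*M factors are the ones
// applied per dissociated side in minimalJacobian() below. A self-contained
// sketch of that mapping (names below are illustrative, not CepGen API):
#include <cstdio>

// Draw a remnant mass flat in [m_min, m_max] from a uniform variable u and
// return the Jacobian for an integration in dM^2: dM^2/du = 2*( m_max-m_min )*M.
double mapRemnantMass( double u, double m_min, double m_max, double& dm2_du )
{
  const double mass = m_min + ( m_max-m_min )*u;
  dm2_du = 2.*( m_max-m_min )*mass;
  return mass;
}

int main()
{
  double jac = 0.;
  const double mx = mapRemnantMass( 0.25, 1.07, 1000., jac ); // assumed mass cuts, in GeV
  printf( "MX = %g GeV, dMX^2/du = %g GeV^2\n", mx, jac );
  return 0;
}
// --- end of sketch ------------------------------------------------------------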
    void GenericKTProcess::fillPrimaryParticlesKinematics()
    {
      //================================================================
      // outgoing protons
      //================================================================
      Particle& op1 = event_->getOneByRole( Particle::OutgoingBeam1 ),
               &op2 = event_->getOneByRole( Particle::OutgoingBeam2 );
      op1.setMomentum( PX_ );
      op2.setMomentum( PY_ );
      switch ( cuts_.mode ) {
        case Kinematics::ElasticElastic:
          op1.setStatus( Particle::FinalState );
          op2.setStatus( Particle::FinalState );
          break;
        case Kinematics::ElasticInelastic:
          op1.setStatus( Particle::FinalState );
          op2.setStatus( Particle::Unfragmented ); op2.setMass( MY_ );
          break;
        case Kinematics::InelasticElastic:
          op1.setStatus( Particle::Unfragmented ); op1.setMass( MX_ );
          op2.setStatus( Particle::FinalState );
          break;
        case Kinematics::InelasticInelastic:
          op1.setStatus( Particle::Unfragmented ); op1.setMass( MX_ );
          op2.setStatus( Particle::Unfragmented ); op2.setMass( MY_ );
          break;
        default: {
          FatalError( "This kT factorisation process is intended for p-on-p collisions! Aborting!" );
        } break;
      }

      //================================================================
      // incoming partons (photons, pomerons, ...)
      //================================================================
      //FIXME ensure the validity of this approach
      Particle& g1 = event_->getOneByRole( Particle::Parton1 ),
               &g2 = event_->getOneByRole( Particle::Parton2 );
      g1.setMomentum( event_->getOneByRole( Particle::IncomingBeam1 ).momentum()-PX_, true );
      g1.setStatus( Particle::Incoming );
      g2.setMomentum( event_->getOneByRole( Particle::IncomingBeam2 ).momentum()-PY_, true );
      g2.setStatus( Particle::Incoming );

      //================================================================
      // two-parton system
      //================================================================
      event_->getOneByRole( Particle::Intermediate ).setMomentum( g1.momentum()+g2.momentum() );
    }

    double GenericKTProcess::minimalJacobian() const
    {
      double jac = 1.;
      jac *= ( log_qmax_-log_qmin_ )*qt1_; // d(q1t) . q1t
      jac *= ( log_qmax_-log_qmin_ )*qt2_; // d(q2t) . q2t
      jac *= 2.*M_PI; // d(phi1)
      jac *= 2.*M_PI; // d(phi2)

      const double mx_range = cuts_.cuts.remnants.at( Cuts::mass ).range();
      switch ( cuts_.mode ) {
        case Kinematics::ElasticElastic: default: break;
        case Kinematics::ElasticInelastic:   jac *= 2.* mx_range * MY_; break;
        case Kinematics::InelasticElastic:   jac *= 2.* mx_range * MX_; break;
        case Kinematics::InelasticInelastic: jac *= 2.* mx_range * MX_;
                                             jac *= 2.* mx_range * MY_; break;
      } // d(mx/y**2)
      return jac;
    }

    double GenericKTProcess::elasticFlux( double x, double kt2 )
    {
      const double mp = ParticleProperties::mass( Proton ), mp2 = mp*mp;

      const double Q2_min = x*x*mp2/( 1.-x ),
                   Q2_ela = ( kt2+x*x*mp2 )/( 1.-x );
      const FormFactors ff = FormFactors::ProtonElastic( Q2_ela );

      const double ela1 = ( 1.-x )*( 1.-Q2_min/Q2_ela );
      //const double ela3 = 1.-( Q2_ela-kt2 )/Q2_ela;
      const double f_ela = Constants::alphaEM/M_PI/kt2*( ela1*ff.FE + 0.5*x*x*ff.FM );

      // last factor below: Jacobian from dQ^2/Q^2 --> dkT^2/kT^2*(kT^2/Q^2)
      return f_ela*( 1.-x )*kt2 / Q2_ela;
    }

    double GenericKTProcess::inelasticFlux( double x, double kt2, double mx, const StructureFunctions::Type& sf )
    {
      const double mx2 = mx*mx,
                   mp = ParticleProperties::mass( Proton ), mp2 = mp*mp;

      // F2 structure function
      const double Q2min = 1. / ( 1.-x )*( x*( mx2-mp2 ) + x*x*mp2 ),
                   Q2 = Q2min + kt2/( 1.-x );
      float xbj = Q2 / ( Q2 + mx2 - mp2 );

      const StructureFunctions str_fun = StructureFunctionsBuilder::get( sf, Q2, xbj );

      const double F1 = 0.5*( ( 1+4.*xbj*xbj*mp2/Q2 )*str_fun.F2 - str_fun.FL )/xbj;

      const double term1 = ( 1.-x )*( 1.-Q2min/Q2 );
      const double f_D = str_fun.F2/( mx2 + Q2 - mp2 ) * term1;
      const double f_C = 2.*F1/Q2;

      return Constants::alphaEM/M_PI*( 1.-x )/Q2*( f_D+0.5*x*x*f_C );
    }
  }
}
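For readability, the quantities returned by elasticFlux() and inelasticFlux() above can be transcribed, as read off the implementation and in the code's own \f$F_E, F_M, F_2, F_1\f$ conventions, as the unintegrated photon fluxes

\f[
  f^{\rm el}_{\gamma/p}( x, k_T^2 ) = \frac{\alpha_{\rm em}}{\pi\,k_T^2}
    \left[ ( 1-x )\left( 1-\frac{Q^2_{\rm min}}{Q^2_{\rm el}} \right) F_E( Q^2_{\rm el} )
         + \frac{x^2}{2}\,F_M( Q^2_{\rm el} ) \right] \frac{( 1-x )\,k_T^2}{Q^2_{\rm el}},
\f]

with \f$Q^2_{\rm min} = x^2 m_p^2/( 1-x )\f$ and \f$Q^2_{\rm el} = ( k_T^2+x^2 m_p^2 )/( 1-x )\f$, the trailing factor implementing the \f$\mathrm{d}Q^2/Q^2 \to \mathrm{d}k_T^2/k_T^2\f$ change of variables noted in the code comment, and

\f[
  f^{\rm inel}_{\gamma/p}( x, k_T^2, M_X ) = \frac{\alpha_{\rm em}}{\pi}\,\frac{1-x}{Q^2}
    \left[ ( 1-x )\left( 1-\frac{Q^2_{\rm min}}{Q^2} \right) \frac{F_2( x_{\rm Bj}, Q^2 )}{M_X^2+Q^2-m_p^2}
         + \frac{x^2}{2}\,\frac{2 F_1( x_{\rm Bj}, Q^2 )}{Q^2} \right],
\f]

with \f$Q^2_{\rm min} = [ x( M_X^2-m_p^2 )+x^2 m_p^2 ]/( 1-x )\f$, \f$Q^2 = Q^2_{\rm min}+k_T^2/( 1-x )\f$ and \f$x_{\rm Bj} = Q^2/( Q^2+M_X^2-m_p^2 )\f$.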