diff --git a/CMakeLists.txt b/CMakeLists.txt index 9f176cc..10bcf1a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,208 +1,208 @@ cmake_minimum_required(VERSION 3.1 FATAL_ERROR) set(CMAKE_LEGACY_CYGWIN_WIN32 0) set(CMAKE_EXPORT_COMPILE_COMMANDS ON) project("HEJ" VERSION 2.0.6 LANGUAGES C CXX) ## User settings include(CMakeDependentOption) option(EXCLUDE_QCDloop "Do not include QCDloop" FALSE) option(EXCLUDE_HighFive "Do not include HighFive" FALSE) option(EXCLUDE_HepMC "Do not include HepMC version 2" FALSE) option(EXCLUDE_HepMC3 "Do not include HepMC version 3" FALSE) CMAKE_DEPENDENT_OPTION(EXCLUDE_rivet "Do not include rivet" FALSE "NOT EXCLUDE_HepMC OR NOT EXCLUDE_HepMC3" TRUE) option(TEST_ALL "Run additional (longer) tests" FALSE) option(TEST_COVERAGE "Generate test coverage with \"gcovr\"" FALSE) # Set a default build type if none was specified set(default_build_type "RelWithDebInfo") if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES) message(STATUS "Setting build type to '${default_build_type}' as none was specified.") set(CMAKE_BUILD_TYPE "${default_build_type}" CACHE STRING "Choose the type of build." FORCE) # Set the possible values of build type for cmake-gui set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo") endif() ## Global flags for the compiler (all warnings) if (CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Wpedantic") elseif (MSVC) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4 /WX /EHsc") endif() set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_STANDARD 14) ## Create Version set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/Modules/") # Get the latest abbreviated commit hash of the working branch execute_process( COMMAND git rev-parse HEAD WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} OUTPUT_VARIABLE PROJECT_GIT_REVISION OUTPUT_STRIP_TRAILING_WHITESPACE ) # Get the current working branch execute_process( COMMAND git rev-parse --abbrev-ref HEAD WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} OUTPUT_VARIABLE PROJECT_GIT_BRANCH OUTPUT_STRIP_TRAILING_WHITESPACE ) ## target directories for install set(INSTALL_INCLUDE_DIR_BASE include) set(INSTALL_INCLUDE_DIR ${INSTALL_INCLUDE_DIR_BASE}/HEJ) set(INSTALL_BIN_DIR bin) set(INSTALL_LIB_DIR lib) set(INSTALL_CONFIG_DIR lib/cmake/HEJ) ## Template files configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Templates/Version.hh.in ${PROJECT_BINARY_DIR}/include/HEJ/Version.hh @ONLY ) configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Templates/HEJ-config.cc.in ${PROJECT_BINARY_DIR}/src/bin/HEJ-config.cc @ONLY ) # Generate CMake config file include(CMakePackageConfigHelpers) configure_package_config_file( cmake/Templates/hej-config.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/${INSTALL_CONFIG_DIR}/hej-config.cmake INSTALL_DESTINATION ${INSTALL_CONFIG_DIR} PATH_VARS INSTALL_INCLUDE_DIR_BASE INSTALL_LIB_DIR ) write_basic_package_version_file( ${CMAKE_CURRENT_BINARY_DIR}/${INSTALL_CONFIG_DIR}/hej-config-version.cmake COMPATIBILITY SameMajorVersion ) install( FILES ${CMAKE_CURRENT_BINARY_DIR}/${INSTALL_CONFIG_DIR}/hej-config.cmake ${CMAKE_CURRENT_BINARY_DIR}/${INSTALL_CONFIG_DIR}/hej-config-version.cmake DESTINATION ${INSTALL_CONFIG_DIR}) ## find dependencies find_package(fastjet REQUIRED) find_package(CLHEP 2.3 REQUIRED) find_package(LHAPDF REQUIRED) ## Amend unintuitive behaviour of FindBoost.cmake ## Priority of BOOST_ROOT over BOOSTROOT matches 
FindBoost.cmake ## at least for cmake 3.12 if(DEFINED BOOST_ROOT) message("BOOST_ROOT set - only looking for Boost in ${BOOST_ROOT}") set(Boost_NO_BOOST_CMAKE ON) elseif(DEFINED BOOSTROOT) message("BOOSTROOT set - only looking for Boost in ${BOOSTROOT}") set(Boost_NO_BOOST_CMAKE ON) endif() find_package(Boost REQUIRED COMPONENTS iostreams) find_package(yaml-cpp) # requiring yaml does not work with fedora if(EXCLUDE_HepMC) message(STATUS "Skipping HepMC") else() find_package(HepMC 2 EXACT) endif() if(EXCLUDE_HepMC3) message(STATUS "Skipping HepMC3") else() ## version 3.1: Finding version not possible in 3.1.1, see HepMC3 git 29e7a6c3 find_package(HepMC3 QUIET) # suppress CMake warning if HepMC3 not available ## print standard message find_package_handle_standard_args( HepMC3 FOUND_VAR HepMC3_FOUND REQUIRED_VARS HEPMC3_INCLUDE_DIR HEPMC3_LIBRARIES # VERSION_VAR # HEPMC3_VERSION ) endif() if(EXCLUDE_rivet) message(STATUS "Skipping rivet") else() find_package(rivet) if(rivet_FOUND) if(rivet_USE_HEPMC3 AND NOT HepMC3_FOUND) message(FATAL_ERROR "Rivet was compiled with HepMC version 3, " "but HepMC3 was not found!") elseif(NOT rivet_USE_HEPMC3 AND NOT HepMC_FOUND) message(FATAL_ERROR "Rivet was compiled with HepMC version 2, " "but HepMC2 was not found!") endif() endif() endif() if(EXCLUDE_QCDloop) message(STATUS "Skipping QCDloop") else() find_package(QCDloop 2) endif() if(EXCLUDE_HighFive) message(STATUS "Skipping HighFive") else() find_package(HighFive QUIET) find_package_handle_standard_args( HighFive CONFIG_MODE ) endif() include(RepairTargets) # more reliable cmake targets ## shortcut for HEJ specific includes set(HEJ_INCLUDE_DIR $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${PROJECT_BINARY_DIR}/include> $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include> ) if (TEST_COVERAGE AND (CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX) ) include(CodeCoverage) APPEND_COVERAGE_COMPILER_FLAGS() set(COVERAGE_GCOVR_EXCLUDES "${PROJECT_SOURCE_DIR}/include/LHEF/*" "${PROJECT_SOURCE_DIR}/include/cxxopts.hpp" "${PROJECT_SOURCE_DIR}/t/*") SETUP_TARGET_FOR_COVERAGE_GCOVR_HTML( NAME ctest_coverage # New target name EXECUTABLE ctest -j ${PROCESSOR_COUNT} # Executable in PROJECT_BINARY_DIR ) endif() ## create main HEJ library add_subdirectory(src) ## define executable add_executable(HEJ_main src/bin/HEJ.cc) set_target_properties(HEJ_main PROPERTIES OUTPUT_NAME "HEJ") ## link libraries target_link_libraries(HEJ_main HEJ) add_executable(HEJ-config src/bin/HEJ-config.cc) target_include_directories(HEJ-config PRIVATE ${HEJ_INCLUDE_DIR}) file(GLOB hej_headers ${CMAKE_CURRENT_SOURCE_DIR}/include/HEJ/*.hh ${PROJECT_BINARY_DIR}/include/HEJ/*.hh ) file(GLOB hej_detail_headers ${CMAKE_CURRENT_SOURCE_DIR}/include/HEJ/detail/*.hh) file(GLOB lhef_headers ${CMAKE_CURRENT_SOURCE_DIR}/include/LHEF/*.h) install(FILES ${hej_headers} DESTINATION ${INSTALL_INCLUDE_DIR}) install(FILES ${hej_detail_headers} DESTINATION ${INSTALL_INCLUDE_DIR}/detail/) install(FILES ${lhef_headers} DESTINATION ${INSTALL_INCLUDE_DIR_BASE}/LHEF/) install(TARGETS HEJ_main HEJ-config DESTINATION ${INSTALL_BIN_DIR}) ## tests (prevent testing if current project is a subproject) if(CMAKE_PROJECT_NAME STREQUAL PROJECT_NAME) ## advanced version of enable_testing() ## adds "BUILD_TESTING" option to deactivate testing include(CTest) if(BUILD_TESTING) add_subdirectory(t) endif() endif() diff --git a/FixedOrderGen/CMakeLists.txt b/FixedOrderGen/CMakeLists.txt index 826b1dd..127c2e6 100644 --- a/FixedOrderGen/CMakeLists.txt +++ b/FixedOrderGen/CMakeLists.txt @@ 
-1,102 +1,102 @@ cmake_minimum_required(VERSION 3.1 FATAL_ERROR) set(CMAKE_LEGACY_CYGWIN_WIN32 0) set(CMAKE_EXPORT_COMPILE_COMMANDS ON) project("HEJ Fixed Order Generation" VERSION 2.0.6 LANGUAGES C CXX) # Set a default build type if none was specified set(default_build_type "RelWithDebInfo") if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES) message(STATUS "Setting build type to '${default_build_type}' as none was specified.") set(CMAKE_BUILD_TYPE "${default_build_type}" CACHE STRING "Choose the type of build." FORCE) # Set the possible values of build type for cmake-gui set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo") endif() ## Global flags for the compiler (all warnings) if (CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Wpedantic") elseif (MSVC) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4 /WX /EHsc") endif() set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_STANDARD 14) ## Create Version set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/Modules/") # Get the latest abbreviated commit hash of the working branch execute_process( COMMAND git rev-parse HEAD WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} OUTPUT_VARIABLE PROJECT_GIT_REVISION OUTPUT_STRIP_TRAILING_WHITESPACE ) # Get the current working branch execute_process( COMMAND git rev-parse --abbrev-ref HEAD WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} OUTPUT_VARIABLE PROJECT_GIT_BRANCH OUTPUT_STRIP_TRAILING_WHITESPACE ) CONFIGURE_FILE( ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Templates/Version.hh.in ${PROJECT_BINARY_DIR}/include/Version.hh @ONLY ) ## Use cmake modules from HEJ src set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/../cmake/Modules/") ## Find HEJ (only dependency) ## HEJ includes all sub dependencies (fastjet, lhapdf, ...) 
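## (Note: the exported package config hej-config.cmake, installed to lib/cmake/HEJ by the main build,
## provides the imported target HEJ::HEJ used below. A downstream project would typically consume HEJ
## the same way, e.g.
##   find_package(HEJ 2 REQUIRED)
##   target_link_libraries(my_analysis HEJ::HEJ)
## where "my_analysis" is only an illustrative consumer target.)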
find_package(HEJ 2 REQUIRED) include(RepairTargets) # more reliable cmake targets ## define executable file(GLOB HEJFOG_source ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cc) list(REMOVE_ITEM HEJFOG_source ${CMAKE_CURRENT_SOURCE_DIR}/src/main.cc) add_library(hejfog_lib STATIC ${HEJFOG_source}) target_include_directories(hejfog_lib PUBLIC ${PROJECT_SOURCE_DIR}/include ${PROJECT_BINARY_DIR}/include ) add_executable(HEJFOG ${CMAKE_CURRENT_SOURCE_DIR}/src/main.cc) ## link libraries set(libraries ${CMAKE_DL_LIBS}) target_link_libraries(hejfog_lib ${libraries} HEJ::HEJ) target_link_libraries(HEJFOG hejfog_lib) install(TARGETS HEJFOG DESTINATION bin) ## tests enable_testing() set(tst_dir "${CMAKE_CURRENT_SOURCE_DIR}/t") foreach(tst h_2j h_3j h_5j h_3j_uno1 h_3j_uno2 h_2j_decay 2j 4j W_reconstruct_enu W_2j_classify W_nj_classify) add_executable(test_${tst} ${tst_dir}/${tst}.cc) target_link_libraries(test_${tst} hejfog_lib) add_test(NAME t_${tst} COMMAND test_${tst} WORKING_DIRECTORY ${tst_dir}) endforeach() # this only tests if the runcard actually works, not if the result is correct add_test( NAME t_main_2j COMMAND HEJFOG ${tst_dir}/config_2j.yml ) add_test( NAME t_main_h2j COMMAND HEJFOG ${tst_dir}/config_h_2j.yml ) add_test( NAME t_main_h2j_decay COMMAND HEJFOG ${tst_dir}/config_h_2j_decay.yml ) add_test( NAME t_peakpt COMMAND HEJFOG ${tst_dir}/config_2j_peak.yml ) diff --git a/FixedOrderGen/cmake/Templates/Version.hh.in b/FixedOrderGen/cmake/Templates/Version.hh.in index c33b5f3..cd4e363 100644 --- a/FixedOrderGen/cmake/Templates/Version.hh.in +++ b/FixedOrderGen/cmake/Templates/Version.hh.in @@ -1,49 +1,49 @@ /** \file Version.hh * \brief The file gives the current HEJ Fixed Order Generator Version * * \authors The HEJ collaboration (see AUTHORS for details) * \date 2019 * \copyright GPLv2 or later */ #pragma once #include <string> /// @brief Full name of this package. #define HEJFOG_PACKAGE_NAME "@PROJECT_NAME@" /// @brief Version string of this package #define HEJFOG_VERSION "@PROJECT_VERSION@" /// @brief Full name and version of this package. #define HEJFOG_PACKAGE_STRING "@PROJECT_NAME@ @PROJECT_VERSION@" /// @brief Major version of this package #define HEJFOG_VERSION_MAJOR @PROJECT_VERSION_MAJOR@ /// @brief Minor version of this package #define HEJFOG_VERSION_MINOR @PROJECT_VERSION_MINOR@ /// @brief Patch version of this package #define HEJFOG_VERSION_PATCH @PROJECT_VERSION_PATCH@ /// @brief Git revision of this package #define HEJFOG_GIT_revision "@PROJECT_GIT_REVISION@" /// @brief Git branch name of this package #define HEJFOG_GIT_branch "@PROJECT_GIT_BRANCH@" namespace HEJFOG { namespace Version { inline std::string String() { return HEJFOG_VERSION; } inline std::string package_name() { return HEJFOG_PACKAGE_NAME; } inline std::string package_name_full() { return HEJFOG_PACKAGE_STRING; } inline int Major() { return HEJFOG_VERSION_MAJOR; } inline int Minor() { return HEJFOG_VERSION_MINOR; } inline int Patch() { return HEJFOG_VERSION_PATCH; } inline std::string revision() { return HEJFOG_GIT_revision; } - }; + } } diff --git a/cmake/Templates/Version.hh.in b/cmake/Templates/Version.hh.in index fc394ac..6409fe7 100644 --- a/cmake/Templates/Version.hh.in +++ b/cmake/Templates/Version.hh.in @@ -1,49 +1,49 @@ /** \file Version.hh * \brief The file gives the current HEJ Version * * \authors The HEJ collaboration (see AUTHORS for details) * \date 2019 * \copyright GPLv2 or later */ #pragma once #include <string> /// @brief Full name of this package. 
#define HEJ_PACKAGE_NAME "@PROJECT_NAME@" /// @brief HEJ version string #define HEJ_VERSION "@PROJECT_VERSION@" /// @brief Full name and version of this package. #define HEJ_PACKAGE_STRING "@PROJECT_NAME@ @PROJECT_VERSION@" /// @brief Major version of this package #define HEJ_VERSION_MAJOR @PROJECT_VERSION_MAJOR@ /// @brief Minor version of this package #define HEJ_VERSION_MINOR @PROJECT_VERSION_MINOR@ /// @brief Patch version of this package #define HEJ_VERSION_PATCH @PROJECT_VERSION_PATCH@ /// @brief Git revision of this package #define HEJ_GIT_revision "@PROJECT_GIT_REVISION@" /// @brief Git branch name of this package #define HEJ_GIT_branch "@PROJECT_GIT_BRANCH@" namespace HEJ { namespace Version { inline std::string String() { return HEJ_VERSION; } inline std::string package_name() { return HEJ_PACKAGE_NAME; } inline std::string package_name_full() { return HEJ_PACKAGE_STRING; } inline int Major() { return HEJ_VERSION_MAJOR; } inline int Minor() { return HEJ_VERSION_MINOR; } inline int Patch() { return HEJ_VERSION_PATCH; } inline std::string revision() { return HEJ_GIT_revision; } - }; + } } diff --git a/src/Event.cc b/src/Event.cc index 7424cdd..ed5c499 100644 --- a/src/Event.cc +++ b/src/Event.cc @@ -1,987 +1,987 @@ /** * \authors The HEJ collaboration (see AUTHORS for details) * \date 2019 * \copyright GPLv2 or later */ #include "HEJ/Event.hh" #include <algorithm> #include <assert.h> #include <iterator> #include <numeric> #include <unordered_set> #include <utility> #include "LHEF/LHEF.h" #include "fastjet/JetDefinition.hh" #include "HEJ/Constants.hh" #include "HEJ/exceptions.hh" #include "HEJ/PDG_codes.hh" namespace HEJ{ namespace { constexpr int status_in = -1; constexpr int status_decayed = 2; constexpr int status_out = 1; //! true if leptonic W decay bool valid_W_decay( int const w_type, // sign of W std::vector<Particle> const & decays ){ if(decays.size() != 2) // no 1->2 decay return false; const int pidsum = decays[0].type + decays[1].type; if( std::abs(pidsum) != 1 || pidsum != w_type ) // correct charge return false; // leptonic decay (only check first, second follows from pidsum) if( w_type == 1 ) // W+ return is_antilepton(decays[0]) || is_neutrino(decays[0]); // W- return is_lepton(decays[0]) || is_antineutrino(decays[0]); } /// @name helper functions to determine event type //@{ /** * \brief check if final state valid for HEJ * * check if there is at most one photon, W, H, Z in the final state * and all the rest are quarks or gluons */ bool final_state_ok(Event const & ev){ std::vector<Particle> const & outgoing = ev.outgoing(); if(ev.decays().size() > 1) // at most one decay return false; bool has_AWZH_boson = false; for( size_t i=0; i<outgoing.size(); ++i ){ auto const & out{ outgoing[i] }; if(is_AWZH_boson(out.type)){ // at most one boson if(has_AWZH_boson) return false; has_AWZH_boson = true; // valid decay for W if(std::abs(out.type) == ParticleID::Wp){ // exactly 1 decay of W if( ev.decays().size() != 1 || ev.decays().cbegin()->first != i ) return false; if( !valid_W_decay(out.type>0?+1:-1, ev.decays().cbegin()->second) ) return false; } } else if(! 
is_parton(out.type)) return false; } return true; } /** * returns all EventTypes implemented in HEJ */ size_t implemented_types(std::vector<Particle> const & bosons){ using namespace event_type; if(bosons.empty()) return FKL | unob | unof | qqxexb | qqxexf; // pure jets if(bosons.size()>1) return non_resummable; // multi boson switch (bosons[0].type) { case ParticleID::Wp: case ParticleID::Wm: return FKL | unob | unof | qqxexb | qqxexf | qqxmid; case ParticleID::h: return FKL | unob | unof; default: return non_resummable; } } /** * \brief function which determines if type change is consistent with Wp emission. * @param in incoming Particle id * @param out outgoing Particle id * @param qqx Current both incoming/both outgoing? * * \see is_Wm_Change */ bool is_Wp_Change(ParticleID in, ParticleID out, bool qqx){ if(!qqx && (in==-1 || in== 2 || in==-3 || in== 4)) return out== (in-1); if( qqx && (in== 1 || in==-2 || in== 3 || in==-4)) return out==-(in+1); return false; } /** * \brief function which determines if type change is consistent with Wm emission. * @param in incoming Particle id * @param out outgoing Particle id * @param qqx Current both incoming/both outgoing? * * Ensures that the type change of the quark line is possible by a flavour changing * Wm emission. Allows checking of qqx currents also. */ bool is_Wm_Change(ParticleID in, ParticleID out, bool qqx){ if(!qqx && (in== 1 || in==-2 || in== 3 || in==-4)) return out== (in+1); if( qqx && (in==-1 || in== 2 || in==-3 || in== 4)) return out==-(in-1); return false; } /** * \brief checks if particle type remains the same from incoming to outgoing * @param in incoming Particle * @param out outgoing Particle * @param qqx Current both incoming/outgoing? */ bool no_flavour_change(ParticleID in, ParticleID out, bool qqx){ const int qqxCurrent = qqx?-1:1; if(abs(in)<=6 || in==pid::gluon) return (in==out*qqxCurrent); else return false; } bool has_2_jets(Event const & event){ return event.jets().size() >= 2; } /** * \brief check if we have a valid impact factor * @param in incoming Particle * @param out outgoing Particle * @param qqx Current both incoming/outgoing? * @param W_change returns +1 if Wp, -1 if Wm, else 0 */ bool is_valid_impact_factor( ParticleID in, ParticleID out, bool qqx, int & W_change ){ if( no_flavour_change(in, out, qqx) ){ return true; } if( is_Wp_Change(in, out, qqx) ) { W_change+=1; return true; } if( is_Wm_Change(in, out, qqx) ) { W_change-=1; return true; } return false; } //! Returns all possible classifications from the impact factors // the beginning points are changed s.t. after the classification they // point to the beginning of the (potential) FKL chain // sets W_change: + if Wp change // 0 if no change // - if Wm change // This function can be used with forward & backwards iterators template<class OutIterator> size_t possible_impact_factors( ParticleID incoming_id, // incoming OutIterator & begin_out, OutIterator const & end_out, // outgoing int & W_change, std::vector<Particle> const & boson, bool const backward // backward? ){ using namespace event_type; assert(boson.size() < 2); // keep track of all states that we don't test size_t not_tested = qqxmid; if(backward) not_tested |= unof | qqxexf; else not_tested |= unob | qqxexb; // Is this LL current? if( is_valid_impact_factor(incoming_id, begin_out->type, false, W_change) ){ ++begin_out; return not_tested | FKL; } // or NLL current? // -> needs two partons in two different jets if( std::distance(begin_out, end_out)>=2 ){ // Is this unordered emission?
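// (i.e. the incoming parton is a quark but the extremal outgoing parton is a gluon,
// and the next parton in rapidity carries the quark flavour)
// Illustrative example of the W_change bookkeeping done by is_valid_impact_factor,
// assuming ParticleID::u and ParticleID::d carry the usual PDG codes 2 and 1:
//   int W_change = 0;
//   is_valid_impact_factor(ParticleID::u, ParticleID::d, false, W_change);
//   // -> true via is_Wp_Change, W_change == +1 (u -> d + W+)
//   is_valid_impact_factor(ParticleID::d, ParticleID::u, false, W_change);
//   // -> true via is_Wm_Change, W_change back to 0 (d -> u + W-)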
if( incoming_id!=pid::gluon && begin_out->type==pid::gluon ){ if( is_valid_impact_factor( incoming_id, (begin_out+1)->type, false, W_change ) ){ // veto Higgs inside uno assert((begin_out+1)<end_out); if( !boson.empty() && boson.front().type == ParticleID::h ){ if( (backward && boson.front().rapidity() < (begin_out+1)->rapidity()) ||(!backward && boson.front().rapidity() > (begin_out+1)->rapidity())) return non_resummable; } begin_out+=2; return not_tested | (backward?unob:unof); } } // Is this QQbar? else if( incoming_id==pid::gluon ){ if( is_valid_impact_factor( begin_out->type, (begin_out+1)->type, true, W_change ) ){ // veto Higgs inside qqx assert((begin_out+1)<end_out); if( !boson.empty() && boson.front().type == ParticleID::h ){ if( (backward && boson.front().rapidity() < (begin_out+1)->rapidity()) ||(!backward && boson.front().rapidity() > (begin_out+1)->rapidity())) return non_resummable; } begin_out+=2; return not_tested | (backward?qqxexb:qqxexf); } } } return non_resummable; } //! Returns all possible classifications from central emissions // the beginning points are changed s.t. after the classification they // point to the end of the emission chain // sets W_change: + if Wp change // 0 if no change // - if Wm change template<class OutIterator> size_t possible_central( OutIterator & begin_out, OutIterator const & end_out, int & W_change, std::vector<Particle> const & boson ){ using namespace event_type; assert(boson.size() < 2); // if we already passed the central chain, // then it is not a valid all-order state if(std::distance(begin_out, end_out) < 0) return non_resummable; // keep track of all states that we don't test size_t possible = unob | unof | qqxexb | qqxexf; // Find the first non-gluon/non-FKL while( (begin_out->type==pid::gluon) && (begin_out<end_out) ){ ++begin_out; } // end of chain -> FKL if( begin_out==end_out ){ return possible | FKL; } // is this a qqbar-pair? // needs two partons in two separate jets if( is_valid_impact_factor( begin_out->type, (begin_out+1)->type, true, W_change ) ){ // veto Higgs inside qqx if( !boson.empty() && boson.front().type == ParticleID::h && boson.front().rapidity() > begin_out->rapidity() && boson.front().rapidity() < (begin_out+1)->rapidity() ){ return non_resummable; } begin_out+=2; // remaining chain should be pure gluon/FKL for(; begin_out<end_out; ++begin_out){ if(begin_out->type != pid::gluon) return non_resummable; } return possible | qqxmid; } return non_resummable; } /** * \brief Checks for all event types * @param ev Event * @returns Event Type * */ event_type::EventType classify(Event const & ev){ using namespace event_type; if(! has_2_jets(ev)) return no_2_jets; // currently we can't handle multiple boson states in the ME. So they are // considered "bad_final_state" even though the "classify" could work with // them. if(!
final_state_ok(ev)) return bad_final_state; // initialise variables auto const & in = ev.incoming(); auto const & out = filter_partons(ev.outgoing()); assert(std::distance(begin(in), end(in)) == 2); assert(out.size() >= 2); assert(std::distance(begin(out), end(out)) >= 2); assert(std::is_sorted(begin(out), end(out), rapidity_less{})); auto const boson{ filter_AWZH_bosons(ev.outgoing()) }; // we only allow one boson through final_state_ok assert(boson.size()<=1); // keep track of potential W couplings, at the end the sum should be 0 int remaining_Wp = 0; int remaining_Wm = 0; if(!boson.empty() && abs(boson.front().type) == ParticleID::Wp ){ if(boson.front().type>0) ++remaining_Wp; else ++remaining_Wm; } int W_change = 0; // range for current checks auto begin_out{out.cbegin()}; auto end_out{out.crbegin()}; size_t final_type = ~(no_2_jets | bad_final_state); // check forward impact factor final_type &= possible_impact_factors( in.front().type, begin_out, end_out.base(), W_change, boson, true ); if( final_type == non_resummable ) return non_resummable; if(W_change>0) remaining_Wp-=W_change; else if(W_change<0) remaining_Wm+=W_change; W_change = 0; // check backward impact factor final_type &= possible_impact_factors( in.back().type, end_out, std::make_reverse_iterator(begin_out), W_change, boson, false ); if( final_type == non_resummable ) return non_resummable; if(W_change>0) remaining_Wp-=W_change; else if(W_change<0) remaining_Wm+=W_change; W_change = 0; // check central emissions final_type &= possible_central( begin_out, end_out.base(), W_change, boson ); if( final_type == non_resummable ) return non_resummable; if(W_change>0) remaining_Wp-=W_change; else if(W_change<0) remaining_Wm+=W_change; // Check whether the right number of Ws is present if( remaining_Wp != 0 || remaining_Wm != 0 ) return non_resummable; // result has to be unique if( (final_type & (final_type-1)) != 0) return non_resummable; // check that each sub process is implemented // (has to be done at the end) if( (final_type & ~implemented_types(boson)) != 0 ) return non_resummable; return static_cast<EventType>(final_type); } //@} Particle extract_particle(LHEF::HEPEUP const & hepeup, int i){ const ParticleID id = static_cast<ParticleID>(hepeup.IDUP[i]); const fastjet::PseudoJet momentum{ hepeup.PUP[i][0], hepeup.PUP[i][1], hepeup.PUP[i][2], hepeup.PUP[i][3] }; if(is_parton(id)) return Particle{ id, std::move(momentum), hepeup.ICOLUP[i] }; return Particle{ id, std::move(momentum), {} }; } bool is_decay_product(std::pair<int, int> const & mothers){ if(mothers.first == 0) return false; return mothers.second == 0 || mothers.first == mothers.second; } } // namespace anonymous Event::EventData::EventData(LHEF::HEPEUP const & hepeup){ parameters.central = EventParameters{ hepeup.scales.mur, hepeup.scales.muf, hepeup.weight() }; size_t in_idx = 0; for (int i = 0; i < hepeup.NUP; ++i) { // skip decay products // we will add them later on, but we have to ensure that // the decayed particle is added before if(is_decay_product(hepeup.MOTHUP[i])) continue; auto particle = extract_particle(hepeup, i); // needed to identify mother particles for decay products particle.p.set_user_index(i+1); if(hepeup.ISTUP[i] == status_in){ if(in_idx > incoming.size()) { throw std::invalid_argument{ "Event has too many incoming particles" }; } incoming[in_idx++] = std::move(particle); } else outgoing.emplace_back(std::move(particle)); } // add decay products for (int i = 0; i < hepeup.NUP; ++i) { if(!is_decay_product(hepeup.MOTHUP[i])) continue;
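// MOTHUP[i].first is the 1-based LHE position of the decayed mother particle;
// the first loop stored exactly this value (index+1) as the PseudoJet user_index,
// so the mother can be located in `outgoing` by matching user indices below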
const int mother_id = hepeup.MOTHUP[i].first; const auto mother = std::find_if( begin(outgoing), end(outgoing), [mother_id](Particle const & particle){ return particle.p.user_index() == mother_id; } ); if(mother == end(outgoing)){ throw std::invalid_argument{"invalid decay product parent"}; } const int mother_idx = std::distance(begin(outgoing), mother); assert(mother_idx >= 0); decays[mother_idx].emplace_back(extract_particle(hepeup, i)); } } Event::Event( UnclusteredEvent const & ev, fastjet::JetDefinition const & jet_def, double const min_jet_pt ): Event( Event::EventData{ ev.incoming, ev.outgoing, ev.decays, Parameters<EventParameters>{ev.central, ev.variations} }.cluster(jet_def, min_jet_pt) ) {} //! @TODO remove in HEJ 2.2.0 UnclusteredEvent::UnclusteredEvent(LHEF::HEPEUP const & hepeup){ Event::EventData const evData{hepeup}; incoming = evData.incoming; outgoing = evData.outgoing; decays = evData.decays; central = evData.parameters.central; variations = evData.parameters.variations; } void Event::EventData::sort(){ // sort particles std::sort( begin(incoming), end(incoming), [](Particle o1, Particle o2){return o1.p.pz()<o2.p.pz();} ); auto old_outgoing = std::move(outgoing); std::vector<size_t> idx(old_outgoing.size()); std::iota(idx.begin(), idx.end(), 0); std::sort(idx.begin(), idx.end(), [&old_outgoing](size_t i, size_t j){ return old_outgoing[i].rapidity() < old_outgoing[j].rapidity(); }); outgoing.clear(); outgoing.reserve(old_outgoing.size()); for(size_t i: idx) { outgoing.emplace_back(std::move(old_outgoing[i])); } // find decays again if(!decays.empty()){ auto old_decays = std::move(decays); decays.clear(); for(size_t i=0; i<idx.size(); ++i) { auto decay = old_decays.find(idx[i]); if(decay != old_decays.end()) decays.emplace(i, std::move(decay->second)); } assert(old_decays.size() == decays.size()); } } namespace { Particle reconstruct_boson(std::vector<Particle> const & leptons) { Particle decayed_boson; decayed_boson.p = leptons[0].p + leptons[1].p; const int pidsum = leptons[0].type + leptons[1].type; if(pidsum == +1) { assert(is_antilepton(leptons[0])); if(is_antineutrino(leptons[0])) { throw not_implemented{"lepton-flavour violating final state"}; } assert(is_neutrino(leptons[1])); // charged antilepton + neutrino means we had a W+ decayed_boson.type = pid::Wp; } else if(pidsum == -1) { assert(is_antilepton(leptons[0])); if(is_neutrino(leptons[1])) { throw not_implemented{"lepton-flavour violating final state"}; } assert(is_antineutrino(leptons[0])); // charged lepton + antineutrino means we had a W- decayed_boson.type = pid::Wm; } else { throw not_implemented{ "final state with leptons " + name(leptons[0].type) + " and " + name(leptons[1].type) }; } return decayed_boson; } } void Event::EventData::reconstruct_intermediate() { const auto begin_leptons = std::partition( begin(outgoing), end(outgoing), [](Particle const & p) {return !is_anylepton(p);} ); if(begin_leptons == end(outgoing)) return; assert(is_anylepton(*begin_leptons)); std::vector<Particle> leptons(begin_leptons, end(outgoing)); outgoing.erase(begin_leptons, end(outgoing)); if(leptons.size() != 2) { throw not_implemented{"Final states with one or more than two leptons"}; } std::sort( begin(leptons), end(leptons), [](Particle const & p0, Particle const & p1) { return p0.type < p1.type; } ); outgoing.emplace_back(reconstruct_boson(leptons)); decays.emplace(outgoing.size()-1, std::move(leptons)); } Event Event::EventData::cluster( fastjet::JetDefinition const & jet_def, double const min_jet_pt ){ sort(); Event 
ev{ std::move(incoming), std::move(outgoing), std::move(decays), std::move(parameters), jet_def, min_jet_pt }; assert(std::is_sorted(begin(ev.outgoing_), end(ev.outgoing_), rapidity_less{})); ev.type_ = classify(ev); return ev; } Event::Event( std::array<Particle, 2> && incoming, std::vector<Particle> && outgoing, std::unordered_map<size_t, std::vector<Particle>> && decays, Parameters<EventParameters> && parameters, fastjet::JetDefinition const & jet_def, double const min_jet_pt ): incoming_{std::move(incoming)}, outgoing_{std::move(outgoing)}, decays_{std::move(decays)}, parameters_{std::move(parameters)}, cs_{ to_PseudoJet( filter_partons(outgoing_) ), jet_def }, min_jet_pt_{min_jet_pt} { jets_ = sorted_by_rapidity(cs_.inclusive_jets(min_jet_pt_)); } namespace { // check that Particles have a reasonable colour bool correct_colour(Particle const & part){ ParticleID id{ part.type }; if(!is_parton(id)) return !part.colour; if(!part.colour) return false; Colour const & col{ *part.colour }; if(is_quark(id)) return col.first != 0 && col.second == 0; if(is_antiquark(id)) return col.first == 0 && col.second != 0; assert(id==ParticleID::gluon); return col.first != 0 && col.second != 0 && col.first != col.second; } } bool Event::is_leading_colour() const { if( !correct_colour(incoming()[0]) || !correct_colour(incoming()[1]) ) return false; Colour line_colour = *incoming()[0].colour; std::swap(line_colour.first, line_colour.second); for(auto const & part: outgoing()){ // reasonable colour if(!correct_colour(part)) return false; if(!is_parton(part)) // skip colour neutral particles continue; // if possible connect to line if( line_colour.first == part.colour->second ) line_colour.first = part.colour->first; else if( line_colour.second == part.colour->first ) line_colour.second = part.colour->second; else return false; // no colour singlet exchange/disconnected diagram if(line_colour.first == line_colour.second) return false; } return (incoming()[1].colour->first == line_colour.first) && (incoming()[1].colour->second == line_colour.second); } namespace { void connect_incoming(Particle & in, int & colour, int & anti_colour){ in.colour = std::make_pair(anti_colour, colour); // gluon if(in.type == pid::gluon) return; if(in.type > 0){ // quark assert(is_quark(in)); in.colour->second = 0; colour*=-1; return; } // anti-quark assert(is_antiquark(in)); in.colour->first = 0; anti_colour*=-1; return; } } bool Event::generate_colours(RNG & ran){ // generate only for HEJ events if(!event_type::is_resummable(type())) return false; assert(std::is_sorted( begin(outgoing()), end(outgoing()), rapidity_less{})); assert(incoming()[0].pz() < incoming()[1].pz()); // positive (anti-)colour -> can connect // negative (anti-)colour -> not available/used up by (anti-)quark int colour = COLOUR_OFFSET; int anti_colour = colour+1; // initialise first connect_incoming(incoming_[0], colour, anti_colour); for(auto & part: outgoing_){ assert(colour>0 || anti_colour>0); if(part.type == ParticleID::gluon){ // gluon if(colour>0 && anti_colour>0){ // on g line => connect to colour OR anti-colour (random) if(ran.flat() < 0.5){ part.colour = std::make_pair(colour+2,colour); colour+=2; } else { part.colour = std::make_pair(anti_colour, anti_colour+2); anti_colour+=2; } } else if(colour > 0){ // on q line => connect to available colour part.colour = std::make_pair(colour+2, colour); colour+=2; } else { assert(colour<0 && anti_colour>0); // on qx line => connect to available anti-colour part.colour = std::make_pair(anti_colour, 
anti_colour+2); anti_colour+=2; } } else if(is_quark(part)) { // quark assert(anti_colour>0); if(colour>0){ // on g line => connect and remove anti-colour part.colour = std::make_pair(anti_colour, 0); anti_colour+=2; anti_colour*=-1; } else { // on qx line => new colour colour*=-1; part.colour = std::make_pair(colour, 0); } } else if(is_antiquark(part)) { // anti-quark assert(colour>0); if(anti_colour>0){ // on g line => connect and remove colour part.colour = std::make_pair(0, colour); colour+=2; colour*=-1; } else { // on q line => new anti-colour anti_colour*=-1; part.colour = std::make_pair(0, anti_colour); } } else { // not a parton assert(!is_parton(part)); part.colour = {}; } } // Connect last connect_incoming(incoming_[1], anti_colour, colour); assert(is_leading_colour()); return true; } // generate_colours namespace { bool valid_parton( std::vector<fastjet::PseudoJet> const & jets, Particle const & parton, int const idx, double const max_ext_soft_pt_fraction, double const min_extparton_pt ){ // TODO code overlap with PhaseSpacePoint::pass_extremal_cuts if(min_extparton_pt > parton.pt()) return false; if(idx<0) return false; assert((int) jets.size()>=idx); auto const & jet{ jets[idx] }; if( (parton.p - jet).pt()/jet.pt() > max_ext_soft_pt_fraction) return false; return true; } } // this should work with multiple types bool Event::valid_hej_state(double const max_frac, double const min_pt ) const { using namespace event_type; if(!is_resummable(type())) return false; auto const & jet_idx{ particle_jet_indices() }; auto idx_begin{ jet_idx.cbegin() }; auto idx_end{ jet_idx.crbegin() }; auto part_begin{ cbegin_partons() }; auto part_end{ crbegin_partons() }; // always separate extremal jets if( !valid_parton(jets(), *part_begin, *idx_begin, max_frac, min_pt) ) return false; ++part_begin; ++idx_begin; if( !valid_parton(jets(), *part_end, *idx_end, max_frac, min_pt) ) return false; ++part_end; ++idx_end; // unob -> second parton in own jet if( type() & (unob | qqxexb) ){ if( !valid_parton(jets(), *part_begin, *idx_begin, max_frac, min_pt) ) return false; ++part_begin; ++idx_begin; } if( type() & (unof | qqxexf) ){ if( !valid_parton(jets(), *part_end, *idx_end, max_frac, min_pt) ) return false; ++part_end; ++idx_end; } if( type() & qqxmid ){ // find qqx pair auto begin_qqx{ std::find_if( part_begin, part_end.base(), [](Particle const & part) -> bool { return part.type != ParticleID::gluon; } )}; assert(begin_qqx != part_end.base()); long int qqx_pos{ std::distance(part_begin, begin_qqx) }; assert(qqx_pos >= 0); idx_begin+=qqx_pos; if( !( valid_parton(jets(),*begin_qqx, *idx_begin, max_frac,min_pt) && valid_parton(jets(),*(++begin_qqx),*(++idx_begin),max_frac,min_pt) )) return false; } return true; } Event::ConstPartonIterator Event::begin_partons() const { return cbegin_partons(); - }; + } Event::ConstPartonIterator Event::cbegin_partons() const { return boost::make_filter_iterator( static_cast<bool (*)(Particle const &)>(is_parton), cbegin(outgoing()), cend(outgoing()) ); - }; + } Event::ConstPartonIterator Event::end_partons() const { return cend_partons(); - }; + } Event::ConstPartonIterator Event::cend_partons() const { return boost::make_filter_iterator( static_cast<bool (*)(Particle const &)>(is_parton), cend(outgoing()), cend(outgoing()) ); - }; + } Event::ConstReversePartonIterator Event::rbegin_partons() const { return crbegin_partons(); } Event::ConstReversePartonIterator Event::crbegin_partons() const { return std::reverse_iterator<ConstPartonIterator>( cend_partons() ); - }; +
} Event::ConstReversePartonIterator Event::rend_partons() const { return crend_partons(); } Event::ConstReversePartonIterator Event::crend_partons() const { return std::reverse_iterator<ConstPartonIterator>( cbegin_partons() ); - }; + } namespace { void print_momentum(std::ostream & os, fastjet::PseudoJet const & part){ const std::streamsize orig_prec = os.precision(); os <<std::scientific<<std::setprecision(6) << "[" <<std::setw(13)<<std::right<< part.px() << ", " <<std::setw(13)<<std::right<< part.py() << ", " <<std::setw(13)<<std::right<< part.pz() << ", " <<std::setw(13)<<std::right<< part.E() << "]"<< std::fixed; os.precision(orig_prec); } void print_colour(std::ostream & os, optional<Colour> const & col){ if(!col) os << "(no color)"; // American spelling for better alignment else os << "(" <<std::setw(3)<<std::right<< col->first << ", " <<std::setw(3)<<std::right<< col->second << ")"; } } std::ostream& operator<<(std::ostream & os, Event const & ev){ const std::streamsize orig_prec = os.precision(); os <<std::setprecision(4)<<std::fixed; os << "########## " << event_type::name(ev.type()) << " ##########" << std::endl; os << "Incoming particles:\n"; for(auto const & in: ev.incoming()){ os <<std::setw(3)<< in.type << ": "; print_colour(os, in.colour); os << " "; print_momentum(os, in.p); os << std::endl; } os << "\nOutgoing particles: " << ev.outgoing().size() << "\n"; for(auto const & out: ev.outgoing()){ os <<std::setw(3)<< out.type << ": "; print_colour(os, out.colour); os << " "; print_momentum(os, out.p); os << " => rapidity=" <<std::setw(7)<<std::right<< out.rapidity() << std::endl; } os << "\nForming Jets: " << ev.jets().size() << "\n"; for(auto const & jet: ev.jets()){ print_momentum(os, jet); os << " => rapidity=" <<std::setw(7)<<std::right<< jet.rapidity() << std::endl; } if(ev.decays().size() > 0 ){ os << "\nDecays: " << ev.decays().size() << "\n"; for(auto const & decay: ev.decays()){ os <<std::setw(3)<< ev.outgoing()[decay.first].type << " (" << decay.first << ") to:\n"; for(auto const & out: decay.second){ os <<" "<<std::setw(3)<< out.type << ": "; print_momentum(os, out.p); os << " => rapidity=" <<std::setw(7)<<std::right<< out.rapidity() << std::endl; } } } os << std::defaultfloat; os.precision(orig_prec); return os; } double shat(Event const & ev){ return (ev.incoming()[0].p + ev.incoming()[1].p).m2(); } LHEF::HEPEUP to_HEPEUP(Event const & event, LHEF::HEPRUP * heprup){ LHEF::HEPEUP result; result.heprup = heprup; result.weights = {{event.central().weight, nullptr}}; for(auto const & var: event.variations()){ result.weights.emplace_back(var.weight, nullptr); } size_t num_particles = event.incoming().size() + event.outgoing().size(); for(auto const & decay: event.decays()) num_particles += decay.second.size(); result.NUP = num_particles; // the following entries are pretty much meaningless result.IDPRUP = event.type(); // event type result.AQEDUP = 1./128.; // alpha_EW //result.AQCDUP = 0.118 // alpha_QCD // end meaningless part result.XWGTUP = event.central().weight; result.SCALUP = event.central().muf; result.scales.muf = event.central().muf; result.scales.mur = event.central().mur; result.scales.SCALUP = event.central().muf; result.pdfinfo.p1 = event.incoming().front().type; result.pdfinfo.p2 = event.incoming().back().type; result.pdfinfo.scale = event.central().muf; result.IDUP.reserve(num_particles); // PID result.ISTUP.reserve(num_particles); // status (in, out, decay) result.PUP.reserve(num_particles); // momentum result.MOTHUP.reserve(num_particles); // index 
mother particle result.ICOLUP.reserve(num_particles); // colour // incoming std::array<Particle, 2> incoming{ event.incoming() }; // First incoming should be positive pz according to LHE standard // (or at least most (everyone?) do it this way, and Pythia assumes it) if(incoming[0].pz() < incoming[1].pz()) std::swap(incoming[0], incoming[1]); for(Particle const & in: incoming){ result.IDUP.emplace_back(in.type); result.ISTUP.emplace_back(status_in); result.PUP.push_back({in.p[0], in.p[1], in.p[2], in.p[3], in.p.m()}); result.MOTHUP.emplace_back(0, 0); assert(in.colour); result.ICOLUP.emplace_back(*in.colour); } // outgoing for(size_t i = 0; i < event.outgoing().size(); ++i){ Particle const & out = event.outgoing()[i]; result.IDUP.emplace_back(out.type); const int status = event.decays().count(i)?status_decayed:status_out; result.ISTUP.emplace_back(status); result.PUP.push_back({out.p[0], out.p[1], out.p[2], out.p[3], out.p.m()}); result.MOTHUP.emplace_back(1, 2); if(out.colour) result.ICOLUP.emplace_back(*out.colour); else{ result.ICOLUP.emplace_back(std::make_pair(0,0)); } } // decays for(auto const & decay: event.decays()){ for(auto const out: decay.second){ result.IDUP.emplace_back(out.type); result.ISTUP.emplace_back(status_out); result.PUP.push_back({out.p[0], out.p[1], out.p[2], out.p[3], out.p.m()}); const size_t mother_idx = 1 + event.incoming().size() + decay.first; result.MOTHUP.emplace_back(mother_idx, mother_idx); result.ICOLUP.emplace_back(0,0); } } assert(result.ICOLUP.size() == num_particles); static constexpr double unknown_spin = 9.; //per Les Houches accord result.VTIMUP = std::vector<double>(num_particles, unknown_spin); result.SPINUP = result.VTIMUP; return result; } } diff --git a/src/EventReweighter.cc b/src/EventReweighter.cc index bca986e..5c0b48a 100644 --- a/src/EventReweighter.cc +++ b/src/EventReweighter.cc @@ -1,282 +1,282 @@ /** * \authors The HEJ collaboration (see AUTHORS for details) * \date 2019 * \copyright GPLv2 or later */ #include "HEJ/EventReweighter.hh" #include <algorithm> #include <assert.h> #include <limits> #include <math.h> #include <stddef.h> #include <string> #include <unordered_map> #include "fastjet/ClusterSequence.hh" #include "LHEF/LHEF.h" #include "HEJ/Event.hh" #include "HEJ/exceptions.hh" #include "HEJ/Particle.hh" #include "HEJ/PDG_codes.hh" #include "HEJ/PhaseSpacePoint.hh" namespace HEJ{ using EventType = event_type::EventType; namespace { static_assert( std::numeric_limits<double>::has_quiet_NaN, "no quiet NaN for double" ); constexpr double NaN = std::numeric_limits<double>::quiet_NaN(); Event::EventData to_EventData(PhaseSpacePoint const & psp){ Event::EventData result; result.incoming=psp.incoming(); assert(result.incoming.size() == 2); result.outgoing=psp.outgoing(); // technically Event::EventData doesn't have to be sorted, // but PhaseSpacePoint should be anyway assert( std::is_sorted( begin(result.outgoing), end(result.outgoing), rapidity_less{} ) ); assert(result.outgoing.size() >= 2); result.decays = psp.decays(); result.parameters.central = {NaN, NaN, psp.weight()}; return result; } } // namespace anonymous EventReweighter::EventReweighter( LHEF::HEPRUP const & heprup, ScaleGenerator scale_gen, EventReweighterConfig conf, HEJ::RNG & ran ): EventReweighter{ HEJ::Beam{ heprup.EBMUP.first, {{ static_cast<HEJ::ParticleID>(heprup.IDBMUP.first), static_cast<HEJ::ParticleID>(heprup.IDBMUP.second) }} }, heprup.PDFSUP.first, std::move(scale_gen), std::move(conf), ran } { if(heprup.EBMUP.second != E_beam_){ throw 
std::invalid_argument( "asymmetric beam: " + std::to_string(E_beam_) + " ---> <--- " + std::to_string(heprup.EBMUP.second) ); }; if(heprup.PDFSUP.second != pdf_.id()){ throw std::invalid_argument( "conflicting PDF ids: " + std::to_string(pdf_.id()) + " vs. " + std::to_string(heprup.PDFSUP.second) ); } } EventReweighter::EventReweighter( Beam beam, int pdf_id, ScaleGenerator scale_gen, EventReweighterConfig conf, HEJ::RNG & ran ): param_{std::move(conf)}, E_beam_{beam.E}, pdf_{pdf_id, beam.type.front(), beam.type.back()}, MEt2_{ [this](double mu){ return pdf_.Halphas(mu); }, param_.ME_config }, scale_gen_(std::move(scale_gen)), ran_{ran} {} PDF const & EventReweighter::pdf() const{ return pdf_; } std::vector<Event> EventReweighter::reweight( Event const & input_ev, int num_events ){ auto res_events{ gen_res_events(input_ev, num_events) }; if(res_events.empty()) return {}; for(auto & event: res_events) event = scale_gen_(std::move(event)); return rescale(input_ev, std::move(res_events)); } EventTreatment EventReweighter::treatment(EventType type) const { return param_.treat.at(type); } std::vector<Event> EventReweighter::gen_res_events( Event const & ev, size_t phase_space_points ){ assert(ev.variations().empty()); status_.clear(); switch(treatment(ev.type())){ case EventTreatment::discard: { status_.emplace_back(StatusCode::discard); return {}; } case EventTreatment::keep: if(! jets_pass_resummation_cuts(ev)) { status_.emplace_back(StatusCode::failed_resummation_cuts); return {}; } else { status_.emplace_back(StatusCode::good); return {ev}; } default:; } const double Born_shat = shat(ev); std::vector<Event> resummation_events; status_.reserve(phase_space_points); for(size_t psp_number = 0; psp_number < phase_space_points; ++psp_number){ PhaseSpacePoint psp{ev, param_.psp_config, ran_}; status_.emplace_back(psp.status()); assert(psp.status() != StatusCode::unspecified); if(psp.status() != StatusCode::good) continue; assert(psp.weight() != 0.); if(psp.incoming()[0].E() > E_beam_ || psp.incoming()[1].E() > E_beam_) { status_.back() = StatusCode::too_much_energy; continue; } resummation_events.emplace_back( to_EventData( std::move(psp) ).cluster( param_.jet_param.def, param_.jet_param.min_pt ) ); auto & new_event = resummation_events.back(); assert( new_event.valid_hej_state( param_.psp_config.max_ext_soft_pt_fraction, param_.psp_config.min_extparton_pt ) ); if( new_event.type() != ev.type() ) throw std::logic_error{"Resummation Event does not match Born event"}; new_event.generate_colours(ran_); assert(new_event.variations().empty()); new_event.central().mur = ev.central().mur; new_event.central().muf = ev.central().muf; const double resum_shat = shat(new_event); new_event.central().weight *= ev.central().weight*Born_shat*Born_shat/ (phase_space_points*resum_shat*resum_shat); } return resummation_events; } std::vector<Event> EventReweighter::rescale( Event const & Born_ev, std::vector<Event> events ) const{ const double Born_pdf = pdf_factors(Born_ev).central; const double Born_ME = tree_matrix_element(Born_ev); for(auto & cur_event: events){ const auto pdf = pdf_factors(cur_event); assert(pdf.variations.size() == cur_event.variations().size()); const auto ME = matrix_elements(cur_event); assert(ME.variations.size() == cur_event.variations().size()); cur_event.parameters() *= pdf*ME/(Born_pdf*Born_ME); } return events; - }; + } bool EventReweighter::jets_pass_resummation_cuts( Event const & ev ) const{ const auto out_as_PseudoJet = to_PseudoJet(filter_partons(ev.outgoing())); 
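// recluster the outgoing partons with the resummation jet definition and
// require that the number of jets above the resummation minimum pt matches
// the jet count of the original event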
fastjet::ClusterSequence cs{out_as_PseudoJet, param_.jet_param.def}; return cs.inclusive_jets(param_.jet_param.min_pt).size() == ev.jets().size(); } Weights EventReweighter::pdf_factors(Event const & ev) const{ auto const & a = ev.incoming().front(); auto const & b = ev.incoming().back(); const double xa = a.p.e()/E_beam_; const double xb = b.p.e()/E_beam_; Weights result; std::unordered_map<double, double> known_pdf; result.central = pdf_.pdfpt(0,xa,ev.central().muf,a.type)* pdf_.pdfpt(1,xb,ev.central().muf,b.type); known_pdf.emplace(ev.central().muf, result.central); result.variations.reserve(ev.variations().size()); for(auto const & ev_param: ev.variations()){ const double muf = ev_param.muf; auto cur_pdf = known_pdf.find(muf); if(cur_pdf == known_pdf.end()){ cur_pdf = known_pdf.emplace( muf, pdf_.pdfpt(0,xa,muf,a.type)*pdf_.pdfpt(1,xb,muf,b.type) ).first; } result.variations.emplace_back(cur_pdf->second); } assert(result.variations.size() == ev.variations().size()); return result; } Weights EventReweighter::matrix_elements(Event const & ev) const{ assert(param_.treat.count(ev.type()) > 0); if(param_.treat.find(ev.type())->second == EventTreatment::keep){ return fixed_order_scale_ME(ev); } return MEt2_(ev); } double EventReweighter::tree_matrix_element(Event const & ev) const{ assert(ev.variations().empty()); assert(param_.treat.count(ev.type()) > 0); if(param_.treat.find(ev.type())->second == EventTreatment::keep){ return fixed_order_scale_ME(ev).central; } return MEt2_.tree(ev).central; } Weights EventReweighter::fixed_order_scale_ME(Event const & ev) const{ int alpha_s_power = 0; for(auto const & part: ev.outgoing()){ if(is_parton(part)) ++alpha_s_power; else if(part.type == pid::Higgs) { alpha_s_power += 2; } // nothing to do for other uncoloured particles } Weights result; result.central = pow(pdf_.Halphas(ev.central().mur), alpha_s_power); for(auto const & var: ev.variations()){ result.variations.emplace_back( pow(pdf_.Halphas(var.mur), alpha_s_power) ); } return result; } } // namespace HEJ diff --git a/src/PDF.cc b/src/PDF.cc index 0792a99..c81a389 100644 --- a/src/PDF.cc +++ b/src/PDF.cc @@ -1,112 +1,112 @@ /** * \authors The HEJ collaboration (see AUTHORS for details) * \date 2019 * \copyright GPLv2 or later */ #include "HEJ/PDF.hh" #include <iostream> #include <stdexcept> #include <string> namespace HEJ{ namespace{ int to_beam(ParticleID id){ if(std::abs(id) == pid::proton){ return (id > 0)?1:-1; } throw std::invalid_argument( "unknown beam type: " + std::to_string(id) ); } } #if defined LHAPDF_MAJOR_VERSION && LHAPDF_MAJOR_VERSION == 6 PDF::PDF(int id, ParticleID beam1, ParticleID beam2): pdf{LHAPDF::mkPDF(id)}, beamtype{{to_beam(beam1), to_beam(beam2)}} {} double PDF::pdfpt(size_t beam_idx, double x, double q, ParticleID id) const{ if(!(inRangeQ(q) && inRangeX(x))) return 0.; if(id == pid::gluon){ return pdf->xfxQ(21,x,q); } else if(abs(id) < 7){ return pdf->xfxQ(id*beamtype[beam_idx],x,q); } else { std::cerr << "particle type unknown: "<< id << std::endl; return 0.0; } } double PDF::Halphas(double q) const{ double as = pdf->alphasQ(q); if (std::isnan(as) || as > 0.5) { as = 0.5; } return as; } int PDF::id() const{ return pdf->lhapdfID(); - }; + } bool PDF::inRangeQ(double q) const{ return pdf->inRangeQ(q); } bool PDF::inRangeX(double x) const{ return pdf->inRangeX(x); } #else /* LHAPDF version unknown or older than 6 */ PDF::PDF(std::string LHAPDFName, int LHAPDFsubset, ParticleID beam1, ParticleID beam2): beamtype{{to_beam(beam1_type), to_beam(beam2_type)}} { 
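// legacy code path for LHAPDF versions before 6: the PDF set is initialised
// through the old global interface instead of a per-object LHAPDF::PDF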
LHAPDF::initPDFSet(LHAPDFName, LHAPDF::LHGRID, LHAPDFsubset); } double PDF::pdfpt(size_t beam_idx, double x, double q, ParticleID id) const{ if(!(inRangeQ(q) && inRangeX(x))) return 0.; if (id == pid::gluon){ return LHAPDF::xfx(x,q,0); } else if (abs(id) < 7){ return LHAPDF::xfx(x,q,id*beamtype[beam_idx]); } else { std::cerr << "particle type unknown: "<< id <<std::endl; return 0.0; } } double PDF::Halphas(double q) const{ double as = LHAPDF::alphasPDF(q); if (isnan(as) || as > 0.5) as = 0.5; return as; } bool PDF::inRangeQ(double q) const{ // here we assume that all members actually have the same range! static constexpr int member = 0; return (LHAPDF::getQ2min(member) < q*q) && (q*q < LHAPDF::getQ2max(member)); } bool PDF::inRangeX(double x) const{ // here we assume that all members actually have the same range! static constexpr int member = 0; return (LHAPDF::getXmin(member) < x) && (x < LHAPDF::getXmax(member)); } #endif } diff --git a/src/YAMLreader.cc b/src/YAMLreader.cc index 943c4f0..bfafc5c 100644 --- a/src/YAMLreader.cc +++ b/src/YAMLreader.cc @@ -1,555 +1,555 @@ /** * \authors The HEJ collaboration (see AUTHORS for details) * \date 2019 * \copyright GPLv2 or later */ #include "HEJ/YAMLreader.hh" #include <algorithm> #include <iostream> #include <limits> #include <map> #include <string> #include <unordered_map> #include <vector> #include <dlfcn.h> #include "HEJ/ScaleFunction.hh" #include "HEJ/event_types.hh" #include "HEJ/output_formats.hh" #include "HEJ/Constants.hh" namespace HEJ{ class Event; namespace{ //! Get YAML tree of supported options /** * The configuration file is checked against this tree of options * in assert_all_options_known. */ YAML::Node const & get_supported_options(){ const static YAML::Node supported = [](){ YAML::Node supported; static const auto opts = { "trials", "min extparton pt", "max ext soft pt fraction", "scales", "scale factors", "max scale ratio", "import scales", "log correction", "event output", "analysis", "vev", "regulator parameter", "max events" }; // add subnodes to "supported" - the assigned value is irrelevant for(auto && opt: opts) supported[opt] = ""; for(auto && jet_opt: {"min pt", "algorithm", "R"}){ supported["resummation jets"][jet_opt] = ""; supported["fixed order jets"][jet_opt] = ""; } for(auto && opt: {"mt", "use impact factors", "include bottom", "mb"}){ supported["Higgs coupling"][opt] = ""; } for(auto && opt: {"name", "seed"}){ supported["random generator"][opt] = ""; } for(auto && opt: {"FKL", "unordered", "extremal qqx", "central qqx", "non-resummable"}){ supported["event treatment"][opt] = ""; } for(auto && particle_type: {"Higgs", "W", "Z"}){ for(auto && particle_opt: {"mass", "width"}){ supported["particle properties"][particle_type][particle_opt] = ""; } } for(auto && opt: {"type", "trials", "max deviation"}){ supported["unweight"][opt] = ""; } return supported; }(); return supported; } fastjet::JetAlgorithm to_JetAlgorithm(std::string const & algo){ using namespace fastjet; static const std::map<std::string, fastjet::JetAlgorithm> known = { {"kt", kt_algorithm}, {"cambridge", cambridge_algorithm}, {"antikt", antikt_algorithm}, {"cambridge for passive", cambridge_for_passive_algorithm}, {"plugin", plugin_algorithm} }; const auto res = known.find(algo); if(res == known.end()){ throw std::invalid_argument("Unknown jet algorithm \"" + algo + "\""); } return res->second; } EventTreatment to_EventTreatment(std::string const & name){ static const std::map<std::string, EventTreatment> known = { {"reweight", 
EventTreatment::reweight}, {"keep", EventTreatment::keep}, {"discard", EventTreatment::discard} }; const auto res = known.find(name); if(res == known.end()){ throw std::invalid_argument("Unknown event treatment \"" + name + "\""); } return res->second; } WeightType to_weight_type(std::string const & setting){ if(setting == "weighted") return WeightType::weighted; if(setting =="resummation") return WeightType::unweighted_resum; if(setting =="partial") return WeightType::partially_unweighted; throw std::invalid_argument{"Unknown weight type \"" + setting + "\""}; } } // namespace anonymous namespace detail{ void set_from_yaml(fastjet::JetAlgorithm & setting, YAML::Node const & yaml){ setting = to_JetAlgorithm(yaml.as<std::string>()); } void set_from_yaml(EventTreatment & setting, YAML::Node const & yaml){ setting = to_EventTreatment(yaml.as<std::string>()); } void set_from_yaml(ParticleID & setting, YAML::Node const & yaml){ setting = to_ParticleID(yaml.as<std::string>()); } void set_from_yaml(WeightType & setting, YAML::Node const & yaml){ setting = to_weight_type(yaml.as<std::string>()); } } // namespace detail JetParameters get_jet_parameters( YAML::Node const & node, std::string const & entry ){ assert(node); JetParameters result; fastjet::JetAlgorithm jet_algo = fastjet::antikt_algorithm; double R; set_from_yaml_if_defined(jet_algo, node, entry, "algorithm"); set_from_yaml(R, node, entry, "R"); result.def = fastjet::JetDefinition{jet_algo, R}; set_from_yaml(result.min_pt, node, entry, "min pt"); return result; } RNGConfig to_RNGConfig( YAML::Node const & node, std::string const & entry ){ assert(node); RNGConfig result; set_from_yaml(result.name, node, entry, "name"); set_from_yaml_if_defined(result.seed, node, entry, "seed"); return result; } ParticleProperties get_particle_properties( YAML::Node const & node, std::string const & entry, std::string const & boson ){ ParticleProperties result; set_from_yaml(result.mass, node, entry, boson, "mass"); set_from_yaml(result.width, node, entry, boson, "width"); return result; } EWConstants get_ew_parameters(YAML::Node const & node){ EWConstants result; double vev; set_from_yaml(vev, node, "vev"); result.set_vevWZH(vev, get_particle_properties(node, "particle properties", "W"), get_particle_properties(node, "particle properties", "Z"), get_particle_properties(node, "particle properties", "Higgs") ); return result; } HiggsCouplingSettings get_Higgs_coupling( YAML::Node const & node, std::string const & entry ){ assert(node); static constexpr double mt_max = 2e4; #ifndef HEJ_BUILD_WITH_QCDLOOP if(node[entry]){ throw std::invalid_argument{ "Higgs coupling settings require building HEJ 2 " "with QCDloop support" }; } #endif HiggsCouplingSettings settings; set_from_yaml_if_defined(settings.mt, node, entry, "mt"); set_from_yaml_if_defined(settings.mb, node, entry, "mb"); set_from_yaml_if_defined(settings.include_bottom, node, entry, "include bottom"); set_from_yaml_if_defined(settings.use_impact_factors, node, entry, "use impact factors"); if(settings.use_impact_factors){ if(settings.mt != std::numeric_limits<double>::infinity()){ throw std::invalid_argument{ "Conflicting settings: " "impact factors may only be used in the infinite top mass limit" }; } } else{ // huge values of the top mass are numerically unstable settings.mt = std::min(settings.mt, mt_max); } return settings; } FileFormat to_FileFormat(std::string const & name){ static const std::map<std::string, FileFormat> known = { {"Les Houches", FileFormat::Les_Houches}, {"HepMC", 
FileFormat::HepMC}, {"HepMC2", FileFormat::HepMC2}, {"HepMC3", FileFormat::HepMC3}, {"HDF5", FileFormat::HDF5} }; const auto res = known.find(name); if(res == known.end()){ throw std::invalid_argument("Unknown file format \"" + name + "\""); } return res->second; } std::string extract_suffix(std::string const & filename){ size_t separator = filename.rfind('.'); if(separator == filename.npos) return {}; return filename.substr(separator + 1); } FileFormat format_from_suffix(std::string const & filename){ const std::string suffix = extract_suffix(filename); if(suffix == "lhe") return FileFormat::Les_Houches; if(suffix == "hepmc") return FileFormat::HepMC; if(suffix == "hepmc3") return FileFormat::HepMC3; if(suffix == "hepmc2") return FileFormat::HepMC2; if(suffix == "hdf5") return FileFormat::HDF5; throw std::invalid_argument{ "Can't determine format for output file \"" + filename + "\"" }; } void assert_all_options_known( YAML::Node const & conf, YAML::Node const & supported ){ if(!conf.IsMap()) return; if(!supported.IsMap()) throw invalid_type{"must not have sub-entries"}; for(auto const & entry: conf){ const auto name = entry.first.as<std::string>(); if(! supported[name]) throw unknown_option{name}; /* check sub-options, e.g. 'resummation jets: min pt' * we don't check analysis sub-options * those depend on the analysis being used and should be checked there * similar for "import scales" */ if(name != "analysis" && name != "import scales"){ try{ assert_all_options_known(conf[name], supported[name]); } catch(unknown_option const & ex){ throw unknown_option{name + ": " + ex.what()}; } catch(invalid_type const & ex){ throw invalid_type{name + ": " + ex.what()}; } } } } } // namespace HEJ namespace YAML { Node convert<HEJ::OutputFile>::encode(HEJ::OutputFile const & outfile) { Node node; node[to_string(outfile.format)] = outfile.name; return node; - }; + } bool convert<HEJ::OutputFile>::decode(Node const & node, HEJ::OutputFile & out) { switch(node.Type()){ case NodeType::Map: { YAML::const_iterator it = node.begin(); out.format = HEJ::to_FileFormat(it->first.as<std::string>()); out.name = it->second.as<std::string>(); return true; } case NodeType::Scalar: out.name = node.as<std::string>(); out.format = HEJ::format_from_suffix(out.name); return true; default: return false; } } } // namespace YAML namespace HEJ{ namespace detail{ void set_from_yaml(OutputFile & setting, YAML::Node const & yaml){ setting = yaml.as<OutputFile>(); } } namespace{ void update_fixed_order_jet_parameters( JetParameters & fixed_order_jets, YAML::Node const & yaml ){ if(!yaml["fixed order jets"]) return; set_from_yaml_if_defined( fixed_order_jets.min_pt, yaml, "fixed order jets", "min pt" ); fastjet::JetAlgorithm algo = fixed_order_jets.def.jet_algorithm(); set_from_yaml_if_defined(algo, yaml, "fixed order jets", "algorithm"); double R = fixed_order_jets.def.R(); set_from_yaml_if_defined(R, yaml, "fixed order jets", "R"); fixed_order_jets.def = fastjet::JetDefinition{algo, R}; } // like std::stod, but throw if not the whole string can be converted double to_double(std::string const & str){ std::size_t pos; const double result = std::stod(str, &pos); if(pos < str.size()){ throw std::invalid_argument(str + " is not a valid double value"); } return result; } using EventScale = double (*)(Event const &); void import_scale_functions( std::string const & file, std::vector<std::string> const & scale_names, std::unordered_map<std::string, EventScale> & known ) { auto handle = dlopen(file.c_str(), RTLD_NOW); char * error = 
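The convert<HEJ::OutputFile> decode above accepts both spellings of an "event output" entry: a bare file name, whose format is deduced from its suffix, and a single-key map that names the format explicitly. The following sketch illustrates the two forms; it assumes that the convert<HEJ::OutputFile> specialisation and to_string(HEJ::FileFormat) are made visible through the headers this file already includes.

// Sketch only: both YAML spellings yield the same kind of HEJ::OutputFile.
#include <iostream>
#include "yaml-cpp/yaml.h"
#include "HEJ/YAMLreader.hh"     // assumed to declare convert<HEJ::OutputFile>
#include "HEJ/output_formats.hh" // assumed to declare to_string(HEJ::FileFormat)

int main(){
  // bare file name: the format is deduced from the suffix (format_from_suffix)
  const auto by_suffix = YAML::Load("events.hepmc3").as<HEJ::OutputFile>();
  // single-key map: the format is named explicitly, the value is the file name
  const auto by_format = YAML::Load("{HepMC3: events.out}").as<HEJ::OutputFile>();
  std::cout << to_string(by_suffix.format) << ": " << by_suffix.name << '\n'
            << to_string(by_format.format) << ": " << by_format.name << '\n';
}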
dlerror(); if(error != nullptr) throw std::runtime_error{error}; for(auto const & scale: scale_names) { void * sym = dlsym(handle, scale.c_str()); error = dlerror(); if(error != nullptr) throw std::runtime_error{error}; known.emplace(scale, reinterpret_cast<EventScale>(sym)); } } auto get_scale_map( YAML::Node const & yaml ) { std::unordered_map<std::string, EventScale> scale_map; scale_map.emplace("H_T", H_T); scale_map.emplace("max jet pperp", max_jet_pt); scale_map.emplace("jet invariant mass", jet_invariant_mass); scale_map.emplace("m_j1j2", m_j1j2); if(yaml["import scales"]) { if(! yaml["import scales"].IsMap()) { throw invalid_type{"Entry 'import scales' is not a map"}; } for(auto const & import: yaml["import scales"]) { const auto file = import.first.as<std::string>(); const auto scale_names = import.second.IsSequence() ?import.second.as<std::vector<std::string>>() :std::vector<std::string>{import.second.as<std::string>()}; import_scale_functions(file, scale_names, scale_map); } } return scale_map; } // simple (as in non-composite) scale functions /** * An example for a simple scale function would be H_T, * H_T/2 is then composite (take H_T and then divide by 2) */ ScaleFunction parse_simple_ScaleFunction( std::string const & scale_fun, std::unordered_map<std::string, EventScale> const & known ) { assert( scale_fun.empty() || (!std::isspace(scale_fun.front()) && !std::isspace(scale_fun.back())) ); const auto it = known.find(scale_fun); if(it != end(known)) return {it->first, it->second}; try{ const double scale = to_double(scale_fun); return {scale_fun, FixedScale{scale}}; } catch(std::invalid_argument const &){} throw std::invalid_argument{"Unknown scale choice: \"" + scale_fun + "\""}; } std::string trim_front(std::string const & str){ const auto new_begin = std::find_if( begin(str), end(str), [](char c){ return ! 
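get_scale_map pre-registers the built-in scales (H_T, max jet pperp, jet invariant mass, m_j1j2) and then resolves every name listed under "import scales" with dlsym, casting the symbol to double (*)(HEJ::Event const &). A user-supplied scale therefore has to be exported under exactly the name given in the run card. The sketch below assumes this is done with C linkage; the file name myscales.so and the function name half_H_T are purely illustrative, and it further assumes the built-in HEJ::H_T is declared in HEJ/ScaleFunction.hh, as its use in this file suggests.

// myscales.cc -- illustrative sketch; file and function names are hypothetical.
// extern "C" keeps the symbol unmangled so that dlsym can resolve it under
// exactly the name listed in the run card.
#include "HEJ/Event.hh"
#include "HEJ/ScaleFunction.hh" // assumed to declare the built-in HEJ::H_T

extern "C"
double half_H_T(HEJ::Event const & ev){
  // reuse the built-in H_T scale and divide by two
  return HEJ::H_T(ev)/2.;
}

// Built as a shared library, e.g.
//   g++ -shared -fPIC myscales.cc -o myscales.so
// it could then be referenced in the run card as
//   import scales:
//     myscales.so: half_H_T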
std::isspace(c); } ); return std::string(new_begin, end(str)); } std::string trim_back(std::string str){ size_t pos = str.size() - 1; // use guaranteed wrap-around behaviour to check whether we have // traversed the whole string for(; pos < str.size() && std::isspace(str[pos]); --pos) {} str.resize(pos + 1); // note that pos + 1 can be 0 return str; } ScaleFunction parse_ScaleFunction( std::string const & scale_fun, std::unordered_map<std::string, EventScale> const & known ){ assert( scale_fun.empty() || (!std::isspace(scale_fun.front()) && !std::isspace(scale_fun.back())) ); // parse from right to left => a/b/c gives (a/b)/c const size_t delim = scale_fun.find_last_of("*/"); if(delim == scale_fun.npos){ return parse_simple_ScaleFunction(scale_fun, known); } const std::string first = trim_back(std::string{scale_fun, 0, delim}); const std::string second = trim_front(std::string{scale_fun, delim+1}); if(scale_fun[delim] == '/'){ return parse_ScaleFunction(first, known) / parse_ScaleFunction(second, known); } else{ assert(scale_fun[delim] == '*'); return parse_ScaleFunction(first, known) * parse_ScaleFunction(second, known); } } EventTreatMap get_event_treatment( YAML::Node const & node, std::string const & entry ){ using namespace event_type; EventTreatMap treat { {no_2_jets, EventTreatment::discard}, {bad_final_state, EventTreatment::discard}, {FKL, EventTreatment::discard}, {unob, EventTreatment::discard}, {unof, EventTreatment::discard}, {qqxexb, EventTreatment::discard}, {qqxexf, EventTreatment::discard}, {qqxmid, EventTreatment::discard}, {non_resummable, EventTreatment::discard} }; set_from_yaml(treat.at(FKL), node, entry, "FKL"); set_from_yaml(treat.at(unob), node, entry, "unordered"); treat.at(unof) = treat.at(unob); set_from_yaml(treat.at(qqxexb), node, entry, "extremal qqx"); treat.at(qqxexf) = treat.at(qqxexb); set_from_yaml(treat.at(qqxmid), node, entry, "central qqx"); set_from_yaml(treat.at(non_resummable), node, entry, "non-resummable"); if(treat[non_resummable] == EventTreatment::reweight){ throw std::invalid_argument{"Cannot reweight Fixed Order events"}; } return treat; } Config to_Config(YAML::Node const & yaml){ try{ assert_all_options_known(yaml, get_supported_options()); } catch(unknown_option const & ex){ throw unknown_option{std::string{"Unknown option '"} + ex.what() + "'"}; } Config config; config.resummation_jets = get_jet_parameters(yaml, "resummation jets"); config.fixed_order_jets = config.resummation_jets; update_fixed_order_jet_parameters(config.fixed_order_jets, yaml); set_from_yaml_if_defined(config.min_extparton_pt, yaml, "min extparton pt"); if(config.min_extparton_pt!=0) std::cerr << "WARNING: \"min extparton pt\" is deprecated." 
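parse_ScaleFunction splits the scale expression at the last '*' or '/' and recurses on both halves, so chained operators associate to the left. The standalone snippet below only mirrors that top-level splitting rule to make the associativity visible; it is not part of HEJ.

#include <iostream>
#include <string>

// Show how a scale expression is split at its last '*' or '/' (the same rule
// parse_ScaleFunction applies before recursing on both halves).
void print_split(std::string const & scale_fun){
  const auto delim = scale_fun.find_last_of("*/");
  if(delim == scale_fun.npos){
    std::cout << "simple scale: " << scale_fun << '\n';
    return;
  }
  std::cout << '(' << scale_fun.substr(0, delim) << ") "
            << scale_fun[delim] << " ("
            << scale_fun.substr(delim + 1) << ")\n";
}

int main(){
  print_split("H_T");            // simple scale: H_T
  print_split("H_T/2");          // (H_T) / (2)
  print_split("H_T*m_j1j2/2");   // (H_T*m_j1j2) / (2), i.e. left associative
}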
<< " Please use \"max ext soft pt fraction\" instead.\n"; set_from_yaml( config.max_ext_soft_pt_fraction, yaml, "max ext soft pt fraction" ); // Sets the standard value, then changes this if defined config.regulator_lambda=CLAMBDA; set_from_yaml_if_defined(config.regulator_lambda, yaml, "regulator parameter"); set_from_yaml_if_defined(config.max_events, yaml, "max events"); set_from_yaml(config.trials, yaml, "trials"); config.weight_type = WeightType::weighted; set_from_yaml_if_defined(config.weight_type, yaml, "unweight", "type"); if(config.weight_type == WeightType::partially_unweighted) { config.unweight_config = PartialUnweightConfig{}; set_from_yaml( config.unweight_config->trials, yaml, "unweight", "trials" ); set_from_yaml( config.unweight_config->max_dev, yaml, "unweight", "max deviation" ); } else if(yaml["unweight"]) { for(auto && opt: {"trials", "max deviation"}) { if(yaml["unweight"][opt]) { throw std::invalid_argument{ "'unweight: " + std::string{opt} + "' " "is only supported if 'unweight: type' is set to 'partial'" }; } } } set_from_yaml(config.log_correction, yaml, "log correction"); config.treat = get_event_treatment(yaml, "event treatment"); set_from_yaml_if_defined(config.output, yaml, "event output"); config.rng = to_RNGConfig(yaml, "random generator"); set_from_yaml_if_defined(config.analysis_parameters, yaml, "analysis"); config.scales = to_ScaleConfig(yaml); config.ew_parameters = get_ew_parameters(yaml); config.Higgs_coupling = get_Higgs_coupling(yaml, "Higgs coupling"); return config; } } // namespace anonymous ScaleConfig to_ScaleConfig(YAML::Node const & yaml){ ScaleConfig config; auto scale_funs = get_scale_map(yaml); std::vector<std::string> scales; set_from_yaml(scales, yaml, "scales"); config.base.reserve(scales.size()); std::transform( begin(scales), end(scales), std::back_inserter(config.base), [scale_funs](auto const & entry){ return parse_ScaleFunction(entry, scale_funs); } ); set_from_yaml_if_defined(config.factors, yaml, "scale factors"); config.max_ratio = std::numeric_limits<double>::infinity(); set_from_yaml_if_defined(config.max_ratio, yaml, "max scale ratio"); return config; } Config load_config(std::string const & config_file){ try{ return to_Config(YAML::LoadFile(config_file)); } catch(...){ std::cerr << "Error reading " << config_file << ":\n "; throw; } } } // namespace HEJ diff --git a/t/check_res.cc b/t/check_res.cc index ae65bfb..69ab76b 100644 --- a/t/check_res.cc +++ b/t/check_res.cc @@ -1,169 +1,169 @@ /** * \authors The HEJ collaboration (see AUTHORS for details) * \date 2019 * \copyright GPLv2 or later */ #include <cmath> #include <iostream> #include "LHEF/LHEF.h" #include "HEJ/CrossSectionAccumulator.hh" #include "HEJ/Event.hh" #include "HEJ/EventReweighter.hh" #include "HEJ/Mixmax.hh" #include "HEJ/stream.hh" #include "hej_test.hh" namespace{ const fastjet::JetDefinition jet_def{fastjet::kt_algorithm, 0.4}; const fastjet::JetDefinition Born_jet_def{jet_def}; constexpr double Born_jetptmin = 30; constexpr double max_ext_soft_pt_fraction = 0.1; constexpr double jetptmin = 35; constexpr bool log_corr = false; const HEJ::ParticleProperties Wprop{80.385, 2.085}; const HEJ::ParticleProperties Zprop{91.187, 2.495}; const HEJ::ParticleProperties Hprop{125, 0.004165}; constexpr double vev = 246.2196508; using EventTreatment = HEJ::EventTreatment; using namespace HEJ::event_type; HEJ::EventTreatMap treat{ {no_2_jets, EventTreatment::discard}, {bad_final_state, EventTreatment::discard}, {non_resummable, EventTreatment::discard}, {unof, 
EventTreatment::discard}, {unob, EventTreatment::discard}, {qqxexb, EventTreatment::discard}, {qqxexf, EventTreatment::discard}, {qqxmid, EventTreatment::discard}, {FKL, EventTreatment::reweight} }; /// true if colour is allowed for particle bool correct_colour(HEJ::Particle const & part){ if(HEJ::is_AWZH_boson(part) && !part.colour) return true; if(!part.colour) return false; int const colour = part.colour->first; int const anti_colour = part.colour->second; if(part.type == HEJ::ParticleID::gluon) return colour != anti_colour && colour > 0 && anti_colour > 0; if(HEJ::is_quark(part)) return anti_colour == 0 && colour > 0; return colour == 0 && anti_colour > 0; } bool correct_colour(HEJ::Event const & ev){ if(!HEJ::event_type::is_resummable(ev.type())) return true; for(auto const & part: ev.incoming()){ if(!correct_colour(part)) return false; } for(auto const & part: ev.outgoing()){ if(!correct_colour(part)) return false; } return true; } -}; +} int main(int argn, char** argv) { if(argn == 5 && std::string(argv[4]) == "unof"){ --argn; treat[unof] = EventTreatment::reweight; treat[unob] = EventTreatment::discard; treat[FKL] = EventTreatment::discard; } if(argn == 5 && std::string(argv[4]) == "unob"){ --argn; treat[unof] = EventTreatment::discard; treat[unob] = EventTreatment::reweight; treat[FKL] = EventTreatment::discard; } else if(argn == 5 && std::string(argv[4]) == "splitf"){ --argn; treat[qqxexb] = EventTreatment::discard; treat[qqxexf] = EventTreatment::reweight; treat[FKL] = EventTreatment::discard; } else if(argn == 5 && std::string(argv[4]) == "splitb"){ --argn; treat[qqxexb] = EventTreatment::reweight; treat[qqxexf] = EventTreatment::discard; treat[FKL] = EventTreatment::discard; } else if(argn == 5 && std::string(argv[4]) == "qqxmid"){ --argn; treat[qqxmid] = EventTreatment::reweight; treat[FKL] = EventTreatment::discard; } if(argn != 4){ std::cerr << "Usage: check_res eventfile xsection tolerance [uno]"; return EXIT_FAILURE; } const double xsec_ref = std::stod(argv[2]); const double tolerance = std::stod(argv[3]); HEJ::istream in{argv[1]}; LHEF::Reader reader{in}; HEJ::PhaseSpacePointConfig psp_conf; psp_conf.jet_param = HEJ::JetParameters{jet_def, jetptmin}; psp_conf.max_ext_soft_pt_fraction = max_ext_soft_pt_fraction; HEJ::MatrixElementConfig ME_conf; ME_conf.log_correction = log_corr; ME_conf.Higgs_coupling = HEJ::HiggsCouplingSettings{}; ME_conf.ew_parameters.set_vevWZH(vev, Wprop, Zprop, Hprop); HEJ::EventReweighterConfig conf; conf.psp_config = std::move(psp_conf); conf.ME_config = std::move(ME_conf); conf.jet_param = psp_conf.jet_param; conf.treat = treat; reader.readEvent(); const bool has_Higgs = std::find( begin(reader.hepeup.IDUP), end(reader.hepeup.IDUP), 25 ) != end(reader.hepeup.IDUP); const double mu = has_Higgs?125.:91.188; HEJ::ScaleGenerator scale_gen{ {{std::to_string(mu), HEJ::FixedScale{mu}}}, {}, 1. 
}; HEJ::Mixmax ran{}; HEJ::EventReweighter hej{reader.heprup, std::move(scale_gen), conf, ran}; HEJ::CrossSectionAccumulator xs; do{ auto ev_data = HEJ::Event::EventData{reader.hepeup}; shuffle_particles(ev_data); ev_data.reconstruct_intermediate(); HEJ::Event ev{ ev_data.cluster( Born_jet_def, Born_jetptmin ) }; auto resummed_events = hej.reweight(ev, 100); for(auto const & ev: resummed_events) { ASSERT(correct_colour(ev)); ASSERT(std::isfinite(ev.central().weight)); // we fill the xs uncorrelated since we only want to test the uncertainty // of the resummation xs.fill(ev); } } while(reader.readEvent()); const double xsec = xs.total().value; const double xsec_err = std::sqrt(xs.total().error); const double significance = std::abs(xsec - xsec_ref) / std::sqrt( xsec_err*xsec_err + tolerance*tolerance ); std::cout << xsec_ref << " +/- " << tolerance << " ~ " << xsec << " +- " << xsec_err << " => " << significance << " sigma\n"; if(significance > 3.){ std::cerr << "Cross section is off by over 3 sigma!\n"; return EXIT_FAILURE; } return EXIT_SUCCESS; } diff --git a/t/test_psp.cc b/t/test_psp.cc index 10bcd48..1c502aa 100644 --- a/t/test_psp.cc +++ b/t/test_psp.cc @@ -1,68 +1,68 @@ /** * \authors The HEJ collaboration (see AUTHORS for details) * \date 2019 * \copyright GPLv2 or later */ #include "LHEF/LHEF.h" #include "HEJ/stream.hh" #include "HEJ/config.hh" #include "HEJ/event_types.hh" #include "HEJ/Event.hh" #include "HEJ/PhaseSpacePoint.hh" #include "HEJ/Ranlux64.hh" #include "hej_test.hh" namespace{ constexpr int max_trials = 100; constexpr double max_ext_soft_pt_fraction = 0.1; const fastjet::JetDefinition jet_def{fastjet::kt_algorithm, 0.4}; constexpr double min_jet_pt = 50; -}; +} int main(int argn, char** argv) { if(argn != 2){ std::cerr << "Usage: " << argv[0] << " eventfile"; return EXIT_FAILURE; } HEJ::istream in{argv[1]}; LHEF::Reader reader{in}; LHEF::Writer writer{std::cerr}; writer.heprup = reader.heprup; HEJ::PhaseSpacePointConfig conf; conf.jet_param = HEJ::JetParameters{jet_def, min_jet_pt}; conf.max_ext_soft_pt_fraction = max_ext_soft_pt_fraction; HEJ::Ranlux64 ran{}; while(reader.readEvent()){ HEJ::Event::EventData ev_data{ reader.hepeup }; shuffle_particles(ev_data); const HEJ::Event ev{ ev_data( jet_def, min_jet_pt ) }; for(int trial = 0; trial < max_trials; ++trial){ HEJ::PhaseSpacePoint psp{ev, conf, ran}; if(psp.weight() != 0){ HEJ::Event::EventData tmp_ev; tmp_ev.incoming = psp.incoming(); tmp_ev.outgoing = psp.outgoing(); tmp_ev.parameters.central = {0,0,0}; shuffle_particles(tmp_ev); HEJ::Event out_ev{ tmp_ev(jet_def, min_jet_pt) }; if(out_ev.type() != ev.type()){ using HEJ::event_type::name; std::cerr << "Wrong class of phase space point:\n" "original event of class " << name(ev.type()) << ":\n"; writer.hepeup = reader.hepeup; writer.writeEvent(); std::cerr << "Phase space point of class " << name(out_ev.type()) << ":\n"; writer.hepeup = to_HEPEUP(out_ev, &writer.heprup); writer.writeEvent(); return EXIT_FAILURE; } } } } return EXIT_SUCCESS; }
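Both tests above report failure through their exit code. In check_res the pass criterion is agreement with the reference cross section within 3 sigma, where the statistical error of the resummed sample and the user-supplied tolerance are combined in quadrature. The small standalone helper below restates that criterion; it is not part of the test itself.

#include <cmath>

// Acceptance criterion used in check_res: the deviation from the reference
// cross section, in units of the combined uncertainty, must stay below 3.
bool within_3_sigma(double xsec, double xsec_err,
                    double xsec_ref, double tolerance){
  const double sigma = std::sqrt(xsec_err*xsec_err + tolerance*tolerance);
  return std::abs(xsec - xsec_ref) / sigma <= 3.;
}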