diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 831d03e..d2c0447 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,487 +1,481 @@ # --------------------------------- # - General Setup - # --------------------------------- stages: - build - test - FOG:build - FOG:test - clean_code - FOG:clear_code - long_test - publish workflow: rules: - when: always variables: GIT_SUBMODULE_STRATEGY: recursive # build directories HEJ_BUILD_DIR: tmp_HEJ/HEJ_build HEJ_INSTALL_DIR: tmp_HEJ/HEJ_installed FOG_BUILD_DIR: tmp_HEJ/FOG_build FOG_INSTALL_DIR: ${HEJ_INSTALL_DIR} # docker images DOCKER_BASIC: hejdock/hepenv # slc6 DOCKER_HEJV2ENV: 'hejdock/masterenv:centos7v2' DOCKER_CLANG: 'hejdock/hepenv:clang9_ubuntu' # ubuntu with clang DOCKER_HEPMC3: hejdock/hepmc3env # centos7 DOCKER_HIGHFIVE: hejdock/highfiveenv # centos7 - DOCKER_QCDLOOP: hejdock/qcdloopenv # slc6, includes rivet - DOCKER_RIVET3: 'hejdock/rivetenv:rivet3_gcc9_ubuntu' # ubuntu - DOCKER_RIVET3H2: 'hejdock/rivetenv:rivet3_hepmc2_gcc9_ubuntu' # ubuntu - DOCKER_RIVET: hejdock/rivetenv # slc6 + DOCKER_QCDLOOP: 'hejdock/qcdloopenv:centos7v2' # includes rivet + DOCKER_RIVET_HEPMC2: 'hejdock/rivetenv:hepmc2' + DOCKER_RIVET: hejdock/rivetenv # default name of cmake CMAKE: cmake CTEST: ctest OVERWRITE_BOOST: "" HEPSW_INSTALL_SOURCEFILE: '/cvmfs/pheno.egi.eu/HEJ/HEJ_env.sh' default: after_script: - date tags: - docker # save complete history of failed tests artifacts: when: on_failure untracked: true expire_in: 3d # ----------- Macros ----------- # default pipeline triggers .rules_template: rules: &rules_def - if: $CI_MERGE_REQUEST_ID when: on_success - if: $CI_COMMIT_TAG when: on_success - if: '$CI_COMMIT_BRANCH != null && $CI_COMMIT_BRANCH == "master"' when: on_success - if: '$CI_COMMIT_BRANCH != null && $CI_COMMIT_BRANCH =~ /^v\d+\.\d+$/' when: on_success - when: manual # non-blocking manual allow_failure: true # --------------------------------- # - Script Templates - # --------------------------------- # ----------- Build ----------- .HEJ_build: rules: *rules_def stage: build before_script: - date - source ${HEPSW_INSTALL_SOURCEFILE} || exit 1 # prepare build - t_HEJ_DIR=${PWD} - t_HEJ_INSTALL_DIR=${PWD}/${HEJ_INSTALL_DIR} - t_HEJ_BUILD_DIR=${PWD}/${HEJ_BUILD_DIR} # hack to allow setting "variable in variable" - t_CMAKE_FLAGS="" - if [[ -n ${OVERWRITE_BOOST} ]]; then declare -n t_boost_root=${OVERWRITE_BOOST}; t_CMAKE_FLAGS="-DBOOST_ROOT=${t_boost_root}"; fi - echo ${t_CMAKE_FLAGS} - mkdir -p ${t_HEJ_BUILD_DIR} - cd ${t_HEJ_BUILD_DIR} - ${CMAKE} ${t_HEJ_DIR} -DCMAKE_BUILD_TYPE=DEBUG -DCMAKE_INSTALL_PREFIX=${t_HEJ_INSTALL_DIR} ${t_CMAKE_FLAGS} script: - make -j $(nproc --ignore=1) - make install needs: [] # can run immediately artifacts: # save build and installed folder name: build expire_in: 1d paths: - ${HEJ_INSTALL_DIR} - ${HEJ_BUILD_DIR} # ----------- Test ----------- .HEJ_test: rules: *rules_def stage: test before_script: - date - source ${HEPSW_INSTALL_SOURCEFILE} || exit 1 # load HEJ - t_HEJ_DIR=${PWD} - t_HEJ_INSTALL_DIR=${PWD}/${HEJ_INSTALL_DIR} - export LD_LIBRARY_PATH=${t_HEJ_INSTALL_DIR}/lib:${LD_LIBRARY_PATH} - export PATH=${t_HEJ_INSTALL_DIR}/bin:${PATH} - cd ${HEJ_BUILD_DIR} - ${CMAKE} ${t_HEJ_DIR} # rerun cmake to create all test configure files script: - ${CTEST} --output-on-failure -j $(nproc --ignore=1) .HEJrivet_test: extends: .HEJ_test script: - ${CTEST} --output-on-failure -j $(nproc --ignore=1) - bash -c '[ -f t/tst.yoda ]' && echo "found rivet output" - rivet-cmphistos t/tst.yoda - bash -c '[ -f MC_XS_XS.dat ]' && echo 
"yoda not empty" ## ----------- FOG build ----------- .FOG_build: rules: *rules_def stage: FOG:build before_script: - date - source ${HEPSW_INSTALL_SOURCEFILE} || exit 1 # load HEJ - t_HEJ_INSTALL_DIR=${PWD}/${HEJ_INSTALL_DIR} - export LD_LIBRARY_PATH=${t_HEJ_INSTALL_DIR}/lib:${LD_LIBRARY_PATH} - export PATH=${t_HEJ_INSTALL_DIR}/bin:${PATH} # prepare build - t_FOG_DIR=${PWD}/FixedOrderGen - t_FOG_INSTALL_DIR=${PWD}/${FOG_INSTALL_DIR} - t_FOG_BUILD_DIR=${PWD}/${FOG_BUILD_DIR} # hack to allow setting "variable in variable" - t_CMAKE_FLAGS="" - if [[ -n ${OVERWRITE_BOOST} ]]; then declare -n t_boost_root=${OVERWRITE_BOOST}; t_CMAKE_FLAGS="-DBOOST_ROOT=${t_boost_root}"; fi - mkdir -p ${t_FOG_BUILD_DIR} - cd ${t_FOG_BUILD_DIR} - ${CMAKE} ${t_FOG_DIR} -DCMAKE_BUILD_TYPE=DEBUG -DCMAKE_INSTALL_PREFIX=${t_FOG_INSTALL_DIR} ${t_CMAKE_FLAGS} script: - make -j $(nproc --ignore=1) - make install artifacts: # save build and installed folder name: FOG_build expire_in: 1d paths: - ${HEJ_INSTALL_DIR} - ${FOG_INSTALL_DIR} - ${FOG_BUILD_DIR} ## ----------- FOG test ----------- .FOG_test: rules: *rules_def stage: FOG:test before_script: - date - source ${HEPSW_INSTALL_SOURCEFILE} || exit 1 # load HEJ - t_FOG_DIR=${PWD}/FixedOrderGen - t_HEJ_INSTALL_DIR=${PWD}/${HEJ_INSTALL_DIR} - t_FOG_INSTALL_DIR=${PWD}/${FOG_INSTALL_DIR} - export LD_LIBRARY_PATH=${t_HEJ_INSTALL_DIR}/lib:${LD_LIBRARY_PATH} - export PATH=${t_HEJ_INSTALL_DIR}/bin:${t_FOG_INSTALL_DIR}/bin:${PATH} - t_FOG_BUILD_DIR=${PWD}/${FOG_BUILD_DIR} - cd ${t_FOG_BUILD_DIR} - ${CMAKE} ${t_FOG_DIR} # rerun cmake to create all test configure files script: - ${CTEST} --output-on-failure -j $(nproc --ignore=1) # --------------------------------- # - Build & Test - # --------------------------------- # ----------- basic (always run) ----------- build:basic: image: ${DOCKER_BASIC} extends: .HEJ_build rules: - when: on_success test:basic: image: ${DOCKER_BASIC} extends: .HEJ_test needs: - job: build:basic artifacts: true rules: - when: on_success FOG:build:basic: image: ${DOCKER_BASIC} extends: .FOG_build needs: - job: build:basic artifacts: true rules: - when: on_success FOG:test:basic: image: ${DOCKER_BASIC} extends: .FOG_test needs: - job: FOG:build:basic artifacts: true rules: - when: on_success # ----------- HighFive/hdf5 ----------- build:HighFive: image: ${DOCKER_HIGHFIVE} extends: .HEJ_build variables: CMAKE: cmake3 test:HighFive: image: ${DOCKER_HIGHFIVE} extends: .HEJ_test variables: CMAKE: cmake3 CTEST: ctest3 needs: - job: build:HighFive artifacts: true # ----------- QCDloop ----------- build:qcdloop: image: ${DOCKER_QCDLOOP} extends: .HEJ_build + variables: + HEPSW_INSTALL_SOURCEFILE: '/cvmfs/pheno.egi.eu/HEJV2/HEJ_env.sh' test:qcdloop: image: ${DOCKER_QCDLOOP} extends: .HEJ_test - needs: - - job: build:qcdloop - artifacts: true - -# ----------- rivet3 & HepMC3 ----------- - -build:rivet3: - image: ${DOCKER_RIVET3} - extends: .HEJ_build variables: - OVERWRITE_BOOST: boost_ROOT_DIR - -test:rivet3: - image: ${DOCKER_RIVET3} - extends: .HEJrivet_test + HEPSW_INSTALL_SOURCEFILE: '/cvmfs/pheno.egi.eu/HEJV2/HEJ_env.sh' needs: - - job: build:rivet3 + - job: build:qcdloop artifacts: true # ----------- HepMC 3 ----------- # HepMC 3 already in rivet3 build:hepmc3: image: ${DOCKER_HEPMC3} extends: .HEJ_build variables: CMAKE: cmake3 test:hepmc3: image: ${DOCKER_HEPMC3} extends: .HEJ_test variables: CMAKE: cmake3 CTEST: ctest3 needs: - job: build:hepmc3 artifacts: true # ----------- rivet ----------- -# rivet2 already in qcdloop build:rivet: image: ${DOCKER_RIVET} 
extends: .HEJ_build + variables: + HEPSW_INSTALL_SOURCEFILE: '/cvmfs/pheno.egi.eu/HEJV2/HEJ_env.sh' test:rivet: image: ${DOCKER_RIVET} extends: .HEJrivet_test + variables: + HEPSW_INSTALL_SOURCEFILE: '/cvmfs/pheno.egi.eu/HEJV2/HEJ_env.sh' needs: - job: build:rivet artifacts: true # ----------- rivet3 & HepMC2 ----------- # This shouldn't change too often build:rivet3h2: - image: ${DOCKER_RIVET3H2} - extends: .HEJ_build + image: ${DOCKER_RIVET_HEPMC2} variables: - OVERWRITE_BOOST: boost_ROOT_DIR + HEPSW_INSTALL_SOURCEFILE: '/cvmfs/pheno.egi.eu/HEJV2/HEJ_env.sh' + extends: .HEJ_build test:rivet3h2: - image: ${DOCKER_RIVET3H2} + image: ${DOCKER_RIVET_HEPMC2} extends: .HEJrivet_test + variables: + HEPSW_INSTALL_SOURCEFILE: '/cvmfs/pheno.egi.eu/HEJV2/HEJ_env.sh' needs: - job: build:rivet3h2 artifacts: true - - + # ----------- HEJV2 Environment ----------- build:hejv2: image: ${DOCKER_HEJV2ENV} extends: .HEJ_build variables: HEPSW_INSTALL_SOURCEFILE: '/cvmfs/pheno.egi.eu/HEJV2/HEJ_env.sh' test:hejv2: image: ${DOCKER_HEJV2ENV} extends: .HEJ_test variables: HEPSW_INSTALL_SOURCEFILE: '/cvmfs/pheno.egi.eu/HEJV2/HEJ_env.sh' needs: - job: build:hejv2 artifacts: true # --------------------------------- # - Clean Code - # --------------------------------- No_tabs: stage: clean_code rules: *rules_def image: hejdock/git needs: [] # can run immediately script: - date - check_tabs artifacts: # don't save anything # ----------- Template no compiler warnings ----------- .Warning_build: extends: .HEJ_build stage: clean_code script: - ${CMAKE} ${t_HEJ_DIR} -DCMAKE_CXX_FLAGS="-Werror" -DCMAKE_BUILD_TYPE=RelWithDebInfo - make -j $(nproc --ignore=1) - make -j $(nproc --ignore=1) install artifacts: # don't save anything needs: [] # can run immediately .Warning_FOG: extends: .FOG_build stage: FOG:clear_code script: - ${CMAKE} ${t_FOG_DIR} -DCMAKE_CXX_FLAGS="-Werror" ${t_CMAKE_FLAGS} -DCMAKE_BUILD_TYPE=RelWithDebInfo - make -j $(nproc --ignore=1) - make -j $(nproc --ignore=1) install artifacts: # don't save anything # ----------- No gcc warnings ----------- No_Warning:basic: image: ${DOCKER_BASIC} extends: .Warning_build artifacts: # save build and installed folder (needed for Long_test:FOG) name: build expire_in: 1d paths: - ${HEJ_INSTALL_DIR} - ${HEJ_BUILD_DIR} No_Warning:basic:FOG: image: ${DOCKER_BASIC} extends: .Warning_FOG needs: - job: build:basic artifacts: true # ----------- No clang warnings ----------- No_Warning:clang: image: ${DOCKER_CLANG} extends: .Warning_build variables: OVERWRITE_BOOST: boost_ROOT_DIR artifacts: # save installed folder name: build expire_in: 1d paths: - ${HEJ_INSTALL_DIR} No_Warning:clang:FOG: image: ${DOCKER_CLANG} extends: .Warning_FOG needs: - job: No_Warning:clang artifacts: true variables: OVERWRITE_BOOST: boost_ROOT_DIR # --------------------------------- # - Long tests - # --------------------------------- .HEJ_test_long: extends: .HEJ_build stage: long_test script: - ${CMAKE} ${t_HEJ_DIR} -DCMAKE_BUILD_TYPE=RelWithDebInfo ${t_CMAKE_FLAGS} -DTEST_ALL=TRUE - make -j $(nproc --ignore=1) install - ${CTEST} --output-on-failure -j $(nproc --ignore=1) needs: [] # can run immediately .FOG_test_long: extends: .FOG_build stage: long_test script: - ${CMAKE} ${t_FOG_DIR} -DCMAKE_BUILD_TYPE=RelWithDebInfo ${t_CMAKE_FLAGS} -DTEST_ALL=TRUE - make -j $(nproc --ignore=1) install - ${CTEST} --output-on-failure -j $(nproc --ignore=1) # ----------- QCDloop ----------- Long_test:qcdloop: image: ${DOCKER_QCDLOOP} extends: .HEJ_test_long + variables: + HEPSW_INSTALL_SOURCEFILE: 
'/cvmfs/pheno.egi.eu/HEJV2/HEJ_env.sh' # ----------- Basic ----------- Long_test:FOG: image: ${DOCKER_BASIC} extends: .FOG_test_long needs: - job: No_Warning:basic artifacts: true # --------------------------------- # - Publish - # --------------------------------- Publish_version: stage: publish image: hejdock/git dependencies: [] artifacts: # don't save anything rules: - if: '$CI_COMMIT_BRANCH != null && $CI_COMMIT_BRANCH =~ /^v\d+\.\d+$/' when: on_success before_script: - mkdir -p .ssh/ && echo "${SSH_KEY}" > .ssh/id_rsa && chmod 0600 .ssh/id_rsa - rm -rf ~/.ssh/id_rsa; mkdir -p ~/.ssh/ - ln -s $PWD/.ssh/id_rsa ~/.ssh/id_rsa && chmod 0600 ~/.ssh/id_rsa - ssh -T ${PUBLIC_GIT_PREFIX} -o "StrictHostKeyChecking no" || echo "added ssh" script: - git remote add public ${PUBLIC_GIT_PREFIX}${PUBLIC_GIT_POSTFIX} - git checkout $CI_COMMIT_REF_NAME - git branch - git pull - git push public - git push public --tags after_script: - rm -f /root/.ssh/id_rsa && rm -fr .ssh - git remote rm public diff --git a/Changes-API.md b/Changes-API.md index 9f1ad56..921cc3c 100644 --- a/Changes-API.md +++ b/Changes-API.md @@ -1,133 +1,134 @@ # Changelog for HEJ API This log lists only changes on the HEJ API. These are primarily code changes relevant for calling HEJ as an API. This file should only be read as an addition to [`Changes.md`](Changes.md), where the main features are documented. ## Version 2.2 ### 2.2.0 #### New and updated functions * New functions to decide whether particles are massive, charged, charged leptons, antiparticles. * Added `to_string` function for `Event`. * Updated function `Event::EventData.reconstruct_intermediate()` - Now requires an `EWConstants` argument - Added support for WpWp and WmWm events. - In the case of WW same-flavour the reconstruction will minimise the total off-shell momentum. * Replaced `no_2_jets` and `bad_final_state` enumerators in `EventType` with `invalid` and `unknown`. * Added helper functions `is_backward_g_to_h`, `is_forward_g_to_h` to detect incoming gluon to outgoing Higgs transitions. * Updated function `implemented_types()` to now be in HEJ namespace. - +* `EventWriter` has a new `set_xs_scale` member function to set the + ratio between the sum of event weights and the cross section. ## Version 2.1 ### 2.1.0 #### Changes to Event class * Restructured `Event` class - `Event` can now only be build from a (new) `Event::EventData` class - Removed default constructor for `Event` - `Event::EventData` replaces the old `UnclusteredEvent` struct. - `UnclusteredEvent` is now **deprecated**, and will be removed in version 2.2.0 - Removed `Event.unclustered()` function - Added new member function `Events.parameters()`, to directly access (underlying) `Parameters` - New member functions `begin_partons`, `end_partons` (`rbegin_partons`, `rend_partons`) with aliases `cbegin_partons`, `cend_partons` (`crbegin_partons`, `crend_partons`) for constant (reversed) iterators over outgoing partons. * New function `Event::EventData.reconstruct_intermediate()` to reconstruct bosons from decays, e.g. 
`positron + nu_e => Wp` * Added optional Colour charges to particles (`Particle.colour`) - Colour connection in the HEJ limit can be generated via `Event.generate_colours` (automatically done in the resummation) - Colour configuration of input events can be checked with `Event.is_leading_colour` * Added function `Event.valid_hej_state` to check if event _could have_ been produced by `HEJ` according to the `soft pt regulator` cut on the jets #### New and updated functions * Renamed `EventType::nonHEJ` to `EventType::non_resummable` and `is_HEJ()` to `is_resummable()` such that the run card is consistent with the internal workings * Made `MatrixElement.tree_kin(...)` and `MatrixElement.tree_param(...)` public * New `EventReweighter` member function `treatment` to query the treatment with respect to resummation for the various event types. * Added auxiliary functions to obtain a `std::string` from `EventDescription` (`to_string` for a human-readable and `to_simple_string` for an easily parsable string) * New `get_analyses` function to read in multiple `HEJ::Analysis` at once, similar to `get_analysis` * New `get_ew_parameters` function to extract electroweak parameters from YAML configuration. #### New classes * New class `Unweighter` to do unweighting * New class `CrossSectionAccumulator` to keep track of the cross section of the different subprocesses * New template struct `Parameters` similar to old `Weights` - `Weights` are now an alias for `Parameters`. Code calling `Weights` does not need to change - `Weights.hh` was replaced by `Parameters.hh`. The old `Weights.hh` header will be removed in version 2.2.0 * Functions for multiplication and division of `EventParameters.weight` by a double - This can be combined with `Parameters`, e.g. `Parameters*Weights`, see also `Events.parameters()` - Moved `EventParameters` to `Parameters.hh` header * New class `EWConstants` replaces previously hard coded `vev` - `EWConstants` has to be set in the general `Config` and the `MatrixElementConfig` #### Input/Output * New abstract `EventReader` class, as base for reading events from files - Moved LHE file reader to `HEJ::LesHouchesReader` * New (optional) function `finish()` in abstract class `EventWriter`. `finish()` is called _after_ all events are written (a short writer sketch follows below). * Support reading (`HDF5Reader`) and writing (`HDF5Writer`) `hdf5` files * New `BufferedEventReader` class that allows putting events back into the reader. * New `SherpaLHEReader` to read Sherpa LHE files with correct weights * `get_analysis` now requires `YAML::Node` and `LHEF::HEPRUP` as arguments * Replaced `HepMCInterface` and `HepMCWriter` by `HepMCInterfaceX` and `HepMCWriterX` respectively, with `X` being the major version of HepMC (2 or 3) - Renamed `HepMCInterfaceX::init_kinematics` to `HepMCInterfaceX::init_event` and protected it, use `HepMCInterfaceX::operator()` instead - Removed redundant `HepMCInterfaceX::add_variation` function #### Linking * Export cmake target `HEJ::HEJ` to link directly against `libHEJ` * Preprocessor flags (`HEJ_BUILD_WITH_XYZ`) for enabled/disabled dependencies are now written to `ConfigFlags.hh` * Provide links to version specific object files, e.g. `libHEJ.so -> libHEJ.so.2.1 (soname) -> libHEJ.so.2.1.0` * Removed `LHAPDF` from public interface #### Miscellaneous * Capitalisation of `Config.hh` now matches class `Config` (was `config.hh`) * Renamed `Config::max_ext_soft_pt_fraction` to `Config::soft_pt_regulator`. The new `Config::soft_pt_regulator` parameter is optional and the old `Config::max_ext_soft_pt_fraction` is **deprecated**.
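Tying together the `EventWriter` additions in this changelog (the optional `finish()` hook listed under *Input/Output* above and the `set_xs_scale` member introduced in version 2.2.0), the following is a minimal sketch of a custom writer. It is not taken from the HEJ sources: it assumes `HEJ::EventWriter` is default-constructible and that `write`, `set_xs_scale` and `finish` are the only members a writer must override, and it takes the scale convention to match `Analysis::set_xs_scale`, i.e. cross section = scale × sum of event weights.

```cpp
// Sketch of a custom event writer (assumptions as stated above).
#include <cstddef>
#include <iostream>

#include "HEJ/Event.hh"
#include "HEJ/EventWriter.hh"

class CountingWriter: public HEJ::EventWriter {
public:
  //! Accumulate the central weight of each written event
  void write(HEJ::Event const & ev) override {
    sum_weights_ += ev.central().weight;
    ++num_events_;
  }
  //! Store the factor relating the weight sum to the cross section
  void set_xs_scale(double scale) override { xs_scale_ = scale; }
  //! Called once after all events have been written
  void finish() override {
    std::cout << "wrote " << num_events_ << " events, cross section ~ "
              << xs_scale_ * sum_weights_ << '\n';
  }
private:
  double sum_weights_ = 0.;
  double xs_scale_ = 1.;
  std::size_t num_events_ = 0;
};
```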
* Replaced redundant member `EventReweighterConfig::jet_param` with getter function `EventReweighter.jet_param()` (`JetParameters` are already in `PhaseSpacePointConfig`) * Avoid storing reference to the Random Number Generator inside classes - Constructors of `EventReweighter` now expect `std::shared_ptr` (was reference) - Moved reference to `HEJ::RNG` from constructor of `JetSplitter` to `JetSplitter.split` ## Version 2.0 ### 2.0.4 * Fixed wrong path of `HEJ_INCLUDE_DIR` in `hej-config.cmake` ### 2.0.0 * First release diff --git a/Changes.md b/Changes.md index 954c587..71c8186 100644 --- a/Changes.md +++ b/Changes.md @@ -1,203 +1,206 @@ # Changelog This is the log for changes to the HEJ program. Further changes to the HEJ API are documented in [`Changes-API.md`](Changes-API.md). If you are using HEJ as a library, please also read the changes there. ## Version 2.2 This release adds support for the new processes: * Wp+Wp with jets. * Wm+Wm with jets. In addition, there are new options when running HEJ: * Enable NLO truncation for use in HEJ@NLO predictions. * Require a lowpt jet, used for running lowpt separately. This release includes minor changes which affect users of HEJ as a library (see [`Changes-API.md`](Changes-API.md)). Compilation now requires a compiler supporting C++17. ### 2.2.0 #### New Processes * Resummation for WpWp/WmWm with jets, including interference between configurations. * Significantly improved description of Higgs boson plus jet production: - Processes with extremal Higgs boson emission are now treated as leading-log. - Resummation is now enabled for Higgs boson production with a single jet. * `HEJFOG` can generate multiple jets together with a charged lepton-antilepton pair via a virtual Z boson or photon. #### Updates to configuration file * With the new option `off-shell tolerance` HEJ repairs the momenta of massless particles that are slightly off-shell and incoming particles with small but non-vanishing transverse momenta. * Introduced new event types `unknown` and `invalid` under the `event treatment` option. `unknown` indicates processes that are not recognised by HEJ. `invalid` is reserved for unphysical processes. * Introduced new event treatment `abort` for aborting the program when an undesired event type is encountered. +#### Changes to input/output +* Event reader supports event input from named pipes (fifo) + ## Version 2.1 This release adds support for two new processes: * W boson with jets. * Jets with a charged lepton-antilepton pair via a virtual Z boson or photon. In addition, the complete set of first subleading processes (unordered gluon, central and extremal quark-antiquark pair) is implemented for pure jets and W + jets, see [arXiv:2012.10310](https://arxiv.org/abs/2012.10310). Unordered gluon emission is also supported for Higgs boson + jets and Z boson/photon + jets. This release includes many changes to the code, which affect users of HEJ as a library (see [`Changes-API.md`](Changes-API.md)). ### 2.1.3 * Updated documentation. ### 2.1.2 * Updated `cxxopts.hpp` dependency. ### 2.1.1 * Fixed invalid iterator accesses. * Reorganised automated tests. * Updated documentation. ### 2.1.0 #### New Processes * Resummation for W bosons with jets - New subleading processes `extremal qqbar` & `central qqbar` for a quark and anti-quark in the final state, e.g.
`g g => u d_bar Wm g` (the other subleading processes also work with W's) - `HEJFOG` can generate multiple jets together with an (off-shell) W boson decaying into a lepton & neutrino * Resummation for jets with a charged lepton-antilepton pair via a virtual Z boson or photon. Includes the `unordered` subleading process. * Resummation can now be performed on all subleading processes within pure jets also. This includes `unordered`, `extremal qqbar` and `central qqbar` processes. #### More Physics implementation * Partons now have a Colour charge - Colours are read from and written to LHE files - For reweighted events the colours are created according to leading colour in the FKL limit * Use relative fraction for soft transverse momentum in tagging jets (`soft pt regulator`) as new (optional) parameter. - This supersedes `min extparton pt`, which is marked **deprecated** and will be removed in version 2.2.0 - This is a direct replacement for the old `max ext soft pt fraction`, which is also **deprecated**. * Changed the redistribution of the momenta of soft emissions. Now also bosons can take part of the recoil (previously only jets). #### Updates to Runcard * Allow multiplication and division of multiple scale functions, e.g. `H_T/2*m_j1j2` * Grouped `event treatment` for subleading channels together in runcard - Renamed `non-HEJ` processes to `non-resummable` * Read electro-weak constants from input - new mandatory setting `vev` to change vacuum expectation value - new mandatory settings `particle properties` to specify mass & width of bosons - `HEJFOG`: decays are now specified in `decays` setting (previously under `particle properties`) * Allow loading multiple analyses with `analyses`. The old singular `analysis` setting is marked **deprecated**. * Optional setting to specify maximal number of Fixed Order events (`max events`, default is all) * Allow changing the regulator lambda in input (`regulator parameter`, only for advanced users) #### Changes to Input/Output * Added support to read & write `hdf5` event files suggested in [arXiv:1905.05120](https://arxiv.org/abs/1905.05120) (needs [HighFive](https://github.com/BlueBrain/HighFive)) * Support input with average weight equal to the cross section (`IDWTUP=1 or 4`) * Support unmodified Les Houches Event Files written by Sherpa with `cross section = sum(weights)/sum(trials)` * Analyses now get general run information (`LHEF::HEPRUP`) in the constructor. **This might break previously written, external analyses!** - external analyses should now be created with `make_analysis(YAML::Node const & config, LHEF::HEPRUP const & heprup)`, see the sketch below * Support `rivet` version 3 with both `HepMC` version 2 and 3 - Multiple weights with `rivet 3` will only create one `.yoda` file (instead of one per weight/scale) * Added option to unweight only resummation events (`unweight: {type: resummation}`) * Added option for partially unweighting resummation events, similar to the fixed-order generator. * Improved unweighting algorithm. * Follow HepMC convention for particle Status codes: incoming = 11, decaying = 2, outgoing = 1 (unchanged) #### Miscellaneous * Print cross sections at end of run * Added example analysis & scale to `examples/`. Everything in `examples/` will be built when the flag `-DBUILD_EXAMPLES=TRUE` is set in `cmake`. * Dropped support for HepMC 3.0.0, either HepMC version 2 or >3.1 is required - It is now possible to write out both HepMC 2 and HepMC 3 events at the same time * Require LHAPDF version 6. Dropped support for all other versions.
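As noted under *Changes to Input/Output* above, external analyses must now be created through a two-argument factory. The following is a minimal sketch of a complete plugin against the `Analysis` interface as of this diff (HEJ 2.2, including the new `set_xs_scale`); it is modelled on `examples/AnalysisTemplate.cc` and `examples/AnalysisPrint.cc` further down in this diff, and the class name `MyAnalysis` is a placeholder rather than anything from the HEJ sources.

```cpp
// Sketch of a custom analysis plugin: the class implements the required
// virtual functions of HEJ::Analysis, and the extern "C" factory forwards
// both the YAML configuration and the run information (LHEF::HEPRUP).
#include <iostream>
#include <memory>

#include "HEJ/Analysis.hh"
#include "HEJ/Event.hh"
#include "LHEF/LHEF.h"
#include "yaml-cpp/yaml.h"

namespace {
  class MyAnalysis: public HEJ::Analysis {
  public:
    MyAnalysis(YAML::Node const & /* config */, LHEF::HEPRUP const & /* heprup */) {}
    bool pass_cuts(HEJ::Event const & /* event */, HEJ::Event const & /* FO_event */) override {
      return true;                            // accept every event
    }
    void fill(HEJ::Event const & event, HEJ::Event const & /* FO_event */) override {
      sum_weights_ += event.central().weight; // e.g. accumulate event weights
    }
    void set_xs_scale(double scale) override {
      xs_scale_ = scale;                      // (cross section)/(sum of weights)
    }
    void finalise() override {
      std::cout << "cross section: " << xs_scale_ * sum_weights_ << '\n';
    }
  private:
    double sum_weights_ = 0., xs_scale_ = 1.;
  };
}

extern "C" __attribute__((visibility("default")))
std::unique_ptr<HEJ::Analysis> make_analysis(
  YAML::Node const & config, LHEF::HEPRUP const & heprup
){
  return std::make_unique<MyAnalysis>(config, heprup);
}
```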
* Use `git-lfs` for raw data in test (`make test` now requires `git-lfs`) * Currents are now generated with [`FORM`](https://github.com/vermaseren/form) - `FORM` is included as a `git submodule`, use `git submodule update --init` to download `FORM` * Create [Sphinx](http://sphinx-doc.org/) and [Doxygen](http://doxygen.org/) documentation by `make sphinx` or `make doxygen` in your `build/` folder ## Version 2.0 First release of HEJ 2. Complete code rewrite compared to HEJ 1. Improved matching to Fixed Order ([arXiv:1805.04446](https://arxiv.org/abs/1805.04446)). Implemented processes: Higgs boson with jets (FKL and unordered gluon emission, with finite quark mass loop, [arXiv:1812.08072](https://arxiv.org/abs/1812.08072)), and pure jets (only FKL). See [arXiv:1902.08430](https://arxiv.org/abs/1902.08430) ## 2.0.7 * Added missing `#include` directives. ### 2.0.6 * Fixed compiling rivet when YODA headers are _outside_ of rivet directory. ### 2.0.5 * Fixed event classification for input not ordered in rapidity. ### 2.0.4 * Fixed wrong path of `HEJ_INCLUDE_DIR` in `hej-config.cmake`. ### 2.0.3 * Fixed parsing of (numerical factor) * (base scale) in configuration. * Don't change scale names, but sanitise Rivet output file names instead. ### 2.0.2 * Changed scale names to `"_over_"` and `"_times_"` for proper file names (was `"/"` and `"*"` before). ### 2.0.1 * Fixed name of fixed-order generator in error message. ### 2.0.0 * First release. diff --git a/doc/sphinx/HEJ.rst b/doc/sphinx/HEJ.rst index 0c18246..302e8fa 100644 --- a/doc/sphinx/HEJ.rst +++ b/doc/sphinx/HEJ.rst @@ -1,513 +1,519 @@ .. _`Running HEJ 2`: Running HEJ 2 ============= Quick start ----------- In order to run HEJ 2, you need a configuration file and a file containing fixed-order events. A sample configuration is given by the :file:`config.yml` file distributed together with HEJ 2. Events in the Les Houches Event File format can be generated with standard Monte Carlo generators like `MadGraph5_aMC@NLO `_ or `Sherpa `_. If HEJ 2 was compiled with `HDF5 `_ support, it can also read and write event files in the format suggested in `arXiv:1905.05120 `_. HEJ 2 assumes that the cross section is given by the sum of the event weights. Depending on the fixed-order generator it may be necessary to adjust the weights in the Les Houches Event File accordingly. The processes supported by HEJ 2 are - Pure multijet production - Production of a Higgs boson with jets - Production of a W boson with jets - Production of jets with a charged lepton-antilepton pair, via a virtual Z boson and/or photon - Production of two same-sign W bosons with jets where at least two jets are required in each case. For the time being, only leading-order input events are supported. After generating an event file :file:`events.lhe` adjust the parameters under the `fixed order jets`_ setting in :file:`config.yml` to the settings in the fixed-order generation. Resummation can then be added by running:: HEJ config.yml events.lhe +Alternatively it can be more efficient to run HEJ with input events piped from a +fixed order generator. To do this with e.g. Sherpa (where the event output file is +specified in the Sherpa runcard as :file:`events.lhe`), run:: + + mkfifo events.lhe + Sherpa -f Run.dat & + HEJ config.yml events.lhe + + Using the default settings, this will produce an output event file :file:`HEJ.lhe` with events including high-energy resummation. When using the `Docker image `_, HEJ can be run with .. 
code-block:: bash docker run -v $PWD:$PWD -w $PWD hejdock/hej HEJ config.yml events.lhe .. _`HEJ 2 settings`: Settings -------- HEJ 2 configuration files follow the `YAML `_ format. The following configuration parameters are supported: .. _`trials`: **trials** High-energy resummation is performed by generating a number of resummation phase space configurations corresponding to an input fixed-order event. This parameter specifies how many such configurations HEJ 2 should try to generate for each input event. Typical values vary between 10 and 100. .. _`fixed order jets`: **fixed order jets** This tag collects a number of settings specifying the jet definition in the event input. The settings should correspond to the ones used in the fixed-order Monte Carlo that generated the input events. .. _`fixed order jets: min pt`: **min pt** Minimum transverse momentum in GeV of fixed-order jets. .. _`fixed order jets: algorithm`: **algorithm** The algorithm used to define jets. Allowed settings are :code:`kt`, :code:`cambridge`, :code:`antikt`, :code:`cambridge for passive`. See the `FastJet `_ documentation for a description of these algorithms. .. _`fixed order jets: R`: **R** The R parameter used in the jet algorithm, roughly corresponding to the jet radius in the plane spanned by the rapidity and the azimuthal angle. .. _`resummation jets`: **resummation jets** This tag collects a number of settings specifying the jet definition in the observed, i.e. resummed events. These settings are optional, by default the same values as for the `fixed order jets`_ are assumed. .. _`resummation jets: min pt`: **min pt** Minimum transverse momentum in GeV of resummation jets. This should be between 25% and 50% larger than the minimum transverse momentum of fixed order jets set by `fixed order jets: min pt`_. The contribution from the lower min pt fixed order jets can also be calculated separately using the `require low pt jet`_ option. .. _`resummation jets: algorithm`: **algorithm** The algorithm used to define jets. The HEJ 2 approach to resummation relies on properties of :code:`antikt` jets, so this value is strongly recommended. For a list of possible other values, see the `fixed order jets: algorithm`_ setting. .. _`resummation jets: R`: **R** The R parameter used in the jet algorithm. .. _`event treatment`: **event treatment** Specify how to treat different event types. The different event types contribute to different orders in the high-energy limit. The types are: .. _`FKL`: **FKL** Specifies how to treat events respecting FKL rapidity ordering, where all but the two partons extremal in rapidity have to be gluons, e.g. :code:`u d => u g d`. These configurations are dominant in the high-energy limit. .. _`unordered`: **unordered** Specifies how to treat events with one gluon emission that does not respect FKL ordering, e.g. :code:`u d => g u d`. In the high-energy limit, such configurations are logarithmically suppressed compared to FKL configurations. .. _`extremal qqbar`: **extremal qqbar** Specifies how to treat events with a quark-antiquark pair as extremal partons in rapidity, e.g. :code:`g d => u u_bar d`. In the high-energy limit, such configurations are logarithmically suppressed compared to FKL configurations. .. _`central qqbar`: **central qqbar** Specifies how to treat events with a quark-antiquark pair central in rapidity, e.g. :code:`g g => g u u_bar g`. In the high-energy limit, such configurations are logarithmically suppressed compared to FKL configurations. .. 
_`non-resummable`: **non-resummable** Specifies how to treat events where resummation is currently not supported for the given rapidity ordering and/or assignment of parton flavours. .. _`unknown`: **unknown** Specifies how to treat events for processes that are not implemented, e.g. di-photon production. .. _`invalid`: **invalid** Specifies how to treat events that are considered unphysical. Events in this category violate one or more of the following requirements: - Charge conservation. - Four-momentum conservation. - Momenta of massless particles must be lightlike. - Incoming particles must not have transverse momentum. The possible treatments are - :code:`reweight` to enable resummation - :code:`keep` to keep the events as they are up to a possible change of renormalisation and factorisation scale - :code:`discard` to discard these events - :code:`abort` to abort the program if one of these events is encountered The settings :code:`keep`, :code:`discard`, and :code:`abort` are supported for all event types. The :code:`reweight` treatment is implemented for different event types depending on the considered process: .. csv-table:: :header: , "FKL", "unordered", "extremal qqbar", "central qqbar" :widths: auto :align: center :stub-columns: 1 "pure jets", "Yes", "Yes", "Yes", "Yes" "Higgs + jets", "Yes", "Yes", "No", "No" "W + jets", "Yes", "Yes", "Yes", "Yes" "Z/γ + jets", "Yes", "Yes", "No", "No" "same-sign W + jets", "Yes", "No", "No", "No" .. _`scales`: **scales** Specifies the renormalisation and factorisation scales for the output events. This can either be a single entry or a list :code:`[scale1, scale2, ...]`. For the case of a list the first entry defines the central scale. Possible values are fixed numbers to set the scale in GeV or the following: - :code:`H_T`: The sum of the scalar transverse momenta of all final-state particles - :code:`max jet pperp`: The maximum transverse momentum of all jets - :code:`jet invariant mass`: Sum of the invariant masses of all jets - :code:`m_j1j2`: Invariant mass between the two hardest jets. Scales can be multiplied or divided by overall factors, e.g. :code:`H_T/2`. It is also possible to import scales from an external library, see :ref:`Custom scales` .. _`scale factors`: **scale factors** A list of numeric factors by which each of the `scales`_ should be multiplied. Renormalisation and factorisation scales are varied independently. For example, a list with entries :code:`[0.5, 2]` would give the four scale choices (0.5μ\ :sub:`r`, 0.5μ\ :sub:`f`); (0.5μ\ :sub:`r`, 2μ\ :sub:`f`); (2μ\ :sub:`r`, 0.5μ\ :sub:`f`); (2μ\ :sub:`r`, 2μ\ :sub:`f`) in this order. The ordering corresponds to the order of the final event weights. .. _`max scale ratio`: **max scale ratio** Specifies the maximum factor by which renormalisation and factorisation scales may difer. For a value of :code:`2` and the example given for the `scale factors`_ the scale choices (0.5μ\ :sub:`r`, 2μ\ :sub:`f`) and (2μ\ :sub:`r`, 0.5μ\ :sub:`f`) will be discarded. .. _`log correction`: **log correction** Whether to include corrections due to the evolution of the strong coupling constant in the virtual corrections. Allowed values are :code:`true` and :code:`false`. .. _`NLO truncation`: **NLO truncation** Options to truncate the HEJ resummation at next-to-leading order. Used for bin-by-bin NLO reweighting. .. _`NLO truncation: enabled`: **enabled** Enable truncation. Allowed values are :code:`true` and :code:`false` (default). .. 
_`NLO truncation: nlo order`: **nlo order** Set the (base) number of jets in the NLO sample. Allowed values are integers (default: 2). .. _`unweight`: **unweight** Settings for unweighting events. Unweighting can greatly reduce the number of resummation events, speeding up analyses and shrinking event file sizes. .. _`type`: **type** How to unweight events. The supported settings are - :code:`weighted`: Generate weighted events. Default, if nothing else specified. - :code:`resummation`: Unweight only resummation events. Each set of resummation events coming from *a single fixed order event* are unweighted separately according to the largest weight in the current chunk of events. - :code:`partial`: Unweight only resummation events with weights below a certain threshold. The weight threshold is determined automatically in a calibration run prior to the usual event generation. .. _`unweight: trials`: **trials** Maximum number of trial resummation events generated in the calibration run for partial unweighting. This option should only be set for partial unweighting. If possible, each trial is generated from a different input fixed-order event. If there are not sufficiently many input events, more than one trial event may be generated for each of them and the actual number of trial events may be smaller than requested. Increasing the number of trials generally leads to better unweighting calibration but increases the run time. Between 1000 and 10000 trials are usually sufficient. .. _`unweight: max deviation`: **max deviation** Controls the range of events to which unweighting is applied. This option should only be set for partial unweighting. A larger value means that a larger fraction of events are unweighted. Typical values are between -1 and 1. .. _`event output`: **event output** Specifies the name of a single event output file or a list of such files. The file format is either specified explicitly or derived from the suffix. For example, :code:`events.lhe` or, equivalently :code:`Les Houches: events.lhe` generates an output event file :code:`events.lhe` in the Les Houches format. The supported formats are - :code:`file.lhe` or :code:`Les Houches: file`: The Les Houches event file format. - :code:`file.hepmc2` or :code:`HepMC2: file`: HepMC format version 2. - :code:`file.hepmc3` or :code:`HepMC3: file`: HepMC format version 3. - :code:`file.hepmc` or :code:`HepMC: file`: The latest supported version of the HepMC format, currently version 3. - :code:`file.hdf5` or :code:`HDF5: file`: The HDF5-based format of `arXiv:1905.05120 `_. .. _`random generator`: **random generator** Sets parameters for random number generation. .. _`random generator: name`: **name** Which random number generator to use. Currently, :code:`mixmax` and :code:`ranlux64` are supported. Mixmax is recommended. See the `CLHEP documentation `_ for details on the generators. .. _`random generator: seed`: **seed** The seed for random generation. This should be a single number for :code:`mixmax` and the name of a state file for :code:`ranlux64`. .. _`analyses`: **analyses** Names and settings for one or more custom and Rivet event analyses. Entries containing the :code:`rivet` key are interpreted as Rivet analyses; the values corresponding to this key should be the analyses names. In addition, there is a mandatory :code:`output` key which determines the prefix for the yoda output file. For a custom analysis the :code:`plugin` sub-entry should be set to the analysis file path. All further entries are passed on to the analysis. 
See :ref:`Writing custom analyses` for details. .. _`vev`: **vev** Higgs vacuum expectation value in GeV. All electro-weak constants are derived from this together with the `particle properties`_. .. _`particle properties`: **particle properties** Specifies various properties of the different particles (Higgs, W or Z). All electro-weak constants are derived from these together with the :ref:`vacuum expectation value`. .. _`particle properties: particle`: **Higgs, W or Z** The particle (Higgs, |W+| or |W-|, Z) for which the following properties are defined. .. |W+| replace:: W\ :sup:`+` .. |W-| replace:: W\ :sup:`-` .. _`particle properties: particle: mass`: **mass** The mass of the particle in GeV. .. _`particle properties: particle: width`: **width** The total decay width of the particle in GeV. .. _`Higgs coupling`: **Higgs coupling** This collects a number of settings concerning the effective coupling of the Higgs boson to gluons. This is only relevant for the production process of a Higgs boson with jets and only supported if HEJ 2 was compiled with `QCDLoop `_ support. .. _`Higgs coupling: use impact factors`: **use impact factors** Whether to use impact factors for the coupling to the most forward and most backward partons. Impact factors imply the infinite top-quark mass limit. .. _`Higgs coupling: mt`: **mt** The value of the top-quark mass in GeV. If this is not specified, the limit of an infinite mass is taken. .. _`Higgs coupling: include bottom`: **include bottom** Whether to include the Higgs coupling to bottom quarks. .. _`Higgs coupling: mb`: **mb** The value of the bottom-quark mass in GeV. Only used for the Higgs coupling, external bottom-quarks are always assumed to be massless. .. _`require low pt jet`: **require low pt jet** Restrict output to only include events which have fixed order jets with pT less than the `resummation jets: min pt`_. Used to calculate the contribution from a lower min pt in the fixed order jets. This option allows this calculation to be done separately from the calculation where the fixed order min pt is equal to the resummation jets min pt. This is usually a small correction. .. _`off-shell tolerance`: **off-shell tolerance** Tolerance for numerical inaccuracies in input momenta. Momenta of massless particles with an invariant mass below the given value are rescaled to be on-shell. Transverse momentum components of incoming particles that are smaller than the given tolerance are set to zero. The default value is 0, leaving input momenta unchanged. Advanced Settings ~~~~~~~~~~~~~~~~~ All of the following settings are optional. Please **do not set** any of the following options, unless you know exactly what you are doing. The default behaviour gives the most reliable results for a wide range of observables. .. _`soft pt regulator`: **soft pt regulator** Specifies the maximum fraction that soft radiation can contribute to the transverse momentum of each the tagging jets, i.e. any jet that affects the event classification, like the most forward and most backward jet or the jets of the central qqbar pair. This setting is needed to regulate an otherwise cancelled divergence. Default is 0.1. .. _`max ext soft pt fraction`: **max ext soft pt fraction** This is the same as `soft pt regulator`_ and will be removed in future versions. .. _`min extparton pt`: **min extparton pt** Specifies the minimum transverse momentum in GeV of the most forward and the most backward parton. 
Its value should be slightly below the minimum transverse momentum of jets specified by `resummation jets: min pt`_. This setting got superseded by `soft pt regulator`_ and will be removed in future versions. .. _`max events`: **max events** Maximal number of (input) Fixed Order events. HEJ will stop after processing `max events` many events. Default considers all events. .. _`regulator parameter`: **regulator parameter** Slicing parameter to regularise the subtraction term, called :math:`\lambda` in `arxiv:1706.01002 `_. Default is 0.2. - - - diff --git a/doc/sphinx/analyses.rst b/doc/sphinx/analyses.rst index 57fc41f..4a43b7e 100644 --- a/doc/sphinx/analyses.rst +++ b/doc/sphinx/analyses.rst @@ -1,91 +1,94 @@ .. _`Writing custom analyses`: Writing custom analyses ======================= HEJ 2 and the HEJ fixed-order generator can generate HepMC files, so you can always run a `Rivet `_ analysis on these. However, if you compiled HEJ 2 with Rivet, you can use the native Rivet interface. For example .. code-block:: YAML analyses: - rivet: [MC_XS, MC_JETS] output: HEJ would call the generic `MC_XS `_ and `MC_JETS `_ analysis and write the result into :code:`HEJ[.Scalename].yoda`. HEJ 2 will then run Rivet over all different scales separately and write out each into a different yoda file. Alternatively, instead of using Rivet, you can provide a custom analysis inside a C++ library. An analysis is a class that derives from the abstract :code:`Analysis` -base class provided by HEJ 2. It has to implement three public +base class provided by HEJ 2. It has to implement four public functions: * The :code:`pass_cuts` member function returns true if and only if the given event (first argument) passes the analysis cuts. * The :code:`fill` member function adds an event to the analysis, which for example can be used to fill histograms. HEJ 2 will only pass events for which :code:`pass_cuts` has returned true. +* The :code:`set_xs_scale` member function updates the ratio between + the total cross section and the sum of event weights. + * The :code:`finalise` member function is called after all events have been processed. It can be used, for example, to print out or save the analysis results. The :code:`pass_cuts` and :code:`fill` functions take two arguments: the resummation event generated by HEJ 2 and the original fixed-order input event. Usually, the second argument can be ignored. It can be used, for example, for implementing cuts that depend on the ratio of the weights between the fixed-order and the resummation event. In addition to the four member functions, there has to be a global :code:`make_analysis` function that takes the analysis parameters in the form of a YAML :code:`Node` and returns a :code:`std::unique_ptr` to the Analysis. The following code creates the simplest conceivable analysis. .. literalinclude:: ../../examples/AnalysisTemplate.cc You can save this code to a file, for example :code:`myanalysis.cc`, and compile it into a shared library. Using the :code:`g++` compiler, the library can be built with .. code-block:: sh g++ $(HEJ-config --cxxflags) -fPIC -shared -O2 -fvisibility=hidden -Wl,-soname,libmyanalysis.so -o libmyanalysis.so myanalysis.cc It is also good practice to add :code:`__attribute__((visibility("default")))` after :code:`extern "C"` in the above code snippet and then compile with the additional flag :code:`-fvisibility=hidden` to prevent name clashes. You can use the analysis in HEJ 2 or the HEJ fixed-order generator by adding ..
code-block:: YAML analyses: - plugin: /path/to/libmyanalysis.so to the .yml configuration file. As a more interesting example, here is the code for an analysis that sums up the total cross section and prints the result to both standard output and a file specified in the .yml config with .. code-block:: YAML analyses: - plugin: analysis/build/directory/src/libmy_analysis.so output: outfile To access the configuration at run time, HEJ 2 uses the yaml-cpp library; for more details see the `yaml-cpp tutorial `_. The analysis code itself is .. literalinclude:: ../../examples/AnalysisPrint.cc diff --git a/doc/sphinx/installation.rst b/doc/sphinx/installation.rst index 407ca97..bd036c5 100644 --- a/doc/sphinx/installation.rst +++ b/doc/sphinx/installation.rst @@ -1,128 +1,128 @@ .. _Installation: Installation ============ Download -------- A tar archive of the HEJ 2 source code can be downloaded and decompressed with the command:: curl https://hej.hepforge.org/downloads?f=HEJ_2.1.tar.gz | tar -xz To obtain the latest stable HEJ version, `HEJ_2.1.tar.gz` should be replaced by `HEJ.tar.gz`. Alternatively, the HEJ source code can be obtained by installing the `git version control system `_. and running: .. code-block:: sh git clone https://phab.hepforge.org/source/hej.git We also provide a `Docker image `_ containing a HEJ 2 installation. This image can be pulled with:: docker pull hejdock/hej When using the Docker image the remaining installation steps can be skipped. Prerequisites ------------- Before installing HEJ 2, you need the following programs and libraries: - `CMake `_ version 3.1 - A compiler supporting the C++17 standard, for example `gcc `_ 7 or later - `FastJet `_ - `CLHEP `_ version 2.3 - `LHAPDF `_ version 6 - The `IOStreams` and `uBLAS` `boost `_ libraries - `yaml-cpp `_ - `autoconf` and `automake` for `FORM `_ In addition, some optional features have additional dependencies: - `Version 2 of QCDLoop `_ is required to include finite top mass corrections in Higgs boson + jets production. - `HepMC versions 2 and 3 `_ enable event output in the respective format. -- `Rivet `_ together with HepMC 2 or 3 allow - using Rivet analyses. +- `Rivet `_ version 3.1.4 or later + together with HepMC 2 or 3 allow using Rivet analyses. - `HighFive `_ has to be installed in order to read and write event files in the `HDF5 `_-based format suggested in `arXiv:1905.05120 `_. We strongly recommend to install these programs and libraries to standard locations: - The executable files should be inside one of the directories listed in the `PATH` environment variable. This concerns `cmake`, the C++ compiler, and the executables contained in `autoconf` and `automake`. - The library header files ending with `.h`, `.hh`, or `.hpp` should be in a directory where they are found by the C++ compiler. For `gcc` or `clang`, custom locations can be specified using the `CPLUS_INCLUDE_PATH` environment variable. - The compiled library files ending with `.a`, `.so`, or `.dylib` should be in a directory where they are found by the linker. Custom locations can be set via the `LIBRARY_PATH` environment variable. For shared object libraries (`.so` or `.dylib`) custom locations should also be part of `LD_LIBRARY_PATH` on linux and `DYLD_FALLBACK_LIBRARY_PATH` or `DYLD_LIBRARY_PATH` on macOS. Compilation ----------- To compile and install HEJ 2 run:: cmake source/directory -DCMAKE_INSTALL_PREFIX=target/directory make install :file:`source/directory` is the directory containing the file :file:`CMakeLists.txt`. 
If you omit :code:`-DCMAKE_INSTALL_PREFIX=target/directory` HEJ 2 will be installed to some default location. In case some of the aforementioned prerequisites are not found by :code:`cmake` you can give a hint by adding an additional argument :code:`-Dlibname_ROOT_DIR=/directory/with/library`, where :code:`libname` should be replaced by the name of the library in question. For example, if `FastJet` is installed in the subdirectory `.local` of your home directory with the `libfastjet.*` library files in `.local/lib` and the header files ending with `.hh` in `.local/include/fastjet` you can pass :code:`-Dfastjet_ROOT_DIR=$HOME/.local` to `cmake`. If :code:`cmake` fails to find (the correct) boost path, try setting :code:`-DBOOST_ROOT=/path/to/boost`, this will force :code:`cmake` to search for boost only in :code:`/path/to/boost`. To not include specific packages one can add :code:`-DEXCLUDE_packagename=TRUE` to :code:`cmake`, e.g. by setting :code:`-DEXCLUDE_rivet=TRUE` HEJ 2 will not be interfaced to Rivet even if it is available on the system. Testing ------- To test your installation, download the NNPDF 2.3 PDF set with:: lhapdf install NNPDF23_nlo_as_0119 and run:: make test The test data of HEJ are stored in a `Git Large File Storage `_ format. :code:`git clone` therefore requires :code:`git-lfs` to download the data correctly. diff --git a/examples/AnalysisPrint.cc b/examples/AnalysisPrint.cc index 59cf3e7..02dbc56 100644 --- a/examples/AnalysisPrint.cc +++ b/examples/AnalysisPrint.cc @@ -1,71 +1,80 @@ //! HEJ analysis to output the cross section to a file #include #include #include #include #include #include "HEJ/Analysis.hh" #include "HEJ/Event.hh" #include "yaml-cpp/yaml.h" #include "LHEF/LHEF.h" namespace { class AnalysisPrint: public HEJ::Analysis { public: AnalysisPrint(YAML::Node const & config, LHEF::HEPRUP const & heprup): - xsection_{0.}, xsection_error_{0.}, + wt_sum_{0.}, wt2_sum_{0.}, outfile_{config["output"].as()}, generators_{heprup.generators} {} void fill( HEJ::Event const & event, HEJ::Event const & /* FO_event */ ) override { const double wt = event.central().weight; - xsection_ += wt; - xsection_error_ += wt*wt; // this error estimate is too small + wt_sum_ += wt; + wt2_sum_ += wt*wt; // this error estimate is too small } bool pass_cuts( HEJ::Event const & /* event */, HEJ::Event const & /* FO_event */ ) override { return true; } + void set_xs_scale(double xsscale) override { + xs_scale_ = xsscale; + } + void finalise() override { + // compute cross section from sum of weights and scaling factor + const double xsection = wt_sum_ * xs_scale_; + const double xsection_error = std::sqrt(wt2_sum_) * xs_scale_; + // print to screen std::cout << "Generated with:\n"; for(auto const & generator: generators_) std::cout << generator.name << " " << generator.version << "\n"; - std::cout << "cross section: " << xsection_ << " +- " - << std::sqrt(xsection_error_) << std::endl; + std::cout << "cross section: " << xsection << " +- " + << xsection_error << std::endl; // print to file std::ofstream fout{outfile_}; fout << "Generated with\n"; for(auto const & generator: generators_) fout << generator.name << " " << generator.version << "\n"; - fout << "cross section: " << xsection_ << " +- " - << std::sqrt(xsection_error_) << std::endl; + fout << "cross section: " << xsection << " +- " + << xsection_error << std::endl; } private: - double xsection_, xsection_error_; + double wt_sum_, wt2_sum_; + double xs_scale_{1.0}; std::string outfile_; std::vector generators_; }; } extern "C" 
__attribute__((visibility("default"))) std::unique_ptr make_analysis( YAML::Node const & config, LHEF::HEPRUP const & heprup ){ return std::make_unique(config, heprup); } diff --git a/examples/AnalysisROOT.cc b/examples/AnalysisROOT.cc index 7419c9f..f73ab03 100644 --- a/examples/AnalysisROOT.cc +++ b/examples/AnalysisROOT.cc @@ -1,86 +1,118 @@ //! Simple HEJ analysis to plot Delta y_{fb} with ROOT #include // for std::unique_ptr #include // for std::cout +#include #include "TFile.h" +#include "TKey.h" #include "TH1.h" #include "HEJ/Analysis.hh" #include "HEJ/Event.hh" #include "HEJ/Version.hh" namespace YAML { class Node; } namespace LHEF { class HEPRUP; } namespace { void Fill2DHistogram(TH1D & h,double x,double wt) { const int binnumber=h.GetXaxis()->FindBin(x); const double binwidth=h.GetXaxis()->GetBinWidth(binnumber); h.Fill(x,wt/binwidth); } class AnalysisROOT: public HEJ::Analysis { private: + double xs_scale_{1.0}; TFile hfile_; TH1D htotal_,hydif_,hyf_,hyb_; public: AnalysisROOT( YAML::Node const & /* config */, LHEF::HEPRUP const & /* heprup */ ):hfile_{"HEJ.root","RECREATE", ("Output from "+HEJ::Version::package_name_full()).c_str()}, htotal_{"total","total",1,-0.5,.5}, hydif_{"ydif","ydif",40,0.,10.}, hyf_{"yf","yf",40,-5.,5.}, hyb_{"yb","yb",40,-5.,5.} { // Setup histograms htotal_.Sumw2(); hydif_.Sumw2(); hyf_.Sumw2(); hyb_.Sumw2(); } void fill( HEJ::Event const & event, HEJ::Event const & /* FO_event */ ) override { const double weight=event.central().weight; htotal_.Fill(0.0,weight); auto const & jets=event.jets(); // jets are sorted in rapidity Fill2DHistogram(hydif_, fabs(jets.back().rapidity()-jets.front().rapidity()), weight); Fill2DHistogram(hyf_, jets.front().rapidity(), weight); Fill2DHistogram(hyb_, jets.back().rapidity(), weight); } bool pass_cuts( HEJ::Event const & event, HEJ::Event const & /* FO_event */ ) override { if (event.jets().size()<2) return false; return true; } + void set_xs_scale(double xsscale) override { + xs_scale_ = xsscale; + } + void finalise() override { + scale(); hfile_.Write(); hfile_.Close(); - std::cout << "AnalysisROOT bids you farewell " << std::endl; // be polite + std::cout << "AnalysisROOT bids you farewell\n"; // be polite } + private: + void scale() { + std::cout << "AnalysisROOT scales the histograms\n"; + + TList* list = hfile_.GetListOfKeys() ; + if(!list) throw std::logic_error{" No keys defined\n"}; + TIter next(list) ; + TKey* key ; + TObject* obj ; + + while ( (key = (TKey*)next()) ) { + obj = key->ReadObj() ; + auto hist = dynamic_cast(obj); + if(hist) { + std::cerr << "Histo name: " << hist->GetName() + << " title: " << hist->GetTitle() + << "will be scaled\n"; + hist->Scale(xs_scale_); + } else { + std::cerr << " Object " << obj->GetName() + << " is not 1D or 2D histogram : will not be scaled\n"; + } + } + } }; } extern "C" __attribute__((visibility("default"))) std::unique_ptr make_analysis( YAML::Node const & config, LHEF::HEPRUP const & heprup ){ return std::make_unique(config, heprup); } diff --git a/examples/AnalysisTemplate.cc b/examples/AnalysisTemplate.cc index c8ac66e..96bb5c6 100644 --- a/examples/AnalysisTemplate.cc +++ b/examples/AnalysisTemplate.cc @@ -1,48 +1,52 @@ //! 
simple HEJ analysis that doesn't do anything #include // for std::unique_ptr #include #include "HEJ/Analysis.hh" namespace YAML { class Node; } namespace LHEF { class HEPRUP; } namespace { class AnalysisTemplate: public HEJ::Analysis { public: AnalysisTemplate( YAML::Node const & /* config */, LHEF::HEPRUP const & /* heprup */ ) {} void fill( HEJ::Event const & /* event */, HEJ::Event const & /* FO_event */ ) override { } bool pass_cuts( HEJ::Event const & /* event */, HEJ::Event const & /* FO_event */ ) override { return true; } + void set_xs_scale(double /* xsscale */) override { + // typically store this information for later + } + void finalise() override { std::cout << "bye" << std::endl; // be polite } }; } extern "C" __attribute__((visibility("default"))) std::unique_ptr make_analysis( YAML::Node const & config, LHEF::HEPRUP const & heprup ){ return std::make_unique(config, heprup); } diff --git a/examples/WRapidityAnalysisROOT.cc b/examples/WRapidityAnalysisROOT.cc index 5668bbd..1ebdcd0 100644 --- a/examples/WRapidityAnalysisROOT.cc +++ b/examples/WRapidityAnalysisROOT.cc @@ -1,131 +1,164 @@ //! Simple HEJ analysis to plot y_W with ROOT //! //! See README.md for installation and running instructions. //! #include #include #include #include // for std::unique_ptr #include // for std::cout #include "HEJ/PDG_codes.hh" + #include "TFile.h" +#include "TKey.h" #include "TH1.h" #include "HEJ/Analysis.hh" #include "HEJ/Event.hh" #include "HEJ/Version.hh" namespace YAML { class Node; } namespace LHEF { class HEPRUP; } namespace { void Fill2DHistogram(TH1D & h,double x,double wt) { const int binnumber=h.GetXaxis()->FindBin(x); const double binwidth=h.GetXaxis()->GetBinWidth(binnumber); h.Fill(x,wt/binwidth); } class WRapidityAnalysisROOT: public HEJ::Analysis { private: + double xs_scale_{1.0}; TFile hfile_; TH1D htotal_,hydif_,hyf_,hyb_,hyW_,hyWsampling_; public: WRapidityAnalysisROOT( YAML::Node const & /* config */, LHEF::HEPRUP const & /* heprup */ ):hfile_{"HEJ.root","RECREATE", ("Output from "+HEJ::Version::package_name_full()).c_str()}, htotal_{"total","total",1,-0.5,.5}, hydif_{"ydif","ydif",40,0.,10.}, hyf_{"yf","yf",40,-5.,5.}, hyb_{"yb","yb",40,-5.,5.}, hyW_{"yW","yW",40,-5.,5.}, hyWsampling_{"yWsampling","yWsampling",40,-5.,5.} { // Setup histograms htotal_.Sumw2(); hydif_.Sumw2(); hyf_.Sumw2(); hyb_.Sumw2(); hyW_.Sumw2(); } void fill( HEJ::Event const & event, HEJ::Event const & /* FO_event */ ) override { const double weight = event.central().weight; htotal_.Fill(0.0,weight); auto const & jets = event.jets(); // jets are sorted in rapidity // find the W momentum auto const & outgoing = event.outgoing(); auto the_boson = std::find_if( outgoing.begin(), outgoing.end(), [](auto const & particle) { return std::abs(particle.type) == HEJ::pid::Wp; } ); // we already checked in `pass_cuts` that there is a boson, // so the following assert cannot fail assert(the_boson != outgoing.end()); Fill2DHistogram(hydif_, std::abs(jets.back().rapidity()-jets.front().rapidity()), weight); Fill2DHistogram(hyf_, jets.front().rapidity(), weight); Fill2DHistogram(hyb_, jets.back().rapidity(), weight); Fill2DHistogram(hyW_, the_boson->rapidity(), weight); Fill2DHistogram(hyWsampling_, the_boson->rapidity(),1); } bool pass_cuts( HEJ::Event const & event, HEJ::Event const & /* FO_event */ ) override { if(event.jets().size() < 2) return false; auto const & outgoing = event.outgoing(); // Find the W boson auto the_W_boson = std::find_if( outgoing.begin(), outgoing.end(), [](auto const & particle) { return 
std::abs(particle.type) == HEJ::pid::Wp; } ); if(the_W_boson == outgoing.end()) return false; const auto W_pos = std::distance(outgoing.begin(), the_W_boson); // Find the decay products const auto the_decay = event.decays().find(W_pos); if(the_decay == event.decays().end()) return false; // Find the charged lepton const auto the_charged_lepton = std::find_if( the_decay->second.begin(), the_decay->second.end(), [](HEJ::Particle const & p) { return HEJ::is_anylepton(p) && !HEJ::is_anyneutrino(p); } ); if(the_charged_lepton == the_decay->second.end()) return false; return std::abs(the_charged_lepton->rapidity()) > 2.5; } + void set_xs_scale(double xsscale) override { + xs_scale_ = xsscale; + } + void finalise() override { + scale(); hfile_.Write(); hfile_.Close(); - std::cout << "WRapidityAnalysisROOT bids you farewell " << std::endl; // be polite + std::cout << "WRapidityAnalysisROOT bids you farewell\n"; // be polite + } + + private: + void scale() { + std::cout << "AnalysisROOT scales the histograms\n"; + + TList* list = hfile_.GetListOfKeys() ; + if(!list) throw std::logic_error{" No keys defined\n"}; + TIter next(list) ; + TKey* key ; + TObject* obj ; + + while ( (key = (TKey*)next()) ) { + obj = key->ReadObj() ; + auto hist = dynamic_cast(obj); + if(hist) { + std::cerr << "Histo name: " << hist->GetName() + << " title: " << hist->GetTitle() + << "will be scaled\n"; + hist->Scale(xs_scale_); + } else { + std::cerr << " Object " << obj->GetName() + << " is not 1D or 2D histogram : will not be scaled\n"; + } + } } }; } extern "C" __attribute__((visibility("default"))) std::unique_ptr make_analysis( YAML::Node const & config, LHEF::HEPRUP const & heprup ){ return std::make_unique(config, heprup); } diff --git a/include/HEJ/Analysis.hh b/include/HEJ/Analysis.hh index b165b38..3f6b45c 100644 --- a/include/HEJ/Analysis.hh +++ b/include/HEJ/Analysis.hh @@ -1,50 +1,53 @@ /** \file * \brief Header file for the Analysis interface * * This header contains declarations that facilitate creating custom analyses * to be used with HEJ 2. * \todo link to user documentation * * \authors The HEJ collaboration (see AUTHORS for details) - * \date 2019 + * \date 2022-2023 * \copyright GPLv2 or later */ #pragma once namespace LHEF { class HEPRUP; } //! Main HEJ 2 Namespace namespace HEJ { class Event; //! Analysis base class /** * This is the interface that all analyses should implement, * i.e. all custom analyses have to be derived from this struct. */ struct Analysis { /** * @param res_event The event in resummation phase space * @param FO_event The original fixed-order event */ virtual void fill(Event const & res_event, Event const & FO_event) = 0; //! Decide whether an event passes the cuts /** * @param res_event The event in resummation phase space * @param FO_event The original fixed-order event * @returns Whether the event passes all cuts */ virtual bool pass_cuts(Event const & res_event, Event const & FO_event) = 0; //! Finalise analysis /** * This function is called after all events have been processed and * can be used for example to print out or save the results. */ virtual void finalise() = 0; + //! 
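
A minimal sketch (not part of HEJ) of the pattern the two example analyses above follow with the new hook: cache the factor handed to set_xs_scale() and apply it in finalise(). The class name MyXSAnalysis and the printout are illustrative only; event.central().weight and the extern "C" make_analysis factory mirror the examples shipped with HEJ.

// Sketch only: a user analysis turning the summed event weights into a
// cross-section estimate via the factor from set_xs_scale().
#include <iostream>
#include <memory>

#include "HEJ/Analysis.hh"
#include "HEJ/Event.hh"

namespace YAML { class Node; }
namespace LHEF { class HEPRUP; }

namespace {
  class MyXSAnalysis: public HEJ::Analysis {
  public:
    MyXSAnalysis(YAML::Node const & /*config*/, LHEF::HEPRUP const & /*heprup*/) {}

    void fill(HEJ::Event const & event, HEJ::Event const & /*FO_event*/) override {
      sum_weights_ += event.central().weight;   // accumulate raw weights
    }

    bool pass_cuts(HEJ::Event const & /*event*/, HEJ::Event const & /*FO_event*/) override {
      return true;                              // no cuts in this sketch
    }

    void set_xs_scale(double xsscale) override {
      xs_scale_ = xsscale;                      // (cross section) / (sum of weights)
    }

    void finalise() override {
      // xs_scale_ * sum(weights) is the cross section seen by this analysis
      std::cout << "sigma = " << xs_scale_ * sum_weights_ << " pb\n";
    }

  private:
    double sum_weights_ = 0.;
    double xs_scale_ = 1.;
  };
}

extern "C" __attribute__((visibility("default")))
std::unique_ptr<HEJ::Analysis> make_analysis(
  YAML::Node const & config, LHEF::HEPRUP const & heprup
){
  return std::make_unique<MyXSAnalysis>(config, heprup);
}
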
Set the ratio (cross section) / (sum of event weights) + virtual void set_xs_scale(double scale) = 0; + virtual ~Analysis() = default; }; } // namespace HEJ diff --git a/include/HEJ/CombinedEventWriter.hh b/include/HEJ/CombinedEventWriter.hh index 66ff8e6..3933b65 100644 --- a/include/HEJ/CombinedEventWriter.hh +++ b/include/HEJ/CombinedEventWriter.hh @@ -1,50 +1,53 @@ /** \file * \brief Declares the CombinedEventWriter class * * \authors The HEJ collaboration (see AUTHORS for details) - * \date 2019-2020 + * \date 2019-2023 * \copyright GPLv2 or later */ #pragma once #include #include #include "HEJ/EventWriter.hh" namespace LHEF { class HEPRUP; } namespace HEJ { class Event; struct OutputFile; //! Write event output to zero or more output files. class CombinedEventWriter: public EventWriter { public: //! Constructor /** * @param outfiles Specifies files output should be written to. * Each entry in the vector contains a file name * and output format. * @param heprup General process information */ CombinedEventWriter( std::vector const & outfiles, LHEF::HEPRUP const & heprup ); ~CombinedEventWriter() override; //! Write one event to all output files void write(Event const & /*ev*/) override; + //! Set the ratio (cross section) / (sum of event weights) + void set_xs_scale(double scale) override; + //! Finish writing, e.g. write out general informations at end of run //! @warning using the writer after finishing is not guaranteed to work void finish() override; private: std::vector> writers_; }; } // namespace HEJ diff --git a/include/HEJ/CrossSectionAccumulator.hh b/include/HEJ/CrossSectionAccumulator.hh index 42bdd63..ff81fea 100644 --- a/include/HEJ/CrossSectionAccumulator.hh +++ b/include/HEJ/CrossSectionAccumulator.hh @@ -1,107 +1,110 @@ /** * \authors The HEJ collaboration (see AUTHORS for details) * \date 2019-2020 * \copyright GPLv2 or later */ #pragma once #include #include #include #include #include "HEJ/event_types.hh" namespace HEJ { class Event; //! Collection of Cross Section with its uncertainty template struct XSWithError { T value = T{}; //!< Cross Section T error = T{}; //!< Squared error (Variance) }; /** * @brief Sum of Cross Section for different subproccess */ class CrossSectionAccumulator { public: //! Fill with single event //! @note for multiple resummation events use fill_correlated() instead void fill(Event const & ev); //! Fill by weight and type //! @note for multiple resummation events use fill_correlated() instead void fill(double wt, event_type::EventType type); //! Fill by weight, error and type //! @note The error will be _added_ to the current error void fill(double wt, double err, event_type::EventType type); /** * @brief Fill with multiple correlated weights * @details This should be used to fill multiple reweighted events, * coming from the same fixed order point. * Total error for \f$N\f$ fixed order points each giving \f$M_i\f$ * resummation events is: * \f[ * \delta^2=\sum_i \left(\sum_j w_{i,j}\right)^2 * +\sum_{i,j} \left(w_{i,j}\right)^2, * \f] * @note This is equivalent to fill() for only one reweighted event * coming from each fixed order point (\f$M_i=1\f$) */ void fill_correlated(std::vector const & evts); //! iterator implementation of fill_correlated() template void fill_correlated(ConstIt begin, ConstIt end); //! explicit version of fill_correlated() by giving sum(wt) and sum(wt^2) void fill_correlated(double sum, double sum2, event_type::EventType type); //! 
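
A toy illustration of the correlated error formula quoted in the fill_correlated() documentation above: two fixed-order points, each giving one or more resummation weights (numbers are arbitrary). Each point contributes (sum_j w_ij)^2 + sum_j w_ij^2 to the squared error, exactly as fill_correlated(sum, sum2, type) does.

// Illustration only: evaluating the correlated error formula by hand.
#include <cmath>
#include <iostream>
#include <vector>

int main() {
  // w[i][j]: weight of resummation event j from fixed-order point i
  const std::vector<std::vector<double>> w{{0.4, 0.1}, {0.3}};

  double value = 0., var = 0.;
  for (auto const & point : w) {
    double sum = 0., sum2 = 0.;
    for (double wt : point) { sum += wt; sum2 += wt * wt; }
    value += sum;
    var   += sum * sum + sum2;   // (sum_j w_ij)^2 + sum_j w_ij^2 per point
  }
  std::cout << "sigma = " << value << " +- " << std::sqrt(var) << '\n';
}
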
begin of Cross Section and error for subprocesses auto begin() const { return std::begin(xs_); } //! end of Cross Section and error for subprocesses auto end() const { return std::end(xs_); } //! total Cross Section and error XSWithError const & total() const { return total_; } //! Cross Section and error of specific type XSWithError const & at(event_type::EventType type) const { return xs_.at(type); } //! Cross Section and error of specific type XSWithError const & operator[](event_type::EventType type) const { return at(type); } + // method to scale total_ and xs_ after they have been filled + void scale(double xsscale); + private: std::map> xs_; XSWithError total_; }; //! Print CrossSectionAccumulator to stream std::ostream& operator<<(std::ostream& os, const CrossSectionAccumulator& xs); // ------------ Implementation ------------ template void CrossSectionAccumulator::fill_correlated(ConstIt begin, ConstIt end){ if(std::distance(begin, end) < 2){ // only one event fill(*begin); return; } double sum = 0.; double sum2 = 0.; const auto type = begin->type(); for(; begin != end; ++begin){ double const wt = begin->central().weight; sum += wt; sum2 += wt*wt; } fill_correlated(sum, sum2, type); } } // namespace HEJ diff --git a/include/HEJ/EmptyAnalysis.hh b/include/HEJ/EmptyAnalysis.hh index abfbab9..f093c12 100644 --- a/include/HEJ/EmptyAnalysis.hh +++ b/include/HEJ/EmptyAnalysis.hh @@ -1,34 +1,35 @@ /** \file * \brief Declaration of the trivial (empty) analysis * * \authors The HEJ collaboration (see AUTHORS for details) * \date 2019-2022 * \copyright GPLv2 or later */ #pragma once #include "HEJ/detail/EmptyAnalysis.hh" namespace HEJ { /** An analysis that does nothing * * This analysis is used by default if no user analysis is specified. * The member functions don't do anything and events passed to the * analysis are simply ignored. * * \internal The actual logic is in detail::EmptyAnalysis. * The reason for this split is that until HEJ 2.3 * `get_analysis` should return an `EmptyAnalysis` as a fallback, * to not destroy backward compatibility without warning. * Returning a detail::EmptyAnalysis instead avoids an internal * deprecation warning during compilation. */ struct [[deprecated("to be removed in HEJ 2.3")]] EmptyAnalysis: detail::EmptyAnalysis { static std::unique_ptr create( YAML::Node const & parameters, LHEF::HEPRUP const & /*unused*/ ); + ~EmptyAnalysis() override = default; }; } // namespace HEJ diff --git a/include/HEJ/EventReader.hh b/include/HEJ/EventReader.hh index b32d6f9..43c861f 100644 --- a/include/HEJ/EventReader.hh +++ b/include/HEJ/EventReader.hh @@ -1,59 +1,55 @@ /** \file * \brief Header file for event reader interface * * This header defines an abstract base class for reading events from files. * * \authors The HEJ collaboration (see AUTHORS for details) * \date 2019-2022 * \copyright GPLv2 or later */ #pragma once #include #include #include #include namespace LHEF { class HEPEUP; class HEPRUP; } namespace HEJ { //! Abstract base class for reading events from files struct EventReader { //! Read an event virtual bool read_event() = 0; //! Access header text virtual std::string const & header() const = 0; //! Access run information virtual LHEF::HEPRUP const & heprup() const = 0; //! Access last read event virtual LHEF::HEPEUP const & hepeup() const = 0; //! 
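
The convention behind the new scale() member declared above: XSWithError::error holds the *squared* uncertainty, so rescaling all weights by a factor s multiplies the value by s but the stored error by s*s, leaving the relative uncertainty sqrt(error)/value untouched. A small stand-alone sketch (toy numbers, not HEJ code):

#include <cassert>
#include <cmath>

struct ToyXS { double value = 10.; double error = 4.; };   // error = squared uncertainty

int main() {
  ToyXS xs;
  const double rel_before = std::sqrt(xs.error) / xs.value;
  const double s = 0.25;       // some ratio (cross section)/(sum of weights)
  xs.value *= s;               // linear in the weights
  xs.error *= s * s;           // quadratic, since it is a variance
  const double rel_after = std::sqrt(xs.error) / xs.value;
  assert(std::fabs(rel_before - rel_after) < 1e-12);  // relative error unchanged
  return 0;
}
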
Guess number of events from header - virtual std::optional number_events() const { - std::size_t start = header().rfind("Number of Events"); - start = header().find_first_of("123456789", start); - if(start == std::string::npos) { - return {}; - } - const std::size_t end = header().find_first_not_of("0123456789", start); - return std::stoi(header().substr(start, end - start)); - } + virtual std::optional number_events() const = 0; + + virtual double scalefactor() const { + return 1.; + }; virtual ~EventReader() = default; }; //! Factory function for event readers /** * @param filename The name of the input file * @returns A pointer to an instance of an EventReader * for the input file */ std::unique_ptr make_reader(std::string const & filename); } // namespace HEJ diff --git a/include/HEJ/EventWriter.hh b/include/HEJ/EventWriter.hh index c4b437e..9e636d1 100644 --- a/include/HEJ/EventWriter.hh +++ b/include/HEJ/EventWriter.hh @@ -1,41 +1,45 @@ /** \file * \brief Header file for the EventWriter interface. * * \authors The HEJ collaboration (see AUTHORS for details) - * \date 2019-2020 + * \date 2019-2023 * \copyright GPLv2 or later */ #pragma once #include namespace HEJ { class Event; //! Pure abstract base class for event writers class EventWriter { public: //! Write an event virtual void write(Event const &) = 0; + + //! Set the ratio (cross section) / (sum of event weights) + virtual void set_xs_scale(double scale) = 0; + //! Finish writing, e.g. write out general informations at end of run //! @warning using the writer after finishing is not guaranteed to work virtual void finish(){finished_=true;}; virtual ~EventWriter() = default; protected: //! @brief If writer is not finished run finish() and abort on error //! //! @param writer writer that is expected to finish, i.e. `this` //! @param name name for error message //! //! Used in the destructor of inherited EventWriter to force finish() //! @warning When finish() failed this will abort the whole program! void finish_or_abort( EventWriter* writer, std::string const & name ) const noexcept; bool finished() const {return finished_;}; private: bool finished_ = false; }; } // namespace HEJ diff --git a/include/HEJ/HDF5Writer.hh b/include/HEJ/HDF5Writer.hh index 2843eca..afddd7e 100644 --- a/include/HEJ/HDF5Writer.hh +++ b/include/HEJ/HDF5Writer.hh @@ -1,57 +1,60 @@ /** \file * \brief Contains the EventWriter for HDF5 Output. * * The output format is specified in arXiv:1905.05120. * * \authors The HEJ collaboration (see AUTHORS for details) - * \date 2019-2020 + * \date 2019-2023 * \copyright GPLv2 or later */ #pragma once #include #include #include "HEJ/EventWriter.hh" namespace LHEF { class HEPRUP; } namespace HEJ { class Event; //! This is an event writer specifically for HDF5 output. /** * \internal Implementation note: This uses the pimpl ("pointer to * implementation") idiom. HDF5 support is optional. Without pimpl, * we would have to specify whether HDF5 is available via the * preprocessor whenever this header is included. We don't want to * burden users of the HEJ library (for example the HEJ fixed-order * generator) with those details */ class HDF5Writer: public EventWriter{ public: //! Constructor /** * @param file name of the output file * @param heprup general process information */ HDF5Writer(std::string const & file, LHEF::HEPRUP heprup); HDF5Writer() = delete; //! Write an event to the output file void write(Event const & ev) override; + //! 
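
How the new reader and writer hooks are meant to fit together is easiest to see in a schematic driver loop. This is a sketch only, not the actual HEJ main program (which also handles resummation, scale variations and several writers), and the real code may fold additional normalisation into the ratio; here it simply forwards EventReader::scalefactor().

#include "HEJ/Analysis.hh"
#include "HEJ/EventReader.hh"
#include "HEJ/EventWriter.hh"

void run(HEJ::EventReader & reader, HEJ::EventWriter & writer, HEJ::Analysis & analysis) {
  while (reader.read_event()) {
    // ... build a HEJ::Event from reader.hepeup(), generate resummation events,
    //     call analysis.pass_cuts()/fill() and writer.write() (omitted here) ...
  }
  // ratio (cross section)/(sum of event weights); for Sherpa-style input this
  // is 1/(number of trials), otherwise scalefactor() defaults to 1
  const double xs_scale = reader.scalefactor();
  writer.set_xs_scale(xs_scale);    // writers rescale their cross-section records
  analysis.set_xs_scale(xs_scale);  // analyses can normalise their histograms
  writer.finish();
  analysis.finalise();
}
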
Set the ratio (cross section) / (sum of event weights) + void set_xs_scale(double scale) override; + //! Finish writing void finish() override; ~HDF5Writer() override; private: struct HDF5WriterImpl; std::unique_ptr impl_; }; } // namespace HEJ diff --git a/include/HEJ/HepMC2Interface.hh b/include/HEJ/HepMC2Interface.hh index 40c918f..f896e58 100644 --- a/include/HEJ/HepMC2Interface.hh +++ b/include/HEJ/HepMC2Interface.hh @@ -1,77 +1,81 @@ /** \file * \brief Header file for the HepMC2Interface * * \authors The HEJ collaboration (see AUTHORS for details) - * \date 2019-2020 + * \date 2019-2023 * \copyright GPLv2 or later */ #pragma once #include #include #include "HEJ/PDG_codes.hh" namespace HepMC { class GenCrossSection; class GenEvent; } namespace LHEF { class HEPRUP; } namespace HEJ { class Event; //! This class converts the Events into HepMC::GenEvents /** * \details The output is depended on the HepMC version HEJ is compiled with, * both HepMC 2 and HepMC 3 are supported. If HEJ 2 is compiled * without HepMC calling this interface will throw an error. * * This interface will also keep track of the cross section of all the events that * being fed into it. */ class HepMC2Interface{ public: HepMC2Interface(LHEF::HEPRUP const & /*heprup*/); /** * \brief main function to convert an event into HepMC::GenEvent * * \param event Event to convert * \param weight_index optional selection of specific weight * (negative value gives central weight) */ HepMC::GenEvent operator()(Event const & event, int weight_index = -1); /** * \brief Sets the central value from \p event to \p out_ev * * \param out_ev HepMC::GenEvent to write to * \param event Event to convert * \param weight_index optional selection of specific weight * (negative value gives "central") * * This overwrites the central value of \p out_ev. */ void set_central(HepMC::GenEvent & out_ev, Event const & event, int weight_index = -1); + //! Set the ratio (cross section) / (sum of event weights) + void set_xs_scale(double scale); + protected: /** * \brief initialise generic event infomrations (not central weights) * * \param event Event to convert */ HepMC::GenEvent init_event(Event const & event) const; private: std::array beam_particle_{}; std::array beam_energy_{}; std::size_t event_count_; double tot_weight_; double tot_weight2_; + double xs_scale_ = 1.; HepMC::GenCrossSection cross_section() const; }; } // namespace HEJ diff --git a/include/HEJ/HepMC2Writer.hh b/include/HEJ/HepMC2Writer.hh index 004c08a..ba4b354 100644 --- a/include/HEJ/HepMC2Writer.hh +++ b/include/HEJ/HepMC2Writer.hh @@ -1,53 +1,56 @@ /** \file * \brief Contains the EventWriter for HepMC Output. * * \authors The HEJ collaboration (see AUTHORS for details) - * \date 2019-2020 + * \date 2019-2023 * \copyright GPLv2 or later */ #pragma once #include #include #include "HEJ/EventWriter.hh" namespace LHEF { class HEPRUP; } namespace HEJ { class Event; //! This is an event writer specifically for HepMC output. /** * \internal Implementation note: * This uses the pimpl ("pointer to implementation") idiom. * HepMC support is optional and the implementation depends on the * HepMC version. Without pimpl, we would have to specify the HepMC version * via the preprocessor whenever this header is included. We don't want to * burden users of the HEJ library (for example the HEJ fixed-order generator) * with those details */ class HepMC2Writer: public EventWriter{ public: //! 
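
The pimpl idiom referred to in the implementation notes of the last two writer headers boils down to the following structure (a generic illustration, not HEJ code): only the .cc file ever sees the optional third-party headers, so users of the public header need no build-time configuration.

// Widget.hh -- no HDF5/HepMC headers needed here
#include <memory>

class Widget {
public:
  Widget();
  ~Widget();               // must be defined where Impl is a complete type
  void do_something();
private:
  struct Impl;             // defined only in Widget.cc
  std::unique_ptr<Impl> impl_;
};

// Widget.cc -- may include heavy or optional third-party headers
struct Widget::Impl {
  void do_something() { /* talk to the optional library here */ }
};

Widget::Widget(): impl_{std::make_unique<Impl>()} {}
Widget::~Widget() = default;   // unique_ptr<Impl> destroyed with complete type
void Widget::do_something() { impl_->do_something(); }
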
Constructor /** * @param file name of the output file * @param heprup general process information */ HepMC2Writer(std::string const & file, LHEF::HEPRUP heprup); HepMC2Writer() = delete; //! Write an event to the output file void write(Event const & ev) override; + //! Set the ratio (cross section) / (sum of event weights) + void set_xs_scale(double scale) override; + ~HepMC2Writer() override; private: struct HepMC2WriterImpl; std::unique_ptr impl_; }; } // namespace HEJ diff --git a/include/HEJ/HepMC3Interface.hh b/include/HEJ/HepMC3Interface.hh index 94f821f..83e9d6e 100644 --- a/include/HEJ/HepMC3Interface.hh +++ b/include/HEJ/HepMC3Interface.hh @@ -1,89 +1,93 @@ /** \file * \brief Header file for the HepMC3Interface * * \authors The HEJ collaboration (see AUTHORS for details) - * \date 2019-2020 + * \date 2019-2023 * \copyright GPLv2 or later */ #pragma once #include #include #include #include "HEJ/PDG_codes.hh" namespace HepMC3 { class GenCrossSection; class GenEvent; class GenRunInfo; } namespace LHEF { class HEPRUP; } namespace HEJ { class Event; //! This class converts the Events into HepMC3::GenEvents /** * \details The output is depended on the HepMC3 version HEJ is compiled with, * both HepMC3 2 and HepMC3 3 are supported. If HEJ 2 is compiled * without HepMC3 calling this interface will throw an error. * * This interface will also keep track of the cross section of all the events that * being fed into it. */ class HepMC3Interface{ public: HepMC3Interface(LHEF::HEPRUP heprup); ~HepMC3Interface(); HepMC3Interface & operator=(HepMC3Interface const & other) = default; HepMC3Interface(HepMC3Interface const & other) = default; HepMC3Interface & operator=(HepMC3Interface && other) = default; HepMC3Interface(HepMC3Interface && other) = default; /** * \brief main function to convert an event into HepMC3::GenEvent * * \param event Event to convert * \param weight_index optional selection of specific weight * (negative value gives central weight) */ HepMC3::GenEvent operator()(Event const & event, int weight_index = -1); /** * \brief Sets the central value from \p event to \p out_ev * * \param out_ev HepMC3::GenEvent to write to * \param event Event to convert * \param weight_index optional selection of specific weight * (negative value gives "central") * * This overwrites the central value of \p out_ev. */ void set_central(HepMC3::GenEvent & out_ev, Event const & event, int weight_index = -1); //! Pointer to generic run informations std::shared_ptr run_info(){ return run_info_; } + //! Set the ratio (cross section) / (sum of event weights) + void set_xs_scale(double scale); + protected: /** * \brief initialise generic event infomrations (not central weights) * * \param event Event to convert */ HepMC3::GenEvent init_event(Event const & event) const; private: std::array beam_particle_{}; std::array beam_energy_{}; std::shared_ptr run_info_; std::size_t event_count_; double tot_weight_; double tot_weight2_; + double xs_scale_ = 1.; std::shared_ptr xs_; }; } // namespace HEJ diff --git a/include/HEJ/HepMC3Writer.hh b/include/HEJ/HepMC3Writer.hh index 05a6b88..b628520 100644 --- a/include/HEJ/HepMC3Writer.hh +++ b/include/HEJ/HepMC3Writer.hh @@ -1,55 +1,58 @@ /** \file * \brief Contains the EventWriter for HepMC3 Output. * * \authors The HEJ collaboration (see AUTHORS for details) - * \date 2019-2020 + * \date 2019-2023 * \copyright GPLv2 or later */ #pragma once #include #include #include "HEJ/EventWriter.hh" namespace LHEF { class HEPRUP; } namespace HEJ { class Event; //! 
This is an event writer specifically for HepMC3 output. /** * \internal Implementation note: * This uses the pimpl ("pointer to implementation") idiom. * HepMC3 support is optional and the implementation depends on the * HepMC3 version. Without pimpl, we would have to specify the HepMC3 version * via the preprocessor whenever this header is included. We don't want to * burden users of the HEJ library (for example the HEJ fixed-order generator) * with those details */ class HepMC3Writer: public EventWriter{ public: //! Constructor /** * @param file name of the output file * @param heprup general process information */ HepMC3Writer(std::string const & file, LHEF::HEPRUP heprup); HepMC3Writer() = delete; //! Write an event to the output file void write(Event const & ev) override; + //! Set the ratio (cross section) / (sum of event weights) + void set_xs_scale(double scale) override; + //! Finish writing void finish() override; ~HepMC3Writer() override; private: struct HepMC3WriterImpl; std::unique_ptr impl_; }; } // namespace HEJ diff --git a/include/HEJ/LesHouchesReader.hh b/include/HEJ/LesHouchesReader.hh index a84421b..9f7cc31 100644 --- a/include/HEJ/LesHouchesReader.hh +++ b/include/HEJ/LesHouchesReader.hh @@ -1,85 +1,88 @@ /** \file * \brief Header file for reading events in the Les Houches Event File format. * * \authors The HEJ collaboration (see AUTHORS for details) - * \date 2019-2020 + * \date 2019-2022 * \copyright GPLv2 or later */ #pragma once #include +#include #include "LHEF/LHEF.h" #include "HEJ/EventReader.hh" #include "HEJ/stream.hh" namespace HEJ { //! Class for reading events from a file in the Les Houches Event File format class LesHouchesReader : public EventReader{ public: //! Contruct object reading from the given file - explicit LesHouchesReader(std::string const & filename): - stream_{filename}, - reader_{stream_} - { - // always use the newest LHE version - reader_.heprup.version = LHEF::HEPRUP().version; - } + explicit LesHouchesReader(std::string const & filename); //! Read an event - bool read_event() override { - return reader_.readEvent(); - } + bool read_event() override; //! Access header text std::string const & header() const override { return reader_.headerBlock; } //! Access run information LHEF::HEPRUP const & heprup() const override { return reader_.heprup; } //! Access last read event LHEF::HEPEUP const & hepeup() const override { return reader_.hepeup; } - private: - istream stream_; - LHEF::Reader reader_; - - protected: - //! Underlying reader - LHEF::Reader & reader(){ - return reader_; + std::optional number_events() const override { + std::size_t start = header().rfind("Number of Events"); + start = header().find_first_of("123456789", start); + if(start == std::string::npos) { + return {}; + } + const std::size_t end = header().find_first_not_of("0123456789", start); + return std::stoi(header().substr(start, end - start)); } - }; - /** - * @brief Les Houches Event file reader for LHE files created by Sherpa - * @details In Sherpa the cross section is given by - * sum(weights)/(number of trials). This EventReader converts the - * weights such that cross section=sum(weights) - * @note Reading from a pipe is not possible! - */ - class SherpaLHEReader : public LesHouchesReader{ - public: - //! 
Inialise Reader for a Sherpa LHE file - explicit SherpaLHEReader(std::string const & filename); - - bool read_event() override; - - std::optional number_events() const override { - return num_events_; + double scalefactor() const override { + if (generator_==Generator::Sherpa) { + const std::size_t num_trials = std::accumulate( + num_trials_.begin(), num_trials_.end(), 0 + ); + return 1./static_cast(num_trials); + } + else + return 1.; } private: - double num_trials_; - size_t num_events_; + enum class Generator{ + HEJ, + HEJFOG, + Sherpa, + MG, + unknown + }; + + bool calculate_XSECUP() { + return calculate_XSECUP_; + } + Generator get_generator( + LHEF::HEPRUP const & heprup, std::string const & header + ); + + istream stream_; + LHEF::Reader reader_; + std::vector num_trials_; + Generator generator_; + bool calculate_XSECUP_; }; } // namespace HEJ diff --git a/include/HEJ/LesHouchesWriter.hh b/include/HEJ/LesHouchesWriter.hh index 2137dac..70c4d82 100644 --- a/include/HEJ/LesHouchesWriter.hh +++ b/include/HEJ/LesHouchesWriter.hh @@ -1,63 +1,67 @@ /** \file * \brief Contains the writer for LesHouches output * * \authors The HEJ collaboration (see AUTHORS for details) - * \date 2019-2020 + * \date 2019-2023 * \copyright GPLv2 or later */ #pragma once #include #include #include #include "LHEF/LHEF.h" #include "HEJ/EventWriter.hh" namespace HEJ { class Event; //! Class for writing events to a file in the Les Houches Event File format class LesHouchesWriter : public EventWriter{ public: //! Constructor /** * @param file Name of output file * @param heprup General process information */ LesHouchesWriter(std::string const & file, LHEF::HEPRUP heprup); LesHouchesWriter(LesHouchesWriter const & other) = delete; LesHouchesWriter & operator=(LesHouchesWriter const & other) = delete; /** @TODO in principle, this class should be movable * but that somehow(?) breaks the write member function */ LesHouchesWriter(LesHouchesWriter && other) = delete; LesHouchesWriter & operator=(LesHouchesWriter && other) = delete; ~LesHouchesWriter() override; //! Write an event to the file specified in the constructor void write(Event const & ev) override; + //! Set the ratio (cross section) / (sum of event weights) + void set_xs_scale(double scale) override; + //! Dump general information & finish writing void finish() override; private: void write_init(){ writer_->init(); } - void rewrite_init(); + void rewrite_init_and_close(); LHEF::HEPRUP & heprup(){ return writer_->heprup; } LHEF::HEPEUP & hepeup(){ return writer_->hepeup; } std::fstream out_; std::unique_ptr writer_; + double xs_scale_ = 1.; }; } // namespace HEJ diff --git a/include/HEJ/ProgressBar.hh b/include/HEJ/ProgressBar.hh index 5f38051..bf895aa 100644 --- a/include/HEJ/ProgressBar.hh +++ b/include/HEJ/ProgressBar.hh @@ -1,94 +1,96 @@ /** \file * \brief Contains the ProgressBar class * * \authors The HEJ collaboration (see AUTHORS for details) * \date 2019 * \copyright GPLv2 or later */ #pragma once #include #include #include #include namespace HEJ { //! Class representing (and printing) a progress bar template class ProgressBar { public: //! Constructor /** * @param out Output stream * @param max Maximum value of the progress parameter * * This will print a fixed-width progress bar, which is initially at 0%. */ ProgressBar(std::ostream & out, T max) : out_{out}, max_{max} { - if(max <= 0) { + if(max < 0) { throw std::invalid_argument{ "Maximum in progress bar has to be positive" }; } - print_bar(); + if (max != 0) { + print_bar(); + } } //! 
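
A toy illustration of the Sherpa weight convention now handled inside LesHouchesReader::scalefactor(): with sigma = sum(weights)/(number of trials), the ratio handed to set_xs_scale() is simply 1/(number of trials). Weights and trial count below are made up.

#include <iostream>
#include <numeric>
#include <vector>

int main() {
  const std::vector<double> weights{2.0, 1.5, 0.5};   // made-up event weights
  const std::size_t num_trials = 10;                  // made-up trial count

  const double sum_weights = std::accumulate(weights.begin(), weights.end(), 0.);
  const double xs_scale = 1. / static_cast<double>(num_trials);

  std::cout << "sigma = " << xs_scale * sum_weights << "  (= sum(w)/trials)\n";
}
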
Increment progress /** * @param count Value to add to the current progress parameter * * After updating the progess parameter, the progress bar is updated * to a percentage that corresponds to the ratio of the current and * maximum progress parameters. */ ProgressBar & increment(T count) { counter_ += count; update_progress(); return *this; } //! Increase progress by one unit /** * After updating the progess parameter, the progress bar is updated * to a percentage that corresponds to the ratio of the current and * maximum progress parameters. */ ProgressBar & operator++() { ++counter_; update_progress(); return *this; } private: void update_progress() { counter_ = std::min(counter_, max_); const int ndots = (100*counter_)/max_; const int new_dots = ndots - ndots_; if(new_dots > 0) { for(int dot = 0; dot < new_dots; ++dot) out_.get() << '.'; out_.get().flush(); ndots_ = ndots; } } void print_bar() const { out_.get() << "0% "; for(int i = 10; i <= 100; i+= 10){ out_.get() << " " + std::to_string(i) + "%"; } out_.get() << "\n|"; for(int i = 10; i <= 100; i+= 10){ out_.get() << "---------|"; } out_.get() << '\n'; } std::reference_wrapper out_; T counter_ = 0; T ndots_ = 0; T max_; }; } // namespace HEJ diff --git a/include/HEJ/RivetAnalysis.hh b/include/HEJ/RivetAnalysis.hh index 7cadf17..64c3955 100644 --- a/include/HEJ/RivetAnalysis.hh +++ b/include/HEJ/RivetAnalysis.hh @@ -1,70 +1,71 @@ /** \file * \brief HEJ 2 interface to rivet analyses * * \authors The HEJ collaboration (see AUTHORS for details) - * \date 2019-2022 + * \date 2019-2023 * \copyright GPLv2 or later */ #pragma once #include #include #include #include "LHEF/LHEF.h" #include "HEJ/Analysis.hh" namespace YAML { class Node; } namespace HEJ { /** * @brief Class representing a Rivet analysis * * This class inherits from Analysis and can therefore be used * like any other HEJ 2 analysis. */ class RivetAnalysis: public Analysis { public: //! Create RivetAnalysis static std::unique_ptr create( YAML::Node const & config, LHEF::HEPRUP const & heprup); //! Constructor /** * @param config Configuration parameters * @param heprup General run informations * * config["rivet"] should be the name of a single Rivet analysis or * a list of Rivet analyses. config["output"] is the prefix for * the .yoda output files. */ RivetAnalysis(YAML::Node const & config, LHEF::HEPRUP heprup); ~RivetAnalysis() override; //! Pass an event to the underlying Rivet analysis void fill(Event const & event, Event const & /*unused*/) override; //! 
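
A short usage example for the ProgressBar class shown above; note that with the relaxed constructor a maximum of zero is now allowed and simply prints nothing.

#include <iostream>

#include "HEJ/ProgressBar.hh"

int main() {
  const int num_events = 1000;
  HEJ::ProgressBar<int> bar{std::cout, num_events}; // prints the 0%..100% scale
  for (int i = 0; i < num_events; ++i) {
    // ... process event i ...
    ++bar;              // prints dots as the completed fraction grows
  }
  std::cout << '\n';
}
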
no additional cuts are applied bool pass_cuts(Event const & /*unused*/, Event const & /*unused*/ ) override { return true; } + void set_xs_scale(double scale) override; void finalise() override; private: std::vector analyses_names_; std::string output_name_; LHEF::HEPRUP heprup_; /// struct to organise the infos per rivet run/scale setting struct rivet_info; std::vector rivet_runs_; /** * \internal * @brief Calculates the scale variation from the first event for the output * file */ void init(Event const & event); bool first_event_; }; } // namespace HEJ diff --git a/include/HEJ/detail/EmptyAnalysis.hh b/include/HEJ/detail/EmptyAnalysis.hh index 1e8355a..08563cc 100644 --- a/include/HEJ/detail/EmptyAnalysis.hh +++ b/include/HEJ/detail/EmptyAnalysis.hh @@ -1,50 +1,52 @@ /** \file * \brief Internal header for empty analysis, to be removed in HEJ 2.3 * * \authors The HEJ collaboration (see AUTHORS for details) - * \date 2022 + * \date 2022-2023 * \copyright GPLv2 or later */ #pragma once #include #include "HEJ/Analysis.hh" namespace YAML { class Node; } namespace LHEF { class HEPRUP; } namespace HEJ { class Event; namespace detail { struct EmptyAnalysis: Analysis{ //! Create EmptyAnalysis static std::unique_ptr create( YAML::Node const & parameters, LHEF::HEPRUP const & /*unused*/); //! Fill event into analysis (e.g. to histograms) /** * This function does nothing */ void fill(Event const & /*res_event*/, Event const & /*FO_event*/) override; //! Whether a resummation event passes all cuts /** * There are no cuts, so all events pass */ bool pass_cuts(Event const & /*res_event*/, Event const & /*FO_event*/) override; + //! Set the ratio (cross section) / (sum of event weights) + void set_xs_scale(double scale) override; //! Finalise analysis /** * This function does nothing */ void finalise() override; ~EmptyAnalysis() override = default; }; } } // namespace HEJ diff --git a/include/HEJ/filesystem.hh b/include/HEJ/filesystem.hh new file mode 100644 index 0000000..77cc71b --- /dev/null +++ b/include/HEJ/filesystem.hh @@ -0,0 +1,19 @@ +/** + * \file + * \brief Utilities for interacting with the file system + * + * \authors The HEJ collaboration (see AUTHORS for details) + * \date 2023 + * \copyright GPLv2 or later + * + * `std::filesystem` requires special library-dependent linker flags with older + * versions of libstc++ and libc++. To avoid this we reimplement the limited + * functionality we need using only POSIX. + * + */ +#pragma once + +namespace HEJ { + //! 
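
A sketch of constructing the Rivet interface directly from the documented configuration keys. Normally these settings come from the HEJ run card; here the YAML node is built by hand for illustration, the analysis name "MC_XS" and output prefix are examples, and the call only works if HEJ was built with Rivet support.

#include "yaml-cpp/yaml.h"

#include "HEJ/RivetAnalysis.hh"
#include "LHEF/LHEF.h"

int main() {
  const YAML::Node config = YAML::Load(
    "rivet: [MC_XS]\n"     // one or more Rivet analyses
    "output: HEJ_rivet\n"  // prefix of the .yoda output files
  );
  LHEF::HEPRUP heprup;     // normally filled from the input event file
  auto analysis = HEJ::RivetAnalysis::create(config, heprup);
  // ... analysis->fill(...), analysis->set_xs_scale(...), analysis->finalise() ...
}
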
Check if the file is regular + bool is_regular(char const * filename); +} diff --git a/src/CombinedEventWriter.cc b/src/CombinedEventWriter.cc index 3139d8b..4951b45 100644 --- a/src/CombinedEventWriter.cc +++ b/src/CombinedEventWriter.cc @@ -1,41 +1,47 @@ /** * \authors The HEJ collaboration (see AUTHORS for details) * \date 2019-2020 * \copyright GPLv2 or later */ #include "HEJ/CombinedEventWriter.hh" #include #include "HEJ/exceptions.hh" #include "HEJ/make_writer.hh" #include "HEJ/output_formats.hh" namespace HEJ { CombinedEventWriter::CombinedEventWriter( std::vector const & outfiles, LHEF::HEPRUP const & heprup ){ writers_.reserve(outfiles.size()); for(OutputFile const & outfile: outfiles){ writers_.emplace_back( make_format_writer(outfile.format, outfile.name, heprup) ); } } void CombinedEventWriter::write(Event const & ev){ for(auto & writer: writers_) writer->write(ev); } + void CombinedEventWriter::set_xs_scale(const double scale) { + for(auto & writer: writers_) { + writer->set_xs_scale(scale); + } + } + void CombinedEventWriter::finish(){ EventWriter::finish(); for(auto & writer: writers_) writer->finish(); } CombinedEventWriter::~CombinedEventWriter(){ finish_or_abort(this, "CombinedEventWriter"); } } // namespace HEJ diff --git a/src/CrossSectionAccumulator.cc b/src/CrossSectionAccumulator.cc index af70c4b..3c27f0e 100644 --- a/src/CrossSectionAccumulator.cc +++ b/src/CrossSectionAccumulator.cc @@ -1,69 +1,81 @@ /** * \authors The HEJ collaboration (see AUTHORS for details) * \date 2019-2020 * \copyright GPLv2 or later */ #include "HEJ/CrossSectionAccumulator.hh" #include #include #include #include #include "HEJ/Event.hh" #include "HEJ/Parameters.hh" namespace HEJ { void CrossSectionAccumulator::fill( double const wt, double const err, event_type::EventType const type ){ total_.value += wt; total_.error += err; auto & entry = xs_[type]; entry.value += wt; entry.error += err; } void CrossSectionAccumulator::fill( double const wt, event_type::EventType const type ){ fill(wt, wt*wt, type); } void CrossSectionAccumulator::fill(Event const & ev){ fill(ev.central().weight, ev.type()); } void CrossSectionAccumulator::fill_correlated( double const sum, double const sum2, event_type::EventType const type ){ fill(sum, sum*sum+sum2, type); } void CrossSectionAccumulator::fill_correlated(std::vector const & evts){ if(evts.empty()) return; fill_correlated(evts.cbegin(), evts.cend()); } + // method to scale total_ and xs_ after they have been filled + void CrossSectionAccumulator::scale(double const xsscale) { + // scale total cross section: + total_.value*=xsscale; + total_.error*=xsscale*xsscale; + + for(auto & temp : xs_) { + temp.second.value*=xsscale; + temp.second.error*=xsscale*xsscale; + } + } + std::ostream& operator<<(std::ostream& os, const CrossSectionAccumulator& xs){ constexpr size_t name_width = 25; constexpr size_t precision = 3; const std::streamsize orig_prec = os.precision(); os << std::scientific << std::setprecision(precision) << " " << std::left << std::setw(name_width) << "Cross section: " << xs.total().value << " +- " << std::sqrt(xs.total().error) << " (pb)\n"; for(auto const & xs_type: xs) { os << " " << std::left << std::scientific < #include +#include #include "yaml-cpp/yaml.h" #include "HEJ/exceptions.hh" namespace HEJ::detail { namespace { std::vector param_as_strings(YAML::Node const & parameters){ using YAML::NodeType; switch(parameters.Type()){ case NodeType::Null: case NodeType::Undefined: return {}; case NodeType::Scalar: return {parameters.as()}; case 
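
One possible POSIX-only implementation of the is_regular() helper declared above, in the spirit of the header comment about avoiding std::filesystem; the actual src/filesystem.cc may differ in details such as error handling.

#include <sys/stat.h>

#include "HEJ/filesystem.hh"

namespace HEJ {
  bool is_regular(char const * filename) {
    struct stat buf;
    if (stat(filename, &buf) != 0) return false;  // e.g. file does not exist
    return S_ISREG(buf.st_mode);                  // true only for regular files
  }
}
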
NodeType::Sequence: { std::vector param_strings; for(auto && param: parameters){ param_strings.emplace_back(param.as()); } return param_strings; } case NodeType::Map: { std::vector param_strings; for(auto && param: parameters){ param_strings.emplace_back(param.first.as()); } return param_strings; } default:; } throw std::logic_error{"unreachable"}; } } // namespace std::unique_ptr EmptyAnalysis::create( YAML::Node const & parameters, LHEF::HEPRUP const & /*unused*/ ){ const auto param_strings = param_as_strings(parameters); if(! param_strings.empty()){ std::string error{"Unknown analysis parameter(s):"}; for(auto && p: param_strings) error += " " + p; throw unknown_option{error}; } return std::make_unique(); } void EmptyAnalysis::fill( Event const & /*res_event*/, Event const & /*FO_event*/ ){ // do nothing } bool EmptyAnalysis::pass_cuts( Event const & /*res_event*/, Event const & /*FO_event*/ ){ return true; } + void EmptyAnalysis::set_xs_scale(double /*xsscale*/){ + // do nothing + } + void EmptyAnalysis::finalise(){ // do nothing } } // namespace HEJ::detail namespace HEJ { std::unique_ptr EmptyAnalysis::create( YAML::Node const & parameters, LHEF::HEPRUP const & heprup ){ return detail::EmptyAnalysis::create(parameters, heprup); } } diff --git a/src/EventReader.cc b/src/EventReader.cc index 18080dc..6dd5189 100644 --- a/src/EventReader.cc +++ b/src/EventReader.cc @@ -1,77 +1,32 @@ /** * \authors The HEJ collaboration (see AUTHORS for details) - * \date 2019-2020 + * \date 2019-2022 * \copyright GPLv2 or later */ #include "HEJ/EventReader.hh" #include #include #include "LHEF/LHEF.h" #include "HEJ/ConfigFlags.hh" #include "HEJ/HDF5Reader.hh" #include "HEJ/LesHouchesReader.hh" #include "HEJ/Version.hh" -namespace { - enum class generator{ - HEJ, - HEJFOG, - Sherpa, - MG, - unknown - }; - - generator get_generator( - LHEF::HEPRUP const & heprup, std::string const & header - ){ - // try getting generator name from specific tag - if(!heprup.generators.empty()){ - std::string const & gen_name = heprup.generators.back().name; - if(gen_name == "HEJ" || gen_name == HEJ::Version::String()) - return generator::HEJ; - if(gen_name == "HEJ Fixed Order Generation") - return generator::HEJFOG; - if(gen_name == "MadGraph5_aMC@NLO") return generator::MG; - std::cerr << "Unknown LHE Generator " << gen_name - << " using default LHE interface.\n"; - return generator::unknown; - } - // The generator tag is not always used -> check by hand - if(header.find("generated with HEJ")!=std::string::npos) - return generator::HEJ; - if(header.find("# created by SHERPA")!=std::string::npos) - return generator::Sherpa; - if(header.find("")!=std::string::npos) - return generator::MG; - std::cerr<<"Could not determine LHE Generator using default LHE interface.\n"; - return generator::unknown; - } -} // namespace - namespace HEJ { std::unique_ptr make_reader(std::string const & filename) { try { auto reader{ std::make_unique(filename) }; - switch( get_generator(reader->heprup(), reader->header()) ){ - case generator::Sherpa: - return std::make_unique(filename); - case generator::HEJ: - case generator::HEJFOG: - case generator::MG: - //! 
@TODO we could directly fix the MG weights here similar to Sherpa - default: - return reader; - } + return reader; } catch(std::runtime_error&) { #ifdef HEJ_BUILD_WITH_HDF5 return std::make_unique(filename); #else throw; #endif } } } // namespace HEJ diff --git a/src/HDF5Writer.cc b/src/HDF5Writer.cc index 0e0b33b..06508c1 100644 --- a/src/HDF5Writer.cc +++ b/src/HDF5Writer.cc @@ -1,432 +1,451 @@ /** * \authors The HEJ collaboration (see AUTHORS for details) * \date 2019-2020 * \copyright GPLv2 or later */ #include "HEJ/HDF5Writer.hh" #include #include "LHEF/LHEF.h" #include "HEJ/ConfigFlags.hh" #ifdef HEJ_BUILD_WITH_HDF5 #include #include #include #include #include #include #include "highfive/H5File.hpp" #include "HEJ/Event.hh" #include "HEJ/event_types.hh" #else #include "HEJ/exceptions.hh" #endif #ifdef HEJ_BUILD_WITH_HDF5 namespace HEJ { using HighFive::File; using HighFive::DataSpace; namespace { using std::size_t; constexpr size_t CHUNK_SIZE = 1000; constexpr unsigned COMPRESSION_LEVEL = 3; size_t to_index(event_type::EventType const type){ return type==0?0:std::floor(std::log2(static_cast(type)))+1; } template void write_dataset(HighFive::Group & group, std::string const & name, T val) { using data_t = std::decay_t; group.createDataSet(name, DataSpace::From(val)).write(val); } template void write_dataset( HighFive::Group & group, std::string const & name, std::vector const & val ) { using data_t = std::decay_t; group.createDataSet(name, DataSpace::From(val)).write(val); } struct Cache { explicit Cache(size_t capacity): capacity{capacity} { nparticles.reserve(capacity); start.reserve(capacity); pid.reserve(capacity); weight.reserve(capacity); scale.reserve(capacity); fscale.reserve(capacity); rscale.reserve(capacity); aqed.reserve(capacity); aqcd.reserve(capacity); trials.reserve(capacity); npLO.reserve(capacity); npNLO.reserve(capacity); } void fill(Event const & ev) { const auto hepeup = to_HEPEUP(ev, nullptr); // HEJ event to get nice wrapper const auto num_partons = std::distance(ev.cbegin_partons(), ev.cend_partons()); assert(num_partons>0); // Number of partons for LO matching, HEJ requires at least 2 partons npLO.emplace_back(num_partons>1?num_partons-2:num_partons); // Number of real emissions in NLO, HEJ is LO -> -1 npNLO.emplace_back(-1); fill_event_params(hepeup); fill_event_particles(hepeup); } void fill_event_params(LHEF::HEPEUP const & ev) { nparticles.emplace_back(ev.NUP); start.emplace_back(particle_pos); pid.emplace_back(ev.IDPRUP); weight.emplace_back(ev.XWGTUP); scale.emplace_back(ev.SCALUP); fscale.emplace_back(ev.scales.muf); rscale.emplace_back(ev.scales.mur); aqed.emplace_back(ev.AQEDUP); aqcd.emplace_back(ev.AQCDUP); // set first trial=1 for first event // -> sum(trials) = 1 -> xs=sum(weights)/sum(trials) as in Sherpa if(particle_pos == 0){ trials.emplace_back(1.); } else { trials.emplace_back(0.); } particle_pos += ev.NUP; } void fill_event_particles(LHEF::HEPEUP const & ev) { id.insert(end(id), begin(ev.IDUP), end(ev.IDUP)); status.insert(end(status), begin(ev.ISTUP), end(ev.ISTUP)); lifetime.insert(end(lifetime), begin(ev.VTIMUP), end(ev.VTIMUP)); spin.insert(end(spin), begin(ev.SPINUP), end(ev.SPINUP)); for(int i = 0; i < ev.NUP; ++i) { mother1.emplace_back(ev.MOTHUP[i].first); mother2.emplace_back(ev.MOTHUP[i].second); color1.emplace_back(ev.ICOLUP[i].first); color2.emplace_back(ev.ICOLUP[i].second); px.emplace_back(ev.PUP[i][0]); py.emplace_back(ev.PUP[i][1]); pz.emplace_back(ev.PUP[i][2]); e.emplace_back(ev.PUP[i][3]); m.emplace_back(ev.PUP[i][4]); } } 
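
The trials convention set up in Cache::fill_event_params above (first event gets trials = 1, all others 0, so sum(trials) == 1) means a consumer of the HDF5 output can recover the cross section in the usual Sherpa-style way. A read-back sketch with HighFive; the file name "events.hdf5" is a placeholder.

#include <iostream>
#include <numeric>
#include <vector>

#include "highfive/H5File.hpp"

int main() {
  HighFive::File file{"events.hdf5", HighFive::File::ReadOnly};

  std::vector<double> weights, trials;
  file.getGroup("event").getDataSet("weight").read(weights);
  file.getGroup("event").getDataSet("trials").read(trials);

  const double sum_w = std::accumulate(weights.begin(), weights.end(), 0.);
  const double sum_t = std::accumulate(trials.begin(), trials.end(), 0.);
  std::cout << "sigma = " << sum_w / sum_t << "  (sum(trials) == 1 by construction)\n";
}
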
bool is_full() const { return nparticles.size() >= capacity; } void clear() { nparticles.clear(); start.clear(); pid.clear(); id.clear(); status.clear(); mother1.clear(); mother2.clear(); color1.clear(); color2.clear(); weight.clear(); scale.clear(); fscale.clear(); rscale.clear(); aqed.clear(); aqcd.clear(); trials.clear(); npLO.clear(); npNLO.clear(); px.clear(); py.clear(); pz.clear(); e.clear(); m.clear(); lifetime.clear(); spin.clear(); } size_t capacity; std::vector nparticles, start, pid, id, status, mother1, mother2, color1, color2, npLO, npNLO; std::vector weight, scale, fscale, rscale, aqed, aqcd, trials, px, py, pz, e, m, lifetime, spin; size_t particle_pos = 0; }; } // namespace struct HDF5Writer::HDF5WriterImpl: EventWriter{ File file; LHEF::HEPRUP heprup; Cache cache{CHUNK_SIZE}; size_t event_idx = 0; size_t particle_idx = 0; + double xs_scale = 1.; HDF5WriterImpl(std::string const & filename, LHEF::HEPRUP && hepr): file{filename, File::ReadWrite | File::Create | File::Truncate}, heprup{std::forward(hepr)} { // TODO: code duplication with Les Houches Writer const size_t max_number_types = to_index(event_type::last_type)+1; heprup.NPRUP = max_number_types; // ids of event types heprup.LPRUP.clear(); heprup.LPRUP.reserve(max_number_types); heprup.LPRUP.emplace_back(0); for(size_t i=event_type::first_type+1; i<=event_type::last_type; i*=2) { heprup.LPRUP.emplace_back(i); } heprup.XSECUP = std::vector(max_number_types, 0.); heprup.XERRUP = std::vector(max_number_types, 0.); heprup.XMAXUP = std::vector(max_number_types, 0.); write_init(); create_event_group(); create_particle_group(); } void write_init() { auto init = file.createGroup("init"); write_dataset(init, "PDFgroupA" , heprup.PDFGUP.first); write_dataset(init, "PDFgroupB" , heprup.PDFGUP.second); write_dataset(init, "PDFsetA" , heprup.PDFSUP.first); write_dataset(init, "PDFsetB" , heprup.PDFSUP.second); write_dataset(init, "beamA" , heprup.IDBMUP.first); write_dataset(init, "beamB" , heprup.IDBMUP.second); write_dataset(init, "energyA" , heprup.EBMUP.first); write_dataset(init, "energyB" , heprup.EBMUP.second); write_dataset(init, "numProcesses" , heprup.NPRUP); write_dataset(init, "weightingStrategy", heprup.IDWTUP); auto proc_info = file.createGroup("procInfo"); write_dataset(proc_info, "procId", heprup.LPRUP); } static HighFive::DataSetCreateProps const & hdf5_chunk() { static const auto props = [](){ HighFive::DataSetCreateProps props; props.add(HighFive::Chunking({CHUNK_SIZE})); props.add(HighFive::Deflate(COMPRESSION_LEVEL)); return props; }(); return props; } void create_event_group() { static const auto dim = DataSpace({0}, {DataSpace::UNLIMITED}); auto events = file.createGroup("event"); events.createDataSet("nparticles", dim, hdf5_chunk()); events.createDataSet("start", dim, hdf5_chunk()); events.createDataSet("pid", dim, hdf5_chunk()); events.createDataSet("weight", dim, hdf5_chunk()); events.createDataSet("scale", dim, hdf5_chunk()); events.createDataSet("fscale", dim, hdf5_chunk()); events.createDataSet("rscale", dim, hdf5_chunk()); events.createDataSet("aqed", dim, hdf5_chunk()); events.createDataSet("aqcd", dim, hdf5_chunk()); events.createDataSet("trials", dim, hdf5_chunk()); events.createDataSet("npLO", dim, hdf5_chunk()); events.createDataSet("npNLO", dim, hdf5_chunk()); } void resize_event_group(size_t new_size) { auto events = file.getGroup("event"); events.getDataSet("nparticles").resize({new_size}); events.getDataSet("start").resize({new_size}); events.getDataSet("pid").resize({new_size}); 
events.getDataSet("weight").resize({new_size}); events.getDataSet("scale").resize({new_size}); events.getDataSet("fscale").resize({new_size}); events.getDataSet("rscale").resize({new_size}); events.getDataSet("aqed").resize({new_size}); events.getDataSet("aqcd").resize({new_size}); events.getDataSet("trials").resize({new_size}); events.getDataSet("npLO").resize({new_size}); events.getDataSet("npNLO").resize({new_size}); } void create_particle_group() { static const auto dim = DataSpace({0}, {DataSpace::UNLIMITED}); auto particles = file.createGroup("particle"); particles.createDataSet("id", dim, hdf5_chunk()); particles.createDataSet("status", dim, hdf5_chunk()); particles.createDataSet("mother1", dim, hdf5_chunk()); particles.createDataSet("mother2", dim, hdf5_chunk()); particles.createDataSet("color1", dim, hdf5_chunk()); particles.createDataSet("color2", dim, hdf5_chunk()); particles.createDataSet("px", dim, hdf5_chunk()); particles.createDataSet("py", dim, hdf5_chunk()); particles.createDataSet("pz", dim, hdf5_chunk()); particles.createDataSet("e", dim, hdf5_chunk()); particles.createDataSet("m", dim, hdf5_chunk()); particles.createDataSet("lifetime", dim, hdf5_chunk()); particles.createDataSet("spin", dim, hdf5_chunk()); } void resize_particle_group(size_t new_size) { auto particles = file.getGroup("particle"); particles.getDataSet("id").resize({new_size}); particles.getDataSet("status").resize({new_size}); particles.getDataSet("mother1").resize({new_size}); particles.getDataSet("mother2").resize({new_size}); particles.getDataSet("color1").resize({new_size}); particles.getDataSet("color2").resize({new_size}); particles.getDataSet("px").resize({new_size}); particles.getDataSet("py").resize({new_size}); particles.getDataSet("pz").resize({new_size}); particles.getDataSet("e").resize({new_size}); particles.getDataSet("m").resize({new_size}); particles.getDataSet("lifetime").resize({new_size}); particles.getDataSet("spin").resize({new_size}); } void write(Event const & ev) override { cache.fill(ev); if(cache.is_full()) { dump_cache(); } const double wt = ev.central().weight; const size_t idx = to_index(ev.type()); heprup.XSECUP[idx] += wt; heprup.XERRUP[idx] += wt*wt; if(wt > heprup.XMAXUP[idx]){ heprup.XMAXUP[idx] = wt; } } + void set_xs_scale(const double scale) override { + xs_scale = scale; + } + void dump_cache() { write_event_params(); write_event_particles(); cache.clear(); } void write_event_params() { auto events = file.getGroup("event"); // choose arbitrary dataset to find size const auto dataset = events.getDataSet("nparticles"); const size_t size = dataset.getSpace().getDimensions().front(); resize_event_group(size + cache.nparticles.size()); // NOLINTNEXTLINE #define WRITE_FROM_CACHE(GROUP, PROPERTY) \ GROUP.getDataSet(#PROPERTY).select({size}, {cache.PROPERTY.size()}).write(cache.PROPERTY) WRITE_FROM_CACHE(events, nparticles); WRITE_FROM_CACHE(events, start); WRITE_FROM_CACHE(events, pid); WRITE_FROM_CACHE(events, weight); WRITE_FROM_CACHE(events, scale); WRITE_FROM_CACHE(events, fscale); WRITE_FROM_CACHE(events, rscale); WRITE_FROM_CACHE(events, aqed); WRITE_FROM_CACHE(events, aqcd); WRITE_FROM_CACHE(events, trials); WRITE_FROM_CACHE(events, npLO); WRITE_FROM_CACHE(events, npNLO); } void write_event_particles() { auto particles = file.getGroup("particle"); // choose arbitrary dataset to find size const auto dataset = particles.getDataSet("id"); const size_t size = dataset.getSpace().getDimensions().front(); resize_particle_group(size + cache.id.size()); 
WRITE_FROM_CACHE(particles, id); WRITE_FROM_CACHE(particles, status); WRITE_FROM_CACHE(particles, lifetime); WRITE_FROM_CACHE(particles, spin); WRITE_FROM_CACHE(particles, mother1); WRITE_FROM_CACHE(particles, mother2); WRITE_FROM_CACHE(particles, color1); WRITE_FROM_CACHE(particles, color2); WRITE_FROM_CACHE(particles, px); WRITE_FROM_CACHE(particles, py); WRITE_FROM_CACHE(particles, pz); WRITE_FROM_CACHE(particles, e); WRITE_FROM_CACHE(particles, m); } #undef WRITE_FROM_CACHE void finish() override { if(finished()) throw std::ios_base::failure("HDF5Writer writer already finished."); EventWriter::finish(); dump_cache(); auto proc_info = file.getGroup("procInfo"); + for(auto & xs: heprup.XSECUP) { + xs *= xs_scale; + } write_dataset(proc_info, "xSection", heprup.XSECUP); + for(auto & xs_err: heprup.XERRUP) { + xs_err = xs_scale*std::sqrt(xs_err); + } write_dataset(proc_info, "error", heprup.XERRUP); write_dataset(proc_info, "unitWeight", heprup.XMAXUP); file.flush(); } ~HDF5WriterImpl() override { finish_or_abort(this, "HDF5Writer"); } }; HDF5Writer::HDF5Writer(std::string const & filename, LHEF::HEPRUP heprup): impl_{std::make_unique( filename, std::move(heprup))} {} void HDF5Writer::write(Event const & ev){ impl_->write(ev); } + void HDF5Writer::set_xs_scale(const double scale) { + impl_->set_xs_scale(scale); + } + void HDF5Writer::finish(){ impl_->finish(); } } // namespace HEJ #else // no HDF5 support namespace HEJ { struct HDF5Writer::HDF5WriterImpl{}; HDF5Writer::HDF5Writer(std::string const & /*file*/, LHEF::HEPRUP /*heprup*/){ throw std::invalid_argument{ "Failed to create HDF5 writer: " "HEJ 2 was built without HDF5 support" }; } void HDF5Writer::write(Event const & /*ev*/){ assert(false); } + void HDF5Writer::set_xs_scale(double /* scale */) { + assert(false); + } + void HDF5Writer::finish(){ assert(false); } } #endif namespace HEJ { HDF5Writer::~HDF5Writer() = default; } diff --git a/src/HepMC2Interface.cc b/src/HepMC2Interface.cc index edba099..38307ec 100644 --- a/src/HepMC2Interface.cc +++ b/src/HepMC2Interface.cc @@ -1,165 +1,172 @@ /** * \authors The HEJ collaboration (see AUTHORS for details) * \date 2019-2020 * \copyright GPLv2 or later */ #include "HEJ/HepMC2Interface.hh" #include "HEJ/ConfigFlags.hh" #include "HEJ/exceptions.hh" #ifdef HEJ_BUILD_WITH_HepMC2 #include #include #include #include "LHEF/LHEF.h" #include "HepMC/GenCrossSection.h" #include "HepMC/GenEvent.h" #include "HepMC/GenParticle.h" #include "HepMC/GenVertex.h" #include "HepMC/SimpleVector.h" #include "HepMC/Units.h" #include "HEJ/Event.hh" #include "HEJ/Particle.hh" #include "HEJ/detail/HepMCInterface_common.hh" #else // no HepMC2 #include "HEJ/utility.hh" #endif #ifdef HEJ_BUILD_WITH_HepMC2 namespace HEJ { namespace detail_HepMC { template<> struct HepMCVersion<2> { using GenEvent = HepMC::GenEvent; using Beam = std::array; }; template<> auto make_particle_ptr<2> ( Particle const & sp, int status ) { return new HepMC::GenParticle( to_FourVector(sp), static_cast (sp.type), status ); } template<> auto make_vx_ptr<2>() { return new HepMC::GenVertex(); } } // namespace detail_HepMC HepMC2Interface::HepMC2Interface(LHEF::HEPRUP const & heprup): beam_particle_{static_cast(heprup.IDBMUP.first), static_cast(heprup.IDBMUP.second)}, beam_energy_{heprup.EBMUP.first, heprup.EBMUP.second}, event_count_(0.), tot_weight_(0.), tot_weight2_(0.) 
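
Spelled out, with \f$s\f$ the ratio passed to set_xs_scale() and \f$w_i\f$ the weights accumulated per process id in write(), the entries written at finish() are
\f[
  \mathrm{XSECUP} = s\,\sum_i w_i ,\qquad
  \mathrm{XERRUP} = s\,\sqrt{\textstyle\sum_i w_i^2},
\f]
i.e. XERRUP is converted from the accumulated squared weights to a standard deviation only at this point, and the relative uncertainty is unaffected by the rescaling.
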
{} HepMC::GenCrossSection HepMC2Interface::cross_section() const { HepMC::GenCrossSection xs; - xs.set_cross_section(tot_weight_, std::sqrt(tot_weight2_)); + xs.set_cross_section( + xs_scale_ * tot_weight_, + xs_scale_ * std::sqrt(tot_weight2_) + ); return xs; } + void HepMC2Interface::set_xs_scale(const double scale) { + xs_scale_ = scale; + } + HepMC::GenEvent HepMC2Interface::init_event(Event const & event) const { const std::array beam { new HepMC::GenParticle( HepMC::FourVector(0,0,-beam_energy_[0],beam_energy_[0]), beam_particle_[0], detail_HepMC::Status::beam ), new HepMC::GenParticle( HepMC::FourVector(0,0, beam_energy_[1],beam_energy_[1]), beam_particle_[1], detail_HepMC::Status::beam ) }; auto hepmc_ev{ detail_HepMC::HepMC_init_kinematics<2>( event, beam, HepMC::GenEvent{ HepMC::Units::GEV, HepMC::Units::MM } ) }; hepmc_ev.weights().push_back( event.central().weight ); for(auto const & var: event.variations()){ hepmc_ev.weights().push_back( var.weight ); // no weight name for HepMC2 since rivet3 seem to mix them up // (could be added via hepmc_ev.weights()[name]=weight) } return hepmc_ev; } void HepMC2Interface::set_central( HepMC::GenEvent & out_ev, Event const & event, int const weight_index ){ EventParameters event_param; if(weight_index < 0) event_param = event.central(); else if ( static_cast(weight_index) < event.variations().size()) event_param = event.variations(weight_index); else throw std::invalid_argument{ "HepMC2Interface tried to access a weight outside of the variation range." }; const double wt = event_param.weight; tot_weight_ += wt; tot_weight2_ += wt * wt; ++event_count_; // central always on first assert(out_ev.weights().size() == event.variations().size()+1); out_ev.weights()[0] = wt; out_ev.set_cross_section( cross_section() ); out_ev.set_signal_process_id(event.type()); out_ev.set_event_scale(event_param.mur); out_ev.set_event_number(event_count_); /// @TODO add alphaQCD (need function) and alphaQED /// @TODO output pdf (currently not avaiable from event alone) } HepMC::GenEvent HepMC2Interface::operator()(Event const & event, int const weight_index ){ HepMC::GenEvent out_ev(init_event(event)); set_central(out_ev, event, weight_index); return out_ev; } } // namespace HEJ #else // no HepMC2 => empty class namespace HepMC { class GenEvent {}; class GenCrossSection {}; } namespace HEJ { HepMC2Interface::HepMC2Interface(LHEF::HEPRUP const & /*heprup*/){ ignore(beam_particle_,beam_energy_,event_count_,tot_weight_,tot_weight2_); throw std::invalid_argument( "Failed to create HepMC2Interface: " "HEJ 2 was built without HepMC2 support" ); } HepMC::GenEvent HepMC2Interface::operator()( Event const & /*event*/, int /*weight_index*/ ){return HepMC::GenEvent();} HepMC::GenEvent HepMC2Interface::init_event(Event const & /*event*/) const {return HepMC::GenEvent();} void HepMC2Interface::set_central( HepMC::GenEvent & /*out_ev*/, Event const & /*event*/, int /*weight_index*/ ){} HepMC::GenCrossSection HepMC2Interface::cross_section() const {return HepMC::GenCrossSection();} } #endif diff --git a/src/HepMC2Writer.cc b/src/HepMC2Writer.cc index 04f77e0..d001305 100644 --- a/src/HepMC2Writer.cc +++ b/src/HepMC2Writer.cc @@ -1,88 +1,101 @@ /** * \authors The HEJ collaboration (see AUTHORS for details) * \date 2019-2020 * \copyright GPLv2 or later */ #include "HEJ/HepMC2Writer.hh" #include "LHEF/LHEF.h" #include "HEJ/ConfigFlags.hh" #ifdef HEJ_BUILD_WITH_HepMC2 #include #include "HepMC/IO_GenEvent.h" #include "HEJ/HepMC2Interface.hh" #else #include #include "HEJ/exceptions.hh" 
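
Since set_central() attaches a GenCrossSection to every event, each written event carries the running estimate
\f[
  \sigma_{(n)} = s\,\sum_{i=1}^{n} w_i ,\qquad
  \delta_{(n)} = s\,\sqrt{\textstyle\sum_{i=1}^{n} w_i^2},
\f]
where \f$s\f$ is the value of xs_scale_ at the time of writing (1 unless set_xs_scale() has been called before). Only the last event carries the final cross section of the run.
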
#endif #ifdef HEJ_BUILD_WITH_HepMC2 namespace HEJ { struct HepMC2Writer::HepMC2WriterImpl{ HepMC2Interface hepmc_; HepMC2WriterImpl & operator=(HepMC2WriterImpl const & other) = delete; HepMC2WriterImpl(HepMC2WriterImpl const & other) = delete; HepMC2WriterImpl & operator=(HepMC2WriterImpl && other) = delete; HepMC2WriterImpl(HepMC2WriterImpl && other) = delete; ~HepMC2WriterImpl() = default; HepMC::IO_GenEvent writer_; HepMC2WriterImpl( std::string const & file, LHEF::HEPRUP && heprup ): hepmc_(heprup), writer_{file} {} void write(Event const & ev){ auto out_ev = hepmc_(ev); writer_.write_event(&out_ev); } + + void set_xs_scale(const double scale) { + hepmc_.set_xs_scale(scale); + } + }; HepMC2Writer::HepMC2Writer(std::string const & file, LHEF::HEPRUP heprup): impl_{std::make_unique(file, std::move(heprup))} {} + void HepMC2Writer::set_xs_scale(const double scale) { + impl_->set_xs_scale(scale); + } + void HepMC2Writer::write(Event const & ev){ impl_->write(ev); } } // namespace HEJ #else // no HepMC2 namespace HEJ { struct HepMC2Writer::HepMC2WriterImpl{}; HepMC2Writer::HepMC2Writer( std::string const & /*file*/, LHEF::HEPRUP /*heprup*/ ){ throw std::invalid_argument( "Failed to create HepMC writer: " "HEJ 2 was built without HepMC2 support" ); } void HepMC2Writer::write(Event const & /*ev*/){ assert(false); } + void HepMC2Writer::set_xs_scale(double /* scale */) { + assert(false); + } + } #endif namespace HEJ { HepMC2Writer::~HepMC2Writer() = default; } diff --git a/src/HepMC3Interface.cc b/src/HepMC3Interface.cc index d6c2714..48a7dd2 100644 --- a/src/HepMC3Interface.cc +++ b/src/HepMC3Interface.cc @@ -1,226 +1,233 @@ /** * \authors The HEJ collaboration (see AUTHORS for details) * \date 2019-2020 * \copyright GPLv2 or later */ #include "HEJ/HepMC3Interface.hh" #include "HEJ/ConfigFlags.hh" #include "HEJ/exceptions.hh" // include before HepMC3 to override include of "HepMC3/LHEF.h" // (theoretically both files should be the same) #include "LHEF/LHEF.h" #ifdef HEJ_BUILD_WITH_HepMC3 #include #include #include #include #include #include "HepMC3/Attribute.h" #include "HepMC3/FourVector.h" #include "HepMC3/GenCrossSection.h" #include "HepMC3/GenCrossSection_fwd.h" #include "HepMC3/GenEvent.h" #include "HepMC3/GenParticle.h" #include "HepMC3/GenParticle_fwd.h" #include "HepMC3/GenRunInfo.h" #include "HepMC3/GenVertex.h" #include "HepMC3/LHEFAttributes.h" #include "HepMC3/Units.h" #include "HEJ/Event.hh" #include "HEJ/Parameters.hh" #include "HEJ/Particle.hh" #include "HEJ/detail/HepMCInterface_common.hh" #else // no HepMC3 #include "HEJ/utility.hh" #endif #ifdef HEJ_BUILD_WITH_HepMC3 namespace HEJ { namespace detail_HepMC { template<> struct HepMCVersion<3> { using GenEvent = HepMC3::GenEvent; using Beam = std::array; }; template<> auto make_particle_ptr<3> ( Particle const & sp, int status ) { return HepMC3::make_shared( to_FourVector(sp), static_cast (sp.type), status ); } template<> auto make_vx_ptr<3>() { return HepMC3::make_shared(); } } // namespace detail_HepMC namespace { void reset_weight_info(LHEF::HEPRUP & heprup){ heprup.IDWTUP = 2; // use placeholders for unknown init block values // we can overwrite them after processing all events heprup.XSECUP = {0.}; heprup.XERRUP = {0.}; heprup.XMAXUP = {0.}; } HepMC3::shared_ptr init_runinfo(LHEF::HEPRUP heprup){ reset_weight_info(heprup); auto runinfo{ HepMC3::make_shared() }; auto hepr{ HepMC3::make_shared() }; hepr->heprup = std::move(heprup); runinfo->add_attribute(std::string("HEPRUP"), hepr); for(auto const & gen: 
hepr->heprup.generators){ runinfo->tools().emplace_back( HepMC3::GenRunInfo::ToolInfo{gen.name, gen.version, gen.contents} ); } return runinfo; } std::vector get_weight_names(Event const & ev){ std::vector names; names.reserve(ev.variations().size()+1); // +1 from central names.emplace_back(""); // rivet assumes central band to have no name for(auto const & var : ev.variations()){ if(var.description){ names.emplace_back( to_simple_string(*var.description) ); } else { names.emplace_back( "" ); } } assert(names.size() == ev.variations().size()+1); return names; } } // namespace HepMC3Interface::HepMC3Interface(LHEF::HEPRUP heprup): beam_particle_{static_cast(heprup.IDBMUP.first), static_cast(heprup.IDBMUP.second)}, beam_energy_{heprup.EBMUP.first, heprup.EBMUP.second}, run_info_{ init_runinfo(std::move(heprup)) }, event_count_(0.), tot_weight_(0.), tot_weight2_(0.), xs_{std::make_shared()} { } HepMC3::GenEvent HepMC3Interface::init_event(Event const & event) const { const std::array beam { HepMC3::make_shared( HepMC3::FourVector(0,0,-beam_energy_[0],beam_energy_[0]), beam_particle_[0], detail_HepMC::Status::beam ), HepMC3::make_shared( HepMC3::FourVector(0,0, beam_energy_[1],beam_energy_[1]), beam_particle_[1], detail_HepMC::Status::beam ) }; auto hepmc_ev{ detail_HepMC::HepMC_init_kinematics<3>( event, beam, HepMC3::GenEvent{ HepMC3::Units::GEV, HepMC3::Units::MM } ) }; // set up run specific informations if( run_info_->weight_names().size() != event.variations().size()+1 ){ run_info_->set_weight_names( get_weight_names(event) ); } // order matters: weights in hepmc_ev initialised when registering run_info hepmc_ev.set_run_info(run_info_); assert(hepmc_ev.weights().size() == event.variations().size()+1); for(size_t i=0; i(weight_index) < event.variations().size()) event_param = event.variations(weight_index); else throw std::invalid_argument{ "HepMC3Interface tried to access a weight outside of the variation range." }; const double wt = event_param.weight; tot_weight_ += wt; tot_weight2_ += wt * wt; ++event_count_; // central always on first assert(out_ev.weights().size() == event.variations().size()+1); out_ev.weights()[0] = wt; // out_ev can be setup with a different central scale -> save xs manually out_ev.set_cross_section(xs_); assert(out_ev.cross_section() && out_ev.cross_section() == xs_); // overwrites all previously set xs ... 
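      // In terms of the accumulated central weights w_i, the call below
      // reports the running estimate
      //   sigma  ~  xs_scale_ * sum_i w_i ,
      //   error  ~  xs_scale_ * sqrt( sum_i w_i^2 ) .
      // Presumably xs_scale_ defaults to 1 (its declaration is not part of
      // this hunk), so plain HEJ input keeps the previous unscaled behaviour;
      // for Sherpa input the main program passes
      // LesHouchesReader::scalefactor() through set_xs_scale() to undo the
      // per-event "trials" normalisation.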
- xs_->set_cross_section(tot_weight_,std::sqrt(tot_weight2_)); + xs_->set_cross_section( + xs_scale_ * tot_weight_, + xs_scale_ * std::sqrt(tot_weight2_) + ); out_ev.set_event_number(event_count_); /// @TODO add number of attempted events xs_->set_accepted_events(event_count_); /// @TODO add alphaQCD (need function) and alphaQED /// @TODO output pdf (currently not avaiable from event alone) } HepMC3::GenEvent HepMC3Interface::operator()( Event const & event, int const weight_index ){ HepMC3::GenEvent out_ev(init_event(event)); set_central(out_ev, event, weight_index); return out_ev; } + void HepMC3Interface::set_xs_scale(const double scale) { + xs_scale_ = scale; + } + } // namespace HEJ #else // no HepMC3 => empty class namespace HepMC3 { class GenEvent {}; class GenCrossSection {}; class GenRunInfo {}; } namespace HEJ { HepMC3Interface::HepMC3Interface(LHEF::HEPRUP /*heprup*/){ ignore(beam_particle_,beam_energy_,event_count_,tot_weight_,tot_weight2_); throw std::invalid_argument( "Failed to create HepMC3Interface: " "HEJ 2 was built without HepMC3 support" ); } HepMC3::GenEvent HepMC3Interface::operator()( Event const & /*event*/, int /*weight_index*/ ){return HepMC3::GenEvent();} HepMC3::GenEvent HepMC3Interface::init_event(Event const & /*event*/) const {return HepMC3::GenEvent();} void HepMC3Interface::set_central( HepMC3::GenEvent & /*out_ev*/, Event const & /*event*/, int /*weight_index*/ ){} } #endif namespace HEJ { HepMC3Interface::~HepMC3Interface() = default; } diff --git a/src/HepMC3Writer.cc b/src/HepMC3Writer.cc index 5059cbf..18b7f9c 100644 --- a/src/HepMC3Writer.cc +++ b/src/HepMC3Writer.cc @@ -1,111 +1,124 @@ /** * \authors The HEJ collaboration (see AUTHORS for details) * \date 2019-2020 * \copyright GPLv2 or later */ #include "HEJ/HepMC3Writer.hh" #include "LHEF/LHEF.h" #include "HEJ/ConfigFlags.hh" #include "HEJ/exceptions.hh" #ifdef HEJ_BUILD_WITH_HepMC3 #include #include "HepMC3/Attribute.h" #include "HepMC3/WriterAscii.h" #include "HEJ/HepMC3Interface.hh" #else #include #endif #ifdef HEJ_BUILD_WITH_HepMC3 namespace HEJ { struct HepMC3Writer::HepMC3WriterImpl: EventWriter{ HepMC3Interface HepMC3_; std::unique_ptr writer_; std::string const file_; HepMC3WriterImpl( std::string file, LHEF::HEPRUP && heprup ): HepMC3_{std::forward(heprup)}, file_{std::move(file)} {} void init_writer(){ writer_ = std::make_unique(file_, HepMC3_.run_info()); } void finish() override { if(finished()) throw std::ios_base::failure("HepMC3 writer already finished."); EventWriter::finish(); if(!writer_) // make sure that we always write something init_writer(); writer_->close(); } void write(Event const & ev) override { auto out_ev = HepMC3_(ev); //! 
weight names are only available after first event if(!writer_) init_writer(); writer_->write_event(out_ev); } + void set_xs_scale(const double scale) override { + HepMC3_.set_xs_scale(scale); + } + ~HepMC3WriterImpl() override { finish_or_abort(this, "HepMC3Writer"); } }; HepMC3Writer::HepMC3Writer(std::string const & file, LHEF::HEPRUP heprup): impl_{ std::make_unique(file, std::move(heprup)) } {} void HepMC3Writer::write(Event const & ev){ impl_->write(ev); } + void HepMC3Writer::set_xs_scale(const double scale) { + impl_->set_xs_scale(scale); + } + void HepMC3Writer::finish(){ impl_->finish(); } } // namespace HEJ #else // no HepMC3 namespace HEJ { struct HepMC3Writer::HepMC3WriterImpl{}; HepMC3Writer::HepMC3Writer( std::string const & /*file*/, LHEF::HEPRUP /*heprup*/ ){ throw std::invalid_argument( "Failed to create HepMC3 writer: " "HEJ 2 was built without HepMC3 support" ); } void HepMC3Writer::write(Event const & /*ev*/){ assert(false); } + + void HepMC3Writer::set_xs_scale(double /* scale */) { + assert(false); + } + void HepMC3Writer::finish(){ assert(false); } } #endif namespace HEJ { HepMC3Writer::~HepMC3Writer() = default; } diff --git a/src/LesHouchesReader.cc b/src/LesHouchesReader.cc index 4c41db4..cde2908 100644 --- a/src/LesHouchesReader.cc +++ b/src/LesHouchesReader.cc @@ -1,36 +1,81 @@ /** * \authors The HEJ collaboration (see AUTHORS for details) - * \date 2020 + * \date 2022 * \copyright GPLv2 or later */ #include "HEJ/LesHouchesReader.hh" +#include "HEJ/Version.hh" #include namespace HEJ { - SherpaLHEReader::SherpaLHEReader(std::string const & filename): - LesHouchesReader{filename}, - num_trials_{0.}, num_events_{0} - { - LesHouchesReader tmp_reader{filename}; - reader().heprup.XSECUP = std::vector{0}; - while(tmp_reader.read_event()){ - ++num_events_; - num_trials_ += std::stod(tmp_reader.hepeup().attributes.at("trials")); - reader().heprup.XSECUP.front() += tmp_reader.hepeup().XWGTUP; + LesHouchesReader::Generator LesHouchesReader::get_generator( + LHEF::HEPRUP const & heprup, std::string const & header + ){ + // try getting generator name from specific tag + if(!heprup.generators.empty()){ + std::string const & gen_name = heprup.generators.back().name; + if(gen_name == "HEJ" || gen_name == HEJ::Version::String()) + return Generator::HEJ; + if(gen_name == "HEJ Fixed Order Generation") + return Generator::HEJFOG; + if(gen_name == "MadGraph5_aMC@NLO") return Generator::MG; + std::cerr << "Unknown LHE Generator " << gen_name + << " using default LHE interface.\n"; + return Generator::unknown; } - reader().heprup.XSECUP.front() /= num_trials_; + // The generator tag is not always used -> check by hand + if(header.find("generated with HEJ")!=std::string::npos) + return Generator::HEJ; + if(header.find("# created by SHERPA")!=std::string::npos) + return Generator::Sherpa; + if(header.find("")!=std::string::npos) + return Generator::MG; + std::cerr<<"Could not determine LHE Generator using default LHE interface.\n"; + return Generator::unknown; + } + + + LesHouchesReader::LesHouchesReader( + std::string const & filename + ): + stream_{filename}, + reader_{stream_}, + num_trials_(reader_.heprup.NPRUP, 0), + generator_{get_generator(reader_.heprup, reader_.headerBlock)}, + // calculate cross section if it was not set to > 0. 
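    // Clarifying note: with these members read_event() keeps a per-process
    // cross section estimate,
    //   Sherpa input : XSECUP[p] = sum_i w_i / sum_i trials_i
    //                  (running average, always recomputed from the "trials"
    //                   event attribute),
    //   other input  : XSECUP[p] = sum_i w_i, but only when calculate_XSECUP_
    //                  is set, i.e. the init block carried no usable value.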
+ calculate_XSECUP_{reader_.heprup.XSECUP.front() <= 0.} + { + // always use the newest LHE version + reader_.heprup.version = LHEF::HEPRUP().version; + + reader_.heprup.XSECUP.resize(reader_.heprup.NPRUP); + // For IDWTUP == 1 or 4 we assume avg(weight)=xs // With the modified weights we have in Sherpa sum(weight)=xs // -> overwrite IDWTUP to "something neutral" - reader().heprup.IDWTUP = reader().heprup.IDWTUP>0?3:-3; + if(generator_ == Generator::Sherpa) { + reader_.heprup.IDWTUP = reader_.heprup.IDWTUP>0?3:-3; + } + } - bool SherpaLHEReader::read_event() { - if(!LesHouchesReader::read_event()) return false; - reader().hepeup.XWGTUP/=num_trials_; - for(auto & wt: reader().hepeup.weights) - wt.first/=num_trials_; + bool LesHouchesReader::read_event() { + if(!reader_.readEvent()) return false; + + int nprup=reader_.heprup.NPRUP; + int position=std::max(1,std::min(hepeup().IDPRUP,nprup))-1; // 0..nprup-1 + + if (generator_ == Generator::Sherpa) { + reader_.heprup.XSECUP.at(position) *= num_trials_.at(position); + num_trials_.at(position) += + std::stod(hepeup().attributes.at("trials")); + reader_.heprup.XSECUP.at(position) += hepeup().XWGTUP; + reader_.heprup.XSECUP.at(position) /= + num_trials_.at(position); + } else if (LesHouchesReader::calculate_XSECUP()) { + reader_.heprup.XSECUP.at(position) += hepeup().XWGTUP; + } return true; } } // namespace HEJ diff --git a/src/LesHouchesWriter.cc b/src/LesHouchesWriter.cc index 7b653b8..5631e41 100644 --- a/src/LesHouchesWriter.cc +++ b/src/LesHouchesWriter.cc @@ -1,142 +1,154 @@ /** * \authors The HEJ collaboration (see AUTHORS for details) * \date 2019-2020 * \copyright GPLv2 or later */ #include "HEJ/LesHouchesWriter.hh" #include #include #include #include #include #include "HEJ/Event.hh" #include "HEJ/Parameters.hh" #include "HEJ/event_types.hh" #include "HEJ/exceptions.hh" #include "HEJ/utility.hh" namespace HEJ { namespace { std::size_t to_index(event_type::EventType const type){ return type==0?0:1+std::floor(std::log2(static_cast(type))); } } LesHouchesWriter::LesHouchesWriter( std::string const & file, LHEF::HEPRUP heprup ): out_{file, std::fstream::in | std::fstream::out | std::fstream::trunc}, writer_{std::make_unique(out_)} { if(! out_.is_open()){ throw std::ios_base::failure("Failed to open " + file); }; // scientific style is needed to allow rewriting the init block out_ << std::scientific; writer_->heprup = std::move(heprup); // lhe Standard: IDWTUP (negative => weights = +/-) // IDWTUP: HEJ -> SHG/Pythia/next program // 1: weighted->unweighted, xs = mean(weight), XMAXUP given // 2: weighted->unweighted, xs = XSECUP, XMAXUP given // 3: unweighted (weight=+1)->unweighted, no additional information // 4: weighted->weighted, xs = mean(weight) // // None of these codes actually match what we want: // 1 and 4 require xs = mean(weight), which is impossible until after generation // 2 tells the SHG to unweight our events, which is wasteful // 3 claims we produce unweighted events, which is both wasteful _and_ // impossible until after generation (we don't know the maximum weight before) // // For the time being, we choose -3. If the consumer (like Pythia) assumes // weight=+1, the final weights have to be corrected by multiplying with // the original weight we provided. We are also often use NLO-PDFs which can // give negative weights, hence the native IDWTUP. 
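    // Illustration of the comment above (assuming a consumer that treats
    // IDWTUP = -3 as "weight = +/-1"): the physical weight of each showered
    // event has to be restored as
    //   w_phys = w_consumer * XWGTUP ,
    // i.e. by multiplying the consumer's unit weight with the weight HEJ
    // wrote out; otherwise the summed cross section no longer matches the
    // per-process XSECUP accumulated in write() below.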
// writer_->heprup.IDWTUP = -3; // always use the newest LHE version // Pythia only saves weights (hepeup.XWGTUP) for version >=2 writer_->heprup.version = LHEF::HEPRUP().version; const std::size_t max_number_types = to_index(event_type::last_type)+1; writer_->heprup.NPRUP = max_number_types; // ids of event types writer_->heprup.LPRUP.clear(); writer_->heprup.LPRUP.reserve(max_number_types); writer_->heprup.LPRUP.emplace_back(0); for(auto i=event_type::first_type+1; i<=event_type::last_type; i*=2) writer_->heprup.LPRUP.emplace_back(i); // use placeholders for unknown init block values // we can overwrite them after processing all events writer_->heprup.XSECUP = std::vector(max_number_types, 0.); writer_->heprup.XERRUP = std::vector(max_number_types, 0.); writer_->heprup.XMAXUP = std::vector(max_number_types, 0.); write_init(); } void LesHouchesWriter::write(Event const & ev){ if(finished()) { throw std::logic_error{ "Tried to write a Les Houches event after calling `finish`" }; } assert(writer_ && out_.is_open()); const double wt = ev.central().weight; writer_->hepeup = to_HEPEUP(ev, &heprup()); writer_->writeEvent(); assert(heprup().XSECUP.size() > to_index(ev.type())); heprup().XSECUP[to_index(ev.type())] += wt; heprup().XERRUP[to_index(ev.type())] += wt*wt; if(wt > heprup().XMAXUP[to_index(ev.type())]){ heprup().XMAXUP[to_index(ev.type())] = wt; } } + void LesHouchesWriter::set_xs_scale(const double scale) { + xs_scale_ = scale; + } + // this function is called after overwritting the Les Houches init block // assert that we have overwritten *exactly* the init block, // i.e. we are at the end of the file or an intact event block is next void assert_next_event_intact(std::istream & out){ ignore(out); // suppress compiler warnings if not in debug mode #ifndef NDEBUG std::string line; getline(out, line); assert(out.eof() || line.rfind("" << std::endl; + rewrite_init_and_close(); } LesHouchesWriter::~LesHouchesWriter(){ finish_or_abort(this, "LesHouchesWriter"); } } // namespace HEJ diff --git a/src/RivetAnalysis.cc b/src/RivetAnalysis.cc index 68cf6c6..6e148ec 100644 --- a/src/RivetAnalysis.cc +++ b/src/RivetAnalysis.cc @@ -1,191 +1,199 @@ /** * \authors The HEJ collaboration (see AUTHORS for details) * \date 2019-2020 * \copyright GPLv2 or later */ #include "HEJ/RivetAnalysis.hh" #include "HEJ/ConfigFlags.hh" #include "HEJ/utility.hh" #ifdef HEJ_BUILD_WITH_RIVET #include #include #include #include "yaml-cpp/yaml.h" #include "Rivet/AnalysisHandler.hh" #include "Rivet/Config/RivetConfig.hh" #ifdef RIVET_ENABLE_HEPMC_3 #include "HepMC3/GenEvent.h" #include "HEJ/HepMC3Interface.hh" #else // rivet with HepMC 2 #include "HEJ/HepMC2Interface.hh" #include "HepMC/GenEvent.h" #endif // RIVET_ENABLE_HEPMC_3 #include "HEJ/Event.hh" #include "HEJ/exceptions.hh" #endif namespace HEJ { std::unique_ptr RivetAnalysis::create( YAML::Node const & config, LHEF::HEPRUP const & heprup ){ return std::make_unique(config, heprup); } } #ifdef HEJ_BUILD_WITH_RIVET namespace HEJ { #ifdef RIVET_ENABLE_HEPMC_3 using HepMCInterface=HepMC3Interface; #else using HepMCInterface=HepMC2Interface; #endif struct RivetAnalysis::rivet_info { rivet_info(std::unique_ptr && h, std::string && s, HepMCInterface && m): handler{std::move(h)}, name{std::move(s)}, hepmc{std::move(m)} {} // AnalysisHandler doesn't allow a copy constructor -> use ptr std::unique_ptr handler; std::string name; HepMCInterface hepmc; }; RivetAnalysis::RivetAnalysis(YAML::Node const & config, LHEF::HEPRUP heprup ): heprup_{std::move(heprup)}, 
first_event_(true) { // assert that only supported options are provided if(!config.IsMap()) throw invalid_type{"Rivet config has to be a map!"}; for(auto const & entry: config){ const auto name = entry.first.as(); if(name != "output" && name != "rivet") throw unknown_option{"Unknown option \'"+name+"\' provided to rivet."}; } // get output name if(!config["output"]) throw std::invalid_argument{ "No output was provided to rivet. " "Either give an output or deactivate rivet." }; if(!config["output"].IsScalar()) throw invalid_type{ "Output for rivet must be a scalar." }; output_name_ = config["output"].as(); // read in analyses auto const & name_node = config["rivet"]; switch(name_node.Type()){ case YAML::NodeType::Scalar: analyses_names_.push_back(name_node.as()); break; case YAML::NodeType::Sequence: for(YAML::const_iterator it = name_node.begin(); it != name_node.end(); ++it){ analyses_names_.push_back(it->as()); } break; default: throw std::invalid_argument{ "No analysis was provided to rivet. " "Either give an analysis or deactivate rivet." }; } } // it is a bit ugly that we can't do this directly in `initialise` void RivetAnalysis::init(Event const & event){ #ifdef HEJ_USE_RIVET2 rivet_runs_.reserve(event.variations().size()+1); #else ignore(event); // shut up compiler #endif rivet_runs_.emplace_back( std::make_unique(), std::string(""), HepMCInterface(heprup_) ); rivet_runs_.back().handler->addAnalyses(analyses_names_); #ifdef HEJ_USE_RIVET2 //! scale variation in rivet 2 through multiple analyses if( !event.variations().empty() ){ for(auto const & vari : event.variations()){ rivet_runs_.emplace_back( std::make_unique(), "."+to_simple_string(*vari.description), HepMCInterface(heprup_) ); rivet_runs_.back().handler->addAnalyses(analyses_names_); } } #endif } void RivetAnalysis::fill(Event const & event, Event const & /*unused*/){ if(first_event_){ first_event_=false; init(event); } auto hepmc_event(rivet_runs_.front().hepmc(event)); rivet_runs_.front().handler->analyze(hepmc_event); #ifdef HEJ_USE_RIVET2 for(std::size_t i = 1; i < rivet_runs_.size(); ++i){ auto & run = rivet_runs_[i]; run.hepmc.set_central(hepmc_event, event, i-1); run.handler->analyze(hepmc_event); } #endif } + void RivetAnalysis::set_xs_scale(double scale) { + for(auto & run: rivet_runs_) { + run.hepmc.set_xs_scale(scale); + } + } + void RivetAnalysis::finalise(){ for(auto & run: rivet_runs_){ run.handler->finalize(); run.handler->writeData(output_name_+run.name+std::string(".yoda")); } } } // namespace HEJ #else // no rivet => create empty analysis namespace Rivet { class AnalysisHandler {}; } namespace HEJ { struct RivetAnalysis::rivet_info{}; RivetAnalysis::RivetAnalysis( YAML::Node const & /*config*/, LHEF::HEPRUP /*heprup*/ ){ ignore(first_event_); throw std::invalid_argument( "Failed to create RivetAnalysis: " "HEJ 2 was built without rivet support" ); } void RivetAnalysis::init(Event const & /*event*/){} void RivetAnalysis::fill(Event const & /*event*/, Event const & /*unused*/){} + void RivetAnalysis::set_xs_scale(double /*xsscale*/){} + void RivetAnalysis::finalise(){} } // namespace HEJ #endif namespace HEJ { RivetAnalysis::~RivetAnalysis() = default; } diff --git a/src/bin/HEJ.cc b/src/bin/HEJ.cc index 42f09dc..365816d 100644 --- a/src/bin/HEJ.cc +++ b/src/bin/HEJ.cc @@ -1,425 +1,444 @@ /** * \authors The HEJ collaboration (see AUTHORS for details) - * \date 2019-2020 + * \date 2019-2022 * \copyright GPLv2 or later */ #include #include #include #include #include #include #include #include "yaml-cpp/yaml.h" 
#include "fastjet/ClusterSequence.hh" #include "HEJ/CombinedEventWriter.hh" #include "HEJ/Config.hh" #include "HEJ/CrossSectionAccumulator.hh" #include "HEJ/Event.hh" #include "HEJ/EventReader.hh" #include "HEJ/BufferedEventReader.hh" #include "HEJ/EventReweighter.hh" +#include "HEJ/filesystem.hh" #include "HEJ/get_analysis.hh" #include "HEJ/make_RNG.hh" #include "HEJ/ProgressBar.hh" #include "HEJ/stream.hh" #include "HEJ/Unweighter.hh" #include "HEJ/Version.hh" #include "HEJ/YAMLreader.hh" HEJ::Config load_config(char const * filename){ try{ return HEJ::load_config(filename); } catch(std::exception const & exc){ std::cerr << "Error: " << exc.what() << '\n'; std::exit(EXIT_FAILURE); } } std::vector> get_analyses( std::vector const & parameters, LHEF::HEPRUP const & heprup ){ try{ return HEJ::get_analyses(parameters, heprup); } catch(std::exception const & exc){ std::cerr << "Failed to load analysis: " << exc.what() << '\n'; std::exit(EXIT_FAILURE); } } -// unique_ptr is a workaround: -// std::optional is a better fit, but gives spurious errors with g++ 7.3.0 -std::unique_ptr> make_progress_bar( - std::vector const & xs -) { - if(xs.empty()) return {}; - const double Born_xs = std::accumulate(begin(xs), end(xs), 0.); - return std::make_unique>(std::cout, Born_xs); -} - std::string time_to_string(const time_t time){ char s[30]; struct tm * p = localtime(&time); strftime(s, 30, "%a %b %d %Y %H:%M:%S", p); return s; } void check_one_parton_per_jet(HEJ::Event const & ev) { const std::size_t npartons = std::count_if( ev.outgoing().begin(), ev.outgoing().end(), [](HEJ::Particle const & p) { return is_parton(p); } ); if(ev.jets().size() < npartons) { throw std::invalid_argument{ "Number of partons (" + std::to_string(npartons) + ") in input event" " does not match number of jets (" + std::to_string(ev.jets().size())+ ")" }; } } HEJ::Event to_event( LHEF::HEPEUP const & hepeup, HEJ::JetParameters const & fixed_order_jets, HEJ::EWConstants const & ew_parameters, const double off_shell_tolerance ) { HEJ::Event::EventData event_data{hepeup}; event_data.reconstruct_intermediate(ew_parameters); event_data.repair_momenta(off_shell_tolerance); HEJ::Event ev{ std::move(event_data).cluster( fixed_order_jets.def, fixed_order_jets.min_pt ) }; check_one_parton_per_jet(ev); return ev; } void unweight( HEJ::Unweighter & unweighter, HEJ::WeightType weight_type, std::vector & events, HEJ::RNG & ran ) { if(weight_type == HEJ::WeightType::unweighted_resum){ unweighter.set_cut_to_maxwt(events); } events.erase( unweighter.unweight(begin(events), end(events), ran), end(events) ); } // peek up to nevents events from reader std::vector peek_events( HEJ::BufferedEventReader & reader, const int nevents ) { std::vector events; while( static_cast(events.size()) < nevents && reader.read_event() ) { events.emplace_back(reader.hepeup()); } // put everything back into the reader for(auto it = rbegin(events); it != rend(events); ++it) { reader.emplace(*it); } return events; } void append_resummed_events( std::vector & resummation_events, HEJ::EventReweighter & reweighter, LHEF::HEPEUP const & hepeup, const size_t trials, HEJ::JetParameters const & fixed_order_jets, HEJ::EWConstants const & ew_parameters, const double off_shell_tolerance ) { const HEJ::Event FO_event = to_event( hepeup, fixed_order_jets, ew_parameters, off_shell_tolerance ); if(reweighter.treatment(FO_event.type()) != HEJ::EventTreatment::reweight) { return; } const auto resummed = reweighter.reweight(FO_event, trials); resummation_events.insert( 
end(resummation_events), begin(resummed), end(resummed) ); } +std::optional> make_progress_bar( + std::optional n_input_events, + char const * event_file +) { + if(n_input_events) { + return HEJ::ProgressBar{std::cout, *n_input_events}; + } + if(HEJ::is_regular(event_file)) { + auto t_reader = HEJ::make_reader(event_file); + std::size_t n_input_events = 0; + while(t_reader->read_event()) ++n_input_events; + return HEJ::ProgressBar{std::cout, n_input_events}; + } + return {}; +} void train( HEJ::Unweighter & unweighter, HEJ::BufferedEventReader & reader, HEJ::EventReweighter & reweighter, const size_t total_trials, const double max_dev, double reweight_factor, HEJ::JetParameters const & fixed_order_jets, HEJ::EWConstants const & ew_parameters, const double off_shell_tolerance ) { std::cout << "Reading up to " << total_trials << " training events...\n"; auto FO_events = peek_events(reader, total_trials); if(FO_events.empty()) { throw std::runtime_error{ "No events generated to calibrate the unweighting weight!" "Please increase the number \"trials\" or deactivate the unweighting." }; } const size_t trials = total_trials/FO_events.size(); // adjust reweight factor so that the overall normalisation // is the same as in the full run reweight_factor *= trials; for(auto & hepeup: FO_events) { hepeup.XWGTUP *= reweight_factor; } std::cout << "Training unweighter with " << trials << '*' << FO_events.size() << " events\n"; auto progress = HEJ::ProgressBar{ std::cout, static_cast(FO_events.size()) }; std::vector resummation_events; for(auto const & hepeup: FO_events) { append_resummed_events( resummation_events, reweighter, hepeup, trials, fixed_order_jets, ew_parameters, off_shell_tolerance ); ++progress; } unweighter.set_cut_to_peakwt(resummation_events, max_dev); std::cout << "\nUnweighting events with weight up to " << unweighter.get_cut() << '\n'; } int main(int argn, char** argv) { using clock = std::chrono::system_clock; if (argn != 3) { std::cerr << "\n# Usage:\n."<< argv[0] <<" config_file input_file\n\n"; return EXIT_FAILURE; } const auto start_time = clock::now(); { std::cout << "Starting " << HEJ::Version::package_name_full() << ", revision " << HEJ::Version::revision() << " (" << time_to_string(clock::to_time_t(start_time)) << ")" << std::endl; } fastjet::ClusterSequence::print_banner(); // read configuration const HEJ::Config config = load_config(argv[1]); auto reader = HEJ::make_reader(argv[2]); assert(reader); auto heprup{ reader->heprup() }; heprup.generators.emplace_back(LHEF::XMLTag{}); heprup.generators.back().name = HEJ::Version::package_name(); heprup.generators.back().version = HEJ::Version::String(); auto analyses = get_analyses( config.analyses_parameters, heprup ); assert(analyses.empty() || analyses.front() != nullptr); HEJ::CombinedEventWriter writer{config.output, std::move(heprup)}; double global_reweight = 1.; auto const & max_events = config.max_events; + // if we need the event number: if(std::abs(heprup.IDWTUP) == 4 || std::abs(heprup.IDWTUP) == 1 || max_events){ // try to read from LHE head auto input_events{reader->number_events()}; if(!input_events) { // else count manually auto t_reader = HEJ::make_reader(argv[2]); input_events = 0; while(t_reader->read_event()) ++(*input_events); } if(std::abs(heprup.IDWTUP) == 4 || std::abs(heprup.IDWTUP) == 1){ // IDWTUP 4 or 1 assume average(weight)=xs, but we need sum(weights)=xs std::cout << "Found IDWTUP " << heprup.IDWTUP << ": " << "assuming \"cross section = average weight\".\n" << "converting to \"cross section = sum 
of weights\" "; global_reweight /= *input_events; } if(max_events && (*input_events > *max_events)){ // maximal number of events given global_reweight *= *input_events/static_cast(*max_events); std::cout << "Processing " << *max_events << " out of " << *input_events << " events\n"; } } HEJ::ScaleGenerator scale_gen{ config.scales.base, config.scales.factors, config.scales.max_ratio }; std::shared_ptr ran{ HEJ::make_RNG(config.rng.name, config.rng.seed)}; assert(ran != nullptr); HEJ::EventReweighter hej{ reader->heprup(), std::move(scale_gen), to_EventReweighterConfig(config), ran }; std::optional unweighter{}; if(config.weight_type != HEJ::WeightType::weighted) { unweighter = HEJ::Unweighter{}; } if(config.weight_type == HEJ::WeightType::partially_unweighted) { HEJ::BufferedEventReader buffered_reader{std::move(reader)}; assert(config.unweight_config); train( *unweighter, buffered_reader, hej, config.unweight_config->trials, config.unweight_config->max_dev, global_reweight/config.trials, config.fixed_order_jets, config.ew_parameters, config.off_shell_tolerance ); reader = std::make_unique( std::move(buffered_reader) ); } // status infos & eye candy if (config.nlo.enabled){ std::cout << "HEJ@NLO Truncation Enabled for NLO order: " << config.nlo.nj << std::endl; } size_t nevent = 0; std::array nevent_type{0}, nfailed_type{0}; - auto progress = make_progress_bar(reader->heprup().XSECUP); + + auto progress = make_progress_bar(reader->number_events(), argv[2]); + if(!progress) { + std::cout + << "Cannot determine number of input events. " + "Disabling progress bar" + << std::endl; + } + HEJ::CrossSectionAccumulator xs; std::map status_counter; size_t total_trials = 0; size_t total_resum = 0; - // Loop over the events in the input file while(reader->read_event() && (!max_events || nevent < *max_events) ){ ++nevent; // reweight events so that the total cross section is conserved auto hepeup = reader->hepeup(); hepeup.XWGTUP *= global_reweight; const auto FO_event = to_event( hepeup, config.fixed_order_jets, config.ew_parameters, config.off_shell_tolerance ); if(FO_event.central().weight == 0) { static const bool warned_once = [argv,nevent](){ std::cerr << "WARNING: event number " << nevent << " in " << argv[2] << " has zero weight. 
" "Ignoring this and all further events with vanishing weight.\n"; return true; }(); (void) warned_once; // shut up compiler warnings continue; } auto resummed_events{ hej.reweight(FO_event, config.trials) }; // some bookkeeping for(auto const & s: hej.status()) ++status_counter[s]; total_trials+=hej.status().size(); ++nevent_type[FO_event.type()]; if(resummed_events.empty()) ++nfailed_type[FO_event.type()]; if(unweighter) { unweight(*unweighter, config.weight_type, resummed_events, *ran); } // analysis for(auto & ev: resummed_events){ //TODO: move pass_cuts to after phase space point generation bool passed = analyses.empty(); for(auto const & analysis: analyses){ + analysis->set_xs_scale(reader->scalefactor()); if(analysis->pass_cuts(ev, FO_event)){ passed = true; analysis->fill(ev, FO_event); }; } if(passed){ + writer.set_xs_scale(reader->scalefactor()); writer.write(ev); } else { ev.parameters()*=0; // do not use discarded events afterwards } } xs.fill_correlated(resummed_events); total_resum += resummed_events.size(); - if(progress) progress->increment(FO_event.central().weight); + if(progress) ++*progress; } // main event loop std::cout << '\n'; + // scale to account for num_trials of sherpa + xs.scale(reader->scalefactor()); + for(auto const & analysis: analyses){ analysis->finalise(); } writer.finish(); using namespace HEJ::event_type; std::cout<< "Events processed: " << nevent << " (" << total_resum << " resummed)"<< '\n'; std::cout << '\t' << name(EventType::first_type) << ": " << nevent_type[EventType::first_type] << ", failed to reconstruct " << nfailed_type[EventType::first_type] << '\n'; for(auto i=EventType::first_type+1; i<=EventType::last_type; i*=2){ std::cout << '\t' << name(static_cast(i)) << ": " << nevent_type[i] << ", failed to reconstruct " << nfailed_type[i] << '\n'; } std::cout << '\n' << xs << '\n'; std::cout << "Generation statistic: " << status_counter[HEJ::StatusCode::good] << "/" << total_trials << " trials successful.\n"; for(auto && entry: status_counter){ const double fraction = static_cast(entry.second)/total_trials; const int percent = std::round(100*fraction); std::cout << std::left << std::setw(17) << (to_string(entry.first) + ":") << " ["; for(int i = 0; i < percent/2; ++i) std::cout << '#'; for(int i = percent/2; i < 50; ++i) std::cout << ' '; std::cout << "] " < run_time = (clock::now() - start_time); std::cout << "\nFinished " << HEJ::Version::package_name() << " at " << time_to_string(clock::to_time_t(clock::now())) << "\n=> Runtime: " << run_time.count() << " sec (" << nevent/run_time.count() << " Events/sec).\n"; return EXIT_SUCCESS; } diff --git a/src/filesystem.cc b/src/filesystem.cc new file mode 100644 index 0000000..e972d7d --- /dev/null +++ b/src/filesystem.cc @@ -0,0 +1,16 @@ +#include "HEJ/filesystem.hh" + +#include +#include +#include + +namespace HEJ { + bool is_regular(char const * filename) { + struct stat buf; + const int err = stat(filename, &buf); + if(err) { + return false; + } + return S_ISREG(buf.st_mode); + } +} diff --git a/t/CMakeLists.txt b/t/CMakeLists.txt index 07003b0..c47bd7c 100644 --- a/t/CMakeLists.txt +++ b/t/CMakeLists.txt @@ -1,652 +1,707 @@ set(tst_dir "${CMAKE_CURRENT_SOURCE_DIR}") set(tst_ME_data_dir "${tst_dir}/ME_data") - # small library for common test functions add_library(hej_test SHARED hej_test.cc) target_include_directories(hej_test PUBLIC ${tst_dir}) target_link_libraries(hej_test HEJ) # test event classification # test explicit configurations add_executable(test_classify ${tst_dir}/test_classify.cc) 
target_compile_options(test_classify PRIVATE "-O0") # avoid compiler optimisation target_link_libraries(test_classify HEJ hej_test) add_test( NAME classify COMMAND test_classify ) # test against reference data add_executable(test_classify_ref ${tst_dir}/test_classify_ref.cc) target_link_libraries(test_classify_ref HEJ hej_test) add_test( NAME classify_ref COMMAND test_classify_ref ${tst_dir}/classify_ref ${tst_dir}/classify.lhe.gz ) add_test( NAME classify_ref_4j COMMAND test_classify_ref ${tst_dir}/classify_ref_4j ${tst_dir}/4j.lhe.gz ) add_test( NAME classify_ref_W4j COMMAND test_classify_ref ${tst_dir}/classify_ref_W4j ${tst_dir}/MGemnubar4j_unweighted.lhe.gz ) add_test( NAME t_classify_ref_Z4j COMMAND test_classify_ref ${tst_dir}/classify_ref_Z4j ${tst_dir}/Z_4j.lhe.gz ) # test for valid W decays add_executable(test_decay ${tst_dir}/test_decay.cc) target_link_libraries(test_decay HEJ hej_test) add_test( NAME valid_decay COMMAND test_decay ) # test valid jet cuts on tagging jets add_executable(test_jet_cuts ${tst_dir}/test_jet_cuts.cc) target_link_libraries(test_jet_cuts HEJ hej_test) add_test( NAME jet_cuts COMMAND test_jet_cuts ) # test phase space point add_executable(test_psp ${tst_dir}/test_psp.cc) target_link_libraries(test_psp HEJ hej_test) add_test( NAME PhaseSpace COMMAND test_psp ${tst_dir}/psp_gen.lhe.gz ) # test importing analyses file(COPY "${tst_dir}/analysis_config.yml" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}") set(test_config "${CMAKE_CURRENT_BINARY_DIR}/analysis_config.yml") get_target_property(ANALYSIS_PATH AnalysisTemplate_lib BINARY_DIR) get_target_property(ANALYSIS_LIB AnalysisTemplate_lib OUTPUT_NAME) set(ANALYSIS_PARAMETERS "") file(READ ${test_config} config) file(WRITE ${test_config} "${config} - plugin: ${ANALYSIS_PATH}/lib${ANALYSIS_LIB}.so\n ${ANALYSIS_PARAMETERS}\n") configure_file( ${tst_dir}/analysis_config_single.yml.in analysis_config_simple.yml @ONLY ) add_test( NAME analysis_simple COMMAND $ analysis_config_simple.yml ${tst_dir}/2j.lhe.gz ) get_target_property(ANALYSIS_PATH AnalysisPrint_lib BINARY_DIR) get_target_property(ANALYSIS_LIB AnalysisPrint_lib OUTPUT_NAME) set(ANALYSIS_PARAMETERS " output: ana_output") file(READ ${test_config} config) file(WRITE ${test_config} "${config} - plugin: ${ANALYSIS_PATH}/lib${ANALYSIS_LIB}.so\n ${ANALYSIS_PARAMETERS}\n") configure_file( ${tst_dir}/analysis_config_single.yml.in analysis_config_print.yml @ONLY ) add_test( NAME analysis_print COMMAND $ analysis_config_print.yml ${tst_dir}/2j.lhe.gz ) if(ROOT_FOUND) get_target_property(ANALYSIS_PATH AnalysisROOT_lib BINARY_DIR) get_target_property(ANALYSIS_LIB AnalysisROOT_lib OUTPUT_NAME) set(ANALYSIS_PARAMETERS "") file(READ ${test_config} config) file(WRITE ${test_config} "${config} - plugin: ${ANALYSIS_PATH}/lib${ANALYSIS_LIB}.so\n ${ANALYSIS_PARAMETERS}\n") configure_file( ${tst_dir}/analysis_config_single.yml.in analysis_config_root.yml @ONLY ) add_test( NAME analysis_root COMMAND $ analysis_config_root.yml ${tst_dir}/2j.lhe.gz ) endif() if(RIVET_FOUND) file(READ ${test_config} config) file(WRITE ${test_config} "${config} - rivet: MC_XS\n output: ana_rivet\n") endif() add_test( NAME analysis_all COMMAND $ ${test_config} ${tst_dir}/2j.lhe.gz ) # test importing scales (from examples/softestptScale) add_executable(test_scale_import ${tst_dir}/test_scale_import.cc) target_link_libraries(test_scale_import HEJ) get_target_property(SCALE_PATH softestptScale_lib BINARY_DIR) get_target_property(SCALE_LIB softestptScale_lib OUTPUT_NAME) set(SCALE_NAME "softest_jet_pt") 
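# The configure_file() call below substitutes the SCALE_PATH, SCALE_LIB and
# SCALE_NAME values set above into jet_config_with_import.yml.in (the template
# presumably references them as @SCALE_PATH@ etc.); @ONLY restricts the
# substitution to @VAR@ references, leaving any ${...} in the template intact.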
configure_file( ${tst_dir}/jet_config_with_import.yml.in jet_config_with_import.yml @ONLY ) add_test( NAME scale_import COMMAND test_scale_import jet_config_with_import.yml ) # test scale arithmetic (e.g. 2*H_T/4) add_executable(test_scale_arithmetics ${tst_dir}/test_scale_arithmetics.cc) target_link_libraries(test_scale_arithmetics HEJ hej_test) add_test( NAME scale_arithmetics COMMAND test_scale_arithmetics ${tst_dir}/jet_config.yml ${tst_dir}/2j.lhe.gz ) # test "ParameterDescription" add_executable(test_descriptions ${tst_dir}/test_descriptions.cc) target_link_libraries(test_descriptions HEJ hej_test) add_test( NAME descriptions COMMAND test_descriptions ) # test "EventParameters*Weight" add_executable(test_parameters ${tst_dir}/test_parameters.cc) target_link_libraries(test_parameters HEJ hej_test) add_test( NAME parameters COMMAND test_parameters ) # test Particle IDs add_executable(test_pid ${tst_dir}/test_pid.cc) target_link_libraries(test_pid HEJ hej_test) add_test( NAME pid COMMAND test_pid ) # test EW constants add_executable(test_EWConstants ${tst_dir}/test_EWConstants.cc) target_link_libraries(test_EWConstants HEJ hej_test) add_test( NAME EWConstants COMMAND test_EWConstants ) # test unweighting add_executable(test_unweighter ${tst_dir}/test_unweighter.cc) target_link_libraries(test_unweighter HEJ hej_test) add_test( NAME unweighter COMMAND test_unweighter ${tst_dir}/4j.lhe.gz ) # test colour generation add_executable(test_colours ${tst_dir}/test_colours.cc) target_link_libraries(test_colours HEJ hej_test) add_test( NAME colour_flow COMMAND test_colours ) add_executable(test_colours2 ${tst_dir}/test_colours2.cc) target_link_libraries(test_colours2 HEJ hej_test) add_test( NAME colour_flow2 COMMAND test_colours2 ) # test matrix elements add_executable(test_ME_generic ${tst_dir}/test_ME_generic.cc) target_link_libraries(test_ME_generic HEJ hej_test) add_test( NAME ME_j COMMAND test_ME_generic ${tst_ME_data_dir}/config_mtinf.yml ${tst_ME_data_dir}/ME_jets_tree.dat ${tst_ME_data_dir}/PSP_jets.lhe.gz ) add_test( NAME ME_j_virt COMMAND test_ME_generic ${tst_ME_data_dir}/config_mtinf.yml ${tst_ME_data_dir}/ME_jets_tree_virt.dat ${tst_ME_data_dir}/PSP_jets.lhe.gz ) add_test( NAME ME_h COMMAND test_ME_generic ${tst_ME_data_dir}/config_mtinf.yml ${tst_ME_data_dir}/ME_h_mtinf_tree.dat ${tst_ME_data_dir}/PSP_h.lhe.gz ) add_test( NAME ME_h_subl COMMAND test_ME_generic ${tst_ME_data_dir}/config_mtinf.yml ${tst_ME_data_dir}/ME_h_subl_mtinf_tree.dat ${tst_ME_data_dir}/PSP_h_subl.lhe.gz ) add_test( NAME ME_hj COMMAND test_ME_generic ${tst_ME_data_dir}/config_mtinf.yml ${tst_ME_data_dir}/ME_hj_mtinf_tree.dat ${tst_ME_data_dir}/PSP_hj.lhe.gz ) add_test( NAME ME_h_virt COMMAND test_ME_generic ${tst_ME_data_dir}/config_mtinf.yml ${tst_ME_data_dir}/ME_h_mtinf_virt.dat ${tst_ME_data_dir}/PSP_h.lhe.gz ) add_test( NAME ME_hj_virt COMMAND test_ME_generic ${tst_ME_data_dir}/config_mtinf.yml ${tst_ME_data_dir}/ME_hj_mtinf_virt.dat ${tst_ME_data_dir}/PSP_hj.lhe.gz ) if(QCDloop_FOUND) add_test( NAME ME_h_mt COMMAND test_ME_generic ${tst_ME_data_dir}/config_mt.yml ${tst_ME_data_dir}/ME_h_mt_tree.dat ${tst_ME_data_dir}/PSP_h.lhe.gz ) add_test( NAME ME_h_mtmb COMMAND test_ME_generic ${tst_ME_data_dir}/config_mtmb.yml ${tst_ME_data_dir}/ME_h_mtmb_tree.dat ${tst_ME_data_dir}/PSP_h.lhe.gz ) add_test( NAME ME_h_subl_mtmb COMMAND test_ME_generic ${tst_ME_data_dir}/config_mtmb.yml ${tst_ME_data_dir}/ME_h_subl_mtmb_tree.dat ${tst_ME_data_dir}/PSP_h_subl.lhe.gz ) add_test( NAME ME_hj_mt COMMAND test_ME_generic 
${tst_ME_data_dir}/config_mt.yml ${tst_ME_data_dir}/ME_hj_mt_tree.dat ${tst_ME_data_dir}/PSP_hj.lhe.gz ) add_test( NAME ME_hj_mtmb COMMAND test_ME_generic ${tst_ME_data_dir}/config_mtmb.yml ${tst_ME_data_dir}/ME_hj_mtmb_tree.dat ${tst_ME_data_dir}/PSP_hj.lhe.gz ) endif() add_test( NAME ME_j_subl COMMAND test_ME_generic ${tst_ME_data_dir}/config_pure.yml ${tst_ME_data_dir}/ME_jets_subl_tree.dat ${tst_ME_data_dir}/PSP_jets_subl.lhe.gz ) add_test( NAME ME_j_subl_virt COMMAND test_ME_generic ${tst_ME_data_dir}/config_pure.yml ${tst_ME_data_dir}/ME_jets_subl_tree_virt.dat ${tst_ME_data_dir}/PSP_jets_subl.lhe.gz ) add_test( NAME ME_j_subl_4j COMMAND test_ME_generic ${tst_ME_data_dir}/config_pure.yml ${tst_ME_data_dir}/ME_jets_subl_tree_new.dat ${tst_dir}/4j.lhe.gz ) add_test( NAME ME_j_subl_4j_virt COMMAND test_ME_generic ${tst_ME_data_dir}/config_pure.yml ${tst_ME_data_dir}/ME_jets_subl_tree_new_virt.dat ${tst_dir}/4j.lhe.gz ) add_test( NAME ME_w_FKL COMMAND test_ME_generic ${tst_ME_data_dir}/config_w_ME.yml ${tst_ME_data_dir}/ME_w_FKL_tree.dat ${tst_ME_data_dir}/PSP_w_FKL.lhe.gz ) add_test( NAME ME_w_FKL_virt COMMAND test_ME_generic ${tst_ME_data_dir}/config_w_ME.yml ${tst_ME_data_dir}/ME_w_FKL_virt.dat ${tst_ME_data_dir}/PSP_w_FKL.lhe.gz ) add_test( NAME ME_Wp COMMAND test_ME_generic ${tst_ME_data_dir}/config_w_ME.yml ${tst_ME_data_dir}/ME_Wp.dat ${tst_ME_data_dir}/PSP_Wp.lhe.gz ) add_test( NAME ME_Wp_virt COMMAND test_ME_generic ${tst_ME_data_dir}/config_w_ME.yml ${tst_ME_data_dir}/ME_Wp_virt.dat ${tst_ME_data_dir}/PSP_Wp.lhe.gz ) add_test( NAME ME_Wm COMMAND test_ME_generic ${tst_ME_data_dir}/config_w_ME.yml ${tst_ME_data_dir}/ME_Wm.dat ${tst_ME_data_dir}/PSP_Wm.lhe.gz ) add_test( NAME ME_Wm_virt COMMAND test_ME_generic ${tst_ME_data_dir}/config_w_ME.yml ${tst_ME_data_dir}/ME_Wm_virt.dat ${tst_ME_data_dir}/PSP_Wm.lhe.gz ) add_test( NAME ME_Z COMMAND test_ME_generic ${tst_ME_data_dir}/config_Z_ME.yml ${tst_ME_data_dir}/ME_Z.dat ${tst_ME_data_dir}/PSP_Z.lhe.gz ) add_test( NAME ME_Z_virt COMMAND test_ME_generic ${tst_ME_data_dir}/config_Z_ME.yml ${tst_ME_data_dir}/ME_Z_virt.dat ${tst_ME_data_dir}/PSP_Z.lhe.gz ) add_test( NAME ME_Z_uno COMMAND test_ME_generic ${tst_ME_data_dir}/config_Z_ME.yml ${tst_ME_data_dir}/ME_Z_uno_tree.dat ${tst_ME_data_dir}/PSP_Z_uno.lhe.gz ) add_test( NAME ME_Z_uno_virt COMMAND test_ME_generic ${tst_ME_data_dir}/config_Z_ME.yml ${tst_ME_data_dir}/ME_Z_uno_virt.dat ${tst_ME_data_dir}/PSP_Z_uno.lhe.gz ) add_test( NAME ME_WpWp COMMAND test_ME_generic ${tst_ME_data_dir}/config_ww_ME.yml ${tst_ME_data_dir}/ME_WpWp.dat ${tst_ME_data_dir}/PSP_WpWp.lhe.gz ) add_test( NAME ME_WpWp_virt COMMAND test_ME_generic ${tst_ME_data_dir}/config_ww_ME.yml ${tst_ME_data_dir}/ME_WpWp_virt.dat ${tst_ME_data_dir}/PSP_WpWp.lhe.gz ) add_test( NAME ME_WmWm COMMAND test_ME_generic ${tst_ME_data_dir}/config_ww_ME.yml ${tst_ME_data_dir}/ME_WmWm.dat ${tst_ME_data_dir}/PSP_WmWm.lhe.gz ) add_test( NAME ME_WmWm_virt COMMAND test_ME_generic ${tst_ME_data_dir}/config_ww_ME.yml ${tst_ME_data_dir}/ME_WmWm_virt.dat ${tst_ME_data_dir}/PSP_WmWm.lhe.gz ) # HEJ@NLO Virtuals add_test( NAME ME_WmWm_hejnlo COMMAND test_ME_generic ${tst_ME_data_dir}/config_ww_ME_hejnlo.yml ${tst_ME_data_dir}/ME_WmWm_virt_hejnlo.dat ${tst_ME_data_dir}/PSP_WmWm.lhe.gz ) add_test( NAME ME_WpWp_hejnlo COMMAND test_ME_generic ${tst_ME_data_dir}/config_ww_ME_hejnlo.yml ${tst_ME_data_dir}/ME_WpWp_virt_hejnlo.dat ${tst_ME_data_dir}/PSP_WpWp.lhe.gz ) add_test( NAME ME_Wp_hejnlo COMMAND test_ME_generic 
${tst_ME_data_dir}/config_wzhpure_ME_hejnlo.yml ${tst_ME_data_dir}/ME_Wp_virt_hejnlo.dat ${tst_ME_data_dir}/PSP_Wp.lhe.gz ) add_test( NAME ME_Wm_hejnlo COMMAND test_ME_generic ${tst_ME_data_dir}/config_wzhpure_ME_hejnlo.yml ${tst_ME_data_dir}/ME_Wm_virt_hejnlo.dat ${tst_ME_data_dir}/PSP_Wm.lhe.gz ) add_test( NAME ME_Z_hejnlo COMMAND test_ME_generic ${tst_ME_data_dir}/config_wzhpure_ME_hejnlo.yml ${tst_ME_data_dir}/ME_Z_virt_hejnlo.dat ${tst_ME_data_dir}/PSP_Z.lhe.gz ) add_test( NAME ME_jets_hejnlo COMMAND test_ME_generic ${tst_ME_data_dir}/config_wzhpure_ME_hejnlo.yml ${tst_ME_data_dir}/ME_jets_virt_hejnlo.dat ${tst_ME_data_dir}/PSP_jets.lhe.gz ) add_test( NAME ME_h_hejnlo COMMAND test_ME_generic ${tst_ME_data_dir}/config_wzhpure_ME_hejnlo.yml ${tst_ME_data_dir}/ME_h_virt_hejnlo.dat ${tst_ME_data_dir}/PSP_h.lhe.gz ) add_test( NAME ME_j_log_corr COMMAND test_ME_generic ${tst_ME_data_dir}/config_log_corr.yml ${tst_ME_data_dir}/ME_jets_log_corr.dat ${tst_ME_data_dir}/PSP_jets.lhe.gz ) add_test( NAME ME_j_lambda COMMAND test_ME_generic ${tst_ME_data_dir}/config_lambda.yml ${tst_ME_data_dir}/ME_jets_lambda.dat ${tst_ME_data_dir}/PSP_jets.lhe.gz ) # test main executable file(COPY "${tst_dir}/jet_config.yml" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}") set(test_config "${CMAKE_CURRENT_BINARY_DIR}/jet_config.yml") if(HighFive_FOUND) file(READ ${test_config} config) file(WRITE ${test_config} "${config} - tst.hdf5\n") endif() if(HepMC3_FOUND) file(READ ${test_config} config) file(WRITE ${test_config} "${config} - tst.hepmc\n") endif() if(HepMC_FOUND) file(READ ${test_config} config) file(WRITE ${test_config} "${config} - tst.hepmc2\n") endif() if(rivet_FOUND) file(READ ${test_config} config) file(WRITE ${test_config} "${config}\nanalysis:\n rivet: MC_XS\n output: tst\n") endif() set(test_cmd_main "$\\\;${test_config}\\\;${tst_dir}/2j.lhe.gz") # check that HepMC3 output is correct if(HepMC3_FOUND) add_executable(check_hepmc ${tst_dir}/check_hepmc.cc) target_link_libraries(check_hepmc HEJ ${HEPMC3_LIBRARIES}) target_include_directories(check_hepmc PRIVATE ${HEPMC3_INCLUDE_DIR}) set(test_cmd_hepmc "$\\\;tst.hepmc") else() set(test_cmd_hepmc "") endif() +if(HepMC_FOUND) + add_executable(check_hepmc2 ${tst_dir}/check_hepmc2.cc) + target_link_libraries(check_hepmc2 HEJ ${HepMC_LIBRARY}) + target_include_directories(check_hepmc2 PRIVATE ${HepMC_INCLUDE_DIR}) +endif() +if(HighFive_FOUND) + add_executable(check_highfive ${tst_dir}/check_highfive.cc) + target_link_libraries(check_highfive HEJ HighFive) +endif() # check that LHEF output is correct add_executable(check_lhe ${tst_dir}/check_lhe.cc) target_link_libraries(check_lhe HEJ hej_test) set(test_cmd_lhe "$\\\;tst.lhe") # check that rivet interface is consistent with naive rivet if(rivet_FOUND) # this assumes "rivet" and "yodadiff" are found in PATH if(rivet_USE_HEPMC3) set(hepmc_file "tst.hepmc") else() set(hepmc_file "tst.hepmc2") endif() if(rivet_USE_HEPMC3 OR (rivet_VERSION VERSION_LESS 3)) set(histo_exclude "") else() # rivet 3 with HepMC 2 is inconsistent in order of weights # -> interface != direct call (by permutation) # REQUIRES Yoda 1.7.5 set(histo_exclude "-M\\\;\\\\d") endif() set(test_cmd_rivet "rivet\\\;-a\\\;MC_XS\\\;${hepmc_file}\\\;-o\\\;tst_direct.yoda\ \;yodadiff\\\;${histo_exclude}\\\;tst.yoda\\\;tst_direct.yoda") else() set(test_cmd_rivet "") endif() # Run dependent tests in one command to ensure correct execution order # Note: The commands are concatenated with "\;" to escape CMake lists. 
# Thus arguments have to be escaped twice "\\\;". # e.g. "cmd1\;cmd2\\\;arg1\\\;arg2" is executed like "cmd1 && cmd2 arg1 arg2" add_test( NAME main COMMAND ${CMAKE_COMMAND} -DCMDS=${test_cmd_main}\;${test_cmd_hepmc}\;${test_cmd_lhe}\;${test_cmd_rivet} -P ${PROJECT_SOURCE_DIR}/cmake/run_multiple_tests.cmake ) add_test( NAME main_example COMMAND $ ${PROJECT_SOURCE_DIR}/config.yml ${tst_dir}/h_3j.lhe.gz ) # check that Sherpas LHE input can be read add_executable(check_lhe_sherpa ${tst_dir}/check_lhe_sherpa.cc) target_link_libraries(check_lhe_sherpa HEJ hej_test) add_test( NAME sherpa_reader COMMAND check_lhe_sherpa ${tst_dir}/SherpaLHE.lhe 1.62624e+08 ) # check HDF5 reader & writer if(HighFive_FOUND) add_executable(test_hdf5 ${tst_dir}/test_hdf5.cc) target_link_libraries(test_hdf5 HEJ) add_test( NAME hdf5_read COMMAND test_hdf5 ${tst_dir}/Wm9-g4-repack.hdf5 ) add_executable(test_hdf5_write ${tst_dir}/test_hdf5_write.cc) target_link_libraries(test_hdf5_write HEJ hej_test) add_test( NAME hdf5_write COMMAND test_hdf5_write ${tst_dir}/Wm9-g4-repack.hdf5 output.hdf5 ) endif() # check rivet interface if(RIVET_FOUND) add_executable(check_rivet ${tst_dir}/check_rivet.cc) target_link_libraries(check_rivet HEJ rivet::rivet) add_test( NAME rivet COMMAND check_rivet ) + find_program (BASH_PROGRAM bash) + if (BASH_PROGRAM) + add_test( + NAME fifo_test + COMMAND ${BASH_PROGRAM} ${tst_dir}/fifotest.sh ${tst_dir}/2j.lhe.gz $) + endif (BASH_PROGRAM) endif() # test boson reconstruction add_executable(cmp_events ${tst_dir}/cmp_events.cc) target_link_libraries(cmp_events HEJ) add_test( NAME reconstruct_W COMMAND cmp_events ${tst_dir}/epnu2jLOFKL_unweight.lhe.tar.gz ${tst_dir}/epnu2jLOFKL_unweight_noW.lhe.gz ) # test repairing slightly off-shell momenta add_executable(repair_off-shell ${tst_dir}/repair_off-shell.cc) target_link_libraries(repair_off-shell HEJ) add_test( NAME repair_off-shell COMMAND repair_off-shell ${tst_dir}/off-shell.lhe.gz ) # test resummed result add_executable(check_res ${tst_dir}/check_res.cc) target_link_libraries(check_res HEJ hej_test) # Setup yaml files for check res tests set(CENTRALSCALE 91.188) set(LOWPT_OPTION true) configure_file( ${tst_dir}/config_check_res.yml.in config_lowpt.yml @ONLY ) set(LOWPT_OPTION false) configure_file( ${tst_dir}/config_check_res.yml.in config_default.yml @ONLY ) set(CENTRALSCALE 125.) 
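# config_higgs.yml below reuses the same template with CENTRALSCALE = 125.
# The xs tests that follow it run HEJ on h_1j.lhe.gz and compare the total
# cross section stored in each enabled output format against the reference
# value 28024.1 via the corresponding check_* helper; they depend on "main"
# and share the RESOURCE_LOCK "xstests", so tests writing the same tst.*
# output files never run concurrently.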
configure_file( ${tst_dir}/config_check_res.yml.in config_higgs.yml @ONLY ) - - - - +# Check first event of HEJ output +set(xstest_cmd_lhef "$\\\;${test_config}\\\;${tst_dir}/h_1j.lhe.gz") +set(xstest_cmd_lhef_check "$\\\;tst.lhe\\\;28024.1") +add_test( + NAME test_lhe_xs + COMMAND ${CMAKE_COMMAND} -DCMDS=${xstest_cmd_lhef}\;${xstest_cmd_lhef_check} + -P ${PROJECT_SOURCE_DIR}/cmake/run_multiple_tests.cmake +) +set_tests_properties(test_lhe_xs PROPERTIES DEPENDS "main") +if(HepMC3_FOUND) + set(xstest_cmd_hepmc "$\\\;${test_config}\\\;${tst_dir}/h_1j.lhe.gz") + set(xstest_cmd_hepmc_check "$\\\;tst.hepmc\\\;28024.1") + add_test( + NAME test_hepmc_xs + COMMAND ${CMAKE_COMMAND} -DCMDS=${xstest_cmd_hepmc}\;${xstest_cmd_hepmc_check} + -P ${PROJECT_SOURCE_DIR}/cmake/run_multiple_tests.cmake + ) + set_tests_properties(test_hepmc_xs PROPERTIES DEPENDS "main") + set_tests_properties(test_hepmc_xs PROPERTIES RESOURCE_LOCK xstests) +endif() +if(HepMC_FOUND) + set(xstest_cmd_hepmc2 "$\\\;${test_config}\\\;${tst_dir}/h_1j.lhe.gz") + set(xstest_cmd_hepmc2_check "$\\\;tst.hepmc2\\\;28024.1") + add_test( + NAME test_hepmc2_xs + COMMAND ${CMAKE_COMMAND} -DCMDS=${xstest_cmd_hepmc2}\;${xstest_cmd_hepmc2_check} + -P ${PROJECT_SOURCE_DIR}/cmake/run_multiple_tests.cmake + ) + set_tests_properties(test_hepmc2_xs PROPERTIES DEPENDS "main") + set_tests_properties(test_hepmc2_xs PROPERTIES RESOURCE_LOCK xstests) +endif() +if(HighFive_FOUND) + set(xstest_cmd_highfive "$\\\;${test_config}\\\;${tst_dir}/h_1j.lhe.gz") + set(xstest_cmd_highfive_check "$\\\;tst.hdf5\\\;28024.1") + add_test( + NAME xstest_highfive + COMMAND ${CMAKE_COMMAND} -DCMDS=${xstest_cmd_highfive}\;${xstest_cmd_highfive_check} + -P ${PROJECT_SOURCE_DIR}/cmake/run_multiple_tests.cmake + ) + set_tests_properties(xstest_highfive PROPERTIES DEPENDS "main;test_lhe_xs;") + set_tests_properties(xstest_highfive PROPERTIES RESOURCE_LOCK xstests) +endif() +set_tests_properties(main test_lhe_xs + PROPERTIES RESOURCE_LOCK xstests +) add_test( NAME xs_h_1j COMMAND check_res config_higgs.yml ${tst_dir}/h_1j.lhe.gz 4.97087 0.0302519 ) add_test( NAME xs_2j COMMAND check_res config_default.yml ${tst_dir}/2j.lhe.gz 3.49391e+07 419684 ) add_test( NAME xs_3j COMMAND check_res config_default.yml ${tst_dir}/3j.lhe.gz 2.37902e+06 25746.6 ) add_test( NAME xs_3j_unof COMMAND check_res config_default.yml ${tst_dir}/3j.lhe.gz 133399 4688.83 unof ) add_test( NAME xs_3j_unob COMMAND check_res config_default.yml ${tst_dir}/3j.lhe.gz 105247 3449.45 unob ) add_test( NAME xs_3j_splitf COMMAND check_res config_default.yml ${tst_dir}/3j.lhe.gz 97659.9 2748.65 splitf ) add_test( NAME xs_3j_splitb COMMAND check_res config_default.yml ${tst_dir}/3j.lhe.gz 107150 2799.8 splitb ) add_test( NAME xs_4j COMMAND check_res config_default.yml ${tst_dir}/4j.lhe.gz 603713 72822.6 ) add_test( NAME xs_4j_qqbar_mid COMMAND check_res config_default.yml ${tst_dir}/4j.lhe.gz 21866.7 1716.96 qqbar_mid ) add_test( NAME xs_h_3j COMMAND check_res config_higgs.yml ${tst_dir}/h_3j.lhe.gz 0.667678 0.0126859 ) add_test( NAME xs_h_3j_unof COMMAND check_res config_higgs.yml ${tst_dir}/h_3j_uno.lhe.gz 0.0110041 0.000274214 unof ) add_test( NAME xs_h_3j_unob COMMAND check_res config_higgs.yml ${tst_dir}/h_3j_uno.lhe.gz 0.0116516 0.000376737 unob ) add_test( NAME xs_epnu_3j COMMAND check_res config_default.yml ${tst_dir}/MGepnu3j_unweighted.lhe.gz 84394.7 3770.35 ) add_test( NAME xs_emnubar_3j COMMAND check_res config_default.yml ${tst_dir}/MGemnubar3j_unweighted.lhe.gz 53489.1 977.234 ) add_test( NAME xs_epnu_4j 
   COMMAND check_res config_default.yml ${tst_dir}/MGepnu4j_unweighted.lhe.gz 19616.2 774.336
   )
 add_test(
   NAME xs_emnubar_4j
   COMMAND check_res config_default.yml ${tst_dir}/MGemnubar4j_unweighted.lhe.gz 10169.6 248.741
   )
 add_test(
   NAME xs_2j_lowpt
   COMMAND check_res config_lowpt.yml ${tst_dir}/2j.lhe.gz 45890.2 5325.62
   )
 if(TEST_ALL) # deactivate long tests by default
   add_test(
     NAME xs_h_1j_long
     COMMAND check_res config_higgs.yml ${tst_dir}/h_1j_large.lhe.gz 4.70112 0.00415353
     )
   add_test(
     NAME xs_epnu_2j
     COMMAND check_res config_default.yml ${tst_dir}/epnu2jLOFKL_unweight.lhe.tar.gz 262.7 3
     )
   add_test(
     NAME xs_epnu_3j_long
     COMMAND check_res config_default.yml ${tst_dir}/MGepnu3j_unweighted_large.lhe.gz 38.9512 1
     )
   add_test(
     NAME xs_emnubar_3j_long
     COMMAND check_res config_default.yml ${tst_dir}/MGemnubar3j_unweighted_large.lhe.gz 24.1575 1
     )
   add_test(
     NAME xs_epnu_3j_unof
     COMMAND check_res config_default.yml ${tst_dir}/MGepnu3j_unweighted_large.lhe.gz 9.63702 0.128355 unof
     )
   add_test(
     NAME xs_epnu_3j_unob
     COMMAND check_res config_default.yml ${tst_dir}/MGepnu3j_unweighted_large.lhe.gz 9.70119 0.108436 unob
     )
   add_test(
     NAME xs_epnu_3j_splitf
     COMMAND check_res config_default.yml ${tst_dir}/MGepnu3j_unweighted_large.lhe.gz 2.91995 0.0463182 splitf
     )
   add_test(
     NAME xs_epnu_3j_splitb
     COMMAND check_res config_default.yml ${tst_dir}/MGepnu3j_unweighted_large.lhe.gz 3.40708 0.0550975 splitb
     )
   add_test(
     NAME xs_epnu_4j_long
     COMMAND check_res config_default.yml ${tst_dir}/MGepnu4j_unweighted_large.lhe.gz 10.2542 0.135106
     )
   add_test(
     NAME xs_emnubar_4j_long
     COMMAND check_res config_default.yml ${tst_dir}/MGemnubar4j_unweighted_large.lhe.gz 5.73282 0.0583738
     )
   add_test(
     NAME xs_epnu_4j_qqbar_mid
     COMMAND check_res config_default.yml ${tst_dir}/MGepnu4j_unweighted_large.lhe.gz 0.802603 0.0126908 qqbar_mid
     )
 endif()
diff --git a/t/check_hepmc.cc b/t/check_hepmc.cc
index 367815a..3e60a35 100644
--- a/t/check_hepmc.cc
+++ b/t/check_hepmc.cc
@@ -1,28 +1,44 @@
 /**
  * \authors The HEJ collaboration (see AUTHORS for details)
- * \date 2019-2020
+ * \date 2019-2023
  * \copyright GPLv2 or later
  */
 #include <cstdlib>
 #include <iostream>
 #include "HepMC3/GenEvent.h"
 #include "HepMC3/ReaderAscii.h"
+#include "HepMC3/Print.h"

 #include "HEJ/exceptions.hh"

 int main(int argn, char** argv) {
-  if(argn != 2){
-    std::cerr << "Usage: check_hepmc hepmc_file\n";
+  constexpr double EP = 1e-3;
+
+  if(argn != 2 && argn != 3){
+    std::cerr << "Usage: check_hepmc hepmc_file [XS test value]\n";
     return EXIT_FAILURE;
   }
   HepMC3::ReaderAscii input{argv[1]};
   if(input.failed())
     throw std::runtime_error{"failed to open HepMC file"};
+  HepMC3::GenEvent ev{};
+  std::shared_ptr<HepMC3::GenCrossSection> cs;
   while(true){
-    HepMC3::GenEvent ev{};
     if ( !input.read_event(ev) || ev.event_number() == 0 ) break;
     if(input.failed())
       throw std::runtime_error{"failed to read HepMC event"};
+    cs = ev.attribute<HepMC3::GenCrossSection>("GenCrossSection");
+  }
+  const double xs = cs->xsec();
+
+  if (argn == 3){
+    const double xstest = std::stod(argv[2]);
+    if(std::abs(xs-xstest) > EP*xs){
+      std::cerr << "Total XS deviates substantially from ref value: "
+        << xs << " vs " << xstest << "\n";
+      return EXIT_FAILURE;
+    }
   }
+
   return EXIT_SUCCESS;
 }
diff --git a/t/check_hepmc2.cc b/t/check_hepmc2.cc
new file mode 100644
index 0000000..0de5129
--- /dev/null
+++ b/t/check_hepmc2.cc
@@ -0,0 +1,41 @@
+/**
+ * \authors The HEJ collaboration (see AUTHORS for details)
+ * \date 2019-2023
+ * \copyright GPLv2 or later
+ */
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+#include <boost/algorithm/string.hpp>
+
+int main(int argn, char** argv) {
+  constexpr double EP = 1e-3;
+
+  if(argn != 3){
+    std::cerr << "Usage: check_hepmc2 hepmc2_file XS_test_value\n";
+    return EXIT_FAILURE;
+  }
+  const double xstest = std::stod(argv[2]);
+  double xs = 0;
+
+  //Doing it manually as that is basically what hepmc2 does anyway!
+  //https://gitlab.cern.ch/hepmc/HepMC/-/blob/master/src/GenCrossSection.cc
+  std::ifstream istr( argv[1] );
+
+  std::string line;
+  std::vector<std::string> vals;
+
+  while (std::getline(istr, line)) {
+    if (line.find("C ") == std::string::npos)
+      continue;
+    boost::algorithm::split(vals, line, boost::is_any_of(" "));
+    xs = std::stod(vals[1]);
+  }
+  if(std::abs(xs-xstest) > EP*xs){
+    std::cerr << "Total XS deviates substantially from ref value: "
+      << xs << " vs " << xstest << "\n";
+    return EXIT_FAILURE;
+  }
+  return EXIT_SUCCESS;
+}
diff --git a/t/check_highfive.cc b/t/check_highfive.cc
new file mode 100644
index 0000000..9de22b2
--- /dev/null
+++ b/t/check_highfive.cc
@@ -0,0 +1,41 @@
+/**
+ * \authors The HEJ collaboration (see AUTHORS for details)
+ * \date 2019-2023
+ * \copyright GPLv2 or later
+ */
+#include <iostream>
+#include <vector>
+
+#include <highfive/H5File.hpp>
+
+#include "HEJ/exceptions.hh"
+
+using namespace HighFive;
+
+int main(int argn, char** argv) {
+  constexpr double EP = 1e-3;
+
+  if(argn != 3){
+    std::cerr << "Usage: check_highfive hdf5_file XS_test_value\n";
+    return EXIT_FAILURE;
+  }
+  const double xstest = std::stod(argv[2]);
+  File file(argv[1], File::ReadOnly);
+  auto dataset = file.getDataSet("procInfo/xSection");
+  std::vector<double> result;
+  // hdf data in form
+  // { 0, 1, 2, 3,...}
+  // {fkl, uno, qqbar, non-resummable,...}
+  const std::vector<std::size_t> fkl_xs_idx{0};
+  dataset.select(fkl_xs_idx).read(result);
+  double xs = result[0];
+  const std::vector<std::size_t> nonresum_xs_idx{3};
+  dataset.select(nonresum_xs_idx).read(result);
+  xs += result[0];
+  if(std::abs(xs-xstest) > EP*xs){
+    std::cerr << "Total XS deviates substantially from ref value: "
+      << xs << " vs " << xstest << "\n";
+    return EXIT_FAILURE;
+  }
+  return EXIT_SUCCESS;
+}
diff --git a/t/check_lhe.cc b/t/check_lhe.cc
index 1c073f4..b7a79a8 100644
--- a/t/check_lhe.cc
+++ b/t/check_lhe.cc
@@ -1,74 +1,86 @@
 /**
  * \authors The HEJ collaboration (see AUTHORS for details)
- * \date 2019-2020
+ * \date 2019-2023
  * \copyright GPLv2 or later
  */
 #include "hej_test.hh"

 #include <cmath>
 #include <iostream>
 #include <unordered_map>

 #include "LHEF/LHEF.h"

 #include "fastjet/JetDefinition.hh"

 #include "HEJ/Event.hh"
 #include "HEJ/event_types.hh"
 #include "HEJ/EventReader.hh"

 namespace {
   constexpr double EP = 1e-3;
   const fastjet::JetDefinition JET_DEF{fastjet::kt_algorithm, 0.4};
   constexpr double MIN_JET_PT = 30;
 }

 int main(int argn, char** argv) {
-  if(argn != 2){
-    std::cerr << "Usage: " << argv[0] << " lhe_file\n";
+  if(argn != 2 && argn != 3){
+    std::cerr << "Usage: " << argv[0] << " lhe_file [XS test value]\n";
     return EXIT_FAILURE;
   }

   auto reader{ HEJ::make_reader(argv[1]) };
   // version should be overwritten
   ASSERT(reader->heprup().version==LHEF::HEPRUP().version);

   std::unordered_map<int, double> xsec_ref;
   for(int i=0; i < reader->heprup().NPRUP; ++i)
     xsec_ref[reader->heprup().LPRUP[i]] = 0.;

   while(reader->read_event()){
     auto const & hepeup = reader->hepeup();
     ASSERT(hepeup.NUP > 2); // at least 3 particles (2 in + 1 out)
     // first incoming has positive pz
     ASSERT(hepeup.PUP[0][2] > hepeup.PUP[1][2]);

     // test that we can transform IDPRUP to event type
     (void) name(static_cast<HEJ::event_type::EventType>(hepeup.IDPRUP));
     xsec_ref[hepeup.IDPRUP] += hepeup.weight();

     // test that a HEJ event can be transformed back to the original HEPEUP
     auto hej_event = HEJ::Event::EventData(hepeup).cluster(JET_DEF, MIN_JET_PT);
     // there are two different weight infos, which should be the same
     ASSERT(hej_event.central().weight == hepeup.weight());
     ASSERT(hej_event.central().weight == hepeup.XWGTUP);

     // reader->heprup() is const, we can't use it to create a hepeup
     auto cp_heprup = reader->heprup();
     auto new_event = HEJ::to_HEPEUP(hej_event, &cp_heprup);
     ASSERT_PROPERTY(new_event, hepeup, weight());
     ASSERT_PROPERTY(new_event, hepeup, XWGTUP);
     ASSERT_PROPERTY(new_event, hepeup, SCALUP);
     ASSERT_PROPERTY(new_event, hepeup, NUP);
   }
-
+  double xstotal = 0;
   for(std::size_t i = 0; i < xsec_ref.size(); ++i){
+    std::cout << "[+]" << xsec_ref.size() << std::endl;
     double const ref = xsec_ref[reader->heprup().LPRUP[i]];
     double const calc = reader->heprup().XSECUP[i];
+    xstotal += calc;
     std::cout << ref << '\t' << calc << '\n';
     if(std::abs(calc-ref) > EP*calc){
       std::cerr << "Cross sections deviate substantially";
       return EXIT_FAILURE;
     }
   }
+  if (argn == 3){
+    const double xstest = std::stod(argv[2]);
+    if(std::abs(xstotal-xstest) > EP*xstotal){
+      std::cerr << "Total XS deviates substantially from ref value: "
+        << xstotal << " vs " << xstest << "\n";
+      return EXIT_FAILURE;
+    }
+  }
+
+  return EXIT_SUCCESS;
 }
diff --git a/t/check_lhe_sherpa.cc b/t/check_lhe_sherpa.cc
index a9b6074..7597656 100644
--- a/t/check_lhe_sherpa.cc
+++ b/t/check_lhe_sherpa.cc
@@ -1,58 +1,62 @@
 /**
  * \authors The HEJ collaboration (see AUTHORS for details)
  * \date 2019-2020
  * \copyright GPLv2 or later
  */
 #include "hej_test.hh"

 #include <cmath>
 #include <cstddef>
 #include <iostream>
 #include <string>

 #include "HEJ/EventReader.hh"

 #include "LHEF/LHEF.h"

 namespace {
   constexpr double EP = 1e-5;
 }

 int main(int argn, char** argv) {
   if(argn != 3){
     std::cerr << "Usage: " << argv[0] << " lhe_file xs\n";
     return EXIT_FAILURE;
   }

   auto reader{ HEJ::make_reader(argv[1])};
   const double ref_xs = std::stod(argv[2]);

-  if(std::abs(reader->heprup().IDWTUP) != 3){
-    std::cerr << "Sherpa Events should always be neutral/unweighted\n";
+  std::cerr<<"IDWTUP : "<<reader->heprup().IDWTUP<<std::endl;
+  if(std::abs(reader->heprup().IDWTUP) < 3||std::abs(reader->heprup().IDWTUP) >4){
+    std::cerr << "Bang: This seems to be a PYTHIA file read with SHERPA interpreter!\n";
     return EXIT_FAILURE;
   }

   // version should be overwritten
   ASSERT(reader->heprup().version==LHEF::HEPRUP().version);

   double xs { 0. };
   std::size_t n_evts { 0 };
-  ASSERT(std::abs(reader->heprup().XSECUP.front()-ref_xs) < EP*ref_xs);
   while(reader->read_event()){
     ++n_evts;
     xs += reader->hepeup().weight();
     ASSERT(reader->hepeup().weight() == reader->hepeup().XWGTUP);
   }
+  ASSERT(std::abs(reader->heprup().XSECUP.front()-ref_xs) < EP*ref_xs);
+  // scale to account for num_trials of sherpa
+  double scalefactor(reader->scalefactor());
+  xs*=scalefactor;

   if(std::abs(xs-ref_xs) > EP*xs){
     std::cerr << "Cross sections deviate substantially!\n"
       <<"Found "<< xs <<" but expected "<< ref_xs <<" -> "<< xs/ref_xs <<"\n";
     return EXIT_FAILURE;
   }
-  if(!reader->number_events() || *(reader->number_events()) != n_evts){
+  if(reader->number_events() && *(reader->number_events()) != n_evts){
     std::cerr << "Number of Event not correctly set for Sherpa LHE reader\n";
     return EXIT_FAILURE;
   }

   return EXIT_SUCCESS;
 }
diff --git a/t/check_res.cc b/t/check_res.cc
index 6445d36..d5e3bf2 100644
--- a/t/check_res.cc
+++ b/t/check_res.cc
@@ -1,151 +1,152 @@
 /**
  * \authors The HEJ collaboration (see AUTHORS for details)
  * \date 2019-2020
  * \copyright GPLv2 or later
  */
 #include "hej_test.hh"

 #include <cmath>
 #include <cstddef>
 #include <cstdlib>
 #include <iostream>
 #include <memory>
 #include <stdexcept>
 #include <string>
 #include <utility>

 #include "HEJ/Config.hh"
 #include "HEJ/CrossSectionAccumulator.hh"
 #include "HEJ/Event.hh"
 #include "HEJ/event_types.hh"
 #include "HEJ/EventReweighter.hh"
 #include "HEJ/EWConstants.hh"
 #include "HEJ/Fraction.hh"
 #include "HEJ/HiggsCouplingSettings.hh"
 #include "HEJ/make_RNG.hh"
 #include "HEJ/Mixmax.hh"
 #include "HEJ/Parameters.hh"
 #include "HEJ/ScaleFunction.hh"
 #include "HEJ/EventReader.hh"
 #include "HEJ/YAMLreader.hh"

 #include "fastjet/JetDefinition.hh"
 #include "LHEF/LHEF.h"

 namespace HEJ {
   struct RNG;
 }

 namespace {
   using EventTreatment = HEJ::EventTreatment;
   using namespace HEJ::event_type;

   HEJ::EventTreatMap TREAT{
     {non_resummable, EventTreatment::discard},
     {unof, EventTreatment::discard},
     {unob, EventTreatment::discard},
     {qqbar_exb, EventTreatment::discard},
     {qqbar_exf, EventTreatment::discard},
     {qqbar_mid, EventTreatment::discard},
     {FKL, EventTreatment::reweight}
   };

   bool correct_colour(HEJ::Event const & ev){
     if(!HEJ::event_type::is_resummable(ev.type()))
       return true;
     return ev.is_leading_colour();
   }
 } // namespace

 int main(int argn, char** argv) {
   if(argn == 6 && std::string(argv[5]) == "unof"){
     --argn;
     TREAT[unof] = EventTreatment::reweight;
     TREAT[unob] = EventTreatment::discard;
     TREAT[FKL] = EventTreatment::discard;
   }
   else if(argn == 6 && std::string(argv[5]) == "unob"){
     --argn;
     TREAT[unof] = EventTreatment::discard;
     TREAT[unob] = EventTreatment::reweight;
     TREAT[FKL] = EventTreatment::discard;
   }
   else if(argn == 6 && std::string(argv[5]) == "splitf"){
     --argn;
     TREAT[qqbar_exb] = EventTreatment::discard;
     TREAT[qqbar_exf] = EventTreatment::reweight;
     TREAT[FKL] = EventTreatment::discard;
   }
   else if(argn == 6 && std::string(argv[5]) == "splitb"){
     --argn;
     TREAT[qqbar_exb] = EventTreatment::reweight;
     TREAT[qqbar_exf] = EventTreatment::discard;
     TREAT[FKL] = EventTreatment::discard;
   }
   else if(argn == 6 && std::string(argv[5]) == "qqbar_mid"){
     --argn;
     TREAT[qqbar_mid] = EventTreatment::reweight;
     TREAT[FKL] = EventTreatment::discard;
   }
   if(argn != 5){
     std::cerr << "Usage: check_res yaml eventfile xsection tolerance [uno] \n";
     return EXIT_FAILURE;
   }

   HEJ::Config config = HEJ::load_config(argv[1]);
   auto reader = HEJ::make_reader(argv[2]);

   const double xsec_ref = std::stod(argv[3]);
   const double tolerance = std::stod(argv[4]);

   reader->read_event();

   HEJ::EventReweighterConfig conf = HEJ::to_EventReweighterConfig(config);
   conf.treat = TREAT;
   HEJ::MatrixElementConfig ME_conf = HEJ::to_MatrixElementConfig(config);
   HEJ::PhaseSpacePointConfig psp_conf = HEJ::to_PhaseSpacePointConfig(config);

   const fastjet::JetDefinition BORN_JET_DEF = config.fixed_order_jets.def;
   const double BORN_JETPTMIN = config.fixed_order_jets.min_pt;
   const int NUM_TRIES = config.trials;

   HEJ::ScaleGenerator scale_gen{
     config.scales.base,
     config.scales.factors,
     config.scales.max_ratio
   };

   std::shared_ptr<HEJ::RNG> ran{ HEJ::make_RNG(config.rng.name,config.rng.seed) };

   HEJ::EventReweighter hej{reader->heprup(), std::move(scale_gen), conf, ran};

   HEJ::CrossSectionAccumulator xs;
   do{
     auto ev_data = HEJ::Event::EventData{reader->hepeup()};
     shuffle_particles(ev_data);
     ev_data.reconstruct_intermediate(ME_conf.ew_parameters);
     HEJ::Event ev{ ev_data.cluster( BORN_JET_DEF, BORN_JETPTMIN ) };
     auto resummed_events = hej.reweight(ev, NUM_TRIES);
     for(auto const & res_ev: resummed_events) {
       ASSERT(correct_colour(res_ev));
       ASSERT(std::isfinite(res_ev.central().weight));
       // we fill the xs uncorrelated since we only want to test the uncertainty
       // of the resummation
       xs.fill(res_ev);
     }
   } while(reader->read_event());
+  xs.scale(reader->scalefactor());

   const double xsec = xs.total().value;
   const double xsec_err = std::sqrt(xs.total().error);

   const double significance =
     std::abs(xsec - xsec_ref) / std::sqrt( xsec_err*xsec_err + tolerance*tolerance );

   std::cout << xsec_ref << " +/- " << tolerance << " ~ "
     << xsec << " +- " << xsec_err << " => " << significance << " sigma\n";

   if(significance > 3.){
     std::cerr << "Cross section is off by over 3 sigma!\n";
     return EXIT_FAILURE;
   }
   return EXIT_SUCCESS;
 }
diff --git a/t/fifotest.sh b/t/fifotest.sh
new file mode 100644
index 0000000..47c4c64
--- /dev/null
+++ b/t/fifotest.sh
@@ -0,0 +1,27 @@
+# Script for running HEJ with FIFO
+
+set -eu
+
+FIFO=input.lhe.gz
+
+# Copy config file and change output file name
+cp analysis_config_simple.yml config_rivet.yml
+sed -i "s/analysis/analyses/g" config_rivet.yml
+sed -i "s/plugin:.*/ - rivet: MC_XS\n output: analysis_rivet/g" config_rivet.yml
+
+# run fifo
+mkfifo $FIFO
+cat $1 >> $FIFO &
+$2 config_rivet.yml $FIFO
+rm $FIFO
+mv analysis_rivet.yoda analysis_fifo.yoda
+
+# run w/o fifo
+$2 config_rivet.yml $1
+mv analysis_rivet.yoda analysis_nofifo.yoda
+
+# make sure fails
+#sed -i "s/YODA/GROGU/g" analysis_fifo.yoda
+
+#compare
+diff analysis_fifo.yoda analysis_nofifo.yoda