diff --git a/src/HDF5Writer.cc b/src/HDF5Writer.cc
index a7e75d2..1ff3525 100644
--- a/src/HDF5Writer.cc
+++ b/src/HDF5Writer.cc
@@ -1,383 +1,403 @@
 /**
  *  \authors   The HEJ collaboration (see AUTHORS for details)
  *  \date      2019
  *  \copyright GPLv2 or later
  */
 #include "HEJ/HDF5Writer.hh"
 
 #include <cassert>
 
 #include "LHEF/LHEF.h"
 
 #include "HEJ/ConfigFlags.hh"
 
 #ifdef HEJ_BUILD_WITH_HDF5
 
 #include <type_traits>
+#include <iterator>
 
 #include "HEJ/event_types.hh"
 #include "HEJ/Event.hh"
 
 #include "highfive/H5File.hpp"
 
 namespace HEJ{
 
   using HighFive::File;
   using HighFive::DataSpace;
 
   namespace{
     constexpr std::size_t chunk_size = 1000;
     constexpr unsigned compression_level = 3;
 
     size_t to_index(event_type::EventType const type){
       return type==0?0:floor(log2(type))+1;
     }
 
     template<typename T>
     void write_dataset(HighFive::Group & group, std::string const & name, T val) {
       using data_t = std::decay_t<T>;
       group.createDataSet<data_t>(name, DataSpace::From(val)).write(val);
     }
 
     template<typename T>
     void write_dataset(
         HighFive::Group & group, std::string const & name,
         std::vector<T> const & val
     ) {
       using data_t = std::decay_t<T>;
       group.createDataSet<data_t>(name, DataSpace::From(val)).write(val);
     }
 
     struct Cache {
       explicit Cache(size_t capacity):
         capacity{capacity}
       {
         nparticles.reserve(capacity);
         start.reserve(capacity);
         pid.reserve(capacity);
         weight.reserve(capacity);
         scale.reserve(capacity);
         fscale.reserve(capacity);
         rscale.reserve(capacity);
         aqed.reserve(capacity);
         aqcd.reserve(capacity);
         trials.reserve(capacity);
+        npLO.reserve(capacity);
+        npNLO.reserve(capacity);
       }
 
       void fill(HEJ::Event ev) {
         const auto hepeup = to_HEPEUP(ev, nullptr);
 
+        // use the HEJ event here, as it provides convenient parton iterators
+        const auto num_partons = std::distance(ev.cbegin_partons(),
+                                               ev.cend_partons());
+        assert(num_partons>0);
+        // Number of partons for LO matching; HEJ requires at least 2 partons
+        npLO.emplace_back(num_partons>1?num_partons-2:num_partons);
+        // Number of real emissions at NLO; HEJ events are LO, hence -1
+        npNLO.emplace_back(-1);
+
         fill_event_params(hepeup);
         fill_event_particles(hepeup);
       }
 
       void fill_event_params(LHEF::HEPEUP const & ev) {
         nparticles.emplace_back(ev.NUP);
         start.emplace_back(particle_pos);
         pid.emplace_back(ev.IDPRUP);
         weight.emplace_back(ev.XWGTUP);
         scale.emplace_back(ev.SCALUP);
         fscale.emplace_back(ev.scales.muf);
         rscale.emplace_back(ev.scales.mur);
         aqed.emplace_back(ev.AQEDUP);
         aqcd.emplace_back(ev.AQCDUP);
         // set first trial=1 for first event
         // -> sum(trials) = 1 -> xs=sum(weights)/sum(trials) as in Sherpa
         if(particle_pos == 0){
           trials.emplace_back(1.);
         } else {
           trials.emplace_back(0.);
         }
         particle_pos += ev.NUP;
       }
 
       void fill_event_particles(LHEF::HEPEUP const & ev) {
         id.insert(end(id), begin(ev.IDUP), end(ev.IDUP));
         status.insert(end(status), begin(ev.ISTUP), end(ev.ISTUP));
         lifetime.insert(end(lifetime), begin(ev.VTIMUP), end(ev.VTIMUP));
         spin.insert(end(spin), begin(ev.SPINUP), end(ev.SPINUP));
         for(int i = 0; i < ev.NUP; ++i) {
           mother1.emplace_back(ev.MOTHUP[i].first);
           mother2.emplace_back(ev.MOTHUP[i].second);
           color1.emplace_back(ev.ICOLUP[i].first);
           color2.emplace_back(ev.ICOLUP[i].second);
           px.emplace_back(ev.PUP[i][0]);
           py.emplace_back(ev.PUP[i][1]);
           pz.emplace_back(ev.PUP[i][2]);
           e.emplace_back(ev.PUP[i][3]);
           m.emplace_back(ev.PUP[i][4]);
         }
       }
 
       bool is_full() const {
         return nparticles.size() >= capacity;
       }
 
       void clear() {
         nparticles.clear();
         start.clear();
         pid.clear();
         id.clear();
         status.clear();
         mother1.clear();
         mother2.clear();
         color1.clear();
         color2.clear();
         weight.clear();
         scale.clear();
         fscale.clear();
         rscale.clear();
         aqed.clear();
         aqcd.clear();
         trials.clear();
+        npLO.clear();
+        npNLO.clear();
         px.clear();
         py.clear();
         pz.clear();
         e.clear();
         m.clear();
         lifetime.clear();
         spin.clear();
       }
 
       size_t capacity;
       std::vector<int> nparticles, start, pid, id, status,
-        mother1, mother2, color1, color2;
+        mother1, mother2, color1, color2, npLO, npNLO;
       std::vector<double> weight, scale, fscale, rscale, aqed,
         aqcd, trials, px, py, pz, e, m, lifetime, spin;
 
     private:
       size_t particle_pos = 0;
     };
 
   }
 
   struct HDF5Writer::HDF5WriterImpl{
     File file;
     LHEF::HEPRUP heprup;
     Cache cache{chunk_size};
     size_t event_idx = 0;
     size_t particle_idx = 0;
 
     HDF5WriterImpl(std::string const & filename, LHEF::HEPRUP && hepr):
       file{filename, File::ReadWrite | File::Create | File::Truncate},
       heprup{std::move(hepr)}
     {
       // TODO: code duplication with Les Houches Writer
       const int max_number_types = to_index(event_type::last_type)+1;
       heprup.NPRUP = max_number_types;
       // ids of event types
       heprup.LPRUP.clear();
       heprup.LPRUP.reserve(max_number_types);
       heprup.LPRUP.emplace_back(0);
       for(size_t i=event_type::first_type+1; i<=event_type::last_type; i*=2) {
         heprup.LPRUP.emplace_back(i);
       }
 
       heprup.XSECUP = std::vector<double>(max_number_types, 0.);
       heprup.XERRUP = std::vector<double>(max_number_types, 0.);
       heprup.XMAXUP = std::vector<double>(max_number_types, 0.);
 
       write_init();
       create_event_group();
       create_particle_group();
     }
 
     void write_init() {
       auto init = file.createGroup("init");
 
       write_dataset(init, "PDFgroupA"        , heprup.PDFGUP.first);
       write_dataset(init, "PDFgroupB"        , heprup.PDFGUP.second);
       write_dataset(init, "PDFsetA"          , heprup.PDFSUP.first);
       write_dataset(init, "PDFsetB"          , heprup.PDFSUP.second);
       write_dataset(init, "beamA"            , heprup.IDBMUP.first);
       write_dataset(init, "beamB"            , heprup.IDBMUP.second);
       write_dataset(init, "energyA"          , heprup.EBMUP.first);
       write_dataset(init, "energyB"          , heprup.EBMUP.second);
       write_dataset(init, "numProcesses"     , heprup.NPRUP);
       write_dataset(init, "weightingStrategy", heprup.IDWTUP);
 
       auto proc_info = file.createGroup("procInfo");
       write_dataset(proc_info, "procId", heprup.LPRUP);
     }
 
     static HighFive::DataSetCreateProps const & hdf5_chunk() {
       static const auto props = [](){
         HighFive::DataSetCreateProps props;
         props.add(HighFive::Chunking({chunk_size}));
         props.add(HighFive::Deflate(compression_level));
         return props;
       }();
       return props;
     }
 
     void create_event_group() {
       static const auto dim = DataSpace({0}, {DataSpace::UNLIMITED});
       auto events = file.createGroup("event");
       events.createDataSet<int>("nparticles", dim, hdf5_chunk());
       events.createDataSet<int>("start", dim, hdf5_chunk());
       events.createDataSet<int>("pid", dim, hdf5_chunk());
       events.createDataSet<double>("weight", dim, hdf5_chunk());
       events.createDataSet<double>("scale", dim, hdf5_chunk());
       events.createDataSet<double>("fscale", dim, hdf5_chunk());
       events.createDataSet<double>("rscale", dim, hdf5_chunk());
       events.createDataSet<double>("aqed", dim, hdf5_chunk());
       events.createDataSet<double>("aqcd", dim, hdf5_chunk());
       events.createDataSet<double>("trials", dim, hdf5_chunk());
+      events.createDataSet<int>("npLO", dim, hdf5_chunk());
+      events.createDataSet<int>("npNLO", dim, hdf5_chunk());
     }
 
     void resize_event_group(size_t new_size) {
       auto events = file.getGroup("event");
       events.getDataSet("nparticles").resize({new_size});
       events.getDataSet("start").resize({new_size});
       events.getDataSet("pid").resize({new_size});
       events.getDataSet("weight").resize({new_size});
       events.getDataSet("scale").resize({new_size});
       events.getDataSet("fscale").resize({new_size});
       events.getDataSet("rscale").resize({new_size});
       events.getDataSet("aqed").resize({new_size});
       events.getDataSet("aqcd").resize({new_size});
       events.getDataSet("trials").resize({new_size});
+      events.getDataSet("npLO").resize({new_size});
+      events.getDataSet("npNLO").resize({new_size});
     }
 
     void create_particle_group() {
       static const auto dim = DataSpace({0}, {DataSpace::UNLIMITED});
       auto particles = file.createGroup("particle");
       particles.createDataSet<int>("id", dim, hdf5_chunk());
       particles.createDataSet<int>("status", dim, hdf5_chunk());
       particles.createDataSet<int>("mother1", dim, hdf5_chunk());
       particles.createDataSet<int>("mother2", dim, hdf5_chunk());
       particles.createDataSet<int>("color1", dim, hdf5_chunk());
       particles.createDataSet<int>("color2", dim, hdf5_chunk());
       particles.createDataSet<double>("px", dim, hdf5_chunk());
       particles.createDataSet<double>("py", dim, hdf5_chunk());
       particles.createDataSet<double>("pz", dim, hdf5_chunk());
       particles.createDataSet<double>("e", dim, hdf5_chunk());
       particles.createDataSet<double>("m", dim, hdf5_chunk());
       particles.createDataSet<double>("lifetime", dim, hdf5_chunk());
       particles.createDataSet<double>("spin", dim, hdf5_chunk());
     }
 
     void resize_particle_group(size_t new_size) {
       auto particles = file.getGroup("particle");
       particles.getDataSet("id").resize({new_size});
       particles.getDataSet("status").resize({new_size});
       particles.getDataSet("mother1").resize({new_size});
       particles.getDataSet("mother2").resize({new_size});
       particles.getDataSet("color1").resize({new_size});
       particles.getDataSet("color2").resize({new_size});
       particles.getDataSet("px").resize({new_size});
       particles.getDataSet("py").resize({new_size});
       particles.getDataSet("pz").resize({new_size});
       particles.getDataSet("e").resize({new_size});
       particles.getDataSet("m").resize({new_size});
       particles.getDataSet("lifetime").resize({new_size});
       particles.getDataSet("spin").resize({new_size});
     }
 
     void write(Event const & ev){
       cache.fill(ev);
       if(cache.is_full()) {
         dump_cache();
       }
       const double wt = ev.central().weight;
       const size_t idx = to_index(ev.type());
       heprup.XSECUP[idx] += wt;
       heprup.XERRUP[idx] += wt*wt;
       if(wt > heprup.XMAXUP[idx]){
         heprup.XMAXUP[idx] = wt;
       }
     }
 
     void dump_cache() {
       write_event_params();
       write_event_particles();
       cache.clear();
     }
 
     void write_event_params() {
       auto events = file.getGroup("event");
       // choose arbitrary dataset to find size
       const auto dataset = events.getDataSet("nparticles");
       const size_t size = dataset.getSpace().getDimensions().front();
       resize_event_group(size + cache.nparticles.size());
 
 #define WRITE_FROM_CACHE(GROUP, PROPERTY)                               \
       GROUP.getDataSet(#PROPERTY).select({size}, {cache.PROPERTY.size()}).write(cache.PROPERTY)
 
       WRITE_FROM_CACHE(events, nparticles);
       WRITE_FROM_CACHE(events, start);
       WRITE_FROM_CACHE(events, pid);
       WRITE_FROM_CACHE(events, weight);
       WRITE_FROM_CACHE(events, scale);
       WRITE_FROM_CACHE(events, fscale);
       WRITE_FROM_CACHE(events, rscale);
       WRITE_FROM_CACHE(events, aqed);
       WRITE_FROM_CACHE(events, aqcd);
       WRITE_FROM_CACHE(events, trials);
+      WRITE_FROM_CACHE(events, npLO);
+      WRITE_FROM_CACHE(events, npNLO);
     }
 
     void write_event_particles() {
       auto particles = file.getGroup("particle");
       // choose arbitrary dataset to find size
       const auto dataset = particles.getDataSet("id");
       const size_t size = dataset.getSpace().getDimensions().front();
       resize_particle_group(size + cache.id.size());
       WRITE_FROM_CACHE(particles, id);
       WRITE_FROM_CACHE(particles, status);
       WRITE_FROM_CACHE(particles, lifetime);
       WRITE_FROM_CACHE(particles, spin);
       WRITE_FROM_CACHE(particles, mother1);
       WRITE_FROM_CACHE(particles, mother2);
       WRITE_FROM_CACHE(particles, color1);
       WRITE_FROM_CACHE(particles, color2);
       WRITE_FROM_CACHE(particles, px);
       WRITE_FROM_CACHE(particles, py);
       WRITE_FROM_CACHE(particles, pz);
       WRITE_FROM_CACHE(particles, e);
       WRITE_FROM_CACHE(particles, m);
     }
 #undef WRITE_FROM_CACHE
 
     ~HDF5WriterImpl(){
       dump_cache();
       auto proc_info = file.getGroup("procInfo");
       write_dataset(proc_info, "xSection", heprup.XSECUP);
       write_dataset(proc_info, "error", heprup.XERRUP);
       write_dataset(proc_info, "unitWeight", heprup.XMAXUP);
     }
 
   };
 
   HDF5Writer::HDF5Writer(std::string const & filename, LHEF::HEPRUP heprup):
     impl_{new HDF5Writer::HDF5WriterImpl{filename, std::move(heprup)}}
   {}
 
   void HDF5Writer::write(Event const & ev){
     impl_->write(ev);
   }
 }
 
 #else // no HDF5 support
 
 namespace HEJ{
 
   class HDF5Writer::HDF5WriterImpl{};
 
   HDF5Writer::HDF5Writer(std::string const &, LHEF::HEPRUP){
     throw std::invalid_argument{
       "Failed to create HDF5 writer: "
       "HEJ 2 was built without HDF5 support"
     };
   }
 
   void HDF5Writer::write(Event const &){
     assert(false);
   }
 
 }
 
 #endif
 
 namespace HEJ {
   HDF5Writer::~HDF5Writer() = default;
 }
diff --git a/t/CMakeLists.txt b/t/CMakeLists.txt
index b39b8e7..f72d945 100644
--- a/t/CMakeLists.txt
+++ b/t/CMakeLists.txt
@@ -1,453 +1,453 @@
 set(tst_dir "${CMAKE_CURRENT_SOURCE_DIR}")
 set(tst_ME_data_dir "${tst_dir}/ME_data")
 
 # small library for common test functions
 add_library(hej_test SHARED hej_test.cc)
 target_include_directories(hej_test PUBLIC ${tst_dir})
 target_link_libraries(hej_test HEJ)
 # test event classification
 # test explicit configurations
 add_executable(test_classify ${tst_dir}/test_classify.cc)
 target_compile_options(test_classify PRIVATE "-O0") # avoid compiler optimisation
 target_link_libraries(test_classify HEJ hej_test)
 add_test(
-  NAME t_classify
+  NAME classify
   COMMAND test_classify
   )
 # test against reference data
 add_executable(test_classify_ref ${tst_dir}/test_classify_ref.cc)
 target_link_libraries(test_classify_ref HEJ hej_test)
 add_test(
-  NAME t_classify_ref
+  NAME classify_ref
   COMMAND test_classify_ref ${tst_dir}/classify_ref ${tst_dir}/classify.lhe.gz
   )
 add_test(
-  NAME t_classify_ref_4j
+  NAME classify_ref_4j
   COMMAND test_classify_ref ${tst_dir}/classify_ref_4j ${tst_dir}/4j.lhe.gz
   )
 add_test(
-  NAME t_classify_ref_W4j
+  NAME classify_ref_W4j
   COMMAND test_classify_ref
           ${tst_dir}/classify_ref_W4j
           ${tst_dir}/MGemnubar4j_unweighted.lhe.gz
   )
 
 # test for valid W decays
 add_executable(test_decay ${tst_dir}/test_decay.cc)
 target_link_libraries(test_decay HEJ hej_test)
 add_test(
-  NAME t_valid_decay
+  NAME valid_decay
   COMMAND test_decay
   )
 
 # test valid jet cuts on tagging jets
 add_executable(test_jet_cuts ${tst_dir}/test_jet_cuts.cc)
 target_link_libraries(test_jet_cuts HEJ hej_test)
 add_test(
-  NAME t_jet_cuts
+  NAME jet_cuts
   COMMAND test_jet_cuts
   )
 
 # test phase space point
 add_executable(test_psp ${tst_dir}/test_psp.cc)
 target_link_libraries(test_psp HEJ hej_test)
 add_test(
-  NAME t_psp
+  NAME PhaseSpace
   COMMAND test_psp ${tst_dir}/psp_gen.lhe.gz
   )
 
 # test importing analyses
 file(COPY "${tst_dir}/analysis_config.yml" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}")
 set(test_config "${CMAKE_CURRENT_BINARY_DIR}/analysis_config.yml")
 
 get_target_property(ANALYSIS_PATH AnalysisTemplate_lib BINARY_DIR)
 get_target_property(ANALYSIS_LIB AnalysisTemplate_lib OUTPUT_NAME)
 set(ANALYSIS_PARAMETERS "")
 file(READ ${test_config} config)
 file(WRITE ${test_config} "${config}  - plugin: ${ANALYSIS_PATH}/lib${ANALYSIS_LIB}.so\n  ${ANALYSIS_PARAMETERS}\n")
 configure_file( ${tst_dir}/analysis_config_single.yml.in
                 analysis_config_simple.yml  @ONLY )
 add_test(
-  NAME t_analysis_simple
+  NAME analysis_simple
   COMMAND $<TARGET_FILE:HEJ_main>
           analysis_config_simple.yml
           ${tst_dir}/2j.lhe.gz
 )
 
 get_target_property(ANALYSIS_PATH AnalysisPrint_lib BINARY_DIR)
 get_target_property(ANALYSIS_LIB AnalysisPrint_lib OUTPUT_NAME)
 set(ANALYSIS_PARAMETERS "  output: ana_output")
 file(READ ${test_config} config)
 file(WRITE ${test_config} "${config}  - plugin: ${ANALYSIS_PATH}/lib${ANALYSIS_LIB}.so\n  ${ANALYSIS_PARAMETERS}\n")
 configure_file( ${tst_dir}/analysis_config_single.yml.in
                 analysis_config_print.yml  @ONLY )
 add_test(
-  NAME t_analysis_print
+  NAME analysis_print
   COMMAND $<TARGET_FILE:HEJ_main>
           analysis_config_print.yml
           ${tst_dir}/2j.lhe.gz
 )
 
 if(ROOT_FOUND)
   get_target_property(ANALYSIS_PATH AnalysisROOT_lib BINARY_DIR)
   get_target_property(ANALYSIS_LIB AnalysisROOT_lib OUTPUT_NAME)
   set(ANALYSIS_PARAMETERS "")
   file(READ ${test_config} config)
   file(WRITE ${test_config} "${config}  - plugin: ${ANALYSIS_PATH}/lib${ANALYSIS_LIB}.so\n  ${ANALYSIS_PARAMETERS}\n")
   configure_file( ${tst_dir}/analysis_config_single.yml.in
                   analysis_config_root.yml  @ONLY )
   add_test(
-    NAME t_analysis_root
+    NAME analysis_root
     COMMAND $<TARGET_FILE:HEJ_main>
             analysis_config_root.yml
             ${tst_dir}/2j.lhe.gz
   )
 endif()
 if(RIVET_FOUND)
   file(READ ${test_config} config)
   file(WRITE ${test_config} "${config}  - rivet: MC_XS\n    output: ana_rivet\n")
 endif()
 
 add_test(
-  NAME t_analysis_all
+  NAME analysis_all
   COMMAND $<TARGET_FILE:HEJ_main>
           ${test_config}
           ${tst_dir}/2j.lhe.gz
 )
 
 # test importing scales (from examples/softestptScale)
 add_executable(test_scale_import ${tst_dir}/test_scale_import)
 target_link_libraries(test_scale_import HEJ)
 
 get_target_property(SCALE_PATH softestptScale_lib BINARY_DIR)
 get_target_property(SCALE_LIB softestptScale_lib OUTPUT_NAME)
 set(SCALE_NAME "softest_jet_pt")
 configure_file( ${tst_dir}/jet_config_with_import.yml.in
                 jet_config_with_import.yml  @ONLY )
 
 add_test(
-  NAME t_scale_import
+  NAME scale_import
   COMMAND test_scale_import jet_config_with_import.yml
 )
 
 # test scale arithmetic (e.g. 2*H_T/4)
 add_executable(test_scale_arithmetics ${tst_dir}/test_scale_arithmetics)
 target_link_libraries(test_scale_arithmetics HEJ hej_test)
 add_test(
-  NAME t_scale_arithmetics
+  NAME scale_arithmetics
   COMMAND test_scale_arithmetics ${tst_dir}/jet_config.yml ${tst_dir}/2j.lhe.gz
   )
 
 # test "ParameterDescription"
 add_executable(test_descriptions ${tst_dir}/test_descriptions)
 target_link_libraries(test_descriptions HEJ hej_test)
 add_test(
-  NAME t_descriptions
+  NAME descriptions
   COMMAND test_descriptions
   )
 
 # test "EventParameters*Weight"
 add_executable(test_parameters ${tst_dir}/test_parameters)
 target_link_libraries(test_parameters HEJ hej_test)
 add_test(
-  NAME test_parameters
+  NAME parameters
   COMMAND test_parameters
   )
 
 # test unweighting
 add_executable(test_unweighter ${tst_dir}/test_unweighter)
 target_link_libraries(test_unweighter HEJ hej_test)
 add_test(
-  NAME test_unweighter
+  NAME unweighter
   COMMAND test_unweighter ${tst_dir}/4j.lhe.gz
   )
 
 # test colour generation
 add_executable(test_colours ${tst_dir}/test_colours)
 target_link_libraries(test_colours HEJ hej_test)
 add_test(
-  NAME t_colour_flow
+  NAME colour_flow
   COMMAND test_colours
   )
 
 # test matrix elements
 add_executable(test_ME_generic ${tst_dir}/test_ME_generic.cc)
 target_link_libraries(test_ME_generic HEJ hej_test)
 add_test(
-  NAME t_ME_j
+  NAME ME_j
   COMMAND test_ME_generic ${tst_ME_data_dir}/config_mtinf.yml
           ${tst_ME_data_dir}/ME_jets_tree.dat ${tst_ME_data_dir}/PSP_jets.lhe.gz
   )
 add_test(
-  NAME t_ME_j_virt
+  NAME ME_j_virt
   COMMAND test_ME_generic ${tst_ME_data_dir}/config_mtinf.yml
           ${tst_ME_data_dir}/ME_jets_tree_virt.dat ${tst_ME_data_dir}/PSP_jets.lhe.gz
   )
 add_test(
-  NAME t_ME_h
+  NAME ME_h
   COMMAND test_ME_generic ${tst_ME_data_dir}/config_mtinf.yml
           ${tst_ME_data_dir}/ME_h_mtinf_tree.dat ${tst_ME_data_dir}/PSP_h.lhe.gz
   )
 add_test(
-  NAME t_ME_h_virt
+  NAME ME_h_virt
   COMMAND test_ME_generic ${tst_ME_data_dir}/config_mtinf.yml
           ${tst_ME_data_dir}/ME_h_mtinf_virt.dat ${tst_ME_data_dir}/PSP_h.lhe.gz
   )
 if(QCDloop_FOUND)
   add_test(
-    NAME t_ME_h_mt
+    NAME ME_h_mt
     COMMAND test_ME_generic ${tst_ME_data_dir}/config_mt.yml
             ${tst_ME_data_dir}/ME_h_mt_tree.dat ${tst_ME_data_dir}/PSP_h.lhe.gz
     )
   add_test(
-    NAME t_ME_h_mtmb
+    NAME ME_h_mtmb
     COMMAND test_ME_generic ${tst_ME_data_dir}/config_mtmb.yml
             ${tst_ME_data_dir}/ME_h_mtmb_tree.dat ${tst_ME_data_dir}/PSP_h.lhe.gz
     )
 endif()
 add_test(
-  NAME t_ME_j_subl
+  NAME ME_j_subl
   COMMAND test_ME_generic ${tst_ME_data_dir}/config_pure.yml
           ${tst_ME_data_dir}/ME_jets_subl_tree.dat ${tst_ME_data_dir}/PSP_jets_subl.lhe.gz
   )
 add_test(
-  NAME t_ME_j_subl_virt
+  NAME ME_j_subl_virt
   COMMAND test_ME_generic ${tst_ME_data_dir}/config_pure.yml
           ${tst_ME_data_dir}/ME_jets_subl_tree_virt.dat ${tst_ME_data_dir}/PSP_jets_subl.lhe.gz
   )
 add_test(
-  NAME t_ME_j_subl_new
+  NAME ME_j_subl_new
   COMMAND test_ME_generic ${tst_ME_data_dir}/config_pure.yml
           ${tst_ME_data_dir}/ME_jets_subl_tree_new.dat ${tst_dir}/4j.lhe.gz
   )
 add_test(
-  NAME t_ME_j_subl_new_virt
+  NAME ME_j_subl_new_virt
   COMMAND test_ME_generic ${tst_ME_data_dir}/config_pure.yml
           ${tst_ME_data_dir}/ME_jets_subl_tree_new_virt.dat ${tst_dir}/4j.lhe.gz
   )
 add_test(
-  NAME t_ME_w_FKL
+  NAME ME_w_FKL
   COMMAND test_ME_generic ${tst_ME_data_dir}/config_w_ME.yml
           ${tst_ME_data_dir}/ME_w_FKL_tree.dat ${tst_ME_data_dir}/PSP_w_FKL.lhe.gz
   )
 add_test(
-  NAME t_ME_w_FKL_virt
+  NAME ME_w_FKL_virt
   COMMAND test_ME_generic ${tst_ME_data_dir}/config_w_ME.yml
           ${tst_ME_data_dir}/ME_w_FKL_virt.dat ${tst_ME_data_dir}/PSP_w_FKL.lhe.gz
   )
 add_test(
-  NAME t_ME_Wp
+  NAME ME_Wp
   COMMAND test_ME_generic ${tst_ME_data_dir}/config_w_ME.yml
           ${tst_ME_data_dir}/ME_Wp.dat ${tst_ME_data_dir}/PSP_Wp.lhe.gz
   )
 add_test(
-  NAME t_ME_Wp_virt
+  NAME ME_Wp_virt
   COMMAND test_ME_generic ${tst_ME_data_dir}/config_w_ME.yml
           ${tst_ME_data_dir}/ME_Wp_virt.dat ${tst_ME_data_dir}/PSP_Wp.lhe.gz
   )
 add_test(
-  NAME t_ME_Wm
+  NAME ME_Wm
   COMMAND test_ME_generic ${tst_ME_data_dir}/config_w_ME.yml
           ${tst_ME_data_dir}/ME_Wm.dat ${tst_ME_data_dir}/PSP_Wm.lhe.gz
   )
 add_test(
-  NAME t_ME_Wm_virt
+  NAME ME_Wm_virt
   COMMAND test_ME_generic ${tst_ME_data_dir}/config_w_ME.yml
           ${tst_ME_data_dir}/ME_Wm_virt.dat ${tst_ME_data_dir}/PSP_Wm.lhe.gz
   )
 
 # test main executable
 file(COPY "${tst_dir}/jet_config.yml" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}")
 set(test_config "${CMAKE_CURRENT_BINARY_DIR}/jet_config.yml")
 if(HighFive_FOUND)
   file(READ ${test_config} config)
   file(WRITE ${test_config} "${config}  - tst.hdf5\n")
 endif()
 if(HepMC3_FOUND)
   file(READ ${test_config} config)
   file(WRITE ${test_config} "${config}  - tst.hepmc\n")
 endif()
 if(HepMC_FOUND)
   file(READ ${test_config} config)
   file(WRITE ${test_config} "${config}  - tst.hepmc2\n")
 endif()
 if(rivet_FOUND)
   file(READ ${test_config} config)
   file(WRITE ${test_config} "${config}\nanalysis:\n  rivet: MC_XS\n  output: tst\n")
 endif()
 set(test_cmd_main "$<TARGET_FILE:HEJ_main>\\\;${test_config}\\\;${tst_dir}/2j.lhe.gz")
 # check that HepMC3 output is correct
 if(HepMC3_FOUND)
   add_executable(check_hepmc ${tst_dir}/check_hepmc.cc)
   target_link_libraries(check_hepmc HEJ ${HEPMC3_LIBRARIES})
   target_include_directories(check_hepmc PRIVATE ${HEPMC3_INCLUDE_DIR})
   set(test_cmd_hepmc "$<TARGET_FILE:check_hepmc>\\\;tst.hepmc")
 else()
   set(test_cmd_hepmc "")
 endif()
 # check that LHEF output is correct
 add_executable(check_lhe ${tst_dir}/check_lhe.cc)
 target_link_libraries(check_lhe HEJ hej_test)
 set(test_cmd_lhe "$<TARGET_FILE:check_lhe>\\\;tst.lhe")
 # check that rivet interface is consistent with naive rivet
 if(rivet_FOUND)
   # this assumes "rivet" and "yodadiff" are found in PATH
   if(rivet_USE_HEPMC3)
     set(hepmc_file "tst.hepmc")
   else()
     set(hepmc_file "tst.hepmc2")
   endif()
   if(rivet_USE_HEPMC3 OR (rivet_VERSION VERSION_LESS 3))
     set(histo_exclude "")
   else()
     # rivet 3 with HepMC 2 is inconsistent in order of weights
     # -> interface != direct call (by permutation)
     # REQUIRES Yoda 1.7.5
     set(histo_exclude "-M\\\;\\\\d")
   endif()
   set(test_cmd_rivet "rivet\\\;-a\\\;MC_XS\\\;${hepmc_file}\\\;-o\\\;tst_direct.yoda\
 \;yodadiff\\\;${histo_exclude}\\\;tst.yoda\\\;tst_direct.yoda")
 else()
   set(test_cmd_rivet "")
 endif()
 # Run dependent tests in one command to ensure correct execution order
 # Note: The commands are concatenated with "\;" to escape CMake lists.
 #       Thus arguments have to be escaped twice "\\\;".
 #       e.g. "cmd1\;cmd2\\\;arg1\\\;arg2" is executed like "cmd1 && cmd2 arg1 arg2"
 add_test(
-  NAME t_main
+  NAME main
   COMMAND ${CMAKE_COMMAND}
     -DCMDS=${test_cmd_main}\;${test_cmd_hepmc}\;${test_cmd_lhe}\;${test_cmd_rivet}
     -P ${PROJECT_SOURCE_DIR}/cmake/run_multiple_tests.cmake
   )
 # check that Sherpa's LHE input can be read
 add_executable(check_lhe_sherpa ${tst_dir}/check_lhe_sherpa.cc)
 target_link_libraries(check_lhe_sherpa HEJ hej_test)
 add_test(
-  NAME t_sherpa_reader
+  NAME sherpa_reader
   COMMAND check_lhe_sherpa ${tst_dir}/SherpaLHE.lhe 1.62624e+08
   )
 # check HDF5 reader & writer
 if(HighFive_FOUND)
   add_executable(test_hdf5 ${tst_dir}/test_hdf5.cc)
   target_link_libraries(test_hdf5 HEJ)
   add_test(
-    NAME t_hdf5
+    NAME hdf5_read
     COMMAND test_hdf5 ${tst_dir}/Wm9-g4-repack.hdf5
     )
   add_executable(test_hdf5_write ${tst_dir}/test_hdf5_write.cc)
-  target_link_libraries(test_hdf5_write HEJ)
+  target_link_libraries(test_hdf5_write HEJ hej_test)
   add_test(
-    NAME t_hdf5_write
-    COMMAND test_hdf5_write ${tst_dir}/Wm9-g4-repack.hdf5
+    NAME hdf5_write
+    COMMAND test_hdf5_write ${tst_dir}/Wm9-g4-repack.hdf5 output.hdf5
     )
 endif()
 # check rivet interface
 if(RIVET_FOUND)
   add_executable(check_rivet ${tst_dir}/check_rivet.cc)
   target_link_libraries(check_rivet HEJ rivet::rivet)
   add_test(
-    NAME t_rivet
+    NAME rivet
     COMMAND check_rivet
     )
 endif()
 
 # test boson reconstruction
 add_executable(cmp_events ${tst_dir}/cmp_events.cc)
 target_link_libraries(cmp_events HEJ)
 add_test(
-  NAME t_epnu_2j_noW
+  NAME reconstruct_W
   COMMAND cmp_events ${tst_dir}/epnu2jLOFKL_unweight.lhe.tar.gz
           ${tst_dir}/epnu2jLOFKL_unweight_noW.lhe.gz
   )
 
 # test resummed result
 add_executable(check_res ${tst_dir}/check_res.cc)
 target_link_libraries(check_res HEJ hej_test)
 
 if(TEST_ALL) # deactivate long tests by default
   add_test(
-    NAME t_2j
+    NAME xs_2j
     COMMAND check_res ${tst_dir}/2j.lhe.gz 3.49391e+07 419684
     )
   add_test(
-    NAME t_3j
+    NAME xs_3j
     COMMAND check_res ${tst_dir}/3j.lhe.gz  2.37902e+06 25746.6
     )
   add_test(
-    NAME t_3j_unof
+    NAME xs_3j_unof
     COMMAND check_res ${tst_dir}/3j.lhe.gz  133399 4688.83 unof
     )
   add_test(
-    NAME t_3j_unob
+    NAME xs_3j_unob
     COMMAND check_res ${tst_dir}/3j.lhe.gz  105247 3449.45 unob
     )
   add_test(
-    NAME t_3j_splitf
+    NAME xs_3j_splitf
     COMMAND check_res ${tst_dir}/3j.lhe.gz  97659.9 2748.65 splitf
     )
   add_test(
-    NAME t_3j_splitb
+    NAME xs_3j_splitb
     COMMAND check_res ${tst_dir}/3j.lhe.gz  107150 2799.8 splitb
     )
   add_test(
-    NAME t_4j
+    NAME xs_4j
     COMMAND check_res ${tst_dir}/4j.lhe.gz  603713 72822.6
     )
   add_test(
-    NAME t_4j_qqxmid
+    NAME xs_4j_qqxmid
     COMMAND check_res ${tst_dir}/4j.lhe.gz  21866.7 1716.96 qqxmid
     )
   add_test(
-    NAME t_h_3j
+    NAME xs_h_3j
     COMMAND check_res ${tst_dir}/h_3j.lhe.gz 0.821622 0.0220334
     )
   add_test(
-    NAME t_h_3j_unof
+    NAME xs_h_3j_unof
     COMMAND check_res ${tst_dir}/h_3j_uno.lhe.gz 0.0127362 0.000271555 unof
     )
   add_test(
-    NAME t_h_3j_unob
+    NAME xs_h_3j_unob
     COMMAND check_res ${tst_dir}/h_3j_uno.lhe.gz 0.0130615 0.000224793 unob
     )
   add_test(
-    NAME t_epnu_2j
+    NAME xs_epnu_2j
     COMMAND check_res ${tst_dir}/epnu2jLOFKL_unweight.lhe.tar.gz 262.7 3
     )
   add_test(
-    NAME t_MGepnu_3j
+    NAME xs_epnu_3j
     COMMAND check_res ${tst_dir}/MGepnu3j_unweighted.lhe.gz 38.9512 1
     )
   add_test(
-    NAME t_MGemnubar_3j
+    NAME xs_emnubar_3j
     COMMAND check_res ${tst_dir}/MGemnubar3j_unweighted.lhe.gz 24.1575 1
     )
   add_test(
-    NAME t_MGepnu_3j_unof
+    NAME xs_epnu_3j_unof
     COMMAND check_res ${tst_dir}/MGepnu3j_unweighted.lhe.gz 9.63702 0.128355 unof
     )
   add_test(
-    NAME t_MGepnu_3j_unob
+    NAME xs_epnu_3j_unob
     COMMAND check_res ${tst_dir}/MGepnu3j_unweighted.lhe.gz 9.70119 0.108436 unob
     )
   add_test(
-    NAME t_MGepnu_3j_splitf
+    NAME xs_epnu_3j_splitf
     COMMAND check_res ${tst_dir}/MGepnu3j_unweighted.lhe.gz 2.91995 0.0463182 splitf
     )
   add_test(
-    NAME t_MGepnu_3j_splitb
+    NAME xs_epnu_3j_splitb
     COMMAND check_res ${tst_dir}/MGepnu3j_unweighted.lhe.gz 3.40708 0.0550975 splitb
     )
   add_test(
-    NAME t_MGepnu_4j
+    NAME xs_epnu_4j
     COMMAND check_res ${tst_dir}/MGepnu4j_unweighted.lhe.gz 10.2542 0.135106
     )
   add_test(
-    NAME t_MGemnubar_4j
+    NAME xs_emnubar_4j
     COMMAND check_res ${tst_dir}/MGemnubar4j_unweighted.lhe.gz 5.57909 0.0300496
     )
   add_test(
-    NAME t_MGepnu_4j_qqxmid
+    NAME xs_epnu_4j_qqxmid
     COMMAND check_res ${tst_dir}/MGepnu4j_unweighted.lhe.gz 0.732084 0.005 qqxmid
     )
 endif()
diff --git a/t/check_lhe.cc b/t/check_lhe.cc
index 43a637b..837fefe 100644
--- a/t/check_lhe.cc
+++ b/t/check_lhe.cc
@@ -1,64 +1,65 @@
 /**
  *  \authors   The HEJ collaboration (see AUTHORS for details)
  *  \date      2019
  *  \copyright GPLv2 or later
  */
 #include <iostream>
 #include <unordered_map>
 
 #include "HEJ/event_types.hh"
 #include "HEJ/EventReader.hh"
 
 #include "hej_test.hh"
 
 namespace {
   static constexpr double ep = 1e-3;
   const fastjet::JetDefinition jet_def{fastjet::kt_algorithm, 0.4};
   constexpr double min_jet_pt = 30;
 }
 
 
 int main(int argn, char** argv) {
   if(argn != 2){
     std::cerr << "Usage: " << argv[0] << " lhe_file\n";
     return EXIT_FAILURE;
   }
 
   auto reader{ HEJ::make_reader(argv[1]) };
 
   std::unordered_map<int, double> xsec_ref;
   for(int i=0; i < reader->heprup().NPRUP; ++i)
       xsec_ref[reader->heprup().LPRUP[i]] = 0.;
 
   while(reader->read_event()){
-    ASSERT(reader->hepeup().NUP > 2); // at least 3 particles (2 in + 1 out)
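+    // use one reference to the current HEPEUP instead of repeated reader->hepeup() calls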
+    auto const & hepeup = reader->hepeup();
+    ASSERT(hepeup.NUP > 2); // at least 3 particles (2 in + 1 out)
     // first incoming has positive pz
-    ASSERT(reader->hepeup().PUP[0][2] > reader->hepeup().PUP[1][2]);
+    ASSERT(hepeup.PUP[0][2] > hepeup.PUP[1][2]);
     // test that we can transform IDPRUP to event type
-    (void) name(static_cast<HEJ::event_type::EventType>(reader->hepeup().IDPRUP));
-    xsec_ref[reader->hepeup().IDPRUP] += reader->hepeup().weight();
+    (void) name(static_cast<HEJ::event_type::EventType>(hepeup.IDPRUP));
+    xsec_ref[hepeup.IDPRUP] += hepeup.weight();
     // test that a HEJ event can be transformed back to the original HEPEUP
-    auto hej_event = HEJ::Event::EventData(reader->hepeup()).cluster(jet_def, min_jet_pt);
+    auto hej_event = HEJ::Event::EventData(hepeup).cluster(jet_def, min_jet_pt);
     // there are two different weight infos, which should be the same
-    ASSERT(hej_event.central().weight == reader->hepeup().weight());
-    ASSERT(hej_event.central().weight == reader->hepeup().XWGTUP);
+    ASSERT(hej_event.central().weight == hepeup.weight());
+    ASSERT(hej_event.central().weight == hepeup.XWGTUP);
     // reader->heprup() is const, so we can't use it to create a hepeup
     auto cp_heprup = reader->heprup();
     auto new_event = HEJ::to_HEPEUP(hej_event, &cp_heprup);
-    ASSERT(new_event.weight() == reader->hepeup().weight());
-    ASSERT(new_event.XWGTUP == reader->hepeup().XWGTUP);
-    ASSERT(new_event.SCALUP == reader->hepeup().SCALUP);
-    ASSERT(new_event.NUP == reader->hepeup().NUP);
+    ASSERT_PROPERTY(new_event, hepeup, weight());
+    ASSERT_PROPERTY(new_event, hepeup, XWGTUP);
+    ASSERT_PROPERTY(new_event, hepeup, SCALUP);
+    ASSERT_PROPERTY(new_event, hepeup, NUP);
   }
 
   for(size_t i = 0; i < xsec_ref.size(); ++i){
     double const ref = xsec_ref[reader->heprup().LPRUP[i]];
     double const calc = reader->heprup().XSECUP[i];
     std::cout << ref << '\t' << calc << '\n';
     if(std::abs(calc-ref) > ep*calc){
       std::cerr << "Cross sections deviate substantially";
       return EXIT_FAILURE;
     }
   }
   return EXIT_SUCCESS;
 }
diff --git a/t/hej_test.hh b/t/hej_test.hh
index dd23615..b3c83e6 100644
--- a/t/hej_test.hh
+++ b/t/hej_test.hh
@@ -1,38 +1,42 @@
 /**
  *  \authors   The HEJ collaboration (see AUTHORS for details)
  *  \date      2019
  *  \copyright GPLv2 or later
  */
 #pragma once
 
 #include "HEJ/Event.hh"
 #include "HEJ/exceptions.hh"
 
+//! throw an error if the condition is not fulfilled
 #define ASSERT(x) if(!(x)) { \
     throw std::logic_error("Assertion '" #x "' failed."); \
   }
 
+//! throw an error if prop differs between ev1 and ev2
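+//! e.g. ASSERT_PROPERTY(ev1, ev2, central().weight) expands to
+//! ASSERT(ev1.central().weight == ev2.central().weight)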
+#define ASSERT_PROPERTY(ev1,ev2,prop) ASSERT(ev1.prop == ev2.prop)
+
 /** @brief get specific Phase Space Points for njets with boson at pos_boson
  *
  * if pos_boson = -1 (or not implemented) -> no boson
  *
  * njet==7 is special: has fewer jets, i.e. multiple partons in one jet,
  *                     all partons are massive (4 GeV) -> can be boson/decay
  *                     pos_boson < 0 to select process (see list for details)
  */
 HEJ::Event::EventData get_process(int njet, int pos_boson);
 
 //! select process from string input (see also get_process)
 //!
 //! overwrite_boson to force a specific boson position, independent of the input
 //! (useful for njet == 7)
 HEJ::Event::EventData parse_configuration(
     std::array<std::string,2> const & in, std::vector<std::string> const & out,
     int overwrite_boson = 0
 );
 
 //! shuffle particles around
 void shuffle_particles(HEJ::Event::EventData & ev);
 
 //! Decay W boson to lepton & neutrino
 std::vector<HEJ::Particle> decay_W( HEJ::Particle const & parent );
diff --git a/t/test_hdf5_write.cc b/t/test_hdf5_write.cc
index 4eb04be..1fe7ed5 100644
--- a/t/test_hdf5_write.cc
+++ b/t/test_hdf5_write.cc
@@ -1,92 +1,77 @@
 /**
  *  \authors   The HEJ collaboration (see AUTHORS for details)
  *  \date      2019
  *  \copyright GPLv2 or later
  */
 
+#include <iostream>
+#include <unistd.h>
+
 #include "HEJ/Event.hh"
 #include "HEJ/HDF5Reader.hh"
 #include "HEJ/make_writer.hh"
 #include "HEJ/utility.hh"
 
-#include <cstdio>
-#include <iostream>
-#include <unistd.h>
-
-// apparently hdf5 can't deal with the (safer) files in /proc/self/fd
-// so we use tmpnam instead and RAII to ensure it gets deleted
-class Tempfile {
-public:
-  Tempfile():
-    file_{std::tmpnam(nullptr)}
-  {}
+#include "hej_test.hh"
 
-  std::string const & name() const {
-    return file_;
-  }
-
-  ~Tempfile(){
-    unlink(file_.c_str());
-  }
-
-private:
-  std::string file_;
-};
+namespace {
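+  // clustering settings shared between writing and re-reading the events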
+  const fastjet::JetDefinition jet_def{fastjet::JetAlgorithm::antikt_algorithm, 0.4};
+  const double min_jet_pt{20.};
+}
 
 int main(int argc, char** argv) {
-  if(argc != 2) {
-    std::cerr << "Usage: " << argv[0] << " file.hdf5\n";
+  if(argc != 3) {
+    std::cerr << "Usage: " << argv[0] << " in_file.hdf5 out_file.hdf5\n";
     return EXIT_FAILURE;
   }
 
-  Tempfile file;
   std::vector<HEJ::Event> events;
   size_t max_nevent = 15299;
 
   // this scope is needed to trigger the HEJ::HDF5Writer destructor
   // don't remove it
   {
     auto reader = HEJ::make_reader(argv[1]);
 
     auto writer = HEJ::make_format_writer(
-        HEJ::FileFormat::HDF5, file.name(), reader->heprup()
+        HEJ::FileFormat::HDF5, argv[2], reader->heprup()
     );
 
     size_t nevent = 0;
     while(reader->read_event()) {
       ++nevent;
       const auto event = HEJ::Event::EventData{reader->hepeup()}.cluster(
-        fastjet::JetDefinition{fastjet::antikt_algorithm, 0.4}, 20.
+        jet_def, min_jet_pt
       );
       events.emplace_back(event);
       writer->write(event);
       if(nevent>=max_nevent)
         break;
     }
     max_nevent = std::min(nevent,max_nevent);
   }
 
-  HEJ::HDF5Reader reader{file.name()};
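+  // read back the freshly written file and compare event by event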
+  HEJ::HDF5Reader reader{argv[2]};
   size_t nevent = 0;
   for(auto const & event: events) {
     ++nevent;
     reader.read_event();
     const auto ev = HEJ::Event::EventData{reader.hepeup()}.cluster(
-      fastjet::JetDefinition{fastjet::antikt_algorithm, 0.4}, 20.
+        jet_def, min_jet_pt
     );
+    ASSERT_PROPERTY(ev, event, incoming().size());
     for(size_t i = 0; i < ev.incoming().size(); ++i) {
-      if(!HEJ::nearby(ev.incoming()[i].p, event.incoming()[i].p)) return EXIT_FAILURE;
+      ASSERT(HEJ::nearby(ev.incoming()[i].p, event.incoming()[i].p));
     }
-    if(ev.outgoing().size() != event.outgoing().size()) return EXIT_FAILURE;
+    ASSERT_PROPERTY(ev, event, outgoing().size());
     for(size_t i = 0; i < ev.outgoing().size(); ++i) {
-      if(!HEJ::nearby(ev.outgoing()[i].p, event.outgoing()[i].p)) return EXIT_FAILURE;
+      ASSERT(HEJ::nearby(ev.outgoing()[i].p, event.outgoing()[i].p));
     }
-    if(ev.decays().size() != event.decays().size()) return EXIT_FAILURE;
-    if(ev.type() != event.type()) return EXIT_FAILURE;
-    if(ev.central().weight != ev.central().weight) return EXIT_FAILURE;
+    ASSERT_PROPERTY(ev, event, decays().size());
+    ASSERT_PROPERTY(ev, event, type());
+    ASSERT_PROPERTY(ev, event, central().weight);
   }
-  if(nevent!=max_nevent)
-    return EXIT_FAILURE;
+  ASSERT(nevent == max_nevent);
 
   return EXIT_SUCCESS;
 }