diff --git a/src/Event.cc b/src/Event.cc
index e8cad36..b33c3a1 100644
--- a/src/Event.cc
+++ b/src/Event.cc
@@ -1,987 +1,987 @@
/**
* \authors The HEJ collaboration (see AUTHORS for details)
* \date 2019
* \copyright GPLv2 or later
*/
#include "HEJ/Event.hh"
#include <algorithm>
#include <assert.h>
#include <iterator>
#include <numeric>
#include <unordered_set>
#include <utility>
#include "LHEF/LHEF.h"
#include "fastjet/JetDefinition.hh"
#include "HEJ/Constants.hh"
#include "HEJ/exceptions.hh"
#include "HEJ/PDG_codes.hh"
namespace HEJ{
namespace {
constexpr int status_in = -1;
constexpr int status_decayed = 2;
constexpr int status_out = 1;
//! true if leptonic W decay
bool valid_W_decay( int const w_type, // sign of W
std::vector<Particle> const & decays
){
if(decays.size() != 2) // no 1->2 decay
return false;
const int pidsum = decays[0].type + decays[1].type;
if( std::abs(pidsum) != 1 || pidsum != w_type ) // correct charge
return false;
// leptonic decay (only check first, second follows from pidsum)
if( w_type == 1 ) // W+
return is_antilepton(decays[0]) || is_neutrino(decays[0]);
// W-
return is_lepton(decays[0]) || is_antineutrino(decays[0]);
}
/// @name helper functions to determine event type
//@{
/**
* \brief check if final state valid for HEJ
*
* check if there is at most one photon, W, H, Z in the final state
* and all the rest are quarks or gluons
*/
bool final_state_ok(Event const & ev){
std::vector<Particle> const & outgoing = ev.outgoing();
if(ev.decays().size() > 1) // at most one decay
return false;
bool has_AWZH_boson = false;
for( size_t i=0; i<outgoing.size(); ++i ){
auto const & out{ outgoing[i] };
if(is_AWZH_boson(out.type)){
// at most one boson
if(has_AWZH_boson) return false;
has_AWZH_boson = true;
// valid decay for W
if(std::abs(out.type) == ParticleID::Wp){
// exactly 1 decay of W
if( ev.decays().size() != 1 || ev.decays().cbegin()->first != i )
return false;
if( !valid_W_decay(out.type>0?+1:-1, ev.decays().cbegin()->second) )
return false;
}
}
else if(! is_parton(out.type)) return false;
}
return true;
}
/**
* returns a bitmask of all EventTypes implemented in HEJ for the given bosons
*/
size_t implemented_types(std::vector<Particle> const & bosons){
using namespace event_type;
if(bosons.empty()) return FKL | unob | unof | qqxexb | qqxexf | qqxmid;
if(bosons.size()>1) return non_resummable; // multi boson
switch (bosons[0].type) {
case ParticleID::Wp:
case ParticleID::Wm:
return FKL | unob | unof | qqxexb | qqxexf | qqxmid;
case ParticleID::h:
return FKL | unob | unof;
default:
return non_resummable;
}
}
/**
* \brief function which determines if type change is consistent with Wp emission.
* @param in incoming Particle id
* @param out outgoing Particle id
* @param qqx true if this is a qqbar current (both partons outgoing)
*
* \see is_Wm_Change
*/
bool is_Wp_Change(ParticleID in, ParticleID out, bool qqx){
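// e.g. an incoming u (2) becomes an outgoing d (1) under W+ emission;
// for a qqx current the pair d (1) + ubar (-2) is consistent with W+ emission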
if(!qqx && (in==-1 || in== 2 || in==-3 || in== 4)) return out== (in-1);
if( qqx && (in== 1 || in==-2 || in== 3 || in==-4)) return out==-(in+1);
return false;
}
/**
* \brief function which determines if type change is consistent with Wm emission.
* @param in incoming Particle id
* @param out outgoing Particle id
* @param qqx true if this is a qqbar current (both partons outgoing)
*
* Ensures that the flavour change along the quark line is possible via a
* flavour-changing Wm emission. Also allows checking of qqx currents.
*/
bool is_Wm_Change(ParticleID in, ParticleID out, bool qqx){
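// e.g. an incoming d (1) becomes an outgoing u (2) under W- emission;
// for a qqx current the pair dbar (-1) + u (2) is consistent with W- emission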
if(!qqx && (in== 1 || in==-2 || in== 3 || in==-4)) return out== (in+1);
if( qqx && (in==-1 || in== 2 || in==-3 || in== 4)) return out==-(in-1);
return false;
}
/**
* \brief checks if the particle type remains the same from incoming to outgoing
* @param in incoming Particle
* @param out outgoing Particle
* @param qqx true if this is a qqbar current (both partons outgoing)
*/
bool no_flavour_change(ParticleID in, ParticleID out, bool qqx){
const int qqxCurrent = qqx?-1:1;
if(abs(in)<=6 || in==pid::gluon) return (in==out*qqxCurrent);
else return false;
}
bool has_2_jets(Event const & event){
return event.jets().size() >= 2;
}
/**
* \brief check if we have a valid Impact factor
* @param in incoming Particle
* @param out outgoing Particle
* @param qqx true if this is a qqbar current (both partons outgoing)
* @param W_change incremented by 1 for a Wp emission, decremented by 1 for a Wm emission, unchanged otherwise
*/
bool is_valid_impact_factor(
ParticleID in, ParticleID out, bool qqx, int & W_change
){
if( no_flavour_change(in, out, qqx) ){
return true;
}
if( is_Wp_Change(in, out, qqx) ) {
W_change+=1;
return true;
}
if( is_Wm_Change(in, out, qqx) ) {
W_change-=1;
return true;
}
return false;
}
//! Returns all possible classifications from the impact factors
// the beginning points are changed s.t. after the classification they
// point to the beginning of the (potential) FKL chain
// sets W_change: + if Wp change
// 0 if no change
// - if Wm change
// This function can be used with forward & backwards iterators
template<class OutIterator>
size_t possible_impact_factors(
ParticleID incoming_id, // incoming
OutIterator & begin_out, OutIterator const & end_out, // outgoing
int & W_change, std::vector<Particle> const & boson,
bool const backward // backward?
){
using namespace event_type;
assert(boson.size() < 2);
// keep track of all states that we don't test
size_t not_tested = qqxmid;
if(backward)
not_tested |= unof | qqxexf;
else
not_tested |= unob | qqxexb;
// Is this LL current?
if( is_valid_impact_factor(incoming_id, begin_out->type, false, W_change) ){
++begin_out;
return not_tested | FKL;
}
// or NLL current?
// -> needs two partons in two different jets
if( std::distance(begin_out, end_out)>=2
){
// Is this an unordered emission?
if( incoming_id!=pid::gluon && begin_out->type==pid::gluon ){
if( is_valid_impact_factor(
incoming_id, (begin_out+1)->type, false, W_change )
){
// veto Higgs inside uno
assert((begin_out+1)<end_out);
if( !boson.empty() && boson.front().type == ParticleID::h
){
if( (backward && boson.front().rapidity() < (begin_out+1)->rapidity())
||(!backward && boson.front().rapidity() > (begin_out+1)->rapidity()))
return non_resummable;
}
begin_out+=2;
return not_tested | (backward?unob:unof);
}
}
// Is this QQbar?
else if( incoming_id==pid::gluon ){
if( is_valid_impact_factor(
begin_out->type, (begin_out+1)->type, true, W_change )
){
// veto Higgs inside qqx
assert((begin_out+1)<end_out);
if( !boson.empty() && boson.front().type == ParticleID::h
){
if( (backward && boson.front().rapidity() < (begin_out+1)->rapidity())
||(!backward && boson.front().rapidity() > (begin_out+1)->rapidity()))
return non_resummable;
}
begin_out+=2;
return not_tested | (backward?qqxexb:qqxexf);
}
}
}
return non_resummable;
}
//! Returns all possible classifications from central emissions
// the beginning points are changed s.t. after the classification they
// point to the end of the emission chain
// sets W_change: + if Wp change
// 0 if no change
// - if Wm change
template<class OutIterator>
size_t possible_central(
OutIterator & begin_out, OutIterator const & end_out,
int & W_change, std::vector<Particle> const & boson
){
using namespace event_type;
assert(boson.size() < 2);
// if we already passed the central chain,
// then it is not a valid all-order state
if(std::distance(begin_out, end_out) < 0) return non_resummable;
// keep track of all states that we don't test
size_t possible = unob | unof
| qqxexb | qqxexf;
// Find the first non-gluon/non-FKL
while( (begin_out->type==pid::gluon) && (begin_out<end_out) ){
++begin_out;
}
// end of chain -> FKL
if( begin_out==end_out ){
return possible | FKL;
}
// is this a qqbar-pair?
// needs two partons in two separate jets
if( is_valid_impact_factor(
begin_out->type, (begin_out+1)->type, true, W_change )
){
// veto Higgs inside qqx
if( !boson.empty() && boson.front().type == ParticleID::h
&& boson.front().rapidity() > begin_out->rapidity()
&& boson.front().rapidity() < (begin_out+1)->rapidity()
){
return non_resummable;
}
begin_out+=2;
// remaining chain should be pure gluon/FKL
for(; begin_out<end_out; ++begin_out){
if(begin_out->type != pid::gluon) return non_resummable;
}
return possible | qqxmid;
}
return non_resummable;
}
/**
* \brief Checks for all event types
* @param ev Event
* @returns Event Type
*
*/
event_type::EventType classify(Event const & ev){
using namespace event_type;
if(! has_2_jets(ev))
return no_2_jets;
// currently we can't handle multiple boson states in the ME. So they are
// considered "bad_final_state" even though the "classify" could work with
// them.
if(! final_state_ok(ev))
return bad_final_state;
// initialise variables
auto const & in = ev.incoming();
auto const & out = filter_partons(ev.outgoing());
assert(std::distance(begin(in), end(in)) == 2);
assert(out.size() >= 2);
assert(std::distance(begin(out), end(out)) >= 2);
assert(std::is_sorted(begin(out), end(out), rapidity_less{}));
auto const boson{ filter_AWZH_bosons(ev.outgoing()) };
// we only allow one boson through final_state_ok
assert(boson.size()<=1);
// keep track of potential W couplings, at the end the sum should be 0
int remaining_Wp = 0;
int remaining_Wm = 0;
if(!boson.empty() && abs(boson.front().type) == ParticleID::Wp ){
if(boson.front().type>0) ++remaining_Wp;
else ++remaining_Wm;
}
int W_change = 0;
// range for current checks
auto begin_out{out.cbegin()};
auto end_out{out.crbegin()};
size_t final_type = ~(no_2_jets | bad_final_state);
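// start from the set of all possible event types and narrow it down
// by intersecting with the result of each check below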
// check forward impact factor
final_type &= possible_impact_factors(
in.front().type,
begin_out, end_out.base(),
W_change, boson, true );
if( final_type == non_resummable )
return non_resummable;
if(W_change>0) remaining_Wp-=W_change;
else if(W_change<0) remaining_Wm+=W_change;
W_change = 0;
// check backward impact factor
final_type &= possible_impact_factors(
in.back().type,
end_out, std::make_reverse_iterator(begin_out),
W_change, boson, false );
if( final_type == non_resummable )
return non_resummable;
if(W_change>0) remaining_Wp-=W_change;
else if(W_change<0) remaining_Wm+=W_change;
W_change = 0;
// check central emissions
final_type &= possible_central(
begin_out, end_out.base(), W_change, boson );
if( final_type == non_resummable )
return non_resummable;
if(W_change>0) remaining_Wp-=W_change;
else if(W_change<0) remaining_Wm+=W_change;
// Check whether the right number of Ws are present
if( remaining_Wp != 0 || remaining_Wm != 0 ) return non_resummable;
// result has to be unique
if( (final_type & (final_type-1)) != 0) return non_resummable;
// check that each subprocess is implemented
// (has to be done at the end)
if( (final_type & ~implemented_types(boson)) != 0 )
return non_resummable;
return static_cast<EventType>(final_type);
}
//@}
Particle extract_particle(LHEF::HEPEUP const & hepeup, int i){
const ParticleID id = static_cast<ParticleID>(hepeup.IDUP[i]);
const fastjet::PseudoJet momentum{
hepeup.PUP[i][0], hepeup.PUP[i][1],
hepeup.PUP[i][2], hepeup.PUP[i][3]
};
if(is_parton(id))
return Particle{ id, std::move(momentum), hepeup.ICOLUP[i] };
return Particle{ id, std::move(momentum), {} };
}
bool is_decay_product(std::pair<int, int> const & mothers){
if(mothers.first == 0) return false;
return mothers.second == 0 || mothers.first == mothers.second;
}
} // namespace anonymous
Event::EventData::EventData(LHEF::HEPEUP const & hepeup){
parameters.central = EventParameters{
- hepeup.scales.mur, hepeup.scales.muf, hepeup.weight()
+ hepeup.scales.mur, hepeup.scales.muf, hepeup.XWGTUP
};
size_t in_idx = 0;
for (int i = 0; i < hepeup.NUP; ++i) {
// skip decay products
// we will add them later on, but we have to ensure that
// the decayed particle is added before
if(is_decay_product(hepeup.MOTHUP[i])) continue;
auto particle = extract_particle(hepeup, i);
// needed to identify mother particles for decay products
particle.p.set_user_index(i+1);
if(hepeup.ISTUP[i] == status_in){
if(in_idx > incoming.size()) {
throw std::invalid_argument{
"Event has too many incoming particles"
};
}
incoming[in_idx++] = std::move(particle);
}
else outgoing.emplace_back(std::move(particle));
}
// add decay products
for (int i = 0; i < hepeup.NUP; ++i) {
if(!is_decay_product(hepeup.MOTHUP[i])) continue;
const int mother_id = hepeup.MOTHUP[i].first;
const auto mother = std::find_if(
begin(outgoing), end(outgoing),
[mother_id](Particle const & particle){
return particle.p.user_index() == mother_id;
}
);
if(mother == end(outgoing)){
throw std::invalid_argument{"invalid decay product parent"};
}
const int mother_idx = std::distance(begin(outgoing), mother);
assert(mother_idx >= 0);
decays[mother_idx].emplace_back(extract_particle(hepeup, i));
}
}
Event::Event(
UnclusteredEvent const & ev,
fastjet::JetDefinition const & jet_def, double const min_jet_pt
):
Event( Event::EventData{
ev.incoming, ev.outgoing, ev.decays,
Parameters<EventParameters>{ev.central, ev.variations}
}.cluster(jet_def, min_jet_pt) )
{}
//! @TODO remove in HEJ 2.2.0
UnclusteredEvent::UnclusteredEvent(LHEF::HEPEUP const & hepeup){
Event::EventData const evData{hepeup};
incoming = evData.incoming;
outgoing = evData.outgoing;
decays = evData.decays;
central = evData.parameters.central;
variations = evData.parameters.variations;
}
void Event::EventData::sort(){
// sort particles
std::sort(
begin(incoming), end(incoming),
[](Particle o1, Particle o2){return o1.p.pz()<o2.p.pz();}
);
auto old_outgoing = std::move(outgoing);
std::vector<size_t> idx(old_outgoing.size());
std::iota(idx.begin(), idx.end(), 0);
std::sort(idx.begin(), idx.end(), [&old_outgoing](size_t i, size_t j){
return old_outgoing[i].rapidity() < old_outgoing[j].rapidity();
});
outgoing.clear();
outgoing.reserve(old_outgoing.size());
for(size_t i: idx) {
outgoing.emplace_back(std::move(old_outgoing[i]));
}
// find decays again
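// decay entries are keyed by the position in `outgoing`,
// so remap them to the new ordering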
if(!decays.empty()){
auto old_decays = std::move(decays);
decays.clear();
for(size_t i=0; i<idx.size(); ++i) {
auto decay = old_decays.find(idx[i]);
if(decay != old_decays.end())
decays.emplace(i, std::move(decay->second));
}
assert(old_decays.size() == decays.size());
}
}
namespace {
Particle reconstruct_boson(std::vector<Particle> const & leptons) {
Particle decayed_boson;
decayed_boson.p = leptons[0].p + leptons[1].p;
const int pidsum = leptons[0].type + leptons[1].type;
if(pidsum == +1) {
assert(is_antilepton(leptons[0]));
if(is_antineutrino(leptons[0])) {
throw not_implemented{"lepton-flavour violating final state"};
}
assert(is_neutrino(leptons[1]));
// charged antilepton + neutrino means we had a W+
decayed_boson.type = pid::Wp;
}
else if(pidsum == -1) {
assert(is_antilepton(leptons[0]));
if(is_neutrino(leptons[1])) {
throw not_implemented{"lepton-flavour violating final state"};
}
assert(is_antineutrino(leptons[0]));
// charged lepton + antineutrino means we had a W-
decayed_boson.type = pid::Wm;
}
else {
throw not_implemented{
"final state with leptons "
+ name(leptons[0].type)
+ " and "
+ name(leptons[1].type)
};
}
return decayed_boson;
}
}
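// move all outgoing leptons into a decay: reconstruct the decayed W boson
// from them, append it to the outgoing particles and register the leptons
// as its decay products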
void Event::EventData::reconstruct_intermediate() {
const auto begin_leptons = std::partition(
begin(outgoing), end(outgoing),
[](Particle const & p) {return !is_anylepton(p);}
);
if(begin_leptons == end(outgoing)) return;
assert(is_anylepton(*begin_leptons));
std::vector<Particle> leptons(begin_leptons, end(outgoing));
outgoing.erase(begin_leptons, end(outgoing));
if(leptons.size() != 2) {
throw not_implemented{"Final states with one or more than two leptons"};
}
std::sort(
begin(leptons), end(leptons),
[](Particle const & p0, Particle const & p1) {
return p0.type < p1.type;
}
);
outgoing.emplace_back(reconstruct_boson(leptons));
decays.emplace(outgoing.size()-1, std::move(leptons));
}
Event Event::EventData::cluster(
fastjet::JetDefinition const & jet_def, double const min_jet_pt
){
sort();
Event ev{ std::move(incoming), std::move(outgoing), std::move(decays),
std::move(parameters),
jet_def, min_jet_pt
};
assert(std::is_sorted(begin(ev.outgoing_), end(ev.outgoing_),
rapidity_less{}));
ev.type_ = classify(ev);
return ev;
}
Event::Event(
std::array<Particle, 2> && incoming,
std::vector<Particle> && outgoing,
std::unordered_map<size_t, std::vector<Particle>> && decays,
Parameters<EventParameters> && parameters,
fastjet::JetDefinition const & jet_def,
double const min_jet_pt
): incoming_{std::move(incoming)},
outgoing_{std::move(outgoing)},
decays_{std::move(decays)},
parameters_{std::move(parameters)},
cs_{ to_PseudoJet( filter_partons(outgoing_) ), jet_def },
min_jet_pt_{min_jet_pt}
{
jets_ = sorted_by_rapidity(cs_.inclusive_jets(min_jet_pt_));
}
namespace {
// check that Particles have a reasonable colour
bool correct_colour(Particle const & part){
ParticleID id{ part.type };
if(!is_parton(id))
return !part.colour;
if(!part.colour)
return false;
Colour const & col{ *part.colour };
if(is_quark(id))
return col.first != 0 && col.second == 0;
if(is_antiquark(id))
return col.first == 0 && col.second != 0;
assert(id==ParticleID::gluon);
return col.first != 0 && col.second != 0 && col.first != col.second;
}
}
bool Event::is_leading_colour() const {
if( !correct_colour(incoming()[0]) || !correct_colour(incoming()[1]) )
return false;
Colour line_colour = *incoming()[0].colour;
std::swap(line_colour.first, line_colour.second);
for(auto const & part: outgoing()){
// reasonable colour
if(!correct_colour(part))
return false;
if(!is_parton(part)) // skip colour neutral particles
continue;
// if possible connect to line
if( line_colour.first == part.colour->second )
line_colour.first = part.colour->first;
else if( line_colour.second == part.colour->first )
line_colour.second = part.colour->second;
else
return false;
// no colour singlet exchange/disconnected diagram
if(line_colour.first == line_colour.second)
return false;
}
return (incoming()[1].colour->first == line_colour.first)
&& (incoming()[1].colour->second == line_colour.second);
}
namespace {
void connect_incoming(Particle & in, int & colour, int & anti_colour){
in.colour = std::make_pair(anti_colour, colour);
// gluon
if(in.type == pid::gluon)
return;
if(in.type > 0){
// quark
assert(is_quark(in));
in.colour->second = 0;
colour*=-1;
return;
}
// anti-quark
assert(is_antiquark(in));
in.colour->first = 0;
anti_colour*=-1;
return;
}
}
bool Event::generate_colours(RNG & ran){
// generate only for HEJ events
if(!event_type::is_resummable(type()))
return false;
assert(std::is_sorted(
begin(outgoing()), end(outgoing()), rapidity_less{}));
assert(incoming()[0].pz() < incoming()[1].pz());
// positive (anti-)colour -> can connect
// negative (anti-)colour -> not available/used up by (anti-)quark
int colour = COLOUR_OFFSET;
int anti_colour = colour+1;
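// colour labels advance in steps of two, so the colour line (even offset)
// and the anti-colour line (odd offset) never clash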
// initialise first
connect_incoming(incoming_[0], colour, anti_colour);
for(auto & part: outgoing_){
assert(colour>0 || anti_colour>0);
if(part.type == ParticleID::gluon){
// gluon
if(colour>0 && anti_colour>0){
// on g line => connect to colour OR anti-colour (random)
if(ran.flat() < 0.5){
part.colour = std::make_pair(colour+2,colour);
colour+=2;
} else {
part.colour = std::make_pair(anti_colour, anti_colour+2);
anti_colour+=2;
}
} else if(colour > 0){
// on q line => connect to available colour
part.colour = std::make_pair(colour+2, colour);
colour+=2;
} else {
assert(colour<0 && anti_colour>0);
// on qx line => connect to available anti-colour
part.colour = std::make_pair(anti_colour, anti_colour+2);
anti_colour+=2;
}
} else if(is_quark(part)) {
// quark
assert(anti_colour>0);
if(colour>0){
// on g line => connect and remove anti-colour
part.colour = std::make_pair(anti_colour, 0);
anti_colour+=2;
anti_colour*=-1;
} else {
// on qx line => new colour
colour*=-1;
part.colour = std::make_pair(colour, 0);
}
} else if(is_antiquark(part)) {
// anti-quark
assert(colour>0);
if(anti_colour>0){
// on g line => connect and remove colour
part.colour = std::make_pair(0, colour);
colour+=2;
colour*=-1;
} else {
// on q line => new anti-colour
anti_colour*=-1;
part.colour = std::make_pair(0, anti_colour);
}
} else { // not a parton
assert(!is_parton(part));
part.colour = {};
}
}
// Connect last
connect_incoming(incoming_[1], anti_colour, colour);
assert(is_leading_colour());
return true;
} // generate_colours
namespace {
bool valid_parton(
std::vector<fastjet::PseudoJet> const & jets,
Particle const & parton, int const idx,
double const max_ext_soft_pt_fraction, double const min_extparton_pt
){
// TODO code overlap with PhaseSpacePoint::pass_extremal_cuts
if(min_extparton_pt > parton.pt()) return false;
if(idx<0) return false;
assert((int) jets.size()>=idx);
auto const & jet{ jets[idx] };
if( (parton.p - jet).pt()/jet.pt() > max_ext_soft_pt_fraction)
return false;
return true;
}
}
// this should work with multiple types
bool Event::valid_hej_state(double const max_frac,
double const min_pt
) const {
using namespace event_type;
if(!is_resummable(type()))
return false;
auto const & jet_idx{ particle_jet_indices() };
auto idx_begin{ jet_idx.cbegin() };
auto idx_end{ jet_idx.crbegin() };
auto part_begin{ cbegin_partons() };
auto part_end{ crbegin_partons() };
// always separate extremal jets
if( !valid_parton(jets(), *part_begin, *idx_begin, max_frac, min_pt) )
return false;
++part_begin;
++idx_begin;
if( !valid_parton(jets(), *part_end, *idx_end, max_frac, min_pt) )
return false;
++part_end;
++idx_end;
// unob/qqxexb -> second parton in its own jet
if( type() & (unob | qqxexb) ){
if( !valid_parton(jets(), *part_begin, *idx_begin, max_frac, min_pt) )
return false;
++part_begin;
++idx_begin;
}
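// unof/qqxexf -> same check for the second parton at the forward end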
if( type() & (unof | qqxexf) ){
if( !valid_parton(jets(), *part_end, *idx_end, max_frac, min_pt) )
return false;
++part_end;
++idx_end;
}
if( type() & qqxmid ){
// find qqx pair
auto begin_qqx{ std::find_if( part_begin, part_end.base(),
[](Particle const & part) -> bool {
return part.type != ParticleID::gluon;
}
)};
assert(begin_qqx != part_end.base());
long int qqx_pos{ std::distance(part_begin, begin_qqx) };
assert(qqx_pos >= 0);
idx_begin+=qqx_pos;
if( !( valid_parton(jets(),*begin_qqx, *idx_begin, max_frac,min_pt)
&& valid_parton(jets(),*(++begin_qqx),*(++idx_begin),max_frac,min_pt)
))
return false;
}
return true;
}
Event::ConstPartonIterator Event::begin_partons() const {
return cbegin_partons();
}
Event::ConstPartonIterator Event::cbegin_partons() const {
return boost::make_filter_iterator(
static_cast<bool (*)(Particle const &)>(is_parton),
cbegin(outgoing()),
cend(outgoing())
);
}
Event::ConstPartonIterator Event::end_partons() const {
return cend_partons();
}
Event::ConstPartonIterator Event::cend_partons() const {
return boost::make_filter_iterator(
static_cast<bool (*)(Particle const &)>(is_parton),
cend(outgoing()),
cend(outgoing())
);
}
Event::ConstReversePartonIterator Event::rbegin_partons() const {
return crbegin_partons();
}
Event::ConstReversePartonIterator Event::crbegin_partons() const {
return std::reverse_iterator<ConstPartonIterator>( cend_partons() );
}
Event::ConstReversePartonIterator Event::rend_partons() const {
return crend_partons();
}
Event::ConstReversePartonIterator Event::crend_partons() const {
return std::reverse_iterator<ConstPartonIterator>( cbegin_partons() );
}
namespace {
void print_momentum(std::ostream & os, fastjet::PseudoJet const & part){
const std::streamsize orig_prec = os.precision();
os <<std::scientific<<std::setprecision(6) << "["
<<std::setw(13)<<std::right<< part.px() << ", "
<<std::setw(13)<<std::right<< part.py() << ", "
<<std::setw(13)<<std::right<< part.pz() << ", "
<<std::setw(13)<<std::right<< part.E() << "]"<< std::fixed;
os.precision(orig_prec);
}
void print_colour(std::ostream & os, optional<Colour> const & col){
if(!col)
os << "(no color)"; // American spelling for better alignment
else
os << "(" <<std::setw(3)<<std::right<< col->first
<< ", " <<std::setw(3)<<std::right<< col->second << ")";
}
}
std::ostream& operator<<(std::ostream & os, Event const & ev){
const std::streamsize orig_prec = os.precision();
os <<std::setprecision(4)<<std::fixed;
os << "########## " << event_type::name(ev.type()) << " ##########" << std::endl;
os << "Incoming particles:\n";
for(auto const & in: ev.incoming()){
os <<std::setw(3)<< in.type << ": ";
print_colour(os, in.colour);
os << " ";
print_momentum(os, in.p);
os << std::endl;
}
os << "\nOutgoing particles: " << ev.outgoing().size() << "\n";
for(auto const & out: ev.outgoing()){
os <<std::setw(3)<< out.type << ": ";
print_colour(os, out.colour);
os << " ";
print_momentum(os, out.p);
os << " => rapidity="
<<std::setw(7)<<std::right<< out.rapidity() << std::endl;
}
os << "\nForming Jets: " << ev.jets().size() << "\n";
for(auto const & jet: ev.jets()){
print_momentum(os, jet);
os << " => rapidity="
<<std::setw(7)<<std::right<< jet.rapidity() << std::endl;
}
if(ev.decays().size() > 0 ){
os << "\nDecays: " << ev.decays().size() << "\n";
for(auto const & decay: ev.decays()){
os <<std::setw(3)<< ev.outgoing()[decay.first].type
<< " (" << decay.first << ") to:\n";
for(auto const & out: decay.second){
os <<" "<<std::setw(3)<< out.type << ": ";
print_momentum(os, out.p);
os << " => rapidity="
<<std::setw(7)<<std::right<< out.rapidity() << std::endl;
}
}
}
os << std::defaultfloat;
os.precision(orig_prec);
return os;
}
double shat(Event const & ev){
return (ev.incoming()[0].p + ev.incoming()[1].p).m2();
}
LHEF::HEPEUP to_HEPEUP(Event const & event, LHEF::HEPRUP * heprup){
LHEF::HEPEUP result;
result.heprup = heprup;
result.weights = {{event.central().weight, nullptr}};
for(auto const & var: event.variations()){
result.weights.emplace_back(var.weight, nullptr);
}
size_t num_particles = event.incoming().size() + event.outgoing().size();
for(auto const & decay: event.decays()) num_particles += decay.second.size();
result.NUP = num_particles;
// the following entries are pretty much meaningless
result.IDPRUP = event.type(); // event type
result.AQEDUP = 1./128.; // alpha_EW
//result.AQCDUP = 0.118 // alpha_QCD
// end meaningless part
result.XWGTUP = event.central().weight;
result.SCALUP = event.central().muf;
result.scales.muf = event.central().muf;
result.scales.mur = event.central().mur;
result.scales.SCALUP = event.central().muf;
result.pdfinfo.p1 = event.incoming().front().type;
result.pdfinfo.p2 = event.incoming().back().type;
result.pdfinfo.scale = event.central().muf;
result.IDUP.reserve(num_particles); // PID
result.ISTUP.reserve(num_particles); // status (in, out, decay)
result.PUP.reserve(num_particles); // momentum
result.MOTHUP.reserve(num_particles); // index mother particle
result.ICOLUP.reserve(num_particles); // colour
// incoming
std::array<Particle, 2> incoming{ event.incoming() };
// First incoming should be positive pz according to LHE standard
// (or at least most (everyone?) do it this way, and Pythia assumes it)
if(incoming[0].pz() < incoming[1].pz())
std::swap(incoming[0], incoming[1]);
for(Particle const & in: incoming){
result.IDUP.emplace_back(in.type);
result.ISTUP.emplace_back(status_in);
result.PUP.push_back({in.p[0], in.p[1], in.p[2], in.p[3], in.p.m()});
result.MOTHUP.emplace_back(0, 0);
assert(in.colour);
result.ICOLUP.emplace_back(*in.colour);
}
// outgoing
for(size_t i = 0; i < event.outgoing().size(); ++i){
Particle const & out = event.outgoing()[i];
result.IDUP.emplace_back(out.type);
const int status = event.decays().count(i)?status_decayed:status_out;
result.ISTUP.emplace_back(status);
result.PUP.push_back({out.p[0], out.p[1], out.p[2], out.p[3], out.p.m()});
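// LHE mother entries point to the two incoming particles (positions 1 and 2)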
result.MOTHUP.emplace_back(1, 2);
if(out.colour)
result.ICOLUP.emplace_back(*out.colour);
else{
result.ICOLUP.emplace_back(std::make_pair(0,0));
}
}
// decays
for(auto const & decay: event.decays()){
for(auto const out: decay.second){
result.IDUP.emplace_back(out.type);
result.ISTUP.emplace_back(status_out);
result.PUP.push_back({out.p[0], out.p[1], out.p[2], out.p[3], out.p.m()});
const size_t mother_idx = 1 + event.incoming().size() + decay.first;
result.MOTHUP.emplace_back(mother_idx, mother_idx);
result.ICOLUP.emplace_back(0,0);
}
}
assert(result.ICOLUP.size() == num_particles);
static constexpr double unknown_spin = 9.; //per Les Houches accord
result.VTIMUP = std::vector<double>(num_particles, unknown_spin);
result.SPINUP = result.VTIMUP;
return result;
}
}
diff --git a/src/LesHouchesReader.cc b/src/LesHouchesReader.cc
index 6fe761c..c00de59 100644
--- a/src/LesHouchesReader.cc
+++ b/src/LesHouchesReader.cc
@@ -1,27 +1,31 @@
#include "HEJ/LesHouchesReader.hh"
#include <string>
namespace HEJ{
SherpaLHEReader::SherpaLHEReader(std::string const & filename):
LesHouchesReader{filename},
num_trials{0.}, num_events{0}
{
LesHouchesReader tmp_reader{filename};
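+ // reconstruct the total cross section as the sum of event weights
+ // divided by the total number of trials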
+ reader_.heprup.XSECUP = std::vector<double>{0};
while(tmp_reader.read_event()){
++num_events;
- num_trials+=std::stod(tmp_reader.hepeup().attributes.at("trials"));
+ num_trials += std::stod(tmp_reader.hepeup().attributes.at("trials"));
+ reader_.heprup.XSECUP.front() += tmp_reader.hepeup().XWGTUP;
}
+ reader_.heprup.XSECUP.front() /= num_trials;
// For IDWTUP == 1 or 4 we assume avg(weight)=xs
// With the modified weights we have in Sherpa sum(weight)=xs
// -> overwrite IDWTUP to "something neutral"
reader_.heprup.IDWTUP = reader_.heprup.IDWTUP>0?3:-3;
}
bool SherpaLHEReader::read_event() {
if(!LesHouchesReader::read_event()) return false;
+ reader_.hepeup.XWGTUP/=num_trials;
for(auto & wt: reader_.hepeup.weights)
wt.first/=num_trials;
return true;
}
}
diff --git a/src/LesHouchesWriter.cc b/src/LesHouchesWriter.cc
index 2fae860..0b2693e 100644
--- a/src/LesHouchesWriter.cc
+++ b/src/LesHouchesWriter.cc
@@ -1,123 +1,124 @@
/**
* \authors The HEJ collaboration (see AUTHORS for details)
* \date 2019
* \copyright GPLv2 or later
*/
#include <cassert>
#include <utility>
#include <vector>
#include "HEJ/Event.hh"
#include "HEJ/event_types.hh"
#include "HEJ/LesHouchesWriter.hh"
#include "HEJ/utility.hh"
namespace HEJ{
namespace{
template<class T, class... Args>
std::unique_ptr<T> make_unique(Args&&... a){
return std::unique_ptr<T>{new T{std::forward<Args>(a)...}};
}
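// map an event-type bit (a power of two) to a consecutive index:
// 0 -> 0, 1 -> 1, 2 -> 2, 4 -> 3, ...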
size_t to_index(event_type::EventType const type){
return type==0?0:floor(log2(type))+1;
}
}
LesHouchesWriter::LesHouchesWriter(
std::string const & file, LHEF::HEPRUP heprup
):
out_{file, std::fstream::in | std::fstream::out | std::fstream::trunc},
writer_{HEJ::make_unique<LHEF::Writer>(out_)}
{
if(! out_.is_open()){
throw std::ios_base::failure("Failed to open " + file);
};
// scientific style is needed to allow rewriting the init block
out_ << std::scientific;
writer_->heprup = std::move(heprup);
// lhe Standard: IDWTUP (negative => weights = +/-)
// IDWTUP: HEJ -> SHG/Pythia/next program
// 1: weighted->unweighted, xs = mean(weight), XMAXUP given
// 2: weighted->unweighted, xs = XSECUP, XMAXUP given
// 3: unweighted (weight=+1)->unweighted, no additional information
// 4: weighted->weighted, xs = mean(weight)
//
// None of these codes actually match what we want:
// 1 and 4 require xs = mean(weight), which is impossible until after generation
// 2 tells the SHG to unweight our events, which is wasteful
// 3 claims we produce unweighted events, which is both wasteful _and_
// impossible until after generation (we don't know the maximum weight before)
//
- // For the time being, we choose 3. If the consumer (like Pythia) assumes
+ // For the time being, we choose -3. If the consumer (like Pythia) assumes
// weight=+1, the final weights have to be corrected by multiplying with
- // the original weight we provided.
+ // the original weight we provided. We also often use NLO PDFs, which can
+ // give negative weights, hence the negative IDWTUP.
//
- writer_->heprup.IDWTUP = 3;
+ writer_->heprup.IDWTUP = -3;
const int max_number_types = to_index(event_type::last_type)+1;
writer_->heprup.NPRUP = max_number_types;
// ids of event types
writer_->heprup.LPRUP.clear();
writer_->heprup.LPRUP.reserve(max_number_types);
writer_->heprup.LPRUP.emplace_back(0);
for(size_t i=event_type::first_type+1; i<=event_type::last_type; i*=2)
writer_->heprup.LPRUP.emplace_back(i);
// use placeholders for unknown init block values
// we can overwrite them after processing all events
writer_->heprup.XSECUP = std::vector<double>(max_number_types, 0.);
writer_->heprup.XERRUP = std::vector<double>(max_number_types, 0.);
writer_->heprup.XMAXUP = std::vector<double>(max_number_types, 0.);
write_init();
}
void LesHouchesWriter::write(Event const & ev){
assert(writer_ && out_.is_open());
const double wt = ev.central().weight;
writer_->hepeup = HEJ::to_HEPEUP(std::move(ev), &heprup());
writer_->writeEvent();
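// accumulate per-type totals: sum of weights (XSECUP), sum of squared
// weights (XERRUP, turned into an error in the destructor) and the
// maximum weight (XMAXUP); these overwrite the init-block placeholders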
assert(heprup().XSECUP.size() > to_index(ev.type()));
heprup().XSECUP[to_index(ev.type())] += wt;
heprup().XERRUP[to_index(ev.type())] += wt*wt;
if(wt > heprup().XMAXUP[to_index(ev.type())]){
heprup().XMAXUP[to_index(ev.type())] = wt;
}
}
// this function is called after overwriting the Les Houches init block
// assert that we have overwritten *exactly* the init block,
// i.e. we are at the end of the file or an intact event block is next
void assert_next_event_intact(std::istream & out){
(void) out; // suppress compiler warnings if not in debug mode
#ifndef NDEBUG
std::string line;
getline(out, line);
assert(out.eof() || line.rfind("<event", 0) == 0);
#endif
}
void LesHouchesWriter::rewrite_init(){
assert(writer_ && out_.is_open());
// replace placeholder entries
const auto pos = out_.tellp();
out_.seekp(0);
write_init();
assert_next_event_intact(out_);
out_.seekp(pos);
}
LesHouchesWriter::~LesHouchesWriter(){
assert(writer_ && out_.is_open());
for(auto & xs_err: heprup().XERRUP)
{
xs_err = sqrt(xs_err);
}
rewrite_init();
}
}
diff --git a/src/bin/HEJ.cc b/src/bin/HEJ.cc
index d2b8bb6..ccc20ff 100644
--- a/src/bin/HEJ.cc
+++ b/src/bin/HEJ.cc
@@ -1,369 +1,375 @@
/**
* \authors The HEJ collaboration (see AUTHORS for details)
* \date 2019
* \copyright GPLv2 or later
*/
#include <array>
#include <chrono>
#include <iostream>
#include <limits>
#include <memory>
#include <numeric>
#include "yaml-cpp/yaml.h"
#include "fastjet/ClusterSequence.hh"
#include "HEJ/CombinedEventWriter.hh"
#include "HEJ/Config.hh"
#include "HEJ/CrossSectionAccumulator.hh"
#include "HEJ/Event.hh"
#include "HEJ/EventReader.hh"
#include "HEJ/BufferedEventReader.hh"
#include "HEJ/EventReweighter.hh"
#include "HEJ/get_analysis.hh"
#include "HEJ/make_RNG.hh"
#include "HEJ/optional.hh"
#include "HEJ/ProgressBar.hh"
#include "HEJ/stream.hh"
#include "HEJ/Unweighter.hh"
#include "HEJ/Version.hh"
#include "HEJ/YAMLreader.hh"
HEJ::Config load_config(char const * filename){
try{
return HEJ::load_config(filename);
}
catch(std::exception const & exc){
std::cerr << "Error: " << exc.what() << '\n';
std::exit(EXIT_FAILURE);
}
}
std::unique_ptr<HEJ::Analysis> get_analysis(
YAML::Node const & parameters, LHEF::HEPRUP const & heprup
){
try{
return HEJ::get_analysis(parameters, heprup);
}
catch(std::exception const & exc){
std::cerr << "Failed to load analysis: " << exc.what() << '\n';
std::exit(EXIT_FAILURE);
}
}
// unique_ptr is a workaround:
// HEJ::optional is a better fit, but gives spurious errors with g++ 7.3.0
std::unique_ptr<HEJ::ProgressBar<double>> make_progress_bar(
std::vector<double> const & xs
) {
if(xs.empty()) return {};
const double Born_xs = std::accumulate(begin(xs), end(xs), 0.);
return std::make_unique<HEJ::ProgressBar<double>>(std::cout, Born_xs);
}
std::string time_to_string(const time_t time){
char s[30];
struct tm * p = localtime(&time);
strftime(s, 30, "%a %b %d %Y %H:%M:%S", p);
return s;
}
HEJ::Event to_event(
LHEF::HEPEUP const & hepeup,
HEJ::JetParameters const & fixed_order_jets
) {
HEJ::Event::EventData event_data{hepeup};
event_data.reconstruct_intermediate();
return HEJ::Event{
std::move(event_data).cluster(
fixed_order_jets.def, fixed_order_jets.min_pt
)
};
}
void unweight(
HEJ::Unweighter & unweighter,
HEJ::WeightType weight_type,
std::vector<HEJ::Event> & events,
HEJ::RNG & ran
) {
if(weight_type == HEJ::WeightType::unweighted_resum){
unweighter.set_cut_to_maxwt(events);
}
events.erase(
unweighter.unweight(begin(events), end(events), ran),
end(events)
);
}
// peek up to nevents events from reader
std::vector<LHEF::HEPEUP> peek_events(
HEJ::BufferedEventReader & reader,
const int nevents
) {
std::vector<LHEF::HEPEUP> events;
while(
static_cast<int>(events.size()) < nevents
&& reader.read_event()
) {
events.emplace_back(reader.hepeup());
}
// put everything back into the reader
for(auto it = rbegin(events); it != rend(events); ++it) {
reader.emplace(*it);
}
return events;
}
void append_resummed_events(
std::vector<HEJ::Event> & resummation_events,
HEJ::EventReweighter & reweighter,
LHEF::HEPEUP const & hepeup,
const int trials,
HEJ::JetParameters const & fixed_order_jets
) {
const HEJ::Event FO_event = to_event(hepeup, fixed_order_jets);
if(reweighter.treatment(FO_event.type()) != HEJ::EventTreatment::reweight) {
return;
}
const auto resummed = reweighter.reweight(FO_event, trials);
resummation_events.insert(
end(resummation_events),
begin(resummed), end(resummed)
);
}
void train(
HEJ::Unweighter & unweighter,
HEJ::BufferedEventReader & reader,
HEJ::EventReweighter & reweighter,
const int total_trials,
const double max_dev,
double reweight_factor,
HEJ::JetParameters const & fixed_order_jets
) {
std::cout << "Reading up to " << total_trials << " training events...\n";
auto FO_events = peek_events(reader, total_trials);
+ if(FO_events.empty()) {
+ throw std::runtime_error{
+ "No events generated to calibrate the unweighting weight!"
+ "Please increase the number \"trials\" or deactivate the unweighting."
+ };
+ }
const int trials = total_trials/FO_events.size();
// adjust reweight factor so that the overall normalisation
// is the same as in the full run
reweight_factor *= trials;
for(auto & hepeup: FO_events) {
- hepeup.setWeight(0, reweight_factor * hepeup.weight());
+ hepeup.XWGTUP *= reweight_factor;
}
std::cout << "Training unweighter with "
<< trials << '*' << FO_events.size() << " events\n";
auto progress = HEJ::ProgressBar<int>{
std::cout, static_cast<int>(FO_events.size())
};
std::vector<HEJ::Event> resummation_events;
for(auto const & hepeup: FO_events) {
append_resummed_events(
resummation_events,
reweighter, hepeup, trials, fixed_order_jets
);
++progress;
}
unweighter.set_cut_to_peakwt(resummation_events, max_dev);
std::cout << "\nUnweighting events with weight up to "
<< unweighter.get_cut() << '\n';
}
int main(int argn, char** argv) {
using clock = std::chrono::system_clock;
if (argn != 3) {
std::cerr << "\n# Usage:\n."<< argv[0] <<" config_file input_file\n\n";
return EXIT_FAILURE;
}
const auto start_time = clock::now();
{
std::cout << "Starting " << HEJ::Version::package_name_full()
<< ", revision " << HEJ::Version::revision() << " ("
<< time_to_string(clock::to_time_t(start_time)) << ")" << std::endl;
}
fastjet::ClusterSequence::print_banner();
// read configuration
const HEJ::Config config = load_config(argv[1]);
auto reader = HEJ::make_reader(argv[2]);
assert(reader);
auto heprup{ reader->heprup() };
heprup.generators.emplace_back(LHEF::XMLTag{});
heprup.generators.back().name = HEJ::Version::package_name();
heprup.generators.back().version = HEJ::Version::String();
std::unique_ptr<HEJ::Analysis> analysis = get_analysis(
config.analysis_parameters, heprup
);
assert(analysis != nullptr);
HEJ::CombinedEventWriter writer{config.output, std::move(heprup)};
double global_reweight = 1.;
const auto & max_events = config.max_events;
// if we need the event number:
if(std::abs(heprup.IDWTUP) == 4 || std::abs(heprup.IDWTUP) == 1 || max_events){
// try to read from LHE head
auto input_events{reader->number_events()};
if(!input_events) {
// else count manually
auto t_reader = HEJ::make_reader(argv[2]);
input_events = 0;
while(t_reader->read_event()) ++(*input_events);
}
if(std::abs(heprup.IDWTUP) == 4 || std::abs(heprup.IDWTUP) == 1){
// IDWTUP 4 or 1 assume average(weight)=xs, but we need sum(weights)=xs
std::cout << "Found IDWTUP " << heprup.IDWTUP << ": "
<< "assuming \"cross section = average weight\".\n"
<< "converting to \"cross section = sum of weights\" ";
global_reweight /= *input_events;
}
if(max_events && (*input_events > *max_events)){
// maximal number of events given
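// scale weights up so the truncated sample still sums to the full cross section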
global_reweight *= *input_events/static_cast<double>(*max_events);
std::cout << "Processing " << *max_events
<< " out of " << *input_events << " events\n";
}
}
HEJ::ScaleGenerator scale_gen{
config.scales.base,
config.scales.factors,
config.scales.max_ratio
};
auto ran = HEJ::make_RNG(config.rng.name, config.rng.seed);
assert(ran != nullptr);
HEJ::EventReweighter hej{
reader->heprup(),
std::move(scale_gen),
to_EventReweighterConfig(config),
*ran
};
HEJ::optional<HEJ::Unweighter> unweighter{};
if(config.weight_type != HEJ::WeightType::weighted) {
unweighter = HEJ::Unweighter{};
}
if(config.weight_type == HEJ::WeightType::partially_unweighted) {
HEJ::BufferedEventReader buffered_reader{std::move(reader)};
assert(config.unweight_config);
train(
*unweighter,
buffered_reader,
hej,
config.unweight_config->trials,
config.unweight_config->max_dev,
global_reweight/config.trials,
config.fixed_order_jets
);
reader = std::make_unique<HEJ::BufferedEventReader>(
std::move(buffered_reader)
);
}
// status infos & eye candy
size_t nevent = 0;
std::array<int, HEJ::event_type::last_type + 1>
nevent_type{0}, nfailed_type{0};
auto progress = make_progress_bar(reader->heprup().XSECUP);
HEJ::CrossSectionAccumulator xs;
std::map<HEJ::StatusCode, int> status_counter;
size_t total_trials = 0;
size_t total_resum = 0;
// Loop over the events in the input file
while(reader->read_event() && (!max_events || nevent < *max_events) ){
++nevent;
// reweight events so that the total cross section is conserved
auto hepeup = reader->hepeup();
- hepeup.setWeight(0, global_reweight * hepeup.weight());
+ hepeup.XWGTUP *= global_reweight;
const auto FO_event = to_event(hepeup, config.fixed_order_jets);
if(FO_event.central().weight == 0) {
static const bool warned_once = [argv,nevent](){
std::cerr
<< "WARNING: event number " << nevent
<< " in " << argv[2] << " has zero weight. "
"Ignoring this and all further events with vanishing weight.\n";
return true;
}();
(void) warned_once; // shut up compiler warnings
continue;
}
auto resummed_events{ hej.reweight(FO_event, config.trials) };
// some bookkeeping
for(auto const & s: hej.status())
++status_counter[s];
total_trials+=hej.status().size();
++nevent_type[FO_event.type()];
if(resummed_events.empty()) ++nfailed_type[FO_event.type()];
if(unweighter) {
unweight(*unweighter, config.weight_type, resummed_events, *ran);
}
// analysis
for(auto & ev: resummed_events){
//TODO: move pass_cuts to after phase space point generation
if(analysis->pass_cuts(ev, FO_event)){
analysis->fill(ev, FO_event);
writer.write(ev);
} else {
ev.parameters()*=0; // do not use discarded events afterwards
}
}
xs.fill_correlated(resummed_events);
total_resum += resummed_events.size();
if(progress) progress->increment(FO_event.central().weight);
} // main event loop
std::cout << '\n';
analysis->finalise();
using namespace HEJ::event_type;
std::cout<< "Events processed: " << nevent << " (" << total_resum << " resummed)"<< '\n';
std::cout << '\t' << name(EventType::first_type) << ": "
<< nevent_type[EventType::first_type]
<< ", failed to reconstruct " << nfailed_type[EventType::first_type]
<< '\n';
for(auto i=EventType::first_type+1; i<=EventType::last_type; i*=2){
std::cout << '\t' << name(static_cast<EventType>(i)) << ": "
<< nevent_type[i]
<< ", failed to reconstruct " << nfailed_type[i]
<< '\n';
}
std::cout << '\n' << xs << '\n';
std::cout << "Generation statistic: "
<< status_counter[HEJ::StatusCode::good] << "/" << total_trials
<< " trials successful.\n";
for(auto && entry: status_counter){
const double fraction = static_cast<double>(entry.second)/total_trials;
const int percent = std::round(100*fraction);
std::cout << std::left << std::setw(17) << (to_string(entry.first) + ":")
<< " [";
for(int i = 0; i < percent/2; ++i) std::cout << '#';
for(int i = percent/2; i < 50; ++i) std::cout << ' ';
std::cout << "] " <<std::setw(2)<<std::right<< percent << "%\n";
}
std::chrono::duration<double> run_time = (clock::now() - start_time);
std::cout << "\nFinished " << HEJ::Version::package_name() << " at "
<< time_to_string(clock::to_time_t(clock::now()))
<< "\n=> Runtime: " << run_time.count() << " sec ("
<< nevent/run_time.count() << " Events/sec).\n";
return EXIT_SUCCESS;
}
diff --git a/t/CMakeLists.txt b/t/CMakeLists.txt
index 2f4ee85..ddabd4f 100644
--- a/t/CMakeLists.txt
+++ b/t/CMakeLists.txt
@@ -1,401 +1,401 @@
set(tst_dir "${CMAKE_CURRENT_SOURCE_DIR}")
set(tst_ME_data_dir "${tst_dir}/ME_data")
# small library for common test functions
add_library(hej_test SHARED hej_test.cc)
target_include_directories(hej_test PUBLIC ${tst_dir})
target_link_libraries(hej_test HEJ)
# test event classification
# test explicit configurations
add_executable(test_classify ${tst_dir}/test_classify.cc)
target_compile_options(test_classify PRIVATE "-O0") # avoid compiler optimisation
target_link_libraries(test_classify HEJ hej_test)
add_test(
NAME t_classify
COMMAND test_classify
)
# test against reference data
add_executable(test_classify_ref ${tst_dir}/test_classify_ref.cc)
target_link_libraries(test_classify_ref HEJ hej_test)
add_test(
NAME t_classify_ref
COMMAND test_classify_ref ${tst_dir}/classify_ref ${tst_dir}/classify.lhe.gz
)
add_test(
NAME t_classify_ref_4j
COMMAND test_classify_ref ${tst_dir}/classify_ref_4j ${tst_dir}/4j.lhe.gz
)
add_test(
NAME t_classify_ref_W4j
COMMAND test_classify_ref ${tst_dir}/classify_ref_W4j ${tst_dir}/MGemnubar4j_unweighted.lhe.gz
)
# test for valid W decays
add_executable(test_decay ${tst_dir}/test_decay.cc)
target_link_libraries(test_decay HEJ hej_test)
add_test(
NAME t_valid_decay
COMMAND test_decay
)
# test valid jet cuts on tagging jets
add_executable(test_jet_cuts ${tst_dir}/test_jet_cuts.cc)
target_link_libraries(test_jet_cuts HEJ hej_test)
add_test(
NAME t_jet_cuts
COMMAND test_jet_cuts
)
# test phase space point
add_executable(test_psp ${tst_dir}/test_psp.cc)
target_link_libraries(test_psp HEJ hej_test)
add_test(
NAME t_psp
COMMAND test_psp ${tst_dir}/psp_gen.lhe.gz
)
# test importing analyses
get_target_property(ANALYSIS_PATH AnalysisTemplate_lib BINARY_DIR)
get_target_property(ANALYSIS_LIB AnalysisTemplate_lib OUTPUT_NAME)
set(ANALYSIS_PARAMETERS "")
configure_file( ${tst_dir}/analysis_config.yml.in
${PROJECT_BINARY_DIR}/t/analysis_config_simple.yml @ONLY )
add_test(
NAME t_analysis_simple
COMMAND $<TARGET_FILE:HEJ_main>
${PROJECT_BINARY_DIR}/t/analysis_config_simple.yml
${tst_dir}/2j.lhe.gz
)
get_target_property(ANALYSIS_PATH AnalysisPrint_lib BINARY_DIR)
get_target_property(ANALYSIS_LIB AnalysisPrint_lib OUTPUT_NAME)
set(ANALYSIS_PARAMETERS " output: ana_output")
configure_file( ${tst_dir}/analysis_config.yml.in
${PROJECT_BINARY_DIR}/t/analysis_config_print.yml @ONLY )
add_test(
NAME t_analysis_print
COMMAND $<TARGET_FILE:HEJ_main>
${PROJECT_BINARY_DIR}/t/analysis_config_print.yml
${tst_dir}/2j.lhe.gz
)
# test importing scales (from examples/softestptScale)
add_executable(test_scale_import ${tst_dir}/test_scale_import)
target_link_libraries(test_scale_import HEJ)
get_target_property(SCALE_PATH softestptScale_lib BINARY_DIR)
get_target_property(SCALE_LIB softestptScale_lib OUTPUT_NAME)
set(SCALE_NAME "softest_jet_pt")
configure_file( ${tst_dir}/jet_config_with_import.yml.in
${PROJECT_BINARY_DIR}/t/jet_config_with_import.yml @ONLY )
add_test(
NAME t_scale_import
COMMAND test_scale_import ${PROJECT_BINARY_DIR}/t/jet_config_with_import.yml
)
# test scale arithmetic (e.g. 2*H_T/4)
add_executable(test_scale_arithmetics ${tst_dir}/test_scale_arithmetics)
target_link_libraries(test_scale_arithmetics HEJ hej_test)
add_test(
NAME t_scale_arithmetics
COMMAND test_scale_arithmetics ${tst_dir}/jet_config.yml ${tst_dir}/2j.lhe.gz
)
# test "ParameterDescription"
add_executable(test_descriptions ${tst_dir}/test_descriptions)
target_link_libraries(test_descriptions HEJ hej_test)
add_test(
NAME t_descriptions
COMMAND test_descriptions
)
# test "EventParameters*Weight"
add_executable(test_parameters ${tst_dir}/test_parameters)
target_link_libraries(test_parameters HEJ hej_test)
add_test(
NAME test_parameters
COMMAND test_parameters
)
# test unweighting
add_executable(test_unweighter ${tst_dir}/test_unweighter)
target_link_libraries(test_unweighter HEJ hej_test)
add_test(
NAME test_unweighter
COMMAND test_unweighter ${tst_dir}/4j.lhe.gz
)
# test colour generation
add_executable(test_colours ${tst_dir}/test_colours)
target_link_libraries(test_colours HEJ hej_test)
add_test(
NAME t_colour_flow
COMMAND test_colours
)
# test matrix elements
add_executable(test_ME_generic ${tst_dir}/test_ME_generic.cc)
target_link_libraries(test_ME_generic HEJ hej_test)
add_test(
NAME t_ME_j
COMMAND test_ME_generic ${tst_ME_data_dir}/config_mtinf.yml ${tst_ME_data_dir}/ME_jets_tree.dat ${tst_ME_data_dir}/PSP_jets.lhe.gz
)
add_test(
NAME t_ME_j_virt
COMMAND test_ME_generic ${tst_ME_data_dir}/config_mtinf.yml ${tst_ME_data_dir}/ME_jets_tree_virt.dat ${tst_ME_data_dir}/PSP_jets.lhe.gz
)
add_test(
NAME t_ME_h
COMMAND test_ME_generic ${tst_ME_data_dir}/config_mtinf.yml ${tst_ME_data_dir}/ME_h_mtinf_tree.dat ${tst_ME_data_dir}/PSP_h.lhe.gz
)
add_test(
NAME t_ME_h_virt
COMMAND test_ME_generic ${tst_ME_data_dir}/config_mtinf.yml ${tst_ME_data_dir}/ME_h_mtinf_virt.dat ${tst_ME_data_dir}/PSP_h.lhe.gz
)
if(QCDloop_FOUND)
add_test(
NAME t_ME_h_mt
COMMAND test_ME_generic ${tst_ME_data_dir}/config_mt.yml ${tst_ME_data_dir}/ME_h_mt_tree.dat ${tst_ME_data_dir}/PSP_h.lhe.gz
)
add_test(
NAME t_ME_h_mtmb
COMMAND test_ME_generic ${tst_ME_data_dir}/config_mtmb.yml ${tst_ME_data_dir}/ME_h_mtmb_tree.dat ${tst_ME_data_dir}/PSP_h.lhe.gz
)
endif()
add_test(
NAME t_ME_j_subl
COMMAND test_ME_generic ${tst_ME_data_dir}/config_pure.yml ${tst_ME_data_dir}/ME_jets_subl_tree.dat ${tst_ME_data_dir}/PSP_jets_subl.lhe.gz
)
add_test(
NAME t_ME_j_subl_virt
COMMAND test_ME_generic ${tst_ME_data_dir}/config_pure.yml ${tst_ME_data_dir}/ME_jets_subl_tree_virt.dat ${tst_ME_data_dir}/PSP_jets_subl.lhe.gz
)
add_test(
NAME t_ME_j_subl_new
COMMAND test_ME_generic ${tst_ME_data_dir}/config_pure.yml ${tst_ME_data_dir}/ME_jets_subl_tree_new.dat ${tst_dir}/4j.lhe.gz
)
add_test(
NAME t_ME_j_subl_new_virt
COMMAND test_ME_generic ${tst_ME_data_dir}/config_pure.yml ${tst_ME_data_dir}/ME_jets_subl_tree_new_virt.dat ${tst_dir}/4j.lhe.gz
)
add_test(
NAME t_ME_w_FKL
COMMAND test_ME_generic ${tst_ME_data_dir}/config_w_ME.yml ${tst_ME_data_dir}/ME_w_FKL_tree.dat ${tst_ME_data_dir}/PSP_w_FKL.lhe.gz
)
add_test(
NAME t_ME_w_FKL_virt
COMMAND test_ME_generic ${tst_ME_data_dir}/config_w_ME.yml ${tst_ME_data_dir}/ME_w_FKL_virt.dat ${tst_ME_data_dir}/PSP_w_FKL.lhe.gz
)
add_test(
NAME t_ME_Wp
COMMAND test_ME_generic ${tst_ME_data_dir}/config_w_ME.yml ${tst_ME_data_dir}/ME_Wp.dat ${tst_ME_data_dir}/PSP_Wp.lhe.gz
)
add_test(
NAME t_ME_Wp_virt
COMMAND test_ME_generic ${tst_ME_data_dir}/config_w_ME.yml ${tst_ME_data_dir}/ME_Wp_virt.dat ${tst_ME_data_dir}/PSP_Wp.lhe.gz
)
add_test(
NAME t_ME_Wm
COMMAND test_ME_generic ${tst_ME_data_dir}/config_w_ME.yml ${tst_ME_data_dir}/ME_Wm.dat ${tst_ME_data_dir}/PSP_Wm.lhe.gz
)
add_test(
NAME t_ME_Wm_virt
COMMAND test_ME_generic ${tst_ME_data_dir}/config_w_ME.yml ${tst_ME_data_dir}/ME_Wm_virt.dat ${tst_ME_data_dir}/PSP_Wm.lhe.gz
)
# test main executable
file(COPY "${tst_dir}/jet_config.yml" DESTINATION "${PROJECT_BINARY_DIR}")
set(test_config "${PROJECT_BINARY_DIR}/jet_config.yml")
if(HighFive_FOUND)
file(READ ${test_config} config)
file(WRITE ${test_config} "${config} - tst.hdf5\n")
endif()
if(HepMC3_FOUND)
file(READ ${test_config} config)
file(WRITE ${test_config} "${config} - tst.hepmc\n")
endif()
if(HepMC_FOUND)
file(READ ${test_config} config)
file(WRITE ${test_config} "${config} - tst.hepmc2\n")
endif()
if(rivet_FOUND)
file(READ ${test_config} config)
file(WRITE ${test_config} "${config}\nanalysis:\n rivet: MC_XS\n output: tst\n")
endif()
set(test_cmd_main "$<TARGET_FILE:HEJ_main>\\\;${test_config}\\\;${tst_dir}/2j.lhe.gz")
# check that HepMC3 output is correct
if(HepMC3_FOUND)
add_executable(check_hepmc ${tst_dir}/check_hepmc.cc)
target_link_libraries(check_hepmc HEJ ${HEPMC3_LIBRARIES})
target_include_directories(check_hepmc PRIVATE ${HEPMC3_INCLUDE_DIR})
set(test_cmd_hepmc "$<TARGET_FILE:check_hepmc>\\\;tst.hepmc")
else()
set(test_cmd_hepmc "")
endif()
# check that LHEF output is correct
add_executable(check_lhe ${tst_dir}/check_lhe.cc)
target_link_libraries(check_lhe HEJ hej_test)
set(test_cmd_lhe "$<TARGET_FILE:check_lhe>\\\;tst.lhe")
# check that rivet interface is consistent with naive rivet
if(rivet_FOUND)
# this assumes "rivet" and "yodadiff" are found in PATH
if(rivet_USE_HEPMC3)
set(hepmc_file "tst.hepmc")
else()
set(hepmc_file "tst.hepmc2")
endif()
if(rivet_USE_HEPMC3 OR (rivet_VERSION VERSION_LESS 3))
set(histo_exclude "")
else()
# rivet 3 with HepMC 2 is inconsistent in order of weights
# -> interface != direct call (by permutation)
# REQUIRES Yoda 1.7.5
set(histo_exclude "-M\\\;\\\\d")
endif()
set(test_cmd_rivet "rivet\\\;-a\\\;MC_XS\\\;${hepmc_file}\\\;-o\\\;tst_direct.yoda\
\;yodadiff\\\;${histo_exclude}\\\;tst.yoda\\\;tst_direct.yoda")
else()
set(test_cmd_rivet "")
endif()
# Run dependent tests in one command to ensure correct execution order
# Note: The commands are concatenated with "\;" to escape CMake lists.
# Thus arguments have to be escaped twice "\\\;".
# e.g. "cmd1\;cmd2\\\;arg1\\\;arg2" is executed like "cmd1 && cmd2 arg1 arg2"
add_test(
NAME t_main
COMMAND ${CMAKE_COMMAND}
-DCMDS=${test_cmd_main}\;${test_cmd_hepmc}\;${test_cmd_lhe}\;${test_cmd_rivet}
-P ${PROJECT_SOURCE_DIR}/cmake/run_multiple_tests.cmake
)
# check that Sherpa's LHE input can be read
add_executable(check_lhe_sherpa ${tst_dir}/check_lhe_sherpa.cc)
-target_link_libraries(check_lhe_sherpa HEJ)
+target_link_libraries(check_lhe_sherpa HEJ hej_test)
add_test(
NAME t_sherpa_reader
COMMAND check_lhe_sherpa ${tst_dir}/SherpaLHE.lhe 1.62624e+08
)
# check HDF5 reader & writer
if(HighFive_FOUND)
add_executable(test_hdf5 ${tst_dir}/test_hdf5.cc)
target_link_libraries(test_hdf5 HEJ)
add_test(
NAME t_hdf5
COMMAND test_hdf5 ${tst_dir}/Wm9-g4-repack.hdf5
)
add_executable(test_hdf5_write ${tst_dir}/test_hdf5_write.cc)
target_link_libraries(test_hdf5_write HEJ)
add_test(
NAME t_hdf5_write
COMMAND test_hdf5_write ${tst_dir}/Wm9-g4-repack.hdf5
)
endif()
# check rivet interface
if(RIVET_FOUND)
add_executable(check_rivet ${tst_dir}/check_rivet.cc)
target_link_libraries(check_rivet HEJ rivet::rivet)
add_test(
NAME t_rivet
COMMAND check_rivet
)
endif()
# test boson reconstruction
add_executable(cmp_events ${tst_dir}/cmp_events.cc)
target_link_libraries(cmp_events HEJ)
add_test(
NAME t_epnu_2j_noW
COMMAND cmp_events ${tst_dir}/epnu2jLOFKL_unweight.lhe.tar.gz ${tst_dir}/epnu2jLOFKL_unweight_noW.lhe.gz
)
# test resummed result
add_executable(check_res ${tst_dir}/check_res.cc)
target_link_libraries(check_res HEJ hej_test)
if(TEST_ALL) # deactivate long tests by default
add_test(
NAME t_2j
COMMAND check_res ${tst_dir}/2j.lhe.gz 3.49391e+07 419684
)
add_test(
NAME t_3j
COMMAND check_res ${tst_dir}/3j.lhe.gz 2.37902e+06 25746.6
)
add_test(
NAME t_3j_unof
COMMAND check_res ${tst_dir}/3j.lhe.gz 133399 4688.83 unof
)
add_test(
NAME t_3j_unob
COMMAND check_res ${tst_dir}/3j.lhe.gz 105247 3449.45 unob
)
add_test(
NAME t_3j_splitf
COMMAND check_res ${tst_dir}/3j.lhe.gz 97659.9 2748.65 splitf
)
add_test(
NAME t_3j_splitb
COMMAND check_res ${tst_dir}/3j.lhe.gz 107150 2799.8 splitb
)
add_test(
NAME t_4j
COMMAND check_res ${tst_dir}/4j.lhe.gz 603713 72822.6
)
add_test(
NAME t_4j_qqxmid
COMMAND check_res ${tst_dir}/4j.lhe.gz 21866.7 1716.96 qqxmid
)
add_test(
NAME t_h_3j
COMMAND check_res ${tst_dir}/h_3j.lhe.gz 0.821622 0.0220334
)
add_test(
NAME t_h_3j_unof
COMMAND check_res ${tst_dir}/h_3j_uno.lhe.gz 0.0127362 0.000271555 unof
)
add_test(
NAME t_h_3j_unob
COMMAND check_res ${tst_dir}/h_3j_uno.lhe.gz 0.0130615 0.000224793 unob
)
add_test(
NAME t_epnu_2j
COMMAND check_res ${tst_dir}/epnu2jLOFKL_unweight.lhe.tar.gz 262.7 3
)
add_test(
NAME t_MGepnu_3j
COMMAND check_res ${tst_dir}/MGepnu3j_unweighted.lhe.gz 38.9512 1
)
add_test(
NAME t_MGemnubar_3j
COMMAND check_res ${tst_dir}/MGemnubar3j_unweighted.lhe.gz 24.1575 1
)
add_test(
NAME t_MGepnu_3j_unof
COMMAND check_res ${tst_dir}/MGepnu3j_unweighted.lhe.gz 9.63702 0.128355 unof
)
add_test(
NAME t_MGepnu_3j_unob
COMMAND check_res ${tst_dir}/MGepnu3j_unweighted.lhe.gz 9.70119 0.108436 unob
)
add_test(
NAME t_MGepnu_3j_splitf
COMMAND check_res ${tst_dir}/MGepnu3j_unweighted.lhe.gz 2.91995 0.0463182 splitf
)
add_test(
NAME t_MGepnu_3j_splitb
COMMAND check_res ${tst_dir}/MGepnu3j_unweighted.lhe.gz 3.40708 0.0550975 splitb
)
add_test(
NAME t_MGepnu_4j
COMMAND check_res ${tst_dir}/MGepnu4j_unweighted.lhe.gz 10.2542 0.135106
)
add_test(
NAME t_MGemnubar_4j
COMMAND check_res ${tst_dir}/MGemnubar4j_unweighted.lhe.gz 5.57909 0.0300496
)
add_test(
NAME t_MGepnu_4j_qqxmid
COMMAND check_res ${tst_dir}/MGepnu4j_unweighted.lhe.gz 0.732084 0.005 qqxmid
)
endif()
diff --git a/t/check_lhe.cc b/t/check_lhe.cc
index b2376af..43a637b 100644
--- a/t/check_lhe.cc
+++ b/t/check_lhe.cc
@@ -1,47 +1,64 @@
/**
* \authors The HEJ collaboration (see AUTHORS for details)
* \date 2019
* \copyright GPLv2 or later
*/
#include <iostream>
#include <unordered_map>
#include "HEJ/event_types.hh"
#include "HEJ/EventReader.hh"
#include "hej_test.hh"
-static constexpr double ep = 1e-3;
+namespace {
+ static constexpr double ep = 1e-3;
+ const fastjet::JetDefinition jet_def{fastjet::kt_algorithm, 0.4};
+ constexpr double min_jet_pt = 30;
+}
+
int main(int argn, char** argv) {
if(argn != 2){
std::cerr << "Usage: " << argv[0] << " lhe_file\n";
return EXIT_FAILURE;
}
auto reader{ HEJ::make_reader(argv[1]) };
std::unordered_map<int, double> xsec_ref;
for(int i=0; i < reader->heprup().NPRUP; ++i)
xsec_ref[reader->heprup().LPRUP[i]] = 0.;
while(reader->read_event()){
ASSERT(reader->hepeup().NUP > 2); // at least 3 particles (2 in + 1 out)
// first incoming has positive pz
ASSERT(reader->hepeup().PUP[0][2] > reader->hepeup().PUP[1][2]);
// test that we can transform IDPRUP to event type
(void) name(static_cast<HEJ::event_type::EventType>(reader->hepeup().IDPRUP));
xsec_ref[reader->hepeup().IDPRUP] += reader->hepeup().weight();
+ // test that a HEJ event can be transformed back to the original HEPEUP
+ auto hej_event = HEJ::Event::EventData(reader->hepeup()).cluster(jet_def, min_jet_pt);
+ // there are two different weight infos, which should be the same
+ ASSERT(hej_event.central().weight == reader->hepeup().weight());
+ ASSERT(hej_event.central().weight == reader->hepeup().XWGTUP);
+ // reader->heprup() is const, we can't use it to create a hepeup
+ auto cp_heprup = reader->heprup();
+ auto new_event = HEJ::to_HEPEUP(hej_event, &cp_heprup);
+ ASSERT(new_event.weight() == reader->hepeup().weight());
+ ASSERT(new_event.XWGTUP == reader->hepeup().XWGTUP);
+ ASSERT(new_event.SCALUP == reader->hepeup().SCALUP);
+ ASSERT(new_event.NUP == reader->hepeup().NUP);
}
for(size_t i = 0; i < xsec_ref.size(); ++i){
double const ref = xsec_ref[reader->heprup().LPRUP[i]];
double const calc = reader->heprup().XSECUP[i];
std::cout << ref << '\t' << calc << '\n';
if(std::abs(calc-ref) > ep*calc){
std::cerr << "Cross sections deviate substantially";
return EXIT_FAILURE;
}
}
return EXIT_SUCCESS;
}
diff --git a/t/check_lhe_sherpa.cc b/t/check_lhe_sherpa.cc
index 1c92d24..31561d6 100644
--- a/t/check_lhe_sherpa.cc
+++ b/t/check_lhe_sherpa.cc
@@ -1,46 +1,50 @@
/**
* \authors The HEJ collaboration (see AUTHORS for details)
* \date 2019
* \copyright GPLv2 or later
*/
#include <iostream>
#include <string>
#include "HEJ/LesHouchesReader.hh"
+#include "hej_test.hh"
+
static constexpr double ep = 1e-5;
int main(int argn, char** argv) {
if(argn != 3){
std::cerr << "Usage: " << argv[0] << " lhe_file xs\n";
return EXIT_FAILURE;
}
auto reader{ HEJ::make_reader(argv[1])};
const double ref_xs = std::stod(argv[2]);
if(std::abs(reader->heprup().IDWTUP) != 3){
std::cerr << "Sherpa Events should always be neutral/unweighted\n";
return EXIT_FAILURE;
}
double xs { 0. };
size_t n_evts { 0 };
+ ASSERT(std::abs(reader->heprup().XSECUP.front()-ref_xs) < ep*ref_xs);
while(reader->read_event()){
++n_evts;
xs += reader->hepeup().weight();
+ ASSERT(reader->hepeup().weight() == reader->hepeup().XWGTUP);
}
if(std::abs(xs-ref_xs) > ep*xs){
std::cerr << "Cross sections deviate substantially!\n"
<<"Found "<< xs <<" but expected "<< ref_xs <<" -> "<< xs/ref_xs <<"\n";
return EXIT_FAILURE;
}
if(!reader->number_events() || *(reader->number_events()) != n_evts){
std::cerr << "Number of Event not correctly set for Sherpa LHE reader\n";
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}