diff --git a/analyses/pluginATLAS/ATLAS_2013_I1243871.cc b/analyses/pluginATLAS/ATLAS_2013_I1243871.cc
--- a/analyses/pluginATLAS/ATLAS_2013_I1243871.cc
+++ b/analyses/pluginATLAS/ATLAS_2013_I1243871.cc
@@ -1,234 +1,280 @@
 // -*- C++ -*-
 #include "Rivet/Analysis.hh"
+#include "Rivet/Tools/Logging.hh"
 #include "Rivet/Projections/FinalState.hh"
 #include "Rivet/Projections/IdentifiedFinalState.hh"
 #include "Rivet/Projections/VetoedFinalState.hh"
-#include "Rivet/Projections/HeavyHadrons.hh"
 #include "Rivet/Projections/FastJets.hh"
+#include "Rivet/Tools/ParticleIdUtils.hh"
+#include "Rivet/Particle.hh"

 namespace Rivet {

   class ATLAS_2013_I1243871 : public Analysis {
   public:

     /// Constructor
     ATLAS_2013_I1243871()
       : Analysis("ATLAS_2013_I1243871")
     {  }

     /// Book histograms and initialise projections before the run
     void init() {
       // Set up projections
       const FinalState fs(-4.5, 4.5);
       declare(fs, "ALL_FS");

       /// Get electrons from truth record
       IdentifiedFinalState elec_fs(Cuts::abseta < 2.47 && Cuts::pT > 25*GeV);
       elec_fs.acceptIdPair(PID::ELECTRON);
       declare(elec_fs, "ELEC_FS");

       /// Get muons which pass the initial kinematic cuts:
       IdentifiedFinalState muon_fs(Cuts::abseta < 2.5 && Cuts::pT > 20*GeV);
       muon_fs.acceptIdPair(PID::MUON);
       declare(muon_fs, "MUON_FS");

-      /// Get b-hadrons for tagging
-      HeavyHadrons hh(Cuts::pT > 5*GeV);
-      declare(hh, "HF_HADRONS");
-
       // Final state used as input for jet-finding.
       // We include everything except the muons and neutrinos
       VetoedFinalState jet_input(fs);
       jet_input.vetoNeutrinos();
       jet_input.addVetoPairId(PID::MUON);
       declare(jet_input, "JET_INPUT");

       // Get the jets
       FastJets jets(jet_input, FastJets::ANTIKT, 0.4);
       declare(jets, "JETS");

       // Book histograms
       for (size_t d = 0; d < 5; ++d) {
         _p_b_rho[d] = bookProfile1D(d+1, 1, 1);
         _p_l_rho[d] = bookProfile1D(d+1, 2, 1);
         _p_b_Psi[d] = bookProfile1D(d+1, 1, 2);
         _p_l_Psi[d] = bookProfile1D(d+1, 2, 2);
       }
     }

     /// Perform the per-event analysis
     void analyze(const Event& event) {
       const double weight = event.weight();

       /// Get the various sets of final state particles
-      const Particles& elecs = apply<IdentifiedFinalState>(event, "ELEC_FS").particlesByPt();
-      const Particles& muons = apply<IdentifiedFinalState>(event, "MUON_FS").particlesByPt();
+      const ParticleVector& elecFS = apply<IdentifiedFinalState>(event, "ELEC_FS").particlesByPt();
+      const ParticleVector& muonFS = apply<IdentifiedFinalState>(event, "MUON_FS").particlesByPt();

       // Get all jets with pT > 7 GeV (ATLAS standard jet collection)
-      const Jets& allJets = apply<FastJets>(event, "JETS").jetsByPt(7*GeV);
+      /// @todo Why rewrite the jets collection as a vector of pointers?
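+      // Value-semantics sketch of the same selection, using the filter_select/Cuts
+      // helpers visible in the removed lines nearby (assuming those helpers exist in
+      // this Rivet version); it avoids the pointer bookkeeping in the loops below:
+      //   Jets pt_jets = filter_select(allJets, Cuts::pT > 25*GeV && Cuts::abseta < 2.5);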
+      const Jets& jets = apply<FastJets>(event, "JETS").jetsByPt(7*GeV);
+      vector<const Jet*> allJets;
+      for (const Jet& j : jets) allJets.push_back(&j);

       // Keep any jets that pass the pt cut
-      const Jets pt_jets = filter_select(allJets, Cuts::pt > 25*GeV && Cuts::abseta < 2.5);
+      vector<const Jet*> pt_jets;
+      foreach (const Jet* j, allJets) {
+        /// @todo Use direct kinematics access
+        const double pt = j->momentum().pT();
+        const double eta = j->momentum().eta();
+        if (pt > 25*GeV && fabs(eta) < 2.5) pt_jets.push_back(j);
+      }

       // Remove jets too close to an electron
-      const Jets good_jets = filter_discard(pt_jets, [&](const Jet& j){
-        return any(elecs, deltaRLess(j, 0.2));
-      });
+      vector<const Jet*> good_jets;
+      foreach (const Jet* j, pt_jets) {
+        bool isElectron = 0;
+        foreach (const Particle& e, elecFS) {
+          const double elec_jet_dR = deltaR(e.momentum(), j->momentum());
+          if (elec_jet_dR < 0.2) { isElectron = true; break; }
+        }
+        if (!isElectron) good_jets.push_back(j);
+      }

       // Classify the event type
-      const size_t nElec = elecs.size();
-      const size_t nMuon = muons.size();
+      const size_t nElec = elecFS.size();
+      const size_t nMuon = muonFS.size();
       bool isSemilepton = false, isDilepton = false;
       if (nElec == 1 && nMuon == 0) {
         isSemilepton = true;
       } else if (nElec == 0 && nMuon == 1) {
         isSemilepton = true;
       } else if (nElec == 2 && nMuon == 0) {
-        if (charge(elecs[0]) != charge(elecs[1])) isDilepton = true;
+        if (charge(elecFS[0]) != charge(elecFS[1])) isDilepton = true;
       } else if (nElec == 1 && nMuon == 1) {
-        if (charge(elecs[0]) != charge(muons[0])) isDilepton = true;
+        if (charge(elecFS[0]) != charge(muonFS[0])) isDilepton = true;
       } else if (nElec == 0 && nMuon == 2) {
-        if (charge(muons[0]) != charge(muons[1])) isDilepton = true;
+        if (charge(muonFS[0]) != charge(muonFS[1])) isDilepton = true;
       }
       const bool isGoodEvent = (isSemilepton && good_jets.size() >= 4) || (isDilepton && good_jets.size() >= 2);
       if (!isGoodEvent) vetoEvent;

-      // Weakly-decaying b-hadrons for tagging
-      const Particles& b_hadrons = apply<HeavyHadrons>(event, "HF_HADRONS").bHadrons();
+      // Select b-hadrons
+      /// @todo Use built-in identification on Particle, avoid HepMC
+      vector<ConstGenParticlePtr> b_hadrons;
+      vector<ConstGenParticlePtr> allParticles = HepMCUtils::particles(event.genEvent());
+      for (size_t i = 0; i < allParticles.size(); i++) {
+        ConstGenParticlePtr p = allParticles.at(i);
+        if ( !(PID::isHadron( p->pdg_id() ) && PID::hasBottom( p->pdg_id() )) ) continue;
+        if (p->momentum().perp() < 5*GeV) continue;
+        b_hadrons.push_back(p);
+      }

-      // Select b-jets as those containing a b-hadron and not overlapped with any other jet
-      Jets b_jets = filter_select(good_jets, [&](const Jet& j){
-        if (all(b_hadrons, deltaRGtr(j, 0.3))) return false;
-        for (const Jet& k : allJets)
-          if (inRange(deltaR(j,k), 0.01, 0.8)) return false;
-        return true;
-      });
+      // Select b-jets as those containing a b-hadron
+      /// @todo Use built-in dR < 0.3 Jet tagging, avoid HepMC
+      vector<const Jet*> b_jets;
+      foreach (const Jet* j, good_jets) {
+        bool isbJet = false;
+        foreach (ConstGenParticlePtr b, b_hadrons) {
+          /// @todo Use direct momentum accessor / delta functions
+          const FourMomentum hadron = b->momentum();
+          const double hadron_jet_dR = deltaR(j->momentum(), hadron);
+          if (hadron_jet_dR < 0.3) { isbJet = true; break; }
+        }
+        // Check if it is overlapped to any other jet
+        bool isOverlapped = false;
+        foreach (const Jet* k, allJets) {
+          if (j == k) continue;
+          double dRjj = deltaR(j->momentum(), k->momentum());
+          if (dRjj < 0.8) { isOverlapped = true; break; }
+        }
+        if (isbJet && !isOverlapped) b_jets.push_back(j);
+      }
       MSG_DEBUG(b_jets.size() << " b-jets selected");

       // Select light-jets as the pair of non-b-jets with invariant mass closest to the W mass
       /// @todo Use built-in b-tagging (dR < 0.3 defn), avoid HepMC
       const double nominalW = 80.4*GeV;
       double deltaM = 500*GeV;
-      const Jet* light1 = nullptr; const Jet* light2 = nullptr; // NB: const Jets, not const pointers!
-      for (const Jet& i : good_jets) {
-        const bool isbJet1 = any(b_hadrons, deltaRLess(i, 0.3));
+      const Jet* light1 = NULL; const Jet* light2 = NULL; // NB: const Jets, not const pointers!
+      foreach (const Jet* i, good_jets) {
+        bool isbJet1 = false;
+        for (ConstGenParticlePtr b : b_hadrons) {
+          /// @todo Use direct momentum accessor / delta functions
+          const FourMomentum hadron = b->momentum();
+          const double hadron_jet_dR = deltaR(i->momentum(), hadron);
+          if (hadron_jet_dR < 0.3) { isbJet1 = true; break; }
+        }
         if (isbJet1) continue;
-        for (const Jet& j : good_jets) {
-          const bool isbJet2 = any(b_hadrons, deltaRLess(j, 0.3));
+        foreach (const Jet* j, good_jets) {
+          bool isbJet2 = false;
+          foreach (ConstGenParticlePtr b, b_hadrons) {
+            FourMomentum hadron = b->momentum();
+            double hadron_jet_dR = deltaR(j->momentum(), hadron);
+            if (hadron_jet_dR < 0.3) { isbJet2 = true; break; }
+          }
          if (isbJet2) continue;
-          const double invMass = (i.momentum()+j.momentum()).mass();
-          const double dM = fabs(invMass - nominalW);
-          if (dM < deltaM) {
-            deltaM = dM;
-            light1 = &i;
-            light2 = &j;
+          double invMass = (i->momentum()+j->momentum()).mass();
+          if (fabs(invMass-nominalW) < deltaM){
+            deltaM = fabs(invMass - nominalW);
+            light1 = i;
+            light2 = j;
           }
         }
       }

       // Check that both jets are not overlapped, and populate the light jets list
-      Jets light_jets;
-      const bool hasGoodLight = light1 != nullptr && light2 != nullptr && light1 != light2;
+      vector<const Jet*> light_jets;
+      const bool hasGoodLight = light1 != NULL && light2 != NULL && light1 != light2;
       if (hasGoodLight) {
         bool isOverlap1 = false, isOverlap2 = false;
-        for (const Jet& j : allJets) {
-          if (light1 == &j) continue;
-          const double dR1j = deltaR(*light1, j);
+        foreach (const Jet* j, allJets) {
+          if (light1 == j) continue;
+          const double dR1j = deltaR(light1->momentum(), j->momentum());
           if (dR1j < 0.8) { isOverlap1 = true; break; }
         }
-        for (const Jet& j : allJets) {
-          if (light2 == &j) continue;
-          const double dR2j = deltaR(*light2, j);
+        foreach (const Jet* j, allJets) {
+          if (light2 == j) continue;
+          const double dR2j = deltaR(light2->momentum(), j->momentum());
           if (dR2j < 0.8) { isOverlap2 = true; break; }
         }
         if (!isOverlap1 && !isOverlap2) {
-          light_jets = {*light1, *light2};
+          light_jets.push_back(light1);
+          light_jets.push_back(light2);
         }
       }
       MSG_DEBUG(light_jets.size() << " light jets selected");

       // Calculate the jet shapes
+      /// @todo Use C++11 vector/array initialization
       const double binWidth = 0.04; // -> 10 bins from 0.0-0.4
-      const vector<double> ptEdges = {{ 30, 40, 50, 70, 100, 150 }};
+      vector<double> ptEdges; ptEdges += {{ 30, 40, 50, 70, 100, 150 }};

       // b-jet shapes
       MSG_DEBUG("Filling b-jet shapes");
-      for (const Jet& bJet : b_jets) {
+      foreach (const Jet* bJet, b_jets) {
         // Work out jet pT bin and skip this jet if out of range
-        const double jetPt = bJet.pT();
+        const double jetPt = bJet->momentum().pT();
         MSG_DEBUG("Jet pT = " << jetPt/GeV << " GeV");
         if (!inRange(jetPt/GeV, 30., 150.)) continue;
         /// @todo Use YODA bin index lookup tools
         size_t ipt;
         for (ipt = 0; ipt < 5; ++ipt) if (inRange(jetPt/GeV, ptEdges[ipt], ptEdges[ipt+1])) break;
         MSG_DEBUG("Jet pT index = " << ipt);

         // Calculate jet shape
         vector<double> rings(10, 0);
-        for (const Particle& p : bJet.particles()) {
-          const double dR = deltaR(bJet, p);
+        foreach (const Particle& p, bJet->particles()) {
+          const double dR = deltaR(bJet->momentum(), p.momentum());
           const size_t idR = (size_t) floor(dR/binWidth);
           for (size_t i = idR; i < 10; ++i) rings[i] += p.pT();
         }

         // Fill each dR bin of the histos for this jet pT
         for (int iBin = 0; iBin < 10; ++iBin) {
           const double rcenter = 0.02 + iBin*binWidth;
           const double rhoval = (iBin != 0 ? (rings[iBin]-rings[iBin-1]) : rings[iBin]) / binWidth / rings[9];
           const double psival = rings[iBin] / rings[9];
           MSG_DEBUG(rcenter << ", " << rhoval << ", " << psival);
           _p_b_rho[ipt]->fill(rcenter, rhoval, weight);
           _p_b_Psi[ipt]->fill(rcenter, psival, weight);
         }
       }

       // Light jet shapes
       MSG_DEBUG("Filling light jet shapes");
-      for (const Jet& lJet : light_jets) {
+      foreach (const Jet* lJet, light_jets) {
         // Work out jet pT bin and skip this jet if out of range
-        const double jetPt = lJet.pT();
+        const double jetPt = lJet->momentum().pT();
         MSG_DEBUG("Jet pT = " << jetPt/GeV << " GeV");
         if (!inRange(jetPt/GeV, 30., 150.)) continue;
         /// @todo Use YODA bin index lookup tools
         size_t ipt;
         for (ipt = 0; ipt < 5; ++ipt) if (inRange(jetPt/GeV, ptEdges[ipt], ptEdges[ipt+1])) break;
         MSG_DEBUG("Jet pT index = " << ipt);

         // Calculate jet shape
         vector<double> rings(10, 0);
-        for (const Particle& p : lJet.particles()) {
-          const double dR = deltaR(lJet, p);
+        foreach (const Particle& p, lJet->particles()) {
+          const double dR = deltaR(lJet->momentum(), p.momentum());
           const size_t idR = (size_t) floor(dR/binWidth);
           for (size_t i = idR; i < 10; ++i) rings[i] += p.pT();
         }

         // Fill each dR bin of the histos for this jet pT
         for (int iBin = 0; iBin < 10; ++iBin) {
           const double rcenter = 0.02 + iBin*binWidth;
           const double rhoval = (iBin != 0 ? (rings[iBin]-rings[iBin-1]) : rings[iBin]) / binWidth / rings[9];
           const double psival = rings[iBin] / rings[9];
           _p_l_rho[ipt]->fill(rcenter, rhoval, weight);
           _p_l_Psi[ipt]->fill(rcenter, psival, weight);
         }
       }
     }

   private:

     Profile1DPtr _p_b_rho[5];
     Profile1DPtr _p_l_rho[5];
     Profile1DPtr _p_b_Psi[5];
     Profile1DPtr _p_l_Psi[5];

   };

   // The hook for the plugin system
   DECLARE_RIVET_PLUGIN(ATLAS_2013_I1243871);

 }

diff --git a/configure.ac b/configure.ac
--- a/configure.ac
+++ b/configure.ac
@@ -1,388 +1,400 @@
 ## Process this file with autoconf to produce a configure script.
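## A possible regeneration recipe (sketch, assuming the usual autotools layout of this
## repository with local macros in m4/):
##   autoreconf -i && ./configure --prefix=/desired/install/path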
AC_PREREQ(2.59)
AC_INIT([Rivet],[2.7.0],[rivet@projects.hepforge.org],[Rivet])

## Check and block installation into the src/build dir
if test "$prefix" = "$PWD"; then
  AC_MSG_ERROR([Installation into the build directory is not supported: use a different --prefix argument])
fi

## Force default prefix to have a path value rather than NONE
if test "$prefix" = "NONE"; then
  prefix=/usr/local
fi

AC_CONFIG_SRCDIR([src/Core/Analysis.cc])
AC_CONFIG_HEADERS([include/Rivet/Config/DummyConfig.hh include/Rivet/Config/RivetConfig.hh include/Rivet/Config/BuildOptions.hh])
AM_INIT_AUTOMAKE([dist-bzip2 -Wall 1.10])
m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
m4_ifdef([AM_PROG_AR], [AM_PROG_AR])
AC_CONFIG_MACRO_DIR([m4])
AC_SUBST(LT_OBJDIR)

## Package-specific #defines
AC_DEFINE_UNQUOTED(RIVET_VERSION, "$PACKAGE_VERSION", "Rivet version string")
AC_DEFINE_UNQUOTED(RIVET_NAME, "$PACKAGE_NAME", "Rivet name string")
AC_DEFINE_UNQUOTED(RIVET_STRING, "$PACKAGE_STRING", "Rivet name and version string")
AC_DEFINE_UNQUOTED(RIVET_TARNAME, "$PACKAGE_TARNAME", "Rivet short name string")
AC_DEFINE_UNQUOTED(RIVET_BUGREPORT, "$PACKAGE_BUGREPORT", "Rivet contact email address")

## OS X
AC_CEDAR_OSX

## Work out the LCG platform tag
AC_LCG_TAG

## Set default compiler flags
if test "x$CXXFLAGS" == "x"; then CXXFLAGS="-O2"; fi

## Compiler setup
AC_LANG(C++)
AC_PROG_CXX
AX_CXX_COMPILE_STDCXX([11], [noext], [mandatory])

## Store and propagate the compiler identity and flags
RIVETCXX="$CXX"
AC_SUBST(RIVETCXX)
RIVETCXXFLAGS="$CXXFLAGS"
AC_SUBST(RIVETCXXFLAGS)

## Checks for programs.
AC_PROG_INSTALL
AC_PROG_LN_S
AC_DISABLE_STATIC
AC_LIBTOOL_DLOPEN
AC_PROG_LIBTOOL
AC_FUNC_STRERROR_R

## YODA histogramming library
AC_CEDAR_LIBRARYANDHEADERS([YODA], , , [AC_MSG_ERROR([YODA is required])])
YODABINPATH=$YODALIBPATH/../bin
AC_SUBST(YODABINPATH)
AC_PATH_PROG(YODACONFIG, yoda-config, [], [$YODALIBPATH/../bin:$PATH])
YODA_PYTHONPATH=""
if test -f "$YODACONFIG"; then
  AC_MSG_CHECKING([YODA version using yoda-config])
  YODA_VERSION=`$YODACONFIG --version`
  AC_MSG_RESULT([$YODA_VERSION])
  YODA_VERSION1=[`echo $YODA_VERSION | cut -d. -f1 | sed -e 's/\([0-9]*\).*/\1/g'`]
  YODA_VERSION2=[`echo $YODA_VERSION | cut -d. -f2 | sed -e 's/\([0-9]*\).*/\1/g'`]
  YODA_VERSION3=[`echo $YODA_VERSION | cut -d. -f3 | sed -e 's/\([0-9]*\).*/\1/g'`]
  let YODA_VERSION_INT=YODA_VERSION1*10000+YODA_VERSION2*100+YODA_VERSION3
  if test $YODA_VERSION_INT -lt 10500; then
    AC_MSG_ERROR([YODA version isn't sufficient: at least version 1.5.0 required])
  fi
  AC_MSG_CHECKING([YODA Python path using yoda-config])
  YODA_PYTHONPATH=`$YODACONFIG --pythonpath`
  AC_MSG_RESULT([$YODA_PYTHONPATH])
fi
AC_SUBST(YODA_PYTHONPATH)

## HepMC event record library
-#AC_CEDAR_LIBRARYANDHEADERS([HepMC], , , [AC_MSG_ERROR([HepMC is required])])
-AC_CEDAR_LIBRARYANDHEADERS([HepMC3], , [use_hepmc3=yes], [use_hepmc3=no])
-if test x$use_hepmc3 = xno ; then
+if test "${with_hepmc+set}" = set; then
+  if test "${with_hepmc3+set}" = set; then
+    AC_MSG_ERROR([HepMC2 *OR* HepMC3 is required. Both were specified!])
+  fi
+fi
+
+if test "${with_hepmc+set}" = set; then
  AC_CEDAR_LIBRARYANDHEADERS([HepMC], , [use_hepmc2=yes], [use_hepmc2=no])
else
  AM_CONDITIONAL(WITH_HEPMC,false)
  AM_CONDITIONAL(WITH_HEPMCINC,false)
  AM_CONDITIONAL(WITH_HEPMCLIB,false)
  AM_CONDITIONAL(WITHOUT_HEPMC,true)
  AM_CONDITIONAL(WITHOUT_HEPMCINC,true)
  AM_CONDITIONAL(WITHOUT_HEPMCLIB,true)
fi
+
+if test "${with_hepmc3+set}" = set; then
+  AC_CEDAR_LIBRARYANDHEADERS([HepMC3], , [use_hepmc3=yes], [use_hepmc3=no])
+else
+  AM_CONDITIONAL(WITH_HEPMC3,false)
+  AM_CONDITIONAL(WITH_HEPMC3INC,false)
+  AM_CONDITIONAL(WITH_HEPMC3LIB,false)
+  AM_CONDITIONAL(WITHOUT_HEPMC3,true)
+  AM_CONDITIONAL(WITHOUT_HEPMC3INC,true)
+  AM_CONDITIONAL(WITHOUT_HEPMC3LIB,true)
+fi
+
if test x$use_hepmc2 = xno && test x$use_hepmc3 = xno ; then
  AC_MSG_ERROR([HepMC2 or HepMC3 is required])
fi

-if test x$use_hepmc2 = xyes && test x$use_hepmc3 = xyes ; then
-  AC_MSG_ERROR([HepMC2 *OR* HepMC3 is required. Both were specified!])
-fi
-
if test x$use_hepmc2 = xyes; then
  oldCPPFLAGS=$CPPFLAGS
  CPPFLAGS="$CPPFLAGS -I$HEPMCINCPATH"
  if test -e "$HEPMCINCPATH/HepMC/HepMCDefs.h"; then
    AC_LANG_CONFTEST([AC_LANG_SOURCE([#include <iostream>
#include "HepMC/HepMCDefs.h"
int main() { std::cout << HEPMC_VERSION << std::endl; return 0; }])])
  else
    AC_LANG_CONFTEST([AC_LANG_SOURCE([#include <iostream>
#include "HepMC/defs.h"
int main() { std::cout << VERSION << std::endl; return 0; }])])
  fi
  if test -f conftest.cc; then
    $CXX $CPPFLAGS conftest.cc -o conftest 2>&1 1>&5
  elif test -f conftest.C; then
    $CXX $CPPFLAGS conftest.C -o conftest 2>&1 1>&5
  else
    $CXX $CPPFLAGS conftest.cpp -o conftest 2>&1 1>&5
  fi
  hepmc_version=`./conftest`
  if test x$hepmc_version != x; then
    let hepmc_major=`echo "$hepmc_version" | cut -d. -f1`
    let hepmc_minor=`echo "$hepmc_version" | cut -d. -f2`
  fi
  rm -f conftest conftest.cpp conftest.cc conftest.C
  HEPMC_VERSION=$hepmc_major$hepmc_minor
  AC_MSG_NOTICE([HepMC version is $hepmc_version -> $HEPMC_VERSION])
  AC_SUBST(HEPMC_VERSION)
  CPPFLAGS=$oldCPPFLAGS
else
  oldCPPFLAGS=$CPPFLAGS
  CPPFLAGS="$CPPFLAGS -I$HEPMC3INCPATH"
  AC_LANG_CONFTEST([AC_LANG_SOURCE([#include <iostream>
#include "HepMC3/Version.h"
int main() { std::cout << HepMC3::version() << std::endl; return 0; }])])
  if test -f conftest.cc; then
    $CXX $CPPFLAGS conftest.cc -o conftest 2>&1 1>&5
  elif test -f conftest.C; then
    $CXX $CPPFLAGS conftest.C -o conftest 2>&1 1>&5
  else
    $CXX $CPPFLAGS conftest.cpp -o conftest 2>&1 1>&5
  fi
  hepmc_version=`./conftest`
  if test x$hepmc_version != x; then
    let hepmc_major=`echo "$hepmc_version" | cut -d. -f1`
    let hepmc_minor=`echo "$hepmc_version" | cut -d. -f2`
  fi
  rm -f conftest conftest.cpp conftest.cc conftest.C
  HEPMC_VERSION=$hepmc_major$hepmc_minor
  AC_MSG_NOTICE([HepMC version is $hepmc_version -> $HEPMC_VERSION])
  AC_SUBST(HEPMC_VERSION)
  CPPFLAGS=$oldCPPFLAGS
fi
AM_CONDITIONAL([ENABLE_HEPMC_3], [test x$hepmc_major = x3])
AM_COND_IF([ENABLE_HEPMC_3],[CPPFLAGS="$CPPFLAGS -DENABLE_HEPMC_3=true"])

## FastJet clustering library
AC_CEDAR_LIBRARYANDHEADERS([fastjet], , , [AC_MSG_ERROR([FastJet is required])])
AC_PATH_PROG(FJCONFIG, fastjet-config, [], $FASTJETPATH/bin:$PATH)
if test -f "$FJCONFIG"; then
  AC_MSG_CHECKING([FastJet version using fastjet-config])
  fjversion=`$FJCONFIG --version`
  AC_MSG_RESULT([$fjversion])
  fjmajor=$(echo $fjversion | cut -f1 -d.)
  fjminor=$(echo $fjversion | cut -f2 -d.)
  fjmicro=$(echo $fjversion | cut -f3 -d.)
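  # Worked example of the parsing above (sketch): if `fastjet-config --version`
  # prints 3.3.2, then fjmajor=3, fjminor=3, fjmicro=2; only fjmajor is used in the
  # check that follows.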
if test "$fjmajor" -lt 3; then AC_MSG_ERROR([FastJet version 3.0.0 or later is required]) fi FASTJETCONFIGLIBADD="$($FJCONFIG --plugins --shared --libs)" else FASTJETCONFIGLIBADD="-L$FASTJETLIBPATH -l$FASTJETLIBNAME" FASTJETCONFIGLIBADD="$FASTJETCONFIGLIBADD -lSISConePlugin -lsiscone -lsiscone_spherical" FASTJETCONFIGLIBADD="$FASTJETCONFIGLIBADD -lCDFConesPlugin -lD0RunIIConePlugin -lNestedDefsPlugin" FASTJETCONFIGLIBADD="$FASTJETCONFIGLIBADD -lTrackJetPlugin -lATLASConePlugin -lCMSIterativeConePlugin" FASTJETCONFIGLIBADD="$FASTJETCONFIGLIBADD -lEECambridgePlugin -lJadePlugin" fi; AC_MSG_NOTICE([FastJet LIBADD = $FASTJETCONFIGLIBADD]) AC_SUBST(FASTJETCONFIGLIBADD) # Check for FastJet headers that require the --enable-all(cxx)plugins option FASTJET_ERRMSG="Required FastJet plugin headers were not found: did you build FastJet with the --enable-allcxxplugins option?" oldCPPFLAGS=$CPPFLAGS CPPFLAGS="$CPPFLAGS -I$FASTJETINCPATH" AC_CHECK_HEADER([fastjet/D0RunIIConePlugin.hh], [], [AC_MSG_ERROR([$FASTJET_ERRMSG])]) AC_CHECK_HEADER([fastjet/TrackJetPlugin.hh], [], [AC_MSG_ERROR([$FASTJET_ERRMSG])]) CPPFLAGS=$oldCPPFLAGS # ## GNU Scientific Library # AC_SEARCH_GSL # AC_CEDAR_HEADERS([gsl], , , [AC_MSG_ERROR([GSL (GNU Scientific Library) is required])]) # oldCPPFLAGS=$CPPFLAGS # CPPFLAGS="$CPPFLAGS -I$GSLINCPATH" # AC_CHECK_HEADER([gsl/gsl_vector.h], [], [AC_MSG_ERROR([GSL vectors not found.])]) # CPPFLAGS=$oldCPPFLAGS ## Disable build/install of standard analyses AC_ARG_ENABLE([analyses], [AC_HELP_STRING(--disable-analyses, [don't try to build or install standard analyses])], [], [enable_analyses=yes]) if test x$enable_analyses != xyes; then AC_MSG_WARN([Not building standard Rivet analyses, by request]) fi AM_CONDITIONAL(ENABLE_ANALYSES, [test x$enable_analyses = xyes]) ## Build LaTeX docs if possible... AC_PATH_PROG(PDFLATEX, pdflatex) AM_CONDITIONAL(WITH_PDFLATEX, [test x$PDFLATEX != x]) ## ... unless told otherwise! 
AC_ARG_ENABLE([pdfmanual], [AC_HELP_STRING(--enable-pdfmanual, [build and install the manual])], [], [enable_pdfmanual=no])
if test x$enable_pdfmanual = xyes; then
  AC_MSG_WARN([Building Rivet manual, by request])
fi
AM_CONDITIONAL(ENABLE_PDFMANUAL, [test x$enable_pdfmanual = xyes])

## Build Doxygen documentation if possible
AC_ARG_ENABLE([doxygen], [AC_HELP_STRING(--disable-doxygen, [don't try to make Doxygen documentation])], [], [enable_doxygen=yes])
if test x$enable_doxygen = xyes; then
  AC_PATH_PROG(DOXYGEN, doxygen)
fi
AM_CONDITIONAL(WITH_DOXYGEN, [test x$DOXYGEN != x])

## Build asciidoc docs if possible
AC_PATH_PROG(ASCIIDOC, asciidoc)
AM_CONDITIONAL(WITH_ASCIIDOC, [test x$ASCIIDOC != x])

## Python extension
AC_ARG_ENABLE(pyext, [AC_HELP_STRING(--disable-pyext, [don't build Python module (default=build)])], [], [enable_pyext=yes])

## Basic Python checks
if test x$enable_pyext == xyes; then
  AX_PYTHON_DEVEL([>= '2.7.3'])
  AC_SUBST(PYTHON_VERSION)
  RIVET_PYTHONPATH=`$PYTHON -c "from __future__ import print_function; import distutils.sysconfig; print(distutils.sysconfig.get_python_lib(prefix='$prefix', plat_specific=True));"`
  AC_SUBST(RIVET_PYTHONPATH)
  if test -z "$PYTHON"; then
    AC_MSG_ERROR([Can't build Python extension since python can't be found])
    enable_pyext=no
  fi
  if test -z "$PYTHON_CPPFLAGS"; then
    AC_MSG_ERROR([Can't build Python extension since Python.h header file cannot be found])
    enable_pyext=no
  fi
fi
AM_CONDITIONAL(ENABLE_PYEXT, [test x$enable_pyext == xyes])

dnl
dnl setup.py puts its build artifacts into a labelled path
dnl this helps the test scripts to find them locally instead of
dnl having to install first
dnl
RIVET_SETUP_PY_PATH=$(${PYTHON} -c 'from __future__ import print_function; import distutils.util as u, sys; vi=sys.version_info; print("lib.%s-%s.%s" % (u.get_platform(),vi.major, vi.minor))')
AC_SUBST(RIVET_SETUP_PY_PATH)

## Cython checks
if test x$enable_pyext == xyes; then
  AM_CHECK_CYTHON([0.24.0], [:], [:])
  if test x$CYTHON_FOUND = xyes; then
    AC_MSG_NOTICE([Cython >= 0.24 found: Python extension source can be rebuilt (for developers)])
  fi
  AC_CHECK_FILE([pyext/rivet/core.cpp], [], [if test "x$CYTHON_FOUND" != "xyes"; then
    AC_MSG_ERROR([Cython is required for --enable-pyext, no pre-built core.cpp was found.])
  fi])
  cython_compiler=$CXX
  ## Set extra Python extension build flags (to cope with Cython output code oddities)
  PYEXT_CXXFLAGS="$CXXFLAGS"
  AC_CEDAR_CHECKCXXFLAG([-Wno-unused-but-set-variable], [PYEXT_CXXFLAGS="$PYEXT_CXXFLAGS -Wno-unused-but-set-variable"])
  AC_CEDAR_CHECKCXXFLAG([-Wno-sign-compare], [PYEXT_CXXFLAGS="$PYEXT_CXXFLAGS -Wno-sign-compare"])
  AC_SUBST(PYEXT_CXXFLAGS)
  AC_MSG_NOTICE([All Python build checks successful: 'rivet' Python extension will be built])
fi
AM_CONDITIONAL(WITH_CYTHON, [test x$CYTHON_FOUND = xyes])

## Set default build flags
AM_CPPFLAGS="-I\$(top_srcdir)/include -I\$(top_builddir)/include"
#AM_CPPFLAGS="$AM_CPPFLAGS -I\$(top_srcdir)/include/eigen3"
#AM_CPPFLAGS="$AM_CPPFLAGS \$(GSL_CPPFLAGS)"
dnl AM_CPPFLAGS="$AM_CPPFLAGS \$(BOOST_CPPFLAGS)"
AM_CPPFLAGS="$AM_CPPFLAGS -I\$(YODAINCPATH)"
if test x$use_hepmc2 = xyes ; then
  AM_CPPFLAGS="$AM_CPPFLAGS -I\$(HEPMCINCPATH)"
else
  AM_CPPFLAGS="$AM_CPPFLAGS -I\$(HEPMC3INCPATH)"
fi
AM_CPPFLAGS="$AM_CPPFLAGS -I\$(FASTJETINCPATH)"

AC_CEDAR_CHECKCXXFLAG([-pedantic], [AM_CXXFLAGS="$AM_CXXFLAGS -pedantic"])
AC_CEDAR_CHECKCXXFLAG([-Wall], [AM_CXXFLAGS="$AM_CXXFLAGS -Wall"])
AC_CEDAR_CHECKCXXFLAG([-Wno-long-long], [AM_CXXFLAGS="$AM_CXXFLAGS -Wno-long-long"])
AC_CEDAR_CHECKCXXFLAG([-Wno-format], [AM_CXXFLAGS="$AM_CXXFLAGS -Wno-format"])
dnl AC_CEDAR_CHECKCXXFLAG([-Wno-unused-variable], [AM_CXXFLAGS="$AM_CXXFLAGS -Wno-unused-variable"])
AC_CEDAR_CHECKCXXFLAG([-Werror=uninitialized], [AM_CXXFLAGS="$AM_CXXFLAGS -Werror=uninitialized"])
AC_CEDAR_CHECKCXXFLAG([-Werror=delete-non-virtual-dtor], [AM_CXXFLAGS="$AM_CXXFLAGS -Werror=delete-non-virtual-dtor"])

## Add OpenMP-enabling flags if possible
AX_OPENMP([AM_CXXFLAGS="$AM_CXXFLAGS $OPENMP_CXXFLAGS"])

## Optional zlib support for gzip-compressed data streams/files
AX_CHECK_ZLIB

## Debug flag (default=-DNDEBUG, enabled=-g)
AC_ARG_ENABLE([debug], [AC_HELP_STRING(--enable-debug, [build with debugging symbols @<:@default=no@:>@])], [], [enable_debug=no])
if test x$enable_debug == xyes; then
  AM_CXXFLAGS="$AM_CXXFLAGS -g"
fi

## Extra warnings flag (default=none)
AC_ARG_ENABLE([extra-warnings], [AC_HELP_STRING(--enable-extra-warnings, [build with extra compiler warnings (recommended for developers) @<:@default=no@:>@])], [], [enable_extra_warnings=no])
if test x$enable_extra_warnings == xyes; then
  AC_CEDAR_CHECKCXXFLAG([-Wextra], [AM_CXXFLAGS="$AM_CXXFLAGS -Wextra "])
fi

AC_SUBST(AM_CPPFLAGS)
AC_SUBST(AM_CXXFLAGS)

AC_EMPTY_SUBST
AC_CONFIG_FILES(Makefile Doxyfile)
AC_CONFIG_FILES(include/Makefile include/Rivet/Makefile)
AC_CONFIG_FILES(src/Makefile)
AC_CONFIG_FILES(src/Core/Makefile src/Core/yamlcpp/Makefile)
AC_CONFIG_FILES(src/Tools/Makefile)
AC_CONFIG_FILES(src/Tools/fjcontrib/Makefile)
AC_CONFIG_FILES(src/Tools/fjcontrib/EnergyCorrelator/Makefile)
AC_CONFIG_FILES(src/Tools/fjcontrib/Nsubjettiness/Makefile)
AC_CONFIG_FILES(src/Tools/fjcontrib/RecursiveTools/Makefile)
AC_CONFIG_FILES(src/Projections/Makefile)
AC_CONFIG_FILES(src/AnalysisTools/Makefile)
AC_CONFIG_FILES(analyses/Makefile)
AC_CONFIG_FILES(test/Makefile)
AC_CONFIG_FILES(pyext/Makefile pyext/rivet/Makefile pyext/setup.py)
AC_CONFIG_FILES(data/Makefile data/texmf/Makefile)
AC_CONFIG_FILES(doc/Makefile)
AC_CONFIG_FILES(doc/rivetversion.sty)
AC_CONFIG_FILES(bin/Makefile bin/rivet-config bin/rivet-buildplugin)
AC_CONFIG_FILES(rivetenv.sh rivetenv.csh rivet.pc)

AC_OUTPUT

if test x$enable_pyrivet == xyes; then
  cat <