diff --git a/doc/developer_manual/developer_manual.tex b/doc/developer_manual/developer_manual.tex index eff5da6..12857ca 100644 --- a/doc/developer_manual/developer_manual.tex +++ b/doc/developer_manual/developer_manual.tex @@ -1,2034 +1,2034 @@ \documentclass[a4paper,11pt]{article} \usepackage{fourier} \usepackage[T1]{fontenc} \usepackage{microtype} \usepackage{geometry} \usepackage{enumitem} \setlist[description]{leftmargin=\parindent,labelindent=\parindent} \usepackage{amsmath} \usepackage{amssymb} \usepackage[utf8x]{inputenc} \usepackage{graphicx} \usepackage{xcolor} \usepackage{todonotes} \usepackage{listings} \usepackage{xspace} \usepackage{tikz} \usepackage{slashed} \usepackage{subcaption} \usetikzlibrary{arrows.meta} \usetikzlibrary{shapes} \usetikzlibrary{calc} \usepackage[colorlinks,linkcolor={blue!50!black}]{hyperref} \graphicspath{{build/figures/}{figures/}} \usepackage[left]{showlabels} \renewcommand{\showlabelfont}{\footnotesize\color{darkgreen}} \emergencystretch \hsize \setlength{\marginparwidth}{2cm} \newcommand{\HEJ}{{\tt HEJ}\xspace} \newcommand{\NLO}{{\tt NLO}\xspace} \newcommand{\LO}{{\tt LO}\xspace} \newcommand{\HIGHEJ}{\emph{High Energy Jets}\xspace} \newcommand{\cmake}{\href{https://cmake.org/}{cmake}\xspace} \newcommand{\html}{\href{https://www.w3.org/html/}{html}\xspace} \newcommand{\YAML}{\href{http://yaml.org/}{YAML}\xspace} \newcommand{\QCDloop}{\href{https://github.com/scarrazza/qcdloop}{QCDloop}\xspace} \newcommand{\FORM}{{\tt FORM}\xspace} \newcommand\matel[4][]{\mathinner{\langle#2\vert#3\vert#4\rangle}_{#1}} \newcommand{\as}{\alpha_s} \DeclareRobustCommand{\mathgraphics}[1]{\vcenter{\hbox{\includegraphics{#1}}}} \def\spa#1.#2{\left\langle#1\,#2\right\rangle} \def\spb#1.#2{\left[#1\,#2\right]} \def\spaa#1.#2.#3{\langle\mskip-1mu{#1} | #2 | {#3}\mskip-1mu\rangle} \def\spbb#1.#2.#3{[\mskip-1mu{#1} | #2 | {#3}\mskip-1mu]} \def\spab#1.#2.#3{\langle\mskip-1mu{#1} | #2 | {#3}\mskip-1mu\rangle} \def\spba#1.#2.#3{\langle\mskip-1mu{#1}^+ | #2 | {#3}^+\mskip-1mu\rangle} \def\spav#1.#2.#3{\|\mskip-1mu{#1} | #2 | {#3}\mskip-1mu\|^2} \def\jc#1.#2.#3{j^{#1}_{#2#3}} % expectation value \newcommand{\ev}[1]{\text{E}\left[#1\right]} \definecolor{darkgreen}{rgb}{0,0.4,0} \lstset{ % backgroundcolor=\color{lightgray}, % choose the background color; you must add \usepackage{color} or \usepackage{xcolor} basicstyle=\footnotesize\usefont{T1}{DejaVuSansMono-TLF}{m}{n}, % the size of the fonts that are used for the code breakatwhitespace=false, % sets if automatic breaks should only happen at whitespace breaklines=false, % sets automatic line breaking captionpos=t, % sets the caption-position to bottom commentstyle=\color{red}, % comment style deletekeywords={...}, % if you want to delete keywords from the given language escapeinside={\%*}{*)}, % if you want to add LaTeX within your code extendedchars=true, % lets you use non-ASCII characters; for 8-bits encodings only, does not work with UTF-8 frame=false, % adds a frame around the code keepspaces=true, % keeps spaces in text, useful for keeping indentation of code (possibly needs columns=flexible) keywordstyle=\color{blue}, % keyword style otherkeywords={}, % if you want to add more keywords to the set numbers=none, % where to put the line-numbers; possible values are (none, left, right) numbersep=5pt, % how far the line-numbers are from the code rulecolor=\color{black}, % if not set, the frame-color may be changed on line-breaks within not-black text (e.g. 
comments (green here)) showspaces=false, % show spaces everywhere adding particular underscores; it overrides 'showstringspaces' showstringspaces=false, % underline spaces within strings only showtabs=false, % show tabs within strings adding particular underscores stepnumber=2, % the step between two line-numbers. If it's 1, each line will be numbered stringstyle=\color{gray}, % string literal style tabsize=2, % sets default tabsize to 2 spaces title=\lstname, emph={}, emphstyle=\color{darkgreen} } \begin{document} \tikzstyle{mynode}=[rectangle split,rectangle split parts=2, draw,rectangle split part fill={lightgray, none}] \title{\HEJ 2 developer manual} \author{} \maketitle \tableofcontents \newpage \section{Overview} \label{sec:overview} \HEJ 2 is a C++ program and library implementing an algorithm to apply \HIGHEJ resummation~\cite{Andersen:2008ue,Andersen:2008gc} to pre-generated fixed-order events. This document is intended to give an overview over the concepts and structure of this implementation. \subsection{Project structure} \label{sec:project} \HEJ 2 is developed under the \href{https://git-scm.com/}{git} version control system. The main repository is on the IPPP \href{https://gitlab.com/}{gitlab} server under \url{https://gitlab.dur.scotgrid.ac.uk/hej/hej}. To get a local copy, get an account on the gitlab server and use \begin{lstlisting}[language=sh,caption={}] git clone git@gitlab.dur.scotgrid.ac.uk:hej/hej.git \end{lstlisting} This should create a directory \texttt{hej} with the following contents: \begin{description} \item[README.md:] Basic information concerning \HEJ. \item[doc:] Contains additional documentation, see section~\ref{sec:doc}. \item[include:] Contains the C++ header files. \item[src:] Contains the C++ source files. \item[examples:] Example analyses and scale setting code. \item[t:] Contains the source code for the automated tests. \item[CMakeLists.txt:] Configuration file for the \cmake build system. See section~\ref{sec:cmake}. \item[cmake:] Auxiliary files for \cmake. This includes modules for finding installed software in \texttt{cmake/Modules} and templates for code generation during the build process in \texttt{cmake/Templates}. \item[config.yml:] Sample configuration file for running \HEJ 2. \item[current\_generator:] Contains the code for the current generator, see section~\ref{sec:cur_gen}. \item[FixedOrderGen:] Contains the code for the fixed-order generator, see section~\ref{sec:HEJFOG}. \item[COPYING:] License file. \item[AUTHORS:] List of \HEJ authors. \item[Changes-API.md:] Change history for the \HEJ API (application programming interface). \item[Changes.md:] History of changes relevant for \HEJ users. \end{description} In the following all paths are given relative to the \texttt{hej} directory. \subsection{Documentation} \label{sec:doc} The \texttt{doc} directory contains user documentation in \texttt{doc/sphinx} and the configuration to generate source code documentation in \texttt{doc/doxygen}. The user documentation explains how to install and run \HEJ 2. The format is \href{http://docutils.sourceforge.net/rst.html}{reStructuredText}, which is mostly human-readable. Other formats, like \html, can be generated with the \href{http://www.sphinx-doc.org/en/master/}{sphinx} generator with \begin{lstlisting}[language=sh,caption={}] make sphinx \end{lstlisting} To document the source code we use \href{https://www.stack.nl/~dimitri/doxygen/}{doxygen}. 
To generate \html documentation, use the command
\begin{lstlisting}[language=sh,caption={}]
make doxygen
\end{lstlisting}
in your \texttt{build/} directory.

\subsection{Build system}
\label{sec:cmake}

For the most part, \HEJ 2 is a library providing classes and functions that can be used to add resummation to fixed-order events. In addition, there is a relatively small executable program leveraging this library to read in events from an input file and produce resummation events. Both the library and the program are built and installed with the help of \cmake. Debug information can be turned on by using
\begin{lstlisting}[language=sh,caption={}]
cmake base/directory -DCMAKE_BUILD_TYPE=Debug
make install
\end{lstlisting}
This facilitates the use of debuggers like \href{https://www.gnu.org/software/gdb/}{gdb}.

The main \cmake configuration file is \texttt{CMakeLists.txt}. It defines the compiler flags, software prerequisites, header and source files used to build \HEJ 2, and the automated tests. \texttt{cmake/Modules} contains module files that help with the detection of the software prerequisites, and \texttt{cmake/Templates} contains template files for the automatic generation of header and source files. For example, this allows us to keep the version information in a single central location (\texttt{CMakeLists.txt}) and to automatically generate a header file from the template \texttt{Version.hh.in} that propagates it to the C++ code.

\subsection{General coding guidelines}
\label{sec:notes}

The goal is to make the \HEJ 2 code well-structured and readable. Here are a number of guidelines to this end.
\begin{description}
\item[Observe the boy scout rule.] Always leave the code cleaner than you found it. Ugly hacks can be useful for testing, but shouldn't make their way into the main branch.
\item[Ask if something is unclear.] Often there is a good reason why code is written the way it is. Sometimes that reason is only obvious to the original author (use \lstinline!git blame! to find them), in which case they should be poked to add a comment. Sometimes there is no good reason, but nobody has had the time to come up with something better yet. In some places the code might just be bad.
\item[Don't break tests.] There are a number of tests in the \texttt{t} directory, which can be run with \lstinline!make test!. Ideally, all tests should run successfully in each git revision. If your latest commit broke a test and you haven't pushed to the central repository yet, you can fix it with \lstinline!git commit --amend!. If an earlier local commit broke a test, you can use \lstinline!git rebase -i! if you feel confident. In addition, each \lstinline!git push! is automatically tested via the GitLab CI (see appendix~\ref{sec:CI}).
\item[Test your new code.] When you add some new functionality, also add an automated test. This can be useful even if you don't know the ``correct'' result because it prevents the code from changing its behaviour silently in the future. \href{http://www.valgrind.org/}{valgrind} is a very useful tool to detect potential memory leaks. The code coverage of all tests can be generated with \href{https://gcovr.com/en/stable/}{gcovr}. To this end, add the flag \lstinline!-DTEST_COVERAGE=True! to cmake and run \lstinline!make ctest_coverage!.
\item[Stick to the coding style.] It is somewhat easier to read code that has a uniform coding and indentation style. We don't have a strict style, but it helps if your code looks similar to what is already there.
\end{description}

\section{Program flow}
\label{sec:flow}

A run of the \HEJ 2 program has three stages: initialisation, event processing, and cleanup. The following sections outline these stages and their relations to the various classes and functions in the code. Unless denoted otherwise, all classes and functions are part of the \lstinline!HEJ! namespace. The code for the \HEJ 2 program is in \texttt{src/bin/HEJ.cc}; all other code comprises the \HEJ 2 library. Classes and free functions are usually implemented in header and source files with a corresponding name, i.e. the code for \lstinline!MyClass! can usually be found in \texttt{include/HEJ/MyClass.hh} and \texttt{src/MyClass.cc}.

\subsection{Initialisation}
\label{sec:init}

The first step is to load and parse the \YAML configuration file. The entry point for this is the \lstinline!load_config! function, and the related code can be found in \texttt{include/HEJ/YAMLreader.hh}, \texttt{include/HEJ/config.hh} and the corresponding \texttt{.cc} files in the \texttt{src} directory. The implementation is based on the \href{https://github.com/jbeder/yaml-cpp}{yaml-cpp} library. The \lstinline!load_config! function returns a \lstinline!Config! object containing all settings. To detect potential mistakes as early as possible, we throw an exception whenever one of the following errors occurs:
\begin{itemize}
\item There is an unknown option in the \YAML file.
\item A setting is invalid, for example a string is given where a number would be expected.
\item An option value is not set.
\end{itemize}
The third rule is sometimes relaxed for ``advanced'' settings with an obvious default, like for importing custom scales or analyses.

The information stored in the \lstinline!Config! object is then used to initialise various objects required for the event processing stage described in section~\ref{sec:processing}. First, the \lstinline!get_analyses! function creates a number of objects that inherit from the \lstinline!Analysis! interface.\footnote{In the context of C++ the proper technical expression is ``pure abstract class''.} Using an interface allows us to decide the concrete type of the analysis at run time instead of having to make a compile-time decision. Depending on the settings, \lstinline!get_analyses! creates a number of user-defined analyses loaded from one or more external libraries and \textsc{rivet} analyses (see the user documentation \url{https://hej.hepforge.org/doc/current/user/}).

Together with a number of further objects, whose roles are described in section~\ref{sec:processing}, we also initialise the global random number generator. We again use an interface to defer deciding the concrete type until the program is actually run. Currently, we support the \href{https://mixmax.hepforge.org/}{MIXMAX} (\texttt{include/HEJ/Mixmax.hh}) and Ranlux64 (\texttt{include/HEJ/Ranlux64.hh}) random number generators, both of which are provided by \href{http://proj-clhep.web.cern.ch/}{CLHEP}.

We also set up a \lstinline!HEJ::EventReader! object for reading events either in the Les Houches event file format~\cite{Alwall:2006yp} or an \href{https://www.hdfgroup.org/}{HDF5}-based format~\cite{Hoeche:2019rti}. To allow making the decision at run time, \lstinline!HEJ::EventReader! is an abstract base class defined in \texttt{include/HEJ/EventReader.hh} and the implementations of the derived classes are in \texttt{include/HEJ/LesHouchesReader.hh}, \texttt{include/HEJ/HDF5Reader.hh} and the corresponding \texttt{.cc} source files in the \texttt{src} directory.
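The following sketch illustrates the run-time dispatch behind this design. It is not the actual interface from \texttt{include/HEJ/EventReader.hh}; apart from the class names and the \lstinline!read_event! member used in the flow chart of section~\ref{sec:processing}, all details (member signatures and the \lstinline!make_reader! helper) are illustrative assumptions.
\begin{lstlisting}[language=C++,caption={}]
// Illustrative sketch only -- not the actual HEJ interface.
// The real classes live in include/HEJ/EventReader.hh etc.;
// the make_reader helper and all signatures are assumptions.
#include <memory>
#include <string>

class EventReader {
public:
  virtual ~EventReader() = default;
  // advance to the next event, return false when the input is exhausted
  virtual bool read_event() = 0;
};

class LesHouchesReader : public EventReader {
public:
  bool read_event() override { /* parse the next <event> block */ return false; }
};

class HDF5Reader : public EventReader {
public:
  bool read_event() override { /* read the next HDF5 entry */ return false; }
};

// hypothetical factory: choose the concrete reader at run time
std::unique_ptr<EventReader> make_reader(std::string const & filename) {
  const bool is_hdf5 =
    filename.size() >= 5 && filename.substr(filename.size()-5) == ".hdf5";
  if(is_hdf5) return std::make_unique<HDF5Reader>();
  return std::make_unique<LesHouchesReader>();
}
\end{lstlisting}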
The \lstinline!LesHouchesReader! leverages \href{http://home.thep.lu.se/~leif/LHEF/}{\texttt{include/LHEF/LHEF.h}}. A small wrapper around the \href{https://www.boost.org/doc/libs/1_67_0/libs/iostreams/doc/index.html}{boost iostreams} library allows us to also read event files compressed with \href{https://www.gnu.org/software/gzip/}{gzip}. The wrapper code is in \texttt{include/HEJ/stream.hh} and \texttt{src/stream.cc}.

If unweighting is enabled, we also initialise an unweighter as defined in \texttt{include/HEJ/Unweighter.hh}. The unweighting strategies are explained in section~\ref{sec:unweight}.

\subsection{Event processing}
\label{sec:processing}

In the second stage, events are read continuously from the event file. After jet clustering, a number of corresponding resummation events are generated for each input event and fed into the analyses and a number of output files. The roles of various classes and functions are illustrated in the following flow chart:
\begin{center}
  \begin{tikzpicture}[node distance=2cm and 5mm]
    \node (reader) [mynode] {\lstinline!EventReader::read_event!\nodepart{second}{read event}};
    \node (data) [mynode,below=of reader] {\lstinline!Event::EventData! constructor\nodepart{second}{convert to \HEJ object}};
    \node (cluster) [mynode,below=of data] {\lstinline!Event::EventData::cluster!\nodepart{second}{cluster jets \& classify \lstinline!EventType!}};
    \node (resum) [mynode,below=of cluster] {\lstinline!EventReweighter::reweight!\nodepart{second}{perform resummation}};
    \node (cut) [mynode,below=of resum] {\lstinline!Analysis::pass_cuts!\nodepart{second}{apply cuts}};
    \node (unweight) [mynode,below=of cut] {\lstinline!Unweighter::unweight!\nodepart{second}{unweight (optional)}};
    \node (fill) [mynode,below left=of unweight] {\lstinline!Analysis::fill!\nodepart{second}{analyse event}};
    \node (write) [mynode,below right=of unweight] {\lstinline!CombinedEventWriter::write!\nodepart{second}{write out event}};
    \node (control) [below=of unweight] {};
    \draw[-{Latex[length=3mm, width=1.5mm]}] (reader.south) -- node[left] {\lstinline!LHEF::HEPEUP!} (data.north);
    \draw[-{Latex[length=3mm, width=1.5mm]}] (data.south) -- node[left] {\lstinline!Event::EventData!} (cluster.north);
    \draw[-{Latex[length=3mm, width=1.5mm]}] (cluster.south) -- node[left] {\lstinline!Event!} (resum.north);
    \draw[-{Latex[length=3mm, width=1.5mm]}] (resum.south) -- (cut.north);
    \draw[-{Latex[length=3mm, width=1.5mm]}] ($(resum.south)+(10mm, 0cm)$) -- ($(cut.north)+(10mm, 0cm)$);
    \draw[-{Latex[length=3mm, width=1.5mm]}] ($(resum.south)+(5mm, 0cm)$) -- ($(cut.north)+(5mm, 0cm)$);
    \draw[-{Latex[length=3mm, width=1.5mm]}] ($(resum.south)-(5mm, 0cm)$) -- ($(cut.north)-(5mm, 0cm)$);
    \draw[-{Latex[length=3mm, width=1.5mm]}] ($(resum.south)-(10mm, 0cm)$) -- node[left] {\lstinline!Event!} ($(cut.north)-(10mm, 0cm)$);
    \draw[-{Latex[length=3mm, width=1.5mm]}] (cut.south) -- (unweight.north);
    \draw[-{Latex[length=3mm, width=1.5mm]}] ($(cut.south)+(7mm, 0cm)$) -- ($(unweight.north)+(7mm, 0cm)$);
    \draw[-{Latex[length=3mm, width=1.5mm]}] ($(cut.south)-(7mm, 0cm)$) -- node[left] {\lstinline!Event!} ($(unweight.north)-(7mm, 0cm)$);
    \draw[-{Latex[length=3mm, width=1.5mm]}] ($(unweight.south)-(3mm,0mm)$) .. controls ($(control)-(3mm,0mm)$) ..node[left] {\lstinline!Event!} (fill.east);
    \draw[-{Latex[length=3mm, width=1.5mm]}] ($(unweight.south)-(3mm,0mm)$) .. controls ($(control)-(3mm,0mm)$) ..
(write.west); \draw[-{Latex[length=3mm, width=1.5mm]}] ($(unweight.south)+(3mm,0mm)$) .. controls ($(control)+(3mm,0mm)$) .. (fill.east); \draw[-{Latex[length=3mm, width=1.5mm]}] ($(unweight.south)+(3mm,0mm)$) .. controls ($(control)+(3mm,0mm)$) ..node[right] {\lstinline!Event!} (write.west); \end{tikzpicture} \end{center} \lstinline!EventData! is an intermediate container, its members are completely accessible. In contrast after jet clustering and classification the phase space inside \lstinline!Event! can not be changed any more (\href{https://wikipedia.org/wiki/Builder_pattern}{Builder design pattern}). The resummation is performed by the \lstinline!EventReweighter! class, which is described in more detail in section~\ref{sec:resum}. The \lstinline!CombinedEventWriter! writes events to zero or more output files. To this end, it contains a number of objects implementing the \lstinline!EventWriter! interface. These event writers typically write the events to a file in a given format. We currently have the \lstinline!LesHouchesWriter! for event files in the Les Houches Event File format, the \lstinline!HDF5Writer! for \href{https://www.hdfgroup.org/}{HDF5}~\cite{Hoeche:2019rti} and the \lstinline!HepMC2Writer! or \lstinline!HepMC3Writer! for the \href{https://hepmc.web.cern.ch/hepmc/}{HepMC} format (Version 2 and 3). \subsection{Resummation} \label{sec:resum} In the \lstinline!EventReweighter::reweight! member function, we first classify the input fixed-order event (FKL, unordered, non-resummable, \dots) and decide according to the user settings whether to discard, keep, or resum the event. If we perform resummation for the given event, we generate a number of trial \lstinline!PhaseSpacePoint! objects. Phase space generation is discussed in more detail in section~\ref{sec:pspgen}. We then perform jet clustering according to the settings for the resummation jets on each \lstinline!PhaseSpacePoint!, update the factorisation and renormalisation scale in the resulting \lstinline!Event! and reweight it according to the ratio of pdf factors and \HEJ matrix elements between resummation and original fixed-order event: \begin{center} \begin{tikzpicture}[node distance=1.5cm and 5mm] \node (in) {}; \node (treat) [diamond,draw,below=of in,minimum size=3.5cm, label={[anchor=west, inner sep=8pt]west:discard}, label={[anchor=east, inner sep=14pt]east:keep}, label={[anchor=south, inner sep=20pt]south:reweight} ] {}; \draw (treat.north west) -- (treat.south east); \draw (treat.north east) -- (treat.south west); \node (psp) [mynode,below=of treat] {\lstinline!PhaseSpacePoint! 
constructor}; \node (cluster) [mynode,below=of psp] {\lstinline!Event::EventData::cluster!\nodepart{second}{cluster jets}}; \node (colour) [mynode,below=of cluster] {\lstinline!Event::generate_colours()!\nodepart{second}{generate particle colour}}; \node (gen_scales) [mynode,below=of colour] {\lstinline!ScaleGenerator::operator()!\nodepart{second}{update scales}}; \node (rescale) [mynode,below=of gen_scales] {\lstinline!PDF::pdfpt!, \lstinline!MatrixElement!\nodepart{second}{reweight}}; \node (out) [below of=rescale] {}; \draw[-{Latex[length=3mm, width=1.5mm]}] (in.south) -- node[left] {\lstinline!Event!} (treat.north); \draw[-{Latex[length=3mm, width=1.5mm]}] (treat.south) -- node[left] {\lstinline!Event!} (psp.north); \draw[-{Latex[length=3mm, width=1.5mm]}] (psp.south) -- (cluster.north); \draw[-{Latex[length=3mm, width=1.5mm]}] ($(psp.south)+(7mm, 0cm)$) -- ($(cluster.north)+(7mm, 0cm)$); \draw[-{Latex[length=3mm, width=1.5mm]}] ($(psp.south)-(7mm, 0cm)$) -- node[left] {\lstinline!PhaseSpacePoint!} ($(cluster.north)-(7mm, 0cm)$); \draw[-{Latex[length=3mm, width=1.5mm]}] (cluster.south) -- (colour.north); \draw[-{Latex[length=3mm, width=1.5mm]}] ($(cluster.south)+(7mm, 0cm)$) -- ($(colour.north)+(7mm, 0cm)$); \draw[-{Latex[length=3mm, width=1.5mm]}] ($(cluster.south)-(7mm, 0cm)$) -- node[left] {\lstinline!Event!} ($(colour.north)-(7mm, 0cm)$); \draw[-{Latex[length=3mm, width=1.5mm]}] (colour.south) -- (gen_scales.north); \draw[-{Latex[length=3mm, width=1.5mm]}] ($(colour.south)+(7mm, 0cm)$) -- ($(gen_scales.north)+(7mm, 0cm)$); \draw[-{Latex[length=3mm, width=1.5mm]}] ($(colour.south)-(7mm, 0cm)$) -- node[left] {\lstinline!Event!} ($(gen_scales.north)-(7mm, 0cm)$); \draw[-{Latex[length=3mm, width=1.5mm]}] (gen_scales.south) -- (rescale.north); \draw[-{Latex[length=3mm, width=1.5mm]}] ($(gen_scales.south)+(7mm, 0cm)$) -- ($(rescale.north)+(7mm, 0cm)$); \draw[-{Latex[length=3mm, width=1.5mm]}] ($(gen_scales.south)-(7mm, 0cm)$) -- node[left] {\lstinline!Event!} ($(rescale.north)-(7mm, 0cm)$); \draw[-{Latex[length=3mm, width=1.5mm]}] (rescale.south) -- (out.north); \draw[-{Latex[length=3mm, width=1.5mm]}] ($(rescale.south)+(7mm, 0cm)$) -- ($(out.north)+(7mm, 0cm)$); \draw[-{Latex[length=3mm, width=1.5mm]}] ($(rescale.south)-(7mm, 0cm)$) -- node[left] {\lstinline!Event!} ($(out.north)-(7mm, 0cm)$); \node (helper) at ($(treat.east) + (15mm,0cm)$) {}; \draw[-{Latex[length=3mm, width=1.5mm]}] (treat.east) -- ($(treat.east) + (15mm,0cm)$) -- node[left] {\lstinline!Event!} (helper |- gen_scales.east) -- (gen_scales.east) ; \end{tikzpicture} \end{center} \subsubsection{HEJ@NLO Truncation} \label{sec:hejnlo} For predictions involving NLO matching, we need to multiply our inclusive HEJ prediction, $\sigma^{2j}_{\HEJ}$, by an appropriate reweighting factor, \begin{equation} \sigma^{2j}_{\HEJ} \left(\frac{\sigma^{2j}_{\NLO}}{\sigma^{2j}_{\tt HEJ@NLO}}\right) = \sigma^{2j}_{\HEJ} \left(\frac{\sigma^{2j}_{\NLO}}{\sigma^{2jE}_{\tt HEJ@NLO}+ \sigma^{3j}_{\LO},}\right) \end{equation} with an analogous equation for a histogram bin. To obtain the exclusive $\sigma^{2jE}_{\tt HEJ@NLO}$ cross section, we can enable the \lstinline!NLO truncation! options in the config file at runtime. This option restricts the method \lstinline!PhaseSpacePoint::sample_ng! to only generate 0 or 1 additional gluons for the real emissions and then truncates to the first term in $\Delta y$ in the \lstinline!MatrixElement! virtual corrections for the case of 0 extra emissions. 
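As a rough illustration of the multiplicity restriction described above (this is not the actual implementation of \lstinline!PhaseSpacePoint::sample_ng! in \texttt{src/PhaseSpacePoint.cc}; the function signature and variable names are assumptions), the sampled number of extra gluons could be capped as follows:
\begin{lstlisting}[language=C++,caption={}]
// Illustrative sketch only: cap the number of extra gluons at one when
// the "NLO truncation" setting is active. Names and signature are assumptions.
#include <algorithm>

int truncate_ng(int ng_sampled, bool nlo_truncation) {
  // ng_sampled: gluon multiplicity drawn from the full distribution
  if(nlo_truncation) return std::min(ng_sampled, 1); // at most one real emission
  return ng_sampled;
}
\end{lstlisting}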
\subsection{Phase space point generation} \label{sec:pspgen} The resummed and matched \HEJ cross section for pure jet production of FKL configurations is given by (cf. eq. (3) of~\cite{Andersen:2018tnm}) \begin{align} \label{eq:resumdijetFKLmatched2} % \begin{split} \sigma&_{2j}^\mathrm{resum, match}=\sum_{f_1, f_2}\ \sum_m \prod_{j=1}^m\left( \int_{p_{j\perp}^B=0}^{p_{j\perp}^B=\infty} \frac{\mathrm{d}^2\mathbf{p}_{j\perp}^B}{(2\pi)^3}\ \int \frac{\mathrm{d} y_j^B}{2} \right) \ (2\pi)^4\ \delta^{(2)}\!\!\left(\sum_{k=1}^{m} \mathbf{p}_{k\perp}^B\right)\nonumber\\ &\times\ x_a^B\ f_{a, f_1}(x_a^B, Q_a^B)\ x_b^B\ f_{b, f_2}(x_b^B, Q_b^B)\ \frac{\overline{\left|\mathcal{M}_\text{LO}^{f_1f_2\to f_1g\cdots gf_2}\big(\big\{p^B_j\big\}\big)\right|}^2}{(\hat {s}^B)^2}\nonumber\\ & \times (2\pi)^{-4+3m}\ 2^m \nonumber\\ &\times\ \sum_{n=2}^\infty\ \int_{p_{1\perp}=p_{\perp,\mathrm{min}} }^{p_{1\perp}=\infty} \frac{\mathrm{d}^2\mathbf{p}_{1\perp}}{(2\pi)^3}\ \int_{p_{n\perp}=p_{\perp,\mathrm{min}}}^{p_{n\perp}=\infty} \frac{\mathrm{d}^2\mathbf{p}_{n\perp}}{(2\pi)^3}\ \prod_{i=2}^{n-1}\int_{p_{i\perp}=\lambda}^{p_{i\perp}=\infty} \frac{\mathrm{d}^2\mathbf{p}_{i\perp}}{(2\pi)^3}\ (2\pi)^4\ \delta^{(2)}\!\!\left(\sum_{k=1}^n \mathbf{p}_{k\perp}\right )\\ &\times \ \mathbf{T}_y \prod_{i=1}^n \left(\int \frac{\mathrm{d} y_i}{2}\right)\ \mathcal{O}_{mj}^e\ \left(\prod_{l=1}^{m-1}\delta^{(2)}(\mathbf{p}_{\mathcal{J}_{l}\perp}^B - \mathbf{j}_{l\perp})\right)\ \left(\prod_{l=1}^m\delta(y^B_{\mathcal{J}_l}-y_{\mathcal{J}_l})\right) \ \mathcal{O}_{2j}(\{p_i\})\nonumber\\ &\times \frac{(\hat{s}^B)^2}{\hat{s}^2}\ \frac{x_a f_{a,f_1}(x_a, Q_a)\ x_b f_{b,f_2}(x_b, Q_b)}{x_a^B\ f_{a,f_1}(x_a^B, Q_a^B)\ x_b^B\ f_{b,f_2}(x_b^B, Q_b^B)}\ \frac{\overline{\left|\mathcal{M}_{\mathrm{HEJ}}^{f_1 f_2\to f_1 g\cdots gf_2}(\{ p_i\})\right|}^2}{\overline{\left|\mathcal{M}_\text{LO, HEJ}^{f_1f_2\to f_1g\cdots gf_2}\big(\big\{p^B_j\big\}\big)\right|}^{2}} \,.\nonumber % \end{split} \end{align} The first two lines correspond to the generation of the fixed-order input events with incoming partons $f_1, f_2$ and outgoing momenta $p_j^B$, where $\mathbf{p}_{j\perp}^B$ and $y_j^B$ denote the respective transverse momentum and rapidity. Note that, at leading order, these coincide with the fixed-order jet momenta $p_{\mathcal{J}_j}^B$. $f_{a,f_1}(x_a, Q_a),f_{b,f_2}(x_b, Q_b)$ are the pdf factors for the incoming partons with momentum fractions $x_a$ and $x_b$. The square of the partonic centre-of-mass energy is denoted by $\hat{s}^B$ and $\mathcal{M}_\text{LO}^{f_1f_2\to f_1g\cdots gf_2}$ is the leading-order matrix element. The third line is a factor accounting for the different multiplicities between fixed-order and resummation events. Lines four and five are the integration over the resummation phase space described in this section. $p_i$ are the momenta of the outgoing partons in resummation phase space. $\mathbf{T}_y$ denotes rapidity ordering and $\mathcal{O}_{mj}^e$ projects out the exclusive $m$-jet component. The relation between resummation and fixed-order momenta is fixed by the $\delta$ functions. The first sets each transverse fixed-order jet momentum to some function $\mathbf{j_{l\perp}}$ of the resummation momenta. The exact form is described in section~\ref{sec:ptj_res}. The second $\delta$ forces the rapidities of resummation and fixed-order jets to be the same. Finally, the last line is the reweighting of pdf and matrix element factors already shown in section~\ref{sec:resum}. 
There are two kinds of cut-off in the integration over the resummation partons. $\lambda$ is a technical cut-off connected to the cancellation of infrared divergences between real and virtual corrections. Its numerical value is set in \texttt{include/HEJ/Constants.h}. $p_{\perp,\mathrm{min}}$ regulates an \emph{uncancelled} divergence in the extremal parton momenta. Its size is set in the user configuration, see \url{https://hej.hepforge.org/doc/current/user/HEJ.html#settings}.

It is straightforward to generalise eq.~(\ref{eq:resumdijetFKLmatched2}) to unordered configurations and processes with additional colourless emissions, for example a Higgs or electroweak boson. In the latter case only the fixed-order integration and the matrix elements change.

\subsubsection{Gluon Multiplicity}
\label{sec:psp_ng}

The first step in evaluating the resummation phase space in eq.~(\ref{eq:resumdijetFKLmatched2}) is to randomly pick terms in the sum over the number of emissions. This sampling of the gluon multiplicity is done in the \lstinline!PhaseSpacePoint::sample_ng! function in \texttt{src/PhaseSpacePoint.cc}.

The typical number of extra emissions depends strongly on the rapidity span of the underlying fixed-order event. Let us, for example, consider a fixed-order FKL-type multi-jet configuration with rapidities $y_{j_f},\,y_{j_b}$ of the most forward and backward jets, respectively. By eq.~(\ref{eq:resumdijetFKLmatched2}), the jet multiplicity and the rapidity of each jet are conserved when adding resummation. This implies that additional hard radiation is restricted to rapidities $y$ within a region $y_{j_b} \lesssim y \lesssim y_{j_f}$. Within \HEJ, we require the most forward and most backward emissions to be hard \todo{specify how hard} in order to avoid divergences, so this constraint in fact applies to \emph{all} additional radiation.

To simplify the remaining discussion, let us remove the FKL rapidity ordering
\begin{equation}
  \label{eq:remove_y_order}
  \mathbf{T}_y \prod_{i=1}^n\int \frac{\mathrm{d}y_i}{2} = \frac{1}{n!}\prod_{i=1}^n\int \frac{\mathrm{d}y_i}{2}\,,
\end{equation}
where all rapidity integrals now cover a region which is approximately bounded by $y_{j_b}$ and $y_{j_f}$. Each of the $m$ jets has to contain at least one parton; selecting random emissions, we can rewrite the phase space integrals as
\begin{equation}
  \label{eq:select_jets}
  \frac{1}{n!}\prod_{i=1}^n\int [\mathrm{d}p_i] = \left(\prod_{i=1}^{m}\int [\mathrm{d}p_i]\ {\cal J}_i(p_i)\right) \frac{1}{n_g!}\prod_{i=m+1}^{m+n_g}\int [\mathrm{d}p_i]
\end{equation}
with jet selection functions
\begin{equation}
  \label{eq:def_jet_selection}
  {\cal J}_i(p) =
  \begin{cases}
    1 &p\text{ clustered into jet }i\\
    0 & \text{otherwise}
  \end{cases}
\end{equation}
and $n_g \equiv n - m$. Here and in the following we use the short-hand notation $[\mathrm{d}p_i]$ to denote the phase-space measure for parton $i$.

As is evident from eq.~\eqref{eq:select_jets}, adding an extra emission $n_g+1$ introduces a suppression factor $\tfrac{1}{n_g+1}$. However, the additional phase space integral also results in an enhancement proportional to $\Delta y_{j_f j_b} = y_{j_f} - y_{j_b}$. This is a result of the rapidity-independence of the MRK limit of the integrand, which consists of the matrix elements divided by the flux factor. Indeed, we observe that the typical number of gluon emissions is to a good approximation proportional to the rapidity separation, and the phase space integral is dominated by events with $n_g \approx \Delta y_{j_f j_b}$.
For the actual phase space sampling, we assume a Poisson distribution and extract the mean number of gluon emissions in different rapidity bins and fit the results to a linear function in $\Delta y_{j_f j_b}$, finding a coefficient of $0.975$ for the inclusive production of a Higgs boson with two jets. Here are the observed and fitted average gluon multiplicities as a function of $\Delta y_{j_f j_b}$: \begin{center} \includegraphics[width=.75\textwidth]{ng_mean} \end{center} As shown for two rapidity slices the assumption of a Poisson distribution is also a good approximation: \begin{center} \includegraphics[width=.49\textwidth]{{ng_1.5}.pdf}\hfill \includegraphics[width=.49\textwidth]{{ng_5.5}.pdf} \end{center} For configurations beyond leading-log, gluon emission is not allowed everywhere and the rapidity span has to be adjusted. When determining the most forward and backward jets, we exclude unordered emissions and the extremal jet in extremal quark-antiquark emissions. In addition, we subtract the rapidity region between central quark and antiquark emission. Technically, we could have two or more additional gluons inside one of the latter jets, in which case there could be gluons that end up in between the jet centres but still outside the region spanned by quark and antiquark. However, the phase space for this is very small and should not affect the average number of emitted gluons very much. \subsubsection{Number of Gluons inside Jets} \label{sec:psp_ng_jet} For each of the $n_g$ gluon emissions we can split the phase-space integral into a (disconnected) region inside the jets and a remainder: \begin{equation} \label{eq:psp_split} \int [\mathrm{d}p_i] = \int [\mathrm{d}p_i]\, \theta\bigg(\sum_{j=1}^{m}{\cal J}_j(p_i)\bigg) + \int [\mathrm{d}p_i]\, \bigg[1-\theta\bigg(\sum_{j=1}^{m}{\cal J}_j(p_i)\bigg)\bigg]\,. \end{equation} The next step is to decide how many of the gluons will form part of a jet. This is done in the \lstinline!PhaseSpacePoint::sample_ng_jets! function. We choose an importance sampling which is flat in the plane spanned by the azimuthal angle $\phi$ and the rapidity $y$. This is observed in BFKL and valid in the limit of Multi-Regge-Kinematics (MRK). Furthermore, we assume anti-$k_t$ jets, which cover an area of $\pi R^2$. In principle, the total accessible area in the $y$-$\phi$ plane is given by $2\pi \Delta y_{fb}$, where $\Delta y_{fb}\geq \Delta y_{j_f j_b}$ is the a priori unknown rapidity separation between the most forward and backward partons. In most cases the extremal jets consist of single partons, so that $\Delta y_{fb} = \Delta y_{j_f j_b}$. For the less common case of two partons forming a jet we observe a maximum distance of $R$ between the constituents and the jet centre. In rare cases jets have more than two constituents. Empirically, they are always within a distance of $\tfrac{5}{3}R$ to the centre of the jet, so $\Delta y_{fb} \leq \Delta y_{j_f j_b} + \tfrac{10}{3} R$. In practice, the extremal partons are required to carry a large fraction of the jet transverse momentum and will therefore be much closer to the jet axis. In summary, for sufficiently large rapidity separations we can use the approximation $\Delta y_{fb} \approx \Delta y_{j_f j_b}$. 
This scenario is depicted here:
\begin{center}
  \includegraphics[width=0.5\linewidth]{ps_large_y}
\end{center}
If there is no overlap between jets, the probability $p_{\cal J, >}$ for an extra gluon to end up inside a jet is then given by
\begin{equation}
  \label{eq:p_J_large}
  p_{\cal J, >} = \frac{(m - 1)\*R^2}{2\Delta y_{j_f j_b}}\,.
\end{equation}
For a very small rapidity separation, eq.~\eqref{eq:p_J_large} obviously overestimates the true probability. The maximum phase space covered by jets in the limit of a vanishing rapidity distance between all partons is $2mR \Delta y_{fb}$:
\begin{center}
  \includegraphics[width=0.5\linewidth]{ps_small_y}
\end{center}
We therefore estimate the probability for a parton to end up inside a jet as
\begin{equation}
  \label{eq:p_J}
  p_{\cal J} = \min\bigg(\frac{(m - 1)\*R^2}{2\Delta y_{j_f j_b}}, \frac{mR}{\pi}\bigg)\,.
\end{equation}
Here we compare this estimate with the actually observed fraction of additional emissions into jets as a function of the rapidity separation:
\begin{center}
  \includegraphics[width=0.75\linewidth]{pJ}
\end{center}
Again, for configurations beyond leading-log the allowed rapidity range has to be modified as explained in section~\ref{sec:psp_ng}. We also subtract the areas of unordered jets and the extremal jet in extremal quark-antiquark emission when we estimate the phase space area covered by jets. The reason is that in this case any additional emission inside such a jet would have to be beyond the hard parton dominating the next jet in rapidity, which is possible but rare.

\subsubsection{Gluons outside Jets}
\label{sec:gluons_nonjet}

Using our estimate for the probability of a gluon to be a jet constituent, we choose a number $n_{g,{\cal J}}$ of gluons inside jets, which also fixes the number $n_g - n_{g,{\cal J}}$ of gluons outside jets. As explained later on, we need to generate the momenta of the gluons outside jets first. This is done in \lstinline!PhaseSpacePoint::gen_non_jet!.

The azimuthal angle $\phi$ is generated flat within $0\leq \phi \leq 2 \pi$. The allowed rapidity interval is set by the most forward and backward partons, which are necessarily inside jets. Since these parton rapidities are not known at this point, we also have to postpone the rapidity generation for the gluons outside jets. For the scalar transverse momentum $p_\perp = |\mathbf{p}_\perp|$ of a gluon outside jets we use the parametrisation
\begin{equation}
  \label{eq:p_nonjet}
  p_\perp = \lambda + \tilde{p}_\perp\*\tan(\tau\*r)\,, \qquad \tau = \arctan\bigg(\frac{p_{\perp{\cal J}_\text{min}} - \lambda}{\tilde{p}_\perp}\bigg)\,.
\end{equation}
For $r \in [0,1)$, $p_\perp$ is always less than the minimum momentum $p_{\perp{\cal J}_\text{min}}$ required for a jet. $\tilde{p}_\perp$ is a free parameter; a good empirical value is $\tilde{p}_\perp = [1.3 + 0.2\*(n_g - n_{g,\cal J})]\,$GeV.

\subsubsection{Resummation jet momenta}
\label{sec:ptj_res}

Since \HEJ generates additional soft emissions, their recoil has to be compensated by the remaining particles. In the traditional reshuffling this recoil was distributed over the jets only. However, in anticipation of including more bosons ($WW+2j$) or fewer jets ($H+1j$), this was changed to also include bosons in the reshuffling. In the following we will still always call the recoilers \emph{jets} instead of generic \emph{Born particles}, since the bosons are treated exactly like (massive) jets.
On the one hand, each jet momentum is given by the sum of its constituent momenta.
On the other hand, the resummation jet momenta are fixed by the constraints in line five of the master equation~\eqref{eq:resumdijetFKLmatched2}. We therefore have to calculate the resummation jet momenta from these constraints before generating the momenta of the gluons inside jets. This is done in \lstinline!PhaseSpacePoint::reshuffle! and in the free \lstinline!resummation_jet_momenta! function (declared in \texttt{resummation\_jet.hh}).

The resummation jet momenta are determined by the $\delta$ functions in line five of eq.~(\ref{eq:resumdijetFKLmatched2}). The rapidities are fixed to the rapidities of the jets in the input fixed-order events, so that the FKL ordering is guaranteed to be preserved. In the traditional \HEJ reshuffling the transverse momenta are given by
\begin{equation}
  \label{eq:ptreassign_old}
  \mathbf{p}^B_{\mathcal{J}_{l\perp}} = \mathbf{j}_{l\perp} \equiv \mathbf{p}_{\mathcal{J}_{l}\perp} + \mathbf{q}_\perp \,\frac{|\mathbf{p}_{\mathcal{J}_{l}\perp}|}{P_\perp},
\end{equation}
where $\mathbf{q}_\perp = \sum_{i=1}^n \mathbf{p}_{i\perp} \bigg[1-\theta\bigg(\sum_{j=1}^{m}{\cal J}_j(p_i)\bigg)\bigg]$ is the total transverse momentum of all partons \emph{outside} jets and $P_\perp = \sum_{j=1}^m |\mathbf{p}_{\mathcal{J}_{j}\perp}|$. Since the total transverse momentum of an event vanishes, we can also use $\mathbf{q}_\perp = - \sum_{j=1}^m \mathbf{p}_{\mathcal{J}_{j}\perp}$. Eq.~(\ref{eq:ptreassign_old}) is a non-linear system of equations in the resummation jet momenta $\mathbf{p}_{\mathcal{J}_{l}\perp}$. Hence we would have to solve
\begin{equation}
  \label{eq:ptreassign_eq}
  \mathbf{p}_{\mathcal{J}_{l}\perp}=\mathbf{j}^B_{l\perp} \equiv\mathbf{j}_{l\perp}^{-1} \left(\mathbf{p}^B_{\mathcal{J}_{l\perp}}\right)
\end{equation}
numerically. Since solving such a system is computationally expensive, we instead turn the reshuffling around so that it is linear in the resummation jet momenta. Hence~\eqref{eq:ptreassign_eq} is replaced by
\begin{equation}
  \label{eq:ptreassign}
  \mathbf{p}_{\mathcal{J}_{l\perp}} = \mathbf{j}^B_{l\perp} \equiv \mathbf{p}^B_{\mathcal{J}_{l}\perp} - \mathbf{q}_\perp \,\frac{|\mathbf{p}^B_{\mathcal{J}_{l}\perp}|}{P^B_\perp},
\end{equation}
which is linear in the resummation momenta. Consequently, the equivalent of~\eqref{eq:ptreassign_old} is non-linear in the Born momenta. However, the exact form of~\eqref{eq:ptreassign_old} is not relevant for the resummation. Both methods have been tested for two and three jets with the \textsc{rivet} standard analysis \texttt{MC\_JETS}. They did not show any differences, even after $10^9$ events.

The reshuffling relation~\eqref{eq:ptreassign} allows the transverse momenta $p^B_{\mathcal{J}_{l\perp}}$ of the fixed-order jets to be somewhat below the minimum transverse momentum of resummation jets. It is crucial that this difference does not become too large, as the fixed-order cross section diverges for vanishing transverse momenta. In the production of a Higgs boson with resummation jets above $30\,$GeV we observe that the contribution from fixed-order events with jets softer than about $20\,$GeV can be safely neglected. This is shown in the following plot of the differential cross section as a function of the transverse momentum of the softest fixed-order jet:
\begin{center}
  \includegraphics[width=.75\textwidth]{ptBMin}
\end{center}
Finally, we have to account for the fact that the reshuffling relation~\eqref{eq:ptreassign} is non-linear in the Born momenta.
To arrive at the master formula~\eqref{eq:resumdijetFKLmatched2} for the cross section, we have introduced unity in the form of an integral over the Born momenta with $\delta$ functions in the integrand, that is
\begin{equation}
  \label{eq:delta_intro}
  1 = \int_{p_{j\perp}^B=0}^{p_{j\perp}^B=\infty} \mathrm{d}^2\mathbf{p}_{j\perp}^B\delta^{(2)}(\mathbf{p}_{\mathcal{J}_{j\perp}}^B - \mathbf{j}_{j\perp})\,.
\end{equation}
If the arguments of the $\delta$ functions are not linear in the Born momenta, we have to compensate with additional Jacobians as factors. Explicitly, for the reshuffling relation~\eqref{eq:ptreassign} we have
\begin{equation}
  \label{eq:delta_rewrite}
  \prod_{l=1}^m \delta^{(2)}(\mathbf{p}_{\mathcal{J}_{l\perp}}^B - \mathbf{j}_{l\perp}) = \Delta \prod_{l=1}^m \delta^{(2)}(\mathbf{p}_{\mathcal{J}_{l\perp}} - \mathbf{j}_{l\perp}^B)\,,
\end{equation}
where $\mathbf{j}_{l\perp}^B$ is given by~\eqref{eq:ptreassign_eq} and only depends on the Born momenta. We have extended the product to run to $m$ instead of $m-1$ by eliminating the last $\delta$ function $\delta^{(2)}\!\!\left(\sum_{k=1}^n \mathbf{p}_{k\perp}\right )$. The Jacobian $\Delta$ is the determinant of a $2m \times 2m$ matrix with indices $l, l' = 1,\dots,m$ and $X, X' = x,y$:
\begin{equation}
  \label{eq:jacobian}
  \Delta = \left|\frac{\partial\,\mathbf{j}^B_{l'\perp}}{\partial\, \mathbf{p}^B_{{\cal J}_l \perp}} \right| = \left| \delta_{l l'} \delta_{X X'} - \frac{q_X\, p^B_{{\cal J}_{l'}X'}}{\left|\mathbf{p}^B_{{\cal J}_{l'} \perp}\right| P^B_\perp}\left(\delta_{l l'} - \frac{\left|\mathbf{p}^B_{{\cal J}_l \perp}\right|}{P^B_\perp}\right)\right|\,.
\end{equation}
The determinant is calculated in \lstinline!resummation_jet_weight!, again coming from the \texttt{resummation\_jet.hh} header. Having to introduce this Jacobian is not a disadvantage specific to the new reshuffling. If we instead use the old reshuffling relation~\eqref{eq:ptreassign_old} we \emph{also} have to introduce a similar Jacobian, since we actually want to integrate over the resummation phase space and need to transform the argument of the $\delta$ function to be linear in the resummation momenta for this.

\subsubsection{Gluons inside Jets}
\label{sec:gluons_jet}

After the steps outlined in section~\ref{sec:psp_ng_jet}, we have a total number of $m + n_{g,{\cal J}}$ constituents. In \lstinline!PhaseSpacePoint::distribute_jet_partons! we distribute them randomly among the jets such that each jet has at least one constituent. We then generate their momenta in \lstinline!PhaseSpacePoint::split! using the \lstinline!Splitter! class.

The phase space integral for a jet ${\cal J}$ is given by
\begin{equation}
  \label{eq:ps_jetparton}
  \prod_{i\text{ in }{\cal J}} \bigg(\int \mathrm{d}\mathbf{p}_{i\perp}\ \int \mathrm{d} y_i \bigg)\delta^{(2)}\Big(\sum_{i\text{ in }{\cal J}} \mathbf{p}_{i\perp} - \mathbf{j}_{\perp}^B\Big)\delta(y_{\mathcal{J}}-y^B_{\mathcal{J}})\,.
\end{equation}
For jets with a single constituent, the parton momentum is obviously equal to the jet momentum. In the case of two constituents, we observe that the partons are always inside the jet cone with radius $R$ and often very close to the jet centre.
The following plots show the typical relative distance $\Delta R/R$ for this scenario: \begin{center} \includegraphics[width=0.45\linewidth]{dR_2} \includegraphics[width=0.45\linewidth]{dR_2_small} \end{center} According to this preference for small values of $\Delta R$, we parametrise the $\Delta R$ integrals as \begin{equation} \label{eq:dR_sampling} \frac{\Delta R}{R} = \begin{cases} 0.25\,x_R & x_R < 0.4 \\ 1.5\,x_R - 0.5 & x_R \geq 0.4 \end{cases}\,. \end{equation} Next, we generate $\Theta_1 \equiv \Theta$ and use the constraint $\Theta_2 = \Theta \pm \pi$. The transverse momentum of the first parton is then given by \begin{equation} \label{eq:delta_constraints} p_{1\perp} = \frac{p_{\mathcal{J} y} - \tan(\phi_2) p_{\mathcal{J} x}}{\sin(\phi_1) - \tan(\phi_2)\cos(\phi_1)}\,. \end{equation} We get $p_{2\perp}$ by exchanging $1 \leftrightarrow 2$ in the indices. To obtain the Jacobian of the transformation, we start from the single jet phase space eq.~(\ref{eq:ps_jetparton}) with the rapidity delta function already rewritten to be linear in the rapidity of the last parton, i.e. \begin{equation} \label{eq:jet_2p} \prod_{i=1,2} \bigg(\int \mathrm{d}\mathbf{p}_{i\perp}\ \int \mathrm{d} y_i \bigg)\delta^{(2)}\Big(\mathbf{p}_{1\perp} + \mathbf{p}_{2\perp} - \mathbf{j}_{\perp}^B\Big)\delta(y_2- \dots)\,. \end{equation} The integral over the second parton momentum is now trivial; we can just replace the integral over $y_2$ with the equivalent constraint \begin{equation} \label{eq:R2} \int \mathrm{d}R_2 \ \delta\bigg(R_2 - \bigg[\phi_{\cal J} - \arctan \bigg(\frac{p_{{\cal J}y} - p_{1y}}{p_{{\cal J}x} - p_{1x}}\bigg)\bigg]/\cos \Theta\bigg) \,. \end{equation} In order to fix the integral over $p_{1\perp}$ instead, we rewrite this $\delta$ function. This introduces the Jacobian \begin{equation} \label{eq:jac_pt1} \bigg|\frac{\partial p_{1\perp}}{\partial R_2} \bigg| = \frac{\cos(\Theta)\mathbf{p}_{2\perp}^2}{p_{{\cal J}\perp}\sin(\phi_{\cal J}-\phi_1)}\,. \end{equation} The final form of the integral over the two parton momenta is then \begin{equation} \label{eq:ps_jet_2p} \int \mathrm{d}R_1\ R_1 \int \mathrm{d}R_2 \int \mathrm{d}x_\Theta\ 2\pi \int \mathrm{d}p_{1\perp}\ p_{1\perp} \int \mathrm{d}p_{2\perp} \ \bigg|\frac{\partial p_{1\perp}}{\partial R_2} \bigg|\delta(p_{1\perp} -\dots) \delta(p_{2\perp} - \dots)\,. \end{equation} As is evident from section~\ref{sec:psp_ng_jet}, jets with three or more constituents are rare and an efficient phase-space sampling is less important. For such jets, we exploit the observation that partons with a distance larger than $R_{\text{max}} = \tfrac{5}{3} R$ to the jet centre are never clustered into the jet. Assuming $N$ constituents, we generate all components for the first $N-1$ partons and fix the remaining parton with the $\delta$-functional. In order to end up inside the jet, we use the parametrisation \begin{align} \label{eq:ps_jet_param} \phi_i ={}& \phi_{\cal J} + \Delta \phi_i\,, & \Delta \phi_i ={}& \Delta R_i \cos(\Theta_i)\,, \\ y_i ={}& y_{\cal J} + \Delta y_i\,, & \Delta y_i ={}& \Delta R_i \sin(\Theta_i)\,, \end{align} and generate $\Theta_i$ and $\Delta R_i$ randomly with $\Delta R_i \leq R_{\text{max}}$ and the empiric value $R_{\text{max}} = 5\*R/3$. 
We can then write the phase space integral for a single parton as $(p_\perp = |\mathbf{p}_\perp|)$ \begin{equation} \label{eq:ps_jetparton_x} \int \mathrm{d}\mathbf{p}_{\perp}\ \int \mathrm{d} y \approx \int_{\Box} \mathrm{d}x_{\perp} \mathrm{d}x_{ R} \mathrm{d}x_{\theta}\ 2\*\pi\,\*R_{\text{max}}^2\,\*x_{R}\,\*p_{\perp}\,\*(p_{\perp,\text{max}} - p_{\perp,\text{min}}) \end{equation} with \begin{align} \label{eq:ps_jetparton_parameters} \Delta \phi ={}& R_{\text{max}}\*x_{R}\*\cos(2\*\pi\*x_\theta)\,,& \Delta y ={}& R_{\text{max}}\*x_{R}\*\sin(2\*\pi\*x_\theta)\,, \\ p_{\perp} ={}& (p_{\perp,\text{max}} - p_{\perp,\text{min}})\*x_\perp + p_{\perp,\text{min}}\,. \end{align} $p_{\perp,\text{max}}$ is determined from the requirement that the total contribution from the first $n-1$ partons --- i.e. the projection onto the jet $p_{\perp}$ axis --- must never exceed the jet $p_\perp$. This gives \todo{This bound is too high} \begin{equation} \label{eq:pt_max} p_{i\perp,\text{max}} = \frac{p_{{\cal J}\perp} - \sum_{j.frm} inside \texttt{current\_generator} it generates a corresponding header \texttt{include/HEJ/currents/.hh} inside the build directory. The header can be included with \begin{lstlisting}[language=C++,caption={}] #include "HEJ/currents/.hh" \end{lstlisting} The naming scheme is \begin{itemize} \item \texttt{j1\_j2.frm} for the contraction of current \texttt{j1} with current \texttt{j2}. \item \texttt{j1\_vx\_j2.frm} for the contraction of current \texttt{j1} with the vertex \texttt{vx} and the current \texttt{j2}. \end{itemize} For instance, \texttt{juno\_qqbarW\_j.frm} would indicate the contraction of an unordered current with a (central) quark-antiquark-W emission vertex and a standard FKL current. \subsection{Implementing new current contractions} \label{sec:cur_gen_new} Before adding a new current contraction, first find a representation where all momenta that appear inside the currents are \textcolor{red}{lightlike} and have \textcolor{red}{positive energy}. Section~\ref{sec:p_massive} describes how massive momenta can be decomposed into massless ones. For momenta $p'$ with negative energies replace $p' \to - p$ and exploit that negative- and positive-energy spinors are related by a phase, which is usually irrelevant: $u^\lambda(-p) = \pm i u^\lambda(p)$. To implement a current contraction \lstinline!jcontr! create a new file \texttt{jcontr.frm} inside the \texttt{current\_generator} directory. \texttt{jcontr.frm} should contain \FORM code, see \url{https://www.nikhef.nl/~form/} for more information on \FORM. Here is a small example: \begin{lstlisting}[caption={}] * FORM comments are started by an asterisk * at the beginning of a line * First include the relevant headers #include- include/helspin.frm #include- include/write.frm * Define the symbols that appear. * UPPERCASE symbols are reserved for internal use vectors p1,...,p10; indices mu1,...,mu10; * Define local expressions of the form [NAME HELICITIES] * for the current contractions for all relevant helicity configurations #do HELICITY1={+,-} #do HELICITY2={+,-} * We use the Current function * Current(h, p1, mu1, ..., muX, p2) = * u(p1) \gamma_{mu1} ... \gamma_{muX} u(p2) * where h=+1 or h=-1 is the spinor helicity. 
* All momenta appearing as arguments have to be *lightlike* local [jcontr `HELICITY1'`HELICITY2'] = Current(`HELICITY1'1, p1, mu1, p2, mu2, p3) *Current(`HELICITY2'1, p4, mu2, p2, mu1, p1); #enddo #enddo .sort * Main procedure that calculates the contraction #call ContractCurrents .sort * Optimise expression format O4; * Format in a (mostly) c compatible way format c; * Write start of C++ header file #call WriteHeader(`OUTPUT') * Write a template function jcontr * taking as template arguments two helicities * and as arguments the momenta p1,...,p4 * returning the contractions [jcontr HELICITIES] defined above #call WriteOptimised(`OUTPUT',jcontr,2,p1,p2,p3,p4) * Wrap up #call WriteFooter(`OUTPUT') .end \end{lstlisting} \subsection{Calculation of contractions} \label{sec:contr_calc} In order to describe the algorithm for the calculation of current contractions we first have to define the currents and establish some useful relations. \subsubsection{Massive momenta} \label{sec:p_massive} We want to use relations for lightlike momenta. Momenta $P$ that are \emph{not} lightlike can be written as the sum of two lightlike momenta: \begin{equation} \label{eq:P_massive} P^\mu = p^\mu + q^\mu\,, \qquad p^2 = q^2 = 0 \,. \end{equation} This decomposition is not unique. If we impose the arbitrary constraint $q_\perp = 0$ and require real-valued momentum components we can use the ansatz \begin{align} \label{eq:P_massive_p} p_\mu ={}& P_\perp\*(\cosh y, \cos \phi, \sin \phi, \sinh y)\,,\\ \label{eq:P_massive_q} q_\mu ={}& (E, 0, 0, s\,E)\,, \end{align} where $P_\perp$ is the transverse momentum of $P$ and $\phi$ the corresponding azimuthal angle. For the remaining parameters we obtain \begin{align} \label{eq:P_massive_plus} P^+ > 0:& & y ={}& \log \frac{P^+}{P_\perp}\,,\qquad E = \frac{P^2}{2P^+}\,,\qquad s = -1\,,\\ \label{eq:P_massive_minus} P^- > 0:& & y ={}& \log \frac{P_\perp}{P^-}\,,\qquad E = \frac{P^2}{2P^-}\,,\qquad s = +1\,. \end{align} The decomposition is implemented in the \lstinline!split_into_lightlike! function. \subsubsection{Currents and current relations} \label{sec:current_relations} Our starting point are generalised currents \begin{equation} \label{eq:j_gen} j^{\pm}(p, \mu_1,\dots,\mu_{2N-1},q) = \bar{u}^{\pm}(p)\gamma_{\mu_1} \dots \gamma_{\mu_{2N-1}} u^\pm(q)\,. \end{equation} Since there are no masses, we can consider two-component chiral spinors \begin{align} \label{eq:u_plus} u^+(p)={}& \left(\sqrt{p^+}, \sqrt{p^-} \hat{p}_\perp \right) \,,\\ \label{eq:u_minus} u^-(p)={}& \left(\sqrt{p^-} \hat{p}^*_\perp, -\sqrt{p^+}\right)\,, \end{align} with $p^\pm = E\pm p_z,\, \hat{p}_\perp = \tfrac{p_\perp}{|p_\perp|},\, p_\perp = p_x + i p_y$. The spinors for vanishing transverse momentum are obtained by replacing $\hat{p}_\perp \to -1$. This gives \begin{equation} \label{eq:jminus_gen} j^-(p,\mu_1,\dots,\mu_{2N-1},q) = u^{-,\dagger}(p)\ \sigma^-_{\mu_1}\ \sigma^+_{\mu_2}\dots\sigma^-_{\mu_{2N-1}}\ u^{-}(q)\,. \end{equation} where $\sigma_\mu^\pm = (1, \pm \sigma_i)$ and $\sigma_i$ are the Pauli matrices \begin{equation} \label{eq:Pauli_matrices} \sigma_1 = \begin{pmatrix} 0 & 1\\ 1 & 0 \end{pmatrix} \,, \qquad \sigma_2 = \begin{pmatrix} 0 & -i\\ i & 0 \end{pmatrix} \,, \qquad \sigma_3 = \begin{pmatrix} 1 & 0\\ 0 & -1 \end{pmatrix} \,. \end{equation} For positive-helicity currents we can either flip all helicities in eq.~(\ref{eq:jminus_gen}) or reverse the order of the arguments, i.e. 
\begin{equation} \label{eq:jplus_gen} j^+(p,\mu_1,\dots,\mu_{2N-1},q) = \big(j^-(p,\mu_1,\dots,\mu_{2N-1},q)\big)^* = j^-(q,\mu_{2N-1},\dots,\mu_1,p) \,. \end{equation} Using the standard spinor-helicity notation we have \begin{gather} \label{eq:spinors_spinhel} u^+(p) = | p \rangle\,, \qquad u^-(p) = | p ]\,, \qquad u^{+,\dagger}(p) = [ p |\,, \qquad u^{-,\dagger}(p) = \langle p |\,,\\ \label{eq:current_spinhel} j^-(p,\mu_1,\dots,\mu_{2N-1},q) = \langle p |\ \mu_1\ \dots\ \mu_{2N-1}\ | q ] \,.\\ \label{eq:contraction_spinhel} P_{\mu_i} j^-(p,\mu_1,\dots,\mu_{2N-1},q) = \langle p |\ \mu_1\ \dots\ \mu_{i-1}\ P\ \mu_{i+1}\ \dots\ \mu_{2N-1}\ | q ] \,. \end{gather} Lightlike momenta $p$ can be decomposed into spinor products: \begin{equation} \label{eq:p_decomp} \slashed{p} = |p\rangle [p| + |p] \langle p |\,. \end{equation} Taking into account helicity conservation this gives the following relations: \begingroup \addtolength{\jot}{1em} \begin{align} \label{eq:p_in_current} \langle p |\ \mu_1\ \dots\ \mu_i\ P\ \mu_{i+1}\ \dots\ \mu_{2N-1}\ | q ] ={}& \begin{cases} \langle p |\ \mu_1\ \dots\ \mu_i\ |P]\ \langle P|\ \mu_{i+1}\ \dots\ \mu_{2N-1}\ | q ]& i \text{ even}\\ \langle p |\ \mu_1\ \dots\ \mu_i\ |P\rangle\ [ P|\ \mu_{i+1}\ \dots\ \mu_{2N-1}\ | q ]& i \text{ odd} \end{cases}\,,\\ \label{eq:p_in_angle} \langle p |\ \mu_1\ \dots\ \mu_i\ P\ \mu_{i+1}\ \dots\ \mu_{2N}\ | q \rangle ={}& \begin{cases} \langle p |\ \mu_1\ \dots\ \mu_i\ |P]\ \langle P| \mu_{i+1}\ \dots\ \mu_{2N}\ | q \rangle & i \text{ even}\\ \langle p |\ \mu_1\ \dots\ \mu_i\ |P\rangle\ \big(\langle P| \mu_{i+1}\ \dots\ \mu_{2N}\ | q ]\big)^* & i \text{ odd} \end{cases}\,,\\ \label{eq:p_in_square} [ p |\ \mu_1\ \dots\ \mu_i\ P\ \mu_{i+1}\ \dots\ \mu_{2N}\ | q ] ={}& \begin{cases} \big(\langle p |\ \mu_1\ \dots\ \mu_i\ |P]\big)^* \ [ P| \mu_{i+1}\ \dots\ \mu_{2N}\ | q ] & i \text{ even}\\ [ p |\ \mu_1\ \dots\ \mu_i\ |P]\ \langle P| \mu_{i+1}\ \dots\ \mu_{2N}\ | q ] & i \text{ odd} \end{cases}\,. \end{align} \endgroup For contractions of vector currents we can use the Fierz identity \begin{equation} \label{eq:Fierz} \langle p|\ \mu\ |q]\ \langle k|\ \mu\ |l] = 2 \spa p.k \spb l.q\,. \end{equation} The scalar angle and square products are given by \begin{align} \label{eq:angle_product} \spa p.q ={}& {\big(u^-(p)\big)}^\dagger u^+(q) = \sqrt{p^-q^+}\hat{p}_{i,\perp} - \sqrt{p^+q^-}\hat{q}_{j,\perp} = - \spa q.p\,,\\ \label{eq:square_product} \spb p.q ={}& {\big(u^+(p)\big)}^\dagger u^-(q) = -\spa p.q ^* = - \spb q.p\,. \end{align} We also define polarisation vectors \begin{equation} \label{eq:pol_vector} \epsilon_\mu^-(p_g, p_r) = \frac{\langle p_r|\mu|p_g]}{\sqrt{2}\spa p_g.{p_r}}\,,\qquad\epsilon_\mu^+(p_g, p_r) = \frac{\langle p_g|\mu|p_r]}{\sqrt{2}\spb p_g.{p_r}}\,. \end{equation} fulfilling \begin{equation} \label{eq:pol_vector_norm} \epsilon_\mu^\lambda(p_g, p_r)\big[\epsilon^{\mu\,\lambda'}(p_g, p_r)\big]^* = -\delta_{\lambda\lambda'}\,. \end{equation} \subsubsection{Contraction algorithm} \label{sec:contr_calc_algo} The contractions are now calculated as follows: \begin{enumerate} \item Use equations \eqref{eq:jplus_gen}, \eqref{eq:current_spinhel} to write all currents in a canonical form. \item Assume that all momenta are lightlike and use the relations \eqref{eq:p_in_current}, \eqref{eq:p_in_angle}, \eqref{eq:p_in_square} to split up currents that are contracted with momenta. \item Apply the Fierz transformation~\eqref{eq:Fierz} to eliminate contractions between vector currents. 
\item Write the arguments of the antisymmetric angle and scalar products in canonical order, see equations~\eqref{eq:angle_product} ,\eqref{eq:square_product}. \end{enumerate} The corresponding \lstinline!ContractCurrents! procedure is implemented in \texttt{include/helspin.fm}. \section{The fixed-order generator} \label{sec:HEJFOG} Even at leading order, standard fixed-order generators can only generate events with a limited number of final-state particles within reasonable CPU time. The purpose of the fixed-order generator is to supplement this with high-multiplicity input events according to the first two lines of eq.~\eqref{eq:resumdijetFKLmatched2} with the \HEJ approximation $\mathcal{M}_\text{LO, \HEJ}^{f_1f_2\to f_1g\cdots gf_2}$ instead of the full fixed-order matrix element $\mathcal{M}_\text{LO}^{f_1f_2\to f_1g\cdots gf_2}$. Its usage is described in the user documentation \url{https://hej.web.cern.ch/HEJ/doc/current/user/HEJFOG.html}. \subsection{File structure} \label{sec:HEJFOG_structure} The code for the fixed-order generator is in the \texttt{FixedOrderGen} directory, which contains the following: \begin{description} \item[include:] Contains the C++ header files. \item[src:] Contains the C++ source files. \item[t:] Contains the source code for the automated tests. \item[CMakeLists.txt:] Configuration file for the \cmake build system. \item[configFO.yml:] Sample configuration file for the fixed-order generator. \end{description} The code is generally in the \lstinline!HEJFOG! namespace. Functions and classes \lstinline!MyClass! are usually declared in \texttt{include/MyClass.hh} and implemented in \texttt{src/MyClass.cc}. \subsection{Program flow} \label{sec:prog_flow} A single run of the fixed-order generator consists of three or four stages. First, we perform initialisation similar to \HEJ 2, see section~\ref{sec:init}. Since there is a lot of overlap we frequently reuse classes and functions from \HEJ 2, i.e. from the \lstinline!HEJ! namespace. The code for parsing the configuration file is in \texttt{include/config.hh} and implemented in \texttt{src/config.cc}. If partial unweighting is requested in the user settings \url{https://hej.web.cern.ch/HEJ/doc/current/user/HEJFOG.html#settings}, the initialisation is followed by a calibration phase. We use a \lstinline!EventGenerator! to produce a number of trial events. We use these to calibrate the \lstinline!Unweighter! in its constructor and produce a first batch of partially unweighted events. This also allows us to estimate our unweighting efficiency. In the next step, we continue to generate events and potentially unweight them. Once the user-defined target number of events is reached, we adjust their weights according to the number of required trials. As in \HEJ 2 (see section~\ref{sec:processing}), we pass the final events to a number of \lstinline!HEJ::Analysis! objects and a \lstinline!HEJ::CombinedEventWriter!. \subsection{Event generation} \label{sec:evgen} Event generation is performed by the \lstinline!EventGenerator::gen_event! member function. We begin by generating a \lstinline!PhaseSpacePoint!. This is not to be confused with the resummation phase space points represented by \lstinline!HEJ::PhaseSpacePoint!! After jet clustering, we compute the leading-order matrix element (see section~\ref{sec:ME}) and pdf factors. The phase space point generation is performed in the \lstinline!PhaseSpacePoint! constructor. 
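Schematically, the flow described in this subsection looks as follows. This is
only an illustrative sketch: apart from \lstinline!EventGenerator::gen_event!
and the \lstinline!PhaseSpacePoint! constructor, all names (return type, helper
functions, weight member) are hypothetical and do not necessarily match the
actual code.
\begin{lstlisting}[language=C++,caption={}]
// Illustrative sketch of the fixed-order event generation flow.
// Apart from gen_event and PhaseSpacePoint all names are hypothetical.
Event EventGenerator::gen_event() {
  PhaseSpacePoint psp{/* user settings, random numbers */};
  Event ev = to_event(psp);        // hypothetical helper: cluster jets and
                                   // fill the event record
  ev.weight *= matrix_element(ev); // leading-order matrix element,
                                   // see the section on the matrix element
  ev.weight *= pdf_factors(ev);    // pdf factors for the incoming partons
  return ev;
}
\end{lstlisting}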
We first construct the user-defined number of $n_p$ partons (by default
gluons) in \lstinline!PhaseSpacePoint::gen_LO_partons!. We use flat
sampling in rapidity and azimuthal angle. The scalar transverse momentum
is generated from a random variable $x_{p_\perp}$ according to
\begin{equation}
  \label{eq:pt_sampling}
  p_\perp = p_{\perp,\text{min}} +
  p_{\perp,\text{par}} \tan\left(
    x_{p_\perp}
    \arctan\left(
      \frac{p_{\perp,\text{max}} - p_{\perp,\text{min}}}{p_{\perp,\text{par}}}
    \right)
  \right)\,,
\end{equation}
where $p_{\perp,\text{min}}$ is the minimum jet transverse momentum,
$p_{\perp,\text{max}}$ is the maximum transverse parton momentum,
tentatively set to the beam energy, and $p_{\perp,\text{par}}$ is a
generation parameter set to the heuristically determined value of
\begin{equation}
  \label{eq:pt_par}
  p_{\perp,\text{par}}=p_{\perp,\min}+\frac{n_p}{5}.
\end{equation}
The problem with this generation is that the transverse momenta peak at
the minimum transverse momentum required for fixed-order jets. However,
if we use the generated events as input for \HEJ resummation, events
with such soft transverse momenta hardly contribute, see
section~\ref{sec:ptj_res}. To generate efficient input for resummation,
there is the user option \texttt{peak pt}, which specifies the dominant
transverse momentum for resummation jets. If this option is set, most
jets will be generated as above, but with $p_{\perp,\text{min}}$ set to
the peak transverse momentum $p_{\perp, \text{peak}}$. In addition,
there is a small chance of around $2\%$ of generating softer jets. The
heuristic ansatz for the transverse momentum distribution in the
``soft'' region is
\begin{equation}
  \label{FO_pt_soft}
  \frac{\partial \sigma}{\partial p_\perp} \propto
  e^{n_p\frac{p_\perp- p_{\perp, \text{peak}}}{\bar{p}_\perp}}\,,
\end{equation}
where $n_p$ is the number of partons and $\bar{p}_\perp \approx 4\,$GeV.
To achieve this distribution, we use
\begin{equation}
  \label{eq:FO_pt_soft_sampling}
  p_\perp = p_{\perp, \text{peak}} + \bar{p}_\perp \frac{\log x_{p_\perp}}{n_p}
\end{equation}
and discard the phase space point if the parton is too soft, i.e. below
the threshold for fixed-order jets.
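For concreteness, the two sampling formulas above translate into code along
the following lines. This is only a minimal sketch with hypothetical function
and variable names; the actual implementation is the one in
\lstinline!PhaseSpacePoint::gen_LO_partons!.
\begin{lstlisting}[language=C++,caption={}]
#include <cmath>

// Sketch of the transverse momentum sampling (hypothetical names).
// x plays the role of the random variable x_pt, drawn uniformly in (0, 1).
double sample_pt(double x, double ptmin, double ptmax, double ptpar) {
  // standard region, cf. the tan/arctan mapping above
  return ptmin + ptpar*std::tan(x*std::atan((ptmax - ptmin)/ptpar));
}

double sample_pt_soft(double x, double ptpeak, double ptbar, int np) {
  // soft region: log(x) < 0, so the result is always below ptpeak;
  // partons below the fixed-order jet threshold are discarded by the caller
  return ptpeak + ptbar*std::log(x)/np;
}
\end{lstlisting}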
After ensuring that all partons form separate jets, we generate any
potential colourless emissions. We then determine the incoming momenta
and flavours in \lstinline!PhaseSpacePoint::reconstruct_incoming! and
adjust the outgoing flavours to ensure an FKL configuration. Finally, we
may reassign outgoing flavours to generate suppressed (for example
unordered) configurations.

\input{currents}

\appendix
\section{Continuous Integration}
\label{sec:CI}

Whenever you implement something new or fix a bug, please also add a
test for the new behaviour to \texttt{t/CMakeLists.txt} via
\lstinline!add_test!. These tests can be triggered by running
\lstinline!make test! or \lstinline!ctest! after compiling. A typical
test should take at most a few seconds, so that it can potentially be
run by every developer on each commit. If you require a longer, more
thorough test, preferably in addition to a small one, surround it with
\begin{lstlisting}[caption={}]
if(${TEST_ALL})
  add_test(
    NAME t_feature
    COMMAND really_long_test
  )
endif()
\end{lstlisting}
Afterwards you can execute the longer tests with\footnote{No recompiling
  is needed, as long as only the \lstinline!add_test! command is
  guarded, not the compile commands themselves.}
\begin{lstlisting}[language=sh,caption={}]
cmake base/directory -DTEST_ALL=TRUE
make test
\end{lstlisting}
On top of that you should add
\href{https://en.cppreference.com/w/cpp/error/assert}{\lstinline!assert!s}
in the code itself. They are only executed when compiled with
\lstinline!CMAKE_BUILD_TYPE=Debug!, without slowing down release
code. So you can use them everywhere to test \textit{expected} or
\textit{assumed} behaviour, e.g. requiring a Higgs boson or relying on
rapidity ordering.

GitLab provides a way to test code automatically via \textit{Continuous
Integration} (CI). The CI is controlled by \texttt{.gitlab-ci.yml}. For
all options for the YAML file see
\href{https://docs.gitlab.com/ee/ci/yaml/}{docs.gitlab.com/ee/ci/yaml/}.
GitLab also provides a small tool to check that the YAML syntax is
correct under \lstinline!CI/CD > Pipelines > CI Lint! or
\href{https://gitlab.dur.scotgrid.ac.uk/hej/HEJ/-/ci/lint}{gitlab.dur.scotgrid.ac.uk/hej/HEJ/-/ci/lint}.

Currently the CI is configured to trigger a \textit{Pipeline} on each
\lstinline!git push!. The corresponding \textit{GitLab runners} are
configured under \lstinline!CI/CD Settings>Runners! in the GitLab
UI. All runners use a \href{https://www.docker.com/}{docker} image as a
virtual environment\footnote{To use only Docker runners set the
  \lstinline!docker! tag in \texttt{.gitlab-ci.yml}.}. The specific
docker images are maintained separately. If you add a new dependency,
please also provide a docker image for the CI. The goal is to be able to
test \HEJ with all possible configurations.

Each pipeline contains multiple stages (see \lstinline!stages! in
\texttt{.gitlab-ci.yml}) which are executed in order from top to
bottom. Additionally each stage contains multiple jobs. For example the
stage \lstinline!build! contains the jobs \lstinline!build:basic!,
\lstinline!build:qcdloop!, \lstinline!build:rivet!, etc., which compile
\HEJ for different environments and dependencies, by using different
Docker images. Jobs starting with a dot are ignored by the runner,
e.g. \lstinline!.HEJ_build! is only used as a template, but never
executed directly. The next stage will only start after all jobs of the
previous stage have finished without error.

To pass information between multiple stages we use
\lstinline!artifacts!. The runner will automatically load all artifacts
from all \lstinline!dependencies! for each job\footnote{If no
  dependencies are defined \textit{all} artifacts from all previous jobs
  are downloaded. Thus please specify an empty dependency if you do not
  want to load any artifacts.}. For example the compiled \HEJ code from
\lstinline!build:basic! gets loaded in \lstinline!test:basic! and
\lstinline!FOG:build:basic!, without recompiling \HEJ
again. Additionally artifacts can be downloaded from the GitLab web
page, which could be handy for debugging.

We also trigger some jobs \lstinline!only! on specific events. For
example we only push the code to
\href{https://phab.hepforge.org/source/hej/repository/v2.0/}{HepForge}
on release branches (e.g. v2.0). Also we only execute the \textit{long}
tests for merge requests, on pushes for any release or the
\lstinline!master! branch, or when triggered manually from the GitLab
web page.
The actual commands are given in the \lstinline!before_script!,
\lstinline!script! and
\lstinline!after_script!\footnote{\lstinline!after_script! is always
  executed.} sections, and are standard Linux shell commands (depending
on the docker image). Any failed command, i.e. one returning a non-zero
status, stops the job and makes the whole pipeline fail. Most tests
simply run \lstinline!make test! or are based on it. Thus, to emphasise
it again, write tests for your code in \lstinline!cmake!. The CI is only
intended to make automated testing in different environments easier.

\section{Monte Carlo uncertainty}
\label{sec:MC_err}

Since \HEJ reweights each fixed-order point with multiple resummation
events, the Monte Carlo uncertainty of \HEJ is a little more complicated
than usual. We start by defining the \HEJ cross section after $N$ FO
points
\begin{align}
  \sigma_N:=\sum_{i}^N x_i \sum_{j}^{M_i} y_{i,j}=:\sum_i^N\sum_j^{M_i} w_{i,j},
\end{align}
where $x_i$ are the FO weights\footnote{In this definition $x_i$ can be
  zero, see the discussion in the next section.}, $y_{i,j}$ are the
reweighting weights, and $M_i$ is the number of resummation points. We
can set $M=M_i \;\forall i$ by potentially adding some points with
$y_{i,j}=0$, i.e. $M$ corresponds to the \lstinline!trials! in
\lstinline!EventReweighter!. $w_{i,j}$ are the weights as written out by
\HEJ. The expectation value of $\sigma$ is then
\begin{align}
  \ev{\sigma_N}= \sum_i \ev{x_i}\sum_j\ev{y_{i,j}}=M \mu_x\sum_i\mu_{y_i},\label{eq:true_sigma}
\end{align}
with $\mu_{x/y}$ being the (true) mean value of $x$ or $y$, i.e.
\begin{align}
  \mu_{x}:=\ev{\bar{x}}=\ev{\frac{\sum_i x_i}{N}}=\ev{x}.
\end{align}
The true underlying standard deviation of $\sigma_N$, assuming
$\delta_{x}$ and $\delta_{y_i}$ are the standard deviations of $x$ and
$y_i$, is
\begin{align}
  \delta_{\sigma_N}^2&=M^2 \delta_{x}^2 \sum_i \mu_{y_i}^2
    +M \mu_x^2 \sum_i \delta_{y_i}^2. \label{eq:true_err}
\end{align}
Notice that each point $i$ can have a different expectation value for
$y_i$. Since we do not know the true distributions of $x$ and $y$ we
need to estimate them. We use the standard deviations
\begin{align}
  \tilde{\delta}_{x_i}^2&:=\left(x_i-\bar x\right)^2
    =\left(\frac{N-1}{N} x_i - \frac{\sum_{j\neq i} x_j}{N}\right)^2 \label{eq:err_x}\\
  \tilde{\delta}_{y_{i,j}}^2&:=\left(y_{i,j}-\bar y_i\right)^2 \label{eq:err_y},
\end{align}
and the mean values $\bar x$ and $\bar y$, to get an estimator for
$\delta_{\sigma_N}$
\begin{align}
  \tilde\delta_{\sigma_N}^2&=M^2 \sum_i \tilde\delta_{x_i}^2 \bar{y_i}^2
    +\sum_{i,j} x_i^2\tilde\delta_{y_{i,j}}^2. \label{eq:esti_err}
\end{align}
Through error propagation we can connect the estimated uncertainties
back to the fundamental ones
\begin{align}
  \delta_{\tilde{\delta}_{x_i}}^2=\frac{N-1}{N} \delta_x^2.
\end{align}
Together with $\delta_x^2=\ev{x^2}-\ev{x}^2$ and $\ev{\tilde\delta}=0$
this leads to
\begin{align}
  \ev{\tilde{\delta}_{x_i}^2 \bar y_i^2}&=\ev{\tilde{\delta}_{x_i} \bar y_i}^2
    +\delta_{\tilde{\delta}_{x_i}}^2 \mu_{y_i}^2
    +\delta_{y_i}^2 \mu_{\tilde\delta}^2 \\
  &=\frac{N-1}{N} \delta_x^2\mu_{y_i}^2,
\end{align}
and a similar result for $y$. Therefore
\begin{align}
  \ev{\delta_{\sigma_N}}=\frac{N-1}{N} M^2 \delta_{x}^2 \sum_i \mu_{y_i}^2
    +\frac{M-1}{M} M \mu_x^2 \sum_i \delta_{y_i}^2,
\end{align}
where we can compensate for the additional factors relative
to~\eqref{eq:true_err} by replacing
\begin{align}
  \tilde\delta_x&\to\frac{N}{N-1}\tilde\delta_x \label{eq:xcom_bias}\\
  \tilde\delta_{y_i}&\to\frac{M}{M-1}\tilde\delta_{y_i}. \label{eq:ycom_bias}
\end{align}
Thus, after these replacements, \eqref{eq:esti_err} is an unbiased
estimator of $\delta_{\sigma_N}$.

\subsection{Number of events vs. number of trials}
Even though the above calculation is completely valid, it is
impractical. Both $x_i$ and $y_{ij}$ could be zero, but zero-weight
events are typically not written out. In that sense $N$ and $M$ are the
\textit{number of trials} it took to generate $N'$ and $M'$ (non-zero)
events. We cannot naively replace all $N$ and $M$ with $N'$ and $M'$ in
the above equations, since this would also change the definition of the
averages $\bar x$ and $\bar y$.

For illustration let us consider unweighted events, with all weights
equal to $x'$, without changing the cross section
$\sum_i^N x_i=\sum_i^{N'} x'_i=N' x'$. Then the average trial weight is
unequal to the average event weight
\begin{align}
  \bar x = \frac{\sum_i^{N} x_i}{N} = \frac{\sum_i^{N'} x'}{N}=x'\frac{N'}{N}
  \neq x'=\frac{\sum_i^{N'} x'}{N'}.
\end{align}
$N=N'$ would correspond to a $100\%$ efficient unweighting, i.e. a
perfect sampling, where we know the analytical result. In particular,
using $N'$ instead of $N$ in the standard deviation gives
\begin{align}
  \sum_i \left(x_i-\frac{\sum_i^{N} x_i}{N'}\right)^2
  =\sum_i \left(x'-x'\,\frac{N'}{N'}\right)^2=0,
\end{align}
which is obviously not true in general for $\tilde\delta^2_x$. Hence we
would have to use the number of trials $N$ everywhere. This would
require an additional parameter to be passed with each event, which is
not always available in practice\footnote{\texttt{Sherpa} gives the
  number of trials as an \lstinline!attribute::trials! of
  \lstinline!HEPEUP! in the \texttt{LHE} file, or similarly as a data
  member in the HDF5 format \cite{Hoeche:2019rti}. The \texttt{LHE}
  standard itself provides the variable \lstinline!ntries! per event
  (see \href{https://phystev.cnrs.fr/wiki/2017:groups:tools:lhe}{this
  proposal}), though I have not seen this used anywhere.}. Instead we
use
\begin{align}
  \tilde\delta_{x}'^2:=\sum_i^{N} x_i^2\geq\tilde\delta_x^2, \label{eq:err_prac}
\end{align}
where the bias of $\tilde\delta_x'^2$ vanishes for large $N$. Thus we
can use the sum of weight squares~\eqref{eq:err_prac} instead
of~\eqref{eq:err_x} and~\eqref{eq:err_y}, without worrying about the
difference between trials and generated events. The total
error~\eqref{eq:esti_err} becomes
\begin{align}
  \tilde\delta_{\sigma_N}^2=\sum_i \left(\sum_j w_{i,j}\right)^2
    +\sum_{i,j} \left(w_{i,j}\right)^2,
\end{align}
which (conveniently) only depends on the \HEJ weights $w_{i,j}$.

\section{Explicit formulas for vector currents}
\label{sec:j_vec}

Using eqs.~\eqref{eq:u_plus}, \eqref{eq:u_minus}, \eqref{eq:jminus_gen},
\eqref{eq:Pauli_matrices} and \eqref{eq:jplus_gen}, the vector currents
read
\begin{align}
  \label{eq:j-_explicit}
  j^-_\mu(p, q) ={}& \begin{pmatrix}
    \sqrt{p^+\,q^+} + \sqrt{p^-\,q^-} \hat{p}_{\perp} \hat{q}_{\perp}^*\\
    \sqrt{p^-\,q^+}\, \hat{p}_{\perp} + \sqrt{p^+\,q^-}\,\hat{q}_{\perp}^*\\
    -i \sqrt{p^-\,q^+}\, \hat{p}_{\perp} + i \sqrt{p^+\,q^-}\, \hat{q}_{\perp}^*\\
    \sqrt{p^+\,q^+} - \sqrt{p^-\,q^-}\, \hat{p}_{\perp}\, \hat{q}_{\perp}^*
  \end{pmatrix}\,,\\
  j^+_\mu(p, q) ={}&\big(j^-_\mu(p, q)\big)^*\,,\\
  j^\pm_\mu(q, p) ={}&\big(j^\pm_\mu(p, q)\big)^*\,.
\end{align}
If $q= p_{\text{in}}$ is the momentum of an incoming parton, we have
$\hat{p}_{\text{in} \perp} = -1$ and either $p_{\text{in}}^+ = 0$ or
$p_{\text{in}}^- = 0$.
The current simplifies further:\todo{Helicities flipped w.r.t code} \begin{align} \label{eq:j_explicit} j^-_\mu(p_{\text{out}}, p_{\text{in}}) ={}& \begin{pmatrix} \sqrt{p_{\text{in}}^+\,p_{\text{out}}^+}\\ \sqrt{p_{\text{in}}^+\,p_{\text{out}}^-} \ \hat{p}_{\text{out}\,\perp}\\ -i\,j^-_1\\ j^-_0 \end{pmatrix} & p_{\text{in}\,z} > 0\,,\\ j^-_\mu(p_{\text{out}}, p_{\text{in}}) ={}& \begin{pmatrix} -\sqrt{p_{\text{in}}^-\,p_{\text{out}}^{-\phantom{+}}} \ \hat{p}_{\text{out}\,\perp}\\ - \sqrt{p_{\text{in}}^-\,p_{\text{out}}^+}\\ i\,j^-_1\\ -j^-_0 \end{pmatrix} & p_{\text{in}\,z} < 0\,. \end{align} \bibliographystyle{JHEP} \bibliography{biblio} \end{document} diff --git a/src/MatrixElement.cc b/src/MatrixElement.cc index 50a948f..c007721 100644 --- a/src/MatrixElement.cc +++ b/src/MatrixElement.cc @@ -1,2426 +1,2450 @@ /** * \authors The HEJ collaboration (see AUTHORS for details) * \date 2019-2022 * \copyright GPLv2 or later */ #include "HEJ/MatrixElement.hh" #include #include #include #include #include #include #include #include #include #include "fastjet/PseudoJet.hh" #include "HEJ/ConfigFlags.hh" #include "HEJ/Constants.hh" #include "HEJ/EWConstants.hh" #include "HEJ/Event.hh" #include "HEJ/HiggsCouplingSettings.hh" #include "HEJ/Hjets.hh" #include "HEJ/LorentzVector.hh" #include "HEJ/PDG_codes.hh" #include "HEJ/Particle.hh" #include "HEJ/WWjets.hh" #include "HEJ/Wjets.hh" #include "HEJ/Zjets.hh" #include "HEJ/event_types.hh" #include "HEJ/exceptions.hh" #include "HEJ/jets.hh" #include "HEJ/utility.hh" namespace HEJ { namespace { // Colour acceleration multiplier for gluons // see eq:K_g in developer manual double K_g(double p1minus, double paminus) { return 1./2.*(p1minus/paminus + paminus/p1minus)*(C_A - 1./C_A) + 1./C_A; } double K_g( CLHEP::HepLorentzVector const & pout, CLHEP::HepLorentzVector const & pin ) { if(pin.z() > 0) return K_g(pout.plus(), pin.plus()); return K_g(pout.minus(), pin.minus()); } // Colour acceleration multiplier for quarks // see eq:K_q in developer manual constexpr double K_q = C_F; // Colour acceleration multipliers double K( ParticleID type, CLHEP::HepLorentzVector const & pout, CLHEP::HepLorentzVector const & pin ){ if(type == pid::gluon) return K_g(pout, pin); return K_q; } } // namespace double MatrixElement::omega0( double alpha_s, double mur, fastjet::PseudoJet const & q_j ) const { const double lambda = param_.regulator_lambda; const double result = - alpha_s*N_C/M_PI*std::log(q_j.perp2()/(lambda*lambda)); if(! param_.log_correction) return result; return ( 1. + alpha_s/(4.*M_PI)*BETA0*std::log(mur*mur/(q_j.perp()*lambda)) )*result; } Weights MatrixElement::operator()(Event const & event) const { std::vector tree_kin_part=tree_kin(event); std::vector virtual_part=virtual_corrections(event); if(tree_kin_part.size() != virtual_part.size()) { throw std::logic_error("tree and virtuals have different sizes"); } Weights sum = Weights{0., std::vector(event.variations().size(), 0.)}; for(size_t i=0; i tree_kin_part=tree_kin(event); double sum = 0.; for(double i : tree_kin_part) { sum += i; } return tree_param(event)*sum; } Weights MatrixElement::tree_param(Event const & event) const { if(! 
is_resummable(event.type())) { return Weights{0., std::vector(event.variations().size(), 0.)}; } Weights result; // only compute once for each renormalisation scale std::unordered_map known; result.central = tree_param(event, event.central().mur); known.emplace(event.central().mur, result.central); for(auto const & var: event.variations()) { const auto ME_it = known.find(var.mur); if(ME_it == end(known)) { const double wt = tree_param(event, var.mur); result.variations.emplace_back(wt); known.emplace(var.mur, wt); } else { result.variations.emplace_back(ME_it->second); } } return result; } std::vector MatrixElement::virtual_corrections(Event const & event) const { if(!event.valid_hej_state(param_.soft_pt_regulator)) { return {Weights{0., std::vector(event.variations().size(), 0.)}}; } // only compute once for each renormalisation scale std::unordered_map > known_vec; std::vector central_vec=virtual_corrections(event, event.central().mur); known_vec.emplace(event.central().mur, central_vec); for(auto const & var: event.variations()) { const auto ME_it = known_vec.find(var.mur); if(ME_it == end(known_vec)) { known_vec.emplace(var.mur, virtual_corrections(event, var.mur)); } } // At this stage known_vec contains one vector of virtual corrections for each mur value // Now put this into a vector of Weights std::vector result_vec; for(size_t i=0; isecond.at(i)); } result_vec.emplace_back(result); } return result_vec; } template std::vector MatrixElement::virtual_corrections_interference( InputIterator begin_parton, InputIterator end_parton, fastjet::PseudoJet const & q0_t, fastjet::PseudoJet const & q0_b, const double mur ) const{ const double alpha_s = alpha_s_(mur); auto qi_t = q0_t; auto qi_b = q0_b; double sum_top = 0.; double sum_bot = 0.; double sum_mix = 0.; for(auto parton_it = begin_parton; parton_it != end_parton; ++parton_it){ Particle parton = *parton_it; Particle parton_next = *(parton_it+1); const double dy = parton_next.rapidity() - parton.rapidity(); const double tmp_top = omega0(alpha_s, mur, qi_t)*dy; const double tmp_bot = omega0(alpha_s, mur, qi_b)*dy; sum_top += tmp_top; sum_bot += tmp_bot; sum_mix += (tmp_top + tmp_bot) / 2.; qi_t -= parton_next.p; qi_b -= parton_next.p; } if (param_.nlo.enabled){ return {(sum_top), (sum_bot), (sum_mix)}; } return {exp(sum_top), exp(sum_bot), exp(sum_mix)}; } double MatrixElement::virtual_corrections_W( Event const & event, const double mur, Particle const & WBoson ) const{ auto const & in = event.incoming(); const auto partons = filter_partons(event.outgoing()); fastjet::PseudoJet const & pa = in.front().p; #ifndef NDEBUG fastjet::PseudoJet const & pb = in.back().p; double const norm = (in.front().p + in.back().p).E(); #endif assert(std::is_sorted(partons.begin(), partons.end(), rapidity_less{})); assert(partons.size() >= 2); assert(pa.pz() < pb.pz()); fastjet::PseudoJet q = pa - partons[0].p; std::size_t first_idx = 0; std::size_t last_idx = partons.size() - 1; #ifndef NDEBUG bool wc = true; #endif bool wqq = false; // With extremal qqbar or unordered gluon outside the extremal // partons then it is not part of the FKL ladder and does not // contribute to the virtual corrections. 
W emitted from the // most backward leg must be taken into account in t-channel if (event.type() == event_type::unob) { q -= partons[1].p; ++first_idx; if (in[0].type != partons[1].type ){ q -= WBoson.p; #ifndef NDEBUG wc=false; #endif } } else if (event.type() == event_type::qqbar_exb) { q -= partons[1].p; ++first_idx; if (std::abs(partons[0].type) != std::abs(partons[1].type)){ q -= WBoson.p; #ifndef NDEBUG wc=false; #endif } } else { if(event.type() == event_type::unof || event.type() == event_type::qqbar_exf){ --last_idx; } if (in[0].type != partons[0].type ){ q -= WBoson.p; #ifndef NDEBUG wc=false; #endif } } std::size_t first_idx_qqbar = last_idx; std::size_t last_idx_qqbar = last_idx; //if qqbarMid event, virtual correction do not occur between qqbar pair. if(event.type() == event_type::qqbar_mid){ const auto backquark = std::find_if( begin(partons) + 1, end(partons) - 1 , [](Particle const & s){ return (s.type != pid::gluon); } ); if(backquark == end(partons) || (backquark+1)->type==pid::gluon) return 0; if(std::abs(backquark->type) != std::abs((backquark+1)->type)) { wqq=true; #ifndef NDEBUG wc=false; #endif } last_idx = std::distance(begin(partons), backquark); first_idx_qqbar = last_idx+1; } double exponent = 0; const double alpha_s = alpha_s_(mur); for(std::size_t j = first_idx; j < last_idx; ++j){ exponent += omega0(alpha_s, mur, q)*( partons[j+1].rapidity() - partons[j].rapidity() ); q -=partons[j+1].p; } // End Loop one if (last_idx != first_idx_qqbar) q -= partons[last_idx+1].p; if (wqq) q -= WBoson.p; for(std::size_t j = first_idx_qqbar; j < last_idx_qqbar; ++j){ exponent += omega0(alpha_s, mur, q)*( partons[j+1].rapidity() - partons[j].rapidity() ); q -= partons[j+1].p; } #ifndef NDEBUG if (wc) q -= WBoson.p; assert( nearby(q, -1*pb, norm) || is_AWZH_boson(partons.back().type) || event.type() == event_type::unof || event.type() == event_type::qqbar_exf ); #endif if (param_.nlo.enabled){ double nlo_virtual =1.; //Only apply virtual corrections to a nlo order event. if (partons.size() == param_.nlo.nj) nlo_virtual+=exponent; return nlo_virtual; } return std::exp(exponent); } std::vector MatrixElement::virtual_corrections_WW( Event const & event, const double mur ) const{ auto const & in = event.incoming(); const auto partons = filter_partons(event.outgoing()); fastjet::PseudoJet const & pa = in.front().p; #ifndef NDEBUG fastjet::PseudoJet const & pb = in.back().p; #endif assert(std::is_sorted(partons.begin(), partons.end(), rapidity_less{})); assert(partons.size() >= 2); assert(pa.pz() < pb.pz()); assert(event.decays().size() == 2); std::vector plbar; std::vector pl; for(auto const & decay_pair : event.decays()) { auto const decay = decay_pair.second; if(decay.at(0).type < 0) { plbar.emplace_back(decay.at(0).p); pl .emplace_back(decay.at(1).p); } else { pl .emplace_back(decay.at(0).p); plbar.emplace_back(decay.at(1).p); } } fastjet::PseudoJet q_t = pa - partons[0].p - pl[0] - plbar[0]; fastjet::PseudoJet q_b = pa - partons[0].p - pl[1] - plbar[1]; auto const begin_parton = cbegin(partons); auto const end_parton = cend(partons) - 1; if (param_.nlo.enabled){ std::vector virt_corrections_nlo {1.0,1.0,1.0}; //set virtual corrections to 1. // Only apply virtual corrections to a nlo order event. 
if (partons.size() == param_.nlo.nj) { std::vector virt_corrections_nlo_interference = virtual_corrections_interference( begin_parton, end_parton, q_t, q_b, mur ); assert( virt_corrections_nlo_interference.size() == virt_corrections_nlo.size() ); for(std::size_t i = 0; i < virt_corrections_nlo.size(); ++i){ virt_corrections_nlo[i] += virt_corrections_nlo_interference[i]; } } return virt_corrections_nlo; } return virtual_corrections_interference(begin_parton, end_parton, q_t, q_b, mur); } std::vector MatrixElement::virtual_corrections_Z_qq( Event const & event, const double mur, Particle const & ZBoson ) const{ auto const & in = event.incoming(); const auto partons = filter_partons(event.outgoing()); fastjet::PseudoJet const & pa = in.front().p; #ifndef NDEBUG fastjet::PseudoJet const & pb = in.back().p; #endif assert(std::is_sorted(partons.begin(), partons.end(), rapidity_less{})); assert(partons.size() >= 2); assert(pa.pz() < pb.pz()); fastjet::PseudoJet q_t = pa - partons[0].p - ZBoson.p; fastjet::PseudoJet q_b = pa - partons[0].p; auto begin_parton = cbegin(partons); auto end_parton = cend(partons) - 1; // Unordered gluon does not contribute to the virtual corrections if (event.type() == event_type::unob) { // Gluon is partons[0] and is already subtracted // partons[1] is the backward quark q_t -= partons[1].p; q_b -= partons[1].p; ++begin_parton; } else if (event.type() == event_type::unof) { // End sum at forward quark --end_parton; } if (param_.nlo.enabled){ //set virtual corrections to 1. std::vector virt_corrections_nlo {1.0,1.0,1.0}; // Only apply virtual corrections to a nlo order event. if (partons.size() == param_.nlo.nj) { std::vector virt_corrections_nlo_interference = virtual_corrections_interference( begin_parton, end_parton, q_t, q_b, mur ); assert( virt_corrections_nlo.size() == virt_corrections_nlo_interference.size() ); for(std::size_t i = 0; i < virt_corrections_nlo.size(); ++i){ virt_corrections_nlo[i] += virt_corrections_nlo_interference[i]; } } return virt_corrections_nlo; } return virtual_corrections_interference(begin_parton, end_parton, q_t, q_b, mur); } double MatrixElement::virtual_corrections_Z_qg( Event const & event, const double mur, Particle const & ZBoson, const bool is_gq_event ) const{ auto const & in = event.incoming(); const auto partons = filter_partons(event.outgoing()); fastjet::PseudoJet const & pa = in.front().p; #ifndef NDEBUG fastjet::PseudoJet const & pb = in.back().p; #endif assert(std::is_sorted(partons.begin(), partons.end(), rapidity_less{})); assert(partons.size() >= 2); assert(pa.pz() < pb.pz()); // If this is a gq event, don't subtract the Z momentum from first q fastjet::PseudoJet q = (is_gq_event ? pa - partons[0].p : pa - partons[0].p - ZBoson.p); size_t first_idx = 0; size_t last_idx = partons.size() - 1; // Unordered gluon does not contribute to the virtual corrections if (event.type() == event_type::unob) { // Gluon is partons[0] and is already subtracted // partons[1] is the backward quark q -= partons[1].p; ++first_idx; } else if (event.type() == event_type::unof) { // End sum at forward quark --last_idx; } double sum=0.; const double alpha_s = alpha_s_(mur); for(size_t j = first_idx; j < last_idx; ++j){ sum += omega0(alpha_s, mur, q)*(partons[j+1].rapidity() - partons[j].rapidity()); q -= partons[j+1].p; } if (param_.nlo.enabled){ double nlo_virtual =1.; //Only apply virtual corrections to a nlo order event. 
if (partons.size() == param_.nlo.nj) nlo_virtual+=sum; return nlo_virtual; } return exp(sum); } std::vector MatrixElement::virtual_corrections( Event const & event, const double mur ) const{ auto const & in = event.incoming(); auto const & out = event.outgoing(); fastjet::PseudoJet const & pa = in.front().p; #ifndef NDEBUG fastjet::PseudoJet const & pb = in.back().p; double const norm = (in.front().p + in.back().p).E(); #endif std::vector bosons = filter_AWZH_bosons(out); if(event.jets().size() != param_.nlo.nj && param_.nlo.enabled) { throw std::logic_error{ "Input event has number of jets different to stated NLO " "input in config file: " + std::to_string(event.jets().size()) + " vs " +std::to_string(param_.nlo.nj) + "\n" }; } if(bosons.size() > 2) { throw not_implemented("Emission of >2 bosons is unsupported"); } if(bosons.size() == 2) { if(bosons[0].type == pid::Wp && bosons[1].type == pid::Wp) { return virtual_corrections_WW(event, mur); } else if(bosons[0].type == pid::Wm && bosons[1].type == pid::Wm) { return virtual_corrections_WW(event, mur); } throw not_implemented("Emission of bosons of unsupported type"); } if(bosons.size() == 1) { const auto AWZH_boson = bosons[0]; if(std::abs(AWZH_boson.type) == pid::Wp){ return {virtual_corrections_W(event, mur, AWZH_boson)}; } if(AWZH_boson.type == pid::Z_photon_mix){ if(is_gluon(in.back().type)){ // This is a qg event return {virtual_corrections_Z_qg(event, mur, AWZH_boson, false)}; } if(is_gluon(in.front().type)){ // This is a gq event return {virtual_corrections_Z_qg(event, mur, AWZH_boson, true)}; } // This is a qq event return virtual_corrections_Z_qq(event, mur, AWZH_boson); } } assert(std::is_sorted(out.begin(), out.end(), rapidity_less{})); assert(out.size() >= 2); assert(pa.pz() < pb.pz()); fastjet::PseudoJet q = pa - out[0].p; std::size_t first_idx = 0; std::size_t last_idx = out.size() - 1; // if there is a Higgs boson _not_ emitted off an incoming gluon, // extremal qqbar or unordered gluon outside the extremal partons // then it is not part of the FKL ladder // and does not contribute to the virtual corrections if((out.front().type == pid::Higgs && in.front().type != pid::gluon) || event.type() == event_type::unob || event.type() == event_type::qqbar_exb){ q -= out[1].p; ++first_idx; } if((out.back().type == pid::Higgs && in.back().type != pid::gluon) || event.type() == event_type::unof || event.type() == event_type::qqbar_exf){ --last_idx; } std::size_t first_idx_qqbar = last_idx; std::size_t last_idx_qqbar = last_idx; //if central qqbar event, virtual correction do not occur between q-qbar. 
if(event.type() == event_type::qqbar_mid){ const auto backquark = std::find_if( begin(out) + 1, end(out) - 1 , [](Particle const & s){ return (s.type != pid::gluon && is_parton(s.type)); } ); if(backquark == end(out) || (backquark+1)->type==pid::gluon) return {0.}; last_idx = std::distance(begin(out), backquark); first_idx_qqbar = last_idx+1; } double exponent = 0; const double alpha_s = alpha_s_(mur); for(std::size_t j = first_idx; j < last_idx; ++j){ exponent += omega0(alpha_s, mur, q)*( out[j+1].rapidity() - out[j].rapidity() ); q -= out[j+1].p; } if (last_idx != first_idx_qqbar) q -= out[last_idx+1].p; for(std::size_t j = first_idx_qqbar; j < last_idx_qqbar; ++j){ exponent += omega0(alpha_s, mur, q)*( out[j+1].rapidity() - out[j].rapidity() ); q -= out[j+1].p; } assert( nearby(q, -1*pb, norm) || (out.back().type == pid::Higgs && in.back().type != pid::gluon) || event.type() == event_type::unof || event.type() == event_type::qqbar_exf ); if (param_.nlo.enabled){ const auto partons = filter_partons(event.outgoing()); double nlo_virtual =1.; //Only apply virtual corrections to a nlo order event. if (partons.size() == param_.nlo.nj) nlo_virtual+=exponent; return {nlo_virtual}; } return {std::exp(exponent)}; } namespace { //! Lipatov vertex for partons emitted into extremal jets CLHEP::HepLorentzVector CLipatov( CLHEP::HepLorentzVector const & qav, CLHEP::HepLorentzVector const & qbv, CLHEP::HepLorentzVector const & p1, CLHEP::HepLorentzVector const & p2 ) { const CLHEP::HepLorentzVector p5 = qav-qbv; const CLHEP::HepLorentzVector CL = -(qav+qbv) + p1*(qav.m2()/p5.dot(p1) + 2.*p5.dot(p2)/p1.dot(p2)) - p2*(qbv.m2()/p5.dot(p2) + 2.*p5.dot(p1)/p1.dot(p2)); return CL; } double C2Lipatov( CLHEP::HepLorentzVector const & qav, CLHEP::HepLorentzVector const & qbv, CLHEP::HepLorentzVector const & p1, CLHEP::HepLorentzVector const & p2 ){ const CLHEP::HepLorentzVector CL = CLipatov(qav, qbv, p1, p2); return -CL.dot(CL); } //! 
Lipatov vertex with soft subtraction for partons emitted into extremal jets double C2Lipatovots( CLHEP::HepLorentzVector const & qav, CLHEP::HepLorentzVector const & qbv, CLHEP::HepLorentzVector const & p1, CLHEP::HepLorentzVector const & p2, const double lambda ) { const double Cls=(C2Lipatov(qav, qbv, p1, p2)/(qav.m2()*qbv.m2())); const double kperp=(qav-qbv).perp(); if (kperp>lambda) return Cls; return Cls-4./(kperp*kperp); } double C2Lipatov_Mix( CLHEP::HepLorentzVector const & qav_t, CLHEP::HepLorentzVector const & qbv_t, CLHEP::HepLorentzVector const & qav_b, CLHEP::HepLorentzVector const & qbv_b, CLHEP::HepLorentzVector const & p1, CLHEP::HepLorentzVector const & p2 ) { const CLHEP::HepLorentzVector CL_t = CLipatov(qav_t, qbv_t, p1, p2); const CLHEP::HepLorentzVector CL_b = CLipatov(qav_b, qbv_b, p1, p2); return -CL_t.dot(CL_b); } double C2Lipatovots_Mix( CLHEP::HepLorentzVector const & qav_t, CLHEP::HepLorentzVector const & qbv_t, CLHEP::HepLorentzVector const & qav_b, CLHEP::HepLorentzVector const & qbv_b, CLHEP::HepLorentzVector const & p1, CLHEP::HepLorentzVector const & p2, const double lambda ) { const double Cls = C2Lipatov_Mix(qav_t, qbv_t, qav_b, qbv_b, p1, p2) / sqrt(qav_t.m2() * qbv_t.m2() * qav_b.m2() * qbv_b.m2()); const double kperp = (qav_t - qbv_t).perp(); if (kperp > lambda){ return Cls; } return Cls - 4.0 / (kperp * kperp); } CLHEP::HepLorentzVector CLipatov( CLHEP::HepLorentzVector const & qav, CLHEP::HepLorentzVector const & qbv, CLHEP::HepLorentzVector const & pim, CLHEP::HepLorentzVector const & pip, CLHEP::HepLorentzVector const & pom, CLHEP::HepLorentzVector const & pop ){ const CLHEP::HepLorentzVector p5 = qav-qbv; const CLHEP::HepLorentzVector CL = -(qav+qbv) + qav.m2()*(1./p5.dot(pip)*pip + 1./p5.dot(pop)*pop)/2. - qbv.m2()*(1./p5.dot(pim)*pim + 1./p5.dot(pom)*pom)/2. + ( pip*(p5.dot(pim)/pip.dot(pim) + p5.dot(pom)/pip.dot(pom)) + pop*(p5.dot(pim)/pop.dot(pim) + p5.dot(pom)/pop.dot(pom)) - pim*(p5.dot(pip)/pip.dot(pim) + p5.dot(pop)/pop.dot(pim)) - pom*(p5.dot(pip)/pip.dot(pom) + p5.dot(pop)/pop.dot(pom)) )/2.; return CL; } //! Lipatov vertex double C2Lipatov( // B CLHEP::HepLorentzVector const & qav, CLHEP::HepLorentzVector const & qbv, CLHEP::HepLorentzVector const & pim, CLHEP::HepLorentzVector const & pip, CLHEP::HepLorentzVector const & pom, CLHEP::HepLorentzVector const & pop ){ const CLHEP::HepLorentzVector CL = CLipatov(qav, qbv, pim, pip, pom, pop); return -CL.dot(CL); } //! 
Lipatov vertex with soft subtraction double C2Lipatovots( CLHEP::HepLorentzVector const & qav, CLHEP::HepLorentzVector const & qbv, CLHEP::HepLorentzVector const & pa, CLHEP::HepLorentzVector const & pb, CLHEP::HepLorentzVector const & p1, CLHEP::HepLorentzVector const & p2, const double lambda ) { const double Cls=(C2Lipatov(qav, qbv, pa, pb, p1, p2)/(qav.m2()*qbv.m2())); const double kperp=(qav-qbv).perp(); if (kperp>lambda) return Cls; return Cls-4./(kperp*kperp); } double C2Lipatov_Mix( CLHEP::HepLorentzVector const & qav_t, CLHEP::HepLorentzVector const & qbv_t, CLHEP::HepLorentzVector const & qav_b, CLHEP::HepLorentzVector const & qbv_b, CLHEP::HepLorentzVector const & pim, CLHEP::HepLorentzVector const & pip, CLHEP::HepLorentzVector const & pom, CLHEP::HepLorentzVector const & pop ) { const CLHEP::HepLorentzVector CL_t = CLipatov(qav_t, qbv_t, pim, pip, pom, pop); const CLHEP::HepLorentzVector CL_b = CLipatov(qav_b, qbv_b, pim, pip, pom, pop); return -CL_t.dot(CL_b); } double C2Lipatovots_Mix( CLHEP::HepLorentzVector const & qav_t, CLHEP::HepLorentzVector const & qbv_t, CLHEP::HepLorentzVector const & qav_b, CLHEP::HepLorentzVector const & qbv_b, CLHEP::HepLorentzVector const & pa, CLHEP::HepLorentzVector const & pb, CLHEP::HepLorentzVector const & p1, CLHEP::HepLorentzVector const & p2, const double lambda ) { const double Cls = C2Lipatov_Mix(qav_t, qbv_t, qav_b, qbv_b, pa, pb, p1, p2) / sqrt(qav_t.m2() * qbv_t.m2() * qav_b.m2() * qbv_b.m2()); const double kperp = (qav_t - qbv_t).perp(); if (kperp > lambda) { return Cls; } return Cls - 4.0 / (kperp * kperp); } /** Matrix element squared for tree-level current-current scattering * @param aptype Particle a PDG ID * @param bptype Particle b PDG ID * @param pg Unordered gluon momentum * @param pn Particle n Momentum * @param pb Particle b Momentum * @param p1 Particle 1 Momentum * @param pa Particle a Momentum * @returns ME Squared for Tree-Level Current-Current Scattering * * @note The unof contribution can be calculated by reversing the argument ordering. */ double ME_uno_current( [[maybe_unused]] ParticleID aptype, ParticleID bptype, CLHEP::HepLorentzVector const & pg, CLHEP::HepLorentzVector const & pn, CLHEP::HepLorentzVector const & pb, CLHEP::HepLorentzVector const & p1, CLHEP::HepLorentzVector const & pa ){ assert(aptype!=pid::gluon); // aptype cannot be gluon const double t1 = (pa - p1 - pg).m2(); const double t2 = (pb - pn).m2(); return K_q * K(bptype, pn, pb)*currents::ME_unob_qq(pg, p1, pa, pn, pb) / (t1 * t2); } /** Matrix element squared for tree-level current-current scattering * @param bptype Particle b PDG ID * @param pgin Incoming gluon momentum * @param p1 More backward (anti-)quark from splitting Momentum * @param p2 Less backward (anti-)quark from splitting Momentum * @param pn Particle n Momentum * @param pb Particle b Momentum * @returns ME Squared for Tree-Level Current-Current Scattering * * @note The forward qqbar contribution can be calculated by reversing the * argument ordering. 
*/ double ME_qqbar_current( ParticleID bptype, CLHEP::HepLorentzVector const & pgin, CLHEP::HepLorentzVector const & p1, CLHEP::HepLorentzVector const & p2, CLHEP::HepLorentzVector const & pn, CLHEP::HepLorentzVector const & pb ){ const double t1 = (pgin - p1 - p2).m2(); const double t2 = (pn - pb).m2(); return K(bptype, pn, pb)*currents::ME_qqbar_qg(pb, pgin, pn, p2, p1) / (t1 * t2); } /* \brief Matrix element squared for central qqbar tree-level current-current * scattering * * @param aptype Particle a PDG ID * @param bptype Particle b PDG ID * @param nabove Number of gluons emitted before central qqbarpair * @param nbelow Number of gluons emitted after central qqbarpair * @param pa Initial state a Momentum * @param pb Initial state b Momentum * @param pq Final state qbar Momentum * @param pqbar Final state q Momentum * @param partons Vector of all outgoing partons * @returns ME Squared for qqbar_mid Tree-Level Current-Current Scattering */ double ME_qqbar_mid_current( ParticleID aptype, ParticleID bptype, int nabove, CLHEP::HepLorentzVector const & pa, CLHEP::HepLorentzVector const & pb, CLHEP::HepLorentzVector const & pq, CLHEP::HepLorentzVector const & pqbar, std::vector const & partons ){ using namespace currents; // CAM factors for the qqbar amps, and qqbar ordering (default, pq backwards) const bool swap_qqbar=pqbar.rapidity() < pq.rapidity(); CLHEP::HepLorentzVector const & p1 = partons.front(); CLHEP::HepLorentzVector const & pn = partons.back(); const double t1 = (p1 - pa).m2(); const double t2 = (pb - pn).m2(); return K(aptype, p1, pa) *K(bptype, pn, pb) *ME_Cenqqbar_qq( pa, pb, partons, swap_qqbar, nabove ) / (t1 * t2); } /** Matrix element squared for tree-level current-current scattering * @param aptype Particle a PDG ID * @param bptype Particle b PDG ID * @param pn Particle n Momentum * @param pb Particle b Momentum * @param p1 Particle 1 Momentum * @param pa Particle a Momentum * @returns ME Squared for Tree-Level Current-Current Scattering */ double ME_current( ParticleID aptype, ParticleID bptype, CLHEP::HepLorentzVector const & pn, CLHEP::HepLorentzVector const & pb, CLHEP::HepLorentzVector const & p1, CLHEP::HepLorentzVector const & pa ){ const double t1 = (p1 - pa).m2(); const double t2 = (pb - pn).m2(); return K(aptype, p1, pa) * K(bptype, pn, pb) * currents::ME_qq(p1, pa, pn, pb)/(t1 * t2); } /** Matrix element squared for tree-level current-current scattering With W+Jets * @param aptype Particle a PDG ID * @param bptype Particle b PDG ID * @param pn Particle n Momentum * @param pb Particle b Momentum * @param p1 Particle 1 Momentum * @param pa Particle a Momentum * @param wc Boolean. True->W Emitted from b. 
Else; emitted from leg a * @returns ME Squared for Tree-Level Current-Current Scattering */ double ME_W_current( ParticleID aptype, ParticleID bptype, CLHEP::HepLorentzVector const & pn, CLHEP::HepLorentzVector const & pb, CLHEP::HepLorentzVector const & p1, CLHEP::HepLorentzVector const & pa, CLHEP::HepLorentzVector const & plbar, CLHEP::HepLorentzVector const & pl, bool const wc, ParticleProperties const & Wprop ){ using namespace currents; assert(!(aptype==pid::gluon && bptype==pid::gluon)); if(aptype == pid::gluon || wc) { // swap currents to ensure that the W is emitted off the first one return ME_W_current(bptype, aptype, p1, pa, pn, pb, plbar, pl, false, Wprop); } // we assume that the W is emitted off a quark line // if this is not the case, we have to apply CP conjugation, // which is equivalent to swapping lepton and antilepton momenta const double current_contr = is_quark(aptype)? ME_W_qQ(p1,plbar,pl,pa,pn,pb,Wprop): ME_W_qQ(p1,pl,plbar,pa,pn,pb,Wprop); const double t1 = (pa - p1 - pl - plbar).m2(); const double tn = (pn - pb).m2(); return K(aptype, p1, pa) * K(bptype, pn, pb) * current_contr/(4.*(N_C*N_C - 1) * t1 * tn); } /** Matrix element squared for backwards uno tree-level current-current * scattering With W+Jets * * @param aptype Particle a PDG ID * @param bptype Particle b PDG ID * @param pn Particle n Momentum * @param pb Particle b Momentum * @param p1 Particle 1 Momentum * @param pa Particle a Momentum * @param pg Unordered gluon momentum * @param wc Boolean. True->W Emitted from b. Else; emitted from leg a * @returns ME Squared for unob Tree-Level Current-Current Scattering * * @note The unof contribution can be calculated by reversing the argument ordering. */ double ME_W_uno_current( ParticleID aptype, ParticleID bptype, CLHEP::HepLorentzVector const & pn, CLHEP::HepLorentzVector const & pb, CLHEP::HepLorentzVector const & p1, CLHEP::HepLorentzVector const & pa, CLHEP::HepLorentzVector const & pg, CLHEP::HepLorentzVector plbar, CLHEP::HepLorentzVector pl, bool const wc, ParticleProperties const & Wprop ){ using namespace currents; assert(bptype != pid::gluon || aptype != pid::gluon); if(aptype == pid::gluon || wc) { // emission off pb -- pn line // we assume that the W is emitted off a quark line // if this is not the case, we have to apply CP conjugation, // which is equivalent to swapping lepton and antilepton momenta if(is_antiquark(bptype)) std::swap(plbar, pl); const double t1 = (pa - p1 - pg).m2(); const double tn = (pb - pn - plbar - pl).m2(); return K_q*K_q*ME_W_unob_qQ(p1,pa,pn,pb,pg,plbar,pl,Wprop)/(4.*(N_C*N_C - 1) * t1 * tn); } // emission off pa -- p1 line if(is_antiquark(aptype)) std::swap(plbar, pl); const double t1 = (pa - p1 - pg - plbar - pl).m2(); const double tn = (pb - pn).m2(); return K(bptype, pn, pb)/C_F*ME_Wuno_qQ(p1,pa,pn,pb,pg,plbar,pl,Wprop)/(t1 * tn); } /** \brief Matrix element squared for backward qqbar tree-level current-current * scattering With W+Jets * * @param bptype Particle b PDG ID * @param pa Initial state a Momentum * @param pb Initial state b Momentum * @param pq Final state q Momentum * @param pqbar Final state qbar Momentum * @param pn Final state n Momentum * @param plbar Final state anti-lepton momentum * @param pl Final state lepton momentum * @param wc Boolean. True->W Emitted from b. Else; emitted from leg a * @returns ME Squared for qqbarb Tree-Level Current-Current Scattering * * @note calculate forwards qqbar contribution by reversing argument ordering. 
*/ double ME_W_qqbar_current( ParticleID bptype, CLHEP::HepLorentzVector const & pa, CLHEP::HepLorentzVector const & pb, CLHEP::HepLorentzVector pq, CLHEP::HepLorentzVector pqbar, CLHEP::HepLorentzVector const & pn, CLHEP::HepLorentzVector const & plbar, CLHEP::HepLorentzVector const & pl, bool const wc, ParticleProperties const & Wprop ){ using namespace currents; if(is_anyquark(bptype) && wc) { // W Must be emitted from forwards leg. const double t1 = (pa - pq - pqbar).m2(); const double tn = (pb - pn - pl - plbar).m2(); return ME_W_Exqqbar_QQq( pb, pa, pn, pq, pqbar, plbar, pl, is_antiquark(bptype), Wprop )/(t1 * tn); } const double t1 = (pa - pl - plbar - pq - pqbar).m2(); const double tn = (pb - pn).m2(); return K(bptype, pn, pb)/C_F * ME_WExqqbar_qqbarQ(pa, pqbar, plbar, pl, pq, pn, pb, Wprop) / (t1 * tn); } /* \brief Matrix element squared for central qqbar tree-level current-current * scattering With W+Jets * * @param aptype Particle a PDG ID * @param bptype Particle b PDG ID * @param nabove Number of gluons emitted before central qqbarpair * @param nbelow Number of gluons emitted after central qqbarpair * @param pa Initial state a Momentum * @param pb Initial state b Momentum\ * @param pq Final state qbar Momentum * @param pqbar Final state q Momentum * @param partons Vector of all outgoing partons * @param plbar Final state anti-lepton momentum * @param pl Final state lepton momentum * @param wqq Boolean. True siginfies W boson is emitted from Central qqbar * @param wc Boolean. wc=true signifies w boson emitted from leg b; if wqq=false. * @returns ME Squared for qqbar_mid Tree-Level Current-Current Scattering */ double ME_W_qqbar_mid_current( ParticleID aptype, ParticleID bptype, int nabove, int nbelow, CLHEP::HepLorentzVector const & pa, CLHEP::HepLorentzVector const & pb, CLHEP::HepLorentzVector const & pq, CLHEP::HepLorentzVector const & pqbar, std::vector const & partons, CLHEP::HepLorentzVector const & plbar, CLHEP::HepLorentzVector const & pl, bool const wqq, bool const wc, ParticleProperties const & Wprop ){ using namespace currents; // CAM factors for the qqbar amps, and qqbar ordering (default, pq backwards) const bool swap_qqbar=pqbar.rapidity() < pq.rapidity(); double wt=1.; if (aptype==pid::gluon) wt*=K_g(partons.front(),pa)/C_F; if (bptype==pid::gluon) wt*=K_g(partons.back(),pb)/C_F; if(wqq) return wt*ME_WCenqqbar_qq(pa, pb, pl, plbar, partons, is_antiquark(aptype),is_antiquark(bptype), swap_qqbar, nabove, Wprop); return wt*ME_W_Cenqqbar_qq(pa, pb, pl, plbar, partons, is_antiquark(aptype), is_antiquark(bptype), swap_qqbar, nabove, nbelow, wc, Wprop); } /** Matrix element squared for tree-level current-current scattering With Z+Jets * @param aptype Particle a PDG ID * @param bptype Particle b PDG ID * @param pn Particle n Momentum * @param pb Particle b Momentum * @param p1 Particle 1 Momentum * @param pa Particle a Momentum * @param plbar Final state positron momentum * @param pl Final state electron momentum * @param Zprop Z properties * @param stw2 Value of sin(theta_w)^2 * @param ctw Value of cos(theta_w) * @returns ME Squared for Tree-Level Current-Current Scattering */ std::vector ME_Z_current( const ParticleID aptype, const ParticleID bptype, CLHEP::HepLorentzVector const & pn, CLHEP::HepLorentzVector const & pb, CLHEP::HepLorentzVector const & p1, CLHEP::HepLorentzVector const & pa, CLHEP::HepLorentzVector const & plbar, CLHEP::HepLorentzVector const & pl, ParticleProperties const & Zprop, const double stw2, const double ctw ){ using namespace currents; + + 
const auto pZ = pl + plbar; + + const double pref = K(aptype, p1, pa) * K(bptype, pn, pb)/(4.*(N_C*N_C-1)); + // we know they are not both gluons assert(!is_gluon(aptype) || !is_gluon(bptype)); + if(is_anyquark(aptype) && is_gluon(bptype)){ // This is a qg event - return { ME_Z_qg(pa,pb,p1,pn,plbar,pl,aptype,bptype,Zprop,stw2,ctw) }; + const double t1 = (pa-p1-pZ).m2(); + const double tn = (pb-pn ).m2(); + return { pref*ME_Z_qg(pa,pb,p1,pn,plbar,pl,aptype,bptype,Zprop,stw2,ctw)/(t1 * tn) }; } + if(is_gluon(aptype) && is_anyquark(bptype)){ // This is a gq event - return { ME_Z_qg(pb,pa,pn,p1,plbar,pl,bptype,aptype,Zprop,stw2,ctw) }; + const double t1 = (pa-p1 ).m2(); + const double tn = (pb-pn-pZ).m2(); + return { pref*ME_Z_qg(pb,pa,pn,p1,plbar,pl,bptype,aptype,Zprop,stw2,ctw)/(t1 * tn) }; } - assert(is_anyquark(aptype) && is_anyquark(bptype)); + // This is a qq event - return ME_Z_qQ(pa,pb,p1,pn,plbar,pl,aptype,bptype,Zprop,stw2,ctw); + assert(is_anyquark(aptype) && is_anyquark(bptype)); + const double t1_top = (pa-p1-pZ).m2(); + const double t2_top = (pb-pn ).m2(); + + const double t1_bot = (pa-p1 ).m2(); + const double t2_bot = (pb-pn-pZ).m2(); + std::vector res = ME_Z_qQ(pa,pb,p1,pn,plbar,pl,aptype,bptype,Zprop,stw2,ctw); + assert(res.size() == 3); + + res[0] *= pref/(t1_top * t2_top); + res[1] *= pref/(t1_bot * t2_bot); + res[2] *= pref/sqrt(t1_top * t2_top * t1_bot * t2_bot); + + return res; } /** Matrix element squared for backwards uno tree-level current-current * scattering With Z+Jets * * @param aptype Particle a PDG ID * @param bptype Particle b PDG ID * @param pn Particle n Momentum * @param pb Particle b Momentum * @param p1 Particle 1 Momentum * @param pa Particle a Momentum * @param pg Unordered gluon momentum * @param plbar Final state positron momentum * @param pl Final state electron momentum * @param Zprop Z properties * @param stw2 Value of sin(theta_w)^2 * @param ctw Value of cos(theta_w) * @returns ME Squared for unob Tree-Level Current-Current Scattering * * @note The unof contribution can be calculated by reversing the argument ordering. 
*/ std::vector ME_Z_uno_current( const ParticleID aptype, const ParticleID bptype, CLHEP::HepLorentzVector const & pn, CLHEP::HepLorentzVector const & pb, CLHEP::HepLorentzVector const & p1, CLHEP::HepLorentzVector const & pa, CLHEP::HepLorentzVector const & pg, CLHEP::HepLorentzVector const & plbar, CLHEP::HepLorentzVector const & pl, ParticleProperties const & Zprop, const double stw2, const double ctw ){ using namespace currents; // we know they are not both gluons assert(!is_gluon(aptype) || !is_gluon(bptype)); if (is_anyquark(aptype) && is_gluon(bptype)) { // This is a qg event return { ME_Zuno_qg(pa,pb,pg,p1,pn,plbar,pl,aptype,bptype,Zprop,stw2,ctw) }; } if (is_gluon(aptype) && is_anyquark(bptype)) { // This is a gq event return { ME_Zuno_qg(pb,pa,pg,pn,p1,plbar,pl,bptype,aptype,Zprop,stw2,ctw) }; } assert(is_anyquark(aptype) && is_anyquark(bptype)); // This is a qq event return ME_Zuno_qQ(pa,pb,pg,p1,pn,plbar,pl,aptype,bptype,Zprop,stw2,ctw); } /** \brief Matrix element squared for tree-level current-current scattering with Higgs * @param aptype Particle a PDG ID * @param bptype Particle b PDG ID * @param pn Particle n Momentum * @param pb Particle b Momentum * @param p1 Particle 1 Momentum * @param pa Particle a Momentum * @param qH t-channel momentum before Higgs * @param qHp1 t-channel momentum after Higgs * @returns ME Squared for Tree-Level Current-Current Scattering with Higgs */ double ME_Higgs_current( ParticleID aptype, ParticleID bptype, CLHEP::HepLorentzVector const & pn, CLHEP::HepLorentzVector const & pb, CLHEP::HepLorentzVector const & p1, CLHEP::HepLorentzVector const & pa, CLHEP::HepLorentzVector const & qH, // t-channel momentum before Higgs CLHEP::HepLorentzVector const & qHp1, // t-channel momentum after Higgs double mt, bool include_bottom, double mb, double vev ){ const double t1 = (pa - p1).m2(); const double tn = (pb - pn).m2(); return K(aptype, p1, pa) *K(bptype, pn, pb) *currents::ME_H_qQ( pn,pb,p1,pa,-qHp1,-qH,mt,include_bottom,mb,vev ) / (t1 * tn * qH.m2() * qHp1.m2()); } /** \brief Current matrix element squared with Higgs and unordered backward emission * @param aptype Particle A PDG ID * @param bptype Particle B PDG ID * @param pn Particle n Momentum * @param pb Particle b Momentum * @param pg Unordered back Particle Momentum * @param p1 Particle 1 Momentum * @param pa Particle a Momentum * @param qH t-channel momentum before Higgs * @param qHp1 t-channel momentum after Higgs * @returns ME Squared with Higgs and unordered backward emission * * @note This function assumes unordered gluon backwards from pa-p1 current. 
* For unof, reverse call order */ double ME_Higgs_current_uno( ParticleID aptype, ParticleID bptype, CLHEP::HepLorentzVector const & pg, CLHEP::HepLorentzVector const & pn, CLHEP::HepLorentzVector const & pb, CLHEP::HepLorentzVector const & p1, CLHEP::HepLorentzVector const & pa, CLHEP::HepLorentzVector const & qH, // t-channel momentum before Higgs CLHEP::HepLorentzVector const & qHp1, // t-channel momentum after Higgs double mt, bool include_bottom, double mb, double vev ){ const double t1 = (pa - p1 - pg).m2(); const double tn = (pn - pb).m2(); return K(aptype, p1, pa) *K(bptype, pn, pb) *currents::ME_H_unob_qQ( pg,p1,pa,pn,pb,-qH,-qHp1,mt,include_bottom,mb,vev ) / (t1 * qH.m2() * qHp1.m2() * tn); } /** Matrix element squared for tree-level scattering with WW+Jets * @param aptype Particle a PDG ID * @param bptype Particle b PDG ID * @param pn Particle n Momentum * @param pb Particle b Momentum * @param p1 Particle 1 Momentum * @param pa Particle a Momentum * @param pl1bar Particle l1bar Momentum * @param pl1 Particle l1 Momentum * @param pl2bar Particle l2bar Momentum * @param pl2 Particle l2 Momentum * @returns ME Squared for Tree-Level Current-Current Scattering */ std::vector ME_WW_current( ParticleID aptype, ParticleID bptype, CLHEP::HepLorentzVector const & pn, CLHEP::HepLorentzVector const & pb, CLHEP::HepLorentzVector const & p1, CLHEP::HepLorentzVector const & pa, CLHEP::HepLorentzVector const & pl1bar, CLHEP::HepLorentzVector const & pl1, CLHEP::HepLorentzVector const & pl2bar, CLHEP::HepLorentzVector const & pl2, ParticleProperties const & Wprop ){ using namespace currents; if (aptype > 0 && bptype > 0) return ME_WW_qQ(p1, pl1bar, pl1, pa, pn, pl2bar, pl2, pb, Wprop); if (aptype < 0 && bptype > 0) return ME_WW_qbarQ(p1, pl1bar, pl1, pa, pn, pl2bar, pl2, pb, Wprop); if (aptype > 0 && bptype < 0) return ME_WW_qQbar(p1, pl1bar, pl1, pa, pn, pl2bar, pl2, pb, Wprop); if (aptype < 0 && bptype < 0) return ME_WW_qbarQbar(p1, pl1bar, pl1, pa, pn, pl2bar, pl2, pb, Wprop); throw std::logic_error("unreachable"); } void validate(MatrixElementConfig const & config) { #ifndef HEJ_BUILD_WITH_QCDLOOP if(!config.Higgs_coupling.use_impact_factors) { throw std::invalid_argument{ "Invalid Higgs coupling settings.\n" "HEJ without QCDloop support can only use impact factors.\n" "Set use_impact_factors to true or recompile HEJ.\n" }; } #endif if(config.Higgs_coupling.use_impact_factors && config.Higgs_coupling.mt != std::numeric_limits::infinity()) { throw std::invalid_argument{ "Conflicting settings: " "impact factors may only be used in the infinite top mass limit" }; } } } // namespace MatrixElement::MatrixElement( std::function alpha_s, MatrixElementConfig conf ): alpha_s_{std::move(alpha_s)}, param_{std::move(conf)} { validate(param_); } std::vector MatrixElement::tree_kin( Event const & ev ) const { if(!ev.valid_hej_state(param_.soft_pt_regulator)) return {0.}; if(!ev.valid_incoming()){ throw std::invalid_argument{ "Invalid momentum for one or more incoming particles. " "Incoming momenta must have vanishing mass and transverse momentum." 
}; } std::vector bosons = filter_AWZH_bosons(ev.outgoing()); if(bosons.empty()) { return {tree_kin_jets(ev)}; } if(bosons.size() == 1) { switch(bosons[0].type){ case pid::Higgs: return {tree_kin_Higgs(ev)}; case pid::Wp: case pid::Wm: return {tree_kin_W(ev)}; case pid::Z_photon_mix: return tree_kin_Z(ev); // TODO case pid::photon: case pid::Z: default: throw not_implemented("Emission of boson of unsupported type"); } } if(bosons.size() == 2) { if(bosons[0].type == pid::Wp && bosons[1].type == pid::Wp){ return tree_kin_WW(ev); } else if(bosons[0].type == pid::Wm && bosons[1].type == pid::Wm){ return tree_kin_WW(ev); } throw not_implemented("Emission of bosons of unsupported type"); } throw not_implemented("Emission of >2 bosons is unsupported"); } namespace { constexpr int EXTREMAL_JET_IDX = 1; constexpr int NO_EXTREMAL_JET_IDX = 0; bool treat_as_extremal(Particle const & parton){ return parton.p.user_index() == EXTREMAL_JET_IDX; } template double FKL_ladder_weight( InputIterator begin_gluon, InputIterator end_gluon, CLHEP::HepLorentzVector const & q0, CLHEP::HepLorentzVector const & pa, CLHEP::HepLorentzVector const & pb, CLHEP::HepLorentzVector const & p1, CLHEP::HepLorentzVector const & pn, double lambda ){ double wt = 1; auto qi = q0; for(auto gluon_it = begin_gluon; gluon_it != end_gluon; ++gluon_it){ assert(gluon_it->type == pid::gluon); const auto g = to_HepLorentzVector(*gluon_it); const auto qip1 = qi - g; if(treat_as_extremal(*gluon_it)){ wt *= C2Lipatovots(qip1, qi, pa, pb, lambda)*C_A; } else{ wt *= C2Lipatovots(qip1, qi, pa, pb, p1, pn, lambda)*C_A; } qi = qip1; } return wt; } template std::vector FKL_ladder_weight_mix( InputIterator begin_gluon, InputIterator end_gluon, CLHEP::HepLorentzVector const & q0_t, CLHEP::HepLorentzVector const & q0_b, CLHEP::HepLorentzVector const & pa, CLHEP::HepLorentzVector const & pb, CLHEP::HepLorentzVector const & p1, CLHEP::HepLorentzVector const & pn, const double lambda ){ double wt_top = 1; double wt_bot = 1; double wt_mix = 1; auto qi_t = q0_t; auto qi_b = q0_b; for(auto gluon_it = begin_gluon; gluon_it != end_gluon; ++gluon_it){ assert(gluon_it->type == pid::gluon); const auto g = to_HepLorentzVector(*gluon_it); const auto qip1_t = qi_t - g; const auto qip1_b = qi_b - g; if(treat_as_extremal(*gluon_it)){ wt_top *= C2Lipatovots(qip1_t, qi_t, pa, pb, lambda)*C_A; wt_bot *= C2Lipatovots(qip1_b, qi_b, pa, pb, lambda)*C_A; wt_mix *= C2Lipatovots_Mix(qip1_t, qi_t, qip1_b, qi_b, pa, pb, lambda)*C_A; } else{ wt_top *= C2Lipatovots(qip1_t, qi_t, pa, pb, p1, pn, lambda)*C_A; wt_bot *= C2Lipatovots(qip1_b, qi_b, pa, pb, p1, pn, lambda)*C_A; wt_mix *= C2Lipatovots_Mix(qip1_t, qi_t, qip1_b, qi_b, pa, pb, p1, pn, lambda)*C_A; } qi_t = qip1_t; qi_b = qip1_b; } return {wt_top, wt_bot, wt_mix}; } std::vector tag_extremal_jet_partons( Event const & ev ){ auto out_partons = filter_partons(ev.outgoing()); if(out_partons.size() == ev.jets().size()){ // no additional emissions in extremal jets, don't need to tag anything for(auto & parton: out_partons){ parton.p.set_user_index(NO_EXTREMAL_JET_IDX); } return out_partons; } auto const & jets = ev.jets(); std::vector extremal_jets; if(! is_backward_g_to_h(ev)) { auto most_backward = begin(jets); // skip jets caused by unordered emission or qqbar if(ev.type() == event_type::unob || ev.type() == event_type::qqbar_exb){ assert(jets.size() >= 2); ++most_backward; } extremal_jets.emplace_back(*most_backward); } if(! 
is_forward_g_to_h(ev)) { auto most_forward = end(jets) - 1; if(ev.type() == event_type::unof || ev.type() == event_type::qqbar_exf){ assert(jets.size() >= 2); --most_forward; } extremal_jets.emplace_back(*most_forward); } const auto extremal_jet_indices = ev.particle_jet_indices( extremal_jets ); assert(extremal_jet_indices.size() == out_partons.size()); for(std::size_t i = 0; i < out_partons.size(); ++i){ assert(is_parton(out_partons[i])); const int idx = (extremal_jet_indices[i]>=0)? EXTREMAL_JET_IDX: NO_EXTREMAL_JET_IDX; out_partons[i].p.set_user_index(idx); } return out_partons; } double tree_kin_jets_qqbar_mid( ParticleID aptype, ParticleID bptype, CLHEP::HepLorentzVector const & pa, CLHEP::HepLorentzVector const & pb, std::vector const & partons, double lambda ){ CLHEP::HepLorentzVector pq; CLHEP::HepLorentzVector pqbar; const auto backmidquark = std::find_if( begin(partons)+1, end(partons)-1, [](Particle const & s){ return s.type != pid::gluon; } ); assert(backmidquark!=end(partons)-1); if (is_quark(backmidquark->type)){ pq = to_HepLorentzVector(*backmidquark); pqbar = to_HepLorentzVector(*(backmidquark+1)); } else { pqbar = to_HepLorentzVector(*backmidquark); pq = to_HepLorentzVector(*(backmidquark+1)); } auto p1 = to_HepLorentzVector(partons[0]); auto pn = to_HepLorentzVector(partons[partons.size() - 1]); auto q0 = pa - p1; // t-channel momentum after qqbar auto qqbart = q0; const auto begin_ladder = cbegin(partons) + 1; const auto end_ladder_1 = (backmidquark); const auto begin_ladder_2 = (backmidquark+2); const auto end_ladder = cend(partons) - 1; for(auto parton_it = begin_ladder; parton_it < begin_ladder_2; ++parton_it){ qqbart -= to_HepLorentzVector(*parton_it); } const int nabove = std::distance(begin_ladder, backmidquark); std::vector partonsHLV; partonsHLV.reserve(partons.size()); for (std::size_t i = 0; i != partons.size(); ++i) { partonsHLV.push_back(to_HepLorentzVector(partons[i])); } const double current_factor = ME_qqbar_mid_current( aptype, bptype, nabove, pa, pb, pq, pqbar, partonsHLV ); const double ladder_factor = FKL_ladder_weight( begin_ladder, end_ladder_1, q0, pa, pb, p1, pn, lambda )*FKL_ladder_weight( begin_ladder_2, end_ladder, qqbart, pa, pb, p1, pn, lambda ); return current_factor*ladder_factor; } template double tree_kin_jets_qqbar(InIter BeginIn, InIter EndIn, partIter BeginPart, partIter EndPart, double lambda){ const auto pgin = to_HepLorentzVector(*BeginIn); const auto pb = to_HepLorentzVector(*(EndIn-1)); const auto p1 = to_HepLorentzVector(*BeginPart); const auto p2 = to_HepLorentzVector(*(BeginPart+1)); const auto pn = to_HepLorentzVector(*(EndPart-1)); assert((BeginIn)->type==pid::gluon); // Incoming a must be gluon. 
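// p1 and p2 are the momenta of the extremal qqbar pair that the incoming gluon a
// turns into; the FKL ladder below therefore starts at BeginPart+2, with
// t-channel momentum pgin - p1 - p2.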
const double current_factor = ME_qqbar_current( (EndIn-1)->type, pgin, p1, p2, pn, pb )/(4.*(N_C*N_C - 1.)); const double ladder_factor = FKL_ladder_weight( (BeginPart+2), (EndPart-1), pgin-p1-p2, pgin, pb, p1, pn, lambda ); return current_factor*ladder_factor; } template double tree_kin_jets_uno(InIter BeginIn, InIter EndIn, partIter BeginPart, partIter EndPart, double lambda ){ const auto pa = to_HepLorentzVector(*BeginIn); const auto pb = to_HepLorentzVector(*(EndIn-1)); const auto pg = to_HepLorentzVector(*BeginPart); const auto p1 = to_HepLorentzVector(*(BeginPart+1)); const auto pn = to_HepLorentzVector(*(EndPart-1)); const double current_factor = ME_uno_current( (BeginIn)->type, (EndIn-1)->type, pg, pn, pb, p1, pa )/(4.*(N_C*N_C - 1.)); const double ladder_factor = FKL_ladder_weight( (BeginPart+2), (EndPart-1), pa-p1-pg, pa, pb, p1, pn, lambda ); return current_factor*ladder_factor; } } // namespace double MatrixElement::tree_kin_jets(Event const & ev) const { auto const & incoming = ev.incoming(); const auto partons = tag_extremal_jet_partons(ev); if (ev.type()==event_type::FKL){ const auto pa = to_HepLorentzVector(incoming[0]); const auto pb = to_HepLorentzVector(incoming[1]); const auto p1 = to_HepLorentzVector(partons.front()); const auto pn = to_HepLorentzVector(partons.back()); return ME_current( incoming[0].type, incoming[1].type, pn, pb, p1, pa )/(4.*(N_C*N_C - 1.))*FKL_ladder_weight( begin(partons) + 1, end(partons) - 1, pa - p1, pa, pb, p1, pn, param_.regulator_lambda ); } if (ev.type()==event_type::unordered_backward){ return tree_kin_jets_uno(incoming.begin(), incoming.end(), partons.begin(), partons.end(), param_.regulator_lambda); } if (ev.type()==event_type::unordered_forward){ return tree_kin_jets_uno(incoming.rbegin(), incoming.rend(), partons.rbegin(), partons.rend(), param_.regulator_lambda); } if (ev.type()==event_type::extremal_qqbar_backward){ return tree_kin_jets_qqbar(incoming.begin(), incoming.end(), partons.begin(), partons.end(), param_.regulator_lambda); } if (ev.type()==event_type::extremal_qqbar_forward){ return tree_kin_jets_qqbar(incoming.rbegin(), incoming.rend(), partons.rbegin(), partons.rend(), param_.regulator_lambda); } if (ev.type()==event_type::central_qqbar){ return tree_kin_jets_qqbar_mid(incoming[0].type, incoming[1].type, to_HepLorentzVector(incoming[0]), to_HepLorentzVector(incoming[1]), partons, param_.regulator_lambda); } throw std::logic_error("Cannot reweight non-resummable processes in Pure Jets"); } namespace { double tree_kin_W_FKL( ParticleID aptype, ParticleID bptype, CLHEP::HepLorentzVector const & pa, CLHEP::HepLorentzVector const & pb, std::vector const & partons, CLHEP::HepLorentzVector const & plbar, CLHEP::HepLorentzVector const & pl, double lambda, ParticleProperties const & Wprop ){ auto p1 = to_HepLorentzVector(partons[0]); auto pn = to_HepLorentzVector(partons[partons.size() - 1]); const auto begin_ladder = cbegin(partons) + 1; const auto end_ladder = cend(partons) - 1; bool wc = aptype==partons[0].type; //leg b emits w auto q0 = pa - p1; if(!wc) q0 -= pl + plbar; const double current_factor = ME_W_current( aptype, bptype, pn, pb, p1, pa, plbar, pl, wc, Wprop ); const double ladder_factor = FKL_ladder_weight( begin_ladder, end_ladder, q0, pa, pb, p1, pn, lambda ); return current_factor*ladder_factor; } template double tree_kin_W_uno(InIter BeginIn, partIter BeginPart, partIter EndPart, const CLHEP::HepLorentzVector & plbar, const CLHEP::HepLorentzVector & pl, double lambda, ParticleProperties const & Wprop ){ const auto 
pa = to_HepLorentzVector(*BeginIn); const auto pb = to_HepLorentzVector(*(BeginIn+1)); const auto pg = to_HepLorentzVector(*BeginPart); const auto p1 = to_HepLorentzVector(*(BeginPart+1)); const auto pn = to_HepLorentzVector(*(EndPart-1)); bool wc = (BeginIn)->type==(BeginPart+1)->type; //leg b emits w auto q0 = pa - p1 - pg; if(!wc) q0 -= pl + plbar; const double current_factor = ME_W_uno_current( (BeginIn)->type, (BeginIn+1)->type, pn, pb, p1, pa, pg, plbar, pl, wc, Wprop ); const double ladder_factor = FKL_ladder_weight( BeginPart+2, EndPart-1, q0, pa, pb, p1, pn, lambda ); return current_factor*ladder_factor; } template double tree_kin_W_qqbar(InIter BeginIn, partIter BeginPart, partIter EndPart, const CLHEP::HepLorentzVector & plbar, const CLHEP::HepLorentzVector & pl, double lambda, ParticleProperties const & Wprop ){ const bool swap_qqbar=is_quark(*BeginPart); const auto pa = to_HepLorentzVector(*BeginIn); const auto pb = to_HepLorentzVector(*(BeginIn+1)); const auto pq = to_HepLorentzVector(*(BeginPart+(swap_qqbar?0:1))); const auto pqbar = to_HepLorentzVector(*(BeginPart+(swap_qqbar?1:0))); const auto p1 = to_HepLorentzVector(*(BeginPart)); const auto pn = to_HepLorentzVector(*(EndPart-1)); const bool wc = (BeginIn+1)->type!=(EndPart-1)->type; //leg b emits w auto q0 = pa - pq - pqbar; if(!wc) q0 -= pl + plbar; const double current_factor = ME_W_qqbar_current( (BeginIn+1)->type, pa, pb, pq, pqbar, pn, plbar, pl, wc, Wprop ); const double ladder_factor = FKL_ladder_weight( BeginPart+2, EndPart-1, q0, pa, pb, p1, pn, lambda ); return current_factor*ladder_factor; } double tree_kin_W_qqbar_mid( ParticleID aptype, ParticleID bptype, CLHEP::HepLorentzVector const & pa, CLHEP::HepLorentzVector const & pb, std::vector const & partons, CLHEP::HepLorentzVector const & plbar, CLHEP::HepLorentzVector const & pl, double lambda, ParticleProperties const & Wprop ){ CLHEP::HepLorentzVector pq; CLHEP::HepLorentzVector pqbar; const auto backmidquark = std::find_if( begin(partons)+1, end(partons)-1, [](Particle const & s){ return s.type != pid::gluon; } ); assert(backmidquark!=end(partons)-1); if (is_quark(backmidquark->type)){ pq = to_HepLorentzVector(*backmidquark); pqbar = to_HepLorentzVector(*(backmidquark+1)); } else { pqbar = to_HepLorentzVector(*backmidquark); pq = to_HepLorentzVector(*(backmidquark+1)); } auto p1 = to_HepLorentzVector(partons.front()); auto pn = to_HepLorentzVector(partons.back()); auto q0 = pa - p1; // t-channel momentum after qqbar auto qqbart = q0; bool wqq = backmidquark->type != -(backmidquark+1)->type; // qqbar emit W bool wc = !wqq && (aptype==partons.front().type); //leg b emits w assert(!wqq || !wc); if(wqq){ // emission from qqbar qqbart -= pl + plbar; } else if(!wc) { // emission from leg a q0 -= pl + plbar; qqbart -= pl + plbar; } const auto begin_ladder = cbegin(partons) + 1; const auto end_ladder_1 = (backmidquark); const auto begin_ladder_2 = (backmidquark+2); const auto end_ladder = cend(partons) - 1; for(auto parton_it = begin_ladder; parton_it < begin_ladder_2; ++parton_it){ qqbart -= to_HepLorentzVector(*parton_it); } const int nabove = std::distance(begin_ladder, backmidquark); const int nbelow = std::distance(begin_ladder_2, end_ladder); std::vector partonsHLV; partonsHLV.reserve(partons.size()); for (std::size_t i = 0; i != partons.size(); ++i) { partonsHLV.push_back(to_HepLorentzVector(partons[i])); } const double current_factor = ME_W_qqbar_mid_current( aptype, bptype, nabove, nbelow, pa, pb, pq, pqbar, partonsHLV, plbar, pl, wqq, wc, Wprop ); const 
double ladder_factor = FKL_ladder_weight( begin_ladder, end_ladder_1, q0, pa, pb, p1, pn, lambda )*FKL_ladder_weight( begin_ladder_2, end_ladder, qqbart, pa, pb, p1, pn, lambda ); return current_factor*ladder_factor; } } // namespace double MatrixElement::tree_kin_W(Event const & ev) const { using namespace event_type; auto const & incoming(ev.incoming()); #ifndef NDEBUG // assert that there is exactly one decay corresponding to the W assert(ev.decays().size() == 1); auto const & w_boson{ std::find_if(ev.outgoing().cbegin(), ev.outgoing().cend(), [] (Particle const & p) -> bool { return std::abs(p.type) == ParticleID::Wp; }) }; assert(w_boson != ev.outgoing().cend()); assert( static_cast(ev.decays().cbegin()->first) == std::distance(ev.outgoing().cbegin(), w_boson) ); #endif // find decay products of W auto const & decay{ ev.decays().cbegin()->second }; assert(decay.size() == 2); assert( ( is_anylepton(decay.at(0)) && is_anyneutrino(decay.at(1)) ) || ( is_anylepton(decay.at(1)) && is_anyneutrino(decay.at(0)) ) ); // get lepton & neutrino CLHEP::HepLorentzVector plbar; CLHEP::HepLorentzVector pl; if (decay.at(0).type < 0){ plbar = to_HepLorentzVector(decay.at(0)); pl = to_HepLorentzVector(decay.at(1)); } else{ pl = to_HepLorentzVector(decay.at(0)); plbar = to_HepLorentzVector(decay.at(1)); } const auto pa = to_HepLorentzVector(incoming[0]); const auto pb = to_HepLorentzVector(incoming[1]); const auto partons = tag_extremal_jet_partons(ev); if(ev.type() == FKL){ return tree_kin_W_FKL(incoming[0].type, incoming[1].type, pa, pb, partons, plbar, pl, param_.regulator_lambda, param_.ew_parameters.Wprop()); } if(ev.type() == unordered_backward){ return tree_kin_W_uno(cbegin(incoming), cbegin(partons), cend(partons), plbar, pl, param_.regulator_lambda, param_.ew_parameters.Wprop()); } if(ev.type() == unordered_forward){ return tree_kin_W_uno(crbegin(incoming), crbegin(partons), crend(partons), plbar, pl, param_.regulator_lambda, param_.ew_parameters.Wprop()); } if(ev.type() == extremal_qqbar_backward){ return tree_kin_W_qqbar(cbegin(incoming), cbegin(partons), cend(partons), plbar, pl, param_.regulator_lambda, param_.ew_parameters.Wprop()); } if(ev.type() == extremal_qqbar_forward){ return tree_kin_W_qqbar(crbegin(incoming), crbegin(partons), crend(partons), plbar, pl, param_.regulator_lambda, param_.ew_parameters.Wprop()); } assert(ev.type() == central_qqbar); return tree_kin_W_qqbar_mid(incoming[0].type, incoming[1].type, pa, pb, partons, plbar, pl, param_.regulator_lambda, param_.ew_parameters.Wprop()); } namespace /* WW */ { std::vector tree_kin_WW_FKL( ParticleID aptype, ParticleID bptype, CLHEP::HepLorentzVector const & pa, CLHEP::HepLorentzVector const & pb, std::vector const & partons, CLHEP::HepLorentzVector const & pl1bar, CLHEP::HepLorentzVector const & pl1, CLHEP::HepLorentzVector const & pl2bar, CLHEP::HepLorentzVector const & pl2, double lambda, ParticleProperties const & Wprop ){ assert(is_anyquark(aptype)); assert(is_anyquark(bptype)); auto p1 = to_HepLorentzVector(partons[0]); auto pn = to_HepLorentzVector(partons[partons.size() - 1]); const std::vector current_factor = ME_WW_current( aptype, bptype, pn, pb, p1, pa, pl1bar, pl1, pl2bar, pl2, Wprop ); auto const begin_ladder = cbegin(partons) + 1; auto const end_ladder = cend(partons) - 1; // pa -> W1 p1, pb -> W2 + pn const auto q0 = pa - p1 - (pl1 + pl1bar); // pa -> W2 p1, pb -> W1 + pn const auto q1 = pa - p1 - (pl2 + pl2bar); const std::vector ladder_factor = FKL_ladder_weight_mix( begin_ladder, end_ladder, q0, q1, pa, pb, p1, 
pn, lambda ); assert(current_factor.size() == 3); assert(current_factor.size() == ladder_factor.size()); std::vector result(current_factor.size()); for(size_t i=0; i MatrixElement::tree_kin_WW(Event const & ev) const { using namespace event_type; auto const & incoming(ev.incoming()); auto const pa = to_HepLorentzVector(incoming[0]); auto const pb = to_HepLorentzVector(incoming[1]); auto const partons = tag_extremal_jet_partons(ev); // W1 & W2 assert(ev.decays().size() == 2); std::vector plbar; std::vector pl; for(auto const & decay_pair : ev.decays()) { auto const decay = decay_pair.second; // TODO: how to label W1, W2 if(decay.at(0).type < 0) { plbar.emplace_back(to_HepLorentzVector(decay.at(0))); pl .emplace_back(to_HepLorentzVector(decay.at(1))); } else { pl .emplace_back(to_HepLorentzVector(decay.at(0))); plbar.emplace_back(to_HepLorentzVector(decay.at(1))); } } if(ev.type() == FKL) { return tree_kin_WW_FKL( incoming[0].type, incoming[1].type, pa, pb, partons, plbar[0], pl[0], plbar[1], pl[1], param_.regulator_lambda, param_.ew_parameters.Wprop() ); } throw std::logic_error("Can only reweight FKL events in WW"); } namespace{ std::vector tree_kin_Z_FKL( const ParticleID aptype, const ParticleID bptype, CLHEP::HepLorentzVector const & pa, CLHEP::HepLorentzVector const & pb, std::vector const & partons, CLHEP::HepLorentzVector const & plbar, CLHEP::HepLorentzVector const & pl, const double lambda, ParticleProperties const & Zprop, const double stw2, const double ctw ){ const auto p1 = to_HepLorentzVector(partons[0]); const auto pn = to_HepLorentzVector(partons[partons.size() - 1]); const auto begin_ladder = cbegin(partons) + 1; const auto end_ladder = cend(partons) - 1; const std::vector current_factor = ME_Z_current( aptype, bptype, pn, pb, p1, pa, plbar, pl, Zprop, stw2, ctw ); std::vector ladder_factor; if(is_gluon(bptype)){ // This is a qg event const auto q0 = pa-p1-plbar-pl; ladder_factor.push_back(FKL_ladder_weight(begin_ladder, end_ladder, q0, pa, pb, p1, pn, lambda)); } else if(is_gluon(aptype)){ // This is a gq event const auto q0 = pa-p1; ladder_factor.push_back(FKL_ladder_weight(begin_ladder, end_ladder, q0, pa, pb, p1, pn, lambda)); } else { // This is a qq event const auto q0 = pa-p1-plbar-pl; const auto q1 = pa-p1; ladder_factor=FKL_ladder_weight_mix(begin_ladder, end_ladder, q0, q1, pa, pb, p1, pn, lambda); } std::vector result; for(size_t i=0; i std::vector tree_kin_Z_uno(InIter BeginIn, partIter BeginPart, partIter EndPart, const CLHEP::HepLorentzVector & plbar, const CLHEP::HepLorentzVector & pl, const double lambda, ParticleProperties const & Zprop, const double stw2, const double ctw){ const auto pa = to_HepLorentzVector(*BeginIn); const auto pb = to_HepLorentzVector(*(BeginIn+1)); const auto pg = to_HepLorentzVector(*BeginPart); const auto p1 = to_HepLorentzVector(*(BeginPart+1)); const auto pn = to_HepLorentzVector(*(EndPart-1)); const ParticleID aptype = (BeginIn)->type; const ParticleID bptype = (BeginIn+1)->type; const std::vector current_factor = ME_Z_uno_current( aptype, bptype, pn, pb, p1, pa, pg, plbar, pl, Zprop, stw2, ctw ); std::vector ladder_factor; if(is_gluon(bptype)){ // This is a qg event const auto q0 = pa-pg-p1-plbar-pl; ladder_factor.push_back(FKL_ladder_weight(BeginPart+2, EndPart-1, q0, pa, pb, p1, pn, lambda)); }else if(is_gluon(aptype)){ // This is a gq event const auto q0 = pa-pg-p1; ladder_factor.push_back(FKL_ladder_weight(BeginPart+2, EndPart-1, q0, pa, pb, p1, pn, lambda)); }else{ // This is a qq event const auto q0 = pa-pg-p1-plbar-pl; 
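// Reading of the two t-channel momenta: q0 subtracts the lepton pair, i.e. Z emission
// off the a/1 (unordered) current; q1 below leaves it on the b/n side, mirroring the
// q0/q1 pair used in the FKL and WW cases above.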
const auto q1 = pa-pg-p1; ladder_factor=FKL_ladder_weight_mix(BeginPart+2, EndPart-1, q0, q1, pa, pb, p1, pn, lambda); } std::vector result; for(size_t i=0; i MatrixElement::tree_kin_Z(Event const & ev) const { using namespace event_type; auto const & incoming(ev.incoming()); // find decay products of Z auto const & decay{ ev.decays().cbegin()->second }; assert(decay.size() == 2); assert(is_anylepton(decay.at(0)) && !is_anyneutrino(decay.at(0)) && decay.at(0).type==-decay.at(1).type); // get leptons CLHEP::HepLorentzVector plbar; CLHEP::HepLorentzVector pl; if (decay.at(0).type < 0){ plbar = to_HepLorentzVector(decay.at(0)); pl = to_HepLorentzVector(decay.at(1)); } else{ pl = to_HepLorentzVector(decay.at(0)); plbar = to_HepLorentzVector(decay.at(1)); } const auto pa = to_HepLorentzVector(incoming[0]); const auto pb = to_HepLorentzVector(incoming[1]); const auto partons = tag_extremal_jet_partons(ev); const double stw2 = param_.ew_parameters.sin2_tw(); const double ctw = param_.ew_parameters.cos_tw(); if(ev.type() == FKL){ return tree_kin_Z_FKL(incoming[0].type, incoming[1].type, pa, pb, partons, plbar, pl, param_.regulator_lambda, param_.ew_parameters.Zprop(), stw2, ctw); } if(ev.type() == unordered_backward){ return tree_kin_Z_uno(cbegin(incoming), cbegin(partons), cend(partons), plbar, pl, param_.regulator_lambda, param_.ew_parameters.Zprop(), stw2, ctw); } if(ev.type() == unordered_forward){ return tree_kin_Z_uno(crbegin(incoming), crbegin(partons), crend(partons), plbar, pl, param_.regulator_lambda, param_.ew_parameters.Zprop(), stw2, ctw); } throw std::logic_error("Can only reweight FKL or uno processes in Z+Jets"); } double MatrixElement::tree_kin_Higgs(Event const & ev) const { if(ev.outgoing().front().type == pid::Higgs){ return tree_kin_Higgs_first(ev); } if(ev.outgoing().back().type == pid::Higgs){ return tree_kin_Higgs_last(ev); } return tree_kin_Higgs_between(ev); } // kinetic matrix element square for backward Higgs emission // cf. 
eq:ME_h_jets_peripheral in developer manual, // but without factors \alpha_s and the FKL ladder double MatrixElement::MH2_backwardH( const ParticleID type_forward, CLHEP::HepLorentzVector const & pa, CLHEP::HepLorentzVector const & pb, CLHEP::HepLorentzVector const & pH, CLHEP::HepLorentzVector const & pn ) const { using namespace currents; const double vev = param_.ew_parameters.vev(); return K(type_forward, pn, pb)*ME_H_gq( pH, pa, pn, pb, param_.Higgs_coupling.mt, param_.Higgs_coupling.include_bottom, param_.Higgs_coupling.mb, vev )/(4.*(N_C*N_C - 1)*(pa-pH).m2()*(pb-pn).m2()); } // kinetic matrix element square for backward unordered emission // and forward g -> Higgs double MatrixElement::MH2_unob_forwardH( CLHEP::HepLorentzVector const & pa, CLHEP::HepLorentzVector const & pb, CLHEP::HepLorentzVector const & pg, CLHEP::HepLorentzVector const & p1, CLHEP::HepLorentzVector const & pH ) const { using namespace currents; const double vev = param_.ew_parameters.vev(); constexpr double K_f1 = K_q; constexpr double nhel = 4.; return K_f1*ME_juno_jgH( pg, p1, pa, pH, pb, param_.Higgs_coupling.mt, param_.Higgs_coupling.include_bottom, param_.Higgs_coupling.mb, vev )/(nhel*(N_C*N_C - 1)*(pa - p1 - pg).m2()*(pb - pH).m2()); } double MatrixElement::tree_kin_Higgs_first(Event const & ev) const { auto const & incoming = ev.incoming(); auto const & outgoing = ev.outgoing(); assert(outgoing.front().type == pid::Higgs); if(is_anyquark(incoming.front())) { assert(incoming.front().type == outgoing[1].type); return tree_kin_Higgs_between(ev); } const auto partons = tag_extremal_jet_partons(ev); const auto pa = to_HepLorentzVector(incoming.front()); const auto pb = to_HepLorentzVector(incoming.back()); const auto pH = to_HepLorentzVector(outgoing.front()); const auto end_ladder = end(partons) - ((ev.type() == event_type::unof)?2:1); const auto pn = to_HepLorentzVector(*end_ladder); const double ladder = FKL_ladder_weight( begin(partons), end_ladder, pa - pH, pa, pb, pa, pn, param_.regulator_lambda ); if(ev.type() == event_type::unof) { const auto pg = to_HepLorentzVector(outgoing.back()); return MH2_unob_forwardH( pb, pa, pg, pn, pH )*ladder; } return MH2_backwardH( incoming.back().type, pa, pb, pH, pn )*ladder; } double MatrixElement::tree_kin_Higgs_last(Event const & ev) const { auto const & incoming = ev.incoming(); auto const & outgoing = ev.outgoing(); assert(outgoing.back().type == pid::Higgs); if(is_anyquark(incoming.back())) { assert(incoming.back().type == outgoing[outgoing.size()-2].type); return tree_kin_Higgs_between(ev); } const auto partons = tag_extremal_jet_partons(ev); const auto pa = to_HepLorentzVector(incoming.front()); const auto pb = to_HepLorentzVector(incoming.back()); const auto pH = to_HepLorentzVector(outgoing.back()); auto begin_ladder = begin(partons) + 1; auto q0 = pa - to_HepLorentzVector(partons.front()); if(ev.type() == event_type::unob) { q0 -= to_HepLorentzVector(*begin_ladder); ++begin_ladder; } const auto p1 = to_HepLorentzVector(*(begin_ladder - 1)); const double ladder = FKL_ladder_weight( begin_ladder, end(partons), q0, pa, pb, p1, pb, param_.regulator_lambda ); if(ev.type() == event_type::unob) { const auto pg = to_HepLorentzVector(outgoing.front()); return MH2_unob_forwardH( pa, pb, pg, p1, pH )*ladder; } return MH2_backwardH( incoming.front().type, pb, pa, pH, p1 )*ladder; } namespace { template double tree_kin_Higgs_uno(InIter BeginIn, InIter EndIn, partIter BeginPart, partIter EndPart, CLHEP::HepLorentzVector const & qH, CLHEP::HepLorentzVector const & qHp1, 
double mt, bool inc_bot, double mb, double vev ){ const auto pa = to_HepLorentzVector(*BeginIn); const auto pb = to_HepLorentzVector(*(EndIn-1)); const auto pg = to_HepLorentzVector(*BeginPart); const auto p1 = to_HepLorentzVector(*(BeginPart+1)); const auto pn = to_HepLorentzVector(*(EndPart-1)); return ME_Higgs_current_uno( (BeginIn)->type, (EndIn-1)->type, pg, pn, pb, p1, pa, qH, qHp1, mt, inc_bot, mb, vev ); } } // namespace double MatrixElement::tree_kin_Higgs_between(Event const & ev) const { using namespace event_type; auto const & incoming = ev.incoming(); auto const & outgoing = ev.outgoing(); const auto the_Higgs = std::find_if( begin(outgoing), end(outgoing), [](Particle const & s){ return s.type == pid::Higgs; } ); assert(the_Higgs != end(outgoing)); const auto pH = to_HepLorentzVector(*the_Higgs); const auto partons = tag_extremal_jet_partons(ev); const auto pa = to_HepLorentzVector(incoming[0]); const auto pb = to_HepLorentzVector(incoming[1]); auto p1 = to_HepLorentzVector( partons[(ev.type() == unob)?1:0] ); auto pn = to_HepLorentzVector( partons[partons.size() - ((ev.type() == unof)?2:1)] ); auto first_after_Higgs = begin(partons) + (the_Higgs-begin(outgoing)); assert( (first_after_Higgs == end(partons) && ( (ev.type() == unob) || partons.back().type != pid::gluon )) || first_after_Higgs->rapidity() >= the_Higgs->rapidity() ); assert( (first_after_Higgs == begin(partons) && ( (ev.type() == unof) || partons.front().type != pid::gluon )) || (first_after_Higgs-1)->rapidity() <= the_Higgs->rapidity() ); // always treat the Higgs as if it were in between the extremal FKL partons if(first_after_Higgs == begin(partons)) ++first_after_Higgs; else if(first_after_Higgs == end(partons)) --first_after_Higgs; // t-channel momentum before Higgs auto qH = pa; for(auto parton_it = begin(partons); parton_it != first_after_Higgs; ++parton_it){ qH -= to_HepLorentzVector(*parton_it); } auto q0 = pa - p1; auto begin_ladder = begin(partons) + 1; auto end_ladder = end(partons) - 1; double current_factor = NAN; if(ev.type() == FKL){ current_factor = ME_Higgs_current( incoming[0].type, incoming[1].type, pn, pb, p1, pa, qH, qH - pH, param_.Higgs_coupling.mt, param_.Higgs_coupling.include_bottom, param_.Higgs_coupling.mb, param_.ew_parameters.vev() ); } else if(ev.type() == unob){ current_factor = tree_kin_Higgs_uno( begin(incoming), end(incoming), begin(partons), end(partons), qH, qH-pH, param_.Higgs_coupling.mt, param_.Higgs_coupling.include_bottom, param_.Higgs_coupling.mb, param_.ew_parameters.vev() ); const auto p_unob = to_HepLorentzVector(partons.front()); q0 -= p_unob; p1 += p_unob; ++begin_ladder; } else if(ev.type() == unof){ current_factor = tree_kin_Higgs_uno( rbegin(incoming), rend(incoming), rbegin(partons), rend(partons), qH-pH, qH, param_.Higgs_coupling.mt, param_.Higgs_coupling.include_bottom, param_.Higgs_coupling.mb, param_.ew_parameters.vev() ); pn += to_HepLorentzVector(partons.back()); --end_ladder; } else{ throw std::logic_error("Can only reweight FKL or uno processes in H+Jets"); } const double ladder_factor = FKL_ladder_weight( begin_ladder, first_after_Higgs, q0, pa, pb, p1, pn, param_.regulator_lambda )*FKL_ladder_weight( first_after_Higgs, end_ladder, qH - pH, pa, pb, p1, pn, param_.regulator_lambda ); return current_factor/(4.*(N_C*N_C-1.))*ladder_factor; } namespace { double get_AWZH_coupling(Event const & ev, double alpha_s, double alpha_w) { std::vector bosons = filter_AWZH_bosons(ev.outgoing()); if(bosons.empty()) { return 1.; } if(bosons.size() == 1) { 
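// Coupling factor for the emitted boson: alpha_s^2 for the Higgs, alpha_w^2 for a
// single W or Z/photon mix; the same-sign WW case below carries alpha_w^4.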
switch(bosons[0].type){ case pid::Higgs: return alpha_s*alpha_s; case pid::Wp: case pid::Wm: return alpha_w*alpha_w; case pid::Z_photon_mix: return alpha_w*alpha_w; // TODO case pid::photon: case pid::Z: default: throw not_implemented("Emission of boson of unsupported type"); } } if(bosons.size() == 2) { if(bosons[0].type == pid::Wp && bosons[1].type == pid::Wp) { return alpha_w*alpha_w*alpha_w*alpha_w; } else if(bosons[0].type == pid::Wm && bosons[1].type == pid::Wm) { return alpha_w*alpha_w*alpha_w*alpha_w; } throw not_implemented("Emission of bosons of unsupported type"); } throw not_implemented("Emission of >2 bosons is unsupported"); } } // namespace double MatrixElement::tree_param(Event const & ev, double mur) const { assert(is_resummable(ev.type())); const auto begin_partons = ev.begin_partons(); const auto end_partons = ev.end_partons(); const auto num_partons = std::distance(begin_partons, end_partons); const double alpha_s = alpha_s_(mur); const double gs2 = 4.*M_PI*alpha_s; double res = std::pow(gs2, num_partons); if(param_.log_correction){ // use alpha_s(q_perp), evolved to mur assert(num_partons >= 2); const auto first_emission = std::next(begin_partons); const auto last_emission = std::prev(end_partons); for(auto parton = first_emission; parton != last_emission; ++parton){ res *= 1. + alpha_s/(2.*M_PI)*BETA0*std::log(mur/parton->perp()); } } return get_AWZH_coupling(ev, alpha_s, param_.ew_parameters.alpha_w())*res; } } // namespace HEJ diff --git a/src/Zjets.cc b/src/Zjets.cc index 96dd954..759f4ff 100644 --- a/src/Zjets.cc +++ b/src/Zjets.cc @@ -1,496 +1,472 @@ /** * \authors The HEJ collaboration (see AUTHORS for details) * \date 2020 * \copyright GPLv2 or later */ #include #include "HEJ/Constants.hh" #include "HEJ/EWConstants.hh" #include "HEJ/PDG_codes.hh" #include "HEJ/jets.hh" #include "HEJ/utility.hh" // generated headers #include "HEJ/currents/jV_j.hh" #include "HEJ/currents/jV_juno.hh" #include "HEJ/currents/jVuno_j.hh" namespace HEJ { namespace currents { namespace { using COM = std::complex; // Z propagator COM ZProp(const double q, ParticleProperties const & zprop){ return 1. / (q - zprop.mass*zprop.mass + COM(0.,1.)*zprop.width*zprop.mass); } // Photon propagator COM GProp(const double q) { return 1. / q; } // Weak charge template double Zq(ParticleID PID, double stw2, double ctw); // Weak charge - Positive Spin template<> double Zq( const ParticleID PID, const double stw2, const double ctw ) { using namespace pid; // quarks if (PID == d || PID == s || PID == b) return (+ 1.0 * stw2 / 3.0) / ctw; if (PID == u || PID == c) return (- 2.0 * stw2 / 3.0) / ctw; // antiquarks if (PID == d_bar || PID == s_bar || PID == b_bar) return (+ 0.5 - 1.0 * stw2 / 3.0) / ctw; if (PID == u_bar || PID == c_bar) return (- 0.5 + 2.0 * stw2 / 3.0) / ctw; // electron if (PID == electron) return stw2 / ctw; throw std::logic_error("ERROR! No weak charge found"); } // Weak charge - Negative Spin template<> double Zq( const ParticleID PID, const double stw2, const double ctw ) { using namespace pid; // quarks if (PID == d || PID == s || PID == b) return (- 0.5 + 1.0 * stw2 / 3.0) / ctw; if (PID == u || PID == c) return (+ 0.5 - 2.0 * stw2 / 3.0) / ctw; // antiquarks if (PID == d_bar || PID == s_bar || PID == b_bar) return (- 1.0 * stw2 / 3.0) / ctw; if (PID == u_bar || PID == c_bar) return (+ 2.0 * stw2 / 3.0) / ctw; // electron if (PID == electron) return (-1.0 / 2.0 + stw2) / ctw; throw std::logic_error("ERROR! 
No weak charge found"); } // Electric charge double Gq (const ParticleID PID) { using namespace pid; if (PID == d || PID == s || PID == b) return -1./3.; if (PID == u || PID == c) return +2./3.; if (PID == d_bar || PID == s_bar || PID == b_bar) return +1./3.; if (PID == u_bar || PID == c_bar) return -2./3.; throw std::logic_error("ERROR! No electric charge found"); } //! Prefactor for Z+Jets Contributions /** * @brief Z+Jets Contributions Prefactor * @param aptype Incoming Particle 1 type (Z emission) * @param propZ Z Propagator * @param propG Photon Propagator * @param stw2 Value of sin(theta_w)^2 * @param ctw Value of cos(theta_w) * @returns Prefactors for Z+Jets for all helicity combinations * (includes couplings and propagators) */ MultiArray Z_amp_pref( const ParticleID aptype, COM const & propZ, COM const & propG, const double stw2, const double ctw ){ using helicity::plus; using helicity::minus; const double zq_a_p = Zq(aptype, stw2, ctw); const double zq_a_m = Zq(aptype, stw2, ctw); const double ze_p = Zq(pid::electron, stw2, ctw); const double ze_m = Zq(pid::electron, stw2, ctw); const double gq_a = Gq(aptype); MultiArray res; res[ plus][ plus] = -2.*(zq_a_p * ze_p * propZ - gq_a * propG * stw2); res[ plus][minus] = -2.*(zq_a_p * ze_m * propZ - gq_a * propG * stw2); res[minus][minus] = -2.*(zq_a_m * ze_m * propZ - gq_a * propG * stw2); res[minus][ plus] = -2.*(zq_a_m * ze_p * propZ - gq_a * propG * stw2); return res; } //! Z+Jets FKL Contribution /** * @brief Z+Jets FKL Contribution * @param pa Incoming Particle 1 (Z emission) * @param pb Incoming Particle 2 * @param p1 Outgoing Particle 1 (Z emission) * @param p2 Outgoing Particle 2 * @param pep Outgoing positron * @param pem Outgoing electron * @returns j_Z^\mu j_\mu for all helicities h1, hl, h2 */ MultiArray jZ_j( const HLV & pa, const HLV & pb, const HLV & p1, const HLV & p2, const HLV & pep, const HLV & pem ){ using helicity::plus; using helicity::minus; MultiArray res; // NOLINTNEXTLINE #define ASSIGN_HEL(RES, J, H1, HL, H2) \ RES[H1][HL][H2] = J(pa, p1, pb, p2, pem, pep) ASSIGN_HEL(res, jV_j, plus, minus, minus); ASSIGN_HEL(res, jV_j, plus, minus, plus); ASSIGN_HEL(res, jV_j, plus, plus, minus); ASSIGN_HEL(res, jV_j, plus, plus, plus); #undef ASSIGN_HEL for(auto hl: {minus, plus}) { for(auto h2: {minus, plus}) { res[minus][hl][h2] = std::conj(res[plus][flip(hl)][flip(h2)]); } } return res; } // X and Y as used in contractions with unordered currents struct XY { COM X; COM Y; }; /** * @brief Z+Jets Unordered Contribution, unordered on Z side * @tparam h1 Helicity of line 1 (Z emission line) * @tparam hl Lepton Helicity * @tparam h2 Helicity of line 2 * @tparam hg Helicity of unordered gluon * @param pa Incoming Particle 1 (Z and Uno emission) * @param pb Incoming Particle 2 * @param pg Unordered Gluon * @param p1 Outgoing Particle 1 (Z and Uno emission) * @param p2 Outgoing Particle 2 * @param pep Outgoing positron * @param pem Outgoing electron * @returns X: (U1-L), Y: (U2+l) * * Calculates j_Z_{uno}^\mu j_\mu. Ie, unordered with Z emission same side. 
*/ template XY amp_jZuno_j( const HLV & pa, const HLV & pb, const HLV & pg, const HLV & p1, const HLV & p2, const HLV & pep, const HLV & pem ){ const COM u1 = U1(p1, p2, pa, pb, pg, pem, pep); const COM u2 = U2(p1, p2, pa, pb, pg, pem, pep); const COM l = L (p1, p2, pa, pb, pg, pem, pep); return {u1 - l, u2 + l}; } MultiArray jZuno_j( const HLV & pa, const HLV & pb, const HLV & pg, const HLV & p1, const HLV & p2, const HLV & pep, const HLV & pem ){ using helicity::plus; using helicity::minus; MultiArray xy; // NOLINTNEXTLINE #define ASSIGN_HEL(XY, J, H1, H2, H3, H4) \ XY[H1][H2][H3][H4] = J(pa, pb, pg, p1, p2, pep, pem) // NOLINT ASSIGN_HEL(xy, amp_jZuno_j, minus, minus, minus, minus); ASSIGN_HEL(xy, amp_jZuno_j, minus, minus, minus, plus); ASSIGN_HEL(xy, amp_jZuno_j, minus, minus, plus, minus); ASSIGN_HEL(xy, amp_jZuno_j, minus, minus, plus, plus); ASSIGN_HEL(xy, amp_jZuno_j, minus, plus, minus, minus); ASSIGN_HEL(xy, amp_jZuno_j, minus, plus, minus, plus); ASSIGN_HEL(xy, amp_jZuno_j, minus, plus, plus, minus); ASSIGN_HEL(xy, amp_jZuno_j, minus, plus, plus, plus); ASSIGN_HEL(xy, amp_jZuno_j, plus, minus, minus, minus); ASSIGN_HEL(xy, amp_jZuno_j, plus, minus, minus, plus); ASSIGN_HEL(xy, amp_jZuno_j, plus, minus, plus, minus); ASSIGN_HEL(xy, amp_jZuno_j, plus, minus, plus, plus); ASSIGN_HEL(xy, amp_jZuno_j, plus, plus, minus, minus); ASSIGN_HEL(xy, amp_jZuno_j, plus, plus, minus, plus); ASSIGN_HEL(xy, amp_jZuno_j, plus, plus, plus, minus); ASSIGN_HEL(xy, amp_jZuno_j, plus, plus, plus, plus); #undef ASSIGN_HEL return xy; } /** * @brief Z+Jets Unordered Contribution, unordered opposite to Z side * @tparam h1 Helicity of line 1 (Z emission) * @tparam hl Lepton Helicity * @tparam h2 Helicity of line 2 (unordered emission) * @tparam hg Helicity of unordered gluon * @param pa Incoming Particle 1 (Z emission) * @param pb Incoming Particle 2 (unordered emission) * @param p1 Outgoing Particle 1 (Z emission) * @param p2 Outgoing Particle 2 (unordered emission) * @param pg Unordered Gluon * @param pep Outgoing positron * @param pem Outgoing electron * @returns X: (U1-L), Y: (U2+l) * * Calculates j_Z^\mu j_{uno}_\mu. Ie, unordered with Z emission opposite side. 
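 * The same X/Y colour combination as for amp_jZuno_j applies, with the unordered
 * gluon now attached to the non-Z (b/2) current.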
*/ template XY amp_jZ_juno( const HLV & pa, const HLV & pb, const HLV & p1, const HLV & p2, const HLV & pg, const HLV & pep, const HLV & pem ){ const COM u1 = U1_jV(pa, p1, pb, p2, pg, pem, pep); const COM u2 = U2_jV(pa, p1, pb, p2, pg, pem, pep); const COM l = L_jV (pa, p1, pb, p2, pg, pem, pep); return {u1 - l, u2 + l}; } MultiArray jZ_juno( const HLV & pa, const HLV & pb, const HLV & p1, const HLV & p2, const HLV & pg, const HLV & pep, const HLV & pem ){ using helicity::plus; using helicity::minus; MultiArray xy; // NOLINTNEXTLINE #define ASSIGN_HEL(XY, J, H1, H2, H3, H4) \ XY[H1][H2][H3][H4] = J(pa, pb, p1, p2, pg, pep, pem) ASSIGN_HEL(xy, amp_jZ_juno, minus, minus, minus, minus); ASSIGN_HEL(xy, amp_jZ_juno, minus, minus, minus, plus); ASSIGN_HEL(xy, amp_jZ_juno, minus, minus, plus, minus); ASSIGN_HEL(xy, amp_jZ_juno, minus, minus, plus, plus); ASSIGN_HEL(xy, amp_jZ_juno, minus, plus, minus, minus); ASSIGN_HEL(xy, amp_jZ_juno, minus, plus, minus, plus); ASSIGN_HEL(xy, amp_jZ_juno, minus, plus, plus, minus); ASSIGN_HEL(xy, amp_jZ_juno, minus, plus, plus, plus); ASSIGN_HEL(xy, amp_jZ_juno, plus, minus, minus, minus); ASSIGN_HEL(xy, amp_jZ_juno, plus, minus, minus, plus); ASSIGN_HEL(xy, amp_jZ_juno, plus, minus, plus, minus); ASSIGN_HEL(xy, amp_jZ_juno, plus, minus, plus, plus); ASSIGN_HEL(xy, amp_jZ_juno, plus, plus, minus, minus); ASSIGN_HEL(xy, amp_jZ_juno, plus, plus, minus, plus); ASSIGN_HEL(xy, amp_jZ_juno, plus, plus, plus, minus); ASSIGN_HEL(xy, amp_jZ_juno, plus, plus, plus, plus); #undef ASSIGN_HEL return xy; } } // Anonymous Namespace std::vector ME_Z_qQ(const HLV & pa, const HLV & pb, const HLV & p1, const HLV & p2, const HLV & pep, const HLV & pem, const ParticleID aptype, const ParticleID bptype, ParticleProperties const & zprop, const double stw2, const double ctw ){ using helicity::minus; using helicity::plus; const HLV pZ = pep + pem; const COM propZ = ZProp(pZ.m2(), zprop); const COM propG = GProp(pZ.m2()); MultiArray pref_top = Z_amp_pref(aptype, propZ, propG, stw2, ctw); MultiArray pref_bot = Z_amp_pref(bptype, propZ, propG, stw2, ctw); MultiArray coeff_top = jZ_j(pa, pb, p1, p2, pep, pem); MultiArray coeff_bot = jZ_j(pb, pa, p2, p1, pep, pem); double sum_top=0.; double sum_bot=0.; double sum_mix=0.; for(auto h1: {minus, plus}){ for(auto hl: {minus, plus}){ for(auto h2: {minus, plus}){ const COM res_top = pref_top[h1][hl] * coeff_top[h1][hl][h2]; const COM res_bot = pref_bot[h2][hl] * coeff_bot[h2][hl][h1]; sum_top += norm(res_top); sum_bot += norm(res_bot); sum_mix += 2.0 * real(res_top * conj(res_bot)); } } } - - const double t1_top = (pa-p1-pZ).m2(); - const double t2_top = (pb-p2 ).m2(); - - const double t1_bot = (pa-p1 ).m2(); - const double t2_bot = (pb-p2-pZ).m2(); - - sum_top /= t1_top * t2_top; - sum_bot /= t1_bot * t2_bot; - sum_mix /= sqrt(t1_top * t2_top * t1_bot * t2_bot); - - // Colour factor: (CF*CA)/2 - // Colour and helicity average: 1/(4*Nc^2) - const double pref = (C_F*C_A) / (8*N_C*N_C); - - return {sum_top*pref, sum_bot*pref, sum_mix*pref}; + return {sum_top, sum_bot, sum_mix}; } double ME_Z_qg(const HLV & pa, const HLV & pb, const HLV & p1, const HLV & p2, const HLV & pep, const HLV & pem, const ParticleID aptype, const ParticleID /*bptype*/, ParticleProperties const & zprop, const double stw2, const double ctw ){ using helicity::minus; using helicity::plus; const HLV pZ = pep + pem; const COM propZ = ZProp(pZ.m2(), zprop); const COM propG = GProp(pZ.m2()); MultiArray pref = Z_amp_pref(aptype, propZ, propG, stw2, ctw); MultiArray coeff = jZ_j(pa, 
pb, p1, p2, pep, pem); double sum = 0.; for(auto h1: {minus, plus}){ for(auto hl: {minus, plus}){ for(auto h2: {minus, plus}){ sum += norm(pref[h1][hl] * coeff[h1][hl][h2]); } } } - sum /= (pa-p1-pZ).m2()*(pb-p2).m2(); - - // Colour factor: (CF*CA)/2 - // Colour and helicity average: 1/(4*Nc^2) - // Divide by CF because of gluon (K_g -> CA) - //! TODO explain magic 8 - sum *= C_A / (8.*N_C*N_C); - - // Multiply by CAM - return sum * K_g(p2, pb); + return sum; } std::vector ME_Zuno_qQ(const HLV & pa, const HLV & pb, const HLV & pg, const HLV & p1, const HLV & p2, const HLV & pep, const HLV & pem, const ParticleID aptype, const ParticleID bptype, ParticleProperties const & zprop, const double stw2, const double ctw ){ using helicity::minus; using helicity::plus; const HLV pZ = pep + pem; const COM propZ = ZProp(pZ.m2(), zprop); const COM propG = GProp(pZ.m2()); MultiArray prefact_top = Z_amp_pref(aptype, propZ, propG, stw2, ctw); MultiArray prefact_bot = Z_amp_pref(bptype, propZ, propG, stw2, ctw); const MultiArray coeff_top = jZuno_j(pa, pb, pg, p1, p2, pep, pem); const MultiArray coeff_bot = jZ_juno(pb, pa, p2, p1, pg, pep, pem); double sum_top=0.; double sum_bot=0.; double sum_mix=0.; for(auto h1: {minus, plus}){ for(auto hl: {minus, plus}){ for(auto h2: {minus, plus}){ for(auto hg: {minus, plus}){ const COM pref_top = prefact_top[h1][hl]; const COM x_top = coeff_top[h1][hl][h2][hg].X; const COM y_top = coeff_top[h1][hl][h2][hg].Y; const COM pref_bot = prefact_bot[h2][hl]; const COM x_bot = coeff_bot[h2][hl][h1][hg].X; const COM y_bot = coeff_bot[h2][hl][h1][hg].Y; sum_top += norm(pref_top) * (C_A*C_F*C_F/2.*(norm(x_top)+norm(y_top)) - C_F/2.*(x_top*conj(y_top)).real()); sum_bot += norm(pref_bot) * (C_A*C_F*C_F/2.*(norm(x_bot)+norm(y_bot)) - C_F/2.*(x_bot*conj(y_bot)).real()); const COM xx = C_A*C_F*C_F/2. * pref_top * x_top * conj(pref_bot * x_bot); const COM yy = C_A*C_F*C_F/2. * pref_top * y_top * conj(pref_bot * y_bot); const COM xy = -C_F/2. 
* (pref_top * x_top * conj(pref_bot * y_bot)
              + pref_top * y_top * conj(pref_bot * x_bot));
          sum_mix += 2.0 * real(xx + yy + xy);
        }
      }
    }
  }

  const double t1_top = (pa-pg-p1-pZ).m2();
  const double t2_top = (pb-p2    ).m2();

  const double t1_bot = (pa-pg-p1).m2();
  const double t2_bot = (pb-p2-pZ).m2();

  sum_top /= t1_top * t2_top;
  sum_bot /= t1_bot * t2_bot;
  sum_mix /= sqrt(t1_top * t2_top * t1_bot * t2_bot);

  //Helicity sum and average over initial states
  const double pref = 1./(4.*C_A*C_A);

  return {sum_top*pref, sum_bot*pref, sum_mix*pref};
}

double ME_Zuno_qg(const HLV & pa, const HLV & pb,
                  const HLV & pg, const HLV & p1, const HLV & p2,
                  const HLV & pep, const HLV & pem,
                  const ParticleID aptype, const ParticleID /*bptype*/,
                  ParticleProperties const & zprop,
                  const double stw2, const double ctw
){
  using helicity::minus;
  using helicity::plus;

  const HLV pZ = pep + pem;
  const COM propZ = ZProp(pZ.m2(), zprop);
  const COM propG = GProp(pZ.m2());

  MultiArray pref = Z_amp_pref(aptype, propZ, propG, stw2, ctw);
  const auto coeff = jZuno_j(pa, pb, pg, p1, p2, pep, pem);

  double sum = 0.;
  for(auto h1: {minus, plus}){
    for(auto hl: {minus, plus}){
      for(auto h2: {minus, plus}){
        for(auto hg: {minus, plus}){
          const COM X = coeff[h1][hl][h2][hg].X;
          const COM Y = coeff[h1][hl][h2][hg].Y;
          sum += norm(pref[h1][hl])
            * (C_A*C_F*C_F/2.*(norm(X)+norm(Y)) - C_F/2.*(X*conj(Y)).real());
        }
      }
    }
  }
  sum /= (pa-pg-p1-pZ).m2()*(p2-pb).m2();

  //Helicity sum and average over initial states
  sum /= (4.*C_A*C_A);

  // Multiply by CAM
  return sum * (K_g(p2, pb) / C_F);
}

} // namespace currents
} // namespace HEJ
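// Illustrative usage sketch (an assumption-laden example, not code from this diff):
// how the helicity-summed {top, bottom, mix} components returned by ME_Z_qQ are
// paired with the matching FKL ladder weights, following tree_kin_Z_FKL in the
// MatrixElement code above. All names (pa, pb, p1, pn, pep, pem, aptype, bptype,
// Zprop, stw2, ctw, begin_ladder, end_ladder, q0, q1, lambda) are assumed in scope.
//
//   const std::vector<double> current = currents::ME_Z_qQ(
//     pa, pb, p1, pn, pep, pem, aptype, bptype, Zprop, stw2, ctw);
//   const std::vector<double> ladder = FKL_ladder_weight_mix(
//     begin_ladder, end_ladder, q0, q1, pa, pb, p1, pn, lambda);
//   std::vector<double> result;
//   for(std::size_t i = 0; i < current.size(); ++i)
//     result.push_back(current[i] * ladder[i]);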