
Index: nnlo-bridge/trunk/src/applgrid_utils.cxx
===================================================================
--- nnlo-bridge/trunk/src/applgrid_utils.cxx (revision 1536)
+++ nnlo-bridge/trunk/src/applgrid_utils.cxx (revision 1537)
@@ -1,584 +1,584 @@
//
// @file applgrid_utils.cxx
//
//
// @author M.Sutton
//
// Copyright (C) 2016 M.Sutton (sutt@cern.ch)
//
// $Id: applgrid_utils.cxx, v0.0 Mon 25 Apr 2016 15:03:34 CEST sutt $
#include "amconfig.h"
#ifdef HAVE_APPL
#include <iostream>
#include <set>
#include "appl_grid/appl_grid.h"
#include "appl_grid/appl_pdf.h"
#include "appl_grid/lumi_pdf.h"
#include "appl_grid/appl_timer.h"
#include "nnlo_common.h"
#include "nnlo_utils.h"
#include "applgrid_utils.h"
#include "phaseCache.h"
#include "mapCache.h"
extern "C" void evolvepdf_(const double& x, const double& Q, double* xf);
extern "C" double alphaspdf_(const double& Q);
extern bool first_run;
//static std::string _gridname; // = "nnlojet.root";
static std::vector<appl::grid*> _grids;
static std::map<int, appl::grid*> _gridmap;
static std::map<int, std::string> _gridnames;
static std::map<appl::grid*, mapCache*> _cachemap;
struct timeval atimer;
namespace nnlo {
void fill_appl_internal( appl::grid* _grid,
const int& order,
const double& x1, const double& x2, const double& Q2,
const double& obs, int iproc, const double* wgt );
}
std::string findname( const int& id ) {
std::map<int, std::string>::iterator gitr = _gridnames.find(id);
if ( gitr!=_gridnames.end() ) return gitr->second;
return "";
}
template<typename T>
std::ostream& operator<<( std::ostream& s, const std::vector<T>& v ) {
for ( size_t i=0 ; i<v.size() ; i++ ) s << "\t" << v[i];
return s;
}
int getorder( int iproc ) {
#if 1
/// get subprocess index
/// FIXME: just for the time being - sort out which order we
/// are dealing with properly later
int iorder = ndimcurrent_.norder; // wrong, but overwritten below
if ( iproc>=1 && iproc<=12 ) iorder = 0;
else if ( iproc>12 && iproc<=162 ) iorder = 1;
else if ( iproc>162 ) iorder = 2;
return iorder;
#else
// 0 Born 0 1 1 0 2
//
// nv :
// V, VR, VV = 2 otherwise 0
//
// channels:
// channel_.iloops(iproc);
// VV = 2, VR V = 1 otherwise =0
// iloops + norder
/// use this ???
int nloops = channel_.iloops[iproc-1]; /// loop order
int ipars = channel_.ipars[iproc-1]; /// number of additional particles
int nv = ndimcurrent_.nv; /// 0 - tree level , 2 - virtual, (1 - dis?)
return nloops + ipars;
#endif
}
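/// illustrative mapping (a sketch, simply reading off the iproc ranges used above):
///   getorder(5)   -> 0   (  1 <= iproc <= 12  : LO   )
///   getorder(100) -> 1   ( 13 <= iproc <= 162 : NLO  )
///   getorder(200) -> 2   (       iproc > 162  : NNLO )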
appl::grid* find( const int& id ) {
std::map<int, appl::grid*>::iterator gitr = _gridmap.find(id);
if ( gitr!=_gridmap.end() ) return gitr->second;
std::cerr << "find() requested grid id " << id << " not found" << std::endl;
return 0;
}
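/// return the weight cache associated with a grid: if none exists yet and
/// nproc > 0, lazily create a new mapCache with nproc subprocess slots and
/// register it; with nproc == 0 an unknown grid simply yields a null pointer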
mapCache* findcache( appl::grid* gptr, int nproc=0 ) {
std::map<appl::grid*, mapCache*>::iterator itr = _cachemap.find(gptr);
if ( itr!=_cachemap.end() ) return itr->second;
if ( nproc>0 ) {
// std::cerr << "findcache() requested cachemap for grid " << gptr << " not found" << std::endl;
std::pair< std::map<appl::grid*, mapCache*>::iterator, bool > status = _cachemap.insert( std::map<appl::grid*, mapCache*>::value_type( gptr, new mapCache( nproc ) ) );
return status.first->second;
}
return 0;
}
void nnlo::init_applgrid( const int& id, const std::string& gridname_,
int nbins, double lo, double hi ) {
// From the NNLOJET common blocks?
int leading_order = inppar_.njets;
int nloops = 2;
// Book APPLGRID
std::cout << "### nnlo::init(): Book APPLGRID ..." << std::endl;
std::cout << "\tid: " << id << "\n";
std::cout << "\tfile: " << gridname_;
std::string gridname = gridname_;
size_t applpos = gridname.find(".appl");
if ( applpos!=std::string::npos ) gridname.replace( applpos, 5, "-appl" );
if ( gridname.find(".root")==std::string::npos ) gridname += ".root";
if ( gridname!=gridname_ ) std::cout << "\t->\t" << gridname << "\n";
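/// e.g. (hypothetical names) "zj.appl"      -> "zj-appl.root"
///                           "zj"           -> "zj.root"
///                           "zj-appl.root" -> unchanged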
std::cout << "\tbins: " << nbins << "\tlo: " << lo << "\thi: " << hi << std::endl;
// If the file exists, the APPLGRID warmup has already been done
if ( file_exists( gridname ) ) {
std::cout << "################### File exists ..." << std::endl;
// Open the existing grid
// std::cout << "nnlo::init() ready " << _ready << std::endl;
std::cout << "nnlo::init() reading grid " << gridname << " ..." << std::endl;
appl::grid* _grid = new appl::grid( gridname );
_gridnames.insert( std::map<int, std::string>::value_type( id, gridname ) );
_gridmap.insert( std::map<int, appl::grid*>::value_type( id, _grid ) );
_grid->optimise(true);
first_run = false;
std::cout << *_grid << std::endl;
} else {
std::cout << "nnlo::init() booking appl-grid from scratch with name " << gridname << " ..." << std::endl;
/// Create a new grid - these parameters should be set from
/// values in the NNLOJET code
/// eta bins from eta0_a_gg5g0q_i1_n1_z2_q1_r001
/// need to allow setting these from the Fortran ???
int Nobs = nbins;
double obsmin = lo;
double obsmax = hi; //don't change this anymore !!!!!
std::cout << "booking grid: " << id << "\tnbins " << Nobs << "\trange " << lo << " - " << hi << std::endl;
int NQ2 = 20;
double Q2min = 10;
double Q2max = 4*14000*14000;
double Q2order = 4;
int Nx = 40;
double xmin = 0.00001;
double xmax = 0.9999999;
double xorder = 5;
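/// interpolation settings: NQ2 nodes (of order Q2order) in Q2 and
/// Nx nodes (of order xorder) in each of x1, x2, spanning the
/// ranges set by the limits above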
// int leading_order = inppar_.njets;
// int nloops = inppar_.nloop;
std::cout << " njets: " << inppar_.njets << std::endl; // << "\tnloops " << inppar_.nloop << std::endl;
//std::string genpdf = "ZJ-V.config";
//std::string genpdf = "ZJ-R.config";
//std::string genpdf = "ZJ-LO.config";
std::string genpdf = "ZJ-LO.config:ZJ-NLO.config:ZJ-NNLO.config";
//std::string genpdf = "ZJ-RR.config";
std::cout << "creating grid " << genpdf << std::endl;
// lumi_pdf lumi( genpdf );
// lumi.write_latex( std::cout );
// double apramval=5.;
// appl::grid::transformvar(apramval);
int nigrids = 0;
if ( nloops == 0 ) nigrids = 0;
if ( nloops == 1 ) nigrids = 1;
if ( nloops == 2 ) nigrids = 2;
appl::grid* _grid = new appl::grid( NQ2, Q2min, Q2max, Q2order,
Nx, xmin, xmax, xorder,
Nobs, obsmin, obsmax,
genpdf, leading_order, nigrids );
// Use the reweighting function
_grid->reweight(true);
// Add documentation
_grid->addDocumentation( banner() );
_gridnames.insert( std::map<int, std::string>::value_type( id, gridname ) );
_gridmap.insert( std::map<int, appl::grid*>::value_type( id, _grid ) );
}
}
void nnlo::fill_applgrid( const int& id,
const double& x1, const double& x2, const double& Q2,
const double& obs,
const double* wt )
{
appl::grid* _grid = find( id );
if ( _grid ) {
// 0 Born 0 1 1 0 2
//
// nv :
// V, VR, VV = 2 otherwise 0
//
// channels:
// channel_.iloops(iproc);
// VV = 2, VR V = 1 otherwise =0
// iloops + norder
int iproc = currentprocess_.iproc;
int iorder = getorder( iproc );
const lumi_pdf* pdf = dynamic_cast<const lumi_pdf*>( ( (const appl::grid*)_grid)->genpdf(iorder) );
/// use common/partid/ rather than common/idparton/ ??
// int ip1 = partid_.ip1;
// int ip2 = partid_.ip2;
// std::cout << "nnlo::fill() pdf " << pdf << "\tpartons " << ip1 << " " << ip2 << std::endl;
int iprocess = pdf->decideSubProcess( iproc );
// std::cout << "nnlo::fill() iproc " << iproc << " " << iprocess << std::endl;
static bool trimgrid = true;
if ( trimgrid ) {
atimer = appl_timer_start();
// if ( mpc == 0 ) mpc = new mapCache( pdf->size() );
std::cout << " grid size " << _grid->size()*1e-6 << " Mbytes";
for ( int iord=0 ; iord<=_grid->nloops() ; iord++ ) if ( iord!=iorder ) _grid->trim(iord);
std::cout << " -> " << _grid->size()*1e-6 << " Mbytes" << std::endl;
trimgrid = false;
}
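/// (the trim above drops the interpolation tables for the orders this
/// contribution never fills - cf. the before/after size printout - which
/// can reduce the in-memory footprint of the grid considerably)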
if ( iprocess!=-1 ) {
std::vector<double> wgt(pdf->size(), 0);
wgt[iprocess] = wt[0];
// std::vector<double> weights(7,0);
// for ( int j=7 ; j-- ; ) weights[j] = wt[j];
/// NB: now pass the weights in as a parameter ...
/// choose which of the maxscl=3 weights you actually
/// want to use
// weights[iprocess] = gridweights_.gridwgt[0];
// std::cout << "nnlo::fill() x1 " << x1 << "\tx2 " << x2 << "\tQ2 " << Q2 << std::endl;
#if 0
std::cout << " Order: njets: " << inppar_.njets
<< "\tg.nloops(): " << _grid->nloops()
<< "\tiproc " << iproc
<< "\tiorder " << iorder << "\tnv " << ndimcurrent_.nv
<< "\tobs " << obs << "\tx: " << x1 << " " << x2 << "\tQ2 " << Q2 << "\twgt " << wt[0] << std::endl;
#endif
// fill_appl_internal( _grid, iorder, x1, x2, Q2, obs, &weights[0] );
fill_appl_internal( _grid, iorder, x1, x2, Q2, obs, iprocess, &wgt[0] );
}
}
}
double gtime = 0;
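/// internal fill: scale the single non-zero subprocess weight by the cached
/// (2 pi / alpha_s)^as_order factor for this order, then route it either to
/// the phase-space (warmup) fill or to the real grid fill; ndimcurrent_.nv
/// distinguishes the tree-level ( nv==0 ) and virtual ( nv==2 ) contributions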
void nnlo::fill_appl_internal( appl::grid* _grid,
const int& order,
const double& x1, const double& x2, const double& Q2,
const double& obs,
int iproc, const double* wgt ) {
// static int Nsubproc = _grid->subProcesses( inppar_.nloop );
const lumi_pdf* pdf = dynamic_cast<const lumi_pdf*>(((const appl::grid*)_grid)->genpdf( ndimcurrent_.norder ) );
std::vector<double> weight(pdf->size(),0);
static double factor[4] = { 1, 1, 1, 1 };
static bool bfactor[4] = { false, false, false, false };
// int as_order = inppar_.njets + inppar_.nloop;
int as_order = inppar_.njets + order;
double alphas = alphaspdf_( std::sqrt(Q2) );
if ( !bfactor[order] ) {
bfactor[order] = true;
for ( int iorder=0 ; iorder<as_order ; iorder++ ) factor[order] *= 2*M_PI/alphas;
}
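/// the weights are filled with the as_order powers of alpha_s/(2 pi) divided
/// out, so that they can be reinstated with the running coupling at
/// convolution time; note that the factor is computed only once per order,
/// from the first event's sqrt(Q2), and then cached in the static array above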
int nv = ndimcurrent_.nv;
if ( nv==0 ) {
// tree level
struct timeval _atimer = appl_timer_start();
-#define USECACHE
+ // #define USECACHE
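/// NB: this revision disables USECACHE, so the cached-fill machinery below
/// is compiled out and each weight is passed to the grid fill directly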
#ifdef USECACHE
/// find the cache corresponding to this grid
mapCache* mpc = findcache( _grid, pdf->size() );
/// have the kinematics changed ?
bool mupdate = mpc->changed( obs, order, x1, x2, Q2, nv );
static int mcached = 0;
// std::cout << iproc << "\tmupdate: " << mupdate << "\tobs " << obs << "\tkin " << x1 << " " << x2 << " " << Q2 << std::endl;
/// if they have, then flush the buffer and update the grid
if ( mupdate ) {
// std::cout << iproc << "\tupdate: " << mupdate << "\tobs " << obs << std::endl;
if ( mpc->filled() ){
// std::cout << "\tflushing: mpc " << mcached << std::endl;
mapCache::const_iterator itr = mpc->begin();
mapCache::const_iterator iend = mpc->end();
while( itr!=iend ) {
// std::cout << "\tflushing obs " << itr->first << std::endl;
if ( first_run ) _grid->fill_phasespace( mpc->mx1, mpc->mx2, mpc->mQ2, itr->first, &itr->second[0], mpc->morder );
else _grid->fill_grid( mpc->mx1, mpc->mx2, mpc->mQ2, itr->first, &itr->second[0], mpc->morder );
itr++;
}
}
mpc->update( obs, order, x1, x2, Q2, nv );
mcached = 0;
}
std::vector<double>& mwgt = mpc->weights( obs );
mwgt.at( iproc ) += wgt[iproc]*factor[order];
mcached++;
// std::cout << "fill: " << pc->weights() << std::endl;
#else
weight[iproc] = wgt[iproc];
weight[iproc] *= factor[order];
if ( first_run ) _grid->fill_phasespace( x1, x2, Q2, obs, &weight[0], order );
else _grid->fill_grid( x1, x2, Q2, obs, &weight[0], order );
#endif
gtime += appl_timer_stop( _atimer );
}
else if ( nv==2 ) {
/// virtual term
/// FIXME: move all these calculations of x1, x2 etc into the
/// calling function, to calculate once and for all for appl and fnlo,
/// and then pass in the "scaled" x1 and x2 values
int ix = gridix_.igx; /// get this from our new, added common block
double _x1 = x1;
double _x2 = x2;
if ( ix==2 ) _x2 = x2/xregions_.xreg2;
else if ( ix==3 ) _x1 = x1/xregions_.xreg1;
else if ( ix==4 ) {
_x1 = x1/xregions_.xreg1;
_x2 = x2/xregions_.xreg2;
}
double jac = 1;
if ( jacobian_.jflag ) jac = 1/((1-x1)*(1-x2)); /// *not* 1/((1-_x1)(1-_x2))
if ( ix==2 ) jac *= 1./xregions_.xreg2;
else if ( ix==3 ) jac *= 1./xregions_.xreg1;
else if ( ix==4 ) jac *= 1./(xregions_.xreg1*xregions_.xreg2);
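/// in summary: region ix selects which momentum fractions are rescaled,
/// x1 -> x1/xreg1 and/or x2 -> x2/xreg2, with the matching 1/xreg factors
/// absorbed into the jacobian; with jflag set, an additional
/// 1/((1-x1)(1-x2)) factor in the *unscaled* x is included as well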
weight[iproc] = wgt[iproc];
weight[iproc] *= factor[order]*jac;
if ( first_run ) _grid->fill_phasespace( _x1, _x2, Q2, obs, &weight[0], order );
else _grid->fill_grid( _x1, _x2, Q2, obs, &weight[0], order );
}
}
void nnlo::fill_ref_applgrid( const int& id, const double& obs, const double* wt ) {
appl::grid* _grid = find( id );
if ( _grid ) _grid->getReference()->Fill( obs, wt[0] );
}
void nnlo::term_applgrid( const int& id, int nweights ) {
double atime = appl_timer_stop( atimer );
// Write APPLGRID
std::cout << "global fill time: " << gtime << " ms" << std::endl;
static int icount = 0;
std::cout << "### nnlo::term(): Write APPLGRID ... " << icount << "\ttime " << atime << " ms" << std::endl;
icount++;
appl::grid* _grid = find( id );
if ( _grid==0 ) return;
#ifdef USECACHE
/// flush cached weights to grid
// std::cout << "\tflushing: mpc " << std::endl;
/// get the cache corresponding to this grid
mapCache* mpc = findcache( _grid );
mapCache::const_iterator itr = mpc->begin();
mapCache::const_iterator iend = mpc->end();
while( itr!=iend ) {
// std::cout << "\tflushing obs " << itr->first << std::endl;
if ( first_run ) _grid->fill_phasespace( mpc->mx1, mpc->mx2, mpc->mQ2, itr->first, &itr->second[0], mpc->morder );
else _grid->fill_grid( mpc->mx1, mpc->mx2, mpc->mQ2, itr->first, &itr->second[0], mpc->morder );
itr++;
}
#endif
std::string gridname = findname( id );
if ( gridname=="" ) return;
/// keep track of which grids we've already called
/// termination on
static std::set<int> term_id;
if ( term_id.find(id)!=term_id.end() ) return;
term_id.insert( id );
/// we need to scale by the bin width ???
// double nweights = icount;
/// _grids.back()->getReference()->Scale(1/(conv*binwidth[nh]/nweights));
_grid->run() = nweights;
/// hmmm, we divide by the bin width after the convolution, so
/// if the weights are *already* divided by the bin width, we will
/// need to scale *up* the weights in each bin by the bin width -
/// as a hack ( for fixed binning ) do that here ...
// TH1D* h = _grid->getReference();
/// scale up the grid by the number of weights - vegas already
/// includes a 1/nshots term for the integration
/// the grid will always be stored "unweighted"
(*_grid) *= nweights;
/// scale up the reference histogram, since it already is correctly
/// weighted after the filling, but we scale it down by the number
/// of weights when we write it out, so that the histogram in the
/// root file is the actual cross section
/// _grids.back()->getReference()->Scale( nweights );
Normalise( _grid->getReference() );
_grid->Write( gridname );
}
#endif
