Index: AnalysisTools/contur/bin/CLTestSingle
===================================================================
--- AnalysisTools/contur/bin/CLTestSingle (revision 200)
+++ AnalysisTools/contur/bin/CLTestSingle (revision 201)
@@ -1,379 +1,389 @@
#!/usr/bin/env python
"""\
%prog [options] <yodafile1>
Run a test on a specified YODA file; returns limits and plots found
against all validated contur analyses.
"""
from contur import TestingFunctions as ctr
from contur import Utils as util
import rivet
import yoda
import sys
import os
from optparse import OptionParser
parser = OptionParser(usage=__doc__)
parser.add_option("-o", "--outputdir", dest="OUTPUTDIR",
                  default="./plots", help="Specify the output directory for plots.")
parser.add_option("-g", "--grid-mode", dest="GRID_MODE", action="store_true",
                  default=False, help="Run in grid mode; expects a prearranged grid of YODA files as input [DEPRECATED].")
parser.add_option("-p", "--make-plots", dest="MAKE_PLOTS", action="store_true",
                  default=False, help="Draw ratio plots.")
parser.add_option("-a", "--analysisdir", dest="ANALYSISDIR",
                  default="./Analysis", help="Output directory for analysis cards.")
parser.add_option("--hier-out", action="store_true", dest="HIER_OUTPUT", default=False,
                  help="Write output .dat files into a directory hierarchy which matches the analysis paths.")
opts, yodafiles = parser.parse_args()
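
# Example invocation (hypothetical file name), drawing ratio plots into ./plots:
#   CLTestSingle --make-plots -o ./plots myrun.yoda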
if not yodafiles and not opts.GRID_MODE:
    print "Error: You need to specify some YODA files to be plotted!"
    sys.exit(1)
def writeOutput2(output, h):
    "Choose output file name and dir"
    if opts.HIER_OUTPUT:
        hparts = h.strip("/").split("/", 1)
        ana = "_".join(hparts[:-1]) if len(hparts) > 1 else "ANALYSIS"
        outdir = os.path.join(opts.OUTPUTDIR, ana)
        outfile = '%s.dat' % hparts[-1].replace("/", "_")
    else:
        hparts = h.strip("/").split("/")
        outdir = opts.OUTPUTDIR
        outfile = '%s.dat' % "_".join(hparts)
    mkoutdir(outdir)
    outfilepath = os.path.join(outdir, outfile)
    f = open(outfilepath, 'w')
    f.write(output)
    f.close()
def mkoutdir(outdir):
    "Function to make output directories"
    if not os.path.exists(outdir):
        try:
            os.makedirs(outdir)
        except OSError:
            msg = "Can't make output directory '%s'" % outdir
            raise Exception(msg)
    if not os.access(outdir, os.W_OK):
        msg = "Can't write to output directory '%s'" % outdir
        raise Exception(msg)
class Plot(dict):
    "A tiny Plot object to help writing out the head in the .dat file"
    def __repr__(self):
        return "# BEGIN PLOT\n" + "\n".join("%s=%s" % (k, v) for k, v in self.iteritems()) + "\n# END PLOT\n\n"
def getHistos(filelist):
    """Read all analysis objects from the input file (despite the name, filelist
    is a single file path here). Only use the first occurrence of any REF
    histogram and the first occurrence of each MC histogram."""
    # Stolen from rivet-cmphistos
    refhistos = {}
    mchistos = {}
    xsec = {}
    Nev = {}
    # for infile in filelist:
    mchistos.setdefault(filelist, {})
    analysisobjects = yoda.read(filelist)
    for path, ao in analysisobjects.iteritems():
        if path.startswith('/_EVTCOUNT'):
            Nev = ao
        if path.startswith('/_XSEC'):
            xsec = ao
        # Conventionally don't plot data objects whose names start with an
        # underscore
        if os.path.basename(path).startswith("_"):
            continue
        if path.startswith('/REF/'):
            if not refhistos.has_key(path):
                refhistos[path] = ao
        else:
            if not mchistos[filelist][path if False else path] if False else not mchistos[filelist].has_key(path):
                mchistos[filelist][path] = ao
    return refhistos, mchistos, xsec, Nev
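
# Example use (hypothetical file name):
#   refhistos, mchistos, xsec, Nev = getHistos("myrun.yoda")
# mchistos is keyed by file name and then by histogram path; xsec and Nev are
# the special /_XSEC and /_EVTCOUNT objects read from the file.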
def getRivetRefData(refhistos, anas=None):
    "Find all Rivet reference data files"
    rivet_data_dirs = rivet.getAnalysisRefPaths()
    dirlist = []
    for d in rivet_data_dirs:
        if anas is None:
            import glob
            dirlist.append(glob.glob(os.path.join(d, '*.yoda')))
        else:
            dirlist.append([os.path.join(d, a + '.yoda') for a in anas])
    for filelist in dirlist:
        # TODO: delegate to getHistos?
        for infile in filelist:
            analysisobjects = yoda.read(infile)
            for path, ao in analysisobjects.iteritems():
                if path.startswith('/REF/'):
                    if not refhistos.has_key(path):
                        refhistos[path] = ao
if __name__ == '__main__':
    # Command line parsing
    opts, args = parser.parse_args()
    for f in args:
        if not os.access(f, os.R_OK):
            print "Error: cannot read from %s" % f
            sys.exit(1)
    for infile in args:
        # refhistos, mchistos = getHistos(filelist)
        # hpaths, h2ds = [], []
        # for aos in mchistos.values():
        #     for p in aos.keys():
        #         if p and p not in hpaths:
        #             hpaths.append(p)
        #         firstaop = aos[p][sorted(aos[p].keys())[0]]
        #         # TODO: Would be nicer to test via isHisto and dim or similar, or
        #         # yoda.Scatter/Histo/Profile base classes
        #         if type(firstaop) in (yoda.Histo2D, yoda.Profile2D) and p not in h2ds:
        #             h2ds.append(p)
        # need an empty dict to store our results
        scatterpoints = {}
        masterDict = {}
        heatMap = {}
+        plotdirs = [os.path.abspath(os.path.dirname(f)) for f in args]
+        plotparser = rivet.mkStdPlotParser(plotdirs)
+
        for anatype in ctr.anapool:
            masterDict[anatype] = []
        # CN.anapool()
        # for root, dirs, files in os.walk('.'):
        #     for name in files:
        #         fileliststatic = []
        #         if '.yoda' in name and 'LHC' not in name:
        #             yodafile = os.path.join(root, name)
        #             fileliststatic = str(yodafile)
        #             print "Found valid yoda file"
        #             print "Model point info"
        #             print name.strip('.yoda').split('_')[0] + " : " + name.strip('.yoda').split('_')[1]
        #             print name.strip('.yoda').split('_')[2] + " : " + name.strip('.yoda').split('_')[3]
        #         else:
        #             continue
        refhistos, mchistos, xsec, Nev = getHistos(infile)
        hpaths = []
        for aos in mchistos.values():
            for p in aos.keys():
                if p and p not in hpaths:
                    hpaths.append(p)
        getRivetRefData(refhistos)
        mapPoints = {}
        for h in hpaths:
            if refhistos.has_key('/REF' + h):
                refdata = refhistos['/REF' + h]
                # Plot info is stored manually in a function called LumiFinder; if no
                # lumi is stored against an analysis name, use that to veto testing.
                if ctr.LumiFinder(h)[0] == -1:
                    continue
                # Use this switch to view individual analyses
                # if '/ATLAS_2014_I1279489' not in h:
                #     continue
                # print 'testing: ' + h
                mcpath = '/' + infile
                mchistos[infile][h].setAnnotation('Path', mcpath + h)
                lumi = ctr.LumiFinder(h)[0]
                if lumi > 0:
                    sighisto = yoda.core.mkScatter(mchistos[infile][h])
                    # some special logic to deal with normalisation
                    normFacSig = 0.0
                    normFacRef = 0.0
                    if ctr.isNorm(h)[0]:
                        for point in refdata.points:
                            normFacRef += point.y
                        for point in sighisto.points:
                            normFacSig += point.y
                        normFacRef = ctr.isNorm(h)[1]
                        import numpy as np
                        if mchistos[infile][h].sumW2() == 0:
                            continue
                        normFacSig = (float(mchistos[infile][h].numEntries(
                        )) / float(Nev.numEntries()) * float(xsec.points[0].x))
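                        # For an area-normalised distribution this rescales the MC
                        # prediction back to an event yield: (entries in this histogram /
                        # total generated events) times the generator cross-section
                        # taken from the /_XSEC object.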
                    CLs = []
                    sigCount = []
                    bgCount = []
                    bgError = []
                    sigError = []
                    # fill test results for each bin
                    # out = np.zeros([refdata.numPoints, 1])
                    for i in range(0, refdata.numPoints):
                        global mu_test
                        mu_test = 1
                        mu_hat = 0
                        varmat = 0
                        if ctr.isNorm(h)[0]:
                            sigCount.append(mchistos[infile][h].bins[i].sumW * lumi * normFacSig)
                            bgCount.append(refdata.points[i].y * lumi * normFacRef * (refdata.points[i].xMax - refdata.points[i].xMin))
                            bgError.append(refdata.points[i].yErrs[1] * lumi * normFacRef * (refdata.points[i].xMax - refdata.points[i].xMin))
                            # sigError.append(sighisto.points[i].yErrs[1] * lumi * normFacSig)
                            sigError.append(xsec.points[0].x)
                        else:
                            sigCount.append(mchistos[infile][h].bins[i].sumW * lumi)
                            bgCount.append(refdata.points[i].y * lumi * (refdata.points[i].xMax - refdata.points[i].xMin))
                            bgError.append(refdata.points[i].yErrs[1] * lumi * (refdata.points[i].xMax - refdata.points[i].xMin))
                            sigError.append(sighisto.points[i].yErrs[1] * lumi * (sighisto.points[i].xMax - sighisto.points[i].xMin))
                        # cater for the case where the refdata bin is empty,
                        # which occurs notably in ATLAS_2014_I1307243
                        if refdata.points[i].y > 0:
                            CLs.append(ctr.confLevel([sigCount[i]], [bgCount[i]], [bgError[i]], [sigError[i]]))
                        else:
                            CLs.append(0)
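                    # Each bin is treated as a single counting test: the expected signal
                    # is the MC bin content scaled to the data luminosity, the background
                    # is the measured bin value times lumi and bin width, and
                    # ctr.confLevel combines the four single-element lists into one
                    # confidence level for that bin.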
                    anaobjects = []
                    ## DrawOnly is needed to keep the order in the Legend equal to the
                    ## order of the files on the command line
                    drawonly = []
                    ## Check if we have reference data for the histogram
                    ratioreference = None
                    if refhistos.has_key('/REF' + h):
                        refdata = refhistos['/REF' + h]
                        refdata.setAnnotation('ErrorBars', '1')
                        refdata.setAnnotation('PolyMarker', '*')
                        refdata.setAnnotation('ConnectBins', '0')
                        refdata.setAnnotation('Title', 'Data')
                        # if ctr.isNorm(h)[0]:
                        #     refdata.setAnnotation('Scale', str(normFacRef))
                        # if opts.RATIO:
                        #     ratioreference = '/REF' + h
                        anaobjects.append(refdata)
                        drawonly.append('/REF' + h)
                    drawonly.append(mcpath + h)
                    # if opts.RATIO and ratioreference is None:
                    #     ratioreference = mcpath + h
                    # if opts.RATIO and len(drawonly) > 1:
                    #     plot['RatioPlot'] = '1'
                    #     plot['RatioPlotReference'] = ratioreference
                    for i in range(0, refdata.numPoints):
                        if ctr.isNorm(h)[0]:
                            sighisto.points[i].y = sighisto.points[i].y * normFacSig + refdata.points[i].y
                        else:
                            sighisto.points[i].y = sighisto.points[i].y + refdata.points[i].y
                        sighisto.points[i].yErrs = ((refdata.points[i].yErrs[1])**2 + (sighisto.points[i].yErrs[1])**2)**0.5
                    sighisto.title = str(max(CLs))
+                    sighisto.setAnnotation('LineColor', 'red')
                    anaobjects.append(sighisto)
                    plot = Plot()
                    plot['DrawOnly'] = ' '.join(drawonly).strip()
                    plot['Legend'] = '1'
                    plot['MainPlot'] = '1'
                    plot['RatioPlotYMin'] = '1'
                    plot['LogY'] = '1'
                    plot['RatioPlot'] = '1'
+                    for key, val in plotparser.getHeaders(h).iteritems():
+                        plot[key] = val
+                    # if plotoptions.has_key("PLOT"):
+                    #     for key_val in plotoptions["PLOT"]:
+                    #         key, val = [s.strip() for s in key_val.split("=")]
+                    #         plot[key] = val
                    # ratioreference = mcpath + h
                    ratioreference = '/REF' + h
                    plot['RatioPlotReference'] = ratioreference
                    output = ''
                    output += str(plot)
                    from cStringIO import StringIO
                    sio = StringIO()
                    yoda.writeFLAT(anaobjects, sio)
                    output += sio.getvalue()
                    writeOutput2(output, h)
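                    # The .dat file written here holds the make-plots PLOT header
                    # produced by Plot.__repr__, followed by the reference data and the
                    # (data + signal) scatter in YODA FLAT format.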
                    # All these extra count checks stop any plots with no count in the most
                    # likely bin from entering the likelihood calculation; should be fixed upstream.
                    # See if any additional grouping is needed ('subpool')
                    if ctr.LumiFinder(h)[2]:
                        tempKey = h.split('/')[1] + '_' + ctr.LumiFinder(h)[2]
                        if tempKey not in mapPoints and bgCount[CLs.index(max(CLs))] > 0.0:
                            mapPoints[tempKey] = [float(max(CLs)), [sigCount[CLs.index(max(CLs))]], [bgCount[CLs.index(max(CLs))]],
                                                  [bgError[CLs.index(max(CLs))]], [sigError[CLs.index(max(CLs))]], str(h)]
                        elif bgCount[CLs.index(max(CLs))] > 0.0:
                            mapPoints[tempKey][1].append(sigCount[CLs.index(max(CLs))])
                            mapPoints[tempKey][2].append(bgCount[CLs.index(max(CLs))])
                            mapPoints[tempKey][3].append(bgError[CLs.index(max(CLs))])
                            mapPoints[tempKey][4].append(sigError[CLs.index(max(CLs))])
                            mapPoints[tempKey][5] += "," + str(h)
                    else:
                        if h not in mapPoints and bgCount[CLs.index(max(CLs))] > 0.0:
                            mapPoints[h] = [float(max(CLs)), [sigCount[CLs.index(max(CLs))]], [bgCount[CLs.index(max(CLs))]],
                                            [bgError[CLs.index(max(CLs))]], [sigError[CLs.index(max(CLs))]], str(h)]
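        # Each mapPoints entry is [best CL, [signal counts], [background counts],
        # [background errors], [signal errors], histogram path(s)], keyed either
        # by histogram path or by analysis_subpool.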
        # Scan through all points and fill each category, stored in masterDict,
        # with the counts from each grouping if it is more sensitive than the
        # previous fill
        for key in mapPoints:
            tempName = ctr.LumiFinder(key)[1]
            mapPoints[key][0] = ctr.confLevel(mapPoints[key][1], mapPoints[key][2], mapPoints[key][3],
                                              mapPoints[key][4])
            if not masterDict[tempName]:
                masterDict[tempName].append(mapPoints[key][:])
            else:
                # _overWriteFlag = False
                # _pointExistsFlag = False
                for listelement in masterDict[tempName]:
                    # if mapPoints[key][0] == listelement[0] and mapPoints[key][1] == listelement[1]:
                    #     _pointExistsFlag = True
                    if mapPoints[key][0] > listelement[0]:
                        masterDict[tempName][masterDict[tempName].index(listelement)] = mapPoints[key][:]
                    # listelement = mapPoints[key][:]
                    # _overWriteFlag = True
                    # if _overWriteFlag == False:
                    #     masterDict[tempName].append(mapPoints[key][:])
                    # else if mapPoints[key][]
                    # else if mapPoints[key][2] > masterDict[tempName]
                    # masterDict[]
        import pickle
        # print everything out
        sigfinal = []
        bgfinal = []
        bgerrfinal = []
        sigerrfinal = []
        for key in masterDict:
            if masterDict[key]:
                sigfinal.extend(map(list, zip(*masterDict[key])[1])[0])
                bgfinal.extend(map(list, zip(*masterDict[key])[2])[0])
                bgerrfinal.extend(map(list, zip(*masterDict[key])[3])[0])
                # fill the signal errors too, so confLevel gets matching-length lists
                sigerrfinal.extend(map(list, zip(*masterDict[key])[4])[0])
                masterDict[key].sort(key=lambda x: x[0])
                util.writeOutput(masterDict[key], key + ".dat")
                with open("./ANALYSIS/" + key + '.map', 'w') as f:
                    pickle.dump(masterDict[key], f)
        # print sigfinal
        # print bgerrfinal
        # print bgfinal
        if len(sigfinal) > 0:
            print "Combined CL: " + str(ctr.confLevel(sigfinal, bgfinal, bgerrfinal, sigerrfinal))
            print "Based on " + str(len(sigfinal)) + " counting tests found"
        print "\nMore details output to the ANALYSIS folder"
        # print 'Run finished, analysis output to folder "ANALYSIS"'