Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 12 additions & 14 deletions testing/metrics/CMakeLists.txt
100644 → 100755
Original file line number Diff line number Diff line change
@@ -1,27 +1,25 @@
set(BASELINE_DIR "${UVCDAT_GIT_TESTDATA_DIR}/baselines/metrics")
set(BASELINE_DIR "${UVCDAT_GIT_TESTDATA_DIR}/baselines/")

# test from VCS, example of a graphics test
#cdat_add_test(vcs_verify_hurricane_marker
# "${PYTHON_EXECUTABLE}"
# ${cdat_SOURCE_DIR}/testing/vcs/test_vcs_hurricane_marker.py
# ${cdat_SOURCE_DIR}/testing/vcs/test_vcs_hurricane_marker.png
#)
# test from CDMS2, example of a test script
#cdat_add_test("CDMS_Test_01"
# "${PYTHON_EXECUTABLE}"
# ${cdat_SOURCE_DIR}/testing/cdms2/cdtest01.py)
cdat_add_test("diags_test_01"
"${PYTHON_EXECUTABLE}"
${cdat_SOURCE_DIR}/testing/metrics/diagtest01.py
${UVCMETRICS_TEST_DATA_DIRECTORY} )
--datadir=${UVCMETRICS_TEST_DATA_DIRECTORY}
--baseline=${BASELINE_DIR}/metrics/ )

cdat_add_test("diags_test_02"
"${PYTHON_EXECUTABLE}"
${cdat_SOURCE_DIR}/testing/metrics/diagtest02.py
--datadir=${UVCMETRICS_TEST_DATA_DIRECTORY}
--baseline=${BASELINE_DIR})
--baseline=${BASELINE_DIR}/metrics/ )

cdat_add_test("diags_test_03"
"${PYTHON_EXECUTABLE}"
${cdat_SOURCE_DIR}/testing/metrics/diagtest03.py
--datadir=${UVCMETRICS_TEST_DATA_DIRECTORY}
--baseline=${BASELINE_DIR})
--baseline=${BASELINE_DIR}/metrics/)

cdat_add_test("diags_test_04"
"${PYTHON_EXECUTABLE}"
${cdat_SOURCE_DIR}/testing/metrics/diagtest04.py
--datadir=${UVCMETRICS_TEST_DATA_DIRECTORY}/
--baseline=${BASELINE_DIR}/metrics/ )
102 changes: 102 additions & 0 deletions testing/metrics/diags_test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
# Compute a set of contour plots using diags (diags.py).
# First argument: --datadir=<data location> - with subdirectories cam_output and obs_atmos and baseline.
# These have sample model output, observation data, and "baseline" output which we should match.
# However, the graphical output (png files) may not match in a manner suitable for automated testing.
# So the return value only depends on the numerical values in the .nc files.
# Second argument: '--keep=True' to keep (don't delete) output files*
# No attempt is made to clean up the diagnostics' cache files, which are generally in /tmp.

from metrics.common.utilities import *
from pprint import pprint
import sys, os, shutil, tempfile, subprocess
import cdms2, numpy
pth = os.path.join(os.path.dirname(__file__),"..")
sys.path.append(pth)
import checkimage
import argparse, pdb

def closeness( varname, filename, pathout, baselinepath, rtol, atol ):
    """Compare variable *varname* in a freshly computed NetCDF file against
    the baseline copy of the same file.

    *filename* is opened from both *pathout* (new results) and *baselinepath*
    (expected results) and the two arrays are compared with
    numpy.ma.allclose using tolerances *rtol* and *atol*.

    Returns True when the arrays are numerically close, False otherwise.
    """
    fname = os.path.join( pathout, filename )
    baselinefname = os.path.join( baselinepath, filename )
    f = cdms2.open( fname )
    try:
        g = cdms2.open( baselinefname )
        try:
            fvar = f(varname)
            gvar = g(varname)
            close = numpy.ma.allclose( fvar, gvar, rtol=rtol, atol=atol )
            if close:
                print("fvar and gvar are close for %s" % varname)
            else:
                print("fvar and gvar differ for %s" % varname)
                print("max difference %s" % (fvar-gvar).max())
                print("min difference %s" % (fvar-gvar).min())
        finally:
            g.close()
    finally:
        # Close both files even when the read or comparison raises, so a
        # failed test does not leak open file handles.
        f.close()
    return close

def execute(test_str, plotset, obstype, varid, season, imagefilename, imagethreshold, ncfiles, rtol, atol):
    """Run the ``diags`` script for one AMWG plot set and compare its output
    against baseline files.

    Command-line arguments (read from sys.argv):
      --datadir   root directory with ``cam_output`` and ``obs_atmos`` subdirs
      --baseline  directory holding the baseline png/nc files
      --keep      truthy to keep (not delete and not compare) the output files

    The graphical (png) comparison is informational only; the returned value
    depends solely on the numerical (nc) comparisons.

    Returns True iff every NetCDF variable in *ncfiles* is close to its
    baseline (vacuously True when --keep is given and no comparison runs).
    Raises DiagError when the diags subprocess exits nonzero.
    """
    print(test_str)

    # Silence annoying messages about how to set the NetCDF file type.
    # Anything will do.
    cdms2.setNetcdfShuffleFlag(0)
    cdms2.setNetcdfDeflateFlag(0)
    cdms2.setNetcdfDeflateLevelFlag(0)

    # Get command line args.
    p = argparse.ArgumentParser(description="Basic gm testing code for vcs")
    p.add_argument("--datadir", dest="datadir", help="root directory for model and obs data")
    p.add_argument("--baseline", dest="baseline", help="directory with baseline files for comparing results")
    p.add_argument("--keep", dest="keep", help="Iff True, will keep computed png and nc files")
    args = p.parse_args(sys.argv[1:])
    datadir = args.datadir
    baselinepath = args.baseline
    keep = args.keep  # NOTE(review): argparse yields a string, so any non-empty value (even "False") is truthy

    # Set up paths to data.
    modelpath = os.path.join( datadir, 'cam_output' )
    obspath = os.path.join( datadir, 'obs_atmos' )
    outpath = tempfile.mkdtemp() + "/"
    print("outpath= %s" % outpath)

    # Build the command line to be executed; join is clearer (and linear)
    # compared with concatenating in a loop.
    diagstr = "diags " + " ".join([
        "--outputdir %s" % outpath,
        "--model path=%s,climos=no" % modelpath,
        "--obs path=%s,filter=\"f_contains('%s')\",climos=yes" % (obspath, obstype),
        "--package AMWG",
        "--set %s" % str(plotset),
        "--var %s" % varid,
        "--seasons %s" % season ])
    print('executing')
    print(diagstr)

    # nonstandard, suitable for testing:
    proc = subprocess.Popen([diagstr], shell=True)
    proc_status = proc.wait()
    if proc_status != 0:
        raise DiagError("diags run failed")

    CLOSE = True
    if keep:
        print("save %s %s" % (imagefilename, ncfiles.keys()))
        print("output directory is = %s" % outpath)
    else:
        # Test of graphics (png) file match:
        # This just looks at the combined plot, aka summary plot, which is a
        # compound of three plots.
        imagefname = os.path.join( outpath, imagefilename )
        imagebaselinefname = os.path.join( baselinepath, imagefilename )
        graphics_result = checkimage.check_result_image( imagefname, imagebaselinefname, imagethreshold )
        print("Graphics file %s match difference: %s" % (imagefname, graphics_result))

        # Test of NetCDF data (nc) file match:
        for ncfilename, ncvars in ncfiles.items():
            for var in ncvars:
                try:
                    close = closeness( var, ncfilename, outpath, baselinepath, rtol, atol )
                    if not close:
                        print("%s in %s is not close." % (var, ncfilename))
                    CLOSE = CLOSE and close
                except Exception:
                    # Narrowed from a bare ``except:`` so KeyboardInterrupt
                    # and SystemExit still propagate; a comparison that
                    # crashed (e.g. missing file/variable) now counts as a
                    # failure instead of being silently treated as a pass.
                    print('comparison failed %s %s' % (ncfilename, var))
                    CLOSE = False

        # Clean up the temp files.
        shutil.rmtree(outpath)

    # Let callers turn the numerical comparison into an exit status
    # (the pre-refactor scripts did ``sys.exit(close)``).
    return CLOSE
Empty file modified testing/metrics/diagtest02.py
100644 → 100755
Empty file.
98 changes: 17 additions & 81 deletions testing/metrics/diagtest03.py
100644 → 100755
Original file line number Diff line number Diff line change
@@ -1,93 +1,29 @@
#!/usr/bin/env python
"""" In this file the inputs for the test are defined and passed to diags_test.execute"""
import diags_test
from metrics.packages.amwg.amwg import amwg_plot_set3

# Compute a set of contour plots using diags (diags.py).
# First argument: --datadir=<data location> - with subdirectories cam_output and obs_atmos and baseline.
# These have sample model output, observation data, and "baseline" output which we should match.
# However, the graphical output (png files) may not match in manner suitable for automated testing.
# So the return value only depends on the numerical values in the .nc files.
# Second argument: '--keep=True' to keep (don't delete) output files*
# No attempt is made to clean up the diagnostics' cache files, which are generally in /tmp.
print amwg_plot_set3.name

print 'Test 3: Diagnostic multi-line (spaghetti) plots ... ',
test_str = 'Test 3: Diagnostic multi-line (spaghetti) plots ... \n'
#run this from command line to get the files required
example = "./diagtest03.py --datadir ~/uvcmetrics_test_data/ --baseline ~/uvcdat-testdata/baselines/metrics/ --keep True"

from metrics.common.utilities import *
from pprint import pprint
import sys, os, shutil, tempfile, subprocess
import cdms2, numpy
pth = os.path.join(os.path.dirname(__file__),"..")
sys.path.append(pth)
import checkimage
import argparse

# Silence annoying messages about how to set the NetCDF file type. Anything will do.
cdms2.setNetcdfShuffleFlag(0)
cdms2.setNetcdfDeflateFlag(0)
cdms2.setNetcdfDeflateLevelFlag(0)

p = argparse.ArgumentParser(description="Basic gm testing code for vcs")
p.add_argument("--datadir", dest="datadir", help="root directory for model and obs data")
p.add_argument("--baseline", dest="baseline", help="directory with baseline files for comparing results")
p.add_argument("--keep", dest="keep", help="Iff True, will keep computed png and nc files")
args = p.parse_args(sys.argv[1:])

def closeness( varname, filename, pathout, baselinepath, rtol, atol ):
fname = os.path.join( pathout, filename )
baselinefname = os.path.join( baselinepath, filename )
f = cdms2.open( fname )
g = cdms2.open( baselinefname )
fvar = f(varname)
gvar = g(varname)
close = numpy.ma.allclose( fvar, gvar, rtol=rtol, atol=atol )
if close:
print "fvar and gvar are close for", varname
else:
print "fvar and gvar differ for", varname
print "max difference", (fvar-gvar).max()
print "min difference", (fvar-gvar).min()
f.close()
g.close()
return close

datadir = args.datadir
path1 = os.path.join( datadir, 'cam_output' )
path2 = os.path.join( datadir, 'obs_atmos' )
baselinepath = args.baseline
pathout = tempfile.mkdtemp()
print "jfp pathout=",pathout

diagstr = "diags --outputdir '%s' --model path=%s,climos=no --obs path=%s,filter=\"f_contains('NCEP')\",climos=yes --package AMWG --set 3 --var TS --seasons JJA" % (pathout,path1,path2)
# nonstandard, suitable for testing:
proc = subprocess.Popen([diagstr],shell=True)
proc_status = proc.wait()
if proc_status!=0:
raise DiagError("diags run failed")
plotset = 3
obstype = 'NCEP'
varid = 'T'
season = 'JJA'

# Test of graphics (png) file match:
# This just looks at combined plot, aka summary plot, which is a compound of three plots.
filename = 'set3_Global_JJA_TS-combined.png'
fname = os.path.join( pathout, filename )
baselinefname = os.path.join( baselinepath, filename )
threshold = 1.0e6
graphics_result = checkimage.check_result_image( fname, baselinefname, threshold )
print "Graphics file",fname,"match difference:",graphics_result
imagefilename = 'set3_Global_JJA_T-combined.png'
imagethreshold = 1.0e6
ncfiles = {}
ncfiles['set3_T_JJA_None,None.nc'] = ['set3_T_ft0_None', 'set3_T_ft1_None']
ncfiles['set3_T_JJA_difference_None,None.nc'] = ['set3_T_ft0_None_ft1_None_diff']

# Test of NetCDF data (nc) file match:
rtol = 1.0e-3
atol = 1.0e-2 # suitable for temperatures
filename = 'set3_TS_JJA_None,None.nc'
varname1 = 'set3_TS_ft0_None'
varname2 = 'set3_TS_ft1_None'
close1 = closeness( varname1, filename, pathout, baselinepath, rtol, atol )
close2 = closeness( varname2, filename, pathout, baselinepath, rtol, atol )
filename = 'set3_TS_JJA_difference_None,None.nc'
varname = 'set3_TS_ft0_None_ft1_None_diff'
close12 = closeness( varname, filename, pathout, baselinepath, rtol, atol )
close = close1 and close2 and close12

if args.keep is True:
print "saving output in",pathout
else:
shutil.rmtree(pathout)

# The exit value depends on numerical values in the NetCDF file, not on the plot.
sys.exit( close )
diags_test.execute(test_str, plotset, obstype, varid, season, imagefilename, imagethreshold, ncfiles, rtol, atol)