From 6b17573f068da48a4084e3ff6e95a9796a5a1a03 Mon Sep 17 00:00:00 2001 From: Gautam Bisht Date: Wed, 2 May 2018 13:47:50 -0700 Subject: [PATCH 01/59] Fixes creation of coupler history filename Incorporates fix from billsacks/cime@4cc4f2b [BFB] --- src/drivers/mct/main/seq_hist_mod.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/drivers/mct/main/seq_hist_mod.F90 b/src/drivers/mct/main/seq_hist_mod.F90 index 259a3472947..b643fff8eb7 100644 --- a/src/drivers/mct/main/seq_hist_mod.F90 +++ b/src/drivers/mct/main/seq_hist_mod.F90 @@ -1142,7 +1142,7 @@ subroutine seq_hist_writeaux(infodata, EClock_d, comp, flow, aname, dname, & yy = yy + yr_offset end if call shr_cal_ymdtod2string(date_str, yy, mm, dd) - write(hist_file(found), "(a6)") & + write(hist_file(found), "(6a)") & trim(case_name),'.cpl.h',trim(aname),'.',trim(date_str), '.nc' else fk1 = 2 From 3853deea52ed72762f374a92cd6529c45c91e221 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Wed, 25 Apr 2018 10:29:30 -0600 Subject: [PATCH 02/59] Replace dots in branch name with underscores So that the dot cannot go on to make an invalid case name. [BFB] --- scripts/Tools/jenkins_generic_job | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/scripts/Tools/jenkins_generic_job b/scripts/Tools/jenkins_generic_job index 2373c497769..35595046441 100755 --- a/scripts/Tools/jenkins_generic_job +++ b/scripts/Tools/jenkins_generic_job @@ -1,7 +1,7 @@ #!/usr/bin/env python """ -Jenkins runs this script to perform a test of an e3sm +Jenkins runs this script to perform a test of an e3sm test suite. Essentially, a wrapper around create_test and wait_for_tests that handles cleanup of old test results and ensures that the batch system is left in a clean state. 
@@ -41,6 +41,9 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter CIME.utils.setup_standard_logging_options(parser) + default_baseline = CIME.utils.get_current_branch(repo=CIME.utils.get_cime_root()) + default_baseline = default_baseline.replace(".", "_") # Can't have dots + parser.add_argument("-g", "--generate-baselines", action="store_true", help="Generate baselines") @@ -59,7 +62,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("-p", "--cdash-project", default=CIME.wait_for_tests.E3SM_MAIN_CDASH, help="The name of the CDash project where results should be uploaded") - parser.add_argument("-b", "--baseline-name", default=CIME.utils.get_current_branch(repo=CIME.utils.get_cime_root()), + parser.add_argument("-b", "--baseline-name", default=default_baseline, help="Baseline name for baselines to use. Also impacts dashboard job name. Useful for testing a branch other than next or master") parser.add_argument("-O", "--override-baseline-name", From 3cefbb535431e04d726bbc6ca38c0a0249c3ff5c Mon Sep 17 00:00:00 2001 From: James Foucar Date: Wed, 25 Apr 2018 11:03:48 -0600 Subject: [PATCH 03/59] Take out slashes too --- scripts/Tools/jenkins_generic_job | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/Tools/jenkins_generic_job b/scripts/Tools/jenkins_generic_job index 35595046441..88e3299c855 100755 --- a/scripts/Tools/jenkins_generic_job +++ b/scripts/Tools/jenkins_generic_job @@ -42,7 +42,7 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter CIME.utils.setup_standard_logging_options(parser) default_baseline = CIME.utils.get_current_branch(repo=CIME.utils.get_cime_root()) - default_baseline = default_baseline.replace(".", "_") # Can't have dots + default_baseline = default_baseline.replace(".", "_").replace("/", "_") # Dots or slashes will mess things up parser.add_argument("-g", "--generate-baselines", action="store_true", help="Generate baselines") From f5dc00f8ac7c18909205aba6300dc63a49f57798 Mon Sep 
17 00:00:00 2001 From: James Foucar Date: Thu, 31 May 2018 14:03:50 -0600 Subject: [PATCH 04/59] Ignore unicode chars from command output [BFB] --- scripts/lib/CIME/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/lib/CIME/utils.py b/scripts/lib/CIME/utils.py index 300cdf93a3d..d2d05ba2261 100644 --- a/scripts/lib/CIME/utils.py +++ b/scripts/lib/CIME/utils.py @@ -378,12 +378,12 @@ def run_cmd(cmd, input_str=None, from_dir=None, verbose=None, output, errput = proc.communicate(input_str) if output is not None: try: - output = output.decode('utf-8').strip() + output = output.decode('utf-8', errors='ignore').strip() except AttributeError: pass if errput is not None: try: - errput = errput.decode('utf-8').strip() + errput = errput.decode('utf-8', errors='ignore').strip() except AttributeError: pass From 858887a5fa981a10990e750a6aaa7f0d881cc3b3 Mon Sep 17 00:00:00 2001 From: noel Date: Thu, 31 May 2018 14:56:14 -0700 Subject: [PATCH 05/59] Update module versions for edison after maintenance. 
craype, and hdf5/netcdf related modules --- config/e3sm/machines/config_machines.xml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/config/e3sm/machines/config_machines.xml b/config/e3sm/machines/config_machines.xml index 781aba0aae0..2741fad77ec 100644 --- a/config/e3sm/machines/config_machines.xml +++ b/config/e3sm/machines/config_machines.xml @@ -125,7 +125,7 @@ craype - craype/2.5.12.3 + craype/2.5.12 craype-ivybridge pmi pmi/5.0.12 @@ -160,23 +160,23 @@ PrgEnv-intel PrgEnv-gnu/6.0.4 gcc - gcc/7.2.0 + gcc/7.3.0 cray-libsci - cray-libsci/17.12.1 + cray-libsci/18.03.1 cray-netcdf-hdf5parallel cray-hdf5-parallel cray-parallel-netcdf - cray-hdf5/1.8.16 - cray-netcdf/4.4.0 + cray-hdf5/1.10.1.1 + cray-netcdf/4.4.1.1.3 cray-netcdf-hdf5parallel - cray-netcdf-hdf5parallel/4.4.0 - cray-hdf5-parallel/1.8.16 - cray-parallel-netcdf/1.6.1 + cray-netcdf-hdf5parallel/4.4.1.1.3 + cray-hdf5-parallel/1.10.1.1 + cray-parallel-netcdf/1.8.1.3 From 614a91a49b94c4a5628ad95850471d3b7b89fb28 Mon Sep 17 00:00:00 2001 From: Robert Jacob Date: Sun, 17 Jun 2018 22:41:33 -0700 Subject: [PATCH 06/59] Add evn setting to edison to disable HD5 check Set environment variable HDF5_DISABLE_VERSION_CHECK=2 for edison --- config/e3sm/machines/config_machines.xml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/config/e3sm/machines/config_machines.xml b/config/e3sm/machines/config_machines.xml index 2741fad77ec..912a4fb6536 100644 --- a/config/e3sm/machines/config_machines.xml +++ b/config/e3sm/machines/config_machines.xml @@ -188,6 +188,8 @@ 64M spread threads + + 2 yes From be0aae3f7ea8aa7798795af2b7afb749852fe540 Mon Sep 17 00:00:00 2001 From: Azamat Mametjanov Date: Thu, 21 Jun 2018 15:59:13 -0500 Subject: [PATCH 07/59] Vectorize loops in coupler attr-vector multiplications --- src/drivers/mct/main/seq_map_mod.F90 | 54 +++++++++++++-------------- src/share/util/mct_mod.F90 | 56 ++++++---------------------- src/share/util/shr_const_mod.F90 | 3 +- 3 files 
changed, 41 insertions(+), 72 deletions(-) diff --git a/src/drivers/mct/main/seq_map_mod.F90 b/src/drivers/mct/main/seq_map_mod.F90 index bd888969f65..5fa449950e2 100644 --- a/src/drivers/mct/main/seq_map_mod.F90 +++ b/src/drivers/mct/main/seq_map_mod.F90 @@ -812,18 +812,16 @@ subroutine seq_map_avNormArr(mapper, av_i, av_o, norm_i, rList, norm) ! ! Local variables ! - type(mct_sMatp) :: sMatp ! sMat type(mct_aVect) :: avp_i , avp_o integer(IN) :: j,kf integer(IN) :: lsize_i,lsize_o real(r8) :: normval - character(CX) :: lrList + character(CX) :: lrList,appnd logical :: lnorm character(*),parameter :: subName = '(seq_map_avNormArr) ' character(len=*),parameter :: ffld = 'norm8wt' ! want something unique !----------------------------------------------------- - sMatp = mapper%sMatp lsize_i = mct_aVect_lsize(av_i) lsize_o = mct_aVect_lsize(av_o) @@ -832,28 +830,26 @@ subroutine seq_map_avNormArr(mapper, av_i, av_o, norm_i, rList, norm) lnorm = norm endif - if (present(norm_i) .and..not.lnorm) then - write(logunit,*) subname,' ERROR: norm_i and norm = false' - call shr_sys_abort(subname//' ERROR norm_i and norm = false') - endif - if (present(norm_i)) then - if (size(norm_i) /= lsize_i) then - write(logunit,*) subname,' ERROR: size(norm_i) ne lsize_i ',size(norm_i),lsize_i - call shr_sys_abort(subname//' ERROR size(norm_i) ne lsize_i') - endif + if (.not.lnorm) call shr_sys_abort(subname//' ERROR norm_i and norm = false') + if (size(norm_i) /= lsize_i) call shr_sys_abort(subname//' ERROR size(norm_i) ne lsize_i') endif !--- create temporary avs for mapping --- + if (lnorm .or. 
present(norm_i)) then + appnd = ':'//ffld + else + appnd = '' + endif if (present(rList)) then - call mct_aVect_init(avp_i, rList=trim( rList)//':'//ffld, lsize=lsize_i) - call mct_aVect_init(avp_o, rList=trim( rList)//':'//ffld, lsize=lsize_o) + call mct_aVect_init(avp_i, rList=trim( rList)//trim(appnd), lsize=lsize_i) + call mct_aVect_init(avp_o, rList=trim( rList)//trim(appnd), lsize=lsize_o) else - lrList = trim(mct_aVect_exportRList2c(av_i)) - call mct_aVect_init(avp_i, rList=trim(lrList)//':'//ffld, lsize=lsize_i) - lrList = trim(mct_aVect_exportRList2c(av_o)) - call mct_aVect_init(avp_o, rList=trim(lrList)//':'//ffld, lsize=lsize_o) + lrList = mct_aVect_exportRList2c(av_i) + call mct_aVect_init(avp_i, rList=trim(lrList)//trim(appnd), lsize=lsize_i) + lrList = mct_aVect_exportRList2c(av_o) + call mct_aVect_init(avp_o, rList=trim(lrList)//trim(appnd), lsize=lsize_o) endif !--- copy av_i to avp_i and set ffld value to 1.0 @@ -861,15 +857,18 @@ subroutine seq_map_avNormArr(mapper, av_i, av_o, norm_i, rList, norm) !--- this will do the right thing for the norm_i normalization call mct_aVect_copy(aVin=av_i, aVout=avp_i, VECTOR=mct_usevector) - kf = mct_aVect_indexRA(avp_i,ffld) - do j = 1,lsize_i - avp_i%rAttr(kf,j) = 1.0_r8 - enddo - - if (present(norm_i)) then + if (lnorm .or. present(norm_i)) then + kf = mct_aVect_indexRA(avp_i,ffld) do j = 1,lsize_i - avp_i%rAttr(:,j) = avp_i%rAttr(:,j)*norm_i(j) + avp_i%rAttr(kf,j) = 1.0_r8 enddo + + if (present(norm_i)) then + !$omp simd + do j = 1,lsize_i + avp_i%rAttr(:,j) = avp_i%rAttr(:,j)*norm_i(j) + enddo + endif endif !--- map --- @@ -878,14 +877,15 @@ subroutine seq_map_avNormArr(mapper, av_i, av_o, norm_i, rList, norm) call shr_sys_abort(subname//' ERROR: esmf SMM not supported') else ! 
MCT based SMM - call mct_sMat_avMult(avp_i, sMatp, avp_o, VECTOR=mct_usevector) + call mct_sMat_avMult(avp_i, mapper%sMatp, avp_o, VECTOR=mct_usevector) endif !--- renormalize avp_o by mapped norm_i --- if (lnorm) then + kf = mct_aVect_indexRA(avp_o,ffld) + !$omp simd do j = 1,lsize_o - kf = mct_aVect_indexRA(avp_o,ffld) normval = avp_o%rAttr(kf,j) if (normval /= 0.0_r8) then normval = 1.0_r8/normval diff --git a/src/share/util/mct_mod.F90 b/src/share/util/mct_mod.F90 index 1eeb0427296..7ef9e2729fa 100644 --- a/src/share/util/mct_mod.F90 +++ b/src/share/util/mct_mod.F90 @@ -1030,34 +1030,18 @@ subroutine mct_avect_vecmult(av,vec,avlist,mask_spval) if (lmspval) then -#ifdef CPP_VECTOR - do m=1,nfld -!CDIR SELECT(VECTOR) -!DIR$ CONCURRENT + !$omp simd do n=1,npts -#else - do n=1,npts - do m=1,nfld -#endif - if (.not. shr_const_isspval(av%rAttr(kfldin(m),n))) then - av%rAttr(kfldin(m),n) = av%rAttr(kfldin(m),n)*vec(n) - endif - enddo + where (.not. shr_const_isspval(av%rAttr(kfldin(:),n))) + av%rAttr(kfldin(:),n) = av%rAttr(kfldin(:),n)*vec(n) + endwhere enddo else ! lmspval -#ifdef CPP_VECTOR - do m=1,nfld -!CDIR SELECT(VECTOR) -!DIR$ CONCURRENT + !$omp simd do n=1,npts -#else - do n=1,npts - do m=1,nfld -#endif - av%rAttr(kfldin(m),n) = av%rAttr(kfldin(m),n)*vec(n) - enddo + av%rAttr(kfldin(:),n) = av%rAttr(kfldin(:),n)*vec(n) enddo endif ! lmspval @@ -1070,34 +1054,18 @@ subroutine mct_avect_vecmult(av,vec,avlist,mask_spval) if (lmspval) then -#ifdef CPP_VECTOR - do m=1,nfld -!CDIR SELECT(VECTOR) -!DIR$ CONCURRENT + !$omp simd do n=1,npts -#else - do n=1,npts - do m=1,nfld -#endif - if (.not. shr_const_isspval(av%rAttr(m,n))) then - av%rAttr(m,n) = av%rAttr(m,n)*vec(n) - endif - enddo + where (.not. shr_const_isspval(av%rAttr(:,n))) + av%rAttr(:,n) = av%rAttr(:,n)*vec(n) + endwhere enddo else ! 
lmspval -#ifdef CPP_VECTOR - do m=1,nfld -!CDIR SELECT(VECTOR) -!DIR$ CONCURRENT + !$omp simd do n=1,npts -#else - do n=1,npts - do m=1,nfld -#endif - av%rAttr(m,n) = av%rAttr(m,n)*vec(n) - enddo + av%rAttr(:,n) = av%rAttr(:,n)*vec(n) enddo endif ! lmspval diff --git a/src/share/util/shr_const_mod.F90 b/src/share/util/shr_const_mod.F90 index da781d3dfc4..dd57395eb4c 100644 --- a/src/share/util/shr_const_mod.F90 +++ b/src/share/util/shr_const_mod.F90 @@ -76,7 +76,8 @@ MODULE shr_const_mod !----------------------------------------------------------------------------- - logical function shr_const_isspval(rval) + elemental logical function shr_const_isspval(rval) +!$omp declare simd(shr_const_isspval) real(r8), intent(in) :: rval From 62857bb0063b9ce8741cbb0c860ba0ee022683d8 Mon Sep 17 00:00:00 2001 From: Patrick Worley Date: Fri, 22 Jun 2018 21:16:39 -0400 Subject: [PATCH 08/59] use shr_reprosum_calc in seq_diag_avect_mct When BFBFLAG is set to true and when INFO_DBUG > 1, the routine seq_diag_avect_mct uses a reproducible sum algorithm that is not as accurate as the algorihm implemented in shr_reprosum_calc. In particular, when summing a vector of INFs, the current algorithm returns zero. Here we replace the existing algorithm with a call to shr_reprosum_calc. This change is BFB for standard usage (INFO_DBUG == 1). It is not BFB with respect to the associated diagnostic, written to cpl.log, when INFO_DBUG > 1. However, these diagnostics are not used in the simulation, and simulation results will BFB. 
[BFB] --- src/drivers/mct/main/seq_diag_mct.F90 | 55 ++++++++------------------- 1 file changed, 16 insertions(+), 39 deletions(-) diff --git a/src/drivers/mct/main/seq_diag_mct.F90 b/src/drivers/mct/main/seq_diag_mct.F90 index efe851462c1..c6ca58e44f4 100644 --- a/src/drivers/mct/main/seq_diag_mct.F90 +++ b/src/drivers/mct/main/seq_diag_mct.F90 @@ -45,6 +45,7 @@ module seq_diag_mct use component_type_mod, only : COMPONENT_GET_DOM_CX, COMPONENT_GET_C2X_CX, & COMPONENT_GET_X2C_CX, COMPONENT_TYPE use seq_infodata_mod, only : seq_infodata_type, seq_infodata_getdata + use shr_reprosum_mod, only: shr_reprosum_calc implicit none save @@ -2237,7 +2238,9 @@ SUBROUTINE seq_diag_avect_mct(infodata, id, av, dom, gsmap, comment) integer(in) :: iam ! pe number integer(in) :: km,ka ! field indices integer(in) :: ns ! size of local AV + integer(in) :: rcode ! allocate return code real(r8), pointer :: weight(:) ! weight + real(r8), allocatable :: weighted_data(:,:) ! weighted data type(mct_string) :: mstring ! mct char type character(CL) :: lcomment ! should be long enough character(CL) :: itemc ! string converted to char @@ -2274,7 +2277,9 @@ SUBROUTINE seq_diag_avect_mct(infodata, id, av, dom, gsmap, comment) if (bfbflag) then npts = mct_aVect_lsize(AV) - allocate(weight(npts)) + allocate(weight(npts),stat=rcode) + if (rcode /= 0) call shr_sys_abort(trim(subname)//' allocate weight') + weight(:) = 1.0_r8 do n = 1,npts if (dom%data%rAttr(km,n) <= 1.0e-06_R8) then @@ -2284,57 +2289,29 @@ SUBROUTINE seq_diag_avect_mct(infodata, id, av, dom, gsmap, comment) endif enddo - allocate(maxbuf(kflds),maxbufg(kflds)) - maxbuf = 0.0_r8 + allocate(weighted_data(npts,kflds),stat=rcode) + if (rcode /= 0) call shr_sys_abort(trim(subname)//' allocate weighted_data') + weighted_data = 0.0_r8 do n = 1,npts do k = 1,kflds if (.not. 
shr_const_isspval(AV%rAttr(k,n))) then - maxbuf(k) = max(maxbuf(k),abs(AV%rAttr(k,n)*weight(n))) + weighted_data(n,k) = AV%rAttr(k,n)*weight(n) endif enddo enddo - call shr_mpi_max(maxbuf,maxbufg,mpicom,subname,all=.true.) - call shr_mpi_sum(npts,nptsg,mpicom,subname,all=.true.) - - do k = 1,kflds - if (maxbufg(k) < 1000.0*TINY(maxbufg(k)) .or. & - maxbufg(k) > HUGE(maxbufg(k))/(2.0_r8*nptsg)) then - maxbufg(k) = 0.0_r8 - else - maxbufg(k) = (1.1_r8) * maxbufg(k) * nptsg - endif - enddo + call shr_reprosum_calc (weighted_data, sumbufg, npts, npts, kflds, & + commid=mpicom) - allocate(isumbuf(kflds),isumbufg(kflds)) - isumbuf = 0 - ihuge = HUGE(isumbuf) - - do n = 1,npts - do k = 1,kflds - if (.not. shr_const_isspval(AV%rAttr(k,n))) then - if (abs(maxbufg(k)) > 1000.0_r8 * TINY(maxbufg)) then - isumbuf(k) = isumbuf(k) + int((AV%rAttr(k,n)*weight(n)/maxbufg(k))*ihuge,i8) - endif - endif - enddo - enddo - - call shr_mpi_sum(isumbuf,isumbufg,mpicom,subname) - - do k = 1,kflds - sumbufg(k) = isumbufg(k)*maxbufg(k)/ihuge - enddo - - deallocate(weight) - deallocate(maxbuf,maxbufg) - deallocate(isumbuf,isumbufg) + deallocate(weight, weighted_data) else npts = mct_aVect_lsize(AV) - allocate(weight(npts)) + allocate(weight(npts),stat=rcode) + if (rcode /= 0) call shr_sys_abort(trim(subname)//' allocate weight') + weight(:) = 1.0_r8 do n = 1,npts if (dom%data%rAttr(km,n) <= 1.0e-06_R8) then From e920874e1909905ac98035e0e3ce909d021e7954 Mon Sep 17 00:00:00 2001 From: Patrick Worley Date: Fri, 22 Jun 2018 22:47:45 -0400 Subject: [PATCH 09/59] Add option to allow INF or NaN summands in shr_reprosum_calc shr_reprosum_calc aborts if input summands include INF or NaN values. For debugging purposes, it can be useful to allow INF or NaN values, returning the IEEE standard results for such a situation (either NaN, positive INF, or negative INF, depending on the situation). An optional logical parameter, allow_infnan, is being added to the shr_reprosum_calc. When set to .true. 
the routine determines whether summands for an existing field contain NaN or INF values and returns the appropriate value without going through the reproducible sum algorithm (which is very slow and requires signficant memory when summing these special values). Other fields in a multiple field call to shr_reprosum_calc will be computed in the usual fashion. When allow_infnan == .false. or when the parameter is omitted, then the routine aborts with an informative error message when the input contain INF or NaN values, as is done currently. The default can be changed (from allow_infnan=.false. to allow_infnan=.true.) via a new optional parameter, repro_sum_allow_infnan_in, in shr_reprosum_setopts. A new drv_in namelist parameter, reprosum_allow_infnan, has also been added that will be passed to shr_reprosum_setopts to set the default. This can be set in user_nl_cpl. Since the default is not being changed, this change is BFB. If allow_infnan is set to .true., then runs that failed because of INFs or NaNs would now continue to run (longer), but jobs that did not fail with the original default will be BFB even with the default changed. [BFB] --- .../cime_config/namelist_definition_drv.xml | 13 + src/drivers/mct/main/cime_comp_mod.F90 | 3 + src/drivers/mct/shr/seq_infodata_mod.F90 | 19 +- src/share/util/shr_reprosum_mod.F90 | 243 +++++++++++++----- 4 files changed, 204 insertions(+), 74 deletions(-) diff --git a/src/drivers/mct/cime_config/namelist_definition_drv.xml b/src/drivers/mct/cime_config/namelist_definition_drv.xml index 600b56cbac7..ebc5596bf86 100644 --- a/src/drivers/mct/cime_config/namelist_definition_drv.xml +++ b/src/drivers/mct/cime_config/namelist_definition_drv.xml @@ -1470,6 +1470,19 @@ + + logical + reprosum + seq_infodata_inparm + + Allow INF and NaN in summands + default: .false. + + + .false. 
+ + + real reprosum diff --git a/src/drivers/mct/main/cime_comp_mod.F90 b/src/drivers/mct/main/cime_comp_mod.F90 index 25dbe028360..de2439aa137 100644 --- a/src/drivers/mct/main/cime_comp_mod.F90 +++ b/src/drivers/mct/main/cime_comp_mod.F90 @@ -417,6 +417,7 @@ module cime_comp_mod logical :: shr_map_dopole ! logical for dopole in shr_map_mod logical :: domain_check ! .true. => check consistency of domains logical :: reprosum_use_ddpdd ! setup reprosum, use ddpdd + logical :: reprosum_allow_infnan ! setup reprosum, allow INF and NaN in summands real(r8) :: reprosum_diffmax ! setup reprosum, set rel_diff_max logical :: reprosum_recompute ! setup reprosum, recompute if tolerance exceeded @@ -935,6 +936,7 @@ subroutine cime_pre_init2() wall_time_limit=wall_time_limit , & force_stop_at=force_stop_at , & reprosum_use_ddpdd=reprosum_use_ddpdd , & + reprosum_allow_infnan=reprosum_allow_infnan, & reprosum_diffmax=reprosum_diffmax , & reprosum_recompute=reprosum_recompute, & max_cplstep_time=max_cplstep_time) @@ -946,6 +948,7 @@ subroutine cime_pre_init2() call shr_reprosum_setopts(& repro_sum_use_ddpdd_in = reprosum_use_ddpdd, & + repro_sum_allow_infnan_in = reprosum_allow_infnan, & repro_sum_rel_diff_max_in = reprosum_diffmax, & repro_sum_recompute_in = reprosum_recompute) diff --git a/src/drivers/mct/shr/seq_infodata_mod.F90 b/src/drivers/mct/shr/seq_infodata_mod.F90 index 82a984c77dd..47ec473c25b 100644 --- a/src/drivers/mct/shr/seq_infodata_mod.F90 +++ b/src/drivers/mct/shr/seq_infodata_mod.F90 @@ -176,6 +176,7 @@ MODULE seq_infodata_mod logical :: mct_usevector ! flag for mct vector logical :: reprosum_use_ddpdd ! use ddpdd algorithm + logical :: reprosum_allow_infnan ! allow INF and NaN summands real(SHR_KIND_R8) :: reprosum_diffmax ! maximum difference tolerance logical :: reprosum_recompute ! recompute reprosum with nonscalable algorithm ! 
if reprosum_diffmax is exceeded @@ -412,6 +413,7 @@ SUBROUTINE seq_infodata_Init( infodata, nmlfile, ID, pioid, cpl_tag) real(SHR_KIND_R8) :: eps_ogrid ! ocn grid error tolerance real(SHR_KIND_R8) :: eps_oarea ! ocn area error tolerance logical :: reprosum_use_ddpdd ! use ddpdd algorithm + logical :: reprosum_allow_infnan ! allow INF and NaN summands real(SHR_KIND_R8) :: reprosum_diffmax ! maximum difference tolerance logical :: reprosum_recompute ! recompute reprosum with nonscalable algorithm ! if reprosum_diffmax is exceeded @@ -452,7 +454,8 @@ SUBROUTINE seq_infodata_Init( infodata, nmlfile, ID, pioid, cpl_tag) eps_frac, eps_amask, & eps_agrid, eps_aarea, eps_omask, eps_ogrid, & eps_oarea, esmf_map_flag, & - reprosum_use_ddpdd, reprosum_diffmax, reprosum_recompute, & + reprosum_use_ddpdd, reprosum_allow_infnan, & + reprosum_diffmax, reprosum_recompute, & mct_usealltoall, mct_usevector, max_cplstep_time, model_doi_url !------------------------------------------------------------------------------- @@ -560,6 +563,7 @@ SUBROUTINE seq_infodata_Init( infodata, nmlfile, ID, pioid, cpl_tag) eps_ogrid = 1.0e-02_SHR_KIND_R8 eps_oarea = 1.0e-01_SHR_KIND_R8 reprosum_use_ddpdd = .false. + reprosum_allow_infnan = .false. reprosum_diffmax = -1.0e-8 reprosum_recompute = .false. mct_usealltoall = .false. 
@@ -685,6 +689,7 @@ SUBROUTINE seq_infodata_Init( infodata, nmlfile, ID, pioid, cpl_tag) infodata%eps_ogrid = eps_ogrid infodata%eps_oarea = eps_oarea infodata%reprosum_use_ddpdd = reprosum_use_ddpdd + infodata%reprosum_allow_infnan = reprosum_allow_infnan infodata%reprosum_diffmax = reprosum_diffmax infodata%reprosum_recompute = reprosum_recompute infodata%mct_usealltoall = mct_usealltoall @@ -977,7 +982,8 @@ SUBROUTINE seq_infodata_GetData_explicit( infodata, cime_model, case_name, case_ lnd_nx, lnd_ny, rof_nx, rof_ny, ice_nx, ice_ny, ocn_nx, ocn_ny, & glc_nx, glc_ny, eps_frac, eps_amask, & eps_agrid, eps_aarea, eps_omask, eps_ogrid, eps_oarea, & - reprosum_use_ddpdd, reprosum_diffmax, reprosum_recompute, & + reprosum_use_ddpdd, reprosum_allow_infnan, & + reprosum_diffmax, reprosum_recompute, & atm_resume, lnd_resume, ocn_resume, ice_resume, & glc_resume, rof_resume, wav_resume, cpl_resume, & mct_usealltoall, mct_usevector, max_cplstep_time, model_doi_url, & @@ -1085,6 +1091,7 @@ SUBROUTINE seq_infodata_GetData_explicit( infodata, cime_model, case_name, case_ real(SHR_KIND_R8), optional, intent(OUT) :: eps_ogrid ! ocn grid error tolerance real(SHR_KIND_R8), optional, intent(OUT) :: eps_oarea ! ocn area error tolerance logical, optional, intent(OUT) :: reprosum_use_ddpdd ! use ddpdd algorithm + logical, optional, intent(OUT) :: reprosum_allow_infnan ! allow INF and NaN summands real(SHR_KIND_R8), optional, intent(OUT) :: reprosum_diffmax ! maximum difference tolerance logical, optional, intent(OUT) :: reprosum_recompute ! recompute if tolerance exceeded logical, optional, intent(OUT) :: mct_usealltoall ! 
flag for mct alltoall @@ -1261,6 +1268,7 @@ SUBROUTINE seq_infodata_GetData_explicit( infodata, cime_model, case_name, case_ if ( present(eps_ogrid) ) eps_ogrid = infodata%eps_ogrid if ( present(eps_oarea) ) eps_oarea = infodata%eps_oarea if ( present(reprosum_use_ddpdd)) reprosum_use_ddpdd = infodata%reprosum_use_ddpdd + if ( present(reprosum_allow_infnan)) reprosum_allow_infnan = infodata%reprosum_allow_infnan if ( present(reprosum_diffmax) ) reprosum_diffmax = infodata%reprosum_diffmax if ( present(reprosum_recompute)) reprosum_recompute = infodata%reprosum_recompute if ( present(mct_usealltoall)) mct_usealltoall = infodata%mct_usealltoall @@ -1555,7 +1563,8 @@ SUBROUTINE seq_infodata_PutData_explicit( infodata, cime_model, case_name, case_ lnd_nx, lnd_ny, rof_nx, rof_ny, ice_nx, ice_ny, ocn_nx, ocn_ny, & glc_nx, glc_ny, eps_frac, eps_amask, & eps_agrid, eps_aarea, eps_omask, eps_ogrid, eps_oarea, & - reprosum_use_ddpdd, reprosum_diffmax, reprosum_recompute, & + reprosum_use_ddpdd, reprosum_allow_infnan, & + reprosum_diffmax, reprosum_recompute, & atm_resume, lnd_resume, ocn_resume, ice_resume, & glc_resume, rof_resume, wav_resume, cpl_resume, & mct_usealltoall, mct_usevector, glc_valid_input) @@ -1661,6 +1670,7 @@ SUBROUTINE seq_infodata_PutData_explicit( infodata, cime_model, case_name, case_ real(SHR_KIND_R8), optional, intent(IN) :: eps_ogrid ! ocn grid error tolerance real(SHR_KIND_R8), optional, intent(IN) :: eps_oarea ! ocn area error tolerance logical, optional, intent(IN) :: reprosum_use_ddpdd ! use ddpdd algorithm + logical, optional, intent(IN) :: reprosum_allow_infnan ! allow INF and NaN summands real(SHR_KIND_R8), optional, intent(IN) :: reprosum_diffmax ! maximum difference tolerance logical, optional, intent(IN) :: reprosum_recompute ! recompute if tolerance exceeded logical, optional, intent(IN) :: mct_usealltoall ! 
flag for mct alltoall @@ -1835,6 +1845,7 @@ SUBROUTINE seq_infodata_PutData_explicit( infodata, cime_model, case_name, case_ if ( present(eps_ogrid) ) infodata%eps_ogrid = eps_ogrid if ( present(eps_oarea) ) infodata%eps_oarea = eps_oarea if ( present(reprosum_use_ddpdd)) infodata%reprosum_use_ddpdd = reprosum_use_ddpdd + if ( present(reprosum_allow_infnan)) infodata%reprosum_allow_infnan = reprosum_allow_infnan if ( present(reprosum_diffmax) ) infodata%reprosum_diffmax = reprosum_diffmax if ( present(reprosum_recompute)) infodata%reprosum_recompute = reprosum_recompute if ( present(mct_usealltoall)) infodata%mct_usealltoall = mct_usealltoall @@ -2257,6 +2268,7 @@ subroutine seq_infodata_bcast(infodata,mpicom) call shr_mpi_bcast(infodata%eps_ogrid, mpicom) call shr_mpi_bcast(infodata%eps_oarea, mpicom) call shr_mpi_bcast(infodata%reprosum_use_ddpdd, mpicom) + call shr_mpi_bcast(infodata%reprosum_allow_infnan, mpicom) call shr_mpi_bcast(infodata%reprosum_diffmax, mpicom) call shr_mpi_bcast(infodata%reprosum_recompute, mpicom) call shr_mpi_bcast(infodata%mct_usealltoall, mpicom) @@ -2931,6 +2943,7 @@ SUBROUTINE seq_infodata_print( infodata ) write(logunit,F0R) subname,'eps_oarea = ', infodata%eps_oarea write(logunit,F0L) subname,'reprosum_use_ddpdd = ', infodata%reprosum_use_ddpdd + write(logunit,F0L) subname,'reprosum_allow_infnan = ', infodata%reprosum_allow_infnan write(logunit,F0R) subname,'reprosum_diffmax = ', infodata%reprosum_diffmax write(logunit,F0L) subname,'reprosum_recompute = ', infodata%reprosum_recompute diff --git a/src/share/util/shr_reprosum_mod.F90 b/src/share/util/shr_reprosum_mod.F90 index 9acfa54813c..a8ef29c1b15 100644 --- a/src/share/util/shr_reprosum_mod.F90 +++ b/src/share/util/shr_reprosum_mod.F90 @@ -38,7 +38,11 @@ module shr_reprosum_mod use shr_log_mod, only: s_loglev => shr_log_Level use shr_log_mod, only: s_logunit => shr_log_Unit use shr_sys_mod, only: shr_sys_abort - use shr_infnan_mod,only: shr_infnan_isnan, shr_infnan_isinf + use 
shr_infnan_mod,only: shr_infnan_inf_type, assignment(=), & + shr_infnan_posinf, shr_infnan_neginf, & + shr_infnan_nan, & + shr_infnan_isnan, shr_infnan_isinf, & + shr_infnan_isposinf, shr_infnan_isneginf use perf_mod !----------------------------------------------------------------------- @@ -86,12 +90,15 @@ module shr_reprosum_mod !---------------------------------------------------------------------------- logical :: repro_sum_use_ddpdd = .false. + logical :: repro_sum_allow_infnan = .false. + CONTAINS ! !======================================================================== ! subroutine shr_reprosum_setopts(repro_sum_use_ddpdd_in, & + repro_sum_allow_infnan_in, & repro_sum_rel_diff_max_in, & repro_sum_recompute_in, & repro_sum_master, & @@ -104,6 +111,8 @@ subroutine shr_reprosum_setopts(repro_sum_use_ddpdd_in, & !------------------------------Arguments-------------------------------- ! Use DDPDD algorithm instead of fixed precision algorithm logical, intent(in), optional :: repro_sum_use_ddpdd_in + ! Allow INF or NaN in summands + logical, intent(in), optional :: repro_sum_allow_infnan_in ! maximum permissible difference between reproducible and ! 
nonreproducible sums real(r8), intent(in), optional :: repro_sum_rel_diff_max_in @@ -142,6 +151,9 @@ subroutine shr_reprosum_setopts(repro_sum_use_ddpdd_in, & if ( present(repro_sum_use_ddpdd_in) ) then repro_sum_use_ddpdd = repro_sum_use_ddpdd_in endif + if ( present(repro_sum_allow_infnan_in) ) then + repro_sum_allow_infnan = repro_sum_allow_infnan_in + endif if ( present(repro_sum_rel_diff_max_in) ) then shr_reprosum_reldiffmax = repro_sum_rel_diff_max_in endif @@ -159,6 +171,14 @@ subroutine shr_reprosum_setopts(repro_sum_use_ddpdd_in, & 'distributed sum algorithm' endif + if ( repro_sum_allow_infnan ) then + write(logunit,*) 'SHR_REPROSUM_SETOPTS: ',& + 'Will calculate sum when INF or NaN are included in summands' + else + write(logunit,*) 'SHR_REPROSUM_SETOPTS: ',& + 'Will abort if INF or NaN are included in summands' + endif + if (shr_reprosum_reldiffmax >= 0._r8) then write(logunit,*) ' ',& 'with a maximum relative error tolerance of ', & @@ -185,7 +205,7 @@ end subroutine shr_reprosum_setopts ! subroutine shr_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & - nflds, ddpdd_sum, & + nflds, allow_infnan, ddpdd_sum, & arr_gbl_max, arr_gbl_max_out, & arr_max_levels, arr_max_levels_out, & gbl_max_nsummands, gbl_max_nsummands_out,& @@ -280,6 +300,10 @@ subroutine shr_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & ! use ddpdd algorithm instead ! of fixed precision algorithm + logical, intent(in), optional :: allow_infnan + ! if .true., allow INF or NaN input values. + ! if .false. (the default), then abort. + real(r8), intent(in), optional :: arr_gbl_max(nflds) ! upper bound on max(abs(arr)) @@ -312,13 +336,14 @@ subroutine shr_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & ! flag enabling/disabling testing that gmax and max_levels are ! accurate/sufficient. Default is enabled. - integer, intent(inout), optional :: repro_sum_stats(5) + integer, intent(inout), optional :: repro_sum_stats(6) ! increment running totals for ! 
(1) one-reduction repro_sum ! (2) two-reduction repro_sum ! (3) both types in one call ! (4) nonrepro_sum ! (5) global max nsummands reduction + ! (6) global lor 3*nflds reduction real(r8), intent(out), optional :: rel_diff(2,nflds) ! relative and absolute @@ -331,6 +356,8 @@ subroutine shr_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & ! ! Local workspace ! + logical :: abort_inf_nan ! flag indicating whether to + ! abort if INF or NaN found in input logical :: use_ddpdd_sum ! flag indicating whether to ! use shr_reprosum_ddpdd or not logical :: recompute ! flag indicating need to @@ -341,8 +368,23 @@ subroutine shr_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & ! are accurate/sufficient logical :: nan_check, inf_check ! flag on whether there are ! NaNs and INFs in input array + logical :: inf_nan_lchecks(3,nflds)! flags on whether there are + ! NaNs, positive INFs, or negative INFs + ! for each input field locally + logical :: inf_nan_gchecks(3,nflds)! flags on whether there are + ! NaNs, positive INFs, or negative INFs + ! for each input field + logical :: arr_gsum_infnan(nflds) ! flag on whether field sum is a + ! NaN or INF + + integer :: gbl_lor_red ! global lor reduction? (0/1) + integer :: gbl_max_red ! global max reduction? (0/1) + integer :: repro_sum_fast ! 1 reduction repro_sum? (0/1) + integer :: repro_sum_slow ! 2 reduction repro_sum? (0/1) + integer :: repro_sum_both ! both fast and slow? (0/1) + integer :: nonrepro_sum ! nonrepro_sum? (0/1) - integer :: num_nans, num_infs ! count of NaNs and INFs in + integer :: nan_count, inf_count ! local count of NaNs and INFs in ! input array integer :: omp_nthreads ! number of OpenMP threads integer :: mpi_comm ! MPI subcommunicator @@ -375,11 +417,6 @@ subroutine shr_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & integer :: max_levels(nflds) ! maximum number of levels of ! integer expansion to use integer :: max_level ! maximum value in max_levels - integer :: gbl_max_red ! 
global max local sum reduction? (0/1) - integer :: repro_sum_fast ! 1 reduction repro_sum? (0/1) - integer :: repro_sum_slow ! 2 reduction repro_sum? (0/1) - integer :: repro_sum_both ! both fast and slow? (0/1) - integer :: nonrepro_sum ! nonrepro_sum? (0/1) real(r8) :: xmax_nsummands ! dble of max_nsummands real(r8) :: arr_lsum(nflds) ! local sums @@ -396,38 +433,81 @@ subroutine shr_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & ! !----------------------------------------------------------------------- ! -! check whether input contains NaNs or INFs, and abort if so +! initialize local statistics variables + gbl_lor_red = 0 + gbl_max_red = 0 + repro_sum_fast = 0 + repro_sum_slow = 0 + repro_sum_both = 0 + nonrepro_sum = 0 - call t_startf('shr_reprosum_NaN_INF_Chk') - nan_check = .false. - inf_check = .false. - num_nans = 0 - num_infs = 0 +! set MPI communicator + if ( present(commid) ) then + mpi_comm = commid + else + mpi_comm = MPI_COMM_WORLD + endif + call t_barrierf('sync_repro_sum',mpi_comm) - nan_check = any(shr_infnan_isnan(arr)) - inf_check = any(shr_infnan_isinf(arr)) - if (nan_check .or. inf_check) then - do ifld=1,nflds - do isum=1,nsummands - if (shr_infnan_isnan(arr(isum,ifld))) then - num_nans = num_nans + 1 - endif - if (shr_infnan_isinf(arr(isum,ifld))) then - num_infs = num_infs + 1 - endif - end do - end do +! check whether should abort if input contains NaNs or INFs + abort_inf_nan = .not. repro_sum_allow_infnan + if ( present(allow_infnan) ) then + abort_inf_nan = .not. allow_infnan endif - call t_stopf('shr_reprosum_NaN_INF_Chk') - if ((num_nans > 0) .or. (num_infs > 0)) then - call mpi_comm_rank(MPI_COMM_WORLD, mypid, ierr) - write(s_logunit,37) real(num_nans,r8), real(num_infs,r8), mypid + call t_startf('shr_reprosum_INF_NaN_Chk') + +! initialize flags to indicate that no NaNs or INFs are present in the input data + inf_nan_gchecks = .false. + arr_gsum_infnan = .false. + + if (abort_inf_nan) then + +! 
check whether input contains NaNs or INFs, and abort if so + nan_check = any(shr_infnan_isnan(arr)) + inf_check = any(shr_infnan_isinf(arr)) + + if (nan_check .or. inf_check) then + + nan_count = count(shr_infnan_isnan(arr)) + inf_count = count(shr_infnan_isinf(arr)) + + if ((nan_count > 0) .or. (inf_count > 0)) then + call mpi_comm_rank(MPI_COMM_WORLD, mypid, ierr) + write(s_logunit,37) real(nan_count,r8), real(inf_count,r8), mypid 37 format("SHR_REPROSUM_CALC: Input contains ",e12.5, & " NaNs and ", e12.5, " INFs on process ", i7) - call shr_sys_abort("shr_reprosum_calc ERROR: NaNs or INFs in input") + call shr_sys_abort("shr_reprosum_calc ERROR: NaNs or INFs in input") + endif + + endif + + else + +! determine whether any fields contain NaNs or INFs, and avoid processing them +! via integer expansions + inf_nan_lchecks = .false. + + do ifld=1,nflds + inf_nan_lchecks(1,ifld) = any(shr_infnan_isnan(arr(:,ifld))) + inf_nan_lchecks(2,ifld) = any(shr_infnan_isposinf(arr(:,ifld))) + inf_nan_lchecks(3,ifld) = any(shr_infnan_isneginf(arr(:,ifld))) + end do + + call t_startf("repro_sum_allr_lor") + call mpi_allreduce (inf_nan_lchecks, inf_nan_gchecks, 3*nflds, & + MPI_LOGICAL, MPI_LOR, mpi_comm, ierr) + gbl_lor_red = 1 + call t_stopf("repro_sum_allr_lor") + + do ifld=1,nflds + arr_gsum_infnan(ifld) = any(inf_nan_gchecks(:,ifld)) + enddo + endif + call t_stopf('shr_reprosum_INF_NaN_Chk') + ! check whether should use shr_reprosum_ddpdd algorithm use_ddpdd_sum = repro_sum_use_ddpdd if ( present(ddpdd_sum) ) then @@ -439,21 +519,6 @@ subroutine shr_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & ! If not, always use ddpdd. use_ddpdd_sum = use_ddpdd_sum .or. (radix(0._r8) /= radix(0_i8)) -! initialize local statistics variables - gbl_max_red = 0 - repro_sum_fast = 0 - repro_sum_slow = 0 - repro_sum_both = 0 - nonrepro_sum = 0 - -! 
set MPI communicator - if ( present(commid) ) then - mpi_comm = commid - else - mpi_comm = MPI_COMM_WORLD - endif - call t_barrierf('sync_repro_sum',mpi_comm) - if ( use_ddpdd_sum ) then call t_startf('shr_reprosum_ddpdd') @@ -548,8 +613,8 @@ subroutine shr_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & endif call shr_reprosum_int(arr, arr_gsum, nsummands, dsummands, & nflds, arr_max_shift, arr_gmax_exp, & - arr_max_levels, max_level, validate, & - recompute, omp_nthreads, mpi_comm) + arr_max_levels, max_level, arr_gsum_infnan, & + validate, recompute, omp_nthreads, mpi_comm) ! record statistics, etc. repro_sum_fast = 1 @@ -598,13 +663,15 @@ subroutine shr_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & do ifld=1,nflds arr_exp_tlmin = MAXEXPONENT(1._r8) arr_exp_tlmax = MINEXPONENT(1._r8) - do isum=isum_beg(ithread),isum_end(ithread) - if (arr(isum,ifld) .ne. 0.0_r8) then - arr_exp = exponent(arr(isum,ifld)) - arr_exp_tlmin = min(arr_exp,arr_exp_tlmin) - arr_exp_tlmax = max(arr_exp,arr_exp_tlmax) - endif - end do + if (.not. arr_gsum_infnan(ifld)) then + do isum=isum_beg(ithread),isum_end(ithread) + if (arr(isum,ifld) .ne. 0.0_r8) then + arr_exp = exponent(arr(isum,ifld)) + arr_exp_tlmin = min(arr_exp,arr_exp_tlmin) + arr_exp_tlmax = max(arr_exp,arr_exp_tlmax) + endif + end do + endif arr_tlmin_exp(ifld,ithread) = arr_exp_tlmin arr_tlmax_exp(ifld,ithread) = arr_exp_tlmax end do @@ -628,9 +695,9 @@ subroutine shr_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & arr_gmax_exp(:) = -arr_gextremes(1:nflds,1) arr_gmin_exp(:) = arr_gextremes(1:nflds,2) -! if a field is identically zero, arr_gmin_exp still equals MAXEXPONENT -! and arr_gmax_exp still equals MINEXPONENT. In this case, set -! arr_gmin_exp = arr_gmax_exp = MINEXPONENT +! if a field is identically zero or contains INFs or NaNs, arr_gmin_exp +! still equals MAXEXPONENT and arr_gmax_exp still equals MINEXPONENT. +! 
In this case, set arr_gmin_exp = arr_gmax_exp = MINEXPONENT do ifld=1,nflds arr_gmin_exp(ifld) = min(arr_gmax_exp(ifld),arr_gmin_exp(ifld)) enddo @@ -695,10 +762,10 @@ subroutine shr_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & ! calculate sum validate = .false. - call shr_reprosum_int(arr, arr_gsum, nsummands, dsummands, nflds, & - arr_max_shift, arr_gmax_exp, max_levels, & - max_level, validate, recompute, & - omp_nthreads, mpi_comm) + call shr_reprosum_int(arr, arr_gsum, nsummands, dsummands, & + nflds, arr_max_shift, arr_gmax_exp, & + max_levels, max_level, arr_gsum_infnan, & + validate, recompute, omp_nthreads, mpi_comm) endif @@ -720,13 +787,17 @@ subroutine shr_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & !$omp default(shared) & !$omp private(ifld, isum) do ifld=1,nflds - do isum=1,nsummands - arr_lsum(ifld) = arr(isum,ifld) + arr_lsum(ifld) - end do + if (.not. arr_gsum_infnan(ifld)) then + do isum=1,nsummands + arr_lsum(ifld) = arr(isum,ifld) + arr_lsum(ifld) + end do + endif end do + call t_startf("nonrepro_sum_allr_r8") call mpi_allreduce (arr_lsum, arr_gsum_fast, nflds, & MPI_REAL8, MPI_SUM, mpi_comm, ierr) + call t_stopf("nonrepro_sum_allr_r8") call t_stopf('nonrepro_sum') @@ -748,6 +819,25 @@ subroutine shr_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & endif endif +! Set field sums to NaN and INF, as needed + do ifld=1,nflds + if (arr_gsum_infnan(ifld)) then + if (inf_nan_gchecks(1,ifld)) then + ! NaN => NaN + arr_gsum(ifld) = shr_infnan_nan + else if (inf_nan_gchecks(2,ifld) .and. inf_nan_gchecks(3,ifld)) then + ! posINF and negINF => NaN + arr_gsum(ifld) = shr_infnan_nan + else if (inf_nan_gchecks(2,ifld)) then + ! posINF only => posINF + arr_gsum(ifld) = shr_infnan_posinf + else if (inf_nan_gchecks(3,ifld)) then + ! negINF only => negINF + arr_gsum(ifld) = shr_infnan_neginf + endif + endif + end do + ! 
return statistics if ( present(repro_sum_stats) ) then repro_sum_stats(1) = repro_sum_stats(1) + repro_sum_fast @@ -755,6 +845,7 @@ subroutine shr_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & repro_sum_stats(3) = repro_sum_stats(3) + repro_sum_both repro_sum_stats(4) = repro_sum_stats(4) + nonrepro_sum repro_sum_stats(5) = repro_sum_stats(5) + gbl_max_red + repro_sum_stats(6) = repro_sum_stats(6) + gbl_lor_red endif @@ -766,7 +857,7 @@ end subroutine shr_reprosum_calc subroutine shr_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & arr_max_shift, arr_gmax_exp, max_levels, & - max_level, validate, recompute, & + max_level, skip_field, validate, recompute, & omp_nthreads, mpi_comm ) !---------------------------------------------------------------------- ! @@ -798,9 +889,14 @@ subroutine shr_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & integer, intent(in) :: mpi_comm ! MPI subcommunicator real(r8), intent(in) :: arr(dsummands,nflds) - ! input array + ! input array - logical, intent(in):: validate + logical, intent(in) :: skip_field(nflds) + ! flag indicating whether the sum for this field should be + ! computed or not (used to skip over fields containing + ! NaN or INF summands) + + logical, intent(in) :: validate ! flag indicating that accuracy of solution generated from ! 
arr_gmax_exp and max_levels should be tested @@ -920,8 +1016,10 @@ subroutine shr_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & max_error(ifld,ithread) = 0 not_exact(ifld,ithread) = 0 - i8_arr_tlsum_level(:,ifld,ithread) = 0_i8 + + if (skip_field(ifld)) cycle + do isum=isum_beg(ithread),isum_end(ithread) arr_remainder = 0.0_r8 @@ -1370,8 +1468,11 @@ subroutine shr_reprosum_ddpdd (arr, arr_gsum, nsummands, dsummands, & enddo + call t_startf("repro_sum_allr_c16") call mpi_allreduce (arr_lsum_dd, arr_gsum_dd, nflds, & MPI_COMPLEX16, mpi_sumdd, mpi_comm, ierr) + call t_stopf("repro_sum_allr_c16") + do ifld=1,nflds arr_gsum(ifld) = real(arr_gsum_dd(ifld)) enddo From 440db83e0fff8701b984ef872d75e54a1c38ac1c Mon Sep 17 00:00:00 2001 From: Azamat Mametjanov Date: Wed, 27 Jun 2018 20:36:47 -0500 Subject: [PATCH 10/59] Minor cleanup --- src/drivers/mct/main/seq_diag_mct.F90 | 60 ++++++++++----------------- 1 file changed, 23 insertions(+), 37 deletions(-) diff --git a/src/drivers/mct/main/seq_diag_mct.F90 b/src/drivers/mct/main/seq_diag_mct.F90 index c6ca58e44f4..b1f80d865cc 100644 --- a/src/drivers/mct/main/seq_diag_mct.F90 +++ b/src/drivers/mct/main/seq_diag_mct.F90 @@ -2253,11 +2253,8 @@ SUBROUTINE seq_diag_avect_mct(infodata, id, av, dom, gsmap, comment) ! 
print instantaneous budget data !------------------------------------------------------------------------------- - call seq_comm_setptrs(ID,& - mpicom=mpicom, iam=iam) - - call seq_infodata_GetData(infodata,& - bfbflag=bfbflag) + call seq_comm_setptrs(ID, mpicom=mpicom, iam=iam) + call seq_infodata_GetData(infodata, bfbflag=bfbflag) lcomment = '' if (present(comment)) then @@ -2270,25 +2267,23 @@ SUBROUTINE seq_diag_avect_mct(infodata, id, av, dom, gsmap, comment) km = mct_aVect_indexRA(dom%data,'mask') ka = mct_aVect_indexRA(dom%data,afldname) kflds = mct_aVect_nRattr(AV) - allocate(sumbuf(kflds),sumbufg(kflds)) - - sumbuf = 0.0_r8 + allocate(sumbufg(kflds),stat=rcode) + if (rcode /= 0) call shr_sys_abort(trim(subname)//' allocate sumbufg') - if (bfbflag) then - - npts = mct_aVect_lsize(AV) - allocate(weight(npts),stat=rcode) - if (rcode /= 0) call shr_sys_abort(trim(subname)//' allocate weight') + npts = mct_aVect_lsize(AV) + allocate(weight(npts),stat=rcode) + if (rcode /= 0) call shr_sys_abort(trim(subname)//' allocate weight') - weight(:) = 1.0_r8 - do n = 1,npts - if (dom%data%rAttr(km,n) <= 1.0e-06_R8) then - weight(n) = 0.0_r8 - else - weight(n) = dom%data%rAttr(ka,n)*shr_const_rearth*shr_const_rearth - endif - enddo + weight(:) = 1.0_r8 + do n = 1,npts + if (dom%data%rAttr(km,n) <= 1.0e-06_R8) then + weight(n) = 0.0_r8 + else + weight(n) = dom%data%rAttr(ka,n)*shr_const_rearth*shr_const_rearth + endif + enddo + if (bfbflag) then allocate(weighted_data(npts,kflds),stat=rcode) if (rcode /= 0) call shr_sys_abort(trim(subname)//' allocate weighted_data') @@ -2304,22 +2299,12 @@ SUBROUTINE seq_diag_avect_mct(infodata, id, av, dom, gsmap, comment) call shr_reprosum_calc (weighted_data, sumbufg, npts, npts, kflds, & commid=mpicom) - deallocate(weight, weighted_data) + deallocate(weighted_data) else - - npts = mct_aVect_lsize(AV) - allocate(weight(npts),stat=rcode) - if (rcode /= 0) call shr_sys_abort(trim(subname)//' allocate weight') - - weight(:) = 1.0_r8 - do 
n = 1,npts - if (dom%data%rAttr(km,n) <= 1.0e-06_R8) then - weight(n) = 0.0_r8 - else - weight(n) = dom%data%rAttr(ka,n)*shr_const_rearth*shr_const_rearth - endif - enddo + allocate(sumbuf(kflds),stat=rcode) + if (rcode /= 0) call shr_sys_abort(trim(subname)//' allocate sumbuf') + sumbuf = 0.0_r8 do n = 1,npts do k = 1,kflds @@ -2332,9 +2317,10 @@ SUBROUTINE seq_diag_avect_mct(infodata, id, av, dom, gsmap, comment) !--- global reduction --- call shr_mpi_sum(sumbuf,sumbufg,mpicom,subname) - deallocate(weight) + deallocate(sumbuf) endif + deallocate(weight) if (iam == 0) then ! write(logunit,*) 'sdAV: *** writing ',trim(lcomment),': k fld min/max/sum ***' @@ -2351,7 +2337,7 @@ SUBROUTINE seq_diag_avect_mct(infodata, id, av, dom, gsmap, comment) call shr_sys_flush(logunit) endif - deallocate(sumbuf,sumbufg) + deallocate(sumbufg) 100 format('comm_diag ',a3,1x,a4,1x,i3,es26.19,1x,a,1x,a) 101 format('comm_diag ',a3,1x,a4,1x,i3,es26.19,1x,a) From 3bffe6889caf5206218e30c98fd9b5036d9b95f1 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Fri, 8 Jun 2018 14:45:03 -0600 Subject: [PATCH 11/59] Update E3SM config_compilers.xml to V2 [BFB] --- config/e3sm/machines/config_compilers.xml | 3341 +++++++++++++-------- 1 file changed, 2038 insertions(+), 1303 deletions(-) diff --git a/config/e3sm/machines/config_compilers.xml b/config/e3sm/machines/config_compilers.xml index 9e74919031b..92a6bbcbf4b 100644 --- a/config/e3sm/machines/config_compilers.xml +++ b/config/e3sm/machines/config_compilers.xml @@ -1,6 +1,5 @@ - - - + + + + -D_USE_FLOW_CONTROL + FALSE - -D_USE_FLOW_CONTROL + + + -h noomp + + + + -DFORTRANUNDERSCORE -DNO_R16 -DCPRCRAY + -DDIR=NOOP + -DDIR=NOOP + + + -s real64 + + + -O2 -f free -N 255 -h byteswapio -em + -h noomp + -g -trapuv -Wuninitialized + + + -O0 + -h noomp + + TRUE + + -Wl,--allow-multiple-definition -h byteswapio + -h noomp + + - - - - -I$(CLM_PFLOTRAN_SOURCE_DIR) - -DCLM_PFLOTRAN - -DCOLUMN_MODE - -L$(CLM_PFLOTRAN_SOURCE_DIR) -lpflotran $(PETSC_LIB) + + + 
-mcmodel=medium + -fopenmp + -g -Wall -Og -fbacktrace -fcheck=bounds -ffpe-trap=invalid,zero,overflow + -O + -std=c99 + + + -D CISM_GNU=ON + + + + -DFORTRANUNDERSCORE -DNO_R16 -DCPRGNU + + FORTRAN + + -fdefault-real-8 + + + + -mcmodel=medium -fconvert=big-endian -ffree-line-length-none -ffixed-line-length-none + -fopenmp + -g -Wall -Og -fbacktrace -fcheck=bounds -ffpe-trap=invalid,zero,overflow + -O + + + -O0 + + + -ffixed-form + + + -ffree-form + + FALSE + + -fopenmp + + mpicc + mpicxx + mpif90 + gcc + g++ + gfortran + TRUE - + + + -mcmodel=medium + -fopenmp + -g -Wall -Og -fbacktrace -fcheck=bounds -ffpe-trap=invalid,zero,overflow + -O + + + -D CISM_GNU=ON + + + + -DFORTRANUNDERSCORE -DNO_R16 -DCPRGNU + + FORTRAN + + -fdefault-real-8 + + + + -mcmodel=medium -fconvert=big-endian -ffree-line-length-none -ffixed-line-length-none + -fopenmp + -g -Wall -Og -fbacktrace -fcheck=bounds -ffpe-trap=invalid,zero,overflow + -O + + + -O0 + + + -ffixed-form + + + -ffree-form + + FALSE + + -fopenmp + + mpicc + mpicxx + mpif90 + gcc + g++ + gfortran + TRUE + - - -DFORTRAN_SAME -DCPRIBM + -DFORTRAN_SAME -DCPRIBM + -WF,-D - -g -qfullpath -qmaxmem=-1 -qphsinfo - -qsuffix=f=f -qfixed=132 - -qsuffix=f=f90:cpp=F90 - -g -qfullpath -qmaxmem=-1 -qphsinfo - -qrealsize=8 - -O2 -qstrict -Q - -O3 - -qsmp=omp -qsuppress=1520-045 - -qsmp=omp -qsuppress=1520-045 - -qsmp=omp:noopt -qsuppress=1520-045 - -qsmp=omp:noopt -qsuppress=1520-045 - -qinitauto=7FF7FFFF -qflttrap=ov:zero:inv:en - -C + + -qrealsize=8 + + + -g -qfullpath -qmaxmem=-1 -qphsinfo + -O2 -qstrict -Q + -qsmp=omp -qsuppress=1520-045 + -qsmp=omp:noopt -qsuppress=1520-045 + -qinitauto=7FF7FFFF -qflttrap=ov:zero:inv:en + -C + + + -qsuffix=f=f -qfixed=132 + + + -qsuffix=f=f90:cpp=F90 + TRUE - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -DFORTRANUNDERSCORE -DNO_SHR_VMATH -DNO_R16 -DCPRPGI - -gopt -time - - - - - -mp - -mp - -mp - - -Mfixed - -Mfree - -r8 - - -i4 -gopt -time -Mstack_arrays -Mextend -byteswapio 
-Mflushz -Kieee - -O0 -g -Ktrap=fp -Mbounds -Kieee - - -mp - - -O0 -g -Ktrap=fp -Mbounds -Kieee - -Mnovect - -Mnovect - -Mnovect - -Mnovect - -Mnovect - -Mnovect - -time -Wl,--allow-multiple-definition - pgcc - pgf95 - pgc++ - mpicc - mpif90 - mpicxx - - CXX - - - - FALSE - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -DFORTRANUNDERSCORE -DNO_SHR_VMATH -DNO_R16 -DUSE_CUDA_FORTRAN -DCPRPGI - -time - - - - - -mp - -mp - -mp - - -Mfixed - -Mfree - -r8 - - -i4 -time -Mstack_arrays -Mextend -byteswapio -Mflushz -Kieee - - -O0 -g -Ktrap=fp -Mbounds -Kieee - - -mp - - -O0 -g -Ktrap=fp -Mbounds -Kieee - -Mnovect - -Mnovect - -Mnovect - -Mnovect - -Mnovect - -Mnovect - -time -Wl,--allow-multiple-definition -acc - pgcc - pgf95 - pgc++ - mpicc - mpif90 - mpicxx - - CXX - - - FALSE - - -DFORTRANUNDERSCORE -DNO_R16 -DCPRINTEL - -openmp - -openmp - -openmp - -free - -fixed -132 - + -DFORTRANUNDERSCORE -DNO_R16 -DCPRINTEL + + + -cxxlib + + FORTRAN + + -r8 + + + -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model source + -openmp + - -O0 -g -check uninit -check bounds -check pointers -fpe0 -check noarg_temp_created - -O2 -debug minimal - -O2 -debug minimal - -O0 -g - -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model source - -O2 -fp-model precise -std=gnu99 - -O0 - -openmp - -r8 - ifort - icc - icpc - mpif90 + -O0 -g -check uninit -check bounds -check pointers -fpe0 -check noarg_temp_created + -O2 -debug minimal + + + -O0 + -openmp + + + -fixed -132 + + + -free + + TRUE + + -openmp + mpicc mpicxx - FORTRAN - -cxxlib + mpif90 + icc + icpc + ifort TRUE - TRUE - - - - -mcmodel medium -shared-intel - - -DFORTRANUNDERSCORE -DNO_R16 -DCPRINTEL - -openmp - -openmp - -openmp - -free - -fixed -132 - -O0 -g -check uninit -check bounds -check pointers -fpe0 - -O2 - -fp-model source -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs - -O2 -fp-model precise - -O0 - -r8 - ifort - icc - icpc - 
mpif90 + + -O2 -fp-model precise + -openmp + + + + -DFORTRANUNDERSCORE -DNO_R16 -DCPRINTEL + -DCPRINTEL + + + -cxxlib + + FORTRAN + + -r8 + + + -fp-model source -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs + -openmp + -O0 -g -check uninit -check bounds -check pointers -fpe0 + -O2 + + + -O0 + + + -fixed -132 + + + -free + + TRUE + + -openmp + mpicc mpicxx - FORTRAN - -cxxlib - -DCPRINTEL + mpif90 + icc + icpc + ifort + + $SHELL{$NETCDF_PATH/bin/nf-config --flibs} + TRUE - TRUE - $(shell $(NETCDF_PATH)/bin/nf-config --flibs) - - -DFORTRANUNDERSCORE -DNO_R16 -DCPRINTEL - -openmp - -openmp - -openmp - -free - -fixed -132 - -O0 -g -check uninit -check bounds -check pointers -fpe0 - -O2 - -mmic -fp-model source -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs - -mmic -O2 -fp-model precise -DFORTRANUNDERSCOR - --host=x86_64-k1om-linux --build=x86_64-unknown-linux - -O0 -mmic - -r8 - ifort - icc - icpc - mpiifort + + -mmic -O2 -fp-model precise -DFORTRANUNDERSCOR + -openmp + + + --host=x86_64-k1om-linux --build=x86_64-unknown-linux + + + + -DFORTRANUNDERSCORE -DNO_R16 -DCPRINTEL + -DCPRINTEL + + + -cxxlib + + FORTRAN + + -r8 + + + -mmic -fp-model source -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs + -openmp + -O0 -g -check uninit -check bounds -check pointers -fpe0 + -O2 + + + -O0 -mmic + + + -fixed -132 + + + -free + + TRUE + + -openmp + -mmic + mpiicc mpiicpc - FORTRAN - -cxxlib + mpiifort + icc + icpc + ifort + + $SHELL{$NETCDF_PATH/bin/nf-config --flibs} + TRUE - TRUE - -DCPRINTEL - -mmic - $(shell $(NETCDF_PATH)/bin/nf-config --flibs) - - -DFORTRANUNDERSCORE -DNO_R16 -DCPRINTEL - -openmp - -openmp - -openmp - -free - -fixed -132 - -O0 -g -check uninit -check bounds -check pointers -fpe0 - -O2 - -mmic -fp-model source -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs - -mmic -O2 -fp-model precise -DFORTRANUNDERSCOR - --host=x86_64-k1om-linux 
--build=x86_64-unknown-linux - -O0 -mmic - -r8 - ifort - icc - icpc - mpiifort + + -mmic -O2 -fp-model precise -DFORTRANUNDERSCOR + -openmp + + + --host=x86_64-k1om-linux --build=x86_64-unknown-linux + + + + -DFORTRANUNDERSCORE -DNO_R16 -DCPRINTEL + -DCPRINTEL + + + -cxxlib + + FORTRAN + + -r8 + + + -mmic -fp-model source -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs + -openmp + -O0 -g -check uninit -check bounds -check pointers -fpe0 + -O2 + + + -O0 -mmic + + + -fixed -132 + + + -free + + TRUE + + -openmp + -mmic + mpiicc mpiicpc - FORTRAN - -cxxlib + mpiifort + icc + icpc + ifort + + $SHELL{$NETCDF_PATH/bin/nf-config --flibs} + TRUE - TRUE - -DCPRINTEL - -mmic - $(shell $(NETCDF_PATH)/bin/nf-config --flibs) - - - -DFORTRANUNDERSCORE -DNO_R16 -DCPRGNU - -fopenmp - -fopenmp - -fopenmp - -D CISM_GNU=ON - -ffixed-form - -ffree-form - -g -Wall -Og -fbacktrace -fcheck=bounds -ffpe-trap=invalid,zero,overflow - -O - -g -Wall -Og -fbacktrace -fcheck=bounds -ffpe-trap=invalid,zero,overflow - -O - - -mcmodel=medium -fconvert=big-endian -ffree-line-length-none -ffixed-line-length-none - -mcmodel=medium - -O0 - -fdefault-real-8 - gfortran - gcc - g++ - mpif90 - mpicc - mpicxx - FORTRAN - TRUE + + + -g + -std=c99 + + + -DFORTRANUNDERSCORE -DNO_CRAY_POINTERS -DNO_SHR_VMATH -DCPRNAG + + + -r8 + + + + + + + + -wmismatch=mpi_send,mpi_recv,mpi_bcast,mpi_allreduce,mpi_reduce,mpi_isend,mpi_irecv,mpi_irsend,mpi_rsend,mpi_gatherv,mpi_gather,mpi_scatterv,mpi_allgather,mpi_alltoallv,mpi_file_read_all,mpi_file_write_all,mpibcast,mpiscatterv,mpi_alltoallw,nfmpi_get_vara_all,NFMPI_IPUT_VARA,NFMPI_GET_VAR_ALL,NFMPI_PUT_VARA,NFMPI_PUT_ATT_REAL,NFMPI_PUT_ATT_DOUBLE,NFMPI_PUT_ATT_INT,NFMPI_GET_ATT_REAL,NFMPI_GET_ATT_INT,NFMPI_GET_ATT_DOUBLE,NFMPI_PUT_VARA_DOUBLE_ALL,NFMPI_PUT_VARA_REAL_ALL,NFMPI_PUT_VARA_INT_ALL -convert=BIG_ENDIAN + + -ieee=full -O2 + -g -time -f2003 -ieee=stop + + + -C=all -g -time -f2003 -ieee=stop + -gline + -openmp + + + $FFLAGS + -ieee=full + + 
+ + -g -time -f2003 -ieee=stop + -gline + -openmp + + + -fixed + + + -free + FALSE - - - - - -DFORTRANUNDERSCORE -DNO_R16 -DCPRGNU - -fopenmp - -fopenmp - -fopenmp - -D CISM_GNU=ON - -ffixed-form - -ffree-form - -g -Wall -Og -fbacktrace -fcheck=bounds -ffpe-trap=invalid,zero,overflow - -O - -g -Wall -Og -fbacktrace -fcheck=bounds -ffpe-trap=invalid,zero,overflow - -O - - -mcmodel=medium -fconvert=big-endian -ffree-line-length-none -ffixed-line-length-none - -mcmodel=medium - -O0 - -fdefault-real-8 - gfortran - gcc - g++ + + -openmp + + mpicc mpif90 - mpicc - mpicxx - FORTRAN - TRUE - FALSE + gcc + nagfor - - -DFORTRANUNDERSCORE -DNO_R16 -DCPRPATHSCALE - -mp - -mp - -mp - -g -trapuv -Wuninitialized - -O -extend_source -ftpp -fno-second-underscore -funderscoring -byteswapio - -O0 - -r8 - mpif90 - mpicc + + -mp + + + + -DFORTRANUNDERSCORE -DNO_R16 -DCPRPATHSCALE + + + -r8 + + + -O -extend_source -ftpp -fno-second-underscore -funderscoring -byteswapio + -mp + -g -trapuv -Wuninitialized + + + -O0 + FALSE - - - - - -DFORTRANUNDERSCORE -DNO_R16 -DCPRCRAY - -DDIR=NOOP - -h noomp - -h noomp - -h noomp - -DDIR=NOOP - -g -trapuv -Wuninitialized - -O2 -f free -N 255 -h byteswapio -em - -O0 - -h noomp - -s real64 - -Wl,--allow-multiple-definition -h byteswapio - TRUE - - - - - - nagfor + + -mp + + mpicc mpif90 - gcc - mpicc + - -DFORTRANUNDERSCORE -DNO_CRAY_POINTERS -DNO_SHR_VMATH -DCPRNAG - - - - - - - -wmismatch=mpi_send,mpi_recv,mpi_bcast,mpi_allreduce,mpi_reduce,mpi_isend,mpi_irecv,mpi_irsend,mpi_rsend,mpi_gatherv,mpi_gather,mpi_scatterv,mpi_allgather,mpi_alltoallv,mpi_file_read_all,mpi_file_write_all,mpibcast,mpiscatterv,mpi_alltoallw,nfmpi_get_vara_all,NFMPI_IPUT_VARA,NFMPI_GET_VAR_ALL,NFMPI_PUT_VARA,NFMPI_PUT_ATT_REAL,NFMPI_PUT_ATT_DOUBLE,NFMPI_PUT_ATT_INT,NFMPI_GET_ATT_REAL,NFMPI_GET_ATT_INT,NFMPI_GET_ATT_DOUBLE,NFMPI_PUT_VARA_DOUBLE_ALL,NFMPI_PUT_VARA_REAL_ALL,NFMPI_PUT_VARA_INT_ALL -convert=BIG_ENDIAN - $(FFLAGS) - - - -ieee=full -O2 - -ieee=full - - -g -time -f2003 
-ieee=stop - -g - -std=c99 - - - - -C=all -g -time -f2003 -ieee=stop - -gline - - - - -g -time -f2003 -ieee=stop - -gline - - -openmp - -openmp - -openmp - - -r8 - -fixed - -free + + + -gopt -time + + -mp + + + + + + + + + + + + + + + + + + + + + + + + + + + -DFORTRANUNDERSCORE -DNO_SHR_VMATH -DNO_R16 -DCPRPGI + + CXX + + -r8 + + + -i4 -gopt -time -Mstack_arrays -Mextend -byteswapio -Mflushz -Kieee + + -mp + -O0 -g -Ktrap=fp -Mbounds -Kieee + -Mnovect + -Mnovect + -Mnovect + -Mnovect + -Mnovect + -Mnovect + + + -O0 -g -Ktrap=fp -Mbounds -Kieee + + -mp + + + -Mfixed + + + -Mfree + + + FALSE + + -time -Wl,--allow-multiple-definition + + -mp + + mpicc + mpicxx + mpif90 + pgcc + pgc++ + pgf95 - - -DSYSDARWIN - -all_load + + + -time + + -mp + + + + + + + + + + + + + + + + + + + + + + + + + + + -DFORTRANUNDERSCORE -DNO_SHR_VMATH -DNO_R16 -DUSE_CUDA_FORTRAN -DCPRPGI + + CXX + + -r8 + + + -i4 -time -Mstack_arrays -Mextend -byteswapio -Mflushz -Kieee + + -mp + + -O0 -g -Ktrap=fp -Mbounds -Kieee + -Mnovect + -Mnovect + -Mnovect + -Mnovect + -Mnovect + -Mnovect + + + -O0 -g -Ktrap=fp -Mbounds -Kieee + + -mp + + + -Mfixed + + + -Mfree + + + + FALSE + + -time -Wl,--allow-multiple-definition -acc + + -mp + + mpicc + mpicxx + mpif90 + pgcc + pgc++ + pgf95 - - -heap-arrays - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl + + + -qarch=auto -qtune=auto -qcache=auto + + /usr/bin/bash + + -qarch=auto -qtune=auto -qcache=auto -qsclk=micro + -qspill=6000 + + + -qsigtrap=xl__trcedump + -bdatapsize:64K -bstackpsize:64K -btextpsize:32K + + mpcc_r + mpxlf2003_r + cc_r + xlf2003_r + + -lmassv -lessl + -lmass + + + + + + -O3 -qstrict + -qtune=440 -qarch=440d + + + --build=powerpc-bgp-linux --host=powerpc64-suse-linux + + + -DLINUX -DnoI8 + + + -qtune=440 -qarch=440d + -O3 -qstrict -Q + -qinitauto=FF911299 -qflttrap=ov:zero:inv:en + -qextname=flush + + + -Wl,--relax -Wl,--allow-multiple-definition + + + -L/bgl/BlueLight/ppcfloor/bglsys/lib 
-lmpich.rts -lmsglayer.rts -lrts.rts -ldevices.rts + + blrts_xlc + blrts_xlf2003 + mpich.rts + /bgl/BlueLight/ppcfloor/bglsys + blrts_xlc + blrts_xlf2003 - - USERDEFINED_MUST_EDIT_THIS - - # USERDEFINED $(shell $(NETCDF_PATH)/bin/nf-config --flibs) - - - - - + + + -qtune=450 -qarch=450 -I/bgsys/drivers/ppcfloor/arch/include/ + + + --build=powerpc-bgp-linux --host=powerpc64-suse-linux + + + -DLINUX -DnoI8 + + + -qspillsize=2500 -qtune=450 -qarch=450 + -qextname=flush + + + -Wl,--relax -Wl,--allow-multiple-definition + + + + + + -qsmp=omp:nested_par -qsuppress=1520-045 + -qsmp=omp:nested_par:noopt -qsuppress=1520-045 + + + --build=powerpc-bgp-linux --host=powerpc64-suse-linux + + + -DLINUX + + + -g -qfullpath -qmaxmem=-1 -qspillsize=2500 -qextname=flush -qphsinfo + -O3 -qstrict -Q + -qsmp=omp:nested_par -qsuppress=1520-045 + -qsmp=omp:nested_par:noopt -qsuppress=1520-045 + + + -Wl,--relax -Wl,--allow-multiple-definition + - ftn - cc - CC - ftn + + -DCMAKE_SYSTEM_NAME=Catamount + + + -DLINUX + -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY + cc CC + ftn mpich - $(MPICH_DIR) - $(NETCDF_DIR) - $(PARALLEL_NETCDF_DIR) + $ENV{MPICH_DIR} + $ENV{NETCDF_DIR} lustre - -DLINUX - -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY - -DCMAKE_SYSTEM_NAME=Catamount - - - - -O2 -debug minimal -qno-opt-dynamic-align - -qopenmp - -qopenmp - -qopenmp - -qopenmp - --host=Linux - -L$(NETCDF_DIR) -lnetcdff -Wl,--as-needed,-L$(NETCDF_DIR)/lib -lnetcdff -lnetcdf - -mkl -lpthread - ifort - icc - icpc - $(PETSC_DIR) + $ENV{PARALLEL_NETCDF_DIR} + cc + CC + ftn - - - -DFORTRANUNDERSCORE -DNO_R16 -DCPRINTEL - -qopenmp - -qopenmp - -qopenmp - -free - -fixed -132 - -O0 -g -check uninit -check bounds -check pointers -fpe0 -check noarg_temp_created - -O2 -debug minimal -qno-opt-dynamic-align - -O2 -debug minimal - -O0 -g - -convert big_endian -assume 
byterecl -ftz -traceback -assume realloc_lhs -fp-model consistent -fimf-use-svml - -O2 -fp-model precise -std=gnu99 - -O0 - -qopenmp - -r8 - ifort - icc - icpc - FORTRAN - -cxxlib - TRUE - TRUE - --host=Linux - -L$(NETCDF_DIR) -lnetcdff -Wl,--as-needed,-L$(NETCDF_DIR)/lib -lnetcdff -lnetcdf - -mkl -lpthread - ifort - icc - icpc - $(PETSC_DIR) + + + -DSYSDARWIN + + + -all_load + + + + + + -heap-arrays + + + -mkl=cluster + -mkl=cluster + -mkl=cluster + -mkl=cluster + -mkl=cluster + -mkl=cluster + -mkl + + + + + + -mcmodel medium -shared-intel + - - -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model consistent -fimf-use-svml - -O2 -debug minimal -qno-opt-dynamic-align - -qopenmp - -qopenmp - -qopenmp - -qopenmp - --host=Linux - -L$(NETCDF_DIR) -lnetcdff -Wl,--as-needed,-L$(NETCDF_DIR)/lib -lnetcdff -lnetcdf - -mkl -lpthread - -DARCH_MIC_KNL - ifort - icc - icpc - $(PETSC_DIR) - - - - -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model consistent -fimf-use-svml - -O2 -debug minimal -qno-opt-dynamic-align - -qopenmp - -qopenmp - -qopenmp - -qopenmp - --host=Linux - -L$(NETCDF_DIR) -lnetcdff -Wl,--as-needed,-L$(NETCDF_DIR)/lib -lnetcdff -lnetcdf - -mkl -lpthread - ifort - icc - icpc - $(PETSC_DIR) + + /projects/install/rhel6-x86_64/ACME/AlbanyTrilinos/Albany/build/install + + -O2 + + + --host=Linux + + + -lstdc++ -lmpi_cxx + + + -O2 + + + $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -lblas -llapack + + + + + + -DHAVE_NANOTIME -DBIT64 -DHAVE_SLASHPROC -DHAVE_GETTIMEOFDAY + + gpfs + + $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -llapack -lblas + + + + + + -qopenmp -static-intel + -heap-arrays + + + -DHAVE_SLASHPROC + + + -O2 -debug minimal -qno-opt-dynamic-align + -qopenmp -static-intel + -heap-arrays + + + -qopenmp -static-intel + + + -qopenmp -static-intel + + gpfs + + $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -Wl,-rpath -Wl,$NETCDF_PATH/lib + -mkl + - - - - impi - mpiifort - mpiicc - mpiicpc - -xMIC-AVX512 - 
-axMIC-AVX512 -xCORE-AVX2 + + gpfs + + $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -llapack -lblas + -rpath $NETCDF_PATH/lib + - - -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model consistent -fimf-use-svml - -O2 -debug minimal -qno-opt-dynamic-align - - -qopenmp - -qopenmp - -qopenmp - -qopenmp - --host=Linux - - impi - mpif90 - mpicc - mpicxx - ifort - icc - icpc - - - - -xCORE-AVX2 - -xCORE-AVX2 - - -DLINUX - -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY - - $(TACC_HDF5_DIR) - - $(TACC_PNETCDF_DIR) - $(TACC_NETCDF_DIR) - -L$(NETCDF_PATH) -lnetcdff -Wl,--as-needed,-L$(NETCDF_PATH)/lib -lnetcdff -lnetcdf - - $(TACC_NETCDF_DIR) - -L$(NETCDF_PATH) -lnetcdff -Wl,--as-needed,-L$(NETCDF_PATH)/lib -lnetcdff -lnetcdf - - -mkl -lpthread - -DARCH_MIC_KNL - - $(PETSC_DIR) + + /soft/climate/AlbanyTrilinos_06262017/Albany/buildintel/install + + -qopenmp + + + -DHAVE_SLASHPROC + + + -lstdc++ + + + -O2 -debug minimal -qno-opt-dynamic-align + -qopenmp + + + -qopenmp + + + -qopenmp + + mpiicc + mpiicpc + mpiifort + + $SHELL{nf-config --flibs} -mkl + - - -O2 - -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv - -O2 - --host=Linux - -L$(NETCDF_DIR) -lnetcdff -Wl,--as-needed,-L$(NETCDF_DIR)/lib -lnetcdff -lnetcdf - ${MKLROOT}/lib/intel64/libmkl_scalapack_lp64.a -Wl,--start-group ${MKLROOT}/lib/intel64/libmkl_intel_lp64.a ${MKLROOT}/lib/intel64/libmkl_core.a ${MKLROOT}/lib/intel64/libmkl_sequential.a -Wl,--end-group ${MKLROOT}/lib/intel64/libmkl_blacs_intelmpi_lp64.a -lpthread -lm - -DHAVE_PAPI - ftn - cc - CC + + /soft/climate/AlbanyTrilinos_06262017/Albany/build/install + + -DHAVE_NANOTIME -DBIT64 -DHAVE_SLASHPROC -DHAVE_GETTIMEOFDAY + + + -lstdc++ + + mpi + /blues/gpfs/home/software/spack/opt/spack/linux-x86_64/gcc-5.3.0/mvapich2-2.2b-sdh7nhddicl4sh5mgxjyzxtxox3ajqey + $ENV{NETCDFROOT} + gpfs + $ENV{PNETCDFROOT} + + $SHELL{$NETCDF_PATH/bin/nf-config 
--flibs} -llapack -lblas + - - $(NETCDF_PATH) - -framework Accelerate - -L$(NETCDF_PATH)/lib -lnetcdff -lnetcdf + + mpi + /soft/mvapich2/2.2b_psm/intel-15.0 + $ENV{NETCDFROOT} + gpfs + $ENV{PNETCDFROOT} + + $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -llapack -lblas + -Wl,-rpath -Wl,$ENV{NETCDFROOT}/lib + -mkl=cluster + -mkl=cluster + -mkl=cluster + -mkl=cluster + -mkl=cluster + -mkl=cluster + -mkl + + + + + mpi + mpich + /soft/openmpi/1.8.2/intel-13.1 + /soft/mpich2/1.4.1-intel-13.1 + $ENV{NETCDFROOT} + gpfs + $ENV{PNETCDFROOT} + + $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -llapack -lblas + -mkl=cluster + -mkl=cluster + -mkl=cluster + -mkl=cluster + -mkl=cluster + -mkl=cluster + -mkl + + + + + mpi + /home/robl/soft/mpich-3.1.4-nag-6.0 + $ENV{NETCDFROOT} + gpfs + $ENV{PNETCDFROOT} + + $(shell $NETCDF_PATH/bin/nf-config --flibs) $SHELL{$NETCDF_PATH/bin/nc-config --libs} -llapack -lblas + - - $(NETCDF_PATH) - $(PNETCDF_PATH) - $(shell $(NETCDF_PATH)/bin/nf-config --flibs) + + mpi + mpi + mpich + /soft/openmpi/1.8.2/pgi-13.9 + /soft/mpich2/1.4.1-pgi-13.9/ + $ENV{NETCDFROOT} + gpfs + $ENV{PNETCDFROOT} + + $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -llapack -lblas + -rpath $ENV{NETCDFROOT}/lib + + + + + + -O2 + + + --host=Linux + + + -DNO_SHR_VMATH -DCNL + + + -O2 + -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv + + + -llapack -lblas + + mpich + /usr/local/tools/mvapich2-intel-2.1/ + /usr/local/tools/netcdf-intel-4.1.3 + + $SHELL{/usr/local/tools/netcdf-intel-4.1.3/bin/nc-config --flibs} + + + + + + -DNO_SHR_VMATH -DCNL + + + -Wl,-rpath /usr/local/tools/netcdf-pgi-4.1.3/lib -llapack -lblas + + mpich + /usr/local/tools/mvapich2-pgi-1.7/ + /usr/local/tools/netcdf-pgi-4.1.3 + + $SHELL{/usr/local/tools/netcdf-pgi-4.1.3/bin/nc-config --flibs} + + + + + + -fopenmp + + + -D CISM_GNU=ON + + + -DFORTRANUNDERSCORE -DNO_R16 + + + FORTRAN + + -fdefault-real-8 + + + + -O -fconvert=big-endian -ffree-line-length-none -ffixed-line-length-none -fno-range-check + 
-fopenmp + -g -Wall + + + + -O0 + + + -ffixed-form + + + -ffree-form + + /software/dev_tools/swtree/cs400_centos7.2_pe2016-08/hdf5-parallel/1.8.17/centos7.2_gnu5.3.0 + /software/tools/compilers/intel_2017/mkl/lib/intel64 + + -fopenmp + -L$NETCDF_PATH/lib -Wl,-rpath=$NETCDF_PATH/lib -lnetcdff -lnetcdf \ + + + mpicc + mpic++ + mpif90 + /software/dev_tools/swtree/cs400_centos7.2_pe2016-08/netcdf-hdf5parallel/4.3.3.1/centos7.2_gnu5.3.0 + gcc + gcpp + gfortran + TRUE - - -O2 - -O2 - --host=Linux - $(NETCDFROOT) - $(PNETCDFROOT) - $SEMS_PFUNIT_ROOT - $(shell $(NETCDF_PATH)/bin/nf-config --flibs) -lblas -llapack - -lstdc++ -lmpi_cxx - /projects/install/rhel6-x86_64/ACME/AlbanyTrilinos/Albany/build/install + + + --host=Linux --enable-filesystem-hints=lustre + + + -DLINUX + + + -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv + + $ENV{NETCDF_HOME} + lustre + $ENV{PNETCDFROOT} + + -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -lpmi -L$ENV{MKL_PATH}/lib/intel64 -lmkl_rt + -mkl=cluster + -mkl + + + + + + -DnoI8 + + + + -C=all -g -O0 -v + -C=all -g -nan -O0 -v + + + + + $ENV{MPI_LIB} + $ENV{NETCDF_ROOT} + lustre + $ENV{PNETCDFROOT} + + -L$ENV{NETCDF_ROOT}/lib -lnetcdf -lnetcdff -L$ENV{MKL_PATH} -lmkl_rt + - - -O2 - -O2 - --host=Linux - $(NETCDFROOT) - $(PNETCDFROOT) - $(shell $(NETCDF_PATH)/bin/nf-config --flibs) -lblas -llapack - -lstdc++ -lmpi_cxx - /projects/install/rhel6-x86_64/ACME/AlbanyTrilinos/Albany/build/install + + /projects/ccsm/libs/AlbanyTrilinos/Albany/build/install + + -DMPASLI_EXTERNAL_INTERFACE_DISABLE_MANGLING + + + -llapack -lblas -L$ENV{IBM_MAIN_DIR}/xlf/bg/14.1/bglib64 -lxlfmath -lxlf90_r -lxlopt -lxl -L$ENV{IBM_MAIN_DIR}/xlsmp/bg/3.1/bglib64 -lxlsmp + + CXX + /soft/libraries/hdf5/1.8.14/cnk-xl/current/ + mpixlf77_r + mpixlc_r + /soft/compilers/bgclang/mpi/bgclang/bin/mpic++11 + mpixlf2003_r + /soft/libraries/netcdf/4.3.3-f4.4.1/cnk-xl/current/ + /soft/libraries/petsc/3.5.3.1 + gpfs + /soft/libraries/pnetcdf/1.6.0/cnk-xl/current/ + mpixlc_r + 
mpixlf2003_r + + -L$NETCDF_PATH/lib -lnetcdff -lnetcdf -L$HDF5_PATH/lib -lhdf5_hl -lhdf5 -L/soft/libraries/alcf/current/xl/ZLIB/lib -lz -L/soft/libraries/alcf/current/xl/LAPACK/lib -llapack -L/soft/libraries/alcf/current/xl/BLAS/lib -lblas -L/bgsys/drivers/ppcfloor/comm/sys/lib + -L$ENV{IBM_MAIN_DIR}/xlf/bg/14.1/bglib64 -lxlfmath -lxlf90_r -lxlopt -lxl -L$ENV{IBM_MAIN_DIR}/xlsmp/bg/3.1/bglib64 -lxlsmp + + TRUE - - -O2 - -O2 - --host=Linux - $(shell $(NETCDF_PATH)/bin/nf-config --flibs) -lblas -llapack - -lstdc++ -lmpi_cxx - /projects/install/rhel6-x86_64/ACME/AlbanyTrilinos/Albany/build/install + + + -O2 + + + --host=Linux + + + -DLINUX + + + -O2 + -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv + + $ENV{NETCDF_HOME} + lustre + $ENV{PNETCDFROOT} + + -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -lpmi -L$ENV{MKL_PATH} -lmkl_rt + + + + + + -O2 -kind=byte + + + --host=Linux + + + -DLINUX + + + -O2 -kind=byte + -C=all -g -O0 -v + + $ENV{NETCDF_HOME} + lustre + $ENV{PNETCDFROOT} + + -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -lpmi -L$ENV{MKL_PATH} -lmkl_rt + + + + + + -O2 + + + --host=Linux + + + -DLINUX + + + -O2 + -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv + -C -Mbounds -traceback -Mchkfpstk -Mchkstk -Mdalign -Mdepchk -Mextend -Miomutex -Mrecursive -Ktrap=fp -O0 -g -byteswapio -Meh_frame + + $ENV{NETCDF_HOME} + lustre + $ENV{PNETCDFROOT} + + -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -lpmi -L$ENV{MPI_LIB} -lmpich + + + + + + -qopenmp + + + --host=Linux + + + -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model consistent -fimf-use-svml + -O2 -debug minimal -qno-opt-dynamic-align + -qopenmp + + + -qopenmp + + + -qopenmp + + $ENV{PETSC_DIR} + icc + icpc + ifort + + -L$ENV{NETCDF_DIR} -lnetcdff -Wl,--as-needed,-L$ENV{NETCDF_DIR}/lib -lnetcdff -lnetcdf + -mkl -lpthread + + + + + + -qopenmp + -axMIC-AVX512 -xCORE-AVX2 + + + --host=Linux + + + -DARCH_MIC_KNL + + + -convert big_endian -assume byterecl -ftz 
-traceback -assume realloc_lhs -fp-model consistent -fimf-use-svml + -O2 -debug minimal -qno-opt-dynamic-align + -qopenmp + -xMIC-AVX512 + + + -qopenmp + + + -qopenmp + + mpiicc + mpiicpc + mpiifort + + impi + $ENV{PETSC_DIR} + icc + icpc + ifort + + -L$ENV{NETCDF_DIR} -lnetcdff -Wl,--as-needed,-L$ENV{NETCDF_DIR}/lib -lnetcdff -lnetcdf + -mkl -lpthread + + + + + + -O2 + + + --host=Linux + + + -DLINUX + + + -O2 + + $ENV{NETCDF_HOME} + lustre + + -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -lpmi + + + + + + -DHAVE_VPRINTF -DHAVE_GETTIMEOFDAY -DHAVE_BACKTRACE + + $ENV{NETCDF_HOME} + + -L$ENV{NETCDF_HOME}/lib/ -lnetcdff -lnetcdf -lcurl -llapack -lblas + + + + + + -qopenmp + + + --host=Linux + + + -O2 -debug minimal -qno-opt-dynamic-align + -qopenmp + + + -qopenmp + + + -qopenmp + + $ENV{PETSC_DIR} + icc + icpc + ifort + + -L$ENV{NETCDF_DIR} -lnetcdff -Wl,--as-needed,-L$ENV{NETCDF_DIR}/lib -lnetcdff -lnetcdf + -mkl -lpthread + - - -O2 - -O2 - $(NETCDFROOT) - $(PNETCDFROOT) - /projects/ccsm/pfunit/3.2.9/mpi-serial - $(MPIHOME) - /projects/ccsm/esmf-6.3.0rp1/lib/libO/Linux.intel.64.openmpi.default - --host=Linux - $(shell $(NETCDF_PATH)/bin/nf-config --flibs) -L/projects/ccsm/BLAS-intel -lblas_LINUX -L$ENV{MKL_LIBS} -lmkl_rt - lustre - -mkl=cluster - -mkl - /projects/ccsm/AlbanyTrilinos_06262017/Albany/build/install + + + + -O2 -fp-model precise -std=gnu99 + -qopenmp + -O2 -debug minimal + -O0 -g + + + --host=Linux + + + -DFORTRANUNDERSCORE -DNO_R16 -DCPRINTEL + + + -cxxlib + + FORTRAN + + -r8 + + + -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model consistent -fimf-use-svml + -qopenmp + -O0 -g -check uninit -check bounds -check pointers -fpe0 -check noarg_temp_created + -O2 -debug minimal -qno-opt-dynamic-align + + + -O0 + -qopenmp + + + -fixed -132 + + + -free + + TRUE + + -qopenmp + + $ENV{PETSC_DIR} + icc + icc + icpc + icpc + ifort + ifort + + -L$ENV{NETCDF_DIR} -lnetcdff -Wl,--as-needed,-L$ENV{NETCDF_DIR}/lib -lnetcdff -lnetcdf + -mkl 
-lpthread + + TRUE - - -O2 - -O2 - $(NETCDFROOT) - $(PNETCDFROOT) - /projects/ccsm/pfunit/3.2.9/mpi-serial - /opt/openmpi-1.8-intel + + + + -O2 + + + --host=Linux + + + -DHAVE_PAPI + + + -O2 + -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv + + cc + CC + ftn + + -L$ENV{NETCDF_DIR} -lnetcdff -Wl,--as-needed,-L$ENV{NETCDF_DIR}/lib -lnetcdff -lnetcdf + $ENV{MKLROOT}/lib/intel64/libmkl_scalapack_lp64.a -Wl,--start-group $ENV{MKLROOT}/lib/intel64/libmkl_intel_lp64.a $ENV{MKLROOT}/lib/intel64/libmkl_core.a $ENV{MKLROOT}/lib/intel64/libmkl_sequential.a -Wl,--end-group $ENV{MKLROOT}/lib/intel64/libmkl_blacs_intelmpi_lp64.a -lpthread -lm + + + + + /projects/ccsm/AlbanyTrilinos_06262017/Albany/build/install + + -O2 + + + --host=Linux + /projects/ccsm/esmf-6.3.0rp1/lib/libO/Linux.intel.64.openmpi.default - --host=Linux - $(shell $(NETCDF_PATH)/bin/nf-config --flibs) -L/projects/ccsm/BLAS-intel -lblas_LINUX + + -O2 + + /opt/openmpi-1.8-intel + $ENV{NETCDFROOT} + /projects/ccsm/pfunit/3.2.9/mpi-serial lustre - -mkl=cluster - -mkl - /projects/ccsm/AlbanyTrilinos_06262017/Albany/build/install + $ENV{PNETCDFROOT} + + $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -L/projects/ccsm/BLAS-intel -lblas_LINUX + -mkl=cluster + -mkl + - - /usr/local/tools/netcdf-pgi-4.1.3 - /usr/local/tools/mvapich2-pgi-1.7/ - mpich - -DNO_SHR_VMATH -DCNL - $(shell /usr/local/tools/netcdf-pgi-4.1.3/bin/nc-config --flibs) - -Wl,-rpath /usr/local/tools/netcdf-pgi-4.1.3/lib -llapack -lblas + + + -lstdc++ -lmpi_cxx + + mpicc + mpic++ + mpif90 + lustre + gcc + g++ + gfortran + + $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -llapack -lblas + $ENV{MKLROOT}/lib/intel64/libmkl_scalapack_lp64.a -Wl,--start-group $ENV{MKLROOT}/lib/intel64/libmkl_intel_lp64.a $ENV{MKLROOT}/lib/intel64/libmkl_core.a $ENV{MKLROOT}/lib/intel64/libmkl_sequential.a -Wl,--end-group $ENV{MKLROOT}/lib/intel64/libmkl_blacs_intelmpi_lp64.a -lpthread -lm -z muldefs + - - -O2 - -O2 - /usr/local/tools/netcdf-intel-4.1.3 - 
--host=Linux - /usr/local/tools/mvapich2-intel-2.1/ - mpich - -DNO_SHR_VMATH -DCNL - -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv - $(shell /usr/local/tools/netcdf-intel-4.1.3/bin/nc-config --flibs) - -llapack -lblas + + + -lstdc++ -lmpi_cxx + + mpicc + mpic++ + mpif90 + lustre + icc + icpc + ifort + + $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -llapack -lblas + + + + + + -O2 -fp-model precise -I/soft/intel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include + -openmp + + + -DFORTRANUNDERSCORE -DNO_R16 + -DCPRINTEL + + + -cxxlib + + FORTRAN + + -r8 + + + -fp-model source -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -I/soft/intel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include + -openmp + -O0 -g -check uninit -check bounds -check pointers -fpe0 + -O2 + + + -O0 + + + -fixed -132 + + + -free + + + -openmp + -lnetcdff + + mpiicc + mpiicpc + mpiifort + icc + icpc + ifort + + -L/soft/netcdf/fortran-4.4-intel-sp1-update3-parallel/lib -lnetcdff -L/soft/hdf5/hdf5-1.8.13-intel-2013-sp1-update3-impi-5.0.0.028/lib -openmp -fPIC -lnetcdf -lnetcdf -L/soft/intel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/lib/intel64 -lmkl_intel_lp64 -lmkl_core -lmkl_intel_thread -lpthread -lm + + TRUE - - /usr/local/tools/netcdf-pgi-4.1.3 - /usr/local/tools/mvapich2-pgi-1.7/ - mpich - -DNO_SHR_VMATH -DCNL - $(shell /usr/local/tools/netcdf-pgi-4.1.3/bin/nc-config --flibs) - -Wl,-rpath /usr/local/tools/netcdf-pgi-4.1.3/lib -llapack -lblas + + + -DHAVE_VPRINTF -DHAVE_GETTIMEOFDAY + + /global/software/sl-6.x86_64/modules/intel/2016.1.150/lapack/3.6.0-intel/lib + $ENV{NETCDF_DIR} + + -lnetcdff -lnetcdf -mkl + - - -O2 - -O2 - /usr/local/tools/netcdf-intel-4.1.3 - --host=Linux - /usr/local/tools/mvapich2-intel-2.1/ - mpich - -DNO_SHR_VMATH -DCNL - -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv - $(shell /usr/local/tools/netcdf-intel-4.1.3/bin/nc-config --flibs) - -llapack 
-lblas + + + -DHAVE_VPRINTF -DHAVE_GETTIMEOFDAY + + /global/software/sl-6.x86_64/modules/intel/2016.1.150/lapack/3.6.0-intel/lib + $ENV{NETCDF_DIR} + + -lnetcdff -lnetcdf -mkl + - - -O2 - -O2 - /usr/workspace/wsa/climdat/spack/opt/spack/linux-rhel7-x86_64/intel-17.0.0/netcdf-fortran-4.4.4-4naprkre2m7kriadyxwboauil7nc3jtc/ - --host=Linux - /usr/tce/modulefiles/MPI/intel/17.0.0/mvapich2/2.2/ - mpi - -DNO_SHR_VMATH -DCNL - -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv - $(shell /usr/workspace/wsa/climdat/spack/opt/spack/linux-rhel7-x86_64/intel-17.0.0/netcdf-fortran-4.4.4-4naprkre2m7kriadyxwboauil7nc3jtc/bin/nf-config --flibs) - -llapack -lblas + + $ENV{NETCDF_PATH} + $ENV{PNETCDF_PATH} + + $SHELL{$NETCDF_PATH/bin/nf-config --flibs} + + + + + + -framework Accelerate + + $ENV{NETCDF_PATH} + + -L$NETCDF_PATH/lib -lnetcdff -lnetcdf + + + + + /projects/install/rhel6-x86_64/ACME/AlbanyTrilinos/Albany/build/install + + -O2 + + + --host=Linux + + + -lstdc++ -lmpi_cxx + + + -O2 + -I$ENV{NETCDFROOT}/include + + $ENV{NETCDFROOT} + $ENV{SEMS_PFUNIT_ROOT} + $ENV{PNETCDFROOT} + + $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -lblas -llapack + + + + + /projects/install/rhel6-x86_64/ACME/AlbanyTrilinos/Albany/build/install + + -O2 + + + --host=Linux + + + -lstdc++ -lmpi_cxx + + + -O2 + + $ENV{NETCDFROOT} + $ENV{PNETCDFROOT} + + $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -lblas -llapack + + + + + + -O2 -fp-model precise -I/soft/intel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/inc +lude + -openmp + + + -DFORTRANUNDERSCORE -DNO_R16 + -DCPRINTEL + + + -cxxlib + + FORTRAN + + -r8 + + + -fp-model source -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -I/soft/i +ntel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include + -openmp + -O0 -g -check uninit -check bounds -check pointers -fpe0 + -O2 + + + -O0 + + + -fixed -132 + + + -free + + + -openmp + -lnetcdff + + mpiicc + mpiicpc + mpiifort + icc + icpc + ifort + + 
-L/soft/netcdf/fortran-4.4-intel-sp1-update3-parallel/lib -lnetcdff -L/soft/hdf5/hdf5-1.8.13-intel-2013-sp1-update3-impi-5.0.0.028/lib -openmp -fPIC -lnetcdf -lnetcdf -L/soft/intel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/lib/intel64 -lmkl_intel_lp64 -lmkl_core -lmkl_intel_thread -lpthread -lm + + TRUE + + + + /projects/ccsm/libs/AlbanyTrilinos/Albany/build/install + + -DMPASLI_EXTERNAL_INTERFACE_DISABLE_MANGLING + + + -llapack -lblas -L$ENV{IBM_MAIN_DIR}/xlf/bg/14.1/bglib64 -lxlfmath -lxlf90_r -lxlopt -lxl -L$ENV{IBM_MAIN_DIR}/xlsmp/bg/3.1/bglib64 -lxlsmp + + CXX + /soft/libraries/hdf5/1.8.14/cnk-xl/current/ + + + mpixlf77_r + mpixlc_r + /soft/compilers/bgclang/mpi/bgclang/bin/mpic++11 + mpixlf2003_r + /soft/libraries/netcdf/4.3.3-f4.4.1/cnk-xl/current/ + /soft/libraries/petsc/3.5.3.1 + gpfs + /soft/libraries/pnetcdf/1.6.0/cnk-xl/current/ + mpixlc_r + mpixlf2003_r + + -L$NETCDF_PATH/lib -lnetcdff -lnetcdf -L$HDF5_PATH/lib -lhdf5_hl -lhdf5 -L/soft/libraries/alcf/current/xl/ZLIB/lib -lz -L/soft/libraries/alcf/current/xl/LAPACK/lib -llapack -L/soft/libraries/alcf/current/xl/BLAS/lib -lblas -L/bgsys/drivers/ppcfloor/comm/sys/lib + -L$ENV{IBM_MAIN_DIR}/xlf/bg/14.1/bglib64 -lxlfmath -lxlf90_r -lxlopt -lxl -L$ENV{IBM_MAIN_DIR}/xlsmp/bg/3.1/bglib64 -lxlsmp + + TRUE + /projects/cesm/devtools/mpich-3.0.4-gcc4.8.1/bin/mpicc + /projects/cesm/devtools/mpich-3.0.4-gcc4.8.1/bin/mpif90 /projects/cesm/devtools/netcdf-4.1.3-gcc4.8.1-mpich3.0.4/ - -L/user/lib64 -llapack -lblas -lnetcdff - /projects/cesm/devtools/gcc-4.8.1/bin/gfortran /projects/cesm/devtools/gcc-4.8.1/bin/gcc /projects/cesm/devtools/gcc-4.8.1/bin/g++ - /projects/cesm/devtools/mpich-3.0.4-gcc4.8.1/bin/mpicc - /projects/cesm/devtools/mpich-3.0.4-gcc4.8.1/bin/mpif90 + /projects/cesm/devtools/gcc-4.8.1/bin/gfortran + + -L/user/lib64 -llapack -lblas -lnetcdff + /home/zdr/opt/netcdf-4.1.3_pgf95 - - -DFORTRANUNDERSCORE -DNO_R16 - -fopenmp - -fopenmp - -fopenmp - -D CISM_GNU=ON - -ffixed-form - 
-ffree-form - -g -Wall - - -O -fconvert=big-endian -ffree-line-length-none -ffixed-line-length-none -fno-range-check - -O0 - -fdefault-real-8 - gfortran - gcc - gcpp - mpif90 - mpicc - mpic++ - FORTRAN - TRUE - /software/dev_tools/swtree/cs400_centos7.2_pe2016-08/netcdf-hdf5parallel/4.3.3.1/centos7.2_gnu5.3.0 - /software/dev_tools/swtree/cs400_centos7.2_pe2016-08/hdf5-parallel/1.8.17/centos7.2_gnu5.3.0 - /software/tools/compilers/intel_2017/mkl/lib/intel64 - /software/tools/compilers/intel_2017/mkl/lib/intel64 - -L$(NETCDF_PATH)/lib -Wl,-rpath=$(NETCDF_PATH)/lib -lnetcdff -lnetcdf \ - -L$(HDF5_PATH)/lib -Wl,-rpath=$(HDF5_PATH)/lib -lhdf5_hl -lhdf5 \ - -L$(LAPACK_LIBDIR) -Wl,-rpath=$(LAPACK_LIBDIR) \ - -L$(BLAS_LIBDIR) -Wl,-rpath=$(BLAS_LIBDIR) \ - -Wl,--start-group -lmkl_blas95_lp64 -lmkl_lapack95_lp64 \ - -lmkl_scalapack_lp64 -lmkl_gf_lp64 -lmkl_intel_lp64 \ - -lmkl_core -lmkl_gnu_thread -lmkl_blacs_openmpi_lp64 \ - -Wl,--end-group -lgomp -lrt - - - - - -O2 - -O2 - --host=Linux - lustre - TRUE - $(shell nf-config --flibs) -mkl=cluster - $(shell nf-config --flibs) -mkl=cluster - $(shell nf-config --flibs) -mkl=cluster - $(shell nf-config --flibs) -mkl=cluster - $(shell nf-config --flibs) -mkl=cluster - $(shell nf-config --flibs) -mkl=cluster - - -L/opt/cray/netcdf/4.4.1.1.3/INTEL/16.0/lib -lnetcdff -L/opt/cray/hdf5/1.10.0.3/GNU/4.9/lib -lnetcdf -mkl - - - - -O2 - -O2 - -target-cpu=istanbul - --host=Linux + + + -O2 + + + --host=Linux + + + -DLINUX + + + -O2 + + $ENV{NETCDF_LIB}/.. 
lustre - $(shell nf-config --flibs) - $(shell nf-config --flibs) - $(shell nf-config --flibs) - $(shell nf-config --flibs) - $(shell nf-config --flibs) - $(shell nf-config --flibs) - - -L/opt/cray/netcdf/4.4.1.1.3/PGI/15.3/lib -lnetcdff -L/opt/cray/hdf5/1.10.0.3/GNU/4.9/lib -lnetcdf - -lfmpich -lmpichf90_pgi $(PGI_PATH)/linux86-64/$(PGI_VERSION)/lib/f90main.o /opt/gcc/default/snos/lib64/libstdc++.a - TRUE - ftn - cc - CC - /ccs/proj/cli106/AlbanyTrilinos/Albany/build/install - - - - -O2 - -O2 - --host=Linux + + -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -lpmi + + + + + + -O2 + + + --host=Linux + + + -DLINUX + + + -O2 + + $ENV{NETCDF_LIB}/.. lustre - $(shell $(NETCDF_PATH)/bin/nf-config --flibs) - -lfmpich -lmpichf90_pgi $(PGI_PATH)/linux86-64/$(PGI_VERSION)/lib/f90main.o - TRUE - -ta=nvidia,cc35,cuda7.5 - - - - /usr/bin/bash - xlf2003_r - mpxlf2003_r - cc_r - mpcc_r - -qarch=auto -qtune=auto -qcache=auto - -qarch=auto -qtune=auto -qcache=auto -qsclk=micro - -qsigtrap=xl__trcedump - -qspill=6000 - -bdatapsize:64K -bstackpsize:64K -btextpsize:32K - -lmassv -lessl - -lmass - - - - -qtune=440 -qarch=440d - -qtune=440 -qarch=440d - /bgl/BlueLight/ppcfloor/bglsys - mpich.rts - blrts_xlf2003 - blrts_xlf2003 - blrts_xlc - blrts_xlc - -O3 -qstrict - -O3 -qstrict -Q - -qinitauto=FF911299 -qflttrap=ov:zero:inv:en - -L/bgl/BlueLight/ppcfloor/bglsys/lib -lmpich.rts -lmsglayer.rts -lrts.rts -ldevices.rts - -DLINUX -DnoI8 - --build=powerpc-bgp-linux --host=powerpc64-suse-linux - -Wl,--relax -Wl,--allow-multiple-definition - -qextname=flush - - - - -qtune=450 -qarch=450 -I/bgsys/drivers/ppcfloor/arch/include/ - -qspillsize=2500 -qtune=450 -qarch=450 - -DLINUX -DnoI8 - --build=powerpc-bgp-linux --host=powerpc64-suse-linux - -Wl,--relax -Wl,--allow-multiple-definition - -qextname=flush + + -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -lpmi + + + + + + -O2 + + + --host=Linux + + + -DNO_SHR_VMATH -DCNL + + + -O2 + -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv + + + 
-llapack -lblas + + mpi + /usr/tce/modulefiles/MPI/intel/17.0.0/mvapich2/2.2/ + /usr/workspace/wsa/climdat/spack/opt/spack/linux-rhel7-x86_64/intel-17.0.0/netcdf-fortran-4.4.4-4naprkre2m7kriadyxwboauil7nc3jtc/ + + $SHELL{/usr/workspace/wsa/climdat/spack/opt/spack/linux-rhel7-x86_64/intel-17.0.0/netcdf-fortran-4.4.4-4naprkre2m7kriadyxwboauil7nc3jtc/bin/nf-config --flibs} + - - - -g -qfullpath -qmaxmem=-1 -qspillsize=2500 -qextname=flush -qphsinfo - -O3 -qstrict -Q - -DLINUX - --build=powerpc-bgp-linux --host=powerpc64-suse-linux - -Wl,--relax -Wl,--allow-multiple-definition - -qsmp=omp:nested_par -qsuppress=1520-045 - -qsmp=omp:nested_par -qsuppress=1520-045 - -qsmp=omp:nested_par:noopt -qsuppress=1520-045 - -qsmp=omp:nested_par:noopt -qsuppress=1520-045 + + /projects/ccsm/AlbanyTrilinos_06262017/Albany/build/install + + -O2 + + + --host=Linux + + /projects/ccsm/esmf-6.3.0rp1/lib/libO/Linux.intel.64.openmpi.default + + -O2 + + $ENV{MPIHOME} + $ENV{NETCDFROOT} + /projects/ccsm/pfunit/3.2.9/mpi-serial + lustre + $ENV{PNETCDFROOT} + + $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -L/projects/ccsm/BLAS-intel -lblas_LINUX -L$ENV{MKL_LIBS} -lmkl_rt + -mkl=cluster + -mkl + - - - mpixlf2003_r - mpixlf2003_r - mpixlc_r - mpixlc_r - - - mpixlf77_r - /soft/libraries/netcdf/4.3.3-f4.4.1/cnk-xl/current/ - /soft/libraries/pnetcdf/1.6.0/cnk-xl/current/ - /soft/libraries/hdf5/1.8.14/cnk-xl/current/ - -L$(NETCDF_PATH)/lib -lnetcdff -lnetcdf -L$(HDF5_PATH)/lib -lhdf5_hl -lhdf5 -L/soft/libraries/alcf/current/xl/ZLIB/lib -lz -L/soft/libraries/alcf/current/xl/LAPACK/lib -llapack -L/soft/libraries/alcf/current/xl/BLAS/lib -lblas -L/bgsys/drivers/ppcfloor/comm/sys/lib - -L$(IBM_MAIN_DIR)/xlf/bg/14.1/bglib64 -lxlfmath -lxlf90_r -lxlopt -lxl -L$(IBM_MAIN_DIR)/xlsmp/bg/3.1/bglib64 -lxlsmp - gpfs - /soft/libraries/petsc/3.5.3.1 - TRUE - CXX - -llapack -lblas -L$(IBM_MAIN_DIR)/xlf/bg/14.1/bglib64 -lxlfmath -lxlf90_r -lxlopt -lxl -L$(IBM_MAIN_DIR)/xlsmp/bg/3.1/bglib64 -lxlsmp - 
-DMPASLI_EXTERNAL_INTERFACE_DISABLE_MANGLING - /soft/compilers/bgclang/mpi/bgclang/bin/mpic++11 - /projects/ccsm/libs/AlbanyTrilinos/Albany/build/install + + + -std=c99 + + + --host=Linux + + + -DLINUX -DCPRINTEL + + + -O2 -debug minimal + -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv + + $ENV{NETCDF_PATH} + lustre + $ENV{PNETCDFROOT} + + -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -lpmi -L$ENV{MKLROOT} -lmkl_rt + + + + + + -O2 + + + --host=Linux + + + -DLINUX + + + -O2 + + $ENV{NETCDF_LIB}/.. + lustre + + -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -lpmi + + + + + + + -qopenmp + -xCORE-AVX2 + + + --host=Linux + + + -DLINUX + -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY + -DARCH_MIC_KNL + + + -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model consistent -fimf-use-svml + -O2 -debug minimal -qno-opt-dynamic-align + -qopenmp + + + -xCORE-AVX2 + + + -qopenmp + + $ENV{TACC_HDF5_DIR} + + -qopenmp + + mpicc + mpicxx + mpif90 + impi + $ENV{TACC_NETCDF_DIR} + $ENV{TACC_NETCDF_DIR} + $ENV{PETSC_DIR} + $ENV{TACC_PNETCDF_DIR} + icc + icpc + ifort + + -L$NETCDF_PATH -lnetcdff -Wl,--as-needed,-L$NETCDF_PATH/lib -lnetcdff -lnetcdf + -L$NETCDF_PATH -lnetcdff -Wl,--as-needed,-L$NETCDF_PATH/lib -lnetcdff -lnetcdf + -mkl -lpthread + + + + + + -O2 + + + --host=Linux + + + -O2 + + + -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{NETLIB_LAPACK_PATH}/lib -llapack + -L$PNETCDF_PATH/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 + + gpfs - - xlf_r - xlc_r - mpixlf + + + --host=Linux + + + -DLINUX + + + -qzerosize -qfree=f90 -qxlf2003=polymorphic + -qspillsize=2500 -qextname=flush + + + -O0 -g -qfree=f90 + + + -lxlopt -lxl -lxlsmp -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{NETLIB_LAPACK_PATH}/lib -llapack + -L$PNETCDF_PATH/lib 
-lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 + -Wl,--relax -Wl,--allow-multiple-definition + mpixlc mpixlC - --host=Linux + mpixlf gpfs - -qzerosize -qfree=f90 -qxlf2003=polymorphic - -O0 -g -qfree=f90 - -lxlopt -lxl -lxlsmp -L$(NETCDF_C_PATH)/lib -lnetcdf -L$(NETCDF_FORTRAN_PATH)/lib -lnetcdff -L$(ESSL_PATH)/lib64 -lessl -L$(NETLIB_LAPACK_PATH)/lib -llapack - -L$(PNETCDF_PATH)/lib -lpnetcdf -L$(HDF5_PATH)/lib -lhdf5_hl -lhdf5 - -qspillsize=2500 -qextname=flush - -DLINUX - -Wl,--relax -Wl,--allow-multiple-definition + xlc_r + xlf_r - - pgfortran - pgcc - mpif90 + + + -O2 + + + --host=Linux + + + -O2 -DSUMMITDEV_PGI + + + -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{NETLIB_LAPACK_PATH}/lib -llapack + -L$PNETCDF_PATH/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 + mpicc mpiCC - --host=Linux + mpif90 gpfs - -O2 - -O2 -DSUMMITDEV_PGI - -L$(NETCDF_C_PATH)/lib -lnetcdf -L$(NETCDF_FORTRAN_PATH)/lib -lnetcdff -L$(ESSL_PATH)/lib64 -lessl -L$(NETLIB_LAPACK_PATH)/lib -llapack - -L$(PNETCDF_PATH)/lib -lpnetcdf -L$(HDF5_PATH)/lib -lhdf5_hl -lhdf5 + pgcc + pgfortran - - pgfortran - pgcc - mpif90 + + + -O2 + + + --host=Linux + + + -O2 -DSUMMITDEV_PGI + + + -ta=tesla:cc70,pinned + -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{NETLIB_LAPACK_PATH}/lib -llapack + -L$PNETCDF_PATH/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 + mpicc mpiCC - --host=Linux - gpfs - -O2 - -O2 -DSUMMITDEV_PGI - -ta=tesla:cc70,pinned - -L$(NETCDF_C_PATH)/lib -lnetcdf -L$(NETCDF_FORTRAN_PATH)/lib -lnetcdff -L$(ESSL_PATH)/lib64 -lessl -L$(NETLIB_LAPACK_PATH)/lib -llapack - -L$(PNETCDF_PATH)/lib -lpnetcdf -L$(HDF5_PATH)/lib -lhdf5_hl -lhdf5 - - - - --host=Linux + mpif90 gpfs - -O2 - -O2 - -L$(NETCDF_C_PATH)/lib -lnetcdf -L$(NETCDF_FORTRAN_PATH)/lib -lnetcdff -L$(ESSL_PATH)/lib64 -lessl -L$(NETLIB_LAPACK_PATH)/lib -llapack - -L$(PNETCDF_PATH)/lib -lpnetcdf 
-L$(HDF5_PATH)/lib -lhdf5_hl -lhdf5 + pgcc + pgfortran - - xlf_r - xlc_r - mpixlf + + + --host=Linux + + + -qzerosize -qfree=f90 -qxlf2003=polymorphic + + + -O0 -g -qfree=f90 + + + -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$PNETCDF_PATH/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{NETLIB_LAPACK_PATH}/lib64 -llapack + mpixlc mpixlC - --host=Linux + mpixlf lustre - -qzerosize -qfree=f90 -qxlf2003=polymorphic - -O0 -g -qfree=f90 - -L$(NETCDF_C_PATH)/lib -lnetcdf -L$(NETCDF_FORTRAN_PATH)/lib -lnetcdff -L$(PNETCDF_PATH)/lib -lpnetcdf -L$(HDF5_PATH)/lib -lhdf5_hl -lhdf5 -L$(ESSL_PATH)/lib64 -lessl -L$(NETLIB_LAPACK_PATH)/lib64 -llapack + xlc_r + xlf_r - - pgfortran - pgcc - mpif90 + + + -O2 + + + --host=Linux + + + -O2 -DSUMMITDEV_PGI + + + -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$PNETCDF_PATH/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{NETLIB_LAPACK_PATH}/lib -llapack + mpicc mpiCC - --host=Linux + mpif90 lustre - -O2 - -O2 -DSUMMITDEV_PGI - -L$(NETCDF_C_PATH)/lib -lnetcdf -L$(NETCDF_FORTRAN_PATH)/lib -lnetcdff -L$(PNETCDF_PATH)/lib -lpnetcdf -L$(HDF5_PATH)/lib -lhdf5_hl -lhdf5 -L$(ESSL_PATH)/lib64 -lessl -L$(NETLIB_LAPACK_PATH)/lib -llapack + pgcc + pgfortran - - pgfortran - pgcc - mpif90 + + + -O2 + + + --host=Linux + + + -O2 -DSUMMITDEV_PGI + + + + -ta=tesla:cc60,cuda8.0,pinned + -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$PNETCDF_PATH/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{NETLIB_LAPACK_PATH}/lib -llapack + mpicc mpiCC - --host=Linux + mpif90 lustre - -O2 - -O2 -DSUMMITDEV_PGI - - -ta=tesla:cc60,cuda8.0,pinned - -L$(NETCDF_C_PATH)/lib -lnetcdf -L$(NETCDF_FORTRAN_PATH)/lib -lnetcdff -L$(PNETCDF_PATH)/lib -lpnetcdf -L$(HDF5_PATH)/lib -lhdf5_hl -lhdf5 -L$(ESSL_PATH)/lib64 -lessl -L$(NETLIB_LAPACK_PATH)/lib -llapack - 
- - - mpixlf2003_r - mpixlf2003_r - mpixlc_r - mpixlc_r - mpixlf77_r - /soft/libraries/netcdf/4.3.3-f4.4.1/cnk-xl/current/ - /soft/libraries/pnetcdf/1.6.0/cnk-xl/current/ - /soft/libraries/hdf5/1.8.14/cnk-xl/current/ - -L$(NETCDF_PATH)/lib -lnetcdff -lnetcdf -L$(HDF5_PATH)/lib -lhdf5_hl -lhdf5 -L/soft/libraries/alcf/current/xl/ZLIB/lib -lz -L/soft/libraries/alcf/current/xl/LAPACK/lib -llapack -L/soft/libraries/alcf/current/xl/BLAS/lib -lblas -L/bgsys/drivers/ppcfloor/comm/sys/lib - -L$(IBM_MAIN_DIR)/xlf/bg/14.1/bglib64 -lxlfmath -lxlf90_r -lxlopt -lxl -L$(IBM_MAIN_DIR)/xlsmp/bg/3.1/bglib64 -lxlsmp - gpfs - /soft/libraries/petsc/3.5.3.1 - TRUE - CXX - -llapack -lblas -L$(IBM_MAIN_DIR)/xlf/bg/14.1/bglib64 -lxlfmath -lxlf90_r -lxlopt -lxl -L$(IBM_MAIN_DIR)/xlsmp/bg/3.1/bglib64 -lxlsmp - -DMPASLI_EXTERNAL_INTERFACE_DISABLE_MANGLING - /soft/compilers/bgclang/mpi/bgclang/bin/mpic++11 - /projects/ccsm/libs/AlbanyTrilinos/Albany/build/install + pgcc + pgfortran - - ifort + + + -O2 + + + --host=Linux + + + -DNO_SHR_VMATH -DCNL + + + -O2 + -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv + + + -llapack -lblas + + mpich + /usr/local/tools/mvapich2-intel-2.1/ + /usr/local/tools/netcdf-intel-4.1.3 + + $SHELL{/usr/local/tools/netcdf-intel-4.1.3/bin/nc-config --flibs} + + + + + + -DNO_SHR_VMATH -DCNL + + + -Wl,-rpath /usr/local/tools/netcdf-pgi-4.1.3/lib -llapack -lblas + + mpich + /usr/local/tools/mvapich2-pgi-1.7/ + /usr/local/tools/netcdf-pgi-4.1.3 + + $SHELL{/usr/local/tools/netcdf-pgi-4.1.3/bin/nc-config --flibs} + + + + + + --host=Linux + + + -O2 + + + -O2 + + + $SHELL{nf-config --flibs} + + + + + + -qopenmp + + + --host=Linux + + + -DARCH_MIC_KNL + + + -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model consistent + -O2 -debug minimal -qno-opt-dynamic-align -fp-speculation=off + -qopenmp + + + -qopenmp + + + -qopenmp + icc icpc - -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model consistent 
- -O2 -debug minimal -qno-opt-dynamic-align -fp-speculation=off - -qopenmp - -qopenmp - -qopenmp - -qopenmp - --host=Linux - -L$(NETCDF_DIR)/lib -lnetcdff -L$(NETCDF_DIR)/lib -lnetcdf -Wl,-rpath -Wl,$(NETCDF_DIR)/lib - -mkl -lpthread - -DARCH_MIC_KNL - - - - $(shell nf-config --flibs) - --host=Linux - -O2 - -O2 - - - - $(PNETCDFROOT) - $(NETCDFROOT) - /soft/openmpi/1.8.2/pgi-13.9 - /soft/mpich2/1.4.1-pgi-13.9/ - mpi - mpi - mpich - $(shell $(NETCDF_PATH)/bin/nf-config --flibs) -llapack -lblas - -rpath $(NETCDFROOT)/lib - gpfs - - - - $(PNETCDFROOT) - $(NETCDFROOT) - /soft/openmpi/1.8.2/intel-13.1 - /soft/mpich2/1.4.1-intel-13.1 - mpi - mpich - $(shell $(NETCDF_PATH)/bin/nf-config --flibs) -llapack -lblas - gpfs - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl - - - - $(PNETCDFROOT) - $(NETCDFROOT) - /soft/mvapich2/2.2b_psm/intel-15.0 - mpi - $(shell $(NETCDF_PATH)/bin/nf-config --flibs) -llapack -lblas - -Wl,-rpath -Wl,$(NETCDFROOT)/lib - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl - gpfs - - - - $(PNETCDFROOT) - $(NETCDFROOT) - /blues/gpfs/home/software/spack/opt/spack/linux-x86_64/gcc-5.3.0/mvapich2-2.2b-sdh7nhddicl4sh5mgxjyzxtxox3ajqey - mpi - $(shell $(NETCDF_PATH)/bin/nf-config --flibs) -llapack -lblas - -DHAVE_NANOTIME -DBIT64 -DHAVE_SLASHPROC -DHAVE_GETTIMEOFDAY - gpfs - -lstdc++ - /soft/climate/AlbanyTrilinos_06262017/Albany/build/install - - - - $(PNETCDFROOT) - $(NETCDFROOT) - /home/robl/soft/mpich-3.1.4-nag-6.0 - mpi - $(shell $(NETCDF_PATH)/bin/nf-config --flibs) $(shell $(NETCDF_PATH)/bin/nc-config --libs) -llapack -lblas - gpfs - - - - -O2 -debug minimal -qno-opt-dynamic-align - -qopenmp -static-intel - -qopenmp -static-intel - -qopenmp -static-intel - -qopenmp -static-intel - -heap-arrays - -heap-arrays - $(shell $(NETCDF_PATH)/bin/nf-config --flibs) -Wl,-rpath -Wl,$(NETCDF_PATH)/lib - -mkl - -DHAVE_SLASHPROC - gpfs - - - - $(shell 
$(NETCDF_PATH)/bin/nf-config --flibs) -llapack -lblas - -DHAVE_NANOTIME -DBIT64 -DHAVE_SLASHPROC -DHAVE_GETTIMEOFDAY - gpfs - - - - $(shell $(NETCDF_PATH)/bin/nf-config --flibs) -llapack -lblas - -rpath $(NETCDF_PATH)/lib - gpfs - - - - -O2 -debug minimal -qno-opt-dynamic-align - -qopenmp - -qopenmp - -qopenmp - -qopenmp - $(shell nf-config --flibs) -mkl - -DHAVE_SLASHPROC - mpiifort - mpiicc - mpiicpc - -lstdc++ - /soft/climate/AlbanyTrilinos_06262017/Albany/buildintel/install - - - - -O2 - -O2 - $(NETCDF_HOME) - --host=Linux - lustre - -DLINUX - -L$(NETCDF_PATH)/lib -lnetcdf -lnetcdff -lpmi - - - - -O2 - -O2 - $(NETCDF_LIB)/.. - --host=Linux - lustre - -DLINUX - -L$(NETCDF_PATH)/lib -lnetcdf -lnetcdff -lpmi - - - - -O2 - -O2 - $(NETCDF_LIB)/.. - --host=Linux - lustre - -DLINUX - -L$(NETCDF_PATH)/lib -lnetcdf -lnetcdff -lpmi - - - - - -O2 - -O2 - $(NETCDF_LIB)/.. - --host=Linux - lustre - -DLINUX - -L$(NETCDF_PATH)/lib -lnetcdf -lnetcdff -lpmi - - - - -std=c99 - --host=Linux - -DLINUX -DCPRINTEL - -O2 -debug minimal - -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv - $(NETCDF_PATH) - lustre - $(PNETCDFROOT) - -L$(NETCDF_PATH)/lib -lnetcdf -lnetcdff -lpmi -L$(MKLROOT) -lmkl_rt - - - - $(NETCDF_HOME) - $(PNETCDFROOT) - lustre - --host=Linux --enable-filesystem-hints=lustre - -DLINUX - -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv - -L$(NETCDF_PATH)/lib -lnetcdf -lnetcdff -lpmi -L$(MKL_PATH)/lib/intel64 -lmkl_rt - -mkl=cluster - -mkl - - - - $(NETCDF_ROOT) - $(PNETCDFROOT) - lustre - $(MPI_LIB) - -DnoI8 - - - -C=all -g -O0 -v - -C=all -g -nan -O0 -v - -L$(NETCDF_ROOT)/lib -lnetcdf -lnetcdff -L$(MKL_PATH) -lmkl_rt - - - - -O2 - -O2 - -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv - $(NETCDF_HOME) - $(PNETCDFROOT) - --host=Linux + ifort + + -L$ENV{NETCDF_DIR}/lib -lnetcdff -L$ENV{NETCDF_DIR}/lib -lnetcdf -Wl,-rpath -Wl,$ENV{NETCDF_DIR}/lib + -mkl -lpthread + + + + + + -O2 + + + --host=Linux + + + -O2 
+ lustre - -DLINUX - -C -Mbounds -traceback -Mchkfpstk -Mchkstk -Mdalign -Mdepchk -Mextend -Miomutex -Mrecursive -Ktrap=fp -O0 -g -byteswapio -Meh_frame - -L$(NETCDF_PATH)/lib -lnetcdf -lnetcdff -lpmi -L$(MPI_LIB) -lmpich + + $SHELL{nf-config --flibs} -mkl=cluster + $SHELL{nf-config --flibs} -mkl=cluster + $SHELL{nf-config --flibs} -mkl=cluster + $SHELL{nf-config --flibs} -mkl=cluster + $SHELL{nf-config --flibs} -mkl=cluster + $SHELL{nf-config --flibs} -mkl=cluster + + -L/opt/cray/netcdf/4.4.1.1.3/INTEL/16.0/lib -lnetcdff -L/opt/cray/hdf5/1.10.0.3/GNU/4.9/lib -lnetcdf -mkl + + TRUE - - -O2 - -O2 - $(NETCDF_HOME) - $(PNETCDFROOT) - --host=Linux + + /ccs/proj/cli106/AlbanyTrilinos/Albany/build/install + + -O2 + + + --host=Linux + + + -lfmpich -lmpichf90_pgi $ENV{PGI_PATH}/linux86-64/$ENV{PGI_VERSION}/lib/f90main.o /opt/gcc/default/snos/lib64/libstdc++.a + + + -O2 + -target-cpu=istanbul + + cc + CC + ftn lustre - -DLINUX - -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv - -L$(NETCDF_PATH)/lib -lnetcdf -lnetcdff -lpmi -L$(MKL_PATH) -lmkl_rt + + $SHELL{nf-config --flibs} + $SHELL{nf-config --flibs} + $SHELL{nf-config --flibs} + $SHELL{nf-config --flibs} + $SHELL{nf-config --flibs} + $SHELL{nf-config --flibs} + + -L/opt/cray/netcdf/4.4.1.1.3/PGI/15.3/lib -lnetcdff -L/opt/cray/hdf5/1.10.0.3/GNU/4.9/lib -lnetcdf + + TRUE - - -O2 -kind=byte - -O2 -kind=byte - $(NETCDF_HOME) - $(PNETCDFROOT) - --host=Linux + + + -O2 + + + --host=Linux + + + -lfmpich -lmpichf90_pgi $ENV{PGI_PATH}/linux86-64/$ENV{PGI_VERSION}/lib/f90main.o + + + -O2 + + + -ta=nvidia,cc35,cuda7.5 + lustre - -DLINUX - -C=all -g -O0 -v - -L$(NETCDF_PATH)/lib -lnetcdf -lnetcdff -lpmi -L$(MKL_PATH) -lmkl_rt - - - - - lustre - mpicc - mpif90 - mpic++ - gfortran - gcc - g++ - $(shell $(NETCDF_PATH)/bin/nf-config --flibs) -llapack -lblas - ${MKLROOT}/lib/intel64/libmkl_scalapack_lp64.a -Wl,--start-group ${MKLROOT}/lib/intel64/libmkl_intel_lp64.a ${MKLROOT}/lib/intel64/libmkl_core.a 
${MKLROOT}/lib/intel64/libmkl_sequential.a -Wl,--end-group ${MKLROOT}/lib/intel64/libmkl_blacs_intelmpi_lp64.a -lpthread -lm -z muldefs - -lstdc++ -lmpi_cxx + + $SHELL{$NETCDF_PATH/bin/nf-config --flibs} + + TRUE - - lustre - mpicc - mpif90 - mpic++ - ifort - icc - icpc - $(shell $(NETCDF_PATH)/bin/nf-config --flibs) -llapack -lblas - -lstdc++ -lmpi_cxx + + + + + + + + + + + USERDEFINED_MUST_EDIT_THIS + + + # USERDEFINED $SHELL{$NETCDF_PATH/bin/nf-config --flibs} + - - mpicc - mpif90 - mpic++ - pgfortran - pgcc - pgc++ - $(shell $(NETCDF_PATH)/bin/nf-config --flibs) -llapack -lblas - $(TRILINOS_PATH) + + $ENV{ALBANY_PATH} + + -lstdc++ -lmpi_cxx + + mpicc + mpic++ + mpif90 + gcc + g++ + gfortran + + $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -llapack -lblas + + $ENV{TRILINOS_PATH} - mpicc - mpif90 - mpic++ - ifort - icc - icpc - $(shell $(NETCDF_PATH)/bin/nf-config --flibs) -llapack -lblas - $(TRILINOS_PATH) - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl + mpicc + mpic++ + mpif90 + icc + icpc + ifort + + $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -llapack -lblas + -mkl=cluster + -mkl=cluster + -mkl=cluster + -mkl=cluster + -mkl=cluster + -mkl=cluster + -mkl + + $ENV{TRILINOS_PATH} - - mpicc - mpif90 - mpic++ - gfortran - gcc - g++ - $(shell $(NETCDF_PATH)/bin/nf-config --flibs) -llapack -lblas - -lstdc++ -lmpi_cxx - $(TRILINOS_PATH) - $(ALBANY_PATH) - - - - $(NETCDF_DIR) - -lnetcdff -lnetcdf -mkl - -DHAVE_VPRINTF -DHAVE_GETTIMEOFDAY - /global/software/sl-6.x86_64/modules/intel/2016.1.150/lapack/3.6.0-intel/lib - - - - $(NETCDF_DIR) - -lnetcdff -lnetcdf -mkl - -DHAVE_VPRINTF -DHAVE_GETTIMEOFDAY - /global/software/sl-6.x86_64/modules/intel/2016.1.150/lapack/3.6.0-intel/lib - - - - - -DFORTRANUNDERSCORE -DNO_R16 - -openmp - -openmp - -openmp - -free - -fixed -132 - -O0 -g -check uninit -check bounds -check pointers -fpe0 - -O2 - -fp-model source -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs 
-I/soft/i -ntel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include - -O2 -fp-model precise -I/soft/intel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/inc -lude - -O0 - -r8 - ifort icc icpc - mpiifort - mpiicc - mpiicpc - FORTRAN - -cxxlib - -DCPRINTEL - TRUE - -lnetcdff - -L/soft/netcdf/fortran-4.4-intel-sp1-update3-parallel/lib -lnetcdff -L/soft/hdf5/hdf5-1.8.13-intel-2013-sp1-update3-impi-5.0.0.028/lib -openmp -fPIC -lnetcdf -lnetcdf -L/soft/intel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/lib/intel64 -lmkl_intel_lp64 -lmkl_core -lmkl_intel_thread -lpthread -lm - - - - -DFORTRANUNDERSCORE -DNO_R16 - -openmp - -openmp - -openmp - -free - -fixed -132 - -O0 -g -check uninit -check bounds -check pointers -fpe0 - -O2 - -fp-model source -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -I/soft/intel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include - -O2 -fp-model precise -I/soft/intel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include - -O0 - -r8 - ifort - icc - icpc - mpiifort - mpiicc - mpiicpc - FORTRAN - -cxxlib - -DCPRINTEL - TRUE - -lnetcdff - -L/soft/netcdf/fortran-4.4-intel-sp1-update3-parallel/lib -lnetcdff -L/soft/hdf5/hdf5-1.8.13-intel-2013-sp1-update3-impi-5.0.0.028/lib -openmp -fPIC -lnetcdf -lnetcdf -L/soft/intel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/lib/intel64 -lmkl_intel_lp64 -lmkl_core -lmkl_intel_thread -lpthread -lm - - - - -DHAVE_VPRINTF -DHAVE_GETTIMEOFDAY -DHAVE_BACKTRACE - -L$(NETCDF_HOME)/lib/ -lnetcdff -lnetcdf -lcurl -llapack -lblas - $(NETCDF_HOME) - - - - - -std=c99 + + mpicc + mpic++ + mpif90 + pgcc + pgc++ + pgfortran + + $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -llapack -lblas + + $ENV{TRILINOS_PATH} From e2ba026ba9d56a5caf9a852bfe170ae3fb916f37 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Mon, 11 Jun 2018 11:34:32 -0600 Subject: [PATCH 12/59] Remove pop support --- 
config/e3sm/machines/config_compilers.xml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/config/e3sm/machines/config_compilers.xml b/config/e3sm/machines/config_compilers.xml index 92a6bbcbf4b..abb90634d8d 100644 --- a/config/e3sm/machines/config_compilers.xml +++ b/config/e3sm/machines/config_compilers.xml @@ -77,9 +77,6 @@ for mct, etc. - - -D_USE_FLOW_CONTROL - FALSE @@ -90,7 +87,6 @@ for mct, etc. -DFORTRANUNDERSCORE -DNO_R16 -DCPRCRAY - -DDIR=NOOP -DDIR=NOOP @@ -246,7 +242,6 @@ for mct, etc. -qsmp=omp -qsuppress=1520-045 -qsmp=omp:noopt -qsuppress=1520-045 -qinitauto=7FF7FFFF -qflttrap=ov:zero:inv:en - -C -qsuffix=f=f -qfixed=132 From 7953a5313a4c53e166a87ed2b512637da216d9bb Mon Sep 17 00:00:00 2001 From: James Foucar Date: Mon, 11 Jun 2018 15:30:06 -0600 Subject: [PATCH 13/59] Cannot use $NETCDF_PATH unless it is defined in the block Use ENV to retrieve it from the environment. --- config/e3sm/machines/config_compilers.xml | 30 +++++++++++------------ 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/config/e3sm/machines/config_compilers.xml b/config/e3sm/machines/config_compilers.xml index abb90634d8d..38420533f1d 100644 --- a/config/e3sm/machines/config_compilers.xml +++ b/config/e3sm/machines/config_compilers.xml @@ -348,7 +348,7 @@ for mct, etc. icpc ifort - $SHELL{$NETCDF_PATH/bin/nf-config --flibs} + $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} TRUE @@ -400,7 +400,7 @@ for mct, etc. icpc ifort - $SHELL{$NETCDF_PATH/bin/nf-config --flibs} + $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} TRUE @@ -452,7 +452,7 @@ for mct, etc. icpc ifort - $SHELL{$NETCDF_PATH/bin/nf-config --flibs} + $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} TRUE @@ -858,7 +858,7 @@ for mct, etc. -O2 - $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -lblas -llapack + $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} -lblas -llapack @@ -868,7 +868,7 @@ for mct, etc. 
gpfs - $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -llapack -lblas + $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} -llapack -lblas @@ -893,7 +893,7 @@ for mct, etc. gpfs - $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -Wl,-rpath -Wl,$NETCDF_PATH/lib + $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} -Wl,-rpath -Wl,$ENV{NETCDF_PATH}/lib -mkl @@ -901,8 +901,8 @@ for mct, etc. gpfs - $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -llapack -lblas - -rpath $NETCDF_PATH/lib + $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} -llapack -lblas + -rpath $ENV{NETCDF_PATH}/lib @@ -1092,6 +1092,7 @@ for mct, etc. -ffree-form /software/dev_tools/swtree/cs400_centos7.2_pe2016-08/hdf5-parallel/1.8.17/centos7.2_gnu5.3.0 + /software/dev_tools/swtree/cs400_centos7.2_pe2016-08/netcdf-hdf5parallel/4.3.3.1/centos7.2_gnu5.3.0 /software/tools/compilers/intel_2017/mkl/lib/intel64 -fopenmp @@ -1101,7 +1102,6 @@ for mct, etc. mpicc mpic++ mpif90 - /software/dev_tools/swtree/cs400_centos7.2_pe2016-08/netcdf-hdf5parallel/4.3.3.1/centos7.2_gnu5.3.0 gcc gcpp gfortran @@ -1485,7 +1485,7 @@ for mct, etc. g++ gfortran - $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -llapack -lblas + $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} -llapack -lblas $ENV{MKLROOT}/lib/intel64/libmkl_scalapack_lp64.a -Wl,--start-group $ENV{MKLROOT}/lib/intel64/libmkl_intel_lp64.a $ENV{MKLROOT}/lib/intel64/libmkl_core.a $ENV{MKLROOT}/lib/intel64/libmkl_sequential.a -Wl,--end-group $ENV{MKLROOT}/lib/intel64/libmkl_blacs_intelmpi_lp64.a -lpthread -lm -z muldefs @@ -1502,7 +1502,7 @@ for mct, etc. 
icpc ifort - $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -llapack -lblas + $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} -llapack -lblas @@ -2222,7 +2222,7 @@ ntel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include lustre - $SHELL{$NETCDF_PATH/bin/nf-config --flibs} + $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} TRUE @@ -2256,7 +2256,7 @@ ntel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include g++ gfortran - $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -llapack -lblas + $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} -llapack -lblas $ENV{TRILINOS_PATH} @@ -2269,7 +2269,7 @@ ntel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include icpc ifort - $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -llapack -lblas + $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} -llapack -lblas -mkl=cluster -mkl=cluster -mkl=cluster @@ -2289,7 +2289,7 @@ ntel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include pgc++ pgfortran - $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -llapack -lblas + $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} -llapack -lblas $ENV{TRILINOS_PATH} From 6a85d5c6184cf8736d12fb7563145a265c89b6d0 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Wed, 13 Jun 2018 10:59:51 -0600 Subject: [PATCH 14/59] Fix up titan --- config/e3sm/machines/config_compilers.xml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/config/e3sm/machines/config_compilers.xml b/config/e3sm/machines/config_compilers.xml index 38420533f1d..b7dd0ea3963 100644 --- a/config/e3sm/machines/config_compilers.xml +++ b/config/e3sm/machines/config_compilers.xml @@ -2158,7 +2158,6 @@ ntel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include -O2 - lustre $SHELL{nf-config --flibs} -mkl=cluster $SHELL{nf-config --flibs} -mkl=cluster @@ -2187,10 +2186,6 @@ ntel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include -O2 -target-cpu=istanbul - cc - CC - ftn - lustre $SHELL{nf-config --flibs} $SHELL{nf-config --flibs} @@ 
-2220,7 +2215,6 @@ ntel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include -ta=nvidia,cc35,cuda7.5 - lustre $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} From c1630086069b93f78946e3ec946cd1331668da29 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Wed, 13 Jun 2018 15:21:01 -0400 Subject: [PATCH 15/59] Fixups for PNETCDF --- config/e3sm/machines/config_compilers.xml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/config/e3sm/machines/config_compilers.xml b/config/e3sm/machines/config_compilers.xml index b7dd0ea3963..37095b61926 100644 --- a/config/e3sm/machines/config_compilers.xml +++ b/config/e3sm/machines/config_compilers.xml @@ -1921,7 +1921,7 @@ ntel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{NETLIB_LAPACK_PATH}/lib -llapack - -L$PNETCDF_PATH/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 + -L$ENV{PNETCDF_PATH}/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 gpfs @@ -1942,7 +1942,7 @@ ntel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include -lxlopt -lxl -lxlsmp -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{NETLIB_LAPACK_PATH}/lib -llapack - -L$PNETCDF_PATH/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 + -L$ENV{PNETCDF_PATH}/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 -Wl,--relax -Wl,--allow-multiple-definition mpixlc @@ -1965,7 +1965,7 @@ ntel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{NETLIB_LAPACK_PATH}/lib -llapack - -L$PNETCDF_PATH/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 + -L$ENV{PNETCDF_PATH}/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 mpicc mpiCC @@ -1988,7 +1988,7 @@ 
ntel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include -ta=tesla:cc70,pinned -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{NETLIB_LAPACK_PATH}/lib -llapack - -L$PNETCDF_PATH/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 + -L$ENV{PNETCDF_PATH}/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 mpicc mpiCC @@ -2009,7 +2009,7 @@ ntel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include -O0 -g -qfree=f90 - -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$PNETCDF_PATH/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{NETLIB_LAPACK_PATH}/lib64 -llapack + -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{PNETCDF_PATH}/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{NETLIB_LAPACK_PATH}/lib64 -llapack mpixlc mpixlC @@ -2030,7 +2030,7 @@ ntel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include -O2 -DSUMMITDEV_PGI - -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$PNETCDF_PATH/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{NETLIB_LAPACK_PATH}/lib -llapack + -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{PNETCDF_PATH}/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{NETLIB_LAPACK_PATH}/lib -llapack mpicc mpiCC @@ -2053,7 +2053,7 @@ ntel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include -ta=tesla:cc60,cuda8.0,pinned - -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$PNETCDF_PATH/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{NETLIB_LAPACK_PATH}/lib -llapack + -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{PNETCDF_PATH}/lib 
-lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{NETLIB_LAPACK_PATH}/lib -llapack mpicc mpiCC From 35137b3f6c7828959a208a2ea34be6a438718fce Mon Sep 17 00:00:00 2001 From: James Foucar Date: Wed, 13 Jun 2018 16:22:06 -0600 Subject: [PATCH 16/59] Remove dupes --- config/e3sm/machines/config_compilers.xml | 3 --- 1 file changed, 3 deletions(-) diff --git a/config/e3sm/machines/config_compilers.xml b/config/e3sm/machines/config_compilers.xml index 37095b61926..df7543c7727 100644 --- a/config/e3sm/machines/config_compilers.xml +++ b/config/e3sm/machines/config_compilers.xml @@ -1413,11 +1413,8 @@ for mct, etc. $ENV{PETSC_DIR} icc - icc - icpc icpc ifort - ifort -L$ENV{NETCDF_DIR} -lnetcdff -Wl,--as-needed,-L$ENV{NETCDF_DIR}/lib -lnetcdff -lnetcdf -mkl -lpthread From 11ee135d313bca19fd46d09d573fc620daa6210c Mon Sep 17 00:00:00 2001 From: Benjamin Hillman Date: Tue, 3 Jul 2018 16:36:58 -0600 Subject: [PATCH 17/59] Fix domain specification for T42 Fix domain specification for T42 configuration. This previously omitted the ice domain, so when trying to build a T42_T42 grid configuration, the ice domain would remain unset. This is relevant to the single column model, which uses the T42 grid by default. 
--- config/e3sm/config_grids.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/e3sm/config_grids.xml b/config/e3sm/config_grids.xml index b6a5ecb7454..373000f97d1 100644 --- a/config/e3sm/config_grids.xml +++ b/config/e3sm/config_grids.xml @@ -1367,7 +1367,7 @@ 128 64 $DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.T42_USGS.111004.nc - $DIN_LOC_ROOT/atm/cam/ocnfrac/domain.camocn.64x128_USGS_070807.nc + $DIN_LOC_ROOT/atm/cam/ocnfrac/domain.camocn.64x128_USGS_070807.nc T42 is Gaussian grid: From f91303dc9e3291f4f11d79505510f9139a90418e Mon Sep 17 00:00:00 2001 From: noel Date: Tue, 3 Jul 2018 18:11:00 -0700 Subject: [PATCH 18/59] For all 3 NERSC machines (cori-knl, cori-haswell,edison), update the version of netcdf/hdf5. Also turn off file locking HDF5_USE_FILE_LOCKING=FALSE. And turn on logging of MPI rank with compute node, which adds a lot to e3sm.log* --- config/e3sm/machines/config_machines.xml | 29 +++++++++++++----------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/config/e3sm/machines/config_machines.xml b/config/e3sm/machines/config_machines.xml index 65e88b9a67f..6017bc3111c 100644 --- a/config/e3sm/machines/config_machines.xml +++ b/config/e3sm/machines/config_machines.xml @@ -169,12 +169,12 @@ cray-netcdf-hdf5parallel cray-hdf5-parallel cray-parallel-netcdf + cray-netcdf/4.4.1.1.6 cray-hdf5/1.10.1.1 - cray-netcdf/4.4.1.1.3 cray-netcdf-hdf5parallel - cray-netcdf-hdf5parallel/4.4.1.1.3 + cray-netcdf-hdf5parallel/4.4.1.1.6 cray-hdf5-parallel/1.10.1.1 cray-parallel-netcdf/1.8.1.3 @@ -189,7 +189,8 @@ spread threads - 2 + + FALSE yes @@ -304,13 +305,13 @@ cray-netcdf-hdf5parallel cray-hdf5-parallel cray-parallel-netcdf - cray-hdf5/1.10.0.3 - cray-netcdf/4.4.1.1.3 + cray-netcdf/4.4.1.1.6 + cray-hdf5/1.10.1.1 cray-netcdf-hdf5parallel - cray-netcdf-hdf5parallel/4.4.1.1.3 - cray-hdf5-parallel/1.10.0.3 + cray-netcdf-hdf5parallel/4.4.1.1.6 + cray-hdf5-parallel/1.10.1.1 cray-parallel-netcdf/1.8.1.3 @@ -326,11 +327,12 @@ 
1 1 - + 1 128M spread threads + FALSE yes @@ -458,13 +460,13 @@ cray-netcdf-hdf5parallel cray-hdf5-parallel cray-parallel-netcdf - cray-hdf5/1.10.0.3 - cray-netcdf/4.4.1.1.3 + cray-netcdf/4.4.1.1.6 + cray-hdf5/1.10.1.1 cray-netcdf-hdf5parallel - cray-netcdf-hdf5parallel/4.4.1.1.3 - cray-hdf5-parallel/1.10.0.3 + cray-netcdf-hdf5parallel/4.4.1.1.6 + cray-hdf5-parallel/1.10.1.1 cray-parallel-netcdf/1.8.1.3 @@ -481,11 +483,12 @@ 1 1 - + 1 128M spread threads + FALSE From feefcb0c2951f2b7714f1715f1cbf65363455aac Mon Sep 17 00:00:00 2001 From: Jon Wolfe Date: Thu, 5 Jul 2018 16:48:56 -0500 Subject: [PATCH 19/59] CIME scripts changes to support ne4_oQU480 and T62_oQU480 configurations --- config/e3sm/config_grids.xml | 56 +++++++++++++++++++ .../mct/cime_config/config_component_e3sm.xml | 2 + 2 files changed, 58 insertions(+) diff --git a/config/e3sm/config_grids.xml b/config/e3sm/config_grids.xml index b6a5ecb7454..a53b3f1fc89 100644 --- a/config/e3sm/config_grids.xml +++ b/config/e3sm/config_grids.xml @@ -513,6 +513,16 @@ + + ne4np4 + ne4np4 + oQU480 + r05 + null + null + oQU480 + + ne4np4 ne4np4 @@ -991,6 +1001,16 @@ gx1v6 + + T62 + T62 + oQU480 + rx1 + null + null + oQU480 + + T62 T62 @@ -1346,6 +1366,7 @@ $DIN_LOC_ROOT/share/domains/domain.lnd.T62_gx1v6.090320.nc $DIN_LOC_ROOT/share/domains/domain.lnd.T62_gx3v7.090911.nc $DIN_LOC_ROOT/share/domains/domain.lnd.T62_mpasgx1.150903.nc + $DIN_LOC_ROOT/share/domains/domain.lnd.T62_oQU480.151209.nc $DIN_LOC_ROOT/share/domains/domain.lnd.T62_oQU240.151209.nc $DIN_LOC_ROOT/share/domains/domain.lnd.T62_oQU240wLI_mask.160929.nc $DIN_LOC_ROOT/share/domains/domain.lnd.T62_oQU120.151209.nc @@ -1380,7 +1401,9 @@ 866 1 + $DIN_LOC_ROOT/share/domains/domain.lnd.ne4np4_oQU480.180702.nc $DIN_LOC_ROOT/share/domains/domain.lnd.ne4np4_oQU240.160614.nc + $DIN_LOC_ROOT/share/domains/domain.ocn.ne4np4_oQU480.180702.nc $DIN_LOC_ROOT/share/domains/domain.ocn.ne4np4_oQU240.160614.nc ne4np4 is Spectral Elem 7.5-deg grid: @@ -1694,6 +1717,13 @@ 
tx0.1v2 is an old mask used for CONUS: + + 1791 + 1 + $DIN_LOC_ROOT/share/domains/domain.ocn.oQU480.151209.nc + oQU480 is an MPAS ocean mesh with quasi-uniform 480 km grid cells, nominally 4 degree resolution: + + 7153 1 @@ -1819,6 +1849,14 @@ cpl/gridmaps/gx3v7/ + + cpl/gridmaps/ne4np4/map_ne4np4_to_oQU480_aave.180702.nc + cpl/gridmaps/ne4np4/map_ne4np4_to_oQU480_conserve.180702.nc + cpl/gridmaps/ne4np4/map_ne4np4_to_oQU480_conserve.180702.nc + cpl/gridmaps/oQU480/map_oQU480_to_ne4np4_aave.180702.nc + cpl/gridmaps/oQU480/map_oQU480_to_ne4np4_aave.180702.nc + + cpl/gridmaps/ne4np4/map_ne4np4_to_oQU240_aave.160614.nc cpl/gridmaps/ne4np4/map_ne4np4_to_oQU240_aave.160614.nc @@ -2036,6 +2074,14 @@ cpl/gridmaps/mpasgx1/map_mpasgx1_TO_T62_aave.150827.nc + + cpl/gridmaps/T62/map_T62_TO_oQU480_aave.151209.nc + cpl/gridmaps/T62/map_T62_TO_oQU480_patc.151209.nc + cpl/gridmaps/T62/map_T62_TO_oQU480_blin.151209.nc + cpl/gridmaps/oQU480/map_oQU480_TO_T62_aave.151209.nc + cpl/gridmaps/oQU480/map_oQU480_TO_T62_aave.151209.nc + + cpl/gridmaps/T62/map_T62_TO_oQU240_aave.151209.nc cpl/gridmaps/T62/map_T62_TO_oQU240_patc.151209.nc @@ -2385,6 +2431,11 @@ + + cpl/cpl6/map_rx1_to_oQU480_nn_151209.nc + cpl/cpl6/map_rx1_to_oQU480_nn_151209.nc + + cpl/cpl6/map_rx1_to_oQU240_nn.160527.nc cpl/cpl6/map_rx1_to_oQU240_nn.160527.nc @@ -2455,6 +2506,11 @@ cpl/cpl6/map_rx1_to_oRRS15to5_nn.160527.nc + + cpl/cpl6/map_r05_to_oQU480_nn.180702.nc + cpl/cpl6/map_r05_to_oQU480_nn.180702.nc + + cpl/cpl6/map_r05_to_oQU240_nn.160714.nc cpl/cpl6/map_r05_to_oQU240_nn.160714.nc diff --git a/src/drivers/mct/cime_config/config_component_e3sm.xml b/src/drivers/mct/cime_config/config_component_e3sm.xml index 8bcd2964082..03eec9f9948 100644 --- a/src/drivers/mct/cime_config/config_component_e3sm.xml +++ b/src/drivers/mct/cime_config/config_component_e3sm.xml @@ -279,6 +279,7 @@ 1 1 24 + 12 12 12 24 @@ -376,6 +377,7 @@ 1 1 24 + 6 12 12 24 From e9906475c6d525ef5a2877159101c8217ce3b7a0 Mon Sep 17 00:00:00 2001 From: 
Gautam Bisht Date: Mon, 9 Jul 2018 11:07:12 -0700 Subject: [PATCH 20/59] Adds a SMS land test for a BGC compset An integration test is added for BGCEXP_BCRC_CNPECACNT_1850 compset --- config/e3sm/tests.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/config/e3sm/tests.py b/config/e3sm/tests.py index 9c404b02ad9..dd5161168b1 100644 --- a/config/e3sm/tests.py +++ b/config/e3sm/tests.py @@ -86,7 +86,8 @@ "PEM_Ln9.ne30_oECv3_ICG.A_WCYCL1850S", "ERP_Ld3.ne30_oECv3_ICG.A_WCYCL1850S", "SMS.f09_g16_a.MALI", - "SMS_D_Ln5.conusx4v1_conusx4v1.FC5AV1C-L", + "SMS_D_Ln6.conusx4v1_conusx4v1.FC5AV1C-L", + ("SMS.ne30_oECv3.BGCEXP_BCRC_CNPECACNT_1850","clm-bgcexp"), ("SMS.ne30_oECv3.BGCEXP_BCRC_CNPRDCTC_1850","clm-bgcexp")) ), #e3sm tests for extra coverage From 1a0417d609421a206003950fbf8a562e3eda907a Mon Sep 17 00:00:00 2001 From: James Foucar Date: Mon, 9 Jul 2018 14:17:37 -0700 Subject: [PATCH 21/59] Fix qopenmp issues, remove old intel specs --- config/e3sm/machines/config_compilers.xml | 224 +--------------------- 1 file changed, 8 insertions(+), 216 deletions(-) diff --git a/config/e3sm/machines/config_compilers.xml b/config/e3sm/machines/config_compilers.xml index df7543c7727..ed7fa654891 100644 --- a/config/e3sm/machines/config_compilers.xml +++ b/config/e3sm/machines/config_compilers.xml @@ -255,7 +255,7 @@ for mct, etc. -O2 -fp-model precise -std=gnu99 - -openmp + -qopenmp -O2 -debug minimal -O0 -g @@ -272,7 +272,7 @@ for mct, etc. -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model source - -openmp + -qopenmp - -DFORTRANUNDERSCORE -DNO_R16 -DCPRINTEL - -DCPRINTEL - - - -cxxlib - - FORTRAN - - -r8 - - - -fp-model source -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs - -openmp - -O0 -g -check uninit -check bounds -check pointers -fpe0 - -O2 - - - -O0 + -qopenmp -fixed -132 @@ -339,7 +294,7 @@ for mct, etc. TRUE - -openmp + -qopenmp mpicc mpicxx @@ -347,113 +302,6 @@ for mct, etc. 
icc icpc ifort - - $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} - - TRUE - - - - - -mmic -O2 -fp-model precise -DFORTRANUNDERSCOR - -openmp - - - --host=x86_64-k1om-linux --build=x86_64-unknown-linux - - - - -DFORTRANUNDERSCORE -DNO_R16 -DCPRINTEL - -DCPRINTEL - - - -cxxlib - - FORTRAN - - -r8 - - - -mmic -fp-model source -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs - -openmp - -O0 -g -check uninit -check bounds -check pointers -fpe0 - -O2 - - - -O0 -mmic - - - -fixed -132 - - - -free - - TRUE - - -openmp - -mmic - - mpiicc - mpiicpc - mpiifort - icc - icpc - ifort - - $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} - - TRUE - - - - - -mmic -O2 -fp-model precise -DFORTRANUNDERSCOR - -openmp - - - --host=x86_64-k1om-linux --build=x86_64-unknown-linux - - - - -DFORTRANUNDERSCORE -DNO_R16 -DCPRINTEL - -DCPRINTEL - - - -cxxlib - - FORTRAN - - -r8 - - - -mmic -fp-model source -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs - -openmp - -O0 -g -check uninit -check bounds -check pointers -fpe0 - -O2 - - - -O0 -mmic - - - -fixed -132 - - - -free - - TRUE - - -openmp - -mmic - - mpiicc - mpiicpc - mpiifort - icc - icpc - ifort - - $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} - TRUE @@ -874,7 +722,7 @@ for mct, etc. - -qopenmp -static-intel + -static-intel -heap-arrays @@ -882,14 +730,14 @@ for mct, etc. -O2 -debug minimal -qno-opt-dynamic-align - -qopenmp -static-intel + -static-intel -heap-arrays - -qopenmp -static-intel + -static-intel - -qopenmp -static-intel + -static-intel gpfs @@ -908,9 +756,6 @@ for mct, etc. /soft/climate/AlbanyTrilinos_06262017/Albany/buildintel/install - - -qopenmp - -DHAVE_SLASHPROC @@ -919,14 +764,7 @@ for mct, etc. -O2 -debug minimal -qno-opt-dynamic-align - -qopenmp - - -qopenmp - - - -qopenmp - mpiicc mpiicpc mpiifort @@ -1248,23 +1086,13 @@ for mct, etc. 
- - -qopenmp - --host=Linux -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model consistent -fimf-use-svml -O2 -debug minimal -qno-opt-dynamic-align - -qopenmp - - -qopenmp - - - -qopenmp - $ENV{PETSC_DIR} icc icpc @@ -1277,7 +1105,6 @@ for mct, etc. - -qopenmp -axMIC-AVX512 -xCORE-AVX2 @@ -1289,15 +1116,8 @@ for mct, etc. -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model consistent -fimf-use-svml -O2 -debug minimal -qno-opt-dynamic-align - -qopenmp -xMIC-AVX512 - - -qopenmp - - - -qopenmp - mpiicc mpiicpc mpiifort @@ -1344,22 +1164,12 @@ for mct, etc. - - -qopenmp - --host=Linux -O2 -debug minimal -qno-opt-dynamic-align - -qopenmp - - -qopenmp - - - -qopenmp - $ENV{PETSC_DIR} icc icpc @@ -1862,7 +1672,6 @@ ntel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include - -qopenmp -xCORE-AVX2 @@ -1876,18 +1685,11 @@ ntel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model consistent -fimf-use-svml -O2 -debug minimal -qno-opt-dynamic-align - -qopenmp -xCORE-AVX2 - - -qopenmp - $ENV{TACC_HDF5_DIR} - - -qopenmp - mpicc mpicxx mpif90 @@ -2116,9 +1918,6 @@ ntel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include - - -qopenmp - --host=Linux @@ -2128,14 +1927,7 @@ ntel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model consistent -O2 -debug minimal -qno-opt-dynamic-align -fp-speculation=off - -qopenmp - - -qopenmp - - - -qopenmp - icc icpc ifort From 4cce38986f3c28a4245d86bc0e1bb92886d7793b Mon Sep 17 00:00:00 2001 From: Patrick Worley Date: Tue, 10 Jul 2018 20:23:40 -0400 Subject: [PATCH 22/59] Add system workload provenance capture on Summit Add capture of system status and current workload for Summit to CIME performance provenance capture logic. 
[BFB] --- scripts/lib/CIME/provenance.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/scripts/lib/CIME/provenance.py b/scripts/lib/CIME/provenance.py index 8b14b5d80b5..4770008abbc 100644 --- a/scripts/lib/CIME/provenance.py +++ b/scripts/lib/CIME/provenance.py @@ -23,6 +23,8 @@ def _get_batch_job_id_for_syslog(case): return os.environ["SLURM_JOB_ID"] elif mach in ['mira', 'theta']: return os.environ["COBALT_JOBID"] + elif mach in ['summit']: + return os.environ["LSB_JOBID"] except: pass @@ -195,6 +197,13 @@ def _save_prerun_timing_e3sm(case, lid): full_cmd = cmd + " " + filename run_cmd_no_fail(full_cmd + "." + lid, from_dir=full_timing_dir) gzip_existing_file(os.path.join(full_timing_dir, filename + "." + lid)) + elif mach == "summit": + for cmd, filename in [("bjobs -u all >", "bjobsu_all"), + ("bjobs -r -u all -o 'jobid slots exec_host' >", "bjobsru_allo"), + ("bjobs -l -UF %s >" % job_id, "bjobslUF_jobid")]: + full_cmd = cmd + " " + filename + run_cmd_no_fail(full_cmd + "." + lid, from_dir=full_timing_dir) + gzip_existing_file(os.path.join(full_timing_dir, filename + "." + lid)) # copy/tar SourceModes source_mods_dir = os.path.join(caseroot, "SourceMods") @@ -360,6 +369,9 @@ def _save_postrun_timing_e3sm(case, lid): globs_to_copy.append("%s*cobaltlog" % job_id) elif mach in ["edison", "cori-haswell", "cori-knl"]: globs_to_copy.append("%s*run*%s" % (case.get_value("CASE"), job_id)) + elif mach == "summit": + globs_to_copy.append("e3sm.stderr.%s" % job_id) + globs_to_copy.append("e3sm.stdout.%s" % job_id) globs_to_copy.append("logs/run_environment.txt.{}".format(lid)) globs_to_copy.append(os.path.join(rundir, "e3sm.log.{}.gz".format(lid))) From c1e4e78b8a4e32bf7fc6fc239b50bf5cca5ebdb5 Mon Sep 17 00:00:00 2001 From: Patrick Worley Date: Tue, 10 Jul 2018 20:27:43 -0400 Subject: [PATCH 23/59] Enable job progress monitoring on Summit Add syslog.summit checkpointing script for monitoring job progress. 
[BFB] --- config/e3sm/machines/syslog.summit | 76 ++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) create mode 100755 config/e3sm/machines/syslog.summit diff --git a/config/e3sm/machines/syslog.summit b/config/e3sm/machines/syslog.summit new file mode 100755 index 00000000000..767219db109 --- /dev/null +++ b/config/e3sm/machines/syslog.summit @@ -0,0 +1,76 @@ +#!/bin/csh -f +# summit syslog script: +# mach_syslog + +set sample_interval = $1 +set jid = $2 +set lid = $3 +set run = $4 +set timing = $5 +set dir = $6 + +# Wait until some model output appears before saving output file. +# Target length was determined empirically, so it may need to be adjusted in the future. +# (Note that calling script 'touch'es the e3sm log file before spawning this script, so that 'wc' does not fail.) +set outtarget = 16 +set outlth = 0 +while ($outlth < $outtarget) + sleep 10 + set outlth = `wc \-l $run/e3sm.log.$lid | sed 's/ *\([0-9]*\) *.*/\1/' ` +end + +set time_left = `bjobs -noheader -hms -o "time_left" $jid` +set remaining_hours = `echo $time_left | sed 's/^\([0-9]*\):\([0-9]*\):\([0-9]*\) *[XLE]/\1/' ` +set remaining_mins = `echo $time_left | sed 's/^\([0-9]*\):\([0-9]*\):\([0-9]*\) *[XLE]/\2/' ` +set remaining_secs = `echo $time_left | sed 's/^\([0-9]*\):\([0-9]*\):\([0-9]*\) *[XLE]/\3/' ` +if ("X$remaining_hours" == "X") set remaining_hours = 0 +if ("X$remaining_mins" == "X") set remaining_mins = 0 +if ("X$remaining_secs" == "X") set remaining_secs = 0 +@ remaining = 3600 * $remaining_hours + 60 * $remaining_mins + $remaining_secs +cat > $run/Walltime.Remaining < $dir/bjobsru_all.$lid.$remaining + bjobs -r -u all -o 'jobid slots exec_host' > $dir/bjobsru_allo.$lid.$remaining +endif + +while ($remaining > 0) + echo "Wallclock time remaining: $remaining" >> $dir/atm.log.$lid.step + grep -Fa -e "nstep" -e "model date" $run/*atm.log.$lid | tail -n 4 >> $dir/atm.log.$lid.step + echo "Wallclock time remaining: $remaining" >> $dir/lnd.log.$lid.step + grep -Fa -e 
"timestep" -e "model date" $run/*lnd.log.$lid | tail -n 4 >> $dir/lnd.log.$lid.step + echo "Wallclock time remaining: $remaining" >> $dir/ocn.log.$lid.step + grep -Fa -e "timestep" -e "Step number" -e "model date" $run/*ocn.log.$lid | tail -n 4 >> $dir/ocn.log.$lid.step + echo "Wallclock time remaining: $remaining" >> $dir/ice.log.$lid.step + grep -Fa -e "timestep" -e "istep" -e "model date" $run/*ice.log.$lid | tail -n 4 >> $dir/ice.log.$lid.step + echo "Wallclock time remaining: $remaining" >> $dir/rof.log.$lid.step + grep -Fa "model date" $run/*rof.log.$lid | tail -n 4 >> $dir/rof.log.$lid.step + grep -Fa "model date" $run/*cpl.log.$lid > $dir/cpl.log.$lid.step-all + echo "Wallclock time remaining: $remaining" >> $dir/cpl.log.$lid.step + tail -n 4 $dir/cpl.log.$lid.step-all >> $dir/cpl.log.$lid.step + /bin/cp --preserve=timestamps -u $timing/* $dir + bjobs -r -u all > $dir/bjobsru_all.$lid.$remaining + bjobs -r -u all -o 'jobid slots exec_host' > $dir/bjobsru_allo.$lid.$remaining + chmod a+r $dir/* + # sleep $sample_interval + set sleep_remaining = $sample_interval + while ($sleep_remaining > 120) + sleep 120 + @ sleep_remaining = $sleep_remaining - 120 + end + sleep $sleep_remaining + set time_left = `bjobs -noheader -hms -o "time_left" $jid` + set remaining_hours = `echo $time_left | sed 's/^\([0-9]*\):\([0-9]*\):\([0-9]*\) *[XLE]/\1/' ` + set remaining_mins = `echo $time_left | sed 's/^\([0-9]*\):\([0-9]*\):\([0-9]*\) *[XLE]/\2/' ` + set remaining_secs = `echo $time_left | sed 's/^\([0-9]*\):\([0-9]*\):\([0-9]*\) *[XLE]/\3/' ` + if ("X$remaining_hours" == "X") set remaining_hours = 0 + if ("X$remaining_mins" == "X") set remaining_mins = 0 + if ("X$remaining_secs" == "X") set remaining_secs = 0 + @ remaining = 3600 * $remaining_hours + 60 * $remaining_mins + $remaining_secs + cat > $run/Walltime.Remaining << EOF2 +$remaining $sample_interval +EOF2 + +end From f90a16e524c4b80f4b5b15ee2f4ec6a8fc1c5a56 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Thu, 12 Jul 
2018 11:47:36 -0600 Subject: [PATCH 24/59] Update CIME to ESMCI cime5.7.0-3 (#2437) Update CIME to ESMCI cime5.7.0-3 Squash merge of jgfouca/branch-for-to-acme-2018-07-12 Bug fixes: Another critical V2 build fix. [BFB] --- scripts/Tools/e3sm_cime_merge | 9 +- scripts/Tools/e3sm_cime_split | 9 +- scripts/lib/CIME/BuildTools/possiblevalues.py | 112 ++++++++------- scripts/lib/CIME/BuildTools/valuesetting.py | 43 +++++- scripts/lib/CIME/XML/compilerblock.py | 7 +- scripts/lib/CIME/XML/compilers.py | 29 ++-- scripts/lib/e3sm_cime_mgmt.py | 20 +-- scripts/tests/scripts_regression_tests.py | 129 ++++++++++++++++-- 8 files changed, 268 insertions(+), 90 deletions(-) mode change 100644 => 100755 scripts/tests/scripts_regression_tests.py diff --git a/scripts/Tools/e3sm_cime_merge b/scripts/Tools/e3sm_cime_merge index a66ceaa69d5..b0ec5e276b5 100755 --- a/scripts/Tools/e3sm_cime_merge +++ b/scripts/Tools/e3sm_cime_merge @@ -37,17 +37,20 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("--squash", action="store_true", help="Do the merge as squashy as possible") + parser.add_argument("--auto-conf", action="store_true", + help="Try to automatically resolve conflicts") + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) expect(not (args.resume and args.abort), "Makes no sense to abort and resume") expect(not (args.abort and args.squash), "Makes no sense to abort and squash") - return args.repo, args.resume, args.abort, args.squash + return args.repo, args.resume, args.abort, args.squash, args.auto_conf ############################################################################### def _main_func(description): ############################################################################### - repo, resume, abort, squash = parse_command_line(sys.argv, description) + repo, resume, abort, squash, auto_conf = parse_command_line(sys.argv, description) if repo is not None: os.chdir(repo) @@ -55,7 +58,7 @@ def 
_main_func(description): if abort: abort_merge() else: - e3sm_cime_merge(resume, squash=squash) + e3sm_cime_merge(resume, squash=squash, auto_conf=auto_conf) ############################################################################### diff --git a/scripts/Tools/e3sm_cime_split b/scripts/Tools/e3sm_cime_split index 8a6e68d5bf2..ed4936a553a 100755 --- a/scripts/Tools/e3sm_cime_split +++ b/scripts/Tools/e3sm_cime_split @@ -38,17 +38,20 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("--squash", action="store_true", help="Do the split as squashy as possible") + parser.add_argument("--auto-conf", action="store_true", + help="Try to automatically resolve conflicts") + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) expect(not (args.resume and args.abort), "Makes no sense to abort and resume") expect(not (args.abort and args.squash), "Makes no sense to abort and squash") - return args.repo, args.resume, args.abort, args.squash + return args.repo, args.resume, args.abort, args.squash, args.auto_conf ############################################################################### def _main_func(description): ############################################################################### - repo, resume, abort, squash = parse_command_line(sys.argv, description) + repo, resume, abort, squash, auto_conf = parse_command_line(sys.argv, description) if repo is not None: os.chdir(repo) @@ -56,7 +59,7 @@ def _main_func(description): if abort: abort_split() else: - e3sm_cime_split(resume, squash=squash) + e3sm_cime_split(resume, squash=squash, auto_conf=auto_conf) ############################################################################### diff --git a/scripts/lib/CIME/BuildTools/possiblevalues.py b/scripts/lib/CIME/BuildTools/possiblevalues.py index 339805fa7f3..837c702824c 100644 --- a/scripts/lib/CIME/BuildTools/possiblevalues.py +++ b/scripts/lib/CIME/BuildTools/possiblevalues.py @@ -22,12 +22,11 @@ class 
PossibleValues(object): append_settings - A dictionary of lists of possible appending settings for the variable, with the specificity of each list as the associated dictionary key. - depends - The current list of variables that this variable depends on - to get its value. Public methods: add_setting ambiguity_check + dependencies to_cond_trees """ @@ -38,17 +37,20 @@ def __init__(self, name, setting, specificity, depends): arguments are the same as for append_match. """ self.name = name - self.depends = depends # If this is an appending setting, its specificity can't cause it - # to overwrite other settings, but we want to keep track of it. + # to overwrite other settings. if setting.do_append: self.settings = [] - self.append_settings = {specificity: [setting]} - self._specificity = 0 + self.append_settings = [setting] + self._specificities = [] + self._depends = [] + self._append_depends = depends else: self.settings = [setting] - self.append_settings = {} - self._specificity = specificity + self.append_settings = [] + self._specificities = [specificity] + self._depends = [depends] + self._append_depends = set() def add_setting(self, setting, specificity, depends): """Add a possible value for a variable. @@ -56,48 +58,53 @@ def add_setting(self, setting, specificity, depends): Arguments: setting - A ValueSetting to start the list. specificity - An integer representing how specific the setting is. - Only the initial settings with the highest - specificity and appending settings with at least that - specificity will actually be kept in the list. The - lowest allowed specificity is 0. + Low-specificity settings that will never be used will be + dropped from the list. The lowest allowed specificity is + 0. depends - A set of variable names, specifying the variables that have to be set before this setting can be used (e.g. if SLIBS refers to NETCDF_PATH, then NETCDF_PATH has to be set first). 
+ >>> from CIME.BuildTools.valuesetting import ValueSetting >>> a = ValueSetting('foo', False, dict(), [], []) >>> b = ValueSetting('bar', False, dict(), [], []) >>> vals = PossibleValues('var', a, 0, {'dep1'}) >>> vals.add_setting(b, 1, {'dep2'}) >>> a not in vals.settings and b in vals.settings True - >>> 'dep1' not in vals.depends and 'dep2' in vals.depends + >>> 'dep1' not in vals.dependencies() and 'dep2' in vals.dependencies() True >>> vals.add_setting(a, 1, {'dep1'}) >>> a in vals.settings and b in vals.settings True - >>> 'dep1' in vals.depends and 'dep2' in vals.depends + >>> 'dep1' in vals.dependencies() and 'dep2' in vals.dependencies() True """ if setting.do_append: - # Appending settings with at least the current level of - # specificity should be kept. - if specificity >= self._specificity: - if specificity not in self.append_settings: - self.append_settings[specificity] = [] - self.append_settings[specificity].append(setting) - self.depends |= depends + self.append_settings.append(setting) + self._append_depends |= depends else: - # Add equally specific settings to the list. - if specificity == self._specificity: - self.settings.append(setting) - self.depends |= depends - # Replace the list if the setting is more specific. - elif specificity > self._specificity: - self.settings = [setting] - self._specificity = specificity - self.depends = depends - # Do nothing if the setting is less specific. + mark_deletion = [] + for i in range(len(self.settings)): + other_setting = self.settings[i] + other_specificity = self._specificities[i] + # Ignore this case if it's less specific than one we have. + if other_specificity > specificity: + if other_setting.has_special_case(setting): + return + # Override cases that are less specific than this one. 
+ elif other_specificity < specificity: + if setting.has_special_case(other_setting): + mark_deletion.append(i) + mark_deletion.reverse() + for i in mark_deletion: + del self.settings[i] + del self._specificities[i] + del self._depends[i] + self.settings.append(setting) + self._specificities.append(specificity) + self._depends.append(depends) def ambiguity_check(self): """Check the current list of settings for ambiguity. @@ -105,13 +112,23 @@ def ambiguity_check(self): This function raises an error if an ambiguity is found. """ for i in range(len(self.settings)-1): - for other in self.settings[i+1:]: + for j in range(i+1, len(self.settings)): + if self._specificities[i] != self._specificities[j]: + continue + other = self.settings[j] expect(not self.settings[i].is_ambiguous_with(other), "Variable "+self.name+" is set ambiguously in " "config_compilers.xml. Check the file for these " "conflicting settings: \n1: {}\n2: {}".format( self.settings[i].conditions, other.conditions)) + def dependencies(self): + """Returns a set of names of variables needed to set this variable.""" + depends = self._append_depends.copy() + for other in self._depends: + depends |= other + return depends + def to_cond_trees(self): """Convert this object to a pair of MacroConditionTree objects. @@ -119,21 +136,24 @@ def to_cond_trees(self): frozen and we're ready to convert it into an actual text file. This object is checked for ambiguities before conversion. - The return value is a tuple of two trees. The first contains all - initial settings, and the second contains all appending settings. - If either would be empty, None is returned instead. + The return value is a tuple of two items. The first is a dict of + condition trees containing all initial settings, with the specificities + as the dictionary keys. The second is a single tree containing all + appending settings. If the appending tree would be empty, None is + returned instead. 
""" self.ambiguity_check() - if self.settings: - normal_tree = MacroConditionTree(self.name, self.settings) - else: - normal_tree = None - append_settings = [] - for specificity in self.append_settings: - if specificity >= self._specificity: - append_settings += self.append_settings[specificity] - if append_settings: - append_tree = MacroConditionTree(self.name, append_settings) + # Get all values of specificity for which we need to make a tree. + specificities = sorted(list(set(self._specificities))) + # Build trees, starting from the least specific and working up. + normal_trees = {} + for specificity in specificities: + settings_for_tree = [self.settings[i] + for i in range(len(self.settings)) + if self._specificities[i] == specificity] + normal_trees[specificity] = MacroConditionTree(self.name, settings_for_tree) + if self.append_settings: + append_tree = MacroConditionTree(self.name, self.append_settings) else: append_tree = None - return (normal_tree, append_tree) + return (normal_trees, append_tree) diff --git a/scripts/lib/CIME/BuildTools/valuesetting.py b/scripts/lib/CIME/BuildTools/valuesetting.py index 9348e2e0516..7b58f5efdaa 100644 --- a/scripts/lib/CIME/BuildTools/valuesetting.py +++ b/scripts/lib/CIME/BuildTools/valuesetting.py @@ -25,6 +25,7 @@ class ValueSetting(object): Public methods: is_ambiguous_with + has_special_case """ def __init__(self, value, do_append, conditions, set_up, tear_down, force_no_append=False): # pylint: disable=too-many-arguments @@ -98,11 +99,47 @@ def is_ambiguous_with(self, other): if self.conditions[var_name] != other.conditions[var_name]: return False # Specificity check. - # One setting being more specific than the other is equivalent to - # its set of conditions being a proper superset of the others. 
self_set = set(self.conditions.keys()) other_set = set(other.conditions.keys()) - if self_set < other_set or self_set > other_set: + if self_set < other_set or other_set < self_set: return False # Any situation we couldn't resolve is ambiguous. return True + + def has_special_case(self, other): + """Check to see if another setting is a special case of this one. + + The purpose of this routine is to see if one of the settings requires + conditions that are a strict subset of another's conditions. This is + used to check whether a setting can be thrown out entirely in favor of a + more general, but machine-specific setting. + + >>> a = ValueSetting('foo', False, {"DEBUG": "TRUE"}, [], []) + >>> b = ValueSetting('bar', False, {"DEBUG": "TRUE", "MPILIB": "mpich2"}, [], []) + >>> c = ValueSetting('bar', False, {"DEBUG": "TRUE", "compile_threaded": "false"}, [], []) + >>> d = ValueSetting('foo', False, {"DEBUG": "FALSE"}, [], []) + >>> a.has_special_case(b) + True + >>> b.has_special_case(a) + False + >>> a.has_special_case(c) + True + >>> c.has_special_case(a) + False + >>> b.has_special_case(c) + False + >>> c.has_special_case(b) + False + >>> a.has_special_case(a) + True + >>> d.has_special_case(a) + False + >>> d.has_special_case(b) + False + """ + for var_name in self.conditions: + if var_name not in other.conditions: + return False + if self.conditions[var_name] != other.conditions[var_name]: + return False + return True diff --git a/scripts/lib/CIME/XML/compilerblock.py b/scripts/lib/CIME/XML/compilerblock.py index 1ea533f3ad2..679b360c6d9 100644 --- a/scripts/lib/CIME/XML/compilerblock.py +++ b/scripts/lib/CIME/XML/compilerblock.py @@ -188,9 +188,9 @@ def _elem_to_setting(self, elem): value_text = self._handle_references(elem, set_up, tear_down, depends) # Create the setting object. 
- append = self._db.name(elem) == "append" or (self._db.name(elem) == "base" and self._compiler and self._db.compiler != self._compiler) + append = self._db.name(elem) == "append" setting = ValueSetting(value_text, append, - conditions, set_up, tear_down, force_no_append=self._db.name(elem) == "base") + conditions, set_up, tear_down) return (setting, depends) @@ -208,8 +208,7 @@ def _add_elem_to_lists(self, name, elem, value_lists): value_lists[name] = PossibleValues(name, setting, self._specificity, depends) else: - specificity = 0 if len(elem.xml_element.attrib) else self._specificity - value_lists[name].add_setting(setting, specificity,depends) + value_lists[name].add_setting(setting, self._specificity,depends) def add_settings_to_lists(self, flag_vars, value_lists): """Add all data in the element to lists of settings. diff --git a/scripts/lib/CIME/XML/compilers.py b/scripts/lib/CIME/XML/compilers.py index 656ab4bcd7b..072bb1bcd52 100644 --- a/scripts/lib/CIME/XML/compilers.py +++ b/scripts/lib/CIME/XML/compilers.py @@ -164,11 +164,11 @@ def write_macros_file(self, macros_file="Macros.make", output_format="make", xml if isinstance(macros_file, six.string_types): with open(macros_file, "w") as macros: - self._write_macros_file_v2(format_, macros) + self._write_macros_file(format_, macros) else: - self._write_macros_file_v2(format_, macros_file, xml) + self._write_macros_file(format_, macros_file, xml) - def _write_macros_file_v2(self, build_system, output, xml=None): + def _write_macros_file(self, build_system, output, xml=None): """Write a Macros file for this machine. Arguments: @@ -210,29 +210,34 @@ def _write_macros_file_v2(self, build_system, output, xml=None): # Variables that are ready to be written. ready_variables = [ var_name for var_name in value_lists - if value_lists[var_name].depends <= vars_written + if value_lists[var_name].dependencies() <= vars_written ] expect(len(ready_variables) > 0, - "The file {} has bad references. 
" + "The file {} has bad $VAR references. " "Check for circular references or variables that " - "are in a tag but not actually defined.".format(self.filename)) - big_normal_tree = None + "are used in a $VAR but not actually defined.".format(self.filename)) + big_normal_trees = {} big_append_tree = None for var_name in ready_variables: # Note that we're writing this variable. vars_written.add(var_name) # Make the conditional trees and write them out. - normal_tree, append_tree = \ + normal_trees, append_tree = \ value_lists[var_name].to_cond_trees() - big_normal_tree = merge_optional_trees(normal_tree, - big_normal_tree) + for spec in normal_trees: + if spec in big_normal_trees: + big_normal_trees[spec] = merge_optional_trees(normal_trees[spec], + big_normal_trees[spec]) + else: + big_normal_trees[spec] = normal_trees[spec] big_append_tree = merge_optional_trees(append_tree, big_append_tree) # Remove this variable from the list of variables to handle # next iteration. del value_lists[var_name] - if big_normal_tree is not None: - big_normal_tree.write_out(writer) + specificities = sorted(list(big_normal_trees.keys())) + for spec in specificities: + big_normal_trees[spec].write_out(writer) if big_append_tree is not None: big_append_tree.write_out(writer) diff --git a/scripts/lib/e3sm_cime_mgmt.py b/scripts/lib/e3sm_cime_mgmt.py index 7277c7eef5f..a4ba270c31e 100644 --- a/scripts/lib/e3sm_cime_mgmt.py +++ b/scripts/lib/e3sm_cime_mgmt.py @@ -143,10 +143,10 @@ def handle_easy_conflicts(is_merge): return rv ############################################################################### -def handle_conflicts(is_merge=False): +def handle_conflicts(is_merge=False, auto_conf=False): ############################################################################### logging.info("There are conflicts, analyzing...") - remaining_conflicts = handle_easy_conflicts(is_merge) + remaining_conflicts = handle_easy_conflicts(is_merge) if auto_conf else True if remaining_conflicts: 
expect(False, "There are merge conflicts. Please fix, commit, and re-run this tool with --resume") else: @@ -154,12 +154,12 @@ def handle_conflicts(is_merge=False): run_cmd_no_fail("git commit --no-edit") ############################################################################### -def do_subtree_pull(squash=False): +def do_subtree_pull(squash=False, auto_conf=False): ############################################################################### stat = run_cmd("git subtree pull {} --prefix=cime {} master".format("--squash" if squash else "", ESMCI_REMOTE_NAME), verbose=True)[0] if stat != 0: - handle_conflicts(is_merge=True) + handle_conflicts(is_merge=True, auto_conf=auto_conf) ############################################################################### def make_pr_branch(branch, branch_head): @@ -169,12 +169,12 @@ def make_pr_branch(branch, branch_head): return branch ############################################################################### -def merge_branch(branch, squash=False): +def merge_branch(branch, squash=False, auto_conf=False): ############################################################################### stat = run_cmd("git merge {} -m 'Merge {}' -X rename-threshold=25 {}".format("--squash" if squash else "", branch, branch), verbose=True)[0] if stat != 0: - handle_conflicts() + handle_conflicts(auto_conf=auto_conf) ############################################################################### def delete_tag(tag, remote="origin"): @@ -183,7 +183,7 @@ def delete_tag(tag, remote="origin"): run_cmd_no_fail("git push {} :refs/tags/{}".format(remote, tag), verbose=True) ############################################################################### -def e3sm_cime_split(resume, squash=False): +def e3sm_cime_split(resume, squash=False, auto_conf=False): ############################################################################### if not resume: setup() @@ -205,7 +205,7 @@ def e3sm_cime_split(resume, squash=False): raise # upstream merge, 
potential conflicts - merge_branch("{}/master".format(ESMCI_REMOTE_NAME), squash=squash) + merge_branch("{}/master".format(ESMCI_REMOTE_NAME), squash=squash, auto_conf=auto_conf) else: old_split_tag, new_split_tag = get_split_tag(expected_num=2) @@ -215,7 +215,7 @@ def e3sm_cime_split(resume, squash=False): run_cmd_no_fail("git push -u {} {}".format(ESMCI_REMOTE_NAME, pr_branch), verbose=True) ############################################################################### -def e3sm_cime_merge(resume, squash=False): +def e3sm_cime_merge(resume, squash=False, auto_conf=False): ############################################################################### if not resume: setup() @@ -232,7 +232,7 @@ def e3sm_cime_merge(resume, squash=False): raise # potential conflicts - do_subtree_pull(squash=squash) + do_subtree_pull(squash=squash, auto_conf=auto_conf) else: old_merge_tag, new_merge_tag = get_merge_tag(expected_num=2) diff --git a/scripts/tests/scripts_regression_tests.py b/scripts/tests/scripts_regression_tests.py old mode 100644 new mode 100755 index 575c9ec1568..fecaa83ba55 --- a/scripts/tests/scripts_regression_tests.py +++ b/scripts/tests/scripts_regression_tests.py @@ -2542,14 +2542,14 @@ def test_machine_specific_append_flags(self): tester = self.xml_to_tester(xml1+xml2) tester.assert_variable_matches("FFLAGS", "^(-delicious -cake|-cake -delicious)$") - def test_machine_specific_base_over_append_flags(self): - """Test that machine-specific base flags override default append flags.""" + def test_machine_specific_base_keeps_append_flags(self): + """Test that machine-specific base flags don't override default append flags.""" xml1 = """-delicious""" xml2 = """-cake""".format(self.test_machine) tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_equals("FFLAGS", "-cake") + tester.assert_variable_equals("FFLAGS", "-cake -delicious") tester = self.xml_to_tester(xml2+xml1) - tester.assert_variable_equals("FFLAGS", "-cake") + 
tester.assert_variable_equals("FFLAGS", "-cake -delicious") def test_machine_specific_base_and_append_flags(self): """Test that machine-specific base flags coexist with machine-specific append flags.""" @@ -2639,29 +2639,140 @@ def test_variable_insertion_with_machine_specific_setting(self): with assertRaisesRegex(self,SystemExit, err_msg): self.xml_to_tester(xml1+xml2+xml3) - def test_conditional_override(self): - """Test that conditional overrides work correctly""" + def test_override_with_machine_and_new_attributes(self): + """Test that overrides with machine-specific settings with added attributes work correctly.""" xml1 = """ icc mpicxx mpif90 mpicc - +""".format(self.test_compiler) + xml2 = """ mpifoo mpiffoo mpifouc -""".format(self.test_compiler, self.test_compiler, self.test_machine, self.test_mpilib) +""".format(self.test_compiler, self.test_machine, self.test_mpilib) - tester = self.xml_to_tester(xml1) + tester = self.xml_to_tester(xml1+xml2) tester.assert_variable_equals("SCC", "icc", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) tester.assert_variable_equals("MPICXX", "mpifoo", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) tester.assert_variable_equals("MPIFC", "mpiffoo", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) tester.assert_variable_equals("MPICC", "mpicc", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) + tester = self.xml_to_tester(xml2+xml1) + + tester.assert_variable_equals("SCC", "icc", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) + tester.assert_variable_equals("MPICXX", "mpifoo", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) + tester.assert_variable_equals("MPIFC", "mpiffoo", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) + tester.assert_variable_equals("MPICC", "mpicc", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) + + def test_override_with_machine_and_same_attributes(self): + """Test that 
machine-specific conditional overrides with the same attribute work correctly.""" + xml1 = """ + + mpifc +""".format(self.test_compiler, self.test_mpilib) + xml2 = """ + + mpif90 + +""".format(self.test_machine, self.test_compiler, self.test_mpilib) + + tester = self.xml_to_tester(xml1+xml2) + + tester.assert_variable_equals("MPIFC", "mpif90", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) + + tester = self.xml_to_tester(xml2+xml1) + + tester.assert_variable_equals("MPIFC", "mpif90", env={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) + + def test_appends_not_overriden(self): + """Test that machine-specific base value changes don't interfere with appends.""" + xml1=""" + + + -base1 + -debug1 + +""".format(self.test_compiler) + + xml2=""" + + + -base2 + -debug2 + +""".format(self.test_machine, self.test_compiler) + + tester = self.xml_to_tester(xml1+xml2) + + tester.assert_variable_equals("FFLAGS", "-base2", env={"COMPILER": self.test_compiler}) + tester.assert_variable_equals("FFLAGS", "-base2 -debug2", env={"COMPILER": self.test_compiler, "DEBUG": "TRUE"}) + tester.assert_variable_equals("FFLAGS", "-base2 -debug1", env={"COMPILER": self.test_compiler, "DEBUG": "FALSE"}) + + tester = self.xml_to_tester(xml2+xml1) + + tester.assert_variable_equals("FFLAGS", "-base2", env={"COMPILER": self.test_compiler}) + tester.assert_variable_equals("FFLAGS", "-base2 -debug2", env={"COMPILER": self.test_compiler, "DEBUG": "TRUE"}) + tester.assert_variable_equals("FFLAGS", "-base2 -debug1", env={"COMPILER": self.test_compiler, "DEBUG": "FALSE"}) + + def test_multilevel_specificity(self): + """Check that settings with multiple levels of machine-specificity can be resolved.""" + xml1=""" + + mpifc +""" + + xml2=""" + + mpif03 +""".format(self.test_os, self.test_mpilib) + + xml3=""" + + mpif90 +""".format(self.test_machine) + + # To verify order-independence, test every possible ordering of blocks. 
+ testers = [] + testers.append(self.xml_to_tester(xml1+xml2+xml3)) + testers.append(self.xml_to_tester(xml1+xml3+xml2)) + testers.append(self.xml_to_tester(xml2+xml1+xml3)) + testers.append(self.xml_to_tester(xml2+xml3+xml1)) + testers.append(self.xml_to_tester(xml3+xml1+xml2)) + testers.append(self.xml_to_tester(xml3+xml2+xml1)) + + for tester in testers: + tester.assert_variable_equals("MPIFC", "mpif90", env={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib, "DEBUG": "TRUE"}) + tester.assert_variable_equals("MPIFC", "mpif03", env={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib, "DEBUG": "FALSE"}) + tester.assert_variable_equals("MPIFC", "mpifc", env={"COMPILER": self.test_compiler, "MPILIB": "NON_MATCHING_MPI", "DEBUG": "FALSE"}) + + def test_remove_dependency_issues(self): + """Check that overridden settings don't cause inter-variable dependencies.""" + xml1=""" + + ${SFC} +""" + + xml2=""" +""".format(self.test_machine) + """ + ${MPIFC} + mpif90 +""" + + tester = self.xml_to_tester(xml1+xml2) + tester.assert_variable_equals("SFC", "mpif90") + tester.assert_variable_equals("MPIFC", "mpif90") + + tester = self.xml_to_tester(xml2+xml1) + tester.assert_variable_equals("SFC", "mpif90") + tester.assert_variable_equals("MPIFC", "mpif90") + + ############################################################################### class I_TestCMakeMacros(H_TestMakeMacros): ############################################################################### From ea3bdad2eba9f9c08908c49733efc31cc8a8c18a Mon Sep 17 00:00:00 2001 From: Gautam Bisht Date: Fri, 13 Jul 2018 07:38:22 -0700 Subject: [PATCH 25/59] Corrects the simulation length of FC5AV1C-L test --- config/e3sm/tests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/e3sm/tests.py b/config/e3sm/tests.py index dd5161168b1..45306c43348 100644 --- a/config/e3sm/tests.py +++ b/config/e3sm/tests.py @@ -86,7 +86,7 @@ "PEM_Ln9.ne30_oECv3_ICG.A_WCYCL1850S", 
"ERP_Ld3.ne30_oECv3_ICG.A_WCYCL1850S", "SMS.f09_g16_a.MALI", - "SMS_D_Ln6.conusx4v1_conusx4v1.FC5AV1C-L", + "SMS_D_Ln5.conusx4v1_conusx4v1.FC5AV1C-L", ("SMS.ne30_oECv3.BGCEXP_BCRC_CNPECACNT_1850","clm-bgcexp"), ("SMS.ne30_oECv3.BGCEXP_BCRC_CNPRDCTC_1850","clm-bgcexp")) ), From ce709f4365a80b4acf915255e304abb482f9259c Mon Sep 17 00:00:00 2001 From: James Foucar Date: Fri, 13 Jul 2018 11:06:39 -0600 Subject: [PATCH 26/59] Fix test_status determination of overall test status Any fail in a core phase should cause the test status to be FAIL. --- scripts/lib/CIME/test_status.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/scripts/lib/CIME/test_status.py b/scripts/lib/CIME/test_status.py index 6707422aba7..c2e007fd923 100644 --- a/scripts/lib/CIME/test_status.py +++ b/scripts/lib/CIME/test_status.py @@ -282,15 +282,17 @@ def get_overall_test_status(self, wait_for_run=False, check_throughput=False, ch >>> _test_helper2('PASS ERS.foo.A SHAREDLIB_BUILD\nPEND ERS.foo.A RUN') 'PEND' >>> _test_helper2('FAIL ERS.foo.A MODEL_BUILD\nPEND ERS.foo.A RUN') - 'PEND' + 'FAIL' >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD\nPASS ERS.foo.A RUN') 'PASS' >>> _test_helper2('PASS ERS.foo.A RUN\nFAIL ERS.foo.A TPUTCOMP') 'PASS' >>> _test_helper2('PASS ERS.foo.A RUN\nFAIL ERS.foo.A TPUTCOMP', check_throughput=True) 'FAIL' - >>> _test_helper2('PASS ERS.foo.A RUN\nFAIL ERS.foo.A NLCOMP') + >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD\nPASS ERS.foo.A RUN\nFAIL ERS.foo.A NLCOMP') 'NLFAIL' + >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD\nPEND ERS.foo.A RUN\nFAIL ERS.foo.A NLCOMP') + 'PEND' >>> _test_helper2('PASS ERS.foo.A RUN\nFAIL ERS.foo.A MEMCOMP') 'PASS' >>> _test_helper2('PASS ERS.foo.A RUN\nFAIL ERS.foo.A NLCOMP', ignore_namelists=True) @@ -317,6 +319,8 @@ def get_overall_test_status(self, wait_for_run=False, check_throughput=False, ch 'FAIL' >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD\nPASS ERS.foo.A RUN', wait_for_run=True) 'PASS' + >>> 
_test_helper2('PASS ERS.foo.A MODEL_BUILD\nFAIL ERS.foo.A RUN\nPEND ERS.foo.A COMPARE') + 'FAIL' """ rv = TEST_PASS_STATUS run_phase_found = False @@ -342,6 +346,9 @@ def get_overall_test_status(self, wait_for_run=False, check_throughput=False, ch elif (rv in [NAMELIST_FAIL_STATUS, TEST_PASS_STATUS] and phase == BASELINE_PHASE): rv = TEST_DIFF_STATUS + elif phase in CORE_PHASES: + return TEST_FAIL_STATUS + else: rv = TEST_FAIL_STATUS From 35331ca4e7c3495d9408acdf84e96d3129e533c1 Mon Sep 17 00:00:00 2001 From: noel Date: Fri, 13 Jul 2018 10:16:00 -0700 Subject: [PATCH 27/59] Update module versions for Cori (haswell and KNL) after software maintenance. In particular, the default gcc version we were using was removed. We also update the mpich version. Many tests were run that are passing, except for a few known failures using gnu 7.3. Modules updates: craype 2.5.13 -> 2.5.14 cray-mpich 7.6.2 -> 7.7.0 gcc 6.3.0 -> 7.3.0 cray-libsci 17.09.01 -> 18.03.1 impi 2018.up1 -> 2018.up2 --- config/e3sm/machines/config_machines.xml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/config/e3sm/machines/config_machines.xml b/config/e3sm/machines/config_machines.xml index 6017bc3111c..2b0425277cb 100644 --- a/config/e3sm/machines/config_machines.xml +++ b/config/e3sm/machines/config_machines.xml @@ -275,11 +275,11 @@ craype - craype/2.5.13 + craype/2.5.14 pmi/5.0.13 cray-mpich - cray-mpich/7.6.2 + cray-mpich/7.7.0 @@ -291,9 +291,9 @@ PrgEnv-intel PrgEnv-gnu/6.0.4 gcc - gcc/6.3.0 + gcc/7.3.0 cray-libsci - cray-libsci/17.09.1 + cray-libsci/18.03.1 @@ -419,11 +419,11 @@ - cray-mpich cray-mpich/7.6.2 + cray-mpich cray-mpich/7.7.0 - cray-mpich impi/2018.up1 + cray-mpich impi/2018.up2 @@ -435,9 +435,9 @@ PrgEnv-intel PrgEnv-gnu/6.0.4 gcc - gcc/6.3.0 + gcc/7.3.0 cray-libsci - cray-libsci/17.09.1 + cray-libsci/18.03.1 @@ -449,7 +449,7 @@ - craype craype/2.5.13 + craype craype/2.5.14 pmi pmi/5.0.13 craype-haswell @@ -498,7 +498,7 @@ ofi gni yes - 
/global/common/cori/software/libfabric/1.5.0/gnu/lib/libfabric.so + /global/common/cori/software/libfabric/1.6.1/gnu/lib/libfabric.so /usr/lib64/slurmpmi/libpmi.so From 00525c01f9ee134421f23147bc1986de0e9d3e5c Mon Sep 17 00:00:00 2001 From: noel Date: Fri, 13 Jul 2018 10:42:26 -0700 Subject: [PATCH 28/59] Replace MPASSI with MPASCICE for all PE layouts (only affects NERSC machines) Minor update to cori-knl ne11 layout. [bfb] --- config/e3sm/allactive/config_pesall.xml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/config/e3sm/allactive/config_pesall.xml b/config/e3sm/allactive/config_pesall.xml index 9a3c2dbbfd2..a10e515fbfc 100644 --- a/config/e3sm/allactive/config_pesall.xml +++ b/config/e3sm/allactive/config_pesall.xml @@ -2649,14 +2649,14 @@ 64 128 - 384 - 384 - 384 + 363 + 363 + 363 128 128 64 64 - 384 + 363 2 @@ -8112,7 +8112,7 @@ - + "edison ne4 coupled compest on 6 nodes, OCN by itself on 2 nodes sypd=45.2" 96 @@ -8427,7 +8427,7 @@ - + cori-knl, hires (18to6) G case on 150 nodes, 64x2, sypd=0.5 64 128 @@ -8456,7 +8456,7 @@ - + cori-knl, lowres (60to30) G case on 16 nodes, 64x2, sypd=2.42 64 128 From a8f0eaf387932490a8bdeea3d2569bc9486f0875 Mon Sep 17 00:00:00 2001 From: Joseph H Kennedy Date: Fri, 13 Jul 2018 17:49:13 -0400 Subject: [PATCH 29/59] Change e3sm_integration NCK.ne4_... test to ne11 The NCK.ne4_oQU240.A_WCYCL1850 multi-instance test fails due to a missing mpas-cice graph.info file. This changes the test to NCK.ne11_oQU240.A_WCYCL1850 becasause the ne11 and oQU240 grids match. 
This test can use ne4 once the matching oQU480 grid is available, which will be introduced via PR E3SM-Project/E3SM#2426 Fixes E3SM-Project/E3SM#2444 --- config/e3sm/tests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/e3sm/tests.py b/config/e3sm/tests.py index 6ddd7752575..d3de579d18a 100644 --- a/config/e3sm/tests.py +++ b/config/e3sm/tests.py @@ -80,7 +80,7 @@ ("SMS_D_Ld1.ne30_oECv3_ICG.A_WCYCL1850S_CMIP6","allactive-v1cmip6"), "ERS_Ln9.ne4_ne4.FC5AV1C-L", #"ERT_Ld31.ne16_g37.B1850C5",#add this line back in with the new correct compset - "NCK.ne4_oQU240.A_WCYCL1850", + "NCK.ne11_oQU240.A_WCYCL1850", ("PET.f19_g16.X","allactive-mach-pet"), ("PET.f45_g37_rx1.A","allactive-mach-pet"), ("PET_Ln9_PS.ne30_oECv3_ICG.A_WCYCL1850S","allactive-mach-pet"), From 92f6305e26b3ae0669ab2ffd91159740a063571a Mon Sep 17 00:00:00 2001 From: James Foucar Date: Tue, 17 Jul 2018 17:40:03 -0600 Subject: [PATCH 30/59] Add no run capability to wait_for_tests Also, big steps to improving the robustness and testing of TestStatus.get_overall_test_status.
--- scripts/Tools/wait_for_tests | 10 ++- scripts/lib/CIME/test_status.py | 131 +++++++++++++++++++---------- scripts/lib/CIME/wait_for_tests.py | 18 ++-- 3 files changed, 107 insertions(+), 52 deletions(-) diff --git a/scripts/Tools/wait_for_tests b/scripts/Tools/wait_for_tests index 13d61c1d14c..12566bc267d 100755 --- a/scripts/Tools/wait_for_tests +++ b/scripts/Tools/wait_for_tests @@ -43,6 +43,9 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("-n", "--no-wait", action="store_true", help="Do not wait for tests to finish") + parser.add_argument("--no-run", action="store_true", + help="Do not expect run phase to be completed") + parser.add_argument("-t", "--check-throughput", action="store_true", help="Fail if throughput check fails (fail if tests slow down)") @@ -72,12 +75,12 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - return args.paths, args.no_wait, args.check_throughput, args.check_memory, args.ignore_namelist_diffs, args.ignore_memleak, args.cdash_build_name, args.cdash_project, args.cdash_build_group, args.timeout, args.force_log_upload + return args.paths, args.no_wait, args.check_throughput, args.check_memory, args.ignore_namelist_diffs, args.ignore_memleak, args.cdash_build_name, args.cdash_project, args.cdash_build_group, args.timeout, args.force_log_upload, args.no_run ############################################################################### def _main_func(description): ############################################################################### - test_paths, no_wait, check_throughput, check_memory, ignore_namelist_diffs, ignore_memleak, cdash_build_name, cdash_project, cdash_build_group, timeout, force_log_upload = \ + test_paths, no_wait, check_throughput, check_memory, ignore_namelist_diffs, ignore_memleak, cdash_build_name, cdash_project, cdash_build_group, timeout, force_log_upload, no_run = \ 
parse_command_line(sys.argv, description) sys.exit(0 if CIME.wait_for_tests.wait_for_tests(test_paths, @@ -90,7 +93,8 @@ def _main_func(description): cdash_project=cdash_project, cdash_build_group=cdash_build_group, timeout=timeout, - force_log_upload=force_log_upload) + force_log_upload=force_log_upload, + no_run=no_run) else CIME.utils.TESTS_FAILED_ERR_CODE) ############################################################################### diff --git a/scripts/lib/CIME/test_status.py b/scripts/lib/CIME/test_status.py index c2e007fd923..344e1bcd2a3 100644 --- a/scripts/lib/CIME/test_status.py +++ b/scripts/lib/CIME/test_status.py @@ -27,7 +27,7 @@ from collections import OrderedDict -import os +import os, itertools TEST_STATUS_FILENAME = "TestStatus" @@ -92,13 +92,23 @@ def _test_helper1(file_contents): ts._parse_test_status(file_contents) # pylint: disable=protected-access return ts._phase_statuses # pylint: disable=protected-access -def _test_helper2(file_contents, wait_for_run=False, check_throughput=False, check_memory=False, ignore_namelists=False): - ts = TestStatus(test_dir="/", test_name="ERS.foo.A") - ts._parse_test_status(file_contents) # pylint: disable=protected-access - return ts.get_overall_test_status(wait_for_run=wait_for_run, - check_throughput=check_throughput, - check_memory=check_memory, - ignore_namelists=ignore_namelists) +def _test_helper2(file_contents, wait_for_run=False, check_throughput=False, check_memory=False, ignore_namelists=False, no_run=False): + lines = file_contents.splitlines() + rv = None + for perm in itertools.permutations(lines): + ts = TestStatus(test_dir="/", test_name="ERS.foo.A") + ts._parse_test_status("\n".join(perm)) # pylint: disable=protected-access + the_status = ts.get_overall_test_status(wait_for_run=wait_for_run, + check_throughput=check_throughput, + check_memory=check_memory, + ignore_namelists=ignore_namelists, + no_run=no_run) + if rv is not None and the_status != rv: + return "{} != {}".format(rv, the_status) 
+ else: + rv = the_status + + return rv class TestStatus(object): @@ -271,7 +281,54 @@ def _parse_test_status_file(self): with open(self._filename, "r") as fd: self._parse_test_status(fd.read()) - def get_overall_test_status(self, wait_for_run=False, check_throughput=False, check_memory=False, ignore_namelists=False, ignore_memleak=False): + def _get_overall_status_based_on_phases(self, phases, wait_for_run=False, check_throughput=False, check_memory=False, ignore_namelists=False, ignore_memleak=False, no_run=False): + + rv = TEST_PASS_STATUS + run_phase_found = False + for phase in phases: # ensure correct order of processing phases + if phase in self._phase_statuses: + data = self._phase_statuses[phase] + else: + continue + + status = data[0] + if phase == RUN_PHASE: + run_phase_found = True + + if phase in [SUBMIT_PHASE, RUN_PHASE] and no_run: + break + + if (status == TEST_PEND_STATUS): + rv = TEST_PEND_STATUS + + elif (status == TEST_FAIL_STATUS): + if ( (not check_throughput and phase == THROUGHPUT_PHASE) or + (not check_memory and phase == MEMCOMP_PHASE) or + (ignore_namelists and phase == NAMELIST_PHASE) or + (ignore_memleak and phase == MEMLEAK_PHASE) ): + continue + + if (phase == NAMELIST_PHASE): + if (rv == TEST_PASS_STATUS): + rv = NAMELIST_FAIL_STATUS + + elif (rv in [NAMELIST_FAIL_STATUS, TEST_PASS_STATUS] and phase == BASELINE_PHASE): + rv = TEST_DIFF_STATUS + + elif phase in CORE_PHASES: + return TEST_FAIL_STATUS + + else: + rv = TEST_FAIL_STATUS + + # The test did not fail but the RUN phase was not found, so if the user requested + # that we wait for the RUN phase, then the test must still be considered pending. 
+ if rv != TEST_FAIL_STATUS and not run_phase_found and wait_for_run: + rv = TEST_PEND_STATUS + + return rv + + def get_overall_test_status(self, wait_for_run=False, check_throughput=False, check_memory=False, ignore_namelists=False, ignore_memleak=False, no_run=False): r""" Given the current phases and statuses, produce a single results for this test. Preference is given to PEND since we don't want to stop waiting for a test @@ -321,40 +378,28 @@ def get_overall_test_status(self, wait_for_run=False, check_throughput=False, ch 'PASS' >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD\nFAIL ERS.foo.A RUN\nPEND ERS.foo.A COMPARE') 'FAIL' + >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD\nPEND ERS.foo.A RUN', no_run=True) + 'PASS' """ - rv = TEST_PASS_STATUS - run_phase_found = False - for phase, data in self._phase_statuses.items(): - status = data[0] - if phase == RUN_PHASE: - run_phase_found = True - - if (status == TEST_PEND_STATUS): - rv = TEST_PEND_STATUS - - elif (status == TEST_FAIL_STATUS): - if ( (not check_throughput and phase == THROUGHPUT_PHASE) or - (not check_memory and phase == MEMCOMP_PHASE) or - (ignore_namelists and phase == NAMELIST_PHASE) or - (ignore_memleak and phase == MEMLEAK_PHASE) ): - continue - - if (phase == NAMELIST_PHASE): - if (rv == TEST_PASS_STATUS): - rv = NAMELIST_FAIL_STATUS - - elif (rv in [NAMELIST_FAIL_STATUS, TEST_PASS_STATUS] and phase == BASELINE_PHASE): - rv = TEST_DIFF_STATUS - - elif phase in CORE_PHASES: - return TEST_FAIL_STATUS - - else: - rv = TEST_FAIL_STATUS + # Core phases take priority + core_rv = self._get_overall_status_based_on_phases(CORE_PHASES, + wait_for_run=wait_for_run, + check_throughput=check_throughput, + check_memory=check_memory, + ignore_namelists=ignore_namelists, + ignore_memleak=ignore_memleak, + no_run=no_run) + if core_rv != TEST_PASS_STATUS: + return core_rv + else: + phase_order = list(CORE_PHASES) + phase_order.extend([item for item in self._phase_statuses if item not in CORE_PHASES]) - # The test 
did not fail but the RUN phase was not found, so if the user requested - # that we wait for the RUN phase, then the test must still be considered pending. - if rv != TEST_FAIL_STATUS and not run_phase_found and wait_for_run: - rv = TEST_PEND_STATUS + return self._get_overall_status_based_on_phases(phase_order, + wait_for_run=wait_for_run, + check_throughput=check_throughput, + check_memory=check_memory, + ignore_namelists=ignore_namelists, + ignore_memleak=ignore_memleak, + no_run=no_run) - return rv diff --git a/scripts/lib/CIME/wait_for_tests.py b/scripts/lib/CIME/wait_for_tests.py index 66e0ca32290..578476cfadd 100644 --- a/scripts/lib/CIME/wait_for_tests.py +++ b/scripts/lib/CIME/wait_for_tests.py @@ -283,20 +283,25 @@ def create_cdash_xml(results, cdash_build_name, cdash_project, cdash_build_group run_cmd_no_fail("ctest -VV -D NightlySubmit", verbose=True) ############################################################################### -def wait_for_test(test_path, results, wait, check_throughput, check_memory, ignore_namelists, ignore_memleak): +def wait_for_test(test_path, results, wait, check_throughput, check_memory, ignore_namelists, ignore_memleak, no_run): ############################################################################### if (os.path.isdir(test_path)): test_status_filepath = os.path.join(test_path, TEST_STATUS_FILENAME) else: test_status_filepath = test_path + + import pdb + pdb.set_trace() + logging.debug("Watching file: '{}'".format(test_status_filepath)) while (True): if (os.path.exists(test_status_filepath)): ts = TestStatus(test_dir=os.path.dirname(test_status_filepath)) test_name = ts.get_name() - test_status = ts.get_overall_test_status(wait_for_run=True, # Important + test_status = ts.get_overall_test_status(wait_for_run=not no_run, # Important + no_run=no_run, check_throughput=check_throughput, check_memory=check_memory, ignore_namelists=ignore_namelists, ignore_memleak=ignore_memleak) @@ -318,12 +323,12 @@ def 
wait_for_test(test_path, results, wait, check_throughput, check_memory, igno break ############################################################################### -def wait_for_tests_impl(test_paths, no_wait=False, check_throughput=False, check_memory=False, ignore_namelists=False, ignore_memleak=False): +def wait_for_tests_impl(test_paths, no_wait=False, check_throughput=False, check_memory=False, ignore_namelists=False, ignore_memleak=False, no_run=False): ############################################################################### results = queue.Queue() for test_path in test_paths: - t = threading.Thread(target=wait_for_test, args=(test_path, results, not no_wait, check_throughput, check_memory, ignore_namelists, ignore_memleak)) + t = threading.Thread(target=wait_for_test, args=(test_path, results, not no_wait, check_throughput, check_memory, ignore_namelists, ignore_memleak, no_run)) t.daemon = True t.start() @@ -360,14 +365,15 @@ def wait_for_tests(test_paths, cdash_project=E3SM_MAIN_CDASH, cdash_build_group=CDASH_DEFAULT_BUILD_GROUP, timeout=None, - force_log_upload=False): + force_log_upload=False, + no_run=True): ############################################################################### # Set up signal handling, we want to print results before the program # is terminated set_up_signal_handlers() with Timeout(timeout, action=signal_handler): - test_results = wait_for_tests_impl(test_paths, no_wait, check_throughput, check_memory, ignore_namelists, ignore_memleak) + test_results = wait_for_tests_impl(test_paths, no_wait, check_throughput, check_memory, ignore_namelists, ignore_memleak, no_run) all_pass = True for test_name, test_data in sorted(test_results.items()): From 9da17f9bc5b29a07eff0117d241f3a6ffb120347 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Tue, 17 Jul 2018 17:46:28 -0600 Subject: [PATCH 31/59] Remove pdb statement --- scripts/lib/CIME/wait_for_tests.py | 4 ---- 1 file changed, 4 deletions(-) diff --git 
a/scripts/lib/CIME/wait_for_tests.py b/scripts/lib/CIME/wait_for_tests.py index 578476cfadd..03e77bf97b7 100644 --- a/scripts/lib/CIME/wait_for_tests.py +++ b/scripts/lib/CIME/wait_for_tests.py @@ -290,10 +290,6 @@ def wait_for_test(test_path, results, wait, check_throughput, check_memory, igno else: test_status_filepath = test_path - - import pdb - pdb.set_trace() - logging.debug("Watching file: '{}'".format(test_status_filepath)) while (True): From 9ed3eed26f3a0282564d8cb5fa234d06b2854468 Mon Sep 17 00:00:00 2001 From: Sarat Sreepathi Date: Wed, 18 Jul 2018 17:43:00 -0400 Subject: [PATCH 32/59] MKL Bug fix: Update Intel compiler version Resolves an application crash due to a MKL bug. Application crash was first reported by Balwinder while running SMS_D_Ln1.ne30_ne30.FC5AV1C-L.titan_intel. Pat identified the root cause: MKL routine vdln() along with DEBUG flag -fpe0. Note that this works fine with -fpe1 or higher. It also works fine if 'cnt' (the vector length) is < 10 in the following source, and fails if 'cnt" is >= 10. A reproducer was sent to OLCF to isolate the issue. OLCF suggested latest compiler version which fixed this issue. 
--- config/e3sm/machines/config_machines.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/e3sm/machines/config_machines.xml b/config/e3sm/machines/config_machines.xml index 2b0425277cb..7a877a11d53 100644 --- a/config/e3sm/machines/config_machines.xml +++ b/config/e3sm/machines/config_machines.xml @@ -2124,7 +2124,7 @@ cray-libsci cray-mpich atp - intel/18.0.0.128 + intel/18.0.1.163 cray-mpich/7.6.3 atp/2.1.1 From c1d281da822fd280064789d9bcca6cdfdafb94f9 Mon Sep 17 00:00:00 2001 From: Michael Deakin Date: Wed, 18 Jul 2018 18:42:46 -0600 Subject: [PATCH 33/59] configurations to build on blake and white, Skylake and P100 testbeds respectively --- config/e3sm/machines/config_batch.xml | 12 +++ config/e3sm/machines/config_compilers.xml | 59 +++++++++++++++ config/e3sm/machines/config_machines.xml | 89 +++++++++++++++++++++++ 3 files changed, 160 insertions(+) diff --git a/config/e3sm/machines/config_batch.xml b/config/e3sm/machines/config_batch.xml index 97d15b9e421..e815a49767d 100644 --- a/config/e3sm/machines/config_batch.xml +++ b/config/e3sm/machines/config_batch.xml @@ -517,6 +517,18 @@ + + + rhel7G + + + + + + blake + + + --ntasks-per-node={{ tasks_per_node }} diff --git a/config/e3sm/machines/config_compilers.xml b/config/e3sm/machines/config_compilers.xml index ed7fa654891..41ff1e5c542 100644 --- a/config/e3sm/machines/config_compilers.xml +++ b/config/e3sm/machines/config_compilers.xml @@ -1444,6 +1444,65 @@ for mct, etc. 
+ + /ascldap/users/mdeakin/tpl-sources/kokkos/bin/nvcc_wrapper + /ascldap/users/mdeakin/white/kokkos-release + + -expt-extended-lambda -DCUDA_BUILD + + + + + + -DFORTRANUNDERSCORE -DNO_R16 -DCPRINTEL + + + -xCORE_AVX512 -mkl -std=gnu99 + -O3 -g -debug minimal + -O0 -g + + + -xCORE_AVX512 -mkl -std=c++11 + -O3 -g -debug minimal + -O0 -g + -qopenmp + + + -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -xCORE_AVX512 -mkl + -qopenmp + -O3 -g -debug minimal + -O0 -g -check uninit -check bounds -check pointers -fpe0 -check noarg_temp_created + + + -mkl + -qopenmp + -L$(NETCDF_FORTRAN_PATH)/lib64 + + + -O0 + -qopenmp + + + -fixed -132 + + + -free + + TRUE + + -r8 + + ifort + icc + icpc + FORTRAN + + -cxxlib + + TRUE + /ascldap/users/mdeakin/blake/kokkos-release + + -O2 -fp-model precise -I/soft/intel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/inc diff --git a/config/e3sm/machines/config_machines.xml b/config/e3sm/machines/config_machines.xml index 2b0425277cb..70b572ae293 100644 --- a/config/e3sm/machines/config_machines.xml +++ b/config/e3sm/machines/config_machines.xml @@ -728,6 +728,95 @@ + + IBM Power 8 Testbed machine + white + lsf + LINUX + e3sm_developer + gnu + openmpi + $ENV{HOME}/projects/e3sm/scratch/$CASE/run + $ENV{HOME}/projects/e3sm/scratch/$CASE/bld + $ENV{HOME}/projects/e3sm/cesm-inputdata + $ENV{HOME}/projects/e3sm/ptclm-data + $ENV{HOME}/projects/e3sm/scratch/archive/$CASE + csm/$CASE + $ENV{HOME}/projects/e3sm/scratch + $ENV{HOME}/projects/e3sm/baselines/$COMPILER + $CCSMROOT/tools/cprnc/build/cprnc + mdeakin at sandia dot gov + 32 + 4 + 1 + + /usr/share/Modules/init/sh + /usr/share/Modules/init/python.py + module + module + + cuda/9.0.176 + gcc/7.2.0 + zlib/1.2.8 + openblas/0.2.20/gcc/7.2.0 + openmpi/2.1.2/gcc/7.2.0/cuda/9.0.176 + netcdf-exo/4.4.1.1/openmpi/2.1.2/gcc/7.2.0/cuda/9.0.176 + + + + $ENV{NETCDF_ROOT} + $ENV{NETCDFF_ROOT} + + + mpirun + + + + + + Skylake Testbed machine + blake + slurm + LINUX + 
e3sm_developer + intel18 + openmpi + $ENV{HOME}/projects/e3sm/scratch/$CASE/run + $ENV{HOME}/projects/e3sm/scratch/$CASE/bld + $ENV{HOME}/projects/e3sm/cesm-inputdata + $ENV{HOME}/projects/e3sm/ptclm-data + $ENV{HOME}/projects/e3sm/scratch/archive/$CASE + csm/$CASE + $ENV{HOME}/projects/e3sm/scratch + $ENV{HOME}/projects/e3sm/baselines/$COMPILER + $CCSMROOT/tools/cprnc/build/cprnc + mdeakin at sandia dot gov + 48 + 48 + 48 + + /usr/share/Modules/init/sh + /usr/share/Modules/init/python.py + module + module + + zlib/1.2.11 + intel/compilers/18.1.163 + openmpi/2.1.2/intel/18.1.163 + hdf5/1.10.1/openmpi/2.1.2/intel/18.1.163 + netcdf-exo/4.4.1.1/openmpi/2.1.2/intel/18.1.163 + + + + $ENV{NETCDF_ROOT} + $ENV{NETCDFF_ROOT} + + + mpirun + + + + Linux workstation for ANL compute.*mcs.anl.gov From 2543fed08230cabd80bf4d7ddffcbac0683cffe2 Mon Sep 17 00:00:00 2001 From: Michael Deakin Date: Wed, 18 Jul 2018 18:59:09 -0600 Subject: [PATCH 34/59] Fix to the CIME makefile for building with separate netcdf-c and netcdf-fortran libraries --- scripts/Tools/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/Tools/Makefile b/scripts/Tools/Makefile index 2d8e68a6dce..b98d887707e 100644 --- a/scripts/Tools/Makefile +++ b/scripts/Tools/Makefile @@ -180,7 +180,7 @@ ifdef NETCDF_C_PATH LIB_NETCDF_C:=$(NETCDF_C_PATH)/lib endif ifndef LIB_NETCDF_FORTRAN - LIB_NETCDF_FORTRAN:=$(NETCDF_C_PATH)/lib + LIB_NETCDF_FORTRAN:=$(NETCDF_FORTRAN_PATH)/lib endif else ifdef NETCDF_FORTRAN_PATH $(error "NETCDF_FORTRAN_PATH specified without NETCDF_C_PATH") From bd05a941ef5e5dea7958eaaf8933d6413768f4c8 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Thu, 19 Jul 2018 15:47:54 -0600 Subject: [PATCH 35/59] Fix wait_for_test test reporting Add some new tests too. 
[BFB] --- scripts/lib/CIME/wait_for_tests.py | 2 +- scripts/tests/scripts_regression_tests.py | 67 ++++++++++++++++++++--- 2 files changed, 61 insertions(+), 8 deletions(-) diff --git a/scripts/lib/CIME/wait_for_tests.py b/scripts/lib/CIME/wait_for_tests.py index 03e77bf97b7..c86482e0ec8 100644 --- a/scripts/lib/CIME/wait_for_tests.py +++ b/scripts/lib/CIME/wait_for_tests.py @@ -362,7 +362,7 @@ def wait_for_tests(test_paths, cdash_build_group=CDASH_DEFAULT_BUILD_GROUP, timeout=None, force_log_upload=False, - no_run=True): + no_run=False): ############################################################################### # Set up signal handling, we want to print results before the program # is terminated diff --git a/scripts/tests/scripts_regression_tests.py b/scripts/tests/scripts_regression_tests.py index fecaa83ba55..90e694907a1 100755 --- a/scripts/tests/scripts_regression_tests.py +++ b/scripts/tests/scripts_regression_tests.py @@ -672,18 +672,28 @@ class M_TestWaitForTests(unittest.TestCase): def setUp(self): ########################################################################### self._testroot = os.path.join(TEST_ROOT,"TestWaitForTests") + + # basic tests self._testdir_all_pass = os.path.join(self._testroot, 'scripts_regression_tests.testdir_all_pass') self._testdir_with_fail = os.path.join(self._testroot, 'scripts_regression_tests.testdir_with_fail') self._testdir_unfinished = os.path.join(self._testroot, 'scripts_regression_tests.testdir_unfinished') self._testdir_unfinished2 = os.path.join(self._testroot, 'scripts_regression_tests.testdir_unfinished2') - testdirs = [self._testdir_all_pass, self._testdir_with_fail, self._testdir_unfinished, self._testdir_unfinished2] - for testdir in testdirs: + + # live tests + self._testdir_teststatus1 = os.path.join(self._testroot, 'scripts_regression_tests.testdir_teststatus1') + self._testdir_teststatus2 = os.path.join(self._testroot, 'scripts_regression_tests.testdir_teststatus2') + + self._testdirs = 
[self._testdir_all_pass, self._testdir_with_fail, self._testdir_unfinished, self._testdir_unfinished2, + self._testdir_teststatus1, self._testdir_teststatus2] + basic_tests = self._testdirs[:self._testdirs.index(self._testdir_teststatus1)] + + for testdir in self._testdirs: if os.path.exists(testdir): shutil.rmtree(testdir) os.makedirs(testdir) for r in range(10): - for testdir in testdirs: + for testdir in basic_tests: os.makedirs(os.path.join(testdir, str(r))) make_fake_teststatus(os.path.join(testdir, str(r)), "Test_%d" % r, TEST_PASS_STATUS, RUN_PHASE) @@ -691,6 +701,11 @@ def setUp(self): make_fake_teststatus(os.path.join(self._testdir_unfinished, "5"), "Test_5", TEST_PEND_STATUS, RUN_PHASE) make_fake_teststatus(os.path.join(self._testdir_unfinished2, "5"), "Test_5", TEST_PASS_STATUS, SUBMIT_PHASE) + integration_tests = self._testdirs[len(basic_tests):] + for integration_test in integration_tests: + os.makedirs(os.path.join(integration_test, "0")) + make_fake_teststatus(os.path.join(integration_test, "0"), "Test_0", TEST_PASS_STATUS, CORE_PHASES[0]) + # Set up proxy if possible self._unset_proxy = setup_proxy() @@ -699,9 +714,11 @@ def setUp(self): ########################################################################### def tearDown(self): ########################################################################### - shutil.rmtree(self._testdir_all_pass) - shutil.rmtree(self._testdir_with_fail) - shutil.rmtree(self._testdir_unfinished) + do_teardown = sys.exc_info() == (None, None, None) + + if do_teardown: + for testdir in self._testdirs: + shutil.rmtree(testdir) kill_subprocesses() @@ -720,7 +737,7 @@ def simple_test(self, testdir, expected_results, extra_args="", build_name=None) from_dir=testdir, expected_stat=expected_stat) lines = [line for line in output.splitlines() if line.startswith("Test '")] - self.assertEqual(len(lines), 10) + self.assertEqual(len(lines), len(expected_results)) for idx, line in enumerate(lines): testname, status = 
parse_test_status(line) self.assertEqual(status, expected_results[idx]) @@ -873,6 +890,42 @@ def test_wait_for_test_cdash_kill(self): # TODO: Any further checking of xml output worth doing? + ########################################################################### + def live_test_impl(self, testdir, expected_results, last_phase, last_status): + ########################################################################### + run_thread = threading.Thread(target=self.threaded_test, args=(testdir, expected_results)) + run_thread.daemon = True + run_thread.start() + + time.sleep(5) + + self.assertTrue(run_thread.isAlive(), msg="wait_for_tests should have waited") + + for core_phase in CORE_PHASES[1:]: + with TestStatus(test_dir=os.path.join(self._testdir_teststatus1, "0")) as ts: + ts.set_status(core_phase, last_status if core_phase == last_phase else TEST_PASS_STATUS) + + time.sleep(5) + + if core_phase != last_phase: + self.assertTrue(run_thread.isAlive(), msg="wait_for_tests should have waited after passing phase {}".format(core_phase)) + else: + run_thread.join(timeout=10) + self.assertFalse(run_thread.isAlive(), msg="wait_for_tests should have finished after phase {}".format(core_phase)) + break + + self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error) + + ########################################################################### + def test_wait_for_test_test_status_integration_pass(self): + ########################################################################### + self.live_test_impl(self._testdir_teststatus1, ["PASS"], RUN_PHASE, TEST_PASS_STATUS) + + ########################################################################### + def test_wait_for_test_test_status_integration_submit_fail(self): + ########################################################################### + self.live_test_impl(self._testdir_teststatus1, ["FAIL"], SUBMIT_PHASE, TEST_FAIL_STATUS) + 
############################################################################### class TestCreateTestCommon(unittest.TestCase): ############################################################################### From d952d4b12f8ae756b3657ebc0650b8d5866e1f41 Mon Sep 17 00:00:00 2001 From: Michael Deakin Date: Fri, 20 Jul 2018 12:18:58 -0600 Subject: [PATCH 36/59] Add CXXFLAGS and KOKKOS_PATH to the xml schema for config_compilers --- config/xml_schemas/config_compilers_v2.xsd | 2 ++ 1 file changed, 2 insertions(+) diff --git a/config/xml_schemas/config_compilers_v2.xsd b/config/xml_schemas/config_compilers_v2.xsd index d76e52315a4..d84ee24990d 100644 --- a/config/xml_schemas/config_compilers_v2.xsd +++ b/config/xml_schemas/config_compilers_v2.xsd @@ -141,6 +141,7 @@ + @@ -157,6 +158,7 @@ + From 18734255036f03f801cf4bf19a05fa861a8ec3af Mon Sep 17 00:00:00 2001 From: Patrick Worley Date: Sun, 22 Jul 2018 17:53:47 -0700 Subject: [PATCH 37/59] Output MPI task to compute node mapping Currently MPI task to compute node mapping information is output in two locations, once in CAM, where it is truncated after the first 256 MPI tasks, and once in CLM, where it is truncated after the first 100 MPI tasks, both only for the respective component. This is not useful in current production runs. The use of environment variables, such as MPICH_CPUMASK_DISPLAY on Cray systems, generate data that are unnecessarily verbose for our needs. Here a share routine is introduced that writes out one line per compute node. Each line contains the compute node name and the list of MPI tasks assigned to that node for a given communicator. This is then called in the driver and writes out the task-to-node mapping for the entire coupled model. A separate branch will then introduce this into the individual components, replacing the current logic in both CAM and CLM, for example. 
The share routine also optionally returns the number of compute nodes and the task-to-node mapping, which is needed in the internal CAM load balancing. [BFB] --- src/drivers/mct/shr/seq_comm_mct.F90 | 24 ++- src/share/util/shr_taskmap_mod.F90 | 308 +++++++++++++++++++++++++++ 2 files changed, 326 insertions(+), 6 deletions(-) create mode 100644 src/share/util/shr_taskmap_mod.F90 diff --git a/src/drivers/mct/shr/seq_comm_mct.F90 b/src/drivers/mct/shr/seq_comm_mct.F90 index 25065e2836c..ab73da8e857 100644 --- a/src/drivers/mct/shr/seq_comm_mct.F90 +++ b/src/drivers/mct/shr/seq_comm_mct.F90 @@ -16,12 +16,14 @@ module seq_comm_mct !!! the namelist). ARE OTHER PROTECTIONS/CHECKS NEEDED??? - use mct_mod , only : mct_world_init, mct_world_clean, mct_die - use shr_sys_mod , only : shr_sys_abort, shr_sys_flush - use shr_mpi_mod , only : shr_mpi_chkerr, shr_mpi_bcast, shr_mpi_max - use shr_file_mod, only : shr_file_getUnit, shr_file_freeUnit - use esmf , only : ESMF_LogKind_Flag, ESMF_LOGKIND_NONE - use esmf , only : ESMF_LOGKIND_SINGLE, ESMF_LOGKIND_MULTI + use mct_mod , only : mct_world_init, mct_world_clean, mct_die + use shr_sys_mod , only : shr_sys_abort, shr_sys_flush + use shr_mpi_mod , only : shr_mpi_chkerr, shr_mpi_bcast, shr_mpi_max + use shr_file_mod , only : shr_file_getUnit, shr_file_freeUnit + use shr_taskmap_mod, only : shr_taskmap_write + use perf_mod , only : t_startf, t_stopf + use esmf , only : ESMF_LogKind_Flag, ESMF_LOGKIND_NONE + use esmf , only : ESMF_LOGKIND_SINGLE, ESMF_LOGKIND_MULTI implicit none @@ -291,6 +293,16 @@ subroutine seq_comm_init(global_comm_in, driver_comm_in, nmlfile, drv_comm_id) call shr_sys_abort(trim(subname)//' ERROR decomposition error ') endif + ! 
output task-to-node mapping + if (mype == 0) then + write(logunit,100) global_numpes +100 format(//,i7,' pes participating in computation of coupled model') + call shr_sys_flush(logunit) + endif + call t_startf("shr_taskmap_write") + call shr_taskmap_write(logunit, GLOBAL_COMM_IN, 'GLOBAL') + call t_stopf("shr_taskmap_write") + ! Initialize gloiam on all IDs global_mype = mype diff --git a/src/share/util/shr_taskmap_mod.F90 b/src/share/util/shr_taskmap_mod.F90 new file mode 100644 index 00000000000..735889d5f8a --- /dev/null +++ b/src/share/util/shr_taskmap_mod.F90 @@ -0,0 +1,308 @@ +module shr_taskmap_mod +!----------------------------------------------------------------------- +! +! Purpose: +! Output mapping of MPI tasks to nodes for a specified +! communicator +! +! Methods: +! Use mpi_get_processor_name to identify the node that an MPI +! task for a given communicator is assigned to. Gather these +! data to task 0 and then write out the list of MPI +! tasks associated with each node using the designated unit +! number +! +! Author: P. Worley +! +!----------------------------------------------------------------------- + +!----------------------------------------------------------------------- +!- use statements ------------------------------------------------------ +!----------------------------------------------------------------------- + use shr_sys_mod, only: shr_sys_abort + +!----------------------------------------------------------------------- +!- module boilerplate -------------------------------------------------- +!----------------------------------------------------------------------- + implicit none + include 'mpif.h' + private + save ! Make the default access private + +!----------------------------------------------------------------------- +! Public interfaces ---------------------------------------------------- +!----------------------------------------------------------------------- + public :: & + shr_taskmap_write ! 
write out list of nodes + ! with list of assigned MPI tasks + ! for a given communicator + + CONTAINS + +! +!======================================================================== +! + subroutine shr_taskmap_write (unit_num, comm_id, comm_name, & + save_nnodes, save_task_node_map) + +!----------------------------------------------------------------------- +! Purpose: Write out list of nodes used by processes in a given +! communicator. For each node output the list of MPI tasks +! assigned to it. +! Author: P. Worley +!----------------------------------------------------------------------- +!------------------------------Arguments-------------------------------- + integer, intent(in) :: unit_num ! unit number for output + integer, intent(in) :: comm_id ! MPI communicator + character(*), intent(in) :: comm_name ! MPI communicator label + + integer, intent(out), optional :: save_nnodes + ! return number of nodes + integer, intent(out), optional :: save_task_node_map(:) + ! return task-to-node map + +!---------------------------Local Workspace----------------------------- + integer :: iam ! task id in comm_id + integer :: npes ! number of MPI tasks in comm_id + integer :: ier ! return error status + integer :: max_len ! maximum name length + integer :: length ! node name length + integer :: c, i, j ! loop indices + integer :: nnodes ! number of nodes + + ! flag to indicate whether returning number of nodes + logical :: broadcast_nnodes + + ! flag to indicate whether returning task-to-node mapping + logical :: broadcast_task_node_map + + ! mapping of tasks to ordered list of nodes + integer, allocatable :: task_node_map(:) + + ! number of MPI tasks per node + integer, allocatable :: node_task_cnt(:) + integer, allocatable :: node_task_tmpcnt(:) + + ! MPI tasks ordered by nodes to which they are assigned + integer, allocatable :: node_task_map(:) + + ! 
offset into node_task_map for processes assigned to given node + integer, allocatable :: node_task_offset(:) + + logical :: masterproc ! masterproc flag + logical :: done ! search completion flag + + ! node names for each mpi task + character(len=mpi_max_processor_name) :: tmp_name + character, allocatable :: task_node_name(:) ! for this task + character, allocatable :: task_node_names(:) ! for all tasks + + ! node names without duplicates + character(len=mpi_max_processor_name), allocatable :: node_names(:) + + ! routine name, for error reporting + character(*),parameter :: subname = "(shr_taskmap_write)" + +!----------------------------------------------------------------------- + ! + ! Get my id + ! + call mpi_comm_rank (comm_id, iam, ier) + if (iam == 0) then + masterproc = .true. + else + masterproc = .false. + end if + + ! + ! Get number of MPI tasks + ! + call mpi_comm_size (comm_id, npes, ier) + + ! + ! Determine whether returning number of nodes + ! + broadcast_nnodes = .false. + if (present(save_nnodes)) then + broadcast_nnodes = .true. + endif + + ! + ! Determine whether returning task-to-node mapping information + ! + broadcast_task_node_map = .false. + if (present(save_task_node_map)) then + if (size(save_task_node_map) >= npes) then + broadcast_task_node_map = .true. + else + call shr_sys_abort(trim(subname)//': array for task-to-node mapping data too small') + endif + endif + + ! + ! Allocate arrays for collecting node names + ! + max_len = mpi_max_processor_name + allocate ( task_node_name(max_len), stat=ier ) + if (ier /= 0) & + call shr_sys_abort(trim(subname)//': allocate task_node_name failed') + + allocate ( task_node_names(max_len*npes), stat=ier ) + if (ier /= 0) & + call shr_sys_abort(trim(subname)//': allocate task_node_names failed') + + ! + ! Get node names and send to root. + ! (Assume that processor names are node names.) + ! 
+ call mpi_get_processor_name (tmp_name, length, ier) + task_node_name(:) = ' ' + do i = 1, length + task_node_name(i) = tmp_name(i:i) + end do + + ! + ! Gather node names + ! + task_node_names(:) = ' ' + call mpi_gather (task_node_name, max_len, mpi_character, & + task_node_names, max_len, mpi_character, & + 0, comm_id, ier) + + if (masterproc) then + ! + ! Identify nodes and task/node mapping. + ! + allocate ( task_node_map(0:npes-1), stat=ier ) + if (ier /= 0) & + call shr_sys_abort(trim(subname)//': allocate task_node_map failed') + task_node_map(:) = -1 + + allocate ( node_names(0:npes-1), stat=ier ) + if (ier /= 0) & + call shr_sys_abort(trim(subname)//': allocate node_names failed') + node_names(:) = ' ' + + allocate ( node_task_cnt(0:npes-1), stat=ier ) + if (ier /= 0) & + call shr_sys_abort(trim(subname)//': allocate node_task_cnt failed') + node_task_cnt(:) = 0 + + do c=1,max_len + tmp_name(c:c) = task_node_names(c) + enddo + + node_names(0) = trim(tmp_name) + task_node_map(0) = 0 + node_task_cnt(0) = 1 + nnodes = 1 + + do i=1,npes-1 + do c=1,max_len + tmp_name(c:c) = task_node_names(i*max_len+c) + enddo + + j = 0 + done = .false. + do while ((.not. done) .and. (j < nnodes)) + if (trim(node_names(j)) .eq. trim(tmp_name)) then + task_node_map(i) = j + node_task_cnt(j) = node_task_cnt(j) + 1 + done = .true. + endif + j = j + 1 + enddo + + if (.not. done) then + node_names(nnodes) = trim(tmp_name) + task_node_map(i) = nnodes + node_task_cnt(nnodes) = 1 + nnodes = nnodes + 1 + endif + + enddo + + ! + ! Identify node/task mapping. + ! 
+ allocate ( node_task_offset(0:nnodes-1), stat=ier ) + if (ier /= 0) & + call shr_sys_abort(trim(subname)//': allocate node_task_offset failed') + node_task_offset(:) = 0 + + do j=1,nnodes-1 + node_task_offset(j) = node_task_offset(j-1) + node_task_cnt(j-1) + enddo + + allocate ( node_task_tmpcnt(0:nnodes-1), stat=ier ) + if (ier /= 0) & + call shr_sys_abort(trim(subname)//': allocate node_task_tmpcnt failed') + node_task_tmpcnt(:) = 0 + + allocate ( node_task_map(0:npes-1), stat=ier ) + if (ier /= 0) & + call shr_sys_abort(trim(subname)//': allocate node_task_map failed') + node_task_map(:) = -1 + + do i=0,npes-1 + j = task_node_map(i) + node_task_map(node_task_offset(j) + node_task_tmpcnt(j)) = i + node_task_tmpcnt(j) = node_task_tmpcnt(j) + 1 + enddo + + ! + ! Output node/task mapping + ! + write(unit_num,*) '-----------------------------------' + write(unit_num,*) trim(comm_name),': ',nnodes,' NODES, ',npes,' MPI TASKS' + write(unit_num,*) 'NODE NAME : ',trim(comm_name),' TASK #' + do j=0,nnodes-1 + write(unit_num,101,advance='no') trim(node_names(j)) +101 format(a," : ") + do i=node_task_offset(j),node_task_offset(j)+node_task_cnt(j)-1 + write(unit_num,102,advance='no') node_task_map(i) + enddo +102 format(I7, " ") + write(unit_num,103,advance='no') +103 format(/) + enddo + write(unit_num,*) '-----------------------------------' + + if (broadcast_nnodes) then + save_nnodes = nnodes + endif + + if (broadcast_task_node_map) then + do i=0,npes-1 + save_task_node_map(i+1) = task_node_map(i) + enddo + endif + + deallocate(node_task_map) + deallocate(node_task_tmpcnt) + deallocate(node_task_offset) + deallocate(node_task_cnt) + deallocate(node_names) + deallocate(task_node_map) + + else + + endif + + if (broadcast_nnodes) then + call mpi_bcast(save_nnodes, 1, mpi_integer, 0, comm_id, ier) + endif + + if (broadcast_task_node_map) then + call mpi_bcast(save_task_node_map, npes, mpi_integer, 0, comm_id, ier) + endif + + deallocate(task_node_name) + 
deallocate(task_node_names) + + end subroutine shr_taskmap_write + +! +!======================================================================== +! +end module shr_taskmap_mod From 22e8dcd73a009accc37487c8c7afa22217a6174e Mon Sep 17 00:00:00 2001 From: Patrick Worley Date: Sun, 22 Jul 2018 19:12:37 -0700 Subject: [PATCH 38/59] Unset environment variables to output task-to-node mapping With the call to the shr_taskmap_write routine in the driver, the mapping data generated by the system when setting the corresponding environment variable is redundant. This is removed for the systems currently setting the variable. [BFB] --- config/e3sm/machines/config_machines.xml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/config/e3sm/machines/config_machines.xml b/config/e3sm/machines/config_machines.xml index 70b572ae293..8da3ca7d2d6 100644 --- a/config/e3sm/machines/config_machines.xml +++ b/config/e3sm/machines/config_machines.xml @@ -183,7 +183,7 @@ 1 1 - 1 + 64M spread @@ -327,7 +327,7 @@ 1 1 - 1 + 128M spread @@ -483,7 +483,7 @@ 1 1 - 1 + 128M spread @@ -1245,7 +1245,7 @@ - $SHELL{t=$ENV{OMP_NUM_THREADS};b=0;r=$[36/$t];while [ $r -gt 0 ];do printf "$b-$[$b+$t-1]:";((r--));((b=b+t));done;} + 1 @@ -1681,7 +1681,7 @@ - 1 + -e OMP_NUM_THREADS=$ENV{OMP_NUM_THREADS} -e OMP_STACKSIZE=128M -e KMP_AFFINITY=granularity=thread,scatter @@ -2247,7 +2247,7 @@ $MPILIB 1 1 - 1 + 128M 128M From 3d7ef21d13960a1ba7d782c2a0feb4890e152ec0 Mon Sep 17 00:00:00 2001 From: Robert Jacob Date: Mon, 23 Jul 2018 06:42:27 -0700 Subject: [PATCH 39/59] Fix archiving of cam and clm history cam and clm had the wrong entries for hist_file_extension which meant that history files were not copied to the short-term archive. Also add addition rest_file_extension entries for mpas components. 
--- config/e3sm/config_archive.xml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/config/e3sm/config_archive.xml b/config/e3sm/config_archive.xml index 106557e553e..a878daffa8d 100644 --- a/config/e3sm/config_archive.xml +++ b/config/e3sm/config_archive.xml @@ -4,7 +4,8 @@ [ri] rh\d* rs - [eh] + h\d*.*\.nc$ + e nhfil rpointer.atm$NINST_STRING @@ -24,7 +25,8 @@ r rh\d? - h\d* + h\d*.*\.nc$ + e locfnh rpointer.lnd$NINST_STRING @@ -73,6 +75,7 @@ rst + rst.am.timeSeriesStatsMonthly hist unset @@ -92,6 +95,7 @@ rst + rst.am.timeSeriesStatsMonthly hist unset @@ -111,6 +115,7 @@ rst + rst.am.timeSeriesStatsMonthly hist unset From 2b64d034d0bcf52eff2ae853c878b634a54b8394 Mon Sep 17 00:00:00 2001 From: Patrick Worley Date: Mon, 23 Jul 2018 08:36:40 -0700 Subject: [PATCH 40/59] Modify task map output format The goal is for the output to be concise, human-readable, and easily extracted and parsed for subsequent archiving and analysis. Adding a node number to the node name and also the number of MPI tasks associated with each node should make it easier to postprocess without seriously degrading conciseness, and also has improved human-readability, [BFB] --- src/share/util/shr_taskmap_mod.F90 | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/src/share/util/shr_taskmap_mod.F90 b/src/share/util/shr_taskmap_mod.F90 index 735889d5f8a..8e485655175 100644 --- a/src/share/util/shr_taskmap_mod.F90 +++ b/src/share/util/shr_taskmap_mod.F90 @@ -253,20 +253,22 @@ subroutine shr_taskmap_write (unit_num, comm_id, comm_name, & ! ! Output node/task mapping ! 
- write(unit_num,*) '-----------------------------------' - write(unit_num,*) trim(comm_name),': ',nnodes,' NODES, ',npes,' MPI TASKS' - write(unit_num,*) 'NODE NAME : ',trim(comm_name),' TASK #' + write(unit_num,100) '--------------------------------------------------------------' +100 format(a) + write(unit_num,101) trim(comm_name),nnodes,npes +101 format(a,' communicator : ',I6,' nodes, ',I7,' MPI tasks') + write(unit_num,100) 'COMMUNICATOR NODE # [NODE NAME] : (# OF MPI TASKS) TASK # LIST' do j=0,nnodes-1 - write(unit_num,101,advance='no') trim(node_names(j)) -101 format(a," : ") + write(unit_num,102,advance='no') trim(comm_name),j,trim(node_names(j)), node_task_cnt(j) +102 format(a,' NODE ',I6,' [ ',a,' ] : ( ',I7,' MPI TASKS )') do i=node_task_offset(j),node_task_offset(j)+node_task_cnt(j)-1 - write(unit_num,102,advance='no') node_task_map(i) + write(unit_num,103,advance='no') node_task_map(i) enddo -102 format(I7, " ") - write(unit_num,103,advance='no') -103 format(/) +103 format(I7, " ") + write(unit_num,104,advance='no') +104 format(/) enddo - write(unit_num,*) '-----------------------------------' + write(unit_num,100) '--------------------------------------------------------------' if (broadcast_nnodes) then save_nnodes = nnodes From a5f9b782b27fc41ae2f9c7c6ce89580d49df082d Mon Sep 17 00:00:00 2001 From: Patrick Worley Date: Mon, 23 Jul 2018 11:57:45 -0700 Subject: [PATCH 41/59] Uncomment MV2_CPU_MAPPING definition for Anvil Commenting out defining the MV2_CPU_MAPPING variable for Anvil broke XML parsing for Titan builds. Uncommenting this out until the POC for Anvil can determine what should be done here. 
[BFB] --- config/e3sm/machines/config_machines.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/e3sm/machines/config_machines.xml b/config/e3sm/machines/config_machines.xml index 8da3ca7d2d6..48581ae925c 100644 --- a/config/e3sm/machines/config_machines.xml +++ b/config/e3sm/machines/config_machines.xml @@ -1245,7 +1245,7 @@ - + $SHELL{t=$ENV{OMP_NUM_THREADS};b=0;r=$[36/$t];while [ $r -gt 0 ];do printf "$b-$[$b+$t-1]:";((r--));((b=b+t));done;} 1 From 185d3963564a7e714fda3090e8dc6d99cac45321 Mon Sep 17 00:00:00 2001 From: Patrick Worley Date: Thu, 26 Jul 2018 10:05:38 -0700 Subject: [PATCH 42/59] Modify driver output format Tweaking the output from driver immediately preceding the task-to-compute node mapping output. [BFB] --- src/drivers/mct/shr/seq_comm_mct.F90 | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/drivers/mct/shr/seq_comm_mct.F90 b/src/drivers/mct/shr/seq_comm_mct.F90 index ab73da8e857..c48b70c968a 100644 --- a/src/drivers/mct/shr/seq_comm_mct.F90 +++ b/src/drivers/mct/shr/seq_comm_mct.F90 @@ -224,6 +224,7 @@ subroutine seq_comm_init(global_comm_in, driver_comm_in, nmlfile, drv_comm_id) integer, pointer :: comps(:) ! array with component ids integer, pointer :: comms(:) ! array with mpicoms integer :: nu + character(len=8) :: c_global_numpes ! global number of pes character(len=seq_comm_namelen) :: valid_comps(ncomps) integer :: & @@ -295,8 +296,9 @@ subroutine seq_comm_init(global_comm_in, driver_comm_in, nmlfile, drv_comm_id) ! 
output task-to-node mapping if (mype == 0) then - write(logunit,100) global_numpes -100 format(//,i7,' pes participating in computation of coupled model') + write(c_global_numpes,'(i8)') global_numpes + write(logunit,100) trim(adjustl(c_global_numpes)) +100 format(/,a,' pes participating in computation of coupled model') call shr_sys_flush(logunit) endif call t_startf("shr_taskmap_write") From 4c4f8dde8f4f765ba9251e64ea5df8ea74824d84 Mon Sep 17 00:00:00 2001 From: Patrick Worley Date: Fri, 27 Jul 2018 14:22:59 -0700 Subject: [PATCH 43/59] Remove unnecessary white space in task-to-node map output Initial implementation wrote out all integers as I7 or I8. Here adjustl() and trim() are used to reduce field delimiters to a single white space. [BFB] --- src/share/util/shr_taskmap_mod.F90 | 34 +++++++++++++++++++++++------- 1 file changed, 26 insertions(+), 8 deletions(-) diff --git a/src/share/util/shr_taskmap_mod.F90 b/src/share/util/shr_taskmap_mod.F90 index 8e485655175..1befb33961e 100644 --- a/src/share/util/shr_taskmap_mod.F90 +++ b/src/share/util/shr_taskmap_mod.F90 @@ -100,6 +100,13 @@ subroutine shr_taskmap_write (unit_num, comm_id, comm_name, & ! node names without duplicates character(len=mpi_max_processor_name), allocatable :: node_names(:) + ! string versions of numerical values + character(len=8) :: c_npes ! number of MPI tasks + character(len=8) :: c_nnodes ! number of nodes + character(len=8) :: c_nodeid ! node id + character(len=8) :: c_node_npes ! number of MPI tasks for a given node + character(len=8) :: c_taskid ! MPI task id + ! routine name, for error reporting character(*),parameter :: subname = "(shr_taskmap_write)" @@ -254,19 +261,30 @@ subroutine shr_taskmap_write (unit_num, comm_id, comm_name, & ! Output node/task mapping ! 
write(unit_num,100) '--------------------------------------------------------------' -100 format(a) - write(unit_num,101) trim(comm_name),nnodes,npes -101 format(a,' communicator : ',I6,' nodes, ',I7,' MPI tasks') +100 format(a) + + write(c_npes,'(i8)') npes + write(c_nnodes,'(i8)') nnodes + write(unit_num,101) trim(comm_name), trim(adjustl(c_nnodes)), trim(adjustl(c_npes)) +101 format(a,' communicator : ',a,' nodes, ',a,' MPI tasks') + write(unit_num,100) 'COMMUNICATOR NODE # [NODE NAME] : (# OF MPI TASKS) TASK # LIST' + do j=0,nnodes-1 - write(unit_num,102,advance='no') trim(comm_name),j,trim(node_names(j)), node_task_cnt(j) -102 format(a,' NODE ',I6,' [ ',a,' ] : ( ',I7,' MPI TASKS )') + write(c_nodeid,'(i8)') j + write(c_node_npes,'(i8)') node_task_cnt(j) + write(unit_num,102,advance='no') trim(comm_name), trim(adjustl(c_nodeid)), & + trim(node_names(j)), trim(adjustl(c_node_npes)) +102 format(a,' NODE ',a,' [ ',a,' ] : ( ',a,' MPI TASKS )') + do i=node_task_offset(j),node_task_offset(j)+node_task_cnt(j)-1 - write(unit_num,103,advance='no') node_task_map(i) + write(c_taskid,'(i8)') node_task_map(i) + write(unit_num,103,advance='no') trim(adjustl(c_taskid)) +103 format(' ',a) enddo -103 format(I7, " ") + write(unit_num,104,advance='no') -104 format(/) +104 format(/) enddo write(unit_num,100) '--------------------------------------------------------------' From 37fed55716c1bb953122e8520bcf38ae9405ff27 Mon Sep 17 00:00:00 2001 From: Gautam Bisht Date: Mon, 30 Jul 2018 08:55:48 -0700 Subject: [PATCH 44/59] Adds four ERS tests for land BCG Adds ERS test for ECA and CTS for 1850 and 20-th century transient at f19_g16 resolution with spun up initial conditions --- config/e3sm/tests.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/config/e3sm/tests.py b/config/e3sm/tests.py index de3e252da8c..43d1d5e4360 100644 --- a/config/e3sm/tests.py +++ b/config/e3sm/tests.py @@ -21,6 +21,10 @@ ("ERS.f09_g16.I1850CLM45CN","clm-bgcinterface"), "ERS.ne11_oQU240.I20TRCLM45", 
("ERS.f19_g16.I1850CNRDCTCBC","clm-rd"), + ("ERS.f19_g16.I1850GSWCNPECACNTBC","clm-eca_f19_g16_I1850GSWCNPECACNTBC"), + ("ERS.f19_g16.I20TRGSWCNPECACNTBC","clm-eca_f19_g16_I20TRGSWCNPECACNTBC"), + ("ERS.f19_g16.I1850GSWCNPRDCTCBC","clm-ctc_f19_g16_I1850GSWCNPRDCTCBC"), + ("ERS.f19_g16.I20TRGSWCNPRDCTCBC","clm-ctc_f19_g16_I20TRGSWCNPRDCTCBC"), "ERS.f09_g16.ICLM45BC") ), From a636e978cbaec5e3a75b06ad8c6a883628f7d4b6 Mon Sep 17 00:00:00 2001 From: noel Date: Mon, 30 Jul 2018 11:37:51 -0700 Subject: [PATCH 45/59] Update edison to use cray-mpich 7.7.0 from 7.6.0 after software maintenance removed the 7.6.0 version. --- config/e3sm/machines/config_machines.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/e3sm/machines/config_machines.xml b/config/e3sm/machines/config_machines.xml index c41d837524e..4f0f7f421bc 100644 --- a/config/e3sm/machines/config_machines.xml +++ b/config/e3sm/machines/config_machines.xml @@ -130,7 +130,7 @@ pmi pmi/5.0.12 cray-mpich - cray-mpich/7.6.0 + cray-mpich/7.7.0 From f3a8f2f851296eb88a354c13e788fa767b01b3a9 Mon Sep 17 00:00:00 2001 From: noel Date: Mon, 30 Jul 2018 18:18:35 -0700 Subject: [PATCH 46/59] For edison, add command "module load pe_archive" to allow using older modules, such as cray-mpich/7.6.0 that was recently removed. 
--- config/e3sm/machines/config_machines.xml | 1 + 1 file changed, 1 insertion(+) diff --git a/config/e3sm/machines/config_machines.xml b/config/e3sm/machines/config_machines.xml index 912a4fb6536..3d3d2b69eb1 100644 --- a/config/e3sm/machines/config_machines.xml +++ b/config/e3sm/machines/config_machines.xml @@ -130,6 +130,7 @@ pmi pmi/5.0.12 cray-mpich + pe_archive cray-mpich/7.6.0 From a67734ee666ff0f6b59cf72efbb65a622ec80fa1 Mon Sep 17 00:00:00 2001 From: Azamat Mametjanov Date: Tue, 31 Jul 2018 17:16:50 +0000 Subject: [PATCH 47/59] Avoid empty env blocks --- config/e3sm/machines/config_machines.xml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/config/e3sm/machines/config_machines.xml b/config/e3sm/machines/config_machines.xml index 48581ae925c..dd63a6647f1 100644 --- a/config/e3sm/machines/config_machines.xml +++ b/config/e3sm/machines/config_machines.xml @@ -1675,14 +1675,12 @@ 1 1 + /projects/ccsm/acme/tools/mpas 2 - - - -e OMP_NUM_THREADS=$ENV{OMP_NUM_THREADS} -e OMP_STACKSIZE=128M -e KMP_AFFINITY=granularity=thread,scatter From a815c56ea6486ce4e9c1a868899afc53d2ffaeb9 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Thu, 2 Aug 2018 14:46:35 -0600 Subject: [PATCH 48/59] Progress --- scripts/lib/CIME/utils.py | 2 +- scripts/lib/CIME/wait_for_tests.py | 125 ++++++++++++++++------ scripts/tests/scripts_regression_tests.py | 6 +- 3 files changed, 95 insertions(+), 38 deletions(-) diff --git a/scripts/lib/CIME/utils.py b/scripts/lib/CIME/utils.py index 3d595f387f0..abbb18a7e58 100644 --- a/scripts/lib/CIME/utils.py +++ b/scripts/lib/CIME/utils.py @@ -409,7 +409,7 @@ def run_cmd(cmd, input_str=None, from_dir=None, verbose=None, arg_stderr = _convert_to_fd(arg_stdout, from_dir) if (verbose != False and (verbose or logger.isEnabledFor(logging.DEBUG))): - logger.info("RUN: {}".format(cmd)) + logger.info("RUN: {}\nFROM: {}".format(cmd, os.getcwd() if from_dir is None else from_dir)) if (input_str is not None): stdin = subprocess.PIPE diff 
--git a/scripts/lib/CIME/wait_for_tests.py b/scripts/lib/CIME/wait_for_tests.py index c86482e0ec8..90fee8c2600 100644 --- a/scripts/lib/CIME/wait_for_tests.py +++ b/scripts/lib/CIME/wait_for_tests.py @@ -51,20 +51,8 @@ def get_test_output(test_path): return "" ############################################################################### -def create_cdash_test_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname): +def create_cdash_xml_boiler(phase, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit): ############################################################################### - # We assume all cases were created from the same code repo - first_result_case = os.path.dirname(list(results.items())[0][1][0]) - try: - srcroot = run_cmd_no_fail("./xmlquery --value CIMEROOT", from_dir=first_result_case) - except: - # Use repo containing this script as last resort - srcroot = CIME.utils.get_cime_root() - - git_commit = CIME.utils.get_current_commit(repo=srcroot) - - data_rel_path = os.path.join("Testing", utc_time) - site_elem = xmlet.Element("Site") if ("JENKINS_START_TIME" in os.environ): @@ -79,40 +67,90 @@ def create_cdash_test_xml(results, cdash_build_name, cdash_build_group, utc_time site_elem.attrib["Hostname"] = hostname site_elem.attrib["OSVersion"] = "Commit: {}{}".format(git_commit, time_info_str) - testing_elem = xmlet.SubElement(site_elem, "Testing") + phase_elem = xmlet.SubElement(site_elem, phase) + + xmlet.SubElement(phase_elem, "StartDateTime").text = time.ctime(current_time) + xmlet.SubElement(phase_elem, "Start{}Time".format("Test" if phase == "Testing" else phase)).text = str(int(current_time)) + + return site_elem, phase_elem + +############################################################################### +def create_cdash_config_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit): 
+############################################################################### + site_elem, config_elem = create_cdash_xml_boiler("Configure", cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit) + + xmlet.SubElement(config_elem, "ConfigureCommand").text = "namelists" + + config_results = [] + for test_name in sorted(results): + test_status = results[test_name][1] + config_results.append("{} Config {}".format(test_name, "PASS" if test_status != NAMELIST_FAIL_STATUS else "NML DIFF")) + + xmlet.SubElement(config_elem, "Log").text = "\n".join(config_results) + + for test_name in sorted(results): + test_status = results[test_name][1] + if test_status == NAMELIST_FAIL_STATUS: + xmlet.SubElement(config_elem, "Warning").text = "{} Config NML DIFF".format(test_name) + + xmlet.SubElement(config_elem, "ConfigureStatus").text = "0" + xmlet.SubElement(config_elem, "ElapsedMinutes").text = "0" # Skip for now + + etree = xmlet.ElementTree(site_elem) + etree.write(os.path.join(data_rel_path, "Configure.xml")) + +############################################################################### +def create_cdash_build_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit): +############################################################################### + site_elem, build_elem = create_cdash_xml_boiler("Build", cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit) + + xmlet.SubElement(build_elem, "ConfigureCommand").text = "case.build" + + # build_results = [] + # for test_name in sorted(results): + # test_status = results[test_name][1] + # config_results.append("{} Config {}".format(test_name, "PASS" if test_status != NAMELIST_FAIL_STATUS else "NML DIFF")) + + # xmlet.SubElement(build_elem, "Log").text = "\n".join(config_results) + + for test_name in sorted(results): + test_path = results[test_name][0] + test_norm_path = test_path if os.path.isdir(test_path) else 
os.path.dirname(test_path) + if get_test_time(test_norm_path) == 0: + xmlet.SubElement(build_elem, "Error").text = "{} Pre-run errors".format(test_name) + + xmlet.SubElement(build_elem, "ElapsedMinutes").text = "0" # Skip for now - start_date_time_elem = xmlet.SubElement(testing_elem, "StartDateTime") - start_date_time_elem.text = time.ctime(current_time) + etree = xmlet.ElementTree(site_elem) + etree.write(os.path.join(data_rel_path, "Build.xml")) - start_test_time_elem = xmlet.SubElement(testing_elem, "StartTestTime") - start_test_time_elem.text = str(int(current_time)) +############################################################################### +def create_cdash_test_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit): +############################################################################### + site_elem, testing_elem = create_cdash_xml_boiler("Testing", cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit) test_list_elem = xmlet.SubElement(testing_elem, "TestList") for test_name in sorted(results): - test_elem = xmlet.SubElement(test_list_elem, "Test") - test_elem.text = test_name + xmlet.SubElement(test_list_elem, "Test").text = test_name for test_name in sorted(results): test_path, test_status = results[test_name] - test_passed = test_status == TEST_PASS_STATUS + test_passed = test_status in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS] test_norm_path = test_path if os.path.isdir(test_path) else os.path.dirname(test_path) full_test_elem = xmlet.SubElement(testing_elem, "Test") - if (test_passed): + if test_passed: full_test_elem.attrib["Status"] = "passed" - elif (test_status == NAMELIST_FAIL_STATUS): + elif (test_status == TEST_PEND_STATUS): full_test_elem.attrib["Status"] = "notrun" else: full_test_elem.attrib["Status"] = "failed" - name_elem = xmlet.SubElement(full_test_elem, "Name") - name_elem.text = test_name + xmlet.SubElement(full_test_elem, "Name").text = 
test_name - path_elem = xmlet.SubElement(full_test_elem, "Path") - path_elem.text = test_norm_path + xmlet.SubElement(full_test_elem, "Path").text = test_norm_path - full_name_elem = xmlet.SubElement(full_test_elem, "FullName") - full_name_elem.text = test_name + xmlet.SubElement(full_test_elem, "FullName").text = test_name xmlet.SubElement(full_test_elem, "FullCommandLine") # text ? @@ -132,21 +170,40 @@ def create_cdash_test_xml(results, cdash_build_name, cdash_build_group, utc_time named_measurement_elem.attrib["type"] = type_attr named_measurement_elem.attrib["name"] = name_attr - value_elem = xmlet.SubElement(named_measurement_elem, "Value") - value_elem.text = value + xmlet.SubElement(named_measurement_elem, "Value").text = value measurement_elem = xmlet.SubElement(results_elem, "Measurement") value_elem = xmlet.SubElement(measurement_elem, "Value") value_elem.text = ''.join([item for item in get_test_output(test_norm_path) if ord(item) < 128]) - elapsed_time_elem = xmlet.SubElement(testing_elem, "ElapsedMinutes") - elapsed_time_elem.text = "0" # Skip for now + xmlet.SubElement(testing_elem, "ElapsedMinutes").text = "0" # Skip for now etree = xmlet.ElementTree(site_elem) etree.write(os.path.join(data_rel_path, "Test.xml")) +############################################################################### +def create_cdash_xml_fakes(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname): +############################################################################### + # We assume all cases were created from the same code repo + first_result_case = os.path.dirname(list(results.items())[0][1][0]) + try: + srcroot = run_cmd_no_fail("./xmlquery --value CIMEROOT", from_dir=first_result_case) + except: + # Use repo containing this script as last resort + srcroot = CIME.utils.get_cime_root() + + git_commit = CIME.utils.get_current_commit(repo=srcroot) + + data_rel_path = os.path.join("Testing", utc_time) + + # create_cdash_config_xml(results, 
cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit) + + #create_cdash_build_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit) + + create_cdash_test_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit) + ############################################################################### def create_cdash_upload_xml(results, cdash_build_name, cdash_build_group, utc_time, hostname, force_log_upload): ############################################################################### @@ -276,7 +333,7 @@ def create_cdash_xml(results, cdash_build_name, cdash_project, cdash_build_group with open("Testing/TAG", "w") as tag_fd: tag_fd.write("{}\n{}\n".format(utc_time, cdash_build_group)) - create_cdash_test_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname) + create_cdash_xml_fakes(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname) create_cdash_upload_xml(results, cdash_build_name, cdash_build_group, utc_time, hostname, force_log_upload) diff --git a/scripts/tests/scripts_regression_tests.py b/scripts/tests/scripts_regression_tests.py index 90e694907a1..0100365c74b 100755 --- a/scripts/tests/scripts_regression_tests.py +++ b/scripts/tests/scripts_regression_tests.py @@ -48,10 +48,10 @@ # pragma pylint: disable=protected-access ############################################################################### -def run_cmd_assert_result(test_obj, cmd, from_dir=None, expected_stat=0, env=None): +def run_cmd_assert_result(test_obj, cmd, from_dir=None, expected_stat=0, env=None, verbose=False): ############################################################################### from_dir = os.getcwd() if from_dir is None else from_dir - stat, output, errput = run_cmd(cmd, from_dir=from_dir, env=env) + stat, output, errput = run_cmd(cmd, from_dir=from_dir, env=env, 
verbose=verbose) if expected_stat == 0: expectation = "SHOULD HAVE WORKED, INSTEAD GOT STAT %s" % stat else: @@ -734,7 +734,7 @@ def simple_test(self, testdir, expected_results, extra_args="", build_name=None) expected_stat = 0 if expected_results == ["PASS"]*len(expected_results) else CIME.utils.TESTS_FAILED_ERR_CODE output = run_cmd_assert_result(self, "%s/wait_for_tests -p ACME_test */TestStatus %s" % (TOOLS_DIR, extra_args), - from_dir=testdir, expected_stat=expected_stat) + from_dir=testdir, expected_stat=expected_stat, verbose=True) lines = [line for line in output.splitlines() if line.startswith("Test '")] self.assertEqual(len(lines), len(expected_results)) From f8e0a7da0358cb0a86660ac7f4ef062ce201adf4 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Thu, 2 Aug 2018 14:48:41 -0600 Subject: [PATCH 49/59] Remove debug stuff --- scripts/lib/CIME/wait_for_tests.py | 4 ++-- scripts/tests/scripts_regression_tests.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/lib/CIME/wait_for_tests.py b/scripts/lib/CIME/wait_for_tests.py index 90fee8c2600..cafc21d5a4f 100644 --- a/scripts/lib/CIME/wait_for_tests.py +++ b/scripts/lib/CIME/wait_for_tests.py @@ -198,9 +198,9 @@ def create_cdash_xml_fakes(results, cdash_build_name, cdash_build_group, utc_tim data_rel_path = os.path.join("Testing", utc_time) - # create_cdash_config_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit) + create_cdash_config_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit) - #create_cdash_build_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit) + create_cdash_build_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit) create_cdash_test_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, 
git_commit) diff --git a/scripts/tests/scripts_regression_tests.py b/scripts/tests/scripts_regression_tests.py index 0100365c74b..4b01eac8430 100755 --- a/scripts/tests/scripts_regression_tests.py +++ b/scripts/tests/scripts_regression_tests.py @@ -734,7 +734,7 @@ def simple_test(self, testdir, expected_results, extra_args="", build_name=None) expected_stat = 0 if expected_results == ["PASS"]*len(expected_results) else CIME.utils.TESTS_FAILED_ERR_CODE output = run_cmd_assert_result(self, "%s/wait_for_tests -p ACME_test */TestStatus %s" % (TOOLS_DIR, extra_args), - from_dir=testdir, expected_stat=expected_stat, verbose=True) + from_dir=testdir, expected_stat=expected_stat) lines = [line for line in output.splitlines() if line.startswith("Test '")] self.assertEqual(len(lines), len(expected_results)) From 03d3011e0c65ee0ede6b16977c4686a905effd03 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Thu, 2 Aug 2018 16:04:27 -0600 Subject: [PATCH 50/59] wait_for_test logging working --- scripts/lib/CIME/test_status.py | 6 +++ scripts/lib/CIME/wait_for_tests.py | 63 +++++++++++++++++++----------- 2 files changed, 46 insertions(+), 23 deletions(-) diff --git a/scripts/lib/CIME/test_status.py b/scripts/lib/CIME/test_status.py index 344e1bcd2a3..063dde43f2f 100644 --- a/scripts/lib/CIME/test_status.py +++ b/scripts/lib/CIME/test_status.py @@ -147,6 +147,12 @@ def __iter__(self): for phase, data in self._phase_statuses.items(): yield phase, data[0] + def __eq__(self, rhs): + return self._phase_statuses == rhs._phase_statuses + + def __ne__(self, rhs): + return not self.__eq__(rhs) + def get_name(self): return self._test_name diff --git a/scripts/lib/CIME/wait_for_tests.py b/scripts/lib/CIME/wait_for_tests.py index c86482e0ec8..87aed4a4824 100644 --- a/scripts/lib/CIME/wait_for_tests.py +++ b/scripts/lib/CIME/wait_for_tests.py @@ -291,32 +291,49 @@ def wait_for_test(test_path, results, wait, check_throughput, check_memory, igno test_status_filepath = test_path 
logging.debug("Watching file: '{}'".format(test_status_filepath)) + test_log_path = os.path.join(os.path.dirname(test_status_filepath), ".internal_test_status.log") - while (True): - if (os.path.exists(test_status_filepath)): - ts = TestStatus(test_dir=os.path.dirname(test_status_filepath)) - test_name = ts.get_name() - test_status = ts.get_overall_test_status(wait_for_run=not no_run, # Important - no_run=no_run, - check_throughput=check_throughput, - check_memory=check_memory, ignore_namelists=ignore_namelists, - ignore_memleak=ignore_memleak) - - if (test_status == TEST_PEND_STATUS and (wait and not SIGNAL_RECEIVED)): - time.sleep(SLEEP_INTERVAL_SEC) - logging.debug("Waiting for test to finish") - else: - results.put( (test_name, test_path, test_status) ) - break + # We don't want to make it a requirement that wait_for_tests has write access + # to all case directories + try: + fd = open(test_log_path, "w") + fd.close() + except: + test_log_path = "/dev/null" + + prior_ts = None + with open(test_log_path, "w") as log_fd: + while (True): + if (os.path.exists(test_status_filepath)): + ts = TestStatus(test_dir=os.path.dirname(test_status_filepath)) + test_name = ts.get_name() + test_status = ts.get_overall_test_status(wait_for_run=not no_run, # Important + no_run=no_run, + check_throughput=check_throughput, + check_memory=check_memory, ignore_namelists=ignore_namelists, + ignore_memleak=ignore_memleak) + + if prior_ts is not None and prior_ts != ts: + log_fd.write(ts.phase_statuses_dump()) + log_fd.write("OVERALL: {}\n\n".format(test_status)) + + prior_ts = ts + + if (test_status == TEST_PEND_STATUS and (wait and not SIGNAL_RECEIVED)): + time.sleep(SLEEP_INTERVAL_SEC) + logging.debug("Waiting for test to finish") + else: + results.put( (test_name, test_path, test_status) ) + break - else: - if (wait and not SIGNAL_RECEIVED): - logging.debug("File '{}' does not yet exist".format(test_status_filepath)) - time.sleep(SLEEP_INTERVAL_SEC) else: - test_name = 
os.path.abspath(test_status_filepath).split("/")[-2] - results.put( (test_name, test_path, "File '{}' doesn't exist".format(test_status_filepath)) ) - break + if (wait and not SIGNAL_RECEIVED): + logging.debug("File '{}' does not yet exist".format(test_status_filepath)) + time.sleep(SLEEP_INTERVAL_SEC) + else: + test_name = os.path.abspath(test_status_filepath).split("/")[-2] + results.put( (test_name, test_path, "File '{}' doesn't exist".format(test_status_filepath)) ) + break ############################################################################### def wait_for_tests_impl(test_paths, no_wait=False, check_throughput=False, check_memory=False, ignore_namelists=False, ignore_memleak=False, no_run=False): From 7a4215396aba2c696d6c6b39e41eb6e46888c484 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Fri, 3 Aug 2018 15:42:23 -0600 Subject: [PATCH 51/59] Big update to wait_for_test/jenkins 1) Upgrade cdash XML spoofing for prettier cdash pages for e3sm 2) Add ability for jenkins jobs to use alternate baseline area, nice for test cleanup 3) New test test suite that includes DIFFs 4) Add ability to turn off all test teardowns from scripts_regr command line 5) New Jenkins test to upload a realistic dashboard result --- scripts/Tools/jenkins_generic_job | 9 ++- scripts/lib/CIME/wait_for_tests.py | 20 +++--- scripts/lib/get_tests.py | 3 + scripts/lib/jenkins_generic_job.py | 5 +- scripts/tests/scripts_regression_tests.py | 77 +++++++++++++++++++---- 5 files changed, 90 insertions(+), 24 deletions(-) diff --git a/scripts/Tools/jenkins_generic_job b/scripts/Tools/jenkins_generic_job index 88e3299c855..9a5072a0f36 100755 --- a/scripts/Tools/jenkins_generic_job +++ b/scripts/Tools/jenkins_generic_job @@ -65,6 +65,9 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("-b", "--baseline-name", default=default_baseline, help="Baseline name for baselines to use. Also impacts dashboard job name. 
Useful for testing a branch other than next or master") + parser.add_argument("-B", "--baseline-root", + help="Baseline area for baselines to use. Default will be config_machine value for machine") + parser.add_argument("-O", "--override-baseline-name", help="Force comparison with these baseines without impacting dashboard or test-id.") @@ -120,15 +123,15 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter args.override_baseline_name = args.baseline_name return args.generate_baselines, args.submit_to_cdash, args.no_batch, args.baseline_name, args.cdash_build_name, \ - args.cdash_project, args.test_suite, args.cdash_build_group, args.baseline_compare, args.scratch_root, args.parallel_jobs, args.walltime, args.machine, args.compiler, args.override_baseline_name + args.cdash_project, args.test_suite, args.cdash_build_group, args.baseline_compare, args.scratch_root, args.parallel_jobs, args.walltime, args.machine, args.compiler, args.override_baseline_name, args.baseline_root ############################################################################### def _main_func(description): ############################################################################### - generate_baselines, submit_to_cdash, no_batch, baseline_name, cdash_build_name, cdash_project, test_suite, cdash_build_group, baseline_compare, scratch_root, parallel_jobs, walltime, machine, compiler, real_baseline_name = \ + generate_baselines, submit_to_cdash, no_batch, baseline_name, cdash_build_name, cdash_project, test_suite, cdash_build_group, baseline_compare, scratch_root, parallel_jobs, walltime, machine, compiler, real_baseline_name, baseline_root = \ parse_command_line(sys.argv, description) - sys.exit(0 if jenkins_generic_job(generate_baselines, submit_to_cdash, no_batch, baseline_name, cdash_build_name, cdash_project, test_suite, cdash_build_group, baseline_compare, scratch_root, parallel_jobs, walltime, machine, compiler, real_baseline_name) + sys.exit(0 if 
jenkins_generic_job(generate_baselines, submit_to_cdash, no_batch, baseline_name, cdash_build_name, cdash_project, test_suite, cdash_build_group, baseline_compare, scratch_root, parallel_jobs, walltime, machine, compiler, real_baseline_name, baseline_root) else CIME.utils.TESTS_FAILED_ERR_CODE) ############################################################################### diff --git a/scripts/lib/CIME/wait_for_tests.py b/scripts/lib/CIME/wait_for_tests.py index cafc21d5a4f..19269d7d5da 100644 --- a/scripts/lib/CIME/wait_for_tests.py +++ b/scripts/lib/CIME/wait_for_tests.py @@ -84,15 +84,10 @@ def create_cdash_config_xml(results, cdash_build_name, cdash_build_group, utc_ti config_results = [] for test_name in sorted(results): test_status = results[test_name][1] - config_results.append("{} Config {}".format(test_name, "PASS" if test_status != NAMELIST_FAIL_STATUS else "NML DIFF")) + config_results.append("{} {} Config {}".format("" if test_status != NAMELIST_FAIL_STATUS else "CMake Warning:\n", test_name, "PASS" if test_status != NAMELIST_FAIL_STATUS else "NML DIFF")) xmlet.SubElement(config_elem, "Log").text = "\n".join(config_results) - for test_name in sorted(results): - test_status = results[test_name][1] - if test_status == NAMELIST_FAIL_STATUS: - xmlet.SubElement(config_elem, "Warning").text = "{} Config NML DIFF".format(test_name) - xmlet.SubElement(config_elem, "ConfigureStatus").text = "0" xmlet.SubElement(config_elem, "ElapsedMinutes").text = "0" # Skip for now @@ -113,11 +108,22 @@ def create_cdash_build_xml(results, cdash_build_name, cdash_build_group, utc_tim # xmlet.SubElement(build_elem, "Log").text = "\n".join(config_results) + build_results = [] for test_name in sorted(results): + build_results.append(test_name) + + xmlet.SubElement(build_elem, "Log").text = "\n".join(build_results) + + for idx, test_name in enumerate(sorted(results)): test_path = results[test_name][0] test_norm_path = test_path if os.path.isdir(test_path) else 
os.path.dirname(test_path) if get_test_time(test_norm_path) == 0: - xmlet.SubElement(build_elem, "Error").text = "{} Pre-run errors".format(test_name) + error_elem = xmlet.SubElement(build_elem, "Error") + xmlet.SubElement(error_elem, "Text").text = test_name + xmlet.SubElement(error_elem, "BuildLogLine").text = str(idx) + xmlet.SubElement(error_elem, "PreContext").text = test_name + xmlet.SubElement(error_elem, "PostContext").text = "" + xmlet.SubElement(error_elem, "RepeatCount").text = "0" xmlet.SubElement(build_elem, "ElapsedMinutes").text = "0" # Skip for now diff --git a/scripts/lib/get_tests.py b/scripts/lib/get_tests.py index 96eb2dab910..94e07646ae1 100644 --- a/scripts/lib/get_tests.py +++ b/scripts/lib/get_tests.py @@ -51,6 +51,9 @@ "TESTMEMLEAKPASS_P1.f09_g16.X") ), + "cime_test_all" : ("cime_test_only", "0:10:00", + ("TESTRUNDIFF_P1.f19_g16_rx1.A", )), + "cime_developer" : (None, "0:15:00", ("NCK_Ld3.f45_g37_rx1.A", "ERI.f09_g16.X", diff --git a/scripts/lib/jenkins_generic_job.py b/scripts/lib/jenkins_generic_job.py index 4efda8f7ae6..aa75b6d5648 100644 --- a/scripts/lib/jenkins_generic_job.py +++ b/scripts/lib/jenkins_generic_job.py @@ -28,7 +28,7 @@ def jenkins_generic_job(generate_baselines, submit_to_cdash, no_batch, arg_test_suite, cdash_build_group, baseline_compare, scratch_root, parallel_jobs, walltime, - machine, compiler, real_baseline_name): + machine, compiler, real_baseline_name, baseline_root): ############################################################################### """ Return True if all tests passed @@ -103,6 +103,9 @@ def jenkins_generic_job(generate_baselines, submit_to_cdash, no_batch, if walltime is not None: create_test_args.append(" --walltime " + walltime) + if baseline_root is not None: + create_test_args.append(" --baseline-root " + baseline_root) + create_test_cmd = "./create_test " + " ".join(create_test_args) if (not CIME.wait_for_tests.SIGNAL_RECEIVED): diff --git a/scripts/tests/scripts_regression_tests.py 
b/scripts/tests/scripts_regression_tests.py index 4b01eac8430..bec45299d7b 100755 --- a/scripts/tests/scripts_regression_tests.py +++ b/scripts/tests/scripts_regression_tests.py @@ -43,6 +43,7 @@ NO_BATCH = False NO_CMAKE = False TEST_ROOT = None +NO_TEARDOWN = False os.environ["CIME_GLOBAL_WALLTIME"] = "0:05:00" @@ -129,10 +130,10 @@ def make_fake_teststatus(path, testname, status, phase): with TestStatus(test_dir=path, test_name=testname) as ts: for core_phase in CORE_PHASES: if core_phase == phase: - ts.set_status(core_phase, status) + ts.set_status(core_phase, status, comments=("time=42" if phase == RUN_PHASE else "")) break else: - ts.set_status(core_phase, TEST_PASS_STATUS) + ts.set_status(core_phase, TEST_PASS_STATUS, comments=("time=42" if phase == RUN_PHASE else "")) ############################################################################### def parse_test_status(line): @@ -246,7 +247,7 @@ def test_b_cime_f90_unit_tests(self): @classmethod def tearDownClass(cls): - do_teardown = len(cls._do_teardown) > 0 and sys.exc_info() == (None, None, None) + do_teardown = len(cls._do_teardown) > 0 and sys.exc_info() == (None, None, None) and not NO_TEARDOWN teardown_root = True for tfile in cls._testdirs: @@ -655,7 +656,7 @@ def test_k_append_config(self): @classmethod def tearDownClass(cls): - do_teardown = len(cls._do_teardown) > 0 and sys.exc_info() == (None, None, None) + do_teardown = len(cls._do_teardown) > 0 and sys.exc_info() == (None, None, None) and not NO_TEARDOWN for tfile in cls._testdirs: if tfile not in cls._do_teardown: @@ -714,7 +715,7 @@ def setUp(self): ########################################################################### def tearDown(self): ########################################################################### - do_teardown = sys.exc_info() == (None, None, None) + do_teardown = sys.exc_info() == (None, None, None) and not NO_TEARDOWN if do_teardown: for testdir in self._testdirs: @@ -886,7 +887,7 @@ def 
test_wait_for_test_cdash_kill(self): xml_contents = open(xml_file, "r").read() self.assertTrue(r'Test_0Test_1Test_2Test_3Test_4Test_5Test_6Test_7Test_8Test_9' in xml_contents) - self.assertTrue(r'Test_5' in xml_contents) + self.assertTrue(r'Test_5' in xml_contents) # TODO: Any further checking of xml output worth doing? @@ -941,7 +942,7 @@ def setUp(self): self._baseline_area = os.path.join(TEST_ROOT, "baselines") self._testroot = TEST_ROOT self._hasbatch = MACHINE.has_batch_system() and not NO_BATCH - self._do_teardown = True # Will never do teardown if test failed + self._do_teardown = not NO_TEARDOWN ########################################################################### def tearDown(self): @@ -1279,6 +1280,14 @@ def setUp(self): # Change root to avoid clashing with other jenkins_generic_jobs self._jenkins_root = os.path.join(self._testdir, "J") + ########################################################################### + def tearDown(self): + ########################################################################### + TestCreateTestCommon.tearDown(self) + + if "TESTRUNDIFF_ALTERNATE" in os.environ: + del os.environ["TESTRUNDIFF_ALTERNATE"] + ########################################################################### def simple_test(self, expect_works, extra_args, build_name=None): ########################################################################### @@ -1289,7 +1298,7 @@ def simple_test(self, expect_works, extra_args, build_name=None): if CIME.utils.get_model() == "e3sm" and build_name is not None: extra_args += " -p ACME_test --submit-to-cdash --cdash-build-group=Nightly -c %s" % build_name - run_cmd_assert_result(self, "%s/jenkins_generic_job -r %s %s" % (TOOLS_DIR, self._testdir, extra_args), + run_cmd_assert_result(self, "%s/jenkins_generic_job -r %s %s -B %s" % (TOOLS_DIR, self._testdir, extra_args, self._baseline_area), from_dir=self._testdir, expected_stat=(0 if expect_works else CIME.utils.TESTS_FAILED_ERR_CODE)) 
########################################################################### @@ -1301,9 +1310,9 @@ def threaded_test(self, expect_works, extra_args, build_name=None): self._thread_error = str(e) ########################################################################### - def assert_num_leftovers(self): + def assert_num_leftovers(self, suite): ########################################################################### - num_tests_in_tiny = len(get_tests.get_test_suite("cime_test_only_pass")) + num_tests_in_tiny = len(get_tests.get_test_suite(suite)) jenkins_dirs = glob.glob("%s/*%s*/" % (self._jenkins_root, self._baseline_name.capitalize())) # case dirs # scratch_dirs = glob.glob("%s/*%s*/" % (self._testroot, test_id)) # blr/run dirs @@ -1325,11 +1334,11 @@ def test_jenkins_generic_job(self): # Generate fresh baselines so that this test is not impacted by # unresolved diffs self.simple_test(True, "-t cime_test_only_pass -g -b %s" % self._baseline_name) - self.assert_num_leftovers() + self.assert_num_leftovers("cime_test_only_pass") build_name = "jenkins_generic_job_pass_%s" % CIME.utils.get_timestamp() self.simple_test(True, "-t cime_test_only_pass -b %s" % self._baseline_name, build_name=build_name) - self.assert_num_leftovers() # jenkins_generic_job should have automatically cleaned up leftovers from prior run + self.assert_num_leftovers("cime_test_only_pass") # jenkins_generic_job should have automatically cleaned up leftovers from prior run assert_dashboard_has_build(self, build_name) ########################################################################### @@ -1350,6 +1359,43 @@ def test_jenkins_generic_job_kill(self): self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error) assert_dashboard_has_build(self, build_name) + ########################################################################### + def test_jenkins_generic_job_realistic_dash(self): + 
########################################################################### + # Unfortunately, this test is very long-running + + # Generate fresh baselines so that this test is not impacted by + # unresolved diffs + self.simple_test(False, "-t cime_test_all -g -b %s" % self._baseline_name) + self.assert_num_leftovers("cime_test_all") + + # Should create a diff + os.environ["TESTRUNDIFF_ALTERNATE"] = "True" + + # Should create a nml diff + # Modify namelist + fake_nl = """ + &fake_nml + fake_item = 'fake' + fake = .true. +/""" + baseline_glob = glob.glob(os.path.join(self._baseline_area, self._baseline_name, "TESTRUNPASS*")) + self.assertEqual(len(baseline_glob), 1, msg="Expected one match, got:\n%s" % "\n".join(baseline_glob)) + + import stat + for baseline_dir in baseline_glob: + nl_path = os.path.join(baseline_dir, "CaseDocs", "datm_in") + self.assertTrue(os.path.isfile(nl_path), msg="Missing file %s" % nl_path) + + os.chmod(nl_path, stat.S_IRUSR | stat.S_IWUSR) + with open(nl_path, "a") as nl_file: + nl_file.write(fake_nl) + + build_name = "jenkins_generic_job_mixed_%s" % CIME.utils.get_timestamp() + self.simple_test(False, "-t cime_test_all -b %s" % self._baseline_name, build_name=build_name) + self.assert_num_leftovers("cime_test_all") # jenkins_generic_job should have automatically cleaned up leftovers from prior run + assert_dashboard_has_build(self, build_name) + ############################################################################### class T_TestRunRestart(TestCreateTestCommon): ############################################################################### @@ -2931,6 +2977,7 @@ def _main_func(description): global TEST_MPILIB global TEST_ROOT global GLOBAL_TIMEOUT + global NO_TEARDOWN config = CIME.utils.get_cime_config() help_str = \ @@ -2964,6 +3011,9 @@ def _main_func(description): parser.add_argument("--no-cmake", action="store_true", help="Do not run cmake tests") + parser.add_argument("--no-teardown", action="store_true", + help="Do not 
delete directories left behind by testing") + parser.add_argument("--machine", help="Select a specific machine setting for cime") @@ -2988,6 +3038,7 @@ def _main_func(description): NO_BATCH = ns.no_batch NO_CMAKE = ns.no_cmake GLOBAL_TIMEOUT = ns.timeout + NO_TEARDOWN = ns.no_teardown if ns.machine is not None: MACHINE = Machines(machine=ns.machine) @@ -3054,7 +3105,7 @@ def _main_func(description): print("Detected failures, leaving directory:", TEST_ROOT) else: print("All pass, removing directory:", TEST_ROOT) - if os.path.exists(TEST_ROOT): + if os.path.exists(TEST_ROOT) and not NO_TEARDOWN: shutil.rmtree(TEST_ROOT) raise From f3c707ebd510f88401aae7476fd5ed35ce5f1cbc Mon Sep 17 00:00:00 2001 From: James Foucar Date: Fri, 3 Aug 2018 16:28:24 -0600 Subject: [PATCH 52/59] Make TESTBUILDFAIL produce a bldlog file So that uploading of log files for failed builds can be tested. --- scripts/lib/CIME/SystemTests/system_tests_common.py | 7 ++++++- scripts/tests/scripts_regression_tests.py | 5 ++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/scripts/lib/CIME/SystemTests/system_tests_common.py b/scripts/lib/CIME/SystemTests/system_tests_common.py index f3b7712b77c..08a8b172774 100644 --- a/scripts/lib/CIME/SystemTests/system_tests_common.py +++ b/scripts/lib/CIME/SystemTests/system_tests_common.py @@ -3,7 +3,7 @@ """ from CIME.XML.standard_module_setup import * from CIME.XML.env_run import EnvRun -from CIME.utils import append_testlog, get_model, safe_copy +from CIME.utils import append_testlog, get_model, safe_copy, get_timestamp from CIME.test_status import * from CIME.hist_utils import * from CIME.provenance import save_test_time @@ -601,6 +601,11 @@ def build_phase(self, sharedlib_only=False, model_only=False): TESTRUNPASS.build_phase(self, sharedlib_only, model_only) else: if (not sharedlib_only): + blddir = self._case.get_value("EXEROOT") + bldlog = os.path.join(blddir, "{}.bldlog.{}".format(get_model(), get_timestamp("%y%m%d-%H%M%S"))) + with 
open(bldlog, "w") as fd: + fd.write("BUILD FAIL: Intentional fail for testing infrastructure") + expect(False, "BUILD FAIL: Intentional fail for testing infrastructure") class TESTBUILDFAILEXC(FakeTest): diff --git a/scripts/tests/scripts_regression_tests.py b/scripts/tests/scripts_regression_tests.py index bec45299d7b..3fa2d2640c8 100755 --- a/scripts/tests/scripts_regression_tests.py +++ b/scripts/tests/scripts_regression_tests.py @@ -1329,8 +1329,6 @@ def assert_num_leftovers(self, suite): ########################################################################### def test_jenkins_generic_job(self): ########################################################################### - # Unfortunately, this test is very long-running - # Generate fresh baselines so that this test is not impacted by # unresolved diffs self.simple_test(True, "-t cime_test_only_pass -g -b %s" % self._baseline_name) @@ -1362,7 +1360,8 @@ def test_jenkins_generic_job_kill(self): ########################################################################### def test_jenkins_generic_job_realistic_dash(self): ########################################################################### - # Unfortunately, this test is very long-running + # The actual quality of the cdash results for this test can only + # be inspected manually # Generate fresh baselines so that this test is not impacted by # unresolved diffs From 0cbbc8ac54d85793854b7245861c8e34e798c332 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Fri, 3 Aug 2018 16:36:08 -0600 Subject: [PATCH 53/59] Remove useless commented-out code --- scripts/lib/CIME/wait_for_tests.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/scripts/lib/CIME/wait_for_tests.py b/scripts/lib/CIME/wait_for_tests.py index 19269d7d5da..a37d5c5088b 100644 --- a/scripts/lib/CIME/wait_for_tests.py +++ b/scripts/lib/CIME/wait_for_tests.py @@ -101,13 +101,6 @@ def create_cdash_build_xml(results, cdash_build_name, cdash_build_group, utc_tim xmlet.SubElement(build_elem, 
"ConfigureCommand").text = "case.build" - # build_results = [] - # for test_name in sorted(results): - # test_status = results[test_name][1] - # config_results.append("{} Config {}".format(test_name, "PASS" if test_status != NAMELIST_FAIL_STATUS else "NML DIFF")) - - # xmlet.SubElement(build_elem, "Log").text = "\n".join(config_results) - build_results = [] for test_name in sorted(results): build_results.append(test_name) From 635294da052c889c3a1280c00066352501d5ea6f Mon Sep 17 00:00:00 2001 From: James Foucar Date: Mon, 6 Aug 2018 11:00:20 -0600 Subject: [PATCH 54/59] Make pylint happy --- scripts/lib/CIME/test_status.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lib/CIME/test_status.py b/scripts/lib/CIME/test_status.py index 063dde43f2f..1847c818019 100644 --- a/scripts/lib/CIME/test_status.py +++ b/scripts/lib/CIME/test_status.py @@ -148,7 +148,7 @@ def __iter__(self): yield phase, data[0] def __eq__(self, rhs): - return self._phase_statuses == rhs._phase_statuses + return self._phase_statuses == rhs._phase_statuses # pylint: disable=protected-access def __ne__(self, rhs): return not self.__eq__(rhs) From f67e01ca21b655413fb56ffaffc123a05cb1a949 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Mon, 6 Aug 2018 16:23:49 -0600 Subject: [PATCH 55/59] Progress --- config/e3sm/machines/config_machines.xml | 1 + scripts/Tools/xmlquery | 5 +- scripts/lib/CIME/provenance.py | 4 +- scripts/lib/jenkins_generic_job.py | 110 +++++++++++++++--- .../mct/cime_config/config_component_e3sm.xml | 8 ++ 5 files changed, 107 insertions(+), 21 deletions(-) diff --git a/config/e3sm/machines/config_machines.xml b/config/e3sm/machines/config_machines.xml index 4f0f7f421bc..0896f73cdce 100644 --- a/config/e3sm/machines/config_machines.xml +++ b/config/e3sm/machines/config_machines.xml @@ -671,6 +671,7 @@ /sems-data-store/ACME/timings .* /sems-data-store/ACME/cprnc/build.new/cprnc + 100 jgfouca at sandia dot gov 32 diff --git a/scripts/Tools/xmlquery 
b/scripts/Tools/xmlquery index 7c5fabb8b44..b1d034fbfde 100755 --- a/scripts/Tools/xmlquery +++ b/scripts/Tools/xmlquery @@ -330,10 +330,11 @@ def _main_func(description): wrapper=textwrap.TextWrapper() wrapper.subsequent_indent = "\t\t\t" wrapper.fix_sentence_endings = True + + cnt = 0 for group in sorted(iter(results)): if (len(variables) > 1 or len(results) > 1 or full) and not get_group and not value: print("\nResults in group {}".format(group)) - cnt = 0 for var in variables: if var in results[group]: if raw: @@ -344,6 +345,7 @@ def _main_func(description): if cnt > 0: sys.stdout.write(",") sys.stdout.write("{}".format(results[group][var]['value'])) + cnt += 1 elif description: if results[group][var]['desc'][0] is not None: desc_text = ' '.join(results[group][var]['desc'][0].split()) @@ -366,7 +368,6 @@ def _main_func(description): print("\t\tfile: {}".format(results[group][var]['file'][0])) else: print("\t{}: {}".format(var, results[group][var]['value'])) - cnt += 1 if (__name__ == "__main__"): _main_func(__doc__) diff --git a/scripts/lib/CIME/provenance.py b/scripts/lib/CIME/provenance.py index 4770008abbc..e3b5660a745 100644 --- a/scripts/lib/CIME/provenance.py +++ b/scripts/lib/CIME/provenance.py @@ -437,7 +437,7 @@ def get_recommended_test_time_based_on_past(baseline_root, test, raw=False): return convert_to_babylonian_time(best_walltime) except: # We NEVER want a failure here to kill the run - logger.warning("Failed to read test time: {}".format(sys.exc_info()[0])) + logger.warning("Failed to read test time: {}".format(sys.exc_info()[1])) return None @@ -453,4 +453,4 @@ def save_test_time(baseline_root, test, time_seconds): fd.write("{}\n".format(int(time_seconds))) except: # We NEVER want a failure here to kill the run - logger.warning("Failed to store test time: {}".format(sys.exc_info()[0])) + logger.warning("Failed to store test time: {}".format(sys.exc_info()[1])) diff --git a/scripts/lib/jenkins_generic_job.py b/scripts/lib/jenkins_generic_job.py 
index 4efda8f7ae6..03649dbc5b1 100644 --- a/scripts/lib/jenkins_generic_job.py +++ b/scripts/lib/jenkins_generic_job.py @@ -1,10 +1,10 @@ import CIME.wait_for_tests -from CIME.utils import expect +from CIME.utils import expect, run_cmd_no_fail from CIME.case import Case -import os, shutil, glob, signal, logging +import os, shutil, glob, signal, logging, threading, sys, re -############################################################################### +############################################################################## def cleanup_queue(test_root, test_id): ############################################################################### """ @@ -21,6 +21,90 @@ def cleanup_queue(test_root, test_id): case.cancel_batch_jobs(jobkills) +############################################################################### +def delete_old_test_data(mach_comp, test_id_root, scratch_root, test_root, run_area, build_area, archive_area): +############################################################################### + # Remove old dirs + for clutter_area in [scratch_root, test_root, run_area, build_area, archive_area]: + for old_file in glob.glob("{}/*{}*{}*".format(clutter_area, mach_comp, test_id_root)): + logging.info("Removing {}".format(old_file)) + if (os.path.isdir(old_file)): + shutil.rmtree(old_file) + else: + os.remove(old_file) + +############################################################################### +def scan_for_test_ids(old_test_archive, mach_comp, test_id_root): +############################################################################### + results = set([]) + test_id_re = re.compile(".+[.]([^.]+)") + for item in glob.glob("{}/{}/*{}*{}*".format(old_test_archive, "old_cases", mach_comp, test_id_root)): + filename = os.path.basename(item) + the_match = test_id_re.match(filename) + if the_match: + test_id = the_match.groups()[0] + results.add(test_id) + + return list(results) + 
+############################################################################### +def archive_old_test_data(machine, mach_comp, test_id_root, scratch_root, test_root, old_test_archive): +############################################################################### + + # Remove old cs.status, cs.submit. I don't think there's any value to leaving these around + # or archiving them + for old_cs_file in glob.glob("{}/cs.*".format(scratch_root)): + logging.info("Removing {}".format(old_cs_file)) + os.remove(old_cs_file) + + # Remove the old CTest XML, same reason as above + if (os.path.isdir("Testing")): + logging.info("Removing {}".format(os.path.join(os.getcwd(), "Testing"))) + shutil.rmtree("Testing") + + # Archive old data by looking at old test cases + for old_case in glob.glob("{}/*{}*{}*".format(test_root, mach_comp, test_id_root)): + exeroot, rundir, archdir = run_cmd_no_fail("./xmlquery EXEROOT RUNDIR DOUT_S_ROOT --value", from_dir=old_case).split(",") + + for the_dir, target_area in [(exeroot, "old_builds"), (rundir, "old_runs"), (archdir, "old_archives")]: + if os.path.exists(the_dir): + os.rename(the_dir, os.path.join(old_test_archive, target_area)) + + os.rename(old_case, os.path.join(old_test_archive, "old_cases")) + + # Check size of archive + bytes_of_old_test_data = run_cmd_no_fail("du -s {}".format(old_test_archive)).split()[0] + bytes_allowed = machine.get_value("MAX_GB_OLD_TEST_DATA") * 1000000000 + if bytes_of_old_test_data > bytes_allowed: + logging.info("Too much test data, {}GB > {}GB".format(bytes_of_old_test_data / 1000000000, bytes_allowed / 1000000000)) + old_test_ids = scan_for_test_ids(old_test_archive, mach_comp, test_id_root) + for old_test_id in sorted(old_test_ids): + logging.info(" Removing old data for test {}".format(old_test_id)) + for item in ["old_cases", "old_builds", "old_runs", "old_archives"]: + for dir_to_rm in glob.glob("{}/{}/*{}*{}*".format(old_test_archive, item, mach_comp, old_test_id)): + logging.info(" Removing 
{}".format(dir_to_rm)) + shutil.rmtree(dir_to_rm) + + bytes_of_old_test_data = run_cmd_no_fail("du -s {}".format(old_test_archive)).split()[0] + if bytes_of_old_test_data < bytes_allowed: + break + +############################################################################### +def handle_old_test_data(machine, compiler, test_id_root, scratch_root, test_root): +############################################################################### + run_area = os.path.dirname(os.path.dirname(machine.get_value("RUNDIR"))) # Assumes XXX/$CASE/run + build_area = os.path.dirname(os.path.dirname(machine.get_value("EXEROOT"))) # Assumes XXX/$CASE/build + archive_area = os.path.dirname(machine.get_value("DOUT_S_ROOT")) # Assumes XXX/archive/$CASE + old_test_archive = os.path.join(scratch_root, "old_test_archive") + + mach_comp = "{}_{}".format(machine.get_machine_name(), compiler) + + try: + archive_old_test_data(machine, mach_comp, test_id_root, scratch_root, test_root, old_test_archive) + except: + logging.warning("Archiving of old test data FAILED: {}\nDeleting data instead".format(sys.exc_info()[1])) + delete_old_test_data(mach_comp, test_id_root, scratch_root, test_root, run_area, build_area, archive_area) + ############################################################################### def jenkins_generic_job(generate_baselines, submit_to_cdash, no_batch, baseline_name, @@ -38,7 +122,6 @@ def jenkins_generic_job(generate_baselines, submit_to_cdash, no_batch, proxy = machine.get_value("PROXY") test_suite = test_suite if arg_test_suite is None else arg_test_suite test_root = os.path.join(scratch_root, "J") - run_area = os.path.dirname(os.path.dirname(machine.get_value("RUNDIR"))) if (use_batch): batch_system = machine.get_value("BATCH_SYSTEM") @@ -65,20 +148,9 @@ def jenkins_generic_job(generate_baselines, submit_to_cdash, no_batch, # the Jenkins jobs with timeouts to avoid this. 
# - mach_comp = "{}_{}".format(machine.get_machine_name(), compiler) - - # Remove the old CTest XML - if (os.path.isdir("Testing")): - shutil.rmtree("Testing") - - # Remove old dirs test_id_root = "J{}{}".format(baseline_name.capitalize(), test_suite.replace("e3sm_", "").capitalize()) - for clutter_area in [scratch_root, test_root, run_area]: - for old_file in glob.glob("{}/*{}*{}*".format(clutter_area, mach_comp, test_id_root)): - if (os.path.isdir(old_file)): - shutil.rmtree(old_file) - else: - os.remove(old_file) + archiver_thread = threading.Thread(target=handle_old_test_data, args=(machine, compiler, test_id_root, scratch_root, test_root)) + archiver_thread.start() # # Set up create_test command and run it @@ -135,6 +207,10 @@ def jenkins_generic_job(generate_baselines, submit_to_cdash, no_batch, cdash_project=cdash_project, cdash_build_group=cdash_build_group) + logging.info("Waiting for archiver thread") + archiver_thread.join() + logging.info("Waiting for archiver finished") + if use_batch and CIME.wait_for_tests.SIGNAL_RECEIVED: # Cleanup cleanup_queue(test_root, test_id) diff --git a/src/drivers/mct/cime_config/config_component_e3sm.xml b/src/drivers/mct/cime_config/config_component_e3sm.xml index 03eec9f9948..ccea5a9131d 100644 --- a/src/drivers/mct/cime_config/config_component_e3sm.xml +++ b/src/drivers/mct/cime_config/config_component_e3sm.xml @@ -645,6 +645,14 @@ If set to on, this component set/ grid specification is scientifically supported + + integer + 500 + case_def + env_case.xml + How much old test to allow + + integer 0,1,3,5,10,36 From 05f829980cec12a50deb56c4f4e12e953d547738 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Tue, 7 Aug 2018 13:47:55 -0600 Subject: [PATCH 56/59] Be sure to upload build logs for sharedlib build fails For a long time, only model build logs were being uploaded. 
[BFB] --- scripts/lib/CIME/wait_for_tests.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/lib/CIME/wait_for_tests.py b/scripts/lib/CIME/wait_for_tests.py index cd40d783180..d19c2c04c5c 100644 --- a/scripts/lib/CIME/wait_for_tests.py +++ b/scripts/lib/CIME/wait_for_tests.py @@ -221,7 +221,8 @@ def create_cdash_upload_xml(results, cdash_build_name, cdash_build_group, utc_ti test_case_dir = os.path.dirname(test_path) ts = TestStatus(test_case_dir) - build_status = ts.get_status(MODEL_BUILD_PHASE) + build_status = ts.get_status(SHAREDLIB_BUILD_PHASE) + build_status = TEST_FAIL_STATUS if build_status == TEST_FAIL_STATUS else ts.get_status(MODEL_BUILD_PHASE) run_status = ts.get_status(RUN_PHASE) baseline_status = ts.get_status(BASELINE_PHASE) From b75e80514b0f0f217bf9397c11cbeb05ccda010f Mon Sep 17 00:00:00 2001 From: James Foucar Date: Wed, 8 Aug 2018 11:07:15 -0600 Subject: [PATCH 57/59] Lots of fixes --- scripts/lib/jenkins_generic_job.py | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/scripts/lib/jenkins_generic_job.py b/scripts/lib/jenkins_generic_job.py index 03649dbc5b1..96f07b8abd5 100644 --- a/scripts/lib/jenkins_generic_job.py +++ b/scripts/lib/jenkins_generic_job.py @@ -62,33 +62,43 @@ def archive_old_test_data(machine, mach_comp, test_id_root, scratch_root, test_r logging.info("Removing {}".format(os.path.join(os.getcwd(), "Testing"))) shutil.rmtree("Testing") + if not os.path.exists(old_test_archive): + os.mkdir(old_test_archive) + # Archive old data by looking at old test cases for old_case in glob.glob("{}/*{}*{}*".format(test_root, mach_comp, test_id_root)): + logging.info("arching case {}".format(old_case)) exeroot, rundir, archdir = run_cmd_no_fail("./xmlquery EXEROOT RUNDIR DOUT_S_ROOT --value", from_dir=old_case).split(",") - for the_dir, target_area in [(exeroot, "old_builds"), (rundir, "old_runs"), (archdir, "old_archives")]: + for the_dir, target_area in [(exeroot, 
"old_builds"), (rundir, "old_runs"), (archdir, "old_archives"), (old_case, "old_cases")]: if os.path.exists(the_dir): - os.rename(the_dir, os.path.join(old_test_archive, target_area)) + logging.info(" archiving {} to {}".format(the_dir, os.path.join(old_test_archive, target_area))) + if not os.path.exists(os.path.join(old_test_archive, target_area)): + os.mkdir(os.path.join(old_test_archive, target_area)) - os.rename(old_case, os.path.join(old_test_archive, "old_cases")) + os.rename(the_dir, os.path.join(old_test_archive, target_area, os.path.basename(old_case))) # Check size of archive - bytes_of_old_test_data = run_cmd_no_fail("du -s {}".format(old_test_archive)).split()[0] + bytes_of_old_test_data = int(run_cmd_no_fail("du -sb {}".format(old_test_archive)).split()[0]) bytes_allowed = machine.get_value("MAX_GB_OLD_TEST_DATA") * 1000000000 if bytes_of_old_test_data > bytes_allowed: - logging.info("Too much test data, {}GB > {}GB".format(bytes_of_old_test_data / 1000000000, bytes_allowed / 1000000000)) + logging.info("Too much test data, {}GB (actual) > {}GB (limit)".format(bytes_of_old_test_data / 1000000000, bytes_allowed / 1000000000)) old_test_ids = scan_for_test_ids(old_test_archive, mach_comp, test_id_root) for old_test_id in sorted(old_test_ids): - logging.info(" Removing old data for test {}".format(old_test_id)) + logging.info(" Removing old data for test {}".format(old_test_id)) for item in ["old_cases", "old_builds", "old_runs", "old_archives"]: for dir_to_rm in glob.glob("{}/{}/*{}*{}*".format(old_test_archive, item, mach_comp, old_test_id)): - logging.info(" Removing {}".format(dir_to_rm)) + logging.info(" Removing {}".format(dir_to_rm)) shutil.rmtree(dir_to_rm) - bytes_of_old_test_data = run_cmd_no_fail("du -s {}".format(old_test_archive)).split()[0] + bytes_of_old_test_data = int(run_cmd_no_fail("du -sb {}".format(old_test_archive)).split()[0]) if bytes_of_old_test_data < bytes_allowed: break + else: + logging.info("Test data is with accepted bounds, 
{}GB (actual) < {}GB (limit)".format(bytes_of_old_test_data / 1000000000, bytes_allowed / 1000000000)) + + ############################################################################### def handle_old_test_data(machine, compiler, test_id_root, scratch_root, test_root): ############################################################################### From 44a523e9f46cec475419c72bb03e136ea4738067 Mon Sep 17 00:00:00 2001 From: James Foucar Date: Wed, 8 Aug 2018 11:08:38 -0600 Subject: [PATCH 58/59] Restore melvin to 1TB of test data --- config/e3sm/machines/config_machines.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/e3sm/machines/config_machines.xml b/config/e3sm/machines/config_machines.xml index 0896f73cdce..9c94ade715b 100644 --- a/config/e3sm/machines/config_machines.xml +++ b/config/e3sm/machines/config_machines.xml @@ -671,7 +671,7 @@ /sems-data-store/ACME/timings .* /sems-data-store/ACME/cprnc/build.new/cprnc - 100 + 1000 jgfouca at sandia dot gov 32 From 51b0a648f494e8c8e998c6cd2b0bde82f496610d Mon Sep 17 00:00:00 2001 From: James Foucar Date: Wed, 8 Aug 2018 16:39:18 -0600 Subject: [PATCH 59/59] Make codacy happy --- scripts/lib/jenkins_generic_job.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/scripts/lib/jenkins_generic_job.py b/scripts/lib/jenkins_generic_job.py index b6d67a4dfae..18c80158223 100644 --- a/scripts/lib/jenkins_generic_job.py +++ b/scripts/lib/jenkins_generic_job.py @@ -98,7 +98,6 @@ def archive_old_test_data(machine, mach_comp, test_id_root, scratch_root, test_r else: logging.info("Test data is with accepted bounds, {}GB (actual) < {}GB (limit)".format(bytes_of_old_test_data / 1000000000, bytes_allowed / 1000000000)) - ############################################################################### def handle_old_test_data(machine, compiler, test_id_root, scratch_root, test_root): ############################################################################### @@ -111,7 +110,7 @@ def 
handle_old_test_data(machine, compiler, test_id_root, scratch_root, test_roo try: archive_old_test_data(machine, mach_comp, test_id_root, scratch_root, test_root, old_test_archive) - except: + except Exception: logging.warning("Archiving of old test data FAILED: {}\nDeleting data instead".format(sys.exc_info()[1])) delete_old_test_data(mach_comp, test_id_root, scratch_root, test_root, run_area, build_area, archive_area)