diff --git a/ci/cases/pr/C96C48_hybatmDA.yaml b/ci/cases/pr/C96C48_hybatmDA.yaml
index 1f3e973ae7..c3aa6e8892 100644
--- a/ci/cases/pr/C96C48_hybatmDA.yaml
+++ b/ci/cases/pr/C96C48_hybatmDA.yaml
@@ -16,6 +16,3 @@ arguments:
   gfs_cyc: 1
   start: cold
   yaml: {{ HOMEgfs }}/ci/platforms/gfs_defaults_ci.yaml
-
-skip_ci_on_hosts:
-  - hercules
diff --git a/ci/cases/pr/C96_atm3DVar.yaml b/ci/cases/pr/C96_atm3DVar.yaml
index 360e81e9d7..5215cb0d90 100644
--- a/ci/cases/pr/C96_atm3DVar.yaml
+++ b/ci/cases/pr/C96_atm3DVar.yaml
@@ -15,6 +15,3 @@ arguments:
   gfs_cyc: 1
   start: cold
   yaml: {{ HOMEgfs }}/ci/platforms/gfs_defaults_ci.yaml
-
-skip_ci_on_hosts:
-  - hercules
diff --git a/env/HERCULES.env b/env/HERCULES.env
index 3721be2b66..6a4aad7a7d 100755
--- a/env/HERCULES.env
+++ b/env/HERCULES.env
@@ -12,7 +12,7 @@
 fi

 step=$1

-export npe_node_max=40
+export npe_node_max=80
 export launcher="srun -l --export=ALL"
 export mpmd_opt="--multi-prog --output=mpmd.%j.%t.out"
@@ -26,19 +26,164 @@ export KMP_AFFINITY=scatter
 export OMP_STACKSIZE=2048000
 export NTHSTACK=1024000000
 #export LD_BIND_NOW=1
+export I_MPI_EXTRA_FILESYSTEM=1
+export I_MPI_EXTRA_FILESYSTEM_LIST=lustre

 ulimit -s unlimited
 ulimit -a

-if [[ "${step}" = "waveinit" ]] || [[ "${step}" = "waveprep" ]] || [[ "${step}" = "wavepostsbs" ]] || \
-   [[ "${step}" = "wavepostbndpnt" ]] || [[ "${step}" = "wavepostpnt" ]] || [[ "${step}" == "wavepostbndpntbll" ]]; then
+case ${step} in
+ "prep" | "prepbufr")
+
+    nth_max=$((npe_node_max / npe_node_prep))
+
+    export POE="NO"
+    export BACK=${BACK:-"YES"}
+    export sys_tp="HERCULES"
+    export launcher_PREP="srun"
+ ;;
+ "preplandobs")
+
+    export APRUN_CALCFIMS="${launcher} -n 1"
+ ;;
+ "waveinit" | "waveprep" | "wavepostsbs" | "wavepostbndpnt" | "wavepostpnt" | "wavepostbndpntbll")
     export CFP_MP="YES"
-    if [[ "${step}" = "waveprep" ]]; then export MP_PULSE=0 ; fi
+    [[ "${step}" = "waveprep" ]] && export MP_PULSE=0
     export wavempexec=${launcher}
     export wave_mpmd=${mpmd_opt}

-elif [[ "${step}" = "fcst" ]]; then
+ ;;
+ "atmanlrun")
+
+    nth_max=$((npe_node_max / npe_node_atmanlrun))
+
+    export NTHREADS_ATMANL=${nth_atmanlrun:-${nth_max}}
+    [[ ${NTHREADS_ATMANL} -gt ${nth_max} ]] && export NTHREADS_ATMANL=${nth_max}
+    export APRUN_ATMANL="${launcher} -n ${npe_atmanlrun} --cpus-per-task=${NTHREADS_ATMANL}"
+ ;;
+ "atmensanlrun")
+
+    nth_max=$((npe_node_max / npe_node_atmensanlrun))
+
+    export NTHREADS_ATMENSANL=${nth_atmensanlrun:-${nth_max}}
+    [[ ${NTHREADS_ATMENSANL} -gt ${nth_max} ]] && export NTHREADS_ATMENSANL=${nth_max}
+    export APRUN_ATMENSANL="${launcher} -n ${npe_atmensanlrun} --cpus-per-task=${NTHREADS_ATMENSANL}"
+ ;;
+ "aeroanlrun")
+
+    export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
+
+    nth_max=$((npe_node_max / npe_node_aeroanlrun))
+
+    export NTHREADS_AEROANL=${nth_aeroanlrun:-${nth_max}}
+    [[ ${NTHREADS_AEROANL} -gt ${nth_max} ]] && export NTHREADS_AEROANL=${nth_max}
+    export APRUN_AEROANL="${launcher} -n ${npe_aeroanlrun} --cpus-per-task=${NTHREADS_AEROANL}"
+ ;;
+ "landanl")
+
+    nth_max=$((npe_node_max / npe_node_landanl))
+
+    export NTHREADS_LANDANL=${nth_landanl:-${nth_max}}
+    [[ ${NTHREADS_LANDANL} -gt ${nth_max} ]] && export NTHREADS_LANDANL=${nth_max}
+    export APRUN_LANDANL="${launcher} -n ${npe_landanl} --cpus-per-task=${NTHREADS_LANDANL}"
+
+    export APRUN_APPLY_INCR="${launcher} -n 6"
+ ;;
+ "ocnanalbmat")
+
+    export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
+
+    nth_max=$((npe_node_max / npe_node_ocnanalbmat))
+
+    export NTHREADS_OCNANAL=${nth_ocnanalbmat:-${nth_max}}
+    [[ ${NTHREADS_OCNANAL} -gt ${nth_max} ]] && export NTHREADS_OCNANAL=${nth_max}
+    export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalbmat} --cpus-per-task=${NTHREADS_OCNANAL}"
+ ;;
+ "ocnanalrun")
+
+    export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
+
+    nth_max=$((npe_node_max / npe_node_ocnanalrun))
+
+    export NTHREADS_OCNANAL=${nth_ocnanalrun:-${nth_max}}
+    [[ ${NTHREADS_OCNANAL} -gt ${nth_max} ]] && export NTHREADS_OCNANAL=${nth_max}
+    export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalrun} --cpus-per-task=${NTHREADS_OCNANAL}"
+ ;;
+ "ocnanalchkpt")
+
+    export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
+
+    nth_max=$((npe_node_max / npe_node_ocnanalchkpt))
+
+    export NTHREADS_OCNANAL=${nth_ocnanalchkpt:-${nth_max}}
+    [[ ${NTHREADS_OCNANAL} -gt ${nth_max} ]] && export NTHREADS_OCNANAL=${nth_max}
+    export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalchkpt} --cpus-per-task=${NTHREADS_OCNANAL}"
+ ;;
+ "anal" | "analcalc")
+
+    export MKL_NUM_THREADS=4
+    export MKL_CBWR=AUTO
+
+    export CFP_MP=${CFP_MP:-"YES"}
+    export USE_CFP=${USE_CFP:-"YES"}
+    export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
+
+    nth_max=$((npe_node_max / npe_node_anal))
+
+    export NTHREADS_GSI=${nth_anal:-${nth_max}}
+    [[ ${NTHREADS_GSI} -gt ${nth_max} ]] && export NTHREADS_GSI=${nth_max}
+    export APRUN_GSI="${launcher} -n ${npe_gsi:-${npe_anal}} --cpus-per-task=${NTHREADS_GSI}"
+
+    export NTHREADS_CALCINC=${nth_calcinc:-1}
+    [[ ${NTHREADS_CALCINC} -gt ${nth_max} ]] && export NTHREADS_CALCINC=${nth_max}
+    export APRUN_CALCINC="${launcher} \$ncmd --cpus-per-task=${NTHREADS_CALCINC}"
+
+    export NTHREADS_CYCLE=${nth_cycle:-12}
+    [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
+    npe_cycle=${ntiles:-6}
+    export APRUN_CYCLE="${launcher} -n ${npe_cycle} --cpus-per-task=${NTHREADS_CYCLE}"
+
+    export NTHREADS_GAUSFCANL=1
+    npe_gausfcanl=${npe_gausfcanl:-1}
+    export APRUN_GAUSFCANL="${launcher} -n ${npe_gausfcanl} --cpus-per-task=${NTHREADS_GAUSFCANL}"
+ ;;
+ "sfcanl")
+    nth_max=$((npe_node_max / npe_node_sfcanl))
+
+    export NTHREADS_CYCLE=${nth_sfcanl:-14}
+    [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
+    npe_sfcanl=${ntiles:-6}
+    export APRUN_CYCLE="${launcher} -n ${npe_sfcanl} --cpus-per-task=${NTHREADS_CYCLE}"
+ ;;
+ "eobs")
+
+    export MKL_NUM_THREADS=4
+    export MKL_CBWR=AUTO
+
+    export CFP_MP=${CFP_MP:-"YES"}
+    export USE_CFP=${USE_CFP:-"YES"}
+    export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
+
+    nth_max=$((npe_node_max / npe_node_eobs))
+
+    export NTHREADS_GSI=${nth_eobs:-${nth_max}}
+    [[ ${NTHREADS_GSI} -gt ${nth_max} ]] && export NTHREADS_GSI=${nth_max}
+    export APRUN_GSI="${launcher} -n ${npe_gsi:-${npe_eobs}} --cpus-per-task=${NTHREADS_GSI}"
+ ;;
+ "eupd")
+
+    export CFP_MP=${CFP_MP:-"YES"}
+    export USE_CFP=${USE_CFP:-"YES"}
+    export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
+
+    nth_max=$((npe_node_max / npe_node_eupd))
+
+    export NTHREADS_ENKF=${nth_eupd:-${nth_max}}
+    [[ ${NTHREADS_ENKF} -gt ${nth_max} ]] && export NTHREADS_ENKF=${nth_max}
+    export APRUN_ENKF="${launcher} -n ${npe_enkf:-${npe_eupd}} --cpus-per-task=${NTHREADS_ENKF}"
+ ;;
+ "fcst" | "efcs")

     export OMP_STACKSIZE=512M

     if [[ "${CDUMP}" =~ "gfs" ]]; then
@@ -53,17 +198,110 @@ elif [[ "${step}" = "fcst" ]]; then
     # With ESMF threading, the model wants to use the full node
     export APRUN_UFS="${launcher} -n ${ntasks}"
     unset nprocs ppn nnodes ntasks
+ ;;

-elif [[ "${step}" = "upp" ]]; then
+ "upp")

     nth_max=$((npe_node_max / npe_node_upp))

     export NTHREADS_UPP=${nth_upp:-1}
     [[ ${NTHREADS_UPP} -gt ${nth_max} ]] && export NTHREADS_UPP=${nth_max}
     export APRUN_UPP="${launcher} -n ${npe_upp} --cpus-per-task=${NTHREADS_UPP}"
-
-elif [[ "${step}" = "atmos_products" ]]; then
+ ;;
+ "atmos_products")

     export USE_CFP="YES" # Use MPMD for downstream product generation
+ ;;
+ "ecen")

-fi
+    nth_max=$((npe_node_max / npe_node_ecen))
+
+    export NTHREADS_ECEN=${nth_ecen:-${nth_max}}
+    [[ ${NTHREADS_ECEN} -gt ${nth_max} ]] && export NTHREADS_ECEN=${nth_max}
+    export APRUN_ECEN="${launcher} -n ${npe_ecen} --cpus-per-task=${NTHREADS_ECEN}"
+
+    export NTHREADS_CHGRES=${nth_chgres:-12}
+    [[ ${NTHREADS_CHGRES} -gt ${npe_node_max} ]] && export NTHREADS_CHGRES=${npe_node_max}
+    export APRUN_CHGRES="time"
+
+    export NTHREADS_CALCINC=${nth_calcinc:-1}
+    [[ ${NTHREADS_CALCINC} -gt ${nth_max} ]] && export NTHREADS_CALCINC=${nth_max}
+    export APRUN_CALCINC="${launcher} -n ${npe_ecen} --cpus-per-task=${NTHREADS_CALCINC}"
+
+ ;;
+ "esfc")
+
+    nth_max=$((npe_node_max / npe_node_esfc))
+
+    export NTHREADS_ESFC=${nth_esfc:-${nth_max}}
+    [[ ${NTHREADS_ESFC} -gt ${nth_max} ]] && export NTHREADS_ESFC=${nth_max}
+    export APRUN_ESFC="${launcher} -n ${npe_esfc} --cpus-per-task=${NTHREADS_ESFC}"
+
+    export NTHREADS_CYCLE=${nth_cycle:-14}
+    [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
+    export APRUN_CYCLE="${launcher} -n ${npe_esfc} --cpus-per-task=${NTHREADS_CYCLE}"
+
+ ;;
+ "epos")
+
+    nth_max=$((npe_node_max / npe_node_epos))
+
+    export NTHREADS_EPOS=${nth_epos:-${nth_max}}
+    [[ ${NTHREADS_EPOS} -gt ${nth_max} ]] && export NTHREADS_EPOS=${nth_max}
+    export APRUN_EPOS="${launcher} -n ${npe_epos} --cpus-per-task=${NTHREADS_EPOS}"
+
+ ;;
+ "postsnd")
+
+    export CFP_MP="YES"
+
+    nth_max=$((npe_node_max / npe_node_postsnd))
+
+    export NTHREADS_POSTSND=${nth_postsnd:-1}
+    [[ ${NTHREADS_POSTSND} -gt ${nth_max} ]] && export NTHREADS_POSTSND=${nth_max}
+    export APRUN_POSTSND="${launcher} -n ${npe_postsnd} --cpus-per-task=${NTHREADS_POSTSND}"
+
+    export NTHREADS_POSTSNDCFP=${nth_postsndcfp:-1}
+    [[ ${NTHREADS_POSTSNDCFP} -gt ${nth_max} ]] && export NTHREADS_POSTSNDCFP=${nth_max}
+    export APRUN_POSTSNDCFP="${launcher} -n ${npe_postsndcfp} ${mpmd_opt}"
+
+ ;;
+ "awips")
+
+    nth_max=$((npe_node_max / npe_node_awips))
+
+    export NTHREADS_AWIPS=${nth_awips:-2}
+    [[ ${NTHREADS_AWIPS} -gt ${nth_max} ]] && export NTHREADS_AWIPS=${nth_max}
+    export APRUN_AWIPSCFP="${launcher} -n ${npe_awips} ${mpmd_opt}"
+
+ ;;
+ "gempak")
+
+    export CFP_MP="YES"
+
+    if [[ ${CDUMP} == "gfs" ]]; then
+       npe_gempak=${npe_gempak_gfs}
+       npe_node_gempak=${npe_node_gempak_gfs}
+    fi
+
+    nth_max=$((npe_node_max / npe_node_gempak))
+
+    export NTHREADS_GEMPAK=${nth_gempak:-1}
+    [[ ${NTHREADS_GEMPAK} -gt ${nth_max} ]] && export NTHREADS_GEMPAK=${nth_max}
+    export APRUN="${launcher} -n ${npe_gempak} ${mpmd_opt}"
+
+ ;;
+ "fit2obs")
+
+    nth_max=$((npe_node_max / npe_node_fit2obs))
+
+    export NTHREADS_FIT2OBS=${nth_fit2obs:-1}
+    [[ ${NTHREADS_FIT2OBS} -gt ${nth_max} ]] && export NTHREADS_FIT2OBS=${nth_max}
+    export MPIRUN="${launcher} -n ${npe_fit2obs} --cpus-per-task=${NTHREADS_FIT2OBS}"
+
+ ;;
+ *)
+    # Some other job not yet defined here
+    echo "WARNING: The job step ${step} does not specify Hercules-specific resources"
+ ;;
+esac
diff --git a/modulefiles/module_base.hercules.lua b/modulefiles/module_base.hercules.lua
index d587b90c4f..49144efbef 100644
--- a/modulefiles/module_base.hercules.lua
+++ b/modulefiles/module_base.hercules.lua
@@ -8,7 +8,9 @@ prepend_path("MODULEPATH", "/work/noaa/epic/role-epic/spack-stack/hercules/spack

 load(pathJoin("stack-intel", os.getenv("stack_intel_ver")))
 load(pathJoin("stack-intel-oneapi-mpi", os.getenv("stack_impi_ver")))
+load(pathJoin("intel-oneapi-mkl", os.getenv("intel_mkl_ver")))
 load(pathJoin("python", os.getenv("python_ver")))
+load(pathJoin("perl", os.getenv("perl_ver")))

 -- TODO load NCL once the SAs remove the 'depends_on' statements within it
 -- NCL is a static installation and does not depend on any libraries
diff --git a/parm/config/gfs/config.base.emc.dyn b/parm/config/gfs/config.base.emc.dyn
index 08925c397e..abfd1ba180 100644
--- a/parm/config/gfs/config.base.emc.dyn
+++ b/parm/config/gfs/config.base.emc.dyn
@@ -63,13 +63,20 @@ export DO_GOES="NO"           # GOES products
 export DO_BUFRSND="NO"        # BUFR sounding products
 export DO_GEMPAK="NO"         # GEMPAK products
 export DO_AWIPS="NO"          # AWIPS products
-export DO_NPOESS="NO"        # NPOESS products
+export DO_NPOESS="NO"         # NPOESS products
 export DO_TRACKER="YES"       # Hurricane track verification
 export DO_GENESIS="YES"       # Cyclone genesis verification
 export DO_GENESIS_FSU="NO"    # Cyclone genesis verification (FSU)
-export DO_VERFOZN="YES"       # Ozone data assimilation monitoring
-export DO_VERFRAD="YES"       # Radiance data assimilation monitoring
-export DO_VMINMON="YES"       # GSI minimization monitoring
+# The monitor is not yet supported on Hercules
+if [[ "${machine}" == "HERCULES" ]]; then
+  export DO_VERFOZN="NO"      # Ozone data assimilation monitoring
+  export DO_VERFRAD="NO"      # Radiance data assimilation monitoring
+  export DO_VMINMON="NO"      # GSI minimization monitoring
+else
+  export DO_VERFOZN="YES"     # Ozone data assimilation monitoring
+  export DO_VERFRAD="YES"     # Radiance data assimilation monitoring
+  export DO_VMINMON="YES"     # GSI minimization monitoring
+fi
 export DO_MOS="NO"            # GFS Model Output Statistics - Only supported on WCOSS2

 # NO for retrospective parallel; YES for real-time parallel
diff --git a/parm/config/gfs/config.resources b/parm/config/gfs/config.resources
index b3319ecc1b..98ddb47a7d 100644
--- a/parm/config/gfs/config.resources
+++ b/parm/config/gfs/config.resources
@@ -56,7 +56,7 @@ elif [[ "${machine}" = "AWSPW" ]]; then
 elif [[ ${machine} = "ORION" ]]; then
     export npe_node_max=40
 elif [[ ${machine} = "HERCULES" ]]; then
-    export npe_node_max=40
+    export npe_node_max=80
 fi

 if [[ ${step} = "prep" ]]; then
@@ -905,13 +905,19 @@ elif [[ ${step} = "eobs" || ${step} = "eomg" ]]; then
     export nth_eomg=${nth_eobs}
     npe_node_eobs=$(echo "${npe_node_max} / ${nth_eobs}" | bc)
     export npe_node_eobs
-    export npe_node_eomg=${npe_node_eobs}
     export is_exclusive=True

-    #The number of tasks and cores used must be the same for eobs
-    #For S4, this is accomplished by running 10 tasks/node
+    # The number of tasks and cores used must be the same for eobs
+    # See https://github.com/NOAA-EMC/global-workflow/issues/2092 for details
+    # For S4, this is accomplished by running 10 tasks/node
     if [[ ${machine} = "S4" ]]; then
         export npe_node_eobs=10
+    elif [[ ${machine} = "HERCULES" ]]; then
+        # For Hercules, this is only an issue at C384; use 20 tasks/node
+        if [[ ${CASE} = "C384" ]]; then
+            export npe_node_eobs=20
+        fi
     fi
+    export npe_node_eomg=${npe_node_eobs}

 elif [[ ${step} = "ediag" ]]; then
diff --git a/parm/config/gfs/config.ufs b/parm/config/gfs/config.ufs
index 000c8b1e99..46bf89f1af 100644
--- a/parm/config/gfs/config.ufs
+++ b/parm/config/gfs/config.ufs
@@ -72,9 +72,12 @@ case "${machine}" in
   "WCOSS2")
     npe_node_max=128
    ;;
-  "HERA" | "ORION" | "HERCULES")
+  "HERA" | "ORION" )
     npe_node_max=40
    ;;
+  "HERCULES" )
+    npe_node_max=80
+   ;;
   "JET")
     case "${PARTITION_BATCH}" in
       "xjet")
diff --git a/scripts/exglobal_archive.sh b/scripts/exglobal_archive.sh
index 18217f4efc..2f7e3be972 100755
--- a/scripts/exglobal_archive.sh
+++ b/scripts/exglobal_archive.sh
@@ -33,7 +33,9 @@ source "${HOMEgfs}/ush/file_utils.sh"
 [[ ! -d ${ARCDIR} ]] && mkdir -p "${ARCDIR}"

 nb_copy "${COM_ATMOS_ANALYSIS}/${APREFIX}gsistat" "${ARCDIR}/gsistat.${RUN}.${PDY}${cyc}"
-nb_copy "${COM_CHEM_ANALYSIS}/${APREFIX}aerostat" "${ARCDIR}/aerostat.${RUN}.${PDY}${cyc}"
+if [[ ${DO_AERO} = "YES" ]]; then
+    nb_copy "${COM_CHEM_ANALYSIS}/${APREFIX}aerostat" "${ARCDIR}/aerostat.${RUN}.${PDY}${cyc}"
+fi
 nb_copy "${COM_ATMOS_GRIB_1p00}/${APREFIX}pgrb2.1p00.anl" "${ARCDIR}/pgbanl.${RUN}.${PDY}${cyc}.grib2"

 # Archive 1 degree forecast GRIB2 files for verification
diff --git a/sorc/build_all.sh b/sorc/build_all.sh
index ccc088acd9..23cf420f1d 100755
--- a/sorc/build_all.sh
+++ b/sorc/build_all.sh
@@ -129,20 +129,27 @@ build_opts["ww3prepost"]="${_verbose_opt} ${_build_ufs_opt}"

 # Optional DA builds
 if [[ "${_build_ufsda}" == "YES" ]]; then
-  build_jobs["gdas"]=8
-  big_jobs=$((big_jobs+1))
-  build_opts["gdas"]="${_verbose_opt}"
+  if [[ "${MACHINE_ID}" != "orion" && "${MACHINE_ID}" != "hera" ]]; then
+    echo "NOTE: The GDAS App is not supported on ${MACHINE_ID}. Disabling build."
+  else
+    build_jobs["gdas"]=8
+    big_jobs=$((big_jobs+1))
+    build_opts["gdas"]="${_verbose_opt}"
+  fi
 fi
 if [[ "${_build_gsi}" == "YES" ]]; then
   build_jobs["gsi_enkf"]=8
-  big_jobs=$((big_jobs+1))
   build_opts["gsi_enkf"]="${_verbose_opt}"
 fi
 if [[ "${_build_gsi}" == "YES" || "${_build_ufsda}" == "YES" ]] ; then
   build_jobs["gsi_utils"]=2
   build_opts["gsi_utils"]="${_verbose_opt}"
-  build_jobs["gsi_monitor"]=1
-  build_opts["gsi_monitor"]="${_verbose_opt}"
+  if [[ "${MACHINE_ID}" == "hercules" ]]; then
+    echo "NOTE: The GSI Monitor is not supported on Hercules. Disabling build."
+  else
+    build_jobs["gsi_monitor"]=1
+    build_opts["gsi_monitor"]="${_verbose_opt}"
+  fi
 fi

 # Go through all builds and adjust CPU counts down if necessary
@@ -168,7 +175,7 @@ if [[ ${requested_cpus} -lt ${_build_job_max} && ${big_jobs} -gt 0 ]]; then
   extra_cores=$(( _build_job_max - requested_cpus ))
   extra_cores=$(( extra_cores / big_jobs ))
   for build in "${!build_jobs[@]}"; do
-    if [[ "${build}" == "gdas" || "${build}" == "ufs" || "${build}" == "gsi_enkf" ]]; then
+    if [[ "${build}" == "gdas" || "${build}" == "ufs" ]]; then
       build_jobs[${build}]=$(( build_jobs[${build}] + extra_cores ))
     fi
   done
diff --git a/sorc/gsi_utils.fd b/sorc/gsi_utils.fd
index f371890b9f..90481d9618 160000
--- a/sorc/gsi_utils.fd
+++ b/sorc/gsi_utils.fd
@@ -1 +1 @@
-Subproject commit f371890b9fcb42312da5f6228d87b5a4829e7e3a
+Subproject commit 90481d961854e4412ecac49991721e6e63d4b82e
diff --git a/ush/hpssarch_gen.sh b/ush/hpssarch_gen.sh
index 0a027c7537..c34fff1a84 100755
--- a/ush/hpssarch_gen.sh
+++ b/ush/hpssarch_gen.sh
@@ -355,8 +355,10 @@ if [[ ${type} == "gdas" ]]; then
     if [[ -s "${COM_ATMOS_ANALYSIS}/${head}oznstat" ]]; then
       echo "${COM_ATMOS_ANALYSIS/${ROTDIR}\//}/${head}oznstat"
     fi
-    if [[ -s "${COM_CHEM_ANALYSIS}/${head}aerostat" ]]; then
-      echo "${COM_CHEM_ANALYSIS/${ROTDIR}\//}/${head}aerostat"
+    if [[ ${DO_AERO} = "YES" ]]; then
+      if [[ -s "${COM_CHEM_ANALYSIS}/${head}aerostat" ]]; then
+        echo "${COM_CHEM_ANALYSIS/${ROTDIR}\//}/${head}aerostat"
+      fi
     fi
     if [[ -s "${COM_ATMOS_ANALYSIS}/${head}radstat" ]]; then
       echo "${COM_ATMOS_ANALYSIS/${ROTDIR}\//}/${head}radstat"
diff --git a/ush/load_ufswm_modules.sh b/ush/load_ufswm_modules.sh
index da3ab61818..6477a8ff39 100755
--- a/ush/load_ufswm_modules.sh
+++ b/ush/load_ufswm_modules.sh
@@ -12,8 +12,8 @@ ulimit_s=$( ulimit -S -s )
 source "${HOMEgfs}/ush/detect_machine.sh"
 source "${HOMEgfs}/ush/module-setup.sh"
 if [[ "${MACHINE_ID}" != "noaacloud" ]]; then
-  module use "${HOMEgfs}/sorc/ufs_model.fd/tests"
-  module load modules.ufs_model.lua
+  module use "${HOMEgfs}/sorc/ufs_model.fd/modulefiles"
+  module load "ufs_${MACHINE_ID}.intel"
   module load prod_util
   if [[ "${MACHINE_ID}" = "wcoss2" ]]; then
     module load cray-pals
diff --git a/versions/run.hercules.ver b/versions/run.hercules.ver
index 4bedeb1e96..43f1b2181d 100644
--- a/versions/run.hercules.ver
+++ b/versions/run.hercules.ver
@@ -1,7 +1,9 @@
 export stack_intel_ver=2021.9.0
 export stack_impi_ver=2021.9.0
+export intel_mkl_ver=2023.1.0

 export ncl_ver=6.6.2
+export perl_ver=5.36.0

 source "${HOMEgfs:-}/versions/run.spack.ver"
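Reviewer note: every new case arm in env/HERCULES.env repeats one resource pattern — divide the node size (npe_node_max, raised to 80 here) by the step's tasks-per-node to get a per-task thread ceiling, cap the requested thread count at that ceiling, and assemble the srun command from the task count and threads. A minimal standalone sketch of that pattern follows; the npe_*/nth_* names mirror the diff, but the concrete values and the final echo are illustrative assumptions, not part of the change.

    #!/usr/bin/env bash
    # Sketch of the per-step resource pattern used throughout env/HERCULES.env.
    npe_node_max=80    # cores per Hercules node (set by this PR)
    npe_node_upp=40    # example: tasks placed on each node for the upp step
    npe_upp=80         # example: total MPI tasks for the upp step
    nth_upp=2          # example: requested OpenMP threads per task

    launcher="srun -l --export=ALL"

    # Threads that fit alongside npe_node_upp tasks on one node
    nth_max=$((npe_node_max / npe_node_upp))

    # Default to the ceiling when unset, and never exceed it (no oversubscription)
    NTHREADS_UPP=${nth_upp:-${nth_max}}
    [[ ${NTHREADS_UPP} -gt ${nth_max} ]] && NTHREADS_UPP=${nth_max}

    APRUN_UPP="${launcher} -n ${npe_upp} --cpus-per-task=${NTHREADS_UPP}"
    echo "${APRUN_UPP}"   # -> srun -l --export=ALL -n 80 --cpus-per-task=2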