diff --git a/modulefiles/chgres_cube.wcoss_cray b/modulefiles/chgres_cube.wcoss_cray index 1a9df23b0..d80eaaa35 100644 --- a/modulefiles/chgres_cube.wcoss_cray +++ b/modulefiles/chgres_cube.wcoss_cray @@ -7,7 +7,8 @@ module rm intel module load intel/16.3.210 module load cray-mpich/7.2.0 module load craype-haswell -module load cray-netcdf +module load cray-netcdf/4.3.3.1 +module load cray-hdf5/1.8.14 module load w3nco-intel/2.0.6 module load nemsio-intel/2.2.3 module load bacio-intel/2.0.2 @@ -15,12 +16,14 @@ module load sp-intel/2.0.2 module load sigio-intel/2.1.0 module load sfcio-intel/1.0.0 -# module use /gpfs/hps3/emc/nems/noscrub/emc.nemspara/soft/modulefiles -# module load esmf/7.1.0r -export ESMFMKFILE=/gpfs/hps3/emc/global/noscrub/George.Gayno/esmf/8_0_0_bs20/lib/esmf.mk +module use /gpfs/hps3/emc/nems/noscrub/emc.nemspara/soft/modulefiles +module load esmf/8.0.0 +module rm gcc +module load gcc/4.9.2 -export WGRIB2API_INC=/gpfs/hps3/emc/meso/save/Dusan.Jovic/wgrib2/include -export WGRIB2_LIB=/gpfs/hps3/emc/meso/save/Dusan.Jovic/wgrib2/lib/libwgrib2.a + +export WGRIB2API_INC=/gpfs/hps3/emc/global/noscrub/George.Gayno/wgrib2/include +export WGRIB2_LIB=/gpfs/hps3/emc/global/noscrub/George.Gayno/wgrib2/lib/libwgrib2.a export FCOMP=ftn export FFLAGS="-O3 -fp-model precise -g -r8 -i4 -qopenmp -convert big_endian -assume byterecl" diff --git a/modulefiles/chgres_cube.wcoss_dell_p3 b/modulefiles/chgres_cube.wcoss_dell_p3 index 61297e0cd..02c8e36e6 100644 --- a/modulefiles/chgres_cube.wcoss_dell_p3 +++ b/modulefiles/chgres_cube.wcoss_dell_p3 @@ -5,8 +5,6 @@ module load ips/18.0.1.163 module load impi/18.0.1 module load NetCDF/4.5.0 -# module load ESMF/7_1_0r -export ESMFMKFILE=/gpfs/dell2/emc/modeling/noscrub/George.Gayno/esmf_lib/8_0_0bs20/lib/libO/Linux.intel.64.intelmpi.default/esmf.mk module load w3nco/2.0.6 module load sp/2.0.2 module load nemsio/2.2.3 @@ -14,6 +12,9 @@ module load bacio/2.0.2 module load sfcio/1.0.0 module load sigio/2.1.0 +module use /gpfs/dell2/emc/modeling/noscrub/emc.nemspara/soft/modulefiles +module load esmf/8.0.0 + export WGRIB2API_INC=/u/Wesley.Ebisuzaki/home/grib2.v2.0.8.intel/lib export WGRIB2_LIB=/u/Wesley.Ebisuzaki/home/grib2.v2.0.8.intel/lib/libwgrib2.a diff --git a/reg_tests/chgres_cube/driver.cray.sh b/reg_tests/chgres_cube/driver.cray.sh index d9558d2a2..2ffd353e8 100755 --- a/reg_tests/chgres_cube/driver.cray.sh +++ b/reg_tests/chgres_cube/driver.cray.sh @@ -28,7 +28,7 @@ module rm intel module load intel/16.3.210 module load cray-mpich/7.2.0 module load craype-haswell -module load cray-netcdf +module load cray-netcdf/4.3.3.1 module load xt-lsfhpc/9.1.3 module list diff --git a/sorc/chgres_cube.fd/input_data.F90 b/sorc/chgres_cube.fd/input_data.F90 index 5795fd6f2..76a20623a 100644 --- a/sorc/chgres_cube.fd/input_data.F90 +++ b/sorc/chgres_cube.fd/input_data.F90 @@ -5531,7 +5531,7 @@ subroutine handle_grib_error(vname,lev,method,value,varnum, iret,var,var8,var3d) else call error_handler("ERROR USING MISSING_VAR_METHOD. PLEASE SET VALUES IN" // & " VARMAP TABLE TO ONE OF: set_to_fill, set_to_NaN,"// & - " , skip, or stop.") + " , skip, or stop.", 1) endif end subroutine handle_grib_error @@ -5561,7 +5561,7 @@ subroutine read_grib_soil(the_file,inv_file,vname,vname_file,dummy3d,rc) ':0.4-1 m below ground:', ':1-2 m below ground:'/) else rc = -1 - call error_handler("reading soil levels. File must have 4 soil levels.") + call error_handler("reading soil levels. 
File must have 4 soil levels.", rc) endif call get_var_cond(vname,this_miss_var_method=method,this_miss_var_value=value, & diff --git a/sorc/chgres_cube.fd/program_setup.f90 b/sorc/chgres_cube.fd/program_setup.f90 index 25ac2f33f..f81d20d11 100644 --- a/sorc/chgres_cube.fd/program_setup.f90 +++ b/sorc/chgres_cube.fd/program_setup.f90 @@ -277,7 +277,7 @@ subroutine read_setup_namelist if (trim(input_type) == "grib2") then if (trim(grib2_file_input_grid) == "NULL" .or. trim(grib2_file_input_grid) == "") then - call error_handler("FOR GRIB2 DATA, PLEASE PROVIDE GRIB2_FILE_INPUT_GRID") + call error_handler("FOR GRIB2 DATA, PLEASE PROVIDE GRIB2_FILE_INPUT_GRID", 1) endif endif diff --git a/util/gdas_init/config b/util/gdas_init/config new file mode 100644 index 000000000..2b4892eff --- /dev/null +++ b/util/gdas_init/config @@ -0,0 +1,80 @@ +#----------------------------------------------------------- +# +# 1) Compile the chgres_cube program. Invoke +# ./sorc/build_chgres_cube.sh +# +# 2) Ensure links to the 'fixed' directories are +# set. See the ./sorc/link_fixdirs.sh script prolog +# for details. +# +# 3) Set all config variables. See definitions +# below. +# +# 4) Invoke the driver script for your machine (with no +# arguments). +# +# Variable definitions: +# -------------------- +# EXTRACT_DIR - directory where data extracted from HPSS +# is stored. +# EXTRACT_DATA - Set to 'yes' to extract data from HPSS. +# If data has been extracted and is located +# in EXTRACT_DIR, set to 'no'. +# RUN_CHGRES - To run chgres, set to 'yes'. To extract +# data only, set to 'no'. +# yy/mm/dd/hh - The year/month/day/hour of your desired +# experiment. Currently, does not support +# pre-ENKF GFS data, prior to +# 2012 May 21 00z. +# LEVS - Number of hybrid levels plus 1. To +# run with 64 levels, set LEVS to 65. +# CRES_HIRES - Resolution of the hires component of +# your experiment. +# CRES_ENKF - Resolution of the enkf component of the +# your experiment. +# UFS_DIR - Location of your checked out UFS_UTILS +# repo. +# OUTDIR - Directory where data output from chgres is stored. +# +#----------------------------------------------------------- + +EXTRACT_DIR=/gpfs/dell1/stmp/$USER/gdas.init/input +EXTRACT_DATA=yes + +RUN_CHGRES=yes + +yy=2017 +mm=07 +dd=19 +hh=18 + +LEVS=65 + +CRES_HIRES=C192 +CRES_ENKF=C96 + +UFS_DIR=$PWD/../.. + +OUTDIR=/gpfs/dell1/stmp/$USER/gdas.init/output + +#--------------------------------------------------------- +# Dont touch anything below here. +#--------------------------------------------------------- + +gfs_ver=v15 + +# No ENKF data prior to 2012/05/21/00z +if [ $yy$mm$dd$hh -lt 2012052100 ]; then + set +x + echo FATAL ERROR: SCRIPTS DO NOT SUPPORT OLD GFS DATA + exit 2 +elif [ $yy$mm$dd$hh -lt 2016051000 ]; then + gfs_ver=v12 +elif [ $yy$mm$dd$hh -lt 2017072000 ]; then + gfs_ver=v13 +elif [ $yy$mm$dd$hh -lt 2019061200 ]; then + gfs_ver=v14 +fi + +export EXTRACT_DIR yy mm dd hh UFS_DIR OUTDIR CRES_HIRES CRES_ENKF RUN_CHGRES +export LEVS gfs_ver diff --git a/util/gdas_init/driver.cray.sh b/util/gdas_init/driver.cray.sh new file mode 100755 index 000000000..99e9bd057 --- /dev/null +++ b/util/gdas_init/driver.cray.sh @@ -0,0 +1,147 @@ +#!/bin/bash + +#---------------------------------------------------------------------- +# Driver script for running on Cray. +# +# Edit the 'config' file before running. 
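+#
+# Flow (a summary of the logic below): when EXTRACT_DATA=yes, data-pull
+# jobs are submitted to the 'dev_transfer' queue; when RUN_CHGRES=yes,
+# one chgres job is submitted for the hires member and one for each of
+# the 80 enkf members, held behind the pulls with an LSF
+# "-w ended(get.data.*)" dependency.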
+#---------------------------------------------------------------------- + +set -x + +module purge +module load PrgEnv-intel/5.2.56 +module rm intel +module load intel/16.3.210 +module load cray-mpich/7.2.0 +module load craype-haswell +module load cray-netcdf/4.3.3.1 +module load xt-lsfhpc/9.1.3 +module load prod_util/1.1.0 +module load hpss/4.1.0.3 +module list + +PROJECT_CODE=GFS-DEV + +source config + +if [ $EXTRACT_DATA == yes ]; then + + rm -fr $EXTRACT_DIR + mkdir -p $EXTRACT_DIR + + QUEUE=dev_transfer + + MEM=6000 + WALLT="2:00" + + case $gfs_ver in + v12 | v13) + bsub -o log.data.hires -e log.data.hires -q $QUEUE -P $PROJECT_CODE -J get.data.hires -W $WALLT \ + -R "rusage[mem=$MEM]" "./get_pre-v14.data.sh hires" + bsub -o log.data.enkf -e log.data.enkf -q $QUEUE -P $PROJECT_CODE -J get.data.enkf -W $WALLT \ + -R "rusage[mem=$MEM]" "./get_pre-v14.data.sh enkf" + DEPEND="-w ended(get.data.*)" + ;; + v14) + bsub -o log.data.hires -e log.data.hires -q $QUEUE -P $PROJECT_CODE -J get.data.hires -W $WALLT \ + -R "rusage[mem=$MEM]" "./get_v14.data.sh hires" + bsub -o log.data.enkf -e log.data.enkf -q $QUEUE -P $PROJECT_CODE -J get.data.enkf -W $WALLT \ + -R "rusage[mem=$MEM]" "./get_v14.data.sh enkf" + DEPEND="-w ended(get.data.*)" + ;; + v15) + bsub -o log.data.hires -e log.data.hires -q $QUEUE -P $PROJECT_CODE -J get.data.hires -W $WALLT \ + -R "rusage[mem=$MEM]" "./get_v15.data.sh hires" + bsub -o log.data.grp1 -e log.data.grp1 -q $QUEUE -P $PROJECT_CODE -J get.data.enkf1 -W $WALLT \ + -R "rusage[mem=$MEM]" "./get_v15.data.sh grp1" + bsub -o log.data.grp2 -e log.data.grp2 -q $QUEUE -P $PROJECT_CODE -J get.data.enkf2 -W $WALLT \ + -R "rusage[mem=$MEM]" "./get_v15.data.sh grp2" + bsub -o log.data.grp3 -e log.data.grp3 -q $QUEUE -P $PROJECT_CODE -J get.data.enkf3 -W $WALLT \ + -R "rusage[mem=$MEM]" "./get_v15.data.sh grp3" + bsub -o log.data.grp4 -e log.data.grp4 -q $QUEUE -P $PROJECT_CODE -J get.data.enkf4 -W $WALLT \ + -R "rusage[mem=$MEM]" "./get_v15.data.sh grp4" + bsub -o log.data.grp5 -e log.data.grp5 -q $QUEUE -P $PROJECT_CODE -J get.data.enkf5 -W $WALLT \ + -R "rusage[mem=$MEM]" "./get_v15.data.sh grp5" + bsub -o log.data.grp6 -e log.data.grp6 -q $QUEUE -P $PROJECT_CODE -J get.data.enkf6 -W $WALLT \ + -R "rusage[mem=$MEM]" "./get_v15.data.sh grp6" + bsub -o log.data.grp7 -e log.data.grp7 -q $QUEUE -P $PROJECT_CODE -J get.data.enkf7 -W $WALLT \ + -R "rusage[mem=$MEM]" "./get_v15.data.sh grp7" + bsub -o log.data.grp8 -e log.data.grp8 -q $QUEUE -P $PROJECT_CODE -J get.data.enkf8 -W $WALLT \ + -R "rusage[mem=$MEM]" "./get_v15.data.sh grp8" + DEPEND="-w ended(get.data.*)" + ;; + esac + +else + + DEPEND=' ' + +fi + +if [ $RUN_CHGRES == yes ]; then + MEM=2000 + QUEUE=dev + MEMBER=hires + WALLT="0:15" + NUM_NODES=1 + case $gfs_ver in + v12 | v13) + export OMP_NUM_THREADS=2 + export OMP_STACKSIZE=1024M + ;; + *) + export OMP_NUM_THREADS=1 + ;; + esac + export APRUN="aprun -j 1 -n 12 -N 12 -d ${OMP_NUM_THREADS} -cc depth" + if [ $CRES_HIRES == 'C768' ] ; then + WALLT="0:20" + NUM_NODES=3 + export APRUN="aprun -j 1 -n 36 -N 12 -d ${OMP_NUM_THREADS} -cc depth" + elif [ $CRES_HIRES == 'C1152' ] ; then + WALLT="0:20" + NUM_NODES=4 + export APRUN="aprun -j 1 -n 48 -N 12 -d ${OMP_NUM_THREADS} -cc depth" + fi + case $gfs_ver in + v12 | v13) + bsub -e log.${MEMBER} -o log.${MEMBER} -q $QUEUE -P $PROJECT_CODE -J chgres_${MEMBER} -M $MEM -W $WALLT \ + -extsched 'CRAYLINUX[]' $DEPEND "export NODES=$NUM_NODES; ./run_pre-v14.chgres.sh ${MEMBER}" + ;; + v14) + bsub -e log.${MEMBER} -o log.${MEMBER} -q 
$QUEUE -P $PROJECT_CODE -J chgres_${MEMBER} -M $MEM -W $WALLT \ + -extsched 'CRAYLINUX[]' $DEPEND "export NODES=$NUM_NODES; ./run_v14.chgres.sh ${MEMBER}" + ;; + v15) + bsub -e log.${MEMBER} -o log.${MEMBER} -q $QUEUE -P $PROJECT_CODE -J chgres_${MEMBER} -M $MEM -W $WALLT \ + -extsched 'CRAYLINUX[]' $DEPEND "export NODES=$NUM_NODES; ./run_v15.chgres.sh ${MEMBER}" + ;; + esac + + WALLT="0:15" + NUM_NODES=1 + export APRUN="aprun -j 1 -n 12 -N 12 -d ${OMP_NUM_THREADS} -cc depth" + MEMBER=1 + while [ $MEMBER -le 80 ]; do + if [ $MEMBER -lt 10 ]; then + MEMBER_CH="00${MEMBER}" + else + MEMBER_CH="0${MEMBER}" + fi + case $gfs_ver in + v12 | v13) + bsub -e log.${MEMBER_CH} -o log.${MEMBER_CH} -q $QUEUE -P $PROJECT_CODE -J chgres_${MEMBER_CH} -M $MEM -W $WALLT \ + -extsched 'CRAYLINUX[]' $DEPEND "export NODES=$NUM_NODES; ./run_pre-v14.chgres.sh ${MEMBER_CH}" + ;; + v14) + bsub -e log.${MEMBER_CH} -o log.${MEMBER_CH} -q $QUEUE -P $PROJECT_CODE -J chgres_${MEMBER_CH} -M $MEM -W $WALLT \ + -extsched 'CRAYLINUX[]' $DEPEND "export NODES=$NUM_NODES; ./run_v14.chgres.sh ${MEMBER_CH}" + ;; + v15) + bsub -e log.${MEMBER_CH} -o log.${MEMBER_CH} -q $QUEUE -P $PROJECT_CODE -J chgres_${MEMBER_CH} -M $MEM -W $WALLT \ + -extsched 'CRAYLINUX[]' $DEPEND "export NODES=$NUM_NODES; ./run_v15.chgres.sh ${MEMBER_CH}" + ;; + esac + MEMBER=$(( $MEMBER + 1 )) + done +fi diff --git a/util/gdas_init/driver.dell.sh b/util/gdas_init/driver.dell.sh new file mode 100755 index 000000000..27af36e35 --- /dev/null +++ b/util/gdas_init/driver.dell.sh @@ -0,0 +1,143 @@ +#!/bin/bash + +#---------------------------------------------------------------------- +# Driver script for running on Dell. +# +# Edit the 'config' file before running. +#---------------------------------------------------------------------- + +set -x + +module purge +module load EnvVars/1.0.2 +module load ips/18.0.1.163 +module load impi/18.0.1 +module load lsf/10.1 +module load HPSS/5.0.2.5 +module use /usrx/local/dev/modulefiles +module load NetCDF/4.5.0 +module load prod_util/1.1.3 +module list + +PROJECT_CODE=GFS-DEV + +source config + +if [ $EXTRACT_DATA == yes ]; then + + rm -fr $EXTRACT_DIR + mkdir -p $EXTRACT_DIR + + QUEUE=dev_transfer + + MEM=6000M + WALLT="2:00" + + case $gfs_ver in + v12 | v13 ) + bsub -o log.data.hires -e log.data.hires -q $QUEUE -P $PROJECT_CODE -J get.data.hires -W $WALLT \ + -R "affinity[core(1)]" -M $MEM "./get_pre-v14.data.sh hires" + bsub -o log.data.enkf -e log.data.enkf -q $QUEUE -P $PROJECT_CODE -J get.data.enkf -W $WALLT \ + -R "affinity[core(1)]" -M $MEM "./get_pre-v14.data.sh enkf" + DEPEND="-w ended(get.data.*)" + ;; + v14) + bsub -o log.data.hires -e log.data.hires -q $QUEUE -P $PROJECT_CODE -J get.data.hires -W $WALLT \ + -R "affinity[core(1)]" -M $MEM "./get_v14.data.sh hires" + bsub -o log.data.enkf -e log.data.enkf -q $QUEUE -P $PROJECT_CODE -J get.data.enkf -W $WALLT \ + -R "affinity[core(1)]" -M $MEM "./get_v14.data.sh enkf" + DEPEND="-w ended(get.data.*)" + ;; + v15) + bsub -o log.data.hires -e log.data.hires -q $QUEUE -P $PROJECT_CODE -J get.data.hires -W $WALLT \ + -R "affinity[core(1)]" -M $MEM "./get_v15.data.sh hires" + bsub -o log.data.grp1 -e log.data.grp1 -q $QUEUE -P $PROJECT_CODE -J get.data.enkf1 -W $WALLT \ + -R "affinity[core(1)]" -M $MEM "./get_v15.data.sh grp1" + bsub -o log.data.grp2 -e log.data.grp2 -q $QUEUE -P $PROJECT_CODE -J get.data.enkf2 -W $WALLT \ + -R "affinity[core(1)]" -M $MEM "./get_v15.data.sh grp2" + bsub -o log.data.grp3 -e log.data.grp3 -q $QUEUE -P $PROJECT_CODE -J get.data.enkf3 -W 
$WALLT \ + -R "affinity[core(1)]" -M $MEM "./get_v15.data.sh grp3" + bsub -o log.data.grp4 -e log.data.grp4 -q $QUEUE -P $PROJECT_CODE -J get.data.enkf4 -W $WALLT \ + -R "affinity[core(1)]" -M $MEM "./get_v15.data.sh grp4" + bsub -o log.data.grp5 -e log.data.grp5 -q $QUEUE -P $PROJECT_CODE -J get.data.enkf5 -W $WALLT \ + -R "affinity[core(1)]" -M $MEM "./get_v15.data.sh grp5" + bsub -o log.data.grp6 -e log.data.grp6 -q $QUEUE -P $PROJECT_CODE -J get.data.enkf6 -W $WALLT \ + -R "affinity[core(1)]" -M $MEM "./get_v15.data.sh grp6" + bsub -o log.data.grp7 -e log.data.grp7 -q $QUEUE -P $PROJECT_CODE -J get.data.enkf7 -W $WALLT \ + -R "affinity[core(1)]" -M $MEM "./get_v15.data.sh grp7" + bsub -o log.data.grp8 -e log.data.grp8 -q $QUEUE -P $PROJECT_CODE -J get.data.enkf8 -W $WALLT \ + -R "affinity[core(1)]" -M $MEM "./get_v15.data.sh grp8" + DEPEND="-w ended(get.data.*)" + ;; + esac + +else + + DEPEND=' ' + +fi + +if [ $RUN_CHGRES == yes ]; then + QUEUE=dev + MEMBER=hires + WALLT="0:15" + export OMP_NUM_THREADS=1 + NODES="-n 18 -R "span[ptile=9]"" + export APRUN="mpirun" + if [ $CRES_HIRES == 'C768' ] ; then + NODES="-n 24 -R "span[ptile=6]"" + elif [ $CRES_HIRES == 'C1152' ] ; then + NODES="-n 36 -R "span[ptile=6]"" + WALLT="0:20" + fi + case $gfs_ver in + v12 | v13) + export OMP_STACKSIZE=1024M + export OMP_NUM_THREADS=2 + bsub -e log.${MEMBER} -o log.${MEMBER} -q $QUEUE -P $PROJECT_CODE -J chgres_${MEMBER} -W $WALLT \ + -x $NODES -R "affinity[core(${OMP_NUM_THREADS}):distribute=balance]" $DEPEND \ + "./run_pre-v14.chgres.sh ${MEMBER}" + ;; + v14) + bsub -e log.${MEMBER} -o log.${MEMBER} -q $QUEUE -P $PROJECT_CODE -J chgres_${MEMBER} -W $WALLT \ + -x $NODES -R "affinity[core(1):distribute=balance]" $DEPEND \ + "./run_v14.chgres.sh ${MEMBER}" + ;; + v15) + bsub -e log.${MEMBER} -o log.${MEMBER} -q $QUEUE -P $PROJECT_CODE -J chgres_${MEMBER} -W $WALLT \ + -x $NODES -R "affinity[core(1):distribute=balance]" $DEPEND \ + "./run_v15.chgres.sh ${MEMBER}" + ;; + esac + + NODES="-n 18 -R "span[ptile=9]"" + WALLT="0:15" + MEMBER=1 + while [ $MEMBER -le 80 ]; do + if [ $MEMBER -lt 10 ]; then + MEMBER_CH="00${MEMBER}" + else + MEMBER_CH="0${MEMBER}" + fi + case $gfs_ver in + v12 | v13) + export OMP_STACKSIZE=1024M + export OMP_NUM_THREADS=2 + bsub -e log.${MEMBER_CH} -o log.${MEMBER_CH} -q $QUEUE -P $PROJECT_CODE -J chgres_${MEMBER_CH} -W $WALLT \ + -x $NODES -R "affinity[core(${OMP_NUM_THREADS}):distribute=balance]" $DEPEND \ + "./run_pre-v14.chgres.sh ${MEMBER_CH}" + ;; + v14) + bsub -e log.${MEMBER_CH} -o log.${MEMBER_CH} -q $QUEUE -P $PROJECT_CODE -J chgres_${MEMBER_CH} -W $WALLT \ + -x $NODES -R "affinity[core(1):distribute=balance]" $DEPEND \ + "./run_v14.chgres.sh ${MEMBER_CH}" + ;; + v15) + bsub -e log.${MEMBER_CH} -o log.${MEMBER_CH} -q $QUEUE -P $PROJECT_CODE -J chgres_${MEMBER_CH} -W $WALLT \ + -x $NODES -R "affinity[core(1):distribute=balance]" $DEPEND \ + "./run_v15.chgres.sh ${MEMBER_CH}" + ;; + esac + MEMBER=$(( $MEMBER + 1 )) + done +fi diff --git a/util/gdas_init/driver.hera.sh b/util/gdas_init/driver.hera.sh new file mode 100755 index 000000000..711bab9a0 --- /dev/null +++ b/util/gdas_init/driver.hera.sh @@ -0,0 +1,137 @@ +#!/bin/bash + +#--------------------------------------------------------------------- +# Driver script for running on Hera. +# +# Edit the 'config' file before running. 
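+#
+# Flow (a summary of the logic below): when EXTRACT_DATA=yes, data-pull
+# jobs are submitted to the 'service' partition; when RUN_CHGRES=yes,
+# the chgres jobs for the hires member and the 80 enkf members are
+# chained to the pulls with a Slurm "afterok" dependency.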
+#--------------------------------------------------------------------- + +set -x + +source /apps/lmod/lmod/init/sh +module purge +module use -a /scratch2/NCEPDEV/nwprod/NCEPLIBS/modulefiles +module load intel/18.0.5.274 +module load impi/2018.0.4 +module load netcdf/4.7.0 +module load hpss +module load prod_util +module load nco/4.7.0 +module list + +PROJECT_CODE=fv3-cpu +QUEUE=batch + +source config + +if [ $EXTRACT_DATA == yes ]; then + + rm -fr $EXTRACT_DIR + mkdir -p $EXTRACT_DIR + + MEM=6000M + WALLT="2:00:00" + + case $gfs_ver in + v12 | v13) + DATAH=$(sbatch --parsable --partition=service --ntasks=1 --mem=$MEM -t $WALLT -A $PROJECT_CODE -q $QUEUE -J get_hires \ + -o log.data.hires -e log.data.hires ./get_pre-v14.data.sh hires) + DATA1=$(sbatch --parsable --partition=service --ntasks=1 --mem=$MEM -t $WALLT -A $PROJECT_CODE -q $QUEUE -J get_enkf \ + -o log.data.enkf -e log.data.enkf ./get_pre-v14.data.sh enkf) + DEPEND="-d afterok:$DATAH:$DATA1" + ;; + v14) + DATAH=$(sbatch --parsable --partition=service --ntasks=1 --mem=$MEM -t $WALLT -A $PROJECT_CODE -q $QUEUE -J get_hires \ + -o log.data.hires -e log.data.hires ./get_v14.data.sh hires) + DATA1=$(sbatch --parsable --partition=service --ntasks=1 --mem=$MEM -t $WALLT -A $PROJECT_CODE -q $QUEUE -J get_enkf \ + -o log.data.enkf -e log.data.enkf ./get_v14.data.sh enkf) + DEPEND="-d afterok:$DATAH:$DATA1" + ;; + v15) + DATAH=$(sbatch --parsable --partition=service --ntasks=1 --mem=$MEM -t $WALLT -A $PROJECT_CODE -q $QUEUE -J get_hires \ + -o log.data.hires -e log.data.hires ./get_v15.data.sh hires) + DATA1=$(sbatch --parsable --partition=service --ntasks=1 --mem=$MEM -t $WALLT -A $PROJECT_CODE -q $QUEUE -J get_grp1 \ + -o log.data.grp1 -e log.data.grp1 ./get_v15.data.sh grp1) + DATA2=$(sbatch --parsable --partition=service --ntasks=1 --mem=$MEM -t $WALLT -A $PROJECT_CODE -q $QUEUE -J get_grp2 \ + -o log.data.grp2 -e log.data.grp2 ./get_v15.data.sh grp2) + DATA3=$(sbatch --parsable --partition=service --ntasks=1 --mem=$MEM -t $WALLT -A $PROJECT_CODE -q $QUEUE -J get_grp3 \ + -o log.data.grp3 -e log.data.grp3 ./get_v15.data.sh grp3) + DATA4=$(sbatch --parsable --partition=service --ntasks=1 --mem=$MEM -t $WALLT -A $PROJECT_CODE -q $QUEUE -J get_grp4 \ + -o log.data.grp4 -e log.data.grp4 ./get_v15.data.sh grp4) + DATA5=$(sbatch --parsable --partition=service --ntasks=1 --mem=$MEM -t $WALLT -A $PROJECT_CODE -q $QUEUE -J get_grp5 \ + -o log.data.grp5 -e log.data.grp5 ./get_v15.data.sh grp5) + DATA6=$(sbatch --parsable --partition=service --ntasks=1 --mem=$MEM -t $WALLT -A $PROJECT_CODE -q $QUEUE -J get_grp6 \ + -o log.data.grp6 -e log.data.grp6 ./get_v15.data.sh grp6) + DATA7=$(sbatch --parsable --partition=service --ntasks=1 --mem=$MEM -t $WALLT -A $PROJECT_CODE -q $QUEUE -J get_grp7 \ + -o log.data.grp7 -e log.data.grp7 ./get_v15.data.sh grp7) + DATA8=$(sbatch --parsable --partition=service --ntasks=1 --mem=$MEM -t $WALLT -A $PROJECT_CODE -q $QUEUE -J get_grp8 \ + -o log.data.grp8 -e log.data.grp8 ./get_v15.data.sh grp8) + DEPEND="-d afterok:$DATAH:$DATA1:$DATA2:$DATA3:$DATA4:$DATA5:$DATA6:$DATA7:$DATA8" + ;; + esac + +else + + DEPEND=' ' + +fi + +if [ $RUN_CHGRES == yes ]; then + export APRUN=srun + MEMBER=hires + NODES=3 + WALLT="0:15:00" + export OMP_NUM_THREADS=1 + if [ $CRES_HIRES == 'C768' ] ; then + NODES=5 + elif [ $CRES_HIRES == 'C1152' ] ; then + NODES=8 + WALLT="0:20:00" + fi + case $gfs_ver in + v12 | v13) + export OMP_NUM_THREADS=4 + export OMP_STACKSIZE=1024M + sbatch --parsable --ntasks-per-node=6 --nodes=${NODES} 
--cpus-per-task=$OMP_NUM_THREADS \ + -t $WALLT -A $PROJECT_CODE -q $QUEUE -J chgres_${MEMBER} \ + -o log.${MEMBER} -e log.${MEMBER} ${DEPEND} run_pre-v14.chgres.sh ${MEMBER} + ;; + v14) + sbatch --parsable --ntasks-per-node=6 --nodes=${NODES} -t $WALLT -A $PROJECT_CODE -q $QUEUE -J chgres_${MEMBER} \ + -o log.${MEMBER} -e log.${MEMBER} ${DEPEND} run_v14.chgres.sh ${MEMBER} + ;; + v15) + sbatch --parsable --ntasks-per-node=6 --nodes=${NODES} -t $WALLT -A $PROJECT_CODE -q $QUEUE -J chgres_${MEMBER} \ + -o log.${MEMBER} -e log.${MEMBER} ${DEPEND} run_v15.chgres.sh ${MEMBER} + ;; + esac + + WALLT="0:15:00" + MEMBER=1 + while [ $MEMBER -le 80 ]; do + if [ $MEMBER -lt 10 ]; then + MEMBER_CH="00${MEMBER}" + else + MEMBER_CH="0${MEMBER}" + fi + case $gfs_ver in + v12 | v13) + export OMP_NUM_THREADS=2 + export OMP_STACKSIZE=1024M + sbatch --parsable --ntasks-per-node=12 --nodes=1 --cpus-per-task=$OMP_NUM_THREADS \ + -t $WALLT -A $PROJECT_CODE -q $QUEUE -J chgres_${MEMBER_CH} \ + -o log.${MEMBER_CH} -e log.${MEMBER_CH} ${DEPEND} run_pre-v14.chgres.sh ${MEMBER_CH} + ;; + v14) + sbatch --parsable --ntasks-per-node=12 --nodes=1 -t $WALLT -A $PROJECT_CODE -q $QUEUE -J chgres_${MEMBER_CH} \ + -o log.${MEMBER_CH} -e log.${MEMBER_CH} ${DEPEND} run_v14.chgres.sh ${MEMBER_CH} + ;; + v15) + sbatch --parsable --ntasks-per-node=12 --nodes=1 -t $WALLT -A $PROJECT_CODE -q $QUEUE -J chgres_${MEMBER_CH} \ + -o log.${MEMBER_CH} -e log.${MEMBER_CH} ${DEPEND} run_v15.chgres.sh ${MEMBER_CH} + ;; + esac + MEMBER=$(( $MEMBER + 1 )) + done + +fi diff --git a/util/gdas_init/get_pre-v14.data.sh b/util/gdas_init/get_pre-v14.data.sh new file mode 100755 index 000000000..386d672c0 --- /dev/null +++ b/util/gdas_init/get_pre-v14.data.sh @@ -0,0 +1,93 @@ +#!/bin/bash + +#----------------------------------------------------------- +# Retrieve data prior to v14 (the old sigio/sfcio data +# from the spectral gfs). +# +# Script works for data between 00z May 21, 2012 +# and 06z July 19, 2017. +#----------------------------------------------------------- + +bundle=$1 + +set -x + +if [ $bundle = 'hires' ]; then + + mkdir -p $EXTRACT_DIR/gdas.${yy}${mm}${dd}/${hh} + cd $EXTRACT_DIR/gdas.${yy}${mm}${dd}/${hh} + + directory=/NCEPPROD/hpssprod/runhistory/rh${yy}/${yy}${mm}/${yy}${mm}${dd} + if [ $gfs_ver = 'v12' ]; then + file=com_gfs_prod_gdas.${yy}${mm}${dd}${hh}.tar + else + file=com2_gfs_prod_gdas.${yy}${mm}${dd}${hh}.tar + fi + + htar -xvf $directory/$file ./gdas1.t${hh}z.radstat + rc=$? + [ $rc != 0 ] && exit $rc + htar -xvf $directory/$file ./gdas1.t${hh}z.abias_air + rc=$? + [ $rc != 0 ] && exit $rc + htar -xvf $directory/$file ./gdas1.t${hh}z.abias + rc=$? + [ $rc != 0 ] && exit $rc + htar -xvf $directory/$file ./gdas1.t${hh}z.abias_pc + rc=$? + [ $rc != 0 ] && exit $rc + htar -xvf $directory/$file ./gdas1.t${hh}z.sanl + rc=$? + [ $rc != 0 ] && exit $rc + htar -xvf $directory/$file ./gdas1.t${hh}z.sfcanl + rc=$? + [ $rc != 0 ] && exit $rc + +elif [ $bundle = 'enkf' ]; then + +#---------------------------------------------------------------------- +# Get the enkf tiled restart files for all members. 
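+#
+# (For this era the member files are the spectral 'siganl'/'sfcanl'
+# analyses, not tiled restarts. An inventory is built with 'htar -tvf',
+# the member analysis files are grepped from it, and only those
+# entries are extracted with 'htar -xvf ... -L'.)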
+#---------------------------------------------------------------------- + + mkdir -p $EXTRACT_DIR/enkf.${yy}${mm}${dd}/${hh} + cd $EXTRACT_DIR/enkf.${yy}${mm}${dd}/${hh} + + directory=/NCEPPROD/hpssprod/runhistory/rh${yy}/${yy}${mm}/${yy}${mm}${dd} + if [ $gfs_ver = 'v12' ]; then + file=com_gfs_prod_enkf.${yy}${mm}${dd}_${hh}.anl.tar + else + file=com2_gfs_prod_enkf.${yy}${mm}${dd}_${hh}.anl.tar + fi + + rm -f ./list*.${bundle} + htar -tvf $directory/$file > ./list1.${bundle} + grep siganl ./list1.${bundle} > ./list2.${bundle} + grep sfcanl ./list1.${bundle} >> ./list2.${bundle} + while read -r line + do + echo ${line##*' '} >> ./list3.${bundle} + done < "./list2.${bundle}" + htar -xvf $directory/$file -L ./list3.${bundle} + rc=$? + [ $rc != 0 ] && exit $rc + + MEMBER=1 + while [ $MEMBER -le 80 ]; do + if [ $MEMBER -lt 10 ]; then + MEMBER_CH="00${MEMBER}" + else + MEMBER_CH="0${MEMBER}" + fi + mkdir -p mem${MEMBER_CH} + mv *_mem${MEMBER_CH}* ./mem${MEMBER_CH} + MEMBER=$(( $MEMBER + 1 )) + done + + rm -f *ensmean + +fi + +set +x +echo DATA PULL FOR $bundle DONE + +exit 0 diff --git a/util/gdas_init/get_v14.data.sh b/util/gdas_init/get_v14.data.sh new file mode 100755 index 000000000..3908e31df --- /dev/null +++ b/util/gdas_init/get_v14.data.sh @@ -0,0 +1,83 @@ +#!/bin/bash + +#----------------------------------------------------------- +# Retrieve gfs v14 data from hpss. +# +# v14 data starts July 19, 2017 at 12z +#----------------------------------------------------------- + +bundle=$1 + +set -x + +if [ $bundle = 'hires' ]; then + + mkdir -p $EXTRACT_DIR/gdas.${yy}${mm}${dd}/${hh} + cd $EXTRACT_DIR/gdas.${yy}${mm}${dd}/${hh} + + directory=/NCEPPROD/hpssprod/runhistory/rh${yy}/${yy}${mm}/${yy}${mm}${dd} + file=gpfs_hps_nco_ops_com_gfs_prod_gdas.${yy}${mm}${dd}${hh}.tar + + htar -xvf $directory/$file ./gdas.t${hh}z.radstat + rc=$? + [ $rc != 0 ] && exit $rc + htar -xvf $directory/$file ./gdas.t${hh}z.abias_air + rc=$? + [ $rc != 0 ] && exit $rc + htar -xvf $directory/$file ./gdas.t${hh}z.abias + rc=$? + [ $rc != 0 ] && exit $rc + htar -xvf $directory/$file ./gdas.t${hh}z.abias_pc + rc=$? + [ $rc != 0 ] && exit $rc + htar -xvf $directory/$file ./gdas.t${hh}z.atmanl.nemsio + rc=$? + [ $rc != 0 ] && exit $rc + htar -xvf $directory/$file ./gdas.t${hh}z.nstanl.nemsio + rc=$? + [ $rc != 0 ] && exit $rc + htar -xvf $directory/$file ./gdas.t${hh}z.sfcanl.nemsio + rc=$? + [ $rc != 0 ] && exit $rc + + set +x + echo DATA PULL FOR $bundle DONE + + exit 0 + +elif [ $bundle = 'enkf' ]; then + +#---------------------------------------------------------------------- +# Get the enkf tiled restart files for all members. +#---------------------------------------------------------------------- + + mkdir -p $EXTRACT_DIR/enkf.${yy}${mm}${dd}/${hh} + cd $EXTRACT_DIR/enkf.${yy}${mm}${dd}/${hh} + + directory=/NCEPPROD/hpssprod/runhistory/rh${yy}/${yy}${mm}/${yy}${mm}${dd} + file=gpfs_hps_nco_ops_com_gfs_prod_enkf.${yy}${mm}${dd}_${hh}.anl.tar + + htar -xvf $directory/$file + rc=$? 
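+  # Unlike the pre-v14 and v15 pulls, the whole enkf tarball is
+  # extracted here (no '-L' member list); the member files are sorted
+  # into mem001-mem080 directories below, and leftover 'gdas.*' files
+  # are removed.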
+ [ $rc != 0 ] && exit $rc + + MEMBER=1 + while [ $MEMBER -le 80 ]; do + if [ $MEMBER -lt 10 ]; then + MEMBER_CH="00${MEMBER}" + else + MEMBER_CH="0${MEMBER}" + fi + mkdir -p mem${MEMBER_CH} + mv *.mem${MEMBER_CH}* ./mem${MEMBER_CH} + MEMBER=$(( $MEMBER + 1 )) + done + + rm -f gdas.* + +fi + +set +x +echo DATA PULL FOR $bundle DONE + +exit 0 diff --git a/util/gdas_init/get_v15.data.sh b/util/gdas_init/get_v15.data.sh new file mode 100755 index 000000000..5326080a6 --- /dev/null +++ b/util/gdas_init/get_v15.data.sh @@ -0,0 +1,114 @@ +#!/bin/bash + +#---------------------------------------------------------------------- +# Retrieve gfs v15 data from hpss. +# +# Data available after 2019061206. +#---------------------------------------------------------------------- + +bundle=$1 + +set -x + +cd $EXTRACT_DIR + +date10_m6=`$NDATE -6 $yy$mm$dd$hh` + +echo $date10_m6 +yy_m6=$(echo $date10_m6 | cut -c1-4) +mm_m6=$(echo $date10_m6 | cut -c5-6) +dd_m6=$(echo $date10_m6 | cut -c7-8) +hh_m6=$(echo $date10_m6 | cut -c9-10) + +#---------------------------------------------------------------------- +# Get the hires tiled restart files. Need to use the 6-hour forecast files from +# the previous cycle. +#---------------------------------------------------------------------- + +if [ $bundle = 'hires' ]; then + + directory=/NCEPPROD/hpssprod/runhistory/rh${yy_m6}/${yy_m6}${mm_m6}/${yy_m6}${mm_m6}${dd_m6} + if [ $date10_m6 -lt 2020022600 ]; then + file=gpfs_dell1_nco_ops_com_gfs_prod_gdas.${yy_m6}${mm_m6}${dd_m6}_${hh_m6}.gdas_restart.tar + else + file=com_gfs_prod_gdas.${yy_m6}${mm_m6}${dd_m6}_${hh_m6}.gdas_restart.tar + fi + + rm -f ./list.hires* + touch ./list.hires3 + htar -tvf $directory/$file > ./list.hires1 + grep ${yy}${mm}${dd}.${hh} ./list.hires1 > ./list.hires2 + while read -r line + do + echo ${line##*' '} >> ./list.hires3 + done < "./list.hires2" + + htar -xvf $directory/$file -L ./list.hires3 + rc=$? + [ $rc != 0 ] && exit $rc + +#---------------------------------------------------------------------- +# Get the 'abias' and 'radstat' files from current cycle +#---------------------------------------------------------------------- + + directory=/NCEPPROD/hpssprod/runhistory/rh${yy}/${yy}${mm}/${yy}${mm}${dd} + if [ ${yy}${mm}${dd}${hh} -lt 2020022600 ]; then + file=gpfs_dell1_nco_ops_com_gfs_prod_gdas.${yy}${mm}${dd}_${hh}.gdas.tar + else + file=com_gfs_prod_gdas.${yy}${mm}${dd}_${hh}.gdas.tar + fi + + htar -xvf $directory/$file ./gdas.${yy}${mm}${dd}/${hh}/gdas.t${hh}z.radstat + rc=$? + [ $rc != 0 ] && exit $rc + htar -xvf $directory/$file ./gdas.${yy}${mm}${dd}/${hh}/gdas.t${hh}z.abias + rc=$? + [ $rc != 0 ] && exit $rc + htar -xvf $directory/$file ./gdas.${yy}${mm}${dd}/${hh}/gdas.t${hh}z.abias_air + rc=$? + [ $rc != 0 ] && exit $rc + htar -xvf $directory/$file ./gdas.${yy}${mm}${dd}/${hh}/gdas.t${hh}z.abias_int + rc=$? + [ $rc != 0 ] && exit $rc + htar -xvf $directory/$file ./gdas.${yy}${mm}${dd}/${hh}/gdas.t${hh}z.abias_pc + rc=$? + [ $rc != 0 ] && exit $rc + + rm -f ./list.hires* + +#---------------------------------------------------------------------- +# Get the enkf tiled restart files for all members. 
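+#
+# As with the hires bundle, the member restart files come from the
+# 6-hour forecast of the previous cycle: the tarball name is built
+# from the t-6h date, and its listing is filtered to the files valid
+# at the current cycle before extracting with 'htar -xvf ... -L'.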
+#---------------------------------------------------------------------- + +else + + for group in $bundle + do + + directory=/NCEPPROD/hpssprod/runhistory/rh${yy_m6}/${yy_m6}${mm_m6}/${yy_m6}${mm_m6}${dd_m6} + if [ $date10_m6 -lt 2020022600 ]; then + file=gpfs_dell1_nco_ops_com_gfs_prod_enkfgdas.${yy_m6}${mm_m6}${dd_m6}_${hh_m6}.enkfgdas_restart_${group}.tar + else + file=com_gfs_prod_enkfgdas.${yy_m6}${mm_m6}${dd_m6}_${hh_m6}.enkfgdas_restart_${group}.tar + fi + + rm -f ./list*.${group} + htar -tvf $directory/$file > ./list1.${group} + grep ${yy}${mm}${dd}.${hh} ./list1.${group} > ./list2.${group} + while read -r line + do + echo ${line##*' '} >> ./list3.${group} + done < "./list2.${group}" + htar -xvf $directory/$file -L ./list3.${group} + rc=$? + [ $rc != 0 ] && exit $rc + rm -f ./list*.${group} + + done + +fi + +set +x +echo DATA PULL FOR $bundle DONE + +exit 0 diff --git a/util/gdas_init/run_pre-v14.chgres.sh b/util/gdas_init/run_pre-v14.chgres.sh new file mode 100755 index 000000000..95e5276f0 --- /dev/null +++ b/util/gdas_init/run_pre-v14.chgres.sh @@ -0,0 +1,101 @@ +#!/bin/bash + +#---------------------------------------------------------------- +# Run chgres using pre-v14 gfs data (sigio/sfcio format +# from the spectral gfs). +#---------------------------------------------------------------- + +set -x + +MEMBER=$1 + +FIX_FV3=$UFS_DIR/fix +FIX_ORO=${FIX_FV3}/fix_fv3_gmted2010 +FIX_AM=${FIX_FV3}/fix_am + +date10=$yy$mm$dd$hh +yy=$(echo $date10 | cut -c1-4) +mm=$(echo $date10 | cut -c5-6) +dd=$(echo $date10 | cut -c7-8) +hh=$(echo $date10 | cut -c9-10) + +YMDH=${yy}${mm}${dd}.${hh}0000 + +WORKDIR=$OUTDIR/work.$MEMBER + +if [ ${MEMBER} == 'hires' ]; then + CTAR=${CRES_HIRES} + INPUT_DATA_DIR="${EXTRACT_DIR}/gdas.${yy}${mm}${dd}/${hh}" + RADSTAT_DATA_DIR="${EXTRACT_DIR}/gdas.${yy}${mm}${dd}/${hh}" + OUTDIR=$OUTDIR/gdas.${yy}${mm}${dd}/${hh} + ATMFILE="gdas1.t${hh}z.sanl" + SFCFILE="gdas1.t${hh}z.sfcanl" +else + CTAR=${CRES_ENKF} + INPUT_DATA_DIR="${EXTRACT_DIR}/enkf.${yy}${mm}${dd}/${hh}/mem${MEMBER}" + RADSTAT_DATA_DIR="${EXTRACT_DIR}/enkf.${yy}${mm}${dd}/${hh}/mem${MEMBER}" + OUTDIR=$OUTDIR/enkfgdas.${yy}${mm}${dd}/${hh}/mem${MEMBER} + ATMFILE="siganl_${yy}${mm}${dd}${hh}_mem${MEMBER}" + SFCFILE="sfcanl_${yy}${mm}${dd}${hh}_mem${MEMBER}" +fi + +rm -fr $WORKDIR +mkdir -p $WORKDIR +cd $WORKDIR + +rm -fr $OUTDIR +mkdir -p $OUTDIR +mkdir -p $OUTDIR/INPUT + +cat << EOF > fort.41 + +&config + fix_dir_target_grid="${FIX_ORO}/${CTAR}/fix_sfc" + mosaic_file_target_grid="${FIX_ORO}/${CTAR}/${CTAR}_mosaic.nc" + orog_dir_target_grid="${FIX_ORO}/${CTAR}" + orog_files_target_grid="${CTAR}_oro_data.tile1.nc","${CTAR}_oro_data.tile2.nc","${CTAR}_oro_data.tile3.nc","${CTAR}_oro_data.tile4.nc","${CTAR}_oro_data.tile5.nc","${CTAR}_oro_data.tile6.nc" + data_dir_input_grid="${INPUT_DATA_DIR}" + atm_files_input_grid="$ATMFILE" + sfc_files_input_grid="$SFCFILE" + vcoord_file_target_grid="${FIX_AM}/global_hyblev.l${LEVS}.txt" + cycle_mon=$mm + cycle_day=$dd + cycle_hour=$hh + convert_atm=.true. + convert_sfc=.true. + convert_nst=.false. + input_type="gfs_spectral" + tracers_input="spfh","o3mr","clwmr" + tracers="sphum","o3mr","liq_wat" +/ +EOF + +$APRUN $UFS_DIR/exec/chgres_cube.exe +rc=$? 
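+# (chgres_cube is driven by the 'fort.41' namelist written above;
+# input_type="gfs_spectral" selects the sigio/sfcio readers for these
+# pre-v14 spectral analysis files. A non-zero exit status aborts the
+# script before any output is moved to OUTDIR.)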
+ +if [ $rc != 0 ]; then + exit $rc +fi + +mv gfs_ctrl.nc ${OUTDIR}/INPUT + +for tile in 'tile1' 'tile2' 'tile3' 'tile4' 'tile5' 'tile6' +do + mv out.atm.${tile}.nc ${OUTDIR}/INPUT/gfs_data.${tile}.nc + mv out.sfc.${tile}.nc ${OUTDIR}/INPUT/sfc_data.${tile}.nc +done + +if [ ${MEMBER} == 'hires' ]; then + cp ${RADSTAT_DATA_DIR}/*radstat* $OUTDIR + cp ${RADSTAT_DATA_DIR}/*abias* $OUTDIR + touch $OUTDIR/gdas.t${hh}z.loginc.txt +else + touch $OUTDIR/enkfgdas.t${hh}z.loginc.txt +fi + +rm -fr $WORKDIR + +set +x +echo CHGRES COMPLETED FOR MEMBER $MEMBER + +exit 0 diff --git a/util/gdas_init/run_v14.chgres.sh b/util/gdas_init/run_v14.chgres.sh new file mode 100755 index 000000000..9cd611648 --- /dev/null +++ b/util/gdas_init/run_v14.chgres.sh @@ -0,0 +1,103 @@ +#!/bin/bash + +#---------------------------------------------------------------- +# Run chgres using gfs v14 data as input. +#---------------------------------------------------------------- + +set -x + +MEMBER=$1 + +FIX_FV3=$UFS_DIR/fix +FIX_ORO=${FIX_FV3}/fix_fv3_gmted2010 +FIX_AM=${FIX_FV3}/fix_am + +date10=$yy$mm$dd$hh +yy=$(echo $date10 | cut -c1-4) +mm=$(echo $date10 | cut -c5-6) +dd=$(echo $date10 | cut -c7-8) +hh=$(echo $date10 | cut -c9-10) + +YMDH=${yy}${mm}${dd}.${hh}0000 + +WORKDIR=$OUTDIR/work.$MEMBER + +if [ ${MEMBER} == 'hires' ]; then + CTAR=${CRES_HIRES} + INPUT_DATA_DIR="${EXTRACT_DIR}/gdas.${yy}${mm}${dd}/${hh}" + RADSTAT_DATA_DIR="${EXTRACT_DIR}/gdas.${yy}${mm}${dd}/${hh}" + OUTDIR=$OUTDIR/gdas.${yy}${mm}${dd}/${hh} + ATMFILE="gdas.t${hh}z.atmanl.nemsio" + SFCFILE="gdas.t${hh}z.sfcanl.nemsio" + NSTFILE="gdas.t${hh}z.nstanl.nemsio" +else + CTAR=${CRES_ENKF} + INPUT_DATA_DIR="${EXTRACT_DIR}/enkf.${yy}${mm}${dd}/${hh}/mem${MEMBER}" + RADSTAT_DATA_DIR="${EXTRACT_DIR}/enkf.${yy}${mm}${dd}/${hh}/mem${MEMBER}" + OUTDIR=$OUTDIR/enkfgdas.${yy}${mm}${dd}/${hh}/mem${MEMBER} + ATMFILE="gdas.t${hh}z.ratmanl.mem${MEMBER}.nemsio" + SFCFILE="gdas.t${hh}z.sfcanl.mem${MEMBER}.nemsio" + NSTFILE="gdas.t${hh}z.nstanl.mem${MEMBER}.nemsio" +fi + +rm -fr $WORKDIR +mkdir -p $WORKDIR +cd $WORKDIR + +rm -fr $OUTDIR +mkdir -p $OUTDIR +mkdir -p $OUTDIR/INPUT + +cat << EOF > fort.41 + +&config + fix_dir_target_grid="${FIX_ORO}/${CTAR}/fix_sfc" + mosaic_file_target_grid="${FIX_ORO}/${CTAR}/${CTAR}_mosaic.nc" + orog_dir_target_grid="${FIX_ORO}/${CTAR}" + orog_files_target_grid="${CTAR}_oro_data.tile1.nc","${CTAR}_oro_data.tile2.nc","${CTAR}_oro_data.tile3.nc","${CTAR}_oro_data.tile4.nc","${CTAR}_oro_data.tile5.nc","${CTAR}_oro_data.tile6.nc" + data_dir_input_grid="${INPUT_DATA_DIR}" + atm_files_input_grid="$ATMFILE" + sfc_files_input_grid="$SFCFILE" + nst_files_input_grid="$NSTFILE" + vcoord_file_target_grid="${FIX_AM}/global_hyblev.l${LEVS}.txt" + cycle_mon=$mm + cycle_day=$dd + cycle_hour=$hh + convert_atm=.true. + convert_sfc=.true. + convert_nst=.true. + input_type="gfs_gaussian" + tracers="sphum","liq_wat","o3mr" + tracers_input="spfh","clwmr","o3mr" +/ +EOF + +$APRUN $UFS_DIR/exec/chgres_cube.exe +rc=$? 
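+# (Here input_type="gfs_gaussian" selects the nemsio readers for the
+# v14 gaussian analysis files, and NST fields are converted as well,
+# via convert_nst=.true. and the nst_files_input_grid entry.)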
+ +if [ $rc != 0 ]; then + exit $rc +fi + +mv gfs_ctrl.nc ${OUTDIR}/INPUT + +for tile in 'tile1' 'tile2' 'tile3' 'tile4' 'tile5' 'tile6' +do + mv out.atm.${tile}.nc ${OUTDIR}/INPUT/gfs_data.${tile}.nc + mv out.sfc.${tile}.nc ${OUTDIR}/INPUT/sfc_data.${tile}.nc +done + +if [ ${MEMBER} == 'hires' ]; then + cp ${RADSTAT_DATA_DIR}/*radstat* $OUTDIR + cp ${RADSTAT_DATA_DIR}/*abias* $OUTDIR + touch $OUTDIR/gdas.t${hh}z.loginc.txt +else + touch $OUTDIR/enkfgdas.t${hh}z.loginc.txt +fi + +rm -fr $WORKDIR + +set +x +echo CHGRES COMPLETED FOR MEMBER $MEMBER + +exit 0 diff --git a/util/gdas_init/run_v15.chgres.sh b/util/gdas_init/run_v15.chgres.sh new file mode 100755 index 000000000..22cf94d93 --- /dev/null +++ b/util/gdas_init/run_v15.chgres.sh @@ -0,0 +1,100 @@ +#!/bin/bash + +#--------------------------------------------------------------------------- +# Run chgres using gfs v15 data as input. +#--------------------------------------------------------------------------- + +set -x + +MEMBER=$1 + +FIX_FV3=$UFS_DIR/fix +FIX_ORO=${FIX_FV3}/fix_fv3_gmted2010 +FIX_AM=${FIX_FV3}/fix_am + +date10=`$NDATE -6 $yy$mm$dd$hh` +yy_d=$(echo $date10 | cut -c1-4) +mm_d=$(echo $date10 | cut -c5-6) +dd_d=$(echo $date10 | cut -c7-8) +hh_d=$(echo $date10 | cut -c9-10) + +YMDH=${yy}${mm}${dd}.${hh}0000 + +WORKDIR=$OUTDIR/work.$MEMBER + +if [ ${MEMBER} == 'hires' ]; then + CINP=C768 + CTAR=${CRES_HIRES} + INPUT_DATA_DIR="${EXTRACT_DIR}/gdas.${yy_d}${mm_d}${dd_d}/${hh_d}/RESTART" + RADSTAT_DATA_DIR="${EXTRACT_DIR}/gdas.${yy}${mm}${dd}/${hh}" + OUTDIR=$OUTDIR/gdas.${yy}${mm}${dd}/${hh} +else + CINP=C384 + CTAR=${CRES_ENKF} + INPUT_DATA_DIR="${EXTRACT_DIR}/enkfgdas.${yy_d}${mm_d}${dd_d}/${hh_d}/mem${MEMBER}/RESTART" + RADSTAT_DATA_DIR="${EXTRACT_DIR}/enkfgdas.${yy}${mm}${dd}/${hh}/mem${MEMBER}" + OUTDIR=$OUTDIR/enkfgdas.${yy}${mm}${dd}/${hh}/mem${MEMBER} +fi + +rm -fr $WORKDIR +mkdir -p $WORKDIR +cd $WORKDIR + +rm -fr $OUTDIR +mkdir -p $OUTDIR +mkdir -p $OUTDIR/INPUT + +cat << EOF > fort.41 + +&config + fix_dir_target_grid="${FIX_ORO}/${CTAR}/fix_sfc" + mosaic_file_target_grid="${FIX_ORO}/${CTAR}/${CTAR}_mosaic.nc" + orog_dir_target_grid="${FIX_ORO}/${CTAR}" + orog_files_target_grid="${CTAR}_oro_data.tile1.nc","${CTAR}_oro_data.tile2.nc","${CTAR}_oro_data.tile3.nc","${CTAR}_oro_data.tile4.nc","${CTAR}_oro_data.tile5.nc","${CTAR}_oro_data.tile6.nc" + mosaic_file_input_grid="${FIX_ORO}/${CINP}/${CINP}_mosaic.nc" + orog_dir_input_grid="${FIX_ORO}/${CINP}" + orog_files_input_grid="${CINP}_oro_data.tile1.nc","${CINP}_oro_data.tile2.nc","${CINP}_oro_data.tile3.nc","${CINP}_oro_data.tile4.nc","${CINP}_oro_data.tile5.nc","${CINP}_oro_data.tile6.nc" + data_dir_input_grid="${INPUT_DATA_DIR}" + atm_core_files_input_grid="${YMDH}.fv_core.res.tile1.nc","${YMDH}.fv_core.res.tile2.nc","${YMDH}.fv_core.res.tile3.nc","${YMDH}.fv_core.res.tile4.nc","${YMDH}.fv_core.res.tile5.nc","${YMDH}.fv_core.res.tile6.nc","${YMDH}.fv_core.res.nc" + atm_tracer_files_input_grid="${YMDH}.fv_tracer.res.tile1.nc","${YMDH}.fv_tracer.res.tile2.nc","${YMDH}.fv_tracer.res.tile3.nc","${YMDH}.fv_tracer.res.tile4.nc","${YMDH}.fv_tracer.res.tile5.nc","${YMDH}.fv_tracer.res.tile6.nc" + vcoord_file_target_grid="${FIX_AM}/global_hyblev.l${LEVS}.txt" + sfc_files_input_grid="${YMDH}.sfc_data.tile1.nc","${YMDH}.sfc_data.tile2.nc","${YMDH}.sfc_data.tile3.nc","${YMDH}.sfc_data.tile4.nc","${YMDH}.sfc_data.tile5.nc","${YMDH}.sfc_data.tile6.nc" + cycle_mon=$mm + cycle_day=$dd + cycle_hour=$hh + convert_atm=.true. + convert_sfc=.true. + convert_nst=.true. 
+ tracers="sphum","liq_wat","o3mr","ice_wat","rainwat","snowwat","graupel" + tracers_input="sphum","liq_wat","o3mr","ice_wat","rainwat","snowwat","graupel" +/ +EOF + +$APRUN $UFS_DIR/exec/chgres_cube.exe +rc=$? + +if [ $rc != 0 ]; then + exit $rc +fi + +mv gfs_ctrl.nc ${OUTDIR}/INPUT + +for tile in 'tile1' 'tile2' 'tile3' 'tile4' 'tile5' 'tile6' +do + mv out.atm.${tile}.nc ${OUTDIR}/INPUT/gfs_data.${tile}.nc + mv out.sfc.${tile}.nc ${OUTDIR}/INPUT/sfc_data.${tile}.nc +done + +if [ ${MEMBER} == 'hires' ]; then + cp ${RADSTAT_DATA_DIR}/* $OUTDIR + touch $OUTDIR/gdas.t${hh}z.loginc.txt +else + touch $OUTDIR/enkfgdas.t${hh}z.loginc.txt +fi + +rm -fr $WORKDIR + +set +x +echo CHGRES COMPLETED FOR MEMBER $MEMBER + +exit 0
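
For reference, the intended end-to-end use of the new gdas_init utility, per the
'config' prolog, is sketched below. This is illustrative only: the Hera driver is
used as an example, and the output paths shown follow the run_*.chgres.sh scripts.

    # 0. Build chgres_cube (./sorc/build_chgres_cube.sh) and link the
    #    fixed directories (see ./sorc/link_fixdirs.sh) first.
    cd util/gdas_init
    # 1. Edit 'config': cycle date (yy/mm/dd/hh), LEVS, CRES_HIRES,
    #    CRES_ENKF, EXTRACT_DIR/OUTDIR, and the EXTRACT_DATA and
    #    RUN_CHGRES flags.
    # 2. Invoke the driver for your machine with no arguments, e.g.:
    ./driver.hera.sh
    # 3. Cold-start files land under $OUTDIR:
    #    gdas.YYYYMMDD/HH/INPUT/gfs_data.tile[1-6].nc, sfc_data.tile[1-6].nc
    #    enkfgdas.YYYYMMDD/HH/memNNN/INPUT/...  (members 001-080)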