diff --git a/aces/analysis/giantcube_cuts.py b/aces/analysis/giantcube_cuts.py index bbf1dbcf..1353f793 100644 --- a/aces/analysis/giantcube_cuts.py +++ b/aces/analysis/giantcube_cuts.py @@ -77,6 +77,16 @@ makepng(data=mx.value, wcs=mx.wcs, imfn=f"{mompath}/{molname}_CubeMosaic_max.png", stretch='asinh', min_percent=0.1, max_percent=99.9) + print(f"argmax. dt={time.time() - t0}") + argmx = cube.argmax(axis=0, **howargs) + vmax = cube.spectral_axis[argmx] + hdu = mx.hdu + hdu.data = vmax.value + hdu.writeto(f"{mompath}/{molname}_CubeMosaic_vpeak.fits", overwrite=True) + # use mx.wcs + makepng(data=vmax.value, wcs=mx.wcs, imfn=f"{mompath}/{molname}_CubeMosaic_vpeak.png", + stretch='asinh', min_percent=0.1, max_percent=99.9) + if dopv: print(f"PV max 2. dt={time.time() - t0}") pv_max = cube.max(axis=2, **howargs) @@ -96,7 +106,13 @@ make_downsampled_cube(f'{cubepath}/{molname}_CubeMosaic.fits', f'{cubepath}/{molname}_CubeMosaic_downsampled9.fits') print(f"masked mom0. dt={time.time() - t0}") - std = cube.mad_std() + try: + std = cube.mad_std() + except ValueError: + # mad_std requires whole cube in memory; we can't afford that + # instead, do a cheap version of sigma clipping + std = cube.std() + std = cube.with_mask(cube < std * 5).std() mom0 = cube.with_mask(cube > std).moment0(axis=0, **howargs) mom0.write(f"{mompath}/{molname}_CubeMosaic_masked_mom0.fits", overwrite=True) makepng(data=mom0.value, wcs=mom0.wcs, imfn=f"{mompath}/{molname}_CubeMosaic_masked_mom0.png", @@ -104,7 +120,7 @@ from dask_image import ndmorph signal_mask = cube > std - signal_mask = ndmorph.binary_dilation(signal_mask, structure=np.ones([3, 3, 3]), iterations=1) + signal_mask = ndmorph.binary_dilation(signal_mask.include(), structure=np.ones([3, 3, 3]), iterations=1) mom0 = cube.with_mask(signal_mask).moment0(axis=0, **howargs) mom0.write(f"{mompath}/{molname}_CubeMosaic_masked_dilated_mom0.fits", overwrite=True) makepng(data=mom0.value, wcs=mom0.wcs, imfn=f"{mompath}/{molname}_CubeMosaic_masked_dilated_mom0.png", diff --git a/aces/analysis/imstats.py b/aces/analysis/imstats.py index 07dd0a6e..e61fe290 100644 --- a/aces/analysis/imstats.py +++ b/aces/analysis/imstats.py @@ -497,18 +497,25 @@ def default(self, obj): def savestats(basepath=basepath, - suffix='image.tt0*', filetype=".fits"): + suffix='image.tt0*', filetype=".fits", + verbose=True + ): """ filtype : str ".fits" or "" for CASA image """ + if verbose: + print("Assembling statistics") stats = assemble_stats( f"{basepath}/data/2021.1.00172.L/science_goal.uid___A001_X1590_X30a8/group.uid___A001_X1590_X30a9/*/calibrated/working/*.cont.I.iter1.{suffix}{filetype}", ditch_suffix=f".{suffix[:-1]}") with open(f'{basepath}/tables/metadata_{suffix}.json', 'w') as fh: json.dump(stats, fh, cls=MyEncoder) + if verbose: + print("Done assembling statistics & dumping them to file") + requested = get_requested_sens() meta_keys = ['region', 'band', 'array', 'robust', 'suffix', @@ -521,6 +528,9 @@ def savestats(basepath=basepath, req_keys = ['B3_res', 'B3_sens', ] req_keys_head = ['Req_Res', 'Req_Sens'] + if verbose: + print("Assembling table rows") + rows = [] for entry in stats: band = entry['meta']['band'] @@ -540,15 +550,20 @@ def savestats(basepath=basepath, tbl.add_column(Column(name='BeamVsReq', data=(tbl['bmaj'] * tbl['bmin'])**0.5 / tbl['Req_Res'])) tbl.add_column(Column(name='BmajVsReq', data=tbl['bmaj'] / tbl['Req_Res'])) + if verbose: + print("Writing tables") tbl.write(f'{basepath}/tables/metadata_{suffix.strip("*")}.ecsv', overwrite=True) 
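# --- note (illustrative sketch, not part of the patch): the mad_std fallback
# in giantcube_cuts.py above approximates sigma-clipped statistics in two
# passes because mad_std needs the whole cube in memory. The same pattern in
# plain numpy (function name and `data` are placeholders, not from the repo):
import numpy as np

def cheap_clipped_std(data, clip=5):
    """Two-pass stand-in for a sigma-clipped standard deviation.

    Pass 1: raw std, biased high wherever bright emission dominates.
    Pass 2: std over values below clip*std, which discards the strongest
    signal and lands much closer to the true noise level.
    """
    std = np.nanstd(data)                      # first, contaminated estimate
    return np.nanstd(data[data < clip * std])  # then clip and re-estimate

# mirrors: std = cube.std(); std = cube.with_mask(cube < std * 5).std()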
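# --- note (illustrative sketch, not part of the patch): the binary_dilation
# fix above works because `cube > std` returns a spectral-cube mask object,
# not an array; .include() materializes it as a boolean array that
# dask_image.ndmorph accepts. Sketch assuming a dask-backed SpectralCube
# `cube` and a noise estimate `std`, as in the patch:
import numpy as np
from dask_image import ndmorph

signal_mask = cube > std            # lazy mask object, not an array
signal_mask = ndmorph.binary_dilation(signal_mask.include(),  # -> bool array
                                      structure=np.ones([3, 3, 3]),
                                      iterations=1)
mom0 = cube.with_mask(signal_mask).moment0(axis=0)  # dilated-mask moment 0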
tbl.write(f'{basepath}/tables/metadata_{suffix.strip("*")}.html', format='ascii.html', overwrite=True) tbl.write(f'{basepath}/tables/metadata_{suffix.strip("*")}.tex', overwrite=True) tbl.write(f'{basepath}/tables/metadata_{suffix.strip("*")}.js.html', format='jsviewer', overwrite=True) + if verbose: + print("Done writing tables") return tbl def main(): return savestats() + print("Finished with imstats.main()") diff --git a/aces/analysis/parse_contdotdat.py b/aces/analysis/parse_contdotdat.py index 25e46b4f..eb8b9fb4 100644 --- a/aces/analysis/parse_contdotdat.py +++ b/aces/analysis/parse_contdotdat.py @@ -191,7 +191,8 @@ def cont_channel_selection_to_contdotdat(cont_channel_selection, msname, except ModuleNotFoundError: # if using this not in a casa6 environment - pass + def contchannels_to_linechannels(*args, **kwargs): + raise NotImplementedError # flagchannels='0:0~60;180~300;2790~2880;3280~3360;3460~3490;3830~3839,1:60~130;200~250;320~420;580~650;1000~1040;1200~1360;1420~1460;1720~1790;1860~1919,2:40~300;630~700;800~1000;1440~1640;1780~1919,3:100~150;470~540;640~820;920~980;1220~1260;1370~1420;1710~1780,4:0~60;180~300;2790~2880;3280~3360;3460~3490;3830~3839,5:60~130;200~250;320~420;580~650;1000~1040;1200~1360;1420~1460;1720~1790;1860~1919,6:40~300;630~700;800~1000;1440~1640;1780~1919,7:100~150;470~540;640~820;920~980;1220~1260;1370~1420;1710~1780,8:0~60;180~300;2790~2880;3280~3360;3460~3490;3830~3839,9:60~130;200~250;320~420;580~650;1000~1040;1200~1360;1420~1460;1720~1790;1860~1919,10:40~300;630~700;800~1000;1440~1640;1780~1919,11:100~150;470~540;640~820;920~980;1220~1260;1370~1420;1710~1780,12:0~60;180~300;900~1050;1860~1950;2100~2140;2230~2280;2790~2880;3050~3100;3280~3360;3460~3490;3590~3650;3830~3839,13:60~130;200~250;265~285;320~420;435~460;580~650;670~700;760~810;1000~1040;1200~1360;1420~1460;1720~1790;1800~1840;1860~1919,14:40~300;630~700;800~1000;1440~1640;1780~1919,15:100~150;470~540;640~820;920~980;1170~1190;1220~1260;1370~1420;1710~1780' # freqsel = cont_channel_selection_to_contdotdat(flagchannels, ('./science_goal.uid___A001_X1290_X44/group.uid___A001_X1290_X45/member.uid___A001_X1290_X46/calibrated/calibrated_final.ms/'), spw_mapping={0:25,1:27,2:29,3:31}) diff --git a/aces/analysis/statcont_cubes.py b/aces/analysis/statcont_cubes.py index 0b91cf80..9f2b072a 100644 --- a/aces/analysis/statcont_cubes.py +++ b/aces/analysis/statcont_cubes.py @@ -261,9 +261,11 @@ def main(): try: SpectralCube.read(fn).beam except NoBeamError: + print(f"Neither {fn} nor {contsubfn} have a beam") raise ValueError(f"Neither {fn} nor {contsubfn} have a beam") except AttributeError: SpectralCube.read(fn).beams + print(f"{fn} is a multi-beam cube") raise AttributeError(f"{fn} is a multi-beam cube") redo = True except AttributeError: diff --git a/aces/hipergator_scripts/job_runner.py b/aces/hipergator_scripts/job_runner.py index 31e9e5d3..b755fb51 100644 --- a/aces/hipergator_scripts/job_runner.py +++ b/aces/hipergator_scripts/job_runner.py @@ -299,6 +299,7 @@ def main(): if continue_started_only: msfiles = glob.glob(f'{workdir}/{tempdir_name}/*.ms') if len(msfiles) == 0: + print("No MS files found; skipping") continue else: msfstr = '\n'.join(msfiles) @@ -333,11 +334,16 @@ def main(): else os.path.join(datapath, os.path.basename(vis)).replace("targets", "target") for vis in tcpars['vis']] for ii, vis in enumerate(tcpars['vis']): - try: - assert os.path.exists(vis), vis - except AssertionError: - assert os.path.exists(vis.replace("_line", "")) - tcpars['vis'][ii] = vis.replace("_line", "") + if 
not os.path.exists(vis): + if os.path.exists(vis.replace("_line", "")): + tcpars['vis'][ii] = vis.replace("_line", "") + elif os.path.exists(vis.replace("_target_line", "")): + tcpars['vis'][ii] = vis.replace("_target_line", "") + elif os.path.exists(vis.replace("_target", "")): + tcpars['vis'][ii] = vis.replace("_target", "") + else: + matches = glob.glob(vis.replace(".ms", "*")) + raise IOError(f"MS {vis} does not exist, nor do any of its variants. glob={matches}") if 'nchan' not in tcpars: print(tcpars) print(sous, gous, mous) diff --git a/aces/hipergator_scripts/run_mosaic_12m.sh b/aces/hipergator_scripts/run_mosaic_12m.sh index cab344eb..e1840e1e 100644 --- a/aces/hipergator_scripts/run_mosaic_12m.sh +++ b/aces/hipergator_scripts/run_mosaic_12m.sh @@ -8,8 +8,8 @@ #SBATCH --time=96:00:00 # Time limit hrs:min:sec #SBATCH --qos=astronomy-dept-b #SBATCH --account=astronomy-dept -#SBATCH --output=/blue/adamginsburg/adamginsburg/ACES/logs/ACES_mosaic12m_cont_%j.log -#SBATCH --job-name=ACES_mosaic12m_cont +#SBATCH --output=/blue/adamginsburg/adamginsburg/ACES/logs/ACES_mosaic12m_%j.log +#SBATCH --job-name=ACES_mosaic12m #SBATCH --export=ALL @@ -42,7 +42,7 @@ echo "test import" #echo "Make TP mosaic" #/orange/adamginsburg/miniconda3/envs/python39/bin/aces_mosaic_TP || exit 1 echo "Make 12m mosaic" -/orange/adamginsburg/miniconda3/envs/python39/bin/aces_mosaic_12m --contonly || exit 1 +/orange/adamginsburg/miniconda3/envs/python39/bin/aces_mosaic_12m || exit 1 # technically shouldn't need to be re-run, but as I add new mosaics, it will # (but this causes a 'failed' error) diff --git a/aces/hipergator_scripts/run_pipeline_slurm_burst_mpi.sh b/aces/hipergator_scripts/run_pipeline_slurm_burst_mpi.sh index 03853321..ddaa1c92 100644 --- a/aces/hipergator_scripts/run_pipeline_slurm_burst_mpi.sh +++ b/aces/hipergator_scripts/run_pipeline_slurm_burst_mpi.sh @@ -29,6 +29,8 @@ export ACES_ROOTDIR="/orange/adamginsburg/ACES/" CASAVERSION=casa-6.2.1-7-pipeline-2021.2.0.128 CASAVERSION=casa-6.4.3-2-pipeline-2021.3.0.17 CASAVERSION=casa-6.4.1-12-pipeline-2022.2.0.68 +# required for QA3 version of field af +CASAVERSION=casa-6.5.4-9-pipeline-2023.1.0.125 export CASAPATH=/orange/adamginsburg/casa/${CASAVERSION} export MPICASA=${CASAPATH}/bin/mpicasa diff --git a/aces/imaging/make_mosaic.py b/aces/imaging/make_mosaic.py index d1c9ab3a..eb1832f0 100644 --- a/aces/imaging/make_mosaic.py +++ b/aces/imaging/make_mosaic.py @@ -79,7 +79,7 @@ def read_as_2d(fn, minval=None): def get_peak(fn, slab_kwargs=None, rest_value=None, suffix="", save_file=True, - folder=None, threshold=None): + folder=None, threshold=None, rel_threshold=None, fail_on_zeros=True): print(".", end='', flush=True) outfn = fn.replace(".fits", "") + f"{suffix}_max.fits" if folder is not None: @@ -87,8 +87,13 @@ def get_peak(fn, slab_kwargs=None, rest_value=None, suffix="", save_file=True, if os.path.exists(outfn): hdu = fits.open(outfn) proj = Projection.from_hdu(hdu) + if rel_threshold is not None: + threshold = rel_threshold * proj.max() + print(f"Set threshold to {threshold} based on rel_threshold={rel_threshold}") if threshold is not None: proj[proj < threshold] = 0 + if fail_on_zeros and np.nansum(proj) == 0: + raise ValueError(f"File {fn} reduced to all zeros") return proj else: ft = 'fits' if fn.endswith(".fits") else "casa_image" @@ -116,8 +121,14 @@ def get_peak(fn, slab_kwargs=None, rest_value=None, suffix="", save_file=True, mxjy = mxjy.with_beam(beam, raise_error_jybm=False) mx = mxjy.to(u.K, equivalencies=equiv) + if fail_on_zeros and 
np.nansum(mx.value) == 0: + raise ValueError(f"File {fn} reduced to all zeros") if save_file: mx.hdu.writeto(outfn) + + if rel_threshold is not None: + threshold = rel_threshold * mx.max() + print(f"Set threshold to {threshold} based on rel_threshold={rel_threshold}") if threshold is not None: mx[mx.value < threshold] = 0 return mx @@ -153,6 +164,20 @@ def get_m0(fn, slab_kwargs=None, rest_value=None, suffix="", save_file=True, fol return moment0 +def check_hdus(hdus): + bad = 0 + for hdu in hdus: + if isinstance(hdu, fits.PrimaryHDU): + ttl = np.nansum(hdu.data) + else: + ttl = np.nansum(hdu) + if ttl == 0: + bad = bad + 1 + + if bad > 0: + raise ValueError(f"Found {bad} bad HDUs (all zero)") + + def makepng(data, wcs, imfn, footprint=None, **norm_kwargs): import pylab as pl from astropy import visualization @@ -192,6 +217,9 @@ def make_mosaic(twod_hdus, name, norm_kwargs={}, slab_kwargs=None, array='7m', folder=None, # must be specified though basepath='./'): + """ + Given a long list of 2D HDUs and an output name, make a giant mosaic. + """ if target_header is None: log.info(f"Finding WCS for {len(twod_hdus)} HDUs") @@ -257,6 +285,10 @@ def repr_function(*args, **kwargs): if commonbeam is not None: header.update(cb.to_header_keywords()) + assert not np.all(np.isnan(prjarr)) + + log.info(f"DEBUG: footprint[prjarr==0] = {footprint[prjarr == 0]}") + outfile = f'{basepath}/mosaics/{folder}/{array}_{name}_mosaic.fits' log.info(f"Writing reprojected data to {outfile}") fits.PrimaryHDU(data=prjarr, header=header).writeto(outfile, overwrite=True) @@ -367,7 +399,7 @@ def repr_function(*args, **kwargs): return cb -def all_lines(header, parallel=False, array='12m', glob_suffix='cube.I.iter1.image.pbcor', +def all_lines(header, parallel=False, array='12m', glob_suffixes=('cube.I.iter1.image.pbcor.statcont.contsub.fits', 'cube.I.manual.image.pbcor.statcont.contsub.fits'), lines='all', folder='', globdir='calibrated/working', use_weights=True): @@ -388,10 +420,14 @@ def all_lines(header, parallel=False, array='12m', glob_suffix='cube.I.iter1.ima log.info(f"{array} {line} {restf}") - filelist = glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/{globdir}/*spw{spwn}.{glob_suffix}') + filelist = [] + for glob_suffix in glob_suffixes: + for spwpre in ('spw', 'sci'): + filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/{globdir}/*{spwpre}{spwn}.{glob_suffix}') + log.debug(f"Filelist={filelist}") assert len(filelist) > 0 if use_weights: - weightfiles = [fn.replace("image.pbcor", "weight") for fn in filelist] + weightfiles = [fn.replace("image.pbcor.statcont.contsub.fits", "weight") for fn in filelist] for ii, (ifn, wfn) in enumerate(zip(list(filelist), list(weightfiles))): if not os.path.exists(wfn): log.error(f"Missing file {wfn}") @@ -408,36 +444,42 @@ def all_lines(header, parallel=False, array='12m', glob_suffix='cube.I.iter1.ima filelist, ) hdus = [x.hdu for x in hdus] + check_hdus(hdus) if use_weights: wthdus = pool.map(partial(get_peak, **{'slab_kwargs': {'lo': -2 * u.km / u.s, 'hi': 2 * u.km / u.s}, 'rest_value': restf}, suffix=f'_{line}', - threshold=0.5, # pb limit + rel_threshold=0.25, # pb limit ), weightfiles) wthdus = [x.hdu for x in wthdus] + check_hdus(wthdus) else: hdus = [get_peak(fn, slab_kwargs={'lo': -200 * u.km / u.s, 'hi': 200 * u.km / u.s}, rest_value=restf, suffix=f'_{line}').hdu for fn in filelist] + check_hdus(hdus) print(flush=True) if use_weights: wthdus = [get_peak(fn, slab_kwargs={'lo': -2 * u.km / u.s, 'hi': 2 * u.km / u.s}, rest_value=restf, 
suffix=f'_{line}', - threshold=0.5, # pb limit + rel_threshold=0.25, # pb limit ).hdu for fn in weightfiles] + check_hdus(wthdus) print(flush=True) if parallel: - print(f"Starting function make_mosaic for {line} peak intensity") + print(f"Starting function make_mosaic for {line} peak intensity (parallel mode)") proc = Process(target=make_mosaic, args=(hdus, ), kwargs=dict(name=f'{line}_max', cbar_unit='K', norm_kwargs=dict(max_cut=5, min_cut=-0.01, stretch='asinh'), array=array, basepath=basepath, weights=wthdus if use_weights else None, + folder=folder, target_header=header)) proc.start() processes.append(proc) else: + print(f"Starting function make_mosaic for {line} peak intensity") make_mosaic(hdus, name=f'{line}_max', cbar_unit='K', norm_kwargs=dict(max_cut=5, min_cut=-0.01, stretch='asinh'), array=array, basepath=basepath, weights=wthdus if use_weights else None, @@ -458,13 +500,16 @@ def all_lines(header, parallel=False, array='12m', glob_suffix='cube.I.iter1.ima proc = Process(target=make_mosaic, args=(m0hdus, ), kwargs=dict(name=f'{line}_m0', cbar_unit='K km/s', norm_kwargs=dict(max_cut=20, min_cut=-1, stretch='asinh'), + folder=folder, array=array, basepath=basepath, weights=wthdus if use_weights else None, target_header=header)) proc.start() processes.append(proc) else: make_mosaic(m0hdus, name=f'{line}_m0', cbar_unit='K km/s', norm_kwargs=dict(max_cut=20, min_cut=-1, stretch='asinh'), - array=array, basepath=basepath, weights=wthdus if use_weights else None, target_header=header) + array=array, basepath=basepath, + folder=folder, + weights=wthdus if use_weights else None, target_header=header) del m0hdus if use_weights: del wthdus diff --git a/aces/imaging/mosaic_12m.py b/aces/imaging/mosaic_12m.py index 694453e1..e24246d8 100644 --- a/aces/imaging/mosaic_12m.py +++ b/aces/imaging/mosaic_12m.py @@ -70,7 +70,9 @@ def all_lines(*args, folder='12m_flattened', **kwargs): def rms_(*args, **kwargs): - return rms(prefix='12m_continuum', threshold=3, **kwargs) + return rms(prefix='12m_continuum', + folder='continuum', + threshold=3, **kwargs) def main(): @@ -107,6 +109,7 @@ def main(): # do this _after_ if not options.contonly: + print("Running all_lines") all_lines(header) if failure: @@ -135,10 +138,14 @@ def main_(): all_lines(header) -def check_files(filelist): +def check_files(filelist, funcname=None): + if funcname is not None: + logprint(f"Checking files for {funcname}") uidtb = Table.read(f'{basepath}/reduction_ACES/aces/data/tables/aces_SB_uids.csv') for row in uidtb: - matches = [row['12m MOUS ID'] in fn for fn in filelist] + matches = [(row['12m MOUS ID'] in fn) or + (f'_{row["Obs ID"]}.' 
in os.path.basename(fn)) + for fn in filelist] print(row['Obs ID'], sum(matches)) if sum(matches) != 1: for fn in filelist: @@ -158,14 +165,14 @@ def check_files(filelist): def continuum(header): - logprint("12m continuum") + logprint("12m continuum: default/product version") filelist = glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/product/*25_27_29_31_33_35*cont.I.tt0.pbcor.fits') filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/product/*25_27_29_31_33_35*cont.I.manual.pbcor.tt0.fits') filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/manual/*25_27_29_31_33_35*.tt0.pbcor.fits') - check_files(filelist) + check_files(filelist, funcname='continuum') - print("Read as 2d for files: ", end=None, flush=True) + print("CONTINUUM (default/product version) Read as 2d for files: ", end=None, flush=True) hdus = [read_as_2d(fn) for fn in filelist] for hdu, fn in zip(hdus, filelist): if isinstance(hdu, fits.HDUList): @@ -177,6 +184,8 @@ def continuum(header): #weightfiles += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/manual/*.pb.tt0.fits') weightfiles = [fn.replace(".image.tt0.pbcor", ".weight.tt0").replace(".I.tt0.pbcor", ".I.weight.tt0").replace('manual.pbcor.tt0', 'manual.weight.tt0') for fn in filelist] + # for product version, we need to use what we're given... + weightfiles = [fn.replace(".weight.tt0.fits", ".pb.tt0.fits").replace(".weight.tt0", ".pb.tt0.fits") for fn in weightfiles if not os.path.exists(fn)] assert len(weightfiles) == len(filelist) wthdus = [read_as_2d(fn, minval=0.5) for fn in weightfiles] print(flush=True) @@ -205,13 +214,13 @@ def continuum(header): def reimaged(header): - logprint("12m continuum reimaged") - filelist = glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*25_27_29_31_33_35*cont.I*image.tt0.pbcor') + logprint("12m continuum reimaged (glob is *.spw25_27_29_31_33_35.cont.I*image.tt0.pbcor)") + filelist = glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*.spw25_27_29_31_33_35.cont.I*image.tt0.pbcor') #filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/manual/*25_27_29_31_33_35*cont*tt0.pbcor.fits') - check_files(filelist) + check_files(filelist, funcname='reimaged') - print("Read as 2d for files: ", end=None, flush=True) + print("Read as 2d for files (reimaged): ", end=None, flush=True) hdus = [read_as_2d(fn) for fn in filelist] print(flush=True) logprint(filelist) @@ -222,6 +231,7 @@ def reimaged(header): #assert len(weightfiles) == len(filelist) #for missing in set(weightfiles_) - set(weightfiles): # logprint(f"Missing {missing}") + print("Read as 2d for weightfiles (reimaged): ", end=None, flush=True) wthdus = [read_as_2d(fn, minval=0.5) for fn in weightfiles] print(flush=True) make_mosaic(hdus, name='continuum_commonbeam_circular_reimaged', @@ -247,16 +257,24 @@ def reimaged(header): folder='continuum' ) + # feather with non-reimaged 7m (never did 7m reimaging) + feath = uvcombine.feather_simple(f'{basepath}/mosaics/12m_flattened/12m_continuum_commonbeam_circular_reimaged_mosaic.fits', + f'{basepath}/mosaics/7m_flattened/7m_continuum_commonbeam_circular_mosaic.fits') + fits.PrimaryHDU(data=feath.real, + header=fits.getheader(f'{basepath}/mosaics/12m_flattened/12m_continuum_commonbeam_circular_reimaged_mosaic.fits') + ).writeto(f'{basepath}/mosaics/7m_flattened/feather_7m12m_continuum_commonbeam_circular_reimaged_mosaic.fits', + overwrite=True) -def reimaged_high(header): - for spw, name in zip(('33_35', '25_27'), ('reimaged_high', 
'reimaged_low')): - logprint(f"12m continuum {name}") - filelist = glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw{spw}*cont.I*image.tt0.pbcor') - filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/manual/*spw{spw}*cont*tt0.pbcor.fits') - check_files(filelist) +def reimaged_high(header, spws=('33_35', '25_27'), spw_names=('reimaged_high', 'reimaged_low')): + for spw, name in zip(spws, spw_names): + logprint(f"12m continuum {name} in reimaged_high") + filelist = glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*.spw{spw}.cont.I*image.tt0.pbcor') + filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/manual/*.spw{spw}.cont*tt0.pbcor.fits') - print("Read as 2d for files: ", end=None, flush=True) + check_files(filelist, funcname='reimaged_high') + + print(f"Read as 2d for files (reimaged {name}): ", end=None, flush=True) hdus = [read_as_2d(fn) for fn in filelist] print(flush=True) #weightfiles = [x.replace(".image.tt0.pbcor", ".pb.tt0") for x in filelist] @@ -288,11 +306,11 @@ def reimaged_high(header): def residuals(header): logprint("12m continuum residuals") for spw, name in zip(('25_27_29_31_33_35', '33_35', '25_27'), ('reimaged', 'reimaged_high', 'reimaged_low')): - filelist = glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw{spw}*cont.I.iter1.residual.tt0') - filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw{spw}*cont.I.manual.residual.tt0') - filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/manual/*spw{spw}*cont.I*.residual.tt0') + filelist = glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*.spw{spw}*cont.I.iter1.residual.tt0') + filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*.spw{spw}*cont.I.manual.residual.tt0') + filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/manual/*.spw{spw}*cont.I*.residual.tt0') - check_files(filelist) + check_files(filelist, funcname='residuals') # check that field am, which is done, is included assert any([f'uid___A001_X15a0_X184.Sgr_A_star_sci.spw{spw}.cont.I.manual.residual.tt0' in fn @@ -472,24 +490,57 @@ def starstarmap_with_kwargs(pool, fn, kwargs_iter): return pool.istarmap(apply_kwargs, args_for_starmap) +def get_weightfile(filename, spw): + suffixes = ('.cube.I.iter1.reclean.weight', + '.cube.I.iter1.weight', + '.cube.I.manual.weight', + '.cube.I.manual.reclean.weight', + ) + spw_pre = ('.spw', '_sci') + flist = [] + for pre in spw_pre: + for suf in suffixes: + flist += glob.glob(os.path.join( + os.path.dirname(filename), + f"*{pre}{spw}*{suf}")) + + # dig through the sous/gous/mous for the appropriate weightfile + if len(flist) != 1: + uidtb = Table.read(f'{basepath}/reduction_ACES/aces/data/tables/aces_SB_uids.csv') + uidtb.add_index('Obs ID') + sgmpath = os.path.join(basepath, 'data/2021.1.00172.L/science_goal.uid___A001_X1590_X30a8/group.uid___A001_X1590_X30a9/member.uid___A001_') + if 'Sgr_A_st' in filename: + fieldid = filename.split("Sgr_A_st_")[-1].split(".")[0] + mousid = uidtb.loc[fieldid]['12m MOUS ID'] + for pre in spw_pre: + for suf in suffixes: + flist += glob.glob(f'{sgmpath}{mousid}/calibrated/working/*{pre}{spw}*{suf}') + + # we have to have exactly 1 match + assert len(flist) == 1, str(flist) + return flist[0] + + def make_giant_mosaic_cube_cs21(**kwargs): """ Sep 2023: Fields ar and ad are excluded because of their beams ad shouldn't be, but it is. 
""" - filelist = glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw33.cube.I.iter1.image.pbcor.statcont.contsub.fits') - filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw33.cube.I.manual*image.pbcor.statcont.contsub.fits') - filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*sci33.cube.I.manual*image.pbcor.statcont.contsub.fits') - filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw33.cube.I.iter1.reclean*image.pbcor.statcont.contsub.fits') + #filelist = glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw33.cube.I.iter1.image.pbcor.statcont.contsub.fits') + #filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw33.cube.I.manual*image.pbcor.statcont.contsub.fits') + #filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*sci33.cube.I.manual*image.pbcor.statcont.contsub.fits') + #filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw33.cube.I.iter1.reclean*image.pbcor.statcont.contsub.fits') # this next line is only for field am and should be removed b/c we need the .pb/.weight #filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/manual/*33.cube.I.manual.pbcor.fits') + # TODO when feathered data are ready + filelist = glob.glob('/orange/adamginsburg/ACES/upload/Feather_12m_7m_TP/SPW33/cubes/Sgr_A_st_*.TP_7M_12M_feather_all.SPW_33.image.statcont.contsub.fits') print(f"Found {len(filelist)} CS 2-1-containing spw33 files") check_files(filelist) - weightfilelist = [fn.replace(".image.pbcor.statcont.contsub.fits", ".weight").replace(".pbcor.statcont.contsub.fits", ".weight") for fn in filelist] + weightfilelist = [get_weightfile(fn, spw=33) for fn in filelist] for fn in weightfilelist: assert os.path.exists(fn) @@ -520,12 +571,13 @@ def make_giant_mosaic_cube_sio21(**kwargs): filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw27.cube.I.manual*image.pbcor.statcont.contsub.fits') filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*sci27.cube.I.manual*image.pbcor.statcont.contsub.fits') # should exist in cal/working filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/manual/*27.cube.I.manual.pbcor.fits') + filelist = glob.glob('/orange/adamginsburg/ACES/upload/Feather_12m_7m_TP/SPW27/cubes/Sgr_A_st_*.TP_7M_12M_feather_all.SPW_27.image.statcont.contsub.fits') print(f"Found {len(filelist)} SiO 2-1-containing spw27 files") check_files(filelist) - weightfilelist = [fn.replace(".image.pbcor.statcont.contsub.fits", ".weight").replace(".pbcor.statcont.contsub.fits", ".weight") for fn in filelist] + weightfilelist = [get_weightfile(fn, spw=27) for fn in filelist] for fn in weightfilelist: assert os.path.exists(fn) @@ -537,6 +589,7 @@ def make_giant_mosaic_cube_sio21(**kwargs): cdelt_kms=cdelt_kms, cubename='SiO21', nchan=350, + # field am is 3.22" beam_threshold=3.1 * u.arcsec, channelmosaic_directory=f'{basepath}/mosaics/SiO21_Channels/', **kwargs,) @@ -549,16 +602,17 @@ def make_giant_mosaic_cube_sio21(**kwargs): def make_giant_mosaic_cube_hnco(**kwargs): - filelist = glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw31.cube.I.iter1.image.pbcor.statcont.contsub.fits') - filelist += 
glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw31.cube.I.manual*image.pbcor.statcont.contsub.fits') - filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*sci31.cube.I.manual*image.pbcor.statcont.contsub.fits') - #filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/manual/*31.cube.I.manual.pbcor.fits') + # filelist = glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw31.cube.I.iter1.image.pbcor.statcont.contsub.fits') + # filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw31.cube.I.manual*image.pbcor.statcont.contsub.fits') + # filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*sci31.cube.I.manual*image.pbcor.statcont.contsub.fits') + # filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/manual/*31.cube.I.manual.pbcor.fits') + filelist = glob.glob('/orange/adamginsburg/ACES/upload/Feather_12m_7m_TP/SPW31/cubes/Sgr_A_st_*.TP_7M_12M_feather_all.hnco43.image.statcont.contsub.fits') print(f"Found {len(filelist)} HNCO-containing spw31 files") check_files(filelist) - weightfilelist = [fn.replace(".image.pbcor.statcont.contsub.fits", ".weight").replace(".pbcor.statcont.contsub.fits", ".weight") for fn in filelist] + weightfilelist = [get_weightfile(fn, spw=31) for fn in filelist] for fn in weightfilelist: assert os.path.exists(fn) @@ -569,7 +623,7 @@ def make_giant_mosaic_cube_hnco(**kwargs): reference_frequency=restfrq, cdelt_kms=cdelt_kms, cubename='HNCO', - nchan=1200, + nchan=1, beam_threshold=3.2 * u.arcsec, channelmosaic_directory=f'{basepath}/mosaics/HNCO_Channels/', fail_if_cube_dropped=False, @@ -583,17 +637,18 @@ def make_giant_mosaic_cube_hnco(**kwargs): def make_giant_mosaic_cube_hc3n(**kwargs): - filelist = glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw35.cube.I.iter1.image.pbcor.statcont.contsub.fits') - filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw35.cube.I.manual*image.pbcor.statcont.contsub.fits') - filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*sci35.cube.I.manual*image.pbcor.statcont.contsub.fits') - filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw35.cube.I.iter1.reclean.image.pbcor.statcont.contsub.fits') + #filelist = glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw35.cube.I.iter1.image.pbcor.statcont.contsub.fits') + #filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw35.cube.I.manual*image.pbcor.statcont.contsub.fits') + #filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*sci35.cube.I.manual*image.pbcor.statcont.contsub.fits') + #filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw35.cube.I.iter1.reclean.image.pbcor.statcont.contsub.fits') #filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/manual/*35.cube.I.manual.pbcor.fits') + filelist = glob.glob('/orange/adamginsburg/ACES/upload/Feather_12m_7m_TP/SPW35/cubes/Sgr_A_st_*.TP_7M_12M_feather_all.SPW_35.image.statcont.contsub.fits') print(f"Found {len(filelist)} HC3N-containing spw35 files") check_files(filelist) - weightfilelist = [fn.replace(".image.pbcor.statcont.contsub.fits", ".weight").replace(".pbcor.statcont.contsub.fits", ".weight") for fn in filelist] + weightfilelist = [get_weightfile(fn, spw=35) for fn in 
filelist] for fn in weightfilelist: assert os.path.exists(fn) @@ -649,16 +704,17 @@ def make_giant_mosaic_cube_hnco_TP7m12m(**kwargs): def make_giant_mosaic_cube_hcop(**kwargs): - filelist = glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw29.cube.I.iter1.image.pbcor.statcont.contsub.fits') - filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw29.cube.I.manual*image.pbcor.statcont.contsub.fits') - filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*sci29.cube.I.manual*image.pbcor.statcont.contsub.fits') + #filelist = glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw29.cube.I.iter1.image.pbcor.statcont.contsub.fits') + #filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw29.cube.I.manual*image.pbcor.statcont.contsub.fits') + #filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*sci29.cube.I.manual*image.pbcor.statcont.contsub.fits') #filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/manual/*29.cube.I.manual.pbcor.fits') + filelist = glob.glob('/orange/adamginsburg/ACES/upload/Feather_12m_7m_TP/SPW29/cubes/Sgr_A_st_*.TP_7M_12M_feather_all.hco+10.image.statcont.contsub.fits') print(f"Found {len(filelist)} HCOP-containing spw29 files") check_files(filelist) - weightfilelist = [fn.replace(".image.pbcor.statcont.contsub.fits", ".weight").replace(".pbcor.statcont.contsub.fits", ".weight") for fn in filelist] + weightfilelist = [get_weightfile(fn, spw=29) for fn in filelist] for fn in weightfilelist: assert os.path.exists(fn) @@ -721,18 +777,19 @@ def make_giant_mosaic_cube_hnco_TP7m(**kwargs): def make_giant_mosaic_cube_so32(**kwargs): - filelist = glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw33.cube.I.iter1.image.pbcor.statcont.contsub.fits') - filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw33.cube.I.manual*image.pbcor.statcont.contsub.fits') - filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*sci33.cube.I.manual*image.pbcor.statcont.contsub.fits') - filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw33.cube.I.iter1.reclean*image.pbcor.statcont.contsub.fits') - #filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/manual/*33.cube.I.manual.pbcor.fits') - filelist = sorted(filelist) + #filelist = glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw33.cube.I.iter1.image.pbcor.statcont.contsub.fits') + #filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw33.cube.I.manual*image.pbcor.statcont.contsub.fits') + #filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*sci33.cube.I.manual*image.pbcor.statcont.contsub.fits') + #filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/calibrated/working/*spw33.cube.I.iter1.reclean*image.pbcor.statcont.contsub.fits') + ##filelist += glob.glob(f'{basepath}/rawdata/2021.1.00172.L/s*/g*/m*/manual/*33.cube.I.manual.pbcor.fits') + #filelist = sorted(filelist) + filelist = sorted(glob.glob('/orange/adamginsburg/ACES/upload/Feather_12m_7m_TP/SPW33/cubes/Sgr_A_st_*.TP_7M_12M_feather_all.SPW_33.image.statcont.contsub.fits')) print(f"Found {len(filelist)} SO 3-2-containing spw33 files") check_files(filelist) - weightfilelist = [fn.replace(".image.pbcor.statcont.contsub.fits", 
".weight").replace(".pbcor.statcont.contsub.fits", ".weight") for fn in filelist] + weightfilelist = [get_weightfile(fn, spw=33) for fn in filelist] for fn in weightfilelist: assert os.path.exists(fn) diff --git a/aces/imaging/parallel_tclean.py b/aces/imaging/parallel_tclean.py index aaccbc9d..53fdf0bb 100644 --- a/aces/imaging/parallel_tclean.py +++ b/aces/imaging/parallel_tclean.py @@ -254,7 +254,7 @@ def test_valid(vis): scriptname = os.path.join(workdir, f"{imagename}_parallel_script.py") with open(scriptname, 'w') as fh: fh.write(script) - print(f"Wrote sript {scriptname}") + print(f"Wrote script {scriptname}") diff --git a/aces/imaging/write_tclean_scripts.py b/aces/imaging/write_tclean_scripts.py index 7d8cefe1..128a4762 100644 --- a/aces/imaging/write_tclean_scripts.py +++ b/aces/imaging/write_tclean_scripts.py @@ -20,6 +20,7 @@ import string from aces import conf from aces.pipeline_scripts.merge_tclean_commands import get_commands +from aces.analysis.parse_contdotdat import contchannels_to_linechannels if os.getenv('DUMMYRUN'): def tclean(**kwargs): @@ -121,6 +122,9 @@ def main(): imtype = tcpars['specmode'] + # force pbcor + tcpars['pbcor'] = True + tempdir_name = f'{temp_workdir}/{field}_{spwsel}_{imtype}_{config}_{mous}' assert any(x in tempdir_name for x in ('7M', 'TM1', 'TP')) @@ -153,24 +157,110 @@ def main(): if 'aggregate' in spwsel: def rename(x): + # x = x.replace(".ms", f"_{spwsel}.ms") + return os.path.join(tempdir_name, os.path.basename(x)) + + def rename_agg(x): + x = x.replace(".ms", f"_{spwsel}.ms") return os.path.join(tempdir_name, os.path.basename(x)) + # This was a great idea, but it totally didn't work because there are multiple steps involved + # You can't just split out the channels you want, you have to flag them, then average them, + # then remove the flags. + + # Alternative approach: + # copy over MS + # flag out non-continuum channels [requires inverting selection] + # split + splitcmd = copycmds = "\n".join( ["import shutil"] + [textwrap.dedent( f""" - try: - logprint('Copying {x} to {rename(x)}.') - shutil.copytree('{x}', '{rename(x)}') - logprint('Successfully copied {x} to {rename(x)}.') - except FileExistsError as ex: - logprint(f'MS file already copied: {{ex}}. Proceeding.') + if not os.path.exists("{rename(x)}"): + try: + logprint('Copying {x} to {rename(x)}.') + shutil.copytree('{x}', '{rename(x)}') + logprint('Successfully copied {x} to {rename(x)}.') + except FileExistsError as ex: + logprint(f'MS file already copied: {{ex}}. 
Proceeding.') """) for x in tcpars['vis'] ]) + splitcmd += "\n\n################\n\n" + + splitcmd += "\n".join( + [textwrap.dedent( + f""" + from aces.analysis.parse_contdotdat import contchannels_to_linechannels + + if not os.path.exists("{rename_agg(x)}"): + freqs = {{}} + visfile = "{rename(x)}" + assert 'orange' not in visfile + + spw_selection = "{spw_selection}" + spws_for_loop = [int(x.split(":")[0]) for x in spw_selection.split(",")] + contsel = ";".join([x.split(":")[1] for x in spw_selection.split(",")]) + spws_to_split = ",".join(map(str, spws_for_loop)) + + ms.open(visfile) + for spw in spws_for_loop: + try: + freqs[spw] = ms.cvelfreqs(spwid=[spw], outframe='LSRK') + except TypeError: + freqs[spw] = ms.cvelfreqs(spwids=[spw], outframe='LSRK') + + linechannels, linefracs = contchannels_to_linechannels(contsel, freqs, return_fractions=True) + + logprint("Line fractions are: {{0}}".format(linefracs)) + logprint("Cont channels are: {{0}}".format(contsel)) + logprint("Line channels are: {{0}}".format(linechannels)) + logprint("spws to split are: {{0}}".format(spws_to_split)) + flagdata(vis=visfile, mode='manual', spw=linechannels, flagbackup=False, + datacolumn='{tcpars['datacolumn'] if 'datacolumn' in tcpars else 'corrected'}') + + result = split(vis=visfile, + outputvis="{rename_agg(x)}", + spw=spws_to_split, + width=10, + datacolumn='{tcpars['datacolumn'] if 'datacolumn' in tcpars else 'corrected'}', + field='Sgr_A_star',) + + if not os.path.exists("{rename_agg(x)}"): + logprint("Output vis {rename_agg(x)} does not exist - forcing datacolumn='data'") + logprint("USING DATACOLUMN=DATA! This could be a problem!") + flagdata(vis=visfile, mode='manual', spw=linechannels, flagbackup=False, datacolumn='data') + result = split(vis=visfile, + outputvis="{rename_agg(x)}", + width=10, + spw=spws_to_split, + datacolumn='data', + field='Sgr_A_star',) + + if not os.path.exists("{rename_agg(x)}"): + raise ValueError("Split failed") + + ms.close() + + assert 'orange' not in visfile + shutil.rmtree(visfile) + """) + for x, spw_selection in zip(tcpars['vis'], tcpars['spw']) + ] + ) + #splitcmd = copycmds = splitcmd + "\n".join( + + # now that we've split the data, we don't want to try to downselect again later + tcpars['spw'] = '' + #print(f"Reduced SPW selection to {tcpars['spw']} since the data should be appropriately split") + # hard code that parallel = False for non-MPI runs tcpars['parallel'] = False + + # all 'vis' must be renamed because of their new locations + tcpars['vis'] = [rename_agg(x) for x in tcpars["vis"]] else: spw = int(spwsel.lstrip('spw')) @@ -201,12 +291,11 @@ def rename(x): spw={spw}) """) for vis in tcpars['vis']] - # ONLY for line cubes, which are individually split out: - # the spw selection should now be 'everything in the MS' + # Only for lines tcpars['spw'] = '' - # all 'vis' must be renamed because of their new locations - tcpars['vis'] = [rename(x) for x in tcpars["vis"]] + # all 'vis' must be renamed because of their new locations + tcpars['vis'] = [rename(x) for x in tcpars["vis"]] savecmds = textwrap.dedent( f""" @@ -228,8 +317,9 @@ def savedata(): else: if os.path.exists(target): logprint(f'Removing {{target}} because it exists') - assert 'iter1' in f'{{os.path.basename(fn)}}' # sanity check - don't remove important directories! + assert ('iter1' in target) or ('cont.I.manual' in target) # sanity check - don't remove important directories! 
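# --- note (condensed sketch, not part of the patch): the aggregate-continuum
# path above can't simply split out the continuum channels, so it inverts the
# cont.dat selection: flag the *line* channels, then split-and-average what
# survives. Condensed from the generated script; assumes a CASA session
# (ms, flagdata, split in scope) with `visfile` and `spw_selection` set as in
# the generated code, and an illustrative output name:
from aces.analysis.parse_contdotdat import contchannels_to_linechannels

spws = [int(s.split(":")[0]) for s in spw_selection.split(",")]
contsel = ";".join(s.split(":")[1] for s in spw_selection.split(","))

freqs = {}
ms.open(visfile)
for spw in spws:
    # LSRK channel frequencies, so cont.dat frequency ranges map to channels
    freqs[spw] = ms.cvelfreqs(spwids=[spw], outframe='LSRK')
ms.close()

# invert the continuum selection: everything not in contsel is a line channel
linechannels, linefracs = contchannels_to_linechannels(contsel, freqs,
                                                       return_fractions=True)

flagdata(vis=visfile, mode='manual', spw=linechannels, flagbackup=False)
split(vis=visfile, outputvis=visfile.replace(".ms", "_agg.ms"),
      spw=",".join(map(str, spws)), width=10, datacolumn='corrected')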
assert realtarget != realfn + assert 'orange' not in target shutil.rmtree(target) shutil.copytree(fn, target)\n\n """) @@ -243,10 +333,11 @@ def savedata(): target = f'{tempdir_name}/{{os.path.basename(fn)}}' if os.path.exists(target): logprint(f'Removing {{target}} because it exists') - assert 'iter1' in target # sanity check - don't remove important directories! + assert ('iter1' in target) or ('cont.I.manual' in target) # sanity check - don't remove important directories! if fn.endswith('.fits'): os.remove(target) else: + assert 'orange' not in target shutil.rmtree(target) if fn.endswith('.fits'): shutil.copy(fn, '{tempdir_name}/') @@ -266,18 +357,21 @@ def savedata(): target = f'{workingpath}/{{os.path.basename(fn)}}' if os.path.exists(target): logprint(f'Removing {{target}} because it exists') - assert 'iter1' in target # sanity check - don't remove important directories! + assert ('iter1' in target) or ('cont.I.manual' in target) # sanity check - don't remove important directories! if fn.endswith('.fits'): os.remove(target) else: + assert 'orange' not in target shutil.rmtree(target) shutil.move(fn, '{workingpath}/')\n\n""") + "\n".join([f"shutil.rmtree('{tempdir_name}/{os.path.basename(x)}')" for x in tcpars['vis']]) ) # use local name instead #tcpars['imagename'] = os.path.join(tempdir_name, os.path.basename(tcpars['imagename'])) + else: + raise ValueError("Script is no longer designed to work w/o a tempdir. It might, but you should manually disable this and do some sanity checks") - print(f"Creating script for {partype} tclean in {workingpath} for sb {sbname} ") + print(f"Creating script for {partype} {spwsel} tclean in {workingpath} for sb {sbname}: {partype}_{sbname}_{spwsel}.py ") # with kwargs: \n{tcpars}") with open(f"{partype}_{sbname}_{spwsel}.py", "w") as fh: @@ -293,6 +387,7 @@ def logprint(string): print(string, flush=True) logprint(f"Casalog file is {{casalog.logfile()}}") + logprint(f'Started CASA in {os.getcwd()}') mpi_ntasks = os.getenv('mpi_ntasks') if mpi_ntasks is not None: @@ -313,7 +408,7 @@ def logprint(string): logprint(f"Temporary directory used is {{tempdir_name}}") """)) - fh.write("logprint(f'Started CASA in {os.getcwd()}')\n") + fh.write("logprint(f'Current directory is {os.getcwd()}')\n") # tclean if temp_workdir: @@ -377,6 +472,7 @@ def logprint(string): calcres=True, # sadly must always calcres, even when redundant **tclean_pars) savedata()\n + logprint("Done with clean loop")\n """)) expected_imname = (os.path.basename(tcpars['imagename']) + @@ -386,6 +482,7 @@ def logprint(string): check_exists = textwrap.dedent(f""" if not os.path.exists('{expected_imname}'): + print(os.listdir()) raise IOError('Expected output file {expected_imname} does not exist.') sys.exit(1) \n\n""") diff --git a/aces/pipeline_scripts/aggregate_high_tclean_commands.json b/aces/pipeline_scripts/aggregate_high_tclean_commands.json index d658f97e..62600f4c 100644 --- a/aces/pipeline_scripts/aggregate_high_tclean_commands.json +++ b/aces/pipeline_scripts/aggregate_high_tclean_commands.json @@ -10307,6 +10307,7 @@ 1400, 3000 ], + "pbcor": true, "cell": "0.3arcsec", "deconvolver": "mtmfs", "niter": 1000000, @@ -10344,6 +10345,7 @@ 1400, 3000 ], + "pbcor": true, "cell": "0.3arcsec", "deconvolver": "mtmfs", "niter": 1000000, diff --git a/aces/pipeline_scripts/override_tclean_commands.json b/aces/pipeline_scripts/override_tclean_commands.json index f2a9770e..531c8d4c 100644 --- a/aces/pipeline_scripts/override_tclean_commands.json +++ b/aces/pipeline_scripts/override_tclean_commands.json 
@@ -37,6 +37,7 @@ 1400, 3000 ], + "pbcor": true, "cell": "0.3arcsec", "deconvolver": "mtmfs", "niter": 1000000, @@ -1104,6 +1105,13 @@ } }, "Sgr_A_st_ag_03_TM1": { + "tclean_cont_pars": { + "aggregate_low": { + "datacolumn": "data", + "niter": 20000, + "cyclefactor": 3 + } + }, "tclean_cube_pars": { "spw33": { "nchan": -1, @@ -1559,10 +1567,16 @@ "Sgr_A_st_b_03_TM1": { "tclean_cont_pars": { "aggregate": { - "threshold": "0.010Jy" + "threshold": "0.010Jy", + "datacolumn": "data" + }, + "aggregate_low": { + "threshold": "0.010Jy", + "datacolumn": "data" }, "aggregate_high": { - "threshold": "0.010Jy" + "threshold": "0.010Jy", + "datacolumn": "data" } }, "tclean_cube_pars": { @@ -1702,6 +1716,17 @@ } }, "Sgr_A_st_aj_03_TM1": { + "tclean_cont_pars": { + "aggregate_low": { + "datacolumn": "data" + }, + "aggregate_high": { + "datacolumn": "data" + }, + "aggregate": { + "datacolumn": "data" + } + }, "tclean_cube_pars": { "spw33": { "vis": [ @@ -1851,6 +1876,17 @@ } }, "Sgr_A_st_p_03_TM1": { + "tclean_cont_pars": { + "aggregate_low": { + "datacolumn": "data" + }, + "aggregate_high": { + "datacolumn": "data" + }, + "aggregate": { + "datacolumn": "data" + } + }, "tclean_cube_pars": { "spw25": { "usemask": "pb", @@ -2008,6 +2044,13 @@ } }, "Sgr_A_st_i_03_TM1": { + "tclean_cont_pars": { + "aggregate_low": { + "datacolumn": "data", + "niter": 20000, + "cyclefactor": 3 + } + }, "tclean_cube_pars": { "spw25": { "nchan": 1912, @@ -2186,6 +2229,17 @@ } }, "Sgr_A_st_v_03_TM1": { + "tclean_cont_pars": { + "aggregate_low": { + "datacolumn": "data" + }, + "aggregate_high": { + "datacolumn": "data" + }, + "aggregate": { + "datacolumn": "data" + } + }, "tclean_cube_pars": { "spw25": { "nchan": 1914, @@ -2339,6 +2393,21 @@ } }, "Sgr_A_st_aa_03_TM1": { + "tclean_cont_pars": { + "aggregate": { + "cyclefactor": 3, + "pblimit": 0.1 + }, + "aggregate_high": { + "cyclefactor": 3, + "pblimit": 0.1 + + }, + "aggregate_low": { + "cyclefactor": 3, + "pblimit": 0.1 + } + }, "tclean_cube_pars": { "spw25": { "nchan": 1912, @@ -2434,6 +2503,17 @@ } }, "Sgr_A_st_ah_03_TM1": { + "tclean_cont_pars": { + "aggregate_low": { + "datacolumn": "data" + }, + "aggregate_high": { + "datacolumn": "data" + }, + "aggregate": { + "datacolumn": "data" + } + }, "tclean_cube_pars": { "spw25": { "nchan": 1912, @@ -3433,6 +3513,17 @@ } }, "Sgr_A_st_l_03_TM1": { + "tclean_cont_pars": { + "aggregate_low": { + "datacolumn": "data" + }, + "aggregate_high": { + "datacolumn": "data" + }, + "aggregate": { + "datacolumn": "data" + } + }, "tclean_cube_pars": { "spw25": { "nchan": 1912, diff --git a/aces/retrieval_scripts/retrieve_data.py b/aces/retrieval_scripts/retrieve_data.py index 2f06aa7a..b894cd92 100644 --- a/aces/retrieval_scripts/retrieve_data.py +++ b/aces/retrieval_scripts/retrieve_data.py @@ -6,12 +6,13 @@ import sys -def main(): - if len(sys.argv) > 1: - username = sys.argv[1] - print(f"Using username {username}") - else: - username = six.moves.input("Username: ") +def main(username=None): + if username is None: + if len(sys.argv) > 1: + username = sys.argv[1] + print(f"Using username {username}") + else: + username = six.moves.input("Username: ") for server_url in ('https://almascience.eso.org', 'https://almascience.nrao.edu', 'https://almascience.nao.ac.jp'): print(f"Logging in to ALMA at server {server_url}", flush=True) diff --git a/setup.cfg b/setup.cfg index 5f6e2a93..66ebd3e3 100644 --- a/setup.cfg +++ b/setup.cfg @@ -14,7 +14,7 @@ github_project = ACES-CMZ/reduction_ACES [options] zip_safe = False packages = find: -python_requires = 
>=3.8
+python_requires = >=3.6
 setup_requires = setuptools_scm
 install_requires =
     astropy
@@ -22,7 +22,6 @@ install_requires =
     scipy
     numpy
     matplotlib
-    ghapi
     radio-beam
     spectral-cube
     regions
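# --- note (condensed sketch, not part of the patch): the weight-file lookup
# that replaces the string-munged weightfile names in mosaic_12m.py above.
# The assert fails loudly on zero or multiple matches rather than letting a
# stale weight image be silently mosaicked. Condensed from get_weightfile,
# minus the sous/gous/mous fallback (function name is a placeholder):
import glob
import os

def find_weightfile(imagename, spw,
                    suffixes=('.cube.I.iter1.reclean.weight',
                              '.cube.I.iter1.weight',
                              '.cube.I.manual.weight',
                              '.cube.I.manual.reclean.weight')):
    """Locate the single .weight image for `spw` next to `imagename`,
    trying both the '.spwNN' and '_sciNN' naming conventions."""
    hits = []
    for pre in ('.spw', '_sci'):
        for suf in suffixes:
            hits += glob.glob(os.path.join(os.path.dirname(imagename),
                                           f"*{pre}{spw}*{suf}"))
    assert len(hits) == 1, f"expected exactly one weight file, got {hits}"
    return hits[0]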
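# --- note (illustrative sketch, not part of the patch): the rel_threshold
# logic added to get_peak scales the cut to each field's own peak, which
# suits pb/weight maps whose absolute scale varies from field to field,
# where the old fixed threshold=0.5 did not. The core of it in plain numpy
# (function name is a placeholder):
import numpy as np

def apply_rel_threshold(arr, rel_threshold=0.25):
    """Zero everything below rel_threshold times the array's peak."""
    threshold = rel_threshold * np.nanmax(arr)
    out = arr.copy()
    out[out < threshold] = 0
    return out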
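# --- note (usage sketch, not part of the patch): the 7m+12m feathering added
# to reimaged() combines the two mosaics in the Fourier domain; feather_simple
# returns a complex-valued array, hence the .real when writing. Sketch with
# shortened placeholder filenames (the patch uses full basepath paths):
from astropy.io import fits
from uvcombine import feather_simple

hires = '12m_continuum_commonbeam_circular_reimaged_mosaic.fits'  # placeholder
lores = '7m_continuum_commonbeam_circular_mosaic.fits'            # placeholder

feathered = feather_simple(hires, lores)  # complex array on the hires grid
fits.PrimaryHDU(data=feathered.real,
                header=fits.getheader(hires),
                ).writeto('feather_7m12m_continuum_mosaic.fits', overwrite=True)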