From 1c054d892777b5271fb5c3664b80e99a6518e6a8 Mon Sep 17 00:00:00 2001 From: rcooper295 <57679854+rcooper295@users.noreply.github.com> Date: Mon, 25 Mar 2024 15:37:39 -0400 Subject: [PATCH] JP-3153: post-commissioning AMI3 updates (#7862) Co-authored-by: Brett <brettgraham@gmail.com> Co-authored-by: Ned Molter <emolter@users.noreply.github.com> --- CHANGES.rst | 15 +- docs/jwst/ami_analyze/description.rst | 95 ++- docs/jwst/ami_average/description.rst | 9 + docs/jwst/ami_normalize/description.rst | 36 +- docs/jwst/data_products/product_types.rst | 100 +-- docs/jwst/data_products/science_products.rst | 137 ++-- docs/jwst/pipeline/calwebb_ami3.rst | 93 +-- docs/jwst/references_general/nrm_reffile.inc | 43 ++ .../jwst/references_general/nrm_selection.inc | 13 + .../references_general/references_general.rst | 4 - jwst/ami/__init__.py | 9 +- jwst/ami/ami_analyze.py | 187 +++-- jwst/ami/ami_analyze_step.py | 121 ++-- jwst/ami/ami_average.py | 2 - jwst/ami/ami_average_step.py | 1 + jwst/ami/ami_normalize.py | 22 +- jwst/ami/ami_normalize_step.py | 9 +- jwst/ami/analyticnrm2.py | 22 - jwst/ami/bp_fix.py | 394 +++++++++++ jwst/ami/find_affine2d_parameters.py | 6 +- jwst/ami/hexee.py | 10 - jwst/ami/hextransformee.py | 59 +- jwst/ami/instrument_data.py | 346 +++++++--- jwst/ami/leastsqnrm.py | 386 +++++++---- jwst/ami/lg_model.py | 331 +++++---- jwst/ami/mask_definitions.py | 17 +- jwst/ami/matrix_dft.py | 2 - jwst/ami/nrm_core.py | 639 ++++-------------- jwst/ami/oifits.py | 590 ++++++++++++++++ jwst/ami/tests/test_ami_analyze.py | 42 +- jwst/ami/tests/test_ami_interface.py | 73 +- jwst/ami/tests/test_ami_leastsqnrm.py | 15 + jwst/ami/tests/test_ami_utils.py | 28 + jwst/ami/utils.py | 590 +++++++++++----- jwst/ami/webb_psf.py | 81 --- jwst/associations/lib/rules_level2_base.py | 6 +- jwst/associations/lib/rules_level3_base.py | 6 +- jwst/lib/suffix.py | 12 +- jwst/pipeline/calwebb_ami3.py | 101 +-- jwst/regtest/test_niriss_ami3.py | 28 +- pyproject.toml | 1 + 41 files 
changed, 2949 insertions(+), 1732 deletions(-) create mode 100644 docs/jwst/references_general/nrm_reffile.inc create mode 100644 docs/jwst/references_general/nrm_selection.inc create mode 100644 jwst/ami/bp_fix.py create mode 100644 jwst/ami/oifits.py create mode 100644 jwst/ami/tests/test_ami_leastsqnrm.py create mode 100644 jwst/ami/tests/test_ami_utils.py delete mode 100644 jwst/ami/webb_psf.py diff --git a/CHANGES.rst b/CHANGES.rst index 61ba8c5718..e7cc7a8cc5 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -9,9 +9,22 @@ ami --- -- Replaced use of deprecated ``scipy.integrate.simps`` +- Replaced use of deprecated ``scipy.integrate.simps`` with ``scipy.integrate.simpson``. [#8320] +- Overhaul of AMI processing. See documentation for full details. [#7862] + +- Additional optional input arguments for greater user processing flexibility. + See documentation for details. [#7862] + +- Bad pixel correction applied to data using new NRM reference file to calculate + complex visibility support (M. Ireland method implemented by J. Kammerer). [#7862] + +- Make ``AmiAnalyze`` and ``AmiNormalize`` output conform to the OIFITS standard. [#7862] + +- Disable ``AmiAverage`` step. [#7862] + + associations ------------ diff --git a/docs/jwst/ami_analyze/description.rst b/docs/jwst/ami_analyze/description.rst index ae2fa25e75..c0e2d27016 100644 --- a/docs/jwst/ami_analyze/description.rst +++ b/docs/jwst/ami_analyze/description.rst @@ -20,7 +20,9 @@ the SUB80 subarray, in order to reduce execution time. Arguments --------- -The ``ami_analyze`` step has four optional arguments: +The ``ami_analyze`` step has several optional arguments. In most cases the +default arguments will be suitable but more advanced users may wish to test +other options: :--oversample: The oversampling factor to be used in the model fit (default=3). 
@@ -33,48 +35,89 @@ The ``ami_analyze`` step has four optional arguments: :--rotation_search: List of start, stop, and step values that define the list of rotation search values. The default setting of '-3 3 1' results in search values of [-3, -2, -1, 0, 1, 2, 3]. - + +:--bandpass: Synphot spectrum or suitable array to override filter/source + (default=None) + +:--usebp: If True, exclude pixels marked DO_NOT_USE from fringe fitting + (default=True) + +:--firstfew: If not None, process only the first few integrations (default=None) + +:--chooseholes: If not None, fit only certain fringes e.g. ['B4','B5','B6','C2'] + (default=None) + +:--affine2d: User-defined Affine2d object (default=None) + +:--run_bpfix: Run Fourier bad pixel fix on cropped data (default=True) + Inputs ------ -2D calibrated image +3D calibrated image ^^^^^^^^^^^^^^^^^^^ -:Data model: `~jwst.datamodels.ImageModel` -:File suffix: _cal +:Data model: `~jwst.datamodels.DataModel` +:File suffix: _calints -The ``ami_analyze`` step takes a single calibrated image as input, which should be -the "_cal" product resulting from :ref:`calwebb_image2 <calwebb_image2>` processing. +The ``ami_analyze`` step takes a single calibrated image cube as input, which should be +the "_calints" product resulting from :ref:`calwebb_image2 <calwebb_image2>` processing. Multiple exposures can be processed via use of an ASN file that is used as input -to the :ref:`calwebb_ami3 <calwebb_ami3>` pipeline. The ``ami_analyze`` step itself does -not accept an ASN as input. +to the :ref:`calwebb_ami3 <calwebb_ami3>` pipeline. **Note:** The ``ami_analyze`` step will also +accept a 2D "_cal" product but errors will not be computed in the output. +The ``ami_analyze`` step itself does not accept an ASN as input. Outputs ------- +The ``ami_analyze`` step produces three output files. 
or "_amilg" product type suffix
The interferometric observables
until the effects of averaging multiple exposures have been more thoroughly examined.
+Interferometric Observables +^^^^^^^^^^^^^^^^^^^^^^^^^^^ +:Data model: `~jwst.datamodels.AmiOIModel` +:File suffix: _ami-oi.fits and/or _psf-ami-oi.fits + +The ``ami_normalize`` step takes two inputs: the first is the +interferometric observables for a science target and the second +is the interferometric observables for the PSF target. These should +be the _ami-oi.fits and _psf-ami-oi.fits products resulting from the +:ref:`ami_analyze <ami_analyze_step>` step, or two _ami-oi.fits files if the steps +are run independently. The inputs can be in the form of filenames or +`~jwst.datamodels.AmiOIModel` data models. Outputs ------- -Normalized LG model parameters -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -:Data model: `~jwst.datamodels.AmiLgModel` -:File suffix: _aminorm +Normalized Interferometric Observables +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +:Data model: `~jwst.datamodels.AmiOIModel` +:File suffix: _aminorm-oi.fits -The output is a new LG product for the science target in which the closure -phases and fringe amplitudes have been normalized using the PSF target +The output is a new set of interferometric observables for the science target +in which the closure phases and fringe amplitudes have been normalized using the PSF target closure phases and fringe amplitudes. The remaining components of the science target data model are left unchanged. The output file name syntax is source-based, using the product name specified in the input ASN file and having a product type -of "_aminorm", e.g. "jw87600-a3001_t001_niriss_f480m-nrm_aminorm.fits." +of "_aminorm-oi.fits", e.g. "jw87600-a3001_t001_niriss_f480m-nrm_aminorm-oi.fits." 
Reference Files --------------- diff --git a/docs/jwst/data_products/product_types.rst b/docs/jwst/data_products/product_types.rst index 149ac07534..d4db56c18d 100644 --- a/docs/jwst/data_products/product_types.rst +++ b/docs/jwst/data_products/product_types.rst @@ -72,54 +72,56 @@ Stage 2 Data Products Stage 3 Data Products +++++++++++++++++++++ -+------------------------------------------------+----------------------+----------------------------+------+-----------------------+--------------------------------------+ -| Pipeline | Input | Outputs | Base | Units | | Description | -+================================================+======================+============================+======+=======================+======================================+ -| :ref:`calwebb_image3 <calwebb_image3>` | :ref:`cal <cal>` | :ref:`crf <crf>` | Exp | MJy/sr, MJy [#1]_ | | 2-D CR-flagged calibrated data | -| | +----------------------------+------+ +--------------------------------------+ -| | | :ref:`i2d <i2d>` | Src | | | 2-D resampled imaging data | -| | +----------------------------+ +-----------------------+--------------------------------------+ -| | | :ref:`cat <cat>` | | N/A | | Source catalog | -| | +----------------------------+ +-----------------------+--------------------------------------+ -| | | :ref:`segm <segm>` | | N/A | | Segmentation map | -+------------------------------------------------+----------------------+----------------------------+------+-----------------------+--------------------------------------+ -| :ref:`calwebb_spec3 <calwebb_spec3>` | :ref:`cal <cal>` | :ref:`crf <crf>` | Exp | MJy/sr, MJy [#1]_ | | 2-D CR-flagged calibrated data | -| | +----------------------------+------+ +--------------------------------------+ -| | | :ref:`s2d <s2d>` | Src | | | 2-D resampled spectroscopic data; | -| | | | | | | Non-IFU | -| | +----------------------------+ | +--------------------------------------+ -| | | :ref:`s3d <s3d>` | | | | 3-D resampled spectroscopic 
data; | -| | | | | | | NIRSpec IFU and MIRI MRS | -| | +----------------------------+ +-----------------------+--------------------------------------+ -| | | :ref:`x1d <x1d>` | | various | | 1-D extracted spectroscopic data | -| | +----------------------------+ +-----------------------+--------------------------------------+ -| | | :ref:`c1d <c1d>` | | various | | 1-D combined spectroscopic data | -+------------------------------------------------+----------------------+----------------------------+------+-----------------------+--------------------------------------+ -| :ref:`calwebb_ami3 <calwebb_ami3>` | :ref:`cal <cal>` | :ref:`ami <ami>` | Exp | N/A | | Fringe parameters (per exposure) | -| | +----------------------------+------+ +--------------------------------------+ -| | | :ref:`amiavg <ami>` | Src | | | Averaged fringe parameters | -| | +----------------------------+ | +--------------------------------------+ -| | | :ref:`aminorm <ami>` | | | | Normalized fringe parameters | -+------------------------------------------------+----------------------+----------------------------+------+-----------------------+--------------------------------------+ -| :ref:`calwebb_coron3 <calwebb_coron3>` | :ref:`calints <cal>` | :ref:`crfints <crf>` | Exp | MJy/sr, MJy [#1]_ | | 3-D CR-flagged calibrated data | -| | +----------------------------+------+ +--------------------------------------+ -| | | :ref:`psfstack <psfstack>` | Src | | | PSF library images | -| | +----------------------------+------+ +--------------------------------------+ -| | | :ref:`psfalign <psfalign>` | Exp | | | Aligned PSF images | -| | +----------------------------+------+ +--------------------------------------+ -| | | :ref:`psfsub <psfsub>` | Exp | | | PSF-subtracted images | -| | +----------------------------+------+ +--------------------------------------+ -| | | :ref:`i2d <i2d>` | Src | | | 2-D resampled PSF-subtracted image | 
-+------------------------------------------------+----------------------+----------------------------+------+-----------------------+--------------------------------------+ -| :ref:`calwebb_tso3 <calwebb_tso3>` | :ref:`calints <cal>` | :ref:`crfints <crfints>` | Exp | MJy/sr, MJy [#1]_ | | 3-D CR-flagged calibrated data | -| | +----------------------------+------+-----------------------+--------------------------------------+ -| | | :ref:`phot <phot>` | Src | N/A | | TSO imaging photometry catalog | -| | +----------------------------+ +-----------------------+--------------------------------------+ -| | | :ref:`x1dints <x1dints>` | | various | | TSO 1-D extracted spectra | -| | +----------------------------+ +-----------------------+--------------------------------------+ -| | | :ref:`whtlt <whtlt>` | | N/A | | TSO spectral white-light catalog | -+------------------------------------------------+----------------------+----------------------------+------+-----------------------+--------------------------------------+ -| :ref:`calwebb_wfs-image3 <calwebb_wfs-image3>` | :ref:`cal <cal>` | :ref:`wfscmb <wfscmb>` | Src | MJy/sr, MJy [#1]_ | | 2-D combined WFS&C image | -+------------------------------------------------+----------------------+----------------------------+------+-----------------------+--------------------------------------+ ++------------------------------------------------+----------------------+---------------------------------+------+-----------------------+--------------------------------------+ +| Pipeline | Input | Outputs | Base | Units | | Description | ++================================================+======================+=================================+======+=======================+======================================+ +| :ref:`calwebb_image3 <calwebb_image3>` | :ref:`cal <cal>` | :ref:`crf <crf>` | Exp | MJy/sr, MJy [#1]_ | | 2-D CR-flagged calibrated data | +| | +---------------------------------+------+ 
+--------------------------------------+ +| | | :ref:`i2d <i2d>` | Src | | | 2-D resampled imaging data | +| | +---------------------------------+ +-----------------------+--------------------------------------+ +| | | :ref:`cat <cat>` | | N/A | | Source catalog | +| | +---------------------------------+ +-----------------------+--------------------------------------+ +| | | :ref:`segm <segm>` | | N/A | | Segmentation map | ++------------------------------------------------+----------------------+---------------------------------+------+-----------------------+--------------------------------------+ +| :ref:`calwebb_spec3 <calwebb_spec3>` | :ref:`cal <cal>` | :ref:`crf <crf>` | Exp | MJy/sr, MJy [#1]_ | | 2-D CR-flagged calibrated data | +| | +---------------------------------+------+ +--------------------------------------+ +| | | :ref:`s2d <s2d>` | Src | | | 2-D resampled spectroscopic data; | +| | | | | | | Non-IFU | +| | +---------------------------------+ | +--------------------------------------+ +| | | :ref:`s3d <s3d>` | | | | 3-D resampled spectroscopic data; | +| | | | | | | NIRSpec IFU and MIRI MRS | +| | +---------------------------------+ +-----------------------+--------------------------------------+ +| | | :ref:`x1d <x1d>` | | various | | 1-D extracted spectroscopic data | +| | +---------------------------------+ +-----------------------+--------------------------------------+ +| | | :ref:`c1d <c1d>` | | various | | 1-D combined spectroscopic data | ++------------------------------------------------+----------------------+---------------------------------+------+-----------------------+--------------------------------------+ +| :ref:`calwebb_ami3 <calwebb_ami3>` | :ref:`calints <cal>` | :ref:`ami-oi <ami-oi>` | Exp | N/A | | Averaged observables | +| | +---------------------------------+ +-----------------------+--------------------------------------+ +| | | :ref:`amimulti-oi <amimulti-oi>`| | N/A | | Observables (per integration) | +| | 
+---------------------------------+ +-----------------------+--------------------------------------+ +| | | :ref:`amilg <amilg>` | | N/A | | Fringe parameters, best-fit models | +| | +---------------------------------+------+-----------------------+--------------------------------------+ +| | | :ref:`aminorm-oi <aminorm-oi>` | Src | | | Normalized observables | ++------------------------------------------------+----------------------+---------------------------------+------+-----------------------+--------------------------------------+ +| :ref:`calwebb_coron3 <calwebb_coron3>` | :ref:`calints <cal>` | :ref:`crfints <crf>` | Exp | MJy/sr, MJy [#1]_ | | 3-D CR-flagged calibrated data | +| | +---------------------------------+------+ +--------------------------------------+ +| | | :ref:`psfstack <psfstack>` | Src | | | PSF library images | +| | +---------------------------------+------+ +--------------------------------------+ +| | | :ref:`psfalign <psfalign>` | Exp | | | Aligned PSF images | +| | +---------------------------------+------+ +--------------------------------------+ +| | | :ref:`psfsub <psfsub>` | Exp | | | PSF-subtracted images | +| | +---------------------------------+------+ +--------------------------------------+ +| | | :ref:`i2d <i2d>` | Src | | | 2-D resampled PSF-subtracted image | ++------------------------------------------------+----------------------+---------------------------------+------+-----------------------+--------------------------------------+ +| :ref:`calwebb_tso3 <calwebb_tso3>` | :ref:`calints <cal>` | :ref:`crfints <crfints>` | Exp | MJy/sr, MJy [#1]_ | | 3-D CR-flagged calibrated data | +| | +---------------------------------+------+-----------------------+--------------------------------------+ +| | | :ref:`phot <phot>` | Src | N/A | | TSO imaging photometry catalog | +| | +---------------------------------+ +-----------------------+--------------------------------------+ +| | | :ref:`x1dints <x1dints>` | | various | | TSO 
1-D extracted spectra | +| | +---------------------------------+ +-----------------------+--------------------------------------+ +| | | :ref:`whtlt <whtlt>` | | N/A | | TSO spectral white-light catalog | ++------------------------------------------------+----------------------+---------------------------------+------+-----------------------+--------------------------------------+ +| :ref:`calwebb_wfs-image3 <calwebb_wfs-image3>` | :ref:`cal <cal>` | :ref:`wfscmb <wfscmb>` | Src | MJy/sr, MJy [#1]_ | | 2-D combined WFS&C image | ++------------------------------------------------+----------------------+---------------------------------+------+-----------------------+--------------------------------------+ .. [#1] NIRSpec and NIRISS SOSS point sources have MJy units; all others are MJy/sr diff --git a/docs/jwst/data_products/science_products.rst b/docs/jwst/data_products/science_products.rst index 75f987d9a6..eea9e2096a 100644 --- a/docs/jwst/data_products/science_products.rst +++ b/docs/jwst/data_products/science_products.rst @@ -762,50 +762,97 @@ structure shown below. based on read noise only. - ADSF: The data model meta data. -.. _ami: -.. _amiavg: -.. _aminorm: - -AMI data: ``ami``, ``amiavg``, and ``aminorm`` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -AMI derived data created by the :ref:`ami_analyze <ami_analyze_step>`, -:ref:`ami_average <ami_average_step>`, and :ref:`ami_normalize <ami_normalize_step>` steps, -as part of the :ref:`calwebb_ami3 <calwebb_ami3>` pipeline, are stored in FITS files that -contain a mixture of images and binary table extensions. The output format of all three -pipeline steps is the same, encapsulated within a `~jwst.datamodels.AmiLgModel` data model. 
-The overall layout of the corresponding FITS files is as follows: - -+-----+-------------+----------+-----------+-----------------+ -| HDU | EXTNAME | HDU Type | Data Type | Dimensions | -+=====+=============+==========+===========+=================+ -| 0 | N/A | primary | N/A | N/A | -+-----+-------------+----------+-----------+-----------------+ -| 1 | FIT | IMAGE | float32 | ncols x nrows | -+-----+-------------+----------+-----------+-----------------+ -| 2 | RESID | IMAGE | float32 | ncols x nrows | -+-----+-------------+----------+-----------+-----------------+ -| 3 | CLOSURE_AMP | BINTABLE | float64 | 1 col x 35 rows | -+-----+-------------+----------+-----------+-----------------+ -| 4 | CLOSURE_PHA | BINTABLE | float64 | 1 col x 35 rows | -+-----+-------------+----------+-----------+-----------------+ -| 5 | FRINGE_AMP | BINTABLE | float64 | 1 col x 21 rows | -+-----+-------------+----------+-----------+-----------------+ -| 6 | FRINGE_PHA | BINTABLE | float64 | 1 col x 21 rows | -+-----+-------------+----------+-----------+-----------------+ -| 7 | PUPIL_PHA | BINTABLE | float64 | 1 col x 7 rows | -+-----+-------------+----------+-----------+-----------------+ -| 8 | SOLNS | BINTABLE | float64 | 1 col x 44 rows | -+-----+-------------+----------+-----------+-----------------+ -| 9 | ASDF | BINTABLE | N/A | variable | -+-----+-------------+----------+-----------+-----------------+ - - - FIT: A 2-D image of the fitted model. - - RESID: A 2-D image of the fit residuals. - - CLOSURE_AMP: A table of closure amplitudes. - - CLOSURE_PHA: A table of closure phases. - - FRINGE_AMP: A table of fringe amplitudes. - - FRINGE_PHA: A table of fringe phases. - - PUPIL_PHA: A table of pupil phases. - - SOLNS: A table of fringe coefficients. +.. _ami-oi: +.. _amimulti-oi: +.. _amilg: +.. 
and is also contained in a
+ +The overall structure of the OIFITS files (``ami-oi``, ``amimulti-oi``, and +``aminorm-oi`` products) is as follows: + ++-----+--------------+----------+-----------+------------------+ +| HDU | EXTNAME | HDU Type | Data Type | Dimensions | ++=====+==============+==========+===========+==================+ +| 0 | PRIMARY | primary | N/A | N/A | ++-----+--------------+----------+-----------+------------------+ +| 1 | OI_ARRAY | BINTABLE | N/A | variable | ++-----+--------------+----------+-----------+------------------+ +| 2 | OI_TARGET | BINTABLE | N/A | variable | ++-----+--------------+----------+-----------+------------------+ +| 3 | OI_T3 | BINTABLE | N/A | 14 cols x 35 rows| ++-----+--------------+----------+-----------+------------------+ +| 4 | OI_VIS | BINTABLE | N/A | 12 cols x 21 rows| ++-----+--------------+----------+-----------+------------------+ +| 5 | OI_VIS2 | BINTABLE | N/A | 10 col x 21 rows | ++-----+--------------+----------+-----------+------------------+ +| 6 | OI_WAVELENGTH| BINTABLE | N/A | variable | ++-----+--------------+----------+-----------+------------------+ +| 7 | ASDF | BINTABLE | N/A | variable | ++-----+--------------+----------+-----------+------------------+ + + - OI_ARRAY: AMI subaperture information + - OI_TARGET: Target properties + - OI_T3: Table of closure amplitudes, phases + - OI_VIS: Table of visibility (fringe) amplitudes, phases + - OI_VIS2: Table of squared visibility (fringe) amplitudes + - OI_WAVELENGTH: Filter information - ADSF: The data model meta data. 
- ASDF: The data model meta data.
\ No newline at end of file diff --git a/docs/jwst/pipeline/calwebb_ami3.rst b/docs/jwst/pipeline/calwebb_ami3.rst index 435490d8c0..c3f494861f 100644 --- a/docs/jwst/pipeline/calwebb_ami3.rst +++ b/docs/jwst/pipeline/calwebb_ami3.rst @@ -17,8 +17,6 @@ The steps applied by the ``calwebb_ami3`` pipeline are shown below. +==========================================+ | :ref:`ami_analyze <ami_analyze_step>` | +------------------------------------------+ -| :ref:`ami_average <ami_average_step>` | -+------------------------------------------+ | :ref:`ami_normalize <ami_normalize_step>`| +------------------------------------------+ @@ -27,47 +25,38 @@ exposures, the pipeline will: #. apply the :ref:`ami_analyze <ami_analyze_step>` step to each input exposure independently, computing fringe parameters for each -#. apply the :ref:`ami_average <ami_average_step>` step to compute the average of the - :ref:`ami_analyze <ami_analyze_step>` results for all of the science target exposures, - and the average for all of the reference PSF results (if present) -#. apply the :ref:`ami_normalize <ami_normalize_step>` step to correct the average science - target results using the average reference PSF results (if present) +#. apply the :ref:`ami_normalize <ami_normalize_step>` step to correct the science + target results using the reference PSF results (if present) If no reference PSF target exposures are present in the input ASN file, the ``ami_normalize`` step is skipped. Arguments --------- -The ``calwebb_ami3`` pipeline has one optional argument:: - - --save_averages boolean default=False - -If set to ``True``, the results of the :ref:`ami_average <ami_average_step>` step will be saved -to a file. If not, the results of the :ref:`ami_average <ami_average_step>` step are passed -along in memory to the :ref:`ami_normalize <ami_normalize_step>` step. +The ``calwebb_ami3`` pipeline does not currently use any optional arguments. 
Inputs
------
-2D calibrated images
+3D calibrated images
^^^^^^^^^^^^^^^^^^^^
-:Data model: `~jwst.datamodels.ImageModel`
-:File suffix: _cal
+:Data model: `~jwst.datamodels.DataModel`
+:File suffix: _calints
The inputs to ``calwebb_ami3`` need to be in the form of an ASN file that lists
multiple science target exposures, and optionally reference PSF exposures as well.
-The individual exposures must be in the form of calibrated ("_cal") products from
+The individual exposures must be in the form of 3D calibrated ("_calints") products from
:ref:`calwebb_image2 <calwebb_image2>` processing.
-An example ASN file containing 2 science target and 2 reference PSF target exposures is
+An example ASN file containing one science target and one reference PSF target exposure is
shown below.
Only 1 product is defined, corresponding to the science target, with members consisting
of exposures for both the science target and the reference PSF target,
as indicated by the "exptype" values for each.
::
{"asn_type": "ami3",
- "asn_rule": "discover_Asn_AMI",
+ "asn_rule": "DMS_Level3_Base",
"program": "10005",
"asn_id": "a3001",
"target": "t001",
@@ -75,16 +64,10 @@ as indicated by the "exptype" values for each.
"products": [
{"name": "jw10005-a3001_t001_niriss_f277w-nrm",
"members": [
- {"expname": "jw10005007001_02102_00001_nis_cal.fits",
+ {"expname": "jw10005007001_02102_00001_nis_calints.fits",
"exptype": "psf"
},
- {"expname": "jw10005027001_02102_00001_nis_cal.fits",
- "exptype": "psf"
- },
- {"expname": "jw10005004001_02102_00001_nis_cal.fits",
- "exptype": "science"
- },
- {"expname": "jw10005001001_02102_00001_nis_cal.fits",
+ {"expname": "jw10005004001_02102_00001_nis_calints.fits",
"exptype": "science"
}
]
@@ -95,41 +78,27 @@ as indicated by the "exptype" values for each.
Outputs ------- -Fringe parameter tables -^^^^^^^^^^^^^^^^^^^^^^^ +Interferometric observables +^^^^^^^^^^^^^^^^^^^^^^^^^^^ +:Data model: `~jwst.datamodels.AmiOIModel` +:File suffix: _ami-oi.fits -:Data model: `~jwst.datamodels.AmiLgModel` -:File suffix: _ami For every input exposure, the fringe parameters and closure phases calculated -by the :ref:`ami_analyze <ami_analyze_step>` step are saved to an "_ami" product file, which -is a FITS table containing the fringe parameters and closure phases. Product names -use the input "_cal" exposure-based file name, with the association candidate ID -included and the product type changed to "_ami", e.g. -"jw93210001001_03101_00001_nis_a0003_ami.fits." - -Averaged fringe parameters table -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -:Data model: `~jwst.datamodels.AmiLgModel` -:File suffix: _amiavg or _psf-amiavg - -If multiple target or reference PSF exposures are used as input and the -"--save_averages" parameter is set to ``True``, the :ref:`ami_average <ami_average_step>` step -will save averaged results for the target in an "_amiavg" product and for the -reference PSF in a "_psf-amiavg" product. The file name root will use the -source-based output product name given in the ASN file. These files are the -same FITS table format as the "_ami" products. - -Normalized fringe parameters table -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -:Data model: `~jwst.datamodels.AmiLgModel` -:File suffix: _aminorm - -If reference PSF exposures are included in the input ASN, the averaged AMI results -for the target will be normalized by the averaged AMI results for the reference PSF, -via the :ref:`ami_normalize <ami_normalize_step>` step, and will be saved to an "_aminorm" -product file. This file has the same FITS table format as the "_ami" products. +by the :ref:`ami_analyze <ami_analyze_step>` step are saved to an "_ami-oi.fits" product file, +which is a FITS table of median observables over all integrations of the input file. 
+Product names use the input "_calints" exposure-based file name, with the association candidate ID +included and the product type changed to "_ami-oi.fits", e.g. +"jw93210001001_03101_00001_nis_a0003_ami-oi.fits." + +Normalized Interferometric Observables +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +:Data model: `~jwst.datamodels.AmiOIModel` +:File suffix: _aminorm-oi.fits + +If reference PSF exposures are included in the input ASN, the AMI results +for the target will be normalized by the AMI results for the reference PSF, +via the :ref:`ami_normalize <ami_normalize_step>` step, and will be saved to an "_aminorm-oi.fits" +product file. This file has the same FITS table format as the "_ami-oi.fits" products. The file name root uses the source-based output product name given in the ASN file, -e.g. "jw93210-a0003_t001_niriss_f480m-nrm_aminorm.fits." +e.g. "jw93210-a0003_t001_niriss_f480m-nrm_aminorm-oi.fits." diff --git a/docs/jwst/references_general/nrm_reffile.inc b/docs/jwst/references_general/nrm_reffile.inc new file mode 100644 index 0000000000..299dd4876e --- /dev/null +++ b/docs/jwst/references_general/nrm_reffile.inc @@ -0,0 +1,43 @@ +.. _nrm_reffile: + +NRM Reference File +^^^^^^^^^^^^^^^^^^ + +:REFTYPE: NRM +:Data model: `~jwst.datamodels.NRMModel` + +The NRM reference file contains a 2-D model of the pupil mask. + +.. include:: ../references_general/nrm_selection.inc + +.. 
include:: ../includes/standard_keywords.inc + +Type Specific Keywords for NRM +++++++++++++++++++++++++++++++ +In addition to the standard reference file keywords listed above, +the following keywords are *required* in NRM reference files, +because they are used as CRDS selectors +(see :ref:`nrm_selectors`): + +========= ============================== +Keyword Data Model Name +========= ============================== +EXP_TYPE model.meta.exposure.type +========= ============================== + + +Reference File Format ++++++++++++++++++++++ +NRM reference files are FITS format, with one IMAGE extension. +The FITS primary HDU does not contain a data array. +The format and content of the file is as follows: + +======= ======== ===== ============== ========= +EXTNAME XTENSION NAXIS Dimensions Data type +======= ======== ===== ============== ========= +NRM IMAGE 2 1024 x 1024 float +======= ======== ===== ============== ========= + +The ``NRM`` array contains the 2-D image representing the geometry +of the non-redundant mask in the pupil wheel of NIRISS. This mask +enables the Aperture Masking Interferometry (AMI) mode. diff --git a/docs/jwst/references_general/nrm_selection.inc b/docs/jwst/references_general/nrm_selection.inc new file mode 100644 index 0000000000..9a0a335b67 --- /dev/null +++ b/docs/jwst/references_general/nrm_selection.inc @@ -0,0 +1,13 @@ +.. _nrm_selectors: + +Reference Selection Keywords for NRM +++++++++++++++++++++++++++++++++++++ +CRDS selects appropriate NRM references based on the following keywords. +NRM is not applicable for instruments not in the table. +All keywords used for file selection are *required*. 
+
+========== ================================================
+Instrument Keywords
+========== ================================================
+NIRISS INSTRUME, EXP_TYPE, DATE-OBS, TIME-OBS
+========== ================================================
\ No newline at end of file
diff --git a/docs/jwst/references_general/references_general.rst b/docs/jwst/references_general/references_general.rst
index a007a39e87..417b6fd894 100644
--- a/docs/jwst/references_general/references_general.rst
+++ b/docs/jwst/references_general/references_general.rst
@@ -50,8 +50,6 @@ documentation on each reference file.
+===============================================+==================================================+
| :ref:`align_refs <align_refs_step>` | :ref:`PSFMASK <psfmask_reffile>` |
+-----------------------------------------------+--------------------------------------------------+
-| :ref:`ami_analyze <ami_analyze_step>` | :ref:`THROUGHPUT <throughput_reffile>` |
-+-----------------------------------------------+--------------------------------------------------+
| :ref:`assign_wcs <assign_wcs_step>` | :ref:`CAMERA <camera_reffile>` |
+ +--------------------------------------------------+
| | :ref:`COLLIMATOR <collimator_reffile>` |
@@ -284,8 +282,6 @@ documentation on each reference file.
+--------------------------------------------------+-----------------------------------------------+ | :ref:`SUPERBIAS <superbias_reffile>` | :ref:`superbias <superbias_step>` | +--------------------------------------------------+-----------------------------------------------+ -| :ref:`THROUGHPUT <throughput_reffile>` | :ref:`ami_analyze <ami_analyze_step>` | -+--------------------------------------------------+-----------------------------------------------+ | :ref:`TRAPDENSITY <trapdensity_reffile>` | :ref:`persistence <persistence_step>` | +--------------------------------------------------+-----------------------------------------------+ | :ref:`TRAPPARS <trappars_reffile>` | :ref:`persistence <persistence_step>` | diff --git a/jwst/ami/__init__.py b/jwst/ami/__init__.py index dc2780067d..d94ce42030 100644 --- a/jwst/ami/__init__.py +++ b/jwst/ami/__init__.py @@ -1,5 +1,10 @@ from .ami_analyze_step import AmiAnalyzeStep -from .ami_average_step import AmiAverageStep + +# from .ami_average_step import AmiAverageStep from .ami_normalize_step import AmiNormalizeStep -__all__ = ['AmiAnalyzeStep', 'AmiAverageStep', 'AmiNormalizeStep'] +__all__ = [ + "AmiAnalyzeStep", + # 'AmiAverageStep', + "AmiNormalizeStep", +] diff --git a/jwst/ami/ami_analyze.py b/jwst/ami/ami_analyze.py index 306b87b93d..06d31eab03 100644 --- a/jwst/ami/ami_analyze.py +++ b/jwst/ami/ami_analyze.py @@ -1,51 +1,88 @@ # Module for applying the LG-PLUS algorithm to an AMI exposure import logging import numpy as np +import copy from .find_affine2d_parameters import find_rotation from . import instrument_data from . import nrm_core -from .utils import img_median_replace - -from astropy import units as u +from . 
import utils log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) -def apply_LG_plus(input_model, filter_model, oversample, rotation, - psf_offset, rotsearch_parameters): +def apply_LG_plus( + input_model, + throughput_model, + nrm_model, + oversample, + rotation, + psf_offset, + rotsearch_parameters, + bandpass, + usebp, + firstfew, + chooseholes, + affine2d, + run_bpfix, +): """ - Short Summary - ------------- Applies the image plane algorithm to an AMI image Parameters ---------- input_model : data model object AMI science image to be analyzed - - filter_model : filter model object - filter throughput reference data - + throughput_model : data model object + Filter throughput data oversample : integer Oversampling factor - rotation : float (degrees) Initial guess at rotation of science image relative to model + psf_offset : string (two floats) + PSF offset values to use to create the model array\ + rotsearch_parameters : string ('start stop step') + Rotation search parameters + bandpass : synphot spectrum or array + Synphot spectrum or array to override filter/source + usebp : boolean + If True, exclude pixels marked DO_NOT_USE from fringe fitting + firstfew : integer + If not None, process only the first few integrations + chooseholes : string + If not None, fit only certain fringes e.g. 
['B4','B5','B6','C2'] + affine2d : user-defined Affine2D object + None or user-defined Affine2d object + run_bpfix : boolean + Run Fourier bad pixel fix on cropped data Returns ------- - output_model : Fringe model object - Fringe analysis data + oifitsmodel: AmiOIModel object + AMI tables of median observables from LG algorithm fringe fitting in OIFITS format + oifitsmodel_multi: AmiOIModel object + AMI tables of observables for each integration from LG algorithm fringe fitting in OIFITS format + amilgmodel: AmiLGFitModel object + AMI cropped data, model, and residual data from LG algorithm fringe fitting """ # Create copy of input_model to avoid overwriting input - input_copy = input_model.copy() + input_copy = copy.deepcopy(input_model) + + # If the input image is 2D, expand all relevant extensions to be 3D + # Incl. those not currently used? + if len(input_model.data.shape) == 2: + input_copy.data = np.expand_dims(input_copy.data, axis=0) + input_copy.dq = np.expand_dims(input_copy.dq, axis=0) + # input_copy.err = np.expand_dims(input_copy.err, axis=0) + # input_copy.var_poisson = np.expand_dims(input_copy.var_poisson, axis=0) + # input_copy.var_rnoise = np.expand_dims(input_copy.var_rnoise, axis=0) + # input_copy.var_flat = np.expand_dims(input_copy.var_flat, axis=0) # If the input data were taken in full-frame mode, extract a region # equivalent to the SUB80 subarray mode to make execution time acceptable. 
- if input_model.meta.subarray.name.upper() == 'FULL': + if input_model.meta.subarray.name.upper() == "FULL": log.info("Extracting 80x80 subarray from full-frame data") xstart = 1045 ystart = 1 @@ -53,19 +90,14 @@ def apply_LG_plus(input_model, filter_model, oversample, rotation, ysize = 80 xstop = xstart + xsize - 1 ystop = ystart + ysize - 1 - input_copy.data = input_copy.data[ystart - 1:ystop, xstart - 1:xstop] - input_copy.dq = input_copy.dq[ystart - 1:ystop, xstart - 1:xstop] - input_copy.err = input_copy.err[ystart - 1:ystop, xstart - 1:xstop] - - # Replace NaN's and DO_NOT_USE pixels in the input image - # with median of surrounding pixel values in a 3x3 box - box_size = 3 - input_copy = img_median_replace(input_copy, box_size) + input_copy.data = input_copy.data[:, ystart - 1:ystop, xstart - 1:xstop] + input_copy.dq = input_copy.dq[:, ystart - 1:ystop, xstart - 1:xstop] + input_copy.err = input_copy.err[:, ystart - 1:ystop, xstart - 1:xstop] data = input_copy.data - dim = data.shape[1] + dim = data.shape[-1] # 80 px - # Set transformation parameters: + # Initialize transformation parameters: # mx, my: dimensionless magnifications # sx, sy: dimensionless shears # x0, y0: offsets in pupil space @@ -77,32 +109,85 @@ def apply_LG_plus(input_model, filter_model, oversample, rotation, yo = 0.0 psf_offset_ff = None - - lamc = 4.3e-6 - oversample = 11 - bandpass = np.array([(1.0, lamc), ]) - pixelscale_as = 0.0656 - arcsec2rad = u.arcsec.to(u.rad) - PIXELSCALE_r = pixelscale_as * arcsec2rad - holeshape = 'hex' - filt = "F430M" - rotsearch_d = np.append(np.arange(rotsearch_parameters[0], rotsearch_parameters[1], rotsearch_parameters[2]), - rotsearch_parameters[1]) - - log.info(f'Initial values to use for rotation search {rotsearch_d}') - - affine2d = find_rotation(data[:, :], psf_offset, rotsearch_d, - mx, my, sx, sy, xo, yo, - PIXELSCALE_r, dim, bandpass, oversample, holeshape) - - niriss = instrument_data.NIRISS(filt, bandpass=bandpass, affine2d=affine2d) - - ff_t 
= nrm_core.FringeFitter(niriss, psf_offset_ff=psf_offset_ff, + # get filter, pixel scale from input_model, + # make bandpass array for find_rotation, instrument_data calls + filt = input_copy.meta.instrument.filter + pscaledegx, pscaledegy = utils.degrees_per_pixel(input_copy) + # model requires single pixel scale, so average X and Y scales + # (this is done again in instrument_data?) + pscale_deg = np.mean([pscaledegx, pscaledegy]) + PIXELSCALE_r = np.deg2rad(pscale_deg) + holeshape = "hex" + + # Throughput (combined filter and source spectrum) calculated here + bandpass = utils.handle_bandpass(bandpass, throughput_model) + + rotsearch_d = np.append( + np.arange( + rotsearch_parameters[0], rotsearch_parameters[1], rotsearch_parameters[2] + ), + rotsearch_parameters[1], + ) + + log.info(f"Initial values to use for rotation search: {rotsearch_d}") + if affine2d is None: + # affine2d object, can be overridden by user input affine. + # do rotation search on uncropped median image (assuming rotation constant over exposure) + # replace remaining NaNs in median image with median of surrounding 8 (non-NaN) pixels + # (only used for centroiding in rotation search) + meddata = np.median(data, axis=0) + nan_locations = np.where(np.isnan(meddata)) + log.info( + f"Replacing {len(nan_locations[0])} NaNs in median image with median of surrounding pixels" + ) + box_size = 3 + hbox = int(box_size / 2) + for i_pos in range(len(nan_locations[0])): + y_box = nan_locations[0][i_pos] + x_box = nan_locations[1][i_pos] + box = meddata[ + y_box - hbox:y_box + hbox + 1, x_box - hbox:x_box + hbox + 1 + ] + median_fill = np.nanmedian(box) + if np.isnan(median_fill): + median_fill = 0 # not ideal + meddata[y_box, x_box] = median_fill + + affine2d = find_rotation( + meddata, + psf_offset, + rotsearch_d, + mx, + my, + sx, + sy, + xo, + yo, + PIXELSCALE_r, + dim, + bandpass, + oversample, + holeshape, + ) + + niriss = instrument_data.NIRISS(filt, + nrm_model, + bandpass=bandpass, + 
affine2d=affine2d, + firstfew=firstfew, + usebp=usebp, + chooseholes=chooseholes, + run_bpfix=run_bpfix) + + ff_t = nrm_core.FringeFitter(niriss, + psf_offset_ff=psf_offset_ff, oversample=oversample) - output_model = ff_t.fit_fringes_all(input_copy) + oifitsmodel, oifitsmodel_multi, amilgmodel = ff_t.fit_fringes_all(input_copy) - # Copy header keywords from input to output - output_model.update(input_model, only="PRIMARY") + # Copy header keywords from input to outputs + oifitsmodel.update(input_model, only="PRIMARY") + oifitsmodel_multi.update(input_model, only="PRIMARY") + amilgmodel.update(input_model, only="PRIMARY") - return output_model + return oifitsmodel, oifitsmodel_multi, amilgmodel diff --git a/jwst/ami/ami_analyze_step.py b/jwst/ami/ami_analyze_step.py index eb41e46762..ddf6d3fd5f 100755 --- a/jwst/ami/ami_analyze_step.py +++ b/jwst/ami/ami_analyze_step.py @@ -7,19 +7,30 @@ class AmiAnalyzeStep(Step): - """Performs analysis of an AMI mode exposure by applying the LG algorithm. - """ + """Performs analysis of an AMI mode exposure by applying the LG algorithm.""" class_alias = "ami_analyze" spec = """ oversample = integer(default=3, min=1) # Oversampling factor rotation = float(default=0.0) # Rotation initial guess [deg] - psf_offset = string(default='0.0 0.0') # Psf offset values to use to create the model array + psf_offset = string(default='0.0 0.0') # PSF offset values to use to create the model array rotation_search = string(default='-3 3 1') # Rotation search parameters: start, stop, step + bandpass = any(default=None) # Synphot spectrum or array to override filter/source + usebp = boolean(default=True) # If True, exclude pixels marked DO_NOT_USE from fringe fitting + firstfew = integer(default=None) # If not None, process only the first few integrations + chooseholes = string(default=None) # If not None, fit only certain fringes e.g. 
['B4','B5','B6','C2'] + affine2d = any(default=None) # None or user-defined Affine2d object + run_bpfix = boolean(default=True) # Run Fourier bad pixel fix on cropped data """ - reference_file_types = ['throughput'] + reference_file_types = ['throughput', 'nrm'] + + def save_model(self, model, *args, **kwargs): + # Override save_model to change suffix based on list of results + if "idx" in kwargs and kwargs.get("suffix", None) is None: + kwargs["suffix"] = ["ami-oi", "amimulti-oi", "amilg"][kwargs.pop("idx")] + return Step.save_model(self, model, *args, **kwargs) def process(self, input): """ @@ -32,53 +43,73 @@ def process(self, input): Returns ------- - result: AmiLgModel object - AMI image to which the LG fringe detection has been applied + oifitsmodel: AmiOIModel object + AMI tables of median observables from LG algorithm fringe fitting in OIFITS format + oifitsmodel_multi: AmiOIModel object + AMI tables of observables for each integration from LG algorithm fringe fitting in OIFITS format + amilgmodel: AmiLGFitModel object + AMI cropped data, model, and residual data from LG algorithm fringe fitting """ # Retrieve the parameter values oversample = self.oversample rotate = self.rotation + bandpass = self.bandpass + usebp = self.usebp + firstfew = self.firstfew + chooseholes = self.chooseholes + affine2d = self.affine2d + run_bpfix = self.run_bpfix # pull out parameters that are strings and change to floats psf_offset = [float(a) for a in self.psf_offset.split()] rotsearch_parameters = [float(a) for a in self.rotation_search.split()] - self.log.info(f'Oversampling factor = {oversample}') - self.log.info(f'Initial rotation guess = {rotate} deg') - self.log.info(f'Initial values to use for psf offset = {psf_offset}') - - # Open the input data model - try: - input_model = datamodels.ImageModel(input) - except ValueError as err: - raise RuntimeError(f"{err}. 
Input must be a 2D ImageModel.") - - # check for 2D data array - if len(input_model.data.shape) != 2: - raise RuntimeError("Only 2D ImageModel data can be processed") - - # Get the name of the filter throughput reference file to use - throughput_reffile = self.get_reference_file(input_model, 'throughput') - self.log.info(f'Using filter throughput reference file {throughput_reffile}') - - # Check for a valid reference file - if throughput_reffile == 'N/A': - self.log.warning('No THROUGHPUT reference file found') - self.log.warning('AMI analyze step will be skipped') - raise RuntimeError("No throughput reference file found. " - "ami_analyze cannot continue.") - - # Open the filter throughput reference file - throughput_model = datamodels.ThroughputModel(throughput_reffile) - - # Apply the LG+ methods to the data - result = ami_analyze.apply_LG_plus(input_model, throughput_model, - oversample, rotate, - psf_offset, - rotsearch_parameters) - - # Close the reference file and update the step status - throughput_model.close() - result.meta.cal_step.ami_analyze = 'COMPLETE' - - return result + self.log.info(f"Oversampling factor = {oversample}") + self.log.info(f"Initial rotation guess = {rotate} deg") + self.log.info(f"Initial values to use for psf offset = {psf_offset}") + + # Make sure oversample is odd + if oversample % 2 == 0: + raise ValueError("Oversample value must be an odd integer.") + + # Open the input data model. Can be 2D or 3D image + with datamodels.open(input) as input_model: + # Get the name of the filter throughput reference file to use + throughput_reffile = self.get_reference_file(input_model, 'throughput') + self.log.info(f'Using filter throughput reference file {throughput_reffile}') + + # Check for a valid reference file or user-provided bandpass + if (throughput_reffile == 'N/A') & (bandpass is None): + raise RuntimeError("No THROUGHPUT reference file found. 
" + "ami_analyze cannot continue.") + + # Get the name of the NRM reference file to use + nrm_reffile = self.get_reference_file(input_model, 'nrm') + self.log.info(f'Using NRM reference file {nrm_reffile}') + + with ( + datamodels.ThroughputModel(throughput_reffile) as throughput_model, + datamodels.NRMModel(nrm_reffile) as nrm_model, + ): + # Apply the LG+ methods to the data + oifitsmodel, oifitsmodel_multi, amilgmodel = ami_analyze.apply_LG_plus( + input_model, + throughput_model, + nrm_model, + oversample, + rotate, + psf_offset, + rotsearch_parameters, + bandpass, + usebp, + firstfew, + chooseholes, + affine2d, + run_bpfix, + ) + + amilgmodel.meta.cal_step.ami_analyze = 'COMPLETE' + oifitsmodel.meta.cal_step.ami_analyze = 'COMPLETE' + oifitsmodel_multi.meta.cal_step.ami_analyze = 'COMPLETE' + + return oifitsmodel, oifitsmodel_multi, amilgmodel diff --git a/jwst/ami/ami_average.py b/jwst/ami/ami_average.py index 34fa9715cf..f3fe315586 100644 --- a/jwst/ami/ami_average.py +++ b/jwst/ami/ami_average.py @@ -11,8 +11,6 @@ def average_LG(lg_products): """ - Short Summary - ------------- Averages the LG results for a set of AMI exposures Parameters diff --git a/jwst/ami/ami_average_step.py b/jwst/ami/ami_average_step.py index e6a36c79cd..231c9453a3 100755 --- a/jwst/ami/ami_average_step.py +++ b/jwst/ami/ami_average_step.py @@ -12,6 +12,7 @@ class AmiAverageStep(Step): class_alias = "ami_average" spec = """ + skip = True # Do not run this step """ def flatten_input(self, input_items): diff --git a/jwst/ami/ami_normalize.py b/jwst/ami/ami_normalize.py index 08d7a6ae37..e609ca57a0 100644 --- a/jwst/ami/ami_normalize.py +++ b/jwst/ami/ami_normalize.py @@ -4,6 +4,7 @@ # import logging +from . 
import oifits log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) @@ -11,33 +12,26 @@ def normalize_LG(target_model, reference_model): """ - Short Summary - ------------- Normalizes the LG results for a science target by the LG results for a reference target Parameters ---------- - target_model: AmiLgModel data model + target_model: AmiOIModel data model The target data to be normalized - reference_model: AmiLgModel data model + reference_model: AmiOIModel data model The reference data Returns ------- - output_model: AmiLgModel data model - Normalized fringe data for the target + output_model: AmiOIModel data model + Normalized interferometric observables for the target """ - # Create the output model as a copy of the input target model - output_model = target_model.copy() - - # Apply the normalizations to the target data - output_model.closure_phase_table['coeffs'] -= \ - reference_model.closure_phase_table['coeffs'] - output_model.fringe_amp_table['coeffs'] /= \ - reference_model.fringe_amp_table['coeffs'] + # Initialize the calibration (normalization) class and apply the normalizations + norm_model = oifits.CalibOifits(target_model, reference_model) + output_model = norm_model.calibrate() # Return the normalized target model return output_model diff --git a/jwst/ami/ami_normalize_step.py b/jwst/ami/ami_normalize_step.py index 1c739d8ccc..7eafdf8d32 100755 --- a/jwst/ami/ami_normalize_step.py +++ b/jwst/ami/ami_normalize_step.py @@ -15,6 +15,7 @@ class AmiNormalizeStep(Step): class_alias = "ami_normalize" spec = """ + suffix = string(default='aminorm-oi') """ def process(self, target, reference): @@ -32,18 +33,18 @@ def process(self, target, reference): Returns ------- - result: AmiLgModel object + result: AmiOIModel object AMI data model that's been normalized """ # Open the target and reference input models - target_model = datamodels.AmiLgModel(target) - reference_model = datamodels.AmiLgModel(reference) + target_model = 
datamodels.AmiOIModel(target) + reference_model = datamodels.AmiOIModel(reference) # Call the normalization routine result = ami_normalize.normalize_LG(target_model, reference_model) - result.meta.cal_step.ami_normalize = 'COMPLETE' + result.meta.cal_step.ami_normalize = "COMPLETE" # Close the input models target_model.close() diff --git a/jwst/ami/analyticnrm2.py b/jwst/ami/analyticnrm2.py index ed7cc99903..ab53bf5a2b 100755 --- a/jwst/ami/analyticnrm2.py +++ b/jwst/ami/analyticnrm2.py @@ -15,8 +15,6 @@ def jinc(x, y): """ - Short Summary - ------------- Compute 2d Jinc for given coordinates Parameters @@ -40,8 +38,6 @@ def jinc(x, y): def ffc(kx, ky, **kwargs): """ - Short Summary - ------------- Calculate cosine terms of analytic model. Parameters @@ -84,8 +80,6 @@ def ffc(kx, ky, **kwargs): def ffs(kx, ky, **kwargs): """ - Short Summary - ------------- Calculate sine terms of analytic model. Parameters @@ -127,8 +121,6 @@ def ffs(kx, ky, **kwargs): def harmonicfringes(**kwargs): """ - Short Summary - ------------- Calculate the sine and cosine fringes. This is in image space and, for later versions, this works in the oversampled space that is each slice of the model. @@ -220,8 +212,6 @@ def phasor(kx, ky, hx, hy, lam, phi_m, pitch, affine2d): def image_center(fov, oversample, psf_offset): """ - Short Summary - ------------- Calculate the Image center location in oversampled pixels Parameters @@ -249,8 +239,6 @@ def image_center(fov, oversample, psf_offset): def interf(kx, ky, **kwargs): """ - Short Summary - ------------- Calculate the complex amplitudes for all holes. Parameters @@ -301,8 +289,6 @@ def model_array(ctrs, lam, oversample, pitch, fov, d, psf_offset=(0, 0), phi=None, shape='circ', affine2d=None): """ - Short Summary - ------------- Create a model using the specified wavelength. 
Parameters @@ -383,8 +369,6 @@ def model_array(ctrs, lam, oversample, pitch, fov, d, psf_offset=(0, 0), def asf(detpixel, fov, oversample, ctrs, d, lam, phi, psf_offset, affine2d): """ - Short Summary - ------------- Calculate the Amplitude Spread Function (a.k.a. image plane complex amplitude) for a circular aperture. @@ -435,8 +419,6 @@ def asf(detpixel, fov, oversample, ctrs, d, lam, phi, psf_offset, affine2d): def asffringe(detpixel, fov, oversample, ctrs, lam, phi, psf_offset, affine2d): """ - Short Summary - ------------- Amplitude Spread Function (a.k.a. image plane complex amplitude) for a fringe @@ -485,8 +467,6 @@ def asffringe(detpixel, fov, oversample, ctrs, lam, phi, psf_offset, affine2d): def asf_hex(detpixel, fov, oversample, ctrs, d, lam, phi, psf_offset, affine2d): """ - Short Summary - ------------- Amplitude Spread Function (a.k.a. image plane complex amplitude) for a hexagonal aperture @@ -539,8 +519,6 @@ def asf_hex(detpixel, fov, oversample, ctrs, d, lam, phi, psf_offset, affine2d): def psf(detpixel, fov, oversample, ctrs, d, lam, phi, psf_offset, affine2d, shape='circ'): """ - Short Summary - ------------- Calculate the PSF for the requested shape Parameters diff --git a/jwst/ami/bp_fix.py b/jwst/ami/bp_fix.py new file mode 100644 index 0000000000..1c21a7a485 --- /dev/null +++ b/jwst/ami/bp_fix.py @@ -0,0 +1,394 @@ +import numpy as np +import logging + +from copy import deepcopy +from poppy import matrixDFT +from scipy.ndimage import median_filter + +from stdatamodels.jwst.datamodels import dqflags + +log = logging.getLogger(__name__) +log.setLevel(logging.DEBUG) +logging.captureWarnings(True) + + +"""pipeline implementation of Jens Kammerer's bp_fix code based on Ireland 2013 algorithm""" + +micron = 1.0e-6 +filts = ["F277W", "F380M", "F430M", "F480M", "F356W", "F444W"] +# TO DO: get these from some context-controlled place +filtwl_d = { # pivot wavelengths + "F277W": 2.776e-6, # less than Nyquist + "F380M": 3.828e-6, + "F430M": 4.286e-6, + 
"F480M": 4.817e-6, + "F356W": 3.595e-6, # semi-forbidden + "F444W": 4.435e-6, # semi-forbidden +} +filthp_d = { # half power limits + "F277W": (2.413e-6, 3.142e-6), + "F380M": (3.726e-6, 3.931e-6), + "F430M": (4.182e-6, 4.395e-6), + "F480M": (4.669e-6, 4.971e-6), + "F356W": (3.141e-6, 4.068e-6), + "F444W": (3.880e-6, 5.023e-6), +} +WL_OVERSIZEFACTOR = ( + 0.1 # increase filter wl support by this amount to 'oversize' in wl space +) + +DIAM = 6.559348 # / Flat-to-flat distance across pupil in V3 axis +PUPLDIAM = 6.603464 # / Full pupil file size, incl padding. +PUPL_CRC = 6.603464 # / Circumscribing diameter for JWST primary + + +def create_wavelengths(filtername): + """ + Extend filter support slightly past half power points. + Filter transmissions are quasi-rectangular. + + Parameters + ---------- + filtername: string + AMI filter name + """ + wl_ctr = filtwl_d[filtername] + wl_hps = filthp_d[filtername] + # both positive quantities below - left is lower wl, rite is higher wl + dleft = (wl_ctr - wl_hps[0]) * (1 + WL_OVERSIZEFACTOR) + drite = (-wl_ctr + wl_hps[1]) * (1 + WL_OVERSIZEFACTOR) + + return (wl_ctr, wl_ctr - dleft, wl_ctr + drite) + + +def calc_pupil_support(filtername, sqfov_npix, pxsc_rad, pupil_mask): + """ + Calculate psf at low, center, and high wavelengths of filter. 
+ Coadd psfs and perform fft-style transform of image w/ dft + + Parameters + ---------- + filtername: string + AMI filter name + + sqfov_npix: float + Square field of view in number of pixels + + pxsc_rad: float + Detector pixel scale in rad/px + + pupil_mask: array + Pupil mask model (NRM) + + Returns + ------- + Absolute value of FT(image) in filter - the CV Vsq array + + + """ + wls = create_wavelengths(filtername) + log.info( + f" {filtername}: {wls[0] / micron:.3f} to {wls[2] / micron:.3f} micron" + ) + detimage = np.zeros((sqfov_npix, sqfov_npix), float) + for wl in wls: + psf = calcpsf(wl, sqfov_npix, pxsc_rad, pupil_mask) + detimage += psf + + return transform_image(detimage) + + +def transform_image(image): + """ + Take Fourier transform of image + + Parameters + ---------- + image: numpy array + Science image + + Returns + ------- + Absolute value of FT(image) + + """ + ft = matrixDFT.MatrixFourierTransform() + ftimage = ft.perform( + image, image.shape[0], image.shape[0] + ) # fake the no-loss fft w/ dft + + return np.abs(ftimage) + + +def calcpsf(wl, fovnpix, pxsc_rad, pupil_mask): + """ + Calculate the PSF + + Parameters + ---------- + wl: float + Wavelength (meters) + + fovnpix: float + Square field of view in number of pixels + + pxsc_rad: float + Detector pixel scale in rad/px + + pupil_mask: array + Pupil mask model (NRM) + + Returns + ------- + image_intensity: numpy array + Monochromatic unnormalized psf + """ + reselt = wl / PUPLDIAM # radian + nlamD = fovnpix * pxsc_rad / reselt # Soummer nlamD FOV in reselts + # instantiate an mft object: + ft = matrixDFT.MatrixFourierTransform() + + image_field = ft.perform(pupil_mask, nlamD, fovnpix) + image_intensity = (image_field * image_field.conj()).real + + return image_intensity + + +def bad_pixels(data, median_size, median_tres): + """ + Identify bad pixels by subtracting median-filtered data and searching for + outliers. 
+ + Parameters + ---------- + data: numpy array + Science data + median_size: float + Median filter size (pixels) + median_tres: float + Empirically determined threshold + + Returns + ------- + pxdq: int array + Bad pixel mask identified by median filtering + """ + + mfil_data = median_filter(data, size=median_size) + diff_data = np.abs(data - mfil_data) + pxdq = diff_data > median_tres * np.median(diff_data) + pxdq = pxdq.astype("int") + + log.info( + " Identified %.0f bad pixels (%.2f%%)" + % (np.sum(pxdq), np.sum(pxdq) / np.prod(pxdq.shape) * 100.0) + ) + log.info(" %.3f" % np.max(diff_data / np.median(diff_data))) + + return pxdq + + +def fourier_corr(data, pxdq, fmas): + """ + Compute and apply the bad pixel corrections based on Section 2.5 of + Ireland 2013. + + Parameters + ---------- + data: numpy array + Science data + pxdq: numpy array + Bad pixel mask + fmas: + FT of science data + + Returns + ------- + data_out: numpy array + Corrected science data + + References + ---------- + M. J. Ireland, Phase errors in diffraction-limited imaging: contrast limits + for sparse aperture masking, Monthly Notices of the Royal Astronomical + Society, Volume 433, Issue 2, 01 August 2013, Pages 1718–1728, + https://doi.org/10.1093/mnras/stt859 + """ + + # Get the dimensions. + ww = np.where(pxdq > 0.5) + ww_ft = np.where(fmas) + + # Compute the B_Z matrix from Section 2.5 of Ireland 2013. This matrix + # maps the bad pixels onto their Fourier power in the domain Z, which is + # the complement of the pupil support. 
+ B_Z = np.zeros((len(ww[0]), len(ww_ft[0]) * 2)) + xh = data.shape[0] // 2 + yh = data.shape[1] // 2 + xx, yy = np.meshgrid( + 2.0 * np.pi * np.arange(yh + 1) / data.shape[1], + 2.0 + * np.pi + * (((np.arange(data.shape[0]) + xh) % data.shape[0]) - xh) + / data.shape[0], + ) + for i in range(len(ww[0])): + cdft = np.exp(-1j * (ww[0][i] * yy + ww[1][i] * xx)) + B_Z[i, :] = np.append(cdft[ww_ft].real, cdft[ww_ft].imag) + + # Compute the corrections for the bad pixels using the Moore-Penrose pseudo + # inverse of B_Z (Equation 19 of Ireland 2013). + B_Z_ct = np.transpose(np.conj(B_Z)) + B_Z_mppinv = np.dot(B_Z_ct, np.linalg.inv(np.dot(B_Z, B_Z_ct))) + + # Apply the corrections for the bad pixels. + data_out = deepcopy(data) + data_out[ww] = 0.0 + data_ft = np.fft.rfft2(data_out)[ww_ft] + corr = -np.real(np.dot(np.append(data_ft.real, data_ft.imag), B_Z_mppinv)) + data_out[ww] += corr + + return data_out + + +def fix_bad_pixels(data, pxdq0, filt, pxsc, nrm_model): + """ + Apply the Fourier bad pixel correction to pixels + flagged DO_NOT_USE or JUMP_DET. + Original code implementation by Jens Kammerer. + + Parameters + ---------- + data: array + Cropped science data + pxdq0: array + Cropped DQ array + filt: string + AMI filter name + pxsc: float + Pixel scale, mas/pixel + nrm_model: datamodel object + NRM pupil datamodel + + Returns + ------- + data: numpy array + Corrected data + pxdq: + Mask of bad pixels, updated if new ones were found + + """ + DO_NOT_USE = dqflags.pixel["DO_NOT_USE"] + JUMP_DET = dqflags.pixel["JUMP_DET"] + dq_dnu = pxdq0 & DO_NOT_USE == DO_NOT_USE + dq_jump = pxdq0 & JUMP_DET == JUMP_DET + dqmask = dq_dnu | dq_jump + + pxdq = np.where(dqmask, pxdq0, 0) + nflagged_dnu = np.count_nonzero(pxdq) + log.info("%i pixels flagged DO_NOT_USE in cropped data" % nflagged_dnu) + + # DNU, some other pixels are now NaNs in cal level products. + # Replace them with 0, then + # add DO_NOT_USE flags to positions in DQ array so they will be corrected. 
+ nanidxlist = np.argwhere(np.isnan(data)) + if len(nanidxlist) > 1: + log.info("Identified %i NaN pixels to correct" % len(nanidxlist)) + for idx in nanidxlist: + data[idx[0], idx[1], idx[2]] = 0 + pxdq0[idx[0], idx[1], idx[2]] += 1 # add DNU flag to each nan pixel + + # These values are taken from the JDox and the SVO Filter Profile + # Service. + diam = PUPLDIAM # m + gain = 1.61 # e-/ADU + rdns = 18.32 # e- + pxsc_rad = (pxsc / 1000) * np.pi / (60 * 60 * 180) + + # These values were determined empirically for NIRISS/AMI and need to be + # tweaked for any other instrument. + median_size = 3 # pix + median_tres = 50.0 # JK: changed from 28 to 20 in order to capture all bad pixels + + pupil_mask = nrm_model.nrm + imsz = data.shape + sh = imsz[-1] // 2 # half size, even + # Compute field-of-view and Fourier sampling. + fov = 2 * sh * pxsc / 1000.0 # arcsec + fsam = filtwl_d[filt] / (fov / 3600.0 / 180.0 * np.pi) # m/pix + log.info(" FOV = %.1f arcsec, Fourier sampling = %.3f m/pix" % (fov, fsam)) + + # + cvis = calc_pupil_support(filt, 2 * sh, pxsc_rad, pupil_mask) + cvis /= np.max(cvis) + fmas = cvis < 1e-3 # 1e-3 seems to be a reasonable threshold + fmas = np.fft.fftshift(fmas)[:, : 2 * sh // 2 + 1] + + # Compute the pupil mask. This mask defines the region where we are + # measuring the noise. It looks like 15 lambda/D distance from the PSF + # is reasonable. + ramp = np.arange(2 * sh) - 2 * sh // 2 + xx, yy = np.meshgrid(ramp, ramp) + dist = np.sqrt(xx**2 + yy**2) + pmas = ( + dist > 9.0 * filtwl_d[filt] / diam * 180.0 / np.pi * 1000.0 * 3600.0 / pxsc + ) + + # Go through all frames. + for j in range(imsz[0]): + log.info(" Frame %.0f of %.0f" % (j + 1, imsz[0])) + + # Now cut out the subframe. + # no need to cut out sub-frame; data already cropped + # odd/even size issues? + data_cut = deepcopy(data[j, :-1, :-1]) + data_orig = deepcopy(data_cut) + pxdq_cut = deepcopy(pxdq[j, :-1, :-1]) + pxdq_cut = pxdq_cut > 0.5 + # Correct the bad pixels. 
This is an iterative process. After each + # iteration, we check whether new (residual) bad pixels are + # identified. If so, we re-compute the corrections. If not, we + # terminate the iteration. + for k in range(10): + # Correct the bad pixels. + data_cut = fourier_corr(data_cut, pxdq_cut, fmas) + if k == 0: + temp = deepcopy(data_cut) + + # Identify residual bad pixels by looking at the high spatial + # frequency part of the image. + fmas_data = np.real(np.fft.irfft2(np.fft.rfft2(data_cut) * fmas)) + + # Analytically determine the noise (Poisson noise + read noise) + # and normalize the high spatial frequency part of the image + # by it, then identify residual bad pixels. + mfil_data = median_filter(data_cut, size=median_size) + nois = np.sqrt(mfil_data / gain + rdns**2) + fmas_data /= nois + temp = bad_pixels( + fmas_data, median_size=median_size, median_tres=median_tres + ) + + # Check which bad pixels are new. Also, compare the + # analytically determined noise with the empirically measured + # noise. + pxdq_new = np.sum(temp[pxdq_cut < 0.5]) + log.info( + " Iteration %.0f: %.0f new bad pixels, sdev of norm noise = %.3f" + % (k + 1, pxdq_new, np.std(fmas_data[pmas])) + ) + + # If no new bad pixels were identified, terminate the + # iteration. + if pxdq_new == 0.0: + break + + # If new bad pixels were identified, add them to the bad pixel + # map. + pxdq_cut = ((pxdq_cut > 0.5) | (temp > 0.5)).astype("int") + + # Put the modified frames back into the data cube. 
+ data[j, :-1, :-1] = fourier_corr(data_orig, pxdq_cut, fmas) + pxdq[j, :-1, :-1] = pxdq_cut + + return data, pxdq diff --git a/jwst/ami/find_affine2d_parameters.py b/jwst/ami/find_affine2d_parameters.py index 895d7e8dc5..f21609e92e 100644 --- a/jwst/ami/find_affine2d_parameters.py +++ b/jwst/ami/find_affine2d_parameters.py @@ -14,8 +14,6 @@ def create_afflist_rot(rotdegs): """ - Short Summary - ------------- Create a list of affine objects with various rotations to use in order to go through and find which fits an image plane data best. @@ -40,8 +38,6 @@ def create_afflist_rot(rotdegs): def find_rotation(imagedata, psf_offset, rotdegs, mx, my, sx, sy, xo, yo, pixel, npix, bandpass, over, holeshape): """ - Short Summary - ------------- Create an affine2d object using the known rotation and scale. Parameters @@ -102,7 +98,7 @@ def find_rotation(imagedata, psf_offset, rotdegs, mx, my, sx, sy, xo, yo, crosscorr_rots = [] for (rot, aff) in zip(rotdegs, affine2d_list): - jw = lg_model.NrmModel(mask='jwst', holeshape=holeshape, over=over, affine2d=aff) + jw = lg_model.NrmModel(mask='jwst_g7s6c', holeshape=holeshape, over=over, affine2d=aff) jw.set_pixelscale(pixel) # psf_offset in data coords & pixels. Does it get rotated? Second order errors poss. diff --git a/jwst/ami/hexee.py b/jwst/ami/hexee.py index 6af162e46e..9d7cf37c71 100755 --- a/jwst/ami/hexee.py +++ b/jwst/ami/hexee.py @@ -19,8 +19,6 @@ def g_eeAG(xi, eta, **kwargs): """ - Short Summary - ------------- Calculate the Fourier transform of one half of a hexagon that is bisected from one corner to its diametrically opposite corner. { DG: how does this compare to g_eeGEN() ? } @@ -79,8 +77,6 @@ def g_eeAG(xi, eta, **kwargs): def glimit(xi, eta, **kwargs): """ - Short Summary - ------------- Calculate the analytic limit of the Fourier transform of one half of the hexagon along eta=0. 
@@ -135,8 +131,6 @@ def glimit(xi, eta, **kwargs): def centralpix_limit(): """ - Short Summary - ------------- Calculate the analytic limit of the Fourier transform of one half of the hexagon at the origin. @@ -157,8 +151,6 @@ def centralpix_limit(): def mas2rad(mas): """ - Short Summary - ------------- Convert angle in milli arc-sec to radians Parameters @@ -178,8 +170,6 @@ def mas2rad(mas): def hex_eeAG(s=(121, 121), c=None, d=0.80, lam=4.3e-6, pitch=mas2rad(65)): """ - Short Summary - ------------- Calculate the hexagonal hole Fourier transform by adding the transforms of the 2 symmetric parts. diff --git a/jwst/ami/hextransformee.py b/jwst/ami/hextransformee.py index 442e06d188..267ac169fe 100755 --- a/jwst/ami/hextransformee.py +++ b/jwst/ami/hextransformee.py @@ -4,8 +4,6 @@ def gfunction(xi, eta, **kwargs): """ - Short Summary - ------------- Calculate the Fourier transform of one half of a hexagon that is bisected from one corner to its diametrically opposite corner. @@ -38,10 +36,10 @@ def gfunction(xi, eta, **kwargs): Fourier transform of one half of a hexagon. 
""" - c = kwargs['c'] - pixel = kwargs['pixel'] - d = kwargs['d'] - lam = kwargs['lam'] + c = kwargs["c"] + pixel = kwargs["pixel"] + d = kwargs["d"] + lam = kwargs["lam"] xi = (d / lam) * pixel * (xi - c[0]) eta = (d / lam) * pixel * (eta - c[1]) affine2d = kwargs["affine2d"] @@ -51,25 +49,28 @@ def gfunction(xi, eta, **kwargs): xip, etap = affine2d.distortFargs(xi, eta) - if kwargs['minus'] is True: + if kwargs["minus"] is True: xip = -1 * xip - g = np.exp(-i * Pi * (2 * etap / np.sqrt(3) + xip)) * \ - ( - (np.sqrt(3) * etap - 3 * xip) * - (np.exp(i * Pi * np.sqrt(3) * etap) - np.exp(i * Pi * (4 * etap / np.sqrt(3) + xip))) + - (np.sqrt(3) * etap + 3 * xip) * - (np.exp(i * Pi * etap / np.sqrt(3)) - np.exp(i * Pi * xip)) - ) / \ - (4 * Pi * Pi * (etap * etap * etap - 3 * etap * xip * xip)) + g = ( + np.exp(-i * Pi * (2 * etap / np.sqrt(3) + xip)) + * ( + (np.sqrt(3) * etap - 3 * xip) + * ( + np.exp(i * Pi * np.sqrt(3) * etap) + - np.exp(i * Pi * (4 * etap / np.sqrt(3) + xip)) + ) + + (np.sqrt(3) * etap + 3 * xip) + * (np.exp(i * Pi * etap / np.sqrt(3)) - np.exp(i * Pi * xip)) + ) + / (4 * Pi * Pi * (etap * etap * etap - 3 * etap * xip * xip)) + ) return g * affine2d.distortphase(xi, eta) def hextransform(s=None, c=None, d=None, lam=None, pitch=None, affine2d=None): """ - Short Summary - ------------- Calculate the complex array analytical transform of a (distorted if necessary) hexagon Parameters @@ -115,11 +116,25 @@ def hextransform(s=None, c=None, d=None, lam=None, pitch=None, affine2d=None): if abs(d1) < 0.5 * eps_offset: # might have the singular central pixel here c_adjust[1] = c[1] + eps_offset - hex_complex = np.fromfunction(gfunction, s, d=d, c=c_adjust, lam=lam, - pixel=pitch, affine2d=affine2d, minus=False) + \ - np.fromfunction(gfunction, s, d=d, c=c_adjust, lam=lam, - pixel=pitch, affine2d=affine2d, minus=True) - + hex_complex = np.fromfunction( + gfunction, + s, + d=d, + c=c_adjust, + lam=lam, + pixel=pitch, + affine2d=affine2d, + minus=False, + ) + 
np.fromfunction( + gfunction, + s, + d=d, + c=c_adjust, + lam=lam, + pixel=pitch, + affine2d=affine2d, + minus=True, + ) hex_complex[int(c[0]), int(c[1])] = (np.sqrt(3) / 2.0) return hex_complex diff --git a/jwst/ami/instrument_data.py b/jwst/ami/instrument_data.py index 578ed5bf71..7b5e11614d 100755 --- a/jwst/ami/instrument_data.py +++ b/jwst/ami/instrument_data.py @@ -5,109 +5,100 @@ import logging import numpy as np -from scipy.integrate import simpson as simps from .mask_definitions import NRM_mask_definitions from . import utils +from . import bp_fix +from stdatamodels.jwst.datamodels import dqflags +import copy + log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) -um = 1.0e-6 - class NIRISS: - def __init__(self, filt, - objname="obj", - src="A0V", + def __init__(self, + filt, + nrm_model, chooseholes=None, affine2d=None, bandpass=None, - **kwargs): + usebp=True, + firstfew=None, + rotsearch_parameters=None, + oversample=None, + psf_offset=None, + run_bpfix=True + ): """ - Short Summary - ------------ Initialize NIRISS class - Either user has webbpsf and filter file can be read, or this will use a - tophat and give a warning Parameters ---------- filt: string filter name - objname: string - name of object - - src: string - spectral type - chooseholes: list None, or e.g. ['B2', 'B4', 'B5', 'B6'] for a four-hole mask affine2d: Affine2d object Affine2d object - bandpass: list - None or [(wt,wlen),(wt,wlen),...]. Monochromatic would be e.g. [(1.0, 4.3e-6)] + bandpass: synphot spectrum or array + None, synphot object or [(wt,wlen),(wt,wlen),...]. Monochromatic would be e.g. [(1.0, 4.3e-6)] Explicit bandpass arg will replace *all* niriss filter-specific variables with - the given bandpass, so you can simulate 21cm psfs through something called "F430M"! + the given bandpass, so you could simulate, for example, a 21cm psf through something called "F430M"! 
+ + usebp : boolean + If True, exclude pixels marked DO_NOT_USE from fringe fitting + + firstfew : integer + If not None, process only the first few integrations + + chooseholes : string + If not None, fit only certain fringes e.g. ['B4','B5','B6','C2'] + + affine2d : Affine2D object + None or user-defined Affine2d object + + run_bpfix : boolean + Run Fourier bad pixel fix on cropped data """ + self.run_bpfix = run_bpfix + self.usebp = usebp self.chooseholes = chooseholes - self.objname = objname self.filt = filt - - # 12 waves in f430 - data analysis: - self.lam_bin = {"F277W": 50, "F380M": 20, "F430M": 40, "F480M": 30} - - # use 150 for 3 waves ax f430m; nominal values - self.lam_c = {"F277W": 2.77e-6, # central wavelength (SI) - "F380M": 3.8e-6, - "F430M": 4.28521033106325E-06, - "F480M": 4.8e-6} - self.lam_w = {"F277W": 0.2, "F380M": 0.1, "F430M": 0.0436, "F480M": 0.08} # fractional filter width - - self.throughput = utils.tophatfilter(self.lam_c[self.filt], self.lam_w[self.filt], npoints=11) - - # update nominal filter parameters with those of the filter read in and used in the analysis... 
- # Weighted mean wavelength in meters, etc, etc "central wavelength" for the filter: - thru_st = np.stack(self.throughput, axis=1) - thru_st_0 = thru_st[0, :] - thru_st_1 = thru_st[1, :] - - num = (thru_st_0 * thru_st_1).sum() - den = thru_st[0, :].sum() - self.lam_c[self.filt] = num / den - - area = simps(thru_st_0, x=thru_st_1) - ew = area / thru_st_0.max() # equivalent width - - beta = ew / self.lam_c[self.filt] # fractional bandpass - self.lam_w[self.filt] = beta - - if bandpass is not None: - bandpass = np.array(bandpass) # type simplification - wt = bandpass[:, 0] - wl = bandpass[:, 1] - cw = (wl * wt).sum() / wt.sum() # Weighted mean wavelength in meters "central wavelength" - area = simps(wt, x=wl) - ew = area / wt.max() # equivalent width - beta = ew / cw # fractional bandpass - self.lam_c = {"F277W": cw, "F380M": cw, "F430M": cw, "F480M": cw, } - self.lam_w = {"F277W": beta, "F380M": beta, "F430M": beta, "F480M": beta} - self.throughput = bandpass - - self.wls = [self.throughput, ] + self.throughput = bandpass + self.firstfew = firstfew + self.nrm_model = nrm_model + + self.lam_c, self.lam_w = utils.get_cw_beta(self.throughput) + self.wls = [ + self.throughput, + ] # Wavelength info for NIRISS bands F277W, F380M, F430M, or F480M - self.wavextension = ([self.lam_c[self.filt], ], [self.lam_w[self.filt], ]) + self.wavextension = ( + [ + self.lam_c, + ], + [ + self.lam_w, + ], + ) self.nwav = 1 # only one NRM on JWST: - self.telname = "NIRISS" + self.telname = "JWST" self.instrument = "NIRISS" self.arrname = "jwst_g7s6c" self.holeshape = "hex" - self.mask = NRM_mask_definitions(maskname=self.arrname, chooseholes=chooseholes, holeshape=self.holeshape) + self.mask = NRM_mask_definitions( + maskname=self.arrname, + chooseholes=self.chooseholes, + holeshape=self.holeshape, + ) + # save affine deformation of pupil object or create a no-deformation object. # We apply this when sampling the PSF, not to the pupil geometry. 
# This will set a default Ideal or a measured rotation, for example, @@ -115,9 +106,9 @@ def __init__(self, filt, # Separating detector tilt pixel scale effects from pupil distortion effects is # yet to be determined... see comments in Affine class definition. if affine2d is None: - self.affine2d = utils.Affine2d(mx=1.0, my=1.0, - sx=0.0, sy=0.0, - xo=0.0, yo=0.0, name="Ideal") + self.affine2d = utils.Affine2d( + mx=1.0, my=1.0, sx=0.0, sy=0.0, xo=0.0, yo=0.0, name="Ideal" + ) else: self.affine2d = affine2d @@ -127,13 +118,16 @@ def __init__(self, filt, # for the threshold application. # Data reduction gurus: tweak the threshold value with experience... # Gurus: tweak cvsupport with use... - self.cvsupport_threshold = {"F277W": 0.02, "F380M": 0.02, "F430M": 0.02, "F480M": 0.02} + self.cvsupport_threshold = { + "F277W": 0.02, + "F380M": 0.02, + "F430M": 0.02, + "F480M": 0.02, + } self.threshold = self.cvsupport_threshold[filt] def set_pscale(self, pscalex_deg=None, pscaley_deg=None): """ - Short Summary - ------------ Override pixel scale in header Parameters @@ -158,9 +152,9 @@ def set_pscale(self, pscalex_deg=None, pscaley_deg=None): def read_data_model(self, input_model): """ - Short Summary - ------------- - Retrieve info from input data model + Retrieve info from input data model and store in NIRISS class. + Trim refpix and roughly center science data and dq array. + Run Fourier bad pixel correction before returning science data. Parameters ---------- @@ -169,24 +163,157 @@ def read_data_model(self, input_model): Returns ------- - Data and parameters from input data model + scidata_ctrd: numpy array + Cropped, centered, optionally cleaned AMI data + dqmask_ctrd: + Cropped, centered mask of bad pixels """ - # The info4oif_dict will get pickled to disk when we write txt files of results. 
- # That way we don't drag in objects like instrument_data into code that reads text results - # and writes oifits files - a simple built-in dictionary is the only object used in this transfer. - self.telname = "JWST" - - # To use ami_sim's eg 65.6 mas/pixel scale we hardcode it here.,, - pscalex_deg = 65.6 / (1000 * 60 * 60) - pscaley_deg = 65.6 / (1000 * 60 * 60) - # Whatever we did set is averaged for isotropic pixel scale here - self.pscale_mas = 0.5 * (pscalex_deg + pscaley_deg) * (60 * 60 * 1000) - self.pscale_rad = utils.mas2rad(self.pscale_mas) - self.mask = NRM_mask_definitions(maskname=self.arrname, chooseholes=self.chooseholes, - holeshape=self.holeshape) + # all instrumentdata attributes will be available when oifits files written out + scidata = copy.deepcopy(np.array(input_model.data)) + bpdata = copy.deepcopy(np.array(input_model.dq)) + + # pixel scale recalculated and averaged + pscaledegx, pscaledegy = utils.degrees_per_pixel(input_model) + # At some point we want to use different X and Y pixel scales + pscale_deg = np.mean([pscaledegx, pscaledegy]) + self.pscale_rad = np.deg2rad(pscale_deg) + self.pscale_mas = pscale_deg * (60 * 60 * 1000) + self.pav3 = input_model.meta.pointing.pa_v3 + self.vparity = input_model.meta.wcsinfo.vparity + self.v3iyang = input_model.meta.wcsinfo.v3yangle + self.parangh = input_model.meta.wcsinfo.roll_ref + self.crpix1 = input_model.meta.wcsinfo.crpix1 + self.crpix2 = input_model.meta.wcsinfo.crpix2 + self.pupil = input_model.meta.instrument.pupil + self.proposer_name = input_model.meta.target.proposer_name + if input_model.meta.target.catalog_name == "UNKNOWN": + objname = input_model.meta.target.proposer_name + else: + objname = input_model.meta.target.catalog_name + self.objname = objname.replace("-", " ") + self.pi_name = input_model.meta.program.pi_name + self.ra = input_model.meta.target.ra + self.dec = input_model.meta.target.dec + self.pmra = input_model.meta.target.proper_motion_ra + self.pmdec = 
input_model.meta.target.proper_motion_dec + self.ra_uncertainty = input_model.meta.target.ra_uncertainty + self.dec_uncertainty = input_model.meta.target.dec_uncertainty + + + datestr = input_model.meta.visit.start_time.replace(" ", "T") + self.date = datestr # is this the right start time? + self.year = datestr[:4] + self.month = datestr[5:7] + self.day = datestr[8:10] + effinttm = input_model.meta.exposure.effective_exposure_time + nints = input_model.meta.exposure.nints + # if 2d input, model has already been expanded to 3d, so check 0th dimension + if input_model.data.shape[0] == 1: + self.itime = effinttm * nints + else: + self.itime = effinttm + if self.firstfew is not None: + if scidata.shape[0] > self.firstfew: + log.info(f"Analyzing only the first {self.firstfew:d} integrations") + scidata = scidata[: self.firstfew, :, :] + bpdata = bpdata[: self.firstfew, :, :] + else: + log.warning(f"Input firstfew={self.firstfew:d} is greater than the number of integrations") + log.warning("All integrations will be analyzed") + self.nwav = scidata.shape[0] + [self.wls.append(self.wls[0]) for f in range(self.nwav - 1)] + # Rotate mask hole centers by pav3 + v3i_yang to be in equatorial coordinates + ctrs_sky = self.mast2sky() + oifctrs = np.zeros(self.mask.ctrs.shape) + oifctrs[:, 0] = ctrs_sky[:, 1].copy() * -1 + oifctrs[:, 1] = ctrs_sky[:, 0].copy() * -1 + self.ctrs_eqt = oifctrs + self.ctrs_inst = self.mask.ctrs + self.hdia = self.mask.hdia + self.nslices = self.nwav + + # Trim refpix from all slices + scidata = scidata[:, 4:, :] + bpdata = bpdata[:, 4:, :] + + # find peak in median of refpix-trimmed scidata + med_im = np.median(scidata, axis=0) + + # Use median image to find big CR hits not already caught by pipeline + std_im = np.std(scidata, axis=0) + mediandiff = np.empty_like(scidata) + mediandiff[:, :, :] = scidata - med_im + nsigma = 10 + outliers = np.where(mediandiff > nsigma * std_im) + outliers2 = np.argwhere(mediandiff > nsigma * std_im) + + dqvalues = 
bpdata[outliers] + log.info( + f"{len(dqvalues)} additional pixels >10-sig from median of stack found" + ) + # decompose DQ values to check if they are already flagged DNU + count = 0 + for loc, dq_value in zip(outliers2, dqvalues): + bitarr = np.binary_repr(dq_value) + bad_types = [] + for i, elem in enumerate(bitarr[::-1]): + if elem == str(1): + badval = 2**i + key = next( + key for key, value in dqflags.pixel.items() if value == badval + ) + bad_types.append(key) + if "DO_NOT_USE" not in bad_types: + bpdata[loc[0], loc[1], loc[2]] += 1 + count += 1 + log.info(f"{count} DO_NOT_USE flags added to DQ array for found outlier pixels") + + # Roughly center scidata, bpdata around peak pixel position + peakx, peaky, r = utils.min_distance_to_edge(med_im) + scidata_ctrd = scidata[ + :, int(peakx - r):int(peakx + r + 1), int(peaky - r):int(peaky + r + 1) + ] + bpdata_ctrd = bpdata[ + :, int(peakx - r):int(peakx + r + 1), int(peaky - r):int(peaky + r + 1) + ] + + log.info( + "Cropping all integrations to %ix%i pixels around peak (%i,%i)" + % (2 * r + 1, 2 * r + 1, peakx + 4, peaky) + ) # +4 because of trimmed refpx + # apply bp fix here + if self.run_bpfix: + log.info('Applying Fourier bad pixel correction to cropped data, updating DQ array') + scidata_ctrd, bpdata_ctrd = bp_fix.fix_bad_pixels( + scidata_ctrd, + bpdata_ctrd, + input_model.meta.instrument.filter, + self.pscale_mas, + self.nrm_model, + ) + else: + log.info("Not running Fourier bad pixel fix") + + self.rootfn = input_model.meta.filename.replace(".fits", "") + + # all info needed to write out oifits should be stored in NIRISS object attributes + + # Make a bad pixel mask, either from real DQ data or zeros if usebp=False + if self.usebp: + log.info( + "usebp flag set to TRUE: bad pixels will be excluded from model fit" + ) + DO_NOT_USE = dqflags.pixel["DO_NOT_USE"] + JUMP_DET = dqflags.pixel["JUMP_DET"] + dq_dnu = bpdata_ctrd & DO_NOT_USE == DO_NOT_USE + dq_jump = bpdata_ctrd & JUMP_DET == JUMP_DET + 
dqmask_ctrd = dq_dnu | dq_jump + else: + log.info("usebp flag set to FALSE: all pixels will be used in model fit") + dqmask_ctrd = np.zeros_like(scidata_ctrd) - return input_model.data + return scidata_ctrd, dqmask_ctrd def reset_nwav(self, nwav): """ @@ -201,3 +328,42 @@ def reset_nwav(self, nwav): ------- """ self.nwav = nwav + + def mast2sky(self): + """ + Rotate hole center coordinates: + Clockwise by the V3 position angle - V3I_YANG from north in degrees if VPARITY = -1 + Counterclockwise by the V3 position angle - V3I_YANG from north in degrees if VPARITY = 1 + Hole center coords are in the V2, V3 plane in meters. + + Returns + ------- + ctrs_rot: array + Rotated coordinates to be put in OIFITS files. + """ + mask_ctrs = copy.deepcopy(self.mask.ctrs) + # rotate by an extra 90 degrees (RAC 9/21) + # these coords are just used to orient output in OIFITS files + # NOT used for the fringe fitting itself + mask_ctrs = utils.rotate2dccw(mask_ctrs, np.pi / 2.0) + vpar = self.vparity # Relative sense of rotation between Ideal xy and V2V3 + rot_ang = self.pav3 - self.v3iyang # subject to change! + + if self.pav3 == 0.0: + return mask_ctrs + else: + # Using rotate2sccw, which rotates **vectors** CCW in a fixed coordinate system, + # so to rotate coord system CW instead of the vector, reverse sign of rotation angle. 
Double-check comment + if vpar == -1: + # rotate clockwise <rotate coords clockwise> + ctrs_rot = utils.rotate2dccw(mask_ctrs, np.deg2rad(-rot_ang)) + log.info( + f"Rotating mask hole centers clockwise by {rot_ang:.3f} degrees" + ) + else: + # counterclockwise <rotate coords counterclockwise> + ctrs_rot = utils.rotate2dccw(mask_ctrs, np.deg2rad(rot_ang)) + log.info( + f"Rotating mask hole centers counterclockwise by {rot_ang:.3f} degrees" + ) + return ctrs_rot diff --git a/jwst/ami/leastsqnrm.py b/jwst/ami/leastsqnrm.py index e819d767c8..2bc81fce0c 100644 --- a/jwst/ami/leastsqnrm.py +++ b/jwst/ami/leastsqnrm.py @@ -13,8 +13,6 @@ def flip(holearray): """ - Short Summary - ------------- Change sign of 2nd coordinate of holes Parameters @@ -36,8 +34,6 @@ def flip(holearray): def rotatevectors(vectors, thetarad): """ - Short Summary - ------------- Rotate vectors by specified angle Parameters @@ -57,8 +53,9 @@ def rotatevectors(vectors, thetarad): c, s = (np.cos(thetarad), np.sin(thetarad)) ctrs_rotated = [] for vector in vectors: - ctrs_rotated.append([c * vector[0] - s * vector[1], - s * vector[0] + c * vector[1]]) + ctrs_rotated.append( + [c * vector[0] - s * vector[1], s * vector[0] + c * vector[1]] + ) rot_vectors = np.array(ctrs_rotated) @@ -67,8 +64,6 @@ def rotatevectors(vectors, thetarad): def mas2rad(mas): """ - Short Summary - ------------- Convert angle in milli arc-sec to radians Parameters @@ -82,14 +77,12 @@ def mas2rad(mas): angle in radians """ - rad = mas * (10**(-3)) / (3600 * 180 / np.pi) + rad = mas * (10 ** (-3)) / (3600 * 180 / np.pi) return rad def rad2mas(rad): """ - Short Summary - ------------- Convert input angle in radians to milli arc sec Parameters @@ -102,15 +95,13 @@ def rad2mas(rad): mas: float input angle in milli arc sec """ - mas = rad * (3600. 
* 180 / np.pi) * 10.**3 + mas = rad * (3600.0 * 180 / np.pi) * 10.0**3 return mas def sin2deltapistons(coeffs): """ - Short Summary - ------------- Each baseline has one sine and one cosine fringe with a coefficient that depends on the piston difference between the two holes that make the baseline. For a 7-hole mask there are 21 baselines and therefore there @@ -138,8 +129,6 @@ def sin2deltapistons(coeffs): def cos2deltapistons(coeffs): """ - Short Summary - ------------- Each baseline has one sine and one cosine fringe with a coefficient that depends on the piston difference between the two holes that make the baseline. For a 7-hole mask there are 21 baselines and therefore there @@ -171,8 +160,6 @@ def cos2deltapistons(coeffs): def replacenan(array): """ - Short Summary - ------------- Replace singularities encountered in the analytical hexagon Fourier transform with the analytically derived limits. @@ -194,8 +181,6 @@ def replacenan(array): def primarybeam(kx, ky): """ - Short Summary - ------------- Calculate the envelope intensity for circular holes & monochromatic light Parameters @@ -208,9 +193,14 @@ def primarybeam(kx, ky): env_int: 2D float array envelope intensity for circular holes & monochromatic light """ - R = (primarybeam.d / primarybeam.lam) * primarybeam.pitch * \ - np.sqrt((kx - primarybeam.offx) * (kx - primarybeam.offx) + - (ky - primarybeam.offy) * (ky - primarybeam.offy)) + R = ( + (primarybeam.d / primarybeam.lam) + * primarybeam.pitch + * np.sqrt( + (kx - primarybeam.offx) * (kx - primarybeam.offx) + + (ky - primarybeam.offy) * (ky - primarybeam.offy) + ) + ) pb = replacenan(jv(1, np.pi * R) / (2.0 * R)) pb = pb.transpose() @@ -222,8 +212,6 @@ def primarybeam(kx, ky): def hexpb(): """ - Short Summary - ------------- Calculate the primary beam for hexagonal holes. 
Parameters @@ -235,16 +223,19 @@ def hexpb(): pb * pb.conj(): 2D float array primary beam for hexagonal holes """ - pb = hexee.hex_eeAG(s=hexpb.size, c=(hexpb.offx, hexpb.offy), - d=hexpb.d, lam=hexpb.lam, pitch=hexpb.pitch) + pb = hexee.hex_eeAG( + s=hexpb.size, + c=(hexpb.offx, hexpb.offy), + d=hexpb.d, + lam=hexpb.lam, + pitch=hexpb.pitch, + ) return pb * pb.conj() def ffc(kx, ky): """ - Short Summary - ------------- Calculate cosine terms of analytic model. Parameters @@ -257,16 +248,21 @@ def ffc(kx, ky): cos_array: 2D float array cosine terms of analytic model """ - cos_array = 2 * np.cos(2 * np.pi * ffc.pitch * - ((kx - ffc.offx) * (ffc.ri[0] - ffc.rj[0]) + - (ky - ffc.offy) * (ffc.ri[1] - ffc.rj[1])) / ffc.lam) + cos_array = 2 * np.cos( + 2 + * np.pi + * ffc.pitch + * ( + (kx - ffc.offx) * (ffc.ri[0] - ffc.rj[0]) + + (ky - ffc.offy) * (ffc.ri[1] - ffc.rj[1]) + ) + / ffc.lam + ) return cos_array def ffs(kx, ky): """ - Short Summary - ------------- Calculate sine terms of analytic model. Parameters @@ -279,18 +275,24 @@ def ffs(kx, ky): sin_array: 2D float array sine terms of analytic model """ - sin_array = -2 * np.sin(2 * np.pi * ffs.pitch * - ((kx - ffs.offx) * (ffs.ri[0] - ffs.rj[0]) + - (ky - ffs.offy) * (ffs.ri[1] - ffs.rj[1])) / ffs.lam) + sin_array = -2 * np.sin( + 2 + * np.pi + * ffs.pitch + * ( + (kx - ffs.offx) * (ffs.ri[0] - ffs.rj[0]) + + (ky - ffs.offy) * (ffs.ri[1] - ffs.rj[1]) + ) + / ffs.lam + ) return sin_array -def model_array(ctrs, lam, oversample, pitch, fov, d, - centering='PIXELCENTERED', shape='circ'): +def model_array( + ctrs, lam, oversample, pitch, fov, d, centering="PIXELCENTERED", shape="circ" +): """ - Short Summary - ------------- Create a model using the specified wavelength. 
Parameters @@ -331,20 +333,20 @@ def model_array(ctrs, lam, oversample, pitch, fov, d, ffmodel: list of 3 2D float arrays model array """ - if centering == 'PIXELCORNER': + if centering == "PIXELCORNER": off = np.array([0.0, 0.0]) - elif centering == 'PIXELCENTERED': + elif centering == "PIXELCENTERED": off = np.array([0.5, 0.5]) else: off = centering - log.debug('------------------') - log.debug('Model Parameters:') - log.debug('------------------') - log.debug('pitch:%s fov:%s oversampling:%s ', pitch, fov, oversample) - log.debug('centers:%s', ctrs) - log.debug('wavelength:%s centering:%s off:%s ', lam, centering, off) - log.debug('shape:%s d:%s ', shape, d) + log.debug("------------------") + log.debug("Model Parameters:") + log.debug("------------------") + log.debug("pitch:%s fov:%s oversampling:%s ", pitch, fov, oversample) + log.debug("centers:%s", ctrs) + log.debug("wavelength:%s centering:%s off:%s ", lam, centering, off) + log.debug("shape:%s d:%s ", shape, d) # primary beam parameters: primarybeam.shape = shape @@ -400,23 +402,33 @@ def model_array(ctrs, lam, oversample, pitch, fov, d, ffmodel.append(np.transpose(np.fromfunction(ffc, ffc.size))) ffmodel.append(np.transpose(np.fromfunction(ffs, ffs.size))) - if shape == 'circ': # if unspecified (default), or specified as 'circ' + if shape == "circ": # if unspecified (default), or specified as 'circ' return np.fromfunction(primarybeam, ffc.size), ffmodel - elif shape == 'hex': + elif shape == "hex": return hexpb(), ffmodel else: - log.critical('Must provide a valid hole shape. Current supported shapes \ - are circ and hex.') + log.critical( + "Must provide a valid hole shape. Current supported shapes \ + are circ and hex." 
+ ) return None -def weighted_operations(img, model, weights): +def weighted_operations(img, model, dqm=None): """ - Short Summary - ------------- Performs least squares matrix operations to solve A x = b, where A is the model, b is the data (image), and x is the coefficient vector we are solving - for. In 2-D, data x = inv(At.A).(At.b) + for. + + Here we are weighting data by Poisson variance: + x = inv(At.W.A).(At.W.b) + where W is a diagonal matrix of weights w_i, + weighting each data point i by the inverse of its variance: + w_i = 1 / sigma_i^2 + For photon noise, the data, i.e. the image values b_i have variance + proportional to b_i with an e.g. ADU to electrons conversion factor. + If this factor is the same for all pixels, we do not need to include + it here. Parameters ---------- @@ -426,8 +438,8 @@ def weighted_operations(img, model, weights): model: 2D float array analytic model - weights: 2D float array - values of weights + dqm: 2D bool array + bad pixel mask Returns ------- @@ -436,48 +448,65 @@ def weighted_operations(img, model, weights): res: 2D float array residual; difference between model and fit + + Notes + ----- + Use matrix_operations() for equal weighting of data. """ - clist = weights.reshape(weights.shape[0] * weights.shape[1])**2 + + # Remove not-to-be-fit data from the flattened "img" data vector flatimg = img.reshape(np.shape(img)[0] * np.shape(img)[1]) - nanlist = np.where(np.isnan(flatimg)) - flatimg = np.delete(flatimg, nanlist) - clist = np.delete(clist, nanlist) - # A - flatmodel_nan = model.reshape(np.shape(model)[0] * np.shape(model)[1], - np.shape(model)[2]) - flatmodel = np.zeros((len(flatimg), np.shape(model)[2])) + flatdqm = dqm.reshape(np.shape(img)[0] * np.shape(img)[1]) + if dqm is not None: + nanlist = np.where(flatdqm) # where DO_NOT_USE up. 
+ else: + nanlist = (np.array(()),) # shouldn't occur w/MAST JWST data + + # see original linearfit https://github.com/agreenbaum/ImPlaneIA: + # agreenbaum committed on May 21, 2017 1 parent 3e0fb8b + # commit bf02eb52c5813cb5d77036174a7caba703f9d366 + # + flatimg = np.delete(flatimg, nanlist) # DATA values + + # photon noise variance - proportional to ADU + # (for roughly uniform adu2electron factor) + variance = np.abs(flatimg) + # this resets the weights of pixels with negative or unity values to zero + # we ignore data with unity or lower values - weight it not-at-all.. + weights = np.where(flatimg <= 1.0, 0.0, 1.0 / np.sqrt(variance)) # anand 2022 Jan + + log.debug(f"{len(nanlist[0]):d} bad pixels skipped in weighted fringefitter") + + # A - but delete all pixels flagged by dq array + flatmodel_nan = model.reshape( + np.shape(model)[0] * np.shape(model)[1], np.shape(model)[2] + ) + flatmodel = np.zeros((len(flatimg), np.shape(model)[2])) for fringe in range(np.shape(model)[2]): flatmodel[:, fringe] = np.delete(flatmodel_nan[:, fringe], nanlist) - # At (A transpose) - flatmodeltransp = flatmodel.transpose() - # At.C.A (makes square matrix) - CdotA = flatmodel.copy() + # A.w + Aw = flatmodel * weights[:, np.newaxis] + bw = flatimg * weights + # resids are pixel value residuals, flattened to 1d vector + x, rss, rank, singvals = np.linalg.lstsq(Aw, bw) - for i in range(flatmodel.shape[1]): - CdotA[:, i] = clist * flatmodel[:, i] - - modelproduct = np.dot(flatmodeltransp, CdotA) - # At.C.b - Cdotb = clist * flatimg - data_vector = np.dot(flatmodeltransp, Cdotb) - # inv(At.C.A) - inverse = linalg.inv(modelproduct) + # actual residuals in image: + res = flatimg - np.dot(flatmodel, x) - x = np.dot(inverse, data_vector) - res = np.dot(flatmodel, x) - flatimg + # put bad pixels back naninsert = nanlist[0] - np.arange(len(nanlist[0])) + # calculate residuals with fixed but unused bad pixels as nans res = np.insert(res, naninsert, np.nan) res = 
res.reshape(img.shape[0], img.shape[1]) - return x, res + cond = None + return x, res, cond, singvals # no condition number yet... -def matrix_operations(img, model, flux=None, linfit=False): +def matrix_operations(img, model, flux=None, linfit=False, dqm=None): """ - Short Summary - ------------- Use least squares matrix operations to solve A x = b, where A is the model, b is the data (img), and x is the coefficient vector we are solving for. In 2-D, data x = inv(At.A).(At.b). If a flux is given, it will be used it @@ -494,6 +523,9 @@ def matrix_operations(img, model, flux=None, linfit=False): flux: float normalization factor + dqm: 2D bool array + bad pixel mask slice + Returns ------- x: 1D float array @@ -506,25 +538,51 @@ def matrix_operations(img, model, flux=None, linfit=False): condition number of the inverse of the product of model and its transpose """ + flatimg = img.reshape(np.shape(img)[0] * np.shape(img)[1]) - nanlist = np.where(np.isnan(flatimg)) + flatdqm = dqm.reshape(np.shape(img)[0] * np.shape(img)[1]) + log.info("fringefitting.leastsqnrm.matrix_operations(): ", end="") + log.info(f"\n\timg {img.shape:} \n\tdqm {dqm.shape:}", end="") + log.info( + f"\n\tL x W = {img.shape[0]:d} x {img.shape[1]:d} = {img.shape[0] * img.shape[1]:d}", + end="", + ) + log.info(f"\n\tflatimg {flatimg.shape:}", end="") + log.info(f"\n\tflatdqm {flatdqm.shape:}", end="") + + + log.info("\n\ttype(dqm)", type(dqm), end="") + if dqm is not None: + nanlist = np.where(flatdqm) # where DO_NOT_USE up. 
+ else: + nanlist = (np.array(()),) # shouldn't occur w/MAST JWST data + + log.info(f"\n\ttype(nanlist) {type(nanlist):}, len={len(nanlist):}", end="") + log.info(f"\n\tnumber of nanlist pixels: {len(nanlist[0]):d} items", end="") + log.info(f"\n\t{len(nanlist[0]):d} DO_NOT_USE pixels found in data slice", end="") + flatimg = np.delete(flatimg, nanlist) + log.info(f"\n\tflatimg {flatimg.shape:} after deleting {len(nanlist[0]):d}", end="") + if flux is not None: flatimg = flux * flatimg / flatimg.sum() # A - flatmodel_nan = model.reshape(np.shape(model)[0] * np.shape(model)[1], - np.shape(model)[2]) - + flatmodel_nan = model.reshape( + np.shape(model)[0] * np.shape(model)[1], np.shape(model)[2] + ) flatmodel = np.zeros((len(flatimg), np.shape(model)[2])) - - log.debug('Matrix_opers - flat model dimensions: %s', np.shape(flatmodel)) - log.debug('Matrix_opers - flat image dimensions: %s', np.shape(flatimg)) + log.info(f"\n\tflatmodel_nan {flatmodel_nan.shape:}", end="") + log.info(f"\n\tflatmodel {flatmodel.shape:}", end="") + log.info( + f"\n\tdifference {flatmodel_nan.shape[0] - flatmodel.shape[0]:}", end="" + ) + log.info("flat model dimensions ", np.shape(flatmodel)) + log.info("flat image dimensions ", np.shape(flatimg)) for fringe in range(np.shape(model)[2]): flatmodel[:, fringe] = np.delete(flatmodel_nan[:, fringe], nanlist) - # At (A transpose) flatmodeltransp = flatmodel.transpose() # At.A (makes square matrix) @@ -536,24 +594,26 @@ def matrix_operations(img, model, flux=None, linfit=False): cond = np.linalg.cond(inverse) x = np.dot(inverse, data_vector) - res = np.dot(flatmodel, x) - flatimg + res = flatimg - np.dot(flatmodel, x) + + # put bad pixels back naninsert = nanlist[0] - np.arange(len(nanlist[0])) + # calculate residuals with fixed but unused bad pixels as nans res = np.insert(res, naninsert, np.nan) res = res.reshape(img.shape[0], img.shape[1]) - log.debug('------------------') - log.debug('Matrix Operations:') - log.debug('------------------') - 
log.debug('model flux:%s data flux:%s flat model dimensions:%s ', flux, - flatimg.sum(), np.shape(flatmodel)) - log.debug('flat image dimensions:%s model transpose dimensions:%s ', - np.shape(flatimg), np.shape(flatmodeltransp)) - log.debug('transpose * image data dimensions:%s flatimg * transpose' + - 'dimensions:%s ', np.shape(data_vector), np.shape(inverse)) + log.info("model flux", flux) + log.info("data flux", flatimg.sum()) + log.info("flat model dimensions ", np.shape(flatmodel)) + log.info("model transpose dimensions ", np.shape(flatmodeltransp)) + log.info("flat image dimensions ", np.shape(flatimg)) + log.info("transpose * image data dimensions", np.shape(data_vector)) + log.info("flat img * transpose dimensions", np.shape(inverse)) if linfit: try: from linearfit import linearfit + # dependent variables M = np.mat(flatimg) @@ -579,19 +639,20 @@ def matrix_operations(img, model, flux=None, linfit=False): result.inverse_covariance_matrix = [] linfit_result = result + log.info("Returned linearfit result") except ImportError: linfit_result = None + log.info("linearfit module not imported, no covariances saved.") else: linfit_result = None + log.info("linearfit not attempted, no covariances saved.") return x, res, cond, linfit_result def multiplyenv(env, fringeterms): """ - Short Summary - ------------- Multiply the envelope by each fringe 'image'. Parameters @@ -609,33 +670,25 @@ def multiplyenv(env, fringeterms): """ # The envelope has size (fov, fov). 
This multiplies the envelope by each # of the 43 slices in the fringe model - full = np.ones((np.shape(fringeterms)[1], np.shape(fringeterms)[2], - np.shape(fringeterms)[0] + 1)) + full = np.ones( + ( + np.shape(fringeterms)[1], + np.shape(fringeterms)[2], + np.shape(fringeterms)[0] + 1, + ) + ) for i, val in enumerate(fringeterms): full[:, :, i] = env * fringeterms[i] - log.debug('Total number of fringe terms: %s', len(fringeterms) - 1) + log.debug("Total number of fringe terms: %s", len(fringeterms) - 1) return full def tan2visibilities(coeffs): """ - Long Summary - ------------ - Technically the fit measures phase AND amplitude, so to retrieve the - phase we need to consider both sin and cos terms. Consider one fringe: - A { cos(kx)cos(dphi) + sin(kx)sin(dphi) } = - A(a cos(kx) + b sin(kx)), where a = cos(dphi) and b = sin(dphi) - and A is the fringe amplitude, therefore coupling a and b. - In practice we measure A*a and A*b from the coefficients, so: - Ab/Aa = b/a = tan(dphi) - call a' = A*a and b' = A*b (we actually measure a', b') - (A*sin(dphi))^2 + (A*cos(dphi)^2) = A^2 = a'^2 + b'^2 - Short Summary - ------------- From the solution to the fit, calculate the fringe amplitude and phase. Parameters @@ -646,15 +699,31 @@ def tan2visibilities(coeffs): ------- amp, delta: 1D float array, 1D float array fringe amplitude & phase + + Notes + ----- + Technically the fit measures phase AND amplitude, so to retrieve the + phase we need to consider both sin and cos terms. Consider one fringe: + A { cos(kx)cos(dphi) + sin(kx)sin(dphi) } = + A(a cos(kx) + b sin(kx)), where a = cos(dphi) and b = sin(dphi) + and A is the fringe amplitude, therefore coupling a and b. 
+ In practice we measure A*a and A*b from the coefficients, so: + Ab/Aa = b/a = tan(dphi) + call a' = A*a and b' = A*b (we actually measure a', b') + (A*sin(dphi))^2 + (A*cos(dphi)^2) = A^2 = a'^2 + b'^2 + + """ delta = np.zeros(int((len(coeffs) - 1) / 2)) amp = np.zeros(int((len(coeffs) - 1) / 2)) for q in range(int((len(coeffs) - 1) / 2)): - delta[q] = (np.arctan2(coeffs[2 * q + 2], coeffs[2 * q + 1])) - amp[q] = np.sqrt(coeffs[2 * q + 2]**2 + coeffs[2 * q + 1]**2) + delta[q] = np.arctan2(coeffs[2 * q + 2], coeffs[2 * q + 1]) + amp[q] = np.sqrt(coeffs[2 * q + 2] ** 2 + coeffs[2 * q + 1] ** 2) - log.debug(f"tan2visibilities: shape coeffs:{np.shape(coeffs)} " - f"shape delta:{np.shape(delta)}") + log.debug( + f"tan2visibilities: shape coeffs:{np.shape(coeffs)} " + f"shape delta:{np.shape(delta)}" + ) # returns fringe amplitude & phase return amp, delta @@ -662,8 +731,6 @@ def tan2visibilities(coeffs): def populate_antisymmphasearray(deltaps, n=7): """ - Short Summary - ------------- Populate the antisymmetric fringe phase array: fringephasearray[0,q+1:] = coeffs[0:6] @@ -703,8 +770,6 @@ def populate_antisymmphasearray(deltaps, n=7): def populate_symmamparray(amps, n=7): """ - Short Summary - ------------- Populate the symmetric fringe amplitude array Parameters @@ -737,8 +802,6 @@ def populate_symmamparray(amps, n=7): def redundant_cps(deltaps, n=7): """ - Short Summary - ------------- Calculate closure phases for each set of 3 holes Parameters @@ -762,9 +825,11 @@ def redundant_cps(deltaps, n=7): for kk in range(n - 2): for ii in range(n - kk - 2): for jj in range(n - kk - ii - 2): - cps[nn + jj] = arr[kk, ii + kk + 1] \ - + arr[ii + kk + 1, jj + ii + kk + 2] \ + cps[nn + jj] = ( + arr[kk, ii + kk + 1] + + arr[ii + kk + 1, jj + ii + kk + 2] + arr[jj + ii + kk + 2, kk] + ) nn += jj + 1 @@ -773,8 +838,6 @@ def redundant_cps(deltaps, n=7): def closurephase(deltap, n=7): """ - Short Summary - ------------- Calculate closure phases between each pair of holes Parameters 
@@ -792,30 +855,49 @@ def closurephase(deltap, n=7): """ # p is a triangular matrix set up to calculate closure phases if n == 7: - p = np.array([deltap[:6], deltap[6:11], deltap[11:15], - deltap[15:18], deltap[18:20], deltap[20:]], dtype=object) + p = np.array( + [ + deltap[:6], + deltap[6:11], + deltap[11:15], + deltap[15:18], + deltap[18:20], + deltap[20:], + ], + dtype=object, + ) elif n == 10: - p = np.array([deltap[:9], deltap[9:17], deltap[17:24], - deltap[24:30], deltap[30:35], deltap[35:39], - deltap[39:42], deltap[42:44], deltap[44:]], dtype=object) + p = np.array( + [ + deltap[:9], + deltap[9:17], + deltap[17:24], + deltap[24:30], + deltap[30:35], + deltap[35:39], + deltap[39:42], + deltap[42:44], + deltap[44:], + ], + dtype=object, + ) else: - log.critical('invalid hole number: %s', n) + log.critical("invalid hole number: %s", n) # calculates closure phases for general N-hole mask (with p-array set # up properly above) cps = np.zeros((n - 1) * (n - 2) // 2) for j1 in range(n - 2): for j2 in range(n - 2 - j1): - cps[int(j1 * ((n + (n - 3) - j1) / 2.0)) + j2] = \ + cps[int(j1 * ((n + (n - 3) - j1) / 2.0)) + j2] = ( p[j1][0] + p[j1 + 1][j2] - p[j1][j2 + 1] + ) return cps def closure_amplitudes(amps, n=7): """ - Short Summary - ------------- Calculate closure amplitudes Parameters @@ -840,10 +922,14 @@ def closure_amplitudes(amps, n=7): for jj in range(n - ii - 3): for kk in range(n - jj - ii - 3): for ll in range(n - jj - ii - kk - 3): - cas[nn + ll] = arr[ii, jj + ii + 1] \ - * arr[ll + ii + jj + kk + 3, kk + jj + ii + 2] \ - / (arr[ii, kk + ii + jj + 2] * - arr[jj + ii + 1, ll + ii + jj + kk + 3]) + cas[nn + ll] = ( + arr[ii, jj + ii + 1] + * arr[ll + ii + jj + kk + 3, kk + jj + ii + 2] + / ( + arr[ii, kk + ii + jj + 2] + * arr[jj + ii + 1, ll + ii + jj + kk + 3] + ) + ) nn = nn + ll + 1 return cas diff --git a/jwst/ami/lg_model.py b/jwst/ami/lg_model.py index 954b2f8a5b..9c475d3e36 100755 --- a/jwst/ami/lg_model.py +++ b/jwst/ami/lg_model.py @@ -2,24 
+2,17 @@ # Lacour-Greenbaum algorithm. First written by Alexandra Greenbaum in 2014. import logging import numpy as np -from astropy.io import fits from . import leastsqnrm as leastsqnrm from . import analyticnrm2 from . import utils +from . import mask_definitions log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) log.setLevel(logging.DEBUG) -# define phi at the center of F430M band: -phi_nb = np.array([0.028838669455909766, -0.061516214504502634, - 0.12390958557781348, -0.020389361461019516, - 0.016557347248600723, -0.03960017912525625, - -0.04779984719154552]) # phi in waves -phi_nb = phi_nb * 4.3e-6 # phi_nb in m - m = 1.0 mm = 1.0e-3 * m um = 1.0e-6 * m @@ -27,7 +20,7 @@ class NrmModel: - ''' + """ A class for conveniently dealing with an "NRM object" This should be able to take an NRM_mask_definitions object for mask geometry. Defines mask geometry and detector-scale parameters. @@ -37,14 +30,23 @@ class NrmModel: Masks: gpi_g10s40, jwst, visir Algorithm documented in Greenbaum, A. Z., Pueyo, L. P., Sivaramakrishnan, A., and Lacour, S., Astrophysical Journal vol. 798, Jan 2015. - ''' + """ - def __init__(self, mask=None, holeshape="circ", pixscale=None, - over=1, pixweight=None, datapath="", phi=None, - refdir="", chooseholes=False, affine2d=None, **kwargs): + def __init__( + self, + mask=None, + holeshape="circ", + pixscale=None, + over=1, + pixweight=None, + datapath="", + phi=None, + refdir="", + chooseholes=False, + affine2d=None, + **kwargs, + ): """ - Short Summary - ------------- Set attributes of NrmModel class. Parameters @@ -89,80 +91,30 @@ def __init__(self, mask=None, holeshape="circ", pixscale=None, self.over = over self.pixweight = pixweight - mask = "jwst" - self.maskname = mask - - holedict = {} - # Assemble holes by actual open segment names. Either the full mask or - # the subset-of-holes mask will be V2-reversed after the as_designed - # centers are installed in the object. 
- allholes = ('b4', 'c2', 'b5', 'b2', 'c1', 'b6', 'c6') - holedict['b4'] = [0.00000000, -2.640000] # B4 -> B4 - holedict['c2'] = [-2.2863100, 0.0000000] # C5 -> C2 - holedict['b5'] = [2.2863100, -1.3200001] # B3 -> B5 - holedict['b2'] = [-2.2863100, 1.3200001] # B6 -> B2 - holedict['c1'] = [-1.1431500, 1.9800000] # C6 -> C1 - holedict['b6'] = [2.2863100, 1.3200001] # B2 -> B6 - holedict['c6'] = [1.1431500, 1.9800000] # C1 -> C6 - - if mask.lower() == 'jwst': - if chooseholes is not False: - # holes B4 B5 C6 asbuilt for orientation testing - holelist = [] - for h in allholes: - if h in chooseholes: - holelist.append(holedict[h]) - self.ctrs_asdesigned = np.array(holelist) - else: - # the REAL THING - as_designed 7 hole, m in PM space, - # no distortion, shape (7,2) - # below: as-designed -> as-built mapping - self.ctrs_asdesigned = np.array([ - [0.00000000, -2.640000], # B4 -> B4 - [-2.2863100, 0.0000000], # C5 -> C2 - [2.2863100, -1.3200001], # B3 -> B5 - [-2.2863100, 1.3200001], # B6 -> B2 - [-1.1431500, 1.9800000], # C6 -> C1 - [2.2863100, 1.3200001], # B2 -> B6 - [1.1431500, 1.9800000]]) # C1 -> C6 - - self.d = 0.82 * m - self.D = 6.5 * m - else: - self.ctrs, self.d, self.D = np.array(mask.ctrs), mask.hdia, \ - mask.activeD - - if mask.lower() == 'jwst': - """ - Preserve ctrs.as_designed (treat as immutable) - Reverse V2 axis coordinates to close C5 open C2, and others - follow suit... - Preserve ctrs.as_built (treat as immutable) - """ - self.ctrs_asbuilt = self.ctrs_asdesigned.copy() - - # create 'live' hole centers in an ideal, orthogonal undistorted - # xy pupil space, - # eg maps open hole C5 in as_designed to C2 as_built, eg C4 - # unaffected.... - self.ctrs_asbuilt[:, 0] *= -1 - - # LG++ rotate hole centers by 90 deg to match MAST o/p DMS PSF with - # no affine2d transformations 8/2018 AS - # LG++ The above aligns the hole pattern with the hex analytic FT, - # flat top & bottom as seen in DMS data. 
8/2018 AS - # overwrites attributes: - self.ctrs_asbuilt = utils.rotate2dccw(self.ctrs_asbuilt, np.pi / 2.0) - - # create 'live' hole centers in an ideal, orthogonal undistorted xy - # pupil space, - self.ctrs = self.ctrs_asbuilt.copy() + # mask = "jwst" + # self.maskname = mask + + # get these from mask_definitions instead + if mask is None: + log.info("No mask name specified for model, using jwst_g7s6c") + mask = mask_definitions.NRM_mask_definitions( + maskname="jwst_g7s6c", chooseholes=chooseholes, holeshape="hex" + ) + elif isinstance(mask, str): + mask = mask_definitions.NRM_mask_definitions( + maskname=mask, chooseholes=chooseholes, holeshape="hex" + ) + self.ctrs = mask.ctrs + self.d = mask.hdia + self.D = mask.activeD self.N = len(self.ctrs) self.datapath = datapath self.refdir = refdir self.fmt = "%10.4e" + # get latest OPD from WebbPSF? + if phi: # meters of OPD at central wavelength if phi == "perfect": self.phi = np.zeros(self.N) # backwards compatibility @@ -180,16 +132,14 @@ def __init__(self, mask=None, holeshape="circ", pixscale=None, # We apply this when sampling the PSF, not to the pupil geometry. if affine2d is None: - self.affine2d = utils.Affine2d(mx=1.0, my=1.0, - sx=0.0, sy=0.0, - xo=0.0, yo=0.0, name="Ideal") + self.affine2d = utils.Affine2d( + mx=1.0, my=1.0, sx=0.0, sy=0.0, xo=0.0, yo=0.0, name="Ideal" + ) else: self.affine2d = affine2d def simulate(self, fov=None, bandpass=None, over=None, psf_offset=(0, 0)): - ''' - Short Summary - ------------- + """ Simulate a detector-scale psf using parameters input from the call and already stored in the object,and generate a simulation fits header storing all of the parameters used to generate that psf. 
If the input @@ -213,35 +163,31 @@ def simulate(self, fov=None, bandpass=None, over=None, psf_offset=(0, 0)): ------- Object's 'psf': float 2D array simulated psf - ''' - self.simhdr = fits.PrimaryHDU().header + """ # First set up conditions for choosing various parameters self.bandpass = bandpass if over is None: over = 1 # ? Always comes in as integer. - self.simhdr['OVER'] = (over, 'sim pix = det pix/over') - self.simhdr['PIX_OV'] = (self.pixel / float(over), - 'Sim pixel scale in radians') - self.psf_over = np.zeros((over * fov, over * fov)) nspec = 0 # accumulate polychromatic oversampled psf in the object for w, l in bandpass: # w: wavelength's weight, l: lambda (wavelength) - self.psf_over += w * analyticnrm2.psf(self.pixel, # det pixel, rad - fov, # in detpix number - over, - self.ctrs, - self.d, l, - self.phi, - psf_offset, # det pixels - self.affine2d, - shape=self.holeshape) + self.psf_over += w * analyticnrm2.psf( + self.pixel, # det pixel, rad + fov, # in detpix number + over, + self.ctrs, + self.d, + l, + self.phi, + psf_offset, # det pixels + self.affine2d, + shape=self.holeshape, + ) # offset signs fixed to agree w/DS9, +x shifts ctr R, +y shifts up - self.simhdr["WAVL{0}".format(nspec)] = (l, "wavelength (m)") - self.simhdr["WGHT{0}".format(nspec)] = (w, "weight") nspec += 1 # store the detector pixel scale psf in the object @@ -249,11 +195,10 @@ def simulate(self, fov=None, bandpass=None, over=None, psf_offset=(0, 0)): return self.psf - def make_model(self, fov=None, bandpass=None, over=1, psf_offset=(0, 0), - pixscale=None): + def make_model( + self, fov=None, bandpass=None, over=1, psf_offset=(0, 0), pixscale=None + ): """ - Short Summary - ------------- Generates the fringe model with the attributes of the object using a bandpass that is either a single wavelength or a list of tuples of the form [(weight1, wavl1), (weight2, wavl2),...]. 
The model is @@ -288,7 +233,7 @@ def make_model(self, fov=None, bandpass=None, over=1, psf_offset=(0, 0), self.over = over - if hasattr(self, 'pixscale_measured'): + if hasattr(self, "pixscale_measured"): if self.pixscale_measured is not None: self.modelpix = self.pixscale_measured @@ -306,44 +251,57 @@ def make_model(self, fov=None, bandpass=None, over=1, psf_offset=(0, 0), self.model = np.zeros((self.fov, self.fov, self.N * (self.N - 1) + 2)) self.model_beam = np.zeros((self.over * self.fov, self.over * self.fov)) - self.fringes = np.zeros((self.N * (self.N - 1) + 1, self.over * self.fov, - self.over * self.fov)) + self.fringes = np.zeros( + (self.N * (self.N - 1) + 1, self.over * self.fov, self.over * self.fov) + ) for w, l in bandpass: # w: weight, l: lambda (wavelength) # model_array returns the envelope and fringe model as a list of # oversampled fov x fov slices - pb, ff = analyticnrm2.model_array(self.modelctrs, l, self.over, - self.modelpix, self.fov, self.d, - shape=self.holeshape, - psf_offset=psf_offset, - affine2d=self.affine2d) - - log.debug("Passed to model_array: psf_offset: {0}". - format(psf_offset)) - log.debug("Primary beam in the model created: {0}". 
- format(pb)) + pb, ff = analyticnrm2.model_array( + self.modelctrs, + l, + self.over, + self.modelpix, + self.fov, + self.d, + shape=self.holeshape, + psf_offset=psf_offset, + affine2d=self.affine2d, + ) + + log.debug("Passed to model_array: psf_offset: {0}".format(psf_offset)) + log.debug("Primary beam in the model created: {0}".format(pb)) self.model_beam += pb self.fringes += ff # this routine multiplies the envelope by each fringe "image" self.model_over = leastsqnrm.multiplyenv(pb, ff) - model_binned = np.zeros((self.fov, self.fov, - self.model_over.shape[2])) + model_binned = np.zeros((self.fov, self.fov, self.model_over.shape[2])) # loop over slices "sl" in the model for sl in range(self.model_over.shape[2]): - model_binned[:, :, sl] = utils.rebin(self.model_over[:, :, sl], - (self.over, self.over)) + model_binned[:, :, sl] = utils.rebin( + self.model_over[:, :, sl], (self.over, self.over) + ) self.model += w * model_binned return self.model - def fit_image(self, image, reference=None, pixguess=None, rotguess=0, - psf_offset=(0, 0), modelin=None, savepsfs=False): + def fit_image( + self, + image, + reference=None, + pixguess=None, + rotguess=0, + psf_offset=(0, 0), + modelin=None, + savepsfs=False, + dqm=None, + weighted=False, + ): """ - Short Summary - ------------- Run a least-squares fit on an input image; find the appropriate wavelength scale and rotation. 
If a model is not specified then this method will find the appropriate wavelength scale, rotation (and @@ -379,11 +337,18 @@ def fit_image(self, image, reference=None, pixguess=None, rotguess=0, savepsfs: boolean save the psfs for writing to file (currently unused) + dqm: 2D array + bad pixel mask of same dimensions as image + + weighted: boolean + weight + Returns ------- None """ self.model_in = modelin + self.weighted = weighted self.saveval = savepsfs if modelin is None: # No model provided @@ -395,43 +360,47 @@ def fit_image(self, image, reference=None, pixguess=None, rotguess=0, if reference is None: self.reference = image if np.isnan(image.any()): - raise ValueError("Must have non-NaN image to " + - "crosscorrelate for scale. Reference " + - "image should also be centered.") + raise ValueError( + "Must have non-NaN image to " + + "crosscorrelate for scale. Reference " + + "image should also be centered." + ) else: self.reference = reference self.pixscale_measured = self.pixel self.fov = image.shape[0] - self.fittingmodel = self.make_model(self.fov, - bandpass=self.bandpass, - over=self.over, rotate=True, - psf_offset=self.bestcenter, - pixscale=self.pixel) + self.fittingmodel = self.make_model( + self.fov, + bandpass=self.bandpass, + over=self.over, + rotate=True, + psf_offset=self.bestcenter, + pixscale=self.pixel, + ) else: self.fittingmodel = modelin - - self.soln, self.residual, self.cond, \ - self.linfit_result = \ - leastsqnrm.matrix_operations(image, self.fittingmodel) + if self.weighted is False: + self.soln, self.residual, self.cond, self.linfit_result = ( + leastsqnrm.matrix_operations(image, self.fittingmodel, dqm=dqm) + ) + else: + self.soln, self.residual, self.cond, self.singvals = ( + leastsqnrm.weighted_operations(image, self.fittingmodel, dqm=dqm) + ) self.rawDC = self.soln[-1] self.flux = self.soln[0] self.soln = self.soln / self.soln[0] # fringephase now in radians - self.fringeamp, self.fringephase = \ - 
leastsqnrm.tan2visibilities(self.soln) - self.fringepistons = utils.fringes2pistons(self.fringephase, - len(self.ctrs)) - self.redundant_cps = leastsqnrm.redundant_cps(self.fringephase, - n=self.N) + self.fringeamp, self.fringephase = leastsqnrm.tan2visibilities(self.soln) + self.fringepistons = utils.fringes2pistons(self.fringephase, len(self.ctrs)) + self.redundant_cps = leastsqnrm.redundant_cps(self.fringephase, n=self.N) self.redundant_cas = leastsqnrm.closure_amplitudes(self.fringeamp, n=self.N) def create_modelpsf(self): """ - Short Summary - ------------- Make an image from the object's model and fit solutions, by setting the NrmModel object's modelpsf attribute @@ -453,11 +422,10 @@ def create_modelpsf(self): return None - def improve_scaling(self, img, scaleguess=None, rotstart=0.0, - centering='PIXELCENTERED'): + def improve_scaling( + self, img, scaleguess=None, rotstart=0.0, centering="PIXELCENTERED" + ): """ - Short Summary - ------------- Determine the scale and rotation that best fits the data. Correlations are calculated in the image plane, in anticipation of data with many bad pixels. @@ -487,7 +455,7 @@ def improve_scaling(self, img, scaleguess=None, rotstart=0.0, self.gof: float goodness of fit """ - if not hasattr(self, 'bandpass'): + if not hasattr(self, "bandpass"): raise ValueError("This obj has no specified bandpass/wavelength") reffov = img.shape[0] @@ -499,7 +467,7 @@ def improve_scaling(self, img, scaleguess=None, rotstart=0.0, # User can specify a reference set of phases (m) at an earlier point so # that all PSFs are simulated with those phase pistons (e.g. 
measured # from data at an earlier iteration - if not hasattr(self, 'refphi'): + if not hasattr(self, "refphi"): self.refphi = np.zeros(len(self.ctrs)) else: pass @@ -508,15 +476,20 @@ def improve_scaling(self, img, scaleguess=None, rotstart=0.0, for q, scal in enumerate(self.scallist): self.test_pixscale = self.pixel * scal self.pixscales[q] = self.test_pixscale - psf = self.simulate(bandpass=self.bandpass, fov=reffov, - pixel=self.test_pixscale, centering=centering) + psf = self.simulate( + bandpass=self.bandpass, + fov=reffov, + pixel=self.test_pixscale, + centering=centering, + ) pixscl_corrlist[q, :, :] = run_data_correlate(img, psf) self.pixscl_corr[q] = np.max(pixscl_corrlist[q]) if True in np.isnan(self.pixscl_corr): raise ValueError("Correlation produced NaNs, check your work!") self.pixscale_optimal, scal_maxy = utils.findmax( - mag=self.pixscales, vals=self.pixscl_corr) + mag=self.pixscales, vals=self.pixscl_corr + ) self.pixscale_factor = self.pixscale_optimal / self.pixel radlist = self.rotlist_rad @@ -525,18 +498,25 @@ def improve_scaling(self, img, scaleguess=None, rotstart=0.0, self.rots = radlist for q, rad in enumerate(radlist): - psf = self.simulate(bandpass=self.bandpass, fov=reffov, - pixel=self.pixscale_optimal, rotate=rad, - centering=centering) + psf = self.simulate( + bandpass=self.bandpass, + fov=reffov, + pixel=self.pixscale_optimal, + rotate=rad, + centering=centering, + ) corrlist[q, :, :] = run_data_correlate(psf, img) self.corrs[q] = np.max(corrlist[q]) self.rot_measured, maxy = utils.findmax(mag=self.rots, vals=self.corrs) - self.refpsf = self.simulate(bandpass=self.bandpass, - pixel=self.pixscale_factor * self.pixel, - fov=reffov, rotate=self.rot_measured, - centering=centering) + self.refpsf = self.simulate( + bandpass=self.bandpass, + pixel=self.pixscale_factor * self.pixel, + fov=reffov, + rotate=self.rot_measured, + centering=centering, + ) try: self.gof = goodness_of_fit(img, self.refpsf) @@ -547,8 +527,6 @@ def 
improve_scaling(self, img, scaleguess=None, rotstart=0.0, def set_pistons(self, phi_m): """ - Short Summary - ------------- Set piston's phi in meters of OPD at center wavelength LG++ Parameters @@ -565,8 +543,6 @@ def set_pistons(self, phi_m): def set_pixelscale(self, pixel_rad): """ - Short Summary - ------------- Set the detector pixel scale Parameters @@ -583,8 +559,6 @@ def set_pixelscale(self, pixel_rad): def goodness_of_fit(data, bestfit, diskR=8): """ - Short Summary - ------------- Calculate goodness of fit between the data and the fit. Parameters @@ -603,8 +577,11 @@ def goodness_of_fit(data, bestfit, diskR=8): gof: float goodness of fit """ - mask = np.ones(data.shape) + utils.makedisk(data.shape[0], 2) -\ - utils.makedisk(data.shape[0], diskR) + mask = ( + np.ones(data.shape) + + utils.makedisk(data.shape[0], 2) + - utils.makedisk(data.shape[0], diskR) + ) difference = np.ma.masked_invalid(mask * (bestfit - data)) @@ -617,8 +594,6 @@ def goodness_of_fit(data, bestfit, diskR=8): def run_data_correlate(data, model): """ - Short Summary - ------------- Calculate correlation between data and model Parameters @@ -636,7 +611,7 @@ def run_data_correlate(data, model): """ sci = data - log.debug('shape sci: %s', np.shape(sci)) + log.debug("shape sci: %s", np.shape(sci)) cor = utils.rcrosscorrelate(sci, model) diff --git a/jwst/ami/mask_definitions.py b/jwst/ami/mask_definitions.py index 5d7c052bfb..edf6936531 100755 --- a/jwst/ami/mask_definitions.py +++ b/jwst/ami/mask_definitions.py @@ -17,8 +17,6 @@ class NRM_mask_definitions(): def __init__(self, maskname=None, rotdeg=None, holeshape="circ", rescale=False, chooseholes=None): """ - Short Summary - ------------- Set attributes of NRM_mask_definitions class. 
Parameters @@ -67,8 +65,6 @@ def __init__(self, maskname=None, rotdeg=None, holeshape="circ", rescale=False, def showmask(self): """ - Short Summary - ------------- Calculate the diameter of the smallest centered circle (D) enclosing the live mask area @@ -89,8 +85,6 @@ def showmask(self): def jwst_g7s6_centers_asbuilt(chooseholes=None): # was jwst_g7s6_centers_asdesigned """ - Short Summary - ------------- Calculate hole centers with appropriate rotation Parameters @@ -100,7 +94,8 @@ def jwst_g7s6_centers_asbuilt(chooseholes=None): # was jwst_g7s6_centers_asdesi Returns ------- - Actual hole centers + ctrs_asbuilt: array + Actual hole centers [meters] """ holedict = {} # as_built names, C2 open, C5 closed, but as designed coordinates @@ -163,8 +158,7 @@ def jwst_g7s6_centers_asbuilt(chooseholes=None): # was jwst_g7s6_centers_asdesi def jwst_g7s6c(chooseholes=None): """ - Short Summary - ------------- + Calculate hole centers with appropriate rotation Parameters @@ -174,7 +168,10 @@ def jwst_g7s6c(chooseholes=None): Returns ------- - ?, Actual hole centers + f2f: float + flat-to-flat distance of mask holes + ctrs_asbuilt: array + Actual hole centers [meters] """ diff --git a/jwst/ami/matrix_dft.py b/jwst/ami/matrix_dft.py index dbbae28984..63ab8074b3 100644 --- a/jwst/ami/matrix_dft.py +++ b/jwst/ami/matrix_dft.py @@ -54,8 +54,6 @@ def matrix_dft(plane, nlamD, npix, offset=None, inverse=False, centering=FFTSTYLE): """ - Summary - ------- Perform a matrix discrete Fourier transform with selectable output sampling and centering. Where parameters can be supplied as either scalars or 2-tuples, the first element of the 2-tuple is used for the diff --git a/jwst/ami/nrm_core.py b/jwst/ami/nrm_core.py index 1a47ae3149..b65ce3059d 100755 --- a/jwst/ami/nrm_core.py +++ b/jwst/ami/nrm_core.py @@ -3,17 +3,13 @@ # Module for fringe fitting class # Original python was by A. Greenbaum & A. 
Sivaramakrishnan -import os import logging - import numpy as np -from scipy.special import comb -from scipy.stats import mstats - -from stdatamodels.jwst import datamodels - from . import lg_model from . import utils +from . import oifits + +from stdatamodels.jwst import datamodels log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) @@ -22,8 +18,6 @@ class FringeFitter: def __init__(self, instrument_data, **kwargs): """ - Short Summary - ------------- For the given information on the instrument and mask, calculate the fringe observables (visibilities and closure phases in the image plane. @@ -63,13 +57,20 @@ def __init__(self, instrument_data, **kwargs): self.npix = kwargs["npix"] else: self.npix = 'default' + # Default: unweighted fit + self.weighted = False + if "weighted" in kwargs: + self.weighted = kwargs["weighted"] + if self.weighted is True: + log.info("leastsqnrm.weighted_operations() - weighted by Poisson variance") + else: + log.info("leastsqnrm.matrix_operations() - equally-weighted") def fit_fringes_all(self, input_model): """ - Short Summary - ------------ Extract the input data from input_model, and generate the best model to match the data (centering, scaling, rotation) + May allow parallelization by integration (later) Parameters ---------- @@ -78,530 +79,158 @@ def fit_fringes_all(self, input_model): Returns ------- - output_model: Fringe model object - Fringe analysis data + output_model: AmiOIModel object + AMI tables of median observables from LG algorithm fringe fitting in OIFITS format + output_model_multi: AmiOIModel object + AMI tables of observables for each integration from LG algorithm fringe fitting in OIFITS format + lgfit: + AMI cropped data, model, and residual data from LG algorithm fringe fitting """ - self.scidata = self.instrument_data.read_data_model(input_model) + # scidata, dqmask are already centered around peak + self.scidata, self.dqmask = self.instrument_data.read_data_model(input_model) - nrm = 
lg_model.NrmModel(mask=self.instrument_data.mask, - pixscale=self.instrument_data.pscale_rad, - holeshape=self.instrument_data.holeshape, - affine2d=self.instrument_data.affine2d, - over=self.oversample) + # list for nrm objects for each slc + self.nrm_list = [] - nrm.bandpass = self.instrument_data.wls[0] + for slc in range(self.instrument_data.nwav): + self.nrm_list.append(self.fit_fringes_single_integration(slc)) - if self.npix == 'default': - self.npix = self.scidata[:, :].shape[0] - - # New or modified in LG++ - # center the image on its peak pixel: - # AS subtract 1 from "r" below for testing >1/2 pixel offsets - # AG 03-2019 -- is above comment still relevant? - - if self.instrument_data.arrname == "NIRC2_9NRM": - self.ctrd = utils.center_imagepeak(self.scidata[0, :, :], - r=(self.npix - 1) // 2 - 2, cntrimg=False) - elif self.instrument_data.arrname == "gpi_g10s40": - self.ctrd = utils.center_imagepeak(self.scidata[0, :, :], - r=(self.npix - 1) // 2 - 2, cntrimg=True) - else: - self.ctrd = utils.center_imagepeak(self.scidata[:, :]) - nrm.reference = self.ctrd # self. is the cropped image centered on the brightest pixel - if self.psf_offset_ff is None: - # returned values have offsets x-y flipped: - # Finding centroids the Fourier way assumes no bad pixels case - Fourier domain mean slope - centroid = utils.find_centroid(self.ctrd, self.instrument_data.threshold) # offsets from brightest pixel ctr - # use flipped centroids to update centroid of image for JWST - check parity for GPI, Vizier,... 
- # pixel coordinates: - note the flip of [0] and [1] to match DS9 view + # Now save final output model(s) of all slices, averaged slices to AmiOiModels + # averaged + oifits_model = oifits.RawOifits(self) + output_model = oifits_model.make_oifits() - nrm.xpos = centroid[1] # flip 0 and 1 to convert - nrm.ypos = centroid[0] # flip 0 and 1 - nrm.psf_offset = nrm.xpos, nrm.ypos # renamed .bestcenter to .psf_offset - else: - nrm.psf_offset = self.psf_offset_ff # user-provided psf_offsetoffsets from array center are here. + # multi-integration + oifits_model_multi = oifits.RawOifits(self,method='multi') + output_model_multi = oifits_model_multi.make_oifits() - nrm.make_model(fov=self.ctrd.shape[0], bandpass=nrm.bandpass, - over=self.oversample, - psf_offset=nrm.psf_offset, - pixscale=nrm.pixel) - - nrm.fit_image(self.ctrd, modelin=nrm.model, psf_offset=nrm.psf_offset) + # Save cropped/centered data, model, residual in AmiLgFitModel + lgfit = self.make_lgfitmodel() + return output_model, output_model_multi, lgfit + + def make_lgfitmodel(self): """ - Attributes now stored in nrm object: - - ----------------------------------------------------------------------------- - soln --- resulting sin/cos coefficients from least squares fitting - fringephase --- baseline phases in radians - fringeamp --- baseline amplitudes (flux normalized) - redundant_cps --- closure phases in radians - redundant_cas --- closure amplitudes - residual --- fit residuals [data - model solution] - cond --- matrix condition for inversion - fringepistons --- zero-mean piston opd in radians on each hole (eigenphases) - ----------------------------------------------------------------------------- - """ - nrm.create_modelpsf() - - output_model = datamodels.AmiLgModel( - fit_image=nrm.modelpsf, - resid_image=nrm.residual, - closure_amp_table=np.asarray(nrm.redundant_cas), - closure_phase_table=np.asarray(nrm.redundant_cps), - fringe_amp_table=np.asarray(nrm.fringeamp), - 
fringe_phase_table=np.asarray(nrm.fringephase), - pupil_phase_table=np.asarray(nrm.fringepistons), - solns_table=np.asarray(nrm.soln)) - - return output_model - - -class Calibrate: - """ - A class to calibrate raw frame phases, closure phase, and squared visibilities. - (The option to save to oifits files will be included in a future version.) - """ - - def __init__(self, objpaths, instrument_data, savedir=None, extra_dimension=None, **kwargs): - """ - Summary - ------- - Get statistics on each set of exposures, subtract calibrator phases from - target phases, and divide target visibilities by cal visibilities. - - This will load all the observations into attributes: - cp_mean_cal ... size [ncals, naxis2, ncp] - cp_err_cal ... size [ncals, naxis2, ncp] - v2_mean_cal ... size [ncals, naxis2, nbl] - v2_err_cal ... size [ncals, naxis2, nbl] - cp_mean_tar ... size [naxis2, nccp] - cp_err_tar ... size [naxis2, ncp] - v2_mean_tar ... size [naxis2, nbl] - v2_err_tar ... size [naxis2, nbl] - - Parameters - ---------- - objpaths: list (currently not used) - list of directory paths (e.g. [tgtpth, calpth1, calpth2, ...] - containing fringe observables for tgt, cal1, [cal2,...]. The - first path is the target. One or more calibrators follow. - This parameter used to be the parameter 'paths'. - - instrument_data - jwst.ami.instrument_data.NIRISS object - Information on the mask geometry (namely # holes), instrument, - wavelength obs mode. - - savedir: string - directory for output results (currently not used) - - extra_dimension: string (not currently used) - dataset has an extra dimension for wavelength or polarization, for - example. The value in each object level folder containing additional - layer of data. Used to be parameter 'sub_dir_tag'. 
- - kwargs option: - vflag: float - visibility flag, to specify a cutoff and only accept exposures - with higher visibilities - """ - # Added 10/14/2016 -- here's a visibilities flag, where you can specify a - # cutoff and only accept exposures with higher visibilities - # Or maybe we should have it just reject outliers? - # Let's have vflag be rejection of the fraction provided - # e.g. if vflag=0.25, then exposures with the 25% lowest avg visibilities will be flagged - if "vflag" in kwargs.keys(): - self.vflag = kwargs['vflag'] - else: - self.vflag = 0.0 - - # if no savedir specified, default is current working directory - if savedir is None: - self.savedir = os.getcwd() - else: - self.savedir = savedir - - try: - os.listdir(savedir) - except FileNotFoundError: - os.mkdir(savedir) - self.savedir = savedir - - # number of calibrators being used: - self.ncals = len(objpaths) - 1 # number of calibrators, if zero, set to 1 - if self.ncals == 0: # No calibrators given - self.ncals = 1 # to avoid empty arrays - self.nobjs = len(objpaths) # number of total objects - - self.N = len(instrument_data.mask.ctrs) - self.nbl = int(self.N * (self.N - 1) / 2) - self.ncp = int(comb(self.N, 3)) - self.instrument_data = instrument_data - - # Additional axis (e.g., wavelength axis) - # Can be size one. 
Defined by instrument_data wavelength array - self.naxis2 = instrument_data.nwav - - # Set up all the arrays - # Cal arrays have ncal axis, "wavelength" axis, and ncp axis - self.cp_mean_cal = np.zeros((self.ncals, self.naxis2, self.ncp)) - self.cp_err_cal = np.zeros((self.ncals, self.naxis2, self.ncp)) - self.v2_mean_cal = np.zeros((self.ncals, self.naxis2, self.nbl)) - self.v2_err_cal = np.zeros((self.ncals, self.naxis2, self.nbl)) - self.pha_mean_cal = np.zeros((self.ncals, self.naxis2, self.nbl)) - self.pha_err_cal = np.zeros((self.ncals, self.naxis2, self.nbl)) - - # target arrays have "wavelength" axis and ncp axis - self.cp_mean_tar = np.zeros((self.naxis2, self.ncp)) - self.cp_err_tar = np.zeros((self.naxis2, self.ncp)) - self.v2_mean_tar = np.zeros((self.naxis2, self.nbl)) - self.v2_err_tar = np.zeros((self.naxis2, self.nbl)) - self.pha_mean_tar = np.zeros((self.naxis2, self.nbl)) - self.pha_err_tar = np.zeros((self.naxis2, self.nbl)) - - # is there a subdirectory (e.g. for the exposure -- need to make this default) - if extra_dimension is not None: - self.extra_dimension = extra_dimension - for ii in range(self.nobjs): - exps = [f for f in os.listdir(objpaths[ii]) - if self.extra_dimension in f and - os.path.isdir(os.path.join(objpaths[ii], f))] - nexps = len(exps) - - amp = np.zeros((self.naxis2, nexps, self.nbl)) - pha = np.zeros((self.naxis2, nexps, self.nbl)) - cps = np.zeros((self.naxis2, nexps, self.ncp)) - - # Create the cov matrix arrays - if ii == 0: - self.cov_mat_tar = np.zeros((self.naxis2, nexps, nexps)) - self.sigmasquared_tar = np.zeros((self.naxis2, nexps)) - self.cov_mat_cal = np.zeros((self.naxis2, nexps, nexps)) - self.sigmasquared_cal = np.zeros((self.naxis2, nexps)) - else: - pass - expflag = [] - for qq in range(nexps): - # nwav files - cpfiles = [f for f in os.listdir(objpaths[ii] + exps[qq]) if "CPs" in f] - - ampfiles = [f for f in os.listdir(objpaths[ii] + exps[qq]) - if "amplitudes" in f] - phafiles = [f for f in 
os.listdir(objpaths[ii] + exps[qq]) if "phase" in f] - - amp[0, qq, :] = np.loadtxt(objpaths[ii] + exps[qq] + "/" + ampfiles[0]) - cps[0, qq, :] = np.loadtxt(objpaths[ii] + exps[qq] + "/" + cpfiles[0]) - pha[0, qq, :] = np.loadtxt(objpaths[ii] + exps[qq] + "/" + phafiles[0]) - - # 10/14/2016 -- flag the exposure if we get amplitudes > 1 - # Also flag the exposure if vflag is set, to reject fraction indicated - if True in (amp[:, qq, :] > 1): - expflag.append(qq) - - if self.vflag > 0.0: - self.ncut = int(self.vflag * nexps) # how many are we cutting out - sorted_exps = np.argsort(amp.mean(axis=(0, -1))) - cut_exps = sorted_exps[:self.ncut] # flag the ncut lowest exposures - expflag = expflag + list(cut_exps) - - # Create the cov matrix arrays - if ii == 0: - rearr = np.rollaxis(cps, -1, 0).reshape(self.naxis2 * self.ncp, nexps) - R_i = rearr - rearr.mean(axis=1)[:, None] - R_j = R_i.T - self.cov = np.dot(R_i, R_j) / (nexps - 1) - else: - rearr = np.rollaxis(cps, -1, 0).reshape(self.naxis2 * self.ncp, nexps) - R_i = rearr - rearr.mean(axis=1)[:, None] - R_j = R_i.T - self.cov += np.dot(R_i, R_j) / (nexps - 1) - - # Also adding a mask to calib steps - - if ii == 0: - # closure phases and squared visibilities - self.cp_mean_tar[0, :], self.cp_err_tar[0, :], \ - self.v2_mean_tar[0, :], self.v2_err_tar[0, :], \ - self.pha_mean_tar[0, :], self.pha_err_tar = \ - self.calib_steps(cps[0, :, :], amp[0, :, :], pha[0, :, :], nexps, expflag=expflag) - - else: - # closure phases and visibilities - self.cp_mean_cal[ii - 1, 0, :], self.cp_err_cal[ii - 1, 0, :], \ - self.v2_mean_cal[ii - 1, 0, :], self.v2_err_cal[ii - 1, 0, :], \ - self.pha_mean_cal[ii - 1, 0, :], self.pha_err_cal[ii - 1, 0, :] = \ - self.calib_steps(cps[0, :, :], amp[0, :, :], pha[0, :, :], nexps, expflag=expflag) - else: - for ii in range(self.nobjs): - - cpfiles = [f for f in os.listdir(objpaths[ii]) if "CPs" in f] - ampfiles = [f for f in os.listdir(objpaths[ii]) if "amplitudes" in f] - phafiles = [f for f in 
os.listdir(objpaths[ii]) if "phase" in f] - nexps = len(cpfiles) - - amp = np.zeros((nexps, self.nbl)) - pha = np.zeros((nexps, self.nbl)) - cps = np.zeros((nexps, self.ncp)) - - expflag = [] - for qq in range(nexps): - amp[qq, :] = np.loadtxt(objpaths[ii] + "/" + ampfiles[qq]) - if True in (amp[qq, :] > 1): - expflag.append(qq) - - pha[qq, :] = np.loadtxt(objpaths[ii] + "/" + phafiles[qq]) - cps[qq, :] = np.loadtxt(objpaths[ii] + "/" + cpfiles[qq]) - - # Covariance 06/27/2017 - if ii == 0: - rearr = np.rollaxis(cps, -1, 0).reshape(self.ncp, nexps) - R_i = rearr - rearr.mean(axis=1)[:, None] - R_j = R_i.T - self.cov = np.dot(R_i, R_j) / (nexps - 1) - else: - rearr = np.rollaxis(cps, -1, 0).reshape(self.ncp, nexps) - R_i = rearr - rearr.mean(axis=1)[:, None] - R_j = R_i.T - self.cov += np.dot(R_i, R_j) / (nexps - 1) - - # Oct 14 2016 -- adding in a visibilities flag. Can't be >1 that doesn't make sense. - # Also adding a mask to calib steps - if ii == 0: - # closure phases and squared visibilities - self.cp_mean_tar[0, :], self.cp_err_tar[0, :], \ - self.v2_mean_tar[0, :], self.v2_err_tar[0, :], \ - self.pha_mean_tar[0, :], self.pha_err_tar[0, :] = \ - self.calib_steps(cps, amp, pha, nexps, expflag=expflag) - else: - # Fixed clunkiness! 
- # closure phases and visibilities - self.cp_mean_cal[ii - 1, 0, :], self.cp_err_cal[ii - 1, 0, :], \ - self.v2_mean_cal[ii - 1, 0, :], self.v2_err_cal[ii - 1, 0, :], \ - self.pha_mean_cal[ii - 1, 0, :], self.pha_err_cal[ii - 1, 0, :] = \ - self.calib_steps(cps, amp, pha, nexps, expflag=expflag) - - # Combine mean calibrator values and errors - self.cp_mean_tot = np.zeros(self.cp_mean_cal[0].shape) - self.cp_err_tot = self.cp_mean_tot.copy() - self.v2_mean_tot = np.zeros(self.v2_mean_cal[0].shape) - self.v2_err_tot = self.v2_mean_tot.copy() - self.pha_mean_tot = np.zeros(self.pha_mean_cal[0].shape) - self.pha_err_tot = self.pha_mean_tot.copy() - for ww in range(self.ncals): - self.cp_mean_tot += self.cp_mean_cal[ww] - self.cp_err_tot += self.cp_err_cal[ww]**2 - self.v2_mean_tot += self.v2_mean_cal[ww] - self.v2_err_tot += self.v2_err_cal[ww]**2 - self.pha_mean_tot += self.pha_mean_cal[ww] - self.pha_err_tot += self.pha_err_cal[ww]**2 - self.cp_mean_tot = self.cp_mean_tot / self.ncals - self.cp_err_tot = np.sqrt(self.cp_err_tot) - self.v2_mean_tot = self.v2_mean_tot / self.ncals - self.v2_err_tot = np.sqrt(self.v2_err_tot) - self.pha_mean_tot = self.pha_mean_tot / self.ncals - self.pha_err_tot = np.sqrt(self.pha_err_tot) - - # Calibrate - self.cp_calibrated = self.cp_mean_tar - self.cp_mean_tot - self.cp_err_calibrated = np.sqrt(self.cp_err_tar**2 + self.cp_err_tot**2) - self.v2_calibrated = self.v2_mean_tar / self.v2_mean_tot - self.v2_err_calibrated = np.sqrt(self.v2_err_tar**2 + self.v2_err_tot**2) - self.pha_calibrated = self.pha_mean_tar - self.pha_mean_tot - self.pha_err_calibrated = np.sqrt(self.pha_err_tar**2 + self.pha_err_tot**2) - - # convert to degrees - self.cp_calibrated_deg = self.cp_calibrated * 180 / np.pi - self.cp_err_calibrated_deg = self.cp_err_calibrated * 180 / np.pi - self.pha_calibrated_deg = self.pha_calibrated * 180 / np.pi - self.pha_err_calibrated_deg = self.pha_err_calibrated * 180 / np.pi - - def calib_steps(self, cps, amps, pha, 
nexp, expflag=None): - """ - Short Summary - ------------- - Calculates closure phase and mean squared visibilities & standard error, - and apply the exposure flag. + Populate the LGFitModel with the output of the fringe fitting + (LG algorithm) Parameters ---------- - cps: 1D float array - closure phases - - amps: 1D float array - fringe visibility between each pair of holes - - pha: 1D float array - phases - - nexp: integer - number of exposures - - expflag: integer default=None - number of flagged exposures Returns ------- - meancp: float - mean of closure phases - - errcp: float - error of closure phases - - meanv2: float - mean squared visibilities - - errv2: float - mean squared error of visibilities - - meanpha: float - mean of phases - - errpha: float - error of phases - """ - - # 10/14/16 Change flags exposures where vis > 1 anywhere - # Apply the exposure flag - expflag = None - cpmask = np.zeros(cps.shape, dtype=bool) - blmask = np.zeros(amps.shape, dtype=bool) - if expflag is not None: - nexp -= len(expflag) # don't count the bad exposures - cpmask[expflag, :] = True - blmask[expflag, :] = True - else: - pass - - meancp = np.ma.masked_array(cps, mask=cpmask).mean(axis=0) - meanv2 = np.ma.masked_array(amps, mask=blmask).mean(axis=0)**2 - meanpha = np.ma.masked_array(pha, mask=blmask).mean(axis=0)**2 - - errcp = np.sqrt(mstats.moment(np.ma.masked_array(cps, mask=cpmask), moment=2, axis=0)) / np.sqrt(nexp) - errv2 = np.sqrt(mstats.moment(np.ma.masked_array(amps**2, mask=blmask), moment=2, axis=0)) / np.sqrt(nexp) - errpha = np.sqrt(mstats.moment(np.ma.masked_array(pha, mask=blmask), moment=2, axis=0)) / np.sqrt(nexp) - - # Set cutoff accd to Kraus 2008 - 2/3 of median - errcp[errcp < (2 / 3.0) * np.median(errcp)] = (2 / 3.0) * np.median(errcp) - errpha[errpha < (2 / 3.0) * np.median(errpha)] = (2 / 3.0) * np.median(errpha) - errv2[errv2 < (2 / 3.0) * np.median(errv2)] = (2 / 3.0) * np.median(errv2) - - return meancp, errcp, meanv2, errv2, meanpha, errpha - 
- def save_to_txt(self): - """ - Short Summary - ------------ - Set up arrays to write to text files (not currently used) + m: AmiLgFitModel object + LG analysis centered data, fit, residual, and model info + """ + nslices = len(self.nrm_list) + # 3d arrays of centered data, models, and residuals (data - model) + ctrd_arr = np.zeros((nslices,self.scidata.shape[1],self.scidata.shape[2])) + n_ctrd_arr = np.zeros((nslices,self.scidata.shape[1],self.scidata.shape[2])) + model_arr = np.zeros((nslices,self.scidata.shape[1],self.scidata.shape[2])) + n_model_arr = np.zeros((nslices,self.scidata.shape[1],self.scidata.shape[2])) + resid_arr = np.zeros((nslices,self.scidata.shape[1],self.scidata.shape[2])) + n_resid_arr = np.zeros((nslices,self.scidata.shape[1],self.scidata.shape[2])) + # Model parameters + solns_arr = np.zeros((nslices,44)) + + for i,nrmslc in enumerate(self.nrm_list): + datapeak = nrmslc.reference.max() + ctrd_arr[i,:,:] = nrmslc.reference + n_ctrd_arr[i,:,:] = nrmslc.reference/datapeak + model_arr[i,:,:] = nrmslc.modelpsf + n_model_arr[i,:,:] = nrmslc.modelpsf/datapeak + resid_arr[i,:,:] = nrmslc.residual + n_resid_arr[i,:,:] = nrmslc.residual/datapeak + solns_arr[i,:] = nrmslc.soln + + # Populate datamodel + m = datamodels.AmiLgFitModel() + m.centered_image = ctrd_arr + m.norm_centered_image = n_ctrd_arr + m.fit_image = model_arr + m.norm_fit_image = n_model_arr + m.resid_image = resid_arr + m.norm_resid_image = n_resid_arr + m.solns_table = np.recarray(solns_arr.shape[0], dtype=[('coeffs', 'f8', solns_arr.shape[1])], buf=solns_arr) + + return m + + + def fit_fringes_single_integration(self, slc): + """ + Generate the best model to + match a single slice Parameters ---------- - None + slc: numpy array + 2D slice of data Returns ------- - None - """ - tag = "_deg.txt" - - fns = ["cps" + tag, "cperr" + tag, "v2" + tag, "v2err" + tag] - arrs = [self.cp_calibrated_deg, self.cp_err_calibrated_deg, - self.v2_calibrated, self.v2_err_calibrated, - 
self.pha_calibrated_deg, self.pha_err_calibrated_deg] - self._save_txt(fns, arrs) + nrm: NrmModel object + Model with best fit results + + Notes + ----- + After nrm.fit_image is called, these attributes are stored in nrm object: - def _save_txt(self, fns, arrays): - """ - Short Summary - ------------- - Write calibration arrays to text files (not currently used) - - Parameters - ---------- - fns: list of strings - names of 4 output files - - arrays: 1D float arrays - 4 arrays for closure phases, fringe visibilities, and errors - - Returns - ------- - None + ----------------------------------------------------------------------------- + soln --- resulting sin/cos coefficients from least squares fitting + fringephase --- baseline phases in radians + fringeamp --- baseline amplitudes (flux normalized) + redundant_cps --- closure phases in radians + redundant_cas --- closure amplitudes + residual --- fit residuals [data - model solution] + cond --- matrix condition for inversion + fringepistons --- zero-mean piston opd in radians on each hole (eigenphases) + ----------------------------------------------------------------------------- + """ - np.savetxt(self.savedir + "/" + fns[0], arrays[0]) - np.savetxt(self.savedir + "/" + fns[1], arrays[1]) - np.savetxt(self.savedir + "/" + fns[2], arrays[2]) - np.savetxt(self.savedir + "/" + fns[3], arrays[3]) + nrm = lg_model.NrmModel(mask=self.instrument_data.mask, + pixscale=self.instrument_data.pscale_rad, + holeshape=self.instrument_data.holeshape, + affine2d=self.instrument_data.affine2d, + over=self.oversample) - return None + nrm.bandpass = self.instrument_data.wls[0] - def save_to_oifits(self, **kwargs): - """ - Short Summary - ------------- - Save calibrated quantities to oifits files, with updated metadata - (not yet supported). 
+ if self.npix == 'default': + self.npix = self.scidata[slc,:, :].shape[0] - Parameters - ---------- - kwargs options: - phaseceil: float: - value for keyword + self.ctrd = self.scidata[slc] + self.dqslice = self.dqmask[slc] - clip: integer ? - clipping factor ? + nrm.reference = self.ctrd # self.ctrd is the cropped image centered on the brightest pixel - parang_range: float - range of values of phase amplitudes ? + if self.psf_offset_ff is None: + # returned values have offsets x-y flipped: + # Finding centroids the Fourier way assumes no bad pixels case - Fourier domain mean slope + centroid = utils.find_centroid(self.ctrd, self.instrument_data.threshold) # offsets from brightest pixel ctr + # use flipped centroids to update centroid of image for JWST - check parity for GPI, Vizier,... + # pixel coordinates: - note the flip of [0] and [1] to match DS9 view + nrm.xpos = centroid[1] # flip 0 and 1 to convert + nrm.ypos = centroid[0] # flip 0 and 1 + nrm.psf_offset = nrm.xpos, nrm.ypos # renamed .bestcenter to .psf_offset + else: + nrm.psf_offset = self.psf_offset_ff # user-provided psf_offsetoffsets from array center are here. - avparang float - average of range of values of phase amplitudes ? + nrm.make_model(fov=self.ctrd.shape[0], + bandpass=nrm.bandpass, + over=self.oversample, + psf_offset=nrm.psf_offset, + pixscale=nrm.pixel) - Returns - ------- - None - """ + nrm.fit_image(self.ctrd, + modelin=nrm.model, + psf_offset=nrm.psf_offset, + dqm=self.dqslice, + weighted=self.weighted) - # look for kwargs, e.g., phaseceil, anything else? 
- if "phaseceil" in list(kwargs.keys()): - self.phaseceil = kwargs["phaseceil"] - else: - # default for flagging closure phases (deg) - self.phaseceil = 1.0e2 # degrees - - if "clip" in kwargs.keys(): - self.clip_wls = kwargs["clip"] - nwav = self.naxis2 - 2 * self.clip_wls - covshape = (self.naxis2 * self.ncp) - (2 * self.clip_wls * self.ncp) - clippedcov = np.zeros((covshape, covshape)) - for k in range(self.ncp): - clippedcov[nwav * k:nwav * (k + 1), nwav * k:nwav * (k + 1)] = \ - self.cov[self.naxis2 * k + self.clip_wls:self.naxis2 * (k + 1) - self.clip_wls, - self.naxis2 * k + self.clip_wls:self.naxis2 * (k + 1) - self.clip_wls] - else: - # default is no clipping - maybe could set instrument-dependent clip in future - self.clip_wls = None - clippedcov = self.cov - - if not hasattr(self.instrument_data, "parang_range"): - self.instrument_data.parang_range = 0.0 - if not hasattr(self.instrument_data, "avparang"): - self.instrument_data.avparang = 0.0 - self.obskeywords = { - 'path': self.savedir + "/", - 'year': self.instrument_data.year, - 'month': self.instrument_data.month, - 'day': self.instrument_data.day, - 'TEL': self.instrument_data.telname, - 'instrument': self.instrument_data.instrument, - 'arrname': self.instrument_data.arrname, - 'object': self.instrument_data.objname, - 'RA': self.instrument_data.ra, - 'DEC': self.instrument_data.dec, - 'PARANG': self.instrument_data.avparang, - 'PARANGRANGE': self.instrument_data.parang_range, - 'PA': self.instrument_data.pa, - 'phaseceil': self.phaseceil, - 'covariance': clippedcov} + nrm.create_modelpsf() + # model now stored as nrm.modelpsf, also nrm.residual + self.nrm = nrm # this gets updated with each slice + return nrm # to fit_fringes_all, where the output model will be created from list of nrm objects \ No newline at end of file diff --git a/jwst/ami/oifits.py b/jwst/ami/oifits.py new file mode 100644 index 0000000000..f045288d4e --- /dev/null +++ b/jwst/ami/oifits.py @@ -0,0 +1,590 @@ +#! 
/usr/bin/env python +""" +RawOifits class: takes fringefitter class, which contains nrm_list and instrument_data attributes, +all info needed to write oifits. +populate structure needed to write out oifits files according to schema. +averaged and multi-integration versions, sigma-clipped stats over ints + +CalibOifits class: takes two AmiOIModel datamodels and produces a final calibrated datamodel. +""" +import numpy as np + +from scipy.special import comb +from astropy.stats import sigma_clipped_stats +from astropy.time.core import Time + +from stdatamodels.jwst import datamodels + + +class RawOifits: + def __init__(self, fringefitter, method="median"): + """ + Class to store AMI data in the format required to write out to OIFITS files + Angular quantities of input are in radians from fringe fitting; converted to degrees for saving. + + Parameters + ---------- + fringefitter: FringeFitter object + Object containing nrm_list attribute (list of nrm objects) + and other info needed for OIFITS files + method: string + Method to average observables: mean or median. Default median. + + Notes + ----- + Based on ObservablesFromText from ImPlaneIA, e.g. + https://github.com/anand0xff/ImPlaneIA/blob/master/nrm_analysis/misctools/implane2oifits.py#L32 + """ + self.fringe_fitter = fringefitter + self.n_holes = 7 + + self.nslices = len(self.fringe_fitter.nrm_list) # n ints + self.n_baselines = int(comb(self.n_holes, 2)) # 21 + self.n_closure_phases = int(comb(self.n_holes, 3)) # 35 + self.n_closure_amplitudes = int(comb(self.n_holes, 4)) + + self.method = method + + self.ctrs_eqt = self.fringe_fitter.instrument_data.ctrs_eqt + self.ctrs_inst = self.fringe_fitter.instrument_data.ctrs_inst + self.pa = ( + self.fringe_fitter.instrument_data.pav3 + ) # header pav3, not including v3i_yang?? 
+ + self.bholes, self.bls = self._makebaselines() + self.tholes, self.tuv = self._maketriples_all() + + def make_obsarrays(self): + """ + Make arrays of observables of the correct shape for saving to datamodels + """ + # arrays of observables, (nslices,nobservables) shape. + self.fringe_phases = np.zeros((self.nslices, self.n_baselines)) + self.fringe_amplitudes = np.zeros((self.nslices, self.n_baselines)) + self.closure_phases = np.zeros((self.nslices, self.n_closure_phases)) + self.closure_amplitudes = np.zeros((self.nslices, self.n_closure_amplitudes)) + self.pistons = np.zeros((self.nslices, self.n_holes)) + # model parameters + self.solns = np.zeros((self.nslices, 44)) + + for i, nrmslc in enumerate(self.fringe_fitter.nrm_list): + self.fringe_phases[i, :] = np.rad2deg(nrmslc.fringephase) # FPs in degrees + self.fringe_amplitudes[i, :] = nrmslc.fringeamp + self.closure_phases[i, :] = np.rad2deg(nrmslc.redundant_cps) # CPs in degrees + self.closure_amplitudes[i, :] = nrmslc.redundant_cas + self.pistons[i, :] = np.rad2deg( + nrmslc.fringepistons + ) # segment pistons in degrees + self.solns[i, :] = nrmslc.soln + + self.fringe_amplitudes_squared = self.fringe_amplitudes ** 2 # squared visibilities + + def make_oifits(self): + """ + Perform final manipulations of observable arrays, calculate uncertainties, and + populate AmiOIModel + + Returns + ------- + m: AmiOIModel + Fully populated datamodel + + """ + self.make_obsarrays() + instrument_data = self.fringe_fitter.instrument_data + observation_date = Time( + "%s-%s-%s" + % (instrument_data.year, instrument_data.month, instrument_data.day), + format="fits", + ) + + # central wavelength, equiv. 
width from effstims used for fringe fitting + wl = instrument_data.lam_c + e_wl = instrument_data.lam_c * instrument_data.lam_w + + # Index 0 and 1 reversed to get the good u-v coverage (same fft) + ucoord = self.bls[:, 1] + vcoord = self.bls[:, 0] + + v1coord = self.tuv[:, 0, 0] + u1coord = self.tuv[:, 0, 1] + v2coord = self.tuv[:, 1, 0] + u2coord = self.tuv[:, 1, 1] + + flagVis = [False] * self.n_baselines + flagT3 = [False] * self.n_closure_phases + + # do the things done by populate_nrm here + # average or don't, and get uncertainties + # Unwrap phases + shift2pi = np.zeros(self.closure_phases.shape) + shift2pi[self.closure_phases >= 6] = 2 * np.pi + shift2pi[self.closure_phases <= -6] = -2 * np.pi + self.closure_phases -= shift2pi + + if self.method not in ["mean", "median", "multi"]: + self.method = "median" + # set these as attributes (some may exist and be overwritten) + if self.method == "multi": + self.vis2 = self.fringe_amplitudes_squared.T + self.e_vis2 = np.zeros(self.vis2.shape) + self.visamp = self.fringe_amplitudes.T + self.e_visamp = np.zeros(self.visamp.shape) + self.visphi = self.fringe_phases.T + self.e_visphi = np.zeros(self.visphi.shape) + self.closure_phases = self.closure_phases.T + self.e_cp = np.zeros(self.closure_phases.shape) + self.camp = self.closure_amplitudes.T + self.e_camp = np.zeros(self.camp.shape) + self.pist = self.pistons.T + self.e_pist = np.zeros(self.pist.shape) + + # apply sigma-clipping to uncertainties + # sigma_clipped_stats returns mean, median, stddev. 
nsigma=3, niters=5 + elif self.method == "median": + _, self.vis2, self.e_vis2 = sigma_clipped_stats(self.fringe_amplitudes_squared, axis=0) + _, self.visamp, self.e_visamp = sigma_clipped_stats(self.fringe_amplitudes, axis=0) + _, self.visphi, self.e_visphi = sigma_clipped_stats(self.fringe_phases, axis=0) + _, self.closure_phases, self.e_cp = sigma_clipped_stats(self.closure_phases, axis=0) + _, self.camp, self.e_camp = sigma_clipped_stats(self.closure_amplitudes, axis=0) + _, self.pist, self.e_pist = sigma_clipped_stats(self.pistons, axis=0) + + else: # take the mean + self.vis2, _, self.e_vis2 = sigma_clipped_stats(self.fringe_amplitudes_squared, axis=0) + self.visamp, _, self.e_visamp = sigma_clipped_stats(self.fringe_amplitudes, axis=0) + self.visphi, _, self.e_visphi = sigma_clipped_stats(self.fringe_phases, axis=0) + self.closure_phases, _, self.e_cp = sigma_clipped_stats(self.closure_phases, axis=0) + self.camp, _, self.e_camp = sigma_clipped_stats(self.closure_amplitudes, axis=0) + self.pist, _, self.e_pist = sigma_clipped_stats(self.pistons, axis=0) + + # prepare arrays for OI_ARRAY ext + self.staxy = instrument_data.ctrs_inst + N_ap = len(self.staxy) + tel_name = ["A%i" % x for x in np.arange(N_ap) + 1] + sta_name = tel_name + diameter = [0] * N_ap + + staxyz = [] + for x in self.staxy: + a = list(x) + line = [a[0], a[1], 0] + staxyz.append(line) + + sta_index = np.arange(N_ap) + 1 + + pscale = instrument_data.pscale_mas / 1000.0 # arcsec + isz = self.fringe_fitter.scidata.shape[ + 1 + ] # Size of the image to extract NRM data + fov = [pscale * isz] * N_ap + fovtype = ["RADIUS"] * N_ap + + m = datamodels.AmiOIModel() + self.init_oimodel_arrays(m) + + # primary header keywords + m.meta.telescope = instrument_data.telname + m.meta.origin = "STScI" + m.meta.instrument.name = instrument_data.instrument + m.meta.program.pi_name = instrument_data.pi_name + m.meta.target.proposer_name = instrument_data.proposer_name + m.meta.observation.date = 
observation_date.fits + m.meta.oifits.array_name = instrument_data.arrname + m.meta.oifits.instrument_mode = instrument_data.pupil + + # oi_array extension data + m.array["TEL_NAME"] = tel_name + m.array["STA_NAME"] = sta_name + m.array["STA_INDEX"] = sta_index + m.array["DIAMETER"] = diameter + m.array["STAXYZ"] = staxyz + m.array["FOV"] = fov + m.array["FOVTYPE"] = fovtype + m.array["CTRS_EQT"] = instrument_data.ctrs_eqt + m.array["PISTONS"] = self.pist + m.array["PIST_ERR"] = self.e_pist + + # oi_target extension data + m.target['TARGET_ID'] = [1] + m.target['TARGET'] = instrument_data.objname + m.target['RAEP0'] = instrument_data.ra + m.target['DECEP0'] = instrument_data.dec + m.target['EQUINOX'] = [2000] + m.target['RA_ERR'] = instrument_data.ra_uncertainty + m.target['DEC_ERR'] = instrument_data.dec_uncertainty + m.target['SYSVEL'] = [0] + m.target['VELTYP'] = ['UNKNOWN'] + m.target['VELDEF'] = ['OPTICAL'] + m.target['PMRA'] = instrument_data.pmra + m.target['PMDEC'] = instrument_data.pmdec + m.target['PMRA_ERR'] = [0] + m.target['PMDEC_ERR'] = [0] + m.target['PARALLAX'] = [0] + m.target['PARA_ERR'] = [0] + m.target['SPECTYP'] = ['UNKNOWN'] + + # oi_vis extension data + m.vis['TARGET_ID'] = 1 + m.vis['TIME'] = 0 + m.vis['MJD'] = observation_date.mjd + m.vis['INT_TIME'] = instrument_data.itime + m.vis['VISAMP'] = self.visamp + m.vis['VISAMPERR'] = self.e_visamp + m.vis['VISPHI'] = self.visphi + m.vis['VISPHIERR'] = self.e_visphi + m.vis['UCOORD'] = ucoord + m.vis['VCOORD'] = vcoord + m.vis['STA_INDEX'] = self._format_STAINDEX_V2(self.bholes) + m.vis['FLAG'] = flagVis + + # oi_vis2 extension data + m.vis2['TARGET_ID'] = 1 + m.vis2['TIME'] = 0 + m.vis2['MJD'] = observation_date.mjd + m.vis2['INT_TIME'] = instrument_data.itime + m.vis2['VIS2DATA'] = self.vis2 + m.vis2['VIS2ERR'] = self.e_vis2 + m.vis2['UCOORD'] = ucoord + m.vis2['VCOORD'] = vcoord + m.vis2['STA_INDEX'] = self._format_STAINDEX_V2(self.bholes) + m.vis2['FLAG'] = flagVis + + # oi_t3 extension data + 
m.t3['TARGET_ID'] = 1 + m.t3['TIME'] = 0 + m.t3['MJD'] = observation_date.mjd + m.t3['T3AMP'] = self.camp + m.t3['T3AMPERR'] = self.e_camp + m.t3['T3PHI'] = self.closure_phases + m.t3['T3PHIERR'] = self.e_cp + m.t3['U1COORD'] = u1coord + m.t3['V1COORD'] = v1coord + m.t3['U2COORD'] = u2coord + m.t3['V2COORD'] = v2coord + m.t3['STA_INDEX'] = self._format_STAINDEX_T3(self.tholes) + m.t3['FLAG'] = flagT3 + + # oi_wavelength extension data + m.wavelength["EFF_WAVE"] = wl + m.wavelength["EFF_BAND"] = e_wl + + return m + + def init_oimodel_arrays(self, oimodel): + """ + Set dtypes and initialize shapes for AmiOiModel arrays, + depending on if averaged or multi-integration version. + + Parameters + ---------- + oimodel: AmiOIModel object + empty model + """ + if self.method == "multi": + # update dimensions of arrays for multi-integration oifits + target_dtype = oimodel.target.dtype + wavelength_dtype = np.dtype([("EFF_WAVE", "<f4"), ("EFF_BAND", "<f4")]) + array_dtype = np.dtype( + [ + ("TEL_NAME", "S16"), + ("STA_NAME", "S16"), + ("STA_INDEX", "<i2"), + ("DIAMETER", "<f4"), + ("STAXYZ", "<f8", (3,)), + ("FOV", "<f8"), + ("FOVTYPE", "S6"), + ("CTRS_EQT", "<f8", (2,)), + ("PISTONS", "<f8", (self.nslices,)), + ("PIST_ERR", "<f8", (self.nslices,)), + ] + ) + vis_dtype = np.dtype( + [ + ("TARGET_ID", "<i2"), + ("TIME", "<f8"), + ("MJD", "<f8"), + ("INT_TIME", "<f8"), + ("VISAMP", "<f8", (self.nslices,)), + ("VISAMPERR", "<f8", (self.nslices,)), + ("VISPHI", "<f8", (self.nslices,)), + ("VISPHIERR", "<f8", (self.nslices,)), + ("UCOORD", "<f8"), + ("VCOORD", "<f8"), + ("STA_INDEX", "<i2", (2,)), + ("FLAG", "i1"), + ] + ) + vis2_dtype = np.dtype( + [ + ("TARGET_ID", "<i2"), + ("TIME", "<f8"), + ("MJD", "<f8"), + ("INT_TIME", "<f8"), + ("VIS2DATA", "<f8", (self.nslices,)), + ("VIS2ERR", "<f8", (self.nslices,)), + ("UCOORD", "<f8"), + ("VCOORD", "<f8"), + ("STA_INDEX", "<i2", (2,)), + ("FLAG", "i1"), + ] + ) + t3_dtype = np.dtype( + [ + ("TARGET_ID", "<i2"), + ("TIME", "<f8"), + 
("MJD", "<f8"), + ("INT_TIME", "<f8"), + ("T3AMP", "<f8", (self.nslices,)), + ("T3AMPERR", "<f8", (self.nslices,)), + ("T3PHI", "<f8", (self.nslices,)), + ("T3PHIERR", "<f8", (self.nslices,)), + ("U1COORD", "<f8"), + ("V1COORD", "<f8"), + ("U2COORD", "<f8"), + ("V2COORD", "<f8"), + ("STA_INDEX", "<i2", (3,)), + ("FLAG", "i1"), + ] + ) + else: + target_dtype = oimodel.target.dtype + wavelength_dtype = oimodel.wavelength.dtype + array_dtype = np.dtype( + [ + ("TEL_NAME", "S16"), + ("STA_NAME", "S16"), + ("STA_INDEX", "<i2"), + ("DIAMETER", "<f4"), + ("STAXYZ", "<f8", (3,)), + ("FOV", "<f8"), + ("FOVTYPE", "S6"), + ("CTRS_EQT", "<f8", (2,)), + ("PISTONS", "<f8"), + ("PIST_ERR", "<f8"), + ] + ) + vis_dtype = oimodel.vis.dtype + vis2_dtype = oimodel.vis2.dtype + t3_dtype = oimodel.t3.dtype + oimodel.array = np.zeros(self.n_holes, dtype=array_dtype) + oimodel.target = np.zeros(1, dtype=target_dtype) + oimodel.vis = np.zeros(self.n_baselines, dtype=vis_dtype) + oimodel.vis2 = np.zeros(self.n_baselines, dtype=vis2_dtype) + oimodel.t3 = np.zeros(self.n_closure_phases, dtype=t3_dtype) + oimodel.wavelength = np.zeros(1, dtype=wavelength_dtype) + + def _maketriples_all(self): + """ + Calculate all three-hole combinations, baselines + + Returns + ------- + tarray: integer array + Triple hole indices (0-indexed), + float array of two uv vectors in all triangles + """ + tlist = [] + for i in range(self.n_holes): + for j in range(self.n_holes): + for k in range(self.n_holes): + if i < j and j < k: + tlist.append((i, j, k)) + tarray = np.array(tlist).astype(int) + + tname = [] + uvlist = [] + # foreach row of 3 elts... 
+ for triple in tarray: + tname.append("{0:d}_{1:d}_{2:d}".format(triple[0], triple[1], triple[2])) + + uvlist.append( + ( + self.ctrs_eqt[triple[0]] - self.ctrs_eqt[triple[1]], + self.ctrs_eqt[triple[1]] - self.ctrs_eqt[triple[2]], + ) + ) + + return tarray, np.array(uvlist) + + def _makebaselines(self): + """ + Calculate all hole pairs, baselines + + Returns + ------- + barray: list + Hole pairs indices, 0-indexed + float array of baselines + """ + blist = [] + bllist = [] + for i in range(self.n_holes): + for j in range(self.n_holes): + if i < j: + blist.append((i, j)) + bllist.append(self.ctrs_eqt[i] - self.ctrs_eqt[j]) + return np.array(blist).astype(int), np.array(bllist) + + def _format_STAINDEX_T3(self, tab): + """ + Converts sta_index to save oifits T3 in the appropriate format + + Parameters + ---------- + tab: array + table of indices + + Returns + ------- + sta_index: list + Hole triples indices + int array of triangles + """ + sta_index = [] + for x in tab: + ap1 = int(x[0]) + ap2 = int(x[1]) + ap3 = int(x[2]) + if np.min(tab) == 0: + line = np.array([ap1, ap2, ap3]) + 1 + else: + line = np.array([ap1, ap2, ap3]) + sta_index.append(line) + return sta_index + + def _format_STAINDEX_V2(self, tab): + """ + Converts sta_index to save oifits V2 in the appropriate format + + Parameters + ---------- + tab: array + table of indices + + Returns + ------- + sta_index: list + Hole baseline indices + int array of baselines + """ + sta_index = [] + for x in tab: + ap1 = int(x[0]) + ap2 = int(x[1]) + if np.min(tab) == 0: + line = np.array([ap1, ap2]) + 1 # RAC 2/2021 + else: + line = np.array([ap1, ap2]) + sta_index.append(line) + return sta_index + + +class CalibOifits: + def __init__(self, targoimodel, caloimodel): + """ + Calibrate (normalize) an AMI observation by subtracting closure phases + of a reference star from those of a target and dividing visibility amplitudes + of the target by those of the reference star. 
+ + Parameters + ---------- + targoimodel: AmiOIModlel, target + caloimodel: AmiOIModlel, reference star (calibrator) + """ + self.targoimodel = targoimodel + self.caloimodel = caloimodel + self.calib_oimodel = targoimodel.copy() + + def update_dtype(self): + """ + Modify the dtype of OI array to include different pistons columns + for calibrated OIFITS files + """ + nrows = 7 + modified_dtype = np.dtype( + [ + ("TEL_NAME", "S16"), + ("STA_NAME", "S16"), + ("STA_INDEX", "<i2"), + ("DIAMETER", "<f4"), + ("STAXYZ", "<f8", (3,)), + ("FOV", "<f8"), + ("FOVTYPE", "S6"), + ("CTRS_EQT", "<f8", (2,)), + ("PISTON_T", "<f8"), + ("PISTON_C", "<f8"), + ("PIST_ERR", "<f8"), + ] + ) + self.calib_oimodel.array = np.zeros(nrows, dtype=modified_dtype) + + def calibrate(self): + """ + Apply the calibration (normalization) routine to calibrate the + target AmiOIModel by the calibrator (reference star) AmiOIModel + + Returns + ------- + calib_oimodel: AmiOIModel + Calibrated AMI datamodel + """ + cp_out = self.targoimodel.t3["T3PHI"] - self.caloimodel.t3["T3PHI"] + sqv_out = self.targoimodel.vis2["VIS2DATA"] / self.caloimodel.vis2["VIS2DATA"] + va_out = self.targoimodel.vis["VISAMP"] / self.caloimodel.vis["VISAMP"] + # using standard propagation of error for multiplication/division + # which assumes uncorrelated Gaussian errors (questionable) + cperr_t = self.targoimodel.t3["T3PHIERR"] + cperr_c = self.caloimodel.t3["T3PHIERR"] + sqverr_c = self.targoimodel.vis2["VIS2ERR"] + sqverr_t = self.caloimodel.vis2["VIS2ERR"] + vaerr_t = self.targoimodel.vis["VISAMPERR"] + vaerr_c = self.caloimodel.vis["VISAMPERR"] + cperr_out = np.sqrt(cperr_t**2.0 + cperr_c**2.0) + sqverr_out = sqv_out * np.sqrt( + (sqverr_t / self.targoimodel.vis2["VIS2DATA"]) ** 2.0 + + (sqverr_c / self.caloimodel.vis2["VIS2DATA"]) ** 2.0 + ) + vaerr_out = va_out * np.sqrt( + (vaerr_t / self.targoimodel.vis["VISAMP"]) ** 2.0 + + (vaerr_c / self.caloimodel.vis["VISAMP"]) ** 2.0 + ) + + pistons_t = 
self.targoimodel.array["PISTONS"] + pisterr_t = self.targoimodel.array["PIST_ERR"] + pistons_c = self.caloimodel.array["PISTONS"] + pisterr_c = self.caloimodel.array["PIST_ERR"] + # sum in quadrature errors from target and calibrator pistons + pisterr_out = np.sqrt(pisterr_t**2 + pisterr_c**2) + + # update OI array, which is currently all zeros, with input oi array + # and updated piston columns + self.update_dtype() + self.calib_oimodel.array["TEL_NAME"] = self.targoimodel.array["TEL_NAME"] + self.calib_oimodel.array["STA_NAME"] = self.targoimodel.array["STA_NAME"] + self.calib_oimodel.array["STA_INDEX"] = self.targoimodel.array["STA_INDEX"] + self.calib_oimodel.array["DIAMETER"] = self.targoimodel.array["DIAMETER"] + self.calib_oimodel.array["STAXYZ"] = self.targoimodel.array["STAXYZ"] + self.calib_oimodel.array["FOV"] = self.targoimodel.array["FOV"] + self.calib_oimodel.array["FOVTYPE"] = self.targoimodel.array["FOVTYPE"] + self.calib_oimodel.array["CTRS_EQT"] = self.targoimodel.array["CTRS_EQT"] + self.calib_oimodel.array["PISTON_T"] = pistons_t + self.calib_oimodel.array["PISTON_C"] = pistons_c + self.calib_oimodel.array["PIST_ERR"] = pisterr_out + + # update calibrated oimodel arrays with calibrated observables + self.calib_oimodel.t3["T3PHI"] = cp_out + self.calib_oimodel.t3["T3PHIERR"] = cperr_out + self.calib_oimodel.vis2["VIS2DATA"] = sqv_out + self.calib_oimodel.vis2["VIS2ERR"] = sqverr_out + self.calib_oimodel.vis["VISAMP"] = va_out + self.calib_oimodel.vis["VISAMPERR"] = vaerr_out + + self.calib_oimodel.array["PISTON_T"] = pistons_t + self.calib_oimodel.array["PISTON_C"] = pistons_c + self.calib_oimodel.array["PIST_ERR"] = pisterr_out + + # add calibrated header keywords + calname = self.caloimodel.meta.target.proposer_name # name of calibrator star + self.calib_oimodel.meta.oifits.calib = calname + + return self.calib_oimodel diff --git a/jwst/ami/tests/test_ami_analyze.py b/jwst/ami/tests/test_ami_analyze.py index 8fb464b44e..ac8e9ed0fe 100755 --- 
a/jwst/ami/tests/test_ami_analyze.py +++ b/jwst/ami/tests/test_ami_analyze.py @@ -10,7 +10,7 @@ from stdatamodels.jwst import datamodels -from jwst.ami import utils, leastsqnrm, hexee, webb_psf +from jwst.ami import utils, leastsqnrm, hexee from jwst.ami.leastsqnrm import hexpb, ffc, ffs, closure_amplitudes from jwst.ami.leastsqnrm import closurephase, redundant_cps from jwst.ami.leastsqnrm import populate_symmamparray @@ -1006,46 +1006,6 @@ def test_analyticnrm2_phasor(): assert_allclose(result, true_result, atol=1e-7) - -# --------------------------------------------------------------- -# webb_psf module test: -# - - -def test_webb_psf(): - """Test of PSF() in the webb_psf module: - Create a Throughput datamodel, having a dummy filter bandpass data - that peaks at 1.0 at the center and decreases in the wings. - """ - min_wl = 5000.0 # lowest wavelength - max_wl = 100000.0 # highest wavelength - - nelem = 28 - - wavelength = np.linspace(min_wl, max_wl, nelem, endpoint=True, dtype=np.float32) - throughput = create_throughput(nelem) - dtype = np.dtype([('wavelength', '<f4'), ('throughput', '<f4')]) - - filt_tab = np.array(list(zip(wavelength, throughput)), dtype=dtype) - filter_model = datamodels.ThroughputModel(filter_table=filt_tab) - - bindown = 4 - - band = webb_psf.get_webbpsf_filter(filter_model, specbin=bindown) - true_band = np.array( - [ - [4.05621603e-01, 1.37962969e-06], - [8.10614496e-01, 2.78703703e-06], - [9.50576201e-01, 4.19444444e-06], - [9.74027127e-01, 5.60185185e-06], - [9.01925057e-01, 7.00925932e-06], - [6.51473783e-01, 8.41666679e-06], - ] - ) - - assert_allclose(band, true_band, atol=1e-7) - - # --------------------------------------------------------------- # utility functions: diff --git a/jwst/ami/tests/test_ami_interface.py b/jwst/ami/tests/test_ami_interface.py index 595bf7dc4c..2da7d16eb3 100644 --- a/jwst/ami/tests/test_ami_interface.py +++ b/jwst/ami/tests/test_ami_interface.py @@ -1,34 +1,79 @@ +import numpy as np import pytest 
import stpipe +from stpipe import crds_client from stdatamodels.jwst import datamodels from jwst.ami import AmiAnalyzeStep -def test_ami_analyze_cube_fail(): - """Make sure ami_analyze fails if input is CubeModel (_calints)""" - model = datamodels.CubeModel((25, 19, 19)) +@pytest.fixture() +def mock_nrm_reference_file(tmp_path, monkeypatch): + """ + At the moment nrm only exists on CRDS-TEST so this + fixture will mock fetching any 'nrm' reference file + """ + fn = tmp_path / "nrm.fits" + + # make a fake nrm file + m = datamodels.NRMModel() + m.nrm = np.zeros((1024, 1024), dtype='f4') + m.save(fn) + + original_get_reference_file = crds_client.get_reference_file + + def mock_get_reference_file(dataset, reference_file_type, observatory): + if reference_file_type == 'nrm': + return str(fn) + return original_get_reference_file(dataset, reference_file_type, observatory) + + monkeypatch.setattr(crds_client, "get_reference_file", mock_get_reference_file) + + +@pytest.fixture() +def example_model(mock_nrm_reference_file): + model = datamodels.CubeModel((2, 80, 80)) + # some non-zero data is required as this step will center + # the image and find the centroid (both fail with all zeros) + model.data[:, 24, 24] = 1 + model.data[:, 28, 28] = 1 model.meta.instrument.name = "NIRISS" model.meta.instrument.filter = "F277W" - model.meta.observation.date = "2019-01-01" + model.meta.subarray.name = "SUB80" + model.meta.observation.date = "2021-12-26" model.meta.observation.time = "00:00:00" - with pytest.raises(RuntimeError): - AmiAnalyzeStep.call(model) + model.meta.target.proposer_name = "" + model.meta.program.pi_name = "someone" + model.meta.target.catalog_name = "" + model.meta.visit.start_time = "2022-06-05 12:15:41.5020000" + model.meta.pointing.pa_v3 = 171.8779402866089 + model.meta.wcsinfo.v3yangle = 0.56126717 + model.meta.filename = "test_calints.fits" + model.meta.instrument.pupil = "NRM" + model.meta.exposure.type = "NIS_AMI" + return model + 
+@pytest.mark.parametrize("oversample", [2, 4]) +def test_ami_analyze_even_oversample_fail(example_model, oversample): + """Make sure ami_analyze fails if oversample is even""" + with pytest.raises(ValueError, match="Oversample value must be an odd integer."): + AmiAnalyzeStep.call(example_model, oversample=oversample) -def test_ami_analyze_no_reffile_fail(monkeypatch): + +@pytest.mark.skip("reference files are not currently used") +def test_ami_analyze_no_reffile_fail(monkeypatch, example_model): """Make sure that ami_analyze fails if no throughput reffile is available""" - model = datamodels.ImageModel((19, 19)) - model.meta.instrument.name = "NIRISS" - model.meta.instrument.filter = "F277W" - model.meta.observation.date = "2019-01-01" - model.meta.observation.time = "00:00:00" def mockreturn(input_model, reftype, observatory=None, asn_exptypes=None): return "N/A" monkeypatch.setattr(stpipe.crds_client, 'get_reference_file', mockreturn) - with pytest.raises(RuntimeError): - AmiAnalyzeStep.call(model) + with pytest.raises(RuntimeError, match="No throughput reference file found."): + AmiAnalyzeStep.call(example_model) + + +def test_ami_analyze_step(example_model): + AmiAnalyzeStep.call(example_model) diff --git a/jwst/ami/tests/test_ami_leastsqnrm.py b/jwst/ami/tests/test_ami_leastsqnrm.py new file mode 100644 index 0000000000..1522353842 --- /dev/null +++ b/jwst/ami/tests/test_ami_leastsqnrm.py @@ -0,0 +1,15 @@ +import numpy as np + +from jwst.ami import leastsqnrm + + +def test_weighted_operations(): + img = np.arange(1, 5, dtype='f4').reshape((2, 2)) + model = np.arange(8, dtype='f4').reshape((2, 2, 2)) + dq = np.zeros((2, 2), dtype='int32') + dq[1, 1] = 1 + x, res, cond, singvals = leastsqnrm.weighted_operations(img, model, dq) + + np.testing.assert_almost_equal(x, [-0.5, 1]) + assert cond is None + np.testing.assert_almost_equal(singvals, [4.4870429, 0.1819676]) diff --git a/jwst/ami/tests/test_ami_utils.py b/jwst/ami/tests/test_ami_utils.py new file mode 
100644 index 0000000000..eace9faa13 --- /dev/null +++ b/jwst/ami/tests/test_ami_utils.py @@ -0,0 +1,28 @@ + +import numpy as np +import pytest + +import jwst.ami.utils as utils + + +@pytest.mark.parametrize("shape, center", [ + ((10, 10), (4.5, 4.5)), + ((11, 11), (5, 5)), +]) +def test_centerpoint(shape, center): + assert utils.centerpoint(shape) == center + + +def test_find_centroid(): + arr = np.zeros((30, 30), dtype='f4') + arr[15, 15] = 1 + thresh = 0.02 + assert np.allclose(utils.find_centroid(arr, thresh), (0.5, 0.5)) + + +@pytest.mark.parametrize("mas, rad", [ + (206264.8062471, 0.001), + (103132403.12355, 0.5), +]) +def test_mas2rad(mas, rad): + assert np.isclose(utils.mas2rad(mas), rad) diff --git a/jwst/ami/utils.py b/jwst/ami/utils.py index 9a8f5320b5..c106d15591 100644 --- a/jwst/ami/utils.py +++ b/jwst/ami/utils.py @@ -5,13 +5,17 @@ import logging import numpy as np import numpy.fft as fft +from scipy.integrate import simpson +from astropy import units as u + +import synphot log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) log.addHandler(logging.NullHandler()) -class Affine2d(): +class Affine2d: """ A class to help implement the Bracewell Fourier 2D affine transformation theorem to calculate appropriate coordinate grids in the Fourier domain (eg @@ -83,11 +87,18 @@ class Affine2d(): Code by Anand Sivaramakrishnan 2018 """ - def __init__(self, mx=None, my=None, sx=None, sy=None, xo=None, yo=None, - rotradccw=None, name="Affine"): + def __init__( + self, + mx=None, + my=None, + sx=None, + sy=None, + xo=None, + yo=None, + rotradccw=None, + name="Affine", + ): """ - Short Summary - ------------- Initialize with transformation constants Parameters @@ -148,13 +159,12 @@ def __init__(self, mx=None, my=None, sx=None, sy=None, xo=None, yo=None, Since this uses an offset xo yo in pixels of the affine transformation, these are *NOT* affected by the 'oversample' in image space. 
The vector it is dotted with is in image space.""" - self.phase_2vector = np.array((my * xo - sx * yo, - mx * yo - sy * xo)) / self.determinant + self.phase_2vector = ( + np.array((my * xo - sx * yo, mx * yo - sy * xo)) / self.determinant + ) def forward(self, point): """ - Short Summary - ------------- Create the forward affine transformation, in ideal-to-distorted coordinates. Parameters @@ -167,15 +177,17 @@ def forward(self, point): trans_point: float, float coordinates in distorted space """ - trans_point = np.array((self.mx * point[0] + self.sx * point[1] + self.xo, - self.my * point[1] + self.sy * point[0] + self.yo)) + trans_point = np.array( + ( + self.mx * point[0] + self.sx * point[1] + self.xo, + self.my * point[1] + self.sy * point[0] + self.yo, + ) + ) return trans_point def reverse(self, point): """ - Short Summary - ------------- Create the reverse affine transformation, in distorted-to-ideal coordinates. Parameters @@ -188,18 +200,26 @@ def reverse(self, point): trans_point: float, float coordinates in ideal space """ - trans_point = np.array( - (self.my * point[0] - self.sx * point[1] - - self.my * self.xo + self.sx * self.yo, - self.mx * point[1] - self.sy * point[0] - - self.mx * self.yo + self.sy * self.xo)) * self.determinant + trans_point = ( + np.array( + ( + self.my * point[0] + - self.sx * point[1] + - self.my * self.xo + + self.sx * self.yo, + self.mx * point[1] + - self.sy * point[0] + - self.mx * self.yo + + self.sy * self.xo, + ) + ) + * self.determinant + ) return trans_point def distortFargs(self, u, v): """ - Short Summary - ------------- Implement the (u,v) to (u',v') change in arguments of F. See class documentation of Bracewell Fourier 2D affine transformation theorem. @@ -224,8 +244,6 @@ def distortFargs(self, u, v): def distortphase(self, u, v): """ - Short Summary - ------------- Calculate the phase term in the Bracewell Fourier 2D affine transformation theorem. 
The phase term is: @@ -246,15 +264,18 @@ def distortphase(self, u, v): phase: complex array phase term divided by the determinant. """ - phase = np.exp(2 * np.pi * 1j / self.determinant * - (self.phase_2vector[0] * u + self.phase_2vector[1] * v)) + phase = np.exp( + 2 + * np.pi + * 1j + / self.determinant + * (self.phase_2vector[0] * u + self.phase_2vector[1] * v) + ) return phase def get_rotd(self): """ - Short Summary - ------------- Calculate the rotation that was used to creat a pure rotation affine2d object. @@ -276,8 +297,6 @@ def get_rotd(self): def affinepars2header(hdr, affine2d): """ - Short Summary - ------------- Write the affine2d parameters into fits header (will be modified or deleted in later build) @@ -294,22 +313,20 @@ def affinepars2header(hdr, affine2d): fits header, updated with affine2d parameters """ - hdr['affine'] = (affine2d.name, 'Affine2d in pupil: name') - hdr['aff_mx'] = (affine2d.mx, 'Affine2d in pupil: xmag') - hdr['aff_my'] = (affine2d.my, 'Affine2d in pupil: ymag') - hdr['aff_sx'] = (affine2d.sx, 'Affine2d in pupil: xshear') - hdr['aff_sy'] = (affine2d.sx, 'Affine2d in pupil: yshear') - hdr['aff_xo'] = (affine2d.xo, 'Affine2d in pupil: x offset') - hdr['aff_yo'] = (affine2d.yo, 'Affine2d in pupil: y offset') - hdr['aff_dev'] = ('analyticnrm2', 'dev_phasor') + hdr["affine"] = (affine2d.name, "Affine2d in pupil: name") + hdr["aff_mx"] = (affine2d.mx, "Affine2d in pupil: xmag") + hdr["aff_my"] = (affine2d.my, "Affine2d in pupil: ymag") + hdr["aff_sx"] = (affine2d.sx, "Affine2d in pupil: xshear") + hdr["aff_sy"] = (affine2d.sx, "Affine2d in pupil: yshear") + hdr["aff_xo"] = (affine2d.xo, "Affine2d in pupil: x offset") + hdr["aff_yo"] = (affine2d.yo, "Affine2d in pupil: y offset") + hdr["aff_dev"] = ("analyticnrm2", "dev_phasor") return hdr def makedisk(N, R, ctr=(0, 0)): """ - Short Summary - ------------- Calculate a 'disk', an array whose values =1 in a circular region near the center of the array, and =0 elsewhere. 
@@ -349,8 +366,6 @@ def makedisk(N, R, ctr=(0, 0)): def trim(m, s): """ - Short Summary - ------------- Remove the edge pixels from an index mask m. Parameters @@ -371,8 +386,9 @@ def trim(m, s): # Go through all indices in the mask: # the x & y lists test for any index being an edge index - if none are # on the edge, remember the indices in new list - if (m[0][ii] == 0 or m[1][ii] == 0 or m[0][ii] == s - 1 or - m[1][ii] == s - 1) is False: + if ( + m[0][ii] == 0 or m[1][ii] == 0 or m[0][ii] == s - 1 or m[1][ii] == s - 1 + ) is False: xl.append(m[0][ii]) yl.append(m[1][ii]) m_masked = (np.asarray(xl), np.asarray(yl)) @@ -382,8 +398,6 @@ def trim(m, s): def avoidhexsingularity(rotation): """ - Short Summary - ------------- Avoid rotation of exact multiples of 15 degrees to avoid NaN's in hextransformee() @@ -407,10 +421,8 @@ def avoidhexsingularity(rotation): return rotation_adjusted -def center_imagepeak(img, r='default', cntrimg=True): +def center_imagepeak(img, r="default", cntrimg=True): """ - Short Summary - ------------- Calculate a cropped version of the input image centered on the peak pixel. Parameters @@ -430,21 +442,21 @@ def center_imagepeak(img, r='default', cntrimg=True): Cropped to place the brightest pixel at the center of the img array """ peakx, peaky, h = min_distance_to_edge(img, cntrimg=cntrimg) - log.debug(' peakx=%g, peaky=%g, distance to edge=%g', peakx, peaky, h) - if r == 'default': + log.debug(" peakx=%g, peaky=%g, distance to edge=%g", peakx, peaky, h) + if r == "default": r = h.copy() else: pass - cropped = img[int(peakx - r):int(peakx + r + 1), int(peaky - r):int(peaky + r + 1)] + cropped = img[ + int(peakx - r):int(peakx + r + 1), int(peaky - r):int(peaky + r + 1) + ] return cropped def centerpoint(s): """ - Short Summary - ------------- Calculate center of image, accounting for odd/even pixel size; used for jinc() and hex transform functions. 
@@ -461,10 +473,8 @@ def centerpoint(s): return (0.5 * s[0] - 0.5, 0.5 * s[1] - 0.5) -def min_distance_to_edge(img, cntrimg=True): +def min_distance_to_edge(img, cntrimg=False): """ - Short Summary - ------------- Calculate the coordinates of the brightest pixel, and the distance between the brightest pixel and the nearest edge of the input array. @@ -506,12 +516,25 @@ def min_distance_to_edge(img, cntrimg=True): def find_centroid(a, thresh): """ - Short Summary - ------------- Calculate the centroid of the image - Long Summary - ------------ + Parameters + ---------- + a: 2D square float + input image array + + thresh: float + Threshold for the absolute value of the FT(a). Normalize abs(CV = FT(a)) + for unity peak, and define the support of good CV when this is above + threshold, then find the phase slope of the CV only over this support. + + Returns + ------- + htilt, vtilt: float, float + Centroid of a, as offset from array center, as calculated by the DFT's. + + Notes + ----- Original domain a, Fourier domain CV sft square image a to CV array, no loss or oversampling - like an fft. Normalize peak of abs(CV) to unity @@ -531,21 +554,6 @@ def find_centroid(a, thresh): center using: image_center = utils.centerpoint(s) + np.array((centroid[1], centroid[0]) and everything holds together sensibly looking at DS9 images of a. - - Parameters - ---------- - a: 2D square float - input image array - - thresh: float - Threshold for the absolute value of the FT(a). Normalize abs(CV = FT(a)) - for unity peak, and define the support of good CV when this is above - threshold, then find the phase slope of the CV only over this support. - - Returns - ------- - htilt, vtilt: float, float - Centroid of a, as offset from array center, as calculated by the DFT's. 
""" ft = matrix_dft.MatrixFourierTransform() @@ -565,8 +573,6 @@ def find_centroid(a, thresh): def quadratic_extremum(p): """ - Short Summary - ------------- Calculate maximum of the quadratic Parameters @@ -586,8 +592,6 @@ def quadratic_extremum(p): def findpeak_1d(yvec, xvec): """ - Short Summary - ------------- Calculate the fit function extreme for a given input vector Parameters @@ -609,12 +613,23 @@ def findpeak_1d(yvec, xvec): def findslope(a, m): """ - Short Summary - ------------- Find slopes of an array - Long Summary - ------------ + Parameters + ---------- + a: 2D float array + phase slope array + + m: 2D array, integer + mask array + + Returns + ------- + slopes: 2D float array + slopes + + Notes + ----- Find slopes of an array, over pixels not bordering the edge of the array. There should be valid data on either side of every pixel selected by the mask m. a is in radians of phase (in Fourier domain) when used in NRM/KP @@ -637,19 +652,6 @@ def findslope(a, m): a.shape[0 or 1]/(2 pi) Multiply the measured phase slope by this gain for pixels of incoming array centroid shift away from array center. 
- - Parameters - ---------- - a: 2D float array - phase slope array - - m: 2D array, integer - mask array - - Returns - ------- - slopes: 2D float array - slopes """ a_up = np.zeros(a.shape) a_dn = np.zeros(a.shape) @@ -672,10 +674,14 @@ def findslope(a, m): tilt = (a_r - a_l) / 2.0, (a_up - a_dn) / 2.0 # raw estimate of phase slope c = centerpoint(a.shape) C = (int(c[0]), int(c[1])) - sigh, sigv = tilt[0][C[0] - 1:C[0] + 1, C[1] - 1:C[1] + 1].std(), \ - tilt[1][C[0] - 1:C[0] + 1, C[1] - 1:C[1] + 1].std() - avgh, avgv = tilt[0][C[0] - 1:C[0] + 1, C[1] - 1:C[1] + 1].mean(), \ - tilt[1][C[0] - 1:C[0] + 1, C[1] - 1:C[1] + 1].mean() + sigh, sigv = ( + tilt[0][C[0] - 1:C[0] + 1, C[1] - 1:C[1] + 1].std(), + tilt[1][C[0] - 1:C[0] + 1, C[1] - 1:C[1] + 1].std(), + ) + avgh, avgv = ( + tilt[0][C[0] - 1:C[0] + 1, C[1] - 1:C[1] + 1].mean(), + tilt[1][C[0] - 1:C[0] + 1, C[1] - 1:C[1] + 1].mean(), + ) # second stage mask cleaning: 5 sig rejection of mask newmaskh = np.where(np.abs(tilt[0] - avgh) < 5 * sigh) @@ -694,8 +700,6 @@ def findslope(a, m): def quadratic(p, x): """ - Short Summary - ------------- Calculate value of x at minimum or maximum value of y, (value of quadratic function at argument) @@ -727,12 +731,21 @@ def quadratic(p, x): def makeA(nh): """ - Long Summary - ------------- Writes the 'NRM matrix' that gets pseudo-inverted to provide (arbitrarily constrained) zero-mean phases of the holes. 
Algorithm is taken verbatim from Anand's pseudoinverse.py + Parameters + ---------- + nh: integer + number of holes in NR mask + + Returns + ------- + matrixA: 2D float array + nh columns, nh(nh-1)/2 rows (eg 21 for nh=7) + Notes + ----- Ax = b where x are the nh hole phases, b the nh(nh-1)/2 fringe phases, and A the NRM matrix @@ -756,19 +769,9 @@ def makeA(nh): When tested against Alex'' nrm_model.py 'piston_phase' text output of fringe phases, these signs appear to be correct - anand@stsci.edu 12 Nov 2014 - - Parameters - ---------- - nh: integer - number of holes in NR mask - - Returns - ------- - matrixA: 2D float array - nh columns, nh(nh-1)/2 rows (eg 21 for nh=7) """ - log.debug('-------') - log.debug(' makeA:') + log.debug("-------") + log.debug(" makeA:") ncols = (nh * (nh - 1)) // 2 nrows = nh @@ -780,22 +783,20 @@ def makeA(nh): if h1 >= nh: break else: - log.debug(' row: %s, h1: %s, h2: %s', row, h1, h2) + log.debug(" row: %s, h1: %s, h2: %s", row, h1, h2) matrixA[row, h2] = -1 matrixA[row, h1] = +1 row += 1 - log.debug('matrixA:') - log.debug(' %s', matrixA) + log.debug("matrixA:") + log.debug(" %s", matrixA) return matrixA def fringes2pistons(fringephases, nholes): """ - Short Summary - ------------- For nrm_model.py to use to extract pistons out of fringes, given its hole bookkeeping, which apparently matches that of this module, and is the same as Noah Gamper's. 
@@ -821,8 +822,6 @@ def fringes2pistons(fringephases, nholes): def rebin(a=None, rc=(2, 2)): """ - Short Summary - ------------- Perform simple-minded flux-conserving binning using specified binning kernel, clipping trailing size mismatch: eg a 10x3 array binned by 3 results in a 3x1 array @@ -847,8 +846,6 @@ def rebin(a=None, rc=(2, 2)): def krebin(a, shape): """ - Short Summary - ------------- Klaus P's fastrebin from web Parameters @@ -872,8 +869,6 @@ def krebin(a, shape): def rcrosscorrelate(a=None, b=None): """ - Short Summary - ------------- Calculate cross correlation of two identically-shaped real arrays Parameters @@ -896,8 +891,6 @@ def rcrosscorrelate(a=None, b=None): def lambdasteps(lam, frac_width, steps=4): """ - Short Summary - ------------- Create array of increments of lambda Parameters @@ -922,16 +915,15 @@ def lambdasteps(lam, frac_width, steps=4): steps = steps / 2.0 # add some very small number to the end to include the last number. - lambda_array = np.arange(-1 * frac * lam + lam, frac * lam + lam + 10e-10, - frac * lam / steps) + lambda_array = np.arange( + -1 * frac * lam + lam, frac * lam + lam + 10e-10, frac * lam / steps + ) return lambda_array def tophatfilter(lam_c, frac_width, npoints=10): """ - Short Summary - ------------- Create tophat filter list from array of lambda values Parameters @@ -959,8 +951,6 @@ def tophatfilter(lam_c, frac_width, npoints=10): def crosscorrelate(a=None, b=None): """ - Short Summary - ------------- Calculate cross correlation of two identically-shaped real or complex arrays @@ -978,7 +968,7 @@ def crosscorrelate(a=None, b=None): complex array that is the correlation of the two input arrays. 
""" if a.shape != b.shape: - log.critical('crosscorrelate: need identical arrays') + log.critical("crosscorrelate: need identical arrays") return None fac = np.sqrt(a.shape[0] * a.shape[1]) @@ -987,27 +977,25 @@ def crosscorrelate(a=None, b=None): B = fft.fft2(b) / fac c = fft.ifft2(A * B.conj()) * fac * fac - log.debug('----------------') - log.debug(' crosscorrelate:') - log.debug(' a: %s:', a) - log.debug(' A: %s:', A) - log.debug(' b: %s:', b) - log.debug(' B: %s:', B) - log.debug(' c: %s:', c) - log.debug(' a.sum: %s:', a.sum()) - log.debug(' b.sum: %s:', b.sum()) - log.debug(' c.sum: %s:', c.sum()) - log.debug(' a.sum*b.sum: %s:', a.sum() * b.sum()) - log.debug(' c.sum.real: %s:', c.sum().real) - log.debug(' a.sum*b.sum/c.sum.real: %s:', a.sum() * b.sum() / c.sum().real) + log.debug("----------------") + log.debug(" crosscorrelate:") + log.debug(" a: %s:", a) + log.debug(" A: %s:", A) + log.debug(" b: %s:", b) + log.debug(" B: %s:", B) + log.debug(" c: %s:", c) + log.debug(" a.sum: %s:", a.sum()) + log.debug(" b.sum: %s:", b.sum()) + log.debug(" c.sum: %s:", c.sum()) + log.debug(" a.sum*b.sum: %s:", a.sum() * b.sum()) + log.debug(" c.sum.real: %s:", c.sum().real) + log.debug(" a.sum*b.sum/c.sum.real: %s:", a.sum() * b.sum() / c.sum().real) return fft.fftshift(c) def rotate2dccw(vectors, thetarad): """ - Short Summary - ------------- Apply a CCW rotation to the given vectors. 
For a positive (CCW) rotation: x decreases under slight rotation y increases under slight rotation @@ -1028,8 +1016,9 @@ def rotate2dccw(vectors, thetarad): c, s = (np.cos(thetarad), np.sin(thetarad)) ctrs_rotated = [] for vector in vectors: - ctrs_rotated.append([c * vector[0] - s * vector[1], - s * vector[0] + c * vector[1]]) + ctrs_rotated.append( + [c * vector[0] - s * vector[1], s * vector[0] + c * vector[1]] + ) rot_vectors = np.array(ctrs_rotated) return rot_vectors @@ -1037,8 +1026,6 @@ def rotate2dccw(vectors, thetarad): def findmax(mag, vals, mid=1.0): """ - Short Summary - ------------- Fit a quadratic to the given input arrays mag and vals, and calculate the value of mag at the extreme value of vals. @@ -1062,7 +1049,7 @@ def findmax(mag, vals, mid=1.0): value of vals corresponding to maxx """ p = np.polyfit(mag, vals, 2) - fitr = np.arange(0.95 * mid, 1.05 * mid, .01) + fitr = np.arange(0.95 * mid, 1.05 * mid, 0.01) maxx, maxy, fitc = quadratic(p, fitr) return maxx, maxy @@ -1070,8 +1057,6 @@ def findmax(mag, vals, mid=1.0): def pix_median_fill_value(input_array, input_dq_array, bsize, xc, yc): """ - Short Summary - ------------- For the pixel specified by (xc, yc), calculate the median value of the good values within the box of size bsize neighboring pixels. If any of the box is outside the data, 0 will be returned. @@ -1100,16 +1085,15 @@ def pix_median_fill_value(input_array, input_dq_array, bsize, xc, yc): # Extract the region of interest for the data try: - data_array = input_array[yc - hbox:yc + hbox + 1, xc - hbox: xc + hbox + 1] - dq_array = input_dq_array[yc - hbox:yc + hbox + 1, xc - hbox: xc + hbox + 1] + data_array = input_array[yc - hbox:yc + hbox + 1, xc - hbox:xc + hbox + 1] + dq_array = input_dq_array[yc - hbox:yc + hbox + 1, xc - hbox:xc + hbox + 1] except IndexError: # If the box is outside the data, return 0 - log.warning('Box for median filter is outside the data') - return 0. 
+ log.warning("Box for median filter is outside the data") + return 0.0 # only keep pixels not flagged with DO_NOT_USE - wh_good = np.where((np.bitwise_and(dq_array, dqflags.pixel['DO_NOT_USE']) - == 0)) + wh_good = np.where((np.bitwise_and(dq_array, dqflags.pixel["DO_NOT_USE"]) == 0)) filtered_array = data_array[wh_good] # compute the median, excluding NaN's @@ -1117,16 +1101,14 @@ def pix_median_fill_value(input_array, input_dq_array, bsize, xc, yc): # check for bad result if np.isnan(median_value): - log.warning('Median filter returned NaN; setting value to 0.') - median_value = 0. + log.warning("Median filter returned NaN; setting value to 0.") + median_value = 0.0 return median_value def mas2rad(mas): """ - Short Summary - ------------- Convert angle in milli arc-sec to radians Parameters @@ -1139,14 +1121,12 @@ def mas2rad(mas): rad: float angle in radians """ - rad = mas * (10**(-3)) / (3600 * 180 / np.pi) + rad = mas * (10 ** (-3)) / (3600 * 180 / np.pi) return rad def img_median_replace(img_model, box_size): """ - Short Summary - ------------- Replace bad pixels (either due to a DQ value of DO_NOT_USE or having a value of NaN) with the median value of surrounding good pixels. 
@@ -1166,24 +1146,278 @@ def img_median_replace(img_model, box_size): input_dq = img_model.dq num_nan = np.count_nonzero(np.isnan(input_data)) - num_dq_bad = np.count_nonzero(input_dq == dqflags.pixel['DO_NOT_USE']) + num_dq_bad = np.count_nonzero(input_dq == dqflags.pixel["DO_NOT_USE"]) # check to see if any of the pixels are bad - if (num_nan + num_dq_bad > 0): + if num_nan + num_dq_bad > 0: - log.info(f'Applying median filter for {num_nan} NaN and {num_dq_bad} DO_NOT_USE pixels') - bad_locations = np.where(np.isnan(input_data) | - np.equal(input_dq, - dqflags.pixel['DO_NOT_USE'])) + log.info( + f"Applying median filter for {num_nan} NaN and {num_dq_bad} DO_NOT_USE pixels" + ) + bad_locations = np.where( + np.isnan(input_data) | np.equal(input_dq, dqflags.pixel["DO_NOT_USE"]) + ) # fill the bad pixel values with the median of the data in a box region for i_pos in range(len(bad_locations[0])): y_box_pos = bad_locations[0][i_pos] x_box_pos = bad_locations[1][i_pos] - median_fill = pix_median_fill_value(input_data, input_dq, - box_size, x_box_pos, y_box_pos) + median_fill = pix_median_fill_value( + input_data, input_dq, box_size, x_box_pos, y_box_pos + ) input_data[y_box_pos, x_box_pos] = median_fill img_model.data = input_data return img_model + + +def get_filt_spec(throughput_model): + """ + Load filter throughput data into synphot spectrum object + + Parameters + ---------- + throughput_model: ThroughputModel + Datamodel containing normalized fractional throughput + data for one of the four AMI filters + + Returns + ------- + band: synphot Spectrum object + """ + thruput = throughput_model.filter_table + wl_list = np.asarray([tup[0] for tup in thruput]) # angstroms + tr_list = np.asarray([tup[1] for tup in thruput]) + band = synphot.spectrum.SpectralElement( + synphot.models.Empirical1D, points=wl_list, lookup_table=tr_list, keep_neg=False + ) + return band + +def get_flat_spec(): + """ + Produce a synphot spectrum object with constant (unity) flux + + Returns + 
------- + flatspec: synphot Spectrum object + """ + flatspec = synphot.SourceSpectrum(synphot.models.ConstFlux1D, amplitude=1) + + return flatspec + + +def combine_src_filt(bandpass, srcspec, trim=0.01, nlambda=19): + """ + Get the observed spectrum through a filter. + Largely copied from Poppy instrument.py + Define nlambda bins of wavelengths, calculate effstim for each, normalize by effstim total. + nlambda should be calculated so there are ~10 wavelengths per resolution element (19 should work) + + Parameters + ---------- + bandpass: synphot Spectrum + filter bandpass (from get_filt_spec) + srcspec: synphot Spectrum + source spectrum (from get_src_spec) + trim: float, None + if not None, trim bandpass to where throughput greater than trim + nlambda: integer + number of wavelengths across filter to return + + Returns + ------- + finalsrc: numpy array + Array of shape (nlambda,2) containing wavelengths, final throughputs + + """ + + wl_filt, th_filt = bandpass._get_arrays(bandpass.waveset) + + if trim: + log.debug("Trimming bandpass to above %.1e throughput" % trim) + goodthru = np.where(np.asarray(th_filt) > trim) + low_idx, high_idx = goodthru[0][0], goodthru[0][-1] + wl_filt, th_filt = wl_filt[low_idx:high_idx], th_filt[low_idx:high_idx] + ptsin = len(wl_filt) + if nlambda is None: + nlambda = ptsin # Don't bin throughput + # get effstim for bins of wavelengths + minwave, maxwave = wl_filt.min(), wl_filt.max() # trimmed or not + wave_bin_edges = np.linspace(minwave, maxwave, nlambda + 1) + wavesteps = (wave_bin_edges[:-1] + wave_bin_edges[1:]) / 2 + deltawave = wave_bin_edges[1] - wave_bin_edges[0] + area = 1 * (u.m * u.m) + effstims = [] + + binfac = ptsin // nlambda + log.debug(("Binning spectrum by %i: from %i points to %i points" % (binfac, ptsin, nlambda))) + for wave in wavesteps: + log.debug(f"\t Integrating across band centered at {wave.to(u.micron):.2f} " + f"with width {deltawave.to(u.micron):.2f}") + box = 
synphot.spectrum.SpectralElement(synphot.models.Box1D, amplitude=1, x_0=wave, + width=deltawave) * bandpass + + binset = np.linspace(wave - deltawave, wave + deltawave, 30) + binset = binset[binset >= 0] # remove any negative values + result = synphot.observation.Observation(srcspec, box, binset=binset).effstim( + "count", area=area + ) + effstims.append(result) + + effstims = u.Quantity(effstims) + effstims /= effstims.sum() # Normalized count rate (total=1) is unitless + wave_m = wavesteps.to_value(u.m) # convert to meters + effstims = effstims.to_value() # strip units + + finalsrc = np.array( + (effstims, wave_m) + ).T # this is the order expected by InstrumentData + + return finalsrc + + +def get_cw_beta(bandpass): + """ + Convert input bandpass array into format expected by code: + Weighted mean wavelength of each wavelength bin, fractional bandpass + + Parameters: + ----------- + bandpass: array + Array of weights, wavelengths + Returns: + -------- + bandpass: array + Weighted mean wavelength in meters, fractional bandpass + """ + wt = bandpass[:, 0] + wl = bandpass[:, 1] + cw = ( + wl * wt + ).sum() / wt.sum() # Weighted mean wavelength in meters "central wavelength" + area = simpson(wt, x=wl) + ew = area / wt.max() # equivalent width + beta = ew / cw # fractional bandpass + return cw, beta + +def handle_bandpass(bandpass, throughput_model): + """ + Logic for determining what to do with the input bandpass. + If user-provided, return in format expected by code. If none, + fetch filter throughput and combine with flat spectrum to + produce appropriate array. + + Parameters: + ----------- + bandpass: Synphot spectrum or array, or None + User-defined bandpass to override filter/source + throughput_model: ThroughputModel + Datamodel containing filter throughput info. + Will not be used if bandpass is not None. 
+ + Returns: + -------- + bandpass: array + Array of weights, wavelengths used to generate model + """ + if bandpass is not None: + log.info( + "User-defined bandpass provided: OVERWRITING ALL NIRISS-SPECIFIC FILTER/BANDPASS VARIABLES" + ) + # bandpass can be user-defined synphot object or appropriate array + if isinstance(bandpass, synphot.spectrum.SpectralElement): + log.info("User-defined synphot spectrum provided") + wl, wt = bandpass._get_arrays(bandpass.waveset) + bandpass = np.array((wt, wl)).T + else: + log.info("User-defined bandpass array provided") + bandpass = np.array(bandpass) + + else: + # Default behavior: get the filter and source spectrum + log.info(f'Reading throughput model data for {throughput_model.meta.instrument.filter}.') + filt_spec = get_filt_spec(throughput_model) + log.info('Using flat spectrum model.') + flat_spec = get_flat_spec() + nspecbin = 19 # how many wavelngth bins used across bandpass -- affects runtime + bandpass = combine_src_filt( + filt_spec, + flat_spec, + trim=0.01, + nlambda=nspecbin, + ) + + return bandpass + + +def _cdmatrix_to_sky(vec, cd11, cd12, cd21, cd22): + """ + Convert the CD matrix into RA, Dec pixel scale. + + Parameters: + ---------- + vec: array + 2d, units of pixels + cd11: float + Linear transform matrix element (axis 1 w.r.t x) + cd12: float + Linear transform matrix element (axis 1 w.r.t y) + cd21: float + Linear transform matrix element (axis 2 w.r.t x) + cd22: float + Linear transform matrix element (axis 2 w.r.t y) + + Returns: + -------- + Array containing x pixel scale vector, y pixel scale vector + + Notes: + ------ + Use the global header values explicitly, for clarity. + CD inputs are 4 scalars, conceptually 2x2 array in units degrees/pixel + + """ + return np.array((cd11 * vec[0] + cd12 * vec[1], cd21 * vec[0] + cd22 * vec[1])) + + +def degrees_per_pixel(datamodel): + """ + Get pixel scale info from data model. 
+ If it fails to find the right keywords, use 0.0656 as/pixel + + Parameters: + ---------- + datamodel: datamodel object + + Returns: + -------- + pixel scale in degrees/pixel + """ + wcsinfo = datamodel.meta.wcsinfo._instance + if ( + "cd1_1" in wcsinfo + and "cd1_2" in wcsinfo + and "cd2_1" in wcsinfo + and "cd2_2" in wcsinfo + ): + cd11 = datamodel.meta.wcsinfo.cd1_1 + cd12 = datamodel.meta.wcsinfo.cd1_2 + cd21 = datamodel.meta.wcsinfo.cd2_1 + cd22 = datamodel.meta.wcsinfo.cd2_2 + # Create unit vectors in detector pixel X and Y directions, units: detector pixels + dxpix = np.array((1.0, 0.0)) # axis 1 step + dypix = np.array((0.0, 1.0)) # axis 2 step + # transform pixel x and y steps to RA-tan, Dec-tan degrees + dxsky = _cdmatrix_to_sky(dxpix, cd11, cd12, cd21, cd22) + dysky = _cdmatrix_to_sky(dypix, cd11, cd12, cd21, cd22) + log.debug("Used CD matrix for pixel scales") + return np.linalg.norm(dxsky, ord=2), np.linalg.norm(dysky, ord=2) + elif "cdelt1" in wcsinfo and "cdelt2" in wcsinfo: + return datamodel.meta.wcsinfo.cdelt1, datamodel.meta.wcsinfo.cdelt2 + log.debug("Used CDELT[12] for pixel scales") + else: + log.warning( + "WARNING: NIRISS pixel scales not in header. Using 65.6 mas in deg/pix" + ) + return 65.6 / (60.0 * 60.0 * 1000), 65.6 / (60.0 * 60.0 * 1000) diff --git a/jwst/ami/webb_psf.py b/jwst/ami/webb_psf.py deleted file mode 100644 index 723294bf23..0000000000 --- a/jwst/ami/webb_psf.py +++ /dev/null @@ -1,81 +0,0 @@ - -import logging -import numpy as np - -from . import utils -from . 
import nrm_consts - -log = logging.getLogger(__name__) -log.addHandler(logging.NullHandler()) - - -def get_webbpsf_filter(filter_model, specbin=None, trim=False): - """ - Short Summary - ------------- - Load the filter bandpass data using files from WebbPSF - - Parameters - ---------- - filter_model: filter model object - filter throughput reference data - - specbin: integer - factor to bin spectrum down by - - trim: boolean - trims below 0.8 and above 1.2 lambda_c (dg - is this right?) - - Returns - ------- - spec - 2D float array - filter bandpass data: [weight, wavelength_in_microns] - """ - W = 1 # remove confusion - wavelength index - T = 0 # remove confusion - trans index after reading in... - - nwave = len(filter_model.filter_table['wavelength']) - tmp_array = np.zeros((nwave, 2)) - - # input in angst _ANGSTROM = 1.0e-10 - tmp_array[:, W] = filter_model.filter_table['wavelength'] * 1.0e-10 - - # weights (peak unity, unnormalized sum) - tmp_array[:, T] = filter_model.filter_table['throughput'] - - # remove leading and trailing throughput lines with 'flag' array of indices - flag = np.where(tmp_array[:, T] != 0)[0] - spec = tmp_array[flag, :] - - # rebin as desired - fewer wavelengths for debugging quickly - if specbin is not None: - smallshape = spec.shape[0] // specbin - - log.debug(' bin filter data by %d from %d to %d', specbin, - spec.shape[0], smallshape) - - spec = spec[:smallshape * specbin, :] # clip trailing - spec = utils.krebin(spec, (smallshape, 2)) - spec[:, W] = spec[:, W] / float(specbin) # krebin added up waves - spec[:, T] = spec[:, T] / float(specbin) # krebin added up trans too - log.debug(' post-bin shape %s', spec.shape) - - if trim: - log.debug(' TRIMing filter data ...') - wl = spec[:, W].copy() - tr = spec[:, T].copy() - idx = np.where((wl > (1.0 - 0.5 * trim[1]) * trim[0]) - & (wl < (1.0 + 0.5 * trim[1]) * trim[0])) - wl = wl[idx] - tr = tr[idx] - spec = np.zeros((len(idx[0]), 2)) - spec[:, 1] = wl - spec[:, 0] = tr - log.debug(' 
post-trim shape %s', spec.shape) - - log.debug(' final filter shape %s', spec.shape) - log.debug(' %d filter samples between %.3f and %.3f um', - len(spec[:, 0]), spec[0, W] / nrm_consts.um_, - spec[-1, W] / nrm_consts.um_) - - return spec diff --git a/jwst/associations/lib/rules_level2_base.py b/jwst/associations/lib/rules_level2_base.py index fb25c81447..9243b35537 100644 --- a/jwst/associations/lib/rules_level2_base.py +++ b/jwst/associations/lib/rules_level2_base.py @@ -213,9 +213,9 @@ def make_member(self, item): 'expname': Utility.rename_to_level2a( item['filename'], use_integrations=(self.is_item_coron(item) | - # NIS_AMI currently uses rate files; - # uncomment the next line to switch to rateints - # self.is_item_ami(item) | + # NIS_AMI used to use rate files; + # updated to use rateints + self.is_item_ami(item) | self.is_item_tso(item)), ), 'exptype': self.get_exposure_type(item), diff --git a/jwst/associations/lib/rules_level3_base.py b/jwst/associations/lib/rules_level3_base.py index cd7d8c4ba1..f342e080b3 100644 --- a/jwst/associations/lib/rules_level3_base.py +++ b/jwst/associations/lib/rules_level3_base.py @@ -268,9 +268,9 @@ def make_member(self, item): item['filename'], exp_type=item['exp_type'], use_integrations=(self.is_item_coron(item) | - # NIS_AMI currently uses cal files; - # uncomment the next line to switch to calints - # self.is_item_ami(item) | + # NIS_AMI used to use cal files; + # now switched to calints + self.is_item_ami(item) | self.is_item_tso(item)), member_exptype=exptype ) diff --git a/jwst/lib/suffix.py b/jwst/lib/suffix.py index eae3fcab10..559fbd8b64 100644 --- a/jwst/lib/suffix.py +++ b/jwst/lib/suffix.py @@ -36,7 +36,7 @@ # have to exist. Used by `find_suffixes` to # add to the result it produces. 
SUFFIXES_TO_ADD = [ - 'ami', 'amiavg', 'aminorm', + 'ami', 'amiavg', 'aminorm', 'ami-oi', 'aminorm-oi', 'blot', 'bsub', 'bsubints', 'c1d', 'cal', 'calints', 'cat', 'crf', 'crfints', 'dark', @@ -52,7 +52,15 @@ # Suffixes that are discovered but should not be considered. # Used by `find_suffixes` to remove undesired values it has found. -SUFFIXES_TO_DISCARD = ['engdblogstep', 'functionwrapper', 'pipeline', 'rscd_step', 'step', 'systemcall'] +SUFFIXES_TO_DISCARD = [ + 'ami_average', + 'engdblogstep', + 'functionwrapper', + 'pipeline', + 'rscd_step', + 'step', + 'systemcall', +] # Calculated suffixes. diff --git a/jwst/pipeline/calwebb_ami3.py b/jwst/pipeline/calwebb_ami3.py index 9d28e59392..ee58f4ac79 100644 --- a/jwst/pipeline/calwebb_ami3.py +++ b/jwst/pipeline/calwebb_ami3.py @@ -6,9 +6,7 @@ # step imports from ..ami import ami_analyze_step -from ..ami import ami_average_step from ..ami import ami_normalize_step -from ..model_blender import blendmeta __all__ = ['Ami3Pipeline'] @@ -22,19 +20,15 @@ class Ami3Pipeline(Pipeline): Ami3Pipeline: Apply all level-3 calibration steps to an association of level-2b AMI exposures. 
Included steps are: ami_analyze (fringe detection) - ami_average (average results of fringe detection) ami_normalize (normalize results by reference target) """ class_alias = "calwebb_ami3" - spec = """ - save_averages = boolean(default=True) - """ # Define aliases to steps step_defs = {'ami_analyze': ami_analyze_step.AmiAnalyzeStep, - 'ami_average': ami_average_step.AmiAverageStep, + # 'ami_average': ami_average_step.AmiAverageStep, 'ami_normalize': ami_normalize_step.AmiNormalizeStep } @@ -80,15 +74,15 @@ def process(self, input): # Do the LG analysis for this image log.debug('Do LG processing for member %s', input_file) - result = self.ami_analyze(input_file) + result1, result2, result3 = self.ami_analyze(input_file) - # Save the LG analysis results to a file - result.meta.asn.pool_name = asn['asn_pool'] - result.meta.asn.table_name = op.basename(asn.filename) - self.save_model(result, output_file=input_file, suffix='ami', asn_id=asn_id) + # Save the averaged LG analysis results to a file + result1.meta.asn.pool_name = asn['asn_pool'] + result1.meta.asn.table_name = op.basename(asn.filename) + self.save_model(result1, output_file=input_file, suffix='ami-oi', asn_id=asn_id) # Save the result for use as input to ami_average - targ_lg.append(result) + targ_lg.append(result1) # Run ami_analyze on all the psf members psf_lg = [] @@ -96,69 +90,32 @@ def process(self, input): # Do the LG analysis for this image log.debug('Do LG processing for member %s', input_file) - result = self.ami_analyze(input_file) + result1, result2, result3 = self.ami_analyze(input_file) # Save the LG analysis results to a file - result.meta.asn.pool_name = asn['asn_pool'] - result.meta.asn.table_name = op.basename(asn.filename) - self.save_model(result, output_file=input_file, suffix='ami', asn_id=asn_id) + result1.meta.asn.pool_name = asn['asn_pool'] + result1.meta.asn.table_name = op.basename(asn.filename) + self.save_model(result1, output_file=input_file, suffix='psf-ami-oi', 
asn_id=asn_id) # Save the result for use as input to ami_average - psf_lg.append(result) - - # Average the reference PSF image results - psf_avg = None - if len(psf_files) > 0: - self.log.debug('Calling ami_average for PSF results ...') - psf_avg = self.ami_average(psf_lg) - - # Save the results to a file, if requested - if self.save_averages: - psf_avg.meta.asn.pool_name = asn['asn_pool'] - psf_avg.meta.asn.table_name = op.basename(asn.filename) - - # Perform blending of metadata for all inputs to this - # output file - self.log.info('Blending metadata for averaged psf') - blendmeta.blendmodels(psf_avg, inputs=psf_lg) - self.save_model(psf_avg, suffix='psf-amiavg') - del psf_lg - - # Average the science target image results - if len(targ_files) > 0: - self.log.debug('Calling ami_average for target results ...') - targ_avg = self.ami_average(targ_lg) - - # Save the results to a file, if requested - if self.save_averages: - targ_avg.meta.asn.pool_name = asn['asn_pool'] - targ_avg.meta.asn.table_name = op.basename(asn.filename) - - # Perform blending of metadata for all inputs to this - # output file - self.log.info('Blending metadata for averaged target') - blendmeta.blendmodels(targ_avg, inputs=targ_lg) - self.save_model(targ_avg, suffix='amiavg') - del targ_lg - - # Now that all LGAVG products have been produced, do - # normalization of the target results by the reference - # results, if reference results exist - if psf_avg is not None: - - result = self.ami_normalize(targ_avg, psf_avg) - - # Save the result - result.meta.asn.pool_name = asn['asn_pool'] - result.meta.asn.table_name = op.basename(asn.filename) - - # Perform blending of metadata for all inputs to this output file - self.log.info('Blending metadata for PSF normalized target') - blendmeta.blendmodels(result, inputs=[targ_avg, psf_avg]) - self.save_model(result, suffix='aminorm') - result.close() - del psf_avg - del targ_avg + psf_lg.append(result1) + + # Normalize all target results by matching psf 
results + # assuming one ref star exposure per targ exposure + if (len(psf_files) > 0) & (len(targ_files) > 0): + for (targ, psf) in zip(targ_lg,psf_lg): + result = self.ami_normalize(targ, psf) + # Save the result + result.meta.asn.pool_name = asn['asn_pool'] + result.meta.asn.table_name = op.basename(asn.filename) + + # Perform blending of metadata for all inputs to this output file + # self.log.info('Blending metadata for PSF normalized target') + self.save_model(result, suffix='aminorm-oi') + result.close() + del psf_lg + del targ_lg + # We're done log.info('... ending calwebb_ami3') diff --git a/jwst/regtest/test_niriss_ami3.py b/jwst/regtest/test_niriss_ami3.py index ce59cfd78c..b7b8630e9c 100644 --- a/jwst/regtest/test_niriss_ami3.py +++ b/jwst/regtest/test_niriss_ami3.py @@ -3,14 +3,12 @@ from jwst.stpipe import Step -from jwst.ami import AmiAnalyzeStep - @pytest.fixture(scope="module") def run_pipeline(rtdata_module): """Run calwebb_ami3 on NIRISS AMI data.""" rtdata = rtdata_module - rtdata.get_asn("niriss/ami/jw01093-c1000_20221110t003218_ami3_002_asn.json") + rtdata.get_asn("niriss/ami/ami3_test_asn.json") # Run the calwebb_ami3 pipeline on the association args = ["calwebb_ami3", rtdata.input] @@ -20,13 +18,12 @@ def run_pipeline(rtdata_module): @pytest.mark.bigdata -@pytest.mark.parametrize("obs", ["001", "004", "006"]) -@pytest.mark.parametrize("exp", ["00001", "00002"]) -def test_niriss_ami3_exp(run_pipeline, obs, exp, fitsdiff_default_kwargs): +@pytest.mark.parametrize("obs, suffix", [("012", "ami-oi"), ("015", "psf-ami-oi")]) +def test_niriss_ami3_exp(run_pipeline, obs, suffix, fitsdiff_default_kwargs): """Check exposure-level results of calwebb_ami3""" rtdata = run_pipeline - output = "jw01093" + obs + "001_03106_" + exp + "_nis_c1000_ami.fits" + output = f"jw01093{obs}001_03102_00001_nis_a3002_{suffix}.fits" rtdata.output = output rtdata.get_truth("truth/test_niriss_ami3/" + output) @@ -36,12 +33,11 @@ def test_niriss_ami3_exp(run_pipeline, obs, 
exp, fitsdiff_default_kwargs): @pytest.mark.bigdata -@pytest.mark.parametrize("suffix", ["amiavg", "psf-amiavg", "aminorm"]) -def test_niriss_ami3_product(run_pipeline, suffix, fitsdiff_default_kwargs): +def test_niriss_ami3_product(run_pipeline, fitsdiff_default_kwargs): """Check final products of calwebb_ami3""" rtdata = run_pipeline - output = "jw01093-c1000_t001_niriss_f380m-nrm-sub80_" + suffix + ".fits" + output = "jw01093012001_aminorm-oi.fits" rtdata.output = output rtdata.get_truth("truth/test_niriss_ami3/" + output) @@ -49,15 +45,3 @@ def test_niriss_ami3_product(run_pipeline, suffix, fitsdiff_default_kwargs): diff = FITSDiff(rtdata.output, rtdata.truth, **fitsdiff_default_kwargs) assert diff.identical, diff.report() - -@pytest.mark.bigdata -def test_ami_analyze_with_nans(rtdata, fitsdiff_default_kwargs): - """Test the AmiAnalyzeStep using an input image with NaNs""" - data = rtdata.get_data('niriss/ami/jw00042004001_01101_00005_nis_withNAN_cal.fits') - - AmiAnalyzeStep.call(data, save_results=True) - rtdata.output = 'jw00042004001_01101_00005_nis_withNAN_amianalyzestep.fits' - - rtdata.get_truth('truth/test_niriss_ami3/jw00042004001_01101_00005_nis_withNAN_amianalyzestep.fits') - diff = FITSDiff(rtdata.output, rtdata.truth, **fitsdiff_default_kwargs) - assert diff.identical, diff.report() diff --git a/pyproject.toml b/pyproject.toml index 93d98f33d8..2dbcb3965e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,6 +39,7 @@ dependencies = [ "stpipe>=0.5.2,<0.6.0", "stsci.image>=2.3.5", "stsci.imagestats>=1.6.3", + "synphot>=1.2", "tweakwcs>=0.8.6", "asdf-astropy>=0.3.0", "wiimatch>=0.2.1",