diff --git a/.azure-pipelines.yml b/.azure-pipelines.yml index 70ac2a67f..3bc3425c2 100644 --- a/.azure-pipelines.yml +++ b/.azure-pipelines.yml @@ -54,7 +54,7 @@ jobs: sudo curl -L -o /usr/local/bin/cmake-easyinstall https://git.io/JvLxY sudo chmod a+x /usr/local/bin/cmake-easyinstall if [ "${WARPX_CI_OPENPMD:-FALSE}" == "TRUE" ]; then - cmake-easyinstall --prefix=/usr/local git+https://github.com/openPMD/openPMD-api.git@0.13.2 \ + cmake-easyinstall --prefix=/usr/local git+https://github.com/openPMD/openPMD-api.git@0.14.2 \ -DopenPMD_USE_PYTHON=OFF -DBUILD_TESTING=OFF -DBUILD_EXAMPLES=OFF -DBUILD_CLI_TOOLS=OFF python -m pip install --upgrade openpmd-api fi diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 31cf244d6..41cc12285 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -2,6 +2,10 @@ name: 🐧 CUDA on: [push, pull_request] +concurrency: + group: ${{ github.head_ref }}-cuda + cancel-in-progress: true + jobs: # Ref.: # https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/ubuntu18.04/10.1/base/Dockerfile @@ -17,7 +21,7 @@ jobs: run: | .github/workflows/dependencies/nvcc11.sh export CEI_SUDO="sudo" - cmake-easyinstall --prefix=/usr/local git+https://github.com/openPMD/openPMD-api.git@0.13.2 -DopenPMD_USE_PYTHON=OFF -DBUILD_TESTING=OFF -DBUILD_EXAMPLES=OFF -DBUILD_CLI_TOOLS=OFF + cmake-easyinstall --prefix=/usr/local git+https://github.com/openPMD/openPMD-api.git@0.14.2 -DopenPMD_USE_PYTHON=OFF -DBUILD_TESTING=OFF -DBUILD_EXAMPLES=OFF -DBUILD_CLI_TOOLS=OFF - name: build WarpX run: | export PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH} diff --git a/.github/workflows/hip.yml b/.github/workflows/hip.yml index 5f5d3923e..ce1dbf24f 100644 --- a/.github/workflows/hip.yml +++ b/.github/workflows/hip.yml @@ -2,6 +2,10 @@ name: 🐧 HIP on: [push, pull_request] +concurrency: + group: ${{ github.head_ref }}-hip + cancel-in-progress: true + jobs: build_hip: name: HIP SP diff --git a/.github/workflows/intel.yml b/.github/workflows/intel.yml index feb928260..8ba2f2543 100644 --- a/.github/workflows/intel.yml +++ b/.github/workflows/intel.yml @@ -2,6 +2,10 @@ name: 🐧 Intel on: [push, pull_request] +concurrency: + group: ${{ github.head_ref }}-intel + cancel-in-progress: true + jobs: # Ref.: https://github.com/rscohn2/oneapi-ci # intel-basekit intel-hpckit are too large in size @@ -26,7 +30,7 @@ jobs: sudo curl -L -o /usr/local/bin/cmake-easyinstall https://git.io/JvLxY sudo chmod a+x /usr/local/bin/cmake-easyinstall export CEI_SUDO="sudo" - CXX=$(which icpc) CC=$(which icc) cmake-easyinstall --prefix=/usr/local git+https://github.com/openPMD/openPMD-api.git@0.13.2 -DopenPMD_USE_PYTHON=OFF -DBUILD_TESTING=OFF -DBUILD_EXAMPLES=OFF -DBUILD_CLI_TOOLS=OFF + CXX=$(which icpc) CC=$(which icc) cmake-easyinstall --prefix=/usr/local git+https://github.com/openPMD/openPMD-api.git@0.14.2 -DopenPMD_USE_PYTHON=OFF -DBUILD_TESTING=OFF -DBUILD_EXAMPLES=OFF -DBUILD_CLI_TOOLS=OFF - name: build WarpX run: | set +e diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index a9e782542..daef221ac 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -2,6 +2,10 @@ name: 🍏 macOS on: [push, pull_request] +concurrency: + group: ${{ github.head_ref }}-macos + cancel-in-progress: true + jobs: build_gcc9: name: AppleClang diff --git a/.github/workflows/source.yml b/.github/workflows/source.yml index 54c7884a5..0ce6072d9 100644 --- a/.github/workflows/source.yml +++ b/.github/workflows/source.yml @@ 
-8,6 +8,10 @@ name: 📜 Source on: [push, pull_request] +concurrency: + group: ${{ github.head_ref }}-source + cancel-in-progress: true + jobs: style: diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index 88088d224..ab7565efb 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -2,6 +2,10 @@ name: 🐧 OpenMP on: [push, pull_request] +concurrency: + group: ${{ github.head_ref }}-ubuntu + cancel-in-progress: true + jobs: build_cxxminimal: name: GCC Minimal w/o MPI diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 2a8b0cbd4..017ca881d 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -2,6 +2,10 @@ name: 🪟 Windows on: [push, pull_request] +concurrency: + group: ${{ github.head_ref }}-windows + cancel-in-progress: true + jobs: build_win_msvc: name: MSVC C++17 w/o MPI diff --git a/.lgtm.yml b/.lgtm.yml index d2ee57db0..5b290596e 100644 --- a/.lgtm.yml +++ b/.lgtm.yml @@ -1,4 +1,4 @@ -# Copyright 2019 Axel Huebl +# Copyright 2019-2021 Axel Huebl # # This file is part of WarpX. # diff --git a/CMakeLists.txt b/CMakeLists.txt index 40f1a714a..1c877d373 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -56,10 +56,10 @@ option(WarpX_LIB "Build WarpX as a shared library" OFF) option(WarpX_MPI "Multi-node support (message-passing)" ON) option(WarpX_OPENPMD "openPMD I/O (HDF5, ADIOS)" OFF) option(WarpX_PSATD "spectral solver support" OFF) +option(WarpX_SENSEI "SENSEI in situ diagnostics" OFF) option(WarpX_QED "QED support (requires PICSAR)" ON) option(WarpX_QED_TABLE_GEN "QED table generation (requires PICSAR and Boost)" OFF) option(WarpX_MAG_LLG "LLG for magnetization modeling" ON) -# TODO: sensei, legacy hdf5? set(WarpX_DIMS_VALUES 2 3 RZ) set(WarpX_DIMS 3 CACHE STRING "Simulation dimensionality (2/3/RZ)") diff --git a/Docs/Doxyfile b/Docs/Doxyfile index 0bd23fc05..42df37ee6 100644 --- a/Docs/Doxyfile +++ b/Docs/Doxyfile @@ -792,7 +792,7 @@ WARN_LOGFILE = INPUT = ../Source/ ../Tools/ ../Regression/Checksum/ RECURSIVE = YES - + # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses # libiconv (or the iconv built into libc) for the transcoding. See the libiconv @@ -2047,24 +2047,24 @@ INCLUDE_FILE_PATTERNS = # recursively expanded use the := operator instead of the = operator. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. 
-PREDEFINED = AMREX_Linux=1 \ - AMREX_PARTICLES=1 \ - AMREX_USE_MPI=1 \ - AMREX_USE_OMP=1 \ - AMREX_SPACEDIM=3 \ - AMREX_TINY_PROFILING=1 \ - BL_Linux=1 \ - BL_USE_MPI=1 \ - BL_USE_OMP=1 \ - BL_USE_SENSEI_INSITU=1 \ - WARPX=1 \ - WARPX_DIM_RZ=1 \ - WARPX_DIM_XZ=1 \ - WARPX_USE_GPU=1 \ - WARPX_USE_OPENPMD=1 \ - WARPX_USE_PSATD=1 \ - WARPX_QED=1 \ - WARPX_QED_TABLE_GEN=1 \ +PREDEFINED = AMREX_Linux=1 \ + AMREX_PARTICLES=1 \ + AMREX_USE_MPI=1 \ + AMREX_USE_OMP=1 \ + AMREX_SPACEDIM=3 \ + AMREX_TINY_PROFILING=1 \ + BL_Linux=1 \ + BL_USE_MPI=1 \ + BL_USE_OMP=1 \ + AMREX_USE_SENSEI_INSITU=1 \ + WARPX=1 \ + WARPX_DIM_RZ=1 \ + WARPX_DIM_XZ=1 \ + WARPX_USE_GPU=1 \ + WARPX_USE_OPENPMD=1 \ + WARPX_USE_PSATD=1 \ + WARPX_QED=1 \ + WARPX_QED_TABLE_GEN=1 \ # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this diff --git a/Docs/source/dataanalysis/inputs.2d b/Docs/source/dataanalysis/inputs.2d index 389766f13..4c5fe7efb 100644 --- a/Docs/source/dataanalysis/inputs.2d +++ b/Docs/source/dataanalysis/inputs.2d @@ -13,7 +13,8 @@ insitu.int = 2 insitu.start = 0 insitu.config = ez2d.xml geometry.coord_sys = 0 # 0: Cartesian -geometry.is_periodic = 0 0 # Is periodic? +boundary.field_lo = pml pml +boundary.field_hi = pml pml geometry.prob_lo = -125.e-6 -149.e-6 geometry.prob_hi = 125.e-6 1.e-6 warpx.fine_tag_lo = -12.e-6 -110.e-6 @@ -29,7 +30,6 @@ algo.particle_pusher = 0 algo.particle_shape = 3 algo.maxwell_solver = ckc warpx.use_filter = 1 -warpx.do_pml = 1 warpx.pml_ncell = 10 warpx.cfl = .99 warpx.do_moving_window = 1 diff --git a/Docs/source/dataanalysis/paraview.rst b/Docs/source/dataanalysis/paraview.rst index 2d0b22678..3f7b41da2 100644 --- a/Docs/source/dataanalysis/paraview.rst +++ b/Docs/source/dataanalysis/paraview.rst @@ -19,7 +19,7 @@ openPMD ------- WarpX' openPMD files can be visualized with ParaView 5.9+. -ParaView supports ADIOS1, ADIOS2 and HDF5 files, as it implements (liked WarpX) against `openPMD-api `__. +ParaView supports ADIOS1, ADIOS2 and HDF5 files, as it implements (like WarpX) against `openPMD-api `__. For openPMD output, WarpX automatically creates an ``.pmd`` file per diagnostics, which can be opened with ParaView. diff --git a/Docs/source/developers/dimensionality.rst b/Docs/source/developers/dimensionality.rst index 607a62dcc..ee98368d5 100644 --- a/Docs/source/developers/dimensionality.rst +++ b/Docs/source/developers/dimensionality.rst @@ -50,3 +50,8 @@ extra SoA attribute ``theta`` ==================== =========== =========== =========== Please see the following sections for particle AoS and SoA details. + +Conventions +----------- + +In 2D3V, we assume that the position of a particle in ``y`` is equal to ``0``. diff --git a/Docs/source/developers/gnumake/openpmd.rst b/Docs/source/developers/gnumake/openpmd.rst index 68b150071..4721fb570 100644 --- a/Docs/source/developers/gnumake/openpmd.rst +++ b/Docs/source/developers/gnumake/openpmd.rst @@ -9,7 +9,7 @@ therefore we recommend to use `spack `__ in order to facilitate the installation. More specifically, we recommend that you try installing the -`openPMD-api library 0.12.0a or newer `_ +`openPMD-api library 0.14.2 or newer `_ using spack (first section below). If this fails, a back-up solution is to install parallel HDF5 with spack, and then install the openPMD-api library from source. 
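The hunks above and below pin openPMD-api to 0.14.2 (CI scripts, CMake superbuild branch, and the install docs). As a quick sanity check of which version a given Python environment actually provides, the snippet below queries the openPMD-api Python bindings; it is a minimal sketch, assuming the ``openpmd-api`` package is installed (e.g. via ``python -m pip install --upgrade openpmd-api`` as in the Azure pipeline above) and is not part of this diff.

.. code-block:: python

   # Minimal check of the openPMD-api Python bindings.
   # Assumes `openpmd-api` was installed via pip or spack as described above.
   import openpmd_api as io

   print(io.__version__)  # expect 0.14.2 or newer for the version pinned in this diff
   print(io.variants)     # dict of compiled-in backends, e.g. ADIOS2 and HDF5 support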
@@ -89,7 +89,7 @@ If optional dependencies are installed in non-system paths, one needs to `hint t # optional: only if you manually installed HDF5 and/or ADIOS2 in custom directories export HDF5_ROOT=$HOME/path_to_installed_software/hdf5-1.12.0/ - export ADIOS2_ROOT=$HOME/path_to_installed_software/adios2-2.6.0/ + export ADIOS2_ROOT=$HOME/path_to_installed_software/adios2-2.7.1/ Then, in the ``$HOME/warpx_directory/``, download and build openPMD-api: diff --git a/Docs/source/glossary.rst b/Docs/source/glossary.rst index cd13769fb..387d84b24 100644 --- a/Docs/source/glossary.rst +++ b/Docs/source/glossary.rst @@ -29,13 +29,14 @@ Abbreviations * **NERSC:** `National Energy Research Scientific Computing Center `__, a supercomputing center located in Berkeley, CA (USA) * **NSF:** the `National Science Foundation `__, a large public agency in the United States of America, supporting research and education * **OLCF:** `Oak Ridge Leadership Computing Facility `__, a supercomputing center located in Oak Ridge, TN (USA) +* **PDE:** `partial differential equation `__, an equation which imposes relations between the various partial derivatives of a multivariable function * **PIC:** :ref:`particle-in-cell `, the method implemented in WarpX +* **PR:** github pull request * **PSATD:** pseudo-spectral analytical time-domain method, a spectral field solver with better numerical properties than FDTD solvers * **PWFA:** plasma-wakefield acceleration -* **PDE:** `partial differential equation `__, an equation which imposes relations between the various partial derivatives of a multivariable function -* **RZ:** for the coordinate system ``r-z`` in cylindrical geometry; we use "RZ" when we refer to quasi-cylindrical geometry, decomposed in azimuthal modes (see details `here `__) * **QED:** `quantum electrodynamics `__ * **RPA:** radiation-pressure acceleration (of protons/ions), e.g. hole-boring (HB) or light-sail (LS) acceleration +* **RZ:** for the coordinate system ``r-z`` in cylindrical geometry; we use "RZ" when we refer to quasi-cylindrical geometry, decomposed in azimuthal modes (see details `here `__) * **TNSA:** target-normal sheet acceleration (of protons/ions) Terms diff --git a/Docs/source/install/cmake.rst b/Docs/source/install/cmake.rst index c1c8f53be..0837085e8 100644 --- a/Docs/source/install/cmake.rst +++ b/Docs/source/install/cmake.rst @@ -92,6 +92,7 @@ CMake Option Default & Values Descr ``WarpX_QED_TABLE_GEN`` ON/**OFF** QED table generation support (requires PICSAR and Boost) ``WarpX_MAG_LLG`` ON/**OFF** LLG module for modeling spin for magnetized materials if set to ``ON`` ============================= ============================================ ======================================================== +``WarpX_SENSEI`` ON/**OFF** SENSEI in situ visualization ============================= ============================================ ========================================================= WarpX can be configured in further detail with options from AMReX, which are `documented in the AMReX manual `_. 
@@ -108,7 +109,7 @@ CMake Option Default & Values Des ``WarpX_amrex_internal`` **ON**/OFF Needs a pre-installed AMReX library if set to ``OFF`` ``WarpX_openpmd_src`` *None* Path to openPMD-api source directory (preferred if set) ``WarpX_openpmd_repo`` ``https://github.com/openPMD/openPMD-api.git`` Repository URI to pull and build openPMD-api from -``WarpX_openpmd_branch`` ``0.13.2`` Repository branch for ``WarpX_openpmd_repo`` +``WarpX_openpmd_branch`` ``0.14.2`` Repository branch for ``WarpX_openpmd_repo`` ``WarpX_openpmd_internal`` **ON**/OFF Needs a pre-installed openPMD-api library if set to ``OFF`` ``WarpX_picsar_src`` *None* Path to PICSAR source directory (preferred if set) ``WarpX_picsar_repo`` ``https://github.com/ECP-WarpX/picsar.git`` Repository URI to pull and build PICSAR from diff --git a/Docs/source/install/dependencies.rst b/Docs/source/install/dependencies.rst index a80b06581..13d88b147 100644 --- a/Docs/source/install/dependencies.rst +++ b/Docs/source/install/dependencies.rst @@ -20,7 +20,7 @@ Optional dependencies include: - `FFTW3 `_: for spectral solver (PSATD) support - `BLAS++ `_ and `LAPACK++ `_: for spectral solver (PSATD) support in RZ geometry - `Boost 1.66.0+ `__: for QED lookup tables generation support -- `openPMD-api 0.12.0+ `__: we automatically download and compile a copy of openPMD-api for openPMD I/O support +- `openPMD-api 0.14.2+ `__: we automatically download and compile a copy of openPMD-api for openPMD I/O support - see `optional I/O backends `__ - `CCache `__: to speed up rebuilds (needs 3.7.9+ for CUDA) diff --git a/Docs/source/install/hpc/cori.rst b/Docs/source/install/hpc/cori.rst index 9d7d896ec..014a446de 100644 --- a/Docs/source/install/hpc/cori.rst +++ b/Docs/source/install/hpc/cori.rst @@ -3,15 +3,16 @@ Cori (NERSC) ============ -The `Cori cluster `_ is located at NERSC. +The `Cori cluster `_ is located at NERSC. 
If you are new to this system, please see the following resources: * `GPU nodes `__ -* `Cori user guide `_ -* Batch system: `Slurm `_ -* `Production directories `_: +* `Cori user guide `__ +* Batch system: `Slurm `__ +* `Jupyter service `__ +* `Production directories `__: * ``$SCRATCH``: per-user production directory (20TB) * ``/global/cscratch1/sd/m3239``: shared production directory for users in the project ``m3239`` (50TB) @@ -37,10 +38,16 @@ We use the following modules and environments on the system (``$HOME/knl_warpx.p module swap PrgEnv-intel PrgEnv-gnu module load cmake/3.20.5 module load cray-hdf5-parallel/1.10.5.2 - module load cray-fftw + module load cray-fftw/3.3.8.4 module load cray-python/3.7.3.2 - export CMAKE_PREFIX_PATH=$PWD/adios2-2.7.1-knl-install:$CMAKE_PREFIX_PATH + export PKG_CONFIG_PATH=$FFTW_DIR/pkgconfig:$PKG_CONFIG_PATH + export CMAKE_PREFIX_PATH=$HOME/sw/adios2-2.7.1-knl-install:$CMAKE_PREFIX_PATH + + if [ -d "$HOME/sw/venvs/knl_warpx" ] + then + source $HOME/sw/venvs/knl_warpx/bin/activate + fi export CXXFLAGS="-march=knl" export CFLAGS="-march=knl" @@ -51,9 +58,23 @@ And install ADIOS2: source $HOME/knl_warpx.profile - git clone -b v2.7.1 https://github.com/ornladios/ADIOS2.git adios2 - cmake -S adios2 -B adios2-build -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DCMAKE_INSTALL_PREFIX=adios2-2.7.1-knl-install - cmake --build adios2-build --target install --parallel 16 + git clone -b v2.7.1 https://github.com/ornladios/ADIOS2.git src/adios2 + cmake -S src/adios2 -B src/adios2-build -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DCMAKE_INSTALL_PREFIX=$HOME/sw/adios2-2.7.1-knl-install + cmake --build src/adios2-build --target install --parallel 16 + +For PICMI and Python workflows, also install a virtual environment: + +.. code-block:: bash + + # establish Python dependencies + python3 -m pip install --user --upgrade pip + python3 -m pip install --user virtualenv + + python3 -m venv $HOME/sw/venvs/knl_warpx + source $HOME/sw/venvs/knl_warpx/bin/activate + + python3 -m pip install --upgrade pip + MPICC="cc -shared" python3 -m pip install -U --no-cache-dir -v mpi4py Haswell ^^^^^^^ @@ -62,12 +83,19 @@ We use the following modules and environments on the system (``$HOME/haswell_war .. code-block:: bash + module swap PrgEnv-intel PrgEnv-gnu module load cmake/3.20.5 module load cray-hdf5-parallel/1.10.5.2 - module load cray-fftw + module load cray-fftw/3.3.8.4 module load cray-python/3.7.3.2 - export CMAKE_PREFIX_PATH=$PWD/adios2-2.7.1-haswell-install:$CMAKE_PREFIX_PATH + export PKG_CONFIG_PATH=$FFTW_DIR/pkgconfig:$PKG_CONFIG_PATH + export CMAKE_PREFIX_PATH=$HOME/sw/adios2-2.7.1-haswell-install:$CMAKE_PREFIX_PATH + + if [ -d "$HOME/sw/venvs/haswell_warpx" ] + then + source $HOME/sw/venvs/haswell_warpx/bin/activate + fi And install ADIOS2: @@ -75,9 +103,23 @@ And install ADIOS2: source $HOME/haswell_warpx.profile - git clone -b v2.7.1 https://github.com/ornladios/ADIOS2.git adios2 - cmake -S adios2 -B adios2-build -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DCMAKE_INSTALL_PREFIX=adios2-2.7.1-haswell-install - cmake --build adios2-build --target install --parallel 16 + git clone -b v2.7.1 https://github.com/ornladios/ADIOS2.git src/adios2 + cmake -S src/adios2 -B src/adios2-build -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DCMAKE_INSTALL_PREFIX=$HOME/sw/adios2-2.7.1-haswell-install + cmake --build src/adios2-build --target install --parallel 16 + +For PICMI and Python workflows, also install a virtual environment: + +.. 
code-block:: bash + + # establish Python dependencies + python3 -m pip install --user --upgrade pip + python3 -m pip install --user virtualenv + + python3 -m venv $HOME/sw/venvs/haswell_warpx + source $HOME/sw/venvs/haswell_warpx/bin/activate + + python3 -m pip install --upgrade pip + MPICC="cc -shared" python3 -m pip install -U --no-cache-dir -v mpi4py GPU (V100) ^^^^^^^^^^ @@ -96,7 +138,12 @@ We use the following modules and environments on the system (``$HOME/gpu_warpx.p module load gcc/8.3.0 cuda/11.4.0 cmake/3.20.5 module load openmpi - export CMAKE_PREFIX_PATH=$PWD/adios2-2.7.1-gpu-install:$CMAKE_PREFIX_PATH + export CMAKE_PREFIX_PATH=$HOME/sw/adios2-2.7.1-gpu-install:$CMAKE_PREFIX_PATH + + if [ -d "$HOME/sw/venvs/gpu_warpx" ] + then + source $HOME/sw/venvs/gpu_warpx/bin/activate + fi # compiler environment hints export CC=$(which gcc) @@ -120,10 +167,23 @@ And install ADIOS2: source $HOME/gpu_warpx.profile - git clone -b v2.7.1 https://github.com/ornladios/ADIOS2.git adios2 - cmake -S adios2 -B adios2-build -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DCMAKE_INSTALL_PREFIX=adios2-2.7.1-gpu-install - cmake --build adios2-build --target install --parallel 16 + git clone -b v2.7.1 https://github.com/ornladios/ADIOS2.git src/adios2 + cmake -S src/adios2 -B src/adios2-build -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DCMAKE_INSTALL_PREFIX=$HOME/sw/adios2-2.7.1-gpu-install + cmake --build src/adios2-build --target install --parallel 16 + +For PICMI and Python workflows, also install a virtual environment: + +.. code-block:: bash + + # establish Python dependencies + python3 -m pip install --user --upgrade pip + python3 -m pip install --user virtualenv + python3 -m venv $HOME/sw/venvs/gpu_warpx + source $HOME/sw/venvs/gpu_warpx/bin/activate + + python3 -m pip install --upgrade pip + python3 -m pip install -U --no-cache-dir -v mpi4py Building WarpX -------------- @@ -142,6 +202,15 @@ If you want to run on either of the three partitions of Cori, open a new termina # GPU: #source $HOME/gpu_warpx.profile +.. warning:: + + Consider that all three Cori partitions are *incompatible*. + + Do not *source* multiple ``...warpx.profile`` files in the same terminal session. + Open a new terminal and log into Cori again, if you want to switch the targeted Cori partition. + + If you re-submit an already compiled simulation that you ran on another day or in another session, *make sure to source* the corresponding ``...warpx.profile`` again after login! + Then, ``cd`` into the directory ``$HOME/src/warpx`` and use the following commands to compile: .. code-block:: bash @@ -153,16 +222,15 @@ Then, ``cd`` into the directory ``$HOME/src/warpx`` and use the following comman cmake -S . -B build -DWarpX_OPENPMD=ON -DWarpX_DIMS=3 cmake --build build -j 16 -The general :ref:`cmake compile-time options and instructions for Python (PICMI) bindings ` apply as usual. +The general :ref:`cmake compile-time options and instructions for Python (PICMI) bindings ` apply as usual: -.. warning:: - - Consider that all three Cori partitions are *incompatible*. +.. code-block:: bash - Do not *source* multiple ``...warpx.profile`` files in the same terminal session. - Open a new terminal and log into Cori again, if you want to switch the targeted Cori partition. + # PICMI build + cd $HOME/src/warpx - If you re-submit an already compiled simulation that you ran on another day or in another session, *make sure to source* the corresponding ``...warpx.profile`` again after login! 
+ # compile parallel PICMI interfaces with openPMD support and 3D, 2D and RZ + WarpX_MPI=ON WarpX_OPENPMD=ON BUILD_PARALLEL=16 python3 -m pip install --force-reinstall -v . .. _running-cpp-cori: @@ -178,6 +246,8 @@ The batch script below can be used to run a WarpX simulation on 2 KNL nodes on the supercomputer Cori at NERSC. Replace descriptions between chevrons ``<>`` by relevant values, for instance ```` could be ``laserWakefield``. +For PICMI Python runs, the ```` has to read ``python3`` and the ```` is the path to your PICMI input script. + .. literalinclude:: ../../../../Tools/BatchScripts/batch_cori.sh :language: bash diff --git a/Docs/source/install/hpc/summit.rst b/Docs/source/install/hpc/summit.rst index abb98a13f..06bbfcc87 100644 --- a/Docs/source/install/hpc/summit.rst +++ b/Docs/source/install/hpc/summit.rst @@ -36,7 +36,7 @@ We use the following modules and environments on the system (``$HOME/warpx.profi # required dependencies module load cmake - module load gcc/6.4.0 + module load gcc/9.3.0 module load cuda # optional: faster re-builds @@ -46,12 +46,11 @@ We use the following modules and environments on the system (``$HOME/warpx.profi module load fftw # optional: for QED lookup table generation support - module load boost/1.66.0 + module load boost/1.76.0 # optional: for openPMD support - module load ums - module load ums-aph114 - module load openpmd-api/0.13.2 + #module load adios2/2.7.1 # currently broken: OLCFHELP-3319 + module load hdf5/1.10.7 # optional: for PSATD in RZ geometry support # note: needs the ums modules above @@ -60,17 +59,15 @@ We use the following modules and environments on the system (``$HOME/warpx.profi # optional: Ascent in situ support # note: build WarpX with CMake - export Ascent_DIR=/gpfs/alpine/world-shared/csc340/software/ascent/current/summit/cuda/gnu/ascent_install + export Ascent_DIR=/gpfs/alpine/world-shared/csc340/software/ascent/current/summit/cuda/gnu/ascent-install # optional: for Python bindings or libEnsemble - module load python/3.7.0 - - # optional: for libEnsemble - module load openblas/0.3.9-omp - module load netlib-lapack/3.8.0 - if [ -d "$HOME/sw/venvs/warpx-libE" ] + module load python/3.8.10 + module load openblas/0.3.15-omp + module load netlib-lapack/3.9.1 + if [ -d "$HOME/sw/venvs/warpx" ] then - source $HOME/sw/venvs/warpx-libE/bin/activate + source $HOME/sw/venvs/warpx/bin/activate fi # optional: just an additional text editor @@ -99,7 +96,7 @@ We recommend to store the above lines in a file, such as ``$HOME/warpx.profile`` source $HOME/warpx.profile -Optionally, download and install :ref:`libEnsemble ` for dynamic ensemble optimizations: +Optionally, download and install Python packages such as :ref:`libEnsemble ` for dynamic ensemble optimizations: .. 
code-block:: bash @@ -107,11 +104,11 @@ Optionally, download and install :ref:`libEnsemble ` for dynamic en export LAPACK=$OLCF_NETLIB_LAPACK_ROOT/lib64/liblapack.so python3 -m pip install --user --upgrade pip python3 -m pip install --user virtualenv - python3 -m venv $HOME/sw/venvs/warpx-libE - source $HOME/sw/venvs/warpx-libE/bin/activate + python3 -m venv $HOME/sw/venvs/warpx + source $HOME/sw/venvs/warpx/bin/activate python3 -m pip install --upgrade pip python3 -m pip install --upgrade cython - python3 -m pip install --upgrade numpy==1.19.5 + python3 -m pip install --upgrade numpy python3 -m pip install --upgrade scipy python3 -m pip install --upgrade mpi4py --no-binary mpi4py python3 -m pip install --upgrade -r $HOME/src/warpx/Tools/LibEnsemble/requirements.txt diff --git a/Docs/source/usage/examples.rst b/Docs/source/usage/examples.rst index 2602b0247..91cf103f1 100644 --- a/Docs/source/usage/examples.rst +++ b/Docs/source/usage/examples.rst @@ -72,6 +72,15 @@ The Monte-Carlo collision (MCC) model can be used to simulate capacitive dischar * :download:`test case 1 <../../../Examples/Physics_applications/capacitive_discharge/inputs_2d>` +.. note:: + + This example needs `additional calibration data for cross sections `__. + Download this data alongside your inputs file and update the paths in the inputs file: + + .. code-block:: bash + + git clone https://github.com/ECP-WarpX/warpx-data.git + Test cases ---------- diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index c725951e3..6aabd4700 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -4,7 +4,7 @@ Input Parameters ================ .. note:: - :cpp:`amrex::Parser` (see :ref:`running-cpp-parameters-parser`) is used for the right-hand-side of all input parameters that consist of one or more floats, so expressions like ``.density_max = "2.+1."`` and/or using user-defined constants are accepted. See below for more detail. + :cpp:`amrex::Parser` (see :ref:`running-cpp-parameters-parser`) is used for the right-hand-side of all input parameters that consist of one or more integers or floats, so expressions like ``.density_max = "2.+1."`` and/or using user-defined constants are accepted. See below for more detail. .. _running-cpp-parameters-overall: @@ -211,7 +211,7 @@ Domain Boundary Conditions -------------------------- * ``boundary.field_lo`` and ``boundary.field_hi`` (`2 strings` for 2D, `3 strings` for 3D, `pml` by default) - Boundary conditions applied to field at the lower and upper domain boundaries. Depending on the type of boundary condition, the value for ``geometry.is_periodic`` will be set, overriding the user-input for the input parameter, ``geometry.is_periodic``. If not set, the default value for the fields at the domain boundary will be set to pml. + Boundary conditions applied to fields at the lower and upper domain boundaries. Options are: * ``Periodic``: This option can be used to set periodic domain boundaries. Note that if the fields for lo in a certain dimension are set to periodic, then the corresponding upper boundary must also be set to periodic. If particle boundaries are not specified in the input file, then particles boundaries by default will be set to periodic. If particles boundaries are specified, then they must be set to periodic corresponding to the periodic field boundaries. 
@@ -404,8 +404,10 @@ Math parser and user-defined constants -------------------------------------- WarpX uses AMReX's math parser that reads expressions in the input file. -It can be used in all input parameters that consist of one or more floats. -Note that when multiple floats are expected, the expressions are space delimited. +It can be used in all input parameters that consist of one or more integers or floats. +Integer inputs that expect a boolean (0 or 1) are not parsed. +Note that when multiple values are expected, the expressions are space delimited. +For integer input values, the expressions are evaluated as real numbers and the final result rounded to the nearest integer. WarpX constants ^^^^^^^^^^^^^^^ @@ -429,7 +431,7 @@ User-defined constants ^^^^^^^^^^^^^^^^^^^^^^ Users can define their own constants in the input file. -These constants can be used for any parameter that consists of one or more floats. +These constants can be used for any parameter that consists of one or more integers or floats. User-defined constant names can contain only letters, numbers and the character ``_``. The name of each constant has to begin with a letter. The following names are used by WarpX, and cannot be used as user-defined constants: ``x``, ``y``, ``z``, ``X``, ``Y``, ``t``. @@ -448,7 +450,7 @@ Besides, for profiles that depend on spatial coordinates (the plasma momentum di The parser reads python-style expressions between double quotes, for instance ``"a0*x**2 * (1-y*1.e2) * (x>0)"`` is a valid expression where ``a0`` is a -user-defined constant (see below) and ``x`` and ``y`` are spatial coordinates. The names are case sensitive. The factor +user-defined constant (see above) and ``x`` and ``y`` are spatial coordinates. The names are case sensitive. The factor ``(x>0)`` is ``1`` where ``x>0`` and ``0`` where ``x<=0``. It allows the user to define functions by intervals. Alternatively the expression above can be written as ``if(x>0, a0*x**2 * (1-y*1.e2), 0)``. @@ -1189,6 +1191,15 @@ Laser initialization value of the `By` component is set to zero. Note that the current implementation of the parser for B-field on particles is applied in cartesian co-ordinates as a function of (x,y,z) even for RZ. + To apply a series of plasma lenses, use the option ``repeated_plasma_lens``. This + option requires the following parameters, in the lab frame, + ``repeated_plasma_lens_period``, the period length of the repeat, a single float number, + ``repeated_plasma_lens_starts``, the start of each lens relative to the period, an array of floats, + ``repeated_plasma_lens_lengths``, the length of each lens, an array of floats, + ``repeated_plasma_lens_strengths_B``, the focusing strength of each lens, an array of floats. + The applied field is uniform longitudinally (along z) with a hard edge, + where residence corrections are used for more accurate field calculation. + The field is of the form :math:`B_x = \mathrm{strength} \cdot y` and :math:`B_y = -\mathrm{strength} \cdot x`, :math:`B_z = 0`. * ``particles.E_ext_particle_init_style`` (string) optional (default is "default") This parameter determines the type of initialization for the external @@ -1208,16 +1219,17 @@ Laser initialization using ``my_constants``. For a two-dimensional simulation, similar to the B-field, it is assumed that the first and second dimensions are `x` and `z`, respectively, and the value of the `Ey` component is set to zero.
- The current implementation of the parser for E-field on particles + Note that the current implementation of the parser for E-field on particles is applied in cartesian co-ordinates as a function of (x,y,z) even for RZ. To apply a series of plasma lenses, use the option ``repeated_plasma_lens``. This - option requires the following parameters, + option requires the following parameters, in the lab frame, ``repeated_plasma_lens_period``, the period length of the repeat, a single float number, ``repeated_plasma_lens_starts``, the start of each lens relative to the period, an array of floats, ``repeated_plasma_lens_lengths``, the length of each lens, an array of floats, - ``repeated_plasma_lens_strengths``, the focusing strength of each lens, an array of floats. + ``repeated_plasma_lens_strengths_E``, the focusing strength of each lens, an array of floats. The applied field is uniform longitudinally (along z) with a hard edge, where residence corrections are used for more accurate field calculation. + The field is of the form :math:`E_x = \mathrm{strength} \cdot x` and :math:`E_y = \mathrm{strength} \cdot y`, :math:`Ez = 0`. * ``particles.E_external_particle`` & ``particles.B_external_particle`` (list of `float`) optional (default `0. 0. 0.`) Two separate parameters which add an externally applied uniform E-field or @@ -1921,6 +1933,11 @@ In-situ capabilities can be used by turning on Sensei or Ascent (provided they a Whether to include the guard cells in the output of the raw fields. Only works with ``.format = plotfile``. + * ``.plot_raw_rho`` (`0` or `1`) optional (default `0`) + By default, the charge density written in the plot files is averaged on the cell centers. + When ``.plot_raw_rho = 1``, then the raw (i.e. non-averaged) charge density is also saved in the output files. + Only works with ``.format = plotfile``. + * ``.coarsening_ratio`` (list of `int`) optional (default `1 1 1`) Reduce size of the field output by this ratio in each dimension. (This is done by averaging the field over 1 or 2 points along each direction, depending on the staggering). diff --git a/Examples/Modules/ParticleBoundaryScrape/analysis_scrape.py b/Examples/Modules/ParticleBoundaryScrape/analysis_scrape.py new file mode 100755 index 000000000..b970c4933 --- /dev/null +++ b/Examples/Modules/ParticleBoundaryScrape/analysis_scrape.py @@ -0,0 +1,19 @@ +#! /usr/bin/env python + +import yt + +# This test shoots a beam of electrons at cubic embedded boundary geometry +# At time step 40, none of the particles have hit the boundary yet. At time +# step 60, all of them should have been absorbed by the boundary. In the +# absence of the cube, none of the particles would have had time to exit +# the problem domain yet. 
+ +# all particles are still there +ds40 = yt.load("particle_scrape_plt00040") +np40 = ds40.index.particle_headers['electrons'].num_particles +assert(np40 == 612) + +# all particles have been removed +ds60 = yt.load("particle_scrape_plt00060") +np60 = ds60.index.particle_headers['electrons'].num_particles +assert(np60 == 0) diff --git a/Examples/Modules/ParticleBoundaryScrape/inputs_scrape b/Examples/Modules/ParticleBoundaryScrape/inputs_scrape new file mode 100644 index 000000000..fbc591657 --- /dev/null +++ b/Examples/Modules/ParticleBoundaryScrape/inputs_scrape @@ -0,0 +1,48 @@ +amr.n_cell = 64 64 128 +amr.max_grid_size = 128 +amr.max_level = 0 + +max_step = 60 + +geometry.coord_sys = 0 +geometry.prob_lo = -125.e-6 -125.e-6 -149.e-6 +geometry.prob_hi = 125.e-6 125.e-6 1.e-6 +warpx.cfl = 0.99 + +# Domain Boundary condition +boundary.field_lo = none none none +boundary.field_hi = none none none + +# Use parser to build EB +# Note that for amrex EB implicit function, >0 is covered, =0 is boundary and <0 is regular. +# This sets the region from -12.5e-6 < x < 12.5e-6, -12.5e-6 < y < 12.5e-6, -8.65e-5 < z < -6.15e-5 to be "covered" +warpx.eb_implicit_function = "-max(max(max(x-12.5e-6,-12.5e-6-x), max(y-12.5e-6,-12.5e-6-y)), max(z-(-6.15e-5),-8.65e-5-z))" + +diagnostics.diags_names = diag1 +diag1.intervals = 20 +diag1.diag_type = Full +diag1.fields_to_plot = Ex Ey Ez Bx By Bz + +# Order of particle shape factors +algo.particle_shape = 1 + +################################# +############ PLASMA ############# +################################# +particles.species_names = electrons + +electrons.species_type = electron +electrons.injection_style = "NUniformPerCell" +electrons.num_particles_per_cell_each_dim = 1 1 1 +electrons.xmin = -1.e-5 +electrons.xmax = 1.e-5 +electrons.ymin = -1.e-5 +electrons.ymax = 1.e-5 +electrons.zmin = -149.e-6 +electrons.zmax = -129.e-6 +electrons.profile = constant +electrons.density = 1.e23 # number of electrons per m^3 +electrons.momentum_distribution_type = "constant" +electrons.uz = 2000. # the constant (unitless) momentum, gamma*beta_z +electrons.save_particles_at_xhi = 1 +electrons.save_particles_at_eb = 1 diff --git a/Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration_mr.py b/Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration_mr.py index 8ea29032d..4dc144b3e 100644 --- a/Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration_mr.py +++ b/Examples/Physics_applications/plasma_acceleration/PICMI_inputs_plasma_acceleration_mr.py @@ -35,7 +35,6 @@ hi = [25e-6, 25e-6, 200.e-6]) solver = picmi.ElectromagneticSolver(grid=grid, cfl=1, - warpx_do_pml = True, warpx_pml_ncell = 10) beam_distribution = picmi.UniformDistribution(density = 1.e23, diff --git a/Examples/Tests/Langmuir/PICMI_inputs_langmuir_rz_multimode_analyze.py b/Examples/Tests/Langmuir/PICMI_inputs_langmuir_rz_multimode_analyze.py index 16a3f0575..22ab1679b 100644 --- a/Examples/Tests/Langmuir/PICMI_inputs_langmuir_rz_multimode_analyze.py +++ b/Examples/Tests/Langmuir/PICMI_inputs_langmuir_rz_multimode_analyze.py @@ -91,7 +91,7 @@ moving_window_zvelocity = 0., warpx_max_grid_size=64) -solver = picmi.ElectromagneticSolver(grid=grid, cfl=1., warpx_do_pml=False) +solver = picmi.ElectromagneticSolver(grid=grid, cfl=1.)
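With ``warpx_do_pml`` removed from ``picmi.ElectromagneticSolver`` (see the ``picmi.py`` and example-script hunks above and below), absorbing layers are selected through the domain boundary conditions rather than a solver flag; only ``warpx_pml_ncell`` remains on the solver. The sketch below is not part of this diff: it assumes the standard PICMI grid arguments ``lower_boundary_conditions``/``upper_boundary_conditions`` and that the value ``'open'`` maps to a PML layer in WarpX.

.. code-block:: python

   from pywarpx import picmi

   # Sketch only: boundary handling moves to the grid; 'open' is assumed to select PML.
   grid = picmi.Cartesian2DGrid(
       number_of_cells=[64, 64],
       lower_bound=[-20.e-6, -20.e-6],
       upper_bound=[20.e-6, 20.e-6],
       lower_boundary_conditions=['periodic', 'open'],
       upper_boundary_conditions=['periodic', 'open'],
       warpx_max_grid_size=32)

   # No warpx_do_pml argument anymore; the PML width is still set via warpx_pml_ncell.
   solver = picmi.ElectromagneticSolver(grid=grid, cfl=1., warpx_pml_ncell=10)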
########################## # diagnostics diff --git a/Examples/Tests/Langmuir/inputs_3d_multi_rt b/Examples/Tests/Langmuir/inputs_3d_multi_rt index 232bf3d29..39f0c97da 100644 --- a/Examples/Tests/Langmuir/inputs_3d_multi_rt +++ b/Examples/Tests/Langmuir/inputs_3d_multi_rt @@ -2,19 +2,19 @@ max_step = 40 # number of grid points -amr.n_cell = 64 64 64 +amr.n_cell = nx nx nx # Maximum allowable size of each subdomain in the problem domain; # this is used to decompose the domain for parallel calculations. -amr.max_grid_size = 64 +amr.max_grid_size = nx nx nx # Maximum level in hierarchy (for now must be 0, i.e., one level in total) amr.max_level = 0 # Geometry geometry.coord_sys = 0 # 0: Cartesian -geometry.prob_lo = -20.e-6 -20.e-6 -20.e-6 # physical domain -geometry.prob_hi = 20.e-6 20.e-6 20.e-6 +geometry.prob_lo = -lx/2. -lx/2. -lx/2. # physical domain +geometry.prob_hi = lx/2. lx/2. lx/2. # Boundary condition boundary.field_lo = periodic periodic periodic @@ -37,11 +37,14 @@ algo.particle_shape = 1 warpx.cfl = 1.0 # Parameters for the plasma wave +my_constants.lx = 40.e-6 # length of sides +my_constants.dx = 6.25e-07 # grid cell size +my_constants.nx = lx/dx # number of cells in each dimension my_constants.epsilon = 0.01 my_constants.n0 = 2.e24 # electron and positron densities, #/m^3 my_constants.wp = sqrt(2.*n0*q_e**2/(epsilon0*m_e)) # plasma frequency my_constants.kp = wp/clight # plasma wavenumber -my_constants.k = 2.*pi/20.e-6 # perturbation wavenumber +my_constants.k = 2.*2.*pi/lx # perturbation wavenumber # Note: kp is calculated in SI for a density of 4e24 (i.e. 2e24 electrons + 2e24 positrons) # k is calculated so as to have 2 periods within the 40e-6 wide box. diff --git a/Examples/Tests/PEC/analysis_pec.py b/Examples/Tests/PEC/analysis_pec.py index 3257d8e4e..d9d9b84f8 100755 --- a/Examples/Tests/PEC/analysis_pec.py +++ b/Examples/Tests/PEC/analysis_pec.py @@ -20,7 +20,6 @@ import yt yt.funcs.mylog.setLevel(50) import numpy as np -from scipy.constants import e, m_e, epsilon_0, c sys.path.insert(1, '../../../../warpx/Regression/Checksum/') import checksumAPI diff --git a/Examples/Tests/PEC/analysis_pec_mr.py b/Examples/Tests/PEC/analysis_pec_mr.py index 067f2e658..05654ed93 100755 --- a/Examples/Tests/PEC/analysis_pec_mr.py +++ b/Examples/Tests/PEC/analysis_pec_mr.py @@ -20,7 +20,6 @@ import yt yt.funcs.mylog.setLevel(50) import numpy as np -from scipy.constants import e, m_e, epsilon_0, c sys.path.insert(1, '../../../../warpx/Regression/Checksum/') import checksumAPI diff --git a/Examples/Tests/PerformanceTests/automated_test_1_uniform_rest_32ppc b/Examples/Tests/PerformanceTests/automated_test_1_uniform_rest_32ppc new file mode 120000 index 000000000..169abb79f --- /dev/null +++ b/Examples/Tests/PerformanceTests/automated_test_1_uniform_rest_32ppc @@ -0,0 +1 @@ +../../../Tools/PerformanceTests/automated_test_1_uniform_rest_32ppc \ No newline at end of file diff --git a/Examples/Tests/PerformanceTests/automated_test_2_uniform_rest_1ppc b/Examples/Tests/PerformanceTests/automated_test_2_uniform_rest_1ppc new file mode 120000 index 000000000..5f6c6d2ed --- /dev/null +++ b/Examples/Tests/PerformanceTests/automated_test_2_uniform_rest_1ppc @@ -0,0 +1 @@ +../../../Tools/PerformanceTests/automated_test_2_uniform_rest_1ppc \ No newline at end of file diff --git a/Examples/Tests/PerformanceTests/automated_test_3_uniform_drift_4ppc b/Examples/Tests/PerformanceTests/automated_test_3_uniform_drift_4ppc new file mode 120000 index 000000000..0fe17ba03 --- /dev/null +++ 
b/Examples/Tests/PerformanceTests/automated_test_3_uniform_drift_4ppc @@ -0,0 +1 @@ +../../../Tools/PerformanceTests/automated_test_3_uniform_drift_4ppc \ No newline at end of file diff --git a/Examples/Tests/PerformanceTests/automated_test_4_labdiags_2ppc b/Examples/Tests/PerformanceTests/automated_test_4_labdiags_2ppc new file mode 120000 index 000000000..1e67353c4 --- /dev/null +++ b/Examples/Tests/PerformanceTests/automated_test_4_labdiags_2ppc @@ -0,0 +1 @@ +../../../Tools/PerformanceTests/automated_test_4_labdiags_2ppc \ No newline at end of file diff --git a/Examples/Tests/PerformanceTests/automated_test_5_loadimbalance b/Examples/Tests/PerformanceTests/automated_test_5_loadimbalance new file mode 120000 index 000000000..40734b2fe --- /dev/null +++ b/Examples/Tests/PerformanceTests/automated_test_5_loadimbalance @@ -0,0 +1 @@ +../../../Tools/PerformanceTests/automated_test_5_loadimbalance \ No newline at end of file diff --git a/Examples/Tests/PerformanceTests/automated_test_6_output_2ppc b/Examples/Tests/PerformanceTests/automated_test_6_output_2ppc new file mode 120000 index 000000000..4be041eb9 --- /dev/null +++ b/Examples/Tests/PerformanceTests/automated_test_6_output_2ppc @@ -0,0 +1 @@ +../../../Tools/PerformanceTests/automated_test_6_output_2ppc \ No newline at end of file diff --git a/Examples/Tests/SilverMueller/inputs_2d_x b/Examples/Tests/SilverMueller/inputs_2d_x index bbfa1aad1..5bd4ef1c0 100644 --- a/Examples/Tests/SilverMueller/inputs_2d_x +++ b/Examples/Tests/SilverMueller/inputs_2d_x @@ -15,15 +15,14 @@ amr.max_level = 0 geometry.coord_sys = 0 geometry.prob_lo = -20.e-6 -40.e-6 geometry.prob_hi = 20.e-6 40.e-6 -boundary.field_lo = absorbing_silver_mueller absorbing_silver_mueller -boundary.field_hi = absorbing_silver_mueller absorbing_silver_mueller +boundary.field_lo = absorbing_silver_mueller periodic +boundary.field_hi = absorbing_silver_mueller periodic # Verbosity warpx.verbose = 1 # Algorithms warpx.cfl = 1.0 -warpx.do_pml = 0 warpx.use_filter = 0 warpx.do_moving_window = 0 diff --git a/Examples/Tests/SilverMueller/inputs_2d_z b/Examples/Tests/SilverMueller/inputs_2d_z index cf3ff3147..ef6b5d01f 100644 --- a/Examples/Tests/SilverMueller/inputs_2d_z +++ b/Examples/Tests/SilverMueller/inputs_2d_z @@ -24,7 +24,6 @@ warpx.verbose = 1 # Algorithms warpx.cfl = 1.0 -warpx.do_pml = 0 warpx.use_filter = 0 warpx.do_moving_window = 0 diff --git a/Examples/Tests/SilverMueller/inputs_rz_z b/Examples/Tests/SilverMueller/inputs_rz_z index d3308dac7..228fb8a11 100644 --- a/Examples/Tests/SilverMueller/inputs_rz_z +++ b/Examples/Tests/SilverMueller/inputs_rz_z @@ -27,7 +27,6 @@ warpx.n_rz_azimuthal_modes = 2 # Algorithms warpx.cfl = 1.0 -warpx.do_pml = 0 warpx.use_filter = 0 warpx.do_moving_window = 0 diff --git a/Examples/Tests/boundaries/inputs_3d b/Examples/Tests/boundaries/inputs_3d index 749ce51a5..2fba4a6d0 100644 --- a/Examples/Tests/boundaries/inputs_3d +++ b/Examples/Tests/boundaries/inputs_3d @@ -22,8 +22,6 @@ boundary.field_hi = pec pec periodic boundary.particle_lo = reflecting absorbing periodic boundary.particle_hi = reflecting absorbing periodic -warpx.do_pml = 0 - # Algorithms algo.particle_shape = 1 diff --git a/Examples/Tests/galilean/inputs_rz b/Examples/Tests/galilean/inputs_rz index 2704e00fe..92f84a3ab 100644 --- a/Examples/Tests/galilean/inputs_rz +++ b/Examples/Tests/galilean/inputs_rz @@ -36,7 +36,6 @@ particles.species_names = electrons ions warpx.do_nodal = 1 warpx.use_filter = 1 -warpx.do_pml = 0 psatd.nox = 16 psatd.noy = 16 diff --git 
a/Examples/Tests/multi_J/inputs_rz b/Examples/Tests/multi_J/inputs_rz index f7e360838..588d907fb 100644 --- a/Examples/Tests/multi_J/inputs_rz +++ b/Examples/Tests/multi_J/inputs_rz @@ -23,7 +23,6 @@ algo.particle_shape = 3 # Numerics warpx.do_moving_window = 1 -warpx.do_pml = 0 warpx.moving_window_dir = z warpx.moving_window_v = 1. warpx.n_rz_azimuthal_modes = 1 @@ -39,7 +38,7 @@ warpx.do_divb_cleaning = 1 warpx.do_multi_J = 1 warpx.do_multi_J_n_depositions = 2 psatd.J_linear_in_time = 1 -psatd.do_time_averaging = 0 +psatd.do_time_averaging = 1 # PSATD psatd.update_with_rho = 1 diff --git a/Examples/Tests/photon_pusher/analysis_photon_pusher.py b/Examples/Tests/photon_pusher/analysis_photon_pusher.py index 2347d0bac..83dfb0c6a 100755 --- a/Examples/Tests/photon_pusher/analysis_photon_pusher.py +++ b/Examples/Tests/photon_pusher/analysis_photon_pusher.py @@ -120,10 +120,10 @@ def generate(): f.write("amr.max_grid_size = 8\n") f.write("amr.plot_int = 1\n") f.write("geometry.coord_sys = 0\n") - f.write("geometry.is_periodic = 1 1 1\n") + f.write("boundary.field_lo = periodic periodic periodic\n") + f.write("boundary.field_hi = periodic periodic periodic\n") f.write("geometry.prob_lo = -0.5e-6 -0.5e-6 -0.5e-6\n") f.write("geometry.prob_hi = 0.5e-6 0.5e-6 0.5e-6\n") - f.write("warpx.do_pml = 0\n") f.write("algo.charge_deposition = standard\n") f.write("algo.field_gathering = energy-conserving\n") f.write("warpx.cfl = 1.0\n") diff --git a/Examples/Tests/plasma_lens/analysis.py b/Examples/Tests/plasma_lens/analysis.py index b348a3053..53b80f8d2 100755 --- a/Examples/Tests/plasma_lens/analysis.py +++ b/Examples/Tests/plasma_lens/analysis.py @@ -53,10 +53,13 @@ def applylens(x0, vx0, vz0, lens_length, lens_strength): vx1 = -w*A*np.sin(w*t + phi) return x1, vx1 +vel_z = eval(ds.parameters.get('my_constants.vel_z')) + plasma_lens_period = float(ds.parameters.get('particles.repeated_plasma_lens_period')) plasma_lens_starts = [float(x) for x in ds.parameters.get('particles.repeated_plasma_lens_starts').split()] plasma_lens_lengths = [float(x) for x in ds.parameters.get('particles.repeated_plasma_lens_lengths').split()] -plasma_lens_strengths = [eval(x) for x in ds.parameters.get('particles.repeated_plasma_lens_strengths').split()] +plasma_lens_strengths_E = [eval(x) for x in ds.parameters.get('particles.repeated_plasma_lens_strengths_E').split()] +plasma_lens_strengths_B = [eval(x) for x in ds.parameters.get('particles.repeated_plasma_lens_strengths_B').split()] clight = c @@ -81,8 +84,9 @@ def applylens(x0, vx0, vz0, lens_length, lens_strength): tt = tt + dt xx = xx + dt*ux yy = yy + dt*uy - xx, ux = applylens(xx, ux, uz, plasma_lens_lengths[i], plasma_lens_strengths[i]) - yy, uy = applylens(yy, uy, uz, plasma_lens_lengths[i], plasma_lens_strengths[i]) + lens_strength = plasma_lens_strengths_E[i] + plasma_lens_strengths_B[i]*vel_z + xx, ux = applylens(xx, ux, uz, plasma_lens_lengths[i], lens_strength) + yy, uy = applylens(yy, uy, uz, plasma_lens_lengths[i], lens_strength) dt = plasma_lens_lengths[i]/uz tt = tt + dt zz = z_lens + plasma_lens_lengths[i] @@ -92,10 +96,10 @@ def applylens(x0, vx0, vz0, lens_length, lens_strength): xx = xx + dt0*ux yy = yy + dt1*uy -assert abs(xx - xx_sim) < 0.011, Exception('error in x particle position') -assert abs(yy - yy_sim) < 0.011, Exception('error in y particle position') -assert abs(ux - ux_sim) < 70., Exception('error in x particle velocity') -assert abs(uy - uy_sim) < 70., Exception('error in y particle velocity') +assert abs(np.abs((xx - xx_sim)/xx)) < 0.003, 
Exception('error in x particle position') +assert abs(np.abs((yy - yy_sim)/yy)) < 0.003, Exception('error in y particle position') +assert abs(np.abs((ux - ux_sim)/ux)) < 5.e-5, Exception('error in x particle velocity') +assert abs(np.abs((uy - uy_sim)/uy)) < 5.e-5, Exception('error in y particle velocity') test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Tests/plasma_lens/inputs_3d b/Examples/Tests/plasma_lens/inputs_3d index fa19685a2..67b549eb2 100644 --- a/Examples/Tests/plasma_lens/inputs_3d +++ b/Examples/Tests/plasma_lens/inputs_3d @@ -16,32 +16,35 @@ boundary.field_hi = pec pec pec boundary.particle_lo = absorbing absorbing absorbing boundary.particle_hi = absorbing absorbing absorbing -warpx.do_pml = 0 -warpx.const_dt = 1.e-6 +warpx.const_dt = 1.e-7 warpx.do_electrostatic = labframe # Algorithms algo.particle_shape = 1 +my_constants.vel_z = 0.2e6 + # particles particles.species_names = electrons electrons.charge = -q_e electrons.mass = m_e electrons.injection_style = "MultipleParticles" -electrons.multiple_particles_pos_x = 0.5 0. -electrons.multiple_particles_pos_y = 0. 0.4 +electrons.multiple_particles_pos_x = 0.05 0. +electrons.multiple_particles_pos_y = 0. 0.04 electrons.multiple_particles_pos_z = 0.05 0.05 electrons.multiple_particles_vel_x = 0. 0. electrons.multiple_particles_vel_y = 0. 0. -electrons.multiple_particles_vel_z = 0.02e6/clight 0.02e6/clight +electrons.multiple_particles_vel_z = vel_z/clight vel_z/clight electrons.multiple_particles_weight = 1. 1. particles.E_ext_particle_init_style = repeated_plasma_lens +particles.B_ext_particle_init_style = repeated_plasma_lens particles.repeated_plasma_lens_period = 0.5 particles.repeated_plasma_lens_starts = 0.1 0.11 0.12 0.13 particles.repeated_plasma_lens_lengths = 0.1 0.11 0.12 0.13 -particles.repeated_plasma_lens_strengths = 0.07 0.06 0.06 0.03 +particles.repeated_plasma_lens_strengths_E = 0.06 0.08 0.06 0.02 +particles.repeated_plasma_lens_strengths_B = 0.08/vel_z 0.04/vel_z 0.06/vel_z 0.04/vel_z # Diagnostics diagnostics.diags_names = diag1 diff --git a/Examples/Tests/radiation_reaction/test_const_B_analytical/analysis_classicalRR.py b/Examples/Tests/radiation_reaction/test_const_B_analytical/analysis_classicalRR.py index c008527a9..a09654b53 100755 --- a/Examples/Tests/radiation_reaction/test_const_B_analytical/analysis_classicalRR.py +++ b/Examples/Tests/radiation_reaction/test_const_B_analytical/analysis_classicalRR.py @@ -163,10 +163,10 @@ def generate(): f.write("amr.blocking_factor = 32\n") f.write("amr.max_grid_size = 64\n") f.write("geometry.coord_sys = 0\n") - f.write("geometry.is_periodic = 1 1 1\n") + f.write("boundary.field_lo = periodic periodic periodic\n") + f.write("boundary.field_hi = periodic periodic periodic\n") f.write("geometry.prob_lo = {} {} {}\n".format(-sim_size, -sim_size, -sim_size)) f.write("geometry.prob_hi = {} {} {}\n".format(sim_size, sim_size, sim_size)) - f.write("warpx.do_pml = 0\n") f.write("algo.charge_deposition = standard\n") f.write("algo.field_gathering = energy-conserving\n") f.write("warpx.cfl = 1.0\n") diff --git a/Examples/Tests/radiation_reaction/test_const_B_analytical/inputs_3d b/Examples/Tests/radiation_reaction/test_const_B_analytical/inputs_3d index 65a2eb4b8..5b78921f1 100644 --- a/Examples/Tests/radiation_reaction/test_const_B_analytical/inputs_3d +++ b/Examples/Tests/radiation_reaction/test_const_B_analytical/inputs_3d @@ -10,7 +10,6 @@ amr.max_grid_size = 64 geometry.coord_sys = 0 geometry.prob_lo 
= -8e-07 -8e-07 -8e-07 geometry.prob_hi = 8e-07 8e-07 8e-07 -warpx.do_pml = 0 algo.charge_deposition = standard algo.field_gathering = energy-conserving warpx.cfl = 1.0 diff --git a/Examples/Tests/restart/analysis_restart.py b/Examples/Tests/restart/analysis_restart.py index 7d5bab405..698ef8a7d 100755 --- a/Examples/Tests/restart/analysis_restart.py +++ b/Examples/Tests/restart/analysis_restart.py @@ -1,6 +1,7 @@ #! /usr/bin/env python import sys +import re import yt import numpy as np sys.path.insert(1, '../../../../warpx/Regression/Checksum/') @@ -9,7 +10,9 @@ tolerance = sys.float_info.epsilon print('tolerance = ', tolerance) -filename = 'restart_plt00010' +filename = sys.argv[1] +psatd = True if re.search('psatd', filename) else False + ds = yt.load( filename ) ad = ds.all_data() xb = ad['beam', 'particle_position_x'].to_ndarray() @@ -17,7 +20,7 @@ zb = ad['beam', 'particle_position_z'].to_ndarray() ze = ad['plasma_e', 'particle_position_z'].to_ndarray() -filename = 'orig_restart_plt00010' +filename = 'orig_restart_psatd_plt00010' if (psatd) else 'orig_restart_plt00010' ds = yt.load( filename ) ad = ds.all_data() xb0 = ad['beam', 'particle_position_x'].to_ndarray() diff --git a/Python/pywarpx/_libwarpx.py b/Python/pywarpx/_libwarpx.py index 77039037c..110de5190 100755 --- a/Python/pywarpx/_libwarpx.py +++ b/Python/pywarpx/_libwarpx.py @@ -74,8 +74,13 @@ def _get_package_root(): try: libwarpx = ctypes.CDLL(os.path.join(_get_package_root(), libname)) -except OSError: - raise Exception('"%s" was not installed. It can be installed by running "make" in the Python directory of WarpX' % libname) +except OSError as e: + value = e.args[0] + if f'{libname}: cannot open shared object file: No such file or directory' in value: + raise Exception(f'"{libname}" was not installed. Installation instructions can be found here https://warpx.readthedocs.io/en/latest/install/users.html') from e + else: + print("Failed to load the libwarpx shared object library") + raise # WarpX can be compiled using either double or float libwarpx.warpx_Real_size.restype = ctypes.c_int @@ -370,7 +375,7 @@ def getCellSize(direction, level=0): # # libwarpx.warpx_ComputePMLFactors(lev, dt) -def add_particles(species_name, x=0., y=0., z=0., ux=0., uy=0., uz=0., +def add_particles(species_name, x=None, y=None, z=None, ux=None, uy=None, uz=None, w=None, unique_particles=True, **kwargs): ''' @@ -382,11 +387,12 @@ def add_particles(species_name, x=0., y=0., z=0., ux=0., uy=0., uz=0., species_name : the species to add the particle to x, y, z : arrays or scalars of the particle positions (default = 0.) ux, uy, uz : arrays or scalars of the particle momenta (default = 0.) + w : array or scalar of particle weights (default = 0.) unique_particles : whether the particles are unique or duplicated on several processes. (default = True) - kwargs : dictionary containing an entry for the particle weights - (with keyword 'w') and all the extra particle attribute - arrays. If an attribute is not given it will be set to 0. + kwargs : dictionary containing an entry for all the extra particle + attribute arrays. If an attribute is not given it will be + set to 0. 
''' @@ -397,47 +403,69 @@ def add_particles(species_name, x=0., y=0., z=0., ux=0., uy=0., uz=0., lenux = np.size(ux) lenuy = np.size(uy) lenuz = np.size(uz) - - if (lenx == 0 or leny == 0 or lenz == 0 or lenux == 0 or - lenuy == 0 or lenuz == 0): - return - - maxlen = max(lenx, leny, lenz, lenux, lenuy, lenuz) - assert lenx==maxlen or lenx==1, "Length of x doesn't match len of others" - assert leny==maxlen or leny==1, "Length of y doesn't match len of others" - assert lenz==maxlen or lenz==1, "Length of z doesn't match len of others" - assert lenux==maxlen or lenux==1, "Length of ux doesn't match len of others" - assert lenuy==maxlen or lenuy==1, "Length of uy doesn't match len of others" - assert lenuz==maxlen or lenuz==1, "Length of uz doesn't match len of others" + lenw = np.size(w) + + # --- Find the max length of the parameters supplied + maxlen = 0 + if x is not None: + maxlen = max(maxlen, lenx) + if y is not None: + maxlen = max(maxlen, leny) + if z is not None: + maxlen = max(maxlen, lenz) + if ux is not None: + maxlen = max(maxlen, lenux) + if uy is not None: + maxlen = max(maxlen, lenuy) + if uz is not None: + maxlen = max(maxlen, lenuz) + if w is not None: + maxlen = max(maxlen, lenw) + + # --- Make sure that the lengths of the input parameters are consistent + assert x is None or lenx==maxlen or lenx==1, "Length of x doesn't match len of others" + assert y is None or leny==maxlen or leny==1, "Length of y doesn't match len of others" + assert z is None or lenz==maxlen or lenz==1, "Length of z doesn't match len of others" + assert ux is None or lenux==maxlen or lenux==1, "Length of ux doesn't match len of others" + assert uy is None or lenuy==maxlen or lenuy==1, "Length of uy doesn't match len of others" + assert uz is None or lenuz==maxlen or lenuz==1, "Length of uz doesn't match len of others" + assert w is None or lenw==maxlen or lenw==1, "Length of w doesn't match len of others" for key, val in kwargs.items(): assert np.size(val)==1 or len(val)==maxlen, f"Length of {key} doesn't match len of others" + # --- If the length of the input is zero, then quietly return + # --- This is not an error - it just means that no particles are to be injected. 
+ if maxlen == 0: + return + + # --- Broadcast scalars into appropriate length arrays + # --- If the parameter was not supplied, use the default value if lenx == 1: - x = np.array(x)*np.ones(maxlen) + x = np.full(maxlen, (x or 0.), float) if leny == 1: - y = np.array(y)*np.ones(maxlen) + y = np.full(maxlen, (y or 0.), float) if lenz == 1: - z = np.array(z)*np.ones(maxlen) + z = np.full(maxlen, (z or 0.), float) if lenux == 1: - ux = np.array(ux)*np.ones(maxlen) + ux = np.full(maxlen, (ux or 0.), float) if lenuy == 1: - uy = np.array(uy)*np.ones(maxlen) + uy = np.full(maxlen, (uy or 0.), float) if lenuz == 1: - uz = np.array(uz)*np.ones(maxlen,'d') + uz = np.full(maxlen, (uz or 0.), float) + if lenw == 1: + w = np.full(maxlen, (w or 0.), float) for key, val in kwargs.items(): if np.size(val) == 1: - kwargs[key] = np.array(val)*np.ones(maxlen) + kwargs[key] = np.full(maxlen, val, float) # --- The -3 is because the comps include the velocites nattr = get_nattr_species(species_name) - 3 attr = np.zeros((maxlen, nattr)) + attr[:,0] = w for key, vals in kwargs.items(): - if key == 'w': - attr[:,0] = vals - else: - # --- The -3 is because components 1 to 3 are velocities - attr[:,get_particle_comp_index(species_name, key)-3] = vals + # --- The -3 is because components 1 to 3 are velocities + attr[:,get_particle_comp_index(species_name, key)-3] = vals libwarpx.warpx_addNParticles( ctypes.c_char_p(species_name.encode('utf-8')), x.size, diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index f4d7a34ed..3f483c8b6 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -525,7 +525,6 @@ class ElectromagneticSolver(picmistandard.PICMI_ElectromagneticSolver): def init(self, kw): assert self.method is None or self.method in ['Yee', 'CKC', 'PSATD'], Exception("Only 'Yee', 'CKC', and 'PSATD' are supported") - self.do_pml = kw.pop('warpx_do_pml', None) self.pml_ncell = kw.pop('warpx_pml_ncell', None) if self.method == 'PSATD': @@ -539,7 +538,6 @@ def initialize_inputs(self): self.grid.initialize_inputs() - pywarpx.warpx.do_pml = self.do_pml pywarpx.warpx.pml_ncell = self.pml_ncell pywarpx.warpx.do_nodal = self.l_nodal @@ -1034,6 +1032,11 @@ def initialize_inputs(self): self.diagnostic.openpmd_backend = self.openpmd_backend self.diagnostic.intervals = self.period + if self.write_dir is not None or self.file_prefix is not None: + write_dir = (self.write_dir or 'diags') + file_prefix = (self.file_prefix or self.name) + self.diagnostic.file_prefix = write_dir + '/' + file_prefix + # --- Use a set to ensure that fields don't get repeated. 
variables = set() @@ -1087,7 +1090,35 @@ def initialize_inputs(self): class LabFrameFieldDiagnostic(picmistandard.PICMI_LabFrameFieldDiagnostic): + """ + Warp specific arguments: + - warpx_new_BTD: Use the new BTD diagnostics + - warpx_format: Passed to .format + - warpx_openpmd_backend: Passed to .openpmd_backend + - warpx_file_prefix: Passed to .file_prefix + - warpx_buffer_size: Passed to .buffer_size + - warpx_lower_bound: Passed to .lower_bound + - warpx_upper_bound: Passed to .upper_bound + """ + __doc__ = picmistandard.PICMI_LabFrameFieldDiagnostic.__doc__ + __doc__ + def init(self, kw): + self.use_new_BTD = kw.pop('warpx_new_BTD', False) + if self.use_new_BTD: + # The user is using the new BTD + self.format = kw.pop('warpx_format', None) + self.openpmd_backend = kw.pop('warpx_openpmd_backend', None) + self.file_prefix = kw.pop('warpx_file_prefix', None) + self.buffer_size = kw.pop('warpx_buffer_size', None) + self.lower_bound = kw.pop('warpx_lower_bound', None) + self.upper_bound = kw.pop('warpx_upper_bound', None) + def initialize_inputs(self): + if self.use_new_BTD: + self.initialize_inputs_new() + else: + self.initialize_inputs_old() + + def initialize_inputs_old(self): pywarpx.warpx.check_consistency('num_snapshots_lab', self.num_snapshots, 'The number of snapshots must be the same in all lab frame diagnostics') pywarpx.warpx.check_consistency('dt_snapshots_lab', self.dt_snapshots, 'The time between snapshots must be the same in all lab frame diagnostics') @@ -1099,6 +1130,65 @@ def initialize_inputs(self): pywarpx.warpx.do_back_transformed_fields = 1 pywarpx.warpx.lab_data_directory = self.write_dir + def initialize_inputs_new(self): + + name = getattr(self, 'name', None) + if name is None: + diagnostics_number = len(pywarpx.diagnostics._diagnostics_dict) + 1 + self.name = 'diag{}'.format(diagnostics_number) + + try: + self.diagnostic = pywarpx.diagnostics._diagnostics_dict[self.name] + except KeyError: + self.diagnostic = pywarpx.Diagnostics.Diagnostic(self.name, _species_dict={}) + pywarpx.diagnostics._diagnostics_dict[self.name] = self.diagnostic + + self.diagnostic.diag_type = 'BackTransformed' + self.diagnostic.format = self.format + self.diagnostic.openpmd_backend = self.openpmd_backend + self.diagnostic.diag_lo = self.lower_bound + self.diagnostic.diag_hi = self.upper_bound + + self.diagnostic.do_back_transformed_fields = 1 + self.diagnostic.num_snapshots_lab = self.num_snapshots + self.diagnostic.dt_snapshots_lab = self.dt_snapshots + self.diagnostic.buffer_size = self.buffer_size + + # --- Use a set to ensure that fields don't get repeated. + fields_to_plot = set() + + for dataname in self.data_list: + if dataname == 'E': + fields_to_plot.add('Ex') + fields_to_plot.add('Ey') + fields_to_plot.add('Ez') + elif dataname == 'B': + fields_to_plot.add('Bx') + fields_to_plot.add('By') + fields_to_plot.add('Bz') + elif dataname == 'J': + fields_to_plot.add('jx') + fields_to_plot.add('jy') + fields_to_plot.add('jz') + elif dataname in ['Ex', 'Ey', 'Ez', 'Bx', 'By', 'Bz', 'rho']: + fields_to_plot.add(dataname) + elif dataname in ['Jx', 'Jy', 'Jz']: + fields_to_plot.add(dataname.lower()) + elif dataname.startswith('rho_'): + # Adds rho_species diagnostic + fields_to_plot.add(dataname) + + # --- Convert the set to a sorted list so that the order + # --- is the same on all processors. 
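In the new-BTD branch of `LabFrameFieldDiagnostic`, the PICMI `data_list` entries are translated into WarpX field names, de-duplicated with a set, and (as the lines that follow show) sorted so the generated inputs are identical on every processor. A stand-alone sketch of that translation (the function name is hypothetical):

```python
def data_list_to_fields(data_list):
    """Illustrative version of the data_list -> fields_to_plot mapping
    used by the new-BTD path."""
    fields = set()
    for name in data_list:
        if name in ('E', 'B'):
            # vector shorthands expand to their Cartesian components
            fields.update(name + c for c in 'xyz')
        elif name == 'J':
            fields.update('j' + c for c in 'xyz')
        elif name in ('Jx', 'Jy', 'Jz'):
            fields.add(name.lower())  # WarpX spells current components in lower case
        elif name in ('Ex', 'Ey', 'Ez', 'Bx', 'By', 'Bz', 'rho') or name.startswith('rho_'):
            fields.add(name)
    # Sorting gives the same ordering on all processors.
    return sorted(fields)

print(data_list_to_fields(['E', 'J', 'rho', 'rho_electrons']))
# ['Ex', 'Ey', 'Ez', 'jx', 'jy', 'jz', 'rho', 'rho_electrons']
```

A user opts into this path by passing `warpx_new_BTD=True`, plus any of the optional `warpx_*` keywords listed in the docstring, when constructing `picmi.LabFrameFieldDiagnostic`.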
+ fields_to_plot = list(fields_to_plot) + fields_to_plot.sort() + self.diagnostic.fields_to_plot = fields_to_plot + + if self.write_dir is not None or self.file_prefix is not None: + write_dir = (self.write_dir or 'diags') + file_prefix = (self.file_prefix or self.name) + self.diagnostic.file_prefix = write_dir + '/' + file_prefix + class LabFrameParticleDiagnostic(picmistandard.PICMI_LabFrameParticleDiagnostic): def initialize_inputs(self): diff --git a/Regression/Checksum/benchmarks_json/LaserAccelerationBoost.json b/Regression/Checksum/benchmarks_json/LaserAccelerationBoost.json index 12d6b5224..d1523cec5 100644 --- a/Regression/Checksum/benchmarks_json/LaserAccelerationBoost.json +++ b/Regression/Checksum/benchmarks_json/LaserAccelerationBoost.json @@ -39,6 +39,6 @@ "jx": 5122916237.313671, "jy": 551784296059286.56, "jz": 448986163636.4643, - "rho": 1567.0144888546802 + "rho": 1568.420379535507 } -} \ No newline at end of file +} diff --git a/Regression/Checksum/benchmarks_json/Plasma_lens.json b/Regression/Checksum/benchmarks_json/Plasma_lens.json index 69ad58618..ae0938e25 100644 --- a/Regression/Checksum/benchmarks_json/Plasma_lens.json +++ b/Regression/Checksum/benchmarks_json/Plasma_lens.json @@ -2,20 +2,20 @@ "electrons": { "particle_cpu": 1.0, "particle_id": 2.0, - "particle_momentum_x": 2.2518779586696112e-26, - "particle_momentum_y": 1.8015029214643703e-26, - "particle_momentum_z": 3.6437524496743613e-26, - "particle_position_x": 0.33581536370442583, - "particle_position_y": 0.2686524426674559, - "particle_position_z": 3.979988490885961 + "particle_momentum_x": 1.8497357687843446e-27, + "particle_momentum_y": 1.4797852897079812e-27, + "particle_momentum_z": 3.6436751241117655e-25, + "particle_position_x": 0.03778437270526418, + "particle_position_y": 0.030227636257933975, + "particle_position_z": 3.9799632585096267 }, "lev=0": { "Bx": 0.0, "By": 0.0, "Bz": 0.0, - "Ex": 8.557217761445071e-07, - "Ey": 9.086796476605627e-07, - "Ez": 1.2874219492176113e-06, + "Ex": 9.938634536083956e-07, + "Ey": 9.994968813016262e-07, + "Ez": 1.3536427852197406e-06, "jx": 0.0, "jy": 0.0, "jz": 0.0 diff --git a/Regression/Checksum/benchmarks_json/multi_J_rz_psatd.json b/Regression/Checksum/benchmarks_json/multi_J_rz_psatd.json index 9f3cbd1d1..8d465465f 100644 --- a/Regression/Checksum/benchmarks_json/multi_J_rz_psatd.json +++ b/Regression/Checksum/benchmarks_json/multi_J_rz_psatd.json @@ -6,44 +6,44 @@ "particle_momentum_y": 8.719557136998762e-16, "particle_momentum_z": 5.461841477490683e-13, "particle_position_x": 6.278329546764386, - "particle_position_y": 17.946647860264175, + "particle_position_y": 17.94664786026417, "particle_theta": 1570796.3267948946, "particle_weight": 6241509074.460753 }, "lev=0": { - "Bx": 8.005962037549714e-12, - "By": 24944.639504016905, - "Bz": 4.210132508880100e-12, - "Ex": 4676885383506.242, - "Ey": 2.391716965612074e-03, - "Ez": 4318113804898.98, - "jx": 371830042113583.8, - "jy": 4.116492489660151e-02, - "jz": 1923608653789179.8, - "rho": 5287882.90867836, + "Bx": 7.576533012513204e-12, + "By": 24912.146189783907, + "Bz": 4.005984970626178e-12, + "Ex": 4666716648235.923, + "Ey": 2.123908815458177e-03, + "Ez": 4313874828857.1045, + "jx": 363242056086168.4, + "jy": 6.317201519130083e-02, + "jz": 1930576792579723.2, + "rho": 5286010.854344782, "rho_driver": 6281777.267757402, - "rho_plasma_e": 49566310.561886705, - "rho_plasma_p": 50769179.767392606 + "rho_plasma_e": 49565978.70143689, + "rho_plasma_p": 50769177.37711251 }, "plasma_e": { "particle_cpu": 12650.0, 
"particle_id": 159086515.0, - "particle_momentum_x": 6.660647123797951e-20, - "particle_momentum_y": 6.741831979985861e-20, - "particle_momentum_z": 2.8535667914189037e-20, - "particle_position_x": 1.1423537080799195, - "particle_position_y": 0.6139665443929749, + "particle_momentum_x": 6.660652528085463e-20, + "particle_momentum_y": 6.741012158179844e-20, + "particle_momentum_z": 2.8600053012310933e-20, + "particle_position_x": 1.1423570256434674, + "particle_position_y": 0.6139580971700734, "particle_theta": 20188.939948727297, "particle_weight": 1002457942911.3788 }, "plasma_p": { "particle_cpu": 12650.0, "particle_id": 163728835.0, - "particle_momentum_x": 6.647195404477396e-20, - "particle_momentum_y": 6.774818723353511e-20, - "particle_momentum_z": 5.606433290348049e-20, - "particle_position_x": 1.1365201481547165, - "particle_position_y": 0.6152067075782452, + "particle_momentum_x": 6.640162861597256e-20, + "particle_momentum_y": 6.7677607837394e-20, + "particle_momentum_z": 5.599488516840493e-20, + "particle_position_x": 1.1365201489632144, + "particle_position_y": 0.6152067089894871, "particle_theta": 20286.92798337582, "particle_weight": 1002457942911.3788 } diff --git a/Regression/Checksum/benchmarks_json/restart_psatd.json b/Regression/Checksum/benchmarks_json/restart_psatd.json new file mode 100644 index 000000000..12eb370d4 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/restart_psatd.json @@ -0,0 +1,68 @@ +{ + "beam": { + "particle_cpu": 0.0, + "particle_id": 1500500.0, + "particle_momentum_x": 4.2541507163023845e-19, + "particle_momentum_y": 4.152774664953137e-19, + "particle_momentum_z": 2.7285024671598035e-17, + "particle_position_x": 0.001517617677859585, + "particle_position_y": 0.0014929352020519265, + "particle_position_z": 1.9017771002859827, + "particle_weight": 3120754537.230381 + }, + "driver": { + "particle_cpu": 0.0, + "particle_id": 500500.0, + "particle_momentum_x": 4.747251762692475e+21, + "particle_momentum_y": 4.957368621566193e+21, + "particle_momentum_z": 2.9967719884827463e+25, + "particle_position_x": 0.0015297460887947526, + "particle_position_y": 0.0016025569807081841, + "particle_position_z": 0.30618320806025484, + "particle_weight": 6241509074.460762 + }, + "driverback": { + "particle_cpu": 0.0, + "particle_id": 2500500.0, + "particle_momentum_x": 4.683153456335865e+21, + "particle_momentum_y": 5.084648482174756e+21, + "particle_momentum_z": 2.999729460061163e+25, + "particle_position_x": 0.0015293780426403722, + "particle_position_y": 0.0016041310861395035, + "particle_position_z": 0.4918072823278685, + "particle_weight": 6241509074.460762 + }, + "lev=0": { + "Bx": 97777.04044826895, + "By": 97787.26556234213, + "Bz": 31.59916720107527, + "Ex": 12868507499248.19, + "Ey": 12867303176821.19, + "Ez": 32449258298200.113, + "jx": 634704212071.4764, + "jy": 611625734809.7017, + "jz": 904274042090495.5 + }, + "plasma_e": { + "particle_cpu": 4116.0, + "particle_id": 22919946.0, + "particle_momentum_x": 5.685882296125418e-22, + "particle_momentum_y": 5.662977916983953e-22, + "particle_momentum_z": 1.1184398018268609e-17, + "particle_position_x": 0.13505924338837194, + "particle_position_y": 0.13505922196934064, + "particle_position_z": 0.2036272092198746, + "particle_weight": 126912632943.36244 + }, + "plasma_p": { + "particle_cpu": 4116.0, + "particle_id": 25328394.0, + "particle_momentum_x": 5.686073146345258e-22, + "particle_momentum_y": 5.663176459735941e-22, + "particle_momentum_z": 2.0535791478847752e-14, + "particle_position_x": 
0.1350562483696602, + "particle_position_y": 0.1350562483813245, + "particle_position_z": 0.20362719494726383, + "particle_weight": 126912632943.36244 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/silver_mueller_2d_x.json b/Regression/Checksum/benchmarks_json/silver_mueller_2d_x.json index 67b98b57d..83c6576a6 100644 --- a/Regression/Checksum/benchmarks_json/silver_mueller_2d_x.json +++ b/Regression/Checksum/benchmarks_json/silver_mueller_2d_x.json @@ -1,13 +1,13 @@ { "lev=0": { - "Bx": 6.1312424184606115e-09, - "By": 2.679014566360329e-08, - "Bz": 2.6908062711786136e-08, - "Ex": 4.470308886992601, - "Ey": 7.171046771146154, - "Ez": 7.670630188490648, + "Bx": 5.3173130498255016e-09, + "By": 2.7171916142338513e-08, + "Bz": 2.795222039240815e-08, + "Ex": 4.152944711886871, + "Ey": 7.867836700647592, + "Ez": 7.8177914065231064, "jx": 0.0, - "jy": 2.96970387975164e-46, + "jy": 2.9697038995489428e-46, "jz": 0.0 } } \ No newline at end of file diff --git a/Regression/WarpX-GPU-tests.ini b/Regression/WarpX-GPU-tests.ini index 899d3cdfb..18ee1a109 100644 --- a/Regression/WarpX-GPU-tests.ini +++ b/Regression/WarpX-GPU-tests.ini @@ -48,7 +48,7 @@ emailBody = Check https://ccse.lbl.gov/pub/GpuRegressionTesting/WarpX/ for more [AMReX] dir = /home/regtester/git/amrex/ -branch = development +branch = 44edcc104f551b6243984b567ccd6723ac336699 [source] dir = /home/regtester/git/WarpX @@ -56,7 +56,7 @@ branch = development [extra-PICSAR] dir = /home/regtester/git/picsar/ -branch = development +branch = c16b642e3dcf860480dd1dd21cefa3874f395773 # individual problems follow diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index c3358dcc1..d9966c435 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -48,7 +48,7 @@ emailBody = Check https://ccse.lbl.gov/pub/RegressionTesting/WarpX/ for more det [AMReX] dir = /home/regtester/AMReX_RegTesting/amrex/ -branch = development +branch = 44edcc104f551b6243984b567ccd6723ac336699 [source] dir = /home/regtester/AMReX_RegTesting/warpx @@ -56,7 +56,7 @@ branch = development [extra-PICSAR] dir = /home/regtester/AMReX_RegTesting/picsar/ -branch = development +branch = c16b642e3dcf860480dd1dd21cefa3874f395773 # individual problems follow @@ -909,6 +909,25 @@ particleTypes = beam analysisRoutine = Examples/Tests/restart/analysis_restart.py tolerance = 1.e-14 +[restart_psatd] +buildDir = . +inputFile = Examples/Tests/restart/inputs +runtime_params = algo.maxwell_solver=psatd psatd.use_default_v_galilean=1 particles.use_fdtd_nci_corr=0 chk.file_prefix=restart_psatd_chk +dim = 3 +addToCompileString = USE_PSATD=TRUE +restartTest = 1 +restartFileNum = 5 +useMPI = 1 +numprocs = 2 +useOMP = 1 +numthreads = 1 +compileTest = 0 +doVis = 0 +compareParticles = 0 +particleTypes = beam +analysisRoutine = Examples/Tests/restart/analysis_restart.py +tolerance = 1.e-14 + [space_charge_initialization_2d] buildDir = . inputFile = Examples/Modules/space_charge_initialization/inputs_3d @@ -2295,6 +2314,24 @@ particleTypes = electrons analysisRoutine = Examples/Modules/ParticleBoundaryProcess/analysis_absorption.py tolerance = 1.0e-4 +[particle_scrape] +buildDir = . 
+inputFile = Examples/Modules/ParticleBoundaryScrape/inputs_scrape +runtime_params = +dim = 3 +addToCompileString = USE_EB=TRUE +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 0 +numthreads = 0 +compileTest = 0 +doVis = 0 +compareParticles = 1 +particleTypes = electrons +analysisRoutine = Examples/Modules/ParticleBoundaryScrape/analysis_scrape.py +tolerance = 1.0e-4 + [Python_particle_attr_access] buildDir = . inputFile = Examples/Tests/ParticleDataPython/PICMI_inputs_2d.py @@ -2310,3 +2347,111 @@ numthreads = 0 compileTest = 0 doVis = 0 analysisRoutine = Examples/Tests/ParticleDataPython/analysis.py + +[Performance_works_1_uniform_rest_32ppc] +buildDir = . +inputFile = Examples/Tests/PerformanceTests/automated_test_1_uniform_rest_32ppc +runtime_params = amr.n_cell=64 64 64 max_step=10 diagnostics.diags_names=diag1 diag1.intervals=0 diag1.diag_type=Full +dim = 3 +addToCompileString = +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 0 +numthreads = 0 +compileTest = 0 +doVis = 0 +compareParticles = 1 +particleTypes = electrons ions +analysisRoutine = +tolerance = 1.0e-4 + +[Performance_works_2_uniform_rest_1ppc] +buildDir = . +inputFile = Examples/Tests/PerformanceTests/automated_test_2_uniform_rest_1ppc +runtime_params = amr.n_cell=64 64 64 max_step=10 diagnostics.diags_names=diag1 diag1.intervals=0 diag1.diag_type=Full +dim = 3 +addToCompileString = +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 0 +numthreads = 0 +compileTest = 0 +doVis = 0 +compareParticles = 1 +particleTypes = electrons +analysisRoutine = +tolerance = 1.0e-4 + +[Performance_works_3_uniform_drift_4ppc] +buildDir = . +inputFile = Examples/Tests/PerformanceTests/automated_test_3_uniform_drift_4ppc +runtime_params = amr.n_cell=64 64 64 max_step=10 diagnostics.diags_names=diag1 diag1.intervals=0 diag1.diag_type=Full +dim = 3 +addToCompileString = +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 0 +numthreads = 0 +compileTest = 0 +doVis = 0 +compareParticles = 1 +particleTypes = electrons ions +analysisRoutine = +tolerance = 1.0e-4 + +[Performance_works_4_labdiags_2ppc] +buildDir = . +inputFile = Examples/Tests/PerformanceTests/automated_test_4_labdiags_2ppc +runtime_params = amr.n_cell=64 64 64 max_step=10 diagnostics.diags_names=diag1 diag1.intervals=0 diag1.diag_type=Full +dim = 3 +addToCompileString = +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 0 +numthreads = 0 +compileTest = 0 +doVis = 0 +compareParticles = 1 +particleTypes = electrons ions +analysisRoutine = +tolerance = 1.0e-4 + +[Performance_works_5_loadimbalance] +buildDir = . +inputFile = Examples/Tests/PerformanceTests/automated_test_5_loadimbalance +runtime_params = amr.n_cell=64 64 64 max_step=10 diagnostics.diags_names=diag1 diag1.intervals=0 diag1.diag_type=Full +dim = 3 +addToCompileString = +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 0 +numthreads = 0 +compileTest = 0 +doVis = 0 +compareParticles = 1 +particleTypes = electrons ions +analysisRoutine = +tolerance = 1.0e-4 + +[Performance_works_6_output_2ppc] +buildDir = . 
+inputFile = Examples/Tests/PerformanceTests/automated_test_6_output_2ppc +runtime_params = amr.n_cell=64 64 64 max_step=10 +dim = 3 +addToCompileString = +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 0 +numthreads = 0 +compileTest = 0 +doVis = 0 +compareParticles = 1 +particleTypes = electrons ions +analysisRoutine = +tolerance = 1.0e-4 diff --git a/Source/BoundaryConditions/PML_fwd.H b/Source/BoundaryConditions/PML_fwd.H index 7395a2c87..58532a890 100644 --- a/Source/BoundaryConditions/PML_fwd.H +++ b/Source/BoundaryConditions/PML_fwd.H @@ -1,10 +1,13 @@ -/* Copyright 2021 Luca Fedeli +/* Copyright 2021 Luca Fedeli, Axel Huebl * * This file is part of WarpX. * * License: BSD-3-Clause-LBNL */ +#ifndef WARPX_PML_FWD_H +#define WARPX_PML_FWD_H + struct Sigma; struct SigmaBox; @@ -14,3 +17,5 @@ class MultiSigmaBox; enum struct PatchType; class PML; + +#endif /* WARPX_PML_FWD_H */ diff --git a/Source/BoundaryConditions/WarpXEvolvePML.cpp b/Source/BoundaryConditions/WarpXEvolvePML.cpp index a6c83b87e..1fbbacef1 100644 --- a/Source/BoundaryConditions/WarpXEvolvePML.cpp +++ b/Source/BoundaryConditions/WarpXEvolvePML.cpp @@ -12,7 +12,7 @@ #include "Utils/WarpXProfilerWrapper.H" #include "WarpX_PML_kernels.H" -#ifdef BL_USE_SENSEI_INSITU +#ifdef AMREX_USE_SENSEI_INSITU # include #endif #include diff --git a/Source/BoundaryConditions/WarpXFieldBoundaries.cpp b/Source/BoundaryConditions/WarpXFieldBoundaries.cpp index b7f625c8a..0a3e10aa5 100644 --- a/Source/BoundaryConditions/WarpXFieldBoundaries.cpp +++ b/Source/BoundaryConditions/WarpXFieldBoundaries.cpp @@ -56,7 +56,9 @@ void WarpX::ApplyBfieldBoundary (const int lev, PatchType patch_type, DtType a_d } if(applySilverMueller) m_fdtd_solver_fp[0]->ApplySilverMuellerBoundary( Efield_fp[lev], Bfield_fp[lev], - Geom(lev).Domain(), dt[lev]); + Geom(lev).Domain(), dt[lev], + WarpX::field_boundary_lo, + WarpX::field_boundary_hi); } } } diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp index 3d8ebf571..53a1fc09a 100644 --- a/Source/Diagnostics/BTDiagnostics.cpp +++ b/Source/Diagnostics/BTDiagnostics.cpp @@ -143,8 +143,6 @@ BTDiagnostics::ReadParameters () } AMREX_ALWAYS_ASSERT_WITH_MESSAGE(snapshot_interval_is_specified, "For back-transformed diagnostics, user should specify either dz_snapshots_lab or dt_snapshots_lab"); - // For BTD, we always need rho to perform Lorentz Transform of current-density - if (WarpXUtilStr::is_in(m_cellcenter_varnames, "rho")) warpx.setplot_rho(true); if (pp_diag_name.query("buffer_size", m_buffer_size)) { if(m_max_box_size < m_buffer_size) m_max_box_size = m_buffer_size; diff --git a/Source/Diagnostics/BackTransformedDiagnostic_fwd.H b/Source/Diagnostics/BackTransformedDiagnostic_fwd.H index 32ff20f76..2766d2e27 100644 --- a/Source/Diagnostics/BackTransformedDiagnostic_fwd.H +++ b/Source/Diagnostics/BackTransformedDiagnostic_fwd.H @@ -1,11 +1,15 @@ -/* Copyright 2021 Luca Fedeli +/* Copyright 2021 Luca Fedeli, Axel Huebl * * This file is part of WarpX. 
* * License: BSD-3-Clause-LBNL */ +#ifndef WARPX_BACK_TRANSFORMED_DIAGNOSTICS_FWD_H +#define WARPX_BACK_TRANSFORMED_DIAGNOSTICS_FWD_H class LabFrameDiag; class LabFrameSnapShot; class BackTransformedDiagnostic; + +#endif /* WARPX_BACK_TRANSFORMED_DIAGNOSTICS_FWD_H */ diff --git a/Source/Diagnostics/ComputeDiagFunctors/ComputeDiagFunctor_fwd.H b/Source/Diagnostics/ComputeDiagFunctors/ComputeDiagFunctor_fwd.H index 958da378b..0480543ff 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/ComputeDiagFunctor_fwd.H +++ b/Source/Diagnostics/ComputeDiagFunctors/ComputeDiagFunctor_fwd.H @@ -1,8 +1,13 @@ -/* Copyright 2021 Luca Fedeli +/* Copyright 2021 Luca Fedeli, Axel Huebl * * This file is part of WarpX. * * License: BSD-3-Clause-LBNL */ +#ifndef WARPX_COMPUTEDIAGFUNCTOR_FWD_H +#define WARPX_COMPUTEDIAGFUNCTOR_FWD_H + class ComputeDiagFunctor; + +#endif /* WARPX_COMPUTEDIAGFUNCTOR_FWD_H */ diff --git a/Source/Diagnostics/Diagnostics.cpp b/Source/Diagnostics/Diagnostics.cpp index c7f408a80..cb4ffcd02 100644 --- a/Source/Diagnostics/Diagnostics.cpp +++ b/Source/Diagnostics/Diagnostics.cpp @@ -58,12 +58,6 @@ Diagnostics::BaseReadParameters () m_varnames = {"Ex", "Ey", "Ez", "Bx", "By", "Bz", "jx", "jy", "jz"}; } - // If user requests rho with back-transformed diagnostics, we set plot_rho=true - // and compute rho at each iteration - if (WarpXUtilStr::is_in(m_varnames, "rho") && WarpX::do_back_transformed_diagnostics) { - warpx.setplot_rho(true); - } - // Sanity check if user requests to plot phi if (WarpXUtilStr::is_in(m_varnames, "phi")){ AMREX_ALWAYS_ASSERT_WITH_MESSAGE( @@ -204,7 +198,8 @@ Diagnostics::InitData () InitializeFieldBufferData(i_buffer, lev); } } - // When particle buffers, m_particle_buffers are included, they will be initialized here + // When particle buffers, m_particle_boundary_buffer are included, + // they will be initialized here InitializeParticleBuffer(); amrex::ParmParse pp_diag_name(m_diag_name); @@ -268,7 +263,7 @@ Diagnostics::InitBaseData () } else if (m_format == "ascent"){ m_flush_format = std::make_unique(); } else if (m_format == "sensei"){ -#ifdef BL_USE_SENSEI_INSITU +#ifdef AMREX_USE_SENSEI_INSITU m_flush_format = std::make_unique( dynamic_cast(const_cast(&warpx)), m_diag_name); diff --git a/Source/Diagnostics/FlushFormats/CMakeLists.txt b/Source/Diagnostics/FlushFormats/CMakeLists.txt index 478db3750..7e19ae222 100644 --- a/Source/Diagnostics/FlushFormats/CMakeLists.txt +++ b/Source/Diagnostics/FlushFormats/CMakeLists.txt @@ -3,6 +3,7 @@ target_sources(WarpX FlushFormatAscent.cpp FlushFormatCheckpoint.cpp FlushFormatPlotfile.cpp + FlushFormatSensei.cpp ) if(WarpX_HAVE_OPENPMD) diff --git a/Source/Diagnostics/FlushFormats/FlushFormat.H b/Source/Diagnostics/FlushFormats/FlushFormat.H index 33afa45ee..fd2ab2482 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormat.H +++ b/Source/Diagnostics/FlushFormats/FlushFormat.H @@ -5,6 +5,7 @@ #include "Diagnostics/ParticleDiag/ParticleDiag.H" #include "Particles/MultiParticleContainer.H" +#include "WarpX.H" class FlushFormat { diff --git a/Source/Diagnostics/FlushFormats/FlushFormatAscent.cpp b/Source/Diagnostics/FlushFormats/FlushFormatAscent.cpp index 9c582e904..d264c415b 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatAscent.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatAscent.cpp @@ -19,29 +19,40 @@ FlushFormatAscent::WriteToFile ( bool /*isBTD*/, int /*snapshotID*/, const amrex::Geometry& /*full_BTD_snapshot*/, bool /*isLastBTDFlush*/) const { #ifdef AMREX_USE_ASCENT + 
WARPX_PROFILE("FlushFormatAscent::WriteToFile()"); + auto & warpx = WarpX::GetInstance(); // wrap mesh data + WARPX_PROFILE_VAR("FlushFormatAscent::WriteToFile::MultiLevelToBlueprint", prof_ascent_mesh_blueprint); conduit::Node bp_mesh; amrex::MultiLevelToBlueprint( nlev, amrex::GetVecOfConstPtrs(mf), varnames, geom, time, iteration, warpx.refRatio(), bp_mesh); + WARPX_PROFILE_VAR_STOP(prof_ascent_mesh_blueprint); + WARPX_PROFILE_VAR("FlushFormatAscent::WriteToFile::WriteParticles", prof_ascent_particles); WriteParticles(particle_diags, bp_mesh); + WARPX_PROFILE_VAR_STOP(prof_ascent_particles); // If you want to save blueprint HDF5 files w/o using an Ascent // extract, you can call the following AMReX helper: // const auto step = istep[0]; // WriteBlueprintFiles(bp_mesh,"bp_export",step,"hdf5"); + WARPX_PROFILE_VAR("FlushFormatAscent::WriteToFile::publish", prof_ascent_publish); ascent::Ascent ascent; conduit::Node opts; opts["exceptions"] = "catch"; opts["mpi_comm"] = MPI_Comm_c2f(ParallelDescriptor::Communicator()); ascent.open(opts); ascent.publish(bp_mesh); + WARPX_PROFILE_VAR_STOP(prof_ascent_publish); + + WARPX_PROFILE_VAR("FlushFormatAscent::WriteToFile::execute", prof_ascent_execute); conduit::Node actions; ascent.execute(actions); ascent.close(); + WARPX_PROFILE_VAR_STOP(prof_ascent_execute); #else amrex::ignore_unused(varnames, mf, geom, iteration, time, @@ -55,6 +66,8 @@ FlushFormatAscent::WriteToFile ( void FlushFormatAscent::WriteParticles(const amrex::Vector& particle_diags, conduit::Node& a_bp_mesh) const { + WARPX_PROFILE("FlushFormatAscent::WriteParticles()"); + // wrap particle data for each species // we prefix the fields with "particle_{species_name}" b/c we // want to to uniquely name all the fields that can be plotted diff --git a/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.cpp b/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.cpp index 0204c2af1..8af12308e 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.cpp @@ -29,11 +29,7 @@ FlushFormatOpenPMD::FlushFormatOpenPMD (const std::string& diag_name) openPMD::IterationEncoding encoding = openPMD::IterationEncoding::groupBased; if ( 0 == openpmd_encoding.compare("v") ) -#if OPENPMDAPI_VERSION_GE(0, 14, 0) encoding = openPMD::IterationEncoding::variableBased; -#else - encoding = openPMD::IterationEncoding::groupBased; -#endif else if ( 0 == openpmd_encoding.compare("g") ) encoding = openPMD::IterationEncoding::groupBased; else if ( 0 == openpmd_encoding.compare("f") ) @@ -121,7 +117,7 @@ FlushFormatOpenPMD::WriteToFile ( varnames, mf, geom, output_iteration, time, isBTD, full_BTD_snapshot); // particles: all (reside only on locally finest level) - m_OpenPMDPlotWriter->WriteOpenPMDParticles(particle_diags); + m_OpenPMDPlotWriter->WriteOpenPMDParticles(particle_diags, isBTD); // signal that no further updates will be written to this iteration m_OpenPMDPlotWriter->CloseStep(isBTD, isLastBTDFlush); diff --git a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp index 6ed4c141e..0bda9b7e7 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp @@ -3,12 +3,12 @@ #include "Diagnostics/ParticleDiag/ParticleDiag.H" #include "Particles/Filter/FilterFunctors.H" #include "Particles/WarpXParticleContainer.H" +#include "Particles/ParticleBuffer.H" #include "Utils/Interpolate.H" #include "Utils/WarpXProfilerWrapper.H" 
#include "WarpX.H" #include -#include #include #include #include @@ -290,6 +290,8 @@ FlushFormatPlotfile::WriteWarpXHeader( HeaderFile << warpx.getcurrent_injection_position() << "\n"; HeaderFile << warpx.getdo_moving_window() << "\n"; + + HeaderFile << warpx.time_of_last_gal_shift << "\n"; } } @@ -304,14 +306,14 @@ FlushFormatPlotfile::WriteHeaderParticle( } void -FlushFormatPlotfile::WriteParticles(const std::string& dir, - const amrex::Vector& particle_diags) const +FlushFormatPlotfile::WriteParticles (const std::string& dir, + const amrex::Vector& particle_diags) const { for (unsigned i = 0, n = particle_diags.size(); i < n; ++i) { WarpXParticleContainer* pc = particle_diags[i].getParticleContainer(); - amrex::AmrParticleContainer<0, 0, PIdx::nattribs, 0, amrex::PinnedArenaAllocator> - tmp(&WarpX::GetInstance()); + auto tmp = ParticleBuffer::getTmpPC(pc); + Vector real_names; Vector int_names; Vector int_flags; @@ -327,9 +329,6 @@ FlushFormatPlotfile::WriteParticles(const std::string& dir, real_names.push_back("theta"); #endif - // add runtime real comps to tmp - for (int ic = 0; ic < pc->NumRuntimeRealComps(); ++ic) { tmp.AddRealComp(false); } - // get the names of the real comps real_names.resize(pc->NumRealComps()); auto runtime_rnames = pc->getParticleRuntimeComps(); @@ -339,9 +338,6 @@ FlushFormatPlotfile::WriteParticles(const std::string& dir, real_flags = particle_diags[i].plot_flags; real_flags.resize(pc->NumRealComps(), 1); - // add runtime int comps to tmp - for (int ic = 0; ic < pc->NumRuntimeIntComps(); ++ic) { tmp.AddIntComp(false); } - // and the names int_names.resize(pc->NumIntComps()); auto runtime_inames = pc->getParticleRuntimeiComps(); diff --git a/Source/Diagnostics/FlushFormats/FlushFormatSensei.H b/Source/Diagnostics/FlushFormats/FlushFormatSensei.H index 23a5a2614..7e32b8ff9 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatSensei.H +++ b/Source/Diagnostics/FlushFormats/FlushFormatSensei.H @@ -4,7 +4,7 @@ #include "FlushFormat.H" #include -#if defined(BL_USE_SENSEI_INSITU) +#if defined(AMREX_USE_SENSEI_INSITU) # include #else namespace amrex { diff --git a/Source/Diagnostics/FlushFormats/FlushFormatSensei.cpp b/Source/Diagnostics/FlushFormats/FlushFormatSensei.cpp index 6852f08b6..9927fb62f 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatSensei.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatSensei.cpp @@ -2,7 +2,7 @@ #include "WarpX.H" -#ifdef BL_USE_SENSEI_INSITU +#ifdef AMREX_USE_SENSEI_INSITU # include #endif @@ -16,7 +16,7 @@ FlushFormatSensei::FlushFormatSensei (amrex::AmrMesh *amr_mesh, m_insitu_config(), m_insitu_pin_mesh(0), m_insitu_bridge(nullptr), m_amr_mesh(amr_mesh) { -#ifndef BL_USE_SENSEI_INSITU +#ifndef AMREX_USE_SENSEI_INSITU amrex::ignore_unused(m_insitu_pin_mesh, m_insitu_bridge, m_amr_mesh, diag_name); #else amrex::ParmParse pp_diag_name(diag_name); @@ -41,7 +41,7 @@ FlushFormatSensei::FlushFormatSensei (amrex::AmrMesh *amr_mesh, FlushFormatSensei::~FlushFormatSensei () { -#ifdef BL_USE_SENSEI_INSITU +#ifdef AMREX_USE_SENSEI_INSITU delete m_insitu_bridge; #endif } @@ -58,7 +58,7 @@ FlushFormatSensei::WriteToFile ( bool /*isBTD*/, int /*snapshotID*/, const amrex::Geometry& /*full_BTD_snapshot*/, bool /*isLastBTDFlush*/) const { -#ifndef BL_USE_SENSEI_INSITU +#ifndef AMREX_USE_SENSEI_INSITU (void)varnames; (void)mf; (void)geom; @@ -72,6 +72,8 @@ FlushFormatSensei::WriteToFile ( (void)plot_raw_rho; (void)plot_raw_F; #else + WARPX_PROFILE("FlushFormatSensei::WriteToFile()"); + amrex::Vector *mf_ptr = const_cast*>(&mf); @@ 
-90,7 +92,7 @@ void FlushFormatSensei::WriteParticles ( const amrex::Vector& particle_diags) const { -#ifndef BL_USE_SENSEI_INSITU +#ifndef AMREX_USE_SENSEI_INSITU (void)particle_diags; #else amrex::ErrorStream() << "FlushFormatSensei::WriteParticles : " diff --git a/Source/Diagnostics/FlushFormats/FlushFormat_fwd.H b/Source/Diagnostics/FlushFormats/FlushFormat_fwd.H index 53890ddbd..85812f790 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormat_fwd.H +++ b/Source/Diagnostics/FlushFormats/FlushFormat_fwd.H @@ -1,8 +1,13 @@ -/* Copyright 2021 Luca Fedeli +/* Copyright 2021 Luca Fedeli, Axel Huebl * * This file is part of WarpX. * * License: BSD-3-Clause-LBNL */ +#ifndef WARPX_FLUSH_FORMAT_FWD_H +#define WARPX_FLUSH_FORMAT_FWD_H + class FlushFormat; + +#endif /* WARPX_FLUSH_FORMAT_FWD_H */ diff --git a/Source/Diagnostics/FullDiagnostics.cpp b/Source/Diagnostics/FullDiagnostics.cpp index ef73621b5..ba72662de 100644 --- a/Source/Diagnostics/FullDiagnostics.cpp +++ b/Source/Diagnostics/FullDiagnostics.cpp @@ -452,21 +452,8 @@ FullDiagnostics::InitializeFieldFunctors (int lev) } else if ( m_varnames[comp] == "jz" ){ m_all_field_functors[lev][comp] = std::make_unique(warpx.get_pointer_current_fp(lev, 2), lev, m_crse_ratio); } else if ( m_varnames[comp] == "rho" ){ - if ( WarpX::do_back_transformed_diagnostics ) { - if ( WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD ) { - // rho_new is stored in component 1 of rho_fp when using PSATD - amrex::MultiFab *rho_new = new amrex::MultiFab(*warpx.get_pointer_rho_fp(lev), amrex::make_alias, - 1, 1); - m_all_field_functors[lev][comp] = std::make_unique(rho_new, lev, m_crse_ratio); - } else { - m_all_field_functors[lev][comp] = std::make_unique(warpx.get_pointer_rho_fp(lev), - lev, m_crse_ratio); - } - } - else { - // Initialize rho functor to dump total rho - m_all_field_functors[lev][comp] = std::make_unique(lev, m_crse_ratio); - } + // Initialize rho functor to dump total rho + m_all_field_functors[lev][comp] = std::make_unique(lev, m_crse_ratio); } else if ( m_varnames[comp].rfind("rho_", 0) == 0 ){ // Initialize rho functor to dump rho per species m_all_field_functors[lev][comp] = std::make_unique(lev, m_crse_ratio, m_rho_per_species_index[i]); diff --git a/Source/Diagnostics/MultiDiagnostics_fwd.H b/Source/Diagnostics/MultiDiagnostics_fwd.H index 195174bb9..8931960bd 100644 --- a/Source/Diagnostics/MultiDiagnostics_fwd.H +++ b/Source/Diagnostics/MultiDiagnostics_fwd.H @@ -1,8 +1,13 @@ -/* Copyright 2021 Luca Fedeli +/* Copyright 2021 Luca Fedeli, Axel Huebl * * This file is part of WarpX. * * License: BSD-3-Clause-LBNL */ +#ifndef WARPX_MULTI_DIAGNOSTICS_FWD_H +#define WARPX_MULTI_DIAGNOSTICS_FWD_H + class MultiDiagnostics; + +#endif /* WARPX_MULTI_DIAGNOSTICS_FWD_H */ diff --git a/Source/Diagnostics/ParticleDiag/ParticleDiag_fwd.H b/Source/Diagnostics/ParticleDiag/ParticleDiag_fwd.H index 99cecb219..4763bd290 100644 --- a/Source/Diagnostics/ParticleDiag/ParticleDiag_fwd.H +++ b/Source/Diagnostics/ParticleDiag/ParticleDiag_fwd.H @@ -1,10 +1,13 @@ -/* Copyright 2021 Luca Fedeli +/* Copyright 2021 Luca Fedeli, Axel Huebl * * This file is part of WarpX. 
* * License: BSD-3-Clause-LBNL */ -class ParticleDiag; +#ifndef WARPX_PARTICLE_DIAG_FWD_H +#define WARPX_PARTICLE_DIAG_FWD_H +class ParticleDiag; +#endif /* WARPX_PARTICLE_DIAG_FWD_H */ diff --git a/Source/Diagnostics/ReducedDiags/MultiReducedDiags_fwd.H b/Source/Diagnostics/ReducedDiags/MultiReducedDiags_fwd.H index c3b9c1f51..56bed9c1c 100644 --- a/Source/Diagnostics/ReducedDiags/MultiReducedDiags_fwd.H +++ b/Source/Diagnostics/ReducedDiags/MultiReducedDiags_fwd.H @@ -1,8 +1,13 @@ -/* Copyright 2021 Luca Fedeli +/* Copyright 2021 Luca Fedeli, Axel Huebl * * This file is part of WarpX. * * License: BSD-3-Clause-LBNL */ +#ifndef WARPX_MULTIREDUCEDDIAGS_FWD_H +#define WARPX_MULTIREDUCEDDIAGS_FWD_H + class MultiReducedDiags; + +#endif /* WARPX_MULTIREDUCEDDIAGS_FWD_H */ diff --git a/Source/Diagnostics/WarpXIO.cpp b/Source/Diagnostics/WarpXIO.cpp index 84ff56866..cb4eb76a0 100644 --- a/Source/Diagnostics/WarpXIO.cpp +++ b/Source/Diagnostics/WarpXIO.cpp @@ -14,7 +14,7 @@ #include "Utils/WarpXProfilerWrapper.H" #include "WarpX.H" -#ifdef BL_USE_SENSEI_INSITU +#ifdef AMREX_USE_SENSEI_INSITU # include #endif #include @@ -173,6 +173,9 @@ WarpX::InitFromCheckpoint () if (do_moving_window_before_restart) { moving_window_x = moving_window_x_checkpoint; } + + is >> time_of_last_gal_shift; + GotoNextLine(is); } const int nlevs = finestLevel()+1; @@ -281,7 +284,7 @@ WarpX::InitFromCheckpoint () } } - // Initilize particles + // Initialize particles mypc->AllocData(); mypc->Restart(restart_chkfile); diff --git a/Source/Diagnostics/WarpXOpenPMD.H b/Source/Diagnostics/WarpXOpenPMD.H index 4d5c410ad..925e2a6f3 100644 --- a/Source/Diagnostics/WarpXOpenPMD.H +++ b/Source/Diagnostics/WarpXOpenPMD.H @@ -1,4 +1,4 @@ -/* Copyright 2019-2020 Axel Huebl, Junmin Gu, Maxence Thevenet +/* Copyright 2019-2021 Axel Huebl, Junmin Gu, Maxence Thevenet * * * This file is part of WarpX. 
@@ -9,6 +9,8 @@ #define WARPX_OPEN_PMD_H_ #include "Particles/WarpXParticleContainer.H" +#include "Particles/ParticleBuffer.H" +#include "Diagnostics/FlushFormats/FlushFormat.H" #include "Diagnostics/ParticleDiag/ParticleDiag_fwd.H" @@ -40,8 +42,8 @@ class Timer { public: - Timer(const char* tag) {m_Tag = tag; m_Start = amrex::second();} - ~Timer() { + Timer (const char* tag) {m_Tag = tag; m_Start = amrex::second();} + ~Timer () { m_End = amrex::second(); amrex::ParallelDescriptor::ReduceRealMax(m_End, amrex::ParallelDescriptor::IOProcessorNumber()); amrex::Print()<; + using ParticleContainer = typename ParticleBuffer::BufferType; using ParticleIter = typename amrex::ParIter<0, 0, PIdx::nattribs, 0, amrex::PinnedArenaAllocator>; - WarpXParticleCounter(ParticleContainer* pc); - unsigned long GetTotalNumParticles() {return m_Total;} + WarpXParticleCounter (ParticleContainer* pc); + unsigned long GetTotalNumParticles () {return m_Total;} std::vector m_ParticleOffsetAtRank; std::vector m_ParticleSizeAtRank; @@ -73,7 +75,7 @@ private: * @param[out] offset particle offset over all, mpi-global amrex fabs * @param[out] sum number of all particles from all amrex fabs */ - void GetParticleOffsetOfProcessor(const long& numParticles, + void GetParticleOffsetOfProcessor (const long& numParticles, unsigned long long& offset, unsigned long long& sum) const ; @@ -127,7 +129,8 @@ public: */ void CloseStep (bool isBTD = false, bool isLastBTDFlush = false); - void WriteOpenPMDParticles (const amrex::Vector& particle_diags); + void WriteOpenPMDParticles (const amrex::Vector& particle_diags, + const bool isBTD = false); void WriteOpenPMDFieldsAll ( const std::vector& varnames, @@ -142,18 +145,23 @@ private: void Init (openPMD::Access access, bool isBTD); - inline openPMD::Iteration& GetIteration(int iteration) const + /** Get the openPMD::Iteration object of the current Series + * + * We use this helper function to differentiate between efficient, temporally + * sequentially increasing writes to iteration numbers and random-access + * writes to iterations, e.g., as needed for back-transformed diagnostics. + * + * @param[in] iteration iteration number (lab-frame for BTD) + * @param[in] isBTD is this a backtransformed diagnostics write? 
+ * @return the iteration object + */ + inline openPMD::Iteration GetIteration (int const iteration, bool const isBTD) const { - // so BTD will be able to revisit previous steps, so we do not use steps with these two encodings, - if ( (openPMD::IterationEncoding::fileBased == m_Encoding ) || - (openPMD::IterationEncoding::groupBased == m_Encoding ) ) + if (isBTD) { - openPMD::Iteration& it = m_Series->iterations[iteration]; - return it; + return m_Series->iterations[iteration]; } else { - auto iterations = m_Series->writeIterations(); - openPMD::Iteration& it = iterations[iteration]; - return it; + return m_Series->writeIterations()[iteration]; } } @@ -162,17 +170,23 @@ private: * @param[in] meshes The meshes in a series * @param[in] full_geom The geometry */ - void SetupFields( openPMD::Container< openPMD::Mesh >& meshes, amrex::Geometry& full_geom ) const; - - void SetupMeshComp( openPMD::Mesh& mesh, - amrex::Geometry& full_geom, - openPMD::MeshRecordComponent& mesh_comp - ) const; - - void GetMeshCompNames( int meshLevel, - const std::string& varname, - std::string& field_name, - std::string& comp_name ) const; + void SetupFields ( + openPMD::Container< openPMD::Mesh >& meshes, + amrex::Geometry& full_geom + ) const; + + void SetupMeshComp ( + openPMD::Mesh& mesh, + amrex::Geometry& full_geom, + openPMD::MeshRecordComponent& mesh_comp + ) const; + + void GetMeshCompNames ( + int meshLevel, + const std::string& varname, + std::string& field_name, + std::string& comp_name + ) const; /** This function sets up the entries for storing the particle positions, global IDs, and constant records (charge, mass) * @@ -232,6 +246,7 @@ private: * @param[in] int_comp_names The int attribute names, from WarpX * @param[in] charge Charge of the particles (note: fix for ions) * @param[in] mass Mass of the particles + * @param[in] isBTD is this a backtransformed diagnostics write? */ void DumpToFile (ParticleContainer* pc, const std::string& name, @@ -241,7 +256,8 @@ private: const amrex::Vector& real_comp_names, const amrex::Vector& int_comp_names, amrex::ParticleReal const charge, - amrex::ParticleReal const mass) const; + amrex::ParticleReal const mass, + const bool isBTD) const; /** Get the openPMD-api filename for openPMD::Series * diff --git a/Source/Diagnostics/WarpXOpenPMD.cpp b/Source/Diagnostics/WarpXOpenPMD.cpp index 19d8a42f8..2b374e737 100644 --- a/Source/Diagnostics/WarpXOpenPMD.cpp +++ b/Source/Diagnostics/WarpXOpenPMD.cpp @@ -1,4 +1,4 @@ -/* Copyright 2019-2020 Axel Huebl, Junmin Gu +/* Copyright 2019-2021 Axel Huebl, Junmin Gu * * This file is part of WarpX. 
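The new `GetIteration(iteration, isBTD)` helper above separates forward-marching writes, which go through `writeIterations()`, from the random-access writes that back-transformed diagnostics need when they revisit lab-frame snapshots. The same distinction exists in the openPMD-api Python bindings (0.14 and newer), so a rough Python rendition of the helper, offered only as a sketch, looks like:

```python
import openpmd_api as io

def get_iteration(series, step, is_btd):
    # BTD revisits lab-frame snapshots out of order, so it needs random
    # access into series.iterations; regular output advances monotonically
    # and can use the streaming-friendly write_iterations() interface.
    if is_btd:
        return series.iterations[step]
    return series.write_iterations()[step]

series = io.Series("diags/example_%T.h5", io.Access.create)
it = get_iteration(series, 10, is_btd=False)
it.close()  # mirrors CloseStep(): no further updates will be written here
```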
* @@ -35,6 +35,7 @@ #include #include +#include #include #include #include @@ -317,7 +318,7 @@ WarpXOpenPMDPlot::WarpXOpenPMDPlot ( m_OpenPMDoptions = detail::getSeriesOptions(operator_type, operator_parameters); } -WarpXOpenPMDPlot::~WarpXOpenPMDPlot() +WarpXOpenPMDPlot::~WarpXOpenPMDPlot () { if( m_Series ) { @@ -374,7 +375,7 @@ void WarpXOpenPMDPlot::CloseStep (bool isBTD, bool isLastBTDFlush) if (isBTD and !isLastBTDFlush) callClose = false; if (callClose) { if (m_Series) { - GetIteration(m_CurrentStep).close(); + GetIteration(m_CurrentStep, isBTD).close(); } // create a little helper file for ParaView 5.9+ @@ -442,13 +443,14 @@ WarpXOpenPMDPlot::Init (openPMD::Access access, bool isBTD) } void -WarpXOpenPMDPlot::WriteOpenPMDParticles (const amrex::Vector& particle_diags) +WarpXOpenPMDPlot::WriteOpenPMDParticles (const amrex::Vector& particle_diags, + const bool isBTD) { WARPX_PROFILE("WarpXOpenPMDPlot::WriteOpenPMDParticles()"); for (unsigned i = 0, n = particle_diags.size(); i < n; ++i) { WarpXParticleContainer* pc = particle_diags[i].getParticleContainer(); - ParticleContainer tmp(&WarpX::GetInstance()); + auto tmp = ParticleBuffer::getTmpPC(pc); // names of amrex::Real and int particle attributes in SoA data amrex::Vector real_names; amrex::Vector int_names; @@ -468,9 +470,6 @@ WarpXOpenPMDPlot::WriteOpenPMDParticles (const amrex::Vector& part real_names.push_back("theta"); #endif - // add runtime real comps to tmp - for (int ic = 0; ic < pc->NumRuntimeRealComps(); ++ic) { tmp.AddRealComp(false); } - // get the names of the real comps real_names.resize(pc->NumRealComps()); auto runtime_rnames = pc->getParticleRuntimeComps(); @@ -483,9 +482,6 @@ WarpXOpenPMDPlot::WriteOpenPMDParticles (const amrex::Vector& part real_flags = particle_diags[i].plot_flags; real_flags.resize(pc->NumRealComps(), 1); - // add runtime int comps to tmp - for (int ic = 0; ic < pc->NumRuntimeIntComps(); ++ic) { tmp.AddIntComp(false); } - // and the names int_names.resize(pc->NumIntComps()); auto runtime_inames = pc->getParticleRuntimeiComps(); @@ -530,7 +526,8 @@ WarpXOpenPMDPlot::WriteOpenPMDParticles (const amrex::Vector& part real_flags, int_flags, real_names, int_names, - pc->getCharge(), pc->getMass() + pc->getCharge(), pc->getMass(), + isBTD ); } @@ -548,12 +545,13 @@ WarpXOpenPMDPlot::DumpToFile (ParticleContainer* pc, const amrex::Vector& real_comp_names, const amrex::Vector& int_comp_names, amrex::ParticleReal const charge, - amrex::ParticleReal const mass) const + amrex::ParticleReal const mass, + const bool isBTD) const { AMREX_ALWAYS_ASSERT_WITH_MESSAGE(m_Series != nullptr, "openPMD: series must be initialized"); WarpXParticleCounter counter(pc); - openPMD::Iteration& currIteration = GetIteration(iteration); + openPMD::Iteration currIteration = GetIteration(iteration, isBTD); openPMD::ParticleSpecies currSpecies = currIteration.particles[name]; // meta data for ED-PIC extension @@ -850,7 +848,7 @@ WarpXOpenPMDPlot::SaveRealProperty (ParticleIter& pti, void -WarpXOpenPMDPlot::SetupPos( +WarpXOpenPMDPlot::SetupPos ( openPMD::ParticleSpecies& currSpecies, const unsigned long long& np, amrex::ParticleReal const charge, @@ -894,15 +892,15 @@ WarpXOpenPMDPlot::SetupPos( /* - * Set up paramter for mesh container using the geometry (from level 0) + * Set up parameter for mesh container using the geometry (from level 0) * * @param [IN] meshes: openPMD-api mesh container * @param [IN] full_geom: field geometry * */ void -WarpXOpenPMDPlot::SetupFields( openPMD::Container< openPMD::Mesh >& meshes, - 
amrex::Geometry& full_geom ) const +WarpXOpenPMDPlot::SetupFields ( openPMD::Container< openPMD::Mesh >& meshes, + amrex::Geometry& full_geom ) const { // meta data for ED-PIC extension auto const period = full_geom.periodicity(); // TODO double-check: is this the proper global bound or of some level? @@ -973,9 +971,9 @@ WarpXOpenPMDPlot::SetupFields( openPMD::Container< openPMD::Mesh >& meshes, * @param [IN]: mesh_comp a component for the mesh */ void -WarpXOpenPMDPlot::SetupMeshComp( openPMD::Mesh& mesh, +WarpXOpenPMDPlot::SetupMeshComp (openPMD::Mesh& mesh, amrex::Geometry& full_geom, - openPMD::MeshRecordComponent& mesh_comp ) const + openPMD::MeshRecordComponent& mesh_comp) const { amrex::Box const & global_box = full_geom.Domain(); auto const global_size = getReversedVec(global_box.size()); @@ -1009,10 +1007,10 @@ WarpXOpenPMDPlot::SetupMeshComp( openPMD::Mesh& mesh, * @param comp_name [OUT]: comp name for openPMD-api output */ void -WarpXOpenPMDPlot::GetMeshCompNames( int meshLevel, +WarpXOpenPMDPlot::GetMeshCompNames (int meshLevel, const std::string& varname, std::string& field_name, - std::string& comp_name ) const + std::string& comp_name) const { if (varname.size() >= 2u ) { std::string const varname_1st = varname.substr(0u, 1u); // 1st character @@ -1061,7 +1059,10 @@ WarpXOpenPMDPlot::WriteOpenPMDFieldsAll ( //const std::string& filename, bool const first_write_to_iteration = ! m_Series->iterations.contains( iteration ); // meta data - openPMD::Iteration& series_iteration = GetIteration(m_CurrentStep); + openPMD::Iteration series_iteration = GetIteration(m_CurrentStep, isBTD); + + // collective open + series_iteration.open(); auto meshes = series_iteration.meshes; if (first_write_to_iteration) { @@ -1131,7 +1132,7 @@ WarpXOpenPMDPlot::WriteOpenPMDFieldsAll ( //const std::string& filename, // // // -WarpXParticleCounter::WarpXParticleCounter(ParticleContainer* pc) +WarpXParticleCounter::WarpXParticleCounter (ParticleContainer* pc) { m_MPISize = amrex::ParallelDescriptor::NProcs(); m_MPIRank = amrex::ParallelDescriptor::MyProc(); @@ -1178,11 +1179,11 @@ WarpXParticleCounter::WarpXParticleCounter(ParticleContainer* pc) // sum of all particles in the comm // void -WarpXParticleCounter::GetParticleOffsetOfProcessor(const long& numParticles, - unsigned long long& offset, - unsigned long long& sum) const - - +WarpXParticleCounter::GetParticleOffsetOfProcessor ( + const long& numParticles, + unsigned long long& offset, + unsigned long long& sum +) const { offset = 0; #if defined(AMREX_USE_MPI) diff --git a/Source/Diagnostics/requirements.txt b/Source/Diagnostics/requirements.txt index 89d14e604..8d9176f92 100644 --- a/Source/Diagnostics/requirements.txt +++ b/Source/Diagnostics/requirements.txt @@ -1,8 +1,8 @@ -# Copyright 2020 Axel Huebl +# Copyright 2020-2021 Axel Huebl # # This file is part of WarpX. 
# # License: BSD-3-Clause-LBNL # keep this entry for GitHub's dependency graph -openPMD-api>=0.12.0 +openPMD-api>=0.14.2 diff --git a/Source/EmbeddedBoundary/DistanceToEB.H b/Source/EmbeddedBoundary/DistanceToEB.H index ccdc30cf9..ff43aed5b 100644 --- a/Source/EmbeddedBoundary/DistanceToEB.H +++ b/Source/EmbeddedBoundary/DistanceToEB.H @@ -12,6 +12,8 @@ #include #include +#ifdef AMREX_USE_EB + namespace DistanceToEB { @@ -30,71 +32,6 @@ void normalize (amrex::RealVect& a) noexcept a[2] *= inv_norm); } -AMREX_GPU_HOST_DEVICE AMREX_INLINE -void compute_weights (const amrex::ParticleReal xp, - const amrex::ParticleReal yp, - const amrex::ParticleReal zp, - amrex::GpuArray const& plo, - amrex::GpuArray const& dxi, - int& i, int& j, int& k, amrex::Real W[AMREX_SPACEDIM][2]) noexcept -{ -#if (defined WARPX_DIM_3D) - amrex::Real x = (xp - plo[0]) * dxi[0]; - amrex::Real y = (yp - plo[1]) * dxi[1]; - amrex::Real z = (zp - plo[2]) * dxi[2]; - - i = static_cast(amrex::Math::floor(x)); - j = static_cast(amrex::Math::floor(y)); - k = static_cast(amrex::Math::floor(z)); - - W[0][1] = x - i; - W[1][1] = y - j; - W[2][1] = z - k; - - W[0][0] = 1.0 - W[0][1]; - W[1][0] = 1.0 - W[1][1]; - W[2][0] = 1.0 - W[2][1]; -#elif (defined WARPX_DIM_XZ) - amrex::Real x = (xp - plo[0]) * dxi[0]; - amrex::Real z = (zp - plo[1]) * dxi[1]; - - i = static_cast(amrex::Math::floor(x)); - j = static_cast(amrex::Math::floor(z)); - k = 0; - - W[0][1] = x - i; - W[1][1] = z - j; - - W[0][0] = 1.0 - W[0][1]; - W[1][0] = 1.0 - W[1][1]; - - amrex::ignore_unused(yp); -#endif -} - -AMREX_GPU_HOST_DEVICE AMREX_INLINE -amrex::Real interp_distance (int i, int j, int k, const amrex::Real W[AMREX_SPACEDIM][2], - amrex::Array4 const& phi) noexcept -{ - amrex::Real phi_value = 0; -#if (defined WARPX_DIM_3D) - phi_value += phi(i, j , k ) * W[0][0] * W[1][0] * W[2][0]; - phi_value += phi(i+1, j , k ) * W[0][1] * W[1][0] * W[2][0]; - phi_value += phi(i, j+1, k ) * W[0][0] * W[1][1] * W[2][0]; - phi_value += phi(i+1, j+1, k ) * W[0][1] * W[1][1] * W[2][0]; - phi_value += phi(i, j , k+1) * W[0][0] * W[1][0] * W[2][1]; - phi_value += phi(i+1, j , k+1) * W[0][1] * W[1][0] * W[2][1]; - phi_value += phi(i , j+1, k+1) * W[0][0] * W[1][1] * W[2][1]; - phi_value += phi(i+1, j+1, k+1) * W[0][1] * W[1][1] * W[2][1]; -#elif (defined WARPX_DIM_XZ) - phi_value += phi(i, j , k) * W[0][0] * W[1][0]; - phi_value += phi(i+1, j , k) * W[0][1] * W[1][0]; - phi_value += phi(i, j+1, k) * W[0][0] * W[1][1]; - phi_value += phi(i+1, j+1, k) * W[0][1] * W[1][1]; -#endif - return phi_value; -} - AMREX_GPU_HOST_DEVICE AMREX_INLINE amrex::RealVect interp_normal (int i, int j, int k, const amrex::Real W[AMREX_SPACEDIM][2], amrex::Array4 const& phi, @@ -141,10 +78,14 @@ amrex::RealVect interp_normal (int i, int j, int k, const amrex::Real W[AMREX_SP normal[1] += phi(i , j+1, k) * dxi[1] * W[0][0]; normal[1] -= phi(i+1, j , k) * dxi[1] * W[0][1]; normal[1] += phi(i+1, j+1, k) * dxi[1] * W[0][1]; +#else + amrex::RealVect normal{0.0, 0.0}; + amrex::ignore_unused(i, j, k, W, phi, dxi); + amrex::Abort("Error: interp_distance not yet implemented in RZ"); #endif return normal; } } - -#endif +#endif // AMREX_USE_EB +#endif // DISTANCETOEB_H_ diff --git a/Source/EmbeddedBoundary/ParticleScraper.H b/Source/EmbeddedBoundary/ParticleScraper.H index 9c2bd6552..283bf62da 100644 --- a/Source/EmbeddedBoundary/ParticleScraper.H +++ b/Source/EmbeddedBoundary/ParticleScraper.H @@ -12,6 +12,7 @@ #include #include +#include "Particles/Gather/ScalarFieldGather.H" /** * \brief Interact particles 
with the embedded boundary walls. @@ -164,9 +165,9 @@ scrapeParticles (PC& pc, const amrex::Vector& distance_t int i, j, k; amrex::Real W[AMREX_SPACEDIM][2]; - DistanceToEB::compute_weights(xp, yp, zp, plo, dxi, i, j, k, W); + compute_weights_nodal(xp, yp, zp, plo, dxi, i, j, k, W); - amrex::Real phi_value = DistanceToEB::interp_distance(i, j, k, W, phi); + amrex::Real phi_value = interp_field_nodal(i, j, k, W, phi); amrex::RealVect normal = DistanceToEB::interp_normal(i, j, k, W, phi, dxi); // the closest point on the surface to pos is pos - grad phi(pos) * phi(pos) diff --git a/Source/EmbeddedBoundary/WarpXInitEB.cpp b/Source/EmbeddedBoundary/WarpXInitEB.cpp index 17b1d64f3..0157c7e3a 100644 --- a/Source/EmbeddedBoundary/WarpXInitEB.cpp +++ b/Source/EmbeddedBoundary/WarpXInitEB.cpp @@ -115,26 +115,23 @@ WarpX::ComputeEdgeLengths () { auto const &flags = eb_fact.getMultiEBCellFlagFab(); auto const &edge_centroid = eb_fact.getEdgeCent(); for (amrex::MFIter mfi(flags); mfi.isValid(); ++mfi){ - amrex::Box const &box = mfi.validbox(); - amrex::FabType fab_type = flags[mfi].getType(box); for (int idim = 0; idim < AMREX_SPACEDIM; ++idim){ + amrex::Box const &box = mfi.tilebox(m_edge_lengths[maxLevel()][idim]->ixType().toIntVect()); + amrex::FabType fab_type = flags[mfi].getType(box); auto const &edge_lengths_dim = m_edge_lengths[maxLevel()][idim]->array(mfi); if (fab_type == amrex::FabType::regular) { // every cell in box is all regular - amrex::LoopOnCpu(amrex::convert(box, amrex::Box(edge_lengths_dim).ixType()), - [=](int i, int j, int k) { + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { edge_lengths_dim(i, j, k) = 1.; }); } else if (fab_type == amrex::FabType::covered) { // every cell in box is all covered - amrex::LoopOnCpu(amrex::convert(box, amrex::Box(edge_lengths_dim).ixType()), - [=](int i, int j, int k) { + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { edge_lengths_dim(i, j, k) = 0.; }); } else { auto const &edge_cent = edge_centroid[idim]->const_array(mfi); - amrex::LoopOnCpu(amrex::convert(box, amrex::Box(edge_cent).ixType()), - [=](int i, int j, int k) { + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { if (edge_cent(i, j, k) == amrex::Real(-1.0)) { // This edge is all covered edge_lengths_dim(i, j, k) = 0.; @@ -167,26 +164,23 @@ WarpX::ComputeFaceAreas () { auto const &area_frac = eb_fact.getAreaFrac(); for (amrex::MFIter mfi(flags); mfi.isValid(); ++mfi) { - amrex::Box const &box = mfi.validbox(); - amrex::FabType fab_type = flags[mfi].getType(box); for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { + amrex::Box const &box = mfi.tilebox(m_face_areas[maxLevel()][idim]->ixType().toIntVect()); + amrex::FabType fab_type = flags[mfi].getType(box); auto const &face_areas_dim = m_face_areas[maxLevel()][idim]->array(mfi); if (fab_type == amrex::FabType::regular) { // every cell in box is all regular - amrex::LoopOnCpu(amrex::convert(box, amrex::Box(face_areas_dim).ixType()), - [=](int i, int j, int k) { + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { face_areas_dim(i, j, k) = amrex::Real(1.); }); } else if (fab_type == amrex::FabType::covered) { // every cell in box is all covered - amrex::LoopOnCpu(amrex::convert(box, amrex::Box(face_areas_dim).ixType()), - [=](int i, int j, int k) { + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { face_areas_dim(i, j, k) = amrex::Real(0.); }); } else { auto const &face = area_frac[idim]->const_array(mfi); - 
amrex::LoopOnCpu(amrex::convert(box, amrex::Box(face).ixType()), - [=](int i, int j, int k) { + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { face_areas_dim(i, j, k) = face(i, j, k); }); } @@ -208,11 +202,10 @@ WarpX::ScaleEdges () { auto const &flags = eb_fact.getMultiEBCellFlagFab(); for (amrex::MFIter mfi(flags); mfi.isValid(); ++mfi) { - amrex::Box const &box = mfi.validbox(); for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { + amrex::Box const &box = mfi.tilebox(m_edge_lengths[maxLevel()][idim]->ixType().toIntVect()); auto const &edge_lengths_dim = m_edge_lengths[maxLevel()][idim]->array(mfi); - amrex::LoopOnCpu(amrex::convert(box, amrex::Box(edge_lengths_dim).ixType()), - [=](int i, int j, int k) { + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { edge_lengths_dim(i, j, k) *= cell_size[idim]; }); } @@ -235,8 +228,8 @@ WarpX::ScaleAreas() { auto const &flags = eb_fact.getMultiEBCellFlagFab(); for (amrex::MFIter mfi(flags); mfi.isValid(); ++mfi) { - amrex::Box const &box = mfi.validbox(); for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { + amrex::Box const &box = mfi.tilebox(m_face_areas[maxLevel()][idim]->ixType().toIntVect()); if (idim == 0) { full_area = cell_size[1]*cell_size[2]; } else if (idim == 1) { @@ -245,8 +238,8 @@ WarpX::ScaleAreas() { full_area = cell_size[0]*cell_size[1]; } auto const &face_areas_dim = m_face_areas[maxLevel()][idim]->array(mfi); - amrex::LoopOnCpu(amrex::convert(box, amrex::Box(face_areas_dim).ixType()), - [=](int i, int j, int k) { + + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { face_areas_dim(i, j, k) *= full_area; }); } diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index 29508fad6..b9d0c56d0 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -23,6 +23,7 @@ #endif #include "Parallelization/GuardCellManager.H" #include "Particles/MultiParticleContainer.H" +#include "Particles/ParticleBoundaryBuffer.H" #include "Python/WarpX_py.H" #include "Utils/IntervalsParser.H" #include "Utils/WarpXAlgorithmSelection.H" @@ -265,8 +266,16 @@ WarpX::Evolve (int numsteps) mypc->ContinuousFluxInjection(dt[0]); + m_particle_boundary_buffer->gatherParticles(*mypc, amrex::GetVecOfConstPtrs(m_distance_to_eb)); + mypc->ApplyBoundaryConditions(); + // interact with particles with EB walls (if present) +#ifdef AMREX_USE_EB + AMREX_ALWAYS_ASSERT(maxLevel() == 0); + mypc->ScrapeParticles(amrex::GetVecOfConstPtrs(m_distance_to_eb)); +#endif + // Electrostatic solver: particles can move by an arbitrary number of cells if( do_electrostatic != ElectrostaticSolverAlgo::None ) { @@ -291,13 +300,10 @@ WarpX::Evolve (int numsteps) } } - // interact with particles with EB walls (if present) -#ifdef AMREX_USE_EB - AMREX_ALWAYS_ASSERT(maxLevel() == 0); - mypc->ScrapeParticles(amrex::GetVecOfConstPtrs(m_distance_to_eb)); -#endif if (sort_intervals.contains(step+1)) { - amrex::Print() << "re-sorting particles \n"; + if (verbose) { + amrex::Print() << "re-sorting particles \n"; + } mypc->SortParticlesByBin(sort_bin_size); } @@ -312,6 +318,15 @@ WarpX::Evolve (int numsteps) ComputeSpaceChargeField( reset_fields ); } + // sync up time + for (int i = 0; i <= max_level; ++i) { + t_new[i] = cur_time; + } + + // warpx_py_afterstep runs with the updated global time. It is included + // in the evolve timing. 
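The reordering above also means the Python `afterstep` hook now fires after `t_new` has been synced to the completed step and is counted in the evolve timing. A minimal usage sketch from the Python side (assuming the `pywarpx.callbacks` helpers, which this diff does not touch):

```python
from pywarpx import callbacks

def after_each_step():
    # Runs at the end of every step; with this change it executes after the
    # simulation time has been updated, and its cost shows up in the
    # reported evolve timing.
    print("completed a step")

callbacks.installafterstep(after_each_step)
```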
+ if (warpx_py_afterstep) warpx_py_afterstep(); + Real evolve_time_end_step = amrex::second(); evolve_time += evolve_time_end_step - evolve_time_beg_step; @@ -322,10 +337,6 @@ WarpX::Evolve (int numsteps) << " s; This step = " << evolve_time_end_step-evolve_time_beg_step << " s; Avg. per step = " << evolve_time/(step+1) << " s\n"; } - // sync up time - for (int i = 0; i <= max_level; ++i) { - t_new[i] = cur_time; - } /// reduced diags if (reduced_diags->m_plot_rd != 0) @@ -335,12 +346,6 @@ WarpX::Evolve (int numsteps) } multi_diags->FilterComputePackFlush( step ); - if (cur_time >= stop_time - 1.e-3*dt[0]) { - break; - } - - if (warpx_py_afterstep) warpx_py_afterstep(); - // inputs: unused parameters (e.g. typos) check after step 1 has finished if (!early_params_checked) { amrex::Print() << "\n"; // better: conditional \n based on return value @@ -348,6 +353,10 @@ WarpX::Evolve (int numsteps) early_params_checked = true; } + if (cur_time >= stop_time - 1.e-3*dt[0]) { + break; + } + // End loop on time steps } @@ -555,13 +564,13 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) // 3) Deposit rho (in rho_new, since it will be moved during the loop) if (WarpX::update_with_rho) { - // Deposit rho at relative time -dt in component 1 (rho_new) + // Deposit rho at relative time -dt // (dt[0] denotes the time step on mesh refinement level 0) - mypc->DepositCharge(rho_fp, -dt[0], 1); + mypc->DepositCharge(rho_fp, -dt[0]); // Filter, exchange boundary, and interpolate across levels SyncRho(); // Forward FFT of rho_new - PSATDForwardTransformRho(1); + PSATDForwardTransformRho(0, 1); } // 4) Deposit J if needed @@ -569,7 +578,8 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) { // Deposit J at relative time -dt with time step dt // (dt[0] denotes the time step on mesh refinement level 0) - mypc->DepositCurrent(current_fp, dt[0], -dt[0]); + auto& current = (WarpX::do_current_centering) ? current_fp_nodal : current_fp; + mypc->DepositCurrent(current, dt[0], -dt[0]); // Filter, exchange boundary, and interpolate across levels SyncCurrent(); // Forward FFT of J @@ -599,7 +609,8 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) // Deposit new J at relative time t_depose with time step dt // (dt[0] denotes the time step on mesh refinement level 0) - mypc->DepositCurrent(current_fp, dt[0], t_depose); + auto& current = (WarpX::do_current_centering) ? 
current_fp_nodal : current_fp; + mypc->DepositCurrent(current, dt[0], t_depose); // Filter, exchange boundary, and interpolate across levels SyncCurrent(); // Forward FFT of J @@ -608,12 +619,12 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) // Deposit new rho if (WarpX::update_with_rho) { - // Deposit rho at relative time (i_depose-n_depose+1)*sub_dt in component 1 (rho_new) - mypc->DepositCharge(rho_fp, (i_depose-n_depose+1)*sub_dt, 1); + // Deposit rho at relative time (i_depose-n_depose+1)*sub_dt + mypc->DepositCharge(rho_fp, (i_depose-n_depose+1)*sub_dt); // Filter, exchange boundary, and interpolate across levels SyncRho(); // Forward FFT of rho_new - PSATDForwardTransformRho(1); + PSATDForwardTransformRho(0, 1); } // Advance E,B,F,G fields in time and update the average fields diff --git a/Source/FieldSolver/ElectrostaticSolver.cpp b/Source/FieldSolver/ElectrostaticSolver.cpp index 6fe470282..16974987c 100644 --- a/Source/FieldSolver/ElectrostaticSolver.cpp +++ b/Source/FieldSolver/ElectrostaticSolver.cpp @@ -137,41 +137,33 @@ WarpX::AddSpaceChargeFieldLabFrame () "Error: RZ electrostatic only implemented for a single mode"); #endif - // Allocate fields for charge - const int num_levels = max_level + 1; - Vector > rho(num_levels); - // Use number of guard cells used for local deposition of rho - const amrex::IntVect ng = guard_cells.ng_depos_rho; + // Zero out the charge density for (int lev = 0; lev <= max_level; lev++) { - BoxArray nba = boxArray(lev); - nba.surroundingNodes(); - rho[lev] = std::make_unique(nba, dmap[lev], 1, ng); - rho[lev]->setVal(0.); + rho_fp[lev]->setVal(0.); } // Deposit particle charge density (source of Poisson solver) for (int ispecies=0; ispeciesnSpecies(); ispecies++){ WarpXParticleContainer& species = mypc->GetParticleContainer(ispecies); bool const local = true; + bool const interpolate_across_levels = false; bool const reset = false; bool const do_rz_volume_scaling = false; - species.DepositCharge(rho, local, reset, do_rz_volume_scaling); - } - for (int lev = 0; lev <= max_level; lev++) { - ApplyFilterandSumBoundaryRho (lev, lev, *rho[lev], 0, 1); + species.DepositCharge(rho_fp, local, reset, do_rz_volume_scaling, interpolate_across_levels); } #ifdef WARPX_DIM_RZ for (int lev = 0; lev <= max_level; lev++) { - ApplyInverseVolumeScalingToChargeDensity(rho[lev].get(), lev); + ApplyInverseVolumeScalingToChargeDensity(rho_fp[lev].get(), lev); } #endif + SyncRho(); // Apply filter, perform MPI exchange, interpolate across levels // beta is zero in lab frame // Todo: use simpler finite difference form with beta=0 std::array beta = {0._rt}; // Compute the potential phi, by solving the Poisson equation - computePhi( rho, phi_fp, beta, self_fields_required_precision, self_fields_max_iters, self_fields_verbosity ); + computePhi( rho_fp, phi_fp, beta, self_fields_required_precision, self_fields_max_iters, self_fields_verbosity ); // Compute the corresponding electric and magnetic field, from the potential phi computeE( Efield_fp, phi_fp, beta ); @@ -227,7 +219,7 @@ WarpX::computePhiRZ (const amrex::Vector >& rho int const verbosity) const { // Create a new geometry with the z coordinate scaled by gamma - amrex::Real const gamma = std::sqrt(1._rt/(1. - beta[2]*beta[2])); + amrex::Real const gamma = std::sqrt(1._rt/(1._rt - beta[2]*beta[2])); amrex::Vector geom_scaled(max_level + 1); for (int lev = 0; lev <= max_level; ++lev) { @@ -431,7 +423,7 @@ WarpX::computePhiCartesian (const amrex::Vector // one of the axes of the grid, i.e. 
that only *one* of the Cartesian // components of `beta` is non-negligible. linop.setSigma({AMREX_D_DECL( - 1.-beta[0]*beta[0], 1.-beta[1]*beta[1], 1.-beta[2]*beta[2])}); + 1._rt-beta[0]*beta[0], 1._rt-beta[1]*beta[1], 1._rt-beta[2]*beta[2])}); // get the EB potential at the current time std::string potential_eb_str = "0"; diff --git a/Source/FieldSolver/FiniteDifferenceSolver/ApplySilverMuellerBoundary.cpp b/Source/FieldSolver/FiniteDifferenceSolver/ApplySilverMuellerBoundary.cpp index 58a382d19..02b712679 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/ApplySilverMuellerBoundary.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/ApplySilverMuellerBoundary.cpp @@ -37,7 +37,9 @@ void FiniteDifferenceSolver::ApplySilverMuellerBoundary ( std::array< std::unique_ptr, 3 >& Efield, std::array< std::unique_ptr, 3 >& Bfield, amrex::Box domain_box, - amrex::Real const dt ) { + amrex::Real const dt, + amrex::Vector field_boundary_lo, + amrex::Vector field_boundary_hi) { // Ensure that we are using the Yee solver if (m_fdtd_algo != MaxwellSolverAlgo::Yee) { @@ -67,6 +69,11 @@ void FiniteDifferenceSolver::ApplySilverMuellerBoundary ( int const nmodes = m_nmodes; Real const rmin = m_rmin; + // Infer whether the Silver-Mueller needs to be applied in each direction + bool const apply_hi_r = (field_boundary_hi[0] == FieldBoundaryType::Absorbing_SilverMueller); + bool const apply_lo_z = (field_boundary_lo[1] == FieldBoundaryType::Absorbing_SilverMueller); + bool const apply_hi_z = (field_boundary_hi[1] == FieldBoundaryType::Absorbing_SilverMueller); + // tiling is usually set by TilingIfNotGPU() // but here, we set it to false because of potential race condition, // since we grow the tiles by one guard cell after creating them. @@ -96,12 +103,12 @@ void FiniteDifferenceSolver::ApplySilverMuellerBoundary ( [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ // At the +z boundary (innermost guard cell) - if ( j==domain_box.bigEnd(1)+1 ){ + if ( apply_hi_z && (j==domain_box.bigEnd(1)+1) ){ for (int m=0; m<2*nmodes-1; m++) Br(i,j,0,m) = coef1_z*Br(i,j,0,m) - coef2_z*Et(i,j,0,m); } // At the -z boundary (innermost guard cell) - if ( j==domain_box.smallEnd(1)-1 ){ + if ( apply_lo_z && (j==domain_box.smallEnd(1)-1) ){ for (int m=0; m<2*nmodes-1; m++) Br(i,j,0,m) = coef1_z*Br(i,j,0,m) + coef2_z*Et(i,j+1,0,m); } @@ -110,17 +117,17 @@ void FiniteDifferenceSolver::ApplySilverMuellerBoundary ( [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ // At the +z boundary (innermost guard cell) - if ( j==domain_box.bigEnd(1)+1 ){ + if ( apply_hi_z && (j==domain_box.bigEnd(1)+1) ){ for (int m=0; m<2*nmodes-1; m++) Bt(i,j,0,m) = coef1_z*Bt(i,j,0,m) + coef2_z*Er(i,j,0,m); } // At the -z boundary (innermost guard cell) - if ( j==domain_box.smallEnd(1)-1 ){ + if ( apply_lo_z && (j==domain_box.smallEnd(1)-1) ){ for (int m=0; m<2*nmodes-1; m++) Bt(i,j,0,m) = coef1_z*Bt(i,j,0,m) - coef2_z*Er(i,j+1,0,m); } // At the +r boundary (innermost guard cell) - if ( i==domain_box.bigEnd(0)+1 ){ + if ( apply_hi_r && (i==domain_box.bigEnd(0)+1) ){ // Mode 0 Bt(i,j,0,0) = coef1_r*Bt(i,j,0,0) - coef2_r*Ez(i,j,0,0) + coef3_r*CylindricalYeeAlgorithm::UpwardDz(Er, coefs_z, n_coefs_z, i, j, 0, 0); @@ -138,7 +145,7 @@ void FiniteDifferenceSolver::ApplySilverMuellerBoundary ( [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ // At the +r boundary (innermost guard cell) - if ( i==domain_box.bigEnd(0)+1 ){ + if ( apply_hi_r && (i==domain_box.bigEnd(0)+1) ){ Real const r = rmin + (i + 0.5_rt)*dr; // r on nodal point (Bz is cell-centered in r) // Mode 0 
Bz(i,j,0,0) = coef1_r*Bz(i,j,0,0) + coef2_r*Et(i,j,0,0) - coef3_r*Et(i,j,0,0)/r; @@ -170,6 +177,18 @@ void FiniteDifferenceSolver::ApplySilverMuellerBoundary ( amrex::Real const coef1_z = (1._rt - cdt_over_dz)/(1._rt + cdt_over_dz); amrex::Real const coef2_z = 2._rt*cdt_over_dz/(1._rt + cdt_over_dz) / PhysConst::c; + bool const apply_lo_x = (field_boundary_lo[0] == FieldBoundaryType::Absorbing_SilverMueller); + bool const apply_hi_x = (field_boundary_hi[0] == FieldBoundaryType::Absorbing_SilverMueller); +#ifdef WARPX_DIM_3D + bool const apply_lo_y = (field_boundary_lo[1] == FieldBoundaryType::Absorbing_SilverMueller); + bool const apply_hi_y = (field_boundary_hi[1] == FieldBoundaryType::Absorbing_SilverMueller); + bool const apply_lo_z = (field_boundary_lo[2] == FieldBoundaryType::Absorbing_SilverMueller); + bool const apply_hi_z = (field_boundary_hi[2] == FieldBoundaryType::Absorbing_SilverMueller); +#else + bool const apply_lo_z = (field_boundary_lo[1] == FieldBoundaryType::Absorbing_SilverMueller); + bool const apply_hi_z = (field_boundary_hi[1] == FieldBoundaryType::Absorbing_SilverMueller); +#endif + // Loop through the grids, and over the tiles within each grid #ifdef AMREX_USE_OMP #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) @@ -207,23 +226,23 @@ void FiniteDifferenceSolver::ApplySilverMuellerBoundary ( #ifdef WARPX_DIM_3D // At the +y boundary (innermost guard cell) - if ( j==domain_box.bigEnd(1)+1 ) + if ( apply_hi_y && ( j==domain_box.bigEnd(1)+1 ) ) Bx(i,j,k) = coef1_y * Bx(i,j,k) + coef2_y * Ez(i,j,k); // At the -y boundary (innermost guard cell) - if ( j==domain_box.smallEnd(1)-1 ) + if ( apply_lo_y && ( j==domain_box.smallEnd(1)-1 ) ) Bx(i,j,k) = coef1_y * Bx(i,j,k) - coef2_y * Ez(i,j+1,k); // At the +z boundary (innermost guard cell) - if ( k==domain_box.bigEnd(2)+1 ) + if ( apply_hi_z && ( k==domain_box.bigEnd(2)+1 ) ) Bx(i,j,k) = coef1_z * Bx(i,j,k) - coef2_z * Ey(i,j,k); // At the -z boundary (innermost guard cell) - if ( k==domain_box.smallEnd(2)-1 ) + if ( apply_lo_z && ( k==domain_box.smallEnd(2)-1 ) ) Bx(i,j,k) = coef1_z * Bx(i,j,k) + coef2_z * Ey(i,j,k+1); #elif WARPX_DIM_XZ // At the +z boundary (innermost guard cell) - if ( j==domain_box.bigEnd(1)+1 ) + if ( apply_hi_z && ( j==domain_box.bigEnd(1)+1 ) ) Bx(i,j,k) = coef1_z * Bx(i,j,k) - coef2_z * Ey(i,j,k); // At the -z boundary (innermost guard cell) - if ( j==domain_box.smallEnd(1)-1 ) + if ( apply_lo_z && ( j==domain_box.smallEnd(1)-1 ) ) Bx(i,j,k) = coef1_z * Bx(i,j,k) + coef2_z * Ey(i,j+1,k); #endif }, @@ -232,24 +251,24 @@ void FiniteDifferenceSolver::ApplySilverMuellerBoundary ( [=] AMREX_GPU_DEVICE (int i, int j, int k){ // At the +x boundary (innermost guard cell) - if ( i==domain_box.bigEnd(0)+1 ) + if ( apply_hi_x && ( i==domain_box.bigEnd(0)+1 ) ) By(i,j,k) = coef1_x * By(i,j,k) - coef2_x * Ez(i,j,k); // At the -x boundary (innermost guard cell) - if ( i==domain_box.smallEnd(0)-1 ) + if ( apply_lo_x && ( i==domain_box.smallEnd(0)-1 ) ) By(i,j,k) = coef1_x * By(i,j,k) + coef2_x * Ez(i+1,j,k); #ifdef WARPX_DIM_3D // At the +z boundary (innermost guard cell) - if ( k==domain_box.bigEnd(2)+1 ) + if ( apply_hi_z && ( k==domain_box.bigEnd(2)+1 ) ) By(i,j,k) = coef1_z * By(i,j,k) + coef2_z * Ex(i,j,k); // At the -z boundary (innermost guard cell) - if ( k==domain_box.smallEnd(2)-1 ) + if ( apply_lo_z && ( k==domain_box.smallEnd(2)-1 ) ) By(i,j,k) = coef1_z * By(i,j,k) - coef2_z * Ex(i,j,k+1); #elif WARPX_DIM_XZ // At the +z boundary (innermost guard cell) - if ( j==domain_box.bigEnd(1)+1 ) + if ( 
apply_hi_z && ( j==domain_box.bigEnd(1)+1 ) ) By(i,j,k) = coef1_z * By(i,j,k) + coef2_z * Ex(i,j,k); // At the -z boundary (innermost guard cell) - if ( j==domain_box.smallEnd(1)-1 ) + if ( apply_lo_z && ( j==domain_box.smallEnd(1)-1 ) ) By(i,j,k) = coef1_z * By(i,j,k) - coef2_z * Ex(i,j+1,k); #endif }, @@ -258,17 +277,17 @@ void FiniteDifferenceSolver::ApplySilverMuellerBoundary ( [=] AMREX_GPU_DEVICE (int i, int j, int k){ // At the +x boundary (innermost guard cell) - if ( i==domain_box.bigEnd(0)+1 ) + if ( apply_hi_x && ( i==domain_box.bigEnd(0)+1 ) ) Bz(i,j,k) = coef1_x * Bz(i,j,k) + coef2_x * Ey(i,j,k); // At the -x boundary (innermost guard cell) - if ( i==domain_box.smallEnd(0)-1 ) + if ( apply_lo_x && ( i==domain_box.smallEnd(0)-1 ) ) Bz(i,j,k) = coef1_x * Bz(i,j,k) - coef2_x * Ey(i+1,j,k); #ifdef WARPX_DIM_3D // At the +y boundary (innermost guard cell) - if ( j==domain_box.bigEnd(1)+1 ) + if ( apply_hi_y && ( j==domain_box.bigEnd(1)+1 ) ) Bz(i,j,k) = coef1_y * Bz(i,j,k) - coef2_y * Ex(i,j,k); // At the -y boundary (innermost guard cell) - if ( j==domain_box.smallEnd(1)-1 ) + if ( apply_lo_y && ( j==domain_box.smallEnd(1)-1 ) ) Bz(i,j,k) = coef1_y * Bz(i,j,k) + coef2_y * Ex(i,j+1,k); #endif } diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H index 5cb1906eb..443b90f19 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H @@ -72,7 +72,9 @@ class FiniteDifferenceSolver std::array< std::unique_ptr, 3 >& Efield, std::array< std::unique_ptr, 3 >& Bfield, amrex::Box domain_box, - amrex::Real const dt ); + amrex::Real const dt, + amrex::Vector field_boundary_lo, + amrex::Vector field_boundary_hi); void ComputeDivE ( const std::array,3>& Efield, amrex::MultiFab& divE ); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver_fwd.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver_fwd.H index 4ecd05116..7e8e2e7b9 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver_fwd.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver_fwd.H @@ -1,8 +1,13 @@ -/* Copyright 2021 Luca Fedeli +/* Copyright 2021 Luca Fedeli, Axel Huebl * * This file is part of WarpX. 
  *
  * License: BSD-3-Clause-LBNL
  */
 
+#ifndef WARPX_FINITE_DIFFERENCE_SOLVER_FWD_H
+#define WARPX_FINITE_DIFFERENCE_SOLVER_FWD_H
+
 class FiniteDifferenceSolver;
+
+#endif /* WARPX_FINITE_DIFFERENCE_SOLVER_FWD_H */
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.cpp b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.cpp
index c3db5a64c..45c97b882 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.cpp
+++ b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.cpp
@@ -361,6 +361,7 @@ MacroscopicProperties::InitializeMacroMultiFabUsingParser (
     IntVect iv = macro_mf->ixType().toIntVect();
     for ( MFIter mfi(*macro_mf, TilingIfNotGPU()); mfi.isValid(); ++mfi ) {
         // Initialize ghost cells in addition to valid cells
+        const Box& tb = mfi.tilebox(iv, macro_mf->nGrowVect());
         auto const& macro_fab = macro_mf->array(mfi);
         amrex::ParallelFor (tb,
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties_fwd.H b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties_fwd.H
index 2ca4662d7..5f44f49e0 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties_fwd.H
+++ b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties_fwd.H
@@ -1,8 +1,13 @@
-/* Copyright 2021 Luca Fedeli
+/* Copyright 2021 Luca Fedeli, Axel Huebl
  *
  * This file is part of WarpX.
  *
  * License: BSD-3-Clause-LBNL
  */
 
+#ifndef WARPX_MACROSCOPICPROPERIES_FWD_H
+#define WARPX_MACROSCOPICPROPERIES_FWD_H
+
 class MacroscopicProperties;
+
+#endif /* WARPX_MACROSCOPICPROPERIES_FWD_H */
diff --git a/Source/FieldSolver/SpectralSolver/AnyFFT.H b/Source/FieldSolver/SpectralSolver/AnyFFT.H
index 23cadd30f..3e23f8f9e 100644
--- a/Source/FieldSolver/SpectralSolver/AnyFFT.H
+++ b/Source/FieldSolver/SpectralSolver/AnyFFT.H
@@ -14,6 +14,9 @@
 #if defined(AMREX_USE_CUDA)
 #   include <cufft.h>
 #elif defined(AMREX_USE_HIP)
+// cstddef: work-around for ROCm/rocFFT <=4.3.0
+// https://github.com/ROCmSoftwarePlatform/rocFFT/blob/rocm-4.3.0/library/include/rocfft.h#L36-L42
+#   include <cstddef>
 #   include <rocfft.h>
 #else
 #   include <fftw3.h>
diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.H b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.H
index 07c4d8142..77f33c631 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.H
+++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.H
@@ -76,6 +76,7 @@ class PsatdAlgorithmRZ : public SpectralBaseAlgorithmRZ
     bool m_dive_cleaning;
     bool m_divb_cleaning;
     SpectralRealCoefficients C_coef, S_ck_coef, X1_coef, X2_coef, X3_coef;
+    SpectralRealCoefficients X5_coef, X6_coef;
 };
 
 #endif // WARPX_PSATD_ALGORITHM_RZ_H_
diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.cpp
index ac8ae23e0..c497ab618 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.cpp
+++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.cpp
@@ -46,8 +46,26 @@ PsatdAlgorithmRZ::PsatdAlgorithmRZ (SpectralKSpaceRZ const & spectral_kspace,
     coefficients_initialized = false;
 
-    // TODO Implement time averaging and remove this
-    amrex::ignore_unused(m_time_averaging);
+    if (time_averaging && J_linear_in_time)
+    {
+        X5_coef =
SpectralRealCoefficients(ba, dm, n_rz_azimuthal_modes, 0); + X6_coef = SpectralRealCoefficients(ba, dm, n_rz_azimuthal_modes, 0); + } + + if (time_averaging && !J_linear_in_time) + { + amrex::Abort("RZ PSATD: psatd.do_time_averaging = 1 implemented only with psatd.J_linear_in_time = 1"); + } + + if (dive_cleaning && !J_linear_in_time) + { + amrex::Abort("RZ PSATD: warpx.do_dive_cleaning = 1 implemented only with psatd.J_linear_in_time = 1"); + } + + if (divb_cleaning && !J_linear_in_time) + { + amrex::Abort("RZ PSATD: warpx.do_divb_cleaning = 1 implemented only with psatd.J_linear_in_time = 1"); + } } /* Advance the E and B field in spectral space (stored in `f`) @@ -56,7 +74,8 @@ void PsatdAlgorithmRZ::pushSpectralFields(SpectralFieldDataRZ & f) { - bool const update_with_rho = m_update_with_rho; + const bool update_with_rho = m_update_with_rho; + const bool time_averaging = m_time_averaging; const bool J_linear_in_time = m_J_linear_in_time; const bool dive_cleaning = m_dive_cleaning; const bool divb_cleaning = m_divb_cleaning; @@ -84,6 +103,14 @@ PsatdAlgorithmRZ::pushSpectralFields(SpectralFieldDataRZ & f) amrex::Array4 const& X2_arr = X2_coef[mfi].array(); amrex::Array4 const& X3_arr = X3_coef[mfi].array(); + amrex::Array4 X5_arr; + amrex::Array4 X6_arr; + if (time_averaging && J_linear_in_time) + { + X5_arr = X5_coef[mfi].array(); + X6_arr = X6_coef[mfi].array(); + } + // Extract pointers for the k vectors auto const & kr_modes = f.getKrArray(mfi); amrex::Real const* kr_arr = kr_modes.dataPtr(); @@ -125,6 +152,22 @@ PsatdAlgorithmRZ::pushSpectralFields(SpectralFieldDataRZ & f) Complex const rho_old = fields(i,j,k,rho_old_m); Complex const rho_new = fields(i,j,k,rho_new_m); + int Ep_avg_m; + int Em_avg_m; + int Ez_avg_m; + int Bp_avg_m; + int Bm_avg_m; + int Bz_avg_m; + if (time_averaging) + { + Ep_avg_m = Idx.Ex_avg + Idx.n_fields*mode; + Em_avg_m = Idx.Ey_avg + Idx.n_fields*mode; + Ez_avg_m = Idx.Ez_avg + Idx.n_fields*mode; + Bp_avg_m = Idx.Bx_avg + Idx.n_fields*mode; + Bm_avg_m = Idx.By_avg + Idx.n_fields*mode; + Bz_avg_m = Idx.Bz_avg + Idx.n_fields*mode; + } + // k vector values, and coefficients // The k values for each mode are grouped together int const ir = i + nr*mode; @@ -132,6 +175,7 @@ PsatdAlgorithmRZ::pushSpectralFields(SpectralFieldDataRZ & f) amrex::Real const kz = modified_kz_arr[j]; constexpr amrex::Real c2 = PhysConst::c*PhysConst::c; + constexpr amrex::Real ep0 = PhysConst::ep0; constexpr amrex::Real inv_ep0 = 1._rt/PhysConst::ep0; Complex const I = Complex{0._rt,1._rt}; amrex::Real const C = C_arr(i,j,k,mode); @@ -230,6 +274,52 @@ PsatdAlgorithmRZ::pushSpectralFields(SpectralFieldDataRZ & f) fields(i,j,k,G_m) = C * G_old + I * S_ck * k_dot_B; } + + if (time_averaging) + { + amrex::Real const X5 = X5_arr(i,j,k,mode); + amrex::Real const X6 = X6_arr(i,j,k,mode); + + fields(i,j,k,Ep_avg_m) += S_ck * Ep_old + + c2 * ep0 * X1 * (kz * Bp_old - I * kr * 0.5_rt * Bz_old) + - kr * 0.5_rt * (X5 * rho_old + X6 * rho_new) + X3/c2 * Jp - X2/c2 * Jp_new; + + fields(i,j,k,Em_avg_m) += S_ck * Em_old + - c2 * ep0 * X1 * (kz * Bm_old + I * kr * 0.5_rt * Bz_old) + + kr * 0.5_rt * (X5 * rho_old + X6 * rho_new) + X3/c2 * Jm - X2/c2 * Jm_new; + + fields(i,j,k,Ez_avg_m) += S_ck * Ez_old + + I * c2 * ep0 * X1 * kr * (Bp_old + Bm_old) + + I * kz * (X5 * rho_old + X6 * rho_new) + X3/c2 * Jz - X2/c2 * Jz_new; + + fields(i,j,k,Bp_avg_m) += S_ck * Bp_old + - ep0 * X1 * (kz * Ep_old - I * kr * 0.5_rt * Ez_old) + - X5/c2 * (kz * Jp - I * kr * 0.5_rt * Jz) + - X6/c2 * (kz * Jp_new - I * kr * 
0.5_rt * Jz_new); + + fields(i,j,k,Bm_avg_m) += S_ck * Bm_old + + ep0 * X1 * (kz * Em_old + I * kr * 0.5_rt * Ez_old) + + X5/c2 * (kz * Jm + I * kr * 0.5_rt * Jz) + + X6/c2 * (kz * Jm_new + I * kr * 0.5_rt * Jz_new); + + fields(i,j,k,Bz_avg_m) += S_ck * Bz_old + - I * kr * ep0 * X1 * (Ep_old + Em_old) + - I * kr * X5/c2 * (Jp + Jm) - I * kr * X6/c2 * (Jp_new + Jm_new); + + if (dive_cleaning) + { + fields(i,j,k,Ep_avg_m) += -c2 * kr * 0.5_rt * ep0 * X1 * F_old; + fields(i,j,k,Em_avg_m) += c2 * kr * 0.5_rt * ep0 * X1 * F_old; + fields(i,j,k,Ez_avg_m) += I * c2 * ep0 * X1 * F_old * kz; + } + + if (divb_cleaning) + { + fields(i,j,k,Bp_avg_m) += -c2 * kr * 0.5_rt * ep0 * X1 * G_old; + fields(i,j,k,Bm_avg_m) += c2 * kr * 0.5_rt * ep0 * X1 * G_old; + fields(i,j,k,Bz_avg_m) += I * c2 * ep0 * X1 * G_old * kz; + } + } } }); } @@ -237,6 +327,8 @@ PsatdAlgorithmRZ::pushSpectralFields(SpectralFieldDataRZ & f) void PsatdAlgorithmRZ::InitializeSpectralCoefficients (SpectralFieldDataRZ const & f) { + const bool time_averaging = m_time_averaging; + const bool J_linear_in_time = m_J_linear_in_time; // Fill them with the right values: // Loop over boxes and allocate the corresponding coefficients @@ -255,6 +347,14 @@ void PsatdAlgorithmRZ::InitializeSpectralCoefficients (SpectralFieldDataRZ const amrex::Array4 const& X2 = X2_coef[mfi].array(); amrex::Array4 const& X3 = X3_coef[mfi].array(); + amrex::Array4 X5; + amrex::Array4 X6; + if (time_averaging && J_linear_in_time) + { + X5 = X5_coef[mfi].array(); + X6 = X6_coef[mfi].array(); + } + auto const & kr_modes = f.getKrArray(mfi); amrex::Real const* kr_arr = kr_modes.dataPtr(); int const nr = bx.length(0); @@ -287,6 +387,27 @@ void PsatdAlgorithmRZ::InitializeSpectralCoefficients (SpectralFieldDataRZ const X2(i,j,k,mode) = c*c * dt*dt / (6._rt*ep0); X3(i,j,k,mode) = - c*c * dt*dt / (3._rt*ep0); } + + if (time_averaging && J_linear_in_time) + { + constexpr amrex::Real c2 = PhysConst::c; + const amrex::Real dt3 = dt * dt * dt; + const amrex::Real om = c * k_norm; + const amrex::Real om2 = om * om; + const amrex::Real om4 = om2 * om2; + + if (om != 0.0_rt) + { + X5(i,j,k) = c2 / ep0 * (S_ck(i,j,k) / om2 - (1._rt - C(i,j,k)) / (om4 * dt) + - 0.5_rt * dt / om2); + X6(i,j,k) = c2 / ep0 * ((1._rt - C(i,j,k)) / (om4 * dt) - 0.5_rt * dt / om2); + } + else + { + X5(i,j,k) = - c2 * dt3 / (8._rt * ep0); + X6(i,j,k) = - c2 * dt3 / (24._rt * ep0); + } + } }); } } diff --git a/Source/FieldSolver/SpectralSolver/SpectralFieldData_fwd.H b/Source/FieldSolver/SpectralSolver/SpectralFieldData_fwd.H index d01cc0a83..d9f923c80 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralFieldData_fwd.H +++ b/Source/FieldSolver/SpectralSolver/SpectralFieldData_fwd.H @@ -1,9 +1,14 @@ -/* Copyright 2021 Luca Fedeli +/* Copyright 2021 Luca Fedeli, Axel Huebl * * This file is part of WarpX. * * License: BSD-3-Clause-LBNL */ +#ifndef WARPX_SPECTRALFIELDDATA_FWD_H +#define WARPX_SPECTRALFIELDDATA_FWD_H + class SpectralFieldIndex; class SpectralFieldData; + +#endif /* WARPX_SPECTRALFIELDDATA_FWD_H */ diff --git a/Source/FieldSolver/SpectralSolver/SpectralKSpace_fwd.H b/Source/FieldSolver/SpectralSolver/SpectralKSpace_fwd.H index 32417ff5c..a256767d5 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralKSpace_fwd.H +++ b/Source/FieldSolver/SpectralSolver/SpectralKSpace_fwd.H @@ -1,10 +1,15 @@ -/* Copyright 2021 Luca Fedeli +/* Copyright 2021 Luca Fedeli, Axel Huebl * * This file is part of WarpX. 
  *
  * License: BSD-3-Clause-LBNL
  */
 
+#ifndef WARPX_SPECTRALKSPACE_FWD_H
+#define WARPX_SPECTRALKSPACE_FWD_H
+
 struct ShiftType;
 
 class SpectralKSpace;
+
+#endif /* WARPX_SPECTRALKSPACE_FWD_H */
diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolverRZ_fwd.H b/Source/FieldSolver/SpectralSolver/SpectralSolverRZ_fwd.H
index bfe332fe7..7814912bf 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralSolverRZ_fwd.H
+++ b/Source/FieldSolver/SpectralSolver/SpectralSolverRZ_fwd.H
@@ -1,8 +1,13 @@
-/* Copyright 2021 Luca Fedeli
+/* Copyright 2021 Luca Fedeli, Axel Huebl
  *
  * This file is part of WarpX.
  *
  * License: BSD-3-Clause-LBNL
  */
 
+#ifndef WARPX_SPECTRALSOLVERRZ_FWD_H
+#define WARPX_SPECTRALSOLVERRZ_FWD_H
+
 class SpectralSolverRZ;
+
+#endif /* WARPX_SPECTRALSOLVERRZ_FWD_H */
diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolver_fwd.H b/Source/FieldSolver/SpectralSolver/SpectralSolver_fwd.H
index fecd3bd8b..b979025f3 100644
--- a/Source/FieldSolver/SpectralSolver/SpectralSolver_fwd.H
+++ b/Source/FieldSolver/SpectralSolver/SpectralSolver_fwd.H
@@ -1,8 +1,13 @@
-/* Copyright 2021 Luca Fedeli
+/* Copyright 2021 Luca Fedeli, Axel Huebl
  *
  * This file is part of WarpX.
  *
  * License: BSD-3-Clause-LBNL
  */
 
+#ifndef WARPX_SPECTRALSOLVER_FWD_H
+#define WARPX_SPECTRALSOLVER_FWD_H
+
 class SpectralSolver;
+
+#endif /* WARPX_SPECTRALSOLVER_FWD_H */
diff --git a/Source/FieldSolver/WarpXPushFieldsEM.cpp b/Source/FieldSolver/WarpXPushFieldsEM.cpp
index 4503fddbb..f6fdab073 100644
--- a/Source/FieldSolver/WarpXPushFieldsEM.cpp
+++ b/Source/FieldSolver/WarpXPushFieldsEM.cpp
@@ -26,7 +26,7 @@
 #include "WarpX_FDTD.H"
 
 #include
-#ifdef BL_USE_SENSEI_INSITU
+#ifdef AMREX_USE_SENSEI_INSITU
 #   include
 #endif
 #include
@@ -264,12 +264,12 @@ WarpX::PSATDForwardTransformJ ()
 }
 
 void
-WarpX::PSATDForwardTransformRho (const int icomp)
+WarpX::PSATDForwardTransformRho (const int icomp, const int dcomp)
 {
     const SpectralFieldIndex& Idx = spectral_solver_fp[0]->m_spectral_index;
 
     // Select index in k space
-    const int dst_comp = (icomp == 0) ? Idx.rho_old : Idx.rho_new;
+    const int dst_comp = (dcomp == 0) ? Idx.rho_old : Idx.rho_new;
 
     for (int lev = 0; lev <= finest_level; ++lev)
     {
@@ -410,8 +410,12 @@ WarpX::PushPSATD ()
     PSATDForwardTransformEB();
     PSATDForwardTransformJ();
-    PSATDForwardTransformRho(0); // rho old
-    PSATDForwardTransformRho(1); // rho new
+    // Do rho FFTs only if needed
+    if (WarpX::update_with_rho || WarpX::current_correction || WarpX::do_dive_cleaning)
+    {
+        PSATDForwardTransformRho(0,0); // rho old
+        PSATDForwardTransformRho(1,1); // rho new
+    }
     PSATDPushSpectralFields();
     PSATDBackwardTransformEB();
     if (WarpX::fft_do_time_averaging) PSATDBackwardTransformEBavg();
diff --git a/Source/FieldSolver/WarpX_QED_Field_Pushers.cpp b/Source/FieldSolver/WarpX_QED_Field_Pushers.cpp
index 9e6cbd18c..c3d8f1c7d 100644
--- a/Source/FieldSolver/WarpX_QED_Field_Pushers.cpp
+++ b/Source/FieldSolver/WarpX_QED_Field_Pushers.cpp
@@ -11,7 +11,7 @@
 #include "WarpX_QED_K.H"
 
 #include
-#ifdef BL_USE_SENSEI_INSITU
+#ifdef AMREX_USE_SENSEI_INSITU
 #   include
 #endif
 #include
diff --git a/Source/Filter/NCIGodfreyFilter_fwd.H b/Source/Filter/NCIGodfreyFilter_fwd.H
index 33e098dde..02c5b4a1f 100644
--- a/Source/Filter/NCIGodfreyFilter_fwd.H
+++ b/Source/Filter/NCIGodfreyFilter_fwd.H
@@ -1,10 +1,15 @@
-/* Copyright 2021 Luca Fedeli
+/* Copyright 2021 Luca Fedeli, Axel Huebl
  *
  * This file is part of WarpX.
* * License: BSD-3-Clause-LBNL */ +#ifndef WARPX_NCI_GODFREY_FILTER_FWD_H +#define WARPX_NCI_GODFREY_FILTER_FWD_H + enum class godfrey_coeff_set; class NCIGodfreyFilter; + +#endif /* WARPX_NCI_GODFREY_FILTER_FWD_H */ diff --git a/Source/Initialization/InjectorPosition_fwd.H b/Source/Initialization/InjectorPosition_fwd.H index 7c585915f..c5b3d27af 100644 --- a/Source/Initialization/InjectorPosition_fwd.H +++ b/Source/Initialization/InjectorPosition_fwd.H @@ -1,10 +1,15 @@ -/* Copyright 2021 Luca Fedeli +/* Copyright 2021 Luca Fedeli, Axel Huebl * * This file is part of WarpX. * * License: BSD-3-Clause-LBNL */ +#ifndef WARPX_INJECTOR_POSITION_FWD_H +#define WARPX_INJECTOR_POSITION_FWD_H + struct InjectorPositionRandom; struct InjectorPositionRegular; struct InjectorPosition; + +#endif /* WARPX_INJECTOR_POSITION_FWD_H */ diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 26a9d3053..a7a079184 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -25,7 +25,7 @@ #include #include -#ifdef BL_USE_SENSEI_INSITU +#ifdef AMREX_USE_SENSEI_INSITU # include #endif #include @@ -119,9 +119,6 @@ WarpX::InitData () else { InitFromCheckpoint(); - if (is_synchronized) { - ComputeDt(); - } PostRestart(); } @@ -403,9 +400,6 @@ WarpX::InitFilter (){ void WarpX::PostRestart () { - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD) { - amrex::Abort("WarpX::PostRestart: TODO for PSATD"); - } mypc->PostRestart(); } diff --git a/Source/Laser/LaserProfilesImpl/LaserProfileGaussian.cpp b/Source/Laser/LaserProfilesImpl/LaserProfileGaussian.cpp index c79790159..918bc2f7a 100644 --- a/Source/Laser/LaserProfilesImpl/LaserProfileGaussian.cpp +++ b/Source/Laser/LaserProfilesImpl/LaserProfileGaussian.cpp @@ -111,10 +111,9 @@ WarpXLaserProfiles::GaussianLaserProfile::fill_amplitude ( // Time stretching due to STCs and phi2 complex envelope // (1 if zeta=0, beta=0, phi2=0) const Complex stretch_factor = 1._rt + 4._rt * - (m_params.zeta+m_params.beta*m_params.focal_distance) - * (m_params.zeta+m_params.beta*m_params.focal_distance) - * (inv_tau2*inv_complex_waist_2) + 2._rt *I * (m_params.phi2 - - m_params.beta*m_params.beta*k0*m_params.focal_distance) * inv_tau2; + (m_params.zeta+m_params.beta*m_params.focal_distance*inv_tau2) + * (m_params.zeta+m_params.beta*m_params.focal_distance*inv_complex_waist_2) + + 2._rt*I*(m_params.phi2-m_params.beta*m_params.beta*k0*m_params.focal_distance)*inv_tau2; // Amplitude and monochromatic oscillations Complex prefactor = diff --git a/Source/Make.WarpX b/Source/Make.WarpX index 90c02eded..730dff33b 100644 --- a/Source/Make.WarpX +++ b/Source/Make.WarpX @@ -149,7 +149,7 @@ endif ifeq ($(USE_OPENPMD), TRUE) # try pkg-config query - ifeq (0, $(shell pkg-config "openPMD >= 0.12.0"; echo $$?)) + ifeq (0, $(shell pkg-config "openPMD >= 0.14.2"; echo $$?)) CXXFLAGS += $(shell pkg-config --cflags openPMD) LIBRARY_LOCATIONS += $(shell pkg-config --variable=libdir openPMD) libraries += $(shell pkg-config --libs-only-l openPMD) diff --git a/Source/Parallelization/WarpXComm.cpp b/Source/Parallelization/WarpXComm.cpp index 0f4d366fe..12c86bb36 100644 --- a/Source/Parallelization/WarpXComm.cpp +++ b/Source/Parallelization/WarpXComm.cpp @@ -178,10 +178,17 @@ WarpX::UpdateAuxilaryDataStagToNodal () Btmp[i] = std::make_unique(cnba, dm, 1, ngtmp); } } + Btmp[0]->setVal(0.0); + Btmp[1]->setVal(0.0); + Btmp[2]->setVal(0.0); // ParallelCopy from coarse level for (int i = 0; i < 3; ++i) { IntVect ng = 
Btmp[i]->nGrowVect(); - Btmp[i]->ParallelCopy(*Bfield_aux[lev-1][i], 0, 0, 1, ng, ng, cperiod); + // Guard cells may not be up to date beyond ng_FieldGather + const amrex::IntVect& ng_src = guard_cells.ng_FieldGather; + // Copy Bfield_aux to Btmp, using up to ng_src (=ng_FieldGather) guard cells from + // Bfield_aux and filling up to ng (=nGrow) guard cells in Btmp + Btmp[i]->ParallelCopy(*Bfield_aux[lev-1][i], 0, 0, 1, ng_src, ng, cperiod); } #ifdef AMREX_USE_OMP @@ -228,10 +235,17 @@ WarpX::UpdateAuxilaryDataStagToNodal () cnba, dm, 1, ngtmp); } } + Etmp[0]->setVal(0.0); + Etmp[1]->setVal(0.0); + Etmp[2]->setVal(0.0); // ParallelCopy from coarse level for (int i = 0; i < 3; ++i) { IntVect ng = Etmp[i]->nGrowVect(); - Etmp[i]->ParallelCopy(*Efield_aux[lev-1][i], 0, 0, 1, ng, ng, cperiod); + // Guard cells may not be up to date beyond ng_FieldGather + const amrex::IntVect& ng_src = guard_cells.ng_FieldGather; + // Copy Efield_aux to Etmp, using up to ng_src (=ng_FieldGather) guard cells from + // Efield_aux and filling up to ng (=nGrow) guard cells in Etmp + Etmp[i]->ParallelCopy(*Efield_aux[lev-1][i], 0, 0, 1, ng_src, ng, cperiod); } #ifdef AMREX_USE_OMP @@ -282,9 +296,13 @@ WarpX::UpdateAuxilaryDataSameType () dBx.setVal(0.0); dBy.setVal(0.0); dBz.setVal(0.0); - dBx.ParallelCopy(*Bfield_aux[lev-1][0], 0, 0, Bfield_aux[lev-1][0]->nComp(), ng, ng, crse_period); - dBy.ParallelCopy(*Bfield_aux[lev-1][1], 0, 0, Bfield_aux[lev-1][1]->nComp(), ng, ng, crse_period); - dBz.ParallelCopy(*Bfield_aux[lev-1][2], 0, 0, Bfield_aux[lev-1][2]->nComp(), ng, ng, crse_period); + // Guard cells may not be up to date beyond ng_FieldGather + const amrex::IntVect& ng_src = guard_cells.ng_FieldGather; + // Copy Bfield_aux to the dB MultiFabs, using up to ng_src (=ng_FieldGather) guard + // cells from Bfield_aux and filling up to ng (=nGrow) guard cells in the dB MultiFabs + dBx.ParallelCopy(*Bfield_aux[lev-1][0], 0, 0, Bfield_aux[lev-1][0]->nComp(), ng_src, ng, crse_period); + dBy.ParallelCopy(*Bfield_aux[lev-1][1], 0, 0, Bfield_aux[lev-1][1]->nComp(), ng_src, ng, crse_period); + dBz.ParallelCopy(*Bfield_aux[lev-1][2], 0, 0, Bfield_aux[lev-1][2]->nComp(), ng_src, ng, crse_period); if (Bfield_cax[lev][0]) { MultiFab::Copy(*Bfield_cax[lev][0], dBx, 0, 0, Bfield_cax[lev][0]->nComp(), ng); @@ -340,9 +358,13 @@ WarpX::UpdateAuxilaryDataSameType () dEx.setVal(0.0); dEy.setVal(0.0); dEz.setVal(0.0); - dEx.ParallelCopy(*Efield_aux[lev-1][0], 0, 0, Efield_aux[lev-1][0]->nComp(), ng, ng, crse_period); - dEy.ParallelCopy(*Efield_aux[lev-1][1], 0, 0, Efield_aux[lev-1][1]->nComp(), ng, ng, crse_period); - dEz.ParallelCopy(*Efield_aux[lev-1][2], 0, 0, Efield_aux[lev-1][2]->nComp(), ng, ng, crse_period); + // Guard cells may not be up to date beyond ng_FieldGather + const amrex::IntVect& ng_src = guard_cells.ng_FieldGather; + // Copy Efield_aux to the dE MultiFabs, using up to ng_src (=ng_FieldGather) guard + // cells from Efield_aux and filling up to ng (=nGrow) guard cells in the dE MultiFabs + dEx.ParallelCopy(*Efield_aux[lev-1][0], 0, 0, Efield_aux[lev-1][0]->nComp(), ng_src, ng, crse_period); + dEy.ParallelCopy(*Efield_aux[lev-1][1], 0, 0, Efield_aux[lev-1][1]->nComp(), ng_src, ng, crse_period); + dEz.ParallelCopy(*Efield_aux[lev-1][2], 0, 0, Efield_aux[lev-1][2]->nComp(), ng_src, ng, crse_period); if (Efield_cax[lev][0]) { MultiFab::Copy(*Efield_cax[lev][0], dEx, 0, 0, Efield_cax[lev][0]->nComp(), ng); diff --git a/Source/Particles/CMakeLists.txt b/Source/Particles/CMakeLists.txt index efda88b88..06f8b718f 100644 --- 
a/Source/Particles/CMakeLists.txt +++ b/Source/Particles/CMakeLists.txt @@ -1,11 +1,13 @@ target_sources(WarpX PRIVATE MultiParticleContainer.cpp + ParticleBoundaries.cpp PhotonParticleContainer.cpp PhysicalParticleContainer.cpp RigidInjectedParticleContainer.cpp WarpXParticleContainer.cpp LaserParticleContainer.cpp + ParticleBoundaryBuffer.cpp ) add_subdirectory(Collision) diff --git a/Source/Particles/Collision/BinaryCollision.H b/Source/Particles/Collision/BinaryCollision.H index a51461749..089e6992d 100644 --- a/Source/Particles/Collision/BinaryCollision.H +++ b/Source/Particles/Collision/BinaryCollision.H @@ -10,6 +10,7 @@ #include "Particles/Collision/CollisionBase.H" #include "Particles/Collision/PairWiseCoulombCollisionFunc.H" #include "Particles/Collision/ShuffleFisherYates.H" +#include "Particles/Pusher/GetAndSetPosition.H" #include "Particles/MultiParticleContainer.H" #include "Particles/WarpXParticleContainer.H" #include "Utils/ParticleUtils.H" @@ -36,9 +37,9 @@ #include #include #include +#include #include #include -#include #include #include @@ -164,19 +165,13 @@ public: // Extract low-level data int const n_cells = bins_1.numBins(); // - Species 1 - auto& soa_1 = ptile_1.GetStructOfArrays(); - amrex::ParticleReal * const AMREX_RESTRICT ux_1 = - soa_1.GetRealData(PIdx::ux).data(); - amrex::ParticleReal * const AMREX_RESTRICT uy_1 = - soa_1.GetRealData(PIdx::uy).data(); - amrex::ParticleReal * const AMREX_RESTRICT uz_1 = - soa_1.GetRealData(PIdx::uz).data(); - amrex::ParticleReal const * const AMREX_RESTRICT w_1 = - soa_1.GetRealData(PIdx::w).data(); + const auto soa_1 = ptile_1.getParticleTileData(); index_type* indices_1 = bins_1.permutationPtr(); index_type const* cell_offsets_1 = bins_1.offsetsPtr(); amrex::Real q1 = species_1.getCharge(); amrex::Real m1 = species_1.getMass(); + constexpr int getpos_offset = 0; + auto get_position_1 = GetParticlePosition(ptile_1, getpos_offset); const amrex::Real dt = WarpX::GetInstance().getdt(lev); amrex::Geometry const& geom = WarpX::GetInstance().Geom(lev); @@ -218,7 +213,7 @@ public: cell_start_1, cell_half_1, cell_half_1, cell_stop_1, indices_1, indices_1, - ux_1, uy_1, uz_1, ux_1, uy_1, uz_1, w_1, w_1, + soa_1, soa_1, get_position_1, get_position_1, q1, q1, m1, m1, dt*ndt, dV, engine ); } ); @@ -238,29 +233,20 @@ public: // Extract low-level data int const n_cells = bins_1.numBins(); // - Species 1 - auto& soa_1 = ptile_1.GetStructOfArrays(); - amrex::ParticleReal * const AMREX_RESTRICT ux_1 = - soa_1.GetRealData(PIdx::ux).data(); - amrex::ParticleReal * const AMREX_RESTRICT uy_1 = - soa_1.GetRealData(PIdx::uy).data(); - amrex::ParticleReal * const AMREX_RESTRICT uz_1 = - soa_1.GetRealData(PIdx::uz).data(); - amrex::ParticleReal const * const AMREX_RESTRICT w_1 = - soa_1.GetRealData(PIdx::w).data(); + const auto soa_1 = ptile_1.getParticleTileData(); index_type* indices_1 = bins_1.permutationPtr(); index_type const* cell_offsets_1 = bins_1.offsetsPtr(); amrex::Real q1 = species_1.getCharge(); amrex::Real m1 = species_1.getMass(); + constexpr int getpos_offset = 0; + auto get_position_1 = GetParticlePosition(ptile_1, getpos_offset); // - Species 2 - auto& soa_2 = ptile_2.GetStructOfArrays(); - amrex::Real* ux_2 = soa_2.GetRealData(PIdx::ux).data(); - amrex::Real* uy_2 = soa_2.GetRealData(PIdx::uy).data(); - amrex::Real* uz_2 = soa_2.GetRealData(PIdx::uz).data(); - amrex::Real* w_2 = soa_2.GetRealData(PIdx::w).data(); + const auto soa_2 = ptile_2.getParticleTileData(); index_type* indices_2 = bins_2.permutationPtr(); index_type const* 
cell_offsets_2 = bins_2.offsetsPtr(); amrex::Real q2 = species_2.getCharge(); amrex::Real m2 = species_2.getMass(); + auto get_position_2 = GetParticlePosition(ptile_2, getpos_offset); const amrex::Real dt = WarpX::GetInstance().getdt(lev); amrex::Geometry const& geom = WarpX::GetInstance().Geom(lev); @@ -308,7 +294,7 @@ public: binary_collision_functor( cell_start_1, cell_stop_1, cell_start_2, cell_stop_2, indices_1, indices_2, - ux_1, uy_1, uz_1, ux_2, uy_2, uz_2, w_1, w_2, + soa_1, soa_2, get_position_1, get_position_2, q1, q2, m1, m2, dt*ndt, dV, engine ); } ); diff --git a/Source/Particles/Collision/ElasticCollisionPerez.H b/Source/Particles/Collision/ElasticCollisionPerez.H index 6f12fab02..0dce6e376 100644 --- a/Source/Particles/Collision/ElasticCollisionPerez.H +++ b/Source/Particles/Collision/ElasticCollisionPerez.H @@ -9,6 +9,7 @@ #include "ComputeTemperature.H" #include "UpdateMomentumPerezElastic.H" +#include "Particles/WarpXParticleContainer.H" #include "Utils/WarpXConst.H" #include @@ -33,25 +34,33 @@ * @param[in] dV is the volume of the corresponding cell. */ -template +template AMREX_GPU_HOST_DEVICE AMREX_INLINE void ElasticCollisionPerez ( T_index const I1s, T_index const I1e, T_index const I2s, T_index const I2e, T_index *I1, T_index *I2, - T_R *u1x, T_R *u1y, T_R *u1z, - T_R *u2x, T_R *u2y, T_R *u2z, - T_R const *w1, T_R const *w2, + SoaData_type soa_1, SoaData_type soa_2, + GetParticlePosition /*get_position_1*/, GetParticlePosition /*get_position_2*/, T_R const q1, T_R const q2, T_R const m1, T_R const m2, T_R const T1, T_R const T2, T_R const dt, T_R const L, T_R const dV, amrex::RandomEngine const& engine) { - int NI1 = I1e - I1s; int NI2 = I2e - I2s; + T_R * const AMREX_RESTRICT w1 = soa_1.m_rdata[PIdx::w]; + T_R * const AMREX_RESTRICT u1x = soa_1.m_rdata[PIdx::ux]; + T_R * const AMREX_RESTRICT u1y = soa_1.m_rdata[PIdx::uy]; + T_R * const AMREX_RESTRICT u1z = soa_1.m_rdata[PIdx::uz]; + + T_R * const AMREX_RESTRICT w2 = soa_2.m_rdata[PIdx::w]; + T_R * const AMREX_RESTRICT u2x = soa_2.m_rdata[PIdx::ux]; + T_R * const AMREX_RESTRICT u2y = soa_2.m_rdata[PIdx::uy]; + T_R * const AMREX_RESTRICT u2z = soa_2.m_rdata[PIdx::uz]; + // get local T1t and T2t T_R T1t; T_R T2t; if ( T1 <= T_R(0.0) && L <= T_R(0.0) ) @@ -85,8 +94,13 @@ void ElasticCollisionPerez ( // compute Debye length lmdD T_R lmdD; - lmdD = T_R(1.0)/std::sqrt( n1*q1*q1/(T1t*PhysConst::ep0) + - n2*q2*q2/(T2t*PhysConst::ep0) ); + if ( T1t < T_R(0.0) || T2t < T_R(0.0) ) { + lmdD = T_R(0.0); + } + else { + lmdD = T_R(1.0)/std::sqrt( n1*q1*q1/(T1t*PhysConst::ep0) + + n2*q2*q2/(T2t*PhysConst::ep0) ); + } T_R rmin = std::pow( T_R(4.0) * MathConst::pi / T_R(3.0) * amrex::max(n1,n2), T_R(-1.0/3.0) ); lmdD = amrex::max(lmdD, rmin); diff --git a/Source/Particles/Collision/PairWiseCoulombCollisionFunc.H b/Source/Particles/Collision/PairWiseCoulombCollisionFunc.H index 60737d3b2..e48cbab72 100644 --- a/Source/Particles/Collision/PairWiseCoulombCollisionFunc.H +++ b/Source/Particles/Collision/PairWiseCoulombCollisionFunc.H @@ -9,6 +9,7 @@ #define PAIRWISE_COULOMB_COLLISION_FUNC_H_ #include "ElasticCollisionPerez.H" +#include "Particles/Pusher/GetAndSetPosition.H" #include "Particles/WarpXParticleContainer.H" #include "Utils/WarpXUtil.H" @@ -27,6 +28,7 @@ class PairWiseCoulombCollisionFunc{ using ParticleType = WarpXParticleContainer::ParticleType; using ParticleBins = amrex::DenseBins; using index_type = ParticleBins::index_type; + using SoaData_type = WarpXParticleContainer::ParticleTileType::ParticleTileDataType; public: 
/** @@ -69,9 +71,8 @@ public: index_type const I1s, index_type const I1e, index_type const I2s, index_type const I2e, index_type* I1, index_type* I2, - amrex::Real* u1x, amrex::Real* u1y, amrex::Real* u1z, - amrex::Real* u2x, amrex::Real* u2y, amrex::Real* u2z, - amrex::Real const * w1, amrex::Real const * w2, + SoaData_type soa_1, SoaData_type soa_2, + GetParticlePosition get_position_1, GetParticlePosition get_position_2, amrex::Real const q1, amrex::Real const q2, amrex::Real const m1, amrex::Real const m2, amrex::Real const dt, amrex::Real const dV, @@ -79,7 +80,7 @@ public: { ElasticCollisionPerez( I1s, I1e, I2s, I2e, I1, I2, - u1x, u1y, u1z, u2x, u2y, u2z, w1, w2, + soa_1, soa_2, get_position_1, get_position_2, q1, q2, m1, m2, amrex::Real(-1.0), amrex::Real(-1.0), dt, m_CoulombLog, dV, engine ); } diff --git a/Source/Particles/ElementaryProcess/Ionization.H b/Source/Particles/ElementaryProcess/Ionization.H index bb919550a..c970fb4f9 100644 --- a/Source/Particles/ElementaryProcess/Ionization.H +++ b/Source/Particles/ElementaryProcess/Ionization.H @@ -124,7 +124,7 @@ struct IonizationFilterFunc ); // Compute probability of ionization p - amrex::Real w_dtau = 1._rt/ ga * m_adk_prefactor[ion_lev] * + amrex::Real w_dtau = (E == 0._rt) ? 0._rt : 1._rt/ ga * m_adk_prefactor[ion_lev] * std::pow(E, m_adk_power[ion_lev]) * std::exp( m_adk_exp_prefactor[ion_lev]/E ); amrex::Real p = 1._rt - std::exp( - w_dtau ); diff --git a/Source/Particles/ElementaryProcess/QEDInternals/BreitWheelerEngineWrapper_fwd.H b/Source/Particles/ElementaryProcess/QEDInternals/BreitWheelerEngineWrapper_fwd.H index 42ad23694..483a65de8 100644 --- a/Source/Particles/ElementaryProcess/QEDInternals/BreitWheelerEngineWrapper_fwd.H +++ b/Source/Particles/ElementaryProcess/QEDInternals/BreitWheelerEngineWrapper_fwd.H @@ -1,12 +1,17 @@ -/* Copyright 2021 Luca Fedeli +/* Copyright 2021 Luca Fedeli, Axel Huebl * * This file is part of WarpX. * * License: BSD-3-Clause-LBNL */ +#ifndef WARPX_BREIT_WHEELER_ENGINE_WRAPPER_FWD_H +#define WARPX_BREIT_WHEELER_ENGINE_WRAPPER_FWD_H + class BreitWheelerGetOpticalDepth; class BreitWheelerEvolveOpticalDepth; class BreitWheelerGeneratePairs; class BreitWheelerEngine; + +#endif /* WARPX_BREIT_WHEELER_ENGINE_WRAPPER_FWD_H */ diff --git a/Source/Particles/ElementaryProcess/QEDInternals/QuantumSyncEngineWrapper_fwd.H b/Source/Particles/ElementaryProcess/QEDInternals/QuantumSyncEngineWrapper_fwd.H index eb79b0c07..2d8b2911c 100644 --- a/Source/Particles/ElementaryProcess/QEDInternals/QuantumSyncEngineWrapper_fwd.H +++ b/Source/Particles/ElementaryProcess/QEDInternals/QuantumSyncEngineWrapper_fwd.H @@ -1,12 +1,17 @@ -/* Copyright 2021 Luca Fedeli +/* Copyright 2021 Luca Fedeli, Axel Huebl * * This file is part of WarpX. 
* * License: BSD-3-Clause-LBNL */ +#ifndef WARPX_QUANTUM_SYNC_ENGINE_WRAPPER_FWD_H +#define WARPX_QUANTUM_SYNC_ENGINE_WRAPPER_FWD_H + class QuantumSynchrotronGetOpticalDepth; class QuantumSynchrotronEvolveOpticalDepth; class QuantumSynchrotronPhotonEmission; class QuantumSynchrotronEngine; + +#endif /* WARPX_QUANTUM_SYNC_ENGINE_WRAPPER_FWD_H */ diff --git a/Source/Particles/Gather/GetExternalFields.H b/Source/Particles/Gather/GetExternalFields.H index 468b8d078..8e8a15a38 100644 --- a/Source/Particles/Gather/GetExternalFields.H +++ b/Source/Particles/Gather/GetExternalFields.H @@ -30,10 +30,14 @@ struct GetExternalField amrex::Real m_time; amrex::Real m_repeated_plasma_lens_period; + amrex::Real m_gamma_boost; + amrex::Real m_uz_boost; const amrex::Real* AMREX_RESTRICT m_repeated_plasma_lens_starts = nullptr; const amrex::Real* AMREX_RESTRICT m_repeated_plasma_lens_lengths = nullptr; - const amrex::Real* AMREX_RESTRICT m_repeated_plasma_lens_strengths = nullptr; + const amrex::Real* AMREX_RESTRICT m_repeated_plasma_lens_strengths_E = nullptr; + const amrex::Real* AMREX_RESTRICT m_repeated_plasma_lens_strengths_B = nullptr; int m_n_lenses; + int m_lens_is_electric; amrex::Real m_dt; const amrex::ParticleReal* AMREX_RESTRICT m_ux = nullptr; const amrex::ParticleReal* AMREX_RESTRICT m_uy = nullptr; @@ -65,16 +69,21 @@ struct GetExternalField amrex::ParticleReal x, y, z; m_get_position(i, x, y, z); - amrex::ParticleReal const uxp = m_ux[i]; - amrex::ParticleReal const uyp = m_uy[i]; - amrex::ParticleReal const uzp = m_uz[i]; + const amrex::ParticleReal uxp = m_ux[i]; + const amrex::ParticleReal uyp = m_uy[i]; + const amrex::ParticleReal uzp = m_uz[i]; + constexpr amrex::Real inv_c2 = 1._rt/(PhysConst::c*PhysConst::c); - const amrex::Real inv_gamma = 1._rt/std::sqrt(1._rt + (uxp*uxp + uyp*uyp + uzp*uzp)*inv_c2); - const amrex::ParticleReal vzp = uzp*inv_gamma; + const amrex::ParticleReal gamma = std::sqrt(1._rt + (uxp*uxp + uyp*uyp + uzp*uzp)*inv_c2); + const amrex::ParticleReal vzp = uzp/gamma; - // This assumes that vzp > 0. - amrex::ParticleReal const zl = z; - amrex::ParticleReal const zr = z + vzp*m_dt; + amrex::ParticleReal zl = z; + amrex::ParticleReal zr = z + vzp*m_dt; + + if (m_gamma_boost > 1._rt) { + zl = m_gamma_boost*zl + m_uz_boost*m_time; + zr = m_gamma_boost*zr + m_uz_boost*m_time; + } // This assumes that zl > 0. int i_lens = static_cast(std::floor(zl/m_repeated_plasma_lens_period)); @@ -85,17 +94,46 @@ struct GetExternalField // Calculate the residence correction // frac will be 1 if the step is completely inside the lens, between 0 and 1 // when entering or leaving the lens, and otherwise 0. + // This assumes that vzp > 0. 
amrex::Real fl = 0.; if (zl >= lens_start && zl < lens_end) fl = 1.; amrex::Real fr = 0.; if (zr >= lens_start && zr < lens_end) fr = 1.; amrex::Real frac = fl; - amrex::Real dzi = 1./(vzp*m_dt); - if (fl > fr) frac = (lens_end - zl)*dzi; - if (fr > fl) frac = (zr - lens_start)*dzi; - - field_x += x*frac*m_repeated_plasma_lens_strengths[i_lens]; - field_y += y*frac*m_repeated_plasma_lens_strengths[i_lens]; + if (fl > fr) frac = (lens_end - zl)/(zr - zl); + if (fr > fl) frac = (zr - lens_start)/(zr - zl); + + if (m_lens_is_electric) { + amrex::Real Ex = x*frac*m_repeated_plasma_lens_strengths_E[i_lens]; + amrex::Real Ey = y*frac*m_repeated_plasma_lens_strengths_E[i_lens]; + if (m_gamma_boost > 1._rt) { + // Transform the fields to the boosted frame + const amrex::Real Bx = +y*frac*m_repeated_plasma_lens_strengths_B[i_lens]; + const amrex::Real By = -x*frac*m_repeated_plasma_lens_strengths_B[i_lens]; + const amrex::Real vz_boost = m_uz_boost/m_gamma_boost; + const amrex::Real Ex_boost = m_gamma_boost*(Ex - vz_boost*By); + const amrex::Real Ey_boost = m_gamma_boost*(Ey + vz_boost*Bx); + Ex = Ex_boost; + Ey = Ey_boost; + } + field_x += Ex; + field_y += Ey; + } else { + amrex::Real Bx = +y*frac*m_repeated_plasma_lens_strengths_B[i_lens]; + amrex::Real By = -x*frac*m_repeated_plasma_lens_strengths_B[i_lens]; + if (m_gamma_boost > 1._rt) { + // Transform the fields to the boosted frame + const amrex::Real Ex = x*frac*m_repeated_plasma_lens_strengths_E[i_lens]; + const amrex::Real Ey = y*frac*m_repeated_plasma_lens_strengths_E[i_lens]; + const amrex::Real vz_boost = m_uz_boost/m_gamma_boost; + const amrex::Real Bx_boost = m_gamma_boost*(Bx + vz_boost*Ey*inv_c2); + const amrex::Real By_boost = m_gamma_boost*(By - vz_boost*Ex*inv_c2); + Bx = Bx_boost; + By = By_boost; + } + field_x += Bx; + field_y += By; + } } else diff --git a/Source/Particles/Gather/GetExternalFields.cpp b/Source/Particles/Gather/GetExternalFields.cpp index 03c4dc2b8..d25d7b34e 100644 --- a/Source/Particles/Gather/GetExternalFields.cpp +++ b/Source/Particles/Gather/GetExternalFields.cpp @@ -8,6 +8,7 @@ #include +using namespace amrex::literals; GetExternalEField::GetExternalEField (const WarpXParIter& a_pti, int a_offset) noexcept { @@ -32,6 +33,10 @@ GetExternalEField::GetExternalEField (const WarpXParIter& a_pti, int a_offset) n else if (mypc.m_E_ext_particle_s=="repeated_plasma_lens") { m_type = RepeatedPlasmaLens; + m_time = warpx.gett_new(a_pti.GetLevel()); + m_gamma_boost = WarpX::gamma_boost; + m_uz_boost = std::sqrt(WarpX::gamma_boost*WarpX::gamma_boost - 1._rt)*PhysConst::c; + m_lens_is_electric = 1; m_dt = warpx.getdt(a_pti.GetLevel()); m_get_position = GetParticlePosition(a_pti, a_offset); auto& attribs = a_pti.GetAttribs(); @@ -42,7 +47,8 @@ GetExternalEField::GetExternalEField (const WarpXParIter& a_pti, int a_offset) n m_n_lenses = static_cast(mypc.h_repeated_plasma_lens_starts.size()); m_repeated_plasma_lens_starts = mypc.d_repeated_plasma_lens_starts.data(); m_repeated_plasma_lens_lengths = mypc.d_repeated_plasma_lens_lengths.data(); - m_repeated_plasma_lens_strengths = mypc.d_repeated_plasma_lens_strengths.data(); + m_repeated_plasma_lens_strengths_E = mypc.d_repeated_plasma_lens_strengths_E.data(); + m_repeated_plasma_lens_strengths_B = mypc.d_repeated_plasma_lens_strengths_B.data(); } } @@ -66,4 +72,24 @@ GetExternalBField::GetExternalBField (const WarpXParIter& a_pti, int a_offset) n m_yfield_partparser = mypc.m_By_particle_parser->compile<4>(); m_zfield_partparser = mypc.m_Bz_particle_parser->compile<4>(); } 
+ else if (mypc.m_B_ext_particle_s=="repeated_plasma_lens") + { + m_type = RepeatedPlasmaLens; + m_time = warpx.gett_new(a_pti.GetLevel()); + m_gamma_boost = WarpX::gamma_boost; + m_uz_boost = std::sqrt(WarpX::gamma_boost*WarpX::gamma_boost - 1._rt)*PhysConst::c; + m_lens_is_electric = 0; + m_dt = warpx.getdt(a_pti.GetLevel()); + m_get_position = GetParticlePosition(a_pti, a_offset); + auto& attribs = a_pti.GetAttribs(); + m_ux = attribs[PIdx::ux].dataPtr() + a_offset; + m_uy = attribs[PIdx::uy].dataPtr() + a_offset; + m_uz = attribs[PIdx::uz].dataPtr() + a_offset; + m_repeated_plasma_lens_period = mypc.m_repeated_plasma_lens_period; + m_n_lenses = static_cast(mypc.h_repeated_plasma_lens_starts.size()); + m_repeated_plasma_lens_starts = mypc.d_repeated_plasma_lens_starts.data(); + m_repeated_plasma_lens_lengths = mypc.d_repeated_plasma_lens_lengths.data(); + m_repeated_plasma_lens_strengths_E = mypc.d_repeated_plasma_lens_strengths_E.data(); + m_repeated_plasma_lens_strengths_B = mypc.d_repeated_plasma_lens_strengths_B.data(); + } } diff --git a/Source/Particles/Gather/ScalarFieldGather.H b/Source/Particles/Gather/ScalarFieldGather.H new file mode 100644 index 000000000..a051c87ab --- /dev/null +++ b/Source/Particles/Gather/ScalarFieldGather.H @@ -0,0 +1,124 @@ +/* Copyright 2021 Modern Electron + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ +#ifndef SCALARFIELDGATHER_H_ +#define SCALARFIELDGATHER_H_ + +/** + * \brief Compute weight of each surrounding node in interpolating a nodal field + * to the given coordinates. + * + * \param xp, yp, zp Particle position coordinates + * \param scalar_field Array4 of the nodal scalar field, either full array or tile. + * \param plo Index lower bounds of domain. + * \param dxi 3D cell spacing + * \param i, j, p Variables to store indices of position on grid + * \param W 2D array of weights to store each neighbouring node + */ +AMREX_GPU_HOST_DEVICE AMREX_INLINE +void compute_weights_nodal (const amrex::ParticleReal xp, + const amrex::ParticleReal yp, + const amrex::ParticleReal zp, + amrex::GpuArray const& plo, + amrex::GpuArray const& dxi, + int& i, int& j, int& k, amrex::Real W[AMREX_SPACEDIM][2]) noexcept +{ +#if (defined WARPX_DIM_3D) + amrex::Real x = (xp - plo[0]) * dxi[0]; + amrex::Real y = (yp - plo[1]) * dxi[1]; + amrex::Real z = (zp - plo[2]) * dxi[2]; + + i = static_cast(amrex::Math::floor(x)); + j = static_cast(amrex::Math::floor(y)); + k = static_cast(amrex::Math::floor(z)); + + W[0][1] = x - i; + W[1][1] = y - j; + W[2][1] = z - k; + + W[0][0] = 1.0 - W[0][1]; + W[1][0] = 1.0 - W[1][1]; + W[2][0] = 1.0 - W[2][1]; +#elif (defined WARPX_DIM_XZ) + amrex::Real x = (xp - plo[0]) * dxi[0]; + amrex::Real z = (zp - plo[1]) * dxi[1]; + + i = static_cast(amrex::Math::floor(x)); + j = static_cast(amrex::Math::floor(z)); + k = 0; + + W[0][1] = x - i; + W[1][1] = z - j; + + W[0][0] = 1.0 - W[0][1]; + W[1][0] = 1.0 - W[1][1]; + + amrex::ignore_unused(yp); +#else + amrex::ignore_unused(xp, yp, zp, plo, dxi, i, j, k, W); + amrex::Abort("Error: compute_weights not yet implemented in RZ"); +#endif +} + +/** + * \brief Interpolate nodal field value based on surrounding indices and weights. + * + * \param i, j, p Indices of position on grid + * \param W 2D array of weights for each neighbouring node + * \param scalar_field Array4 of the nodal scalar field, either full array or tile. 
+ */ +AMREX_GPU_HOST_DEVICE AMREX_INLINE +amrex::Real interp_field_nodal (int i, int j, int k, + const amrex::Real W[AMREX_SPACEDIM][2], + amrex::Array4 const& scalar_field) noexcept +{ + amrex::Real value = 0; +#if (defined WARPX_DIM_3D) + value += scalar_field(i, j , k ) * W[0][0] * W[1][0] * W[2][0]; + value += scalar_field(i+1, j , k ) * W[0][1] * W[1][0] * W[2][0]; + value += scalar_field(i, j+1, k ) * W[0][0] * W[1][1] * W[2][0]; + value += scalar_field(i+1, j+1, k ) * W[0][1] * W[1][1] * W[2][0]; + value += scalar_field(i, j , k+1) * W[0][0] * W[1][0] * W[2][1]; + value += scalar_field(i+1, j , k+1) * W[0][1] * W[1][0] * W[2][1]; + value += scalar_field(i , j+1, k+1) * W[0][0] * W[1][1] * W[2][1]; + value += scalar_field(i+1, j+1, k+1) * W[0][1] * W[1][1] * W[2][1]; +#elif (defined WARPX_DIM_XZ) + value += scalar_field(i, j , k) * W[0][0] * W[1][0]; + value += scalar_field(i+1, j , k) * W[0][1] * W[1][0]; + value += scalar_field(i, j+1, k) * W[0][0] * W[1][1]; + value += scalar_field(i+1, j+1, k) * W[0][1] * W[1][1]; +#else + amrex::ignore_unused(i, j, k, W, scalar_field); + amrex::Abort("Error: interp_field not yet implemented in RZ"); +#endif + return value; +} + +/** + * \brief Scalar field gather for a single particle. The field has to be defined + * at the cell nodes (see https://amrex-codes.github.io/amrex/docs_html/Basics.html#id2) + * + * \param xp, yp, zp Particle position coordinates + * \param scalar_field Array4 of the nodal scalar field, either full array or tile. + * \param dxi 3D cell spacing + * \param lo Index lower bounds of domain. + */ +AMREX_GPU_HOST_DEVICE AMREX_INLINE +amrex::Real doGatherScalarFieldNodal (const amrex::ParticleReal xp, + const amrex::ParticleReal yp, + const amrex::ParticleReal zp, + amrex::Array4 const& scalar_field, + amrex::GpuArray const& dxi, + amrex::GpuArray const& lo) noexcept +{ + // first find the weight of surrounding nodes to use during interpolation + int ii, jj, kk; + amrex::Real W[AMREX_SPACEDIM][2]; + compute_weights_nodal(xp, yp, zp, lo, dxi, ii, jj, kk, W); + + return interp_field_nodal(ii, jj, kk, W, scalar_field); +} +#endif // SCALARFIELDGATHER_H_ diff --git a/Source/Particles/Make.package b/Source/Particles/Make.package index f25628e04..81e6cac3e 100644 --- a/Source/Particles/Make.package +++ b/Source/Particles/Make.package @@ -4,6 +4,8 @@ CEXE_sources += RigidInjectedParticleContainer.cpp CEXE_sources += PhysicalParticleContainer.cpp CEXE_sources += PhotonParticleContainer.cpp CEXE_sources += LaserParticleContainer.cpp +CEXE_sources += ParticleBoundaryBuffer.cpp +CEXE_sources += ParticleBoundaries.cpp include $(WARPX_HOME)/Source/Particles/Pusher/Make.package include $(WARPX_HOME)/Source/Particles/Deposition/Make.package diff --git a/Source/Particles/MultiParticleContainer.H b/Source/Particles/MultiParticleContainer.H index b59aa4371..5c924a3ab 100644 --- a/Source/Particles/MultiParticleContainer.H +++ b/Source/Particles/MultiParticleContainer.H @@ -23,6 +23,7 @@ #include "Utils/WarpXConst.H" #include "Utils/WarpXUtil.H" #include "WarpXParticleContainer.H" +#include "ParticleBoundaries.H" #include #include @@ -145,11 +146,10 @@ public: * a fraction of dt). When different than 0, the particle * position will be temporarily modified to match the time * of the deposition. - * \param[in] icomp component of the MultiFab where rho is deposited (old, new) */ void DepositCharge (amrex::Vector >& rho, - const amrex::Real relative_t, const int icomp = 0); + const amrex::Real relative_t); /** * \brief Deposit current density. 
@@ -299,10 +299,12 @@ public: amrex::Real m_repeated_plasma_lens_period; amrex::Vector h_repeated_plasma_lens_starts; amrex::Vector h_repeated_plasma_lens_lengths; - amrex::Vector h_repeated_plasma_lens_strengths; + amrex::Vector h_repeated_plasma_lens_strengths_E; + amrex::Vector h_repeated_plasma_lens_strengths_B; amrex::Gpu::DeviceVector d_repeated_plasma_lens_starts; amrex::Gpu::DeviceVector d_repeated_plasma_lens_lengths; - amrex::Gpu::DeviceVector d_repeated_plasma_lens_strengths; + amrex::Gpu::DeviceVector d_repeated_plasma_lens_strengths_E; + amrex::Gpu::DeviceVector d_repeated_plasma_lens_strengths_B; #ifdef WARPX_QED /** diff --git a/Source/Particles/MultiParticleContainer.cpp b/Source/Particles/MultiParticleContainer.cpp index ba8825522..549358f30 100644 --- a/Source/Particles/MultiParticleContainer.cpp +++ b/Source/Particles/MultiParticleContainer.cpp @@ -223,28 +223,62 @@ MultiParticleContainer::ReadParameters () } - // if the input string for E_ext_particle_s is + // if the input string for E_ext_particle_s or B_ext_particle_s is // "repeated_plasma_lens" then the plasma lens properties // must be provided in the input file. - if (m_E_ext_particle_s == "repeated_plasma_lens") { + if (m_E_ext_particle_s == "repeated_plasma_lens" || + m_B_ext_particle_s == "repeated_plasma_lens") { queryWithParser(pp_particles, "repeated_plasma_lens_period", m_repeated_plasma_lens_period); getArrWithParser(pp_particles, "repeated_plasma_lens_starts", h_repeated_plasma_lens_starts); getArrWithParser(pp_particles, "repeated_plasma_lens_lengths", h_repeated_plasma_lens_lengths); - getArrWithParser(pp_particles, "repeated_plasma_lens_strengths", h_repeated_plasma_lens_strengths); int n_lenses = static_cast(h_repeated_plasma_lens_starts.size()); d_repeated_plasma_lens_starts.resize(n_lenses); d_repeated_plasma_lens_lengths.resize(n_lenses); - d_repeated_plasma_lens_strengths.resize(n_lenses); amrex::Gpu::copyAsync(amrex::Gpu::hostToDevice, h_repeated_plasma_lens_starts.begin(), h_repeated_plasma_lens_starts.end(), d_repeated_plasma_lens_starts.begin()); amrex::Gpu::copyAsync(amrex::Gpu::hostToDevice, h_repeated_plasma_lens_lengths.begin(), h_repeated_plasma_lens_lengths.end(), d_repeated_plasma_lens_lengths.begin()); - amrex::Gpu::copyAsync(amrex::Gpu::hostToDevice, - h_repeated_plasma_lens_strengths.begin(), h_repeated_plasma_lens_strengths.end(), - d_repeated_plasma_lens_strengths.begin()); + + if (m_E_ext_particle_s == "repeated_plasma_lens") { + getArrWithParser(pp_particles, "repeated_plasma_lens_strengths_E", h_repeated_plasma_lens_strengths_E); + } + if (m_B_ext_particle_s == "repeated_plasma_lens") { + getArrWithParser(pp_particles, "repeated_plasma_lens_strengths_B", h_repeated_plasma_lens_strengths_B); + } + if (WarpX::gamma_boost > 1._rt) { + AMREX_ALWAYS_ASSERT_WITH_MESSAGE( + m_E_ext_particle_s == "repeated_plasma_lens" || m_E_ext_particle_s == "default", + "With gamma_boost > 1, E_ext_particle_init_style and B_ext_particle_init_style" + "must be either repeated_plasma_lens or unspecified"); + AMREX_ALWAYS_ASSERT_WITH_MESSAGE( + m_B_ext_particle_s == "repeated_plasma_lens" || m_B_ext_particle_s == "default", + "With gamma_boost > 1, E_ext_particle_init_style and B_ext_particle_init_style" + "must be either repeated_plasma_lens or unspecified"); + if (m_E_ext_particle_s == "default") { + m_E_ext_particle_s = "repeated_plasma_lens"; + h_repeated_plasma_lens_strengths_E.resize(n_lenses); + } + if (m_B_ext_particle_s == "default") { + m_B_ext_particle_s = "repeated_plasma_lens"; + 
h_repeated_plasma_lens_strengths_B.resize(n_lenses); + } + } + + if (m_E_ext_particle_s == "repeated_plasma_lens") { + d_repeated_plasma_lens_strengths_E.resize(n_lenses); + amrex::Gpu::copyAsync(amrex::Gpu::hostToDevice, + h_repeated_plasma_lens_strengths_E.begin(), h_repeated_plasma_lens_strengths_E.end(), + d_repeated_plasma_lens_strengths_E.begin()); + } + if (m_B_ext_particle_s == "repeated_plasma_lens") { + d_repeated_plasma_lens_strengths_B.resize(n_lenses); + amrex::Gpu::copyAsync(amrex::Gpu::hostToDevice, + h_repeated_plasma_lens_strengths_B.begin(), h_repeated_plasma_lens_strengths_B.end(), + d_repeated_plasma_lens_strengths_B.begin()); + } amrex::Gpu::synchronize(); } @@ -471,10 +505,9 @@ MultiParticleContainer::DepositCurrent ( } // Call the deposition kernel for each species - for (int ispecies = 0; ispecies < nSpecies(); ispecies++) + for (auto& pc : allcontainers) { - WarpXParticleContainer& species = GetParticleContainer(ispecies); - species.DepositCurrent(J, dt, relative_t); + pc->DepositCurrent(J, dt, relative_t); } #ifdef WARPX_DIM_RZ @@ -488,28 +521,26 @@ MultiParticleContainer::DepositCurrent ( void MultiParticleContainer::DepositCharge ( amrex::Vector >& rho, - const amrex::Real relative_t, const int icomp) + const amrex::Real relative_t) { // Reset the rho array for (int lev = 0; lev < rho.size(); ++lev) { - int const nc = WarpX::ncomps; - rho[lev]->setVal(0.0, icomp*nc, nc, rho[lev]->nGrowVect()); + rho[lev]->setVal(0.0, 0, WarpX::ncomps, rho[lev]->nGrowVect()); } // Push the particles in time, if needed if (relative_t != 0.) PushX(relative_t); // Call the deposition kernel for each species - for (int ispecies = 0; ispecies < nSpecies(); ispecies++) + for (auto& pc : allcontainers) { - WarpXParticleContainer& species = GetParticleContainer(ispecies); bool const local = true; bool const reset = false; bool const do_rz_volume_scaling = false; bool const interpolate_across_levels = false; - species.DepositCharge(rho, local, reset, do_rz_volume_scaling, - interpolate_across_levels, icomp); + pc->DepositCharge(rho, local, reset, do_rz_volume_scaling, + interpolate_across_levels); } // Push the particles back in time diff --git a/Source/Particles/MultiParticleContainer_fwd.H b/Source/Particles/MultiParticleContainer_fwd.H index 20316a9cd..b7590876b 100644 --- a/Source/Particles/MultiParticleContainer_fwd.H +++ b/Source/Particles/MultiParticleContainer_fwd.H @@ -1,8 +1,13 @@ -/* Copyright 2021 Luca Fedeli +/* Copyright 2021 Luca Fedeli, Axel Huebl * * This file is part of WarpX. 
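As a side note on the boosted-frame branch above, m_uz_boost = sqrt(gamma_boost^2 - 1) * c is the z component of the boost four-velocity; a standalone numeric check (gamma_boost = 10 is an arbitrary illustration, not a WarpX default) confirms that gamma is recovered from it.

// Standalone check of the boost kinematics used above:
// uz_boost = sqrt(gamma^2 - 1) * c  implies  gamma = sqrt(1 + (uz/c)^2).
#include <cmath>
#include <cstdio>

int main ()
{
    const double c = 299'792'458.;        // speed of light [m/s]
    const double gamma_boost = 10.0;      // illustrative value only
    const double uz_boost = std::sqrt(gamma_boost*gamma_boost - 1.0) * c;
    const double gamma_back = std::sqrt(1.0 + (uz_boost/c)*(uz_boost/c));
    std::printf("uz_boost = %.6e m/s, recovered gamma = %.12f\n", uz_boost, gamma_back);
    return 0;
}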
* * License: BSD-3-Clause-LBNL */ +#ifndef WARPX_MULTI_PARTICLE_CONTAINER_FWD_H +#define WARPX_MULTI_PARTICLE_CONTAINER_FWD_H + class MultiParticleContainer; + +#endif /* WARPX_MULTI_PARTICLE_CONTAINER_FWD_H */ diff --git a/Source/Particles/ParticleBoundaries.H b/Source/Particles/ParticleBoundaries.H index 30213b81c..8cc00cdc3 100644 --- a/Source/Particles/ParticleBoundaries.H +++ b/Source/Particles/ParticleBoundaries.H @@ -17,116 +17,16 @@ struct ParticleBoundaries { - ParticleBoundaries () noexcept - { - SetAll(ParticleBoundaryType::Absorbing); - reflect_all_velocities = false; - } + ParticleBoundaries () noexcept; - void - Set_reflect_all_velocities(bool flag) - { - reflect_all_velocities = flag; - } + void Set_reflect_all_velocities (bool flag); + void SetAll (ParticleBoundaryType bc); - void - SetAll (ParticleBoundaryType bc) { - xmin_bc = bc; - xmax_bc = bc; - ymin_bc = bc; - ymax_bc = bc; - zmin_bc = bc; - zmax_bc = bc; - } + void SetBoundsX (ParticleBoundaryType bc_lo, ParticleBoundaryType bc_hi); + void SetBoundsY (ParticleBoundaryType bc_lo, ParticleBoundaryType bc_hi); + void SetBoundsZ (ParticleBoundaryType bc_lo, ParticleBoundaryType bc_hi); - void - SetBoundsX(ParticleBoundaryType bc_lo, ParticleBoundaryType bc_hi) { xmin_bc = bc_lo; xmax_bc = bc_hi; } - void - SetBoundsY(ParticleBoundaryType bc_lo, ParticleBoundaryType bc_hi) { ymin_bc = bc_lo; ymax_bc = bc_hi; } - void - SetBoundsZ(ParticleBoundaryType bc_lo, ParticleBoundaryType bc_hi) { zmin_bc = bc_lo; zmax_bc = bc_hi; } - - bool - CheckAll (ParticleBoundaryType bc) { - return (xmin_bc == bc && xmax_bc == bc -#ifdef WARPX_DIM_3D - && ymin_bc == bc && ymax_bc == bc -#endif - && zmin_bc == bc && zmax_bc == bc); - } - - /* \brief Applies absorbing or reflecting boundary condition to the input particles, along all axis. - * For reflecting boundaries, the position of the particle is changed appropriately and - * the sign of the velocity is changed (depending on the reflect_all_velocities flag). - * For absorbing, a flag is set whether the particle has been lost (it is up to the calling - * code to take appropriate action to remove any lost particles). - * Note that periodic boundaries are handled in AMReX code. 
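The *_fwd.H header above now carries a proper include guard. These forward-declaration headers exist so that other headers (WarpX.H picks up ParticleBoundaryBuffer_fwd.H later in this diff) can hold pointers or references to the class without including its full definition, which keeps rebuild times down. A one-file sketch of the pattern, with invented names (Widget, Store) rather than WarpX classes:

// One-file rendition of the forward-declaration pattern; the comments mark what would
// normally live in separate files. All names here are invented for illustration.

// --- Widget_fwd.H: cheap to include, no definition needed ---
class Widget;

// --- Store.H: only holds a pointer, so the forward declaration is enough ---
#include <memory>
class Store {
public:
    Store ();
    ~Store ();                               // defined where Widget is complete
private:
    std::unique_ptr<Widget> m_widget;
};

// --- Widget.H: the full definition ---
class Widget { public: int value = 42; };

// --- Store.cpp: includes the full definition before using Widget ---
Store::Store () : m_widget(std::make_unique<Widget>()) {}
Store::~Store () = default;

int main () { Store s; return 0; }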
- * - * \param x, xmin, xmax: particle x position, location of x boundary - * \param y, ymin, ymax: particle y position, location of y boundary (3D only) - * \param z, zmin, zmax: particle z position, location of z boundary - * \param ux, uy, uz: particle momenta - * \param particle_lost: output, flags whether the particle was lost - * \param boundaries: object with boundary condition settings - */ - AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE - static void - apply_boundaries (amrex::ParticleReal& x, amrex::Real xmin, amrex::Real xmax, -#ifdef WARPX_DIM_3D - amrex::ParticleReal& y, amrex::Real ymin, amrex::Real ymax, -#endif - amrex::ParticleReal& z, amrex::Real zmin, amrex::Real zmax, - amrex::ParticleReal& ux, amrex::ParticleReal& uy, amrex::ParticleReal& uz, - bool& particle_lost, - ParticleBoundaries const& boundaries) - { - bool change_sign_ux = false; - bool change_sign_uy = false; - bool change_sign_uz = false; - - apply_boundary(x, xmin, xmax, change_sign_ux, particle_lost, boundaries.xmin_bc, boundaries.xmax_bc); -#ifdef WARPX_DIM_3D - apply_boundary(y, ymin, ymax, change_sign_uy, particle_lost, boundaries.ymin_bc, boundaries.ymax_bc); -#endif - apply_boundary(z, zmin, zmax, change_sign_uz, particle_lost, boundaries.zmin_bc, boundaries.zmax_bc); - - if (boundaries.reflect_all_velocities && (change_sign_ux | change_sign_uy | change_sign_uz)) { - change_sign_ux = true; - change_sign_uy = true; - change_sign_uz = true; - } - if (change_sign_ux) ux = -ux; - if (change_sign_uy) uy = -uy; - if (change_sign_uz) uz = -uz; - } - - AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE - static void - apply_boundary (amrex::ParticleReal& x, amrex::Real xmin, amrex::Real xmax, - bool& change_sign_ux, bool& particle_lost, - ParticleBoundaryType xmin_bc, ParticleBoundaryType xmax_bc) - { - if (x < xmin) { - if (xmin_bc == ParticleBoundaryType::Absorbing || xmin_bc == ParticleBoundaryType::Open) { - particle_lost = true; - } - else if (xmin_bc == ParticleBoundaryType::Reflecting) { - x = 2*xmin - x; - change_sign_ux = true; - } - } - else if (x > xmax) { - if (xmax_bc == ParticleBoundaryType::Absorbing || xmin_bc == ParticleBoundaryType::Open) { - particle_lost = true; - } - else if (xmax_bc == ParticleBoundaryType::Reflecting) { - x = 2*xmax - x; - change_sign_ux = true; - } - } - } - -private: + bool CheckAll (ParticleBoundaryType bc); ParticleBoundaryType xmin_bc; ParticleBoundaryType xmax_bc; diff --git a/Source/Particles/ParticleBoundaries.cpp b/Source/Particles/ParticleBoundaries.cpp new file mode 100644 index 000000000..826874b1d --- /dev/null +++ b/Source/Particles/ParticleBoundaries.cpp @@ -0,0 +1,63 @@ +/* Copyright 2021 David Grote + * + * This file is part of WarpX. 
+ * + * License: BSD-3-Clause-LBNL + */ + +#include "ParticleBoundaries.H" + +ParticleBoundaries::ParticleBoundaries () noexcept +{ + SetAll(ParticleBoundaryType::Absorbing); + reflect_all_velocities = false; +} + +void +ParticleBoundaries::Set_reflect_all_velocities (bool flag) +{ + reflect_all_velocities = flag; +} + +void +ParticleBoundaries::SetAll (ParticleBoundaryType bc) +{ + xmin_bc = bc; + xmax_bc = bc; + ymin_bc = bc; + ymax_bc = bc; + zmin_bc = bc; + zmax_bc = bc; +} + +void +ParticleBoundaries::SetBoundsX (ParticleBoundaryType bc_lo, ParticleBoundaryType bc_hi) +{ + xmin_bc = bc_lo; + xmax_bc = bc_hi; +} + +void +ParticleBoundaries::SetBoundsY (ParticleBoundaryType bc_lo, ParticleBoundaryType bc_hi) +{ + ymin_bc = bc_lo; + ymax_bc = bc_hi; +} + +void +ParticleBoundaries::SetBoundsZ (ParticleBoundaryType bc_lo, ParticleBoundaryType bc_hi) +{ + zmin_bc = bc_lo; + zmax_bc = bc_hi; +} + +bool +ParticleBoundaries::CheckAll (ParticleBoundaryType bc) +{ + return (xmin_bc == bc && xmax_bc == bc +#ifdef WARPX_DIM_3D + && ymin_bc == bc && ymax_bc == bc +#endif + && zmin_bc == bc && zmax_bc == bc); +} + diff --git a/Source/Particles/ParticleBoundaries_K.H b/Source/Particles/ParticleBoundaries_K.H new file mode 100644 index 000000000..84c5424b5 --- /dev/null +++ b/Source/Particles/ParticleBoundaries_K.H @@ -0,0 +1,91 @@ +/* Copyright 2021 David Grote + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ +#ifndef PARTICLEBOUNDARIES_K_H_ +#define PARTICLEBOUNDARIES_K_H_ + +#include "ParticleBoundaries.H" + +#include + +namespace ApplyParticleBoundaries { + + /* \brief Applies the boundary condition on a specific axis + * This is called by apply_boundaries. + */ + AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE + void + apply_boundary (amrex::ParticleReal& x, amrex::Real xmin, amrex::Real xmax, + bool& change_sign_ux, bool& particle_lost, + ParticleBoundaryType xmin_bc, ParticleBoundaryType xmax_bc) + { + if (x < xmin) { + if (xmin_bc == ParticleBoundaryType::Absorbing || xmin_bc == ParticleBoundaryType::Open) { + particle_lost = true; + } + else if (xmin_bc == ParticleBoundaryType::Reflecting) { + x = 2*xmin - x; + change_sign_ux = true; + } + } + else if (x > xmax) { + if (xmax_bc == ParticleBoundaryType::Absorbing || xmin_bc == ParticleBoundaryType::Open) { + particle_lost = true; + } + else if (xmax_bc == ParticleBoundaryType::Reflecting) { + x = 2*xmax - x; + change_sign_ux = true; + } + } + } + + /* \brief Applies absorbing or reflecting boundary condition to the input particles, along all axis. + * For reflecting boundaries, the position of the particle is changed appropriately and + * the sign of the velocity is changed (depending on the reflect_all_velocities flag). + * For absorbing, a flag is set whether the particle has been lost (it is up to the calling + * code to take appropriate action to remove any lost particles). + * Note that periodic boundaries are handled in AMReX code. 
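The per-axis rule in apply_boundary above is: crossing an absorbing or open face marks the particle as lost, while crossing a reflecting face mirrors the position about that face and flips the sign of the corresponding momentum. A standalone C++ analogue of that rule (the enum and function names are illustrative; this is not the GPU kernel itself):

// Plain-C++ analogue of the per-axis boundary rule above.
// Absorbing/open faces mark the particle lost; reflecting faces mirror position and flip velocity.
#include <cstdio>

enum class BC { Absorbing, Reflecting, Open };

void apply_axis (double& x, double xmin, double xmax,
                 double& ux, bool& lost, BC bc_lo, BC bc_hi)
{
    if (x < xmin) {
        if (bc_lo == BC::Reflecting) { x = 2.0*xmin - x; ux = -ux; }
        else                         { lost = true; }            // Absorbing or Open
    } else if (x > xmax) {
        if (bc_hi == BC::Reflecting) { x = 2.0*xmax - x; ux = -ux; }
        else                         { lost = true; }
    }
}

int main ()
{
    // A particle that stepped past zmax = 1.0 by 0.2 is reflected back to 0.8
    double z = 1.2, uz = 3.0e7;
    bool lost = false;
    apply_axis(z, 0.0, 1.0, uz, lost, BC::Absorbing, BC::Reflecting);
    std::printf("z = %g, uz = %g, lost = %d\n", z, uz, lost);    // z = 0.8, uz = -3e+07, lost = 0
    return 0;
}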
+ * + * \param x, xmin, xmax: particle x position, location of x boundary + * \param y, ymin, ymax: particle y position, location of y boundary (3D only) + * \param z, zmin, zmax: particle z position, location of z boundary + * \param ux, uy, uz: particle momenta + * \param particle_lost: output, flags whether the particle was lost + * \param boundaries: object with boundary condition settings + */ + AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE + void + apply_boundaries (amrex::ParticleReal& x, amrex::Real xmin, amrex::Real xmax, +#ifdef WARPX_DIM_3D + amrex::ParticleReal& y, amrex::Real ymin, amrex::Real ymax, +#endif + amrex::ParticleReal& z, amrex::Real zmin, amrex::Real zmax, + amrex::ParticleReal& ux, amrex::ParticleReal& uy, amrex::ParticleReal& uz, + bool& particle_lost, + ParticleBoundaries const& boundaries) + { + bool change_sign_ux = false; + bool change_sign_uy = false; + bool change_sign_uz = false; + + apply_boundary(x, xmin, xmax, change_sign_ux, particle_lost, boundaries.xmin_bc, boundaries.xmax_bc); +#ifdef WARPX_DIM_3D + apply_boundary(y, ymin, ymax, change_sign_uy, particle_lost, boundaries.ymin_bc, boundaries.ymax_bc); +#endif + apply_boundary(z, zmin, zmax, change_sign_uz, particle_lost, boundaries.zmin_bc, boundaries.zmax_bc); + + if (boundaries.reflect_all_velocities && (change_sign_ux | change_sign_uy | change_sign_uz)) { + change_sign_ux = true; + change_sign_uy = true; + change_sign_uz = true; + } + if (change_sign_ux) ux = -ux; + if (change_sign_uy) uy = -uy; + if (change_sign_uz) uz = -uz; + } + +} +#endif diff --git a/Source/Particles/ParticleBoundaryBuffer.H b/Source/Particles/ParticleBoundaryBuffer.H new file mode 100644 index 000000000..452722d56 --- /dev/null +++ b/Source/Particles/ParticleBoundaryBuffer.H @@ -0,0 +1,61 @@ +/* Copyright 2021 Andrew Myers + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ +#ifndef PARTICLEBOUNDARYBUFFER_H_ +#define PARTICLEBOUNDARYBUFFER_H_ + +#include "Particles/ParticleBuffer.H" +#include "Particles/MultiParticleContainer_fwd.H" + +#include + +/** + * This stores particles that have left / been absorbed by domain and embedded boundaries. + */ +class ParticleBoundaryBuffer +{ +public: + ParticleBoundaryBuffer (); + + int numSpecies() const { return getSpeciesNames().size(); } + + const std::vector& getSpeciesNames() const { + static bool initialized = false; + if (!initialized) + { + amrex::ParmParse pp_particles("particles"); + pp_particles.queryarr("species_names", m_species_names); + initialized = true; + } + return m_species_names; + } + + void gatherParticles (MultiParticleContainer& mypc, + const amrex::Vector& distance_to_eb); + + void clearParticles (); + + void printNumParticles () const; + + constexpr int numBoundaries () const { + return AMREX_SPACEDIM*2 +#ifdef AMREX_USE_EB + + 1 +#endif + ; + } + +private: + // over boundary, then number of species + std::vector > > m_particle_containers; + + // over boundary, then number of species + std::vector > m_do_boundary_buffer; + + mutable std::vector m_species_names; +}; + +#endif /*PARTICLEBOUNDARYBUFFER_H_*/ diff --git a/Source/Particles/ParticleBoundaryBuffer.cpp b/Source/Particles/ParticleBoundaryBuffer.cpp new file mode 100644 index 000000000..f84d05b28 --- /dev/null +++ b/Source/Particles/ParticleBoundaryBuffer.cpp @@ -0,0 +1,234 @@ +/* Copyright 2021 Andrew Myers + * + * This file is part of WarpX. 
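getSpeciesNames above reads particles.species_names once and serves a cached copy afterwards; the mutable member together with a function-local flag is what makes that possible from a const accessor. The same read-once pattern in miniature, with a fake read_from_inputs standing in for amrex::ParmParse:

// Read-once caching sketch mirroring getSpeciesNames(). Everything here is illustrative;
// read_from_inputs() stands in for the actual ParmParse query.
#include <cstdio>
#include <string>
#include <vector>

static std::vector<std::string> read_from_inputs ()
{
    std::printf("(reading inputs)\n");       // should print exactly once
    return {"electrons", "ions"};
}

class BoundaryBuffer {
public:
    const std::vector<std::string>& getSpeciesNames () const {
        static bool initialized = false;     // function-local flag, shared across calls as in the original
        if (!initialized) {
            m_species_names = read_from_inputs();
            initialized = true;
        }
        return m_species_names;
    }
    int numSpecies () const { return static_cast<int>(getSpeciesNames().size()); }
private:
    mutable std::vector<std::string> m_species_names;
};

int main ()
{
    BoundaryBuffer buf;
    std::printf("%d species, first = %s\n", buf.numSpecies(), buf.getSpeciesNames()[0].c_str());
    std::printf("%d species (cached)\n", buf.numSpecies());
    return 0;
}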
+ * + * License: BSD-3-Clause-LBNL + */ + + +#include "EmbeddedBoundary/DistanceToEB.H" +#include "Particles/ParticleBoundaryBuffer.H" +#include "Particles/MultiParticleContainer.H" +#include "Particles/Gather/ScalarFieldGather.H" + +#include +#include + +struct IsOutsideDomainBoundary { + amrex::GpuArray m_plo; + amrex::GpuArray m_phi; + int m_idim; + int m_iside; + + template + AMREX_GPU_DEVICE AMREX_FORCE_INLINE + int operator() (const SrcData& src, + int ip, const amrex::RandomEngine& /*engine*/) const noexcept + { + const auto& p = src.getSuperParticle(ip); + if (m_iside == 0) { + if (p.pos(m_idim) < m_plo[m_idim]) { return 1; } + } else { + if (p.pos(m_idim) >= m_phi[m_idim]) { return 1; } + } + return 0; + } +}; + +struct CopyAndTimestamp { + int m_index; + int m_step; + + template + AMREX_GPU_HOST_DEVICE + void operator() (const DstData& dst, const SrcData& src, + int src_i, int dst_i) const noexcept + { + dst.m_aos[dst_i] = src.m_aos[src_i]; + for (int j = 0; j < SrcData::NAR; ++j) + dst.m_rdata[j][dst_i] = src.m_rdata[j][src_i]; + for (int j = 0; j < src.m_num_runtime_real; ++j) + dst.m_runtime_rdata[j][dst_i] = src.m_runtime_rdata[j][src_i]; + for (int j = 0; j < src.m_num_runtime_int; ++j) + dst.m_runtime_idata[j][dst_i] = src.m_runtime_idata[j][src_i]; + dst.m_runtime_idata[m_index][dst_i] = m_step; + } +}; + +ParticleBoundaryBuffer::ParticleBoundaryBuffer () +{ + m_particle_containers.resize(numBoundaries()); + m_do_boundary_buffer.resize(numBoundaries()); + + for (int i = 0; i < numBoundaries(); ++i) + { + m_particle_containers[i].resize(numSpecies()); + m_do_boundary_buffer[i].resize(numSpecies(), 0); + } + + for (int ispecies = 0; ispecies < numSpecies(); ++ispecies) + { + amrex::ParmParse pp_species(getSpeciesNames()[ispecies]); + pp_species.query("save_particles_at_xlo", m_do_boundary_buffer[0][ispecies]); + pp_species.query("save_particles_at_xhi", m_do_boundary_buffer[1][ispecies]); +#if AMREX_SPACEDIM == 2 + pp_species.query("save_particles_at_zlo", m_do_boundary_buffer[2][ispecies]); + pp_species.query("save_particles_at_zhi", m_do_boundary_buffer[3][ispecies]); +#else + pp_species.query("save_particles_at_ylo", m_do_boundary_buffer[2][ispecies]); + pp_species.query("save_particles_at_yhi", m_do_boundary_buffer[3][ispecies]); + pp_species.query("save_particles_at_zlo", m_do_boundary_buffer[4][ispecies]); + pp_species.query("save_particles_at_zhi", m_do_boundary_buffer[5][ispecies]); +#endif +#ifdef AMREX_USE_EB + pp_species.query("save_particles_at_eb", m_do_boundary_buffer[AMREX_SPACEDIM*2][ispecies]); +#endif + } +} + +void ParticleBoundaryBuffer::printNumParticles () const { + for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) + { + for (int iside = 0; iside < 2; ++iside) + { + auto& buffer = m_particle_containers[2*idim+iside]; + for (int i = 0; i < numSpecies(); ++i) + { + int np = buffer[i].isDefined() ? buffer[i].TotalNumberOfParticles(false) : 0; + amrex::Print() << "Species " << getSpeciesNames()[i] << " has " + << np << " particles in the boundary buffer " + << " for side " << iside << " of dim " << idim << "\n"; + } + } + } +#ifdef AMREX_USE_EB + auto& buffer = m_particle_containers[2*AMREX_SPACEDIM]; + for (int i = 0; i < numSpecies(); ++i) + { + int np = buffer[i].isDefined() ? 
buffer[i].TotalNumberOfParticles(false) : 0; + amrex::Print() << "Species " << getSpeciesNames()[i] << " has " + << np << " particles in the EB boundary buffer \n"; + } +#endif +} + +void ParticleBoundaryBuffer::clearParticles () { + for (int i = 0; i < numBoundaries(); ++i) + { + auto& buffer = m_particle_containers[i]; + for (int ispecies = 0; ispecies < numSpecies(); ++ispecies) + { + auto& species_buffer = buffer[ispecies]; + if (species_buffer.isDefined()) species_buffer.clearParticles(); + } + } +} + +void ParticleBoundaryBuffer::gatherParticles (MultiParticleContainer& mypc, + const amrex::Vector& distance_to_eb) +{ + using PIter = amrex::ParConstIter<0,0,PIdx::nattribs>; + const auto& warpx_instance = WarpX::GetInstance(); + const amrex::Geometry& geom = warpx_instance.Geom(0); + auto plo = geom.ProbLoArray(); + auto phi = geom.ProbHiArray(); + auto dxi = geom.InvCellSizeArray(); + for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) + { + if (geom.isPeriodic(idim)) continue; + for (int iside = 0; iside < 2; ++iside) + { + auto& buffer = m_particle_containers[2*idim+iside]; + for (int i = 0; i < numSpecies(); ++i) + { + if (!m_do_boundary_buffer[2*idim+iside][i]) continue; + const auto& pc = mypc.GetParticleContainer(i); + if (!buffer[i].isDefined()) + { + buffer[i] = ParticleBuffer::getTmpPC(&pc); + buffer[i].AddIntComp(false); // for timestamp + } + auto& species_buffer = buffer[i]; + for (int lev = 0; lev < pc.numLevels(); ++lev) + { + const auto& plevel = pc.GetParticles(lev); + for(PIter pti(pc, lev); pti.isValid(); ++pti) + { + auto index = std::make_pair(pti.index(), pti.LocalTileIndex()); + if(plevel.find(index) == plevel.end()) continue; + + auto& ptile_buffer = species_buffer.DefineAndReturnParticleTile( + lev, pti.index(), pti.LocalTileIndex()); + const auto& ptile = plevel.at(index); + auto np = ptile.numParticles(); + if (np == 0) continue; + + auto dst_index = ptile_buffer.numParticles(); + ptile_buffer.resize(dst_index + np); + + int timestamp_index = ptile_buffer.NumRuntimeIntComps()-1; + int timestep = warpx_instance.getistep(0); + auto count = amrex::filterAndTransformParticles(ptile_buffer, ptile, + IsOutsideDomainBoundary{plo, phi, idim, iside}, + CopyAndTimestamp{timestamp_index, timestep}, + 0, dst_index); + ptile_buffer.resize(dst_index + count); + } + } + } + } + } + +#ifdef AMREX_USE_EB + auto& buffer = m_particle_containers[m_particle_containers.size()-1]; + for (int i = 0; i < numSpecies(); ++i) + { + const auto& pc = mypc.GetParticleContainer(i); + if (!buffer[i].isDefined()) + { + buffer[i] = ParticleBuffer::getTmpPC(&pc); + buffer[i].AddIntComp(false); // for timestamp + } + auto& species_buffer = buffer[i]; + for (int lev = 0; lev < pc.numLevels(); ++lev) + { + const auto& plevel = pc.GetParticles(lev); + for(PIter pti(pc, lev); pti.isValid(); ++pti) + { + auto phiarr = (*distance_to_eb[lev])[pti].array(); // signed distance function + auto index = std::make_pair(pti.index(), pti.LocalTileIndex()); + if(plevel.find(index) == plevel.end()) continue; + + const auto getPosition = GetParticlePosition(pti); + auto& ptile_buffer = species_buffer.DefineAndReturnParticleTile(lev, pti.index(), + pti.LocalTileIndex()); + const auto& ptile = plevel.at(index); + auto np = ptile.numParticles(); + if (np == 0) continue; + + auto dst_index = ptile_buffer.numParticles(); + ptile_buffer.resize(dst_index + np); + + int timestamp_index = ptile_buffer.NumRuntimeIntComps()-1; + int timestep = warpx_instance.getistep(0); + using SrcData = 
WarpXParticleContainer::ParticleTileType::ConstParticleTileDataType; + auto count = amrex::filterAndTransformParticles(ptile_buffer, ptile, + [=] AMREX_GPU_HOST_DEVICE (const SrcData& /*src*/, const int ip) noexcept + { + amrex::ParticleReal xp, yp, zp; + getPosition(ip, xp, yp, zp); + + amrex::Real phi_value = doGatherScalarFieldNodal( + xp, yp, zp, phiarr, dxi, plo + ); + return phi_value < 0.0 ? 1 : 0; + }, + CopyAndTimestamp{timestamp_index, timestep}, 0, dst_index); + ptile_buffer.resize(dst_index + count); + } + } + } +#else + amrex::ignore_unused(distance_to_eb, dxi); +#endif +} diff --git a/Source/Particles/ParticleBoundaryBuffer_fwd.H b/Source/Particles/ParticleBoundaryBuffer_fwd.H new file mode 100644 index 000000000..dcc453364 --- /dev/null +++ b/Source/Particles/ParticleBoundaryBuffer_fwd.H @@ -0,0 +1,13 @@ +/* Copyright 2021 Andrew Myers, Axel Huebl + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ + +#ifndef WARPX_PARTICLE_BOUNDARY_BUFFER_FWD_H +#define WARPX_PARTICLE_BOUNDARY_BUFFER_FWD_H + +class ParticleBoundaryBuffer; + +#endif /* WARPX_PARTICLE_BOUNDARY_BUFFER_FWD_H */ diff --git a/Source/Particles/ParticleBuffer.H b/Source/Particles/ParticleBuffer.H new file mode 100644 index 000000000..50bcf1129 --- /dev/null +++ b/Source/Particles/ParticleBuffer.H @@ -0,0 +1,33 @@ +/* Copyright 2021 Andrew Myers + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ +#ifndef PARTICLEBUFFER_H_ +#define PARTICLEBUFFER_H_ + +#include "Particles/MultiParticleContainer.H" +#include "WarpX.H" + +#include + +namespace ParticleBuffer { + template class Allocator> + using BufferType = amrex::AmrParticleContainer<0, 0, PIdx::nattribs, 0, Allocator>; + + template class Allocator> + BufferType getTmpPC (const WarpXParticleContainer* pc) + { + BufferType tmp(&WarpX::GetInstance()); + // add runtime real comps to tmp + for (int ic = 0; ic < pc->NumRuntimeRealComps(); ++ic) { tmp.AddRealComp(false); } + + // add runtime int comps to tmp + for (int ic = 0; ic < pc->NumRuntimeIntComps(); ++ic) { tmp.AddIntComp(false); } + + return tmp; + } +} + +#endif /*PARTICLEBUFFER_H_*/ diff --git a/Source/Particles/Pusher/GetAndSetPosition.H b/Source/Particles/Pusher/GetAndSetPosition.H index c9d0765e3..b0f1257f5 100644 --- a/Source/Particles/Pusher/GetAndSetPosition.H +++ b/Source/Particles/Pusher/GetAndSetPosition.H @@ -37,16 +37,13 @@ void get_particle_position (const WarpXParticleContainer::SuperParticleType& p, z = p.pos(2); #else x = p.pos(0); - y = std::numeric_limits::quiet_NaN(); + y = amrex::ParticleReal(0.0); z = p.pos(1); #endif } /** \brief Functor that can be used to extract the positions of the macroparticles * inside a ParallelFor kernel - * - * \param a_pti iterator to the tile containing the macroparticles - * \param a_offset offset to apply to the particle indices */ struct GetParticlePosition { @@ -57,12 +54,20 @@ struct GetParticlePosition #if (defined WARPX_DIM_RZ) const RType* m_theta = nullptr; #elif (AMREX_SPACEDIM == 2) - static constexpr RType m_snan = std::numeric_limits::quiet_NaN(); + static constexpr RType m_y_default = RType(0.0); #endif GetParticlePosition () = default; - GetParticlePosition (const WarpXParIter& a_pti, int a_offset = 0) noexcept + /** Constructor + * + * \tparam ptiType the type of the particle iterator used in the constructor + * + * \param a_pti iterator to the tile containing the macroparticles + * \param a_offset offset to apply to the particle indices + */ + template + GetParticlePosition (const ptiType& a_pti, 
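gatherParticles above leans on amrex::filterAndTransformParticles: a predicate selects the particles that crossed a domain face (or, in the EB case, where the gathered signed-distance value turns negative), and a copy functor moves them into the buffer tile while stamping the current step into the extra runtime integer component. Stripped of the AMReX containers, the control flow looks like the following sketch (plain structs and illustrative names, not the AMReX API):

// Plain-C++ sketch of the filter-and-transform step used by gatherParticles above:
// select particles past a boundary, copy them into a buffer, stamp the step of scraping.
#include <cstdio>
#include <vector>

struct Particle { double z; double uz; };
struct Scraped  { Particle p; int step; };

template <class Pred>
std::size_t filter_and_copy (std::vector<Scraped>& dst, const std::vector<Particle>& src,
                             Pred is_selected, int step)
{
    std::size_t count = 0;
    for (const auto& p : src) {
        if (is_selected(p)) { dst.push_back({p, step}); ++count; }
    }
    return count;
}

int main ()
{
    const double zmax = 1.0;
    const int current_step = 42;
    std::vector<Particle> tile = {{0.3, 1.0}, {1.2, 2.0}, {0.9, -1.0}, {1.5, 0.5}};
    std::vector<Scraped> buffer;

    auto outside_zhi = [&](const Particle& p) { return p.z >= zmax; };
    auto n = filter_and_copy(buffer, tile, outside_zhi, current_step);

    std::printf("scraped %zu particles, stamped with step %d\n",
                n, buffer.empty() ? -1 : buffer[0].step);
    return 0;
}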
int a_offset = 0) noexcept { const auto& aos = a_pti.GetArrayOfStructs(); m_structs = aos().dataPtr() + a_offset; @@ -90,7 +95,7 @@ struct GetParticlePosition z = p.pos(2); #else x = p.pos(0); - y = m_snan; + y = m_y_default; z = p.pos(1); #endif } @@ -114,7 +119,7 @@ struct GetParticlePosition z = p.pos(2); #else x = p.pos(0); - y = m_snan; + y = m_y_default; z = p.pos(1); #endif } diff --git a/Source/Particles/Pusher/UpdateMomentumBorisWithRadiationReaction.H b/Source/Particles/Pusher/UpdateMomentumBorisWithRadiationReaction.H index 8dfa85ed6..6f0eae476 100644 --- a/Source/Particles/Pusher/UpdateMomentumBorisWithRadiationReaction.H +++ b/Source/Particles/Pusher/UpdateMomentumBorisWithRadiationReaction.H @@ -71,8 +71,8 @@ void UpdateMomentumBorisWithRadiationReaction( const amrex::Real coeff = gamma_n*gamma_n*(fl_q2-bdotE2); //Radiation reaction constant - const amrex::Real RRcoeff = 2.0_rt*PhysConst::r_e*q*q/ - (3.0_rt*m*m*PhysConst::c*PhysConst::c); + const amrex::Real q_over_mc = q/(m*PhysConst::c); + const amrex::Real RRcoeff = (2.0_rt/3.0_rt)*PhysConst::r_e*q_over_mc*q_over_mc; //Compute the components of the RR force const amrex::Real frx = diff --git a/Source/Particles/WarpXParticleContainer.cpp b/Source/Particles/WarpXParticleContainer.cpp index 427b58121..cb7356daa 100644 --- a/Source/Particles/WarpXParticleContainer.cpp +++ b/Source/Particles/WarpXParticleContainer.cpp @@ -13,6 +13,7 @@ #include "Deposition/CurrentDeposition.H" #include "Pusher/GetAndSetPosition.H" #include "Pusher/UpdatePosition.H" +#include "ParticleBoundaries_K.H" #include "Utils/CoarsenMR.H" #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXConst.H" @@ -1134,13 +1135,13 @@ WarpXParticleContainer::ApplyBoundaryConditions (ParticleBoundaries& boundary_co // Note that for RZ, (x, y, z) is actually (r, theta, z). bool particle_lost = false; - ParticleBoundaries::apply_boundaries(x, xmin, xmax, + ApplyParticleBoundaries::apply_boundaries(x, xmin, xmax, #ifdef WARPX_DIM_3D - y, ymin, ymax, + y, ymin, ymax, #endif - z, zmin, zmax, - ux[i], uy[i], uz[i], particle_lost, - boundary_conditions); + z, zmin, zmax, + ux[i], uy[i], uz[i], particle_lost, + boundary_conditions); if (particle_lost) { p.id() = -1; diff --git a/Source/Utils/WarpXConst.H b/Source/Utils/WarpXConst.H index 6fe7833da..fc40be290 100644 --- a/Source/Utils/WarpXConst.H +++ b/Source/Utils/WarpXConst.H @@ -12,6 +12,9 @@ #include +#include +#include + // Math constants namespace MathConst { @@ -20,6 +23,11 @@ namespace MathConst // Physical constants. Values are the 2018 CODATA recommended values // https://physics.nist.gov/cuu/Constants/index.html +// +// New additions here should also be considered for addition to +// `warpx_constants` in WarpXUtil.cpp's `makeParser`, so that they're +// available in parsing and evaluation of PICMI expressions, as well +// as the corresponding Python definitions namespace PhysConst { static constexpr auto c = static_cast( 299'792'458. ); diff --git a/Source/Utils/WarpXUtil.H b/Source/Utils/WarpXUtil.H index 0391e3f7a..78a407b6f 100644 --- a/Source/Utils/WarpXUtil.H +++ b/Source/Utils/WarpXUtil.H @@ -197,10 +197,22 @@ void getCellCoordinates (int i, int j, int k, } +/** +* \brief Do a safe cast of a real to an int +* This ensures that the float value is within the range of ints and if not, +* raises an exception. 
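The radiation-reaction refactor above is purely algebraic: 2 r_e q^2 / (3 m^2 c^2) equals (2/3) r_e (q/(m c))^2, and the new form simply avoids building m^2 c^2 explicitly. A standalone check with electron values (approximate CODATA numbers typed in for the sketch rather than taken from PhysConst):

// Check that the two ways of writing the radiation-reaction coefficient agree:
// 2*r_e*q^2 / (3*m^2*c^2)  ==  (2/3)*r_e*(q/(m*c))^2.
// Constants below are approximate electron values, typed in only for this check.
#include <cmath>
#include <cstdio>

int main ()
{
    const double c   = 299'792'458.;
    const double r_e = 2.8179403262e-15;   // classical electron radius [m]
    const double q   = 1.602176634e-19;    // elementary charge [C]
    const double m   = 9.1093837015e-31;   // electron mass [kg]

    const double old_form = 2.0*r_e*q*q / (3.0*m*m*c*c);
    const double q_over_mc = q/(m*c);
    const double new_form = (2.0/3.0)*r_e*q_over_mc*q_over_mc;

    std::printf("old = %.15e\nnew = %.15e\nrel. diff = %.3e\n",
                old_form, new_form, std::fabs(old_form - new_form)/old_form);
    return 0;
}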
+* +* \param x Real value to cast +* \param real_name String, the name of the variable being casted to use in the error message +*/ +int +safeCastToInt(amrex::Real x, const std::string& real_name); + /** * \brief Initialize an amrex::Parser object from a string containing a math expression * * \param parse_function String to read to initialize the parser. +* \param varnames A list of predefined independent variables */ amrex::Parser makeParser (std::string const& parse_function, amrex::Vector const& varnames); @@ -220,15 +232,20 @@ amrex::ParserExecutor compileParser (amrex::Parser const* parser) * amrex::ParmParse::query reads a name and a value from the input file. This function does the * same, and applies the amrex::Parser to the value, so the user has the choice to specify a value or * a math expression (including user-defined constants). - * Only works for amrex::Real numbers, one would need another version for integers etc. + * Works for amrex::Real numbers and integers. * * \param[in] a_pp amrex::ParmParse object * \param[in] str name of the parameter to read - * \param[out] val where the value queried and parsed is stored + * \param[out] val where the value queried and parsed is stored, either a scalar or vector + * \param[in] start_ix start index in the list of inputs values (optional with arrays) + * \param[in] num_val number of input values to use (optional with arrays) */ int queryWithParser (const amrex::ParmParse& a_pp, char const * const str, amrex::Real& val); int queryArrWithParser (const amrex::ParmParse& a_pp, char const * const str, std::vector& val, const int start_ix, const int num_val); +int queryWithParser (const amrex::ParmParse& a_pp, char const * const str, int& val); +int queryArrWithParser (const amrex::ParmParse& a_pp, char const * const str, std::vector& val, + const int start_ix, const int num_val); /** * \brief Similar to amrex::ParmParse::get, but also supports math expressions for the value. @@ -236,16 +253,22 @@ int queryArrWithParser (const amrex::ParmParse& a_pp, char const * const str, st * amrex::ParmParse::get reads a name and a value from the input file. This function does the * same, and applies the Parser to the value, so the user has the choice to specify a value or * a math expression (including user-defined constants). - * Only works for amrex::Real numbers, one would need another version for integers etc. + * Works for amrex::Real numbers and integers. 
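safeCastToInt and the integer overloads of queryWithParser/getWithParser declared above exist because parsed integer inputs are evaluated as reals first; the final cast must reject NaN and anything outside the int range instead of silently wrapping. A standalone sketch of that contract, re-implemented with a std::runtime_error in place of WarpXUtilMsg::AlwaysAssert and assuming a 32-bit int:

// Standalone sketch of the safeCastToInt contract: accept reals that fit in an int,
// reject overflow, negative overflow, and NaN. Assumes a 32-bit int; illustrative only.
#include <cmath>
#include <cstdio>
#include <limits>
#include <stdexcept>
#include <string>

int safe_cast_to_int (double x, const std::string& name)
{
    // 2.0*(max/2 + 1) is exactly 2^31 in double, so 2^31-1 is accepted and 2^31 rejected
    if (x < 2.0*(std::numeric_limits<int>::max()/2 + 1)) {
        if (std::ceil(x) >= std::numeric_limits<int>::min()) {
            return static_cast<int>(x);
        }
        throw std::runtime_error("Negative overflow casting " + name + " to int");
    } else if (x > 0) {
        throw std::runtime_error("Overflow casting " + name + " to int");
    }
    throw std::runtime_error("NaN detected casting " + name + " to int");  // NaN fails both tests
}

int main ()
{
    std::printf("%d\n", safe_cast_to_int(2147483647.0, "n_cell"));         // largest accepted value
    try { safe_cast_to_int(2147483648.0, "n_cell"); }
    catch (const std::exception& e) { std::printf("%s\n", e.what()); }
    try { safe_cast_to_int(std::nan(""), "n_cell"); }
    catch (const std::exception& e) { std::printf("%s\n", e.what()); }
    return 0;
}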
* * \param[in] a_pp amrex::ParmParse object * \param[in] str name of the parameter to read * \param[out] val where the value queried and parsed is stored + * \param[in] start_ix start index in the list of inputs values (optional with arrays) + * \param[in] num_val number of input values to use (optional with arrays) */ void getWithParser (const amrex::ParmParse& a_pp, char const * const str, amrex::Real& val); void getArrWithParser (const amrex::ParmParse& a_pp, char const * const str, std::vector& val); void getArrWithParser (const amrex::ParmParse& a_pp, char const * const str, std::vector& val, const int start_ix, const int num_val); +void getWithParser (const amrex::ParmParse& a_pp, char const * const str, int& val); +void getArrWithParser (const amrex::ParmParse& a_pp, char const * const str, std::vector& val); +void getArrWithParser (const amrex::ParmParse& a_pp, char const * const str, std::vector& val, + const int start_ix, const int num_val); namespace WarpXUtilMsg{ diff --git a/Source/Utils/WarpXUtil.cpp b/Source/Utils/WarpXUtil.cpp index a88156f89..6a1b5259a 100644 --- a/Source/Utils/WarpXUtil.cpp +++ b/Source/Utils/WarpXUtil.cpp @@ -34,11 +34,27 @@ #include #include #include +#include using namespace amrex; +void PreparseAMReXInputIntArray(amrex::ParmParse& a_pp, char const * const input_str, const bool replace) +{ + const int cnt = a_pp.countval(input_str); + if (cnt > 0) { + Vector input_array; + getArrWithParser(a_pp, input_str, input_array); + if (replace) { + a_pp.remove(input_str); + } + a_pp.addarr(input_str, input_array); + } +} + void ParseGeometryInput() { + // Parse prob_lo and hi, evaluating any expressions since geometry does not + // parse its input ParmParse pp_geometry("geometry"); Vector prob_lo(AMREX_SPACEDIM); @@ -66,6 +82,22 @@ void ParseGeometryInput() pp_geometry.addarr("prob_lo", prob_lo); pp_geometry.addarr("prob_hi", prob_hi); + + // Parse amr input, evaluating any expressions since amr does not parse its input + ParmParse pp_amr("amr"); + + // Note that n_cell is replaced so that only the parsed version is written out to the + // warpx_job_info file. This must be done since yt expects to be able to parse + // the value of n_cell from that file. For the rest, this doesn't matter. + PreparseAMReXInputIntArray(pp_amr, "n_cell", true); + PreparseAMReXInputIntArray(pp_amr, "max_grid_size", false); + PreparseAMReXInputIntArray(pp_amr, "max_grid_size_x", false); + PreparseAMReXInputIntArray(pp_amr, "max_grid_size_y", false); + PreparseAMReXInputIntArray(pp_amr, "max_grid_size_z", false); + PreparseAMReXInputIntArray(pp_amr, "blocking_factor", false); + PreparseAMReXInputIntArray(pp_amr, "blocking_factor_x", false); + PreparseAMReXInputIntArray(pp_amr, "blocking_factor_y", false); + PreparseAMReXInputIntArray(pp_amr, "blocking_factor_z", false); } void ReadBoostedFrameParameters(Real& gamma_boost, Real& beta_boost, @@ -239,6 +271,30 @@ void Store_parserString(const amrex::ParmParse& pp, std::string query_string, f.clear(); } +int safeCastToInt(const amrex::Real x, const std::string& real_name) { + int result = 0; + bool error_detected = false; + std::string assert_msg; + // (2.0*(numeric_limits::max()/2+1)) converts numeric_limits::max()+1 to a real ensuring accuracy to all digits + // This accepts x = 2**31-1 but rejects 2**31. 
+ if (x < (2.0*(std::numeric_limits::max()/2+1))) { + if (std::ceil(x) >= std::numeric_limits::min()) { + result = static_cast(x); + } else { + error_detected = true; + assert_msg = "Error: Negative overflow detected when casting " + real_name + " = " + std::to_string(x) + " to int"; + } + } else if (x > 0) { + error_detected = true; + assert_msg = "Error: Overflow detected when casting " + real_name + " = " + std::to_string(x) + " to int"; + } else { + error_detected = true; + assert_msg = "Error: NaN detected when casting " + real_name + " to int"; + } + WarpXUtilMsg::AlwaysAssert(!error_detected, assert_msg); + return result; +} + Parser makeParser (std::string const& parse_function, amrex::Vector const& varnames) { // Since queryWithParser recursively calls this routine, keep track of symbols @@ -247,9 +303,33 @@ Parser makeParser (std::string const& parse_function, amrex::Vector Parser parser(parse_function); parser.registerVariables(varnames); - ParmParse pp_my_constants("my_constants"); + std::set symbols = parser.symbols(); for (auto const& v : varnames) symbols.erase(v.c_str()); + + // User can provide inputs under this name, through which expressions + // can be provided for arbitrary variables. PICMI inputs are aware of + // this convention and use the same prefix as well. This potentially + // includes variable names that match physical or mathematical + // constants, in case the user wishes to enforce a different + // system of units or some form of quasi-physical behavior in the + // simulation. Thus, this needs to override any built-in + // constants. + ParmParse pp_my_constants("my_constants"); + + // Physical / Numerical Constants available to parsed expressions + static std::map warpx_constants = + { + {"clight", PhysConst::c}, + {"epsilon0", PhysConst::ep0}, + {"mu0", PhysConst::mu0}, + {"q_e", PhysConst::q_e}, + {"m_e", PhysConst::m_e}, + {"m_p", PhysConst::m_p}, + {"m_u", PhysConst::m_u}, + {"pi", MathConst::pi}, + }; + for (auto it = symbols.begin(); it != symbols.end(); ) { Real v; @@ -261,33 +341,17 @@ Parser makeParser (std::string const& parse_function, amrex::Vector if (is_input) { parser.setConstant(*it, v); it = symbols.erase(it); - } else if (std::strcmp(it->c_str(), "q_e") == 0) { - parser.setConstant(*it, PhysConst::q_e); - it = symbols.erase(it); - } else if (std::strcmp(it->c_str(), "m_e") == 0) { - parser.setConstant(*it, PhysConst::m_e); - it = symbols.erase(it); - } else if (std::strcmp(it->c_str(), "m_p") == 0) { - parser.setConstant(*it, PhysConst::m_p); - it = symbols.erase(it); - } else if (std::strcmp(it->c_str(), "m_u") == 0) { - parser.setConstant(*it, PhysConst::m_u); - it = symbols.erase(it); - } else if (std::strcmp(it->c_str(), "epsilon0") == 0) { - parser.setConstant(*it, PhysConst::ep0); - it = symbols.erase(it); - } else if (std::strcmp(it->c_str(), "mu0") == 0) { - parser.setConstant(*it, PhysConst::mu0); - it = symbols.erase(it); - } else if (std::strcmp(it->c_str(), "clight") == 0) { - parser.setConstant(*it, PhysConst::c); - it = symbols.erase(it); - } else if (std::strcmp(it->c_str(), "pi") == 0) { - parser.setConstant(*it, MathConst::pi); - it = symbols.erase(it); - } else { - ++it; + continue; + } + + auto constant = warpx_constants.find(*it); + if (constant != warpx_constants.end()) { + parser.setConstant(*it, constant->second); + it = symbols.erase(it); + continue; } + + ++it; } for (auto const& s : symbols) { amrex::Abort("makeParser::Unknown symbol "+s); @@ -383,6 +447,53 @@ getArrWithParser (const amrex::ParmParse& a_pp, char 
const * const str, std::vec } } +int queryWithParser (const amrex::ParmParse& a_pp, char const * const str, int& val) { + amrex::Real rval; + const int result = queryWithParser(a_pp, str, rval); + if (result) { + val = safeCastToInt(std::round(rval), str); + } + return result; +} + +void getWithParser (const amrex::ParmParse& a_pp, char const * const str, int& val) { + amrex::Real rval; + getWithParser(a_pp, str, rval); + val = safeCastToInt(std::round(rval), str); +} + +int queryArrWithParser (const amrex::ParmParse& a_pp, char const * const str, std::vector& val, + const int start_ix, const int num_val) { + std::vector rval; + const int result = queryArrWithParser(a_pp, str, rval, start_ix, num_val); + if (result) { + val.resize(rval.size()); + for (unsigned long i = 0 ; i < val.size() ; i++) { + val[i] = safeCastToInt(std::round(rval[i]), str); + } + } + return result; +} + +void getArrWithParser (const amrex::ParmParse& a_pp, char const * const str, std::vector& val) { + std::vector rval; + getArrWithParser(a_pp, str, rval); + val.resize(rval.size()); + for (unsigned long i = 0 ; i < val.size() ; i++) { + val[i] = safeCastToInt(std::round(rval[i]), str); + } +} + +void getArrWithParser (const amrex::ParmParse& a_pp, char const * const str, std::vector& val, + const int start_ix, const int num_val) { + std::vector rval; + getArrWithParser(a_pp, str, rval, start_ix, num_val); + val.resize(rval.size()); + for (unsigned long i = 0 ; i < val.size() ; i++) { + val[i] = safeCastToInt(std::round(rval[i]), str); + } +} + /** * \brief Ensures that the blocks are setup correctly for the RZ spectral solver * When using the RZ spectral solver, the Hankel transform cannot be @@ -544,38 +655,6 @@ void ReadBCParams () } } } - // temporarily check : If silver mueller is selected for one boundary, it should be - // selected at all valid boundaries. 
- for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { - if (WarpX::field_boundary_lo[idim] == FieldBoundaryType::Absorbing_SilverMueller || - WarpX::field_boundary_hi[idim] == FieldBoundaryType::Absorbing_SilverMueller){ -#if (AMREX_SPACEDIM == 3) - AMREX_ALWAYS_ASSERT_WITH_MESSAGE( - (WarpX::field_boundary_lo[0] == FieldBoundaryType::Absorbing_SilverMueller)&& - (WarpX::field_boundary_hi[0] == FieldBoundaryType::Absorbing_SilverMueller)&& - (WarpX::field_boundary_lo[1] == FieldBoundaryType::Absorbing_SilverMueller)&& - (WarpX::field_boundary_hi[1] == FieldBoundaryType::Absorbing_SilverMueller)&& - (WarpX::field_boundary_lo[2] == FieldBoundaryType::Absorbing_SilverMueller)&& - (WarpX::field_boundary_hi[2] == FieldBoundaryType::Absorbing_SilverMueller) - , " The current implementation requires silver-mueller boundary condition to be applied at all boundaries!"); -#else -#ifndef WARPX_DIM_RZ - AMREX_ALWAYS_ASSERT_WITH_MESSAGE( - (WarpX::field_boundary_lo[0] == FieldBoundaryType::Absorbing_SilverMueller)&& - (WarpX::field_boundary_hi[0] == FieldBoundaryType::Absorbing_SilverMueller)&& - (WarpX::field_boundary_lo[1] == FieldBoundaryType::Absorbing_SilverMueller)&& - (WarpX::field_boundary_hi[1] == FieldBoundaryType::Absorbing_SilverMueller) - , " The current implementation requires silver-mueller boundary condition to be applied at all boundaries!"); -#else - AMREX_ALWAYS_ASSERT_WITH_MESSAGE( - (WarpX::field_boundary_hi[0] == FieldBoundaryType::Absorbing_SilverMueller)&& - (WarpX::field_boundary_lo[1] == FieldBoundaryType::Absorbing_SilverMueller)&& - (WarpX::field_boundary_hi[1] == FieldBoundaryType::Absorbing_SilverMueller) - , " The current implementation requires silver-mueller boundary condition to be applied at all boundaries!"); -#endif -#endif - } - } #ifdef WARPX_DIM_RZ // Ensure code aborts if PEC is specified at r=0 for RZ AMREX_ALWAYS_ASSERT_WITH_MESSAGE( WarpX::field_boundary_lo[0] == FieldBoundaryType::None, diff --git a/Source/WarpX.H b/Source/WarpX.H index 1f14ae13a..489f6d09e 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -19,6 +19,7 @@ #include "Evolve/WarpXDtType.H" #include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver_fwd.H" #include "FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties_fwd.H" +#include "Particles/ParticleBoundaryBuffer_fwd.H" #ifdef WARPX_USE_PSATD # ifdef WARPX_DIM_RZ # include "FieldSolver/SpectralSolver/SpectralSolverRZ_fwd.H" @@ -60,6 +61,10 @@ #include #include +#if defined(AMREX_USE_EB) && defined(WARPX_DIM_RZ) +static_assert(false, "Embedded boundaries are not supported in RZ mode."); +#endif + enum struct PatchType : int { fine, @@ -612,6 +617,7 @@ public: void DampJPML (int lev, PatchType patch_type); void CopyJPML (); + bool isAnyBoundaryPML(); /** * \brief Synchronize the nodal points of the PML MultiFabs @@ -710,7 +716,6 @@ public: amrex::Real getmoving_window_x() const {return moving_window_x;} amrex::Real getcurrent_injection_position () const {return current_injection_position;} bool getis_synchronized() const {return is_synchronized;} - void setplot_rho (bool a_plot_rho) {plot_rho = a_plot_rho;} int maxStep () const {return max_step;} amrex::Real stopTime () const {return stop_time;} @@ -1343,8 +1348,6 @@ private: std::string restart_chkfile; - bool plot_rho = false; - amrex::VisMF::Header::Version plotfile_headerversion = amrex::VisMF::Header::Version_v1; amrex::VisMF::Header::Version slice_plotfile_headerversion = amrex::VisMF::Header::Version_v1; @@ -1378,11 +1381,14 @@ private: int noy_fft = 
16; int noz_fft = 16; - // Domain decomposition on Level 0 + //! Domain decomposition on Level 0 amrex::IntVect numprocs{0}; + //! particle buffer for scraped particles on the boundaries + std::unique_ptr m_particle_boundary_buffer; + // - // Embeded Boundary + // Embedded Boundary // // Factory for field data @@ -1440,8 +1446,9 @@ private: * with k-space filtering (if needed) * * \param[in] icomp index of fourth component (0 for rho_old, 1 for rho_new) + * \param[in] dcomp index of spectral component (0 for rho_old, 1 for rho_new) */ - void PSATDForwardTransformRho (const int icomp); + void PSATDForwardTransformRho (const int icomp, const int dcomp); /** * \brief Copy rho_new to rho_old in spectral space diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index f079a2603..3f7024c71 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -28,12 +28,13 @@ #include "FieldSolver/WarpX_FDTD.H" #include "Filter/NCIGodfreyFilter.H" #include "Particles/MultiParticleContainer.H" +#include "Particles/ParticleBoundaryBuffer.H" #include "Python/WarpXWrappers.h" #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXConst.H" #include "Utils/WarpXUtil.H" -#ifdef BL_USE_SENSEI_INSITU +#ifdef AMREX_USE_SENSEI_INSITU # include #endif #include @@ -317,6 +318,9 @@ WarpX::WarpX () } do_back_transformed_particles = mypc->doBackTransformedDiagnostics(); + // Particle Boundary Buffer (i.e., scraped particles on boundary) + m_particle_boundary_buffer = std::make_unique(); + // Diagnostics multi_diags = std::make_unique(); @@ -489,7 +493,7 @@ WarpX::ReadParameters () { { ParmParse pp;// Traditionally, max_step and stop_time do not have prefix. - pp.query("max_step", max_step); + queryWithParser(pp, "max_step", max_step); queryWithParser(pp, "stop_time", stop_time); pp.query("authors", authors); } @@ -509,7 +513,7 @@ WarpX::ReadParameters () ParmParse pp_warpx("warpx"); std::vector numprocs_in; - pp_warpx.queryarr("numprocs", numprocs_in); + queryArrWithParser(pp_warpx, "numprocs", numprocs_in, 0, AMREX_SPACEDIM); if (not numprocs_in.empty()) { AMREX_ALWAYS_ASSERT_WITH_MESSAGE (numprocs_in.size() == AMREX_SPACEDIM, @@ -544,7 +548,7 @@ WarpX::ReadParameters () queryWithParser(pp_warpx, "cfl", cfl); pp_warpx.query("verbose", verbose); - pp_warpx.query("regrid_int", regrid_int); + queryWithParser(pp_warpx, "regrid_int", regrid_int); pp_warpx.query("do_subcycling", do_subcycling); pp_warpx.query("do_multi_J", do_multi_J); if (do_multi_J) @@ -573,8 +577,8 @@ WarpX::ReadParameters () pp_warpx.query("do_moving_window", do_moving_window); if (do_moving_window) { - pp_warpx.query("start_moving_window_step", start_moving_window_step); - pp_warpx.query("end_moving_window_step", end_moving_window_step); + queryWithParser(pp_warpx, "start_moving_window_step", start_moving_window_step); + queryWithParser(pp_warpx, "end_moving_window_step", end_moving_window_step); std::string s; pp_warpx.get("moving_window_dir", s); if (s == "x" || s == "X") { @@ -615,7 +619,7 @@ WarpX::ReadParameters () AMREX_ALWAYS_ASSERT_WITH_MESSAGE( (s == "z" || s == "Z"), "The boosted frame diagnostic currently only works if the boost is in the z direction."); - pp_warpx.get("num_snapshots_lab", num_snapshots_lab); + queryWithParser(pp_warpx, "num_snapshots_lab", num_snapshots_lab); // Read either dz_snapshots_lab or dt_snapshots_lab bool snapshot_interval_is_specified = 0; @@ -645,7 +649,7 @@ WarpX::ReadParameters () if (do_electrostatic == ElectrostaticSolverAlgo::LabFrame) { queryWithParser(pp_warpx, "self_fields_required_precision", 
self_fields_required_precision); - pp_warpx.query("self_fields_max_iters", self_fields_max_iters); + queryWithParser(pp_warpx, "self_fields_max_iters", self_fields_max_iters); pp_warpx.query("self_fields_verbosity", self_fields_verbosity); // Note that with the relativistic version, these parameters would be // input for each species. @@ -664,7 +668,7 @@ WarpX::ReadParameters () pp_warpx.query("use_filter", use_filter); pp_warpx.query("use_filter_compensation", use_filter_compensation); Vector parse_filter_npass_each_dir(AMREX_SPACEDIM,1); - pp_warpx.queryarr("filter_npass_each_dir", parse_filter_npass_each_dir); + queryArrWithParser(pp_warpx, "filter_npass_each_dir", parse_filter_npass_each_dir, 0, AMREX_SPACEDIM); filter_npass_each_dir[0] = parse_filter_npass_each_dir[0]; filter_npass_each_dir[1] = parse_filter_npass_each_dir[1]; #if (AMREX_SPACEDIM == 3) @@ -709,20 +713,6 @@ WarpX::ReadParameters () pp_warpx.query("do_divb_cleaning", do_divb_cleaning); pp_warpx.query("n_field_gather_buffer", n_field_gather_buffer); pp_warpx.query("n_current_deposition_buffer", n_current_deposition_buffer); -#ifdef AMREX_USE_GPU - std::vectorsort_intervals_string_vec = {"4"}; -#else - std::vector sort_intervals_string_vec = {"-1"}; -#endif - pp_warpx.queryarr("sort_intervals", sort_intervals_string_vec); - sort_intervals = IntervalsParser(sort_intervals_string_vec); - - Vector vect_sort_bin_size(AMREX_SPACEDIM,1); - bool sort_bin_size_is_specified = pp_warpx.queryarr("sort_bin_size", vect_sort_bin_size); - if (sort_bin_size_is_specified){ - for (int i=0; i parse_do_pml_Lo(AMREX_SPACEDIM,0); - // Switching pml lo to 1 when do_pml = 1 and if domain is non-periodic - // Note to remove this code when new BC API is fully functional - if (do_pml == 1) { - for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { - if ( Geom(0).isPeriodic(idim) == 0) parse_do_pml_Lo[idim] = 1; - } - } - pp_warpx.queryarr("do_pml_Lo", parse_do_pml_Lo); - do_pml_Lo[0] = parse_do_pml_Lo[0]; - do_pml_Lo[1] = parse_do_pml_Lo[1]; -#if (AMREX_SPACEDIM == 3) - do_pml_Lo[2] = parse_do_pml_Lo[2]; -#endif - // setting default to 0 - Vector parse_do_pml_Hi(AMREX_SPACEDIM,0); - // Switching pml hi to 1 when do_pml = 1 and if domain is non-periodic - // Note to remove this code when new BC API is fully functional - if (do_pml == 1) { - for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { - if ( Geom(0).isPeriodic(idim) == 0) parse_do_pml_Hi[idim] = 1; - } - } - pp_warpx.queryarr("do_pml_Hi", parse_do_pml_Hi); - do_pml_Hi[0] = parse_do_pml_Hi[0]; - do_pml_Hi[1] = parse_do_pml_Hi[1]; -#if (AMREX_SPACEDIM == 3) - do_pml_Hi[2] = parse_do_pml_Hi[2]; + AMREX_ALWAYS_ASSERT_WITH_MESSAGE( isAnyBoundaryPML() == false, + "PML are not implemented in RZ geometry; please set a different boundary condition using boundary.field_lo and boundary.field_hi."); #endif if ( (do_pml_j_damping==1)&&(do_pml_in_domain==0) ){ @@ -890,11 +849,11 @@ WarpX::ReadParameters () ParmParse pp_vismf("vismf"); pp_vismf.add("usesingleread", use_single_read); pp_vismf.add("usesinglewrite", use_single_write); - pp_warpx.query("mffile_nstreams", mffile_nstreams); + queryWithParser(pp_warpx, "mffile_nstreams", mffile_nstreams); VisMF::SetMFFileInStreams(mffile_nstreams); - pp_warpx.query("field_io_nfiles", field_io_nfiles); + queryWithParser(pp_warpx, "field_io_nfiles", field_io_nfiles); VisMF::SetNOutFiles(field_io_nfiles); - pp_warpx.query("particle_io_nfiles", particle_io_nfiles); + queryWithParser(pp_warpx, "particle_io_nfiles", particle_io_nfiles); ParmParse pp_particles("particles"); 
pp_particles.add("particles_nfiles", particle_io_nfiles); } @@ -914,7 +873,7 @@ WarpX::ReadParameters () if (do_nodal) galerkin_interpolation = false; // Only needs to be set with WARPX_DIM_RZ, otherwise defaults to 1 - pp_warpx.query("n_rz_azimuthal_modes", n_rz_azimuthal_modes); + queryWithParser(pp_warpx, "n_rz_azimuthal_modes", n_rz_azimuthal_modes); // If true, the current is deposited on a nodal grid and then interpolated onto a Yee grid pp_warpx.query("do_current_centering", do_current_centering); @@ -998,9 +957,10 @@ WarpX::ReadParameters () std::vector lasers_names; pp_lasers.queryarr("names", lasers_names); + std::vector sort_intervals_string_vec = {"-1"}; if (!species_names.empty() || !lasers_names.empty()) { int particle_shape; - if (pp_algo.query("particle_shape", particle_shape) == false) + if (queryWithParser(pp_algo, "particle_shape", particle_shape) == false) { amrex::Abort("\nalgo.particle_shape must be set in the input file:" "\nplease set algo.particle_shape to 1, 2, or 3"); @@ -1025,6 +985,24 @@ WarpX::ReadParameters () " some numerical artifact will be present at the interface between coarse and fine patch." "\nWe recommend setting algo.particle_shape = 1 in order to avoid this issue"); } + + // default sort interval for particles if species or lasers vector is not empty +#ifdef AMREX_USE_GPU + sort_intervals_string_vec = {"4"}; +#else + sort_intervals_string_vec = {"-1"}; +#endif + } + + amrex::ParmParse pp_warpx("warpx"); + pp_warpx.queryarr("sort_intervals", sort_intervals_string_vec); + sort_intervals = IntervalsParser(sort_intervals_string_vec); + + Vector vect_sort_bin_size(AMREX_SPACEDIM,1); + bool sort_bin_size_is_specified = pp_warpx.queryarr("sort_bin_size", vect_sort_bin_size); + if (sort_bin_size_is_specified){ + for (int i=0; i 0) @@ -1096,20 +1074,20 @@ WarpX::ReadParameters () pp_psatd.query("noy", noy_str); pp_psatd.query("noz", noz_str); - if(nox_str == "inf"){ + if(nox_str == "inf") { nox_fft = -1; - } else{ - pp_psatd.query("nox", nox_fft); + } else { + queryWithParser(pp_psatd, "nox", nox_fft); } - if(noy_str == "inf"){ + if(noy_str == "inf") { noy_fft = -1; - } else{ - pp_psatd.query("noy", noy_fft); + } else { + queryWithParser(pp_psatd, "noy", noy_fft); } - if(noz_str == "inf"){ + if(noz_str == "inf") { noz_fft = -1; - } else{ - pp_psatd.query("noz", noz_fft); + } else { + queryWithParser(pp_psatd, "noz", noz_fft); } @@ -1244,8 +1222,8 @@ WarpX::ReadParameters () } queryArrWithParser(pp_slice, "dom_lo", slice_lo, 0, AMREX_SPACEDIM); queryArrWithParser(pp_slice, "dom_hi", slice_hi, 0, AMREX_SPACEDIM); - pp_slice.queryarr("coarsening_ratio",slice_crse_ratio,0,AMREX_SPACEDIM); - pp_slice.query("plot_int",slice_plot_int); + queryArrWithParser(pp_slice, "coarsening_ratio",slice_crse_ratio,0,AMREX_SPACEDIM); + queryWithParser(pp_slice, "plot_int",slice_plot_int); slice_realbox.setLo(slice_lo); slice_realbox.setHi(slice_hi); slice_cr_ratio = IntVect(AMREX_D_DECL(1,1,1)); @@ -1259,7 +1237,7 @@ WarpX::ReadParameters () if (do_back_transformed_diagnostics) { AMREX_ALWAYS_ASSERT_WITH_MESSAGE(gamma_boost > 1.0, "gamma_boost must be > 1 to use the boost frame diagnostic"); - pp_slice.query("num_slice_snapshots_lab", num_slice_snapshots_lab); + queryWithParser(pp_slice, "num_slice_snapshots_lab", num_slice_snapshots_lab); if (num_slice_snapshots_lab > 0) { getWithParser(pp_slice, "dt_slice_snapshots_lab", dt_slice_snapshots_lab ); getWithParser(pp_slice, "particle_slice_width_lab",particle_slice_width_lab); @@ -1335,7 +1313,9 @@ WarpX::BackwardCompatibility 
() amrex::Abort("warpx.use_kspace_filter is not supported anymore. " "Please use the flag use_filter, see documentation."); } - + if ( pp_warpx.query("do_pml", backward_int) ) { + amrex::Abort( "do_pml is not supported anymore. Please use boundary.field_lo and boundary.field_hi to set the boundary conditions."); + } ParmParse pp_interpolation("interpolation"); if (pp_interpolation.query("nox", backward_int) || pp_interpolation.query("noy", backward_int) || @@ -1694,14 +1674,15 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm } #endif - bool deposit_charge = do_dive_cleaning || (plot_rho && do_back_transformed_diagnostics); + bool deposit_charge = do_dive_cleaning || (do_electrostatic == ElectrostaticSolverAlgo::LabFrame); if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD) { - deposit_charge = do_dive_cleaning || (plot_rho && do_back_transformed_diagnostics) - || update_with_rho || current_correction; + deposit_charge = do_dive_cleaning || update_with_rho || current_correction; } if (deposit_charge) { - rho_fp[lev] = std::make_unique(amrex::convert(ba,rho_nodal_flag),dm,2*ncomps,ngRho,tag("rho_fp")); + // For the multi-J algorithm we can allocate only one rho component (no distinction between old and new) + const int rho_ncomps = (WarpX::do_multi_J) ? ncomps : 2*ncomps; + rho_fp[lev] = std::make_unique(amrex::convert(ba,rho_nodal_flag),dm,rho_ncomps,ngRho,tag("rho_fp")); } if (do_electrostatic == ElectrostaticSolverAlgo::LabFrame) @@ -1918,8 +1899,10 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm current_cp[lev][1] = std::make_unique(amrex::convert(cba,jy_nodal_flag),dm,ncomps,ngJ,tag("current_cp[y]")); current_cp[lev][2] = std::make_unique(amrex::convert(cba,jz_nodal_flag),dm,ncomps,ngJ,tag("current_cp[z]")); - if (do_dive_cleaning || (plot_rho && do_back_transformed_diagnostics)) { - rho_cp[lev] = std::make_unique(amrex::convert(cba,rho_nodal_flag),dm,2*ncomps,ngRho,tag("rho_cp")); + if (deposit_charge) { + // For the multi-J algorithm we can allocate only one rho component (no distinction between old and new) + const int rho_ncomps = (WarpX::do_multi_J) ? 
@@ -2548,20 +2531,36 @@ WarpX::RestoreCurrent (int lev)

 std::string
 WarpX::Version ()
 {
+    std::string version;
 #ifdef WARPX_GIT_VERSION
-    return std::string(WARPX_GIT_VERSION);
-#else
-    return std::string("Unknown");
+    version = std::string(WARPX_GIT_VERSION);
 #endif
+    if( version.empty() )
+        return std::string("Unknown");
+    else
+        return version;
 }

 std::string
 WarpX::PicsarVersion ()
 {
+    std::string version;
 #ifdef PICSAR_GIT_VERSION
-    return std::string(PICSAR_GIT_VERSION);
-#else
-    return std::string("Unknown");
+    version = std::string(PICSAR_GIT_VERSION);
 #endif
+    if( version.empty() )
+        return std::string("Unknown");
+    else
+        return version;
+}
+
+bool
+WarpX::isAnyBoundaryPML()
+{
+    for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
+        if ( WarpX::field_boundary_lo[idim] == FieldBoundaryType::PML) return true;
+        if ( WarpX::field_boundary_hi[idim] == FieldBoundaryType::PML) return true;
+    }
+    return false;
 }
diff --git a/Source/main.cpp b/Source/main.cpp
index a27354210..751a63fb0 100644
--- a/Source/main.cpp
+++ b/Source/main.cpp
@@ -22,11 +22,14 @@
 #include

 #if defined(AMREX_USE_MPI)
-    #include <mpi.h>
+#   include <mpi.h>
 #endif

 #if defined(AMREX_USE_HIP) && defined(WARPX_USE_PSATD)
-#include <rocfft.h>
+// cstddef: work-around for ROCm/rocFFT <=4.3.0
+// https://github.com/ROCmSoftwarePlatform/rocFFT/blob/rocm-4.3.0/library/include/rocfft.h#L36-L42
+#   include <cstddef>
+#   include <rocfft.h>
 #endif

 int main(int argc, char* argv[])
diff --git a/Tools/PerformanceTests/automated_test_1_uniform_rest_32ppc b/Tools/PerformanceTests/automated_test_1_uniform_rest_32ppc
index 64f05f142..1a31a9aae 100644
--- a/Tools/PerformanceTests/automated_test_1_uniform_rest_32ppc
+++ b/Tools/PerformanceTests/automated_test_1_uniform_rest_32ppc
@@ -10,15 +10,19 @@ amr.max_level = 0

 # Geometry
 geometry.coord_sys   = 0                  # 0: Cartesian
-geometry.is_periodic = 0 0 1              # Is periodic?
 geometry.prob_lo     = -20.e-6 -20.e-6 -20.e-6    # physical domain
 geometry.prob_hi     = 20.e-6 20.e-6 20.e-6

+# Boundaries
+boundary.field_lo = pec pec periodic
+boundary.field_hi = pec pec periodic
+boundary.particle_lo = absorbing absorbing periodic
+boundary.particle_hi = absorbing absorbing periodic
+
 # Verbosity
 warpx.verbose = 1

 algo.particle_shape = 3
-warpx.do_pml = 0

 # CFL
 warpx.cfl = 1.0
diff --git a/Tools/PerformanceTests/automated_test_2_uniform_rest_1ppc b/Tools/PerformanceTests/automated_test_2_uniform_rest_1ppc
index 677894ff2..17b7a02b4 100644
--- a/Tools/PerformanceTests/automated_test_2_uniform_rest_1ppc
+++ b/Tools/PerformanceTests/automated_test_2_uniform_rest_1ppc
@@ -10,15 +10,19 @@ amr.max_level = 0

 # Geometry
 geometry.coord_sys   = 0                  # 0: Cartesian
-geometry.is_periodic = 0 0 0              # Is periodic?
 geometry.prob_lo     = -20.e-6 -20.e-6 -20.e-6    # physical domain
 geometry.prob_hi     = 20.e-6 20.e-6 20.e-6

+# Boundaries
+boundary.field_lo = pec pec pec
+boundary.field_hi = pec pec pec
+boundary.particle_lo = absorbing absorbing absorbing
+boundary.particle_hi = absorbing absorbing absorbing
+
 # Verbosity
 warpx.verbose = 1

 algo.particle_shape = 3
-warpx.do_pml = 1

 # CFL
 warpx.cfl = 1.0
diff --git a/Tools/PerformanceTests/automated_test_3_uniform_drift_4ppc b/Tools/PerformanceTests/automated_test_3_uniform_drift_4ppc
index f673a3b37..7a671167f 100644
--- a/Tools/PerformanceTests/automated_test_3_uniform_drift_4ppc
+++ b/Tools/PerformanceTests/automated_test_3_uniform_drift_4ppc
@@ -10,16 +10,20 @@ amr.max_level = 0

 # Geometry
 geometry.coord_sys   = 0                  # 0: Cartesian
-geometry.is_periodic = 0 0 1              # Is periodic?
 geometry.prob_lo     = -20.e-6 -20.e-6 -20.e-6    # physical domain
 geometry.prob_hi     = 20.e-6 20.e-6 20.e-6

+# Boundaries
+boundary.field_lo = pec pec periodic
+boundary.field_hi = pec pec periodic
+boundary.particle_lo = absorbing absorbing periodic
+boundary.particle_hi = absorbing absorbing periodic
+
 # Verbosity
 warpx.verbose = 1

 # Algorithms
 algo.particle_shape = 3
-warpx.do_pml = 0

 # CFL
 warpx.cfl = 1.0
diff --git a/Tools/PerformanceTests/automated_test_4_labdiags_2ppc b/Tools/PerformanceTests/automated_test_4_labdiags_2ppc
index 88c574281..f8337c8ba 100644
--- a/Tools/PerformanceTests/automated_test_4_labdiags_2ppc
+++ b/Tools/PerformanceTests/automated_test_4_labdiags_2ppc
@@ -8,10 +8,15 @@ amr.max_level = 0

 # Geometry
 geometry.coord_sys   = 0                  # 0: Cartesian
-geometry.is_periodic = 0 0 0              # Is periodic?
 geometry.prob_lo     = -150.e-6 -150.e-6 -80.e-6    # physical domain
 geometry.prob_hi     = 150.e-6 150.e-6 0.

+# Boundaries
+boundary.field_lo = pec pec pec
+boundary.field_hi = pec pec pec
+boundary.particle_lo = absorbing absorbing absorbing
+boundary.particle_hi = absorbing absorbing absorbing
+
 # Verbosity
 warpx.verbose = 1

@@ -19,7 +24,6 @@ warpx.verbose = 1
 algo.particle_shape = 3
 warpx.use_filter = 1
 warpx.cfl = 1.0
-warpx.do_pml = 0

 # Moving window
 warpx.do_moving_window = 1
diff --git a/Tools/PerformanceTests/automated_test_5_loadimbalance b/Tools/PerformanceTests/automated_test_5_loadimbalance
index c7b44dc35..bdfd874ef 100644
--- a/Tools/PerformanceTests/automated_test_5_loadimbalance
+++ b/Tools/PerformanceTests/automated_test_5_loadimbalance
@@ -10,14 +10,18 @@ amr.max_level = 0

 # Geometry
 geometry.coord_sys   = 0                  # 0: Cartesian
-geometry.is_periodic = 0 0 1              # Is periodic?
 geometry.prob_lo     = -20.e-6 -20.e-6 -20.e-6    # physical domain
 geometry.prob_hi     = 20.e-6 20.e-6 20.e-6

+# Boundaries
+boundary.field_lo = pec pec periodic
+boundary.field_hi = pec pec periodic
+boundary.particle_lo = absorbing absorbing periodic
+boundary.particle_hi = absorbing absorbing periodic
+
 warpx.verbose = 1

 algo.load_balance_intervals = -5
 algo.particle_shape = 3
-warpx.do_pml = 0

 # CFL
 warpx.cfl = 1.0
diff --git a/Tools/PerformanceTests/automated_test_6_output_2ppc b/Tools/PerformanceTests/automated_test_6_output_2ppc
index d00fe1654..89d69c476 100644
--- a/Tools/PerformanceTests/automated_test_6_output_2ppc
+++ b/Tools/PerformanceTests/automated_test_6_output_2ppc
@@ -10,15 +10,19 @@ amr.max_level = 0

 # Geometry
 geometry.coord_sys   = 0                  # 0: Cartesian
-geometry.is_periodic = 0 0 0              # Is periodic?
 geometry.prob_lo     = -20.e-6 -20.e-6 -20.e-6    # physical domain
 geometry.prob_hi     = 20.e-6 20.e-6 20.e-6

+# Boundaries
+boundary.field_lo = pec pec pec
+boundary.field_hi = pec pec pec
+boundary.particle_lo = absorbing absorbing absorbing
+boundary.particle_hi = absorbing absorbing absorbing
+
 # Verbosity
 warpx.verbose = 1

 algo.particle_shape = 3
-warpx.do_pml = 0

 # CFL
 warpx.cfl = 1.0
diff --git a/Tools/Release/updateAMReX.py b/Tools/Release/updateAMReX.py
index 807b655e9..03e96951e 100755
--- a/Tools/Release/updateAMReX.py
+++ b/Tools/Release/updateAMReX.py
@@ -13,6 +13,13 @@ import re
 import requests
 import sys

+try:
+    from configupdater import ConfigUpdater
+except ImportError:
+    print("Warning: Cannot update .ini files without 'configupdater'")
+    print("Consider running 'python -m pip install configupdater'")
+    ConfigUpdater = None
+    sys.exit(1)

 # Maintainer Inputs ###########################################################
@@ -109,6 +116,23 @@ with open(run_test_path, "w", encoding='utf-8') as f:
     f.write(run_test_content)


+if ConfigUpdater is not None:
+    # WarpX-tests.ini
+    tests_ini_path = str(REPO_DIR.joinpath("Regression/WarpX-tests.ini"))
+    cp = ConfigUpdater()
+    cp.optionxform = str
+    cp.read(tests_ini_path)
+    cp['AMReX']['branch'].value = amrex_new_branch
+    cp.update_file()
+
+    # WarpX-GPU-tests.ini
+    tests_gpu_ini_path = str(REPO_DIR.joinpath("Regression/WarpX-GPU-tests.ini"))
+    cp = ConfigUpdater()
+    cp.optionxform = str
+    cp.read(tests_gpu_ini_path)
+    cp['AMReX']['branch'].value = amrex_new_branch
+    cp.update_file()
+
 # WarpX references to AMReX: cmake/dependencies/AMReX.cmake
 with open(amrex_cmake_path, encoding='utf-8') as f:
     amrex_cmake_content = f.read()
diff --git a/Tools/Release/updatePICSAR.py b/Tools/Release/updatePICSAR.py
index bb17d5132..a0fdac81e 100755
--- a/Tools/Release/updatePICSAR.py
+++ b/Tools/Release/updatePICSAR.py
@@ -13,6 +13,13 @@ import re
 import requests
 import sys

+try:
+    from configupdater import ConfigUpdater
+except ImportError:
+    print("Warning: Cannot update .ini files without 'configupdater'")
+    print("Consider running 'python -m pip install configupdater'")
+    ConfigUpdater = None
+    sys.exit(1)

 # Maintainer Inputs ###########################################################
@@ -109,6 +116,23 @@ with open(run_test_path, "w", encoding='utf-8') as f:
     f.write(run_test_content)


+if ConfigUpdater is not None:
+    # WarpX-tests.ini
+    tests_ini_path = str(REPO_DIR.joinpath("Regression/WarpX-tests.ini"))
+    cp = ConfigUpdater()
+    cp.optionxform = str
+    cp.read(tests_ini_path)
+    cp['extra-PICSAR']['branch'].value = PICSAR_new_branch
+    cp.update_file()
+
+    # WarpX-GPU-tests.ini
+    tests_gpu_ini_path = str(REPO_DIR.joinpath("Regression/WarpX-GPU-tests.ini"))
+    cp = ConfigUpdater()
+    cp.optionxform = str
+    cp.read(tests_gpu_ini_path)
+    cp['extra-PICSAR']['branch'].value = PICSAR_new_branch
+    cp.update_file()
+
 # WarpX references to PICSAR: cmake/dependencies/PICSAR.cmake
 with open(PICSAR_cmake_path, encoding='utf-8') as f:
     PICSAR_cmake_content = f.read()
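
Both release scripts above now edit Regression/WarpX-tests.ini and Regression/WarpX-GPU-tests.ini through configupdater's ConfigUpdater, which, unlike the standard-library configparser, writes the file back with its comments and layout preserved. The following is a minimal, standalone sketch of that read -> set -> update_file() pattern (assumed usage only; the new branch value "21.09" is a placeholder, not taken from this patch):

    # Sketch of the ConfigUpdater pattern used by updateAMReX.py / updatePICSAR.py.
    # The branch value below is a placeholder.
    from configupdater import ConfigUpdater

    updater = ConfigUpdater()
    updater.optionxform = str                     # keep option names case-sensitive
    updater.read("Regression/WarpX-tests.ini")    # parse the .ini, keeping comments
    updater["AMReX"]["branch"].value = "21.09"    # update a single key in place
    updater.update_file()                         # write back to the same file
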
".DEBUG") @@ -354,5 +358,6 @@ function(warpx_print_summary) message(" QED: ${WarpX_QED}") message(" LLG: ${WarpX_MAG_LLG}") message(" QED table generation: ${WarpX_QED_TABLE_GEN}") + message(" SENSEI: ${WarpX_SENSEI}") message("") endfunction() diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 3813f35fa..7c65d93ed 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -61,6 +61,10 @@ macro(find_amrex) set(AMReX_PARTICLES_PRECISION "SINGLE" CACHE INTERNAL "") endif() + if(WarpX_SENSEI) + set(AMReX_SENSEI ON CACHE INTERNAL "") + endif() + set(AMReX_INSTALL ${BUILD_SHARED_LIBS} CACHE INTERNAL "") set(AMReX_AMRLEVEL OFF CACHE INTERNAL "") set(AMReX_ENABLE_TESTS OFF CACHE INTERNAL "") @@ -72,7 +76,7 @@ macro(find_amrex) set(AMReX_TINY_PROFILE ON CACHE BOOL "") if(WarpX_COMPUTE STREQUAL CUDA) - if(WarpX_ASCENT) + if(WarpX_ASCENT OR WarpX_SENSEI) set(AMReX_GPU_RDC ON CACHE BOOL "") else() # we don't need RDC and disabling it simplifies the build @@ -81,7 +85,6 @@ macro(find_amrex) endif() endif() - # AMReX_SENSEI # shared libs, i.e. for Python bindings, need relocatable code if(WarpX_LIB) set(AMReX_PIC ON CACHE INTERNAL "") @@ -196,7 +199,7 @@ macro(find_amrex) message(STATUS "Searching for pre-installed AMReX ...") # https://amrex-codes.github.io/amrex/docs_html/BuildingAMReX.html#importing-amrex-into-your-cmake-project if(WarpX_ASCENT) - set(COMPONENT_ASCENT AMReX_ASCENT AMReX_CONDUIT) + set(COMPONENT_ASCENT ASCENT CONDUIT) else() set(COMPONENT_ASCENT) endif() @@ -215,9 +218,14 @@ macro(find_amrex) else() set(COMPONENT_PIC) endif() + if(WarpX_SENSEI) + set(COMPONENT_SENSEI SENSEI) + else() + set(COMPONENT_SENSEI) + endif() set(COMPONENT_PRECISION ${WarpX_PRECISION} P${WarpX_PRECISION}) - find_package(AMReX 21.08 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_DIM} ${COMPONENT_EB} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} TINYP LSOLVERS) + find_package(AMReX 21.08 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_DIM} ${COMPONENT_EB} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} TINYP LSOLVERS) message(STATUS "AMReX: Found version '${AMReX_VERSION}'") endif() endmacro() @@ -231,7 +239,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "21.08" +set(WarpX_amrex_branch "44edcc104f551b6243984b567ccd6723ac336699" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/cmake/dependencies/openPMD.cmake b/cmake/dependencies/openPMD.cmake index e8d31f0e8..e0d0e3cf1 100644 --- a/cmake/dependencies/openPMD.cmake +++ b/cmake/dependencies/openPMD.cmake @@ -10,13 +10,13 @@ function(find_openpmd) if(WarpX_openpmd_internal OR WarpX_openpmd_src) set(CMAKE_POLICY_DEFAULT_CMP0077 NEW) - # see https://openpmd-api.readthedocs.io/en/0.12.0-alpha/dev/buildoptions.html - set(openPMD_USE_MPI ${WarpX_MPI} CACHE INTERNAL "") - set(openPMD_USE_PYTHON OFF CACHE INTERNAL "") - set(BUILD_CLI_TOOLS OFF CACHE INTERNAL "") # FIXME - set(BUILD_EXAMPLES OFF CACHE INTERNAL "") # FIXME - set(BUILD_TESTING OFF CACHE INTERNAL "") # FIXME - set(openPMD_INSTALL ${BUILD_SHARED_LIBS} CACHE INTERNAL "") + # see https://openpmd-api.readthedocs.io/en/0.14.1/dev/buildoptions.html + set(openPMD_USE_MPI ${WarpX_MPI} CACHE INTERNAL "") + set(openPMD_USE_PYTHON OFF CACHE INTERNAL "") + set(openPMD_BUILD_CLI_TOOLS OFF CACHE INTERNAL "") + 
diff --git a/cmake/dependencies/openPMD.cmake b/cmake/dependencies/openPMD.cmake
index e8d31f0e8..e0d0e3cf1 100644
--- a/cmake/dependencies/openPMD.cmake
+++ b/cmake/dependencies/openPMD.cmake
@@ -10,13 +10,13 @@ function(find_openpmd)
     if(WarpX_openpmd_internal OR WarpX_openpmd_src)
         set(CMAKE_POLICY_DEFAULT_CMP0077 NEW)

-        # see https://openpmd-api.readthedocs.io/en/0.12.0-alpha/dev/buildoptions.html
-        set(openPMD_USE_MPI    ${WarpX_MPI}         CACHE INTERNAL "")
-        set(openPMD_USE_PYTHON OFF                  CACHE INTERNAL "")
-        set(BUILD_CLI_TOOLS    OFF                  CACHE INTERNAL "")  # FIXME
-        set(BUILD_EXAMPLES     OFF                  CACHE INTERNAL "")  # FIXME
-        set(BUILD_TESTING      OFF                  CACHE INTERNAL "")  # FIXME
-        set(openPMD_INSTALL    ${BUILD_SHARED_LIBS} CACHE INTERNAL "")
+        # see https://openpmd-api.readthedocs.io/en/0.14.1/dev/buildoptions.html
+        set(openPMD_USE_MPI          ${WarpX_MPI}         CACHE INTERNAL "")
+        set(openPMD_USE_PYTHON       OFF                  CACHE INTERNAL "")
+        set(openPMD_BUILD_CLI_TOOLS  OFF                  CACHE INTERNAL "")
+        set(openPMD_BUILD_EXAMPLES   OFF                  CACHE INTERNAL "")
+        set(openPMD_BUILD_TESTING    OFF                  CACHE INTERNAL "")
+        set(openPMD_INSTALL          ${BUILD_SHARED_LIBS} CACHE INTERNAL "")

         if(WarpX_openpmd_src)
             add_subdirectory(${WarpX_openpmd_src} _deps/localopenpmd-build/)
@@ -65,7 +65,7 @@ function(find_openpmd)
         else()
             set(COMPONENT_WMPI NOMPI)
         endif()
-        find_package(openPMD 0.12.0 CONFIG REQUIRED COMPONENTS ${COMPONENT_WMPI})
+        find_package(openPMD 0.14.2 CONFIG REQUIRED COMPONENTS ${COMPONENT_WMPI})
         message(STATUS "openPMD-api: Found version '${openPMD_VERSION}'")
     endif()
 endfunction()
@@ -81,7 +81,7 @@ if(WarpX_OPENPMD)
     set(WarpX_openpmd_repo "https://github.com/openPMD/openPMD-api.git"
         CACHE STRING
         "Repository URI to pull and build openPMD-api from if(WarpX_openpmd_internal)")
-    set(WarpX_openpmd_branch "0.13.4"
+    set(WarpX_openpmd_branch "0.14.2"
        CACHE STRING
        "Repository branch for WarpX_openpmd_repo if(WarpX_openpmd_internal)")
diff --git a/run_test.sh b/run_test.sh
index 5c70e5ab4..3d969630d 100755
--- a/run_test.sh
+++ b/run_test.sh
@@ -51,7 +51,7 @@ echo "cd $PWD"

 # Clone PICSAR, AMReX and warpx-data
 git clone https://github.com/AMReX-Codes/amrex.git
-cd amrex && git checkout 21.08 && cd -
+cd amrex && git checkout 44edcc104f551b6243984b567ccd6723ac336699 && cd -
 # Use QED brach for QED tests
 git clone https://github.com/ECP-WarpX/picsar.git
 cd picsar && git checkout c16b642e3dcf860480dd1dd21cefa3874f395773 && cd -
diff --git a/setup.py b/setup.py
index 0cca44cb5..a281bc658 100644
--- a/setup.py
+++ b/setup.py
@@ -174,34 +174,40 @@ def build_extension(self, ext):

 # Pick up existing WarpX libraries or...
 PYWARPX_LIB_DIR = os.environ.get('PYWARPX_LIB_DIR')

+env = os.environ.copy()
+
 # ... build WarpX libraries with CMake
 #   note: changed default for SHARED, MPI, TESTING and EXAMPLES
-WarpX_COMPUTE = os.environ.get('WarpX_COMPUTE', 'OMP')
-WarpX_MPI = os.environ.get('WarpX_MPI', 'OFF')
-WarpX_EB = os.environ.get('WarpX_EB', 'OFF')
-WarpX_OPENPMD = os.environ.get('WarpX_OPENPMD', 'OFF')
-WarpX_PRECISION = os.environ.get('WarpX_PRECISION', 'DOUBLE')
-WarpX_PSATD = os.environ.get('WarpX_PSATD', 'OFF')
-WarpX_QED = os.environ.get('WarpX_QED', 'ON')
-WarpX_QED_TABLE_GEN = os.environ.get('WarpX_QED_TABLE_GEN', 'OFF')
-WarpX_DIMS = os.environ.get('WarpX_DIMS', '2;3;RZ')
-BUILD_PARALLEL = os.environ.get('BUILD_PARALLEL', '2')
-BUILD_SHARED_LIBS = os.environ.get('WarpX_BUILD_SHARED_LIBS',
+WarpX_COMPUTE = env.pop('WarpX_COMPUTE', 'OMP')
+WarpX_MPI = env.pop('WarpX_MPI', 'OFF')
+WarpX_EB = env.pop('WarpX_EB', 'OFF')
+WarpX_OPENPMD = env.pop('WarpX_OPENPMD', 'OFF')
+WarpX_PRECISION = env.pop('WarpX_PRECISION', 'DOUBLE')
+WarpX_PSATD = env.pop('WarpX_PSATD', 'OFF')
+WarpX_QED = env.pop('WarpX_QED', 'ON')
+WarpX_QED_TABLE_GEN = env.pop('WarpX_QED_TABLE_GEN', 'OFF')
+WarpX_DIMS = env.pop('WarpX_DIMS', '2;3;RZ')
+BUILD_PARALLEL = env.pop('BUILD_PARALLEL', '2')
+BUILD_SHARED_LIBS = env.pop('WarpX_BUILD_SHARED_LIBS',
                             'OFF')
-#BUILD_TESTING = os.environ.get('WarpX_BUILD_TESTING',
+#BUILD_TESTING = env.pop('WarpX_BUILD_TESTING',
 #                               'OFF')
-#BUILD_EXAMPLES = os.environ.get('WarpX_BUILD_EXAMPLES',
+#BUILD_EXAMPLES = env.pop('WarpX_BUILD_EXAMPLES',
 #                                'OFF')

 # openPMD-api sub-control
-HDF5_USE_STATIC_LIBRARIES = os.environ.get('HDF5_USE_STATIC_LIBRARIES', 'OFF')
-ADIOS_USE_STATIC_LIBS = os.environ.get('ADIOS_USE_STATIC_LIBS', 'OFF')
+HDF5_USE_STATIC_LIBRARIES = env.pop('HDF5_USE_STATIC_LIBRARIES', 'OFF')
+ADIOS_USE_STATIC_LIBS = env.pop('ADIOS_USE_STATIC_LIBS', 'OFF')

 # CMake dependency control (developers & package managers)
-WarpX_amrex_src = os.environ.get('WarpX_amrex_src')
-WarpX_amrex_internal = os.environ.get('WarpX_amrex_internal', 'ON')
-WarpX_openpmd_src = os.environ.get('WarpX_openpmd_src')
-WarpX_openpmd_internal = os.environ.get('WarpX_openpmd_internal', 'ON')
-WarpX_picsar_src = os.environ.get('WarpX_picsar_src')
-WarpX_picsar_internal = os.environ.get('WarpX_picsar_internal', 'ON')
+WarpX_amrex_src = env.pop('WarpX_amrex_src', '')
+WarpX_amrex_internal = env.pop('WarpX_amrex_internal', 'ON')
+WarpX_openpmd_src = env.pop('WarpX_openpmd_src', '')
+WarpX_openpmd_internal = env.pop('WarpX_openpmd_internal', 'ON')
+WarpX_picsar_src = env.pop('WarpX_picsar_src', '')
+WarpX_picsar_internal = env.pop('WarpX_picsar_internal', 'ON')
+
+for key in env.keys():
+    if key.lower().startswith('warpx'):
+        print(f"\nWARNING: Found environment variable '{key}', which is not a recognized WarpX option\n")

 # https://cmake.org/cmake/help/v3.0/command/if.html
 if WarpX_MPI.upper() in ['1', 'ON', 'TRUE', 'YES']:
@@ -277,7 +283,7 @@ def build_extension(self, ext):
 #        ]
 #    },
     extras_require={
-        'all': ['openPMD-api~=0.13.0', 'openPMD-viewer~=1.1', 'yt~=3.6', 'matplotlib'],
+        'all': ['openPMD-api~=0.14.2', 'openPMD-viewer~=1.1', 'yt~=3.6,>=4.0.1', 'matplotlib'],
     },
 #    cmdclass={'test': PyTest},
 #    platforms='any',
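
The setup.py hunk above switches from reading os.environ directly to popping each recognized option off a copy of the environment, so that anything left over whose name still starts with "warpx" can be flagged as a likely misspelled or unsupported option. A minimal sketch of that pattern, with only two of the options shown and the rest of the names purely illustrative:

    # Sketch of the env.pop() pattern from setup.py: consume known options,
    # then warn about leftover WarpX_* variables that were not recognized.
    import os

    env = os.environ.copy()
    WarpX_MPI = env.pop('WarpX_MPI', 'OFF')      # known option: consumed with default
    WarpX_PSATD = env.pop('WarpX_PSATD', 'OFF')  # known option: consumed with default

    for key in env.keys():
        if key.lower().startswith('warpx'):
            print(f"\nWARNING: Found environment variable '{key}', "
                  "which is not a recognized WarpX option\n")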