Skip to content

Commit

Permalink
Merge pull request #4002 from franzpoeschel/topic-flush-inside-step
Browse files Browse the repository at this point in the history
openPMD plugin: Flush data to disk within a step
  • Loading branch information
psychocoderHPC authored Nov 8, 2022
2 parents 373bc27 + 20e22cb commit e80c45e
Show file tree
Hide file tree
Showing 6 changed files with 83 additions and 11 deletions.
56 changes: 56 additions & 0 deletions include/picongpu/plugins/common/openPMDVersion.def
Original file line number Diff line number Diff line change
Expand Up @@ -157,5 +157,61 @@ namespace picongpu
useSpanAPI,
std::forward<Functor>(createBaseBuffer));
}


/* Where a flush should preferably place the data: straight to disk, or
 * into the backend's internal buffer. Exactly two states are needed,
 * hence the underlying type bool.
 * NOTE(review): per the JSON-default comment elsewhere in this change,
 * the hint is currently only interpreted by the ADIOS2 BP5 engine and
 * ignored by other backends — confirm against the ADIOS2 version in use.
 */
enum class PreferredFlushTarget : bool
{
    Disk,
    Buffer
};

namespace detail
{
    /*
     * Do some SFINAE tricks to detect whether the openPMD API allows
     * specifying JSON configs in flush calls or not.
     *
     * Primary template: selected when Series::flush(std::string) is not
     * a well-formed call. The flush-target hint is then silently dropped
     * and a plain flush() is issued instead.
     */
    template<typename Series = ::openPMD::Series, typename Dummy = void>
    struct FlushSeries
    {
        constexpr static bool supportsFlushParameters = false;

        // Target parameter intentionally unnamed: the older openPMD API
        // has no way to express a flush target, so it is ignored here.
        static void run(Series& series, PreferredFlushTarget)
        {
            series.flush();
        }
    };

    /*
     * Enable this if Series::flush accepts string parameters.
     *
     * This partial specialization matches when the expression
     * std::declval<Series>().flush(std::string) is well-formed AND its
     * decltype equals the primary template's default Dummy (= void).
     * NOTE(review): this assumes flush(std::string) returns void; if a
     * future openPMD version gives it a non-void return type, detection
     * would silently fall back to the primary template — confirm against
     * the installed openPMD API.
     */
    template<typename Series>
    struct FlushSeries<Series, decltype(std::declval<Series>().flush(std::declval<std::string>()))>
    {
        constexpr static bool supportsFlushParameters = true;
        // JSON snippets forwarded verbatim to the backend via flush().
        // The "preferred_flush_target" key is an ADIOS2 engine setting;
        // backends that do not know it are expected to ignore it.
        static constexpr char const* jsonConfigBP5TargetBuffer
            = R"({"adios2": {"engine": {"preferred_flush_target": "buffer"}}})";
        static constexpr char const* jsonConfigBP5TargetDisk
            = R"({"adios2": {"engine": {"preferred_flush_target": "disk"}}})";

        // Flush the Series, passing the JSON config that corresponds to
        // the requested target.
        static void run(Series& series, PreferredFlushTarget target)
        {
            switch(target)
            {
            case PreferredFlushTarget::Disk:
                series.flush(jsonConfigBP5TargetDisk);
                break;
            case PreferredFlushTarget::Buffer:
                series.flush(jsonConfigBP5TargetBuffer);
                break;
            }
        }
    };
} // namespace detail

/** Flush an openPMD Series, hinting where the data should go.
 *
 * Forwards to detail::FlushSeries: on openPMD versions whose
 * Series::flush() accepts a configuration string, an ADIOS2
 * "preferred_flush_target" JSON config matching @p target is passed
 * along; on older versions the hint is dropped and a plain flush()
 * is performed.
 *
 * @param series the Series to flush
 * @param target preferred destination (internal buffer vs. disk)
 */
inline void flushSeries(::openPMD::Series& series, PreferredFlushTarget target)
{
    detail::FlushSeries<>::run(series, target);
}

} // namespace openPMD
} // namespace picongpu
20 changes: 20 additions & 0 deletions include/picongpu/plugins/openPMD/Json.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -21,11 +21,14 @@

# include "picongpu/plugins/openPMD/Json.hpp"

# include "picongpu/plugins/common/openPMDVersion.def"
# include "picongpu/plugins/openPMD/Json_private.hpp"

# include <algorithm> // std::copy_n, std::find
# include <cctype> // std::isspace

# include <openPMD/openPMD.hpp>

/*
* Note:
* This is a hostonly .cpp file because CMake will not use -isystem for system
Expand Down Expand Up @@ -330,6 +333,23 @@ The key 'select' must point to either a single string or an array of strings.)EN
adios2EngineParams["BufferChunkSize"] = "2147381248";
}
}
if constexpr(picongpu::openPMD::detail::FlushSeries<openPMD::Series>::supportsFlushParameters)
{
auto& adios2Engine = config["adios2"]["engine"];
if(!adios2Engine.contains("preferred_flush_target"))
{
/*
* Only relevant for ADIOS2 engines that support this feature,
* ignored otherwise. Currently supported in BP5.
* Small datasets should be written to the internal ADIOS2
* buffer.
* Big datasets should explicitly specify their flush target
* in Series::flush(). Options are "buffer" and "disk".
* Ideally, all flush() calls should specify this explicitly.
*/
adios2Engine["preferred_flush_target"] = "buffer";
}
}
}
} // namespace

Expand Down
2 changes: 1 addition & 1 deletion include/picongpu/plugins/openPMD/NDScalars.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,7 @@ namespace picongpu
std::make_shared<T_Scalar>(value),
std::move(std::get<1>(tuple)),
std::move(std::get<2>(tuple)));
params.openPMDSeries->flush();
flushSeries(*params.openPMDSeries, PreferredFlushTarget::Buffer);
}

private:
Expand Down
2 changes: 1 addition & 1 deletion include/picongpu/plugins/openPMD/WriteSpecies.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -504,7 +504,7 @@ namespace picongpu
globalNumParticles,
*params->jsonMatcher,
series.particlesPath() + speciesGroup);
params->openPMDSeries->flush();
flushSeries(*params->openPMDSeries, PreferredFlushTarget::Buffer);
}

log<picLog::INPUT_OUTPUT>("openPMD: ( end ) writing particle patches for %1%")
Expand Down
8 changes: 3 additions & 5 deletions include/picongpu/plugins/openPMD/openPMDWriter.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -730,7 +730,7 @@ make sure that environment variable OPENPMD_BP_BACKEND is not set to ADIOS1.
::openPMD::shareRaw(rawPtr),
asStandardVector(recordOffsetDims),
asStandardVector(recordLocalSizeDims));
params->openPMDSeries->flush();
flushSeries(*params->openPMDSeries, PreferredFlushTarget::Disk);
}

/** Implementation of loading random number generator states
Expand Down Expand Up @@ -1386,9 +1386,7 @@ make sure that environment variable OPENPMD_BP_BACKEND is not set to ADIOS1.

if(numDataPoints == 0)
{
// technically not necessary if we write no dataset,
// but let's keep things uniform
params->openPMDSeries->flush();
flushSeries(*params->openPMDSeries, PreferredFlushTarget::Disk);
continue;
}

Expand Down Expand Up @@ -1439,7 +1437,7 @@ make sure that environment variable OPENPMD_BP_BACKEND is not set to ADIOS1.
}
}

params->openPMDSeries->flush();
flushSeries(*params->openPMDSeries, PreferredFlushTarget::Disk);
}
}

Expand Down
6 changes: 2 additions & 4 deletions include/picongpu/plugins/openPMD/writer/ParticleAttribute.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -103,9 +103,7 @@ namespace picongpu

if(elements == 0)
{
// technically not necessary if we write no dataset,
// but let's keep things uniform
params->openPMDSeries->flush();
flushSeries(*params->openPMDSeries, PreferredFlushTarget::Disk);
continue;
}

Expand Down Expand Up @@ -137,7 +135,7 @@ namespace picongpu
span[i] = reinterpret_cast<ComponentType*>(dataPtr)[d + i * components];
}

params->openPMDSeries->flush();
flushSeries(*params->openPMDSeries, PreferredFlushTarget::Disk);
}

auto unitMap = convertToUnitDimension(unitDimension);
Expand Down

0 comments on commit e80c45e

Please sign in to comment.