diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e9a6091aef..723bd7ad12 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -66,7 +66,7 @@ repos: # clang-format v13 # to run manually, use .github/workflows/clang-format/clang-format.sh - repo: https://github.com/pre-commit/mirrors-clang-format - rev: v21.1.8 + rev: v22.1.0 hooks: - id: clang-format # By default, the clang-format hook configures: @@ -88,7 +88,7 @@ repos: # Sorts Python imports according to PEP8 # https://www.python.org/dev/peps/pep-0008/#imports - repo: https://github.com/pycqa/isort - rev: 7.0.0 + rev: 8.0.1 hooks: - id: isort name: isort (python) diff --git a/CMakeLists.txt b/CMakeLists.txt index 1ada05698a..004a20de1e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -400,6 +400,7 @@ set(CORE_SOURCE src/Format.cpp src/Iteration.cpp src/IterationEncoding.cpp + src/LoadStoreChunk.cpp src/Mesh.cpp src/ParticlePatches.cpp src/ParticleSpecies.cpp @@ -411,6 +412,7 @@ set(CORE_SOURCE src/version.cpp src/auxiliary/Date.cpp src/auxiliary/Filesystem.cpp + src/auxiliary/Future.cpp src/auxiliary/JSON.cpp src/auxiliary/JSONMatcher.cpp src/auxiliary/Memory.cpp @@ -826,6 +828,7 @@ if(openPMD_BUILD_TESTING) elseif(${test_name} STREQUAL "Core") list(APPEND ${out_list} test/Files_Core/automatic_variable_encoding.cpp + test/Files_Core/read_nonexistent_attribute.cpp ) endif() endmacro() diff --git a/include/openPMD/Dataset.hpp b/include/openPMD/Dataset.hpp index e1f0058885..b8af286e14 100644 --- a/include/openPMD/Dataset.hpp +++ b/include/openPMD/Dataset.hpp @@ -34,6 +34,18 @@ namespace openPMD using Extent = std::vector; using Offset = std::vector; +/** Selection of a region of memory for storing chunks. + * + * Used to specify a non-contiguous memory region when storing + * data chunks. This allows writing data that is not contiguous + * in memory. 
+ */ +struct MemorySelection +{ + Offset offset; + Extent extent; +}; + class Dataset { friend class RecordComponent; diff --git a/include/openPMD/Datatype.hpp b/include/openPMD/Datatype.hpp index a11d3db75f..3f667ae47f 100644 --- a/include/openPMD/Datatype.hpp +++ b/include/openPMD/Datatype.hpp @@ -294,7 +294,8 @@ template inline constexpr Datatype determineDatatype(T &&val) { (void)val; // don't need this, it only has a name for Doxygen - using T_stripped = std::remove_cv_t>; + using T_stripped = + std::remove_extent_t>>; if constexpr (auxiliary::IsPointer_v) { return determineDatatype>(); @@ -419,6 +420,13 @@ inline size_t toBits(Datatype d) return toBytes(d) * CHAR_BIT; } +/** Check if a Datatype is a signed type + * + * @param d Datatype to test + * @return true if signed type (integer, floating point, complex), else false + */ +constexpr bool isSigned(Datatype d); + /** Compare if a Datatype is a vector type * * @param d Datatype to test @@ -595,14 +603,26 @@ inline std::tuple isInteger() */ template inline bool isSameFloatingPoint(Datatype d) +{ + return isSameFloatingPoint(d, determineDatatype()); +} + +/** Compare if two Datatypes are equivalent floating point types + * + * @param d1 First Datatype to compare + * @param d2 Second Datatype to compare + * @return true if both types are floating point and have same bitness, else + * false + */ +inline bool isSameFloatingPoint(Datatype d1, Datatype d2) { // template - bool tt_is_fp = isFloatingPoint(); + bool tt_is_fp = isFloatingPoint(d1); // Datatype - bool dt_is_fp = isFloatingPoint(d); + bool dt_is_fp = isFloatingPoint(d2); - if (tt_is_fp && dt_is_fp && toBits(d) == toBits(determineDatatype())) + if (tt_is_fp && dt_is_fp && toBits(d1) == toBits(d2)) return true; else return false; @@ -617,15 +637,26 @@ inline bool isSameFloatingPoint(Datatype d) */ template inline bool isSameComplexFloatingPoint(Datatype d) +{ + return isSameComplexFloatingPoint(d, determineDatatype()); +} + +/** Compare if two Datatypes 
are equivalent complex floating point types + * + * @param d1 First Datatype to compare + * @param d2 Second Datatype to compare + * @return true if both types are complex floating point and have same bitness, + * else false + */ +inline bool isSameComplexFloatingPoint(Datatype d1, Datatype d2) { // template - bool tt_is_cfp = isComplexFloatingPoint(); + bool tt_is_cfp = isComplexFloatingPoint(d1); // Datatype - bool dt_is_cfp = isComplexFloatingPoint(d); + bool dt_is_cfp = isComplexFloatingPoint(d2); - if (tt_is_cfp && dt_is_cfp && - toBits(d) == toBits(determineDatatype())) + if (tt_is_cfp && dt_is_cfp && toBits(d1) == toBits(d2)) return true; else return false; @@ -640,17 +671,29 @@ inline bool isSameComplexFloatingPoint(Datatype d) */ template inline bool isSameInteger(Datatype d) +{ + return isSameInteger(d, determineDatatype()); +} + +/** Compare if two Datatypes are equivalent integer types + * + * @param d1 First Datatype to compare + * @param d2 Second Datatype to compare + * @return true if both types are integers, same signedness and same bitness, + * else false + */ +inline bool isSameInteger(Datatype d1, Datatype d2) { // template bool tt_is_int, tt_is_sig; - std::tie(tt_is_int, tt_is_sig) = isInteger(); + std::tie(tt_is_int, tt_is_sig) = isInteger(d1); // Datatype bool dt_is_int, dt_is_sig; - std::tie(dt_is_int, dt_is_sig) = isInteger(d); + std::tie(dt_is_int, dt_is_sig) = isInteger(d2); if (tt_is_int && dt_is_int && tt_is_sig == dt_is_sig && - toBits(d) == toBits(determineDatatype())) + toBits(d1) == toBits(d2)) return true; else return false; @@ -691,46 +734,26 @@ constexpr bool isChar(Datatype d) template constexpr bool isSameChar(Datatype d); +/** Compare if two Datatypes are equivalent char types + * + * @param d1 First Datatype to compare + * @param d2 Second Datatype to compare + * @return true if both types are chars with same signedness and size, else + * false + */ +constexpr bool isSameChar(Datatype d1, Datatype d2); + /** Comparison for 
two Datatypes * * Besides returning true for the same types, identical implementations on * some platforms, e.g. if long and long long are the same or double and * long double will also return true. + * + * @param d First Datatype to compare + * @param e Second Datatype to compare + * @return true if the datatypes are equivalent */ -inline bool isSame(openPMD::Datatype const d, openPMD::Datatype const e) -{ - // exact same type - if (static_cast(d) == static_cast(e)) - return true; - - bool d_is_vec = isVector(d); - bool e_is_vec = isVector(e); - - // same int - bool d_is_int, d_is_sig; - std::tie(d_is_int, d_is_sig) = isInteger(d); - bool e_is_int, e_is_sig; - std::tie(e_is_int, e_is_sig) = isInteger(e); - if (d_is_int && e_is_int && d_is_vec == e_is_vec && d_is_sig == e_is_sig && - toBits(d) == toBits(e)) - return true; - - // same float - bool d_is_fp = isFloatingPoint(d); - bool e_is_fp = isFloatingPoint(e); - - if (d_is_fp && e_is_fp && d_is_vec == e_is_vec && toBits(d) == toBits(e)) - return true; - - // same complex floating point - bool d_is_cfp = isComplexFloatingPoint(d); - bool e_is_cfp = isComplexFloatingPoint(e); - - if (d_is_cfp && e_is_cfp && d_is_vec == e_is_vec && toBits(d) == toBits(e)) - return true; - - return false; -} +constexpr bool isSame(openPMD::Datatype d, openPMD::Datatype e); /** * @brief basicDatatype Strip openPMD Datatype of std::vector, std::array et. 
@@ -740,15 +763,34 @@ inline bool isSame(openPMD::Datatype const d, openPMD::Datatype const e) */ Datatype basicDatatype(Datatype dt); +/** Convert a scalar Datatype to its vector variant + * + * @param dt Scalar Datatype to convert + * @return Vector Datatype (e.g., INT becomes VEC_INT) + */ Datatype toVectorType(Datatype dt); +/** Convert a Datatype to its string representation + * + * @param dt Datatype to convert + * @return String representation of the Datatype + */ std::string datatypeToString(Datatype dt); +/** Convert a string to a Datatype + * + * @param s String representation of a Datatype + * @return The corresponding Datatype + */ Datatype stringToDatatype(const std::string &s); -void warnWrongDtype(std::string const &key, Datatype store, Datatype request); - -std::ostream &operator<<(std::ostream &, openPMD::Datatype const &); +/** Stream operator for Datatype + * + * @param os Output stream + * @param dt Datatype to output + * @return Reference to the stream + */ +std::ostream &operator<<(std::ostream &os, openPMD::Datatype const &dt); template constexpr auto datatypeIndex() -> size_t diff --git a/include/openPMD/Datatype.tpp b/include/openPMD/Datatype.tpp index 6685c62f73..b09351fd77 100644 --- a/include/openPMD/Datatype.tpp +++ b/include/openPMD/Datatype.tpp @@ -25,6 +25,7 @@ // comment to prevent clang-format from moving this #include up // datatype macros may be included and un-included in other headers #include "openPMD/DatatypeMacros.hpp" +#include "openPMD/auxiliary/TypeTraits.hpp" #include #include // std::void_t @@ -222,36 +223,97 @@ namespace detail template constexpr bool is_char_v = is_char::value; - template - inline bool isSameChar() + struct IsChar { - return - // both must be char types - is_char_v && is_char_v && - // both must have equivalent sign - std::is_signed_v == std::is_signed_v && - // both must have equivalent size - sizeof(T_Char1) == sizeof(T_Char2); + template + static constexpr bool call() + { + return is_char_v; + } + 
template + static constexpr bool call() + { + return false; + } + }; + + constexpr inline bool isChar(Datatype dtype) + { + return switchType(dtype); } - template - struct IsSameChar + struct DtypeSize { - template - static bool call() + template + static constexpr size_t call() { - return isSameChar(); + return sizeof(T); } - - static constexpr char const *errorMsg = "IsSameChar"; + static constexpr char const *errorMsg = "DtypeSize"; }; + constexpr inline size_t dtypeSize(Datatype dtype) + { + return switchType(dtype); + } } // namespace detail template constexpr inline bool isSameChar(Datatype d) { - return switchType>(d); + return isSameChar(d, determineDatatype()); +} + +constexpr bool isSameChar(Datatype d1, Datatype d2) +{ + return detail::isChar(d1) && detail::isChar(d2) && + isSigned(d1) == isSigned(d2) && + detail::dtypeSize(d1) == detail::dtypeSize(d2); +} + +namespace detail +{ + struct IsSigned + { + template + static constexpr bool call() + { + if constexpr (auxiliary::IsVector_v || auxiliary::IsArray_v) + { + return call(); + } + else if constexpr (std::is_same_v) + { + return call(); + } + else + { + return std::is_signed_v; + } + } + + static constexpr char const *errorMsg = "IsSigned"; + }; +} // namespace detail + +constexpr inline bool isSigned(Datatype d) +{ + return switchType(d); +} + +constexpr bool isSame(openPMD::Datatype const d, openPMD::Datatype const e) +{ + return + // exact same type + static_cast(d) == static_cast(e) + // same int + || isSameInteger(d, e) + // same float + || isSameFloatingPoint(d, e) + // same complex floating point + || isSameComplexFloatingPoint(d, e) + // same char + || isSameChar(d, e); } } // namespace openPMD diff --git a/include/openPMD/IO/ADIOS/ADIOS2File.hpp b/include/openPMD/IO/ADIOS/ADIOS2File.hpp index d34cc8ebe5..66aa47e702 100644 --- a/include/openPMD/IO/ADIOS/ADIOS2File.hpp +++ b/include/openPMD/IO/ADIOS/ADIOS2File.hpp @@ -20,6 +20,7 @@ */ #pragma once +#include "openPMD/Dataset.hpp" #include 
"openPMD/IO/ADIOS/ADIOS2Auxiliary.hpp" #include "openPMD/IO/ADIOS/ADIOS2PreloadAttributes.hpp" #include "openPMD/IO/ADIOS/ADIOS2PreloadVariables.hpp" @@ -107,11 +108,14 @@ struct WriteDataset static void call(Params &&...); }; +/** Buffered put operation with unique pointer */ struct BufferedUniquePtrPut { std::string name; Offset offset; Extent extent; + /** Optional memory selection for non-contiguous memory regions */ + std::optional memorySelection; UniquePtrWithLambda data; Datatype dtype = Datatype::UNDEFINED; diff --git a/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp b/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp index 4316f5181f..656f249b6b 100644 --- a/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp +++ b/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp @@ -21,6 +21,7 @@ */ #pragma once +#include "openPMD/Dataset.hpp" #include "openPMD/Error.hpp" #include "openPMD/IO/ADIOS/ADIOS2Auxiliary.hpp" #include "openPMD/IO/ADIOS/ADIOS2FilePosition.hpp" @@ -509,6 +510,7 @@ class ADIOS2IOHandlerImpl adios2::Variable verifyDataset( Offset const &offset, Extent const &extent, + std::optional const &memorySelection, adios2::IO &IO, adios2::Engine &engine, std::string const &varName, @@ -622,6 +624,18 @@ class ADIOS2IOHandlerImpl var.SetSelection( {adios2::Dims(offset.begin(), offset.end()), adios2::Dims(extent.begin(), extent.end())}); + + if (memorySelection.has_value()) + { + var.SetMemorySelection( + {adios2::Dims( + memorySelection->offset.begin(), + memorySelection->offset.end()), + adios2::Dims( + memorySelection->extent.begin(), + memorySelection->extent.end())}); + } + return var; } @@ -629,6 +643,7 @@ class ADIOS2IOHandlerImpl { bool noGroupBased = false; bool blosc2bp5 = false; + bool memorySelection = false; } printedWarningsAlready; }; // ADIOS2IOHandlerImpl @@ -942,7 +957,7 @@ class ADIOS2IOHandler : public AbstractIOHandler try { auto params = internal::defaultParsedFlushParams; - this->flush(params); + this->flush_impl(params); } catch (std::exception const &ex) 
{ @@ -990,6 +1005,6 @@ class ADIOS2IOHandler : public AbstractIOHandler return true; } - std::future flush(internal::ParsedFlushParams &) override; + std::future flush_impl(internal::ParsedFlushParams &) override; }; // ADIOS2IOHandler } // namespace openPMD diff --git a/include/openPMD/IO/ADIOS/macros.hpp b/include/openPMD/IO/ADIOS/macros.hpp index 8e57d9191d..5249486daa 100644 --- a/include/openPMD/IO/ADIOS/macros.hpp +++ b/include/openPMD/IO/ADIOS/macros.hpp @@ -41,6 +41,34 @@ #define openPMD_HAVE_ADIOS2_BP5 0 #endif +namespace openPMD +{ +namespace detail +{ + /** Trait to check if a variable supports SetMemorySelection + * + * @tparam Variable ADIOS2 variable type + */ + template + struct CanTheMemorySelectionBeReset + { + static constexpr bool value = false; + }; + + template + struct CanTheMemorySelectionBeReset< + Variable, + decltype(std::declval().SetMemorySelection())> + { + static constexpr bool value = true; + }; +} // namespace detail + +/** Whether ADIOS2 Variable supports SetMemorySelection */ +constexpr bool CanTheMemorySelectionBeReset = + detail::CanTheMemorySelectionBeReset>::value; +} // namespace openPMD + #else #define openPMD_HAS_ADIOS_2_8 0 diff --git a/include/openPMD/IO/AbstractIOHandler.hpp b/include/openPMD/IO/AbstractIOHandler.hpp index 9b7735b5ba..eccb73d742 100644 --- a/include/openPMD/IO/AbstractIOHandler.hpp +++ b/include/openPMD/IO/AbstractIOHandler.hpp @@ -265,13 +265,21 @@ class AbstractIOHandler * backends that decide to implement this operation asynchronously. */ std::future flush(internal::FlushParams const &); + /** Counter tracking the number of flush operations. This is later used to + * avoid repeated flushing in the DeferredComputation objects returned by + * the loadStoreChunk() API. (The counter is copied as a weak reference to + * the shared pointer, and the value is compared to the value upon enqueuing + * the operation. If the flush counter has proceeded past the old value, our + * operation has already been run.) 
*/ + std::shared_ptr m_flushCounter = + std::make_shared(0); /** Process operations in queue according to FIFO. * * @return Future indicating the completion state of the operation for * backends that decide to implement this operation asynchronously. */ - virtual std::future flush(internal::ParsedFlushParams &) = 0; + std::future flush(internal::ParsedFlushParams &); /** The currently used backend */ virtual std::string backendName() const = 0; @@ -315,6 +323,17 @@ class AbstractIOHandler IterationEncoding m_encoding = IterationEncoding::groupBased; OpenpmdStandard m_standard = auxiliary::parseStandard(getStandardDefault()); bool m_verify_homogeneous_extents = true; + +protected: + /** Implementation of flush operation for subclasses + * + * Do not call directly, use flush() wrapper instead. + * + * @param params Parsed flush parameters + * @return Future indicating completion state + */ + virtual std::future + flush_impl(internal::ParsedFlushParams ¶ms) = 0; }; // AbstractIOHandler } // namespace openPMD diff --git a/include/openPMD/IO/AbstractIOHandlerImplCommon.hpp b/include/openPMD/IO/AbstractIOHandlerImplCommon.hpp index 7261b4bf71..4c995be817 100644 --- a/include/openPMD/IO/AbstractIOHandlerImplCommon.hpp +++ b/include/openPMD/IO/AbstractIOHandlerImplCommon.hpp @@ -28,10 +28,10 @@ #include "openPMD/auxiliary/StringManip.hpp" #include "openPMD/backend/Writable.hpp" +#include #include #include #include -#include namespace openPMD { @@ -50,7 +50,10 @@ class AbstractIOHandlerImplCommon : public AbstractIOHandlerImpl * without the OS path */ std::unordered_map m_files; - std::unordered_set m_dirty; + // MUST be an ordered set in order to consistently flush on different + // parallel processes (same logic cant apply to m_files since Writable* + // pointers are not predictable) + std::set m_dirty; enum PossiblyExisting { diff --git a/include/openPMD/IO/DummyIOHandler.hpp b/include/openPMD/IO/DummyIOHandler.hpp index 8abcf20990..100711c944 100644 --- 
a/include/openPMD/IO/DummyIOHandler.hpp +++ b/include/openPMD/IO/DummyIOHandler.hpp @@ -44,7 +44,7 @@ class DummyIOHandler : public AbstractIOHandler /** No-op consistent with the IOHandler interface to enable library use * without IO. */ - std::future flush(internal::ParsedFlushParams &) override; + std::future flush_impl(internal::ParsedFlushParams &) override; std::string backendName() const override; }; // DummyIOHandler } // namespace openPMD diff --git a/include/openPMD/IO/HDF5/HDF5IOHandler.hpp b/include/openPMD/IO/HDF5/HDF5IOHandler.hpp index 07b3978b87..5e2ddfd526 100644 --- a/include/openPMD/IO/HDF5/HDF5IOHandler.hpp +++ b/include/openPMD/IO/HDF5/HDF5IOHandler.hpp @@ -46,7 +46,7 @@ class HDF5IOHandler : public AbstractIOHandler return "HDF5"; } - std::future flush(internal::ParsedFlushParams &) override; + std::future flush_impl(internal::ParsedFlushParams &) override; private: std::unique_ptr m_impl; diff --git a/include/openPMD/IO/HDF5/ParallelHDF5IOHandler.hpp b/include/openPMD/IO/HDF5/ParallelHDF5IOHandler.hpp index abeb196b11..7b0c43129d 100644 --- a/include/openPMD/IO/HDF5/ParallelHDF5IOHandler.hpp +++ b/include/openPMD/IO/HDF5/ParallelHDF5IOHandler.hpp @@ -56,7 +56,7 @@ class ParallelHDF5IOHandler : public AbstractIOHandler return "MPI_HDF5"; } - std::future flush(internal::ParsedFlushParams &) override; + std::future flush_impl(internal::ParsedFlushParams &) override; private: std::unique_ptr m_impl; diff --git a/include/openPMD/IO/IOTask.hpp b/include/openPMD/IO/IOTask.hpp index 1ee7248b32..662a34f9e3 100644 --- a/include/openPMD/IO/IOTask.hpp +++ b/include/openPMD/IO/IOTask.hpp @@ -495,6 +495,8 @@ struct OPENPMDAPI_EXPORT Extent extent = {}; Offset offset = {}; + /** Optional memory selection for non-contiguous memory regions */ + std::optional memorySelection = std::nullopt; Datatype dtype = Datatype::UNDEFINED; auxiliary::WriteBuffer data; }; @@ -558,6 +560,9 @@ struct OPENPMDAPI_EXPORT } // in parameters + /** If true, only query if the 
backend supports buffer views without + * performing operation */ + bool queryOnly = false; Offset offset; Extent extent; Datatype dtype = Datatype::UNDEFINED; diff --git a/include/openPMD/IO/InvalidatableFile.hpp b/include/openPMD/IO/InvalidatableFile.hpp index 6bdc24cbe6..aa0f8c2c6b 100644 --- a/include/openPMD/IO/InvalidatableFile.hpp +++ b/include/openPMD/IO/InvalidatableFile.hpp @@ -82,4 +82,21 @@ struct hash result_type operator()(argument_type const &s) const noexcept; }; + +/** Specialization of std::less for InvalidatableFile + * + * Enables using InvalidatableFile in ordered containers like std::set + * for consistent ordering across parallel processes. + */ +template <> +struct less +{ + using first_argument_type = openPMD::InvalidatableFile; + using second_argument_type = first_argument_type; + using result_type = decltype(std::less<>()( + *std::declval(), + *std::declval())); + result_type + operator()(first_argument_type const &, second_argument_type const &) const; +}; } // namespace std diff --git a/include/openPMD/IO/JSON/JSONIOHandler.hpp b/include/openPMD/IO/JSON/JSONIOHandler.hpp index 07e797d4b3..db8b687ed9 100644 --- a/include/openPMD/IO/JSON/JSONIOHandler.hpp +++ b/include/openPMD/IO/JSON/JSONIOHandler.hpp @@ -59,7 +59,7 @@ class JSONIOHandler : public AbstractIOHandler return "JSON"; } - std::future flush(internal::ParsedFlushParams &) override; + std::future flush_impl(internal::ParsedFlushParams &) override; private: JSONIOHandlerImpl m_impl; diff --git a/include/openPMD/Iteration.hpp b/include/openPMD/Iteration.hpp index c66199c46f..38ee56f7ad 100644 --- a/include/openPMD/Iteration.hpp +++ b/include/openPMD/Iteration.hpp @@ -129,6 +129,15 @@ namespace internal */ StepStatus m_stepStatus = StepStatus::NoStep; + /** + * Cached copy of the key under which this Iteration lives in + * Series::iterations. Populated when the iteration + * object is created/inserted. 
This allows constant-time lookup + * of the owning map entry instead of a linear scan in + * Series::indexOf(). + */ + std::optional m_iterationIndex = std::nullopt; + /** * Information on a parsing request that has not yet been executed. * Otherwise empty. @@ -153,6 +162,8 @@ class Iteration : public Attributable friend class Writable; friend class StatefulIterator; friend class StatefulSnapshotsContainer; + template + friend struct traits::GenerationPolicy; public: Iteration(Iteration const &) = default; @@ -276,6 +287,16 @@ class Iteration : public Attributable private: Iteration(); + /** + * @brief Get the cached iteration index. + * This is the key under which this iteration is stored in the + * Series::iterations map. Used internally for testing the index + * caching optimization. + * + * @return The cached iteration index. + */ + uint64_t getCachedIterationIndex() const; + using Data_t = internal::IterationData; std::shared_ptr m_iterationData; @@ -431,6 +452,26 @@ class Iteration : public Attributable void runDeferredParseAccess(); }; // Iteration +namespace traits +{ + /** Generation policy for Iteration objects. + * + * This policy populates the cached iteration index when an Iteration + * is created or inserted into a Series, enabling constant-time lookup + * of the owning map entry. 
+ */ + template <> + struct GenerationPolicy + { + constexpr static bool is_noop = false; + template + void operator()(Iterator &it) + { + it->second.get().m_iterationIndex = it->first; + } + }; +} // namespace traits + extern template float Iteration::time() const; extern template double Iteration::time() const; diff --git a/include/openPMD/LoadStoreChunk.hpp b/include/openPMD/LoadStoreChunk.hpp new file mode 100644 index 0000000000..89983ab0fe --- /dev/null +++ b/include/openPMD/LoadStoreChunk.hpp @@ -0,0 +1,402 @@ +#pragma once + +#include "openPMD/Dataset.hpp" +#include "openPMD/auxiliary/Future.hpp" +#include "openPMD/auxiliary/Memory.hpp" +#include "openPMD/auxiliary/UniquePtr.hpp" + +// comment to prevent this include from being moved by clang-format +#include "openPMD/DatatypeMacros.hpp" + +#include +#include + +namespace openPMD +{ +class RecordComponent; +class ConfigureStoreChunkFromBuffer; +class ConfigureLoadStoreFromBuffer; +template +class DynamicMemoryView; +class Attributable; + +namespace internal +{ + /** Internal configuration for load/store operations without buffer. Default + * values for optionally specified parameters (offset, extent) must be + * computed to create this configuration struct. */ + struct LoadStoreConfig + { + Offset offset; + Extent extent; + }; + /** Internal configuration for load/store operations with buffer. Default + * values for optionally specified parameters (offset, extent) must be + * computed to create this configuration struct. MemorySelection remains + * optional even then. 
*/ + struct LoadStoreConfigWithBuffer + { + Offset offset; + Extent extent; + std::optional memorySelection; + }; + +} // namespace internal + +namespace auxiliary::detail +{ +#define OPENPMD_ENUMERATE_TYPES(type) , std::shared_ptr + using shared_ptr_dataset_types = auxiliary::detail::variant_tail_t< + auxiliary::detail::bottom OPENPMD_FOREACH_DATASET_DATATYPE( + OPENPMD_ENUMERATE_TYPES)>; +#undef OPENPMD_ENUMERATE_TYPES +} // namespace auxiliary::detail + +/** Base class for configuring load/store chunk operations. + * + * Actual data members of `ConfigureLoadStore<>` and methods that don't + * depend on the ChildClass template parameter. By extracting the members to + * this struct, we can pass them around between different instances of the + * class template. Numbers of method instantiations can be reduced. + */ +class ConfigureLoadStore +{ + friend class openPMD::RecordComponent; + +protected: + ConfigureLoadStore(RecordComponent &); + RecordComponent &m_rc; + + std::optional m_offset; + std::optional m_extent; + + bool m_unsafeNoAutomaticFlush = false; + + [[nodiscard]] auto dim() const -> uint8_t; + auto storeChunkConfig() -> internal::LoadStoreConfig; + + auto deferFlush(Attributable &); + + auto getOffset() -> Offset const &; + auto getExtent() -> Extent const &; + + // The below methods return void. + // For chaining calls, they should return *this, but this class right + // here is going to be somewhere in the inheritance chain, and the final + // class should be returned. Could be solved more elegantly with CRT, + // but that blows up compile-time, so we make internal void functions + // and then repeat them in the final classes. + // (e.g. 
ConfigureLoadStoreFromBuffer::offset()) + + void offset_impl(Offset); + void extent_impl(Extent); + void unsafeNoAutomaticFlush_impl(); + +private: + auto withSharedPtr_impl_mut(std::shared_ptr data, Datatype) + -> openPMD::ConfigureLoadStoreFromBuffer; + auto withSharedPtr_impl_const(std::shared_ptr data, Datatype) + -> openPMD::ConfigureStoreChunkFromBuffer; + auto withUniquePtr_impl_mut(UniquePtrWithLambda, Datatype) + -> openPMD::ConfigureStoreChunkFromBuffer; + auto withUniquePtr_impl_const(UniquePtrWithLambda, Datatype) + -> openPMD::ConfigureStoreChunkFromBuffer; + auto withRawPtr_impl_mut(void *data, Datatype) + -> openPMD::ConfigureLoadStoreFromBuffer; + auto withRawPtr_impl_const(void const *data, Datatype) + -> openPMD::ConfigureStoreChunkFromBuffer; + +public: + using this_t = ConfigureLoadStore; + + // Configuration methods (always available) + + /** Set the offset within the dataset + * + * Optional. The operation will apply without offset by default (i.e. offset + * = (0, 0, ...)). + * + * @param offset Offset within the dataset + * @return Reference to this object for chaining + */ + auto offset(Offset offset) -> this_t & + { + offset_impl(std::move(offset)); + return *this; + } + /** Set the extent within the dataset + * + * Optional. The operation will apply to the entire dataset by default (i.e. + * operation extent = global dataset extent - operation offset). + * + * @param extent Extent within the dataset, counted from the offset + * @return Reference to this object for chaining + */ + auto extent(Extent extent) -> this_t & + { + extent_impl(std::move(extent)); + return *this; + } + /** Disable automatic flush after store operation + * + * The returned objects of type DeferredComputation will still return a + * buffer upon get() / operator()(), but these buffers are not guaranteed to + * be filled until explicitly flushing. 
+ * + * @return Reference to this object for chaining + */ + auto unsafeNoAutomaticFlush() -> this_t & + { + unsafeNoAutomaticFlush_impl(); + return *this; + } + + /* + * If the type is non-const, then the return type should be + * ConfigureLoadStoreFromBuffer, but if it is a const type, Load operations + * make no sense, so the return type should be + * ConfigureStoreChunkFromBuffer<>. + */ + template + using shared_ptr_return_type = std::conditional_t< + std::is_const_v, + ConfigureStoreChunkFromBuffer, + ConfigureLoadStoreFromBuffer>; + + /* + * As loading into unique pointer types makes no sense, the case is + * simpler for unique pointers. Just remove the array extents here. + * (Our interface wrappers still support const-type unique pointers, + * but the internal logic does not handle them separately.) + */ + template + using unique_ptr_return_type = openPMD::ConfigureStoreChunkFromBuffer; + + // Buffer specification methods (return specialized configurations) + template + auto withSharedPtr(std::shared_ptr) -> shared_ptr_return_type; + template + auto withUniquePtr(UniquePtrWithLambda) -> unique_ptr_return_type; + template + auto withUniquePtr(std::unique_ptr) -> unique_ptr_return_type; + template + auto withRawPtr(T *data) -> shared_ptr_return_type; + template + auto withContiguousContainer(T_ContiguousContainer &data) + -> std::enable_if_t< + auxiliary::IsContiguousContainer_v, + shared_ptr_return_type>; + + // Enqueue methods (deferred execution) + template + [[nodiscard]] auto storeSpan() -> DynamicMemoryView; + // definition for this one is in RecordComponent.tpp since it needs the + // definition of class RecordComponent. 
+ template + [[nodiscard]] auto storeSpan(F &&createBuffer) -> DynamicMemoryView; + + template + [[nodiscard]] auto load() + -> auxiliary::DeferredComputation>; + + [[nodiscard]] auto loadVariant() -> auxiliary::DeferredComputation< + auxiliary::detail::shared_ptr_dataset_types>; +}; + +/** Configuration for storing chunks from a buffer. + * + * This class is used to configure a store chunk operation, where data is + * stored from a provided buffer into a dataset. + * This class is distinct from ConfigureLoadStoreFromBuffer, since reading + * data does not make sense on const / unique pointer types. This way, the type + * system will only allow read operations where they can actually run. + */ +class ConfigureStoreChunkFromBuffer : public ConfigureLoadStore +{ + friend class ConfigureLoadStore; + +protected: + auxiliary::WriteBuffer m_buffer; + Datatype m_datatype; + std::optional m_mem_select; + + ConfigureStoreChunkFromBuffer( + auxiliary::WriteBuffer buffer, Datatype, ConfigureLoadStore &&); + + // The below methods return void. + // For chaining calls, they should return *this, but this class right + // here is going to be somewhere in the inheritance chain, and the final + // class should be returned. Could be solved more elegantly with CRT, + // but that blows up compile-time, so we make internal void functions + // and then repeat them in the final classes. + + /** Set memory selection for non-contiguous memory regions */ + void memorySelection_impl(MemorySelection); + + auto storeChunkConfig() -> internal::LoadStoreConfigWithBuffer; + +public: + using this_t = ConfigureStoreChunkFromBuffer; + + // Configuration methods (always available) + + /** Set the offset within the dataset + * + * Optional. The operation will apply without offset by default (i.e. offset + * = (0, 0, ...)). 
+ * + * @param offset Offset within the dataset + * @return Reference to this object for chaining + */ + auto offset(Offset offset) -> this_t & + { + offset_impl(std::move(offset)); + return *this; + } + + /** Set the extent within the dataset + * + * Optional. The operation will apply to the entire dataset by default (i.e. + * operation extent = global dataset extent - operation offset). + * + * @param extent Extent within the dataset, counted from the offset + * @return Reference to this object for chaining + */ + auto extent(Extent extent) -> this_t & + { + extent_impl(std::move(extent)); + return *this; + } + + /** Disable automatic flush after store operation + * + * The returned objects of type DeferredComputation will still return a + * buffer upon get() / operator()(), but these buffers are not guaranteed to + * be filled until explicitly flushing. + * + * @return Reference to this object for chaining + */ + auto unsafeNoAutomaticFlush() -> this_t & + { + unsafeNoAutomaticFlush_impl(); + return *this; + } + + /** Set memory selection for non-contiguous memory regions + * + * @param memorySelection Selection of memory region + * @return Reference to this object for chaining + */ + auto memorySelection(MemorySelection memorySelection) -> this_t & + { + memorySelection_impl(std::move(memorySelection)); + return *this; + } + + // Enqueue method (deferred execution) + + /** Store the chunk data + * + * @return Deferred computation that performs the store when invoked + */ + auto store() -> auxiliary::DeferredComputation; + + /** This intentionally shadows the parent class's enqueueLoad methods in + * order to show a compile error when using load() on an object + * of this class. The parent method can still be accessed through + * typecasting if needed. 
+ */ + template + auto load() + { + static_assert( + auxiliary::dependent_false_v, + "Cannot load chunk data into a buffer that is const or a " + "unique_ptr."); + } +}; + +/** Configuration for loading/storing chunks from/to a buffer. + * + * This class supports both loading and storing operations, allowing + * reading data into or writing data from a provided buffer. + */ +class ConfigureLoadStoreFromBuffer : public ConfigureStoreChunkFromBuffer +{ + friend class ConfigureLoadStore; + friend class RecordComponent; + + using ConfigureStoreChunkFromBuffer::ConfigureStoreChunkFromBuffer; + +public: + using this_t = ConfigureLoadStoreFromBuffer; + + // Configuration methods (always available) + + /** Set the offset within the dataset + * + * Optional. The operation will apply without offset by default (i.e. offset + * = (0, 0, ...)). + * + * @param offset Offset within the dataset + * @return Reference to this object for chaining + */ + auto offset(Offset offset) -> this_t & + { + offset_impl(std::move(offset)); + return *this; + } + + /** Set the extent within the dataset + * + * Optional. The operation will apply to the entire dataset by default (i.e. + * operation extent = global dataset extent - operation offset). + * + * @param extent Extent within the dataset, counted from the offset + * @return Reference to this object for chaining + */ + auto extent(Extent extent) -> this_t & + { + extent_impl(std::move(extent)); + return *this; + } + + /** Disable automatic flush after operation + * + * The returned objects of type DeferredComputation will still return a + * buffer upon get() / operator()(), but these buffers are not guaranteed to + * be filled until explicitly flushing. 
+ * + * @return Reference to this object for chaining + */ + auto unsafeNoAutomaticFlush() -> this_t & + { + unsafeNoAutomaticFlush_impl(); + return *this; + } + + /** Set memory selection for non-contiguous memory regions + * + * @param memorySelection Selection of memory region + * @return Reference to this object for chaining + */ + auto memorySelection(MemorySelection memorySelection) -> this_t & + { + memorySelection_impl(std::move(memorySelection)); + return *this; + } + + // Enqueue method (deferred execution) + + /** Load the chunk data into the buffer + * + * @return Deferred computation that performs the load when invoked + */ + auto load() -> auxiliary::DeferredComputation; +}; + +} // namespace openPMD + +#include "openPMD/UndefDatatypeMacros.hpp" +// comment to prevent these includes from being moved by clang-format +#include "openPMD/LoadStoreChunk.tpp" diff --git a/include/openPMD/LoadStoreChunk.tpp b/include/openPMD/LoadStoreChunk.tpp new file mode 100644 index 0000000000..f6b0fedf15 --- /dev/null +++ b/include/openPMD/LoadStoreChunk.tpp @@ -0,0 +1,76 @@ +#pragma once + +#include "openPMD/LoadStoreChunk.hpp" + +namespace openPMD +{ +template +auto ConfigureLoadStore::withSharedPtr(std::shared_ptr data) + -> shared_ptr_return_type +{ + using T_decayed = std::remove_cv_t>; + constexpr auto dtype = determineDatatype(); + if constexpr (std::is_const_v) + { + return withSharedPtr_impl_const(data, dtype); + } + else + { + return withSharedPtr_impl_mut(data, dtype); + } +} + +template +auto ConfigureLoadStore::withUniquePtr(UniquePtrWithLambda data) + -> unique_ptr_return_type + +{ + using T_decayed = std::remove_cv_t>; + constexpr auto dtype = determineDatatype(); + if constexpr (std::is_const_v) + { + return withUniquePtr_impl_const( + std::move(data).template static_cast_(), dtype); + } + else + { + return withUniquePtr_impl_mut( + std::move(data).template static_cast_(), dtype); + } +} + +template +auto ConfigureLoadStore::withUniquePtr(std::unique_ptr 
data) + -> unique_ptr_return_type +{ + return withUniquePtr(UniquePtrWithLambda(std::move(data))); +} + +template +auto ConfigureLoadStore::withRawPtr(T *data) -> shared_ptr_return_type +{ + using T_decayed = std::remove_cv_t>; + constexpr auto dtype = determineDatatype(); + if constexpr (std::is_const_v) + { + return withRawPtr_impl_const(data, dtype); + } + else + { + return withRawPtr_impl_mut(data, dtype); + } +} + +template +auto ConfigureLoadStore::withContiguousContainer(T_ContiguousContainer &data) + -> std::enable_if_t< + auxiliary::IsContiguousContainer_v, + shared_ptr_return_type> +{ + if (!m_extent.has_value() && dim() == 1) + { + m_extent = Extent{data.size()}; + } + return withRawPtr(data.data()); +} +} // namespace openPMD diff --git a/include/openPMD/ParticleSpecies.hpp b/include/openPMD/ParticleSpecies.hpp index 4f309c0f2a..2c38a0cfe1 100644 --- a/include/openPMD/ParticleSpecies.hpp +++ b/include/openPMD/ParticleSpecies.hpp @@ -57,14 +57,19 @@ class ParticleSpecies : public Container namespace traits { + /** Generation policy for ParticleSpecies objects. + * + * Links particle patches to their parent hierarchy when a species is + * created. 
+ */ template <> struct GenerationPolicy { constexpr static bool is_noop = false; template - void operator()(T &ret) + void operator()(T &it) { - ret.particlePatches.linkHierarchy(ret.writable()); + it->second.particlePatches.linkHierarchy(it->second.writable()); } }; } // namespace traits diff --git a/include/openPMD/RecordComponent.hpp b/include/openPMD/RecordComponent.hpp index d06b4213f4..58cfee1478 100644 --- a/include/openPMD/RecordComponent.hpp +++ b/include/openPMD/RecordComponent.hpp @@ -22,15 +22,13 @@ #include "openPMD/Dataset.hpp" #include "openPMD/Datatype.hpp" +#include "openPMD/LoadStoreChunk.hpp" #include "openPMD/auxiliary/ShareRaw.hpp" #include "openPMD/auxiliary/TypeTraits.hpp" #include "openPMD/auxiliary/UniquePtr.hpp" #include "openPMD/backend/Attributable.hpp" #include "openPMD/backend/BaseRecordComponent.hpp" -// comment to prevent this include from being moved by clang-format -#include "openPMD/DatatypeMacros.hpp" - #include #include #include @@ -128,6 +126,12 @@ class RecordComponent : public BaseRecordComponent friend class MeshRecordComponent; template friend T &internal::makeOwning(T &self, Series); + friend class ConfigureLoadStore; + friend class ConfigureLoadStoreFromBuffer; + friend class ConfigureStoreChunkFromBuffer; + friend struct VisitorEnqueueLoadVariantWithoutFlush; + friend struct VisitorEnqueueLoadVariantWithFlush; + friend struct VisitorLoadVariant; public: enum class Allocation @@ -214,6 +218,16 @@ class RecordComponent : public BaseRecordComponent */ bool empty() const; + /** Prepare a load/store chunk configuration object + * + * This is the entry point for the experimental new API for loading and + * storing chunks. It returns a ConfigureLoadStore object that can be used + * to specify offset, extent, and buffer for the operation. 
+ * + * @return ConfigureLoadStore object for configuring the operation + */ + ConfigureLoadStore prepareLoadStore(); + /** Load and allocate a chunk of data * * Set offset to {0u} and extent to {-1u} for full selection. @@ -224,11 +238,8 @@ class RecordComponent : public BaseRecordComponent template std::shared_ptr loadChunk(Offset = {0u}, Extent = {-1u}); -#define OPENPMD_ENUMERATE_TYPES(type) , std::shared_ptr - using shared_ptr_dataset_types = auxiliary::detail::variant_tail_t< - auxiliary::detail::bottom OPENPMD_FOREACH_DATASET_DATATYPE( - OPENPMD_ENUMERATE_TYPES)>; -#undef OPENPMD_ENUMERATE_TYPES + using shared_ptr_dataset_types = + auxiliary::detail::shared_ptr_dataset_types; /** std::variant-based version of allocating loadChunk(Offset, Extent) * @@ -266,25 +277,6 @@ class RecordComponent : public BaseRecordComponent template void loadChunk(std::shared_ptr data, Offset offset, Extent extent); - /** Load a chunk of data into pre-allocated memory, array version. - * - * @param data Preallocated, contiguous buffer, large enough to load the - * the requested data into it. - * The shared pointer must own and manage the buffer. - * Optimizations might be implemented based on this - * assumption (e.g. skipping the operation if the backend - * is the unique owner). - * The array-based overload helps avoid having to manually - * specify the delete[] destructor (C++17 feature). - * @param offset Offset within the dataset. Set to {0u} for full selection. - * @param extent Extent within the dataset, counted from the offset. - * Set to {-1u} for full selection. - * If offset is non-zero and extent is {-1u} the leftover - * extent in the record component will be selected. - */ - template - void loadChunk(std::shared_ptr data, Offset offset, Extent extent); - /** Load a chunk of data into pre-allocated memory, raw pointer version. 
* * @param data Preallocated, contiguous buffer, large enough to load the @@ -324,18 +316,6 @@ class RecordComponent : public BaseRecordComponent template void storeChunk(std::shared_ptr data, Offset offset, Extent extent); - /** Store a chunk of data from a chunk of memory, array version. - * - * @param data Preallocated, contiguous buffer, large enough to read the - * the specified data from it. - * The array-based overload helps avoid having to manually - * specify the delete[] destructor (C++17 feature). - * @param offset Offset within the dataset. - * @param extent Extent within the dataset, counted from the offset. - */ - template - void storeChunk(std::shared_ptr data, Offset offset, Extent extent); - /** Store a chunk of data from a chunk of memory, unique pointer version. * * @param data Preallocated, contiguous buffer, large enough to read the @@ -497,8 +477,28 @@ class RecordComponent : public BaseRecordComponent */ RecordComponent &makeEmpty(Dataset d); - void storeChunk( - auxiliary::WriteBuffer buffer, Datatype datatype, Offset o, Extent e); + void storeChunk_impl( + auxiliary::WriteBuffer buffer, + Datatype datatype, + internal::LoadStoreConfigWithBuffer); + + template + DynamicMemoryView storeChunkSpan_impl(internal::LoadStoreConfig); + template + DynamicMemoryView storeChunkSpanCreateBuffer_impl( + internal::LoadStoreConfig, F &&createBuffer); + + template + void loadChunk_impl( + std::shared_ptr const &, internal::LoadStoreConfigWithBuffer); + void loadChunk_impl( + std::shared_ptr const &, + Datatype, + internal::LoadStoreConfigWithBuffer); + template + std::shared_ptr loadChunkAllocate_impl(internal::LoadStoreConfig); + std::shared_ptr loadChunkAllocate_impl( + Datatype, size_t dtype_size, internal::LoadStoreConfig); // clang-format off OPENPMD_protected @@ -564,6 +564,4 @@ namespace internal } // namespace openPMD -#include "openPMD/UndefDatatypeMacros.hpp" -// comment to prevent these includes from being moved by clang-format #include 
"RecordComponent.tpp" diff --git a/include/openPMD/RecordComponent.tpp b/include/openPMD/RecordComponent.tpp index 9d1d8332b4..f5d31f1faa 100644 --- a/include/openPMD/RecordComponent.tpp +++ b/include/openPMD/RecordComponent.tpp @@ -23,6 +23,7 @@ #include "openPMD/Datatype.hpp" #include "openPMD/Error.hpp" +#include "openPMD/LoadStoreChunk.hpp" #include "openPMD/RecordComponent.hpp" #include "openPMD/Span.hpp" #include "openPMD/auxiliary/Memory.hpp" @@ -32,6 +33,7 @@ #include "openPMD/backend/Attributable.hpp" #include +#include #include namespace openPMD @@ -41,8 +43,12 @@ template inline void RecordComponent::storeChunk(std::unique_ptr data, Offset o, Extent e) { - storeChunk( - UniquePtrWithLambda(std::move(data)), std::move(o), std::move(e)); + prepareLoadStore() + .offset(std::move(o)) + .extent(std::move(e)) + .withUniquePtr(std::move(data)) + .unsafeNoAutomaticFlush() + .store(); } template @@ -50,48 +56,40 @@ inline typename std::enable_if_t< auxiliary::IsContiguousContainer_v> RecordComponent::storeChunk(T_ContiguousContainer &data, Offset o, Extent e) { - uint8_t dim = getDimensionality(); + auto storeChunkConfig = prepareLoadStore(); - // default arguments - // offset = {0u}: expand to right dim {0u, 0u, ...} - Offset offset = o; - if (o.size() == 1u && o.at(0) == 0u) + auto joined_dim = joinedDimension(); + if (!joined_dim.has_value() && (o.size() != 1 || o.at(0) != 0u)) { - if (joinedDimension().has_value()) - { - offset.clear(); - } - else if (dim > 1u) - { - offset = Offset(dim, 0u); - } + storeChunkConfig.offset(std::move(o)); + } + if (e.size() != 1 || e.at(0) != -1u) + { + storeChunkConfig.extent(std::move(e)); } - // extent = {-1u}: take full size - Extent extent(dim, 1u); - // avoid outsmarting the user: - // - stdlib data container implement 1D -> 1D chunk to write - if (e.size() == 1u && e.at(0) == -1u && dim == 1u) - extent.at(0) = data.size(); - else - extent = e; - - storeChunk(auxiliary::shareRaw(data.data()), offset, extent); + 
std::move(storeChunkConfig) + .withContiguousContainer(data) + .unsafeNoAutomaticFlush() + .store(); } template inline DynamicMemoryView RecordComponent::storeChunk(Offset o, Extent e, F &&createBuffer) { - verifyChunk(o, e); + return prepareLoadStore() + .offset(std::move(o)) + .extent(std::move(e)) + .storeSpan(std::forward(createBuffer)); +} - /* - * The openPMD backend might not yet know about this dataset. - * Flush the openPMD hierarchy to the backend without flushing any actual - * data yet. - */ - seriesFlush_impl( - {FlushLevel::SkeletonOnly}); +template +inline DynamicMemoryView RecordComponent::storeChunkSpanCreateBuffer_impl( + internal::LoadStoreConfig cfg, F &&createBuffer) +{ + auto [o, e] = std::move(cfg); + verifyChunk(o, e); size_t size = 1; for (auto ext : e) @@ -102,33 +100,61 @@ RecordComponent::storeChunk(Offset o, Extent e, F &&createBuffer) * Flushing the skeleton does not create datasets, * so we might need to do it now. */ - if (!written()) + auto &rc = get(); + if (!rc.m_dataset.has_value()) { - auto &rc = get(); - if (!rc.m_dataset.has_value()) - { - throw error::WrongAPIUsage( - "[RecordComponent] Must specify dataset type and extent before " - "using storeChunk() (see RecordComponent::resetDataset())."); - } - Parameter dCreate(rc.m_dataset.value()); - dCreate.name = Attributable::get().m_writable.ownKeyWithinParent; - IOHandler()->enqueue(IOTask(this, dCreate)); + throw error::WrongAPIUsage( + "[RecordComponent] Must specify dataset type and extent before " + "using storeChunk() (see RecordComponent::resetDataset())."); } + Parameter query; + query.queryOnly = true; + IOHandler()->enqueue(IOTask(this, query)); + IOHandler()->flush(internal::defaultFlushParams); + Parameter getBufferView; getBufferView.offset = o; getBufferView.extent = e; getBufferView.dtype = getDatatype(); - IOHandler()->enqueue(IOTask(this, getBufferView)); - IOHandler()->flush(internal::defaultFlushParams); - auto &out = *getBufferView.out; - if 
(!out.backendManagedBuffer) + + if (query.out->backendManagedBuffer) + { + // Need to initialize the dataset for the Span API + // But this is a non-collective call and initializing the dataset is + // collective in HDF5 So we do this only in backends that actually + // support the Span API (i.e. ADIOS2) which do not share this + // restriction + // TODO: Add some form of collective ::commitDefinitions() call to + // RecordComponents to be called by users before the Span API + if (!written()) + { + /* + * The openPMD backend might not yet know about this dataset. + * Flush the openPMD hierarchy to the backend without flushing any + * actual data yet. + */ + seriesFlush_impl( + {FlushLevel::SkeletonOnly}); + Parameter dCreate(rc.m_dataset.value()); + dCreate.name = Attributable::get().m_writable.ownKeyWithinParent; + IOHandler()->enqueue(IOTask(this, dCreate)); + + setWritten(true, EnqueueAsynchronously::OnlyAsync); + } + + IOHandler()->enqueue(IOTask(this, getBufferView)); + IOHandler()->flush(internal::defaultFlushParams); + } + + // The backend might still refuse the operation even if backend managed + // buffers are generally supported, so check again + if (!getBufferView.out->backendManagedBuffer) { // note that data might have either // type shared_ptr or shared_ptr auto data = std::forward(createBuffer)(size); - out.ptr = static_cast(data.get()); + getBufferView.out->ptr = static_cast(data.get()); if (size > 0) { storeChunk(std::move(data), std::move(o), std::move(e)); @@ -169,4 +195,12 @@ inline auto RecordComponent::visit(Args &&...args) return switchDatasetType>( getDatatype(), *this, std::forward(args)...); } + +// definitions for LoadStoreChunk.hpp +template +auto ConfigureLoadStore::storeSpan(F &&createBuffer) -> DynamicMemoryView +{ + return m_rc.storeChunkSpanCreateBuffer_impl( + storeChunkConfig(), std::forward(createBuffer)); +} } // namespace openPMD diff --git a/include/openPMD/auxiliary/Defer.hpp b/include/openPMD/auxiliary/Defer.hpp new file 
mode 100644 index 0000000000..804e775c70 --- /dev/null +++ b/include/openPMD/auxiliary/Defer.hpp @@ -0,0 +1,80 @@ +#pragma once + +#include +#include +#include + +namespace openPMD::auxiliary +{ +/** Defer wrapper + * + * Executes a functor when destroyed unless explicitly cancelled. + * Similar to Go's defer or C++'s experimental::scope_exit. + * + * Similar also to DeferredComputation under Future.hpp, but has another + * application scope (this: internal resource cleanup, that: public Future-like + * API) and is hence kept separate. + * + * @tparam F The functor type + */ +template +struct defer_type +{ + F functor; + bool do_run_this = true; + ~defer_type() + { + if (!do_run_this) + { + return; + } + do_run_this = false; + std::move(functor)(); + } + + explicit defer_type() = default; + + struct forwarding_tag + {}; + + template + defer_type(forwarding_tag, F_ &&functor_in) + : functor{std::forward(functor_in)} + {} + + template + defer_type(defer_type &&other) : functor{std::move(other.functor)} + { + other.do_run_this = false; + } + + template + auto operator=(defer_type &&other) + { + functor = std::move(other.functor); + other.do_run_this = false; + } + + defer_type(defer_type const &) = delete; + auto operator=(defer_type const &) -> defer_type & = delete; +}; + +/** Type-erased defer wrapper for void functors */ +using opaque_defer_type = defer_type>; + +/** Create a defer wrapper + * + * Creates a defer wrapper that will execute the given functor when + * destroyed. 
+ * + * @param functor The functor to execute on destruction + * @return A defer wrapper + */ +template +auto defer(F &&functor) -> defer_type> +{ + using res_t = defer_type>; + using tag_t = typename res_t::forwarding_tag; + return res_t{tag_t{}, std::forward(functor)}; +} +} // namespace openPMD::auxiliary diff --git a/include/openPMD/auxiliary/Filesystem.hpp b/include/openPMD/auxiliary/Filesystem.hpp index 25239f65a3..6ad0288a5f 100644 --- a/include/openPMD/auxiliary/Filesystem.hpp +++ b/include/openPMD/auxiliary/Filesystem.hpp @@ -20,6 +20,7 @@ */ #pragma once +#include #include #include @@ -66,6 +67,19 @@ namespace auxiliary */ std::vector list_directory(std::string const &path); + /** List all contents of a directory at a given absolute or relative path. + * + * @note The equivalent of `ls path` + * @note Both contained files and directories are listed. + * `.` and `..` are not returned. + * @param path Absolute or relative path of directory to examine. + * @return Vector of all contained files and directories, + * or an empty option in case of an IO error, in which case the + * specific error will be stored in errno. + */ + std::optional> + list_directory_nothrow(std::string const &path); + /** Create all required directories to have a reachable given absolute or * relative path. 
* diff --git a/include/openPMD/auxiliary/Future.hpp b/include/openPMD/auxiliary/Future.hpp new file mode 100644 index 0000000000..e4e51dc7eb --- /dev/null +++ b/include/openPMD/auxiliary/Future.hpp @@ -0,0 +1,131 @@ +#pragma once + +#include +#include +#include + +namespace openPMD::auxiliary::detail +{ +/** Internal helper for deferred computation - executes task once */ +template +struct OneTimeTask +{ + using task_type = std::function; + // Helper struct so we get auto-generated move constructor / assignment + // operator, but can still override constructors outside + struct Members + { + task_type m_task; + bool m_task_valid = true; + }; + Members members; + + static constexpr bool noexcept_move = + std::is_move_constructible_v && + std::is_move_assignable_v; + + explicit OneTimeTask(); + OneTimeTask(task_type); + + OneTimeTask(OneTimeTask &&) noexcept(noexcept_move); + OneTimeTask(OneTimeTask const &) = delete; + + auto operator=(OneTimeTask &&) noexcept(noexcept_move) -> OneTimeTask &; + auto operator=(OneTimeTask const &) -> OneTimeTask & = delete; + + auto operator()() -> T; +}; + +/** Internal helper for cached value storage. Used when the API requires + * creation of a DeferredComputation object, but there is not actually a + * computation to run. */ +template +struct CachedValue +{ + T val; +}; +template <> +struct CachedValue +{ + // this is silly +}; +} // namespace openPMD::auxiliary::detail + +namespace openPMD::auxiliary +{ +/** A computation that is deferred until explicitly invoked. + * + * This class wraps a callable, allowing lazy evaluation. + * The computation is performed once on first invocation, repeated invocation is + * an error. Check if the computation is still valid by calling valid(). + * + * Note: Some API operations may construct a DeferredComputation without any + * actual computation, instead emplacing a cached value. This is treated + * transparently to the user. 
In this case however, the object will not turn + * invalid upon invocation. + * + * @tparam T The return type of the computation + */ +template +class DeferredComputation +{ + using task_type = std::function; + using cached_type = std::conditional_t< + std::is_void_v, + // just something that is not void + detail::CachedValue, + T>; + std::variant, detail::CachedValue> m_task; + +public: + static constexpr bool noexcept_move = + std::is_move_constructible_v> && + std::is_move_assignable_v> && + std::is_move_constructible_v> && + std::is_move_assignable_v>; + /** Construct from a callable + * + * @param task The callable to execute + */ + DeferredComputation(task_type task); + /** Construct from a cached value + * + * @param val The pre-computed value + */ + DeferredComputation(cached_type val); + + explicit DeferredComputation(); + + DeferredComputation(DeferredComputation &&) noexcept(noexcept_move); + DeferredComputation(DeferredComputation const &) = delete; + + auto operator=(DeferredComputation &&) noexcept(noexcept_move) + -> DeferredComputation &; + auto operator=(DeferredComputation const &) + -> DeferredComputation & = delete; + + ~DeferredComputation(); + + /** Get the result of the computation + * + * @return The result of the computation + */ + auto get() -> T; + /** Invoke the computation + * + * Alias for get() + * @return The result of the computation + */ + auto operator()() -> T; + + /** Discard the computation without executing it + */ + void invalidate() &&; + + /** Check if the computation is valid + * + * @return true if the computation has not been invalidated + */ + [[nodiscard]] auto valid() const noexcept -> bool; +}; +} // namespace openPMD::auxiliary diff --git a/include/openPMD/auxiliary/Memory.hpp b/include/openPMD/auxiliary/Memory.hpp index 6f8807b354..99b6b53d22 100644 --- a/include/openPMD/auxiliary/Memory.hpp +++ b/include/openPMD/auxiliary/Memory.hpp @@ -65,6 +65,7 @@ namespace auxiliary [[nodiscard]] auto release() -> 
UniquePtrWithLambda; }; using SharedPtr = std::shared_ptr; + using ReadSharedPtr = std::shared_ptr; /* * Use std::any publically since some compilers have trouble with * certain uses of std::variant, so hide it from them. @@ -73,17 +74,21 @@ namespace auxiliary */ std::any m_buffer; - WriteBuffer(); - WriteBuffer(std::shared_ptr ptr); - WriteBuffer(UniquePtrWithLambda ptr); + explicit WriteBuffer(); + // @todo implementation must distinguish const types + template + explicit WriteBuffer(std::shared_ptr ptr); + explicit WriteBuffer(UniquePtrWithLambda ptr); WriteBuffer(WriteBuffer &&) noexcept; WriteBuffer(WriteBuffer const &) = delete; WriteBuffer &operator=(WriteBuffer &&) noexcept; WriteBuffer &operator=(WriteBuffer const &) = delete; - WriteBuffer const &operator=(std::shared_ptr ptr); - WriteBuffer const &operator=(UniquePtrWithLambda ptr); + // @todo implementation must distinguish const types + template + WriteBuffer &operator=(std::shared_ptr const &ptr); + WriteBuffer &operator=(UniquePtrWithLambda ptr); void const *get() const; diff --git a/include/openPMD/auxiliary/Memory_internal.hpp b/include/openPMD/auxiliary/Memory_internal.hpp index bf3c8ccb4b..ee7134d9dc 100644 --- a/include/openPMD/auxiliary/Memory_internal.hpp +++ b/include/openPMD/auxiliary/Memory_internal.hpp @@ -25,6 +25,8 @@ namespace openPMD::auxiliary { // cannot use a unique_ptr inside a std::variant, so we represent it with this -using WriteBufferTypes = - std::variant; +using WriteBufferTypes = std::variant< + WriteBuffer::CopyableUniquePtr, + WriteBuffer::SharedPtr, + WriteBuffer::ReadSharedPtr>; } // namespace openPMD::auxiliary diff --git a/include/openPMD/auxiliary/UniquePtr.hpp b/include/openPMD/auxiliary/UniquePtr.hpp index 87f3261b45..ee17794d3e 100644 --- a/include/openPMD/auxiliary/UniquePtr.hpp +++ b/include/openPMD/auxiliary/UniquePtr.hpp @@ -176,10 +176,11 @@ template UniquePtrWithLambda UniquePtrWithLambda::static_cast_() && { using other_type = std::remove_extent_t; + auto 
original_ptr = this->release(); return UniquePtrWithLambda{ - static_cast(this->release()), - [deleter = std::move(this->get_deleter())](other_type *ptr) { - deleter(static_cast(ptr)); + static_cast(original_ptr), + [deleter = std::move(this->get_deleter()), original_ptr](other_type *) { + deleter(original_ptr); }}; } } // namespace openPMD diff --git a/include/openPMD/backend/Attributable.hpp b/include/openPMD/backend/Attributable.hpp index 1c1d3db4a5..8767e92cb0 100644 --- a/include/openPMD/backend/Attributable.hpp +++ b/include/openPMD/backend/Attributable.hpp @@ -244,6 +244,7 @@ class Attributable friend class internal::AttributableData; friend class Snapshots; friend struct internal::HomogenizeExtents; + friend class ConfigureLoadStore; protected: // tag for internal constructor @@ -589,9 +590,10 @@ OPENPMD_protected { return writable().written; } - enum class EnqueueAsynchronously : bool + enum class EnqueueAsynchronously : uint8_t { - Yes, + OnlyAsync, + Both, No }; /* diff --git a/include/openPMD/backend/Attribute.hpp b/include/openPMD/backend/Attribute.hpp index 446b4a7375..9dc621d6e7 100644 --- a/include/openPMD/backend/Attribute.hpp +++ b/include/openPMD/backend/Attribute.hpp @@ -153,9 +153,10 @@ namespace detail { U res{}; res.reserve(pv->size()); - if constexpr (std::is_convertible_v< - typename T::value_type, - typename U::value_type>) + if constexpr ( + std::is_convertible_v< + typename T::value_type, + typename U::value_type>) { std::copy(pv->begin(), pv->end(), std::back_inserter(res)); return {res}; @@ -194,9 +195,10 @@ namespace detail { U res{}; res.reserve(pv->size()); - if constexpr (std::is_convertible_v< - typename T::value_type, - typename U::value_type>) + if constexpr ( + std::is_convertible_v< + typename T::value_type, + typename U::value_type>) { std::copy(pv->begin(), pv->end(), std::back_inserter(res)); return {res}; @@ -234,9 +236,10 @@ namespace detail else if constexpr (auxiliary::IsVector_v && auxiliary::IsArray_v) { U res{}; - if 
constexpr (std::is_convertible_v< - typename T::value_type, - typename U::value_type>) + if constexpr ( + std::is_convertible_v< + typename T::value_type, + typename U::value_type>) { if (res.size() != pv->size()) { diff --git a/include/openPMD/backend/ContainerImpl.tpp b/include/openPMD/backend/ContainerImpl.tpp index 0384333e10..de11c91f14 100644 --- a/include/openPMD/backend/ContainerImpl.tpp +++ b/include/openPMD/backend/ContainerImpl.tpp @@ -140,7 +140,8 @@ auto Container::operator[](key_type const &key) T t = T(); t.linkHierarchy(writable()); - auto &ret = container().insert({key, std::move(t)}).first->second; + auto inserted_iterator = container().insert({key, std::move(t)}).first; + auto &ret = inserted_iterator->second; if constexpr (std::is_same_v) { ret.writable().ownKeyWithinParent = key; @@ -150,7 +151,7 @@ auto Container::operator[](key_type const &key) ret.writable().ownKeyWithinParent = std::to_string(key); } traits::GenerationPolicy gen; - gen(ret); + gen(inserted_iterator); return ret; } } @@ -172,7 +173,8 @@ auto Container::operator[](key_type &&key) T t = T(); t.linkHierarchy(writable()); - auto &ret = container().insert({key, std::move(t)}).first->second; + auto inserted_iterator = container().insert({key, std::move(t)}).first; + auto &ret = inserted_iterator->second; if constexpr (std::is_same_v) { ret.writable().ownKeyWithinParent = std::move(key); @@ -182,7 +184,7 @@ auto Container::operator[](key_type &&key) ret.writable().ownKeyWithinParent = std::to_string(std::move(key)); } traits::GenerationPolicy gen; - gen(ret); + gen(inserted_iterator); return ret; } } diff --git a/include/openPMD/backend/PatchRecordComponent.hpp b/include/openPMD/backend/PatchRecordComponent.hpp index fed17dfd3b..eedd84e158 100644 --- a/include/openPMD/backend/PatchRecordComponent.hpp +++ b/include/openPMD/backend/PatchRecordComponent.hpp @@ -122,7 +122,8 @@ template inline void PatchRecordComponent::load(std::shared_ptr data) { Datatype dtype = determineDatatype(); 
- if (dtype != getDatatype()) + // Attention: Do NOT use operator==(), doesnt work properly on Windows! + if (!isSame(dtype, getDatatype())) throw std::runtime_error( "Type conversion during particle patch loading not yet " "implemented"); @@ -160,10 +161,7 @@ template inline void PatchRecordComponent::store(uint64_t idx, T data) { Datatype dtype = determineDatatype(); - if (dtype != getDatatype() && !isSameInteger(getDatatype()) && - !isSameFloatingPoint(getDatatype()) && - !isSameComplexFloatingPoint(getDatatype()) && - !isSameChar(getDatatype())) + if (!isSame(dtype, getDatatype())) { std::ostringstream oss; oss << "Datatypes of patch data (" << dtype << ") and dataset (" @@ -190,10 +188,7 @@ template inline void PatchRecordComponent::store(T data) { Datatype dtype = determineDatatype(); - if (dtype != getDatatype() && !isSameInteger(getDatatype()) && - !isSameFloatingPoint(getDatatype()) && - !isSameComplexFloatingPoint(getDatatype()) && - !isSameChar(getDatatype())) + if (!isSame(dtype, getDatatype())) { std::ostringstream oss; oss << "Datatypes of patch data (" << dtype << ") and dataset (" diff --git a/src/Datatype.cpp b/src/Datatype.cpp index 479286066c..ead2859c81 100644 --- a/src/Datatype.cpp +++ b/src/Datatype.cpp @@ -29,13 +29,6 @@ namespace openPMD { -void warnWrongDtype(std::string const &key, Datatype store, Datatype request) -{ - std::cerr << "Warning: Attribute '" << key << "' stored as " << store - << ", requested as " << request - << ". 
Casting unconditionally with possible loss of precision.\n"; -} - std::ostream &operator<<(std::ostream &os, openPMD::Datatype const &d) { using DT = openPMD::Datatype; diff --git a/src/IO/ADIOS/ADIOS2File.cpp b/src/IO/ADIOS/ADIOS2File.cpp index 1d20aeb04d..6bddeb6bd4 100644 --- a/src/IO/ADIOS/ADIOS2File.cpp +++ b/src/IO/ADIOS/ADIOS2File.cpp @@ -23,6 +23,7 @@ #include "openPMD/Error.hpp" #include "openPMD/IO/ADIOS/ADIOS2Auxiliary.hpp" #include "openPMD/IO/ADIOS/ADIOS2IOHandler.hpp" +#include "openPMD/IO/ADIOS/macros.hpp" #include "openPMD/IO/AbstractIOHandler.hpp" #include "openPMD/IterationEncoding.hpp" #include "openPMD/auxiliary/Environment.hpp" @@ -70,6 +71,7 @@ void DatasetReader::call( adios2::Variable var = impl->verifyDataset( bp.param.offset, bp.param.extent, + std::nullopt, IO, engine, bp.name, @@ -88,6 +90,12 @@ void DatasetReader::call( template inline constexpr bool always_false_v = false; +static constexpr char const *warningMemorySelection = + "[Warning] Using a version of ADIOS2 that cannot reset memory selections " + "on a variable, once specified. When using memory selections, then please " + "specify it explicitly on all storeChunk() calls. 
Further info: " + "https://github.com/ornladios/ADIOS2/pull/4169."; + template void WriteDataset::call(ADIOS2File &ba, detail::BufferedPut &bp) { @@ -98,7 +106,9 @@ void WriteDataset::call(ADIOS2File &ba, detail::BufferedPut &bp) std::visit( [&](auto &&arg) { using ptr_type = std::decay_t; - if constexpr (std::is_same_v>) + if constexpr ( + std::is_same_v> || + std::is_same_v>) { auto ptr = static_cast(arg.get()); auto &engine = ba.getEngine(); @@ -106,6 +116,7 @@ void WriteDataset::call(ADIOS2File &ba, detail::BufferedPut &bp) adios2::Variable var = ba.m_impl->verifyDataset( bp.param.offset, bp.param.extent, + bp.param.memorySelection, ba.m_IO, engine, bp.name, @@ -113,15 +124,37 @@ void WriteDataset::call(ADIOS2File &ba, detail::BufferedPut &bp) ba.variables()); engine.Put(var, ptr); + if (bp.param.memorySelection.has_value()) + { + if constexpr (openPMD::CanTheMemorySelectionBeReset) + { + var.SetMemorySelection(); + } + else if (!ba.m_impl->printedWarningsAlready.memorySelection) + { + std::cerr << warningMemorySelection << std::endl; + ba.m_impl->printedWarningsAlready.memorySelection = + true; + } + } } - else if constexpr (std::is_same_v< - ptr_type, - auxiliary::WriteBuffer::CopyableUniquePtr>) + else if constexpr ( + std::is_same_v< + ptr_type, + auxiliary::WriteBuffer::CopyableUniquePtr>) { BufferedUniquePtrPut bput; bput.name = std::move(bp.name); bput.offset = std::move(bp.param.offset); bput.extent = std::move(bp.param.extent); + bput.memorySelection = std::move(bp.param.memorySelection); + /* + * Note: Moving is required here since it's a unique_ptr. + * std::forward<>() would theoretically work, but it + * requires the type parameter and we don't have that + * inside the lambda. + * (ptr_type does not work for this case). 
+ */ bput.data = arg.release(); bput.dtype = bp.param.dtype; ba.m_uniquePtrPuts.push_back(std::move(bput)); @@ -169,12 +202,25 @@ struct RunUniquePtrPut adios2::Variable var = ba.m_impl->verifyDataset( bufferedPut.offset, bufferedPut.extent, + bufferedPut.memorySelection, ba.m_IO, engine, bufferedPut.name, std::nullopt, ba.variables()); engine.Put(var, ptr); + if (bufferedPut.memorySelection.has_value()) + { + if constexpr (openPMD::CanTheMemorySelectionBeReset) + { + var.SetMemorySelection(); + } + else if (!ba.m_impl->printedWarningsAlready.memorySelection) + { + std::cerr << warningMemorySelection << std::endl; + ba.m_impl->printedWarningsAlready.memorySelection = true; + } + } } static constexpr char const *errorMsg = "RunUniquePtrPut"; diff --git a/src/IO/ADIOS/ADIOS2IOHandler.cpp b/src/IO/ADIOS/ADIOS2IOHandler.cpp index 2f5a698289..d3b19fde0f 100644 --- a/src/IO/ADIOS/ADIOS2IOHandler.cpp +++ b/src/IO/ADIOS/ADIOS2IOHandler.cpp @@ -51,6 +51,7 @@ #include #include #include +#include #include #include #include @@ -1239,6 +1240,7 @@ namespace detail adios2::Variable variable = impl->verifyDataset( params.offset, params.extent, + std::nullopt, IO, engine, varName, @@ -1316,6 +1318,12 @@ void ADIOS2IOHandlerImpl::getBufferView( parameters.out->backendManagedBuffer = false; return; } + else if (parameters.queryOnly) + { + parameters.out->backendManagedBuffer = true; + return; + } + setAndGetFilePosition(writable); auto file = refreshFileFromParent(writable, /* preferParentFile = */ false); detail::ADIOS2File &ba = getFileData(file, IfFileNotOpen::ThrowError); @@ -1741,7 +1749,7 @@ void ADIOS2IOHandlerImpl::listPaths( */ auto &fileData = getFileData(file, IfFileNotOpen::ThrowError); - std::unordered_set subdirs; + std::set subdirs; /* * When reading an attribute, we cannot distinguish * whether its containing "folder" is a group or a @@ -1883,7 +1891,7 @@ void ADIOS2IOHandlerImpl::listDatasets( auto &fileData = getFileData(file, IfFileNotOpen::ThrowError); - 
std::unordered_set subdirs; + std::set subdirs; for (auto var : fileData.availableVariablesPrefixed(myName)) { // if string still contains a slash, variable is a dataset below the @@ -2620,7 +2628,7 @@ ADIOS2IOHandler::ADIOS2IOHandler( {} std::future -ADIOS2IOHandler::flush(internal::ParsedFlushParams &flushParams) +ADIOS2IOHandler::flush_impl(internal::ParsedFlushParams &flushParams) { return m_impl.flush(flushParams); } @@ -2661,7 +2669,7 @@ ADIOS2IOHandler::ADIOS2IOHandler( std::move(initialize_from), std::move(path), at, std::move(config)) {} -std::future ADIOS2IOHandler::flush(internal::ParsedFlushParams &) +std::future ADIOS2IOHandler::flush_impl(internal::ParsedFlushParams &) { return std::future(); } diff --git a/src/IO/ADIOS/ADIOS2PreloadAttributes.cpp b/src/IO/ADIOS/ADIOS2PreloadAttributes.cpp index 2b9bb02c2d..a9adbb74d6 100644 --- a/src/IO/ADIOS/ADIOS2PreloadAttributes.cpp +++ b/src/IO/ADIOS/ADIOS2PreloadAttributes.cpp @@ -248,7 +248,7 @@ PreloadAdiosAttributes::getAttribute(std::string const &name) const } AttributeLocation const &location = it->second; Datatype determinedDatatype = determineDatatype(); - if (location.dt != determinedDatatype) + if (!isSame(location.dt, determinedDatatype)) { std::stringstream errorMsg; errorMsg << "[ADIOS2] Wrong datatype for attribute: " << name diff --git a/src/IO/AbstractIOHandler.cpp b/src/IO/AbstractIOHandler.cpp index 5f2bdeb2f5..e54336a6c6 100644 --- a/src/IO/AbstractIOHandler.cpp +++ b/src/IO/AbstractIOHandler.cpp @@ -122,6 +122,26 @@ std::future AbstractIOHandler::flush(internal::FlushParams const ¶ms) return future; } +std::future AbstractIOHandler::flush(internal::ParsedFlushParams ¶ms) +{ + // The flush counter indicates the number of times that m_work has been + // emptied. Only increment it if m_work was full before operation and is + // empty after operation. + // Enqueuers can use this counter to check if the enqueued operation has + // been flushed already. 
+ bool increase_flush_counter = !m_work.empty(); + auto res = this->flush_impl(params); + if (!m_work.empty()) + { + throw error::Internal("flush() did not clear all work!"); + } + if (increase_flush_counter) + { + ++*m_flushCounter; + } + return res; +} + bool AbstractIOHandler::fullSupportForVariableBasedEncoding() const { return false; diff --git a/src/IO/AbstractIOHandlerImpl.cpp b/src/IO/AbstractIOHandlerImpl.cpp index 4f93ff1a5b..fe5efc2d66 100644 --- a/src/IO/AbstractIOHandlerImpl.cpp +++ b/src/IO/AbstractIOHandlerImpl.cpp @@ -275,10 +275,26 @@ std::future AbstractIOHandlerImpl::flush() i.writable->parent, "->", i.writable, - "] WRITE_DATASET, offset=", - [¶meter]() { return vec_as_string(parameter.offset); }, - ", extent=", - [¶meter]() { return vec_as_string(parameter.extent); }); + "] WRITE_DATASET: ", + [&]() { + std::stringstream stream; + stream << "offset: " << vec_as_string(parameter.offset) + << " extent: " << vec_as_string(parameter.extent) + << " mem-selection: "; + if (parameter.memorySelection.has_value()) + { + stream << vec_as_string( + parameter.memorySelection->offset) + << "--" + << vec_as_string( + parameter.memorySelection->extent); + } + else + { + stream << "NONE"; + } + return stream.str(); + }); writeDataset(i.writable, parameter); break; } diff --git a/src/IO/DummyIOHandler.cpp b/src/IO/DummyIOHandler.cpp index f3b4e155d2..52c83bb624 100644 --- a/src/IO/DummyIOHandler.cpp +++ b/src/IO/DummyIOHandler.cpp @@ -39,7 +39,7 @@ DummyIOHandler::DummyIOHandler(std::string path, Access at) void DummyIOHandler::enqueue(IOTask const &) {} -std::future DummyIOHandler::flush(internal::ParsedFlushParams &) +std::future DummyIOHandler::flush_impl(internal::ParsedFlushParams &) { return std::future(); } diff --git a/src/IO/HDF5/HDF5IOHandler.cpp b/src/IO/HDF5/HDF5IOHandler.cpp index ee02020983..a6a97fa6ff 100644 --- a/src/IO/HDF5/HDF5IOHandler.cpp +++ b/src/IO/HDF5/HDF5IOHandler.cpp @@ -25,6 +25,7 @@ #include "openPMD/IO/Access.hpp" #include 
"openPMD/IO/FlushParametersInternal.hpp" #include "openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp" +#include "openPMD/auxiliary/Defer.hpp" #include "openPMD/auxiliary/Environment.hpp" #include "openPMD/auxiliary/JSON_internal.hpp" #include "openPMD/auxiliary/Variant.hpp" @@ -388,6 +389,8 @@ void HDF5IOHandlerImpl::createPath( "[HDF5] Creating a path in a file opened as read only is not " "possible."); + herr_t status = 0; + hid_t gapl = H5Pcreate(H5P_GROUP_ACCESS); #if H5_VERSION_GE(1, 10, 0) && openPMD_HAVE_MPI if (m_hdf5_collective_metadata) @@ -396,7 +399,15 @@ void HDF5IOHandlerImpl::createPath( } #endif - herr_t status; + auto defer_close_gapl = auxiliary::defer([&]() { + status = H5Pclose(gapl); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close HDF5 property " + "during path creation." + << std::endl; + } + }); if (!writable->written) { @@ -426,6 +437,20 @@ void HDF5IOHandlerImpl::createPath( /* Create the path in the file */ std::stack groups; groups.push(node_id); + auto defer_close_groups = auxiliary::defer([&]() { + while (!groups.empty()) + { + status = H5Gclose(groups.top()); + if (status != 0) + { + std::cerr + << "[HDF5] Internal error: Failed to close HDF5 group " + "during path creation." 
+ << std::endl; + } + groups.pop(); + } + }); for (std::string const &folder : auxiliary::split(path, "/", false)) { // avoid creation of paths that already exist @@ -447,29 +472,12 @@ void HDF5IOHandlerImpl::createPath( groups.push(group_id); } - /* Close the groups */ - while (!groups.empty()) - { - status = H5Gclose(groups.top()); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 group during path " - "creation"); - groups.pop(); - } - writable->written = true; writable->abstractFilePosition = std::make_shared(path); m_fileNames[writable] = file.name; } - - status = H5Pclose(gapl); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 property during path " - "creation"); } namespace @@ -840,6 +848,7 @@ void HDF5IOHandlerImpl::createDataset( throw error::OperationUnsupportedInBackend( "HDF5", "No support for Datasets with undefined extent."); } + herr_t status = 0; if (!writable->written) { @@ -923,6 +932,27 @@ void HDF5IOHandlerImpl::createDataset( node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during dataset " "creation"); + auto defer_close_node_id = + auxiliary::defer([&]() { + status = H5Gclose(node_id); + if (status != 0) + { + std::cerr + << "[HDF5] Internal error: Failed to close HDF5 group " + "during dataset creation." + << std::endl; + } + }); + auto defer_close_gapl = auxiliary::defer([&]() { + status = H5Pclose(gapl); + if (status != 0) + { + std::cerr + << "[HDF5] Internal error: Failed to close HDF5 property " + "during dataset creation." 
+ << std::endl; + } + }); if (access::append(m_handler->m_backendAccess)) { @@ -938,7 +968,7 @@ void HDF5IOHandlerImpl::createDataset( // > should be able to detect and recycle the file space when no // > other reference to the deleted object exists // https://github.com/openPMD/openPMD-api/pull/1007#discussion_r867223316 - herr_t status = H5Ldelete(node_id, name.c_str(), H5P_DEFAULT); + status = H5Ldelete(node_id, name.c_str(), H5P_DEFAULT); VERIFY( status == 0, "[HDF5] Internal error: Failed to delete old dataset '" + @@ -964,9 +994,29 @@ void HDF5IOHandlerImpl::createDataset( space >= 0, "[HDF5] Internal error: Failed to create dataspace during dataset " "creation"); + auto defer_close_space = auxiliary::defer([&]() { + status = H5Sclose(space); + if (status != 0) + { + std::cerr + << "[HDF5] Internal error: Failed to close HDF5 dataset " + "space during dataset creation." + << std::endl; + } + }); /* enable chunking on the created dataspace */ hid_t datasetCreationProperty = H5Pcreate(H5P_DATASET_CREATE); + auto defer_close_datasetCreationProperty = auxiliary::defer([&]() { + status = H5Pclose(datasetCreationProperty); + if (status != 0) + { + std::cerr + << "[HDF5] Internal error: Failed to close HDF5 dataset " + "creation property during dataset creation." 
+ << std::endl; + } + }); H5Pset_fill_time(datasetCreationProperty, H5D_FILL_TIME_NEVER); @@ -1003,7 +1053,7 @@ void HDF5IOHandlerImpl::createDataset( } else { - herr_t status = H5Pset_chunk( + status = H5Pset_chunk( datasetCreationProperty, chunking->size(), chunking->data()); @@ -1016,7 +1066,7 @@ void HDF5IOHandlerImpl::createDataset( for (auto const &filter : filters) { - herr_t status = std::visit( + status = std::visit( auxiliary::overloaded{ [&](DatasetParams::ByID const &by_id) { return H5Pset_filter( @@ -1050,6 +1100,16 @@ void HDF5IOHandlerImpl::createDataset( datatype >= 0, "[HDF5] Internal error: Failed to get HDF5 datatype during dataset " "creation"); + auto defer_close_datatype = auxiliary::defer([&]() { + status = H5Tclose(datatype); + if (status != 0) + { + std::cerr + << "[HDF5] Internal error: Failed to close HDF5 datatype " + "during dataset creation." + << std::endl; + } + }); hid_t group_id = H5Dcreate( node_id, name.c_str(), @@ -1062,38 +1122,16 @@ void HDF5IOHandlerImpl::createDataset( group_id >= 0, "[HDF5] Internal error: Failed to create HDF5 group during dataset " "creation"); - - herr_t status; - status = H5Dclose(group_id); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 dataset during " - "dataset creation"); - status = H5Tclose(datatype); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 datatype during " - "dataset creation"); - status = H5Pclose(datasetCreationProperty); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 dataset creation " - "property during dataset creation"); - status = H5Sclose(space); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 dataset space during " - "dataset creation"); - status = H5Gclose(node_id); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 group during dataset " - "creation"); - status = H5Pclose(gapl); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 property 
during " - "dataset creation"); + auto defer_close_group_id = auxiliary::defer([&]() { + status = H5Dclose(group_id); + if (status != 0) + { + std::cerr + << "[HDF5] Internal error: Failed to close HDF5 dataset " + "during dataset creation." + << std::endl; + } + }); writable->written = true; writable->abstractFilePosition = @@ -1134,6 +1172,16 @@ void HDF5IOHandlerImpl::extendDataset( dataset_id >= 0, "[HDF5] Internal error: Failed to open HDF5 dataset during dataset " "extension"); + herr_t status = 0; + auto defer_close_dataset_id = auxiliary::defer([&]() { + status = H5Dclose(dataset_id); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close HDF5 dataset " + "during dataset extension." + << std::endl; + } + }); // Datasets may only be extended if they have chunked layout, so let's see // whether this one does @@ -1160,18 +1208,11 @@ void HDF5IOHandlerImpl::extendDataset( for (auto const &val : parameters.extent) size.push_back(static_cast(val)); - herr_t status; status = H5Dset_extent(dataset_id, size.data()); VERIFY( status == 0, "[HDF5] Internal error: Failed to extend HDF5 dataset during dataset " "extension"); - - status = H5Dclose(dataset_id); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 dataset during dataset " - "extension"); } void HDF5IOHandlerImpl::availableChunks( @@ -1191,7 +1232,26 @@ void HDF5IOHandlerImpl::availableChunks( dataset_id >= 0, "[HDF5] Internal error: Failed to open HDF5 dataset during dataset " "read"); + herr_t status = 0; + auto defer_close_dataset_id = auxiliary::defer([&]() { + status = H5Dclose(dataset_id); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close HDF5 dataset " + "during availableChunks task." 
+ << std::endl; + } + }); hid_t dataset_space = H5Dget_space(dataset_id); + auto defer_close_dataset_space = auxiliary::defer([&]() { + status = H5Sclose(dataset_space); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close HDF5 dataset " + "space during availableChunks task." + << std::endl; + } + }); int ndims = H5Sget_simple_extent_ndims(dataset_space); VERIFY( ndims >= 0, @@ -1229,19 +1289,6 @@ void HDF5IOHandlerImpl::availableChunks( extent.push_back(e); } parameters.chunks->emplace_back(std::move(offset), std::move(extent)); - - herr_t status; - status = H5Sclose(dataset_space); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 dataset space during " - "availableChunks task"); - - status = H5Dclose(dataset_id); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 dataset during " - "availableChunks task"); } void HDF5IOHandlerImpl::openFile( @@ -1329,6 +1376,16 @@ void HDF5IOHandlerImpl::openPath( H5Pset_all_coll_metadata_ops(gapl, true); } #endif + herr_t status = 0; + auto defer_close_gapl = auxiliary::defer([&]() { + status = H5Pclose(gapl); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close HDF5 property " + "during path opening." + << std::endl; + } + }); node_id = H5Gopen( file.id, concrete_h5_file_position(writable->parent).c_str(), gapl); @@ -1341,6 +1398,15 @@ void HDF5IOHandlerImpl::openPath( "[HDF5] Internal error: Failed to open HDF5 group during path " "opening"); } + auto defer_close_node_id = auxiliary::defer([&]() { + status = H5Gclose(node_id); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close HDF5 group " + "during path opening." 
+ << std::endl; + } + }); /* Sanitize path */ std::string path = parameters.path; @@ -1360,40 +1426,17 @@ void HDF5IOHandlerImpl::openPath( "[HDF5] Internal error: Failed to open HDF5 group during path " "opening"); } - - herr_t status; - status = H5Gclose(path_id); - if (status != 0) - { - throw error::ReadError( - error::AffectedObject::Group, - error::Reason::Other, - "HDF5", - "[HDF5] Internal error: Failed to close HDF5 group during path " - "opening"); - } - } - - herr_t status; - status = H5Gclose(node_id); - if (status != 0) - { - throw error::ReadError( - error::AffectedObject::Group, - error::Reason::Other, - "HDF5", - "[HDF5] Internal error: Failed to close HDF5 group during path " - "opening"); - } - status = H5Pclose(gapl); - if (status != 0) - { - throw error::ReadError( - error::AffectedObject::Group, - error::Reason::Other, - "HDF5", - "[HDF5] Internal error: Failed to close HDF5 property during path " - "opening"); + auto defer_close_path_id = + auxiliary::defer([&]() { + status = H5Gclose(path_id); + if (status != 0) + { + std::cerr + << "[HDF5] Internal error: Failed to close HDF5 group " + "during path opening." + << std::endl; + } + }); } writable->written = true; @@ -1423,6 +1466,16 @@ void HDF5IOHandlerImpl::openDataset( H5Pset_all_coll_metadata_ops(gapl, true); } #endif + herr_t status = 0; + auto defer_close_gapl = auxiliary::defer([&]() { + status = H5Pclose(gapl); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close HDF5 property " + "during dataset opening." + << std::endl; + } + }); node_id = H5Gopen( file.id, concrete_h5_file_position(writable->parent).c_str(), gapl); @@ -1435,6 +1488,15 @@ void HDF5IOHandlerImpl::openDataset( "Internal error: Failed to open HDF5 group during dataset " "opening"); } + auto defer_close_node_id = auxiliary::defer([&]() { + status = H5Gclose(node_id); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close HDF5 group " + "during dataset opening." 
+ << std::endl; + } + }); /* Sanitize name */ std::string name = parameters.name; @@ -1453,10 +1515,40 @@ void HDF5IOHandlerImpl::openDataset( "Internal error: Failed to open HDF5 dataset during dataset " "opening"); } + auto defer_close_dataset_id = auxiliary::defer([&]() { + status = H5Dclose(dataset_id); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close HDF5 dataset " + "during dataset opening." + << std::endl; + } + }); hid_t dataset_type, dataset_space; dataset_type = H5Dget_type(dataset_id); + auto defer_close_dataset_type = auxiliary::defer([&]() { + status = H5Tclose(dataset_type); + if (status != 0) + { + std::cerr + << "[HDF5] Internal error: Failed to close HDF5 dataset type " + "during dataset opening." + << std::endl; + } + }); + dataset_space = H5Dget_space(dataset_id); + auto defer_close_dataset_space = auxiliary::defer([&]() { + status = H5Sclose(dataset_space); + if (status != 0) + { + std::cerr + << "[HDF5] Internal error: Failed to close HDF5 dataset space " + "during dataset opening." + << std::endl; + } + }); H5S_class_t dataset_class = H5Sget_simple_extent_type(dataset_space); @@ -1534,36 +1626,37 @@ void HDF5IOHandlerImpl::openDataset( "HDF5", "Unknown dataset type"); }; + if (remaining_tries == 0) { throw_error(); } + hid_t next_type = H5Tget_super(dataset_type); if (next_type == H5I_INVALID_HID) { throw_error(); } - else if (H5Tequal(dataset_type, next_type)) - { - H5Tclose(next_type); - throw_error(); - } - else - { - if (H5Tclose(dataset_type) != 0) + + auto defer_close_next_type = auxiliary::defer([&]() { + status = H5Tclose(next_type); + if (status != 0) { - throw error::ReadError( - error::AffectedObject::Group, - error::Reason::Other, - "HDF5", - "Internal error: Failed to close HDF5 dataset type " - "during " - "dataset opening"); + std::cerr + << "[HDF5] Internal error: Failed to close HDF5 " + "dataset type during dataset opening." 
+ << std::endl; } - dataset_type = next_type; - --remaining_tries; - repeat = true; + }); + + if (H5Tequal(dataset_type, next_type)) + { + throw_error(); } + + dataset_type = next_type; + --remaining_tries; + repeat = true; } } while (repeat); } @@ -1597,58 +1690,6 @@ void HDF5IOHandlerImpl::openDataset( *extent = e; } - herr_t status; - status = H5Sclose(dataset_space); - if (status != 0) - { - throw error::ReadError( - error::AffectedObject::Group, - error::Reason::Other, - "HDF5", - "Internal error: Failed to close HDF5 dataset space during " - "dataset opening"); - } - status = H5Tclose(dataset_type); - if (status != 0) - { - throw error::ReadError( - error::AffectedObject::Group, - error::Reason::Other, - "HDF5", - "Internal error: Failed to close HDF5 dataset type during " - "dataset opening"); - } - status = H5Dclose(dataset_id); - if (status != 0) - { - throw error::ReadError( - error::AffectedObject::Group, - error::Reason::Other, - "HDF5", - "Internal error: Failed to close HDF5 dataset during dataset " - "opening"); - } - status = H5Gclose(node_id); - if (status != 0) - { - throw error::ReadError( - error::AffectedObject::Group, - error::Reason::Other, - "HDF5", - "Internal error: Failed to close HDF5 group during dataset " - "opening"); - } - status = H5Pclose(gapl); - if (status != 0) - { - throw error::ReadError( - error::AffectedObject::Group, - error::Reason::Other, - "HDF5", - "Internal error: Failed to close HDF5 property during dataset " - "opening"); - } - writable->written = true; writable->abstractFilePosition = std::make_shared(name); @@ -1721,20 +1762,26 @@ void HDF5IOHandlerImpl::deletePath( node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during path " "deletion"); + herr_t status = 0; + auto defer_close_node_id = + auxiliary::defer([&]() { + status = H5Gclose(node_id); + if (status != 0) + { + std::cerr + << "[HDF5] Internal error: Failed to close HDF5 group " + "during path deletion." 
+ << std::endl; + } + }); path += static_cast( writable->abstractFilePosition.get()) ->location; - herr_t status = H5Ldelete(node_id, path.c_str(), H5P_DEFAULT); + status = H5Ldelete(node_id, path.c_str(), H5P_DEFAULT); VERIFY( status == 0, "[HDF5] Internal error: Failed to delete HDF5 group"); - status = H5Gclose(node_id); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 group during path " - "deletion"); - writable->written = false; writable->abstractFilePosition.reset(); @@ -1773,20 +1820,26 @@ void HDF5IOHandlerImpl::deleteDataset( node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during dataset " "deletion"); + herr_t status = 0; + auto defer_close_node_id = + auxiliary::defer([&]() { + status = H5Gclose(node_id); + if (status != 0) + { + std::cerr + << "[HDF5] Internal error: Failed to close HDF5 group " + "during dataset deletion." + << std::endl; + } + }); name += static_cast( writable->abstractFilePosition.get()) ->location; - herr_t status = H5Ldelete(node_id, name.c_str(), H5P_DEFAULT); + status = H5Ldelete(node_id, name.c_str(), H5P_DEFAULT); VERIFY( status == 0, "[HDF5] Internal error: Failed to delete HDF5 group"); - status = H5Gclose(node_id); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 group during dataset " - "deletion"); - writable->written = false; writable->abstractFilePosition.reset(); @@ -1815,17 +1868,23 @@ void HDF5IOHandlerImpl::deleteAttribute( node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during attribute " "deletion"); + herr_t status = 0; + auto defer_close_node_id = + auxiliary::defer([&]() { + status = H5Oclose(node_id); + if (status != 0) + { + std::cerr + << "[HDF5] Internal error: Failed to close HDF5 group " + "during attribute deletion." 
+ << std::endl; + } + }); - herr_t status = H5Adelete(node_id, name.c_str()); + status = H5Adelete(node_id, name.c_str()); VERIFY( status == 0, "[HDF5] Internal error: Failed to delete HDF5 attribute"); - - status = H5Oclose(node_id); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 group during " - "attribute deletion"); } } @@ -1837,20 +1896,46 @@ void HDF5IOHandlerImpl::writeDataset( "[HDF5] Writing into a dataset in a file opened as read only is " "not possible."); + if (parameters.memorySelection.has_value()) + { + throw error::OperationUnsupportedInBackend( + "HDF5", + "Non-contiguous memory selections not supported in HDF5 backend."); + } File file = requireFile("writeDataset", writable, /* checkParent = */ true); - hid_t dataset_id, filespace, memspace; herr_t status; + hid_t dataset_id, filespace, memspace; dataset_id = H5Dopen( file.id, concrete_h5_file_position(writable).c_str(), H5P_DEFAULT); VERIFY( dataset_id >= 0, "[HDF5] Internal error: Failed to open HDF5 dataset during dataset " "write"); + auto defer_close_dataset = auxiliary::defer([&]() { + status = H5Dclose(dataset_id); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close dataset " + + concrete_h5_file_position(writable) + + " during dataset write" + << std::endl; + } + }); filespace = H5Dget_space(dataset_id); + auto defer_close_filespace = auxiliary::defer([&]() { + status = H5Sclose(filespace); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close dataset file " + "space during dataset write" + << std::endl; + } + }); int ndims = H5Sget_simple_extent_ndims(filespace); + auxiliary::opaque_defer_type defer_close_memspace; if (ndims == 0) { if (parameters.offset != Offset{0} || parameters.extent != Extent{1}) @@ -1871,6 +1956,15 @@ void HDF5IOHandlerImpl::writeDataset( memspace > 0, "[HDF5] Internal error: Failed to create memspace during dataset " "write"); + defer_close_memspace = auxiliary::defer([&]() { + status = 
H5Sclose(memspace); // + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close " + "dataset memory space during dataset write" + << std::endl; + } + }); } else { @@ -1884,6 +1978,15 @@ void HDF5IOHandlerImpl::writeDataset( block.push_back(static_cast(val)); memspace = H5Screate_simple( static_cast(block.size()), block.data(), nullptr); + defer_close_memspace = auxiliary::defer([&]() { + status = H5Sclose(memspace); // + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close " + "dataset memory space during dataset write" + << std::endl; + } + }); status = H5Sselect_hyperslab( filespace, H5S_SELECT_SET, @@ -1914,6 +2017,15 @@ void HDF5IOHandlerImpl::writeDataset( dataType >= 0, "[HDF5] Internal error: Failed to get HDF5 datatype during dataset " "write"); + auto defer_close_dataType = auxiliary::defer([&]() { + status = H5Tclose(dataType); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close dataset " + "datatype during dataset write." + << std::endl; + } + }); switch (a.dtype) { using DT = Datatype; @@ -1952,26 +2064,6 @@ void HDF5IOHandlerImpl::writeDataset( default: throw std::runtime_error("[HDF5] Datatype not implemented in HDF5 IO"); } - status = H5Tclose(dataType); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close dataset datatype during " - "dataset write"); - status = H5Sclose(filespace); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close dataset file space during " - "dataset write"); - status = H5Sclose(memspace); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close dataset memory space during " - "dataset write"); - status = H5Dclose(dataset_id); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close dataset " + - concrete_h5_file_position(writable) + " during dataset write"); m_fileNames[writable] = file.name; } @@ -1993,6 +2085,7 @@ void HDF5IOHandlerImpl::writeAttribute( auto res = getFile(writable); File file = res ? 
res.value() : getFile(writable->parent).value(); hid_t node_id, attribute_id; + herr_t status = 0; hid_t fapl = H5Pcreate(H5P_LINK_ACCESS); #if H5_VERSION_GE(1, 10, 0) && openPMD_HAVE_MPI @@ -2001,6 +2094,15 @@ void HDF5IOHandlerImpl::writeAttribute( H5Pset_all_coll_metadata_ops(fapl, true); } #endif + auto defer_close_fapl = auxiliary::defer([&]() { + status = H5Pclose(fapl); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close HDF5 " + "property during attribute write." + << std::endl; + } + }); node_id = H5Oopen(file.id, concrete_h5_file_position(writable).c_str(), fapl); @@ -2008,9 +2110,18 @@ void HDF5IOHandlerImpl::writeAttribute( node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 object during attribute " "write"); + auto defer_close_node_id = auxiliary::defer([&]() { + status = H5Oclose(node_id); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close " + + concrete_h5_file_position(writable) + + " during attribute write." + << std::endl; + } + }); Attribute const att(Attribute::from_any, parameters.m_resource); Datatype dtype = parameters.dtype; - herr_t status; GetH5DataType getH5DataType({ {typeid(bool).name(), m_H5T_BOOL_ENUM}, {typeid(std::complex).name(), m_H5T_CFLOAT}, @@ -2022,6 +2133,15 @@ void HDF5IOHandlerImpl::writeAttribute( dataType >= 0, "[HDF5] Internal error: Failed to get HDF5 datatype during attribute " "write"); + auto defer_close_dataType = auxiliary::defer([&]() { + status = H5Tclose(dataType); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close HDF5 datatype " + "during attribute write." 
+ << std::endl; + } + }); std::string name = parameters.name; auto create_attribute_anew = [&]() { hid_t dataspace = getH5DataSpace(att); @@ -2029,6 +2149,15 @@ void HDF5IOHandlerImpl::writeAttribute( dataspace >= 0, "[HDF5] Internal error: Failed to get HDF5 dataspace during " "attribute write"); + auto defer_close_dataspace = auxiliary::defer([&]() { + status = H5Sclose(dataspace); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close HDF5 " + "dataspace during attribute write." + << std::endl; + } + }); attribute_id = H5Acreate( node_id, name.c_str(), @@ -2040,11 +2169,6 @@ void HDF5IOHandlerImpl::writeAttribute( node_id >= 0, "[HDF5] Internal error: Failed to create HDF5 attribute during " "attribute write"); - status = H5Sclose(dataspace); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 dataspace during " - "attribute write"); }; if (H5Aexists(node_id, name.c_str()) != 0) { @@ -2066,7 +2190,7 @@ void HDF5IOHandlerImpl::writeAttribute( equal >= 0, "[HDF5] Internal error: Failed to compare HDF5 attribute types " "during attribute write"); - if (equal == 0) // unequal + if (equal == 0) // unequal - need to delete and recreate { status = H5Aclose(attribute_id); VERIFY( @@ -2087,6 +2211,16 @@ void HDF5IOHandlerImpl::writeAttribute( { create_attribute_anew(); } + auto defer_close_attribute_id = auxiliary::defer([&]() { + status = H5Aclose(attribute_id); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close attribute " + + name + " at " + concrete_h5_file_position(writable) + + " during attribute write." 
+ << std::endl; + } + }); using DT = Datatype; switch (dtype) @@ -2293,28 +2427,6 @@ void HDF5IOHandlerImpl::writeAttribute( "[HDF5] Internal error: Failed to write attribute " + name + " at " + concrete_h5_file_position(writable)); - status = H5Tclose(dataType); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 datatype during Attribute " - "write"); - - status = H5Aclose(attribute_id); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close attribute " + name + " at " + - concrete_h5_file_position(writable) + " during attribute write"); - status = H5Oclose(node_id); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close " + - concrete_h5_file_position(writable) + " during attribute write"); - status = H5Pclose(fapl); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 property during attribute " - "write"); - m_fileNames[writable] = file.name; } @@ -2330,8 +2442,28 @@ void HDF5IOHandlerImpl::readDataset( dataset_id >= 0, "[HDF5] Internal error: Failed to open HDF5 dataset during dataset " "read"); + auto defer_close_dataset_id = auxiliary::defer([&]() { + status = H5Dclose(dataset_id); + if (status != 0) + { + std::cerr + << "[HDF5] Internal error: Failed to close dataset during " + "dataset read." + << std::endl; + } + }); filespace = H5Dget_space(dataset_id); + auto defer_close_filespace = auxiliary::defer([&]() { + status = H5Sclose(filespace); + if (status != 0) + { + std::cerr + << "[HDF5] Internal error: Failed to close dataset file space " + "during dataset read." 
+ << std::endl; + } + }); int ndims = H5Sget_simple_extent_ndims(filespace); if (ndims == 0) @@ -2379,6 +2511,16 @@ void HDF5IOHandlerImpl::readDataset( "[HDF5] Internal error: Failed to select hyperslab during dataset " "read"); } + auto defer_close_memspace = auxiliary::defer([&]() { + status = H5Sclose(memspace); + if (status != 0) + { + std::cerr + << "[HDF5] Internal error: Failed to close dataset memory " + "space during dataset read." + << std::endl; + } + }); void *data = parameters.data.get(); @@ -2419,6 +2561,16 @@ void HDF5IOHandlerImpl::readDataset( {typeid(std::complex).name(), m_H5T_CLONG_DOUBLE}, }); hid_t dataType = getH5DataType(a); + auto defer_close_dataType = auxiliary::defer([&]() { + status = H5Tclose(dataType); + if (status != 0) + { + std::cerr + << "[HDF5] Internal error: Failed to close dataset datatype " + "during dataset read." + << std::endl; + } + }); if (H5Tequal(dataType, H5T_NATIVE_LDOUBLE)) { // We have previously determined in openDataset() that this dataset is @@ -2427,29 +2579,37 @@ void HDF5IOHandlerImpl::readDataset( // the worked-around m_H5T_LONG_DOUBLE_80_LE. // Check this. hid_t checkDatasetTypeAgain = H5Dget_type(dataset_id); + auto defer_close_checkDatasetTypeAgain = auxiliary::defer([&]() { + status = H5Tclose(checkDatasetTypeAgain); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close HDF5 " + "dataset type during dataset reading." 
+ << std::endl; + } + }); if (!H5Tequal(checkDatasetTypeAgain, H5T_NATIVE_LDOUBLE)) { dataType = m_H5T_LONG_DOUBLE_80_LE; } - status = H5Tclose(checkDatasetTypeAgain); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 dataset type during " - "dataset reading"); } else if (H5Tequal(dataType, m_H5T_CLONG_DOUBLE)) { // Same deal for m_H5T_CLONG_DOUBLE hid_t checkDatasetTypeAgain = H5Dget_type(dataset_id); + auto defer_close_checkDatasetTypeAgain = auxiliary::defer([&]() { + status = H5Tclose(checkDatasetTypeAgain); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close HDF5 " + "dataset type during dataset reading." + << std::endl; + } + }); if (!H5Tequal(checkDatasetTypeAgain, m_H5T_CLONG_DOUBLE)) { dataType = m_H5T_CLONG_DOUBLE_80_LE; } - status = H5Tclose(checkDatasetTypeAgain); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 dataset type during " - "dataset reading"); } VERIFY( dataType >= 0, @@ -2463,26 +2623,6 @@ void HDF5IOHandlerImpl::readDataset( m_datasetTransferProperty, data); VERIFY(status == 0, "[HDF5] Internal error: Failed to read dataset"); - - status = H5Tclose(dataType); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close dataset datatype during " - "dataset read"); - status = H5Sclose(filespace); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close dataset file space during " - "dataset read"); - status = H5Sclose(memspace); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close dataset memory space during " - "dataset read"); - status = H5Dclose(dataset_id); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close dataset during dataset read"); } void HDF5IOHandlerImpl::readAttribute( @@ -2506,6 +2646,15 @@ void HDF5IOHandlerImpl::readAttribute( H5Pset_all_coll_metadata_ops(fapl, true); } #endif + auto defer_close_fapl = auxiliary::defer([&]() { + status = H5Pclose(fapl); + if (status != 0) + { + std::cerr << "[HDF5] Internal 
error: Failed to close HDF5 " + "attribute during attribute read." + << std::endl; + } + }); obj_id = H5Oopen(file.id, concrete_h5_file_position(writable).c_str(), fapl); @@ -2519,7 +2668,26 @@ void HDF5IOHandlerImpl::readAttribute( concrete_h5_file_position(writable).c_str() + "' during attribute read"); } + auto defer_close_obj_id = auxiliary::defer([&]() { + status = H5Oclose(obj_id); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close " + + concrete_h5_file_position(writable) + + " during attribute read." + << std::endl; + ; + } + }); std::string const &attr_name = parameters.name; + if (H5Aexists(obj_id, attr_name.c_str()) <= 0) + { + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::NotFound, + "HDF5", + parameters.name); + } attr_id = H5Aopen(obj_id, attr_name.c_str(), H5P_DEFAULT); if (attr_id < 0) { @@ -2533,10 +2701,38 @@ void HDF5IOHandlerImpl::readAttribute( concrete_h5_file_position(writable).c_str() + ") during attribute read"); } + auto defer_close_attr_id = auxiliary::defer([&]() { + status = H5Aclose(attr_id); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close attribute " + + attr_name + " at " + concrete_h5_file_position(writable) + + " during attribute read." + << std::endl; + } + }); hid_t attr_type, attr_space; attr_type = H5Aget_type(attr_id); + auto defer_close_attr_type = auxiliary::defer([&]() { + status = H5Tclose(attr_type); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close attribute " + "file space during attribute read." + << std::endl; + } + }); attr_space = H5Aget_space(attr_id); + auto defer_close_attr_space = auxiliary::defer([&]() { + status = H5Sclose(attr_space); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close attribute " + "datatype during attribute read." 
+ << std::endl; + } + }); int ndims = H5Sget_simple_extent_ndims(attr_space); std::vector dims(ndims, 0); @@ -3065,63 +3261,9 @@ void HDF5IOHandlerImpl::readAttribute( " at " + concrete_h5_file_position(writable)); } - status = H5Tclose(attr_type); - if (status != 0) - { - throw error::ReadError( - error::AffectedObject::Attribute, - error::Reason::CannotRead, - "HDF5", - "[HDF5] Internal error: Failed to close attribute datatype during " - "attribute read"); - } - status = H5Sclose(attr_space); - if (status != 0) - { - throw error::ReadError( - error::AffectedObject::Attribute, - error::Reason::CannotRead, - "HDF5", - "[HDF5] Internal error: Failed to close attribute file space " - "during " - "attribute read"); - } - auto dtype = parameters.dtype; *dtype = a.dtype; *parameters.m_resource = a.getAny(); - - status = H5Aclose(attr_id); - if (status != 0) - { - throw error::ReadError( - error::AffectedObject::Attribute, - error::Reason::CannotRead, - "HDF5", - "[HDF5] Internal error: Failed to close attribute " + attr_name + - " at " + concrete_h5_file_position(writable) + - " during attribute read"); - } - status = H5Oclose(obj_id); - if (status != 0) - { - throw error::ReadError( - error::AffectedObject::Attribute, - error::Reason::CannotRead, - "HDF5", - "[HDF5] Internal error: Failed to close " + - concrete_h5_file_position(writable) + " during attribute read"); - } - status = H5Pclose(fapl); - if (status != 0) - { - throw error::ReadError( - error::AffectedObject::Attribute, - error::Reason::CannotRead, - "HDF5", - "[HDF5] Internal error: Failed to close HDF5 attribute during " - "attribute read"); - } } void HDF5IOHandlerImpl::listPaths( @@ -3133,6 +3275,7 @@ void HDF5IOHandlerImpl::listPaths( "listing"); File file = requireFile("listPaths", writable, /* checkParent = */ true); + herr_t status = 0; hid_t gapl = H5Pcreate(H5P_GROUP_ACCESS); #if H5_VERSION_GE(1, 10, 0) && openPMD_HAVE_MPI @@ -3141,15 +3284,34 @@ void HDF5IOHandlerImpl::listPaths( 
H5Pset_all_coll_metadata_ops(gapl, true); } #endif + auto defer_close_gapl = auxiliary::defer([&]() { + status = H5Pclose(gapl); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close HDF5 property " + "during path listing." + << std::endl; + } + }); hid_t node_id = H5Gopen(file.id, concrete_h5_file_position(writable).c_str(), gapl); VERIFY( node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during path listing"); + auto defer_close_node_id = auxiliary::defer([&]() { + status = H5Gclose(node_id); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close HDF5 group " + + concrete_h5_file_position(writable) + + " during path listing." + << std::endl; + } + }); H5G_info_t group_info; - herr_t status = H5Gget_info(node_id, &group_info); + status = H5Gget_info(node_id, &group_info); VERIFY( status == 0, "[HDF5] Internal error: Failed to get HDF5 group info for " + @@ -3166,17 +3328,6 @@ void HDF5IOHandlerImpl::listPaths( paths->emplace_back(name.data(), name_length); } } - - status = H5Gclose(node_id); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 group " + - concrete_h5_file_position(writable) + " during path listing"); - status = H5Pclose(gapl); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 property during path " - "listing"); } void HDF5IOHandlerImpl::listDatasets( @@ -3188,6 +3339,7 @@ void HDF5IOHandlerImpl::listDatasets( "listing"); File file = requireFile("listDatasets", writable, /* checkParent = */ true); + herr_t status = 0; hid_t gapl = H5Pcreate(H5P_GROUP_ACCESS); #if H5_VERSION_GE(1, 10, 0) && openPMD_HAVE_MPI @@ -3196,6 +3348,15 @@ void HDF5IOHandlerImpl::listDatasets( H5Pset_all_coll_metadata_ops(gapl, true); } #endif + auto defer_close_gapl = auxiliary::defer([&]() { + status = H5Pclose(gapl); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close HDF5 property " + "during dataset listing." 
+ << std::endl; + } + }); hid_t node_id = H5Gopen(file.id, concrete_h5_file_position(writable).c_str(), gapl); @@ -3203,9 +3364,19 @@ void HDF5IOHandlerImpl::listDatasets( node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during dataset " "listing"); + auto defer_close_node_id = auxiliary::defer([&]() { + status = H5Gclose(node_id); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close HDF5 group " + + concrete_h5_file_position(writable) + + " during dataset listing." + << std::endl; + } + }); H5G_info_t group_info; - herr_t status = H5Gget_info(node_id, &group_info); + status = H5Gget_info(node_id, &group_info); VERIFY( status == 0, "[HDF5] Internal error: Failed to get HDF5 group info for " + @@ -3222,17 +3393,6 @@ void HDF5IOHandlerImpl::listDatasets( datasets->emplace_back(name.data(), name_length); } } - - status = H5Gclose(node_id); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 group " + - concrete_h5_file_position(writable) + " during dataset listing"); - status = H5Pclose(gapl); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 property during dataset " - "listing"); } void HDF5IOHandlerImpl::listAttributes( @@ -3246,6 +3406,7 @@ void HDF5IOHandlerImpl::listAttributes( File file = requireFile("listAttributes", writable, /* checkParent = */ true); hid_t node_id; + herr_t status = 0; hid_t fapl = H5Pcreate(H5P_LINK_ACCESS); #if H5_VERSION_GE(1, 10, 0) && openPMD_HAVE_MPI @@ -3254,6 +3415,15 @@ void HDF5IOHandlerImpl::listAttributes( H5Pset_all_coll_metadata_ops(fapl, true); } #endif + auto defer_close_fapl = auxiliary::defer([&]() { + status = H5Pclose(fapl); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close HDF5 property " + "during attribute listing." 
+ << std::endl; + } + }); node_id = H5Oopen(file.id, concrete_h5_file_position(writable).c_str(), fapl); @@ -3261,8 +3431,16 @@ void HDF5IOHandlerImpl::listAttributes( node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during attribute " "listing"); - - herr_t status; + auto defer_close_node_id = auxiliary::defer([&]() { + status = H5Oclose(node_id); + if (status != 0) + { + std::cerr << "[HDF5] Internal error: Failed to close HDF5 object " + + concrete_h5_file_position(writable) + + " during attribute listing." + << std::endl; + } + }); #if H5_VERSION_GE(1, 12, 0) H5O_info2_t object_info; status = H5Oget_info3(node_id, &object_info, H5O_INFO_NUM_ATTRS); @@ -3299,17 +3477,6 @@ void HDF5IOHandlerImpl::listAttributes( H5P_DEFAULT); attributes->emplace_back(name.data(), name_length); } - - status = H5Oclose(node_id); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 object during attribute " - "listing"); - status = H5Pclose(fapl); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 property during dataset " - "listing"); } void HDF5IOHandlerImpl::deregister( @@ -3425,7 +3592,7 @@ HDF5IOHandler::HDF5IOHandler( HDF5IOHandler::~HDF5IOHandler() = default; -std::future HDF5IOHandler::flush(internal::ParsedFlushParams ¶ms) +std::future HDF5IOHandler::flush_impl(internal::ParsedFlushParams ¶ms) { return m_impl->flush(params); } @@ -3444,7 +3611,7 @@ HDF5IOHandler::HDF5IOHandler( HDF5IOHandler::~HDF5IOHandler() = default; -std::future HDF5IOHandler::flush(internal::ParsedFlushParams &) +std::future HDF5IOHandler::flush_impl(internal::ParsedFlushParams &) { return std::future(); } diff --git a/src/IO/HDF5/ParallelHDF5IOHandler.cpp b/src/IO/HDF5/ParallelHDF5IOHandler.cpp index 7de4960feb..0cf8b396a3 100644 --- a/src/IO/HDF5/ParallelHDF5IOHandler.cpp +++ b/src/IO/HDF5/ParallelHDF5IOHandler.cpp @@ -76,7 +76,7 @@ ParallelHDF5IOHandler::ParallelHDF5IOHandler( ParallelHDF5IOHandler::~ParallelHDF5IOHandler() = default; 
std::future -ParallelHDF5IOHandler::flush(internal::ParsedFlushParams ¶ms) +ParallelHDF5IOHandler::flush_impl(internal::ParsedFlushParams ¶ms) { if (auto hdf5_config_it = params.backendConfig.json().find("hdf5"); hdf5_config_it != params.backendConfig.json().end()) @@ -462,7 +462,8 @@ ParallelHDF5IOHandler::ParallelHDF5IOHandler( ParallelHDF5IOHandler::~ParallelHDF5IOHandler() = default; -std::future ParallelHDF5IOHandler::flush(internal::ParsedFlushParams &) +std::future +ParallelHDF5IOHandler::flush_impl(internal::ParsedFlushParams &) { return std::future(); } diff --git a/src/IO/InvalidatableFile.cpp b/src/IO/InvalidatableFile.cpp index cb6930da2b..c4a5d2c69f 100644 --- a/src/IO/InvalidatableFile.cpp +++ b/src/IO/InvalidatableFile.cpp @@ -80,3 +80,9 @@ std::hash::operator()( return std::hash>{}( s.fileState); } +auto std::less::operator()( + first_argument_type const &first, second_argument_type const &second) const + -> result_type +{ + return less<>()(*first, *second); +} diff --git a/src/IO/JSON/JSONIOHandler.cpp b/src/IO/JSON/JSONIOHandler.cpp index 03cef438d3..0e167b801b 100644 --- a/src/IO/JSON/JSONIOHandler.cpp +++ b/src/IO/JSON/JSONIOHandler.cpp @@ -32,7 +32,8 @@ JSONIOHandler::JSONIOHandler( openPMD::json::TracingJSON jsonCfg, JSONIOHandlerImpl::FileFormat format, std::string originalExtension) - : AbstractIOHandler{std::move(initialize_from), std::move(path), at, std::move(jsonCfg)} + : AbstractIOHandler{ + std::move(initialize_from), std::move(path), at, std::move(jsonCfg)} , m_impl{this, format, std::move(originalExtension)} {} @@ -45,13 +46,14 @@ JSONIOHandler::JSONIOHandler( openPMD::json::TracingJSON jsonCfg, JSONIOHandlerImpl::FileFormat format, std::string originalExtension) - : AbstractIOHandler{std::move(initialize_from), std::move(path), at, std::move(jsonCfg)} + : AbstractIOHandler{ + std::move(initialize_from), std::move(path), at, std::move(jsonCfg)} , m_impl{ JSONIOHandlerImpl{this, comm, format, std::move(originalExtension)}} {} #endif 
-std::future JSONIOHandler::flush(internal::ParsedFlushParams &) +std::future JSONIOHandler::flush_impl(internal::ParsedFlushParams &) { return m_impl.flush(); } diff --git a/src/IO/JSON/JSONIOHandlerImpl.cpp b/src/IO/JSON/JSONIOHandlerImpl.cpp index 59541c1e30..cc36a2edb3 100644 --- a/src/IO/JSON/JSONIOHandlerImpl.cpp +++ b/src/IO/JSON/JSONIOHandlerImpl.cpp @@ -1146,6 +1146,13 @@ void JSONIOHandlerImpl::writeDataset( access::write(m_handler->m_backendAccess), "[JSON] Cannot write data in read-only mode."); + if (parameters.memorySelection.has_value()) + { + throw error::OperationUnsupportedInBackend( + "JSON", + "Non-contiguous memory selections not supported in JSON backend."); + } + auto pos = setAndGetFilePosition(writable); auto file = refreshFileFromParent(writable); auto &j = obtainJsonContents(writable); @@ -2332,7 +2339,7 @@ auto JSONIOHandlerImpl::verifyDataset( } Datatype dt = stringToDatatype(j["datatype"].get()); VERIFY_ALWAYS( - dt == parameters.dtype, + isSame(dt, parameters.dtype), "[JSON] Read/Write request does not fit the dataset's type"); } catch (json::basic_json::type_error &) diff --git a/src/Iteration.cpp b/src/Iteration.cpp index ab35c1e3ae..085495db93 100644 --- a/src/Iteration.cpp +++ b/src/Iteration.cpp @@ -59,6 +59,16 @@ Iteration::Iteration() : Attributable(NoInit()) particles.writable().ownKeyWithinParent = "particles"; } +uint64_t Iteration::getCachedIterationIndex() const +{ + auto idx = get().m_iterationIndex; + if (!idx.has_value()) + { + throw error::Internal("Iteration index not known."); + } + return *idx; +} + template Iteration &Iteration::setTime(T newTime) { @@ -246,7 +256,7 @@ void Iteration::flushFileBased( * If it was written before, then in the context of another iteration. 
*/ auto &attr = s.get().m_rankTable.m_attributable; - attr.setWritten(false, Attributable::EnqueueAsynchronously::Yes); + attr.setWritten(false, Attributable::EnqueueAsynchronously::Both); s.get() .m_rankTable.m_attributable.get() .m_writable.abstractFilePosition.reset(); @@ -513,9 +523,10 @@ void Iteration::read_impl(std::string const &groupPath) setDt(Attribute(Attribute::from_any, *aRead.m_resource) .get()); // conversion cast if a backend reports an integer type - else if (auto val = Attribute(Attribute::from_any, *aRead.m_resource) - .getOptional(); - val.has_value()) + else if ( + auto val = Attribute(Attribute::from_any, *aRead.m_resource) + .getOptional(); + val.has_value()) setDt(val.value()); else throw error::ReadError( @@ -539,9 +550,10 @@ void Iteration::read_impl(std::string const &groupPath) setTime(Attribute(Attribute::from_any, *aRead.m_resource) .get()); // conversion cast if a backend reports an integer type - else if (auto val = Attribute(Attribute::from_any, *aRead.m_resource) - .getOptional(); - val.has_value()) + else if ( + auto val = Attribute(Attribute::from_any, *aRead.m_resource) + .getOptional(); + val.has_value()) setTime(val.value()); else throw error::ReadError( @@ -841,7 +853,7 @@ auto Iteration::beginStep( { bool previous = series.iterations.written(); series.iterations.setWritten( - false, Attributable::EnqueueAsynchronously::Yes); + false, Attributable::EnqueueAsynchronously::Both); auto oldStatus = IOHandl->m_seriesStatus; IOHandl->m_seriesStatus = internal::SeriesStatus::Parsing; try @@ -858,7 +870,7 @@ auto Iteration::beginStep( } IOHandl->m_seriesStatus = oldStatus; series.iterations.setWritten( - previous, Attributable::EnqueueAsynchronously::Yes); + previous, Attributable::EnqueueAsynchronously::Both); } else if (thisObject.has_value()) { diff --git a/src/LoadStoreChunk.cpp b/src/LoadStoreChunk.cpp new file mode 100644 index 0000000000..6a8c526be6 --- /dev/null +++ b/src/LoadStoreChunk.cpp @@ -0,0 +1,398 @@ + + +#include 
"openPMD/LoadStoreChunk.hpp" +#include "openPMD/Datatype.hpp" +#include "openPMD/Error.hpp" +#include "openPMD/RecordComponent.hpp" +#include "openPMD/Span.hpp" +#include "openPMD/auxiliary/Future.hpp" +#include "openPMD/auxiliary/Memory.hpp" +#include "openPMD/auxiliary/Memory_internal.hpp" +#include "openPMD/auxiliary/ShareRawInternal.hpp" +#include "openPMD/auxiliary/UniquePtr.hpp" + +// comment to keep clang-format from reordering +#include "openPMD/DatatypeMacros.hpp" +#include "openPMD/backend/Attributable.hpp" + +#include +#include +#include + +namespace openPMD +{ +namespace +{ + template + auto asWriteBuffer(std::shared_ptr &&ptr) -> auxiliary::WriteBuffer + { + /* std::static_pointer_cast correctly reference-counts the pointer */ + return auxiliary::WriteBuffer( + std::static_pointer_cast(std::move(ptr))); + } + template + auto asWriteBuffer(UniquePtrWithLambda &&ptr) -> auxiliary::WriteBuffer + { + return auxiliary::WriteBuffer( + std::move(ptr).template static_cast_()); + } + + /* + * There is no backend support currently for const unique pointers. + * We support these mostly for providing a clean API to users that have such + * pointers and want to store from them, but there will be no + * backend-specific optimizations for such buffers as there are for + * non-const unique pointers. 
+ */ + template + auto asWriteBuffer(UniquePtrWithLambda &&ptr) + -> auxiliary::WriteBuffer + { + auto raw_ptr = ptr.release(); + return asWriteBuffer( + std::shared_ptr{ + raw_ptr, + [deleter = std::move(ptr.get_deleter())]( + auto const *delete_me) { deleter(delete_me); }}); + } +} // namespace + +ConfigureLoadStore::ConfigureLoadStore(RecordComponent &rc) : m_rc(rc) +{} + +auto ConfigureLoadStore::dim() const -> uint8_t +{ + return m_rc.getDimensionality(); +} + +auto ConfigureLoadStore::storeChunkConfig() -> internal::LoadStoreConfig +{ + return internal::LoadStoreConfig{getOffset(), getExtent()}; +} + +auto ConfigureLoadStore::deferFlush(Attributable &attr) +{ + if (m_unsafeNoAutomaticFlush) + { + throw error::Internal( + "Configuring an automatic flush operating after configuring that " + "those should be switched off."); + } + auto index = attr.IOHandler()->m_flushCounter; + return [attr, + old_index = *index, + current_index = std::weak_ptr(index)]() mutable { + auto lock_current_index = current_index.lock(); + if (!lock_current_index || *lock_current_index >= old_index) + { + return; + } + attr.seriesFlush(); + }; +} + +auto ConfigureLoadStore::getOffset() -> Offset const & +{ + if (!m_offset.has_value()) + { + if (m_rc.joinedDimension().has_value()) + { + m_offset = std::make_optional(); + } + else + { + m_offset = std::make_optional(dim(), 0); + } + } + return *m_offset; +} + +auto ConfigureLoadStore::getExtent() -> Extent const & +{ + if (!m_extent.has_value()) + { + m_extent = std::make_optional(m_rc.getExtent()); + if (m_offset.has_value()) + { + auto it_o = m_offset->begin(); + auto end_o = m_offset->end(); + auto it_e = m_extent->begin(); + auto end_e = m_extent->end(); + for (; it_o != end_o && it_e != end_e; ++it_e, ++it_o) + { + *it_e -= *it_o; + } + } + } + return *m_extent; +} + +auto ConfigureLoadStore::withSharedPtr_impl_mut( + std::shared_ptr data, Datatype datatype) + -> openPMD::ConfigureLoadStoreFromBuffer +{ + if (!data) + { + throw 
std::runtime_error( + "Unallocated pointer passed during chunk store."); + } + return openPMD::ConfigureLoadStoreFromBuffer( + auxiliary::WriteBuffer(std::move(data)), datatype, {std::move(*this)}); +} +auto ConfigureLoadStore::withSharedPtr_impl_const( + std::shared_ptr data, Datatype datatype) + -> openPMD::ConfigureStoreChunkFromBuffer +{ + if (!data) + { + throw std::runtime_error( + "Unallocated pointer passed during chunk store."); + } + return openPMD::ConfigureStoreChunkFromBuffer( + auxiliary::WriteBuffer(std::move(data)), datatype, {std::move(*this)}); +} + +auto ConfigureLoadStore::withUniquePtr_impl_mut( + UniquePtrWithLambda data, Datatype dtype) + -> openPMD::ConfigureStoreChunkFromBuffer + +{ + if (!data) + { + throw std::runtime_error( + "Unallocated pointer passed during chunk store."); + } + + return openPMD::ConfigureStoreChunkFromBuffer( + auxiliary::WriteBuffer(std::move(data)), dtype, {std::move(*this)}); +} +auto ConfigureLoadStore::withUniquePtr_impl_const( + UniquePtrWithLambda data, Datatype dtype) + -> openPMD::ConfigureStoreChunkFromBuffer + +{ + if (!data) + { + throw std::runtime_error( + "Unallocated pointer passed during chunk store."); + } + + void const *raw_ptr = data.release(); + auto &deleter = data.get_deleter(); + return openPMD::ConfigureStoreChunkFromBuffer( + auxiliary::WriteBuffer( + std::shared_ptr( + raw_ptr, + [deleter_lambda = std::move(deleter)](auto const *p) { + deleter_lambda(p); + })), + dtype, + {std::move(*this)}); +} + +auto ConfigureLoadStore::withRawPtr_impl_mut(void *data, Datatype dtype) + -> openPMD::ConfigureLoadStoreFromBuffer +{ + if (!data) + { + throw std::runtime_error( + "Unallocated pointer passed during chunk store."); + } + return openPMD::ConfigureLoadStoreFromBuffer( + auxiliary::WriteBuffer(auxiliary::shareRaw(data)), + dtype, + {std::move(*this)}); +} + +auto ConfigureLoadStore::withRawPtr_impl_const(void const *data, Datatype dtype) + -> openPMD::ConfigureStoreChunkFromBuffer +{ + if (!data) 
+ { + throw std::runtime_error( + "Unallocated pointer passed during chunk store."); + } + return openPMD::ConfigureStoreChunkFromBuffer( + auxiliary::WriteBuffer(auxiliary::shareRaw(data)), + dtype, + {std::move(*this)}); +} + +template +auto ConfigureLoadStore::storeSpan() -> DynamicMemoryView +{ + return m_rc.storeChunkSpan_impl(storeChunkConfig()); +} + +template +auto ConfigureLoadStore::load() + -> auxiliary::DeferredComputation> +{ + auto res = m_rc.loadChunkAllocate_impl(storeChunkConfig()); + if (m_unsafeNoAutomaticFlush) + { + return auxiliary::DeferredComputation>( + std::move(res)); + } + return auxiliary::DeferredComputation>( + [res_lambda = std::move(res), dflush = deferFlush(m_rc)]() mutable { + dflush(); + return res_lambda; + }); +} + +struct VisitorEnqueueLoadVariantWithFlush +{ + template + static auto + call(RecordComponent &rc, internal::LoadStoreConfig cfg, F &&dflush) + -> auxiliary::DeferredComputation< + auxiliary::detail::shared_ptr_dataset_types> + { + auto res = rc.loadChunkAllocate_impl(std::move(cfg)); + return auxiliary::DeferredComputation< + auxiliary::detail::shared_ptr_dataset_types>( + [res_lambda = std::move(res), + dflush_lambda = std::forward(dflush)]() mutable + -> auxiliary::detail::shared_ptr_dataset_types { + dflush_lambda(); + return res_lambda; + }); + } +}; +struct VisitorEnqueueLoadVariantWithoutFlush +{ + template + static auto call(RecordComponent &rc, internal::LoadStoreConfig cfg) + -> auxiliary::DeferredComputation< + auxiliary::detail::shared_ptr_dataset_types> + { + auto res = rc.loadChunkAllocate_impl(std::move(cfg)); + return auxiliary::DeferredComputation< + auxiliary::detail::shared_ptr_dataset_types>(std::move(res)); + } +}; + +auto ConfigureLoadStore::loadVariant() -> auxiliary::DeferredComputation< + auxiliary::detail::shared_ptr_dataset_types> +{ + if (m_unsafeNoAutomaticFlush) + { + return m_rc.visit( + this->storeChunkConfig()); + } + else + { + return m_rc.visit( + this->storeChunkConfig(), 
deferFlush(m_rc)); + } +} + +struct VisitorLoadVariant +{ + template + static auto call(RecordComponent &rc, internal::LoadStoreConfig cfg) + -> auxiliary::detail::shared_ptr_dataset_types + { + return rc.loadChunkAllocate_impl(std::move(cfg)); + } +}; + +ConfigureStoreChunkFromBuffer::ConfigureStoreChunkFromBuffer( + auxiliary::WriteBuffer buffer, Datatype dt, ConfigureLoadStore &&core) + : ConfigureLoadStore(std::move(core)) + , m_buffer(std::move(buffer)) + , m_datatype(dt) +{} + +auto ConfigureStoreChunkFromBuffer::storeChunkConfig() + -> internal::LoadStoreConfigWithBuffer +{ + return internal::LoadStoreConfigWithBuffer{ + this->getOffset(), this->getExtent(), m_mem_select}; +} + +auto ConfigureStoreChunkFromBuffer::store() + -> auxiliary::DeferredComputation +{ + this->m_rc.storeChunk_impl( + std::move(m_buffer), m_datatype, storeChunkConfig()); + if (m_unsafeNoAutomaticFlush) + { + return auxiliary::DeferredComputation( + auxiliary::detail::CachedValue()); + } + return auxiliary::DeferredComputation( + [dflush = deferFlush(m_rc)]() mutable -> void { dflush(); }); +} + +auto ConfigureLoadStoreFromBuffer::load() + -> auxiliary::DeferredComputation +{ + auto *shared_ptr = std::get_if( + &this->m_buffer.as_variant()); + if (!shared_ptr) + { + throw std::runtime_error( + "ConfigureLoadStoreFromBuffer must be instantiated with a " + "non-const shared_ptr type."); + } + this->m_rc.loadChunk_impl( + *shared_ptr, m_datatype, this->storeChunkConfig()); + if (m_unsafeNoAutomaticFlush) + { + return auxiliary::DeferredComputation( + auxiliary::detail::CachedValue()); + } + return auxiliary::DeferredComputation( + [dflush = this->deferFlush(this->m_rc)]() mutable -> void { + dflush(); + }); +} + +void ConfigureLoadStore::extent_impl(Extent extent) +{ + m_extent = std::make_optional(std::move(extent)); +} + +void ConfigureLoadStore::offset_impl(Offset offset) +{ + m_offset = std::make_optional(std::move(offset)); +} + +void ConfigureLoadStore::unsafeNoAutomaticFlush_impl() 
+{ + m_unsafeNoAutomaticFlush = true; +} + +void ConfigureStoreChunkFromBuffer::memorySelection_impl(MemorySelection sel) +{ + m_mem_select = std::make_optional(std::move(sel)); +} +// namespace core + +// need this for clang-tidy +#define OPENPMD_ARRAY(type) type[] +#define OPENPMD_POINTER(type) type * +#define OPENPMD_APPLY_TEMPLATE(template_, type) template_ + +#define INSTANTIATE_METHOD_TEMPLATES(dtype) \ + template auto ConfigureLoadStore::load() \ + -> auxiliary::DeferredComputation; +#define INSTANTIATE_METHOD_TEMPLATES_WITH_AND_WITHOUT_EXTENT(type) \ + INSTANTIATE_METHOD_TEMPLATES(type) \ + INSTANTIATE_METHOD_TEMPLATES(OPENPMD_ARRAY(type)) \ + template auto ConfigureLoadStore::storeSpan() -> DynamicMemoryView; + +OPENPMD_FOREACH_DATASET_DATATYPE( + INSTANTIATE_METHOD_TEMPLATES_WITH_AND_WITHOUT_EXTENT) + +#undef INSTANTIATE_METHOD_TEMPLATES +#undef INSTANTIATE_METHOD_TEMPLATES_WITH_AND_WITHOUT_EXTENT + +#undef INSTANTIATE_METHOD_TEMPLATES +#undef OPENPMD_ARRAY +#undef OPENPMD_POINTER +#undef OPENPMD_APPLY_TEMPLATE +} // namespace openPMD diff --git a/src/RecordComponent.cpp b/src/RecordComponent.cpp index c6eef23313..a1a4680154 100644 --- a/src/RecordComponent.cpp +++ b/src/RecordComponent.cpp @@ -23,6 +23,7 @@ #include "openPMD/DatatypeHelpers.hpp" #include "openPMD/Error.hpp" #include "openPMD/IO/Format.hpp" +#include "openPMD/LoadStoreChunk.hpp" #include "openPMD/Series.hpp" #include "openPMD/auxiliary/Environment.hpp" #include "openPMD/auxiliary/Memory.hpp" @@ -34,6 +35,9 @@ // comment so clang-format does not move this #include "openPMD/DatatypeMacros.hpp" +// comment +#include "openPMD/DatatypeMacros.hpp" + #include #include #include @@ -189,6 +193,72 @@ auto resource(T &t) -> attribute_types & return t.template resource(); } +ConfigureLoadStore RecordComponent::prepareLoadStore() +{ + return ConfigureLoadStore{*this}; +} + +namespace +{ +#if (defined(_LIBCPP_VERSION) && _LIBCPP_VERSION < 11000) || \ + (defined(__apple_build_version__) && 
__clang_major__ < 14) + template + auto createSpanBufferFallback(size_t size) -> UniquePtrWithLambda + { + return UniquePtrWithLambda{ + new T[size], [](auto *ptr) { delete[] ptr; }}; + } +#else + template + auto createSpanBufferFallback(size_t size) -> std::unique_ptr + { + return std::unique_ptr{new T[size]}; + } +#endif +} // namespace + +template +DynamicMemoryView +RecordComponent::storeChunkSpan_impl(internal::LoadStoreConfig cfg) +{ + return storeChunkSpanCreateBuffer_impl( + std::move(cfg), &createSpanBufferFallback); +} + +template +std::shared_ptr +RecordComponent::loadChunkAllocate_impl(internal::LoadStoreConfig cfg) +{ + using T = std::remove_cv_t>; + auto res = loadChunkAllocate_impl( + determineDatatype(), sizeof(T), std::move(cfg)); + return std::static_pointer_cast(res); +} + +std::shared_ptr RecordComponent::loadChunkAllocate_impl( + Datatype dtype, size_t dtype_size, internal::LoadStoreConfig cfg) +{ + auto [o, e] = std::move(cfg); + + size_t numPoints = 1; + for (auto val : e) + { + numPoints *= val; + } + + auto newData = + std::shared_ptr(new char[numPoints * dtype_size], [](void *p) { + delete[] (static_cast(p)); + }); + prepareLoadStore() + .offset(std::move(o)) + .extent(std::move(e)) + .withSharedPtr_impl_mut(newData, dtype) + .unsafeNoAutomaticFlush() + .load(); + return newData; +} + RecordComponent::RecordComponent() : BaseRecordComponent(NoInit()) { setData(std::make_shared()); @@ -633,14 +703,18 @@ void RecordComponent::readBase(bool require_unit_si) } } -void RecordComponent::storeChunk( - auxiliary::WriteBuffer buffer, Datatype dtype, Offset o, Extent e) +void RecordComponent::storeChunk_impl( + auxiliary::WriteBuffer buffer, + Datatype dtype, + internal::LoadStoreConfigWithBuffer cfg) { + auto [o, e, memorySelection] = std::move(cfg); verifyChunk(dtype, o, e); Parameter dWrite; dWrite.offset = std::move(o); dWrite.extent = std::move(e); + dWrite.memorySelection = memorySelection; dWrite.dtype = dtype; /* std::static_pointer_cast 
correctly reference-counts the pointer */ dWrite.data = std::move(buffer); @@ -657,7 +731,7 @@ void RecordComponent::verifyChunk( if (empty()) throw std::runtime_error( "Chunks cannot be written for an empty RecordComponent."); - if (dtype != getDatatype()) + if (!isSame(dtype, getDatatype())) { std::ostringstream oss; oss << "Datatypes of chunk data (" << dtype @@ -764,68 +838,80 @@ template std::shared_ptr RecordComponent::loadChunk(Offset o, Extent e) { uint8_t dim = getDimensionality(); + auto operation = prepareLoadStore(); // default arguments // offset = {0u}: expand to right dim {0u, 0u, ...} - Offset offset = o; - if (o.size() == 1u && o.at(0) == 0u && dim > 1u) - offset = Offset(dim, 0u); + if (o.size() != 1u || o.at(0) != 0u || dim <= 1u) + { + operation.offset(std::move(o)); + } // extent = {-1u}: take full size - Extent extent(dim, 1u); - if (e.size() == 1u && e.at(0) == -1u) + if (e.size() != 1u || e.at(0) != -1u) { - extent = getExtent(); - for (uint8_t i = 0u; i < dim; ++i) - extent[i] -= offset[i]; + operation.extent(std::move(e)); } - else - extent = e; - - uint64_t numPoints = 1u; - for (auto const &dimensionSize : extent) - numPoints *= dimensionSize; -#if (defined(_LIBCPP_VERSION) && _LIBCPP_VERSION < 11000) || \ - (defined(__apple_build_version__) && __clang_major__ < 14) - auto newData = - std::shared_ptr(new T[numPoints], [](T *p) { delete[] p; }); - loadChunk(newData, offset, extent); - return newData; -#else - auto newData = std::shared_ptr[]>( - new std::remove_extent_t[numPoints]); - loadChunk(newData, offset, extent); - return std::static_pointer_cast(std::move(newData)); -#endif + return operation.unsafeNoAutomaticFlush().load().get(); } namespace detail { - template - struct do_convert + struct FillBuffer { - template - static std::optional call(Attribute &attr) + template + static void call( + void *target, + size_t numPoints, + RecordComponent const &component, + internal::RecordComponentData const &rc) { - if constexpr 
(std::is_convertible_v)
+                std::optional val = rc.m_constantValue.getOptional();
+
+            if (val.has_value())
            {
-                return std::make_optional(attr.get());
+                auto raw_ptr = static_cast(target);
+                std::fill(raw_ptr, raw_ptr + numPoints, *val);
            }
            else
            {
-                return std::nullopt;
+                std::string const data_type_str =
+                    datatypeToString(component.getDatatype());
+                std::string const requ_type_str =
+                    datatypeToString(determineDatatype());
+                std::string err_msg =
+                    "Type conversion during chunk loading not possible! ";
+                err_msg +=
+                    "Data: " + data_type_str + "; Load as: " + requ_type_str;
+                throw error::WrongAPIUsage(err_msg);
            }
        }

-        static constexpr char const *errorMsg = "is_conversible";
+        static constexpr char const *errorMsg = "FillBuffer";
    };
} // namespace detail

template
-void RecordComponent::loadChunk(std::shared_ptr data, Offset o, Extent e)
+void RecordComponent::loadChunk_impl(
+    std::shared_ptr const &data, internal::LoadStoreConfigWithBuffer cfg)
+{
+    loadChunk_impl(
+        std::static_pointer_cast(data),
+        determineDatatype>>(),
+        std::move(cfg));
+}
+
+void RecordComponent::loadChunk_impl(
+    std::shared_ptr const &data,
+    Datatype dtype_requested,
+    internal::LoadStoreConfigWithBuffer cfg)
{
-    Datatype dtype = determineDatatype(data);
+    if (cfg.memorySelection.has_value())
+    {
+        throw error::WrongAPIUsage(
+            "Unsupported: Memory selections in chunk loading.");
+    }
    /*
     * For constant components, we implement type conversion, so there is
     * a separate check further below.
@@ -833,40 +919,21 @@ void RecordComponent::loadChunk(std::shared_ptr data, Offset o, Extent e)
     * JSON/TOML backends as they might implicitly turn a LONG into an INT in a
     * constant component. The frontend needs to catch such edge cases.
     * Ref. `if (constant())` branch.
+     *
+     * Attention: Do NOT use operator==(), it doesn't work properly on Windows!
*/ - if (dtype != getDatatype() && !constant()) - if (!isSameInteger(getDatatype()) && - !isSameFloatingPoint(getDatatype()) && - !isSameComplexFloatingPoint(getDatatype()) && - !isSameChar(getDatatype())) - { - std::string const data_type_str = datatypeToString(getDatatype()); - std::string const requ_type_str = - datatypeToString(determineDatatype()); - std::string err_msg = - "Type conversion during chunk loading not yet implemented! "; - err_msg += "Data: " + data_type_str + "; Load as: " + requ_type_str; - throw std::runtime_error(err_msg); - } - - uint8_t dim = getDimensionality(); - - // default arguments - // offset = {0u}: expand to right dim {0u, 0u, ...} - Offset offset = o; - if (o.size() == 1u && o.at(0) == 0u && dim > 1u) - offset = Offset(dim, 0u); - - // extent = {-1u}: take full size - Extent extent(dim, 1u); - if (e.size() == 1u && e.at(0) == -1u) + if (!isSame(dtype_requested, getDatatype()) && !constant()) { - extent = getExtent(); - for (uint8_t i = 0u; i < dim; ++i) - extent[i] -= offset[i]; + std::string const data_type_str = datatypeToString(getDatatype()); + std::string const requ_type_str = datatypeToString(dtype_requested); + std::string err_msg = + "Type conversion during chunk loading not yet implemented! "; + err_msg += "Data: " + data_type_str + "; Load as: " + requ_type_str; + throw std::runtime_error(err_msg); } - else - extent = e; + + auto dim = getDimensionality(); + auto [offset, extent, memorySelection] = std::move(cfg); if (extent.size() != dim || offset.size() != dim) { @@ -885,9 +952,6 @@ void RecordComponent::loadChunk(std::shared_ptr data, Offset o, Extent e) "Chunk does not reside inside dataset (Dimension on index " + std::to_string(i) + ". 
DS: " + std::to_string(dse[i]) + " - Chunk: " + std::to_string(offset[i] + extent[i]) + ")"); - if (!data) - throw std::runtime_error( - "Unallocated pointer passed during chunk loading."); auto &rc = get(); if (constant()) @@ -896,25 +960,8 @@ void RecordComponent::loadChunk(std::shared_ptr data, Offset o, Extent e) for (auto const &dimensionSize : extent) numPoints *= dimensionSize; - std::optional val = - switchNonVectorType>( - /* dt = */ getDatatype(), rc.m_constantValue); - - if (val.has_value()) - { - T *raw_ptr = data.get(); - std::fill(raw_ptr, raw_ptr + numPoints, *val); - } - else - { - std::string const data_type_str = datatypeToString(getDatatype()); - std::string const requ_type_str = - datatypeToString(determineDatatype()); - std::string err_msg = - "Type conversion during chunk loading not possible! "; - err_msg += "Data: " + data_type_str + "; Load as: " + requ_type_str; - throw error::WrongAPIUsage(err_msg); - } + switchDatasetType( + dtype_requested, data.get(), numPoints, *this, rc); } else { @@ -928,80 +975,80 @@ void RecordComponent::loadChunk(std::shared_ptr data, Offset o, Extent e) } template -void RecordComponent::loadChunk( - std::shared_ptr ptr, Offset offset, Extent extent) +void RecordComponent::loadChunk(std::shared_ptr data, Offset o, Extent e) { - loadChunk( - std::static_pointer_cast(std::move(ptr)), - std::move(offset), - std::move(extent)); + // static_assert(!std::is_same_v, "EVIL"); + uint8_t dim = getDimensionality(); + auto operation = prepareLoadStore(); + + // default arguments + // offset = {0u}: expand to right dim {0u, 0u, ...} + if (o.size() != 1u || o.at(0) != 0u || dim <= 1u) + { + operation.offset(std::move(o)); + } + + // extent = {-1u}: take full size + if (e.size() != 1u || e.at(0) != -1u) + { + operation.extent(std::move(e)); + } + + operation.withSharedPtr(std::move(data)).unsafeNoAutomaticFlush().load(); } template void RecordComponent::loadChunkRaw(T *ptr, Offset offset, Extent extent) { - 
loadChunk(auxiliary::shareRaw(ptr), std::move(offset), std::move(extent)); + prepareLoadStore() + .offset(std::move(offset)) + .extent(std::move(extent)) + .withRawPtr(ptr) + .unsafeNoAutomaticFlush() + .load(); } template void RecordComponent::storeChunk(std::shared_ptr data, Offset o, Extent e) { - if (!data) - throw std::runtime_error( - "Unallocated pointer passed during chunk store."); - Datatype dtype = determineDatatype(data); - - /* std::static_pointer_cast correctly reference-counts the pointer */ - storeChunk( - auxiliary::WriteBuffer(std::static_pointer_cast(data)), - dtype, - std::move(o), - std::move(e)); + prepareLoadStore() + .offset(std::move(o)) + .extent(std::move(e)) + .withSharedPtr(std::move(data)) + .unsafeNoAutomaticFlush() + .store(); } template void RecordComponent::storeChunk( UniquePtrWithLambda data, Offset o, Extent e) { - if (!data) - throw std::runtime_error( - "Unallocated pointer passed during chunk store."); - Datatype dtype = determineDatatype<>(data); - - storeChunk( - auxiliary::WriteBuffer{std::move(data).template static_cast_()}, - dtype, - std::move(o), - std::move(e)); -} - -template -void RecordComponent::storeChunk(std::shared_ptr data, Offset o, Extent e) -{ - storeChunk( - std::static_pointer_cast(std::move(data)), - std::move(o), - std::move(e)); + prepareLoadStore() + .offset(std::move(o)) + .extent(std::move(e)) + .withUniquePtr(std::move(data)) + .unsafeNoAutomaticFlush() + .store(); } template void RecordComponent::storeChunkRaw(T const *ptr, Offset offset, Extent extent) { - storeChunk(auxiliary::shareRaw(ptr), std::move(offset), std::move(extent)); + prepareLoadStore() + .offset(std::move(offset)) + .extent(std::move(extent)) + .withRawPtr(ptr) + .unsafeNoAutomaticFlush() + .store(); } template DynamicMemoryView RecordComponent::storeChunk(Offset offset, Extent extent) { - return storeChunk(std::move(offset), std::move(extent), [](size_t size) { -#if (defined(_LIBCPP_VERSION) && _LIBCPP_VERSION < 11000) || \ - 
(defined(__apple_build_version__) && __clang_major__ < 14) - return UniquePtrWithLambda{ - new T[size], [](auto *ptr) { delete[] ptr; }}; -#else - return std::unique_ptr{new T[size]}; -#endif - }); + return prepareLoadStore() + .offset(std::move(offset)) + .extent(std::move(extent)) + .storeSpan(); } template @@ -1015,10 +1062,6 @@ void RecordComponent::verifyChunk(Offset const &o, Extent const &e) const #define OPENPMD_ARRAY(type) type[] #define OPENPMD_INSTANTIATE_BASIC(type) \ - template void RecordComponent::loadChunk( \ - std::shared_ptr data, Offset o, Extent e); \ - template void RecordComponent::loadChunk( \ - std::shared_ptr data, Offset o, Extent e); \ template void RecordComponent::loadChunkRaw( \ OPENPMD_PTR(type) ptr, Offset offset, Extent extent); \ template void RecordComponent::verifyChunk( \ @@ -1026,21 +1069,28 @@ void RecordComponent::verifyChunk(Offset const &o, Extent const &e) const template DynamicMemoryView RecordComponent::storeChunk( \ Offset offset, Extent extent); \ template void RecordComponent::storeChunkRaw( \ - OPENPMD_PTR(type const) ptr, Offset offset, Extent extent); + OPENPMD_PTR(type const) ptr, Offset offset, Extent extent); \ + template DynamicMemoryView RecordComponent::storeChunkSpan_impl( \ + internal::LoadStoreConfig cfg); -#define OPENPMD_INSTANTIATE_CONST_AND_NONCONST(type) \ - template void RecordComponent::storeChunk( \ - std::shared_ptr data, Offset o, Extent e); \ - template void RecordComponent::storeChunk( \ - std::shared_ptr data, Offset o, Extent e); +#define OPENPMD_INSTANTIATE_CONST_AND_NONCONST(type) #define OPENPMD_INSTANTIATE_WITH_AND_WITHOUT_EXTENT(type) \ + template void RecordComponent::loadChunk( \ + std::shared_ptr data, Offset o, Extent e); \ template std::shared_ptr RecordComponent::loadChunk( \ Offset o, Extent e); \ template void RecordComponent::storeChunk( \ - UniquePtrWithLambda data, Offset o, Extent e); + UniquePtrWithLambda data, Offset o, Extent e); \ + template void 
RecordComponent::loadChunk_impl( \ + std::shared_ptr const &data, \ + internal::LoadStoreConfigWithBuffer cfg); \ + template std::shared_ptr RecordComponent::loadChunkAllocate_impl( \ + internal::LoadStoreConfig cfg); #define OPENPMD_INSTANTIATE_FULLMATRIX(type) \ + template void RecordComponent::storeChunk( \ + std::shared_ptr data, Offset o, Extent e); \ template RecordComponent &RecordComponent::makeConstant(type); \ template RecordComponent &RecordComponent::makeEmpty( \ uint8_t dimensions); diff --git a/src/Series.cpp b/src/Series.cpp index a826303193..14f813559d 100644 --- a/src/Series.cpp +++ b/src/Series.cpp @@ -523,12 +523,13 @@ void Series::flushRankTable() }; auto writeDataset = [&rank, &maxSize, this, &rankTable]( - std::shared_ptr put, size_t num_lines = 1) { + std::shared_ptr const &put, + size_t num_lines = 1) { Parameter chunk; chunk.dtype = Datatype::CHAR; chunk.offset = {uint64_t(rank), 0}; chunk.extent = {num_lines, maxSize}; - chunk.data = std::move(put); + chunk.data = put; IOHandler()->enqueue( IOTask(&rankTable.m_attributable, std::move(chunk))); }; @@ -569,8 +570,13 @@ void Series::flushRankTable() * > } */ [asRawPtr](char *) { delete asRawPtr; }}; - writeDataset(std::move(put), /* num_lines = */ size); + writeDataset(put, /* num_lines = */ size); } + + // Must ensure that the Writable is consistently set to written on all + // ranks + series.m_rankTable.m_attributable.setWritten( + true, EnqueueAsynchronously::OnlyAsync); return; } #endif @@ -583,7 +589,7 @@ void Series::flushRankTable() new char[maxSize]{}, [](char const *ptr) { delete[] ptr; }}; std::copy_n(myRankInfo.c_str(), mySize, put.get()); - writeDataset(std::move(put)); + writeDataset(put); } std::string Series::particlesPath() const @@ -1503,9 +1509,9 @@ void Series::flushFileBased( * current iteration by the backend) */ this->setWritten( - false, Attributable::EnqueueAsynchronously::Yes); + false, Attributable::EnqueueAsynchronously::Both); series.iterations.setWritten( - false, 
Attributable::EnqueueAsynchronously::Yes); + false, Attributable::EnqueueAsynchronously::Both); setDirty(dirty() || it->second.dirty()); std::string filename = iterationFilename(it->first); @@ -1950,12 +1956,11 @@ void Series::readOneIterationFileBased(std::string const &filePath) readBase(); - using DT = Datatype; aRead.name = "iterationEncoding"; IOHandler()->enqueue(IOTask(this, aRead)); IOHandler()->flush(internal::defaultFlushParams); IterationEncoding encoding_out; - if (*aRead.dtype == DT::STRING) + if (isSame(*aRead.dtype, Datatype::STRING)) { std::string encoding = Attribute(Attribute::from_any, *aRead.m_resource) .get(); @@ -1997,7 +2002,7 @@ void Series::readOneIterationFileBased(std::string const &filePath) setWritten(false, Attributable::EnqueueAsynchronously::No); setIterationEncoding_internal( encoding_out, internal::default_or_explicit::explicit_); - setWritten(old_written, Attributable::EnqueueAsynchronously::Yes); + setWritten(old_written, Attributable::EnqueueAsynchronously::Both); } else throw std::runtime_error( @@ -2010,7 +2015,7 @@ void Series::readOneIterationFileBased(std::string const &filePath) aRead.name = "iterationFormat"; IOHandler()->enqueue(IOTask(this, aRead)); IOHandler()->flush(internal::defaultFlushParams); - if (*aRead.dtype == DT::STRING) + if (isSame(*aRead.dtype, Datatype::STRING)) { setWritten(false, Attributable::EnqueueAsynchronously::No); setIterationFormat(Attribute(Attribute::from_any, *aRead.m_resource) @@ -2581,8 +2586,9 @@ std::string Series::iterationFilename(IterationIndex_t i) { return series.m_overrideFilebasedFilename.value(); } - else if (auto iteration = series.m_iterationFilenames.find(i); // - iteration != series.m_iterationFilenames.end()) + else if ( + auto iteration = series.m_iterationFilenames.find(i); // + iteration != series.m_iterationFilenames.end()) { return iteration->second; } @@ -2604,16 +2610,25 @@ std::string Series::iterationFilename(IterationIndex_t i) Series::iterations_iterator 
Series::indexOf(Iteration const &iteration) { auto &series = get(); - for (auto it = series.iterations.begin(); it != series.iterations.end(); - ++it) + // first try the cached index; if it points to the correct entry return it + auto idx = iteration.get().m_iterationIndex; + if (!idx.has_value()) { - if (&it->second.Attributable::get() == &iteration.Attributable::get()) - { - return it; - } + throw error::Internal("Iteration index not known."); + } + + auto it = series.iterations.find(*idx); + if (it != series.iterations.end() && + &it->second.Attributable::get() == &iteration.Attributable::get()) + { + return it; + } + else + { + throw error::Internal( + "Iteration " + std::to_string(*idx) + + " no longer known by the Series?"); } - throw std::runtime_error( - "[Iteration::close] Iteration not found in Series."); } AdvanceStatus Series::advance( diff --git a/src/auxiliary/Filesystem.cpp b/src/auxiliary/Filesystem.cpp index 2a3e947a9e..b22117d171 100644 --- a/src/auxiliary/Filesystem.cpp +++ b/src/auxiliary/Filesystem.cpp @@ -67,6 +67,20 @@ bool file_exists(std::string const &path) } std::vector list_directory(std::string const &path) +{ + auto res = list_directory_nothrow(path); + if (!res) + { + throw std::system_error(std::error_code(errno, std::system_category())); + } + else + { + return *res; + } +} + +std::optional> +list_directory_nothrow(std::string const &path) { std::vector ret; #ifdef _WIN32 @@ -86,7 +100,9 @@ std::vector list_directory(std::string const &path) #else auto directory = opendir(path.c_str()); if (!directory) - throw std::system_error(std::error_code(errno, std::system_category())); + { + return std::nullopt; + } dirent *entry; while ((entry = readdir(directory)) != nullptr) if (strcmp(entry->d_name, ".") != 0 && strcmp(entry->d_name, "..") != 0) @@ -96,6 +112,46 @@ std::vector list_directory(std::string const &path) return ret; } +#ifndef _WIN32 +// Need to manually preserve sticky bit and setgid on Unix systems +namespace +{ + 
std::string get_parent(std::string const &path) + { + std::string parent = path; + size_t pos = parent.find_last_of(directory_separator); + if (pos != std::string::npos) + { + parent = parent.substr(0, pos); + if (parent.empty()) + parent = "/"; + } + else + { + parent.clear(); + } + return parent; + } + + mode_t get_permissions(std::string const &path) + { + std::string parent = get_parent(path); + if (parent.empty() || !directory_exists(parent)) + { + return 0; + } + + struct stat s; + if (stat(parent.c_str(), &s) != 0) + { + return 0; + } + + return s.st_mode & 07777; + } +} // namespace +#endif + bool create_directories(std::string const &path) { if (directory_exists(path)) @@ -106,10 +162,11 @@ bool create_directories(std::string const &path) return CreateDirectory(p.c_str(), nullptr); }; #else - mode_t mask = umask(0); - umask(mask); - auto mk = [mask](std::string const &p) -> bool { - return (0 == mkdir(p.c_str(), 0777 & ~mask)); + auto mk = [](std::string const &p) -> bool { + // preserve sticky and setgid from parent + mode_t parentPerms = + get_permissions(get_parent(p)) & (S_ISVTX | S_ISGID); + return (0 == mkdir(p.c_str(), 0777 | parentPerms)); }; #endif std::istringstream ss(path); @@ -150,14 +207,20 @@ bool remove_directory(std::string const &path) return (0 == remove(p.c_str())); }; #endif - for (auto const &entry : list_directory(path)) + auto entries = list_directory_nothrow(path); + // Check if some other process was faster deleting this + if (entries) { - auto partialPath = path; - partialPath.append(std::string(1, directory_separator)).append(entry); - if (directory_exists(partialPath)) - success &= remove_directory(partialPath); - else if (file_exists(partialPath)) - success &= remove_file(partialPath); + for (auto const &entry : *entries) + { + auto partialPath = path; + partialPath.append(std::string(1, directory_separator)) + .append(entry); + if (directory_exists(partialPath)) + success &= remove_directory(partialPath); + else if 
(file_exists(partialPath)) + success &= remove_file(partialPath); + } } success &= del(path); return success; diff --git a/src/auxiliary/Future.cpp b/src/auxiliary/Future.cpp new file mode 100644 index 0000000000..39af555f9e --- /dev/null +++ b/src/auxiliary/Future.cpp @@ -0,0 +1,190 @@ +#include "openPMD/auxiliary/Future.hpp" +#include "openPMD/Error.hpp" +#include "openPMD/RecordComponent.hpp" + +#include +#include +#include + +// comment + +#include "openPMD/DatatypeMacros.hpp" + +namespace openPMD::auxiliary::detail +{ +template +OneTimeTask::OneTimeTask() = default; + +template +OneTimeTask::OneTimeTask(task_type task) : members{std::move(task)} +{} + +template +OneTimeTask::OneTimeTask(OneTimeTask &&other) noexcept(noexcept_move) + : members(std::move(other.members)) +{ + other.members.m_task_valid = false; +} + +template +auto OneTimeTask::operator=(OneTimeTask &&other) noexcept(noexcept_move) + -> OneTimeTask & +{ + this->members = std::move(other.members); + other.members.m_task_valid = false; + return *this; +} + +template +auto OneTimeTask::operator()() -> T +{ + if (!members.m_task_valid) + { + throw error::WrongAPIUsage( + "[DeferredComputation] No valid state. 
Probably already " + "computed."); + } + if (!members.m_task) + { + throw error::WrongAPIUsage( + "[DeferredComputation] No valid task was specified."); + } + members.m_task_valid = false; + if constexpr (std::is_void_v) + { + std::move(members.m_task)(); + members.m_task = {}; + } + else + { + auto res = std::move(members.m_task)(); + members.m_task = {}; // reset + return res; + } +} +} // namespace openPMD::auxiliary::detail + +namespace openPMD::auxiliary +{ + +template +DeferredComputation::DeferredComputation(task_type task) + : m_task(detail::OneTimeTask{std::move(task)}) +{} + +template +DeferredComputation::DeferredComputation(cached_type cached_val) + : m_task(detail::CachedValue{std::move(cached_val)}) +{} + +template +DeferredComputation::DeferredComputation() = default; + +template +DeferredComputation::DeferredComputation(DeferredComputation &&) noexcept( + noexcept_move) = default; + +template +auto DeferredComputation::operator=(DeferredComputation &&) noexcept( + noexcept_move) -> DeferredComputation & = default; + +template +DeferredComputation::~DeferredComputation() +{ + try + { + std::visit( + auxiliary::overloaded{ + [](detail::OneTimeTask &task) { + if (task.members.m_task_valid) + { + std::move(task)(); + } + }, + [](detail::CachedValue &) {}}, + this->m_task); + } + catch (std::exception const &e) + { + std::cerr << "[DeferredComputation] Error in destructor: '" << e.what() + << "'." << std::endl; + } + catch (...) + { + std::cerr << "[DeferredComputation] Unknown error in destructor." 
+ << std::endl; + } +} + +template +auto DeferredComputation::get() -> T +{ + return std::visit( + auxiliary::overloaded{ + [](detail::OneTimeTask &task) -> T { return std::move(task)(); }, + [](detail::CachedValue &cached) -> T { return cached.val; }}, + this->m_task); +} + +template <> +auto DeferredComputation::get() -> void +{ + std::visit( + auxiliary::overloaded{ + [](detail::OneTimeTask &task) { std::move(task)(); }, + [](detail::CachedValue &) { return; }}, + this->m_task); +} + +template +auto DeferredComputation::operator()() -> T +{ + return get(); +} + +template +void DeferredComputation::invalidate() && +{ + std::visit( + auxiliary::overloaded{ + [](detail::OneTimeTask &task) { + task.members.m_task = {}; + task.members.m_task_valid = false; + }, + [](detail::CachedValue const &) {}}, + this->m_task); +} + +template +auto DeferredComputation::valid() const noexcept -> bool +{ + return std::visit( + auxiliary::overloaded{ + [](detail::OneTimeTask const &task) { + return task.members.m_task_valid; + }, + [](detail::CachedValue const &) { return true; }}, + this->m_task); +} + +template class DeferredComputation; +template class DeferredComputation; +template class DeferredComputation; // used in tests + +// need this for clang-tidy +#define OPENPMD_ARRAY(type) type[] +#define OPENPMD_APPLY_TEMPLATE(template_, type) template_ + +#define INSTANTIATE_FUTURE(dtype) \ + template class DeferredComputation; +#define INSTANTIATE_FUTURE_WITH_AND_WITHOUT_EXTENT(type) \ + INSTANTIATE_FUTURE(type) INSTANTIATE_FUTURE(OPENPMD_ARRAY(type)) +OPENPMD_FOREACH_NONVECTOR_DATATYPE(INSTANTIATE_FUTURE_WITH_AND_WITHOUT_EXTENT) +#undef INSTANTIATE_FUTURE +#undef INSTANTIATE_FUTURE_WITH_AND_WITHOUT_EXTENT +#undef OPENPMD_ARRAY +#undef OPENPMD_APPLY_TEMPLATE +} // namespace openPMD::auxiliary + +#include "openPMD/UndefDatatypeMacros.hpp" diff --git a/src/auxiliary/Memory.cpp b/src/auxiliary/Memory.cpp index c2a0f2aa0d..b002288a88 100644 --- a/src/auxiliary/Memory.cpp +++ 
b/src/auxiliary/Memory.cpp @@ -21,8 +21,10 @@ #include "openPMD/auxiliary/Memory.hpp" #include "openPMD/ChunkInfo.hpp" +#include "openPMD/Datatype.tpp" #include "openPMD/auxiliary/Memory_internal.hpp" #include "openPMD/auxiliary/UniquePtr.hpp" +#include "openPMD/backend/Variant_internal.hpp" #include #include @@ -193,8 +195,21 @@ auto WriteBuffer::CopyableUniquePtr::release() -> UniquePtrWithLambda WriteBuffer::WriteBuffer() : m_buffer(std::make_any()) {} -WriteBuffer::WriteBuffer(std::shared_ptr ptr) - : m_buffer(std::make_any(std::move(ptr))) +template +WriteBuffer::WriteBuffer(std::shared_ptr ptr) + : m_buffer //(std::make_any(std::move(ptr))) + ([&]() { + if constexpr (std::is_const_v) + { + return std::make_any( + std::static_pointer_cast(ptr)); + } + else + { + return std::make_any( + std::static_pointer_cast(ptr)); + } + }()) {} WriteBuffer::WriteBuffer(UniquePtrWithLambda ptr) : m_buffer( @@ -204,12 +219,22 @@ WriteBuffer::WriteBuffer(UniquePtrWithLambda ptr) WriteBuffer::WriteBuffer(WriteBuffer &&) noexcept = default; WriteBuffer &WriteBuffer::operator=(WriteBuffer &&) noexcept = default; -WriteBuffer const &WriteBuffer::operator=(std::shared_ptr ptr) +template +WriteBuffer &WriteBuffer::operator=(std::shared_ptr const &ptr) { - m_buffer = std::make_any(std::move(ptr)); + if constexpr (std::is_const_v) + { + m_buffer = std::make_any( + std::static_pointer_cast(ptr)); + } + else + { + m_buffer = std::make_any( + std::static_pointer_cast(ptr)); + } return *this; } -WriteBuffer const &WriteBuffer::operator=(UniquePtrWithLambda ptr) +WriteBuffer &WriteBuffer::operator=(UniquePtrWithLambda ptr) { m_buffer = std::make_any(CopyableUniquePtr(std::move(ptr))); @@ -226,4 +251,22 @@ void const *WriteBuffer::get() const }, as_variant()); } + +#define OPENPMD_INSTANTIATE(dtype) \ + template WriteBuffer::WriteBuffer(std::shared_ptr); \ + template WriteBuffer &WriteBuffer::operator=( \ + std::shared_ptr const &); + +#ifndef DOXYGEN_SHOULD_SKIP_THIS + 
+OPENPMD_FOREACH_DATASET_DATATYPE(OPENPMD_INSTANTIATE) +template WriteBuffer::WriteBuffer(std::shared_ptr); +template WriteBuffer &WriteBuffer::operator=(std::shared_ptr const &); +template WriteBuffer::WriteBuffer(std::shared_ptr); +template WriteBuffer & +WriteBuffer::operator=(std::shared_ptr const &); + +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +#undef OPENPMD_INSTANTIATE } // namespace openPMD::auxiliary diff --git a/src/auxiliary/UniquePtr.cpp b/src/auxiliary/UniquePtr.cpp index 6625ca3a47..828c33a785 100644 --- a/src/auxiliary/UniquePtr.cpp +++ b/src/auxiliary/UniquePtr.cpp @@ -58,7 +58,7 @@ namespace auxiliary OPENPMD_FOREACH_DATASET_DATATYPE( OPENPMD_INSTANTIATE_WITH_AND_WITHOUT_EXTENT) - OPENPMD_INSTANTIATE(void) + OPENPMD_INSTANTIATE(void) OPENPMD_INSTANTIATE(void const) #undef OPENPMD_INSTANTIATE #undef OPENPMD_INSTANTIATE_WITH_AND_WITHOUT_EXTENT @@ -99,12 +99,16 @@ UniquePtrWithLambda::UniquePtrWithLambda( std::unique_ptr); #define OPENPMD_INSTANTIATE_WITH_AND_WITHOUT_EXTENT(type) \ - OPENPMD_INSTANTIATE(type) OPENPMD_INSTANTIATE(OPENPMD_ARRAY(type)) + OPENPMD_INSTANTIATE(type) \ + OPENPMD_INSTANTIATE(OPENPMD_ARRAY(type)) \ + OPENPMD_INSTANTIATE(type const) \ + OPENPMD_INSTANTIATE(OPENPMD_ARRAY(type const)) -OPENPMD_FOREACH_DATASET_DATATYPE(OPENPMD_INSTANTIATE_WITH_AND_WITHOUT_EXTENT) +OPENPMD_FOREACH_NONVECTOR_DATATYPE(OPENPMD_INSTANTIATE_WITH_AND_WITHOUT_EXTENT) // Instantiate this directly, do not instantiate the // `std::unique_ptr`-based constructor. 
template class UniquePtrWithLambda; +template class UniquePtrWithLambda; #undef OPENPMD_INSTANTIATE #undef OPENPMD_INSTANTIATE_WITH_AND_WITHOUT_EXTENT #undef OPENPMD_ARRAY diff --git a/src/backend/Attributable.cpp b/src/backend/Attributable.cpp index ce1c2936cb..dd9019b255 100644 --- a/src/backend/Attributable.cpp +++ b/src/backend/Attributable.cpp @@ -524,12 +524,18 @@ void Attributable::setWritten(bool val, EnqueueAsynchronously ea) switch (ea) { - case EnqueueAsynchronously::Yes: { + case EnqueueAsynchronously::OnlyAsync: { Parameter param; param.target_status = val; IOHandler()->enqueue(IOTask(this, param)); + return; + } + case EnqueueAsynchronously::Both: { + Parameter param; + param.target_status = val; + IOHandler()->enqueue(IOTask(this, param)); + break; } - break; case EnqueueAsynchronously::No: break; } diff --git a/src/backend/BaseRecord.cpp b/src/backend/BaseRecord.cpp index 9c2161be0a..be07f00f19 100644 --- a/src/backend/BaseRecord.cpp +++ b/src/backend/BaseRecord.cpp @@ -801,9 +801,10 @@ inline void BaseRecord::readBase() "timeOffset", Attribute(Attribute::from_any, *aRead.m_resource).get()); // conversion cast if a backend reports an integer type - else if (auto val = Attribute(Attribute::from_any, *aRead.m_resource) - .getOptional(); - val.has_value()) + else if ( + auto val = Attribute(Attribute::from_any, *aRead.m_resource) + .getOptional(); + val.has_value()) this->setAttribute("timeOffset", val.value()); else throw std::runtime_error( diff --git a/src/binding/python/Attributable.cpp b/src/binding/python/Attributable.cpp index 23795276ce..e8ec058656 100644 --- a/src/binding/python/Attributable.cpp +++ b/src/binding/python/Attributable.cpp @@ -422,15 +422,17 @@ bool setAttributeFromObject_char( #endif // this must come after tryCast>, // because tryCast> implicitly covers chars as well - else if (auto list_of_string = tryCast(obj); - list_of_string.has_value()) + else if ( + auto list_of_string = tryCast(obj); + list_of_string.has_value()) { 
return attr.setAttribute(key, std::move(*list_of_string)); } // Again: `char` vs. `signed char`, resp. `char` vs. `unsigned char` // depending on `char`'s signedness. - else if (auto list_of_int = tryCast>(obj); - list_of_int.has_value()) + else if ( + auto list_of_int = tryCast>(obj); + list_of_int.has_value()) { std::vector casted; casted.reserve(list_of_int->size()); diff --git a/src/binding/python/Iteration.cpp b/src/binding/python/Iteration.cpp index 8346aaaf63..60ffe8b9f8 100644 --- a/src/binding/python/Iteration.cpp +++ b/src/binding/python/Iteration.cpp @@ -101,7 +101,7 @@ void init_Iteration(py::module &m) py::return_value_policy::copy, // garbage collection: return value must be freed before // Iteration - py::keep_alive<1, 0>())) + py::keep_alive<0, 1>())) .def_property_readonly( "particles", py::cpp_function( @@ -109,7 +109,7 @@ void init_Iteration(py::module &m) py::return_value_policy::copy, // garbage collection: return value must be freed before // Iteration - py::keep_alive<1, 0>())); + py::keep_alive<0, 1>())); add_pickle( cl, [](openPMD::Series series, std::vector const &group) { diff --git a/src/binding/python/ParticleSpecies.cpp b/src/binding/python/ParticleSpecies.cpp index d619554220..a097c37e84 100644 --- a/src/binding/python/ParticleSpecies.cpp +++ b/src/binding/python/ParticleSpecies.cpp @@ -54,7 +54,7 @@ void init_ParticleSpecies(py::module &m) [](ParticleSpecies &ps) { return ps.particlePatches; }, py::return_value_policy::copy, // garbage collection: return value must be freed before Series - py::keep_alive<1, 0>())); + py::keep_alive<0, 1>())); add_pickle( cl, [](openPMD::Series series, std::vector const &group) { uint64_t const n_it = std::stoull(group.at(1)); diff --git a/src/binding/python/PatchRecordComponent.cpp b/src/binding/python/PatchRecordComponent.cpp index 5887ba7d03..c68cdbc16f 100644 --- a/src/binding/python/PatchRecordComponent.cpp +++ b/src/binding/python/PatchRecordComponent.cpp @@ -130,7 +130,9 @@ void 
init_PatchRecordComponent(py::module &m) switch (dtype) { case DT::BOOL: - return prc.store(idx, *static_cast(buf.ptr)); + throw std::runtime_error( + "make_constant: " + "Boolean type not supported!"); break; case DT::SHORT: return prc.store(idx, *static_cast(buf.ptr)); diff --git a/src/binding/python/RecordComponent.cpp b/src/binding/python/RecordComponent.cpp index 232df5861d..382783a730 100644 --- a/src/binding/python/RecordComponent.cpp +++ b/src/binding/python/RecordComponent.cpp @@ -489,7 +489,14 @@ inline void store_chunk( check_buffer_is_contiguous(a); - // dtype_from_numpy(a.dtype()) + if (!dtype_to_numpy(r.getDatatype()).is(a.dtype())) + { + std::stringstream err; + err << "Attempting store from Python array of type '" + << dtype_from_numpy(a.dtype()) + << "' into Record Component of type '" << r.getDatatype() << "'."; + throw error::WrongAPIUsage(err.str()); + } switchDatasetType( r.getDatatype(), r, a, offset, extent); } @@ -770,6 +777,15 @@ inline void load_chunk( check_buffer_is_contiguous(a); + if (!dtype_to_numpy(r.getDatatype()).is(a.dtype())) + { + std::stringstream err; + err << "Attempting load into Python array of type '" + << dtype_from_numpy(a.dtype()) + << "' from Record Component of type '" << r.getDatatype() << "'."; + throw error::WrongAPIUsage(err.str()); + } + switchDatasetType( r.getDatatype(), r, a, offset, extent); } diff --git a/src/binding/python/Series.cpp b/src/binding/python/Series.cpp index ec2b38c867..6c95cf19b8 100644 --- a/src/binding/python/Series.cpp +++ b/src/binding/python/Series.cpp @@ -301,7 +301,8 @@ not possible once it has been closed. throw std::runtime_error("Unreachable"); }, // copy + keepalive - py::return_value_policy::copy) + py::return_value_policy::copy, + py::keep_alive<0, 1>()) .def( "current_iteration", [](Snapshots &s) -> std::optional { @@ -315,6 +316,7 @@ not possible once it has been closed. 
return std::nullopt; } }, + py::keep_alive<0, 1>(), "Return the iteration that is currently being written to, if " "it " "exists."); @@ -503,7 +505,7 @@ this method. [](Series &s) { return s.iterations; }, py::return_value_policy::copy, // garbage collection: return value must be freed before Series - py::keep_alive<1, 0>())) + py::keep_alive<0, 1>())) .def( "read_iterations", [](Series &s) { @@ -645,5 +647,18 @@ users to overwrite default options, while keeping any other ones. py::arg("comm"), docs_merge_json) #endif - ; + .def("__del__", [](Series &s) { + try + { + s.close(); + } + catch (std::exception const &e) + { + std::cerr << "Error during close: " << e.what() << std::endl; + } + catch (...) + { + std::cerr << "Unknown error during close." << std::endl; + } + }); } diff --git a/test/AuxiliaryTest.cpp b/test/AuxiliaryTest.cpp index ee0b029473..3612b13adb 100644 --- a/test/AuxiliaryTest.cpp +++ b/test/AuxiliaryTest.cpp @@ -19,6 +19,8 @@ * If not, see . */ // expose private and protected members for invasive testing +#include "openPMD/Error.hpp" +#include "openPMD/auxiliary/Future.hpp" #if openPMD_USE_INVASIVE_TESTS #define OPENPMD_private public: #define OPENPMD_protected public: @@ -538,3 +540,31 @@ TEST_CASE("filesystem_test", "[auxiliary]") REQUIRE(!remove_file("./nonexistent_file_in_cmake_bin_directory")); #endif } + +TEST_CASE("future_test", "[auxiliary]") +{ + using task_type = auxiliary::DeferredComputation; + size_t counter = 0; + + auto make_task = [&counter]() { + counter = 0; + return task_type{[&counter]() { + ++counter; + return "success"; + }}; + }; + + auto move_construct = make_task(); + task_type move_constructed(std::move(move_construct)); + REQUIRE(counter == 0); + REQUIRE(move_constructed() == "success"); + REQUIRE(counter == 1); + REQUIRE_THROWS_AS(move_constructed(), error::WrongAPIUsage); + + auto move_assign = make_task(); + task_type move_assigned = std::move(move_assign); + REQUIRE(counter == 0); + REQUIRE(move_assigned() == 
"success"); + REQUIRE(counter == 1); + REQUIRE_THROWS_AS(move_assigned(), error::WrongAPIUsage); +} diff --git a/test/CoreTest.cpp b/test/CoreTest.cpp index ee266ff56e..903363538d 100644 --- a/test/CoreTest.cpp +++ b/test/CoreTest.cpp @@ -1270,7 +1270,7 @@ TEST_CASE("use_count_test", "[core]") pprc.resetDataset(Dataset(determineDatatype(), {4})); pprc.store(0, static_cast(1)); REQUIRE( - std::get>( + std::get>( static_cast *>( pprc.get().m_chunks.front().parameter.get()) ->data.as_variant()) @@ -1750,6 +1750,11 @@ TEST_CASE("automatic_variable_encoding", "[adios2]") automatic_variable_encoding::automatic_variable_encoding(); } +TEST_CASE("read_nonexistent_attribute", "[core]") +{ + read_nonexistent_attribute::read_nonexistent_attribute(); +} + TEST_CASE("unique_ptr", "[core]") { auto stdptr = std::make_unique(5); diff --git a/test/Files_Core/CoreTests.hpp b/test/Files_Core/CoreTests.hpp index fa62279c82..f769beeb99 100644 --- a/test/Files_Core/CoreTests.hpp +++ b/test/Files_Core/CoreTests.hpp @@ -24,3 +24,8 @@ namespace automatic_variable_encoding { auto automatic_variable_encoding() -> void; } + +namespace read_nonexistent_attribute +{ +auto read_nonexistent_attribute() -> void; +} diff --git a/test/Files_Core/read_nonexistent_attribute.cpp b/test/Files_Core/read_nonexistent_attribute.cpp new file mode 100644 index 0000000000..f4025db627 --- /dev/null +++ b/test/Files_Core/read_nonexistent_attribute.cpp @@ -0,0 +1,104 @@ + +/* Copyright 2026 Franz Poeschel + * + * This file is part of openPMD-api. + * + * openPMD-api is free software: you can redistribute it and/or modify + * it under the terms of of either the GNU General Public License or + * the GNU Lesser General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * openPMD-api is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License and the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * and the GNU Lesser General Public License along with openPMD-api. + * If not, see . + */ + +#if !openPMD_USE_INVASIVE_TESTS + +namespace read_nonexistent_attribute +{ +void read_nonexistent_attribute() +{} +} // namespace read_nonexistent_attribute + +#else + +#define OPENPMD_private public: +#define OPENPMD_protected public: + +#include "CoreTests.hpp" + +#include "openPMD/Error.hpp" +#include "openPMD/IO/AbstractIOHandler.hpp" +#include "openPMD/IO/IOTask.hpp" + +#include +namespace read_nonexistent_attribute +{ +using namespace openPMD; +static auto testedFileExtensions() -> std::vector +{ + auto allExtensions = getFileExtensions(); + auto newEnd = std::remove_if( + allExtensions.begin(), + allExtensions.end(), + []([[maybe_unused]] std::string const &ext) { + // sst and ssc need a receiver for testing + // bp5 is already tested via bp + // toml parsing is very slow and its implementation is equivalent to + // the json backend, so it is only activated for selected tests + return ext == "sst" || ext == "ssc" || ext == "bp5" || + ext == "toml"; + }); + return {allExtensions.begin(), newEnd}; +} + +void run(std::string const &ext) +{ + auto const filename = "../samples/read_nonexistent_attribute." + ext; + + auto do_create = [&filename]() { + Series write(filename, Access::CREATE); + write.close(); + }; + + do_create(); + + // Try reading an attribute from this Series which does not actually exist. + // This tests the bugs fixed in + // https://github.com/openPMD/openPMD-api/pull/1866: + // + // 1. The HDF5 backend should verify that the attribute exists before trying + // to read it. 
Otherwise, the read failure will print ugly backtraces. + // 2. The HDF5 backend should clean up resources also in case an operations + // returns early. Otherwise the second call to do_create() will fail, + // since the first HDF5 file will remain open (resource leak). + { + Series read(filename, Access::READ_ONLY); + Parameter readAttr; + readAttr.name = "this_attribute_does_hopefully_not_exist"; + read.IOHandler()->enqueue(IOTask(&read, readAttr)); + REQUIRE_THROWS_AS( + read.IOHandler()->flush(internal::defaultFlushParams), + error::ReadError); + } + + do_create(); +} + +void read_nonexistent_attribute() +{ + for (auto const &ext : testedFileExtensions()) + { + run(ext); + } +} +} // namespace read_nonexistent_attribute +#endif diff --git a/test/ParallelIOTest.cpp b/test/ParallelIOTest.cpp index 9c28f52945..c72b9b1574 100644 --- a/test/ParallelIOTest.cpp +++ b/test/ParallelIOTest.cpp @@ -454,18 +454,51 @@ void available_chunks_test(std::string const &file_ending) } )END"; - std::vector data{2, 4, 6, 8}; + std::vector xdata{2, 4, 6, 8}; + std::vector ydata{0, 0, 0, 0, 0, // + 0, 1, 2, 3, 0, // + 0, 4, 5, 6, 0, // + 0, 7, 8, 9, 0, // + 0, 0, 0, 0, 0}; + std::vector ydata_firstandlastrow{-1, -1, -1}; { Series write(name, Access::CREATE, MPI_COMM_WORLD, parameters.str()); Iteration it0 = write.iterations[0]; auto E_x = it0.meshes["E"]["x"]; E_x.resetDataset({Datatype::INT, {mpi_size, 4}}); - E_x.storeChunk(data, {mpi_rank, 0}, {1, 4}); + E_x.storeChunk(xdata, {mpi_rank, 0}, {1, 4}); + auto E_y = it0.meshes["E"]["y"]; + E_y.resetDataset({Datatype::INT, {5, 3ul * mpi_size}}); + E_y.prepareLoadStore() + .withContiguousContainer(ydata_firstandlastrow) + .offset({0, 3ul * mpi_rank}) + .extent({1, 3}) + .store(); + E_y.prepareLoadStore() + .offset({1, 3ul * mpi_rank}) + .extent({3, 3}) + .withContiguousContainer(ydata) + .memorySelection({{1, 1}, {5, 5}}) + .store(); + // if condition checks if this PR is available in ADIOS2: + // 
https://github.com/ornladios/ADIOS2/pull/4169 + if constexpr (CanTheMemorySelectionBeReset) + { + E_y.prepareLoadStore() + .withContiguousContainer(ydata_firstandlastrow) + .offset({4, 3ul * mpi_rank}) + .extent({1, 3}) + .store(); + } it0.close(); } { - Series read(name, Access::READ_ONLY, MPI_COMM_WORLD); + Series read( + name, + Access::READ_ONLY, + MPI_COMM_WORLD, + R"({"verify_homogeneous_extents": false})"); Iteration it0 = read.iterations[0]; auto E_x = it0.meshes["E"]["x"]; ChunkTable table = E_x.availableChunks(); @@ -492,6 +525,41 @@ void available_chunks_test(std::string const &file_ending) { REQUIRE(ranks[i] == i); } + + auto E_y = it0.meshes["E"]["y"]; + auto width = E_y.getExtent()[1]; + auto first_row = + E_y.prepareLoadStore().extent({1, width}).load().get(); + auto middle_rows = E_y.prepareLoadStore() + .offset({1, 0}) + .extent({3, width}) + .load() + .get(); + auto last_row = E_y.prepareLoadStore().offset({4, 0}).load().get(); + read.flush(); + + for (auto row : [&]() -> std::vector *> { + if constexpr (CanTheMemorySelectionBeReset) + { + return {&first_row, &last_row}; + } + else + { + return {&first_row}; + } + }()) + { + for (size_t i = 0; i < width; ++i) + { + REQUIRE(row->get()[i] == -1); + } + } + for (size_t i = 0; i < width * 3; ++i) + { + size_t row = i / width; + int required_value = row * 3 + (i % 3) + 1; + REQUIRE(middle_rows.get()[i] == required_value); + } } } diff --git a/test/SerialIOTest.cpp b/test/SerialIOTest.cpp index 49aab1db18..ae6297ad5f 100644 --- a/test/SerialIOTest.cpp +++ b/test/SerialIOTest.cpp @@ -942,7 +942,11 @@ inline void constant_scalar(std::string const &file_ending) new unsigned int[6], [](unsigned int const *p) { delete[] p; }); unsigned int e{0}; std::generate(E.get(), E.get() + 6, [&e] { return e++; }); - E_y.storeChunk(std::move(E), {0, 0, 0}, {1, 2, 3}); + // check that const-type unique pointers work in the builder pattern + E_y.prepareLoadStore() + .extent({1, 2, 3}) + 
.withUniquePtr(std::move(E).static_cast_()) + .store(); // store a number of predefined attributes in E Mesh &E_mesh = s.snapshots()[1].meshes["E"]; @@ -1753,13 +1757,17 @@ inline void write_test( auto opaqueTypeDataset = rc.visit(); auto variantTypeDataset = rc.loadChunkVariant(); + auto variantTypeDataset2 = rc.prepareLoadStore().loadVariant().get(); rc.seriesFlush(); - std::visit( - [](auto &&shared_ptr) { - std::cout << "First value in loaded chunk: '" << shared_ptr.get()[0] - << '\'' << std::endl; - }, - variantTypeDataset); + for (auto ptr : {&variantTypeDataset, &variantTypeDataset2}) + { + std::visit( + [](auto &&shared_ptr) { + std::cout << "First value in loaded chunk: '" + << shared_ptr.get()[0] << '\'' << std::endl; + }, + *ptr); + } #ifndef _WIN32 if (test_rank_table) diff --git a/test/python/unittest/API/APITest.py b/test/python/unittest/API/APITest.py index c22551a074..97850c5aaf 100644 --- a/test/python/unittest/API/APITest.py +++ b/test/python/unittest/API/APITest.py @@ -2026,6 +2026,9 @@ def makeAvailableChunksRoundTrip(self, ext): # Cleaner: write.close() # But let's keep this instance to test that that workflow stays # functional. + # Need to delete everything as garbage collection will keep `write` + # alive as long as E_x is around. 
+ del E_x del write read = io.Series( @@ -2209,7 +2212,6 @@ def testError(self): def testCustomGeometries(self): DS = io.Dataset - DT = io.Datatype sample_data = np.ones([10], dtype=np.int_) write = io.Series("../samples/custom_geometries_python.json", @@ -2217,25 +2219,25 @@ def testCustomGeometries(self): E = write.iterations[0].meshes["E"] E.set_attribute("geometry", "other:customGeometry") E_x = E["x"] - E_x.reset_dataset(DS(DT.LONG, [10])) + E_x.reset_dataset(DS(np.dtype(np.int_), [10])) E_x[:] = sample_data B = write.iterations[0].meshes["B"] B.set_geometry("customGeometry") B_x = B["x"] - B_x.reset_dataset(DS(DT.LONG, [10])) + B_x.reset_dataset(DS(np.dtype(np.int_), [10])) B_x[:] = sample_data e_energyDensity = write.iterations[0].meshes["e_energyDensity"] e_energyDensity.set_geometry("other:customGeometry") e_energyDensity_x = e_energyDensity[io.Mesh_Record_Component.SCALAR] - e_energyDensity_x.reset_dataset(DS(DT.LONG, [10])) + e_energyDensity_x.reset_dataset(DS(np.dtype(np.int_), [10])) e_energyDensity_x[:] = sample_data e_chargeDensity = write.iterations[0].meshes["e_chargeDensity"] e_chargeDensity.set_geometry(io.Geometry.other) e_chargeDensity_x = e_chargeDensity[io.Mesh_Record_Component.SCALAR] - e_chargeDensity_x.reset_dataset(DS(DT.LONG, [10])) + e_chargeDensity_x.reset_dataset(DS(np.dtype(np.int_), [10])) e_chargeDensity_x[:] = sample_data self.assertTrue(write) @@ -2341,6 +2343,163 @@ def testScalarHdf5Fields(self): self.assertEqual(loaded_from_scalar, np.array([45])) series_read_again.close() + def testKeepaliveMeshComponent(self): + """Test keepalive for mesh component extraction.""" + for ext in tested_file_extensions: + self.backend_keepalive_mesh_component(ext) + + def testKeepaliveParticlePosition(self): + """Test keepalive for particle position component extraction.""" + for ext in tested_file_extensions: + self.backend_keepalive_particle_position(ext) + + def testKeepaliveParticlePatches(self): + """Test keepalive for particle patches 
component extraction.""" + for ext in tested_file_extensions: + self.backend_keepalive_particle_patches(ext) + + def backend_keepalive_mesh_component(self, file_ending): + """Helper function that tests keepalive + for mesh component extraction.""" + import gc + + filename = "unittest_py_keepalive_mesh." + file_ending + path = filename + + def get_component_only(): + series = io.Series(path, io.Access.create_linear) + backend = series.backend + iteration = series.snapshots()[0] + mesh = iteration.meshes["E"] + component = mesh["x"] + + mesh.axis_labels = ["x", "y"] + component.reset_dataset(io.Dataset(np.dtype("float"), [10, 10])) + + del iteration + del mesh + del series + gc.collect() + + return component, backend + + component, backend = get_component_only() + gc.collect() + + component[:, :] = np.reshape( + np.arange(100, dtype=np.dtype("float")), + [10, 10] + ) + + component.series_flush() + if backend == "ADIOS2": + del component + gc.collect() + + read = io.Series(path, io.Access.read_only) + loaded = read.snapshots()[0].meshes["E"]["x"][:] + read.flush() + np.testing.assert_array_equal( + loaded, + np.reshape(np.arange(100, dtype=np.dtype("float")), [10, 10]) + ) + + def backend_keepalive_particle_position(self, file_ending): + """Helper function that tests keepalive + for particle position component extraction.""" + import gc + + filename = "unittest_py_keepalive_particle." 
+ file_ending + path = filename + num_particles = 100 + + def get_component_only(): + series = io.Series(path, io.Access.create_linear) + backend = series.backend + iteration = series.snapshots()[0] + particles = iteration.particles["electrons"] + position = particles["position"]["x"] + + position.reset_dataset( + io.Dataset(np.dtype("float"), [num_particles])) + + del iteration + del particles + del series + gc.collect() + + return position, backend + + position, backend = get_component_only() + gc.collect() + + position[:] = np.arange(num_particles, dtype=np.dtype("float")) + + position.series_flush() + if backend == "ADIOS2": + del position + gc.collect() + + read = io.Series(path, io.Access.read_only) + loaded = read.snapshots()[0] \ + .particles["electrons"]["position"]["x"][:] + read.flush() + np.testing.assert_array_equal( + loaded, + np.arange(num_particles, dtype=np.dtype("float")) + ) + + def backend_keepalive_particle_patches(self, file_ending): + """Helper function that tests keepalive + for particle patches extraction.""" + import gc + + filename = "unittest_py_keepalive_patches." 
+ file_ending + path = filename + + def get_component_only(): + series = io.Series(path, io.Access.create_linear) + backend = series.backend + iteration = series.snapshots()[0] + particles = iteration.particles["electrons"] + + dset = io.Dataset(np.dtype(np.float32), [30]) + position_x = particles["position"]["x"] + position_x.reset_dataset(dset) + position_x[:] = np.arange(30, dtype=np.float32) + + dset = io.Dataset(np.dtype("uint64"), [2]) + num_particles_comp = particles.particle_patches["numParticles"] + num_particles_comp.reset_dataset(dset) + num_particles_comp.store(0, np.uint64(10)) + num_particles_comp.store(1, np.uint64(20)) + + del iteration + del particles + del series + gc.collect() + + return num_particles_comp, backend + + component, backend = get_component_only() + gc.collect() + + component.store(0, np.uint64(50)) + + component.series_flush() + if backend == "ADIOS2": + del component + gc.collect() + + read = io.Series(path, io.Access.read_only) + loaded = read.snapshots()[0] \ + .particles["electrons"].particle_patches["numParticles"].load() + read.flush() + np.testing.assert_array_equal( + loaded[0], + np.uint64(50) + ) + if __name__ == '__main__': unittest.main()