
Commit 33e698a

Merge pull request #300 from gerlero/files
Update serialization
gerlero authored Dec 5, 2024
2 parents aeec6c2 + dbcb83c

Showing 3 changed files with 39 additions and 45 deletions.
foamlib/_files/_files.py (6 changes: 3 additions & 3 deletions)
@@ -196,7 +196,7 @@ def __setitem__(self, keywords: str | tuple[str, ...] | None, data: Entry) -> None:
         elif not isinstance(keywords, tuple):
             keywords = (keywords,)
 
-        if keywords and not isinstance(normalize(keywords[-1], kind=Kind.KEYWORD), str):
+        if keywords and not isinstance(normalize(keywords[-1]), str):
             msg = f"Invalid keyword: {keywords[-1]}"
             raise ValueError(msg)

@@ -304,7 +304,7 @@ def __setitem__(self, keywords: str | tuple[str, ...] | None, data: Entry) -> None:
                 ...,
                 before
                 + indentation
-                + dumps(keywords[-1], kind=Kind.KEYWORD)
+                + dumps(keywords[-1])
                 + b"\n"
                 + indentation
                 + b"{\n"
@@ -322,7 +322,7 @@ def __setitem__(self, keywords: str | tuple[str, ...] | None, data: Entry) -> None:
                 normalize(data, kind=kind),
                 before
                 + indentation
-                + dumps(keywords[-1], kind=Kind.KEYWORD)
+                + dumps(keywords[-1])
                 + b" "
                 + dumps(data, kind=kind)
                 + b";"
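All three hunks above make the same substitution: dictionary keywords are now serialized through dumps()'s default path rather than a dedicated Kind.KEYWORD. A minimal sketch of the resulting behavior, assuming the private helpers work as shown in this diff (dumps returns bytes; the exact output values here are assumed for illustration, not verified):

# Sketch only: foamlib._files._serialization is a private module, and the
# byte outputs shown in the comments are assumptions based on this diff.
from foamlib._files._serialization import Kind, dumps

# A (keyword, value) pair serializes as b"keyword value;" per the
# SINGLE_ENTRY branch of dumps() shown in _serialization.py below.
print(dumps(("startTime", 0), kind=Kind.SINGLE_ENTRY))  # b'startTime 0;' (assumed)

# After this commit the keyword itself takes the same default path as
# any other string; no keyword-specific Kind remains.
print(dumps("startTime"))  # b'startTime' (assumed)
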
foamlib/_files/_parsing.py (66 changes: 33 additions & 33 deletions)
@@ -40,7 +40,7 @@
 from ._types import Data, Dimensioned, DimensionSet, File
 
 
-class Tensor(Enum):
+class _Tensor(Enum):
     SCALAR = auto()
     VECTOR = auto()
     SYMM_TENSOR = auto()
@@ -49,25 +49,25 @@ class Tensor(Enum):
     @property
     def shape(self) -> tuple[int, ...]:
         return {
-            Tensor.SCALAR: (),
-            Tensor.VECTOR: (3,),
-            Tensor.SYMM_TENSOR: (6,),
-            Tensor.TENSOR: (9,),
+            _Tensor.SCALAR: (),
+            _Tensor.VECTOR: (3,),
+            _Tensor.SYMM_TENSOR: (6,),
+            _Tensor.TENSOR: (9,),
         }[self]

     @property
     def size(self) -> int:
         return {
-            Tensor.SCALAR: 1,
-            Tensor.VECTOR: 3,
-            Tensor.SYMM_TENSOR: 6,
-            Tensor.TENSOR: 9,
+            _Tensor.SCALAR: 1,
+            _Tensor.VECTOR: 3,
+            _Tensor.SYMM_TENSOR: 6,
+            _Tensor.TENSOR: 9,
         }[self]

     def pattern(self, *, ignore: Regex | None = None) -> str:
         float_pattern = r"(?i:[+-]?(?:(?:\d+\.?\d*(?:e[+-]?\d+)?)|nan|inf(?:inity)?))"
 
-        if self == Tensor.SCALAR:
+        if self == _Tensor.SCALAR:
             return float_pattern
 
         ignore_pattern = (
@@ -77,7 +77,7 @@ def pattern(self, *, ignore: Regex | None = None) -> str:
         return rf"\((?:{ignore_pattern})?(?:{float_pattern}{ignore_pattern}){{{self.size - 1}}}{float_pattern}(?:{ignore_pattern})?\)"
 
     def parser(self) -> ParserElement:
-        if self == Tensor.SCALAR:
+        if self == _Tensor.SCALAR:
             return common.ieee_float
 
         return (
@@ -88,10 +88,10 @@

     def __str__(self) -> str:
         return {
-            Tensor.SCALAR: "scalar",
-            Tensor.VECTOR: "vector",
-            Tensor.SYMM_TENSOR: "symmTensor",
-            Tensor.TENSOR: "tensor",
+            _Tensor.SCALAR: "scalar",
+            _Tensor.VECTOR: "vector",
+            _Tensor.SYMM_TENSOR: "symmTensor",
+            _Tensor.TENSOR: "tensor",
         }[self]


@@ -115,7 +115,7 @@ def _list_of(entry: ParserElement) -> ParserElement:


 def _parse_ascii_field(
-    s: str, tensor_kind: Tensor, *, ignore: Regex | None
+    s: str, tensor_kind: _Tensor, *, ignore: Regex | None
 ) -> list[float] | list[list[float]]:
     values = [
         float(v)
@@ -125,7 +125,7 @@ def _parse_ascii_field(
         .split()
     ]
 
-    if tensor_kind == Tensor.SCALAR:
+    if tensor_kind == _Tensor.SCALAR:
         return values

     return [
@@ -135,15 +135,15 @@


 def _unpack_binary_field(
-    b: bytes, tensor_kind: Tensor, *, length: int
+    b: bytes, tensor_kind: _Tensor, *, length: int
 ) -> list[float] | list[list[float]]:
     float_size = len(b) / tensor_kind.size / length
     assert float_size in (4, 8)
 
     arr = array.array("f" if float_size == 4 else "d", b)
     values = arr.tolist()
 
-    if tensor_kind == Tensor.SCALAR:
+    if tensor_kind == _Tensor.SCALAR:
         return values
 
     return [
@@ -153,14 +153,14 @@


 def _tensor_list(
-    tensor_kind: Tensor | None = None, *, ignore: Regex | None = None
+    tensor_kind: _Tensor | None = None, *, ignore: Regex | None = None
 ) -> ParserElement:
     if tensor_kind is None:
         return (
-            _tensor_list(Tensor.SCALAR, ignore=ignore)
-            | _tensor_list(Tensor.VECTOR, ignore=ignore)
-            | _tensor_list(Tensor.SYMM_TENSOR, ignore=ignore)
-            | _tensor_list(Tensor.TENSOR, ignore=ignore)
+            _tensor_list(_Tensor.SCALAR, ignore=ignore)
+            | _tensor_list(_Tensor.VECTOR, ignore=ignore)
+            | _tensor_list(_Tensor.SYMM_TENSOR, ignore=ignore)
+            | _tensor_list(_Tensor.TENSOR, ignore=ignore)
         )

     tensor_pattern = tensor_kind.pattern(ignore=ignore)
@@ -274,10 +274,10 @@ def _keyword_entry_of(
Literal("[").suppress() + common.number[0, 7] + Literal("]").suppress()
).set_parse_action(lambda tks: DimensionSet(*tks))
_TENSOR = (
Tensor.SCALAR.parser()
| Tensor.VECTOR.parser()
| Tensor.SYMM_TENSOR.parser()
| Tensor.TENSOR.parser()
_Tensor.SCALAR.parser()
| _Tensor.VECTOR.parser()
| _Tensor.SYMM_TENSOR.parser()
| _Tensor.TENSOR.parser()
)
 _IDENTIFIER = Combine(
     Word(_IDENTCHARS, _IDENTBODYCHARS, exclude_chars="()")
@@ -289,15 +289,15 @@ def _keyword_entry_of(
 _FIELD = (Keyword("uniform", _IDENTBODYCHARS).suppress() + _TENSOR) | (
     Keyword("nonuniform", _IDENTBODYCHARS).suppress() + _tensor_list(ignore=_COMMENT)
 )
-TOKEN = dbl_quoted_string | _IDENTIFIER
+_TOKEN = dbl_quoted_string | _IDENTIFIER
 DATA = Forward()
-_KEYWORD_ENTRY = _keyword_entry_of(TOKEN | _list_of(_IDENTIFIER), DATA)
-_DICT = _dict_of(TOKEN, DATA)
+_KEYWORD_ENTRY = _keyword_entry_of(_TOKEN | _list_of(_IDENTIFIER), DATA)
+_DICT = _dict_of(_TOKEN, DATA)
 _DATA_ENTRY = Forward()
 _LIST_ENTRY = _DICT | _KEYWORD_ENTRY | _DATA_ENTRY
 _LIST = _list_of(_LIST_ENTRY)
 _NUMBER = common.signed_integer ^ common.ieee_float
-_DATA_ENTRY <<= _FIELD | _LIST | _DIMENSIONED | _DIMENSIONS | _NUMBER | _SWITCH | TOKEN
+_DATA_ENTRY <<= _FIELD | _LIST | _DIMENSIONED | _DIMENSIONS | _NUMBER | _SWITCH | _TOKEN

 DATA <<= (
     _DATA_ENTRY[1, ...]
@@ -307,7 +307,7 @@ def _keyword_entry_of(
 )

 _LOCATED_DICTIONARY = Group(
-    _keyword_entry_of(TOKEN, Opt(DATA, default=""), located=True)
+    _keyword_entry_of(_TOKEN, Opt(DATA, default=""), located=True)
 )[...]
 _LOCATED_DATA = Group(Located(DATA.copy().add_parse_action(lambda tks: ["", tks[0]])))

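The _parsing.py changes are a pure rename: Tensor becomes _Tensor and TOKEN becomes _TOKEN, marking them as module-private; no parsing behavior changes. For reference, the float pattern visible in _Tensor.pattern() already accepts the signed, exponent, nan, and inf spellings an OpenFOAM field can contain, which a standalone check confirms (plain re, no foamlib imports needed):

import re

# Float pattern copied verbatim from _Tensor.pattern() in the diff above.
FLOAT = r"(?i:[+-]?(?:(?:\d+\.?\d*(?:e[+-]?\d+)?)|nan|inf(?:inity)?))"

# Each of these is a single valid OpenFOAM scalar token.
for s in ("1", "-2.5", "1e-3", "+4.E+2", "NaN", "Infinity"):
    assert re.fullmatch(FLOAT, s), s

# Non-scalar kinds wrap self.size such floats in parentheses, so a
# vector entry like "(1 2 3)" is matched by _Tensor.VECTOR.pattern(),
# not by the bare scalar pattern.
assert not re.fullmatch(FLOAT, "(1 2 3)")
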
foamlib/_files/_serialization.py (12 changes: 3 additions & 9 deletions)
@@ -11,7 +11,7 @@
 else:
     from typing import Mapping, Sequence
 
-from ._parsing import DATA, TOKEN
+from ._parsing import DATA
 from ._types import Data, Dimensioned, DimensionSet, Entry
 from ._util import is_sequence

@@ -25,7 +25,6 @@

 class Kind(Enum):
     DEFAULT = auto()
-    KEYWORD = auto()
     SINGLE_ENTRY = auto()
     ASCII_FIELD = auto()
     DOUBLE_PRECISION_BINARY_FIELD = auto()
@@ -61,7 +60,7 @@ def normalize(data: Entry, *, kind: Kind = Kind.DEFAULT) -> Entry:

     if isinstance(data, tuple) and kind == Kind.SINGLE_ENTRY and len(data) == 2:
         k, v = data
-        return (normalize(k, kind=Kind.KEYWORD), normalize(v))
+        return (normalize(k), normalize(v))
 
     if is_sequence(data) and (kind == Kind.SINGLE_ENTRY or not isinstance(data, tuple)):
         return [normalize(d, kind=Kind.SINGLE_ENTRY) for d in data]
@@ -72,11 +71,6 @@ def normalize(data: Entry, *, kind: Kind = Kind.DEFAULT) -> Entry:
         return Dimensioned(value, data.dimensions, data.name)
 
     if isinstance(data, str):
-        if kind == Kind.KEYWORD:
-            data = TOKEN.parse_string(data, parse_all=True)[0]
-            assert isinstance(data, str)
-            return data
-
         return cast(Data, DATA.parse_string(data, parse_all=True)[0])

     if isinstance(
@@ -105,7 +99,7 @@ def dumps(

     if isinstance(data, tuple) and kind == Kind.SINGLE_ENTRY and len(data) == 2:
         k, v = data
-        ret = dumps(k, kind=Kind.KEYWORD) + b" " + dumps(v)
+        ret = dumps(k) + b" " + dumps(v)
         if not isinstance(v, Mapping):
             ret += b";"
         return ret
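With keywords handled by the default path, Kind.KEYWORD and the TOKEN import have no remaining users in _serialization.py, so both are deleted. A sketch of the post-change normalize() behavior for a (keyword, value) pair, assuming this private API behaves as the hunks above show (the parsed result in the comment is an assumption):

# Sketch only: normalize() is private API; the result shown is assumed.
from foamlib._files._serialization import Kind, normalize

# Under Kind.SINGLE_ENTRY a two-tuple is a (keyword, value) entry; both
# halves now take the same default normalization, with strings parsed by
# the general DATA grammar rather than the removed keyword-only TOKEN one.
print(normalize(("nu", "1e-05"), kind=Kind.SINGLE_ENTRY))  # ('nu', 1e-05) (assumed)
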
