From b3a4947b3d470aaf986166f9322d2303c6afa69b Mon Sep 17 00:00:00 2001 From: Seperman Date: Fri, 26 Aug 2022 09:25:39 -0700 Subject: [PATCH] Adding include_paths to the docs --- conftest.py | 22 ++++++++++ deepdiff/deephash.py | 10 +++-- deepdiff/delta.py | 9 ++-- deepdiff/diff.py | 56 ++++++++++++++++-------- deepdiff/helper.py | 44 +++++++++---------- deepdiff/serialization.py | 86 ++++++++++++++++++++++++++++++++++++- docs/diff_doc.rst | 4 ++ docs/exclude_paths.rst | 28 ++++++++++++ tests/test_cache.py | 3 +- tests/test_delta.py | 33 +++++++++++++- tests/test_diff_text.py | 32 ++++++++++++++ tests/test_ignore_order.py | 4 ++ tests/test_serialization.py | 27 ++++++++---- 13 files changed, 298 insertions(+), 60 deletions(-) diff --git a/conftest.py b/conftest.py index 0206df11..263b1296 100644 --- a/conftest.py +++ b/conftest.py @@ -46,6 +46,28 @@ def nested_a_result(): return json.load(the_file) +@pytest.fixture(scope='class') +def nested_a_affected_paths(): + return { + 'root[0][0][2][0][1]', 'root[0][1][1][1][5]', 'root[0][2][1]', + 'root[1][1][2][0][1]', 'root[1][2][0]', 'root[1][2][0][1][5]', + 'root[1][0][2][2][3]', 'root[0][0][1][0][0]', 'root[0][1][0][2][3]', + 'root[0][3][0][2][3]', 'root[0][3][1][0][2]', 'root[1][1][1][0][0]', + 'root[1][0][1][2][1]', 'root[1][0][2][1][2]', 'root[1][3][0][2][3]', + 'root[1][3][1][0][2]', 'root[1][2][0][2]', 'root[1][0][2][0][1]', + 'root[0][3][2][0][1]', 'root[0][3][2][1][0]', 'root[1][3][1][1]', + 'root[1][2][1][1][0]', 'root[1][2][1][0]', 'root[1][0][0][0][2]', + 'root[1][3][2][1][0]', 'root[1][0][0][1][1]', 'root[0][1][2][0]', + 'root[0][1][2][1][0]', 'root[0][2][0][1][2]', 'root[1][3][0][1]', + 'root[0][3][1][1]', 'root[1][2][0][0][2]', 'root[1][3][2][0][1]', + 'root[1][0][1][0]', 'root[1][2][0][0][0]', 'root[1][0][0][0][1]', + 'root[1][3][2][2][2]', 'root[0][1][1][2][1]', 'root[0][1][1][2][2]', + 'root[0][2][0][0][2]', 'root[0][2][0][0][3]', 'root[0][3][1][2][1]', + 'root[0][3][1][2][2]', 'root[1][2][1][2][3]', 'root[1][0][0][1][2]', + 'root[1][0][0][2][1]', 'root[1][3][1][2][1]', 'root[1][3][1][2][2]' + } + + @pytest.fixture(scope='class') def nested_b_t1(): with open(os.path.join(FIXTURES_DIR, 'nested_b_t1.json')) as the_file: diff --git a/deepdiff/deephash.py b/deepdiff/deephash.py index 0158c3ae..f657c546 100644 --- a/deepdiff/deephash.py +++ b/deepdiff/deephash.py @@ -9,7 +9,7 @@ convert_item_or_items_into_compiled_regexes_else_none, get_id, type_is_subclass_of_type_group, type_in_type_group, number_to_string, datetime_normalize, KEY_TO_VAL_STR, short_repr, - get_truncate_datetime, dict_) + get_truncate_datetime, dict_, add_root_to_paths) from deepdiff.base import Base logger = logging.getLogger(__name__) @@ -123,6 +123,7 @@ def __init__(self, hashes=None, exclude_types=None, exclude_paths=None, + include_paths=None, exclude_regex_paths=None, hasher=None, ignore_repetition=True, @@ -146,7 +147,7 @@ def __init__(self, raise ValueError( ("The following parameter(s) are not valid: %s\n" "The valid parameters are obj, hashes, exclude_types, significant_digits, truncate_datetime," - "exclude_paths, exclude_regex_paths, hasher, ignore_repetition, " + "exclude_paths, include_paths, exclude_regex_paths, hasher, ignore_repetition, " "number_format_notation, apply_hash, ignore_type_in_groups, ignore_string_type_changes, " "ignore_numeric_type_changes, ignore_type_subclasses, ignore_string_case " "number_to_string_func, ignore_private_variables, parent " @@ -160,7 +161,8 @@ def __init__(self, exclude_types = set() if exclude_types is None else 
set(exclude_types) self.exclude_types_tuple = tuple(exclude_types) # we need tuple for checking isinstance self.ignore_repetition = ignore_repetition - self.exclude_paths = convert_item_or_items_into_set_else_none(exclude_paths) + self.exclude_paths = add_root_to_paths(convert_item_or_items_into_set_else_none(exclude_paths)) + self.include_paths = add_root_to_paths(convert_item_or_items_into_set_else_none(include_paths)) self.exclude_regex_paths = convert_item_or_items_into_compiled_regexes_else_none(exclude_regex_paths) self.hasher = default_hasher if hasher is None else hasher self.hashes[UNPROCESSED_KEY] = [] @@ -327,6 +329,8 @@ def _skip_this(self, obj, parent): skip = False if self.exclude_paths and parent in self.exclude_paths: skip = True + if self.include_paths and parent not in self.include_paths: + skip = True elif self.exclude_regex_paths and any( [exclude_regex_path.search(parent) for exclude_regex_path in self.exclude_regex_paths]): skip = True diff --git a/deepdiff/delta.py b/deepdiff/delta.py index 6a94f15b..2a65be7d 100644 --- a/deepdiff/delta.py +++ b/deepdiff/delta.py @@ -7,7 +7,8 @@ from deepdiff.helper import ( strings, short_repr, numbers, np_ndarray, np_array_factory, numpy_dtypes, get_doc, - not_found, numpy_dtype_string_to_type, dict_) + not_found, numpy_dtype_string_to_type, dict_, +) from deepdiff.path import _path_to_elements, _get_nested_obj, GET, GETATTR from deepdiff.anyset import AnySet @@ -70,11 +71,11 @@ def __init__( serializer=pickle_dump, verify_symmetry=False, ): - if 'safe_to_import' not in set(deserializer.__code__.co_varnames): + if hasattr(deserializer, '__code__') and 'safe_to_import' in set(deserializer.__code__.co_varnames): + _deserializer = deserializer + else: def _deserializer(obj, safe_to_import=None): return deserializer(obj) - else: - _deserializer = deserializer if diff is not None: if isinstance(diff, DeepDiff): diff --git a/deepdiff/diff.py b/deepdiff/diff.py index 363b1f53..d2775c25 100755 --- a/deepdiff/diff.py +++ b/deepdiff/diff.py @@ -21,15 +21,15 @@ type_is_subclass_of_type_group, type_in_type_group, get_doc, number_to_string, datetime_normalize, KEY_TO_VAL_STR, booleans, np_ndarray, get_numpy_ndarray_rows, OrderedSetPlus, RepeatedTimer, - TEXT_VIEW, TREE_VIEW, DELTA_VIEW, detailed__dict__, + TEXT_VIEW, TREE_VIEW, DELTA_VIEW, detailed__dict__, add_root_to_paths, np, get_truncate_datetime, dict_, CannotCompare, ENUM_IGNORE_KEYS) from deepdiff.serialization import SerializationMixin from deepdiff.distance import DistanceMixin from deepdiff.model import ( RemapDict, ResultDict, TextResult, TreeResult, DiffLevel, - DictRelationship, AttributeRelationship, + DictRelationship, AttributeRelationship, REPORT_KEYS, SubscriptableIterableRelationship, NonSubscriptableIterableRelationship, - SetRelationship, NumpyArrayRelationship, CUSTOM_FIELD) + SetRelationship, NumpyArrayRelationship, CUSTOM_FIELD, PrettyOrderedSet, ) from deepdiff.deephash import DeepHash, combine_hashes_lists from deepdiff.base import Base from deepdiff.lfucache import LFUCache, DummyLFU @@ -85,6 +85,7 @@ def _report_progress(_stats, progress_logger, duration): DEEPHASH_PARAM_KEYS = ( 'exclude_types', 'exclude_paths', + 'include_paths', 'exclude_regex_paths', 'hasher', 'significant_digits', @@ -119,6 +120,7 @@ def __init__(self, exclude_obj_callback=None, exclude_obj_callback_strict=None, exclude_paths=None, + include_paths=None, exclude_regex_paths=None, exclude_types=None, get_deep_distance=False, @@ -157,7 +159,7 @@ def __init__(self, raise ValueError(( "The following 
parameter(s) are not valid: %s\n" "The valid parameters are ignore_order, report_repetition, significant_digits, " - "number_format_notation, exclude_paths, exclude_types, exclude_regex_paths, ignore_type_in_groups, " + "number_format_notation, exclude_paths, include_paths, exclude_types, exclude_regex_paths, ignore_type_in_groups, " "ignore_string_type_changes, ignore_numeric_type_changes, ignore_type_subclasses, truncate_datetime, " "ignore_private_variables, ignore_nan_inequality, number_to_string_func, verbose_level, " "view, hasher, hashes, max_passes, max_diffs, " @@ -188,7 +190,8 @@ def __init__(self, ignore_numeric_type_changes=ignore_numeric_type_changes, ignore_type_subclasses=ignore_type_subclasses) self.report_repetition = report_repetition - self.exclude_paths = convert_item_or_items_into_set_else_none(exclude_paths) + self.exclude_paths = add_root_to_paths(convert_item_or_items_into_set_else_none(exclude_paths)) + self.include_paths = add_root_to_paths(convert_item_or_items_into_set_else_none(include_paths)) self.exclude_regex_paths = convert_item_or_items_into_compiled_regexes_else_none(exclude_regex_paths) self.exclude_types = set(exclude_types) if exclude_types else None self.exclude_types_tuple = tuple(exclude_types) if exclude_types else None # we need tuple for checking isinstance @@ -431,21 +434,24 @@ def _skip_this(self, level): Check whether this comparison should be skipped because one of the objects to compare meets exclusion criteria. :rtype: bool """ + level_path = level.path() skip = False - if self.exclude_paths and level.path() in self.exclude_paths: + if self.exclude_paths and level_path in self.exclude_paths: + skip = True + if self.include_paths and level_path not in self.include_paths: skip = True elif self.exclude_regex_paths and any( - [exclude_regex_path.search(level.path()) for exclude_regex_path in self.exclude_regex_paths]): + [exclude_regex_path.search(level_path) for exclude_regex_path in self.exclude_regex_paths]): skip = True elif self.exclude_types_tuple and \ (isinstance(level.t1, self.exclude_types_tuple) or isinstance(level.t2, self.exclude_types_tuple)): skip = True elif self.exclude_obj_callback and \ - (self.exclude_obj_callback(level.t1, level.path()) or self.exclude_obj_callback(level.t2, level.path())): + (self.exclude_obj_callback(level.t1, level_path) or self.exclude_obj_callback(level.t2, level_path)): skip = True elif self.exclude_obj_callback_strict and \ - (self.exclude_obj_callback_strict(level.t1, level.path()) and - self.exclude_obj_callback_strict(level.t2, level.path())): + (self.exclude_obj_callback_strict(level.t1, level_path) and + self.exclude_obj_callback_strict(level.t2, level_path)): skip = True return skip @@ -477,12 +483,12 @@ def _get_clean_to_keys_mapping(self, keys, level): return result def _diff_dict(self, - level, - parents_ids=frozenset([]), - print_as_attribute=False, - override=False, - override_t1=None, - override_t2=None): + level, + parents_ids=frozenset([]), + print_as_attribute=False, + override=False, + override_t1=None, + override_t2=None): """Difference of 2 dictionaries""" if override: # for special stuff like custom objects and named tuples we receive preprocessed t1 and t2 @@ -1097,7 +1103,7 @@ def get_other_pair(hash_value, in_t1=True): old_indexes=t1_indexes, new_indexes=t2_indexes) self._report_result('repetition_change', - repetition_change_level) + repetition_change_level) else: for hash_value in hashes_added: @@ -1423,6 +1429,22 @@ def get_stats(self): """ return self._stats + @property + 
def affected_paths(self): + """ + Get the list of paths that were affected. + Whether a value was changed or they were added or removed. + """ + result = OrderedSet() + for key in REPORT_KEYS: + value = self.get(key) + if value: + if isinstance(value, PrettyOrderedSet): + result |= value + else: + result |= OrderedSet(value.keys()) + return result + if __name__ == "__main__": # pragma: no cover import doctest diff --git a/deepdiff/helper.py b/deepdiff/helper.py index f5a6bc88..ab1a36e8 100644 --- a/deepdiff/helper.py +++ b/deepdiff/helper.py @@ -8,7 +8,7 @@ import time from ast import literal_eval from decimal import Decimal, localcontext -from collections import namedtuple, OrderedDict +from collections import namedtuple from itertools import repeat from ordered_set import OrderedSet from threading import Timer @@ -220,28 +220,6 @@ class indexed_set(set): """ -JSON_CONVERTOR = { - Decimal: float, - OrderedSet: list, - type: lambda x: x.__name__, - bytes: lambda x: x.decode('utf-8') -} - - -def json_convertor_default(default_mapping=None): - _convertor_mapping = JSON_CONVERTOR.copy() - if default_mapping: - _convertor_mapping.update(default_mapping) - - def _convertor(obj): - for original_type, convert_to in _convertor_mapping.items(): - if isinstance(obj, original_type): - return convert_to(obj) - raise TypeError('We do not know how to convert {} of type {} for json serialization. Please pass the default_mapping parameter with proper mapping of the object to a basic python type.'.format(obj, type(obj))) - - return _convertor - - def add_to_frozen_set(parents_ids, item_id): return parents_ids | {item_id} @@ -257,6 +235,26 @@ def convert_item_or_items_into_set_else_none(items): return items +def add_root_to_paths(paths): + """ + Sometimes the users want to just pass + [key] instead of root[key] for example. + Here we automatically add all sorts of variations that might match + the path they were supposed to pass. + """ + if paths is None: + return + result = OrderedSet() + for path in paths: + if path.startswith('root'): + result.add(path) + else: + result.add(f"root.{path}") + result.add(f"root[{path}]") + result.add(f"root['{path}']") + return result + + RE_COMPILED_TYPE = type(re.compile('')) diff --git a/deepdiff/serialization.py b/deepdiff/serialization.py index 796c0fcb..e66270b7 100644 --- a/deepdiff/serialization.py +++ b/deepdiff/serialization.py @@ -1,8 +1,8 @@ -import json import pickle import sys import io import os +import json import logging import re # NOQA import builtins # NOQA @@ -23,8 +23,9 @@ except ImportError: # pragma: no cover. clevercsv = None # pragma: no cover. 
from copy import deepcopy +from functools import partial from collections.abc import Mapping -from deepdiff.helper import (strings, json_convertor_default, get_type, TEXT_VIEW) +from deepdiff.helper import (strings, get_type, TEXT_VIEW) from deepdiff.model import DeltaResult logger = logging.getLogger(__name__) @@ -76,6 +77,34 @@ class UnsupportedFormatErr(TypeError): } +TYPE_STR_TO_TYPE = { + 'range': range, + 'complex': complex, + 'set': set, + 'frozenset': frozenset, + 'slice': slice, + 'str': str, + 'bytes': bytes, + 'list': list, + 'tuple': tuple, + 'int': int, + 'float': float, + 'dict': dict, + 'bool': bool, + 'bin': bin, + 'None': None, + 'NoneType': None, + 'datetime': datetime.datetime, + 'time': datetime.time, + 'timedelta': datetime.timedelta, + 'Decimal': decimal.Decimal, + 'OrderedSet': ordered_set.OrderedSet, + 'namedtuple': collections.namedtuple, + 'OrderedDict': collections.OrderedDict, + 'Pattern': re.Pattern, +} + + class ModuleNotFoundError(ImportError): """ Raised when the module is not found in sys.modules @@ -465,3 +494,56 @@ def _save_content(content, path, file_type, keep_backup=True): raise UnsupportedFormatErr('Only json, yaml, toml, csv, tsv and pickle are supported.\n' f' The {file_type} extension is not known.') return content + + +JSON_CONVERTOR = { + decimal.Decimal: float, + ordered_set.OrderedSet: list, + type: lambda x: x.__name__, + bytes: lambda x: x.decode('utf-8'), + datetime.datetime: lambda x: x.isoformat(), +} + + +def json_convertor_default(default_mapping=None): + if default_mapping: + _convertor_mapping = JSON_CONVERTOR.copy() + _convertor_mapping.update(default_mapping) + else: + _convertor_mapping = JSON_CONVERTOR + + def _convertor(obj): + for original_type, convert_to in _convertor_mapping.items(): + if isinstance(obj, original_type): + return convert_to(obj) + raise TypeError('We do not know how to convert {} of type {} for json serialization. Please pass the default_mapping parameter with proper mapping of the object to a basic python type.'.format(obj, type(obj))) + + return _convertor + + +class JSONDecoder(json.JSONDecoder): + + def __init__(self, *args, **kwargs): + json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs) + + def object_hook(self, obj): + if 'old_type' in obj and 'new_type' in obj: + for type_key in ('old_type', 'new_type'): + type_str = obj[type_key] + obj[type_key] = TYPE_STR_TO_TYPE.get(type_str, type_str) + + return obj + + +def json_dumps(item, default_mapping=None, **kwargs): + """ + Dump json with extra details that are not normally json serializable + + Note: I tried to replace json with orjson for its speed. It does work + but the output it makes is a byte object and Postgres couldn't directly use it without + encoding to str. So I switched back to json. + """ + return json.dumps(item, default=json_convertor_default(default_mapping=default_mapping), **kwargs) + + +json_loads = partial(json.loads, cls=JSONDecoder) diff --git a/docs/diff_doc.rst b/docs/diff_doc.rst index 1958630e..f7a56ebd 100644 --- a/docs/diff_doc.rst +++ b/docs/diff_doc.rst @@ -43,6 +43,10 @@ exclude_paths: list, default = None :ref:`exclude_paths_label` List of paths to exclude from the report. If only one item, you can path it as a string. +include_paths: list, default = None + :ref:`include_paths_label` + List of the only paths to include in the report. If only one item, you can path it as a string. 
+
 exclude_regex_paths: list, default = None
     :ref:`exclude_regex_paths_label`
     List of string regex paths or compiled regex paths objects to exclude from the report. If only one item, you can pass it as a string or regex compiled object.
diff --git a/docs/exclude_paths.rst b/docs/exclude_paths.rst
index 2cc501ef..d7eda88c 100644
--- a/docs/exclude_paths.rst
+++ b/docs/exclude_paths.rst
@@ -16,6 +16,34 @@ Example
     >>> print (DeepDiff(t1, t2, exclude_paths=["root['ingredients']", "root['ingredients2']"])) # multiple items pass as a list or a set.
     {}
 
+Also, for keys at the root level you don't have to pass the full "root['key']" path. You can instead just pass the key:
+
+Example
+    >>> t1 = {"for life": "vegan", "ingredients": ["no meat", "no eggs", "no dairy"]}
+    >>> t2 = {"for life": "vegan", "ingredients": ["veggies", "tofu", "soy sauce"]}
+    >>> print (DeepDiff(t1, t2, exclude_paths="ingredients")) # one item can be passed as a string
+    {}
+    >>> print (DeepDiff(t1, t2, exclude_paths=["ingredients", "ingredients2"])) # multiple items pass as a list or a set.
+    {}
+
+
+.. _include_paths_label:
+
+Include Paths
+=============
+
+Only include the given parts of your object tree in the comparison.
+Pass include_paths a set or list of paths to limit the diff to only those paths. If there is only one item, you can pass it as a string instead of a list.
+
+Example
+    >>> t1 = {"for life": "vegan", "ingredients": ["no meat", "no eggs", "no dairy"]}
+    >>> t2 = {"for life": "vegan", "ingredients": ["veggies", "tofu", "soy sauce"]}
+    >>> print (DeepDiff(t1, t2, include_paths="root['for life']")) # one item can be passed as a string
+    {}
+    >>> print (DeepDiff(t1, t2, include_paths=["for life", "ingredients2"])) # multiple items pass as a list or a set. For root keys, "for life" works in place of the full "root['for life']" path.
+    {}
+
+
 ..
_exclude_regex_paths_label: Exclude Regex Paths diff --git a/tests/test_cache.py b/tests/test_cache.py index 9a6ad59b..b6cd01b6 100644 --- a/tests/test_cache.py +++ b/tests/test_cache.py @@ -7,7 +7,7 @@ class TestCache: @pytest.mark.slow - def test_cache_deeply_nested_a1(self, nested_a_t1, nested_a_t2, nested_a_result): + def test_cache_deeply_nested_a1(self, nested_a_t1, nested_a_t2, nested_a_result, nested_a_affected_paths): diff = DeepDiff(nested_a_t1, nested_a_t2, ignore_order=True, cache_size=5000, cache_tuning_sample_size=280, @@ -25,6 +25,7 @@ def test_cache_deeply_nested_a1(self, nested_a_t1, nested_a_t2, nested_a_result) assert nested_a_result == diff diff_of_diff = DeepDiff(nested_a_result, diff.to_dict(), ignore_order=False) assert not diff_of_diff + assert nested_a_affected_paths == diff.affected_paths @pytest.mark.slow def test_cache_deeply_nested_a2(self, nested_a_t1, nested_a_t2, nested_a_result): diff --git a/tests/test_delta.py b/tests/test_delta.py index 27a37c3d..4bb7329f 100644 --- a/tests/test_delta.py +++ b/tests/test_delta.py @@ -16,7 +16,8 @@ INVALID_ACTION_WHEN_CALLING_SIMPLE_DELETE_ELEM, INDEXES_NOT_FOUND_WHEN_IGNORE_ORDER, FAIL_TO_REMOVE_ITEM_IGNORE_ORDER_MSG, UNABLE_TO_GET_PATH_MSG, NOT_VALID_NUMPY_TYPE) from deepdiff.serialization import ( - DELTA_IGNORE_ORDER_NEEDS_REPETITION_REPORT, DELTA_ERROR_WHEN_GROUP_BY + DELTA_IGNORE_ORDER_NEEDS_REPETITION_REPORT, DELTA_ERROR_WHEN_GROUP_BY, + json_dumps, json_loads, ) from tests import PicklableClass, parameterize_cases, CustomClass, CustomClass2 @@ -24,6 +25,35 @@ class TestBasicsOfDelta: + def test_from_null_delta_json(self): + t1 = None + t2 = [1, 2, 3, 5] + diff = DeepDiff(t1, t2) + delta = Delta(diff, serializer=json_dumps) + dump = delta.dumps() + delta2 = Delta(dump, deserializer=json_loads) + assert delta2 + t1 == t2 + assert t1 + delta2 == t2 + + def test_to_null_delta1_json(self): + t1 = 1 + t2 = None + diff = DeepDiff(t1, t2) + delta = Delta(diff, serializer=json_dumps) + dump = delta.dumps() + delta2 = Delta(dump, deserializer=json_loads) + assert delta2 + t1 == t2 + assert t1 + delta2 == t2 + + def test_to_null_delta2_json(self): + t1 = [1, 2, 3, 5] + t2 = None + diff = DeepDiff(t1, t2) + delta = Delta(diff) + + assert delta + t1 == t2 + assert t1 + delta == t2 + def test_list_difference_add_delta(self): t1 = [1, 2] t2 = [1, 2, 3, 5] @@ -1145,6 +1175,7 @@ def test_delta_view_and_to_delta_dict_are_equal_when_parameteres_passed(self): 'ignore_type_in_groups': [], 'report_repetition': True, 'exclude_paths': None, + 'include_paths': None, 'exclude_regex_paths': None, 'exclude_types': None, 'exclude_types_tuple': None, diff --git a/tests/test_diff_text.py b/tests/test_diff_text.py index c9717a03..cdda309c 100755 --- a/tests/test_diff_text.py +++ b/tests/test_diff_text.py @@ -96,6 +96,7 @@ def test_item_added_and_removed(self): } } assert result == ddiff + assert {"root[2]", "root[4]", "root[5]", "root[6]"} == ddiff.affected_paths def test_item_added_and_removed_verbose(self): t1 = {1: 1, 3: 3, 4: 4} @@ -1301,6 +1302,15 @@ def test_skip_path2(self): ddiff = DeepDiff(t1, t2, exclude_paths={"root['ingredients']"}) assert {} == ddiff + def test_skip_path2_key_names(self): + t1 = { + "for life": "vegan", + "ingredients": ["no meat", "no eggs", "no dairy"] + } + t2 = {"for life": "vegan"} + ddiff = DeepDiff(t1, t2, exclude_paths={"ingredients"}) + assert {} == ddiff + def test_skip_path2_reverse(self): t1 = { "for life": "vegan", @@ -1310,6 +1320,24 @@ def test_skip_path2_reverse(self): ddiff = DeepDiff(t2, t1, 
exclude_paths={"root['ingredients']"}) assert {} == ddiff + def test_include_path3(self): + t1 = { + "for life": "vegan", + "ingredients": ["no meat", "no eggs", "no dairy"] + } + t2 = {"for life": "vegan"} + ddiff = DeepDiff(t2, t1, include_paths={"root['for_life']"}) + assert {} == ddiff + + def test_include_path3_with_just_key_names(self): + t1 = { + "for life": "vegan", + "ingredients": ["no meat", "no eggs", "no dairy"] + } + t2 = {"for life": "vegan"} + ddiff = DeepDiff(t2, t1, include_paths={"for_life"}) + assert {} == ddiff + def test_skip_path4(self): t1 = { "for life": "vegan", @@ -1394,6 +1422,7 @@ def exclude_obj_callback_strict(obj, path): ddiff = DeepDiff(t1, t2, exclude_obj_callback_strict=exclude_obj_callback_strict) result = {'values_changed': {"root['x']": {'new_value': 12, 'old_value': 10}}} assert result == ddiff + assert {"root['x']"} == ddiff.affected_paths def test_skip_str_type_in_dictionary(self): t1 = {1: {2: "a"}} @@ -1447,6 +1476,7 @@ def test_list_none_item_removed(self): 'iterable_item_removed': {'root[2]': None} } assert result == ddiff + assert {"root[2]"} == ddiff.affected_paths def test_non_subscriptable_iterable(self): def gen1(): @@ -1466,6 +1496,7 @@ def gen2(): # Note: In text-style results, we currently pretend this stuff is subscriptable for readability assert result == ddiff + assert {"root[2]"} == ddiff.affected_paths @pytest.mark.parametrize('t1, t2, params, expected_result', [ (float('nan'), float('nan'), {}, ['values_changed']), @@ -1594,6 +1625,7 @@ def test_group_by_not_list_of_dicts(self): diff = DeepDiff(t1, t2, group_by='id') expected = {'values_changed': {'root[1]': {'new_value': 3, 'old_value': 2}}} assert expected == diff + assert {"root[1]"} == diff.affected_paths def test_datetime_in_key(self): diff --git a/tests/test_ignore_order.py b/tests/test_ignore_order.py index 52016b3f..aa6a3d4c 100644 --- a/tests/test_ignore_order.py +++ b/tests/test_ignore_order.py @@ -47,6 +47,7 @@ def test_ignore_order_depth3(self): t2 = [[{4, 5, 6}], {1, 2, 3}] ddiff = DeepDiff(t1, t2, ignore_order=True) assert {'set_item_added': ["root[1][0][6]"]} == ddiff + assert {"root[1][0][6]"} == ddiff.affected_paths def test_ignore_order_depth4(self): t1 = [[1, 2, 3, 4], [4, 2, 2, 1]] @@ -74,6 +75,7 @@ def test_ignore_order_depth5(self): } } assert expected == ddiff + assert {"root[1]", "root[2]", "root[3]"} == ddiff.affected_paths ddiff = DeepDiff(t1, t2, ignore_order=True, report_repetition=False, cache_purge_level=0) dist = ddiff._get_rough_distance() @@ -124,6 +126,7 @@ def test_dictionary_difference_ignore_order(self): t2 = {"a": [[{"b": 2, "c": 3}, {"b": 2, "c": 4}]]} ddiff = DeepDiff(t1, t2, ignore_order=True) assert {} == ddiff + assert set() == ddiff.affected_paths def test_nested_list_ignore_order(self): t1 = [1, 2, [3, 4]] @@ -190,6 +193,7 @@ def test_nested_list_ignore_order_report_repetition_wrong_currently(self): } } assert result != ddiff + assert {"root[2][0]"} == ddiff.affected_paths def test_list_of_unhashable_difference_ignore_order(self): t1 = [{"a": 2}, {"b": [3, 4, {1: 1}]}] diff --git a/tests/test_serialization.py b/tests/test_serialization.py index c501aa5a..9bd8c6d6 100644 --- a/tests/test_serialization.py +++ b/tests/test_serialization.py @@ -11,7 +11,7 @@ from deepdiff.serialization import ( pickle_load, pickle_dump, ForbiddenModule, ModuleNotFoundError, MODULE_NOT_FOUND_MSG, FORBIDDEN_MODULE_MSG, pretty_print_diff, - load_path_content, UnsupportedFormatErr) + load_path_content, UnsupportedFormatErr, json_dumps, json_loads) from conftest 
import FIXTURES_DIR from ordered_set import OrderedSet from tests import PicklableClass @@ -298,14 +298,14 @@ def test_pretty_print_diff_repetition_change(self, t1, t2, item_path): @pytest.mark.parametrize("expected, verbose_level", ( - ('Item root[5] added to dictionary.' - '\nItem root[3] removed from dictionary.' - '\nType of root[2] changed from int to str and value changed from 2 to "b".' - '\nValue of root[4] changed from 4 to 5.', 0), - ('Item root[5] (5) added to dictionary.' - '\nItem root[3] (3) removed from dictionary.' - '\nType of root[2] changed from int to str and value changed from 2 to "b".' - '\nValue of root[4] changed from 4 to 5.', 2), + ('Item root[5] added to dictionary.' + '\nItem root[3] removed from dictionary.' + '\nType of root[2] changed from int to str and value changed from 2 to "b".' + '\nValue of root[4] changed from 4 to 5.', 0), + ('Item root[5] (5) added to dictionary.' + '\nItem root[3] (3) removed from dictionary.' + '\nType of root[2] changed from int to str and value changed from 2 to "b".' + '\nValue of root[4] changed from 4 to 5.', 2), ), ids=("verbose=0", "verbose=2") ) def test_pretty_form_method(self, expected, verbose_level): @@ -314,3 +314,12 @@ def test_pretty_form_method(self, expected, verbose_level): ddiff = DeepDiff(t1, t2, view='tree', verbose_level=verbose_level) result = ddiff.pretty() assert result == expected + + @pytest.mark.parametrize('test_num, value', [ + (1, {'10': None}), + (2, {"type_changes": {"root": {"old_type": None, "new_type": list, "new_value": ["你好", 2, 3, 5]}}}) + ]) + def test_json_dumps_and_loads(self, test_num, value): + serialized = json_dumps(value) + back = json_loads(serialized) + assert value == back, f"test_json_dumps_and_loads tesst #{test_num} failed"
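
For quick reference, a minimal usage sketch of the new include_paths parameter, mirroring the examples added to docs/exclude_paths.rst above (assumes deepdiff with this patch applied):

from deepdiff import DeepDiff

t1 = {"for life": "vegan", "ingredients": ["no meat", "no eggs", "no dairy"]}
t2 = {"for life": "vegan", "ingredients": ["veggies", "tofu", "soy sauce"]}

# Only the "for life" branch is compared; the differing "ingredients" list is ignored.
print(DeepDiff(t1, t2, include_paths="root['for life']"))  # -> {}

# Root-level keys can also be given without the root[...] wrapper, for both
# include_paths and exclude_paths.
print(DeepDiff(t1, t2, exclude_paths="ingredients"))  # -> {}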
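
The bare-key shorthand works because both DeepDiff and DeepHash now pass the supplied paths through the new add_root_to_paths helper in deepdiff/helper.py. A small sketch of what the helper produces; the expected sets in the comments are derived from the helper's code above, not from a test:

from deepdiff.helper import add_root_to_paths

# None passes through unchanged.
assert add_root_to_paths(None) is None

# A bare key is expanded into every root-anchored variation that might match the real path.
paths = add_root_to_paths(['ingredients'])
# -> OrderedSet(['root.ingredients', 'root[ingredients]', "root['ingredients']"])

# Paths that already start with 'root' are kept as-is.
paths = add_root_to_paths(["root['for life']"])
# -> OrderedSet(["root['for life']"])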
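
DeepDiff also gains an affected_paths property that gathers every reported path (changed, added, or removed) into one OrderedSet, as exercised throughout the updated tests. A short sketch with hypothetical inputs of my own; the printed set is what the property's code should yield for them:

from deepdiff import DeepDiff

# Hypothetical inputs, not taken from the test fixtures.
t1 = {1: 1, 2: 2, 3: [3]}
t2 = {1: 1, 2: 4, 3: [3, 4]}
diff = DeepDiff(t1, t2)

# affected_paths unions the keys/items of every report section
# (values_changed, iterable_item_added, etc.) into one set of path strings.
print(diff.affected_paths)  # e.g. OrderedSet(['root[2]', 'root[3][1]'])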
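
Finally, the json_dumps/json_loads pair added to deepdiff/serialization.py can be plugged into Delta as serializer and deserializer; this sketch mirrors test_from_null_delta_json above:

from deepdiff import DeepDiff, Delta
from deepdiff.serialization import json_dumps, json_loads

t1 = None
t2 = [1, 2, 3, 5]

diff = DeepDiff(t1, t2)
delta = Delta(diff, serializer=json_dumps)  # dumps() uses the supplied serializer, so the payload is JSON rather than pickle
dump = delta.dumps()

# json_loads uses the new JSONDecoder, so type names such as "list" or "int"
# inside type_changes are mapped back to real types via TYPE_STR_TO_TYPE.
delta2 = Delta(dump, deserializer=json_loads)
assert delta2 + t1 == t2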