diff --git a/.changes/unreleased/Under the Hood-20221205-164948.yaml b/.changes/unreleased/Under the Hood-20221205-164948.yaml new file mode 100644 index 00000000000..579f973955b --- /dev/null +++ b/.changes/unreleased/Under the Hood-20221205-164948.yaml @@ -0,0 +1,7 @@ +kind: Under the Hood +body: Consolidate ParsedNode and CompiledNode classes +time: 2022-12-05T16:49:48.563583-05:00 +custom: + Author: gshank + Issue: "6383" + PR: "6384" diff --git a/core/dbt/adapters/base/impl.py b/core/dbt/adapters/base/impl.py index 33b7c45a3c4..bbac18cb16b 100644 --- a/core/dbt/adapters/base/impl.py +++ b/core/dbt/adapters/base/impl.py @@ -15,7 +15,6 @@ List, Mapping, Iterator, - Union, Set, ) @@ -38,9 +37,8 @@ ) from dbt.clients.agate_helper import empty_table, merge_tables, table_from_rows from dbt.clients.jinja import MacroGenerator -from dbt.contracts.graph.compiled import CompileResultNode, CompiledSeedNode from dbt.contracts.graph.manifest import Manifest, MacroManifest -from dbt.contracts.graph.parsed import ParsedSeedNode +from dbt.contracts.graph.nodes import ResultNode from dbt.events.functions import fire_event, warn_or_error from dbt.events.types import ( CacheMiss, @@ -64,9 +62,6 @@ from dbt.adapters.cache import RelationsCache, _make_ref_key_msg -SeedModel = Union[ParsedSeedNode, CompiledSeedNode] - - GET_CATALOG_MACRO_NAME = "get_catalog" FRESHNESS_MACRO_NAME = "collect_freshness" @@ -243,9 +238,7 @@ def nice_connection_name(self) -> str: return conn.name @contextmanager - def connection_named( - self, name: str, node: Optional[CompileResultNode] = None - ) -> Iterator[None]: + def connection_named(self, name: str, node: Optional[ResultNode] = None) -> Iterator[None]: try: if self.connections.query_header is not None: self.connections.query_header.set(name, node) @@ -257,7 +250,7 @@ def connection_named( self.connections.query_header.reset() @contextmanager - def connection_for(self, node: CompileResultNode) -> Iterator[None]: + def connection_for(self, node: ResultNode) -> Iterator[None]: with self.connection_named(node.unique_id, node): yield @@ -372,7 +365,7 @@ def _get_catalog_schemas(self, manifest: Manifest) -> SchemaSearchMap: lowercase strings. 
""" info_schema_name_map = SchemaSearchMap() - nodes: Iterator[CompileResultNode] = chain( + nodes: Iterator[ResultNode] = chain( [ node for node in manifest.nodes.values() diff --git a/core/dbt/adapters/base/query_headers.py b/core/dbt/adapters/base/query_headers.py index 26f34be9c93..dd88fdb2d41 100644 --- a/core/dbt/adapters/base/query_headers.py +++ b/core/dbt/adapters/base/query_headers.py @@ -5,7 +5,7 @@ from dbt.context.manifest import generate_query_header_context from dbt.contracts.connection import AdapterRequiredConfig, QueryComment -from dbt.contracts.graph.compiled import CompileResultNode +from dbt.contracts.graph.nodes import ResultNode from dbt.contracts.graph.manifest import Manifest from dbt.exceptions import RuntimeException @@ -90,7 +90,7 @@ def add(self, sql: str) -> str: def reset(self): self.set("master", None) - def set(self, name: str, node: Optional[CompileResultNode]): + def set(self, name: str, node: Optional[ResultNode]): wrapped: Optional[NodeWrapper] = None if node is not None: wrapped = NodeWrapper(node) diff --git a/core/dbt/adapters/base/relation.py b/core/dbt/adapters/base/relation.py index 3124384975a..55182396ef4 100644 --- a/core/dbt/adapters/base/relation.py +++ b/core/dbt/adapters/base/relation.py @@ -2,8 +2,7 @@ from dataclasses import dataclass from typing import Optional, TypeVar, Any, Type, Dict, Union, Iterator, Tuple, Set -from dbt.contracts.graph.compiled import CompiledNode -from dbt.contracts.graph.parsed import ParsedSourceDefinition, ParsedNode +from dbt.contracts.graph.nodes import SourceDefinition, ParsedNode from dbt.contracts.relation import ( RelationType, ComponentName, @@ -184,7 +183,7 @@ def quoted(self, identifier): ) @classmethod - def create_from_source(cls: Type[Self], source: ParsedSourceDefinition, **kwargs: Any) -> Self: + def create_from_source(cls: Type[Self], source: SourceDefinition, **kwargs: Any) -> Self: source_quoting = source.quoting.to_dict(omit_none=True) source_quoting.pop("column", None) quote_policy = deep_merge( @@ -209,7 +208,7 @@ def add_ephemeral_prefix(name: str): def create_ephemeral_from_node( cls: Type[Self], config: HasQuoting, - node: Union[ParsedNode, CompiledNode], + node: ParsedNode, ) -> Self: # Note that ephemeral models are based on the name. 
identifier = cls.add_ephemeral_prefix(node.name) @@ -222,7 +221,7 @@ def create_ephemeral_from_node( def create_from_node( cls: Type[Self], config: HasQuoting, - node: Union[ParsedNode, CompiledNode], + node: ParsedNode, quote_policy: Optional[Dict[str, bool]] = None, **kwargs: Any, ) -> Self: @@ -243,21 +242,18 @@ def create_from_node( def create_from( cls: Type[Self], config: HasQuoting, - node: Union[CompiledNode, ParsedNode, ParsedSourceDefinition], + node: Union[ParsedNode, SourceDefinition], **kwargs: Any, ) -> Self: if node.resource_type == NodeType.Source: - if not isinstance(node, ParsedSourceDefinition): + if not isinstance(node, SourceDefinition): raise InternalException( - "type mismatch, expected ParsedSourceDefinition but got {}".format(type(node)) + "type mismatch, expected SourceDefinition but got {}".format(type(node)) ) return cls.create_from_source(node, **kwargs) else: - if not isinstance(node, (ParsedNode, CompiledNode)): - raise InternalException( - "type mismatch, expected ParsedNode or CompiledNode but " - "got {}".format(type(node)) - ) + if not isinstance(node, (ParsedNode)): + raise InternalException(f"type mismatch, expected ParsedNode but got {type(node)}") return cls.create_from_node(config, node, **kwargs) @classmethod diff --git a/core/dbt/adapters/protocol.py b/core/dbt/adapters/protocol.py index f17c2bd6f45..0cc3b3c96ce 100644 --- a/core/dbt/adapters/protocol.py +++ b/core/dbt/adapters/protocol.py @@ -17,8 +17,7 @@ import agate from dbt.contracts.connection import Connection, AdapterRequiredConfig, AdapterResponse -from dbt.contracts.graph.compiled import CompiledNode, ManifestNode, NonSourceCompiledNode -from dbt.contracts.graph.parsed import ParsedNode, ParsedSourceDefinition +from dbt.contracts.graph.nodes import ParsedNode, SourceDefinition, ManifestNode from dbt.contracts.graph.model_config import BaseConfig from dbt.contracts.graph.manifest import Manifest from dbt.contracts.relation import Policy, HasQuoting @@ -51,7 +50,7 @@ def get_default_quote_policy(cls) -> Policy: def create_from( cls: Type[Self], config: HasQuoting, - node: Union[CompiledNode, ParsedNode, ParsedSourceDefinition], + node: Union[ParsedNode, SourceDefinition], ) -> Self: ... @@ -65,7 +64,7 @@ def compile_node( node: ManifestNode, manifest: Manifest, extra_context: Optional[Dict[str, Any]] = None, - ) -> NonSourceCompiledNode: + ) -> ManifestNode: ... 
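The relation.py and protocol.py hunks above narrow create_from's signature from Union[CompiledNode, ParsedNode, ParsedSourceDefinition] down to Union[ParsedNode, SourceDefinition], leaving a two-branch dispatch: sources versus everything else. A minimal, self-contained sketch of that shape — ToySource, ToyModel, and make_relation are hypothetical stand-ins, not dbt's real classes:

from dataclasses import dataclass
from typing import Union

@dataclass
class ToySource:
    name: str
    resource_type: str = "source"

@dataclass
class ToyModel:
    name: str
    resource_type: str = "model"

def make_relation(node: Union[ToyModel, ToySource]) -> str:
    # One source branch, one "everything else" branch; the separate
    # CompiledNode isinstance check is gone because parsed and compiled
    # nodes now share a single class hierarchy.
    if node.resource_type == "source":
        if not isinstance(node, ToySource):
            raise TypeError(f"type mismatch, expected ToySource but got {type(node)}")
        return f"source relation for {node.name}"
    if not isinstance(node, ToyModel):
        raise TypeError(f"type mismatch, expected ToyModel but got {type(node)}")
    return f"node relation for {node.name}"

assert make_relation(ToyModel("orders")) == "node relation for orders"

diff --git a/core/dbt/clients/jinja.py b/core/dbt/clients/jinja.py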
index 5e9835952a8..ac04bb86cb4 100644 --- a/core/dbt/clients/jinja.py +++ b/core/dbt/clients/jinja.py @@ -25,8 +25,7 @@ ) from dbt.clients._jinja_blocks import BlockIterator, BlockData, BlockTag -from dbt.contracts.graph.compiled import CompiledGenericTestNode -from dbt.contracts.graph.parsed import ParsedGenericTestNode +from dbt.contracts.graph.nodes import GenericTestNode from dbt.exceptions import ( InternalException, @@ -620,7 +619,7 @@ def extract_toplevel_blocks( def add_rendered_test_kwargs( context: Dict[str, Any], - node: Union[ParsedGenericTestNode, CompiledGenericTestNode], + node: GenericTestNode, capture_macros: bool = False, ) -> None: """Render each of the test kwargs in the given context using the native diff --git a/core/dbt/compilation.py b/core/dbt/compilation.py index 0afd82c0d42..7cd6f49a5e6 100644 --- a/core/dbt/compilation.py +++ b/core/dbt/compilation.py @@ -1,6 +1,6 @@ import os from collections import defaultdict -from typing import List, Dict, Any, Tuple, cast, Optional +from typing import List, Dict, Any, Tuple, Optional import networkx as nx # type: ignore import pickle @@ -12,15 +12,13 @@ from dbt.clients.system import make_directory from dbt.context.providers import generate_runtime_model_context from dbt.contracts.graph.manifest import Manifest, UniqueID -from dbt.contracts.graph.compiled import ( - COMPILED_TYPES, - CompiledGenericTestNode, +from dbt.contracts.graph.nodes import ( + ParsedNode, + ManifestNode, + GenericTestNode, GraphMemberNode, InjectedCTE, - ManifestNode, - NonSourceCompiledNode, ) -from dbt.contracts.graph.parsed import ParsedNode from dbt.exceptions import ( dependency_not_found, InternalException, @@ -37,14 +35,6 @@ graph_file_name = "graph.gpickle" -def _compiled_type_for(model: ParsedNode): - if type(model) not in COMPILED_TYPES: - raise InternalException( - f"Asked to compile {type(model)} node, but it has no compiled form" - ) - return COMPILED_TYPES[type(model)] - - def print_compile_stats(stats): names = { NodeType.Model: "model", @@ -177,7 +167,7 @@ def initialize(self): # a dict for jinja rendering of SQL def _create_node_context( self, - node: NonSourceCompiledNode, + node: ManifestNode, manifest: Manifest, extra_context: Dict[str, Any], ) -> Dict[str, Any]: @@ -185,7 +175,7 @@ def _create_node_context( context = generate_runtime_model_context(node, self.config, manifest) context.update(extra_context) - if isinstance(node, CompiledGenericTestNode): + if isinstance(node, GenericTestNode): # for test nodes, add a special keyword args value to the context jinja.add_rendered_test_kwargs(context, node) @@ -262,10 +252,10 @@ def _inject_ctes_into_sql(self, sql: str, ctes: List[InjectedCTE]) -> str: def _recursively_prepend_ctes( self, - model: NonSourceCompiledNode, + model: ManifestNode, manifest: Manifest, extra_context: Optional[Dict[str, Any]], - ) -> Tuple[NonSourceCompiledNode, List[InjectedCTE]]: + ) -> Tuple[ManifestNode, List[InjectedCTE]]: """This method is called by the 'compile_node' method. Starting from the node that it is passed in, it will recursively call itself using the 'extra_ctes'.
The 'ephemeral' models do @@ -306,8 +296,6 @@ def _recursively_prepend_ctes( # This model has already been compiled, so it's been # through here before if getattr(cte_model, "compiled", False): - assert isinstance(cte_model, tuple(COMPILED_TYPES.values())) - cte_model = cast(NonSourceCompiledNode, cte_model) new_prepended_ctes = cte_model.extra_ctes # if the cte_model isn't compiled, i.e. first time here @@ -344,7 +332,7 @@ def _recursively_prepend_ctes( return model, prepended_ctes - # creates a compiled_node from the ManifestNode passed in, + # Sets compiled fields in the ManifestNode passed in, # creates a "context" dictionary for jinja rendering, # and then renders the "compiled_code" using the node, the # raw_code and the context. @@ -353,7 +341,7 @@ def _compile_node( node: ManifestNode, manifest: Manifest, extra_context: Optional[Dict[str, Any]] = None, - ) -> NonSourceCompiledNode: + ) -> ManifestNode: if extra_context is None: extra_context = {} @@ -366,9 +354,8 @@ def _compile_node( "extra_ctes": [], } ) - compiled_node = _compiled_type_for(node).from_dict(data) - if compiled_node.language == ModelLanguage.python: + if node.language == ModelLanguage.python: # TODO could we also 'minify' this code at all? just aesthetic, not functional # quoating seems like something very specific to sql so far @@ -376,7 +363,7 @@ def _compile_node( # TODO try to find better way to do this, given that original_quoting = self.config.quoting self.config.quoting = {key: False for key in original_quoting.keys()} - context = self._create_node_context(compiled_node, manifest, extra_context) + context = self._create_node_context(node, manifest, extra_context) postfix = jinja.get_rendered( "{{ py_script_postfix(model) }}", @@ -384,23 +371,23 @@ def _compile_node( node, ) # we should NOT jinja render the python model's 'raw code' - compiled_node.compiled_code = f"{node.raw_code}\n\n{postfix}" + node.compiled_code = f"{node.raw_code}\n\n{postfix}" # restore quoting settings in the end since context is lazy evaluated self.config.quoting = original_quoting else: - context = self._create_node_context(compiled_node, manifest, extra_context) - compiled_node.compiled_code = jinja.get_rendered( + context = self._create_node_context(node, manifest, extra_context) + node.compiled_code = jinja.get_rendered( node.raw_code, context, node, ) - compiled_node.relation_name = self._get_relation_name(node) + node.relation_name = self._get_relation_name(node) - compiled_node.compiled = True + node.compiled = True - return compiled_node + return node def write_graph_file(self, linker: Linker, manifest: Manifest): filename = graph_file_name @@ -507,7 +494,7 @@ def compile(self, manifest: Manifest, write=True, add_test_edges=False) -> Graph return Graph(linker.graph) # writes the "compiled_code" into the target/compiled directory - def _write_node(self, node: NonSourceCompiledNode) -> ManifestNode: + def _write_node(self, node: ManifestNode) -> ManifestNode: if not node.extra_ctes_injected or node.resource_type == NodeType.Snapshot: return node fire_event(WritingInjectedSQLForNode(node_info=get_node_info())) @@ -524,7 +511,7 @@ def compile_node( manifest: Manifest, extra_context: Optional[Dict[str, Any]] = None, write: bool = True, - ) -> NonSourceCompiledNode: + ) -> ManifestNode: """This is the main entry point into this code. It's called by CompileRunner.compile, GenericRPCRunner.compile, and RunTask.get_hook_sql. 
It calls '_compile_node' to convert diff --git a/core/dbt/context/base.py b/core/dbt/context/base.py index 813a7a32b5e..e57c3edac56 100644 --- a/core/dbt/context/base.py +++ b/core/dbt/context/base.py @@ -8,7 +8,7 @@ from dbt.clients.jinja import get_rendered from dbt.clients.yaml_helper import yaml, safe_load, SafeLoader, Loader, Dumper # noqa: F401 from dbt.constants import SECRET_ENV_PREFIX, DEFAULT_ENV_PLACEHOLDER -from dbt.contracts.graph.compiled import CompiledResource +from dbt.contracts.graph.nodes import Resource from dbt.exceptions import ( CompilationException, MacroReturn, @@ -135,11 +135,11 @@ def __init__( self, context: Mapping[str, Any], cli_vars: Mapping[str, Any], - node: Optional[CompiledResource] = None, + node: Optional[Resource] = None, ) -> None: self._context: Mapping[str, Any] = context self._cli_vars: Mapping[str, Any] = cli_vars - self._node: Optional[CompiledResource] = node + self._node: Optional[Resource] = node self._merged: Mapping[str, Any] = self._generate_merged() def _generate_merged(self) -> Mapping[str, Any]: diff --git a/core/dbt/context/docs.py b/core/dbt/context/docs.py index 26096caa108..4908829d414 100644 --- a/core/dbt/context/docs.py +++ b/core/dbt/context/docs.py @@ -5,9 +5,8 @@ doc_target_not_found, ) from dbt.config.runtime import RuntimeConfig -from dbt.contracts.graph.compiled import CompileResultNode from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.graph.parsed import ParsedMacro +from dbt.contracts.graph.nodes import Macro, ResultNode from dbt.context.base import contextmember from dbt.context.configured import SchemaYamlContext @@ -17,7 +16,7 @@ class DocsRuntimeContext(SchemaYamlContext): def __init__( self, config: RuntimeConfig, - node: Union[ParsedMacro, CompileResultNode], + node: Union[Macro, ResultNode], manifest: Manifest, current_project: str, ) -> None: @@ -55,7 +54,7 @@ def doc(self, *args: str) -> str: else: doc_invalid_args(self.node, args) - # ParsedDocumentation + # Documentation target_doc = self.manifest.resolve_doc( doc_name, doc_package_name, diff --git a/core/dbt/context/macro_resolver.py b/core/dbt/context/macro_resolver.py index 2766dc4130c..a108a1889b9 100644 --- a/core/dbt/context/macro_resolver.py +++ b/core/dbt/context/macro_resolver.py @@ -1,10 +1,10 @@ from typing import Dict, MutableMapping, Optional -from dbt.contracts.graph.parsed import ParsedMacro +from dbt.contracts.graph.nodes import Macro from dbt.exceptions import raise_duplicate_macro_name, raise_compiler_error from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME from dbt.clients.jinja import MacroGenerator -MacroNamespace = Dict[str, ParsedMacro] +MacroNamespace = Dict[str, Macro] # This class builds the MacroResolver by adding macros @@ -21,7 +21,7 @@ class MacroResolver: def __init__( self, - macros: MutableMapping[str, ParsedMacro], + macros: MutableMapping[str, Macro], root_project_name: str, internal_package_names, ) -> None: @@ -77,7 +77,7 @@ def _build_macros_by_name(self): def _add_macro_to( self, package_namespaces: Dict[str, MacroNamespace], - macro: ParsedMacro, + macro: Macro, ): if macro.package_name in package_namespaces: namespace = package_namespaces[macro.package_name] @@ -89,7 +89,7 @@ def _add_macro_to( raise_duplicate_macro_name(macro, macro, macro.package_name) package_namespaces[macro.package_name][macro.name] = macro - def add_macro(self, macro: ParsedMacro): + def add_macro(self, macro: Macro): macro_name: str = macro.name # internal macros (from plugins) will be processed 
separately from diff --git a/core/dbt/context/macros.py b/core/dbt/context/macros.py index dccd376b876..700109b8081 100644 --- a/core/dbt/context/macros.py +++ b/core/dbt/context/macros.py @@ -1,7 +1,7 @@ from typing import Any, Dict, Iterable, Union, Optional, List, Iterator, Mapping, Set from dbt.clients.jinja import MacroGenerator, MacroStack -from dbt.contracts.graph.parsed import ParsedMacro +from dbt.contracts.graph.nodes import Macro from dbt.include.global_project import PROJECT_NAME as GLOBAL_PROJECT_NAME from dbt.exceptions import raise_duplicate_macro_name, raise_compiler_error @@ -112,7 +112,7 @@ def __init__( def _add_macro_to( self, hierarchy: Dict[str, FlatNamespace], - macro: ParsedMacro, + macro: Macro, macro_func: MacroGenerator, ): if macro.package_name in hierarchy: @@ -125,7 +125,7 @@ def _add_macro_to( raise_duplicate_macro_name(macro_func.macro, macro, macro.package_name) hierarchy[macro.package_name][macro.name] = macro_func - def add_macro(self, macro: ParsedMacro, ctx: Dict[str, Any]): + def add_macro(self, macro: Macro, ctx: Dict[str, Any]): macro_name: str = macro.name # MacroGenerator is in clients/jinja.py @@ -147,13 +147,11 @@ def add_macro(self, macro: ParsedMacro, ctx: Dict[str, Any]): elif macro.package_name == self.root_package: self.globals[macro_name] = macro_func - def add_macros(self, macros: Iterable[ParsedMacro], ctx: Dict[str, Any]): + def add_macros(self, macros: Iterable[Macro], ctx: Dict[str, Any]): for macro in macros: self.add_macro(macro, ctx) - def build_namespace( - self, macros: Iterable[ParsedMacro], ctx: Dict[str, Any] - ) -> MacroNamespace: + def build_namespace(self, macros: Iterable[Macro], ctx: Dict[str, Any]) -> MacroNamespace: self.add_macros(macros, ctx) # Iterate in reverse-order and overwrite: the packages that are first diff --git a/core/dbt/context/providers.py b/core/dbt/context/providers.py index 35afeecddf3..06642810730 100644 --- a/core/dbt/context/providers.py +++ b/core/dbt/context/providers.py @@ -28,18 +28,15 @@ from .manifest import ManifestContext from dbt.contracts.connection import AdapterResponse from dbt.contracts.graph.manifest import Manifest, Disabled -from dbt.contracts.graph.compiled import ( - CompiledResource, - CompiledSeedNode, +from dbt.contracts.graph.nodes import ( + Macro, + Exposure, + Metric, + SeedNode, + SourceDefinition, + Resource, ManifestNode, ) -from dbt.contracts.graph.parsed import ( - ParsedMacro, - ParsedExposure, - ParsedMetric, - ParsedSeedNode, - ParsedSourceDefinition, -) from dbt.contracts.graph.metrics import MetricReference, ResolvedMetricReference from dbt.events.functions import get_metadata_vars from dbt.exceptions import ( @@ -512,7 +509,7 @@ def validate( def create_relation(self, target_model: ManifestNode, name: str) -> RelationProxy: if target_model.is_ephemeral_model: # In operations, we can't ref() ephemeral nodes, because - # ParsedMacros do not support set_cte + # Macros do not support set_cte raise_compiler_error( "Operations can not ref() ephemeral nodes, but {} is ephemeral".format( target_model.name @@ -584,9 +581,9 @@ def __init__( self, context: Dict[str, Any], config: RuntimeConfig, - node: CompiledResource, + node: Resource, ) -> None: - self._node: CompiledResource + self._node: Resource self._config: RuntimeConfig = config super().__init__(context, config.cli_vars, node=node) @@ -690,7 +687,7 @@ def __init__( raise InternalException(f"Invalid provider given to context: {provider}") # mypy appeasement - we know it'll be a RuntimeConfig self.config: 
RuntimeConfig - self.model: Union[ParsedMacro, ManifestNode] = model + self.model: Union[Macro, ManifestNode] = model super().__init__(config, manifest, model.package_name) self.sql_results: Dict[str, AttrDict] = {} self.context_config: Optional[ContextConfig] = context_config @@ -779,7 +776,7 @@ def inner(value: T) -> None: @contextmember def write(self, payload: str) -> str: # macros/source defs aren't 'writeable'. - if isinstance(self.model, (ParsedMacro, ParsedSourceDefinition)): + if isinstance(self.model, (Macro, SourceDefinition)): raise_compiler_error('cannot "write" macros or sources') self.model.build_path = self.model.write_node(self.config.target_path, "run", payload) return "" @@ -799,7 +796,7 @@ def try_or_compiler_error( @contextmember def load_agate_table(self) -> agate.Table: - if not isinstance(self.model, (ParsedSeedNode, CompiledSeedNode)): + if not isinstance(self.model, SeedNode): raise_compiler_error( "can only load_agate_table for seeds (got a {})".format(self.model.resource_type) ) @@ -1220,7 +1217,13 @@ def env_var(self, var: str, default: Optional[str] = None) -> str: if return_value is not None: # Save the env_var value in the manifest and the var name in the source_file. # If this is compiling, do not save because it's irrelevant to parsing. - if self.model and not hasattr(self.model, "compiled"): + compiling = ( + True + if hasattr(self.model, "compiled") + and getattr(self.model, "compiled", False) is True + else False + ) + if self.model and not compiling: # If the environment variable is set from a default, store a string indicating # that so we can skip partial parsing. Otherwise the file will be scheduled for # reparsing. If the default changes, the file will have been updated and therefore @@ -1275,7 +1278,7 @@ class MacroContext(ProviderContext): def __init__( self, - model: ParsedMacro, + model: Macro, config: RuntimeConfig, manifest: Manifest, provider: Provider, @@ -1390,7 +1393,7 @@ def generate_parser_model_context( def generate_generate_name_macro_context( - macro: ParsedMacro, + macro: Macro, config: RuntimeConfig, manifest: Manifest, ) -> Dict[str, Any]: @@ -1408,7 +1411,7 @@ def generate_runtime_model_context( def generate_runtime_macro_context( - macro: ParsedMacro, + macro: Macro, config: RuntimeConfig, manifest: Manifest, package_name: Optional[str], @@ -1444,7 +1447,7 @@ def __call__(self, *args) -> str: def generate_parse_exposure( - exposure: ParsedExposure, + exposure: Exposure, config: RuntimeConfig, manifest: Manifest, package_name: str, @@ -1494,7 +1497,7 @@ def validate_args(self, name, package): def generate_parse_metrics( - metric: ParsedMetric, + metric: Metric, config: RuntimeConfig, manifest: Manifest, package_name: str, diff --git a/core/dbt/contracts/graph/compiled.py b/core/dbt/contracts/graph/compiled.py deleted file mode 100644 index 28930932299..00000000000 --- a/core/dbt/contracts/graph/compiled.py +++ /dev/null @@ -1,236 +0,0 @@ -from dbt.contracts.graph.parsed import ( - HasTestMetadata, - ParsedNode, - ParsedAnalysisNode, - ParsedSingularTestNode, - ParsedHookNode, - ParsedModelNode, - ParsedExposure, - ParsedMetric, - ParsedResource, - ParsedRPCNode, - ParsedSqlNode, - ParsedGenericTestNode, - ParsedSeedNode, - ParsedSnapshotNode, - ParsedSourceDefinition, - SeedConfig, - TestConfig, - same_seeds, -) -from dbt.node_types import NodeType -from dbt.contracts.util import Replaceable - -from dbt.dataclass_schema import dbtClassMixin -from dataclasses import dataclass, field -from typing import Optional, List, Union, Dict, 
Type - - -@dataclass -class InjectedCTE(dbtClassMixin, Replaceable): - id: str - sql: str - - -@dataclass -class CompiledNodeMixin(dbtClassMixin): - # this is a special mixin class to provide a required argument. If a node - # is missing a `compiled` flag entirely, it must not be a CompiledNode. - compiled: bool - - -@dataclass -class CompiledNode(ParsedNode, CompiledNodeMixin): - compiled_code: Optional[str] = None - extra_ctes_injected: bool = False - extra_ctes: List[InjectedCTE] = field(default_factory=list) - relation_name: Optional[str] = None - _pre_injected_sql: Optional[str] = None - - def set_cte(self, cte_id: str, sql: str): - """This is the equivalent of what self.extra_ctes[cte_id] = sql would - do if extra_ctes were an OrderedDict - """ - for cte in self.extra_ctes: - if cte.id == cte_id: - cte.sql = sql - break - else: - self.extra_ctes.append(InjectedCTE(id=cte_id, sql=sql)) - - def __post_serialize__(self, dct): - dct = super().__post_serialize__(dct) - if "_pre_injected_sql" in dct: - del dct["_pre_injected_sql"] - return dct - - -@dataclass -class CompiledAnalysisNode(CompiledNode): - resource_type: NodeType = field(metadata={"restrict": [NodeType.Analysis]}) - - -@dataclass -class CompiledHookNode(CompiledNode): - resource_type: NodeType = field(metadata={"restrict": [NodeType.Operation]}) - index: Optional[int] = None - - -@dataclass -class CompiledModelNode(CompiledNode): - resource_type: NodeType = field(metadata={"restrict": [NodeType.Model]}) - - -# TODO: rm? -@dataclass -class CompiledRPCNode(CompiledNode): - resource_type: NodeType = field(metadata={"restrict": [NodeType.RPCCall]}) - - -@dataclass -class CompiledSqlNode(CompiledNode): - resource_type: NodeType = field(metadata={"restrict": [NodeType.SqlOperation]}) - - -@dataclass -class CompiledSeedNode(CompiledNode): - # keep this in sync with ParsedSeedNode! - resource_type: NodeType = field(metadata={"restrict": [NodeType.Seed]}) - config: SeedConfig = field(default_factory=SeedConfig) - root_path: Optional[str] = None - - @property - def empty(self): - """Seeds are never empty""" - return False - - def same_body(self, other) -> bool: - return same_seeds(self, other) - - -@dataclass -class CompiledSnapshotNode(CompiledNode): - resource_type: NodeType = field(metadata={"restrict": [NodeType.Snapshot]}) - - -@dataclass -class CompiledSingularTestNode(CompiledNode): - resource_type: NodeType = field(metadata={"restrict": [NodeType.Test]}) - # Was not able to make mypy happy and keep the code working. We need to - # refactor the various configs. - config: TestConfig = field(default_factory=TestConfig) # type:ignore - - -@dataclass -class CompiledGenericTestNode(CompiledNode, HasTestMetadata): - # keep this in sync with ParsedGenericTestNode! - resource_type: NodeType = field(metadata={"restrict": [NodeType.Test]}) - column_name: Optional[str] = None - file_key_name: Optional[str] = None - # Was not able to make mypy happy and keep the code working. We need to - # refactor the various configs. 
- config: TestConfig = field(default_factory=TestConfig) # type:ignore - - def same_contents(self, other) -> bool: - if other is None: - return False - - return self.same_config(other) and self.same_fqn(other) and True - - -CompiledTestNode = Union[CompiledSingularTestNode, CompiledGenericTestNode] - - -PARSED_TYPES: Dict[Type[CompiledNode], Type[ParsedResource]] = { - CompiledAnalysisNode: ParsedAnalysisNode, - CompiledModelNode: ParsedModelNode, - CompiledHookNode: ParsedHookNode, - CompiledRPCNode: ParsedRPCNode, - CompiledSqlNode: ParsedSqlNode, - CompiledSeedNode: ParsedSeedNode, - CompiledSnapshotNode: ParsedSnapshotNode, - CompiledSingularTestNode: ParsedSingularTestNode, - CompiledGenericTestNode: ParsedGenericTestNode, -} - - -COMPILED_TYPES: Dict[Type[ParsedResource], Type[CompiledNode]] = { - ParsedAnalysisNode: CompiledAnalysisNode, - ParsedModelNode: CompiledModelNode, - ParsedHookNode: CompiledHookNode, - ParsedRPCNode: CompiledRPCNode, - ParsedSqlNode: CompiledSqlNode, - ParsedSeedNode: CompiledSeedNode, - ParsedSnapshotNode: CompiledSnapshotNode, - ParsedSingularTestNode: CompiledSingularTestNode, - ParsedGenericTestNode: CompiledGenericTestNode, -} - - -# for some types, the compiled type is the parsed type, so make this easy -CompiledType = Union[Type[CompiledNode], Type[ParsedResource]] -CompiledResource = Union[ParsedResource, CompiledNode] - - -def compiled_type_for(parsed: ParsedNode) -> CompiledType: - if type(parsed) in COMPILED_TYPES: - return COMPILED_TYPES[type(parsed)] - else: - return type(parsed) - - -def parsed_instance_for(compiled: CompiledNode) -> ParsedResource: - cls = PARSED_TYPES.get(type(compiled)) - if cls is None: - # how??? - raise ValueError("invalid resource_type: {}".format(compiled.resource_type)) - - return cls.from_dict(compiled.to_dict(omit_none=True)) - - -NonSourceCompiledNode = Union[ - CompiledAnalysisNode, - CompiledSingularTestNode, - CompiledModelNode, - CompiledHookNode, - CompiledRPCNode, - CompiledSqlNode, - CompiledGenericTestNode, - CompiledSeedNode, - CompiledSnapshotNode, -] - -NonSourceParsedNode = Union[ - ParsedAnalysisNode, - ParsedSingularTestNode, - ParsedHookNode, - ParsedModelNode, - ParsedRPCNode, - ParsedSqlNode, - ParsedGenericTestNode, - ParsedSeedNode, - ParsedSnapshotNode, -] - - -# This is anything that can be in manifest.nodes. -ManifestNode = Union[ - NonSourceCompiledNode, - NonSourceParsedNode, -] - -# We allow either parsed or compiled nodes, or parsed sources, as some -# 'compile()' calls in the runner actually just return the original parsed -# node they were given. 
-CompileResultNode = Union[ - ManifestNode, - ParsedSourceDefinition, -] - -# anything that participates in the graph: sources, exposures, metrics, -# or manifest nodes -GraphMemberNode = Union[ - CompileResultNode, - ParsedExposure, - ParsedMetric, -] diff --git a/core/dbt/contracts/graph/manifest.py b/core/dbt/contracts/graph/manifest.py index 73034ec80f2..05c856f461f 100644 --- a/core/dbt/contracts/graph/manifest.py +++ b/core/dbt/contracts/graph/manifest.py @@ -16,29 +16,24 @@ TypeVar, Callable, Generic, - cast, AbstractSet, ClassVar, ) from typing_extensions import Protocol from uuid import UUID -from dbt.contracts.graph.compiled import ( - CompileResultNode, - ManifestNode, - NonSourceCompiledNode, - GraphMemberNode, -) -from dbt.contracts.graph.parsed import ( - ParsedMacro, - ParsedDocumentation, - ParsedSourceDefinition, - ParsedGenericTestNode, - ParsedExposure, - ParsedMetric, +from dbt.contracts.graph.nodes import ( + Macro, + Documentation, + SourceDefinition, + GenericTestNode, + Exposure, + Metric, HasUniqueID, UnpatchedSourceDefinition, - ManifestNodes, + ManifestNode, + GraphMemberNode, + ResultNode, ) from dbt.contracts.graph.unparsed import SourcePatch from dbt.contracts.files import SourceFile, SchemaSourceFile, FileHash, AnySourceFile @@ -96,7 +91,7 @@ def find(self, key, package: Optional[PackageName], manifest: "Manifest"): return self.perform_lookup(unique_id, manifest) return None - def add_doc(self, doc: ParsedDocumentation): + def add_doc(self, doc: Documentation): if doc.name not in self.storage: self.storage[doc.name] = {} self.storage[doc.name][doc.package_name] = doc.unique_id @@ -105,7 +100,7 @@ def populate(self, manifest): for doc in manifest.docs.values(): self.add_doc(doc) - def perform_lookup(self, unique_id: UniqueID, manifest) -> ParsedDocumentation: + def perform_lookup(self, unique_id: UniqueID, manifest) -> Documentation: if unique_id not in manifest.docs: raise dbt.exceptions.InternalException( f"Doc {unique_id} found in cache but not found in manifest" @@ -127,7 +122,7 @@ def find(self, search_name, package: Optional[PackageName], manifest: "Manifest" return self.perform_lookup(unique_id, manifest) return None - def add_source(self, source: ParsedSourceDefinition): + def add_source(self, source: SourceDefinition): if source.search_name not in self.storage: self.storage[source.search_name] = {} @@ -138,7 +133,7 @@ def populate(self, manifest): if hasattr(source, "source_name"): self.add_source(source) - def perform_lookup(self, unique_id: UniqueID, manifest: "Manifest") -> ParsedSourceDefinition: + def perform_lookup(self, unique_id: UniqueID, manifest: "Manifest") -> SourceDefinition: if unique_id not in manifest.sources: raise dbt.exceptions.InternalException( f"Source {unique_id} found in cache but not found in manifest" @@ -198,7 +193,7 @@ def find(self, search_name, package: Optional[PackageName], manifest: "Manifest" return self.perform_lookup(unique_id, manifest) return None - def add_metric(self, metric: ParsedMetric): + def add_metric(self, metric: Metric): if metric.search_name not in self.storage: self.storage[metric.search_name] = {} @@ -209,7 +204,7 @@ def populate(self, manifest): if hasattr(metric, "name"): self.add_metric(metric) - def perform_lookup(self, unique_id: UniqueID, manifest: "Manifest") -> ParsedMetric: + def perform_lookup(self, unique_id: UniqueID, manifest: "Manifest") -> Metric: if unique_id not in manifest.metrics: raise dbt.exceptions.InternalException( f"Metric {unique_id} found in cache but not found in 
manifest" @@ -365,7 +360,7 @@ class Locality(enum.IntEnum): @dataclass class MacroCandidate: locality: Locality - macro: ParsedMacro + macro: Macro def __eq__(self, other: object) -> bool: if not isinstance(other, MacroCandidate): @@ -430,16 +425,14 @@ def __lt__(self, other: object) -> bool: class CandidateList(List[M]): - def last(self) -> Optional[ParsedMacro]: + def last(self) -> Optional[Macro]: if not self: return None self.sort() return self[-1].macro -def _get_locality( - macro: ParsedMacro, root_project_name: str, internal_packages: Set[str] -) -> Locality: +def _get_locality(macro: Macro, root_project_name: str, internal_packages: Set[str]) -> Locality: if macro.package_name == root_project_name: return Locality.Root elif macro.package_name in internal_packages: @@ -465,16 +458,16 @@ class Disabled(Generic[D]): target: D -MaybeMetricNode = Optional[Union[ParsedMetric, Disabled[ParsedMetric]]] +MaybeMetricNode = Optional[Union[Metric, Disabled[Metric]]] -MaybeDocumentation = Optional[ParsedDocumentation] +MaybeDocumentation = Optional[Documentation] MaybeParsedSource = Optional[ Union[ - ParsedSourceDefinition, - Disabled[ParsedSourceDefinition], + SourceDefinition, + Disabled[SourceDefinition], ] ] @@ -514,7 +507,7 @@ def __init__(self): def find_macro_by_name( self, name: str, root_project_name: str, package: Optional[str] - ) -> Optional[ParsedMacro]: + ) -> Optional[Macro]: """Find a macro in the graph by its name and package name, or None for any package. The root project name is used to determine priority: - locally defined macros come first @@ -537,7 +530,7 @@ def filter(candidate: MacroCandidate) -> bool: def find_generate_macro_by_name( self, component: str, root_project_name: str - ) -> Optional[ParsedMacro]: + ) -> Optional[Macro]: """ The `generate_X_name` macros are similar to regular ones, but ignore imported packages. @@ -606,11 +599,11 @@ class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin): # is added it must all be added in the __reduce_ex__ method in the # args tuple in the right position. nodes: MutableMapping[str, ManifestNode] = field(default_factory=dict) - sources: MutableMapping[str, ParsedSourceDefinition] = field(default_factory=dict) - macros: MutableMapping[str, ParsedMacro] = field(default_factory=dict) - docs: MutableMapping[str, ParsedDocumentation] = field(default_factory=dict) - exposures: MutableMapping[str, ParsedExposure] = field(default_factory=dict) - metrics: MutableMapping[str, ParsedMetric] = field(default_factory=dict) + sources: MutableMapping[str, SourceDefinition] = field(default_factory=dict) + macros: MutableMapping[str, Macro] = field(default_factory=dict) + docs: MutableMapping[str, Documentation] = field(default_factory=dict) + exposures: MutableMapping[str, Exposure] = field(default_factory=dict) + metrics: MutableMapping[str, Metric] = field(default_factory=dict) selectors: MutableMapping[str, Any] = field(default_factory=dict) files: MutableMapping[str, AnySourceFile] = field(default_factory=dict) metadata: ManifestMetadata = field(default_factory=ManifestMetadata) @@ -658,7 +651,7 @@ def __post_deserialize__(cls, obj): obj._lock = flags.MP_CONTEXT.Lock() return obj - def sync_update_node(self, new_node: NonSourceCompiledNode) -> NonSourceCompiledNode: + def sync_update_node(self, new_node: ManifestNode) -> ManifestNode: """update the node with a lock. 
The only time we should want to lock is when compiling an ephemeral ancestor of a node at runtime, because multiple threads could be just-in-time compiling the same ephemeral @@ -671,21 +664,21 @@ def sync_update_node(self, new_node: NonSourceCompiledNode) -> NonSourceCompiled with self._lock: existing = self.nodes[new_node.unique_id] if getattr(existing, "compiled", False): - # already compiled -> must be a NonSourceCompiledNode - return cast(NonSourceCompiledNode, existing) + # already compiled + return existing _update_into(self.nodes, new_node) return new_node - def update_exposure(self, new_exposure: ParsedExposure): + def update_exposure(self, new_exposure: Exposure): _update_into(self.exposures, new_exposure) - def update_metric(self, new_metric: ParsedMetric): + def update_metric(self, new_metric: Metric): _update_into(self.metrics, new_metric) def update_node(self, new_node: ManifestNode): _update_into(self.nodes, new_node) - def update_source(self, new_source: ParsedSourceDefinition): + def update_source(self, new_source: SourceDefinition): _update_into(self.sources, new_source) def build_flat_graph(self): @@ -738,7 +731,7 @@ def _materialization_candidates_for( def find_materialization_macro_by_name( self, project_name: str, materialization_name: str, adapter_type: str - ) -> Optional[ParsedMacro]: + ) -> Optional[Macro]: candidates: CandidateList = CandidateList( chain.from_iterable( self._materialization_candidates_for( @@ -943,8 +936,8 @@ def resolve_source( search_name = f"{target_source_name}.{target_table_name}" candidates = _search_packages(current_project, node_package) - source: Optional[ParsedSourceDefinition] = None - disabled: Optional[List[ParsedSourceDefinition]] = None + source: Optional[SourceDefinition] = None + disabled: Optional[List[SourceDefinition]] = None for pkg in candidates: source = self.source_lookup.find(search_name, pkg, self) @@ -968,8 +961,8 @@ def resolve_metric( node_package: str, ) -> MaybeMetricNode: - metric: Optional[ParsedMetric] = None - disabled: Optional[List[ParsedMetric]] = None + metric: Optional[Metric] = None + disabled: Optional[List[Metric]] = None candidates = _search_packages(current_project, node_package, target_metric_package) for pkg in candidates: @@ -992,7 +985,7 @@ def resolve_doc( package: Optional[str], current_project: str, node_package: str, - ) -> Optional[ParsedDocumentation]: + ) -> Optional[Documentation]: """Resolve the given documentation. This follows the same algorithm as resolve_ref except the is_enabled checks are unnecessary as docs are always enabled. @@ -1044,7 +1037,7 @@ def merge_from_artifact( # Methods that were formerly in ParseResult - def add_macro(self, source_file: SourceFile, macro: ParsedMacro): + def add_macro(self, source_file: SourceFile, macro: Macro): if macro.unique_id in self.macros: # detect that the macro exists and emit an error other_path = self.macros[macro.unique_id].original_file_path @@ -1086,30 +1079,30 @@ def add_source(self, source_file: SchemaSourceFile, source: UnpatchedSourceDefin self.sources[source.unique_id] = source # type: ignore source_file.sources.append(source.unique_id) - def add_node_nofile(self, node: ManifestNodes): + def add_node_nofile(self, node: ManifestNode): # nodes can't be overwritten! 
_check_duplicates(node, self.nodes) self.nodes[node.unique_id] = node - def add_node(self, source_file: AnySourceFile, node: ManifestNodes, test_from=None): + def add_node(self, source_file: AnySourceFile, node: ManifestNode, test_from=None): self.add_node_nofile(node) if isinstance(source_file, SchemaSourceFile): - if isinstance(node, ParsedGenericTestNode): + if isinstance(node, GenericTestNode): assert test_from source_file.add_test(node.unique_id, test_from) - if isinstance(node, ParsedMetric): + if isinstance(node, Metric): source_file.metrics.append(node.unique_id) - if isinstance(node, ParsedExposure): + if isinstance(node, Exposure): source_file.exposures.append(node.unique_id) else: source_file.nodes.append(node.unique_id) - def add_exposure(self, source_file: SchemaSourceFile, exposure: ParsedExposure): + def add_exposure(self, source_file: SchemaSourceFile, exposure: Exposure): _check_duplicates(exposure, self.exposures) self.exposures[exposure.unique_id] = exposure source_file.exposures.append(exposure.unique_id) - def add_metric(self, source_file: SchemaSourceFile, metric: ParsedMetric): + def add_metric(self, source_file: SchemaSourceFile, metric: Metric): _check_duplicates(metric, self.metrics) self.metrics[metric.unique_id] = metric source_file.metrics.append(metric.unique_id) @@ -1121,20 +1114,20 @@ def add_disabled_nofile(self, node: GraphMemberNode): else: self.disabled[node.unique_id] = [node] - def add_disabled(self, source_file: AnySourceFile, node: CompileResultNode, test_from=None): + def add_disabled(self, source_file: AnySourceFile, node: ResultNode, test_from=None): self.add_disabled_nofile(node) if isinstance(source_file, SchemaSourceFile): - if isinstance(node, ParsedGenericTestNode): + if isinstance(node, GenericTestNode): assert test_from source_file.add_test(node.unique_id, test_from) - if isinstance(node, ParsedMetric): + if isinstance(node, Metric): source_file.metrics.append(node.unique_id) - if isinstance(node, ParsedExposure): + if isinstance(node, Exposure): source_file.exposures.append(node.unique_id) else: source_file.nodes.append(node.unique_id) - def add_doc(self, source_file: SourceFile, doc: ParsedDocumentation): + def add_doc(self, source_file: SourceFile, doc: Documentation): _check_duplicates(doc, self.docs) self.docs[doc.unique_id] = doc source_file.docs.append(doc.unique_id) @@ -1192,27 +1185,27 @@ class WritableManifest(ArtifactMixin): nodes: Mapping[UniqueID, ManifestNode] = field( metadata=dict(description=("The nodes defined in the dbt project and its dependencies")) ) - sources: Mapping[UniqueID, ParsedSourceDefinition] = field( + sources: Mapping[UniqueID, SourceDefinition] = field( metadata=dict(description=("The sources defined in the dbt project and its dependencies")) ) - macros: Mapping[UniqueID, ParsedMacro] = field( + macros: Mapping[UniqueID, Macro] = field( metadata=dict(description=("The macros defined in the dbt project and its dependencies")) ) - docs: Mapping[UniqueID, ParsedDocumentation] = field( + docs: Mapping[UniqueID, Documentation] = field( metadata=dict(description=("The docs defined in the dbt project and its dependencies")) ) - exposures: Mapping[UniqueID, ParsedExposure] = field( + exposures: Mapping[UniqueID, Exposure] = field( metadata=dict( description=("The exposures defined in the dbt project and its dependencies") ) ) - metrics: Mapping[UniqueID, ParsedMetric] = field( + metrics: Mapping[UniqueID, Metric] = field( metadata=dict(description=("The metrics defined in the dbt project and its dependencies")) ) 
selectors: Mapping[UniqueID, Any] = field( metadata=dict(description=("The selectors defined in selectors.yml")) ) - disabled: Optional[Mapping[UniqueID, List[CompileResultNode]]] = field( + disabled: Optional[Mapping[UniqueID, List[ResultNode]]] = field( metadata=dict(description="A mapping of the disabled nodes in the target") ) parent_map: Optional[NodeEdgeMap] = field( diff --git a/core/dbt/contracts/graph/metrics.py b/core/dbt/contracts/graph/metrics.py index 20222b4a32b..b895aa5e2f5 100644 --- a/core/dbt/contracts/graph/metrics.py +++ b/core/dbt/contracts/graph/metrics.py @@ -12,7 +12,7 @@ def __str__(self): class ResolvedMetricReference(MetricReference): """ - Simple proxy over a ParsedMetric which delegates property + Simple proxy over a Metric which delegates property lookups to the underlying node. Also adds helper functions for working with metrics (ie. __str__ and templating functions) """ diff --git a/core/dbt/contracts/graph/parsed.py b/core/dbt/contracts/graph/nodes.py similarity index 85% rename from core/dbt/contracts/graph/parsed.py rename to core/dbt/contracts/graph/nodes.py index 8fc4ca0c3ed..a908167f49e 100644 --- a/core/dbt/contracts/graph/parsed.py +++ b/core/dbt/contracts/graph/nodes.py @@ -99,6 +99,49 @@ def add_macro(self, value: str): self.macros.append(value) +@dataclass +class InjectedCTE(dbtClassMixin, Replaceable): + id: str + sql: str + + +@dataclass +class CompiledNode: + compiled: bool = False + compiled_code: Optional[str] = None + extra_ctes_injected: bool = False + extra_ctes: List[InjectedCTE] = field(default_factory=list) + relation_name: Optional[str] = None + _pre_injected_sql: Optional[str] = None + + def set_cte(self, cte_id: str, sql: str): + """This is the equivalent of what self.extra_ctes[cte_id] = sql would + do if extra_ctes were an OrderedDict + """ + for cte in self.extra_ctes: + if cte.id == cte_id: + cte.sql = sql + break + else: + self.extra_ctes.append(InjectedCTE(id=cte_id, sql=sql)) + + def __post_serialize__(self, dct): + dct = super().__post_serialize__(dct) + if "_pre_injected_sql" in dct: + del dct["_pre_injected_sql"] + # Remove compiled attributes + if "compiled" in dct and dct["compiled"] is False: + del dct["compiled"] + del dct["extra_ctes_injected"] + del dct["extra_ctes"] + # "omit_none" means these might not be in the dictionary + if "compiled_code" in dct: + del dct["compiled_code"] + if "relation_name" in dct: + del dct["relation_name"] + return dct + + @dataclass class DependsOn(MacroDependsOn): nodes: List[str] = field(default_factory=list) @@ -213,7 +256,7 @@ def clear_event_status(self): @dataclass -class ParsedNodeDefaults(NodeInfoMixin, ParsedNodeMandatory): +class ParsedNodeDefaults(NodeInfoMixin, CompiledNode, ParsedNodeMandatory): tags: List[str] = field(default_factory=list) refs: List[List[str]] = field(default_factory=list) sources: List[List[str]] = field(default_factory=list) @@ -265,26 +308,26 @@ def _deserialize(cls, dct: Dict[str, int]): # between them. 
resource_type = dct["resource_type"] if resource_type == "model": - return ParsedModelNode.from_dict(dct) + return ModelNode.from_dict(dct) elif resource_type == "analysis": - return ParsedAnalysisNode.from_dict(dct) + return AnalysisNode.from_dict(dct) elif resource_type == "seed": - return ParsedSeedNode.from_dict(dct) + return SeedNode.from_dict(dct) elif resource_type == "rpc": - return ParsedRPCNode.from_dict(dct) + return RPCNode.from_dict(dct) elif resource_type == "sql": - return ParsedSqlNode.from_dict(dct) + return SqlNode.from_dict(dct) elif resource_type == "test": if "test_metadata" in dct: - return ParsedGenericTestNode.from_dict(dct) + return GenericTestNode.from_dict(dct) else: - return ParsedSingularTestNode.from_dict(dct) + return SingularTestNode.from_dict(dct) elif resource_type == "operation": - return ParsedHookNode.from_dict(dct) + return HookNode.from_dict(dct) elif resource_type == "seed": - return ParsedSeedNode.from_dict(dct) + return SeedNode.from_dict(dct) elif resource_type == "snapshot": - return ParsedSnapshotNode.from_dict(dct) + return SnapshotNode.from_dict(dct) else: return cls.from_dict(dct) @@ -354,29 +397,29 @@ def same_contents(self: T, old: Optional[T]) -> bool: @dataclass -class ParsedAnalysisNode(ParsedNode): +class AnalysisNode(ParsedNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.Analysis]}) @dataclass -class ParsedHookNode(ParsedNode): +class HookNode(ParsedNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.Operation]}) index: Optional[int] = None @dataclass -class ParsedModelNode(ParsedNode): +class ModelNode(ParsedNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.Model]}) # TODO: rm? @dataclass -class ParsedRPCNode(ParsedNode): +class RPCNode(ParsedNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.RPCCall]}) @dataclass -class ParsedSqlNode(ParsedNode): +class SqlNode(ParsedNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.SqlOperation]}) @@ -417,8 +460,7 @@ def same_seeds(first: ParsedNode, second: ParsedNode) -> bool: @dataclass -class ParsedSeedNode(ParsedNode): - # keep this in sync with CompiledSeedNode! +class SeedNode(ParsedNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.Seed]}) config: SeedConfig = field(default_factory=SeedConfig) # seeds need the root_path because the contents are not loaded initially @@ -450,7 +492,7 @@ class HasTestMetadata(dbtClassMixin): @dataclass -class ParsedSingularTestNode(ParsedNode): +class SingularTestNode(ParsedNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.Test]}) # Was not able to make mypy happy and keep the code working. We need to # refactor the various configs. @@ -462,8 +504,7 @@ def test_node_type(self): @dataclass -class ParsedGenericTestNode(ParsedNode, HasTestMetadata): - # keep this in sync with CompiledGenericTestNode! 
+class GenericTestNode(ParsedNode, HasTestMetadata): resource_type: NodeType = field(metadata={"restrict": [NodeType.Test]}) column_name: Optional[str] = None file_key_name: Optional[str] = None @@ -495,7 +536,7 @@ class IntermediateSnapshotNode(ParsedNode): @dataclass -class ParsedSnapshotNode(ParsedNode): +class SnapshotNode(ParsedNode): resource_type: NodeType = field(metadata={"restrict": [NodeType.Snapshot]}) config: SnapshotConfig @@ -523,7 +564,7 @@ class ParsedMacroPatch(ParsedPatch): @dataclass -class ParsedMacro(UnparsedBaseNode, HasUniqueID): +class Macro(UnparsedBaseNode, HasUniqueID): name: str macro_sql: str resource_type: NodeType = field(metadata={"restrict": [NodeType.Macro]}) @@ -547,7 +588,7 @@ def patch(self, patch: ParsedMacroPatch): self.docs = patch.docs self.arguments = patch.arguments - def same_contents(self, other: Optional["ParsedMacro"]) -> bool: + def same_contents(self, other: Optional["Macro"]) -> bool: if other is None: return False # the only thing that makes one macro different from another with the @@ -556,7 +597,7 @@ def same_contents(self, other: Optional["ParsedMacro"]) -> bool: @dataclass -class ParsedDocumentation(UnparsedDocumentation, HasUniqueID): +class Documentation(UnparsedDocumentation, HasUniqueID): name: str block_contents: str @@ -564,7 +605,7 @@ class ParsedDocumentation(UnparsedDocumentation, HasUniqueID): def search_name(self): return self.name - def same_contents(self, other: Optional["ParsedDocumentation"]) -> bool: + def same_contents(self, other: Optional["Documentation"]) -> bool: if other is None: return False # the only thing that makes one doc different from another with the @@ -642,7 +683,7 @@ class ParsedSourceMandatory( @dataclass -class ParsedSourceDefinition(NodeInfoMixin, ParsedSourceMandatory): +class SourceDefinition(NodeInfoMixin, ParsedSourceMandatory): quoting: Quoting = field(default_factory=Quoting) loaded_at_field: Optional[str] = None freshness: Optional[FreshnessThreshold] = None @@ -663,7 +704,7 @@ def __post_serialize__(self, dct): del dct["_event_status"] return dct - def same_database_representation(self, other: "ParsedSourceDefinition") -> bool: + def same_database_representation(self, other: "SourceDefinition") -> bool: return ( self.database == other.database and self.schema == other.schema @@ -671,26 +712,26 @@ def same_database_representation(self, other: "ParsedSourceDefinition") -> bool: and True ) - def same_quoting(self, other: "ParsedSourceDefinition") -> bool: + def same_quoting(self, other: "SourceDefinition") -> bool: return self.quoting == other.quoting - def same_freshness(self, other: "ParsedSourceDefinition") -> bool: + def same_freshness(self, other: "SourceDefinition") -> bool: return ( self.freshness == other.freshness and self.loaded_at_field == other.loaded_at_field and True ) - def same_external(self, other: "ParsedSourceDefinition") -> bool: + def same_external(self, other: "SourceDefinition") -> bool: return self.external == other.external - def same_config(self, old: "ParsedSourceDefinition") -> bool: + def same_config(self, old: "SourceDefinition") -> bool: return self.config.same_contents( self.unrendered_config, old.unrendered_config, ) - def same_contents(self, old: Optional["ParsedSourceDefinition"]) -> bool: + def same_contents(self, old: Optional["SourceDefinition"]) -> bool: # existing when it didn't before is a change! 
if old is None: return True @@ -757,7 +798,7 @@ def search_name(self): @dataclass -class ParsedExposure(UnparsedBaseNode, HasUniqueID, HasFqn): +class Exposure(UnparsedBaseNode, HasUniqueID, HasFqn): name: str type: ExposureType owner: ExposureOwner @@ -784,34 +825,34 @@ def depends_on_nodes(self): def search_name(self): return self.name - def same_depends_on(self, old: "ParsedExposure") -> bool: + def same_depends_on(self, old: "Exposure") -> bool: return set(self.depends_on.nodes) == set(old.depends_on.nodes) - def same_description(self, old: "ParsedExposure") -> bool: + def same_description(self, old: "Exposure") -> bool: return self.description == old.description - def same_label(self, old: "ParsedExposure") -> bool: + def same_label(self, old: "Exposure") -> bool: return self.label == old.label - def same_maturity(self, old: "ParsedExposure") -> bool: + def same_maturity(self, old: "Exposure") -> bool: return self.maturity == old.maturity - def same_owner(self, old: "ParsedExposure") -> bool: + def same_owner(self, old: "Exposure") -> bool: return self.owner == old.owner - def same_exposure_type(self, old: "ParsedExposure") -> bool: + def same_exposure_type(self, old: "Exposure") -> bool: return self.type == old.type - def same_url(self, old: "ParsedExposure") -> bool: + def same_url(self, old: "Exposure") -> bool: return self.url == old.url - def same_config(self, old: "ParsedExposure") -> bool: + def same_config(self, old: "Exposure") -> bool: return self.config.same_contents( self.unrendered_config, old.unrendered_config, ) - def same_contents(self, old: Optional["ParsedExposure"]) -> bool: + def same_contents(self, old: Optional["Exposure"]) -> bool: # existing when it didn't before is a change! # metadata/tags changes are not "changes" if old is None: @@ -838,7 +879,7 @@ class MetricReference(dbtClassMixin, Replaceable): @dataclass -class ParsedMetric(UnparsedBaseNode, HasUniqueID, HasFqn): +class Metric(UnparsedBaseNode, HasUniqueID, HasFqn): name: str description: str label: str @@ -870,43 +911,43 @@ def depends_on_nodes(self): def search_name(self): return self.name - def same_model(self, old: "ParsedMetric") -> bool: + def same_model(self, old: "Metric") -> bool: return self.model == old.model - def same_window(self, old: "ParsedMetric") -> bool: + def same_window(self, old: "Metric") -> bool: return self.window == old.window - def same_dimensions(self, old: "ParsedMetric") -> bool: + def same_dimensions(self, old: "Metric") -> bool: return self.dimensions == old.dimensions - def same_filters(self, old: "ParsedMetric") -> bool: + def same_filters(self, old: "Metric") -> bool: return self.filters == old.filters - def same_description(self, old: "ParsedMetric") -> bool: + def same_description(self, old: "Metric") -> bool: return self.description == old.description - def same_label(self, old: "ParsedMetric") -> bool: + def same_label(self, old: "Metric") -> bool: return self.label == old.label - def same_calculation_method(self, old: "ParsedMetric") -> bool: + def same_calculation_method(self, old: "Metric") -> bool: return self.calculation_method == old.calculation_method - def same_expression(self, old: "ParsedMetric") -> bool: + def same_expression(self, old: "Metric") -> bool: return self.expression == old.expression - def same_timestamp(self, old: "ParsedMetric") -> bool: + def same_timestamp(self, old: "Metric") -> bool: return self.timestamp == old.timestamp - def same_time_grains(self, old: "ParsedMetric") -> bool: + def same_time_grains(self, old: "Metric") -> bool: 
return self.time_grains == old.time_grains - def same_config(self, old: "ParsedMetric") -> bool: + def same_config(self, old: "Metric") -> bool: return self.config.same_contents( self.unrendered_config, old.unrendered_config, ) - def same_contents(self, old: Optional["ParsedMetric"]) -> bool: + def same_contents(self, old: Optional["Metric"]) -> bool: # existing when it didn't before is a change! # metadata/tags changes are not "changes" if old is None: @@ -928,24 +969,40 @@ def same_contents(self, old: Optional["ParsedMetric"]) -> bool: ) -ManifestNodes = Union[ - ParsedAnalysisNode, - ParsedSingularTestNode, - ParsedHookNode, - ParsedModelNode, - ParsedRPCNode, - ParsedSqlNode, - ParsedGenericTestNode, - ParsedSeedNode, - ParsedSnapshotNode, +ManifestNode = Union[ + AnalysisNode, + SingularTestNode, + HookNode, + ModelNode, + RPCNode, + SqlNode, + GenericTestNode, + SeedNode, + SnapshotNode, ] +ResultNode = Union[ + ManifestNode, + SourceDefinition, +] -ParsedResource = Union[ - ParsedDocumentation, - ParsedMacro, +GraphMemberNode = Union[ + ResultNode, + Exposure, + Metric, +] + + +Resource = Union[ + Documentation, + Macro, ParsedNode, - ParsedExposure, - ParsedMetric, - ParsedSourceDefinition, + Exposure, + Metric, + SourceDefinition, +] + +TestNode = Union[ + SingularTestNode, + GenericTestNode, ] diff --git a/core/dbt/contracts/results.py b/core/dbt/contracts/results.py index 4adba9860b0..97c43396e33 100644 --- a/core/dbt/contracts/results.py +++ b/core/dbt/contracts/results.py @@ -1,6 +1,5 @@ -from dbt.contracts.graph.manifest import CompileResultNode from dbt.contracts.graph.unparsed import FreshnessThreshold -from dbt.contracts.graph.parsed import ParsedSourceDefinition +from dbt.contracts.graph.nodes import SourceDefinition, ResultNode from dbt.contracts.util import ( BaseArtifactMetadata, ArtifactMixin, @@ -145,7 +144,7 @@ def to_msg(self): @dataclass class NodeResult(BaseResult): - node: CompileResultNode + node: ResultNode @dataclass @@ -284,7 +283,7 @@ def from_success( @dataclass class SourceFreshnessResult(NodeResult): - node: ParsedSourceDefinition + node: SourceDefinition status: FreshnessStatus max_loaded_at: datetime snapshotted_at: datetime diff --git a/core/dbt/contracts/sql.py b/core/dbt/contracts/sql.py index a3e5b3d58db..b80304d2565 100644 --- a/core/dbt/contracts/sql.py +++ b/core/dbt/contracts/sql.py @@ -5,7 +5,7 @@ from dbt.dataclass_schema import dbtClassMixin -from dbt.contracts.graph.compiled import CompileResultNode +from dbt.contracts.graph.nodes import ResultNode from dbt.contracts.results import ( RunResult, RunResultsArtifact, @@ -32,7 +32,7 @@ class RemoteResult(VersionedSchema): class RemoteCompileResultMixin(RemoteResult): raw_code: str compiled_code: str - node: CompileResultNode + node: ResultNode timing: List[TimingInfo] diff --git a/core/dbt/graph/queue.py b/core/dbt/graph/queue.py index 56248409754..3c3b9625d27 100644 --- a/core/dbt/graph/queue.py +++ b/core/dbt/graph/queue.py @@ -5,8 +5,12 @@ from typing import Dict, Set, List, Generator, Optional from .graph import UniqueId -from dbt.contracts.graph.parsed import ParsedSourceDefinition, ParsedExposure, ParsedMetric -from dbt.contracts.graph.compiled import GraphMemberNode +from dbt.contracts.graph.nodes import ( + SourceDefinition, + Exposure, + Metric, + GraphMemberNode, +) from dbt.contracts.graph.manifest import Manifest from dbt.node_types import NodeType @@ -48,7 +52,7 @@ def _include_in_cost(self, node_id: UniqueId) -> bool: if node.resource_type != NodeType.Model: return False # must be 
a Model - tell mypy this won't be a Source or Exposure or Metric - assert not isinstance(node, (ParsedSourceDefinition, ParsedExposure, ParsedMetric)) + assert not isinstance(node, (SourceDefinition, Exposure, Metric)) if node.is_ephemeral: return False return True diff --git a/core/dbt/graph/selector.py b/core/dbt/graph/selector.py index 13a3ae0a952..0ed8ac50b0a 100644 --- a/core/dbt/graph/selector.py +++ b/core/dbt/graph/selector.py @@ -12,7 +12,7 @@ InternalException, InvalidSelectorException, ) -from dbt.contracts.graph.compiled import GraphMemberNode +from dbt.contracts.graph.nodes import GraphMemberNode from dbt.contracts.graph.manifest import Manifest from dbt.contracts.state import PreviousState diff --git a/core/dbt/graph/selector_methods.py b/core/dbt/graph/selector_methods.py index 0e59da38a16..f7044ecaf32 100644 --- a/core/dbt/graph/selector_methods.py +++ b/core/dbt/graph/selector_methods.py @@ -7,20 +7,16 @@ from .graph import UniqueId -from dbt.contracts.graph.compiled import ( - CompiledSingularTestNode, - CompiledGenericTestNode, - CompileResultNode, - ManifestNode, -) from dbt.contracts.graph.manifest import Manifest, WritableManifest -from dbt.contracts.graph.parsed import ( +from dbt.contracts.graph.nodes import ( HasTestMetadata, - ParsedSingularTestNode, - ParsedExposure, - ParsedMetric, - ParsedGenericTestNode, - ParsedSourceDefinition, + SingularTestNode, + Exposure, + Metric, + GenericTestNode, + SourceDefinition, + ResultNode, + ManifestNode, ) from dbt.contracts.state import PreviousState from dbt.exceptions import ( @@ -76,7 +72,7 @@ def is_selected_node(fqn: List[str], node_selector: str): return True -SelectorTarget = Union[ParsedSourceDefinition, ManifestNode, ParsedExposure, ParsedMetric] +SelectorTarget = Union[SourceDefinition, ManifestNode, Exposure, Metric] class SelectorMethod(metaclass=abc.ABCMeta): @@ -99,7 +95,7 @@ def parsed_nodes( def source_nodes( self, included_nodes: Set[UniqueId] - ) -> Iterator[Tuple[UniqueId, ParsedSourceDefinition]]: + ) -> Iterator[Tuple[UniqueId, SourceDefinition]]: for key, source in self.manifest.sources.items(): unique_id = UniqueId(key) @@ -107,9 +103,7 @@ def source_nodes( continue yield unique_id, source - def exposure_nodes( - self, included_nodes: Set[UniqueId] - ) -> Iterator[Tuple[UniqueId, ParsedExposure]]: + def exposure_nodes(self, included_nodes: Set[UniqueId]) -> Iterator[Tuple[UniqueId, Exposure]]: for key, exposure in self.manifest.exposures.items(): unique_id = UniqueId(key) @@ -117,9 +111,7 @@ def exposure_nodes( continue yield unique_id, exposure - def metric_nodes( - self, included_nodes: Set[UniqueId] - ) -> Iterator[Tuple[UniqueId, ParsedMetric]]: + def metric_nodes(self, included_nodes: Set[UniqueId]) -> Iterator[Tuple[UniqueId, Metric]]: for key, metric in self.manifest.metrics.items(): unique_id = UniqueId(key) @@ -139,13 +131,13 @@ def all_nodes( def configurable_nodes( self, included_nodes: Set[UniqueId] - ) -> Iterator[Tuple[UniqueId, CompileResultNode]]: + ) -> Iterator[Tuple[UniqueId, ResultNode]]: yield from chain(self.parsed_nodes(included_nodes), self.source_nodes(included_nodes)) def non_source_nodes( self, included_nodes: Set[UniqueId], - ) -> Iterator[Tuple[UniqueId, Union[ParsedExposure, ManifestNode, ParsedMetric]]]: + ) -> Iterator[Tuple[UniqueId, Union[Exposure, ManifestNode, Metric]]]: yield from chain( self.parsed_nodes(included_nodes), self.exposure_nodes(included_nodes), @@ -392,19 +384,19 @@ def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[Uniqu class 
TestTypeSelectorMethod(SelectorMethod): def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]: - search_types: Tuple[Type, ...] + search_type: Type # continue supporting 'schema' + 'data' for backwards compatibility if selector in ("generic", "schema"): - search_types = (ParsedGenericTestNode, CompiledGenericTestNode) + search_type = GenericTestNode elif selector in ("singular", "data"): - search_types = (ParsedSingularTestNode, CompiledSingularTestNode) + search_type = SingularTestNode else: raise RuntimeException( f'Invalid test type selector {selector}: expected "generic" or ' '"singular"' ) for node, real_node in self.parsed_nodes(included_nodes): - if isinstance(real_node, search_types): + if isinstance(real_node, search_type): yield node diff --git a/core/dbt/parser/README.md b/core/dbt/parser/README.md index 6ab326c42a6..7e4c208cdf9 100644 --- a/core/dbt/parser/README.md +++ b/core/dbt/parser/README.md @@ -126,17 +126,17 @@ These have executable SQL attached. Models - Are generated from SQL files in the 'models' directory - have a unique_id starting with 'model.' -- Final object is a ParsedModelNode +- Final object is a ModelNode -Data Tests +Singular Tests - Are generated from SQL files in 'tests' directory - have a unique_id starting with 'test.' -- Final object is a ParsedDataTestNode +- Final object is a SingularTestNode -Schema Tests +Generic Tests - Are generated from 'tests' in schema yaml files, which ultimately derive from tests in the 'macros' directory - Have a unique_id starting with 'test.' -- Final object is a ParsedSchemaTestNode +- Final object is a GenericTestNode - fqn is .schema_test. Hooks @@ -146,35 +146,35 @@ Hooks Analysis - comes from SQL files in 'analysis' directory -- Final object is a ParsedAnalysisNode +- Final object is an AnalysisNode RPC Node - This is a "node" representing the bit of Jinja-SQL that gets passed into the run_sql or compile_sql methods. When you're using the Cloud IDE, and you're working in a scratch tab, and you just want to compile/run what you have there: it needs to be parsed and executed, but it's not actually a model/node in the project, so it's this special thing. This is a temporary addition to the running manifest. -- Object is a ParsedRPCNode +- Object is an RPCNode ### sources - comes from 'sources' sections in yaml files -- Final object is a ParsedSourceDefinition node +- Final object is a SourceDefinition node - have a unique_id starting with 'source.' ### macros - comes from SQL files in 'macros' directory -- Final object is a ParsedMacro node +- Final object is a Macro node - have a unique_id starting with 'macro.' - Test macros are used in schema tests ### docs - comes from .md files in 'docs' directory -- Final object is a ParsedDocumentation +- Final object is a Documentation ### exposures - comes from 'exposures' sections in yaml files -- Final object is a ParsedExposure node +- Final object is an Exposure node ## Temporary patch files
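To make the renames above concrete: because each node type is now a single class, code that previously had to check a (Parsed*, Compiled*) pair, as TestTypeSelectorMethod did, can test against one class. A minimal sketch, assuming dbt-core with this change installed; `describe` is a hypothetical helper, not part of the patch, and the unique_id prefixes follow the README sections above:

```python
# All node classes now live in dbt.contracts.graph.nodes,
# with the Parsed/Compiled prefixes dropped.
from dbt.contracts.graph.nodes import (
    ModelNode,        # was ParsedModelNode / CompiledModelNode
    GenericTestNode,  # was ParsedGenericTestNode / CompiledGenericTestNode
    SourceDefinition, # was ParsedSourceDefinition
    Macro,            # was ParsedMacro
)

def describe(resource) -> str:
    # Hypothetical helper: a single isinstance check per resource type is
    # enough now; unique_id prefixes match the README ('model.', 'test.',
    # 'source.', 'macro.').
    if isinstance(resource, ModelNode):
        return f"model: {resource.unique_id}"
    if isinstance(resource, GenericTestNode):
        return f"test: {resource.unique_id}"
    if isinstance(resource, SourceDefinition):
        return f"source: {resource.unique_id}"
    if isinstance(resource, Macro):
        return f"macro: {resource.unique_id}"
    return "other resource"
```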
diff --git a/core/dbt/parser/analysis.py b/core/dbt/parser/analysis.py index 17eadb8783b..2102a76ac2e 100644 --- a/core/dbt/parser/analysis.py +++ b/core/dbt/parser/analysis.py @@ -1,16 +1,16 @@ import os -from dbt.contracts.graph.parsed import ParsedAnalysisNode +from dbt.contracts.graph.nodes import AnalysisNode from dbt.node_types import NodeType from dbt.parser.base import SimpleSQLParser from dbt.parser.search import FileBlock -class AnalysisParser(SimpleSQLParser[ParsedAnalysisNode]): - def parse_from_dict(self, dct, validate=True) -> ParsedAnalysisNode: +class AnalysisParser(SimpleSQLParser[AnalysisNode]): + def parse_from_dict(self, dct, validate=True) -> AnalysisNode: if validate: - ParsedAnalysisNode.validate(dct) - return ParsedAnalysisNode.from_dict(dct) + AnalysisNode.validate(dct) + return AnalysisNode.from_dict(dct) @property def resource_type(self) -> NodeType: diff --git a/core/dbt/parser/base.py b/core/dbt/parser/base.py index b6d349803f6..7fff4daebf4 100644 --- a/core/dbt/parser/base.py +++ b/core/dbt/parser/base.py @@ -16,7 +16,7 @@ from dbt.config import Project, RuntimeConfig from dbt.context.context_config import ContextConfig from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.graph.parsed import HasUniqueID, ManifestNodes +from dbt.contracts.graph.nodes import HasUniqueID, ManifestNode from dbt.contracts.graph.unparsed import UnparsedNode, Docs from dbt.exceptions import ParsingException, validator_error_message, InternalException from dbt import hooks @@ -30,7 +30,7 @@ IntermediateValue = TypeVar("IntermediateValue", bound=HasUniqueID) IntermediateNode = TypeVar("IntermediateNode", bound=Any) -FinalNode = TypeVar("FinalNode", bound=ManifestNodes) +FinalNode = TypeVar("FinalNode", bound=ManifestNode) ConfiguredBlockType = TypeVar("ConfiguredBlockType", bound=FileBlock) @@ -365,7 +365,7 @@ def render_update(self, node: IntermediateNode, config: ContextConfig) -> None: msg = validator_error_message(exc) raise ParsingException(msg, node=node) from exc - def add_result_node(self, block: FileBlock, node: ManifestNodes): + def add_result_node(self, block: FileBlock, node: ManifestNode): if node.config.enabled: self.manifest.add_node(block.file, node) else: diff --git a/core/dbt/parser/docs.py b/core/dbt/parser/docs.py index a1130eda0da..fb9b488276e 100644 --- a/core/dbt/parser/docs.py +++ b/core/dbt/parser/docs.py @@ -4,7 +4,7 @@ from dbt.clients.jinja import get_rendered from dbt.contracts.files import SourceFile -from dbt.contracts.graph.parsed import ParsedDocumentation +from dbt.contracts.graph.nodes import Documentation from dbt.node_types import NodeType from dbt.parser.base import Parser from dbt.parser.search import BlockContents, FileBlock, BlockSearcher @@ -13,7 +13,7 @@ SHOULD_PARSE_RE = re.compile(r"{[{%]") -class DocumentationParser(Parser[ParsedDocumentation]): +class DocumentationParser(Parser[Documentation]): @property def resource_type(self) -> NodeType: return NodeType.Documentation @@ -27,11 +27,11 @@ def generate_unique_id(self, resource_name: str, _: Optional[str] = None) -> str # need to be part of the unique ID.
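# e.g. (illustrative names): a docs block 'orders_doc' in a project called
# 'jaffle_shop' gets unique_id 'jaffle_shop.orders_doc' - just
# project_name.resource_name, with no file-path component, as the return
# below shows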
return "{}.{}".format(self.project.project_name, resource_name) - def parse_block(self, block: BlockContents) -> Iterable[ParsedDocumentation]: + def parse_block(self, block: BlockContents) -> Iterable[Documentation]: unique_id = self.generate_unique_id(block.name) contents = get_rendered(block.contents, {}).strip() - doc = ParsedDocumentation( + doc = Documentation( path=block.file.path.relative_path, original_file_path=block.path.original_file_path, package_name=self.project.project_name, diff --git a/core/dbt/parser/generic_test.py b/core/dbt/parser/generic_test.py index b69ca20ef6d..822dd5b2d85 100644 --- a/core/dbt/parser/generic_test.py +++ b/core/dbt/parser/generic_test.py @@ -4,9 +4,8 @@ from dbt.exceptions import ParsingException from dbt.clients import jinja -from dbt.contracts.graph.parsed import ParsedGenericTestNode +from dbt.contracts.graph.nodes import GenericTestNode, Macro from dbt.contracts.graph.unparsed import UnparsedMacro -from dbt.contracts.graph.parsed import ParsedMacro from dbt.contracts.files import SourceFile from dbt.events.functions import fire_event from dbt.events.types import GenericTestFileParse @@ -17,7 +16,7 @@ from dbt import flags -class GenericTestParser(BaseParser[ParsedGenericTestNode]): +class GenericTestParser(BaseParser[GenericTestNode]): @property def resource_type(self) -> NodeType: return NodeType.Macro @@ -28,10 +27,10 @@ def get_compiled_path(cls, block: FileBlock): def parse_generic_test( self, block: jinja.BlockTag, base_node: UnparsedMacro, name: str - ) -> ParsedMacro: + ) -> Macro: unique_id = self.generate_unique_id(name) - return ParsedMacro( + return Macro( path=base_node.path, macro_sql=block.full_block, original_file_path=base_node.original_file_path, @@ -41,7 +40,7 @@ def parse_generic_test( unique_id=unique_id, ) - def parse_unparsed_generic_test(self, base_node: UnparsedMacro) -> Iterable[ParsedMacro]: + def parse_unparsed_generic_test(self, base_node: UnparsedMacro) -> Iterable[Macro]: try: blocks: List[jinja.BlockTag] = [ t diff --git a/core/dbt/parser/generic_test_builders.py b/core/dbt/parser/generic_test_builders.py index 3dfb541cb8f..3b1149e53a5 100644 --- a/core/dbt/parser/generic_test_builders.py +++ b/core/dbt/parser/generic_test_builders.py @@ -13,7 +13,7 @@ ) from dbt.clients.jinja import get_rendered, GENERIC_TEST_KWARGS_NAME -from dbt.contracts.graph.parsed import UnpatchedSourceDefinition +from dbt.contracts.graph.nodes import UnpatchedSourceDefinition from dbt.contracts.graph.unparsed import ( TestDef, UnparsedAnalysisUpdate, diff --git a/core/dbt/parser/hooks.py b/core/dbt/parser/hooks.py index 2ac8bfda0ef..d05ea136dc5 100644 --- a/core/dbt/parser/hooks.py +++ b/core/dbt/parser/hooks.py @@ -3,7 +3,7 @@ from dbt.context.context_config import ContextConfig from dbt.contracts.files import FilePath -from dbt.contracts.graph.parsed import ParsedHookNode +from dbt.contracts.graph.nodes import HookNode from dbt.exceptions import InternalException from dbt.node_types import NodeType, RunHookType from dbt.parser.base import SimpleParser @@ -65,7 +65,7 @@ def __iter__(self) -> Iterator[HookBlock]: ) -class HookParser(SimpleParser[HookBlock, ParsedHookNode]): +class HookParser(SimpleParser[HookBlock, HookNode]): def transform(self, node): return node @@ -81,10 +81,10 @@ def get_path(self) -> FilePath: ) return path - def parse_from_dict(self, dct, validate=True) -> ParsedHookNode: + def parse_from_dict(self, dct, validate=True) -> HookNode: if validate: - ParsedHookNode.validate(dct) - return ParsedHookNode.from_dict(dct) + 
HookNode.validate(dct) + return HookNode.from_dict(dct) @classmethod def get_compiled_path(cls, block: HookBlock): @@ -98,7 +98,7 @@ def _create_parsetime_node( fqn: List[str], name=None, **kwargs, - ) -> ParsedHookNode: + ) -> HookNode: return super()._create_parsetime_node( block=block, diff --git a/core/dbt/parser/macros.py b/core/dbt/parser/macros.py index 7f99753ad2c..7c5336b8ccf 100644 --- a/core/dbt/parser/macros.py +++ b/core/dbt/parser/macros.py @@ -4,7 +4,7 @@ from dbt.clients import jinja from dbt.contracts.graph.unparsed import UnparsedMacro -from dbt.contracts.graph.parsed import ParsedMacro +from dbt.contracts.graph.nodes import Macro from dbt.contracts.files import FilePath, SourceFile from dbt.exceptions import ParsingException from dbt.events.functions import fire_event @@ -16,7 +16,7 @@ from dbt import flags -class MacroParser(BaseParser[ParsedMacro]): +class MacroParser(BaseParser[Macro]): # This is only used when creating a MacroManifest separate # from the normal parsing flow. def get_paths(self) -> List[FilePath]: @@ -32,12 +32,10 @@ def resource_type(self) -> NodeType: def get_compiled_path(cls, block: FileBlock): return block.path.relative_path - def parse_macro( - self, block: jinja.BlockTag, base_node: UnparsedMacro, name: str - ) -> ParsedMacro: + def parse_macro(self, block: jinja.BlockTag, base_node: UnparsedMacro, name: str) -> Macro: unique_id = self.generate_unique_id(name) - return ParsedMacro( + return Macro( path=base_node.path, macro_sql=block.full_block, original_file_path=base_node.original_file_path, @@ -47,7 +45,7 @@ def parse_macro( unique_id=unique_id, ) - def parse_unparsed_macros(self, base_node: UnparsedMacro) -> Iterable[ParsedMacro]: + def parse_unparsed_macros(self, base_node: UnparsedMacro) -> Iterable[Macro]: try: blocks: List[jinja.BlockTag] = [ t diff --git a/core/dbt/parser/manifest.py b/core/dbt/parser/manifest.py index bbfe9714697..21594a93318 100644 --- a/core/dbt/parser/manifest.py +++ b/core/dbt/parser/manifest.py @@ -53,7 +53,6 @@ from dbt.contracts.files import FileHash, ParseFileType, SchemaSourceFile from dbt.parser.read_files import read_files, load_source_file from dbt.parser.partial import PartialParsing, special_override_macros -from dbt.contracts.graph.compiled import ManifestNode from dbt.contracts.graph.manifest import ( Manifest, Disabled, @@ -61,13 +60,14 @@ ManifestStateCheck, ParsingInfo, ) -from dbt.contracts.graph.parsed import ( - ParsedSourceDefinition, +from dbt.contracts.graph.nodes import ( + SourceDefinition, ParsedNode, - ParsedMacro, + Macro, ColumnInfo, - ParsedExposure, - ParsedMetric, + Exposure, + Metric, + ManifestNode, ) from dbt.contracts.util import Writable from dbt.exceptions import ( @@ -366,7 +366,7 @@ def load(self): self._perf_info.parse_project_elapsed = time.perf_counter() - start_parse_projects # patch_sources converts the UnparsedSourceDefinitions in the - # Manifest.sources to ParsedSourceDefinition via 'patch_source' + # Manifest.sources to SourceDefinition via 'patch_source' # in SourcePatcher start_patch = time.perf_counter() patcher = SourcePatcher(self.root_project, self.manifest) @@ -921,7 +921,7 @@ def process_sources(self, current_project: str): for node in self.manifest.nodes.values(): if node.resource_type == NodeType.Source: continue - assert not isinstance(node, ParsedSourceDefinition) + assert not isinstance(node, SourceDefinition) if node.created_at < self.started_at: continue _process_sources_for_node(self.manifest, current_project, node) @@ -1053,7 +1053,7 @@ def 
_get_node_column(node, column_name): return column -DocsContextCallback = Callable[[Union[ParsedNode, ParsedSourceDefinition]], Dict[str, Any]] +DocsContextCallback = Callable[[Union[ParsedNode, SourceDefinition]], Dict[str, Any]] # node and column descriptions @@ -1069,7 +1069,7 @@ def _process_docs_for_node( # source and table descriptions, column descriptions def _process_docs_for_source( context: Dict[str, Any], - source: ParsedSourceDefinition, + source: SourceDefinition, ): table_description = source.description source_description = source.source_description @@ -1085,22 +1085,22 @@ def _process_docs_for_source( # macro argument descriptions -def _process_docs_for_macro(context: Dict[str, Any], macro: ParsedMacro) -> None: +def _process_docs_for_macro(context: Dict[str, Any], macro: Macro) -> None: macro.description = get_rendered(macro.description, context) for arg in macro.arguments: arg.description = get_rendered(arg.description, context) # exposure descriptions -def _process_docs_for_exposure(context: Dict[str, Any], exposure: ParsedExposure) -> None: +def _process_docs_for_exposure(context: Dict[str, Any], exposure: Exposure) -> None: exposure.description = get_rendered(exposure.description, context) -def _process_docs_for_metrics(context: Dict[str, Any], metric: ParsedMetric) -> None: +def _process_docs_for_metrics(context: Dict[str, Any], metric: Metric) -> None: metric.description = get_rendered(metric.description, context) -def _process_refs_for_exposure(manifest: Manifest, current_project: str, exposure: ParsedExposure): +def _process_refs_for_exposure(manifest: Manifest, current_project: str, exposure: Exposure): """Given a manifest and exposure in that manifest, process its refs""" for ref in exposure.refs: target_model: Optional[Union[Disabled, ManifestNode]] = None @@ -1143,7 +1143,7 @@ def _process_refs_for_exposure(manifest: Manifest, current_project: str, exposur manifest.update_exposure(exposure) -def _process_refs_for_metric(manifest: Manifest, current_project: str, metric: ParsedMetric): +def _process_refs_for_metric(manifest: Manifest, current_project: str, metric: Metric): """Given a manifest and a metric in that manifest, process its refs""" for ref in metric.refs: target_model: Optional[Union[Disabled, ManifestNode]] = None @@ -1188,11 +1188,11 @@ def _process_refs_for_metric(manifest: Manifest, current_project: str, metric: P def _process_metrics_for_node( manifest: Manifest, current_project: str, - node: Union[ManifestNode, ParsedMetric, ParsedExposure], + node: Union[ManifestNode, Metric, Exposure], ): """Given a manifest and a node in that manifest, process its metrics""" for metric in node.metrics: - target_metric: Optional[Union[Disabled, ParsedMetric]] = None + target_metric: Optional[Union[Disabled, Metric]] = None target_metric_name: str target_metric_package: Optional[str] = None @@ -1276,10 +1276,8 @@ def _process_refs_for_node(manifest: Manifest, current_project: str, node: Manif manifest.update_node(node) -def _process_sources_for_exposure( - manifest: Manifest, current_project: str, exposure: ParsedExposure -): - target_source: Optional[Union[Disabled, ParsedSourceDefinition]] = None +def _process_sources_for_exposure(manifest: Manifest, current_project: str, exposure: Exposure): + target_source: Optional[Union[Disabled, SourceDefinition]] = None for source_name, table_name in exposure.sources: target_source = manifest.resolve_source( source_name, @@ -1301,8 +1299,8 @@ def _process_sources_for_exposure( manifest.update_exposure(exposure) -def 
_process_sources_for_metric(manifest: Manifest, current_project: str, metric: ParsedMetric): - target_source: Optional[Union[Disabled, ParsedSourceDefinition]] = None +def _process_sources_for_metric(manifest: Manifest, current_project: str, metric: Metric): + target_source: Optional[Union[Disabled, SourceDefinition]] = None for source_name, table_name in metric.sources: target_source = manifest.resolve_source( source_name, @@ -1325,7 +1323,7 @@ def _process_sources_for_metric(manifest: Manifest, current_project: str, metric def _process_sources_for_node(manifest: Manifest, current_project: str, node: ManifestNode): - target_source: Optional[Union[Disabled, ParsedSourceDefinition]] = None + target_source: Optional[Union[Disabled, SourceDefinition]] = None for source_name, table_name in node.sources: target_source = manifest.resolve_source( source_name, @@ -1351,7 +1349,7 @@ def _process_sources_for_node(manifest: Manifest, current_project: str, node: Ma # This is called in task.rpc.sql_commands when a "dynamic" node is # created in the manifest, in 'add_refs' -def process_macro(config: RuntimeConfig, manifest: Manifest, macro: ParsedMacro) -> None: +def process_macro(config: RuntimeConfig, manifest: Manifest, macro: Macro) -> None: ctx = generate_runtime_docs_context( config, macro, diff --git a/core/dbt/parser/models.py b/core/dbt/parser/models.py index 0a3f87018d9..7dea4aca135 100644 --- a/core/dbt/parser/models.py +++ b/core/dbt/parser/models.py @@ -1,6 +1,6 @@ from copy import deepcopy from dbt.context.context_config import ContextConfig -from dbt.contracts.graph.parsed import ParsedModelNode +from dbt.contracts.graph.nodes import ModelNode import dbt.flags as flags from dbt.events.functions import fire_event from dbt.events.types import ( @@ -181,11 +181,11 @@ def verify_python_model_code(node): raise ParsingException("No jinja in python model code is allowed", node=node) -class ModelParser(SimpleSQLParser[ParsedModelNode]): - def parse_from_dict(self, dct, validate=True) -> ParsedModelNode: +class ModelParser(SimpleSQLParser[ModelNode]): + def parse_from_dict(self, dct, validate=True) -> ModelNode: if validate: - ParsedModelNode.validate(dct) - return ParsedModelNode.from_dict(dct) + ModelNode.validate(dct) + return ModelNode.from_dict(dct) @property def resource_type(self) -> NodeType: @@ -221,7 +221,7 @@ def parse_python_model(self, node, config, context): # this is being used in macro build_config_dict context["config"](config_keys_used=config_keys_used) - def render_update(self, node: ParsedModelNode, config: ContextConfig) -> None: + def render_update(self, node: ModelNode, config: ContextConfig) -> None: self.manifest._parsing_info.static_analysis_path_count += 1 if node.language == ModelLanguage.python: @@ -266,9 +266,9 @@ def render_update(self, node: ParsedModelNode, config: ContextConfig) -> None: # top-level declaration of variables statically_parsed: Optional[Union[str, Dict[str, List[Any]]]] = None experimental_sample: Optional[Union[str, Dict[str, List[Any]]]] = None - exp_sample_node: Optional[ParsedModelNode] = None + exp_sample_node: Optional[ModelNode] = None exp_sample_config: Optional[ContextConfig] = None - jinja_sample_node: Optional[ParsedModelNode] = None + jinja_sample_node: Optional[ModelNode] = None jinja_sample_config: Optional[ContextConfig] = None result: List[str] = [] @@ -369,9 +369,7 @@ def render_update(self, node: ParsedModelNode, config: ContextConfig) -> None: } ) - def run_static_parser( - self, node: ParsedModelNode - ) -> Optional[Union[str, 
Dict[str, List[Any]]]]: + def run_static_parser(self, node: ModelNode) -> Optional[Union[str, Dict[str, List[Any]]]]: # if any banned macros have been overridden by the user, we cannot use the static parser. if self._has_banned_macro(node): # this log line is used for integration testing. If you change @@ -393,7 +391,7 @@ def run_static_parser( return "cannot_parse" def run_experimental_parser( - self, node: ParsedModelNode + self, node: ModelNode ) -> Optional[Union[str, Dict[str, List[Any]]]]: # if any banned macros have been overridden by the user, we cannot use the static parser. if self._has_banned_macro(node): @@ -419,7 +417,7 @@ def run_experimental_parser( return "cannot_parse" # checks for banned macros - def _has_banned_macro(self, node: ParsedModelNode) -> bool: + def _has_banned_macro(self, node: ModelNode) -> bool: # first check if there is a banned macro defined in scope for this model file root_project_name = self.root_project.project_name project_name = node.package_name @@ -439,9 +437,7 @@ def _has_banned_macro(self, node: ParsedModelNode) -> bool: # this method updates the model node rendered and unrendered config as well # as the node object. Used to populate these values when circumventing jinja # rendering like the static parser. - def populate( - self, node: ParsedModelNode, config: ContextConfig, statically_parsed: Dict[str, Any] - ): + def populate(self, node: ModelNode, config: ContextConfig, statically_parsed: Dict[str, Any]): # manually fit configs in config._config_call_dict = _get_config_call_dict(statically_parsed) @@ -489,9 +485,9 @@ def _shift_sources(static_parser_result: Dict[str, List[Any]]) -> Dict[str, List # returns a list of string codes to be sent as a tracking event def _get_exp_sample_result( - sample_node: ParsedModelNode, + sample_node: ModelNode, sample_config: ContextConfig, - node: ParsedModelNode, + node: ModelNode, config: ContextConfig, ) -> List[str]: result: List[Tuple[int, str]] = _get_sample_result(sample_node, sample_config, node, config) @@ -505,9 +501,9 @@ def process(codemsg): # returns a list of string codes to be sent as a tracking event def _get_stable_sample_result( - sample_node: ParsedModelNode, + sample_node: ModelNode, sample_config: ContextConfig, - node: ParsedModelNode, + node: ModelNode, config: ContextConfig, ) -> List[str]: result: List[Tuple[int, str]] = _get_sample_result(sample_node, sample_config, node, config) @@ -522,9 +518,9 @@ def process(codemsg): # returns a list of string codes that need a single digit prefix to be prepended # before being sent as a tracking event def _get_sample_result( - sample_node: ParsedModelNode, + sample_node: ModelNode, sample_config: ContextConfig, - node: ParsedModelNode, + node: ModelNode, config: ContextConfig, ) -> List[Tuple[int, str]]: result: List[Tuple[int, str]] = [] diff --git a/core/dbt/parser/partial.py b/core/dbt/parser/partial.py index 1a8c7e8193e..63ef33429c4 100644 --- a/core/dbt/parser/partial.py +++ b/core/dbt/parser/partial.py @@ -873,7 +873,7 @@ def delete_schema_source(self, schema_file, source_dict): source_name = source_dict["name"] # There may be multiple sources for each source dict, since # there will be a separate source node for each table. 
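# e.g. (illustrative project/source names): a source dict named 'raw' with
# tables 'orders' and 'customers' produces two nodes,
# 'source.my_project.raw.orders' and 'source.my_project.raw.customers',
# and deleting the dict must drop every one of them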
- # ParsedSourceDefinition name = table name, dict name is source_name + # SourceDefinition name = table name, dict name is source_name sources = schema_file.sources.copy() for unique_id in sources: if unique_id in self.saved_manifest.sources: diff --git a/core/dbt/parser/schemas.py b/core/dbt/parser/schemas.py index 4909d99f44e..5756ed4ba02 100644 --- a/core/dbt/parser/schemas.py +++ b/core/dbt/parser/schemas.py @@ -27,14 +27,14 @@ from dbt.context.macro_resolver import MacroResolver from dbt.contracts.files import FileHash, SchemaSourceFile from dbt.contracts.graph.model_config import MetricConfig, ExposureConfig -from dbt.contracts.graph.parsed import ( +from dbt.contracts.graph.nodes import ( ParsedNodePatch, ColumnInfo, - ParsedGenericTestNode, + GenericTestNode, ParsedMacroPatch, UnpatchedSourceDefinition, - ParsedExposure, - ParsedMetric, + Exposure, + Metric, ) from dbt.contracts.graph.unparsed import ( HasColumnDocs, @@ -168,7 +168,7 @@ def _trimmed(inp: str) -> str: return inp[:44] + "..." + inp[-3:] -class SchemaParser(SimpleParser[GenericTestBlock, ParsedGenericTestNode]): +class SchemaParser(SimpleParser[GenericTestBlock, GenericTestNode]): def __init__( self, project, @@ -195,10 +195,10 @@ def get_compiled_path(cls, block: FileBlock) -> str: def resource_type(self) -> NodeType: return NodeType.Test - def parse_from_dict(self, dct, validate=True) -> ParsedGenericTestNode: + def parse_from_dict(self, dct, validate=True) -> GenericTestNode: if validate: - ParsedGenericTestNode.validate(dct) - return ParsedGenericTestNode.from_dict(dct) + GenericTestNode.validate(dct) + return GenericTestNode.from_dict(dct) def parse_column_tests(self, block: TestBlock, column: UnparsedColumn) -> None: if not column.tests: @@ -219,7 +219,7 @@ def create_test_node( test_metadata: Dict[str, Any], file_key_name: str, column_name: Optional[str], - ) -> ParsedGenericTestNode: + ) -> GenericTestNode: HASH_LENGTH = 10 @@ -259,8 +259,8 @@ def get_hashable_md(data: Union[str, int, float, List, Dict]) -> Union[str, List "file_key_name": file_key_name, } try: - ParsedGenericTestNode.validate(dct) - return ParsedGenericTestNode.from_dict(dct) + GenericTestNode.validate(dct) + return GenericTestNode.from_dict(dct) except ValidationError as exc: msg = validator_error_message(exc) # this is a bit silly, but build an UnparsedNode just for error @@ -281,7 +281,7 @@ def _parse_generic_test( tags: List[str], column_name: Optional[str], schema_file_id: str, - ) -> ParsedGenericTestNode: + ) -> GenericTestNode: try: builder = TestBuilder( test=test, @@ -416,7 +416,7 @@ def render_test_update(self, node, config, builder, schema_file_id): msg = validator_error_message(exc) raise ParsingException(msg, node=node) from exc - def parse_node(self, block: GenericTestBlock) -> ParsedGenericTestNode: + def parse_node(self, block: GenericTestBlock) -> GenericTestNode: """In schema parsing, we rewrite most of the part of parse_node that builds the initial node to be parsed, but rendering is basically the same @@ -431,7 +431,7 @@ def parse_node(self, block: GenericTestBlock) -> ParsedGenericTestNode: self.add_test_node(block, node) return node - def add_test_node(self, block: GenericTestBlock, node: ParsedGenericTestNode): + def add_test_node(self, block: GenericTestBlock, node: GenericTestNode): test_from = {"key": block.target.yaml_key, "name": block.target.name} if node.config.enabled: self.manifest.add_node(block.file, node, test_from) @@ -440,7 +440,7 @@ def add_test_node(self, block: GenericTestBlock, node: 
ParsedGenericTestNode): def render_with_context( self, - node: ParsedGenericTestNode, + node: GenericTestNode, config: ContextConfig, ) -> None: """Given the parsed node and a ContextConfig to use during @@ -1027,7 +1027,7 @@ def parse_exposure(self, unparsed: UnparsedExposure): f"Calculated a {type(config)} for an exposure, but expected an ExposureConfig" ) - parsed = ParsedExposure( + parsed = Exposure( package_name=package_name, path=path, original_file_path=self.yaml.path.original_file_path, @@ -1130,7 +1130,7 @@ def parse_metric(self, unparsed: UnparsedMetric): f"Calculated a {type(config)} for a metric, but expected a MetricConfig" ) - parsed = ParsedMetric( + parsed = Metric( package_name=package_name, path=path, original_file_path=self.yaml.path.original_file_path, diff --git a/core/dbt/parser/seeds.py b/core/dbt/parser/seeds.py index 0cd5aeb6307..02c20df7cf5 100644 --- a/core/dbt/parser/seeds.py +++ b/core/dbt/parser/seeds.py @@ -1,17 +1,17 @@ from dbt.context.context_config import ContextConfig -from dbt.contracts.graph.parsed import ParsedSeedNode +from dbt.contracts.graph.nodes import SeedNode from dbt.node_types import NodeType from dbt.parser.base import SimpleSQLParser from dbt.parser.search import FileBlock -class SeedParser(SimpleSQLParser[ParsedSeedNode]): - def parse_from_dict(self, dct, validate=True) -> ParsedSeedNode: +class SeedParser(SimpleSQLParser[SeedNode]): + def parse_from_dict(self, dct, validate=True) -> SeedNode: # seeds need the root_path because the contents are not loaded dct["root_path"] = self.project.project_root if validate: - ParsedSeedNode.validate(dct) - return ParsedSeedNode.from_dict(dct) + SeedNode.validate(dct) + return SeedNode.from_dict(dct) @property def resource_type(self) -> NodeType: @@ -21,5 +21,5 @@ def resource_type(self) -> NodeType: def get_compiled_path(cls, block: FileBlock): return block.path.relative_path - def render_with_context(self, parsed_node: ParsedSeedNode, config: ContextConfig) -> None: + def render_with_context(self, parsed_node: SeedNode, config: ContextConfig) -> None: """Seeds don't need to do any rendering.""" diff --git a/core/dbt/parser/singular_test.py b/core/dbt/parser/singular_test.py index 22d203a8ebc..fbb3c8ce8fa 100644 --- a/core/dbt/parser/singular_test.py +++ b/core/dbt/parser/singular_test.py @@ -1,15 +1,15 @@ -from dbt.contracts.graph.parsed import ParsedSingularTestNode +from dbt.contracts.graph.nodes import SingularTestNode from dbt.node_types import NodeType from dbt.parser.base import SimpleSQLParser from dbt.parser.search import FileBlock from dbt.utils import get_pseudo_test_path -class SingularTestParser(SimpleSQLParser[ParsedSingularTestNode]): - def parse_from_dict(self, dct, validate=True) -> ParsedSingularTestNode: +class SingularTestParser(SimpleSQLParser[SingularTestNode]): + def parse_from_dict(self, dct, validate=True) -> SingularTestNode: if validate: - ParsedSingularTestNode.validate(dct) - return ParsedSingularTestNode.from_dict(dct) + SingularTestNode.validate(dct) + return SingularTestNode.from_dict(dct) @property def resource_type(self) -> NodeType: diff --git a/core/dbt/parser/snapshots.py b/core/dbt/parser/snapshots.py index 71e7bba955f..754a0341e69 100644 --- a/core/dbt/parser/snapshots.py +++ b/core/dbt/parser/snapshots.py @@ -3,7 +3,7 @@ from dbt.dataclass_schema import ValidationError -from dbt.contracts.graph.parsed import IntermediateSnapshotNode, ParsedSnapshotNode +from dbt.contracts.graph.nodes import IntermediateSnapshotNode, SnapshotNode from dbt.exceptions import 
ParsingException, validator_error_message from dbt.node_types import NodeType from dbt.parser.base import SQLParser @@ -11,7 +11,7 @@ from dbt.utils import split_path -class SnapshotParser(SQLParser[IntermediateSnapshotNode, ParsedSnapshotNode]): +class SnapshotParser(SQLParser[IntermediateSnapshotNode, SnapshotNode]): def parse_from_dict(self, dct, validate=True) -> IntermediateSnapshotNode: if validate: IntermediateSnapshotNode.validate(dct) @@ -53,7 +53,7 @@ def get_fqn(self, path: str, name: str) -> List[str]: fqn.append(name) return fqn - def transform(self, node: IntermediateSnapshotNode) -> ParsedSnapshotNode: + def transform(self, node: IntermediateSnapshotNode) -> SnapshotNode: try: # The config_call_dict is not serialized, because normally # it is not needed after parsing. But since the snapshot node @@ -61,7 +61,7 @@ def transform(self, node: IntermediateSnapshotNode) -> ParsedSnapshotNode: # the model config when there is also schema config. config_call_dict = node.config_call_dict dct = node.to_dict(omit_none=True) - parsed_node = ParsedSnapshotNode.from_dict(dct) + parsed_node = SnapshotNode.from_dict(dct) parsed_node.config_call_dict = config_call_dict self.set_snapshot_attributes(parsed_node) return parsed_node diff --git a/core/dbt/parser/sources.py b/core/dbt/parser/sources.py index 73fdd80e4c9..cc9acea98c3 100644 --- a/core/dbt/parser/sources.py +++ b/core/dbt/parser/sources.py @@ -10,10 +10,10 @@ ) from dbt.contracts.graph.manifest import Manifest, SourceKey from dbt.contracts.graph.model_config import SourceConfig -from dbt.contracts.graph.parsed import ( +from dbt.contracts.graph.nodes import ( UnpatchedSourceDefinition, - ParsedSourceDefinition, - ParsedGenericTestNode, + SourceDefinition, + GenericTestNode, ) from dbt.contracts.graph.unparsed import ( UnparsedSourceDefinition, @@ -38,7 +38,7 @@ # generate multiple UnpatchedSourceDefinition nodes (one per # table) in the SourceParser.add_source_definitions. The # SourcePatcher takes an UnparsedSourceDefinition and the -# SourcePatch and produces a ParsedSourceDefinition. Each +# SourcePatch and produces a SourceDefinition. Each # SourcePatch can be applied to multiple UnpatchedSourceDefinitions. class SourcePatcher: def __init__( @@ -50,16 +50,16 @@ def __init__( self.manifest = manifest self.schema_parsers: Dict[str, SchemaParser] = {} self.patches_used: Dict[SourceKey, Set[str]] = {} - self.sources: Dict[str, ParsedSourceDefinition] = {} + self.sources: Dict[str, SourceDefinition] = {} # This method calls the 'parse_source' method which takes # the UnpatchedSourceDefinitions in the manifest and combines them - # with SourcePatches to produce ParsedSourceDefinitions. + # with SourcePatches to produce SourceDefinitions. def construct_sources(self) -> None: for unique_id, unpatched in self.manifest.sources.items(): schema_file = self.manifest.files[unpatched.file_id] - if isinstance(unpatched, ParsedSourceDefinition): - # In partial parsing, there will be ParsedSourceDefinitions + if isinstance(unpatched, SourceDefinition): + # In partial parsing, there will be SourceDefinitions # which must be retained. 
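# Pipeline sketch (illustrative, per the comments above): one
# UnparsedSourceDefinition fans out into one UnpatchedSourceDefinition per
# table; patch_source() merges any SourcePatch into it; parse_source() then
# builds the final SourceDefinition stored in self.sources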
self.sources[unpatched.unique_id] = unpatched continue @@ -80,7 +80,7 @@ def construct_sources(self) -> None: test_from = {"key": "sources", "name": patched.source.name} schema_file.add_test(test.unique_id, test_from) - # Convert UnpatchedSourceDefinition to a ParsedSourceDefinition + # Convert UnpatchedSourceDefinition to a SourceDefinition parsed = self.parse_source(patched) if parsed.config.enabled: self.sources[unique_id] = parsed @@ -118,8 +118,8 @@ def patch_source( table = UnparsedSourceTableDefinition.from_dict(table_dct) return unpatched.replace(source=source, table=table, patch_path=patch_path) - # This converts an UnpatchedSourceDefinition to a ParsedSourceDefinition - def parse_source(self, target: UnpatchedSourceDefinition) -> ParsedSourceDefinition: + # This converts an UnpatchedSourceDefinition to a SourceDefinition + def parse_source(self, target: UnpatchedSourceDefinition) -> SourceDefinition: source = target.source table = target.table refs = ParserRef.from_target(table) @@ -156,7 +156,7 @@ def parse_source(self, target: UnpatchedSourceDefinition) -> ParsedSourceDefinit default_database = self.root_project.credentials.database - parsed_source = ParsedSourceDefinition( + parsed_source = SourceDefinition( package_name=target.package_name, database=(source.database or default_database), schema=(source.schema or source.name), @@ -201,9 +201,7 @@ def get_schema_parser_for(self, package_name: str) -> "SchemaParser": self.schema_parsers[package_name] = schema_parser return schema_parser - def get_source_tests( - self, target: UnpatchedSourceDefinition - ) -> Iterable[ParsedGenericTestNode]: + def get_source_tests(self, target: UnpatchedSourceDefinition) -> Iterable[GenericTestNode]: for test, column in target.get_tests(): yield self.parse_source_test( target=target, @@ -215,7 +213,7 @@ def get_patch_for( self, unpatched: UnpatchedSourceDefinition, ) -> Optional[SourcePatch]: - if isinstance(unpatched, ParsedSourceDefinition): + if isinstance(unpatched, SourceDefinition): return None key = (unpatched.package_name, unpatched.source.name) patch: Optional[SourcePatch] = self.manifest.source_patches.get(key) @@ -234,7 +232,7 @@ def parse_source_test( target: UnpatchedSourceDefinition, test: Dict[str, Any], column: Optional[UnparsedColumn], - ) -> ParsedGenericTestNode: + ) -> GenericTestNode: column_name: Optional[str] if column is None: column_name = None @@ -286,7 +284,7 @@ def _generate_source_config(self, target: UnpatchedSourceDefinition, rendered: b patch_config_dict=precedence_configs, ) - def _get_relation_name(self, node: ParsedSourceDefinition): + def _get_relation_name(self, node: SourceDefinition): adapter = get_adapter(self.root_project) relation_cls = adapter.Relation return str(relation_cls.create_from(self.root_project, node)) diff --git a/core/dbt/parser/sql.py b/core/dbt/parser/sql.py index 14c74247b62..82d09c12d6b 100644 --- a/core/dbt/parser/sql.py +++ b/core/dbt/parser/sql.py @@ -3,7 +3,7 @@ from typing import Iterable from dbt.contracts.graph.manifest import SourceFile -from dbt.contracts.graph.parsed import ParsedSqlNode, ParsedMacro +from dbt.contracts.graph.nodes import SqlNode, Macro from dbt.contracts.graph.unparsed import UnparsedMacro from dbt.exceptions import InternalException from dbt.node_types import NodeType @@ -21,11 +21,11 @@ def name(self): return self.block_name -class SqlBlockParser(SimpleSQLParser[ParsedSqlNode]): - def parse_from_dict(self, dct, validate=True) -> ParsedSqlNode: +class SqlBlockParser(SimpleSQLParser[SqlNode]): + def 
parse_from_dict(self, dct, validate=True) -> SqlNode: if validate: - ParsedSqlNode.validate(dct) - return ParsedSqlNode.from_dict(dct) + SqlNode.validate(dct) + return SqlNode.from_dict(dct) @property def resource_type(self) -> NodeType: @@ -42,14 +42,14 @@ def get_compiled_path(block: FileBlock): return os.path.join("sql", block.name) - def parse_remote(self, sql: str, name: str) -> ParsedSqlNode: + def parse_remote(self, sql: str, name: str) -> SqlNode: source_file = SourceFile.remote(sql, self.project.project_name, "sql") contents = SqlBlock(block_name=name, file=source_file) return self.parse_node(contents) class SqlMacroParser(MacroParser): - def parse_remote(self, contents) -> Iterable[ParsedMacro]: + def parse_remote(self, contents) -> Iterable[Macro]: base = UnparsedMacro( path="from remote system", original_file_path="from remote system", diff --git a/core/dbt/task/freshness.py b/core/dbt/task/freshness.py index 51944cb4508..704368cf24f 100644 --- a/core/dbt/task/freshness.py +++ b/core/dbt/task/freshness.py @@ -25,7 +25,7 @@ from dbt.node_types import NodeType from dbt.graph import ResourceTypeSelector -from dbt.contracts.graph.parsed import ParsedSourceDefinition +from dbt.contracts.graph.nodes import SourceDefinition RESULT_FILE_NAME = "sources.json" @@ -141,7 +141,7 @@ class FreshnessSelector(ResourceTypeSelector): def node_is_match(self, node): if not super().node_is_match(node): return False - if not isinstance(node, ParsedSourceDefinition): + if not isinstance(node, SourceDefinition): return False return node.has_freshness diff --git a/core/dbt/task/generate.py b/core/dbt/task/generate.py index 0bc6f3f9527..48db2e772ba 100644 --- a/core/dbt/task/generate.py +++ b/core/dbt/task/generate.py @@ -8,7 +8,7 @@ from .compile import CompileTask from dbt.adapters.factory import get_adapter -from dbt.contracts.graph.compiled import CompileResultNode +from dbt.contracts.graph.nodes import ResultNode from dbt.contracts.graph.manifest import Manifest from dbt.contracts.results import ( NodeStatus, @@ -174,7 +174,7 @@ def format_stats(stats: PrimitiveDict) -> StatsDict: return stats_collector -def mapping_key(node: CompileResultNode) -> CatalogKey: +def mapping_key(node: ResultNode) -> CatalogKey: dkey = dbt.utils.lowercase(node.database) return CatalogKey(dkey, node.schema.lower(), node.identifier.lower()) diff --git a/core/dbt/task/list.py b/core/dbt/task/list.py index 43cd8e3f8fe..d8165560e2b 100644 --- a/core/dbt/task/list.py +++ b/core/dbt/task/list.py @@ -1,6 +1,6 @@ import json -from dbt.contracts.graph.parsed import ParsedExposure, ParsedSourceDefinition, ParsedMetric +from dbt.contracts.graph.nodes import Exposure, SourceDefinition, Metric from dbt.graph import ResourceTypeSelector from dbt.task.runnable import GraphRunnableTask, ManifestTask from dbt.task.test import TestSelector @@ -93,17 +93,17 @@ def _iterate_selected_nodes(self): def generate_selectors(self): for node in self._iterate_selected_nodes(): if node.resource_type == NodeType.Source: - assert isinstance(node, ParsedSourceDefinition) + assert isinstance(node, SourceDefinition) # sources are searched for by pkg.source_name.table_name source_selector = ".".join([node.package_name, node.source_name, node.name]) yield f"source:{source_selector}" elif node.resource_type == NodeType.Exposure: - assert isinstance(node, ParsedExposure) + assert isinstance(node, Exposure) # exposures are searched for by pkg.exposure_name exposure_selector = ".".join([node.package_name, node.name]) yield f"exposure:{exposure_selector}" elif 
node.resource_type == NodeType.Metric: - assert isinstance(node, ParsedMetric) + assert isinstance(node, Metric) # metrics are searched for by pkg.metric_name metric_selector = ".".join([node.package_name, node.name]) yield f"metric:{metric_selector}" diff --git a/core/dbt/task/run.py b/core/dbt/task/run.py index 39776b58e87..5b88d039904 100644 --- a/core/dbt/task/run.py +++ b/core/dbt/task/run.py @@ -17,9 +17,8 @@ from dbt.adapters.base import BaseRelation from dbt.clients.jinja import MacroGenerator from dbt.context.providers import generate_runtime_model_context -from dbt.contracts.graph.compiled import CompileResultNode from dbt.contracts.graph.model_config import Hook -from dbt.contracts.graph.parsed import ParsedHookNode +from dbt.contracts.graph.nodes import HookNode, ResultNode from dbt.contracts.results import NodeStatus, RunResult, RunStatus, RunningStatus, BaseResult from dbt.exceptions import ( CompilationException, @@ -79,17 +78,17 @@ def __eq__(self, other): return isinstance(other, self.__class__) -def _hook_list() -> List[ParsedHookNode]: +def _hook_list() -> List[HookNode]: return [] def get_hooks_by_tags( - nodes: Iterable[CompileResultNode], + nodes: Iterable[ResultNode], match_tags: Set[str], -) -> List[ParsedHookNode]: +) -> List[HookNode]: matched_nodes = [] for node in nodes: - if not isinstance(node, ParsedHookNode): + if not isinstance(node, HookNode): continue node_tags = node.tags if len(set(node_tags) & match_tags): @@ -304,20 +303,20 @@ def get_hook_sql(self, adapter, hook, idx, num_hooks, extra_context): hook_obj = get_hook(statement, index=hook_index) return hook_obj.sql or "" - def _hook_keyfunc(self, hook: ParsedHookNode) -> Tuple[str, Optional[int]]: + def _hook_keyfunc(self, hook: HookNode) -> Tuple[str, Optional[int]]: package_name = hook.package_name if package_name == self.config.project_name: package_name = BiggestName("") return package_name, hook.index - def get_hooks_by_type(self, hook_type: RunHookType) -> List[ParsedHookNode]: + def get_hooks_by_type(self, hook_type: RunHookType) -> List[HookNode]: if self.manifest is None: raise InternalException("self.manifest was None in get_hooks_by_type") nodes = self.manifest.nodes.values() # find all hooks defined in the manifest (could be multiple projects) - hooks: List[ParsedHookNode] = get_hooks_by_tags(nodes, {hook_type}) + hooks: List[HookNode] = get_hooks_by_tags(nodes, {hook_type}) hooks.sort(key=self._hook_keyfunc) return hooks diff --git a/core/dbt/task/runnable.py b/core/dbt/task/runnable.py index 279baffc448..226005497e4 100644 --- a/core/dbt/task/runnable.py +++ b/core/dbt/task/runnable.py @@ -39,9 +39,8 @@ NothingToDo, ) from dbt.events.contextvars import log_contextvars -from dbt.contracts.graph.compiled import CompileResultNode from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.graph.parsed import ParsedSourceDefinition +from dbt.contracts.graph.nodes import SourceDefinition, ResultNode from dbt.contracts.results import NodeStatus, RunExecutionResult, RunningStatus from dbt.contracts.state import PreviousState from dbt.exceptions import ( @@ -108,7 +107,7 @@ class GraphRunnableTask(ManifestTask): def __init__(self, args, config): super().__init__(args, config) self.job_queue: Optional[GraphQueue] = None - self._flattened_nodes: Optional[List[CompileResultNode]] = None + self._flattened_nodes: Optional[List[ResultNode]] = None self.run_count: int = 0 self.num_nodes: int = 0 @@ -330,7 +329,7 @@ def _handle_result(self, result): if self.manifest is None: raise 
InternalException("manifest was None in _handle_result") - if isinstance(node, ParsedSourceDefinition): + if isinstance(node, SourceDefinition): self.manifest.update_source(node) else: self.manifest.update_node(node) diff --git a/core/dbt/task/test.py b/core/dbt/task/test.py index a2f64a80315..e48dc94e4e4 100644 --- a/core/dbt/task/test.py +++ b/core/dbt/task/test.py @@ -5,15 +5,12 @@ from dbt.events.format import pluralize from dbt.dataclass_schema import dbtClassMixin import threading -from typing import Union from .compile import CompileRunner from .run import RunTask -from dbt.contracts.graph.compiled import ( - CompiledSingularTestNode, - CompiledGenericTestNode, - CompiledTestNode, +from dbt.contracts.graph.nodes import ( + TestNode, ) from dbt.contracts.graph.manifest import Manifest from dbt.contracts.results import TestStatus, PrimitiveDict, RunResult @@ -91,7 +88,7 @@ def before_execute(self): self.print_start_line() def execute_test( - self, test: Union[CompiledSingularTestNode, CompiledGenericTestNode], manifest: Manifest + self, test: TestNode, manifest: Manifest ) -> TestResultData: context = generate_runtime_model_context(test, self.config, manifest) @@ -139,7 +136,7 @@ def execute_test( TestResultData.validate(test_result_dct) return TestResultData.from_dict(test_result_dct) - def execute(self, test: CompiledTestNode, manifest: Manifest): + def execute(self, test: TestNode, manifest: Manifest): result = self.execute_test(test, manifest) severity = test.config.severity.upper() diff --git a/test/unit/test_compiler.py b/test/unit/test_compiler.py index 919f897c549..649a5918f91 100644 --- a/test/unit/test_compiler.py +++ b/test/unit/test_compiler.py @@ -6,8 +6,7 @@ from dbt.adapters.postgres import Plugin from dbt.contracts.files import FileHash from dbt.contracts.graph.manifest import Manifest -from dbt.contracts.graph.parsed import NodeConfig, DependsOn, ParsedModelNode -from dbt.contracts.graph.compiled import CompiledModelNode, InjectedCTE +from dbt.contracts.graph.nodes import NodeConfig, DependsOn, ModelNode, InjectedCTE from dbt.node_types import NodeType from datetime import datetime @@ -86,7 +85,7 @@ def test__prepend_ctes__already_has_cte(self): manifest = Manifest( macros={}, nodes={ - 'model.root.view': ParsedModelNode( + 'model.root.view': ModelNode( name='view', database='dbt', schema='analytics', @@ -102,7 +101,7 @@ def test__prepend_ctes__already_has_cte(self): raw_code='with cte as (select * from something_else) select * from {{ref("ephemeral")}}', checksum=FileHash.from_contents(''), ), - 'model.root.ephemeral': ParsedModelNode( + 'model.root.ephemeral': ModelNode( name='ephemeral', database='dbt', schema='analytics', @@ -148,7 +147,7 @@ def test__prepend_ctes__no_ctes(self): manifest = Manifest( macros={}, nodes={ - 'model.root.view': ParsedModelNode( + 'model.root.view': ModelNode( name='view', database='dbt', schema='analytics', @@ -165,7 +164,7 @@ def test__prepend_ctes__no_ctes(self): 'select * from source_table'), checksum=FileHash.from_contents(''), ), - 'model.root.view_no_cte': ParsedModelNode( + 'model.root.view_no_cte': ModelNode( name='view_no_cte', database='dbt', schema='analytics', @@ -224,7 +223,7 @@ def test__prepend_ctes(self): manifest = Manifest( macros={}, nodes={ - 'model.root.view': ParsedModelNode( + 'model.root.view': ModelNode( name='view', database='dbt', schema='analytics', @@ -240,7 +239,7 @@ def test__prepend_ctes(self): raw_code='select * from {{ref("ephemeral")}}', checksum=FileHash.from_contents(''), ), - 
'model.root.ephemeral': ParsedModelNode( + 'model.root.ephemeral': ModelNode( name='ephemeral', database='dbt', schema='analytics', @@ -284,7 +283,7 @@ def test__prepend_ctes(self): def test__prepend_ctes__cte_not_compiled(self): ephemeral_config = self.model_config.replace(materialized='ephemeral') - parsed_ephemeral = ParsedModelNode( + parsed_ephemeral = ModelNode( name='ephemeral', database='dbt', schema='analytics', @@ -304,7 +303,7 @@ def test__prepend_ctes__cte_not_compiled(self): raw_code='select * from source_table', checksum=FileHash.from_contents(''), ) - compiled_ephemeral = CompiledModelNode( + compiled_ephemeral = ModelNode( name='ephemeral', database='dbt', schema='analytics', @@ -331,7 +330,7 @@ def test__prepend_ctes__cte_not_compiled(self): manifest = Manifest( macros={}, nodes={ - 'model.root.view': CompiledModelNode( + 'model.root.view': ModelNode( name='view', database='dbt', schema='analytics', @@ -400,7 +399,7 @@ def test__prepend_ctes__multiple_levels(self): manifest = Manifest( macros={}, nodes={ - 'model.root.view': ParsedModelNode( + 'model.root.view': ModelNode( name='view', database='dbt', schema='analytics', @@ -417,7 +416,7 @@ def test__prepend_ctes__multiple_levels(self): checksum=FileHash.from_contents(''), ), - 'model.root.ephemeral': ParsedModelNode( + 'model.root.ephemeral': ModelNode( name='ephemeral', database='dbt', schema='analytics', @@ -433,7 +432,7 @@ def test__prepend_ctes__multiple_levels(self): raw_code='select * from {{ref("ephemeral_level_two")}}', checksum=FileHash.from_contents(''), ), - 'model.root.ephemeral_level_two': ParsedModelNode( + 'model.root.ephemeral_level_two': ModelNode( name='ephemeral_level_two', database='dbt', schema='analytics', @@ -488,7 +487,7 @@ def test__prepend_ctes__valid_ephemeral_sql(self): manifest = Manifest( macros={}, nodes={ - 'model.root.view': ParsedModelNode( + 'model.root.view': ModelNode( name='view', database='dbt', schema='analytics', @@ -504,7 +503,7 @@ def test__prepend_ctes__valid_ephemeral_sql(self): raw_code='select * from {{ref("ephemeral")}}', checksum=FileHash.from_contents(''), ), - 'model.root.inner_ephemeral': ParsedModelNode( + 'model.root.inner_ephemeral': ModelNode( name='inner_ephemeral', database='dbt', schema='analytics', @@ -520,7 +519,7 @@ def test__prepend_ctes__valid_ephemeral_sql(self): raw_code='select * from source_table', checksum=FileHash.from_contents(''), ), - 'model.root.ephemeral': ParsedModelNode( + 'model.root.ephemeral': ModelNode( name='ephemeral', database='dbt', schema='analytics', diff --git a/test/unit/test_context.py b/test/unit/test_context.py index 1aaf3711909..5e26bb11c04 100644 --- a/test/unit/test_context.py +++ b/test/unit/test_context.py @@ -10,11 +10,11 @@ from dbt.adapters import factory from dbt.adapters.base import AdapterConfig from dbt.clients.jinja import MacroStack -from dbt.contracts.graph.parsed import ( - ParsedModelNode, +from dbt.contracts.graph.nodes import ( + ModelNode, NodeConfig, DependsOn, - ParsedMacro, + Macro, ) from dbt.config.project import VarProvider from dbt.context import base, target, configured, providers, docs, manifest, macros @@ -33,7 +33,7 @@ class TestVar(unittest.TestCase): def setUp(self): - self.model = ParsedModelNode( + self.model = ModelNode( alias="model_one", name="model_one", database="dbt", @@ -273,7 +273,7 @@ def assert_has_keys(required_keys: Set[str], maybe_keys: Set[str], ctx: Dict[str def model(): - return ParsedModelNode( + return ModelNode( alias="model_one", name="model_one", database="dbt", @@ -315,7 
+315,7 @@ def test_base_context(): def mock_macro(name, package_name): macro = mock.MagicMock( - __class__=ParsedMacro, + __class__=Macro, package_name=package_name, resource_type="macro", unique_id=f"macro.{package_name}.{name}", @@ -335,7 +335,7 @@ def mock_manifest(config): def mock_model(): return mock.MagicMock( - __class__=ParsedModelNode, + __class__=ModelNode, alias="model_one", name="model_one", database="dbt", diff --git a/test/unit/test_contracts_graph_compiled.py b/test/unit/test_contracts_graph_compiled.py index 982673514ab..fe1e25d7925 100644 --- a/test/unit/test_contracts_graph_compiled.py +++ b/test/unit/test_contracts_graph_compiled.py @@ -2,10 +2,10 @@ import pytest from dbt.contracts.files import FileHash -from dbt.contracts.graph.compiled import ( - CompiledModelNode, InjectedCTE, CompiledGenericTestNode +from dbt.contracts.graph.nodes import ( + ModelNode, InjectedCTE, GenericTestNode ) -from dbt.contracts.graph.parsed import ( +from dbt.contracts.graph.nodes import ( DependsOn, NodeConfig, TestConfig, TestMetadata, ColumnInfo ) from dbt.node_types import NodeType @@ -22,7 +22,7 @@ @pytest.fixture def basic_uncompiled_model(): - return CompiledModelNode( + return ModelNode( package_name='test', path='/root/models/foo.sql', original_file_path='models/foo.sql', @@ -54,7 +54,7 @@ def basic_uncompiled_model(): @pytest.fixture def basic_compiled_model(): - return CompiledModelNode( + return ModelNode( package_name='test', path='/root/models/foo.sql', original_file_path='models/foo.sql', @@ -210,19 +210,19 @@ def basic_compiled_dict(): def test_basic_uncompiled_model(minimal_uncompiled_dict, basic_uncompiled_dict, basic_uncompiled_model): node_dict = basic_uncompiled_dict node = basic_uncompiled_model - assert_symmetric(node, node_dict, CompiledModelNode) + assert_symmetric(node, node_dict, ModelNode) assert node.empty is False assert node.is_refable is True assert node.is_ephemeral is False - assert_from_dict(node, minimal_uncompiled_dict, CompiledModelNode) + assert_from_dict(node, minimal_uncompiled_dict, ModelNode) pickle.loads(pickle.dumps(node)) def test_basic_compiled_model(basic_compiled_dict, basic_compiled_model): node_dict = basic_compiled_dict node = basic_compiled_model - assert_symmetric(node, node_dict, CompiledModelNode) + assert_symmetric(node, node_dict, ModelNode) assert node.empty is False assert node.is_refable is True assert node.is_ephemeral is False @@ -231,13 +231,13 @@ def test_basic_compiled_model(basic_compiled_dict, basic_compiled_model): def test_invalid_extra_fields_model(minimal_uncompiled_dict): bad_extra = minimal_uncompiled_dict bad_extra['notvalid'] = 'nope' - assert_fails_validation(bad_extra, CompiledModelNode) + assert_fails_validation(bad_extra, ModelNode) def test_invalid_bad_type_model(minimal_uncompiled_dict): bad_type = minimal_uncompiled_dict bad_type['resource_type'] = str(NodeType.Macro) - assert_fails_validation(bad_type, CompiledModelNode) + assert_fails_validation(bad_type, ModelNode) unchanged_compiled_models = [ @@ -346,7 +346,7 @@ def minimal_schema_test_dict(): @pytest.fixture def basic_uncompiled_schema_test_node(): - return CompiledGenericTestNode( + return GenericTestNode( package_name='test', path='/root/x/path.sql', original_file_path='/root/path.sql', @@ -379,7 +379,7 @@ def basic_uncompiled_schema_test_node(): @pytest.fixture def basic_compiled_schema_test_node(): - return CompiledGenericTestNode( + return GenericTestNode( package_name='test', path='/root/x/path.sql', original_file_path='/root/path.sql', @@ -522,19 
diff --git a/test/unit/test_contracts_graph_compiled.py b/test/unit/test_contracts_graph_compiled.py
index 982673514ab..fe1e25d7925 100644
--- a/test/unit/test_contracts_graph_compiled.py
+++ b/test/unit/test_contracts_graph_compiled.py
@@ -2,10 +2,10 @@
 import pytest

 from dbt.contracts.files import FileHash
-from dbt.contracts.graph.compiled import (
-    CompiledModelNode, InjectedCTE, CompiledGenericTestNode
+from dbt.contracts.graph.nodes import (
+    ModelNode, InjectedCTE, GenericTestNode
 )
-from dbt.contracts.graph.parsed import (
+from dbt.contracts.graph.nodes import (
     DependsOn, NodeConfig, TestConfig, TestMetadata, ColumnInfo
 )
 from dbt.node_types import NodeType
@@ -22,7 +22,7 @@

 @pytest.fixture
 def basic_uncompiled_model():
-    return CompiledModelNode(
+    return ModelNode(
         package_name='test',
         path='/root/models/foo.sql',
         original_file_path='models/foo.sql',
@@ -54,7 +54,7 @@ def basic_uncompiled_model():

 @pytest.fixture
 def basic_compiled_model():
-    return CompiledModelNode(
+    return ModelNode(
         package_name='test',
         path='/root/models/foo.sql',
         original_file_path='models/foo.sql',
@@ -210,19 +210,19 @@ def basic_compiled_dict():
 def test_basic_uncompiled_model(minimal_uncompiled_dict, basic_uncompiled_dict, basic_uncompiled_model):
     node_dict = basic_uncompiled_dict
     node = basic_uncompiled_model
-    assert_symmetric(node, node_dict, CompiledModelNode)
+    assert_symmetric(node, node_dict, ModelNode)
     assert node.empty is False
     assert node.is_refable is True
     assert node.is_ephemeral is False

-    assert_from_dict(node, minimal_uncompiled_dict, CompiledModelNode)
+    assert_from_dict(node, minimal_uncompiled_dict, ModelNode)
     pickle.loads(pickle.dumps(node))


 def test_basic_compiled_model(basic_compiled_dict, basic_compiled_model):
     node_dict = basic_compiled_dict
     node = basic_compiled_model
-    assert_symmetric(node, node_dict, CompiledModelNode)
+    assert_symmetric(node, node_dict, ModelNode)
     assert node.empty is False
     assert node.is_refable is True
     assert node.is_ephemeral is False
@@ -231,13 +231,13 @@ def test_basic_compiled_model(basic_compiled_dict, basic_compiled_model):
 def test_invalid_extra_fields_model(minimal_uncompiled_dict):
     bad_extra = minimal_uncompiled_dict
     bad_extra['notvalid'] = 'nope'
-    assert_fails_validation(bad_extra, CompiledModelNode)
+    assert_fails_validation(bad_extra, ModelNode)


 def test_invalid_bad_type_model(minimal_uncompiled_dict):
     bad_type = minimal_uncompiled_dict
     bad_type['resource_type'] = str(NodeType.Macro)
-    assert_fails_validation(bad_type, CompiledModelNode)
+    assert_fails_validation(bad_type, ModelNode)


 unchanged_compiled_models = [
@@ -346,7 +346,7 @@ def minimal_schema_test_dict():

 @pytest.fixture
 def basic_uncompiled_schema_test_node():
-    return CompiledGenericTestNode(
+    return GenericTestNode(
         package_name='test',
         path='/root/x/path.sql',
         original_file_path='/root/path.sql',
@@ -379,7 +379,7 @@ def basic_uncompiled_schema_test_node():

 @pytest.fixture
 def basic_compiled_schema_test_node():
-    return CompiledGenericTestNode(
+    return GenericTestNode(
         package_name='test',
         path='/root/x/path.sql',
         original_file_path='/root/path.sql',
@@ -522,19 +522,19 @@ def test_basic_uncompiled_schema_test(basic_uncompiled_schema_test_node, basic_u
     node = basic_uncompiled_schema_test_node
     node_dict = basic_uncompiled_schema_test_dict
     minimum = minimal_schema_test_dict
-    assert_symmetric(node, node_dict, CompiledGenericTestNode)
+    assert_symmetric(node, node_dict, GenericTestNode)
     assert node.empty is False
     assert node.is_refable is False
     assert node.is_ephemeral is False

-    assert_from_dict(node, minimum, CompiledGenericTestNode)
+    assert_from_dict(node, minimum, GenericTestNode)


 def test_basic_compiled_schema_test(basic_compiled_schema_test_node, basic_compiled_schema_test_dict):
     node = basic_compiled_schema_test_node
     node_dict = basic_compiled_schema_test_dict
-    assert_symmetric(node, node_dict, CompiledGenericTestNode)
+    assert_symmetric(node, node_dict, GenericTestNode)
     assert node.empty is False
     assert node.is_refable is False
     assert node.is_ephemeral is False
@@ -543,13 +543,13 @@ def test_basic_compiled_schema_test(basic_compiled_schema_test_node, basic_compi
 def test_invalid_extra_schema_test_fields(minimal_schema_test_dict):
     bad_extra = minimal_schema_test_dict
     bad_extra['extra'] = 'extra value'
-    assert_fails_validation(bad_extra, CompiledGenericTestNode)
+    assert_fails_validation(bad_extra, GenericTestNode)


 def test_invalid_resource_type_schema_test(minimal_schema_test_dict):
     bad_type = minimal_schema_test_dict
     bad_type['resource_type'] = str(NodeType.Model)
-    assert_fails_validation(bad_type, CompiledGenericTestNode)
+    assert_fails_validation(bad_type, GenericTestNode)


 unchanged_schema_tests = [
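Reviewer note: these tests previously needed `CompiledModelNode` even for the "uncompiled" fixtures, because compilation used to swap a `ParsedModelNode` out for a separate class. After the consolidation, one `ModelNode` carries optional compilation fields instead. A rough sketch of the shape, not dbt's actual class (which has many more fields); the field names `compiled`, `compiled_code`, and `extra_ctes` follow the fixtures above:

```python
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class InjectedCTE:
    id: str
    sql: str


@dataclass
class ModelNode:
    # Parsed-state fields, always present.
    name: str
    package_name: str
    raw_code: str
    # Compiled-state fields; unset until the compiler runs.
    compiled: bool = False
    compiled_code: Optional[str] = None
    extra_ctes: List[InjectedCTE] = field(default_factory=list)


node = ModelNode(name="foo", package_name="test", raw_code="select 1 as id")
assert not node.compiled  # an "uncompiled" node is just a ModelNode...
node.compiled, node.compiled_code = True, "select 1 as id"
assert node.compiled      # ...and so is a compiled one
```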
diff --git a/test/unit/test_contracts_graph_parsed.py b/test/unit/test_contracts_graph_parsed.py
index 1114b65c31c..453d544ca1c 100644
--- a/test/unit/test_contracts_graph_parsed.py
+++ b/test/unit/test_contracts_graph_parsed.py
@@ -13,23 +13,23 @@
     EmptySnapshotConfig,
     Hook,
 )
-from dbt.contracts.graph.parsed import (
-    ParsedModelNode,
+from dbt.contracts.graph.nodes import (
+    ModelNode,
     DependsOn,
     ColumnInfo,
-    ParsedGenericTestNode,
-    ParsedSnapshotNode,
+    GenericTestNode,
+    SnapshotNode,
     IntermediateSnapshotNode,
     ParsedNodePatch,
-    ParsedMacro,
-    ParsedExposure,
-    ParsedMetric,
-    ParsedSeedNode,
+    Macro,
+    Exposure,
+    Metric,
+    SeedNode,
     Docs,
     MacroDependsOn,
-    ParsedSourceDefinition,
-    ParsedDocumentation,
-    ParsedHookNode,
+    SourceDefinition,
+    Documentation,
+    HookNode,
     ExposureOwner,
     TestMetadata,
 )
@@ -172,7 +172,7 @@ def base_parsed_model_dict():

 @pytest.fixture
 def basic_parsed_model_object():
-    return ParsedModelNode(
+    return ModelNode(
         package_name='test',
         path='/root/x/path.sql',
         original_file_path='/root/path.sql',
@@ -279,7 +279,7 @@ def complex_parsed_model_dict():

 @pytest.fixture
 def complex_parsed_model_object():
-    return ParsedModelNode(
+    return ModelNode(
         package_name='test',
         path='/root/x/path.sql',
         original_file_path='/root/path.sql',
@@ -342,14 +342,14 @@ def test_invalid_bad_tags(base_parsed_model_dict):
     # bad top-level field
     bad_tags = base_parsed_model_dict
     bad_tags['tags'] = 100
-    assert_fails_validation(bad_tags, ParsedModelNode)
+    assert_fails_validation(bad_tags, ModelNode)


 def test_invalid_bad_materialized(base_parsed_model_dict):
     # bad nested field
     bad_materialized = base_parsed_model_dict
     bad_materialized['config']['materialized'] = None
-    assert_fails_validation(bad_materialized, ParsedModelNode)
+    assert_fails_validation(bad_materialized, ModelNode)


 unchanged_nodes = [
@@ -468,7 +468,7 @@ def basic_parsed_seed_dict():

 @pytest.fixture
 def basic_parsed_seed_object():
-    return ParsedSeedNode(
+    return SeedNode(
         name='foo',
         resource_type=NodeType.Seed,
         path='/root/seeds/seed.csv',
@@ -570,7 +570,7 @@ def complex_parsed_seed_dict():

 @pytest.fixture
 def complex_parsed_seed_object():
-    return ParsedSeedNode(
+    return SeedNode(
         name='foo',
         resource_type=NodeType.Seed,
         path='/root/seeds/seed.csv',
@@ -608,7 +608,7 @@ def test_seed_basic(basic_parsed_seed_dict, basic_parsed_seed_object, minimal_pa
     assert_symmetric(basic_parsed_seed_object, basic_parsed_seed_dict)
     assert basic_parsed_seed_object.get_materialization() == 'seed'

-    assert_from_dict(basic_parsed_seed_object, minimal_parsed_seed_dict, ParsedSeedNode)
+    assert_from_dict(basic_parsed_seed_object, minimal_parsed_seed_dict, SeedNode)


 def test_seed_complex(complex_parsed_seed_dict, complex_parsed_seed_object):
@@ -719,7 +719,7 @@ def basic_parsed_model_patch_object():

 @pytest.fixture
 def patched_model_object():
-    return ParsedModelNode(
+    return ModelNode(
         package_name='test',
         path='/root/x/path.sql',
         original_file_path='/root/path.sql',
@@ -824,7 +824,7 @@ def base_parsed_hook_dict():

 @pytest.fixture
 def base_parsed_hook_object():
-    return ParsedHookNode(
+    return HookNode(
         package_name='test',
         path='/root/x/path.sql',
         original_file_path='/root/path.sql',
@@ -911,7 +911,7 @@ def complex_parsed_hook_dict():

 @pytest.fixture
 def complex_parsed_hook_object():
-    return ParsedHookNode(
+    return HookNode(
         package_name='test',
         path='/root/x/path.sql',
         original_file_path='/root/path.sql',
@@ -952,11 +952,11 @@ def test_basic_parsed_hook(minimal_parsed_hook_dict, base_parsed_hook_dict, base
     node_dict = base_parsed_hook_dict
     minimum = minimal_parsed_hook_dict

-    assert_symmetric(node, node_dict, ParsedHookNode)
+    assert_symmetric(node, node_dict, HookNode)
     assert node.empty is False
     assert node.is_refable is False
     assert node.get_materialization() == 'view'
-    assert_from_dict(node, minimum, ParsedHookNode)
+    assert_from_dict(node, minimum, HookNode)
     pickle.loads(pickle.dumps(node))


@@ -973,7 +973,7 @@ def test_complex_parsed_hook(complex_parsed_hook_dict, complex_parsed_hook_objec
 def test_invalid_hook_index_type(base_parsed_hook_dict):
     bad_index = base_parsed_hook_dict
     bad_index['index'] = 'a string!?'
-    assert_fails_validation(bad_index, ParsedHookNode)
+    assert_fails_validation(bad_index, HookNode)
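Reviewer note: the changes in this file are almost entirely mechanical; the `Parsed*` prefixes are dropped now that there is no `Compiled*` counterpart to disambiguate against. For anyone updating downstream code against this change, the correspondence is one-to-one. A small compatibility shim along these lines (hypothetical, not part of this PR) could ease migration:

```python
# Hypothetical migration shim -- NOT part of dbt-core. It re-exports the
# renamed classes under their old names so downstream imports keep working.
from dbt.contracts.graph.nodes import (
    ModelNode,
    SeedNode,
    HookNode,
    SnapshotNode,
    AnalysisNode,
    GenericTestNode,
    SingularTestNode,
    Macro,
    Exposure,
    Metric,
    Documentation,
    SourceDefinition,
)

ParsedModelNode = ModelNode
ParsedSeedNode = SeedNode
ParsedHookNode = HookNode
ParsedSnapshotNode = SnapshotNode
ParsedAnalysisNode = AnalysisNode
ParsedGenericTestNode = GenericTestNode
ParsedSingularTestNode = SingularTestNode
ParsedMacro = Macro
ParsedExposure = Exposure
ParsedMetric = Metric
ParsedDocumentation = Documentation
ParsedSourceDefinition = SourceDefinition
```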
@@ -1051,7 +1051,7 @@ def basic_parsed_schema_test_dict():

 @pytest.fixture
 def basic_parsed_schema_test_object():
-    return ParsedGenericTestNode(
+    return GenericTestNode(
         package_name='test',
         path='/root/x/path.sql',
         original_file_path='/root/path.sql',
@@ -1143,7 +1143,7 @@ def complex_parsed_schema_test_object():
         severity='WARN'
     )
     cfg._extra.update({'extra_key': 'extra value'})
-    return ParsedGenericTestNode(
+    return GenericTestNode(
         package_name='test',
         path='/root/x/path.sql',
         original_file_path='/root/path.sql',
@@ -1180,20 +1180,20 @@ def test_basic_schema_test_node(minimal_parsed_schema_test_dict, basic_parsed_sc
     node = basic_parsed_schema_test_object
     node_dict = basic_parsed_schema_test_dict
     minimum = minimal_parsed_schema_test_dict
-    assert_symmetric(node, node_dict, ParsedGenericTestNode)
+    assert_symmetric(node, node_dict, GenericTestNode)

     assert node.empty is False
     assert node.is_ephemeral is False
     assert node.is_refable is False
     assert node.get_materialization() == 'test'

-    assert_from_dict(node, minimum, ParsedGenericTestNode)
+    assert_from_dict(node, minimum, GenericTestNode)
     pickle.loads(pickle.dumps(node))


 def test_complex_schema_test_node(complex_parsed_schema_test_dict, complex_parsed_schema_test_object):
     # this tests for the presence of _extra keys
-    node = complex_parsed_schema_test_object  # ParsedGenericTestNode
+    node = complex_parsed_schema_test_object  # GenericTestNode
     assert(node.config._extra['extra_key'])
     node_dict = complex_parsed_schema_test_dict
     assert_symmetric(node, node_dict)
@@ -1204,13 +1204,13 @@ def test_invalid_column_name_type(complex_parsed_schema_test_dict):
     # bad top-level field
     bad_column_name = complex_parsed_schema_test_dict
     bad_column_name['column_name'] = {}
-    assert_fails_validation(bad_column_name, ParsedGenericTestNode)
+    assert_fails_validation(bad_column_name, GenericTestNode)


 def test_invalid_severity(complex_parsed_schema_test_dict):
     invalid_config_value = complex_parsed_schema_test_dict
     invalid_config_value['config']['severity'] = 'WERROR'
-    assert_fails_validation(invalid_config_value, ParsedGenericTestNode)
+    assert_fails_validation(invalid_config_value, GenericTestNode)


 @pytest.fixture
@@ -1494,7 +1494,7 @@ def basic_timestamp_snapshot_dict():

 @pytest.fixture
 def basic_timestamp_snapshot_object():
-    return ParsedSnapshotNode(
+    return SnapshotNode(
         package_name='test',
         path='/root/x/path.sql',
         original_file_path='/root/path.sql',
@@ -1634,7 +1634,7 @@ def basic_check_snapshot_dict():

 @pytest.fixture
 def basic_check_snapshot_object():
-    return ParsedSnapshotNode(
+    return SnapshotNode(
         package_name='test',
         path='/root/x/path.sql',
         original_file_path='/root/path.sql',
@@ -1719,10 +1719,10 @@ def test_timestamp_snapshot_ok(basic_timestamp_snapshot_dict, basic_timestamp_sn
     node = basic_timestamp_snapshot_object
     inter = basic_intermediate_timestamp_snapshot_object

-    assert_symmetric(node, node_dict, ParsedSnapshotNode)
-#   node_from_dict = ParsedSnapshotNode.from_dict(inter.to_dict(omit_none=True))
+    assert_symmetric(node, node_dict, SnapshotNode)
+#   node_from_dict = SnapshotNode.from_dict(inter.to_dict(omit_none=True))
 #   node_from_dict.created_at = 1
-    assert ParsedSnapshotNode.from_dict(inter.to_dict(omit_none=True)) == node
+    assert SnapshotNode.from_dict(inter.to_dict(omit_none=True)) == node
     assert node.is_refable is True
     assert node.is_ephemeral is False
     pickle.loads(pickle.dumps(node))
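Reviewer note: the `assert_symmetric` helper used throughout these tests checks that a node serializes to the expected dict and that the dict deserializes back to an equal node. A minimal sketch of the pattern, assuming the mashumaro-style `to_dict()`/`from_dict()` API these node contracts expose (the real helper does some extra normalization):

```python
def assert_symmetric(node, node_dict, cls=None):
    """Round-trip check: node -> dict -> node, sketched after the test helper."""
    cls = cls or type(node)
    assert node.to_dict(omit_none=True) == node_dict  # serialization matches
    assert cls.from_dict(node_dict) == node           # deserialization matches
```

The `pickle.loads(pickle.dumps(node))` calls alongside it guard against nodes becoming unpicklable, which matters because dbt passes nodes across process boundaries.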
@@ -1733,8 +1733,8 @@ def test_check_snapshot_ok(basic_check_snapshot_dict, basic_check_snapshot_objec
     node = basic_check_snapshot_object
     inter = basic_intermediate_check_snapshot_object

-    assert_symmetric(node, node_dict, ParsedSnapshotNode)
-    assert ParsedSnapshotNode.from_dict(inter.to_dict(omit_none=True)) == node
+    assert_symmetric(node, node_dict, SnapshotNode)
+    assert SnapshotNode.from_dict(inter.to_dict(omit_none=True)) == node
     assert node.is_refable is True
     assert node.is_ephemeral is False
     pickle.loads(pickle.dumps(node))
@@ -1743,7 +1743,7 @@ def test_check_snapshot_ok(basic_check_snapshot_dict, basic_check_snapshot_objec
 def test_invalid_snapshot_bad_resource_type(basic_timestamp_snapshot_dict):
     bad_resource_type = basic_timestamp_snapshot_dict
     bad_resource_type['resource_type'] = str(NodeType.Model)
-    assert_fails_validation(bad_resource_type, ParsedSnapshotNode)
+    assert_fails_validation(bad_resource_type, SnapshotNode)


 def test_basic_parsed_node_patch(basic_parsed_model_patch_object, basic_parsed_model_patch_dict):
@@ -1792,7 +1792,7 @@ def test_populated_parsed_node_patch(populated_parsed_node_patch_dict, populated

 class TestParsedMacro(ContractTestCase):
-    ContractType = ParsedMacro
+    ContractType = Macro

     def _ok_dict(self):
         return {
@@ -1843,7 +1843,7 @@ def test_invalid_extra_field(self):

 class TestParsedDocumentation(ContractTestCase):
-    ContractType = ParsedDocumentation
+    ContractType = Documentation

     def _ok_dict(self):
         return {
@@ -1931,7 +1931,7 @@ def basic_parsed_source_definition_dict():

 @pytest.fixture
 def basic_parsed_source_definition_object():
-    return ParsedSourceDefinition(
+    return SourceDefinition(
         columns={},
         database='some_db',
         description='',
@@ -1990,7 +1990,7 @@ def complex_parsed_source_definition_dict():

 @pytest.fixture
 def complex_parsed_source_definition_object():
-    return ParsedSourceDefinition(
+    return SourceDefinition(
         columns={},
         database='some_db',
         description='',
@@ -2019,32 +2019,32 @@ def test_basic_source_definition(minimum_parsed_source_definition_dict, basic_pa
     node_dict = basic_parsed_source_definition_dict
     minimum = minimum_parsed_source_definition_dict

-    assert_symmetric(node, node_dict, ParsedSourceDefinition)
+    assert_symmetric(node, node_dict, SourceDefinition)
     assert node.is_ephemeral is False
     assert node.is_refable is False
     assert node.has_freshness is False

-    assert_from_dict(node, minimum, ParsedSourceDefinition)
+    assert_from_dict(node, minimum, SourceDefinition)
     pickle.loads(pickle.dumps(node))


 def test_invalid_missing(minimum_parsed_source_definition_dict):
     bad_missing_name = minimum_parsed_source_definition_dict
     del bad_missing_name['name']
-    assert_fails_validation(bad_missing_name, ParsedSourceDefinition)
+    assert_fails_validation(bad_missing_name, SourceDefinition)


 def test_invalid_bad_resource_type(minimum_parsed_source_definition_dict):
     bad_resource_type = minimum_parsed_source_definition_dict
     bad_resource_type['resource_type'] = str(NodeType.Model)
-    assert_fails_validation(bad_resource_type, ParsedSourceDefinition)
+    assert_fails_validation(bad_resource_type, SourceDefinition)


 def test_complex_source_definition(complex_parsed_source_definition_dict, complex_parsed_source_definition_object):
     node = complex_parsed_source_definition_object
     node_dict = complex_parsed_source_definition_dict
-    assert_symmetric(node, node_dict, ParsedSourceDefinition)
+    assert_symmetric(node, node_dict, SourceDefinition)
     assert node.is_ephemeral is False
     assert node.is_refable is False
@@ -2150,7 +2150,7 @@ def basic_parsed_exposure_dict():

 @pytest.fixture
 def basic_parsed_exposure_object():
-    return ParsedExposure(
+    return Exposure(
         name='my_exposure',
         type=ExposureType.Notebook,
         fqn=['test', 'exposures', 'my_exposure'],
@@ -2207,7 +2207,7 @@ def complex_parsed_exposure_dict():

 @pytest.fixture
 def complex_parsed_exposure_object():
-    return ParsedExposure(
+    return Exposure(
         name='my_exposure',
         type=ExposureType.Analysis,
         owner=ExposureOwner(email='test@example.com', name='A Name'),
@@ -2228,13 +2228,13 @@ def complex_parsed_exposure_object():

 def test_basic_parsed_exposure(minimal_parsed_exposure_dict, basic_parsed_exposure_dict, basic_parsed_exposure_object):
-    assert_symmetric(basic_parsed_exposure_object, basic_parsed_exposure_dict, ParsedExposure)
-    assert_from_dict(basic_parsed_exposure_object, minimal_parsed_exposure_dict, ParsedExposure)
+    assert_symmetric(basic_parsed_exposure_object, basic_parsed_exposure_dict, Exposure)
+    assert_from_dict(basic_parsed_exposure_object, minimal_parsed_exposure_dict, Exposure)
     pickle.loads(pickle.dumps(basic_parsed_exposure_object))


 def test_complex_parsed_exposure(complex_parsed_exposure_dict, complex_parsed_exposure_object):
-    assert_symmetric(complex_parsed_exposure_object, complex_parsed_exposure_dict, ParsedExposure)
+    assert_symmetric(complex_parsed_exposure_object, complex_parsed_exposure_dict, Exposure)


 unchanged_parsed_exposures = [
@@ -2325,7 +2325,7 @@ def basic_parsed_metric_dict():

 @pytest.fixture
 def basic_parsed_metric_object():
-    return ParsedMetric(
+    return Metric(
         name='my_metric',
         calculation_method='count',
         fqn=['test', 'metrics', 'my_metric'],
diff --git a/test/unit/test_docs_blocks.py b/test/unit/test_docs_blocks.py
index 8b87463313a..89821abfe12 100644
--- a/test/unit/test_docs_blocks.py
+++ b/test/unit/test_docs_blocks.py
@@ -3,7 +3,7 @@

 from dbt.contracts.files import SourceFile, FileHash, FilePath
 from dbt.contracts.graph.manifest import Manifest
-from dbt.contracts.graph.parsed import ParsedDocumentation
+from dbt.contracts.graph.nodes import Documentation
 from dbt.node_types import NodeType
 from dbt.parser import docs
 from dbt.parser.search import FileBlock
@@ -155,7 +155,7 @@ def test_load_file(self):
         docs_values = sorted(parser.manifest.docs.values(), key=lambda n: n.name)
         self.assertEqual(len(docs_values), 2)
         for result in docs_values:
-            self.assertIsInstance(result, ParsedDocumentation)
+            self.assertIsInstance(result, Documentation)
             self.assertEqual(result.package_name, 'some_package')
             self.assertEqual(result.original_file_path, self.testfile_path)
             self.assertEqual(result.resource_type, NodeType.Documentation)
@@ -179,7 +179,7 @@ def test_load_file_extras(self):
         docs_values = sorted(parser.manifest.docs.values(), key=lambda n: n.name)
         self.assertEqual(len(docs_values), 2)
         for result in docs_values:
-            self.assertIsInstance(result, ParsedDocumentation)
+            self.assertIsInstance(result, Documentation)

         self.assertEqual(docs_values[0].name, 'snowplow_sessions')
         self.assertEqual(docs_values[1].name, 'snowplow_sessions__session_id')
@@ -196,7 +196,7 @@ def test_multiple_raw_blocks(self):
         docs_values = sorted(parser.manifest.docs.values(), key=lambda n: n.name)
         self.assertEqual(len(docs_values), 2)
         for result in docs_values:
-            self.assertIsInstance(result, ParsedDocumentation)
+            self.assertIsInstance(result, Documentation)
             self.assertEqual(result.package_name, 'some_package')
             self.assertEqual(result.original_file_path, self.testfile_path)
             self.assertEqual(result.resource_type, NodeType.Documentation)
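Reviewer note: a recurring pattern in the contract tests above is that each node class validates its `resource_type` during `from_dict`, which is why tests like `test_invalid_bad_resource_type` only need to flip that one field to trigger a failure. A minimal sketch of the idea, with hypothetical names rather than dbt's actual validation code:

```python
from dataclasses import dataclass


class ValidationError(Exception):
    pass


@dataclass
class SourceDefinition:
    name: str
    resource_type: str = "source"

    @classmethod
    def from_dict(cls, data: dict) -> "SourceDefinition":
        # Reject dicts describing some other resource, mirroring how the
        # contract classes pin resource_type to a single NodeType.
        if data.get("resource_type") != "source":
            raise ValidationError(f"expected a source, got {data.get('resource_type')!r}")
        if "name" not in data:
            raise ValidationError("missing required field: name")
        return cls(name=data["name"])


SourceDefinition.from_dict({"name": "my_table", "resource_type": "source"})  # ok
try:
    SourceDefinition.from_dict({"name": "my_table", "resource_type": "model"})
except ValidationError as exc:
    print(exc)  # expected a source, got 'model'
```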
diff --git a/test/unit/test_graph_selector_methods.py b/test/unit/test_graph_selector_methods.py
index 87343ca3756..5d99182d62a 100644
--- a/test/unit/test_graph_selector_methods.py
+++ b/test/unit/test_graph_selector_methods.py
@@ -6,18 +6,18 @@
 from pathlib import Path

 from dbt.contracts.files import FileHash
-from dbt.contracts.graph.parsed import (
+from dbt.contracts.graph.nodes import (
     DependsOn,
     MacroDependsOn,
     NodeConfig,
-    ParsedMacro,
-    ParsedModelNode,
-    ParsedExposure,
-    ParsedMetric,
-    ParsedSeedNode,
-    ParsedSingularTestNode,
-    ParsedGenericTestNode,
-    ParsedSourceDefinition,
+    Macro,
+    ModelNode,
+    Exposure,
+    Metric,
+    SeedNode,
+    SingularTestNode,
+    GenericTestNode,
+    SourceDefinition,
     TestConfig,
     TestMetadata,
     ColumnInfo,
@@ -42,7 +42,7 @@
     MetricSelectorMethod,
 )
 import dbt.exceptions
-import dbt.contracts.graph.parsed
+import dbt.contracts.graph.nodes
 from .utils import replace_config
@@ -77,7 +77,7 @@ def make_model(pkg, name, sql, refs=None, sources=None, tags=None, path=None, al
         source_values.append([src.source_name, src.name])
         depends_on_nodes.append(src.unique_id)

-    return ParsedModelNode(
+    return ModelNode(
         language='sql',
         raw_code=sql,
         database='dbt',
@@ -117,7 +117,7 @@ def make_seed(pkg, name, path=None, loader=None, alias=None, tags=None, fqn_extr
     checksum = FileHash.from_contents('')
     fqn = [pkg] + fqn_extras + [name]
-    return ParsedSeedNode(
+    return SeedNode(
         language='sql',
         raw_code='',
         database='dbt',
@@ -148,7 +148,7 @@ def make_source(pkg, source_name, table_name, path=None, loader=None, identifier

     fqn = [pkg] + fqn_extras + [source_name, table_name]

-    return ParsedSourceDefinition(
+    return SourceDefinition(
         fqn=fqn,
         database='dbt',
         schema='dbt_schema',
@@ -174,7 +174,7 @@ def make_macro(pkg, name, macro_sql, path=None, depends_on_macros=None):
     if depends_on_macros is None:
         depends_on_macros = []

-    return ParsedMacro(
+    return Macro(
         name=name,
         macro_sql=macro_sql,
         unique_id=f'macro.{pkg}.{name}',
@@ -200,7 +200,7 @@ def make_schema_test(pkg, test_name, test_model, test_kwargs, path=None, refs=No
     ref_values = []
     source_values = []
     # this doesn't really have to be correct
-    if isinstance(test_model, ParsedSourceDefinition):
+    if isinstance(test_model, SourceDefinition):
         kwargs['model'] = "{{ source('" + test_model.source_name + \
             "', '" + test_model.name + "') }}"
         source_values.append([test_model.source_name, test_model.name])
@@ -247,7 +247,7 @@ def make_schema_test(pkg, test_name, test_model, test_kwargs, path=None, refs=No
         source_values.append([source.source_name, source.name])
         depends_on_nodes.append(source.unique_id)

-    return ParsedGenericTestNode(
+    return GenericTestNode(
         language='sql',
         raw_code=raw_code,
         test_metadata=TestMetadata(
@@ -303,7 +303,7 @@ def make_data_test(pkg, name, sql, refs=None, sources=None, tags=None, path=None
         source_values.append([src.source_name, src.name])
         depends_on_nodes.append(src.unique_id)

-    return ParsedSingularTestNode(
+    return SingularTestNode(
         language='sql',
         raw_code=sql,
         database='dbt',
@@ -336,7 +336,7 @@ def make_exposure(pkg, name, path=None, fqn_extras=None, owner=None):
         owner = ExposureOwner(email='test@example.com')

     fqn = [pkg, 'exposures'] + fqn_extras + [name]
-    return ParsedExposure(
+    return Exposure(
         name=name,
         type=ExposureType.Notebook,
         fqn=fqn,
@@ -352,7 +352,7 @@ def make_metric(pkg, name, path=None):
     if path is None:
         path = 'schema.yml'

-    return ParsedMetric(
+    return Metric(
         name=name,
         path='schema.yml',
         package_name=pkg,
@@ -970,14 +970,14 @@ def test_select_state_changed_seed_checksum_path_to_path(manifest, previous_stat
     change_node(manifest, seed.replace(checksum=FileHash(
         name='path', checksum=seed.original_file_path)))
     method = statemethod(manifest, previous_state)
-    with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch:
+    with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch:
         assert not search_manifest_using_method(manifest, method, 'modified')
         warn_or_error_patch.assert_called_once()
         event = warn_or_error_patch.call_args[0][0]
         assert event.info.name == 'SeedExceedsLimitSamePath'
         msg = event.info.msg
         assert msg.startswith('Found a seed (pkg.seed) >1MB in size')
-    with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch:
+    with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch:
         assert not search_manifest_using_method(manifest, method, 'new')
         warn_or_error_patch.assert_not_called()
@@ -986,7 +986,7 @@ def test_select_state_changed_seed_checksum_sha_to_path(manifest, previous_state
     change_node(manifest, seed.replace(checksum=FileHash(
         name='path', checksum=seed.original_file_path)))
     method = statemethod(manifest, previous_state)
-    with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch:
+    with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch:
         assert search_manifest_using_method(
             manifest, method, 'modified') == {'seed'}
         warn_or_error_patch.assert_called_once()
@@ -994,7 +994,7 @@ def test_select_state_changed_seed_checksum_sha_to_path(manifest, previous_state
         assert event.info.name == 'SeedIncreased'
         msg = event.info.msg
         assert msg.startswith('Found a seed (pkg.seed) >1MB in size')
-    with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch:
+    with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch:
         assert not search_manifest_using_method(manifest, method, 'new')
         warn_or_error_patch.assert_not_called()
@@ -1003,11 +1003,11 @@ def test_select_state_changed_seed_checksum_path_to_sha(manifest, previous_state
     change_node(previous_state.manifest, seed.replace(
         checksum=FileHash(name='path', checksum=seed.original_file_path)))
     method = statemethod(manifest, previous_state)
-    with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch:
+    with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch:
         assert search_manifest_using_method(
             manifest, method, 'modified') == {'seed'}
         warn_or_error_patch.assert_not_called()
-    with mock.patch('dbt.contracts.graph.parsed.warn_or_error') as warn_or_error_patch:
+    with mock.patch('dbt.contracts.graph.nodes.warn_or_error') as warn_or_error_patch:
         assert not search_manifest_using_method(manifest, method, 'new')
         warn_or_error_patch.assert_not_called()
diff --git a/test/unit/test_macro_resolver.py b/test/unit/test_macro_resolver.py
index 17e1aca6dca..3e0b7622bce 100644
--- a/test/unit/test_macro_resolver.py
+++ b/test/unit/test_macro_resolver.py
@@ -1,15 +1,15 @@
 import unittest
 from unittest import mock

-from dbt.contracts.graph.parsed import (
-    ParsedMacro
+from dbt.contracts.graph.nodes import (
+    Macro
 )
 from dbt.context.macro_resolver import MacroResolver


 def mock_macro(name, package_name):
     macro = mock.MagicMock(
-        __class__=ParsedMacro,
+        __class__=Macro,
         package_name=package_name,
         resource_type='macro',
         unique_id=f'macro.{package_name}.{name}',
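Reviewer note: the `mock.patch` target strings change along with the module move. `patch` must name the module where `warn_or_error` is looked up at call time, so `'dbt.contracts.graph.parsed.warn_or_error'` becomes `'dbt.contracts.graph.nodes.warn_or_error'`. A self-contained demonstration of that rule, building a throwaway module (`fake_nodes` is invented for the example) so the snippet runs on its own:

```python
import sys
import types
from unittest import mock

# Build a tiny module on the fly; it plays the role of
# dbt.contracts.graph.nodes, which imports warn_or_error into its namespace.
nodes = types.ModuleType("fake_nodes")
exec(
    "def warn_or_error(e):\n"
    "    raise AssertionError('real function called')\n"
    "def check_seed(size):\n"
    "    if size > 1:\n"
    "        warn_or_error('SeedIncreased')\n",
    nodes.__dict__,
)
sys.modules["fake_nodes"] = nodes

# Patch the name where it is *looked up* (fake_nodes.warn_or_error), not
# where it might originally be defined -- the same reason these tests had to
# switch their patch targets when the function's home module changed.
with mock.patch("fake_nodes.warn_or_error") as warn_patch:
    nodes.check_seed(2)
    warn_patch.assert_called_once_with("SeedIncreased")
```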
diff --git a/test/unit/test_manifest.py b/test/unit/test_manifest.py
index 3e18c555b9c..694e10ec4c0 100644
--- a/test/unit/test_manifest.py
+++ b/test/unit/test_manifest.py
@@ -15,14 +15,14 @@
 from dbt.adapters.base.plugin import AdapterPlugin
 from dbt.contracts.files import FileHash
 from dbt.contracts.graph.manifest import Manifest, ManifestMetadata
-from dbt.contracts.graph.parsed import (
-    ParsedModelNode,
+from dbt.contracts.graph.nodes import (
+    ModelNode,
     DependsOn,
     NodeConfig,
-    ParsedSeedNode,
-    ParsedSourceDefinition,
-    ParsedExposure,
-    ParsedMetric
+    SeedNode,
+    SourceDefinition,
+    Exposure,
+    Metric
 )
 from dbt.contracts.graph.unparsed import (
@@ -33,7 +33,6 @@
     MetricTime
 )

-from dbt.contracts.graph.compiled import CompiledModelNode
 from dbt.events.functions import reset_metadata_vars
 from dbt.node_types import NodeType
@@ -81,7 +80,7 @@ def setUp(self):
         })

         self.exposures = {
-            'exposure.root.my_exposure': ParsedExposure(
+            'exposure.root.my_exposure': Exposure(
                 name='my_exposure',
                 type=ExposureType.Dashboard,
                 owner=ExposureOwner(email='some@email.com'),
@@ -101,7 +100,7 @@ def setUp(self):
         }

         self.metrics = {
-            'metric.root.my_metric': ParsedMetric(
+            'metric.root.my_metric': Metric(
                 name='new_customers',
                 label='New Customers',
                 model='ref("multi")',
@@ -133,7 +132,7 @@ def setUp(self):
         }

         self.nested_nodes = {
-            'model.snowplow.events': ParsedModelNode(
+            'model.snowplow.events': ModelNode(
                 name='events',
                 database='dbt',
                 schema='analytics',
@@ -155,7 +154,7 @@ def setUp(self):
                 raw_code='does not matter',
                 checksum=FileHash.empty(),
             ),
-            'model.root.events': ParsedModelNode(
+            'model.root.events': ModelNode(
                 name='events',
                 database='dbt',
                 schema='analytics',
@@ -177,7 +176,7 @@ def setUp(self):
                 raw_code='does not matter',
                 checksum=FileHash.empty(),
             ),
-            'model.root.dep': ParsedModelNode(
+            'model.root.dep': ModelNode(
                 name='dep',
                 database='dbt',
                 schema='analytics',
@@ -199,7 +198,7 @@ def setUp(self):
                 raw_code='does not matter',
                 checksum=FileHash.empty(),
             ),
-            'model.root.nested': ParsedModelNode(
+            'model.root.nested': ModelNode(
                 name='nested',
                 database='dbt',
                 schema='analytics',
@@ -221,7 +220,7 @@ def setUp(self):
                 raw_code='does not matter',
                 checksum=FileHash.empty(),
             ),
-            'model.root.sibling': ParsedModelNode(
+            'model.root.sibling': ModelNode(
                 name='sibling',
                 database='dbt',
                 schema='analytics',
@@ -243,7 +242,7 @@ def setUp(self):
                 raw_code='does not matter',
                 checksum=FileHash.empty(),
             ),
-            'model.root.multi': ParsedModelNode(
+            'model.root.multi': ModelNode(
                 name='multi',
                 database='dbt',
                 schema='analytics',
@@ -268,7 +267,7 @@ def setUp(self):
         }

         self.sources = {
-            'source.root.my_source.my_table': ParsedSourceDefinition(
+            'source.root.my_source.my_table': SourceDefinition(
                 database='raw',
                 schema='analytics',
                 resource_type=NodeType.Source,
@@ -493,7 +492,7 @@ def test_get_resource_fqns_empty(self):

     def test_get_resource_fqns(self):
         nodes = copy.copy(self.nested_nodes)
-        nodes['seed.root.seed'] = ParsedSeedNode(
+        nodes['seed.root.seed'] = SeedNode(
             name='seed',
             database='dbt',
             schema='analytics',
@@ -542,7 +541,7 @@ def test_get_resource_fqns(self):
         self.assertEqual(resource_fqns, expect)

     def test__deepcopy_copies_flat_graph(self):
-        test_node = ParsedModelNode(
+        test_node = ModelNode(
             name='events',
             database='dbt',
             schema='analytics',
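Reviewer note: every entry in these fixture dicts follows the manifest convention of keying nodes by `unique_id`, which encodes resource type, package, and name (sources add a fourth segment for the source name, as in `source.root.my_source.my_table`). A tiny helper to illustrate the common three-part scheme; the function is hypothetical, for illustration only:

```python
def make_unique_id(resource_type: str, package_name: str, name: str) -> str:
    # Mirrors the keys seen above: 'model.snowplow.events', 'seed.root.seed',
    # 'exposure.root.my_exposure', 'metric.root.my_metric', ...
    return f"{resource_type}.{package_name}.{name}"


assert make_unique_id("model", "root", "dep") == "model.root.dep"
```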
@@ -586,7 +585,7 @@ def setUp(self):
         })

         self.nested_nodes = {
-            'model.snowplow.events': CompiledModelNode(
+            'model.snowplow.events': ModelNode(
                 name='events',
                 database='dbt',
                 schema='analytics',
@@ -612,7 +611,7 @@ def setUp(self):
                 extra_ctes=[],
                 checksum=FileHash.empty(),
             ),
-            'model.root.events': CompiledModelNode(
+            'model.root.events': ModelNode(
                 name='events',
                 database='dbt',
                 schema='analytics',
@@ -638,7 +637,7 @@ def setUp(self):
                 extra_ctes=[],
                 checksum=FileHash.empty(),
             ),
-            'model.root.dep': ParsedModelNode(
+            'model.root.dep': ModelNode(
                 name='dep',
                 database='dbt',
                 schema='analytics',
@@ -659,7 +658,7 @@ def setUp(self):
                 raw_code='does not matter',
                 checksum=FileHash.empty(),
             ),
-            'model.root.nested': ParsedModelNode(
+            'model.root.nested': ModelNode(
                 name='nested',
                 database='dbt',
                 schema='analytics',
@@ -680,7 +679,7 @@ def setUp(self):
                 raw_code='does not matter',
                 checksum=FileHash.empty(),
             ),
-            'model.root.sibling': ParsedModelNode(
+            'model.root.sibling': ModelNode(
                 name='sibling',
                 database='dbt',
                 schema='analytics',
@@ -701,7 +700,7 @@ def setUp(self):
                 raw_code='does not matter',
                 checksum=FileHash.empty(),
             ),
-            'model.root.multi': ParsedModelNode(
+            'model.root.multi': ModelNode(
                 name='multi',
                 database='dbt',
                 schema='analytics',
diff --git a/test/unit/test_parser.py b/test/unit/test_parser.py
index 1ae9e3917ed..45e165b9abf 100644
--- a/test/unit/test_parser.py
+++ b/test/unit/test_parser.py
@@ -28,10 +28,10 @@
 from dbt.contracts.graph.model_config import (
     NodeConfig, TestConfig, SnapshotConfig
 )
-from dbt.contracts.graph.parsed import (
-    ParsedModelNode, ParsedMacro, ParsedNodePatch, DependsOn, ColumnInfo,
-    ParsedSingularTestNode, ParsedGenericTestNode, ParsedSnapshotNode,
-    ParsedAnalysisNode, UnpatchedSourceDefinition
+from dbt.contracts.graph.nodes import (
+    ModelNode, Macro, DependsOn, ColumnInfo,
+    SingularTestNode, GenericTestNode, SnapshotNode,
+    AnalysisNode, UnpatchedSourceDefinition
 )
 from dbt.contracts.graph.unparsed import Docs
 from dbt.parser.models import (
@@ -60,7 +60,7 @@ def _generate_macros(self):
             name_sql[name] = sql

         for name, sql in name_sql.items():
-            pm = ParsedMacro(
+            pm = Macro(
                 name=name,
                 resource_type=NodeType.Macro,
                 unique_id=f'macro.root.{name}',
@@ -510,7 +510,7 @@ def test_basic(self):
         self.parser.parse_file(block)
         self.assert_has_manifest_lengths(self.parser.manifest, nodes=1)
         node = list(self.parser.manifest.nodes.values())[0]
-        expected = ParsedModelNode(
+        expected = ModelNode(
             alias='model_1',
             name='model_1',
             database='test',
@@ -568,7 +568,7 @@ def model(dbt, session):
         node = list(self.parser.manifest.nodes.values())[0]
         # we decided to not detect and auto supply for now since import name doesn't always match library name
         python_packages = ['sklearn==0.1.0']
-        expected = ParsedModelNode(
+        expected = ModelNode(
             alias='py_model',
             name='py_model',
             database='test',
@@ -756,7 +756,7 @@ def file_block_for(self, data, filename):
     # parser does not run in this case. That test is in integration test suite 072
     def test_built_in_macro_override_detection(self):
         macro_unique_id = 'macro.root.ref'
-        self.parser.manifest.macros[macro_unique_id] = ParsedMacro(
+        self.parser.manifest.macros[macro_unique_id] = Macro(
             name='ref',
             resource_type=NodeType.Macro,
             unique_id=macro_unique_id,
@@ -768,7 +768,7 @@ def test_built_in_macro_override_detection(self):

         raw_code = '{{ config(materialized="table") }}select 1 as id'
         block = self.file_block_for(raw_code, 'nested/model_1.sql')
-        node = ParsedModelNode(
+        node = ModelNode(
             alias='model_1',
             name='model_1',
             database='test',
@@ -803,7 +803,7 @@ def setUp(self):
             manifest=self.manifest,
             root_project=self.root_project_config,
         )
-        self.example_node = ParsedModelNode(
+        self.example_node = ModelNode(
             alias='model_1',
             name='model_1',
             database='test',
@@ -982,7 +982,7 @@ def test_single_block(self):
         self.parser.parse_file(block)
         self.assert_has_manifest_lengths(self.parser.manifest, nodes=1)
         node = list(self.parser.manifest.nodes.values())[0]
-        expected = ParsedSnapshotNode(
+        expected = SnapshotNode(
             alias='foo',
             name='foo',
             # the `database` entry is overridden by the target_database config
@@ -1051,7 +1051,7 @@ def test_multi_block(self):
         self.parser.parse_file(block)
         self.assert_has_manifest_lengths(self.parser.manifest, nodes=2)
         nodes = sorted(self.parser.manifest.nodes.values(), key=lambda n: n.name)
-        expect_foo = ParsedSnapshotNode(
+        expect_foo = SnapshotNode(
             alias='foo',
             name='foo',
             database='dbt',
@@ -1088,7 +1088,7 @@ def test_multi_block(self):
                 'updated_at': 'last_update',
             },
         )
-        expect_bar = ParsedSnapshotNode(
+        expect_bar = SnapshotNode(
             alias='bar',
             name='bar',
             database='dbt',
@@ -1151,7 +1151,7 @@ def test_single_block(self):
         self.parser.parse_file(block)
         self.assertEqual(len(self.parser.manifest.macros), 1)
         macro = list(self.parser.manifest.macros.values())[0]
-        expected = ParsedMacro(
+        expected = Macro(
             name='foo',
             resource_type=NodeType.Macro,
             unique_id='macro.snowplow.foo',
@@ -1173,7 +1173,7 @@ def test_multiple_blocks(self):
         self.parser.parse_file(block)
         self.assertEqual(len(self.parser.manifest.macros), 2)
         macros = sorted(self.parser.manifest.macros.values(), key=lambda m: m.name)
-        expected_bar = ParsedMacro(
+        expected_bar = Macro(
             name='bar',
             resource_type=NodeType.Macro,
             unique_id='macro.snowplow.bar',
@@ -1182,7 +1182,7 @@ def test_multiple_blocks(self):
             path=normalize('macros/macro.sql'),
             macro_sql='{% macro bar(c, d) %}c + d{% endmacro %}',
         )
-        expected_foo = ParsedMacro(
+        expected_foo = Macro(
             name='foo',
             resource_type=NodeType.Macro,
             unique_id='macro.snowplow.foo',
@@ -1220,7 +1220,7 @@ def test_basic(self):
         self.parser.parse_file(block)
         self.assert_has_manifest_lengths(self.parser.manifest, nodes=1)
         node = list(self.parser.manifest.nodes.values())[0]
-        expected = ParsedSingularTestNode(
+        expected = SingularTestNode(
             alias='test_1',
             name='test_1',
             database='test',
@@ -1263,7 +1263,7 @@ def test_basic(self):
         self.parser.manifest.files[block.file.file_id] = block.file
         self.parser.parse_file(block)
         node = list(self.parser.manifest.macros.values())[0]
-        expected = ParsedMacro(
+        expected = Macro(
             name='test_not_null',
             resource_type=NodeType.Macro,
             unique_id='macro.snowplow.test_not_null',
@@ -1297,7 +1297,7 @@ def test_basic(self):
         self.parser.parse_file(block)
         self.assert_has_manifest_lengths(self.parser.manifest, nodes=1)
         node = list(self.parser.manifest.nodes.values())[0]
-        expected = ParsedAnalysisNode(
+        expected = AnalysisNode(
             alias='analysis_1',
             name='analysis_1',
             database='test',
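Reviewer note: these parser tests all build a full expected node (ModelNode, SnapshotNode, Macro, ...) and compare it with a single `==`. Because the contracts are dataclasses, equality is field-by-field, so one assertion covers the whole parse result. In miniature, with a toy stand-in for the contract classes:

```python
from dataclasses import dataclass


@dataclass
class Node:  # toy stand-in for the contract dataclasses used above
    alias: str
    name: str
    database: str


# Dataclass equality compares all fields, which is why the parser tests can
# assert a parsed node against a hand-built "expected" node in one step.
parsed = Node(alias='model_1', name='model_1', database='test')
expected = Node(alias='model_1', name='model_1', database='test')
assert parsed == expected
```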
diff --git a/test/unit/test_partial_parsing.py b/test/unit/test_partial_parsing.py
index a784532fcf4..34e85b0cef0 100644
--- a/test/unit/test_partial_parsing.py
+++ b/test/unit/test_partial_parsing.py
@@ -5,7 +5,7 @@
 import dbt.exceptions
 from dbt.parser.partial import PartialParsing
 from dbt.contracts.graph.manifest import Manifest
-from dbt.contracts.graph.parsed import ParsedModelNode
+from dbt.contracts.graph.nodes import ModelNode
 from dbt.contracts.files import ParseFileType, SourceFile, SchemaSourceFile, FilePath, FileHash
 from dbt.node_types import NodeType
 from .utils import normalize
@@ -88,7 +88,7 @@ def setUp(self):
         self.partial_parsing = PartialParsing(self.saved_manifest, self.new_files)

     def get_model(self, name):
-        return ParsedModelNode(
+        return ModelNode(
             package_name='my_test',
             path=f'{name}.sql',
             original_file_path=f'models/{name}.sql',
@@ -106,7 +106,7 @@ def get_model(self, name):
         )

     def get_python_model(self, name):
-        return ParsedModelNode(
+        return ModelNode(
             package_name='my_test',
             path=f'{name}.py',
             original_file_path=f'models/{name}.py',
diff --git a/test/unit/utils.py b/test/unit/utils.py
index 5df2ef6ac8c..046ac24ff41 100644
--- a/test/unit/utils.py
+++ b/test/unit/utils.py
@@ -227,7 +227,7 @@ def assert_fails_validation(dct, cls):


 def generate_name_macros(package):
-    from dbt.contracts.graph.parsed import ParsedMacro
+    from dbt.contracts.graph.nodes import Macro
     from dbt.node_types import NodeType
     name_sql = {}
     for component in ('database', 'schema', 'alias'):
@@ -240,7 +240,7 @@ def generate_name_macros(package):
         name_sql[name] = sql

     for name, sql in name_sql.items():
-        pm = ParsedMacro(
+        pm = Macro(
             name=name,
             resource_type=NodeType.Macro,
             unique_id=f'macro.{package}.{name}',
@@ -275,7 +275,7 @@ def _make_table_of(self, rows, column_types):


 def MockMacro(package, name='my_macro', **kwargs):
-    from dbt.contracts.graph.parsed import ParsedMacro
+    from dbt.contracts.graph.nodes import Macro
     from dbt.node_types import NodeType

     mock_kwargs = dict(
@@ -288,7 +288,7 @@ def MockMacro(package, name='my_macro', **kwargs):
     mock_kwargs.update(kwargs)

     macro = mock.MagicMock(
-        spec=ParsedMacro,
+        spec=Macro,
         **mock_kwargs
     )
     macro.name = name
@@ -309,9 +309,9 @@ def MockGenerateMacro(package, component='some_component', **kwargs):

 def MockSource(package, source_name, name, **kwargs):
     from dbt.node_types import NodeType
-    from dbt.contracts.graph.parsed import ParsedSourceDefinition
+    from dbt.contracts.graph.nodes import SourceDefinition
     src = mock.MagicMock(
-        __class__=ParsedSourceDefinition,
+        __class__=SourceDefinition,
         resource_type=NodeType.Source,
         source_name=source_name,
         package_name=package,
@@ -325,13 +325,13 @@ def MockSource(package, source_name, name, **kwargs):

 def MockNode(package, name, resource_type=None, **kwargs):
     from dbt.node_types import NodeType
-    from dbt.contracts.graph.parsed import ParsedModelNode, ParsedSeedNode
+    from dbt.contracts.graph.nodes import ModelNode, SeedNode
     if resource_type is None:
         resource_type = NodeType.Model
     if resource_type == NodeType.Model:
-        cls = ParsedModelNode
+        cls = ModelNode
    elif resource_type == NodeType.Seed:
-        cls = ParsedSeedNode
+        cls = SeedNode
     else:
         raise ValueError(f'I do not know how to handle {resource_type}')
     node = mock.MagicMock(
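Reviewer note: `MockNode` dispatches from a `NodeType` to the matching node class and rejects anything it does not know. If more node types get covered, a dictionary keeps that dispatch declarative; a sketch using stand-in classes, not dbt's:

```python
from enum import Enum


class NodeType(Enum):  # trimmed stand-in for dbt.node_types.NodeType
    Model = "model"
    Seed = "seed"


class ModelNode: ...
class SeedNode: ...


# resource_type -> node class: the same mapping MockNode expresses with if/elif.
NODE_CLASSES = {
    NodeType.Model: ModelNode,
    NodeType.Seed: SeedNode,
}


def node_class_for(resource_type: NodeType) -> type:
    try:
        return NODE_CLASSES[resource_type]
    except KeyError:
        raise ValueError(f"I do not know how to handle {resource_type}") from None


assert node_class_for(NodeType.Model) is ModelNode
```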
@@ -348,9 +348,9 @@ def MockDocumentation(package, name, **kwargs):
     from dbt.node_types import NodeType
-    from dbt.contracts.graph.parsed import ParsedDocumentation
+    from dbt.contracts.graph.nodes import Documentation
     doc = mock.MagicMock(
-        __class__=ParsedDocumentation,
+        __class__=Documentation,
         resource_type=NodeType.Documentation,
         package_name=package,
         search_name=name,
diff --git a/tests/functional/artifacts/expected_manifest.py b/tests/functional/artifacts/expected_manifest.py
index 482e6f8672c..2656c84e249 100644
--- a/tests/functional/artifacts/expected_manifest.py
+++ b/tests/functional/artifacts/expected_manifest.py
@@ -94,7 +94,9 @@ def get_rendered_snapshot_config(**updates):
         "strategy": "check",
         "check_cols": "all",
         "unique_key": "id",
+        "target_database": None,
         "target_schema": None,
+        "updated_at": None,
         "meta": {},
         "grants": {},
         "packages": [],
diff --git a/tests/functional/exit_codes/test_exit_codes.py b/tests/functional/exit_codes/test_exit_codes.py
index dbef6361713..955953a0dc0 100644
--- a/tests/functional/exit_codes/test_exit_codes.py
+++ b/tests/functional/exit_codes/test_exit_codes.py
@@ -49,6 +49,7 @@ def test_compile(self, project):
         assert len(results) == 7

     def test_snapshot_pass(self, project):
+        run_dbt(["run", "--model", "good"])
         results = run_dbt(['snapshot'])
         assert len(results) == 1
         check_table_does_exist(project.adapter, 'good_snapshot')
diff --git a/tests/unit/test_events.py b/tests/unit/test_events.py
index f41c9b49033..b05955a1720 100644
--- a/tests/unit/test_events.py
+++ b/tests/unit/test_events.py
@@ -19,7 +19,7 @@
 import dbt.flags as flags
 import inspect
 import json
-from dbt.contracts.graph.parsed import ParsedModelNode, NodeConfig, DependsOn
+from dbt.contracts.graph.nodes import ModelNode, NodeConfig, DependsOn
 from dbt.contracts.files import FileHash
 from mashumaro.types import SerializableType
 from typing import Generic, TypeVar, Dict
@@ -125,7 +125,7 @@ def test_buffer_FIFOs(self):


 def MockNode():
-    return ParsedModelNode(
+    return ModelNode(
         alias="model_one",
         name="model_one",
         database="dbt",
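Reviewer note: two changes in this last batch go beyond renames. The exit-codes test now runs the `good` model before snapshotting, presumably because the snapshot's source table must exist when `dbt snapshot` runs. And the expected manifest grows explicit `"target_database": None` and `"updated_at": None` entries, which looks like a side effect of the consolidation: unset snapshot-config keys now serialize as explicit `None` rather than being omitted. That difference is the familiar omit-none switch in mashumaro-style serialization; roughly, with a toy dataclass rather than dbt's config class:

```python
from dataclasses import dataclass, asdict
from typing import Optional


@dataclass
class SnapshotishConfig:  # toy stand-in for the rendered snapshot config
    strategy: str = "check"
    target_database: Optional[str] = None
    updated_at: Optional[str] = None


def to_dict(cfg: SnapshotishConfig, omit_none: bool) -> dict:
    d = asdict(cfg)
    return {k: v for k, v in d.items() if v is not None} if omit_none else d


cfg = SnapshotishConfig()
assert "target_database" not in to_dict(cfg, omit_none=True)      # old expectation
assert to_dict(cfg, omit_none=False)["target_database"] is None   # new expectation
```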