diff --git a/postreise/analyze/check.py b/postreise/analyze/check.py
index 0463fc87..a29ec157 100644
--- a/postreise/analyze/check.py
+++ b/postreise/analyze/check.py
@@ -3,11 +3,7 @@
 import numpy as np
 import pandas as pd
 from powersimdata.input.grid import Grid
-from powersimdata.network.usa_tamu.constants import zones
-from powersimdata.network.usa_tamu.constants.plants import (
-    all_resources,
-    renewable_resources,
-)
+from powersimdata.network.model import ModelImmutables
 from powersimdata.scenario.analyze import Analyze
 from powersimdata.scenario.scenario import Scenario
 
@@ -71,17 +67,19 @@ def _check_scenario_is_in_analyze_state(scenario):
         raise ValueError("scenario must in analyze state")
 
 
-def _check_areas_and_format(areas):
+def _check_areas_and_format(areas, grid_model="usa_tamu"):
     """Ensure that areas are valid. Duplicates are removed and state abbreviations are
     converted to their actual name.
 
     :param str/list/tuple/set areas: areas(s) to check. Could be load zone name(s),
         state name(s)/abbreviation(s) or interconnect(s).
+    :param str grid_model: grid model.
     :raises TypeError: if areas is not a list/tuple/set of str.
     :raises ValueError: if areas is empty or not valid.
     :return: (*set*) -- areas as a set. State abbreviations are converted to state
         names.
     """
+    mi = ModelImmutables(grid_model)
     if isinstance(areas, str):
         areas = {areas}
     elif isinstance(areas, (list, set, tuple)):
@@ -92,27 +90,34 @@ def _check_areas_and_format(areas):
         raise TypeError("areas must be a str or a list/tuple/set of str")
     if len(areas) == 0:
         raise ValueError("areas must be non-empty")
-    all_areas = zones.loadzone | zones.abv | zones.state | zones.interconnect
+    all_areas = (
+        mi.zones["loadzone"]
+        | mi.zones["abv"]
+        | mi.zones["state"]
+        | mi.zones["interconnect"]
+    )
     if not areas <= all_areas:
         diff = areas - all_areas
         raise ValueError("invalid area(s): %s" % " | ".join(diff))
 
-    abv_in_areas = [z for z in areas if z in zones.abv]
+    abv_in_areas = [z for z in areas if z in mi.zones["abv"]]
     for a in abv_in_areas:
         areas.remove(a)
-        areas.add(zones.abv2state[a])
+        areas.add(mi.zones["abv2state"][a])
 
     return areas
 
 
-def _check_resources_and_format(resources):
+def _check_resources_and_format(resources, grid_model="usa_tamu"):
     """Ensure that resources are valid and convert variable to a set.
 
     :param str/list/tuple/set resources: resource(s) to check.
+    :param str grid_model: grid model.
     :raises TypeError: if resources is not a list/tuple/set of str.
     :raises ValueError: if resources is empty or not valid.
     :return: (*set*) -- resources as a set.
     """
+    mi = ModelImmutables(grid_model)
     if isinstance(resources, str):
         resources = {resources}
     elif isinstance(resources, (list, set, tuple)):
@@ -123,23 +128,25 @@ def _check_resources_and_format(resources):
         raise TypeError("resources must be a str or a list/tuple/set of str")
     if len(resources) == 0:
         raise ValueError("resources must be non-empty")
-    if not resources <= all_resources:
-        diff = resources - all_resources
+    if not resources <= mi.plants["all_resources"]:
+        diff = resources - mi.plants["all_resources"]
         raise ValueError("invalid resource(s): %s" % " | ".join(diff))
     return resources
 
 
-def _check_resources_are_renewable_and_format(resources):
+def _check_resources_are_renewable_and_format(resources, grid_model="usa_tamu"):
     """Ensure that resources are valid renewable resources and convert variable to
     a set.
 
     :param str/list/tuple/set resources: resource(s) to analyze.
+    :param str grid_model: grid model.
     :raises ValueError: if resources are not renewables.
     :return: (*set*) -- resources as a set
     """
-    resources = _check_resources_and_format(resources)
-    if not resources <= renewable_resources:
-        diff = resources - all_resources
+    mi = ModelImmutables(grid_model)
+    resources = _check_resources_and_format(resources, grid_model=grid_model)
+    if not resources <= mi.plants["renewable_resources"]:
+        diff = resources - mi.plants["all_resources"]
         raise ValueError("invalid renewable resource(s): %s" % " | ".join(diff))
     return resources
 
@@ -161,6 +168,7 @@ def _check_areas_are_in_grid_and_format(areas, grid):
     if not isinstance(areas, dict):
         raise TypeError("areas must be a dict")
 
+    mi = grid.model_immutables
     areas_formatted = {}
     for a in areas.keys():
         if a in ["loadzone", "state", "interconnect"]:
@@ -174,7 +182,7 @@ def _check_areas_are_in_grid_and_format(areas, grid):
             interconnects = _check_areas_and_format(v)
             for i in interconnects:
                 try:
-                    all_loadzones.update(zones.interconnect2loadzone[i])
+                    all_loadzones.update(mi.zones["interconnect2loadzone"][i])
                 except KeyError:
                     raise ValueError("invalid interconnect: %s" % i)
             areas_formatted["interconnect"].update(interconnects)
@@ -182,14 +190,14 @@ def _check_areas_are_in_grid_and_format(areas, grid):
             states = _check_areas_and_format(v)
             for s in states:
                 try:
-                    all_loadzones.update(zones.state2loadzone[s])
+                    all_loadzones.update(mi.zones["state2loadzone"][s])
                 except KeyError:
                     raise ValueError("invalid state: %s" % s)
             areas_formatted["state"].update(states)
         elif k == "loadzone":
             loadzones = _check_areas_and_format(v)
             for l in loadzones:
-                if l not in zones.loadzone:
+                if l not in mi.zones["loadzone"]:
                     raise ValueError("invalid load zone: %s" % l)
             all_loadzones.update(loadzones)
             areas_formatted["loadzone"].update(loadzones)
diff --git a/postreise/analyze/demand.py b/postreise/analyze/demand.py
index 8b1459bd..3613cf2d 100644
--- a/postreise/analyze/demand.py
+++ b/postreise/analyze/demand.py
@@ -1,4 +1,4 @@
-from powersimdata.network.usa_tamu.usa_tamu_model import area_to_loadzone
+from powersimdata.network.model import area_to_loadzone
 
 from postreise.analyze.generation.summarize import (
     get_generation_time_series_by_resources,
@@ -17,7 +17,9 @@ def get_demand_time_series(scenario, area, area_type=None):
         column: demand values
     """
     grid = scenario.state.get_grid()
-    loadzone_set = area_to_loadzone(grid, area, area_type=area_type)
+    loadzone_set = area_to_loadzone(
+        scenario.info["grid_model"], area, area_type=area_type
+    )
     loadzone_id_set = {grid.zone2id[lz] for lz in loadzone_set if lz in grid.zone2id}
     return scenario.state.get_demand()[loadzone_id_set].sum(axis=1)
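
The check helpers and area_to_loadzone now key everything off a grid model name instead of the hard-coded usa_tamu modules. A minimal usage sketch, not part of the patch (the areas, resources and keyword values are illustrative):

    from powersimdata.network.model import area_to_loadzone
    from postreise.analyze.check import (
        _check_areas_and_format,
        _check_resources_and_format,
    )

    # State abbreviations are expanded to state names for the chosen grid model.
    areas = _check_areas_and_format({"WA", "Oregon"}, grid_model="usa_tamu")
    # Resources are validated against the model's plant constants.
    resources = _check_resources_and_format(("wind", "solar"), grid_model="usa_tamu")
    # area_to_loadzone now takes the grid model name rather than a Grid object.
    loadzones = area_to_loadzone("usa_tamu", "Washington", area_type="state")
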
diff --git a/postreise/analyze/generation/curtailment.py b/postreise/analyze/generation/curtailment.py
index a7cb011d..40e93cad 100644
--- a/postreise/analyze/generation/curtailment.py
+++ b/postreise/analyze/generation/curtailment.py
@@ -1,5 +1,4 @@
 import pandas as pd
-from powersimdata.network.usa_tamu.constants.plants import renewable_resources
 
 from postreise.analyze.check import (
     _check_areas_are_in_grid_and_format,
@@ -32,7 +31,10 @@ def calculate_curtailment_time_series(scenario):
     pg = scenario.state.get_pg()
 
     plant_id = get_plant_id_for_resources(
-        renewable_resources.intersection(set(grid.plant.type)), grid
+        grid.model_immutables.plants["renewable_resources"].intersection(
+            set(grid.plant.type)
+        ),
+        grid,
     )
     profiles = pd.concat(
         [scenario.state.get_solar(), scenario.state.get_wind()], axis=1
@@ -55,9 +57,13 @@ def calculate_curtailment_time_series_by_resources(scenario, resources=None):
     grid = scenario.state.get_grid()
     if resources is None:
-        resources = renewable_resources.intersection(set(grid.plant.type))
+        resources = grid.model_immutables.plants["renewable_resources"].intersection(
+            set(grid.plant.type)
+        )
     else:
-        resources = _check_resources_are_renewable_and_format(resources)
+        resources = _check_resources_are_renewable_and_format(
+            resources, grid_model=scenario.info["grid_model"]
+        )
 
     curtailment_by_resources = decompose_plant_data_frame_into_resources(
         curtailment, resources, grid
@@ -144,9 +150,13 @@ def calculate_curtailment_time_series_by_areas_and_resources(
     )
 
     if resources is None:
-        resources = renewable_resources.intersection(set(grid.plant.type))
+        resources = grid.model_immutables.plants["renewable_resources"].intersection(
+            set(grid.plant.type)
+        )
     else:
-        resources = _check_resources_are_renewable_and_format(resources)
+        resources = _check_resources_are_renewable_and_format(
+            resources, grid_model=scenario.info["grid_model"]
+        )
 
     curtailment_by_areas_and_resources = (
         decompose_plant_data_frame_into_areas_and_resources(
@@ -181,9 +191,13 @@ def calculate_curtailment_time_series_by_resources_and_areas(
     )
 
     if resources is None:
-        resources = renewable_resources.intersection(set(grid.plant.type))
+        resources = grid.model_immutables.plants["renewable_resources"].intersection(
+            set(grid.plant.type)
+        )
     else:
-        resources = _check_resources_are_renewable_and_format(resources)
+        resources = _check_resources_are_renewable_and_format(
+            resources, grid_model=scenario.info["grid_model"]
+        )
 
     curtailment_by_resources_and_areas = (
         decompose_plant_data_frame_into_resources_and_areas(
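
Each curtailment entry point repeats the same default: when the caller passes no resources, fall back to the renewable resources that the grid model defines and that actually occur in the grid's plant table. A sketch of that expression pulled out on its own (the helper name is hypothetical, not part of the patch):

    def _default_renewable_resources(grid):
        # Intersect the model's renewable set with the plant types present in this grid.
        return grid.model_immutables.plants["renewable_resources"].intersection(
            set(grid.plant.type)
        )
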
""" _check_scenario_is_in_analyze_state(scenario) - + mi = ModelImmutables(scenario.info["grid_model"]) allowed_methods = { "carbon": {"simple", "always-on", "decommit"}, "nox": {"simple"}, "so2": {"simple"}, } emissions_per_mwh = { - "carbon": carbon_per_mwh, - "nox": nox_per_mwh, - "so2": so2_per_mwh, + "carbon": mi.plants["carbon_per_mwh"], + "nox": mi.plants["nox_per_mwh"], + "so2": mi.plants["so2_per_mwh"], } if pollutant not in allowed_methods.keys(): @@ -63,7 +58,7 @@ def generate_emissions_stats(scenario, pollutant="carbon", method="simple"): costs = calc_costs(pg, grid.gencost["before"], decommit=decommit) heat = np.zeros_like(costs) - for fuel, val in carbon_per_mmbtu.items(): + for fuel, val in mi.plants["carbon_per_mmbtu"].items(): indices = (grid.plant["type"] == fuel).to_numpy() heat[:, indices] = ( costs[:, indices] / grid.plant["GenFuelCost"].values[indices] @@ -75,34 +70,40 @@ def generate_emissions_stats(scenario, pollutant="carbon", method="simple"): return emissions -def summarize_emissions_by_bus(emissions, plant): +def summarize_emissions_by_bus(emissions, grid): """Summarize time series emissions dataframe by type and bus. - :param pandas.DataFrame emissions: Hourly emissions by generator. - :param pandas.DataFrame plant: Generator specification table. - :return: (*dict*) -- Annual emissions by fuel and bus. + :param pandas.DataFrame emissions: hourly emissions by generator. + :param powersimdata.input.grid.Grid grid: grid object. + :return: (*dict*) -- annual emissions by fuel and bus. """ _check_time_series(emissions, "emissions") if (emissions < -1e-3).any(axis=None): raise ValueError("emissions must be non-negative") + _check_grid(grid) + plant = grid.plant + # sum by generator plant_totals = emissions.sum() # set up output data structure plant_buses = plant["bus_id"].unique() - bus_totals_by_type = {f: {b: 0 for b in plant_buses} for f in carbon_resources} + bus_totals_by_type = { + f: {b: 0 for b in plant_buses} + for f in grid.model_immutables.plants["carbon_resources"] + } # sum by fuel by bus for p in plant_totals.index: plant_type = plant.loc[p, "type"] - if plant_type not in carbon_resources: + if plant_type not in grid.model_immutables.plants["carbon_resources"]: continue plant_bus = plant.loc[p, "bus_id"] bus_totals_by_type[plant_type][plant_bus] += plant_totals.loc[p] # filter out buses whose emissions are zero bus_totals_by_type = { r: {b: v for b, v in bus_totals_by_type[r].items() if v > 0} - for r in carbon_resources + for r in grid.model_immutables.plants["carbon_resources"] } return bus_totals_by_type diff --git a/postreise/analyze/generation/summarize.py b/postreise/analyze/generation/summarize.py index 1394d138..2b005dc0 100644 --- a/postreise/analyze/generation/summarize.py +++ b/postreise/analyze/generation/summarize.py @@ -1,14 +1,6 @@ import numpy as np import pandas as pd -from powersimdata.network.usa_tamu.constants.plants import type2label -from powersimdata.network.usa_tamu.constants.zones import ( - abv2state, - interconnect2abv, - interconnect2loadzone, - loadzone2interconnect, - loadzone2state, - state2loadzone, -) +from powersimdata.network.model import ModelImmutables from powersimdata.scenario.scenario import Scenario from postreise.analyze.check import ( @@ -60,12 +52,17 @@ def sum_generation_by_state(scenario: Scenario) -> pd.DataFrame: """ # Start with energy by type & zone name energy_by_type_zoneid = sum_generation_by_type_zone(scenario) - zoneid2zonename = scenario.state.get_grid().id2zone + grid = scenario.state.get_grid() + 
diff --git a/postreise/analyze/generation/summarize.py b/postreise/analyze/generation/summarize.py
index 1394d138..2b005dc0 100644
--- a/postreise/analyze/generation/summarize.py
+++ b/postreise/analyze/generation/summarize.py
@@ -1,14 +1,6 @@
 import numpy as np
 import pandas as pd
-from powersimdata.network.usa_tamu.constants.plants import type2label
-from powersimdata.network.usa_tamu.constants.zones import (
-    abv2state,
-    interconnect2abv,
-    interconnect2loadzone,
-    loadzone2interconnect,
-    loadzone2state,
-    state2loadzone,
-)
+from powersimdata.network.model import ModelImmutables
 from powersimdata.scenario.scenario import Scenario
 
 from postreise.analyze.check import (
@@ -60,12 +52,17 @@ def sum_generation_by_state(scenario: Scenario) -> pd.DataFrame:
     """
     # Start with energy by type & zone name
     energy_by_type_zoneid = sum_generation_by_type_zone(scenario)
-    zoneid2zonename = scenario.state.get_grid().id2zone
+    grid = scenario.state.get_grid()
+    zoneid2zonename = grid.id2zone
     energy_by_type_zonename = energy_by_type_zoneid.rename(zoneid2zonename, axis=1)
     # Build lists to use for groupbys
     zone_list = energy_by_type_zonename.columns
-    zone_states = [loadzone2state[zone] for zone in zone_list]
-    zone_interconnects = [loadzone2interconnect[zone] for zone in zone_list]
+    zone_states = [
+        grid.model_immutables.zones["loadzone2state"][zone] for zone in zone_list
+    ]
+    zone_interconnects = [
+        grid.model_immutables.zones["loadzone2interconnect"][zone] for zone in zone_list
+    ]
     # Run groupbys to aggregate by larger regions
     energy_by_type_state = energy_by_type_zonename.groupby(zone_states, axis=1).sum()
     energy_by_type_interconnect = energy_by_type_zonename.groupby(
@@ -85,51 +82,56 @@ def sum_generation_by_state(scenario: Scenario) -> pd.DataFrame:
     return energy_by_type_state
 
 
-def _groupby_state(index: str) -> str:
-    """Use state as a dict key if index is a smaller region (e.g. Texas East),
-    otherwise use the given index.
-
-    :param str index: either a state name or region within a state.
-    :return: (*str*) -- the corresponding state name.
-    """
-    interconnect_spanning_states = ("Texas", "New Mexico", "Montana")
-    for state in interconnect_spanning_states:
-        if index in state2loadzone[state]:
-            return state
-    return index
-
-
-def summarize_hist_gen(hist_gen_raw: pd.DataFrame, all_resources: list) -> pd.DataFrame:
+def summarize_hist_gen(
+    hist_gen_raw: pd.DataFrame, all_resources: list, grid_model="usa_tamu"
+) -> pd.DataFrame:
     """Sum generation by state for the given resources from a scenario, adding
     totals for interconnects and for all states.
 
     :param pandas.DataFrame hist_gen_raw: historical generation data frame. Columns
         are resources and indices are either state or load zone.
     :param list all_resources: list of resources.
+    :param str grid_model: grid model.
     :return: (*pandas.DataFrame*) -- historical generation per resource.
     """
     _check_data_frame(hist_gen_raw, "PG")
-    filtered_colnames = _check_resources_and_format(all_resources)
+    filtered_colnames = _check_resources_and_format(
+        all_resources, grid_model=grid_model
+    )
+    mi = ModelImmutables(grid_model)
 
     result = hist_gen_raw.copy()
 
     # Interconnection
     eastern_areas = (
-        set([abv2state[s] for s in interconnect2abv["Eastern"]])
-        | interconnect2loadzone["Eastern"]
+        set([mi.zones["abv2state"][s] for s in mi.zones["interconnect2abv"]["Eastern"]])
+        | mi.zones["interconnect2loadzone"]["Eastern"]
     )
     eastern = result.loc[result.index.isin(eastern_areas)].sum()
-    ercot_areas = interconnect2loadzone["Texas"]
+    ercot_areas = mi.zones["interconnect2loadzone"]["Texas"]
     ercot = result.loc[result.index.isin(ercot_areas)].sum()
     western_areas = (
-        set([abv2state[s] for s in interconnect2abv["Western"]])
-        | interconnect2loadzone["Western"]
+        set([mi.zones["abv2state"][s] for s in mi.zones["interconnect2abv"]["Western"]])
+        | mi.zones["interconnect2loadzone"]["Western"]
     )
     western = result.loc[result.index.isin(western_areas)].sum()
 
     # State
+    def _groupby_state(index: str) -> str:
+        """Use state as a dict key if index is a smaller region (e.g. Texas East),
+        otherwise use the given index.
+
+        :param str index: either a state name or region within a state.
+        :return: (*str*) -- the corresponding state name.
+ """ + return ( + mi.zones["loadzone2state"][index] + if index in mi.zones["loadzone2state"] + else index + ) + result = result.groupby(by=_groupby_state).aggregate(np.sum) # Summary @@ -141,7 +143,7 @@ def summarize_hist_gen(hist_gen_raw: pd.DataFrame, all_resources: list) -> pd.Da result.loc["All"] = all result = result.loc[:, filtered_colnames] - result.rename(columns=type2label, inplace=True) + result.rename(columns=mi.plants["type2label"], inplace=True) return result diff --git a/postreise/analyze/generation/tests/test_capacity_value.py b/postreise/analyze/generation/tests/test_capacity_value.py index a155e32e..5f7003e1 100644 --- a/postreise/analyze/generation/tests/test_capacity_value.py +++ b/postreise/analyze/generation/tests/test_capacity_value.py @@ -16,13 +16,13 @@ "plant_id": [101, 102, 103], "type": ["solar", "wind", "wind"], "Pmax": [9000, 5000, 4000], - "zone_name": ["zone1", "zone1", "zone2"], - "zone_id": [1, 1, 2], + "zone_name": ["Washington", "Washington", "Oregon"], + "zone_id": [201, 201, 202], } mock_bus = { "bus_id": [1, 2, 3, 4], - "zone_id": [1, 1, 2, 2], + "zone_id": [201, 201, 202, 202], } mock_storage = { @@ -32,7 +32,7 @@ mock_demand = pd.DataFrame( { - "zone 1": [ + "201": [ 133335, 133630, 131964, @@ -124,8 +124,8 @@ scenario.info["start_date"] = "2016-01-01 00:00:00" scenario.info["end_date"] = "2016-01-01 10:00:00" scenario.state.grid.zone2id = { - "zone1": 1, - "zone2": 2, + "Washington": 201, + "Oregon": 202, } @@ -228,14 +228,14 @@ def test_failure_too_many_hours(): def test_get_capacity_by_resources(): - arg = [(scenario, "zone2", "wind"), (scenario, "all", "wind")] + arg = [(scenario, "Oregon", "wind"), (scenario, "all", "wind")] expected = [4000, 9000] for a, e in zip(arg, expected): assert get_capacity_by_resources(*a).values == e def test_get_storage_capacity(): - arg = [(scenario, "zone1"), (scenario, "all")] + arg = [(scenario, "Washington"), (scenario, "all")] expected = [20, 30] for a, e in zip(arg, expected): assert get_storage_capacity(*a) == e @@ -243,7 +243,7 @@ def test_get_storage_capacity(): def test_sum_capacity_by_type_zone(): expected_df = pd.DataFrame( - {1: [9000, 5000], 2: [0, 4000]}, + {201: [9000, 5000], 202: [0, 4000]}, index=["solar", "wind"], ) check_dataframe_matches(expected_df, sum_capacity_by_type_zone(scenario)) diff --git a/postreise/analyze/generation/tests/test_curtailment.py b/postreise/analyze/generation/tests/test_curtailment.py index c41e73dc..6a349fe9 100644 --- a/postreise/analyze/generation/tests/test_curtailment.py +++ b/postreise/analyze/generation/tests/test_curtailment.py @@ -18,9 +18,9 @@ "plant_id": ["A", "B", "C", "D"], "bus_id": [1, 2, 3, 4], "lat": [47.6, 47.6, 37.8, 37.8], - "lon": [122.3, 122.3, 122.4, 122.4], + "lon": [-122.3, -122.3, -122.4, -122.4], "type": ["solar", "solar", "wind", "wind_offshore"], - "zone_name": ["zone1", "zone1", "zone2", "zone2"], + "zone_name": ["Washington", "Washington", "Bay Area", "Bay Area"], } mock_pg = pd.DataFrame( @@ -193,9 +193,9 @@ def test_summarize_curtailment_by_bus(self): class TestSummarizeCurtailmentByLocation(unittest.TestCase): def test_summarize_curtailment_by_location(self): expected_return = { - "solar": {(47.6, 122.3): 3.5}, - "wind": {(37.8, 122.4): 0.5}, - "wind_offshore": {(37.8, 122.4): 2.5}, + "solar": {(47.6, -122.3): 3.5}, + "wind": {(37.8, -122.4): 0.5}, + "wind_offshore": {(37.8, -122.4): 2.5}, } location_curtailment = summarize_curtailment_by_location(scenario) self.assertEqual(location_curtailment, expected_return) @@ -203,7 +203,7 @@ def 
diff --git a/postreise/analyze/generation/tests/test_curtailment.py b/postreise/analyze/generation/tests/test_curtailment.py
index c41e73dc..6a349fe9 100644
--- a/postreise/analyze/generation/tests/test_curtailment.py
+++ b/postreise/analyze/generation/tests/test_curtailment.py
@@ -18,9 +18,9 @@
     "plant_id": ["A", "B", "C", "D"],
     "bus_id": [1, 2, 3, 4],
     "lat": [47.6, 47.6, 37.8, 37.8],
-    "lon": [122.3, 122.3, 122.4, 122.4],
+    "lon": [-122.3, -122.3, -122.4, -122.4],
     "type": ["solar", "solar", "wind", "wind_offshore"],
-    "zone_name": ["zone1", "zone1", "zone2", "zone2"],
+    "zone_name": ["Washington", "Washington", "Bay Area", "Bay Area"],
 }
 
 mock_pg = pd.DataFrame(
@@ -193,9 +193,9 @@ def test_summarize_curtailment_by_bus(self):
 class TestSummarizeCurtailmentByLocation(unittest.TestCase):
     def test_summarize_curtailment_by_location(self):
         expected_return = {
-            "solar": {(47.6, 122.3): 3.5},
-            "wind": {(37.8, 122.4): 0.5},
-            "wind_offshore": {(37.8, 122.4): 2.5},
+            "solar": {(47.6, -122.3): 3.5},
+            "wind": {(37.8, -122.4): 0.5},
+            "wind_offshore": {(37.8, -122.4): 2.5},
         }
         location_curtailment = summarize_curtailment_by_location(scenario)
         self.assertEqual(location_curtailment, expected_return)
@@ -203,7 +203,7 @@ def test_summarize_curtailment_by_location(self):
 
 class TestGetCurtailmentTimeSeries(unittest.TestCase):
     def test_get_curtailment_time_series(self):
-        arg = [(scenario, "zone1"), (scenario, "zone2"), (scenario, "all")]
+        arg = [(scenario, "Washington"), (scenario, "Bay Area"), (scenario, "all")]
         expected_return = [
             pd.DataFrame(
                 {
diff --git a/postreise/analyze/generation/tests/test_emissions.py b/postreise/analyze/generation/tests/test_emissions.py
index a2ed2800..3c904d65 100644
--- a/postreise/analyze/generation/tests/test_emissions.py
+++ b/postreise/analyze/generation/tests/test_emissions.py
@@ -2,6 +2,7 @@
 import pandas as pd
 import pytest
 from numpy.testing import assert_array_almost_equal
+from powersimdata.tests.mock_grid import MockGrid
 from powersimdata.tests.mock_scenario import MockScenario
 
 from postreise.analyze.generation.emissions import (
@@ -196,7 +197,9 @@ def test_emissions_summarization(self, mock_pg, mock_plant):
         }
 
         # calculation
-        summation = summarize_emissions_by_bus(input_carbon, plant)
+        summation = summarize_emissions_by_bus(
+            input_carbon, MockGrid(grid_attrs={"plant": mock_plant})
+        )
 
         # checks
         err_msg = "summarize_emissions_by_bus didn't return a dict"
diff --git a/postreise/analyze/generation/tests/test_summarize.py b/postreise/analyze/generation/tests/test_summarize.py
index 389580b5..83f0751e 100644
--- a/postreise/analyze/generation/tests/test_summarize.py
+++ b/postreise/analyze/generation/tests/test_summarize.py
@@ -20,13 +20,13 @@
     "plant_id": ["A", "B", "C", "D"],
     "zone_id": [1, 1, 2, 2],
     "type": ["solar", "wind", "hydro", "hydro"],
-    "zone_name": ["zone1", "zone1", "zone2", "zone2"],
+    "zone_name": ["Washington", "Washington", "Oregon", "Oregon"],
 }
 
 # bus_id is the index
 mock_bus = {
     "bus_id": [1, 2, 3, 4],
-    "zone_id": [1, 1, 2, 2],
+    "zone_id": [201, 201, 202, 202],
 }
 
 mock_storage = {
@@ -54,8 +54,8 @@
 grid_attrs = {"plant": mock_plant, "bus": mock_bus, "storage_gen": mock_storage}
 scenario = MockScenario(grid_attrs, pg=mock_pg, storage_pg=mock_storage_pg)
 scenario.state.grid.zone2id = {
-    "zone1": 1,
-    "zone2": 2,
+    "Washington": 201,
+    "Oregon": 202,
 }
 
 
@@ -149,7 +149,7 @@ def test_summarize_hist_gen_shape(hist_gen_raw):
 
 def test_get_generation_time_series_by_resources():
-    arg = [(scenario, "zone1", "wind"), (scenario, "zone2", "hydro")]
+    arg = [(scenario, "Washington", "wind"), (scenario, "Oregon", "hydro")]
     expected = [
         pd.DataFrame({"wind": mock_pg["B"]}),
         pd.DataFrame({"hydro": mock_pg[["C", "D"]].sum(axis=1)}),
     ]
@@ -159,7 +159,7 @@ def test_get_generation_time_series_by_resources():
 
 def test_get_storage_time_series():
-    arg = [(scenario, "zone2"), (scenario, "all")]
+    arg = [(scenario, "Oregon"), (scenario, "all")]
     expected = [mock_storage_pg[2], mock_storage_pg.sum(axis=1)]
     for a, e in zip(arg, expected):
         assert_array_almost_equal(get_storage_time_series(*a), e)
diff --git a/postreise/analyze/helpers.py b/postreise/analyze/helpers.py
index 5224c8ae..4585f3c6 100644
--- a/postreise/analyze/helpers.py
+++ b/postreise/analyze/helpers.py
@@ -1,8 +1,7 @@
 from collections import defaultdict
 
 import pandas as pd
-from powersimdata.network.usa_tamu.constants import zones
-from powersimdata.network.usa_tamu.usa_tamu_model import area_to_loadzone
+from powersimdata.network.model import area_to_loadzone
 
 from postreise.analyze.check import (
     _check_areas_are_in_grid_and_format,
@@ -70,7 +69,10 @@ def get_plant_id_in_interconnects(interconnects, grid):
     """
     areas = _check_areas_are_in_grid_and_format({"interconnect": interconnects}, grid)
     loadzones = set.union(
-        *(zones.interconnect2loadzone[i] for i in areas["interconnect"])
+        *(
+            grid.model_immutables.zones["interconnect2loadzone"][i]
+            for i in areas["interconnect"]
+        )
     )
     plant = grid.plant
 
@@ -85,9 +87,11 @@ def get_plant_id_in_states(states, grid):
     :param powersimdata.input.grid.Grid grid: Grid instance.
     :return: (*set*) -- list of plant id.
     """
     areas = _check_areas_are_in_grid_and_format({"state": states}, grid)
-    loadzones = set.union(*(zones.state2loadzone[i] for i in areas["state"]))
+    loadzones = set.union(
+        *(grid.model_immutables.zones["state2loadzone"][i] for i in areas["state"])
+    )
     plant = grid.plant
     plant_id = plant[(plant.zone_name.isin(loadzones))].index
@@ -296,7 +299,9 @@ def get_plant_id_for_resources_in_area(scenario, area, resources, area_type=None):
     """
     resource_set = set([resources]) if isinstance(resources, str) else set(resources)
     grid = scenario.state.get_grid()
-    loadzone_set = area_to_loadzone(grid, area, area_type=area_type)
+    loadzone_set = area_to_loadzone(
+        scenario.info["grid_model"], area, area_type=area_type
+    )
     plant_id = grid.plant[
         (grid.plant["zone_name"].isin(loadzone_set))
         & (grid.plant["type"].isin(resource_set))
@@ -316,7 +321,9 @@ def get_storage_id_in_area(scenario, area, area_type=None):
     :return: (*list*) -- list of storage id
     """
     grid = scenario.state.get_grid()
-    loadzone_set = area_to_loadzone(grid, area, area_type=area_type)
+    loadzone_set = area_to_loadzone(
+        scenario.info["grid_model"], area, area_type=area_type
+    )
     loadzone_id_set = {grid.zone2id[lz] for lz in loadzone_set if lz in grid.zone2id}
 
     gen = grid.storage["gen"]
diff --git a/postreise/analyze/tests/test_demand.py b/postreise/analyze/tests/test_demand.py
index 09f99541..8b8eaf4e 100644
--- a/postreise/analyze/tests/test_demand.py
+++ b/postreise/analyze/tests/test_demand.py
@@ -6,7 +6,7 @@
 
 mock_plant = {
     "plant_id": ["1001", "1002", "1003"],
-    "zone_name": ["B", "B", "C"],
+    "zone_name": ["Oregon", "Oregon", "Southern California"],
     "type": ["solar", "wind", "hydro"],
 }
 
@@ -22,22 +22,22 @@
 
 mock_demand = pd.DataFrame(
     {
-        101: [1, 2, 3, 4],
-        102: [4, 3, 2, 1],
-        103: [2, 2, 2, 2],
+        201: [1, 2, 3, 4],
+        202: [4, 3, 2, 1],
+        203: [2, 2, 2, 2],
     }
 )
 
 scenario = MockScenario(grid_attrs, pg=mock_pg, demand=mock_demand)
 scenario.state.grid.zone2id = {
-    "A": 101,
-    "B": 102,
-    "C": 103,
+    "Washington": 201,
+    "Oregon": 202,
+    "Northern California": 203,
 }
 
 
 def test_get_demand_time_series():
-    demand = get_demand_time_series(scenario, "A")
+    demand = get_demand_time_series(scenario, "Washington")
     expected_results = [1, 2, 3, 4]
     assert_array_equal(demand.to_numpy(), expected_results)
diff --git a/postreise/analyze/tests/test_helpers.py b/postreise/analyze/tests/test_helpers.py
index 103a254e..b7ddc029 100644
--- a/postreise/analyze/tests/test_helpers.py
+++ b/postreise/analyze/tests/test_helpers.py
@@ -28,19 +28,19 @@
     "plant_id": ["A", "B", "C", "D"],
     "bus_id": [1, 1, 2, 3],
     "lat": [47.6, 47.6, 37.8, 37.8],
-    "lon": [122.3, 122.3, 122.4, 122.4],
+    "lon": [-122.3, -122.3, -122.4, -122.4],
     "type": ["coal", "ng", "coal", "solar"],
     "Pmin": [0, 50, 0, 0],
     "Pmax": [0, 300, 0, 50],
-    "zone_name": ["zone1", "zone1", "zone2", "zone2"],
+    "zone_name": ["Washington", "Washington", "Bay Area", "Bay Area"],
 }
 
 # bus_id is the index
 mock_bus = {
     "bus_id": [1, 2, 3, 4],
     "lat": [47.6, 37.8, 37.8, 40.7],
-    "lon": [122.3, 122.4, 122.4, 74],
-    "zone_id": [101, 102, 102, 103],
+    "lon": [-122.3, -122.4, -122.4, -74],
+    "zone_id": [201, 204, 204, 7],
 }
 
 mock_pg = pd.DataFrame(
@@ -60,9 +60,9 @@
 grid_attrs = {"plant": mock_plant, "bus": mock_bus, "storage_gen": mock_storage}
{"plant": mock_plant, "bus": mock_bus, "storage_gen": mock_storage} scenario = MockScenario(grid_attrs) scenario.state.grid.zone2id = { - "zone1": 101, - "zone2": 102, - "zone3": 103, + "Washington": 201, + "Bay Area": 204, + "New York City": 7, } @@ -134,8 +134,8 @@ def _check_dataframe_matches(self, loc_data, expected_return): def test_summarize_location(self): expected_return = pd.DataFrame( { - (47.6, 122.3): [2, 4, 7, 12], - (37.8, 122.4): [2, 4, 7, 10], + (47.6, -122.3): [2, 4, 7, 12], + (37.8, -122.4): [2, 4, 7, 10], } ) loc_data = summarize_plant_to_location(mock_pg, self.grid) @@ -362,7 +362,7 @@ def test_get_plant_id_for_resources_in_states(grid): def test_get_plant_id_for_resources_in_area(): - arg = [(scenario, "zone1", "coal"), (scenario, "all", "coal")] + arg = [(scenario, "Washington", "coal"), (scenario, "all", "coal")] expected = [["A"], ["A", "C"]] for a, e in zip(arg, expected): plant_id = get_plant_id_for_resources_in_area(*a) @@ -370,7 +370,7 @@ def test_get_plant_id_for_resources_in_area(): def test_get_storage_id_in_area(): - arg = [(scenario, "zone2"), (scenario, "all")] + arg = [(scenario, "Bay Area"), (scenario, "all")] expected = [[1, 2], [0, 1, 2]] for a, e in zip(arg, expected): storage_id = get_storage_id_in_area(*a) diff --git a/postreise/plot/demo/emissions_map_demo.ipynb b/postreise/plot/demo/emissions_map_demo.ipynb index 7ec2ba8c..5f04bbd0 100644 --- a/postreise/plot/demo/emissions_map_demo.ipynb +++ b/postreise/plot/demo/emissions_map_demo.ipynb @@ -139,7 +139,7 @@ ], "source": [ "base_carbon_by_bus = emissions.summarize_emissions_by_bus(emissions.generate_emissions_stats(base), \n", - " base_grid.plant)" + " base_grid)" ] }, { @@ -157,7 +157,7 @@ ], "source": [ "ambitious_carbon_by_bus = emissions.summarize_emissions_by_bus(emissions.generate_emissions_stats(ambitious), \n", - " ambitious_grid.plant)" + " ambitious_grid)" ] }, { diff --git a/postreise/plot/plot_bar_generation_vs_capacity.py b/postreise/plot/plot_bar_generation_vs_capacity.py index 4c641d34..a6d60e5e 100644 --- a/postreise/plot/plot_bar_generation_vs_capacity.py +++ b/postreise/plot/plot_bar_generation_vs_capacity.py @@ -1,7 +1,6 @@ import matplotlib.pyplot as plt import pandas as pd -from powersimdata.input.grid import Grid -from powersimdata.network.usa_tamu.usa_tamu_model import area_to_loadzone +from powersimdata.network.model import ModelImmutables, area_to_loadzone from powersimdata.scenario.scenario import Scenario from postreise.analyze.generation.capacity_value import sum_capacity_by_type_zone @@ -79,26 +78,30 @@ def plot_bar_generation_vs_capacity( if not isinstance(resource_labels, dict): raise TypeError("ERROR: resource_labels should be a dictionary") - grid = Grid(["USA"]) - id2loadzone = grid.id2zone all_loadzone_data = {} scenario_data = {} for i, sid in enumerate(scenario_ids): scenario = Scenario(sid) + mi = ModelImmutables(scenario.info["grid_model"]) all_loadzone_data[sid] = { "gen": sum_generation_by_type_zone(scenario, time_range, time_zone).rename( - columns=id2loadzone + columns=mi.zones["id2loadzone"] + ), + "cap": sum_capacity_by_type_zone(scenario).rename( + columns=mi.zones["id2loadzone"] ), - "cap": sum_capacity_by_type_zone(scenario).rename(columns=id2loadzone), } scenario_data[sid] = { "name": scenario_names[i] if scenario_names else scenario.info["name"], + "grid_model": mi.model, "gen": {"label": "Generation", "unit": "TWh", "data": {}}, "cap": {"label": "Capacity", "unit": "GW", "data": {}}, } for area, area_type in zip(areas, area_types): - loadzone_set 
         for sid in scenario_ids:
+            loadzone_set = area_to_loadzone(
+                scenario_data[sid]["grid_model"], area, area_type
+            )
             scenario_data[sid]["gen"]["data"][area] = (
                 all_loadzone_data[sid]["gen"][loadzone_set]
                 .sum(axis=1)
diff --git a/postreise/plot/plot_curtailment_ts.py b/postreise/plot/plot_curtailment_ts.py
index 1012b28e..2951ed0e 100755
--- a/postreise/plot/plot_curtailment_ts.py
+++ b/postreise/plot/plot_curtailment_ts.py
@@ -2,7 +2,7 @@
 
 import matplotlib.pyplot as plt
 import pandas as pd
-from powersimdata.network.usa_tamu.constants.plants import type2color, type2label
+from powersimdata.network.model import ModelImmutables
 
 from postreise.analyze.check import (
     _check_resources_and_format,
@@ -73,8 +73,13 @@ def plot_curtailment_time_series(
         directory if None.
     """
     _check_scenario_is_in_analyze_state(scenario)
-    resources = _check_resources_and_format(resources)
+    resources = _check_resources_and_format(
+        resources, grid_model=scenario.info["grid_model"]
+    )
 
+    mi = ModelImmutables(scenario.info["grid_model"])
+    type2color = mi.plants["type2color"]
+    type2label = mi.plants["type2label"]
     if t2c:
         type2color.update(t2c)
     if t2l:
diff --git a/postreise/plot/plot_energy_carbon_stack.py b/postreise/plot/plot_energy_carbon_stack.py
index 83dea4b2..6c0c711e 100644
--- a/postreise/plot/plot_energy_carbon_stack.py
+++ b/postreise/plot/plot_energy_carbon_stack.py
@@ -1,9 +1,9 @@
 # This plotting module has a corresponding demo notebook in
-# PostREISE/postreise/plot/demo: plot_carbon_energy_carbon_stack.ipynb
+# PostREISE/postreise/plot/demo: energy_emissions_stack_bar_demo.ipynb
 
 import matplotlib.pyplot as plt
 import numpy as np
-from powersimdata.network.usa_tamu.constants.plants import type2color
+from powersimdata.network.model import ModelImmutables
 from powersimdata.scenario.scenario import Scenario
 
 from postreise.analyze.generation.emissions import generate_emissions_stats
@@ -29,7 +29,9 @@ def plot_n_scenarios(*args):
     scenarios = {id: scen for (id, scen) in zip(scenario_numbers, args)}
     grid = {id: scenario.state.get_grid() for id, scenario in scenarios.items()}
     plant = {k: v.plant for k, v in grid.items()}
-    carbon_by_type, energy_by_type = {}, {}
+    # First scenario is chosen to set fuel colors
+    type2color = ModelImmutables(args[0].info["grid_model"]).plants["type2color"]
+    carbon_by_type, energy_by_type = {}, {}
     for id, scenario in scenarios.items():
         # Calculate raw numbers
         annual_plant_energy = scenario.state.get_pg().sum()
@@ -39,7 +41,7 @@ def plot_n_scenarios(*args):
         # Drop fuels with zero energy (e.g. all offshore_wind scaled to 0 MW)
         energy_by_type[id] = raw_energy_by_type[raw_energy_by_type != 0]
         carbon_by_type[id] = raw_carbon_by_type[raw_energy_by_type != 0]
-    # carbon multiplier is inverse of carbon intensity, to scale bar heights
+    # Carbon multiplier is inverse of carbon intensity, to scale bar heights
     carbon_multiplier = energy_by_type[first_id].sum() / carbon_by_type[first_id].sum()
 
     # Determine the fuel types with generation in either scenario
diff --git a/postreise/plot/plot_generation_ts_stack.py b/postreise/plot/plot_generation_ts_stack.py
index fc6855d9..6c1b46da 100644
--- a/postreise/plot/plot_generation_ts_stack.py
+++ b/postreise/plot/plot_generation_ts_stack.py
@@ -3,11 +3,7 @@
 import matplotlib.patches as mpatches
 import matplotlib.pyplot as plt
 import pandas as pd
-from powersimdata.network.usa_tamu.constants.plants import (
-    type2color,
-    type2hatchcolor,
-    type2label,
-)
+from powersimdata.network.model import ModelImmutables
 
 from postreise.analyze.check import _check_scenario_is_in_analyze_state
 from postreise.analyze.demand import get_demand_time_series, get_net_demand_time_series
@@ -90,6 +86,11 @@ def plot_generation_time_series_stack(
         directory if None.
     """
     _check_scenario_is_in_analyze_state(scenario)
+
+    mi = ModelImmutables(scenario.info["grid_model"])
+    type2color = mi.plants["type2color"]
+    type2label = mi.plants["type2label"]
+    type2hatchcolor = mi.plants["type2hatchcolor"]
     if t2c:
         type2color.update(t2c)
     if t2l:
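
The plotting modules used to import type2color/type2label/type2hatchcolor as module-level usa_tamu constants and mutate them with the t2c/t2l overrides; they now build the mappings per call from ModelImmutables. A sketch of the lookup-and-override pattern, not part of the patch (the override value is hypothetical):

    from powersimdata.network.model import ModelImmutables

    mi = ModelImmutables("usa_tamu")
    type2color = mi.plants["type2color"]
    type2label = mi.plants["type2label"]
    type2hatchcolor = mi.plants["type2hatchcolor"]
    type2color.update({"wind": "#4c72b0"})  # per-call override, e.g. from t2c

Because the dictionaries come from a freshly built ModelImmutables instance, the overrides no longer touch a shared module-level constant.
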
diff --git a/postreise/plot/plot_interconnection_map.py b/postreise/plot/plot_interconnection_map.py
index c0c3be83..cb01fcfb 100644
--- a/postreise/plot/plot_interconnection_map.py
+++ b/postreise/plot/plot_interconnection_map.py
@@ -3,7 +3,7 @@
 from bokeh.models import ColumnDataSource, HoverTool
 from bokeh.plotting import figure
 from bokeh.tile_providers import Vendors, get_provider
-from powersimdata.network.usa_tamu.constants import zones
+from powersimdata.network.model import ModelImmutables
 from powersimdata.utility import distance
 
 from postreise.plot.plot_states import get_state_borders
@@ -11,13 +11,13 @@
 
 
 def count_nodes_per_state(grid):
-    """
-    count nodes per state to add as hover-over info in map_interconnections
+    """Count nodes per state to add as hover-over info in :func:`map_interconnections`
 
-    :param powersimdata.input.grid.Grid grid: grid object
-    :return: -- dataframe containing state names and count of nodes per state
+    :param powersimdata.input.grid.Grid grid: grid object.
+    :return: (*pandas.DataFrame*) -- dataframe containing state names and count of nodes per state.
     """
-    grid.bus["state"] = grid.bus["zone_id"].map(zones.id2state)
+    id2state = ModelImmutables(grid.get_grid_model()).zones["id2abv"]
+    grid.bus["state"] = grid.bus["zone_id"].map(id2state)
     liststates = grid.bus["state"].value_counts()
     state_counts = pd.DataFrame(liststates)
     state_counts.reset_index(inplace=True)
@@ -29,16 +29,17 @@
 def map_interconnections(
     grid, state_counts, hover_choice, hvdc_width=1, us_states_dat=None
 ):
-    """Maps transmission lines color coded by interconnection
-
-    :param powersimdata.input.grid.Grid grid: grid object
-    :param pandas.DataFrame state_counts: state names and node counts, created by count_nodes_per_state
-    :param str hover_choice: "nodes" for state_counts nodes per state, otherwise hvdc
-        capacity in hover over tool tips for hvdc lines only
-    :param float hvdc_width: adjust width of HVDC lines on map
+    """Maps transmission lines color coded by interconnection.
+
+    :param powersimdata.input.grid.Grid grid: grid object.
+    :param pandas.DataFrame state_counts: state names and node counts, created by
+        :func:`count_nodes_per_state`.
+    :param str hover_choice: "nodes" for state_counts nodes per state, otherwise HVDC
+        capacity in hover over tool tips for hvdc lines only.
+    :param float hvdc_width: adjust width of HVDC lines on map.
     :param dict us_states_dat: dictionary of state border lats/lons. If None, get
         from :func:`postreise.plot.plot_states.get_state_borders`.
-    :return: -- map of transmission lines
+    :return: (*bokeh.plotting.figure*) -- map of transmission lines.
     """
     if us_states_dat is None:
         us_states_dat = get_state_borders()
diff --git a/postreise/plot/plot_pie_generation_vs_capacity.py b/postreise/plot/plot_pie_generation_vs_capacity.py
index 3acbf36c..387caed9 100644
--- a/postreise/plot/plot_pie_generation_vs_capacity.py
+++ b/postreise/plot/plot_pie_generation_vs_capacity.py
@@ -1,9 +1,7 @@
 import matplotlib.pyplot as plt
 import numpy as np
 import pandas as pd
-from powersimdata.input.grid import Grid
-from powersimdata.network.usa_tamu.constants.plants import type2color, type2label
-from powersimdata.network.usa_tamu.usa_tamu_model import area_to_loadzone
+from powersimdata.network.model import ModelImmutables, area_to_loadzone
 from powersimdata.scenario.scenario import Scenario
 
 from postreise.analyze.generation.capacity_value import sum_capacity_by_type_zone
@@ -100,29 +98,32 @@ def plot_pie_generation_vs_capacity(
     if not isinstance(resource_colors, dict):
         raise TypeError("ERROR: resource_colors should be a dictionary")
 
-    type2label.update(resource_labels)
-    type2color.update(resource_colors)
-
-    grid = Grid(["USA"])
-    id2loadzone = grid.id2zone
     all_loadzone_data = {}
     scenario_data = {}
     for i, sid in enumerate(scenario_ids):
         scenario = Scenario(sid)
+        mi = ModelImmutables(scenario.info["grid_model"])
         all_loadzone_data[sid] = {
             "gen": sum_generation_by_type_zone(scenario, time_range, time_zone).rename(
-                columns=id2loadzone
+                columns=mi.zones["id2loadzone"]
+            ),
+            "cap": sum_capacity_by_type_zone(scenario).rename(
+                columns=mi.zones["id2loadzone"]
            ),
-            "cap": sum_capacity_by_type_zone(scenario).rename(columns=id2loadzone),
         }
         scenario_data[sid] = {
             "name": scenario_names[i] if scenario_names else scenario.info["name"],
+            "grid_model": mi.model,
+            "type2color": {**mi.plants["type2color"], **resource_colors},
+            "type2label": {**mi.plants["type2label"], **resource_labels},
             "gen": {"label": "Generation", "unit": "TWh", "data": {}},
             "cap": {"label": "Capacity", "unit": "GW", "data": {}},
         }
 
     for area, area_type in zip(areas, area_types):
-        loadzone_set = area_to_loadzone(grid, area, area_type)
         for sid in scenario_ids:
+            loadzone_set = area_to_loadzone(
+                scenario_data[sid]["grid_model"], area, area_type
+            )
             scenario_data[sid]["gen"]["data"][area] = (
                 all_loadzone_data[sid]["gen"][loadzone_set]
                 .sum(axis=1)
@@ -147,7 +148,7 @@ def plot_pie_generation_vs_capacity(
     for sd in scenario_data.values():
         for side in ["gen", "cap"]:
             ax_data, labels = _roll_up_small_pie_wedges(
-                sd[side]["data"][area], min_percentage
+                sd[side]["data"][area], sd["type2label"], min_percentage
            )
 
             ax_data_list.append(
@@ -155,7 +156,7 @@ def plot_pie_generation_vs_capacity(
                     "title": "{0}\n{1}".format(sd["name"], sd[side]["label"]),
                     "labels": labels,
                     "values": list(ax_data.values()),
-                    "colors": [type2color[r] for r in ax_data.keys()],
+                    "colors": [sd["type2color"][r] for r in ax_data.keys()],
                     "unit": sd[side]["unit"],
                 }
             )
@@ -163,15 +164,16 @@ def plot_pie_generation_vs_capacity(
     _construct_pie_visuals(area, ax_data_list)
 
 
-def _roll_up_small_pie_wedges(resource_data, min_percentage):
+def _roll_up_small_pie_wedges(resource_data, resource_label, min_percentage):
     """Combine small wedges into a single category. Removes wedges with value 0.
 
     :param dict resource_data: values for each resource type.
+    :param dict resource_label: labels for each resource type.
     :param float min_percentage: roll up small pie pieces into a single category,
         resources with percentage less than the set value will be pooled together,
         defaults to 0.
-    :return: (*dict*) -- returns updated axis data and a list of labels that includes
-        the small category label if it exists
+    :return: (*dict*) -- updated axis data and a list of labels that includes the small
+        category label if it exists
     """
     resource_list = list(resource_data.keys())
     total_resources = sum(resource_data.values())
@@ -186,7 +188,7 @@ def _roll_up_small_pie_wedges(resource_data, min_percentage):
         elif percentage <= min_percentage:
             small_categories.append(resource)
             small_category_label += "{0} {1}%\n".format(
-                type2label[resource], percentage
+                resource_label[resource], percentage
            )
             small_category_value += resource_data[resource]
 
@@ -194,7 +196,7 @@ def _roll_up_small_pie_wedges(resource_data, min_percentage):
     for resource in small_categories:
         resource_data.pop(resource)
 
-    labels = [type2label[resource] for resource in resource_data.keys()]
+    labels = [resource_label[resource] for resource in resource_data.keys()]
 
     if len(small_categories) > 1:
         resource_data["other"] = small_category_value
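
In the pie-chart module the user-supplied overrides are merged into per-scenario mappings instead of mutating shared constants, and the merged labels and colors travel with each scenario's entry down to _roll_up_small_pie_wedges and the axis data. A sketch of that merge, not part of the patch (the override values are hypothetical):

    from powersimdata.network.model import ModelImmutables

    mi = ModelImmutables("usa_tamu")
    resource_colors = {"solar": "gold"}  # user-supplied override
    resource_labels = {"ng": "Natural Gas"}  # user-supplied override
    type2color = {**mi.plants["type2color"], **resource_colors}
    type2label = {**mi.plants["type2label"], **resource_labels}
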