From 284b0c8258cdfe7916db0d295f8ef5b0e6abeb4e Mon Sep 17 00:00:00 2001
From: Majk Shkurti
Date: Tue, 2 May 2023 01:45:14 +0200
Subject: [PATCH] [ADD] Isochrone buffer

---
 app/api/src/core/opportunity.py               |  27 ++-
 app/api/src/crud/crud_isochrone.py            | 107 +++++---
 app/api/src/schemas/isochrone.py              |  33 ++-
 app/api/src/utils.py                          |  60 ++++-
 .../src/components/isochrones/Isochrones.vue  | 228 +++++++++++++-----
 .../other/IsochroneAmenitiesLineChart.vue     |  11 +-
 app/client/src/locales/en.json                |   7 +-
 app/client/src/utils/IsochroneUtils.js        |  19 ++
 8 files changed, 382 insertions(+), 110 deletions(-)

diff --git a/app/api/src/core/opportunity.py b/app/api/src/core/opportunity.py
index 5d1b8cb9f..396c241c7 100644
--- a/app/api/src/core/opportunity.py
+++ b/app/api/src/core/opportunity.py
@@ -120,7 +120,9 @@ def read_modified_data(
         if layer not in self.layers_modifiable:
             raise ValueError(f"Layer {layer} not in modifiable layers {self.layers_modifiable}.")
 
-        sql_query = f"""SELECT * FROM customer.{layer}_modified p WHERE p.scenario_id = {scenario_id} """
+        sql_query = (
+            f"""SELECT * FROM customer.{layer}_modified p WHERE p.scenario_id = {scenario_id} """
+        )
 
         if layer == "poi":
             sql_query += f""" AND p.edit_type IN (SELECT UNNEST(ARRAY{edit_type}::text[]))"""
@@ -128,7 +130,6 @@ def read_modified_data(
 
         if bbox_wkt is not None:
             sql_query += f""" AND ST_Intersects('SRID=4326;{bbox_wkt}'::geometry, p.geom)"""
-
         modified_gdf = read_postgis(
             sql_query,
             db,
@@ -202,7 +203,10 @@ def read_poi(
         ).iloc[0][0]
         # - Get the poi categories for one entrance and multiple entrances.
         # Returns {'true': [restaurant, shop...], 'false': [bus_stop, train_station...]}
-        poi_categories = pd.read_sql(f"SELECT * FROM basic.poi_categories({user_id})", db,).iloc[
+        poi_categories = pd.read_sql(
+            f"SELECT * FROM basic.poi_categories({user_id})",
+            db,
+        ).iloc[
             0
         ][0]
         poi_multiple_entrances = poi_categories["true"]
@@ -418,9 +422,10 @@ def count_poi(self, group_by_column: str):
         poi_multiple_entrances_gdf = poi_gdf[poi_gdf["entrance_type"] == "multiple"]
 
         agg_func = {}
-        if group_by_column == "minute":
+        if group_by_column in ("minute", "steps"):
             # relevant for isochrone inputs
-            agg_func = {"minute": "mean"}
+            agg_func_ = {"minute": "mean", "steps": "min"}
+            agg_func = {group_by_column: agg_func_[group_by_column]}
         else:
             # case when a multi entrance poi is in multiple shapes
             agg_func = {group_by_column: "_".join}
@@ -428,10 +433,10 @@ def count_poi(self, group_by_column: str):
         poi_multiple_entrances_gdf_grouped = (
             poi_multiple_entrances_gdf.groupby(["category", "name"]).agg(agg_func).reset_index()
         )
-        if group_by_column == "minute":
-            poi_multiple_entrances_gdf_grouped["minute"] = poi_multiple_entrances_gdf_grouped[
-                "minute"
-            ].astype(int)
+        if group_by_column in ("minute", "steps"):
+            poi_multiple_entrances_gdf_grouped[
+                group_by_column
+            ] = poi_multiple_entrances_gdf_grouped[group_by_column].astype(int)
 
         poi_multiple_entrances_gdf_grouped = poi_multiple_entrances_gdf_grouped.groupby(
             [group_by_column, "category"]
@@ -455,7 +460,7 @@ def count_population(self, group_by_columns: list[str]):
         population_gdf_grouped = population_gdf_join.groupby(group_by_columns).agg(
             {"population": "sum"}
         )
-        for (key, value) in population_gdf_grouped.iterrows():
+        for key, value in population_gdf_grouped.iterrows():
             population_count[key]["population"] = value["population"]
 
         return dict(population_count)
@@ -481,4 +486,4 @@ def count_aoi(self, group_by_columns: list[str]):
         return dict(aoi_count)
 
 
-opportunity = Opportunity()
\ No newline at end of file
+opportunity = Opportunity()
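[Reviewer note — illustration only, not part of the patch] count_poi now collapses multi-entrance POIs with an aggregate that depends on the grouping: mean minute for travel-time isochrones, minimum step for buffer catchments (the nearest entrance wins). A minimal pandas sketch of the two-stage grouping, on made-up data:

import pandas as pd

# Toy POI table: "Hbf" has two entrances reached at different buffer steps.
poi = pd.DataFrame(
    {
        "category": ["station", "station", "restaurant"],
        "name": ["Hbf", "Hbf", "Pizzeria"],
        "steps": [2, 4, 3],
    }
)

group_by_column = "steps"  # "minute" for travel-time isochrones
# Mean minute vs. min step; the dict form keeps only the relevant column.
agg_func = {group_by_column: {"minute": "mean", "steps": "min"}[group_by_column]}

collapsed = poi.groupby(["category", "name"]).agg(agg_func).reset_index()
collapsed[group_by_column] = collapsed[group_by_column].astype(int)

print(collapsed.groupby([group_by_column, "category"]).size())
# steps  category
# 2      station       1
# 3      restaurant    1
# dtype: int64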
diff --git a/app/api/src/crud/crud_isochrone.py b/app/api/src/crud/crud_isochrone.py
index 11eb35c32..a2cb0ad3a 100644
--- a/app/api/src/crud/crud_isochrone.py
+++ b/app/api/src/crud/crud_isochrone.py
@@ -10,6 +10,7 @@
 
 import numpy as np
 import pandas as pd
+import geopandas as gpd
 import pyproj
 import requests
 from fastapi import Response
@@ -40,6 +41,7 @@
     R5TravelTimePayloadTemplate,
 )
 from src.utils import (
+    buffer,
     decode_r5_grid,
     delete_dir,
     encode_r5_grid,
@@ -49,20 +51,21 @@
 
 
 class CRUDIsochrone:
-    def restructure_dict(self, original_dict, max_minute=None):
+    def restructure_dict(self, original_dict, max_value=None, step=1):
         """
-        Restructures a dictionary of counts of various categories at different minutes
-        into a dictionary of cumulative counts of categories at each minute.
+        Restructures a dictionary of counts of various categories at different steps
+        into a dictionary of cumulative counts of categories at each step.
 
         Args:
         - original_dict (dict): A dictionary of the form {minute: {category: count, ...}, ...},
           where minute is an integer, category is a string, and count is an integer.
-        - max_minute (int): The maximum minute to consider. If None, the maximum minute is
+        - max_value (int): The maximum step (e.g. minute) to consider. If None, the maximum value is
           automatically determined from the original dictionary.
+        - step (int): The step size between consecutive keys (1 for minutes, 50 for buffer rings).
 
         Returns:
         - new_dict (dict): A dictionary of the form {category: [count1, count2, ...]},
-          where count1, count2, ... are the cumulative counts of the category at each minute.
+          where count1, count2, ... are the cumulative counts of the category at each step.
@@ -74,24 +77,24 @@ def restructure_dict(self, original_dict, max_minute=None):
 
         Example usage:
         original_dict = {
             1: {"atm": 3, "post": 4},
             2: {"atm": 3, "post": 4, "park": 300},
         }
         # Output: {"atm": [3, 6], "post": [4, 8], "park": [0, 300]}
 
         In the returned dictionary, the root keys are the categories from the original dictionary,
-        and each value is a list of the cumulative counts of the category at each minute.
+        and each value is a list of the cumulative counts of the category at each step.
         """
-        # Find the maximum minute and collect all categories
+        # Find the maximum step and collect all categories
         if len(original_dict) == 0:
             return {}
-        if max_minute is None:
-            max_minute = max(original_dict.keys())
+        if max_value is None:
+            max_value = max(original_dict.keys())
         categories = set()
-        for minute_dict in original_dict.values():
-            categories.update(minute_dict.keys())
+        for steps_dict in original_dict.values():
+            categories.update(steps_dict.keys())
 
         # Convert the original dictionary to a 2D NumPy array
-        arr = np.zeros((len(categories), max_minute))
+        arr = np.zeros((len(categories), max_value // step))
         for i, category in enumerate(categories):
-            for j in range(max_minute):
-                if j + 1 in original_dict:
-                    arr[i, j] = original_dict[j + 1].get(category, 0)
+            for index, j in enumerate(range(0, max_value, step)):
+                if j + step in original_dict:
+                    arr[i, index] = original_dict[j + step].get(category, 0)
 
         # Compute the cumulative sum along the rows
         cumulative_arr = arr.cumsum(axis=1)
@@ -99,7 +102,7 @@ def restructure_dict(self, original_dict, max_minute=None):
         # Convert the result back to a dictionary
         new_dict = {category: [] for category in categories}
         for i, category in enumerate(categories):
-            for j in range(max_minute):
+            for j in range(max_value // step):
                 if j < cumulative_arr.shape[1]:
                     new_dict[category].append(cumulative_arr[i, j])
                 else:
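[Reviewer note — illustration only, not part of the patch] A compact, self-contained recreation of the reworked restructure_dict, run on the docstring example above; this is a simplification of the method, not the exact body:

import numpy as np

def restructure(original, max_value=None, step=1):
    # {step_key: {category: count}} -> {category: cumulative counts per step}
    if not original:
        return {}
    max_value = max_value or max(original)
    categories = sorted({c for d in original.values() for c in d})
    arr = np.zeros((len(categories), max_value // step))
    for i, cat in enumerate(categories):
        for idx, j in enumerate(range(0, max_value, step)):
            arr[i, idx] = original.get(j + step, {}).get(cat, 0)
    cum = arr.cumsum(axis=1)  # make the per-step counts cumulative
    return {cat: cum[i].tolist() for i, cat in enumerate(categories)}

print(restructure({1: {"atm": 3, "post": 4}, 2: {"atm": 3, "post": 4, "park": 300}}))
# {'atm': [3.0, 6.0], 'park': [0.0, 300.0], 'post': [4.0, 8.0]}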
""" - # Find the maximum minute and collect all categories + # Find the maximum step and collect all categories if len(original_dict) == 0: return {} - if max_minute is None: - max_minute = max(original_dict.keys()) + if max_value is None: + max_value = max(original_dict.keys()) categories = set() - for minute_dict in original_dict.values(): - categories.update(minute_dict.keys()) + for steps_dict in original_dict.values(): + categories.update(steps_dict.keys()) # Convert the original dictionary to a 2D NumPy array - arr = np.zeros((len(categories), max_minute)) + arr = np.zeros((len(categories), max_value // step)) for i, category in enumerate(categories): - for j in range(max_minute): - if j + 1 in original_dict: - arr[i, j] = original_dict[j + 1].get(category, 0) + for index, j in enumerate(range(0, max_value, step)): + if j + step in original_dict: + arr[i, index] = original_dict[j + step].get(category, 0) # Compute the cumulative sum along the rows cumulative_arr = arr.cumsum(axis=1) @@ -99,7 +102,7 @@ def restructure_dict(self, original_dict, max_minute=None): # Convert the result back to a dictionary new_dict = {category: [] for category in categories} for i, category in enumerate(categories): - for j in range(max_minute): + for j in range(max_value): if j < cumulative_arr.shape[1]: new_dict[category].append(cumulative_arr[i, j]) else: @@ -111,7 +114,6 @@ def restructure_dict(self, original_dict, max_minute=None): async def read_network( self, db, obj_in: IsochroneDTO, current_user, isochrone_type, table_prefix=None ) -> Any: - sql_text = "" if isochrone_type == IsochroneTypeEnum.single.value: sql_text = f"""SELECT id, source, target, cost, reverse_cost, coordinates_3857 as geom, length_3857 AS length, starting_ids, starting_geoms @@ -225,7 +227,6 @@ async def export_isochrone( return_type, geojson_dictionary: dict, ) -> Any: - features = geojson_dictionary["features"] # Remove the payload and set to true to use it later geojson_dictionary = True @@ -339,6 +340,7 @@ async def starting_points_opportunities( "region_geom": None, "study_area_ids": None, } + if obj_in.starting_point.region_type.value == IsochroneMultiRegionType.STUDY_AREA.value: obj_in_data["study_area_ids"] = [ int(study_area_id) for study_area_id in obj_in.starting_point.region @@ -363,6 +365,7 @@ async def calculate( grid = None result = None network = None + step_size = 1 if len(obj_in.starting_point.input) == 1 and isinstance( obj_in.starting_point.input[0], IsochroneStartingPointCoord ): @@ -384,7 +387,7 @@ async def calculate( obj_in.output.resolution, ) # == Public transport isochrone == - else: + elif obj_in.mode.value in [IsochroneMode.TRANSIT.value]: starting_point_geom = Point( obj_in.starting_point.input[0].lon, obj_in.starting_point.input[0].lat ).wkt @@ -418,9 +421,47 @@ async def calculate( ) grid = decode_r5_grid(response.content) - isochrone_shapes = generate_jsolines( - grid=grid, travel_time=obj_in.settings.travel_time, percentile=5 - ) + if obj_in.mode.value in [IsochroneMode.BUFFER.value]: + if isochrone_type == IsochroneTypeEnum.multi.value: + starting_points = await self.starting_points_opportunities( + current_user, db, obj_in + ) + x = starting_points[0][0] + y = starting_points[0][1] + starting_points_gdf = gpd.GeoDataFrame( + geometry=gpd.points_from_xy(x, y), crs="EPSG:4326" + ) + starting_point_geom = str(starting_points_gdf["geometry"].to_wkt().to_list()) + else: + starting_point_geom = Point( + obj_in.starting_point.input[0].lon, obj_in.starting_point.input[0].lat + ).wkt + 
@@ -436,20 +477,22 @@ async def calculate(
             )
 
             # Poi
-            poi_count = opportunity_count.count_poi(group_by_column="minute")
+            poi_count = opportunity_count.count_poi(group_by_column=group_by_column)
             # Population
-            population_count = opportunity_count.count_population(group_by_columns=["minute"])
+            population_count = opportunity_count.count_population(
+                group_by_columns=[group_by_column]
+            )
             opportunities = [poi_count, population_count]
             if obj_in.mode.value != IsochroneMode.TRANSIT.value:
                 # Aoi
                 aoi_count = opportunity_count.count_aoi(
-                    group_by_columns=["minute", "category"]
+                    group_by_columns=[group_by_column, "category"]
                 )
                 opportunities.append(aoi_count)
 
             opportunities = merge_dicts(*opportunities)
-            opportunities = self.restructure_dict(opportunities)
+            opportunities = self.restructure_dict(opportunities, step=step_size)
             grid["accessibility"] = {
                 "starting_points": starting_point_geom,
                 "opportunities": opportunities,
@@ -483,8 +526,9 @@ async def calculate(
                     # clip the isochrone shapes to the regions
                     isochrone_clip = clip(isochrone_shapes["incremental"], region["geometry"])
                     # adds a column which combines the region id and the isochrone minute to avoid calling the opportunity intersect multiple times within the loop
+
                     isochrone_clip["region"] = isochrone_clip.apply(
-                        lambda x: "{}_x_{}".format(region["name"], x.minute), axis=1
+                        lambda x: "{}_x_{}".format(region["name"], x.get(group_by_column)), axis=1
                     )
                     isochrone_clip.set_crs(epsg=4326, inplace=True)
@@ -517,8 +561,7 @@ async def calculate(
                 # population count
                 for region, population_count in regions_count.items():
                     population_reached = self.restructure_dict(
-                        remove_keys(population_count, ["total"]),
-                        max_minute=obj_in.settings.travel_time,
+                        remove_keys(population_count, ["total"])
                     )
                     population_reached["population"] = [
                         int(x) for x in population_reached["population"]
@@ -539,6 +582,12 @@ async def calculate(
                 "starting_points": starting_point_geom,
                 "opportunities": dict(opportunities),
             }
+            if obj_in.mode.value == IsochroneMode.BUFFER.value:
+                grid["accessibility"]["buffer"] = {
+                    "steps": 50,  # in meters
+                    "distance": obj_in.settings.buffer_distance,
+                    "geojson": isochrone_shapes["full"].to_json(),
+                }
             grid_encoded = encode_r5_grid(grid)
             result = Response(bytes(grid_encoded))
         else:
diff --git a/app/api/src/schemas/isochrone.py b/app/api/src/schemas/isochrone.py
index d83bd1a5b..626b8cb47 100644
--- a/app/api/src/schemas/isochrone.py
+++ b/app/api/src/schemas/isochrone.py
@@ -41,6 +41,7 @@ class IsochroneMode(Enum):
     CYCLING = "cycling"
     TRANSIT = "transit"
     CAR = "car"
+    BUFFER = "buffer"
 
 
 class IsochroneWalkingProfile(Enum):
@@ -105,11 +106,17 @@ class IsochroneDecayFunction(BaseModel):
 
 class IsochroneSettings(BaseModel):
     # === SETTINGS FOR WALKING AND CYCLING ===#
-    travel_time: int = Field(
+    travel_time: Optional[int] = Field(
         10,
         gt=0,
         description="Travel time in **minutes**",
     )
+    buffer_distance: Optional[int] = Field(
+        1000,
+        gt=50,
+        le=3000,
+        description="Buffer distance in **meters**",
+    )
     speed: Optional[float] = Field(
         5,
         gt=0,
@@ -262,11 +269,17 @@ def validate_output(cls, values):
                 IsochroneAccessMode.BICYCLE.value,
             ]
         ):
-
             raise ValueError(
                 "Resolution must be between 9 and 14 for walking and cycling isochrones"
             )
+        # Check that buffer_distance is provided for "BUFFER" mode
+        if (
+            values["mode"].value == IsochroneMode.BUFFER.value
+            and not values["settings"].buffer_distance
+        ):
+            raise ValueError("Buffer distance is required for buffer catchment area")
+
         # Validation check on grid resolution and number of steps for geojson for public transport isochrones
         if (
             values["output"].type.value == IsochroneOutputType.GRID.value
@@ -340,6 +353,7 @@ def validate_output(cls, values):
 
         return values
 
+
 # R5
 R5AvailableDates = {
     0: "2022-05-16",
@@ -462,6 +476,21 @@
             },
         },
     },
+    "single_buffer_catchment": {
+        "summary": "Single Buffer Catchment",
+        "value": {
+            "mode": "buffer",
+            "settings": {"buffer_distance": "2000"},
+            "starting_point": {
+                "input": [{"lat": 48.1502132, "lon": 11.5696284}],
+            },
+            "scenario": {"id": 0, "modus": "default"},
+            "output": {
+                "type": "grid",
+                "resolution": "12",
+            },
+        },
+    },
 },
 "to_export": {
     "single_isochrone": {
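[Reviewer note — illustration only, not part of the patch] The new buffer-distance rule can be exercised in isolation. A minimal pydantic v1 sketch, matching the root_validator style this schema file uses; model and field names here are stand-ins, only the rule mirrors the change above:

from typing import Optional
from pydantic import BaseModel, ValidationError, root_validator

class Settings(BaseModel):
    buffer_distance: Optional[int] = None  # 51..3000 meters in the real schema

class Request(BaseModel):
    mode: str
    settings: Settings

    @root_validator
    def buffer_needs_distance(cls, values):
        # Buffer catchments are meaningless without a distance.
        if values.get("mode") == "buffer" and not values["settings"].buffer_distance:
            raise ValueError("Buffer distance is required for buffer catchment area")
        return values

Request(mode="buffer", settings=Settings(buffer_distance=2000))  # passes
try:
    Request(mode="buffer", settings=Settings())
except ValidationError as e:
    print(e)  # -> Buffer distance is required for buffer catchment area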
diff --git a/app/api/src/utils.py b/app/api/src/utils.py
index 5ef88c687..eba570448 100644
--- a/app/api/src/utils.py
+++ b/app/api/src/utils.py
@@ -252,11 +252,14 @@ def encode_r5_grid(grid_data: Any) -> bytes:
     header_bin = array.tobytes()
     # - reshape the data
     grid_size = grid_data["width"] * grid_data["height"]
-    data = grid_data["data"].reshape(grid_data["depth"], grid_size)
-    reshaped_data = np.array([])
-    for i in range(grid_data["depth"]):
-        reshaped_data = np.append(reshaped_data, np.diff(data[i], prepend=0))
-    data = reshaped_data.astype(np.int32)
+    if len(grid_data["data"]) == 0:
+        data = np.array([], dtype=np.int32)
+    else:
+        data = grid_data["data"].reshape(grid_data["depth"], grid_size)
+        reshaped_data = np.array([])
+        for i in range(grid_data["depth"]):
+            reshaped_data = np.append(reshaped_data, np.diff(data[i], prepend=0))
+        data = reshaped_data.astype(np.int32)
     z_diff_bin = data.tobytes()
 
     # - encode metadata
@@ -459,6 +462,51 @@ def coordinate_from_pixel(input, zoom, round_int=False, web_mercator=False):
     return [x, y]
 
 
+def buffer(starting_points_gdf, buffer_distance, increment):
+    """
+    Incrementally buffer a set of points.
+
+    Parameters
+    ----------
+    starting_points_gdf : geopandas.GeoDataFrame. The starting points to buffer.
+    buffer_distance : float. The total buffer distance in meters.
+    increment : float. The distance in meters added at each step.
+
+    Returns
+    -------
+    dict of geopandas.GeoDataFrame. "full" holds the cumulative buffers,
+    "incremental" the rings between consecutive buffers.
+
+    """
+    starting_points_gdf = starting_points_gdf.to_crs(epsg=3857)
+    results = {}
+    steps = []
+    incremental_shapes = []
+    full_shapes = []
+
+    for i in range(increment, buffer_distance + 1, increment):
+        steps.append(i // increment)
+        union_geom = starting_points_gdf.buffer(i).unary_union
+        full_shapes.append(union_geom)
+
+    full_gdf = geopandas.GeoDataFrame({"geometry": full_shapes, "steps": steps})
+    full_gdf.set_crs(epsg=3857, inplace=True)
+    full_gdf = full_gdf.to_crs(epsg=4326)
+    results["full"] = full_gdf
+
+    for i in range(len(full_shapes)):
+        if i == 0:
+            incremental_shapes.append(full_shapes[i])
+        else:
+            incremental_shapes.append(full_shapes[i].difference(full_shapes[i - 1]))
+
+    incremental_gdf = geopandas.GeoDataFrame({"geometry": incremental_shapes, "steps": steps})
+    incremental_gdf.set_crs(epsg=3857, inplace=True)
+    incremental_gdf = incremental_gdf.to_crs(epsg=4326)
+    results["incremental"] = incremental_gdf
+
+    return results
+
+
 def coordinate_to_pixel(input, zoom, return_dict=True, round_int=False, web_mercator=False):
     """
     Convert coordinate to pixel coordinate
@@ -520,7 +568,6 @@ def geometry_to_pixel(geometry, zoom):
         )
     if geometry["type"] == "LineString":
         for coordinate in geometry["coordinates"]:
-
             pixel_coordinates.append(
                 np.unique(
                     np.array(
@@ -655,7 +702,6 @@ def print_warning(message: str):
 
 
 def tablify(s):
-
     # Replace file suffix dot with underscore
     s = s.replace(".", "_")
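[Reviewer note — illustration only, not part of the patch] The buffer() helper above builds cumulative buffers and then derives rings by differencing consecutive shapes. A standalone sketch of the same geometry flow for one starting point (geopandas and shapely assumed installed):

import geopandas as gpd
from shapely.geometry import Point

start = gpd.GeoDataFrame(geometry=[Point(11.5696284, 48.1502132)], crs="EPSG:4326")
buffer_distance, increment = 200, 50  # meters

projected = start.to_crs(epsg=3857)  # metric CRS so buffer distances are meters
steps, full_shapes = [], []
for dist in range(increment, buffer_distance + 1, increment):
    steps.append(dist // increment)  # step index: 1, 2, 3, 4
    full_shapes.append(projected.buffer(dist).unary_union)

# Rings: each cumulative buffer minus the previous one.
rings = [full_shapes[0]] + [
    cur.difference(prev) for prev, cur in zip(full_shapes, full_shapes[1:])
]

full_gdf = gpd.GeoDataFrame({"geometry": full_shapes, "steps": steps}, crs="EPSG:3857").to_crs(4326)
rings_gdf = gpd.GeoDataFrame({"geometry": rings, "steps": steps}, crs="EPSG:3857").to_crs(4326)
print(len(full_gdf), len(rings_gdf))  # 4 4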
diff --git a/app/client/src/components/isochrones/Isochrones.vue b/app/client/src/components/isochrones/Isochrones.vue
index 5d4670164..ebeb34970 100644
--- a/app/client/src/components/isochrones/Isochrones.vue
+++ b/app/client/src/components/isochrones/Isochrones.vue
@@ -68,7 +68,9 @@
-