[ADD] Isochrone buffer #2192

Merged · 1 commit · May 1, 2023
27 changes: 16 additions & 11 deletions app/api/src/core/opportunity.py
@@ -120,15 +120,16 @@ def read_modified_data(
if layer not in self.layers_modifiable:
raise ValueError(f"Layer {layer} not in modifiable layers {self.layers_modifiable}.")

sql_query = f"""SELECT * FROM customer.{layer}_modified p WHERE p.scenario_id = {scenario_id} """
sql_query = (
f"""SELECT * FROM customer.{layer}_modified p WHERE p.scenario_id = {scenario_id} """
)

if layer == "poi":
sql_query += f""" AND p.edit_type IN (SELECT UNNEST(ARRAY{edit_type}::text[]))"""

if bbox_wkt is not None:
sql_query += f""" AND ST_Intersects('SRID=4326;{bbox_wkt}'::geometry, p.geom)"""


modified_gdf = read_postgis(
sql_query,
db,
@@ -202,7 +203,10 @@ def read_poi(
).iloc[0][0]
# - Get the poi categories for one entrance and multiple entrances.
# Returns {'true': [restaurant, shop...], 'false': [bus_stop, train_station...]}
poi_categories = pd.read_sql(f"SELECT * FROM basic.poi_categories({user_id})", db,).iloc[
poi_categories = pd.read_sql(
f"SELECT * FROM basic.poi_categories({user_id})",
db,
).iloc[
0
][0]
poi_multiple_entrances = poi_categories["true"]
@@ -418,20 +422,21 @@ def count_poi(self, group_by_column: str):
poi_multiple_entrances_gdf = poi_gdf[poi_gdf["entrance_type"] == "multiple"]

agg_func = {}
if group_by_column == "minute":
if group_by_column == "minute" or group_by_column == "steps":
# relevant for isochrone inputs
agg_func = {"minute": "mean"}
agg_func_ = {"minute": "mean", "steps": "min"}
agg_func = {group_by_column: agg_func_[group_by_column]}
else:
# case when a multi entrance poi is in multiple shapes
agg_func = {group_by_column: "_".join}

poi_multiple_entrances_gdf_grouped = (
poi_multiple_entrances_gdf.groupby(["category", "name"]).agg(agg_func).reset_index()
)
if group_by_column == "minute":
poi_multiple_entrances_gdf_grouped["minute"] = poi_multiple_entrances_gdf_grouped[
"minute"
].astype(int)
if group_by_column == "minute" or group_by_column == "steps":
poi_multiple_entrances_gdf_grouped[
group_by_column
] = poi_multiple_entrances_gdf_grouped[group_by_column].astype(int)

poi_multiple_entrances_gdf_grouped = poi_multiple_entrances_gdf_grouped.groupby(
[group_by_column, "category"]
@@ -455,7 +460,7 @@ def count_population(self, group_by_columns: list[str]):
population_gdf_grouped = population_gdf_join.groupby(group_by_columns).agg(
{"population": "sum"}
)
for (key, value) in population_gdf_grouped.iterrows():
for key, value in population_gdf_grouped.iterrows():
population_count[key]["population"] = value["population"]

return dict(population_count)
@@ -481,4 +486,4 @@ def count_aoi(self, group_by_columns: list[str]):
return dict(aoi_count)


opportunity = Opportunity()
opportunity = Opportunity()
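
Note on the count_poi change above: the aggregation function now depends on the grouping column, so a multi-entrance POI keeps the mean minute for travel-time isochrones but the nearest (minimum) step for buffer catchments. A toy pandas sketch of that behavior (illustrative data, not from this PR):

import pandas as pd

# Stand-in for poi_multiple_entrances_gdf: one POI with two entrances.
df = pd.DataFrame(
    {
        "category": ["transport"] * 2,
        "name": ["main_station"] * 2,
        "minute": [4, 8],  # travel-time isochrone: mean -> 6
        "steps": [2, 5],   # buffer step that first reaches each entrance: min -> 2
    }
)

agg_func_ = {"minute": "mean", "steps": "min"}
for group_by_column in ("minute", "steps"):
    agg_func = {group_by_column: agg_func_[group_by_column]}
    print(df.groupby(["category", "name"]).agg(agg_func).reset_index())
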
107 changes: 78 additions & 29 deletions app/api/src/crud/crud_isochrone.py
@@ -10,6 +10,7 @@

import numpy as np
import pandas as pd
import geopandas as gpd
import pyproj
import requests
from fastapi import Response
@@ -40,6 +41,7 @@
R5TravelTimePayloadTemplate,
)
from src.utils import (
buffer,
decode_r5_grid,
delete_dir,
encode_r5_grid,
@@ -49,20 +51,21 @@


class CRUDIsochrone:
def restructure_dict(self, original_dict, max_minute=None):
def restructure_dict(self, original_dict, max_value=None, step=1):
"""
Restructures a dictionary of counts of various categories at different minutes
into a dictionary of cumulative counts of categories at each minute.
Restructures a dictionary of counts of various categories at different steps
into a dictionary of cumulative counts of categories at each step.

Args:
- original_dict (dict): A dictionary of the form {minute: {category: count, ...}, ...},
where minute is an integer, category is a string, and count is an integer.
- max_minute (int): The maximum minute to consider. If None, the maximum minute is
- max_value (int): The maximum step (e.g. minute) to consider. If None, the maximum value is
automatically determined from the original dictionary.
- step (int): The step size. For example, if step=1, the cumulative counts are computed at every step from 1 to max_value.

Returns:
- new_dict (dict): A dictionary of the form {category: [count1, count2, ...]},
where count1, count2, ... are the cumulative counts of the category at each minute.
where count1, count2, ... are the cumulative counts of the category at each step.

Example usage:
original_dict = {
@@ -74,32 +77,32 @@ def restructure_dict(self, original_dict, max_minute=None):
# Output: {"atm": [3, 6], "post": [4, 8], "park": [0, 300]}

In the returned dictionary, the root keys are the categories from the original dictionary,
and each value is a list of the cumulative counts of the category at each minute.
and each value is a list of the cumulative counts of the category at each step.
"""
# Find the maximum minute and collect all categories
# Find the maximum step and collect all categories
if len(original_dict) == 0:
return {}
if max_minute is None:
max_minute = max(original_dict.keys())
if max_value is None:
max_value = max(original_dict.keys())
categories = set()
for minute_dict in original_dict.values():
categories.update(minute_dict.keys())
for steps_dict in original_dict.values():
categories.update(steps_dict.keys())

# Convert the original dictionary to a 2D NumPy array
arr = np.zeros((len(categories), max_minute))
arr = np.zeros((len(categories), max_value // step))

for i, category in enumerate(categories):
for j in range(max_minute):
if j + 1 in original_dict:
arr[i, j] = original_dict[j + 1].get(category, 0)
for index, j in enumerate(range(0, max_value, step)):
if j + step in original_dict:
arr[i, index] = original_dict[j + step].get(category, 0)

# Compute the cumulative sum along the rows
cumulative_arr = arr.cumsum(axis=1)

# Convert the result back to a dictionary
new_dict = {category: [] for category in categories}
for i, category in enumerate(categories):
for j in range(max_minute):
for j in range(max_value):
if j < cumulative_arr.shape[1]:
new_dict[category].append(cumulative_arr[i, j])
else:
@@ -111,7 +114,6 @@ def restructure_dict(self, original_dict, max_minute=None):
async def read_network(
self, db, obj_in: IsochroneDTO, current_user, isochrone_type, table_prefix=None
) -> Any:

sql_text = ""
if isochrone_type == IsochroneTypeEnum.single.value:
sql_text = f"""SELECT id, source, target, cost, reverse_cost, coordinates_3857 as geom, length_3857 AS length, starting_ids, starting_geoms
@@ -225,7 +227,6 @@ async def export_isochrone(
return_type,
geojson_dictionary: dict,
) -> Any:

features = geojson_dictionary["features"]
# Remove the payload and set to true to use it later
geojson_dictionary = True
@@ -339,6 +340,7 @@ async def starting_points_opportunities(
"region_geom": None,
"study_area_ids": None,
}

if obj_in.starting_point.region_type.value == IsochroneMultiRegionType.STUDY_AREA.value:
obj_in_data["study_area_ids"] = [
int(study_area_id) for study_area_id in obj_in.starting_point.region
@@ -363,6 +365,7 @@ async def calculate(
grid = None
result = None
network = None
step_size = 1
if len(obj_in.starting_point.input) == 1 and isinstance(
obj_in.starting_point.input[0], IsochroneStartingPointCoord
):
@@ -384,7 +387,7 @@
obj_in.output.resolution,
)
# == Public transport isochrone ==
else:
elif obj_in.mode.value in [IsochroneMode.TRANSIT.value]:
starting_point_geom = Point(
obj_in.starting_point.input[0].lon, obj_in.starting_point.input[0].lat
).wkt
@@ -418,9 +421,47 @@
)
grid = decode_r5_grid(response.content)

isochrone_shapes = generate_jsolines(
grid=grid, travel_time=obj_in.settings.travel_time, percentile=5
)
if obj_in.mode.value in [IsochroneMode.BUFFER.value]:
if isochrone_type == IsochroneTypeEnum.multi.value:
starting_points = await self.starting_points_opportunities(
current_user, db, obj_in
)
x = starting_points[0][0]
y = starting_points[0][1]
starting_points_gdf = gpd.GeoDataFrame(
geometry=gpd.points_from_xy(x, y), crs="EPSG:4326"
)
starting_point_geom = str(starting_points_gdf["geometry"].to_wkt().to_list())
else:
starting_point_geom = Point(
obj_in.starting_point.input[0].lon, obj_in.starting_point.input[0].lat
).wkt
starting_points = [
Point(data.lon, data.lat) for data in obj_in.starting_point.input
]
starting_points_gdf = gpd.GeoDataFrame(geometry=starting_points, crs="EPSG:4326")
isochrone_shapes = buffer(
starting_points_gdf=starting_points_gdf,
buffer_distance=obj_in.settings.buffer_distance,
increment=50,
)
group_by_column = "steps"
grid = {
"surface": [],
"data": [],
"width": 0,
"height": 0,
"west": 0,
"north": 0,
"zoom": 0,
"depth": 1,
"version": 0,
}
else:
isochrone_shapes = generate_jsolines(
grid=grid, travel_time=obj_in.settings.travel_time, percentile=5
)
group_by_column = "minute"
if obj_in.output.type.value != IsochroneOutputType.NETWORK.value:
# Opportunity intersect
opportunity_user_args = {
@@ -436,20 +477,22 @@
)

# Poi
poi_count = opportunity_count.count_poi(group_by_column="minute")
poi_count = opportunity_count.count_poi(group_by_column=group_by_column)
# Population
population_count = opportunity_count.count_population(group_by_columns=["minute"])
population_count = opportunity_count.count_population(
group_by_columns=[group_by_column]
)

opportunities = [poi_count, population_count]
if obj_in.mode.value != IsochroneMode.TRANSIT.value:
# Aoi
aoi_count = opportunity_count.count_aoi(
group_by_columns=["minute", "category"]
group_by_columns=[group_by_column, "category"]
)
opportunities.append(aoi_count)

opportunities = merge_dicts(*opportunities)
opportunities = self.restructure_dict(opportunities)
opportunities = self.restructure_dict(opportunities, step=step_size)
grid["accessibility"] = {
"starting_points": starting_point_geom,
"opportunities": opportunities,
@@ -483,8 +526,9 @@ async def calculate(
# clip the isochrone shapes to the regions
isochrone_clip = clip(isochrone_shapes["incremental"], region["geometry"])
# adds a column which combines the region id and the isochrone minute to avoid calling the opportunity intersect multiple times within the loop

isochrone_clip["region"] = isochrone_clip.apply(
lambda x: "{}_x_{}".format(region["name"], x.minute), axis=1
lambda x: "{}_x_{}".format(region["name"], x.get(group_by_column)), axis=1
)
isochrone_clip.set_crs(epsg=4326, inplace=True)

@@ -517,8 +561,7 @@ async def calculate(
# population count
for region, population_count in regions_count.items():
population_reached = self.restructure_dict(
remove_keys(population_count, ["total"]),
max_minute=obj_in.settings.travel_time,
remove_keys(population_count, ["total"])
)
population_reached["population"] = [
int(x) for x in population_reached["population"]
@@ -539,6 +582,12 @@
"starting_points": starting_point_geom,
"opportunities": dict(opportunities),
}
if obj_in.mode.value == IsochroneMode.BUFFER.value:
grid["accessibility"]["buffer"] = {
"steps": 50, # in meters
"distance": obj_in.settings.buffer_distance,
"geojson": isochrone_shapes["full"].to_json(),
}
grid_encoded = encode_r5_grid(grid)
result = Response(bytes(grid_encoded))
else:
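
Note on the buffer helper: buffer is imported from src.utils, whose body is outside this diff. From the call site (starting_points_gdf, buffer_distance, increment=50) and from how the result is consumed (isochrone_shapes["incremental"] carries a "steps" column; isochrone_shapes["full"].to_json() is embedded in the grid), it plausibly builds incremental distance rings around the starting points. A minimal geopandas sketch of that idea; the function name, CRS choice, and step encoding below are assumptions, not the PR's actual implementation:

import geopandas as gpd

def buffer_sketch(starting_points_gdf, buffer_distance, increment=50):
    """Hypothetical stand-in for src.utils.buffer (not the PR's actual code).

    Returns {"incremental": one ring per step with a 'steps' column,
             "full": the merged shape at the final distance}.
    """
    # Buffer in a metric CRS; buffering EPSG:4326 degrees would be meaningless.
    pts = starting_points_gdf.to_crs(epsg=3857)

    steps, rings, previous = [], [], None
    for step, distance in enumerate(range(increment, buffer_distance + 1, increment), start=1):
        disc = pts.buffer(distance).unary_union
        # Keep only the ring added at this step, so shapes stay incremental.
        rings.append(disc if previous is None else disc.difference(previous))
        steps.append(step)  # ring index; whether the real helper stores indices or meters is an assumption
        previous = disc

    incremental = gpd.GeoDataFrame({"steps": steps}, geometry=rings, crs="EPSG:3857")
    full = gpd.GeoDataFrame(geometry=[previous], crs="EPSG:3857")
    return {"incremental": incremental.to_crs(epsg=4326), "full": full.to_crs(epsg=4326)}

Downstream, "steps" plays the role "minute" plays for travel-time isochrones: count_poi groups on it, the region-clipping lambda reads it via x.get(group_by_column), and restructure_dict turns per-step counts into cumulative series (e.g. {1: {"atm": 1}, 2: {"atm": 2}} with step=1 becomes {"atm": [1, 3]}); the grid["accessibility"]["buffer"] block then records that each step spans 50 meters.
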
33 changes: 31 additions & 2 deletions app/api/src/schemas/isochrone.py
@@ -41,6 +41,7 @@ class IsochroneMode(Enum):
CYCLING = "cycling"
TRANSIT = "transit"
CAR = "car"
BUFFER = "buffer"


class IsochroneWalkingProfile(Enum):
Expand Down Expand Up @@ -105,11 +106,17 @@ class IsochroneDecayFunction(BaseModel):

class IsochroneSettings(BaseModel):
# === SETTINGS FOR WALKING AND CYCLING ===#
travel_time: int = Field(
travel_time: Optional[int] = Field(
10,
gt=0,
description="Travel time in **minutes**",
)
buffer_distance: Optional[int] = Field(
1000,
gt=50,
le=3000,
description="Buffer distance in **meters**",
)
speed: Optional[float] = Field(
5,
gt=0,
@@ -262,11 +269,17 @@ def validate_output(cls, values):
IsochroneAccessMode.BICYCLE.value,
]
):

raise ValueError(
"Resolution must be between 9 and 14 for walking and cycling isochrones"
)

# Validate that buffer_distance is provided for "BUFFER" mode
if (
values["mode"].value == IsochroneMode.BUFFER.value
and not values["settings"].buffer_distance
):
raise ValueError("Buffer distance is required for buffer catchment area")

# Validation check on grid resolution and number of steps for geojson for public transport isochrones
if (
values["output"].type.value == IsochroneOutputType.GRID.value
@@ -340,6 +353,7 @@ def validate_output(cls, values):

return values


# R5
R5AvailableDates = {
0: "2022-05-16",
@@ -462,6 +476,21 @@ def validate_output(cls, values):
},
},
},
"single_buffer_catchment": {
"summary": "Single Buffer Catchment",
"value": {
"mode": "buffer",
"settings": {"buffer_distance": "2000"},
"starting_point": {
"input": [{"lat": 48.1502132, "lon": 11.5696284}],
},
"scenario": {"id": 0, "modus": "default"},
"output": {
"type": "grid",
"resolution": "12",
},
},
},
},
"to_export": {
"single_isochrone": {
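
The schema change is easiest to see in isolation: pydantic enforces gt=50 and le=3000 on buffer_distance, and the validator above additionally requires the field when mode is "buffer". A self-contained sketch using a minimal stand-in model (not the full IsochroneSettings):

from typing import Optional

from pydantic import BaseModel, Field, ValidationError

class BufferSettingsSketch(BaseModel):
    # Minimal stand-in for the buffer-related part of IsochroneSettings.
    buffer_distance: Optional[int] = Field(
        1000, gt=50, le=3000, description="Buffer distance in **meters**"
    )

print(BufferSettingsSketch(buffer_distance=2000).buffer_distance)  # accepted: 2000

try:
    BufferSettingsSketch(buffer_distance=50)  # rejected: must be strictly greater than 50
except ValidationError as err:
    print(err)

With the defaults above, omitting buffer_distance falls back to 1000 m; note that the "single_buffer_catchment" request example passes "2000" as a string, which pydantic coerces to int.
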