diff --git a/notebooks/new_convergence/hdm_c7_riskdistribution.ipynb b/notebooks/new_convergence/hdm_c7_riskdistribution.ipynb
new file mode 100644
index 0000000..15d776a
--- /dev/null
+++ b/notebooks/new_convergence/hdm_c7_riskdistribution.ipynb
@@ -0,0 +1,738 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "406bc48f-325a-4391-90e2-6fa3355e1e6f",
+ "metadata": {},
+ "source": [
+ "# analysis of initial risk attitude distributions & population outcomes\n",
+ "\n",
+ "Analyzing data generated from a batch run with 1000 iterations each per risk distribution\n",
+ "- maximum run length of 3000 steps\n",
+ "- convergence threshold at <=7% agents changing for two adjustment rounds in a row\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 95,
+ "id": "378baa15-bbdf-4fdc-a6a9-e86b5708a2ed",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import polars as pl\n",
+ "\n",
+ "\n",
+ "df = pl.read_csv(\"../../data/hawkdovemulti/dist_c7_3k_2024-02-27T162947_580557_model.csv\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 96,
+ "id": "068553c1-5911-4ed0-b615-d91082b6e935",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Analyzing 5000 runs\n"
+ ]
+ }
+ ],
+ "source": [
+ "total_runs = len(df)\n",
+ "\n",
+ "print(f\"Analyzing {total_runs} runs\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "0fe16c85-8807-4048-b2f5-22ebb98f5efd",
+ "metadata": {},
+ "source": [
+ "## what % converged?"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 97,
+ "id": "ae6f6d58-3247-4f5e-a53c-8b1cb1d0e41c",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "4594"
+ ]
+ },
+ "execution_count": 97,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "converged_df = df.filter(df[\"status\"] == \"converged\")\n",
+ "len(converged_df)"
+ ]
+ },
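+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a3b1c2d4-5e6f-4a7b-8c9d-1e2f3a4b5c6d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# share of runs that converged (uses total_runs and converged_df from above)\n",
+ "print(f\"{len(converged_df) / total_runs:.1%} of runs converged\")"
+ ]
+ },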
+ {
+ "cell_type": "markdown",
+ "id": "23b86d00-17cd-4f22-bcbb-8132421e9d7c",
+ "metadata": {},
+ "source": [
+ "almost all of them!"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "6adc4c4c-527b-4afa-b9d7-77fdeed4ad27",
+ "metadata": {},
+ "source": [
+ "## how long does it take to converge?"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 98,
+ "id": "6beece01-384a-4564-9f43-425129c65997",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "
\n",
+ "
shape: (9, 2)statistic | value |
---|
str | f64 |
"count" | 4594.0 |
"null_count" | 0.0 |
"mean" | 147.572921 |
"std" | 135.641494 |
"min" | 50.0 |
"25%" | 70.0 |
"50%" | 110.0 |
"75%" | 170.0 |
"max" | 2410.0 |
"
+ ],
+ "text/plain": [
+ "shape: (9, 2)\n",
+ "┌────────────┬────────────┐\n",
+ "│ statistic ┆ value │\n",
+ "│ --- ┆ --- │\n",
+ "│ str ┆ f64 │\n",
+ "╞════════════╪════════════╡\n",
+ "│ count ┆ 4594.0 │\n",
+ "│ null_count ┆ 0.0 │\n",
+ "│ mean ┆ 147.572921 │\n",
+ "│ std ┆ 135.641494 │\n",
+ "│ min ┆ 50.0 │\n",
+ "│ 25% ┆ 70.0 │\n",
+ "│ 50% ┆ 110.0 │\n",
+ "│ 75% ┆ 170.0 │\n",
+ "│ max ┆ 2410.0 │\n",
+ "└────────────┴────────────┘"
+ ]
+ },
+ "execution_count": 98,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "converged_df[\"Step\"].describe()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 99,
+ "id": "2f2d00f7-5d25-4313-8032-14b465b7124e",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {},
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.holoviews_exec.v0+json": "",
+ "text/html": [
+ "\n",
+ ""
+ ],
+ "text/plain": [
+ ":Histogram [Step] (Step_count)"
+ ]
+ },
+ "execution_count": 99,
+ "metadata": {
+ "application/vnd.holoviews_exec.v0+json": {
+ "id": "p1330"
+ }
+ },
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "converged_df[\"Step\"].plot.hist()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "d4c671c7-3cfd-4b9d-b765-1bfd94d052b0",
+ "metadata": {},
+ "source": [
+ "## compare different initial distributions"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 100,
+ "id": "91116b11-aa00-401a-9ab1-5de936f196e5",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "
shape: (5,)risk_distribution |
---|
str |
"skewed right" |
"uniform" |
"skewed left" |
"bimodal" |
"normal" |
"
+ ],
+ "text/plain": [
+ "shape: (5,)\n",
+ "Series: 'risk_distribution' [str]\n",
+ "[\n",
+ "\t\"skewed right\"\n",
+ "\t\"uniform\"\n",
+ "\t\"skewed left\"\n",
+ "\t\"bimodal\"\n",
+ "\t\"normal\"\n",
+ "]"
+ ]
+ },
+ "execution_count": 100,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df[\"risk_distribution\"].unique()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "23171e3e-7ec9-4a71-9ee5-aeee4bb8faaa",
+ "metadata": {},
+ "source": [
+ "How many converged runs in each subset?\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 101,
+ "id": "3d540440-e528-4535-98f3-59d051cb6e3b",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "
shape: (5, 2)risk_distribution | count |
---|
str | u32 |
"uniform" | 938 |
"skewed right" | 987 |
"bimodal" | 710 |
"normal" | 996 |
"skewed left" | 963 |
"
+ ],
+ "text/plain": [
+ "shape: (5, 2)\n",
+ "┌───────────────────┬───────┐\n",
+ "│ risk_distribution ┆ count │\n",
+ "│ --- ┆ --- │\n",
+ "│ str ┆ u32 │\n",
+ "╞═══════════════════╪═══════╡\n",
+ "│ uniform ┆ 938 │\n",
+ "│ skewed right ┆ 987 │\n",
+ "│ bimodal ┆ 710 │\n",
+ "│ normal ┆ 996 │\n",
+ "│ skewed left ┆ 963 │\n",
+ "└───────────────────┴───────┘"
+ ]
+ },
+ "execution_count": 101,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "converged_df.group_by(\"risk_distribution\").count()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 102,
+ "id": "9b324c1b-26f7-45a5-95e7-a2c4bf434d13",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# filter converged run data into subsets by risk distribution\n",
+ "\n",
+ "subset = {}\n",
+ "\n",
+ "for distribution in converged_df[\"risk_distribution\"].unique():\n",
+ " subset[distribution] = converged_df.filter(pl.col(\"risk_distribution\") == distribution)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "dbfcdd7d-4df4-4fd7-b6d1-e1a8601f78a8",
+ "metadata": {},
+ "source": [
+ "### How does initial distribution affect convergence?"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 103,
+ "id": "89213472-05ca-46a1-a6b9-be0763513618",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "
shape: (10, 3)risk_distribution | status | count |
---|
str | str | u32 |
"uniform" | "converged" | 938 |
"normal" | "converged" | 996 |
"skewed left" | "running" | 37 |
"normal" | "running" | 4 |
"skewed left" | "converged" | 963 |
"skewed right" | "running" | 13 |
"bimodal" | "running" | 290 |
"skewed right" | "converged" | 987 |
"uniform" | "running" | 62 |
"bimodal" | "converged" | 710 |
"
+ ],
+ "text/plain": [
+ "shape: (10, 3)\n",
+ "┌───────────────────┬───────────┬───────┐\n",
+ "│ risk_distribution ┆ status ┆ count │\n",
+ "│ --- ┆ --- ┆ --- │\n",
+ "│ str ┆ str ┆ u32 │\n",
+ "╞═══════════════════╪═══════════╪═══════╡\n",
+ "│ uniform ┆ converged ┆ 938 │\n",
+ "│ normal ┆ converged ┆ 996 │\n",
+ "│ skewed left ┆ running ┆ 37 │\n",
+ "│ normal ┆ running ┆ 4 │\n",
+ "│ … ┆ … ┆ … │\n",
+ "│ bimodal ┆ running ┆ 290 │\n",
+ "│ skewed right ┆ converged ┆ 987 │\n",
+ "│ uniform ┆ running ┆ 62 │\n",
+ "│ bimodal ┆ converged ┆ 710 │\n",
+ "└───────────────────┴───────────┴───────┘"
+ ]
+ },
+ "execution_count": 103,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "status_by_dist = df.group_by(\"risk_distribution\", \"status\").count()\n",
+ "status_by_dist"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 104,
+ "id": "48fd71eb-31b5-4bb6-bef0-c0a827991492",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ ""
+ ],
+ "text/plain": [
+ "alt.Chart(...)"
+ ]
+ },
+ "execution_count": 104,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "alt.Chart(status_by_dist).mark_bar().encode(\n",
+ " x='risk_distribution:N',\n",
+ " y='count',\n",
+ " color='status:N'\n",
+ ").properties(title=\"Simulation status (converged/running) by risk distribution\", width=250, height=400)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 105,
+ "id": "50282f62-38ce-4709-9f9b-e845bca67606",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ ""
+ ],
+ "text/plain": [
+ "alt.Chart(...)"
+ ]
+ },
+ "execution_count": 105,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "alt.Chart(converged_df).mark_boxplot(size=20).encode(\n",
+ " x='risk_distribution:N',\n",
+ " y='Step',\n",
+ ").properties(\n",
+ " title=alt.TitleParams(\n",
+ " \"Simulation run length by risk distribution\", \n",
+ " subtitle=\"(converged runs only)\"), \n",
+ " width=350, height=450)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "f02d3914-5c01-47c6-a4ff-cf43a88f2444",
+ "metadata": {},
+ "source": [
+ "### population categories by risk distribution"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 106,
+ "id": "8f0e0645-d71a-456f-bc96-5d38ce898062",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "\n",
+ ""
+ ],
+ "text/plain": [
+ "alt.HConcatChart(...)"
+ ]
+ },
+ "execution_count": 106,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "import altair as alt\n",
+ "from simulatingrisk.hawkdovemulti import analysis_utils\n",
+ "\n",
+ "\n",
+ "uniform_chart = analysis_utils.graph_population_risk_category(\n",
+ " analysis_utils.groupby_population_risk_category(subset[\"uniform\"])\n",
+ ").properties(title=\"risk distribution: uniform/random\")\n",
+ "\n",
+ "normal_chart = analysis_utils.graph_population_risk_category(\n",
+ " analysis_utils.groupby_population_risk_category(subset[\"normal\"])\n",
+ ").properties(title=\"risk distribution: normal\")\n",
+ "\n",
+ "bimodal_chart = analysis_utils.graph_population_risk_category(\n",
+ " analysis_utils.groupby_population_risk_category(subset[\"bimodal\"])\n",
+ ").properties(title=\"risk distribution: bimodal\")\n",
+ "\n",
+ "skewedleft_chart = analysis_utils.graph_population_risk_category(\n",
+ " analysis_utils.groupby_population_risk_category(subset[\"skewed left\"])\n",
+ ").properties(title=\"risk distribution: skewed left\")\n",
+ "\n",
+ "skewedright_chart = analysis_utils.graph_population_risk_category(\n",
+ " analysis_utils.groupby_population_risk_category(subset[\"skewed right\"])\n",
+ ").properties(title=\"risk distribution: skewed right\")\n",
+ "\n",
+ "(uniform_chart | normal_chart | bimodal_chart | skewedleft_chart | skewedright_chart) \\\n",
+ ".properties(title=alt.TitleParams(\"Population category by run over initial risk distributions\", anchor=\"middle\")).resolve_scale(y='shared')\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.13"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/simulatingrisk/hawkdove/server.py b/simulatingrisk/hawkdove/server.py
index e160462..90ddd7a 100644
--- a/simulatingrisk/hawkdove/server.py
+++ b/simulatingrisk/hawkdove/server.py
@@ -28,6 +28,9 @@ def agent_portrayal(agent):
"size": 25,
# "color": "tab:gray",
}
+ # specific to multiple risk attitude variant
+ if hasattr(agent, "risk_level_changed"):
+ portrayal["risk_level_changed"] = agent.risk_level_changed
# color based on risk level; risk levels are always 0-9
colors = divergent_colors_10
@@ -154,18 +157,32 @@ def draw_hawkdove_agent_space(model, agent_portrayal):
.scale(domain=hawkdove_domain, range=["orange", "blue"])
)
- chart = (
+ # optionally display information from multi-risk attitude variant
+ if "risk_level_changed" in df.columns:
+ outer_color = alt.Color(
+ "risk_level_changed", title="adjusted risk level"
+ ).scale(
+ domain=[False, True],
+ range=["transparent", "black"],
+ )
+ else:
+ outer_color = chart_color
+
+ agent_chart = (
alt.Chart(df)
- .mark_point(filled=True)
+ .mark_point()
.encode(
x=alt.X("x", axis=None), # no x-axis label
y=alt.Y("y", axis=None), # no y-axis label
size=alt.Size("size", title="points rank"), # relabel size for legend
- color=chart_color,
+ # when fill and color differ, color acts as an outline
+ fill=chart_color,
+ color=outer_color,
shape=alt.Shape( # use shape to indicate choice
"choice", scale=alt.Scale(domain=hawkdove_domain, range=shape_range)
),
)
.configure_view(strokeOpacity=0) # hide grid/chart lines
)
- return solara.FigureAltair(chart)
+
+ return solara.FigureAltair(agent_chart)
diff --git a/simulatingrisk/hawkdovemulti/README.md b/simulatingrisk/hawkdovemulti/README.md
index 031720f..a6bac8f 100644
--- a/simulatingrisk/hawkdovemulti/README.md
+++ b/simulatingrisk/hawkdovemulti/README.md
@@ -20,6 +20,20 @@ Like the base hawk/dove risk attitude game, there is also a
configuration to add some chance of agents playing hawk/dove randomly
instead of choosing based on the rules of the game.
+## Convergence
+
+The model is configured to stop automatically when it has stabilized.
+Convergence is reached when an adjustment round occurs and zero agents
+adjust their risk attitude.
+
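+A sketch of this check (mirroring the `converged` property added to
+`model.py`; the 50-step minimum is hardcoded there):
+
+```python
+# converged once past the earliest adjustment rounds and no agent
+# changed risk level in the latest adjustment round
+converged = (
+    model.schedule.steps > max(model.adjust_round_n, 50)
+    and model.num_agents_risk_changed == 0
+)
+```
+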
+If adjustment is not enabled, convergence logic falls back to the
+implementation from the single-risk attitude hawk/dove simulation, which is
+based on a stable rolling average of the percent of agents playing hawk.
+
+Agent data collection reports whether each agent updated its risk level
+in the last adjustment round, and model data collection includes the
+number of agents who changed risk level and a status of "running" or "converged".
+
## Batch running
This module includes a custom batch run script to run the simulation and
diff --git a/simulatingrisk/hawkdovemulti/analysis_utils.py b/simulatingrisk/hawkdovemulti/analysis_utils.py
new file mode 100644
index 0000000..b9a8caf
--- /dev/null
+++ b/simulatingrisk/hawkdovemulti/analysis_utils.py
@@ -0,0 +1,52 @@
+"""
+utility methods for analyzing data generated by this model
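+
+Example usage, given a polars dataframe ``model_df`` of model-level batch
+data (``model_df`` is a placeholder name)::
+
+    grouped = groupby_population_risk_category(model_df)
+    chart = graph_population_risk_category(grouped)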
+"""
+import altair as alt
+import polars as pl
+
+from simulatingrisk.hawkdovemulti.model import RiskState
+
+
+def groupby_population_risk_category(df):
+ """takes a polars dataframe populated with model data generated
+ by hawk/dove multi model, groups by population risk category and
+ adds group labels."""
+ # currently written for polars dataframe
+
+ # group on risk category to get totals for the number of runs that
+ # ended up in each different type
+ poprisk_grouped = df.group_by("population_risk_category").count()
+ poprisk_grouped = poprisk_grouped.rename(
+ {"population_risk_category": "risk_category"}
+ )
+ poprisk_grouped = poprisk_grouped.sort("risk_category")
+
+ # add column with readable group labels for the numeric categories
+ poprisk_grouped = poprisk_grouped.with_columns(
+ pl.Series(
+ name="type",
+ values=poprisk_grouped["risk_category"].map_elements(RiskState.category),
+ )
+ )
+ return poprisk_grouped
+
+
+def graph_population_risk_category(poprisk_grouped):
+ """given a dataframe grouped by :meth:`groupby_population_risk_category`,
+ generate an altair chart graphing the number of runs in each type,
+ grouped and labeled by the larger categories."""
+ return (
+ alt.Chart(poprisk_grouped)
+ .mark_bar(width=15)
+ .encode(
+ x=alt.X(
+ "risk_category",
+ title="risk category",
+ axis=alt.Axis(tickCount=13), # 13 categories
+ scale=alt.Scale(domain=[1, 13]),
+ ),
+ y=alt.Y("count", title="Number of runs"),
+ color=alt.Color("type", title="type"),
+ )
+ .properties(title="Distribution of runs by final population risk category")
+ )
diff --git a/simulatingrisk/hawkdovemulti/app.py b/simulatingrisk/hawkdovemulti/app.py
index 0a51f29..aa8273f 100644
--- a/simulatingrisk/hawkdovemulti/app.py
+++ b/simulatingrisk/hawkdovemulti/app.py
@@ -12,7 +12,6 @@
neighborhood_sizes,
)
from simulatingrisk.hawkdove.model import divergent_colors_10
-from simulatingrisk.hawkdove.app import plot_hawks
# start with common hawk/dove params, then add params for variable risk
jupyterviz_params_var = common_jupyterviz_params.copy()
@@ -93,12 +92,42 @@ def plot_agents_by_risk(model):
# distracting from the main point of this chart, which is quantitative
# color=alt.Color("risk_level:N").scale(**color_scale_opts),
)
+ .properties(title="Number of agents in each risk level")
)
return solara.FigureAltair(bar_chart)
+def plot_agents_risklevel_changed(model):
+ """plot the number of agents who updated their risk attitude on
+ the last adjustment round"""
+ model_df = model.datacollector.get_model_vars_dataframe().reset_index()
+ if model_df.empty:
+ return
+ # model_df = model_df[model_df.index % model.adjust_round_n == 0]
+ model_df = model_df[:: model.adjust_round_n]
+ if model_df.empty:
+ return
+
+ line_chart = (
+ alt.Chart(model_df)
+ .mark_line()
+ .encode(
+ y=alt.Y(
+ "num_agents_risk_changed",
+ title="# agents who updated risk level",
+ # axis=alt.Axis(tickCount=model.max_risk_level + 1),
+ scale=alt.Scale(domain=[0, model.num_agents]),
+ ),
+ x=alt.X("index"),
+ )
+ .properties(title="Number of agents with adjusted risk level")
+ )
+
+ return solara.FigureAltair(line_chart)
+
+
def plot_hawks_by_risk(model):
- """plot rolling mean of percent of agents in each risk attitude
+ """plot rolling mean of percent of agents in each risk level
who chose hawk over last several rounds"""
# in the first round, mesa returns a dataframe full of NAs; ignore that
@@ -146,14 +175,46 @@ def plot_hawks_by_risk(model):
),
color=alt.Color("risk_level:N").scale(**color_scale_opts),
)
+ .properties(title="Rolling average percent hawk by risk level")
)
return solara.FigureAltair(chart)
+def plot_wealth_by_risklevel(model):
+ """plot wealth distribution for each risk level"""
+ agent_df = model.datacollector.get_agent_vars_dataframe().reset_index().dropna()
+ if agent_df.empty:
+ return
+
+ last_step = agent_df.Step.max()
+ # plot current status / last round
+ last_round = agent_df[agent_df.Step == last_step]
+
+ wealth_chart = (
+ alt.Chart(last_round)
+ .mark_boxplot(extent="min-max")
+ .encode(
+ alt.X(
+ "risk_level",
+ scale=alt.Scale(domain=[model.min_risk_level, model.max_risk_level]),
+ ),
+ alt.Y("points").scale(zero=False),
+ )
+ .properties(title="Cumulative wealth by risk level")
+ )
+ return solara.FigureAltair(wealth_chart)
+
+
page = JupyterViz(
HawkDoveMultipleRiskModel,
jupyterviz_params_var,
- measures=[plot_hawks, plot_agents_by_risk, plot_hawks_by_risk],
+ measures=[
+ plot_agents_by_risk,
+ plot_hawks_by_risk,
+ plot_wealth_by_risklevel,
+ plot_agents_risklevel_changed,
+ # plot_hawks,
+ ],
name="Hawk/Dove game with multiple risk attitudes",
agent_portrayal=agent_portrayal,
space_drawer=draw_hawkdove_agent_space,
diff --git a/simulatingrisk/hawkdovemulti/batch_run.py b/simulatingrisk/hawkdovemulti/batch_run.py
index 73e24ea..40c1b8e 100755
--- a/simulatingrisk/hawkdovemulti/batch_run.py
+++ b/simulatingrisk/hawkdovemulti/batch_run.py
@@ -15,18 +15,46 @@
neighborhood_sizes = list(HawkDoveMultipleRiskModel.neighborhood_sizes)
+# NOTE: it's better to be explicit about parameters, even defaults,
+# instead of relying on model defaults, because
+# parameters specified here are included in data exports
+
+
# combination of parameters we want to run
params = {
- "grid_size": [10, 25, 50], # 100],
- "risk_adjustment": ["adopt", "average"],
- "play_neighborhood": neighborhood_sizes,
- "observed_neighborhood": neighborhood_sizes,
- "adjust_neighborhood": neighborhood_sizes,
- "hawk_odds": [0.5, 0.25, 0.75],
- "adjust_every": [2, 10, 20],
- "risk_distribution": HawkDoveMultipleRiskModel.risk_distribution_options,
- "adjust_payoff": HawkDoveMultipleRiskModel.supported_adjust_payoffs,
- # random?
+ "default": {
+ "grid_size": [10, 25, 50], # 100],
+ "risk_adjustment": ["adopt", "average"],
+ "play_neighborhood": neighborhood_sizes,
+ "observed_neighborhood": neighborhood_sizes,
+ "adjust_neighborhood": neighborhood_sizes,
+ "hawk_odds": [0.5, 0.25, 0.75],
+ "adjust_every": [2, 10, 20],
+ "risk_distribution": HawkDoveMultipleRiskModel.risk_distribution_options,
+ "adjust_payoff": HawkDoveMultipleRiskModel.supported_adjust_payoffs,
+ # random?
+ },
+ # specific scenarios to allow paired statistical tests
+ "risk_adjust": {
+ # vary risk adjustment
+ "risk_adjustment": ["adopt", "average"],
+ "risk_distribution": "uniform",
+ # use model defaults; grid size must be specified
+ "grid_size": 10, # 25,
+ },
+ "payoff": {
+ "adjust_payoff": HawkDoveMultipleRiskModel.supported_adjust_payoffs,
+ "risk_distribution": "uniform",
+ # use model defaults; grid size must be specified
+ "grid_size": 25,
+ },
+ "distribution": {
+ "risk_distribution": HawkDoveMultipleRiskModel.risk_distribution_options,
+ # adopt tends to converge faster; LB also says it's more interesting & simpler
+ "risk_adjustment": "adopt",
+ # use model defaults; grid size must be specified
+ "grid_size": 10,
+ },
}
@@ -37,7 +65,14 @@ def run_hawkdovemulti_model(args):
model = HawkDoveMultipleRiskModel(**params)
while model.running and model.schedule.steps <= max_steps:
- model.step()
+ try:
+ model.step()
+ # by default, signals propagate to all processes
+ # take advantage of that to exit and save results
+ except KeyboardInterrupt:
+ # if we get a ctrl-c / keyboard interrupt, stop looping
+ # and finish data collection to report on whatever was completed
+ break
# collect data for the last step
# (scheduler is 1-based index but data collection is 0-based)
@@ -72,8 +107,11 @@ def batch_run(
collect_agent_data,
file_prefix,
max_runs,
+ param_choice,
):
- param_combinations = _make_model_kwargs(params)
+ run_params = params.get(param_choice)
+
+ param_combinations = _make_model_kwargs(run_params)
total_param_combinations = len(param_combinations)
total_runs = total_param_combinations * iterations
print(
@@ -169,7 +207,7 @@ def main():
"--max-steps",
help="Maximum steps to run simulations if they have not already "
+ "converged (default: %(default)s)",
- default=125, # typically converges quickly, around step 60 without randomness
+ default=1000, # new convergence logic seems to converge around 400
type=int,
)
parser.add_argument(
@@ -203,7 +241,12 @@ def main():
type=int,
default=None,
)
- # may want to add an option to configure output dir
+ parser.add_argument(
+ "--params",
+ help="Run a specific set of parameters",
+ choices=params.keys(),
+ default="default",
+ )
args = parser.parse_args()
batch_run(
@@ -215,6 +258,7 @@ def main():
args.agent_data,
args.file_prefix,
args.max_runs,
+ args.params,
)
diff --git a/simulatingrisk/hawkdovemulti/model.py b/simulatingrisk/hawkdovemulti/model.py
index ad64274..9d0d210 100644
--- a/simulatingrisk/hawkdovemulti/model.py
+++ b/simulatingrisk/hawkdovemulti/model.py
@@ -17,6 +17,9 @@ class HawkDoveMultipleRiskAgent(HawkDoveAgent):
#: points since last adjustment round; starts at 0
recent_points = 0
+ #: whether or not risk level changed on the last adjustment round
+ risk_level_changed = False
+
def set_risk_level(self):
# get risk attitude from model based on configured distribution
self.risk_level = self.model.get_risk_attitude()
@@ -72,6 +75,7 @@ def adjust_risk(self):
# either adopt their risk attitude or average theirs with yours
best = self.most_successful_neighbor
+
# if most successful neighbor has more points and a different
# risk attitude, adjust
if (
@@ -89,6 +93,12 @@ def adjust_risk(self):
statistics.mean([self.risk_level, best.risk_level])
)
+ # track that risk attitude has been updated
+ self.risk_level_changed = True
+ else:
+ # track that risk attitude was not changed
+ self.risk_level_changed = False
+
class RiskState(IntEnum):
"""Categorization of population risk states"""
@@ -167,8 +177,8 @@ class HawkDoveMultipleRiskModel(HawkDoveModel):
def __init__(
self,
grid_size,
- risk_adjustment=None,
- risk_distribution="normal",
+ risk_adjustment="adopt",
+ risk_distribution="uniform",
adjust_every=10,
adjust_neighborhood=None,
adjust_payoff="recent",
@@ -287,12 +297,16 @@ def get_data_collector_options(self):
# in addition to common hawk/dove data points,
# we want to include population risk category
opts = super().get_data_collector_options()
- model_reporters = {"population_risk_category": "population_risk_category"}
+ model_reporters = {
+ "population_risk_category": "population_risk_category",
+ "num_agents_risk_changed": "num_agents_risk_changed",
+ }
for risk_level in range(self.min_risk_level, self.max_risk_level + 1):
field = f"total_r{risk_level}"
model_reporters[field] = field
opts["model_reporters"].update(model_reporters)
+ opts["agent_reporters"].update({"risk_level_changed": "risk_level_changed"})
return opts
def step(self):
@@ -305,6 +319,26 @@ def step(self):
pass
super().step()
+ @property
+ def num_agents_risk_changed(self):
+ return len([a for a in self.schedule.agents if a.risk_level_changed])
+
+ @property
+ def converged(self):
+ # check if the simulation is stable and should stop running
+ # based on the number of agents changing their risk level
+
+ # checking whether agents' risk level changed only works
+ # when adjustment is enabled; if it is not, fall back
+ # to base model logic, which is based on rolling avg % hawk
+ if not self.risk_adjustment:
+ return super().converged
+
+ return (
+ self.schedule.steps > max(self.adjust_round_n, 50)
+ and self.num_agents_risk_changed == 0
+ )
+
@cached_property
def total_per_risk_level(self):
# tally the number of agents for each risk level
diff --git a/tests/test_hawkdovemulti.py b/tests/test_hawkdovemulti.py
index 45e9814..9dd756e 100644
--- a/tests/test_hawkdovemulti.py
+++ b/tests/test_hawkdovemulti.py
@@ -14,12 +14,12 @@
def test_init():
model = HawkDoveMultipleRiskModel(5)
# defaults
- assert model.risk_adjustment is None
+ assert model.risk_adjustment == "adopt"
assert model.hawk_odds == 0.5
assert model.play_neighborhood == 8
assert model.adjust_neighborhood == 8
- # unused but should be set to default
assert model.adjust_round_n == 10
+ assert model.risk_distribution == "uniform"
# init with risk adjustment
model = HawkDoveMultipleRiskModel(
@@ -348,6 +348,8 @@ def test_adjust_risk_adopt_recent():
agent.adjust_risk()
# default behavior is to adopt successful risk level
assert agent.risk_level == neighbor.risk_level
+ # agent should track that risk attitude was updated
+ assert agent.risk_level_changed
# now simulate a wealthiest neighbor with fewer points than current agent
neighbor.recent_points = 12
@@ -357,6 +359,8 @@ def test_adjust_risk_adopt_recent():
agent.adjust_risk()
# risk level should not be changed
assert agent.risk_level == prev_risk_level
+ # agent should track that risk attitude was not changed
+ assert not agent.risk_level_changed
def test_adjust_risk_average():