Plot control #42

Merged · 16 commits · Oct 24, 2024
41 changes: 25 additions & 16 deletions config/base/config.yaml
@@ -8,15 +8,30 @@ defaults:
- optimizer: ???
- override hydra/job_logging: custom # Override logging config

# Defaults for the various subconfigs. These can be overridden from the command
# line with e.g. experiment/brain=new_brain, where new_brain.yaml lives in the
# brain subdirectory
### Core Parameters ###

# We run scan by default
command: scan

# Default run name format, using current date and time
run_name: 'run_${now:%Y-%m-%d-%H-%M-%S}'

# General simulation settings
system:
device: cuda # We use cuda by default

logging:
use_wandb: False # Whether to use Weights & Biases for logging
checkpoint_step: 5 # Save checkpoints every 5 steps
max_checkpoints: 5 # Maximum number of checkpoints to keep
channel_analysis: False # Whether to do in-depth channel analysis
plot_sample_size: 1000
wandb_preempt: False # Whether to enable Weights & Biases preemption

### "Immutable" Parameters ###

# These parameters are not intended to be changed by the user.

# Hydra configuration
hydra:
searchpath:
@@ -33,22 +48,16 @@ hydra:
- command
sweep:
dir: experiments/multirun # Directory for sweep runs
subdir: ${hydra:runtime.choices.experiment}/sweep/${oc.env:WANDB_SWEEP_ID,local}${run_name} # Individual sweep run subdirectory
subdir: ${hydra:runtime.choices.experiment}/sweep/${oc.env:WANDB_SWEEP_ID,local}/${run_name} # Individual sweep run subdirectory

# System configuration
system:
device: cuda # We use cuda by default
# Path configuration
path:
run_dir: ${hydra:runtime.output_dir} # Access run directory at runtime
data_dir: data # Root directory in run_dir where all run data is saved
checkpoint_step: 5 # Save checkpoints every 5 steps
max_checkpoints: 5 # Maximum number of checkpoints to keep
checkpoint_dir: ${system.data_dir}/checkpoints # Directory for checkpoints
plot_dir: ${system.data_dir}/plots # Directory for plots
checkpoint_plot_dir: ${system.plot_dir}/checkpoints # Directory for checkpoint plots
wandb_preempt: False # Whether to enable Weights & Biases preemption

# Whether to use Weights & Biases for logging
use_wandb: False
checkpoint_dir: ${path.data_dir}/checkpoints # Directory for checkpoints
plot_dir: ${path.data_dir}/plots # Directory for plots
checkpoint_plot_dir: ${path.plot_dir}/checkpoints # Directory for checkpoint plots
wandb_dir: ${path.run_dir}/wandb

# Sweep command setup
sweep:
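A side note on the relocated path block and the subdir fix above: these are plain OmegaConf interpolations, resolved on access, and ${oc.env:VAR,default} falls back to its default when the environment variable is unset. A minimal sketch of how they resolve; the literal values are stand-ins, since ${hydra:runtime.output_dir} and ${now:...} only resolve inside a running Hydra app:

from omegaconf import OmegaConf

cfg = OmegaConf.create(
    {
        "run_name": "run_2024-10-24-12-00-00",  # stand-in for the ${now:...} default
        "path": {
            "run_dir": "experiments/demo",  # stand-in for ${hydra:runtime.output_dir}
            "data_dir": "data",
            "checkpoint_dir": "${path.data_dir}/checkpoints",
            "plot_dir": "${path.data_dir}/plots",
            "checkpoint_plot_dir": "${path.plot_dir}/checkpoints",
            "wandb_dir": "${path.run_dir}/wandb",
        },
        "subdir": "sweep/${oc.env:WANDB_SWEEP_ID,local}/${run_name}",
    }
)

print(cfg.path.checkpoint_plot_dir)  # data/plots/checkpoints
print(cfg.subdir)  # sweep/local/run_2024-10-24-12-00-00 (WANDB_SWEEP_ID unset)

Without the added "/" in subdir, the sweep id and run name were concatenated into a single directory name; the fix keeps them as separate path components.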
3 changes: 1 addition & 2 deletions main.py
@@ -56,8 +56,7 @@ def _program(cfg: DictConfig):
warnings.warn("No optimizer config specified, is that wanted?")

if cfg.command == "scan":
brain.scan()
brain.scan_circuits()
print(brain.scan())
sys.exit(0)

framework: TrainingFramework
92 changes: 0 additions & 92 deletions resources/config_templates/user/brain/deep-autoencoder.yaml

This file was deleted.

@@ -2,15 +2,19 @@
defaults:
- _self_
- override /dataset: cifar10
- override /brain: shallow-autoencoder
- override /brain: classifying-autoencoder
- override /optimizer: class-recon


# This is the main entry point for control of a retinal-rl experiment. Variables
# created here will be top-level, and defaults can be set for the various parts
# of an experiment (NB: do not add comments above the defaults list or it will
# break the config system.)

framework: classification

### Interpolation Parameters ###

# This is a free list of parameters that can be interpolated by the subconfigs
# in sweep, dataset, brain, and optimizer. A major use for this is interpolating
# values in the subconfigs, and then looping over them in a sweep.
2 changes: 1 addition & 1 deletion resources/config_templates/user/optimizer/class-recon.yaml
@@ -3,7 +3,7 @@ num_epochs: 100

# The optimizer to use
optimizer: # torch.optim Class and parameters
_target_: torch.optim.Adam
_target_: torch.optim.AdamW
lr: 0.0003
weight_decay: ${weight_decay}

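For context on the one-line change above: AdamW decouples weight decay from the adaptive gradient update, applying it directly to the parameters each step, whereas Adam folds weight_decay into the gradient as L2 regularization, where it gets rescaled by the adaptive step size. A minimal sketch of the new optimizer construction; the lr matches the config, while the weight_decay value is a placeholder for the interpolated ${weight_decay}:

import torch

params = [torch.nn.Parameter(torch.randn(10))]

# AdamW: p <- p - lr * wd * p, applied independently of the Adam moment
# estimates; torch.optim.Adam instead adds wd * p to the gradient first.
optimizer = torch.optim.AdamW(params, lr=0.0003, weight_decay=1e-4)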
@@ -11,7 +11,9 @@ method: grid
project: retinal-rl

parameters:
use_wandb:
simulation.use_wandb:
value: True
simulation.channel_analysis:
value: True
recon_weight_retina:
values: [0,0.9,0.999,1]
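For reference, the dotted keys above are how a Weights & Biases sweep targets nested config fields: this grid pins the two logging flags to True and sweeps over recon_weight_retina. A hypothetical sketch of registering the equivalent sweep programmatically (wandb.sweep is the real API; the training function and agent are not shown):

import wandb

sweep_config = {
    "method": "grid",
    "project": "retinal-rl",
    "parameters": {
        "simulation.use_wandb": {"value": True},
        "simulation.channel_analysis": {"value": True},
        "recon_weight_retina": {"values": [0, 0.9, 0.999, 1]},
    },
}

sweep_id = wandb.sweep(sweep_config)  # id consumed by a wandb agent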
29 changes: 0 additions & 29 deletions resources/config_templates/user/sweep/shallow-transform-sweep.yaml

This file was deleted.

58 changes: 42 additions & 16 deletions retinal_rl/analysis/plot.py
@@ -2,19 +2,18 @@

from typing import Dict, List, Tuple

import matplotlib.gridspec as gridspec
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import numpy.fft as fft
import seaborn as sns
import torch
from matplotlib import gridspec, patches
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from matplotlib.lines import Line2D
from matplotlib.patches import Circle, Wedge
from matplotlib.ticker import MaxNLocator
from numpy import fft
from torch import Tensor
from torchvision.utils import make_grid

@@ -74,7 +73,9 @@ def plot_transforms(
# Display the grid
ax.imshow(grid.permute(1, 2, 0))
ax.set_title(f"Source Transform: {transform_name}")
ax.set_xticks([(i + 0.5) * grid.shape[2] / len(steps) for i in range(len(steps))])
ax.set_xticks(
[(i + 0.5) * grid.shape[2] / len(steps) for i in range(len(steps))]
)
ax.set_xticklabels([f"{step:.2f}" for step in steps])
ax.set_yticks([])

@@ -98,7 +99,9 @@ def plot_transforms(
# Display the grid
ax.imshow(grid.permute(1, 2, 0))
ax.set_title(f"Noise Transform: {transform_name}")
ax.set_xticks([(i + 0.5) * grid.shape[2] / len(steps) for i in range(len(steps))])
ax.set_xticks(
[(i + 0.5) * grid.shape[2] / len(steps) for i in range(len(steps))]
)
ax.set_xticklabels([f"{step:.2f}" for step in steps])
ax.set_yticks([])

@@ -114,7 +117,9 @@ def plot_brain_and_optimizers(brain: Brain, objective: Objective[ContextT]) -> Figure:
# Compute the depth of each node
depths: Dict[str, int] = {}
for node in nx.topological_sort(graph):
depths[node] = max([depths[pred] for pred in graph.predecessors(node)] + [-1]) + 1
depths[node] = (
max([depths[pred] for pred in graph.predecessors(node)] + [-1]) + 1
)

# Create a position dictionary based on depth
pos: Dict[str, Tuple[float, float]] = {}
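The depth rule in the hunk above assigns each node one more than the maximum depth of its predecessors, with the -1 sentinel putting source nodes at depth 0, so a node's depth equals the longest path from any source. A small worked sketch on a hypothetical three-node graph (the node names are made up, not taken from this repo):

import networkx as nx

graph = nx.DiGraph([("retina", "vvs"), ("vvs", "classifier"), ("retina", "classifier")])
depths = {}
for node in nx.topological_sort(graph):
    depths[node] = max([depths[pred] for pred in graph.predecessors(node)] + [-1]) + 1

print(depths)  # {'retina': 0, 'vvs': 1, 'classifier': 2}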
@@ -128,7 +133,10 @@ def plot_brain_and_optimizers(brain: Brain, objective: Objective[ContextT]) -> Figure:
for depth, nodes in nodes_at_depth.items():
width = len(nodes)
for i, node in enumerate(nodes):
pos[node] = ((i - width / 2) / (width + 1), -(max_depth - depth) / max_depth)
pos[node] = (
(i - width / 2) / (width + 1),
-(max_depth - depth) / max_depth,
)

# Set up the plot
fig, ax = plt.subplots(figsize=(12, 10))
@@ -297,10 +305,10 @@ def plot_receptive_field_sizes(results: Dict[str, Dict[str, FloatArray]]) -> Figure:
def plot_histories(histories: Dict[str, List[float]]) -> Figure:
"""Plot training and test losses over epochs."""
train_metrics = [
key.split("_", 1)[1] for key in histories.keys() if key.startswith("train_")
key.split("_", 1)[1] for key in histories if key.startswith("train_")
]
test_metrics = [
key.split("_", 1)[1] for key in histories.keys() if key.startswith("test_")
key.split("_", 1)[1] for key in histories if key.startswith("test_")
]

# Use the intersection of train and test metrics to ensure we have both for each metric
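For intuition on the key handling in this hunk: the train_/test_ prefixes are stripped, and only metrics present on both sides get plotted. A tiny sketch with assumed history keys:

histories = {
    "train_recon_loss": [0.9, 0.5],
    "test_recon_loss": [1.0, 0.6],
    "train_lr": [0.003, 0.001],  # no test_ counterpart, so it is skipped
}
train_metrics = [k.split("_", 1)[1] for k in histories if k.startswith("train_")]
test_metrics = [k.split("_", 1)[1] for k in histories if k.startswith("test_")]
plotted = [m for m in train_metrics if m in test_metrics]  # ['recon_loss']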
@@ -444,6 +452,8 @@ def _set_integer_ticks(ax: Axes):

# Function to plot the original and reconstructed images
def plot_reconstructions(
normalization_mean: List[float],
normalization_std: List[float],
train_sources: List[Tuple[Tensor, int]],
train_inputs: List[Tuple[Tensor, int]],
train_estimates: List[Tuple[Tensor, int]],
@@ -463,13 +473,29 @@ def plot_reconstructions(
test_input, test_class = test_inputs[i]
test_recon, test_pred = test_estimates[i]

# Unnormalize the original images
train_source = train_source.permute(1, 2, 0).numpy() * 0.5 + 0.5
train_input = train_input.permute(1, 2, 0).numpy() * 0.5 + 0.5
train_recon = train_recon.permute(1, 2, 0).numpy() * 0.5 + 0.5
test_source = test_source.permute(1, 2, 0).numpy() * 0.5 + 0.5
test_input = test_input.permute(1, 2, 0).numpy() * 0.5 + 0.5
test_recon = test_recon.permute(1, 2, 0).numpy() * 0.5 + 0.5
# Unnormalize the original images using the normalization lists
train_source = (
train_source.permute(1, 2, 0).numpy() * normalization_std
+ normalization_mean
)
train_input = (
train_input.permute(1, 2, 0).numpy() * normalization_std
+ normalization_mean
)
train_recon = (
train_recon.permute(1, 2, 0).numpy() * normalization_std
+ normalization_mean
)
test_source = (
test_source.permute(1, 2, 0).numpy() * normalization_std
+ normalization_mean
)
test_input = (
test_input.permute(1, 2, 0).numpy() * normalization_std + normalization_mean
)
test_recon = (
test_recon.permute(1, 2, 0).numpy() * normalization_std + normalization_mean
)

axes[0, i].imshow(np.clip(train_source, 0, 1))
axes[0, i].axis("off")
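A note on the arithmetic in the unnormalization block above: permute(1, 2, 0) yields an HWC array, and multiplying it by a length-C Python list broadcasts over the trailing channel axis, so each channel is unnormalized with its own statistics (the old code hard-coded mean = std = 0.5 for every channel). A standalone sketch; the CIFAR-10-style statistics here are assumed for illustration, not taken from this PR:

import numpy as np
import torch

img = torch.randn(3, 32, 32)  # a normalized CHW image tensor
mean = [0.4914, 0.4822, 0.4465]  # assumed per-channel mean
std = [0.2470, 0.2435, 0.2616]  # assumed per-channel std

hwc = img.permute(1, 2, 0).numpy()  # shape (32, 32, 3)
unnorm = np.clip(hwc * std + mean, 0.0, 1.0)  # lists broadcast over channels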