From 20b2c67e155b3ce91871c76930d88e25ffa5a194 Mon Sep 17 00:00:00 2001
From: IkemOkoh <154571143+IkemOkoh@users.noreply.github.com>
Date: Wed, 5 Jun 2024 01:48:01 -0700
Subject: [PATCH] Update leakyparallel.py

(Hopefully) fixed newlines and wrap.
---
 snntorch/_neurons/leakyparallel.py | 26 ++++++++------------------
 1 file changed, 8 insertions(+), 18 deletions(-)

diff --git a/snntorch/_neurons/leakyparallel.py b/snntorch/_neurons/leakyparallel.py
index 531671ee..291fc29b 100644
--- a/snntorch/_neurons/leakyparallel.py
+++ b/snntorch/_neurons/leakyparallel.py
@@ -29,19 +29,13 @@ class LeakyParallel(nn.Module):
 
     * Linear weights are included in addition to recurrent weights.
     * `beta` is clipped between [0,1] and cloned to
-      `weight_hh_l` only upon layer initialization.
-      It is unused otherwise.
+      `weight_hh_l` only upon layer initialization. It is unused otherwise.
     * There is no explicit reset mechanism.
     * Several functions such as `init_hidden`, `output`,
-      `inhibition`, and `state_quant` are unavailable
-      in `LeakyParallel`.
-    * Only the output spike is returned. Membrane potential
-      is not accessible by default.
-    * RNN uses a hidden matrix of size (num_hidden, num_hidden)
-      to transform the hidden state vector. This would 'leak'
-      the membrane potential between LIF neurons, and so the
-      hidden matrix is forced to a diagonal matrix by default.
-      This can be disabled by setting `weight_hh_enable=True`.
+      `inhibition`, and `state_quant` are unavailable in `LeakyParallel`.
+    * Only the output spike is returned. Membrane potential is not accessible by default.
+    * RNN uses a hidden matrix of size (num_hidden, num_hidden)
+      to transform the hidden state vector. This would 'leak' the membrane potential between LIF neurons, and so the hidden matrix is forced to a diagonal matrix by default. This can be disabled by setting `weight_hh_enable=True`.
 
     Example::
 
@@ -77,10 +71,8 @@ def forward(self, x):
     :param hidden_size: The number of features in the hidden state `h`
     :type hidden_size: int
 
-    :param beta: membrane potential decay rate. Clipped between 0 and 1
-        during the forward-pass. May be a single-valued tensor (i.e., equal
-        decay rate for all neurons in a layer), or multi-valued (one weight per
-        neuron). If left unspecified, then the decay rates will be randomly initialized based on PyTorch's initialization for RNN. Defaults to None
+    :param beta: membrane potential decay rate. Clipped between 0 and 1
+        during the forward-pass. May be a single-valued tensor (i.e., equal decay rate for all neurons in a layer), or multi-valued (one weight per neuron). If left unspecified, then the decay rates will be randomly initialized based on PyTorch's initialization for RNN. Defaults to None
     :type beta: float or torch.tensor, optional
     :param bias: If `False`, then the layer does not use bias weights
         `b_ih` and `b_hh`. Defaults to True
@@ -112,9 +104,7 @@ def forward(self, x):
     :type learn_threshold: bool, optional
 
     :param weight_hh_enable: Option to set the hidden matrix to be dense or
-        diagonal. Diagonal (i.e., False) adheres to how a LIF neuron works.
-        Dense (True) would allow the membrane potential of one LIF neuron to
-        influence all others, and follow the RNN default implementation. Defaults to False
+        diagonal. Diagonal (i.e., False) adheres to how a LIF neuron works. Dense (True) would allow the membrane potential of one LIF neuron to influence all others, and follow the RNN default implementation. Defaults to False
     :type weight_hh_enable: bool, optional
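
Note (not part of the patch): the behavior the reflowed docstring describes can be exercised with a short sketch. This is a minimal example assuming the constructor parameters shown in the docstring (`input_size`, `hidden_size`, `beta`, `weight_hh_enable`) and the usual `torch.nn.RNN` input layout of `(num_steps, batch, features)`; the tensor sizes here are illustrative, not taken from the patch::

    import torch
    import snntorch as snn

    num_steps, batch_size = 25, 8        # illustrative sizes (assumed)
    num_inputs, num_hidden = 784, 128

    # All time steps are passed to the layer at once; the layout is
    # assumed to follow torch.nn.RNN's default (num_steps, batch, features).
    x = torch.rand((num_steps, batch_size, num_inputs))

    # `beta` is clipped to [0, 1] and cloned into `weight_hh_l` only at
    # layer initialization; it is unused afterwards.
    lif = snn.LeakyParallel(input_size=num_inputs, hidden_size=num_hidden, beta=0.9)

    # Only the output spikes are returned; the membrane potential is not
    # accessible by default.
    spk = lif(x)
    print(spk.shape)  # expected: torch.Size([25, 8, 128])

    # With weight_hh_enable=True the (num_hidden, num_hidden) hidden matrix
    # is left dense (the RNN default), letting membrane potential 'leak'
    # between LIF neurons; the default (False) forces it to be diagonal.
    lif_dense = snn.LeakyParallel(
        input_size=num_inputs, hidden_size=num_hidden, weight_hh_enable=True
    )
    spk_dense = lif_dense(x)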