Commit

fix baseline errors
jhljx committed Sep 7, 2020
1 parent 20f2417 commit 8a27811
Showing 5 changed files with 11 additions and 11 deletions.
2 changes: 2 additions & 0 deletions baseline/dynAE.py

@@ -386,6 +386,8 @@ def dyngem_embedding(method, args):
         start_idx = max_time_num + start_idx
     if end_idx < 0:  # original time range is [start_idx, end_idx] containing start_idx and end_idx
         end_idx = max_time_num + end_idx + 1
+    else:
+        end_idx = end_idx + 1
 
     if method == 'DynGEM':
         assert duration == 1
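
For illustration, a minimal sketch (not repository code) of the time-range normalization this hunk completes, assuming max_time_num is the number of snapshots: before the patch a non-negative end_idx was treated as exclusive, so the last requested snapshot was silently dropped; the added else branch makes both spellings of [start_idx, end_idx] inclusive.

# Sketch only; mirrors the patched logic above.
def normalize_range(start_idx, end_idx, max_time_num):
    if start_idx < 0:  # negative indices count from the end, as in Python slicing
        start_idx = max_time_num + start_idx
    if end_idx < 0:
        end_idx = max_time_num + end_idx + 1
    else:
        end_idx = end_idx + 1  # the fix: keep a non-negative end_idx inclusive too
    return start_idx, end_idx  # iterate with range(start_idx, end_idx)

# normalize_range(0, -1, 10) == (0, 10) and normalize_range(0, 9, 10) == (0, 10)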
9 changes: 1 addition & 8 deletions baseline/egcn.py

@@ -18,16 +18,14 @@ class EvolveGCN(torch.nn.Module):
     input_dim: int
     hidden_dim: int
     output_dim: int
-    duration: int
     method_name: str
     egcn_type: str
 
-    def __init__(self, input_dim, hidden_dim, output_dim, duration, egcn_type='EGCNH'):
+    def __init__(self, input_dim, hidden_dim, output_dim, egcn_type='EGCNH'):
         super().__init__()
         self.input_dim = input_dim
         self.hidden_dim = hidden_dim
         self.output_dim = output_dim
-        self.duration = duration
         self.method_name = 'EvolveGCN'
         self.egcn_type = egcn_type

@@ -43,8 +41,6 @@ def forward(self, Nodes_list, A_list, nodes_mask_list=None):
             Nodes_list = unit(A_list, Nodes_list, nodes_mask_list)
         else:  # 'EGCNH'
             Nodes_list = unit(A_list, Nodes_list, nodes_mask_list)
-        # output = Nodes_list[-1]
-        # output = self.norm(output)
         return Nodes_list

@@ -55,7 +51,6 @@ def __init__(self, input_dim, output_dim, egcn_type='EGCNH'):
         self.egcn_type = egcn_type
         self.GCN_init_weights = Parameter(torch.FloatTensor(input_dim, output_dim))
         self.reset_param(self.GCN_init_weights)
-        self.norm = nn.LayerNorm(output_dim)
         assert self.egcn_type in ['EGCNO', 'EGCNH']

@@ -77,8 +72,6 @@ def forward(self, A_list, node_embs_list, mask_list=None):
             else:
                 GCN_weights = self.evolve_weights(GCN_weights, node_embs)
             node_embs = F.rrelu(Ahat.matmul(node_embs.matmul(GCN_weights)))
-            node_embs = self.norm(node_embs)
-            # node_embs = F.normalize(node_embs, p=2, dim=1)
             # node_embs = torch.sigmoid(Ahat.matmul(node_embs.matmul(GCN_weights)))
             out_seq.append(node_embs)
         return out_seq
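
With duration dropped from the constructor and the per-step LayerNorm removed, the model is built as below. A hedged usage sketch: the dimensions, node count, and snapshot count are assumptions, and it presumes the repository's GRCU units accept these inputs unchanged.

# Usage sketch under the patched signature; shapes are illustrative only.
import torch
from baseline.egcn import EvolveGCN

model = EvolveGCN(input_dim=16, hidden_dim=32, output_dim=8, egcn_type='EGCNH')
Nodes_list = [torch.randn(100, 16) for _ in range(5)]  # features per timestamp (assumed)
A_list = [torch.eye(100) for _ in range(5)]            # normalized adjacency per timestamp
out_list = model(Nodes_list, A_list)  # per-timestamp embeddings, no LayerNorm applied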
7 changes: 5 additions & 2 deletions baseline/gcrn.py

@@ -1,7 +1,8 @@
 # coding: utf-8
 import torch
 import torch.nn as nn
-from baseline.gcn import TgGCN
+import torch.nn.functional as F
+from baseline.gcn import GCN, TgGCN
 
 # Graph Convolutional Recurrent Network = Graph Convolutional Network + Gated Recurrent Unit
 # This model is similar to the model proposed in paper 'Structured Sequence Modeling with Graph Convolutional Recurrent Networks'.

@@ -39,7 +40,8 @@ def __init__(self, input_dim, feature_dim, hidden_dim, output_dim, feature_pre=T
 
         self.gcn_list = nn.ModuleList()
         for i in range(self.duration):
-            self.gcn_list.append(TgGCN(input_dim, feature_dim, hidden_dim, output_dim, feature_pre=feature_pre, layer_num=layer_num, dropout=dropout, bias=bias))
+            # self.gcn_list.append(TgGCN(input_dim, feature_dim, hidden_dim, output_dim, feature_pre=feature_pre, layer_num=layer_num, dropout=dropout, bias=bias))
+            self.gcn_list.append(GCN(input_dim, hidden_dim, output_dim, dropout=dropout, bias=bias))
         assert self.rnn_type in ['LSTM', 'GRU']
         if self.rnn_type == 'LSTM':
             self.rnn = nn.LSTM(output_dim, output_dim, num_layers=1, bias=bias, batch_first=True)

@@ -52,6 +54,7 @@ def forward(self, x_list, edge_list):
         hx_list = []
         for i in range(time_num):
             x = self.gcn_list[i](x_list[i], edge_list[i])
+            x = F.normalize(x, p=2)
             hx_list.append(x)
         hx = torch.stack(hx_list, dim=0).transpose(0, 1)
         out, _ = self.rnn(hx)
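
The added F.normalize call rescales every node embedding to unit L2 norm before the snapshots are stacked and fed to the RNN. A standalone sketch of just that step, with an assumed tensor shape:

# Standalone sketch of the added normalization step.
import torch
import torch.nn.functional as F

x = torch.randn(4, 8)      # embeddings of 4 nodes for one snapshot (shape assumed)
x = F.normalize(x, p=2)    # default dim=1: each row rescaled to unit L2 norm
print(x.norm(p=2, dim=1))  # approximately tensor([1., 1., 1., 1.])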
4 changes: 3 additions & 1 deletion baseline/gin.py

@@ -101,9 +101,11 @@ def __init__(self, input_dim, hidden_dim, output_dim, layer_num, mlp_layer_num,
         # List of batchnorms applied to the output of MLP (input of the final prediction linear layer)
         self.batch_norms = torch.nn.ModuleList()
 
-        for layer in range(self.layer_num):
+        for layer in range(self.layer_num - 1):
             self.mlps.append(MLP(hidden_dim, hidden_dim, hidden_dim, mlp_layer_num, bias=bias))
             self.batch_norms.append(nn.BatchNorm1d(hidden_dim))
+        self.mlps.append(MLP(hidden_dim, hidden_dim, output_dim, mlp_layer_num, bias=bias))
+        self.batch_norms.append(nn.BatchNorm1d(output_dim))
 
     def __preprocess_neighbors_maxpool(self, adj):
         # create padded_neighbor_list in a graph
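
The old loop built layer_num hidden-to-hidden MLPs and never projected to output_dim; the fix builds layer_num - 1 hidden blocks plus one final projection. A toy check of the resulting widths, with the repository's MLP class stood in by plain (in, out) tuples:

# Width check for the corrected construction; MLP replaced by (in, out) tuples.
layer_num, hidden_dim, output_dim = 3, 32, 8
widths = [(hidden_dim, hidden_dim) for _ in range(layer_num - 1)]  # intermediate MLPs
widths.append((hidden_dim, output_dim))                            # final projection
print(widths)  # [(32, 32), (32, 32), (32, 8)]; the batch norms track the same widths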
File renamed without changes.
