Commit 1d41ee1

latent dim in layers, some minor changes
Günther Eder committed Jan 12, 2021
1 parent 3652bb0 commit 1d41ee1
Showing 4 changed files with 10 additions and 15 deletions.
2 changes: 2 additions & 0 deletions .gitignore
@@ -0,0 +1,2 @@
+.ipy*
+__py*

9 changes: 2 additions & 7 deletions DCN.py
@@ -13,7 +13,7 @@ def __init__(self, args):
         self.args = args
         self.beta = args.beta  # coefficient of the clustering term
         self.lamda = args.lamda  # coefficient of the reconstruction term
-        self.device = torch.device('cuda' if args.cuda else 'cpu')
+        self.device = torch.device('cuda:2' if args.cuda else 'cpu')
 
         # Validation check
         if not self.beta > 0:
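
The new device string pins training to GPU index 2 on the author's machine. A minimal sketch of a more portable variant, using only standard PyTorch calls (gpu_index is an illustrative stand-in, not an argument that exists in this repo):

    import torch

    # Illustrative: pick a specific GPU by index, falling back to CPU.
    # The repo hard-codes 'cuda:2' instead of exposing this as a setting.
    gpu_index = 2
    device = torch.device('cuda:{}'.format(gpu_index)
                          if torch.cuda.is_available() else 'cpu')
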
@@ -24,11 +24,6 @@ def __init__(self, args):
             msg = 'lambda should be greater than 0 but got value = {}.'
             raise ValueError(msg.format(self.lamda))
 
-        if not self.args.n_clusters == self.args.n_classes:
-            msg = '`args.n_clusters = {} should equal `args.n_classes = {}`.'
-            raise ValueError(msg.format(self.args.n_clusters,
-                                        self.args.n_classes))
-
         if len(self.args.hidden_dims) == 0:
             raise ValueError('No hidden layer specified.')

@@ -145,4 +140,4 @@ def fit(self, epoch, train_loader, verbose=True):
                 print(msg.format(epoch, batch_idx,
                                  loss.detach().cpu().numpy(),
                                  rec_loss, dist_loss))
-
+

6 changes: 3 additions & 3 deletions autoencoder.py
@@ -13,11 +13,11 @@ def __init__(self, args):
         self.dims_list = (args.hidden_dims +
                           args.hidden_dims[:-1][::-1])  # mirrored structure
         self.n_layers = len(self.dims_list)
-        self.n_classes = args.n_classes
+        self.n_clusters = args.n_clusters
 
         # Validation check
         assert self.n_layers % 2 > 0
-        assert self.dims_list[self.n_layers // 2] == self.n_classes
+        assert self.dims_list[self.n_layers // 2] == args.latent_dim
 
         # Encoder Network
         layers = OrderedDict()
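
The list above mirrors args.hidden_dims around its last entry, so n_layers is always odd and the middle entry is the bottleneck width; this commit checks that width against args.latent_dim rather than args.n_classes. A worked sketch of the invariant, using the new defaults from mnist.py:

    # Mirrored layer list, as built in autoencoder.py.
    hidden_dims = [500, 500, 2000, 3]                # new default in mnist.py
    latent_dim = 3
    dims_list = hidden_dims + hidden_dims[:-1][::-1]
    # dims_list == [500, 500, 2000, 3, 2000, 500, 500]
    n_layers = len(dims_list)                        # 7, always odd
    assert n_layers % 2 > 0
    assert dims_list[n_layers // 2] == latent_dim    # bottleneck == latent dim
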
@@ -60,7 +60,7 @@ def __repr__(self):
             repr_str += '{}-'.format(dim)
         repr_str += str(self.output_dim) + '\n'
         repr_str += '[n_layers]: {}'.format(self.n_layers) + '\n'
-        repr_str += '[n_classes]: {}'.format(self.n_classes) + '\n'
+        repr_str += '[n_clusters]: {}'.format(self.n_clusters) + '\n'
         repr_str += '[input_dims]: {}'.format(self.input_dim)
         return repr_str

8 changes: 3 additions & 5 deletions mnist.py
@@ -53,8 +53,6 @@ def solver(args, model, train_loader, test_loader):
                         help='dataset directory')
     parser.add_argument('--input-dim', type=int, default=28*28,
                         help='input dimension')
-    parser.add_argument('--n-classes', type=int, default=10,
-                        help='output dimension')
 
     # Training parameters
     parser.add_argument('--lr', type=float, default=1e-4,

@@ -74,9 +72,9 @@ def solver(args, model, train_loader, test_loader):
     parser.add_argument('--beta', type=float, default=1,
                         help='coefficient of the regularization term on ' \
                              'clustering')
-    parser.add_argument('--hidden-dims', default=[500, 500, 2000, 10],
+    parser.add_argument('--hidden-dims', default=[500, 500, 2000, 3],
                         help='learning rate (default: 1e-4)')
-    parser.add_argument('--latent_dim', type=int, default=10,
+    parser.add_argument('--latent_dim', type=int, default=3,
                         help='latent space dimension')
     parser.add_argument('--n-clusters', type=int, default=10,
                         help='number of clusters in the latent space')
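
With --n-classes removed, the bottleneck is governed entirely by --latent_dim, and the last entry of --hidden-dims must match it or the assertion in autoencoder.py fails. (The help string on --hidden-dims, 'learning rate (default: 1e-4)', looks like a copy-paste leftover that this commit leaves unchanged.) A small consistency check, with SimpleNamespace standing in for the parsed arguments:

    from types import SimpleNamespace

    # Illustrative stand-in for the parsed args; not the repo's parser.
    args = SimpleNamespace(hidden_dims=[500, 500, 2000, 3], latent_dim=3)
    assert args.hidden_dims[-1] == args.latent_dim, \
        'last hidden dim must equal latent_dim'
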
@@ -97,7 +95,7 @@ def solver(args, model, train_loader, test_loader):
                                       transforms.Normalize((0.1307,),
                                                            (0.3081,))])
     train_loader = torch.utils.data.DataLoader(
-        datasets.MNIST(args.dir, train=True, download=False,
+        datasets.MNIST(args.dir, train=True, download=True,
                        transform=transformer),
         batch_size=args.batch_size, shuffle=False)
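
Flipping download to True lets torchvision fetch MNIST into args.dir on the first run rather than erroring when the raw files are missing; later runs reuse the cached copy. A standalone sketch of that behavior (the ./data path is illustrative):

    from torchvision import datasets, transforms

    # First run downloads MNIST into ./data; subsequent runs use the cache.
    ds = datasets.MNIST('./data', train=True, download=True,
                        transform=transforms.ToTensor())
    print(len(ds))  # 60000 training images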

