
Commit

Update fewshot.py
shenqq377 authored Oct 16, 2022
1 parent 9be4301 commit 6a16c72
Showing 1 changed file with 5 additions and 5 deletions.
models/fewshot.py (5 additions, 5 deletions)
@@ -23,7 +23,7 @@ def __init__(self, pretrained_weights="deeplabv3", alpha=0.9):
self.criterion = nn.NLLLoss()
self.alpha = torch.Tensor([alpha, 1-alpha])

-    def forward(self, supp_imgs, supp_mask, qry_imgs, train=False, n_iters=0, lr=0.01):
+    def forward(self, supp_imgs, supp_mask, qry_imgs, train=False, n_iters=0):
"""
Args:
supp_imgs: support images
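
For orientation, a caller-side sketch of the signature change; the model handle and episode tensors are assumptions, and only the dropped keyword and the returned pair come from this diff:

    # Hypothetical call site; `model` is an instance of the module in models/fewshot.py.
    # Before this commit the refinement step size could be passed per call:
    #   output, align_loss = model(supp_imgs, supp_mask, qry_imgs,
    #                              train=False, n_iters=3, lr=0.01)
    # After it, forward() no longer accepts lr, so the kwarg must be dropped:
    output, align_loss = model(supp_imgs, supp_mask, qry_imgs, train=False, n_iters=3)
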
@@ -92,7 +92,7 @@ def forward(self, supp_imgs, supp_mask, qry_imgs, train=False, n_iters=0, lr=0.01):
if (not train) and n_iters > 0: # iteratively update prototypes
for n in range(len(qry_fts)):
fg_prototypes_.append(
-                        self.updatePrototype(qry_fts[n], fg_prototypes[n], qry_pred[n], n_iters, lr, epi))
+                        self.updatePrototype(qry_fts[n], fg_prototypes[n], qry_pred[n], n_iters, epi))

qry_pred = [torch.stack(
[self.getPred(qry_fts[n][epi], fg_prototypes_[n][way], self.thresh_pred[way]) for way in
@@ -118,12 +118,12 @@ def forward(self, supp_imgs, supp_mask, qry_imgs, train=False, n_iters=0, lr=0.01):

return output, align_loss / supp_bs

-    def updatePrototype(self, fts, prototype, pred, update_iters, lr, epi):
+    def updatePrototype(self, fts, prototype, pred, update_iters, epi):

prototype_0 = torch.stack(prototype, dim=0)
prototype_ = Parameter(torch.stack(prototype, dim=0))

-        optimizer = torch.optim.Adam([prototype_], lr=lr)
+        optimizer = torch.optim.Adam([prototype_], lr=0.01)

while update_iters > 0:
with torch.enable_grad():
@@ -139,7 +139,7 @@ def updatePrototype(self, fts, prototype, pred, update_iters, lr, epi):
fts_norm = torch.sigmoid((fts[epi] - fts[epi].min()) / (fts[epi].max() - fts[epi].min()))
new_fts_norm = torch.sigmoid((new_fts - new_fts.min()) / (new_fts.max() - new_fts.min()))
bce_loss = nn.BCELoss()
-            loss = bce_loss(fts_norm, new_fts_norm) # + beta * mse_loss(prototype_, prototype_0)
+            loss = bce_loss(fts_norm, new_fts_norm)

optimizer.zero_grad()
# loss.requires_grad_()
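
Reading the visible fragments together, the refinement step amounts to gradient descent on the prototypes themselves, with the learning rate now fixed at 0.01 inside updatePrototype instead of being threaded through forward(). Below is a minimal self-contained sketch of that technique; the prediction and feature-reconstruction steps stand in for lines the diff keeps collapsed, so treat the shapes, the similarity-weighting choices, and the helper name refine_prototype as illustrative assumptions rather than the repository's exact code:

    import torch
    import torch.nn as nn
    from torch.nn.parameter import Parameter

    def refine_prototype(fts, prototype, n_iters=3):
        # fts: assumed query feature map [C, H, W]; prototype: assumed class vector [C].
        prototype_ = Parameter(prototype.clone())            # learnable copy, as in the diff
        optimizer = torch.optim.Adam([prototype_], lr=0.01)  # lr hardcoded by this commit
        bce_loss = nn.BCELoss()

        for _ in range(n_iters):
            with torch.enable_grad():
                # Assumed stand-in for the collapsed lines: predict a foreground map
                # from the current prototype, then rebuild prediction-weighted features.
                pred = torch.sigmoid(torch.einsum('chw,c->hw', fts, prototype_))
                new_fts = pred.unsqueeze(0) * prototype_.view(-1, 1, 1)

                # Visible in the diff: min-max normalize both maps, squash through a
                # sigmoid, and compare them with BCE. Argument order follows the commit;
                # the gradient reaches prototype_ through the target argument, which
                # recent PyTorch supports (older versions may need the arguments swapped).
                fts_norm = torch.sigmoid((fts - fts.min()) / (fts.max() - fts.min()))
                new_fts_norm = torch.sigmoid((new_fts - new_fts.min()) / (new_fts.max() - new_fts.min()))
                loss = bce_loss(fts_norm, new_fts_norm)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        return prototype_.detach()

Hardcoding the rate trades flexibility for a simpler interface: callers no longer configure the inner optimization, and anyone needing a different step size now edits updatePrototype directly.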