loss_tgt can't converge. Needs fix.
corenel committed Aug 19, 2017
1 parent 6bb2908 commit 332e662
Showing 2 changed files with 23 additions and 28 deletions.
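
In core/adapt.py the discriminator update switches from two separate critic passes (source features labelled 1, target features labelled 0, losses summed) to a single pass over a concatenated batch, and the log line now reports critic accuracy instead of per-domain scores. A minimal sketch of the concatenated-batch pattern, assuming feat_src and feat_tgt are equally sized feature batches and that make_variable, model_critic and criterion behave as in the hunk below:

import torch

# concatenate source and target features into one critic batch
feat_concat = torch.cat((feat_src, feat_tgt), 0)

# domain labels: 1 for the source half, 0 for the target half
label_concat = torch.cat((
    make_variable(torch.ones(feat_src.size(0)).long()),
    make_variable(torch.zeros(feat_tgt.size(0)).long())
), 0)

# single critic forward pass and a single loss over both domains
pred_concat = model_critic(feat_concat)
loss_critic = criterion(pred_concat, label_concat)

Computing one loss over the joint batch replaces the earlier loss_src + loss_tgt sum.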
43 changes: 19 additions & 24 deletions core/adapt.py
@@ -44,50 +44,45 @@ def train_tgt(model_src, model_tgt, model_critic,

             feat_src, _ = model_src(images_src)
             feat_tgt, _ = model_tgt(images_tgt)
+            feat_concat = torch.cat((feat_src, feat_tgt), 0)

-            labels_src = make_variable(
-                torch.LongTensor(feat_src.size()[0]).fill_(1))
-            labels_tgt = make_variable(
-                torch.LongTensor(feat_tgt.size()[0]).fill_(0))
+            label_concat = torch.cat((
+                make_variable(torch.ones(feat_concat.size(0) // 2).long()),
+                make_variable(torch.zeros(feat_concat.size(0) // 2).long())
+            ), 0)

-            pred_src = model_critic(feat_src)
-            loss_src = criterion(pred_src, labels_src)
-            loss_src.backward()

-            pred_tgt = model_critic(feat_tgt)
-            loss_tgt = criterion(pred_tgt, labels_tgt)
-            loss_tgt.backward()

-            loss_critic = loss_src + loss_tgt
+            pred_concat = model_critic(feat_concat)
+            loss_critic = criterion(pred_concat, label_concat)
+            loss_critic.backward(retain_graph=True)

             optimizer_critic.step()

+            pred_cls = torch.squeeze(pred_concat.max(1)[1])
+            acc = (pred_cls == label_concat).float().mean()

             # train target encoder
             optimizer_tgt.zero_grad()
             optimizer_critic.zero_grad()

-            feat_tgt, _ = model_tgt(images_tgt)
-            labels_src = make_variable(
-                torch.LongTensor(feat_tgt.size()[0]).fill_(1))

-            pred_tgt = model_critic(feat_tgt)
-            loss_gen = criterion(pred_tgt, labels_src)
-            loss_gen.backward()
+            loss_tgt = criterion(
+                feat_concat[feat_concat.size(0) // 2:, ...],
+                make_variable(torch.ones(feat_concat.size(0) // 2).long())
+            )
+            loss_tgt.backward()

             optimizer_tgt.step()

             # print step info
             if ((step + 1) % params.log_step == 0):
                 print("Epoch [{}/{}] Step [{}/{}]:"
-                      "d_loss={} g_loss={} D(src)={} D(tgt)={}"
+                      "d_loss={:.3f} g_loss={:.3f} acc={:.3f}"
                       .format(epoch + 1,
                               params.num_epochs,
                               step + 1,
                               len_data_loader,
                               loss_critic.data[0],
-                              loss_gen.data[0],
-                              loss_src.data[0],
-                              loss_tgt.data[0]))
+                              loss_tgt.data[0],
+                              acc.data[0]))

         # save model parameters
         if ((epoch + 1) % params.save_step == 0):
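The commit title notes that loss_tgt does not converge. One likely culprit visible in the hunk above is that the new target-encoder loss passes the raw feature slice feat_concat[feat_concat.size(0) // 2:, ...] straight to the criterion instead of critic outputs, and it reuses features computed before the critic step. Below is a sketch, not the author's fix, of a target-encoder update that recomputes target features and scores them with the critic; all names (model_tgt, model_critic, criterion, make_variable, images_tgt, the optimizers) are taken from the hunk above:

# train target encoder (generator step)
optimizer_tgt.zero_grad()
optimizer_critic.zero_grad()

# fresh forward pass through the trainable target encoder
feat_tgt, _ = model_tgt(images_tgt)

# score the target features with the critic and label them as "source" (1)
pred_tgt = model_critic(feat_tgt)
label_flipped = make_variable(torch.ones(feat_tgt.size(0)).long())
loss_tgt = criterion(pred_tgt, label_flipped)

# backpropagate and update only the target encoder
loss_tgt.backward()
optimizer_tgt.step()

This mirrors the pre-commit generator step removed above while keeping the new concatenated critic update.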
8 changes: 4 additions & 4 deletions main.py
@@ -32,14 +32,14 @@
                               restore=params.d_model_restore)

     # train and eval source model
-    # if not (model_src.restored and params.src_model_trained):
-    # model_src = train_src(model_src, src_data_loader)
-    # eval_src(model_src, src_data_loader_eval)
+    if not (model_src.restored and params.src_model_trained):
+        model_src = train_src(model_src, src_data_loader)
+    eval_src(model_src, src_data_loader_eval)

     # train target encoder by GAN
     # if not (model_tgt.restored and params.tgt_model_trained):
     model_tgt = train_tgt(model_src, model_tgt, model_critic,
                           src_data_loader, tgt_data_loader)

     # eval target encoder on test set of target dataset
-    # eval_tgt(model_src, model_tgt, tgt_data_loader_eval)
+    eval_tgt(model_src, model_tgt, tgt_data_loader_eval)
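
Read together with the hunk above, main.py now runs the full source-train, source-eval, adapt, target-eval pipeline. Roughly, the enabled portion of the script reads as follows (a sketch assembled from the lines added above; model and data-loader construction happen earlier in main.py):

# train and eval source model
if not (model_src.restored and params.src_model_trained):
    model_src = train_src(model_src, src_data_loader)
eval_src(model_src, src_data_loader_eval)

# train target encoder by GAN
model_tgt = train_tgt(model_src, model_tgt, model_critic,
                      src_data_loader, tgt_data_loader)

# eval target encoder on test set of target dataset
eval_tgt(model_src, model_tgt, tgt_data_loader_eval)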
