Updated defaults for inference and test scripts
YuvalNirkin committed Apr 16, 2020
1 parent e6da240 commit 09ab56a
Showing 20 changed files with 156 additions and 512 deletions.
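Two changes recur across the scripts below: the default weight files move to the new naming scheme (ijbc_msrunet_256_2_0_*_v1.pth, lfw_figaro_unet_256_2_0_segmentation_v1.pth, and hopenet_robust_alpha1.pth instead of .pkl), and the Hopenet pose checkpoint is now unwrapped via its 'state_dict' key instead of being loaded as a bare state dict. A minimal sketch of the new loading pattern — the Hopenet import path is an assumption (it is not shown in this diff), as is the assumption that the new .pth file stores its weights under 'state_dict':

    import torch
    from models.hopenet import Hopenet  # assumed import path; not shown in this diff

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Initialize the pose estimator and load the new-format checkpoint.
    Gp = Hopenet().to(device)
    checkpoint = torch.load('../weights/hopenet_robust_alpha1.pth', map_location=device)
    Gp.load_state_dict(checkpoint['state_dict'])  # the old .pkl was the bare state dict
    Gp.train(False)  # inference mode: freezes dropout and batch-norm statistics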
24 changes: 12 additions & 12 deletions inference/expression_reenactment_video2video.py
@@ -100,11 +100,11 @@ def plot_kpt(image, kpt, circle_color=(0, 0, 255), line_color=(255, 255, 255), l
 
 def main(source_path, target_path,
          arch='res_unet_split.MultiScaleResUNet(in_nc=71,out_nc=(3,3),flat_layers=(2,0,2,3),ngf=128)',
-         reenactment_model_path='../weights/ijbc_v2_msrunet_256_2_0_reenactment.pth',
-         seg_model_path='../weights/lfw_figaro_unet_256_segmentation.pth',
-         inpainting_model_path='../weights/ijbc_v2_msrunet_256_2_0_inpainting.pth',
-         blend_model_path='../weights/ijbc_v2_msrunet_256_2_0_blending.pth',
-         pose_model_path='../weights/hopenet_robust_alpha1.pkl',
+         reenactment_model_path='../weights/ijbc_msrunet_256_2_0_reenactment_v1.pth',
+         seg_model_path='../weights/lfw_figaro_unet_256_2_0_segmentation_v1.pth',
+         inpainting_model_path='../weights/ijbc_msrunet_256_2_0_inpainting_v1.pth',
+         blend_model_path='../weights/ijbc_msrunet_256_2_0_blending_v1.pth',
+         pose_model_path='../weights/hopenet_robust_alpha1.pth',
          pil_transforms1=None, pil_transforms2=None,
          tensor_transforms1=('landmark_transforms.ToTensor()',
                              'transforms.Normalize(mean=[0.5,0.5,0.5],std=[0.5,0.5,0.5])'),
@@ -161,7 +161,7 @@ def main(source_path, target_path,
     # Initialize pose
     Gp = Hopenet().to(device)
     checkpoint = torch.load(pose_model_path)
-    Gp.load_state_dict(checkpoint)
+    Gp.load_state_dict(checkpoint['state_dict'])
     Gp.train(False)
 
     # Initialize transformations
@@ -325,15 +325,15 @@ def main(source_path, target_path,
     parser.add_argument('-a', '--arch',
                         default='res_unet_split.MultiScaleResUNet(in_nc=71,out_nc=(3,3),flat_layers=(2,0,2,3),ngf=128)',
                         help='model architecture object')
-    parser.add_argument('-rm', '--reenactment_model', default='../weights/ijbc_v2_msrunet_256_2_0_reenactment.pth',
+    parser.add_argument('-rm', '--reenactment_model', default='../weights/ijbc_msrunet_256_2_0_reenactment_v1.pth',
                         metavar='PATH', help='path to face reenactment model')
-    parser.add_argument('-sm', '--seg_model', default='../weights/lfw_figaro_unet_256_segmentation.pth', metavar='PATH',
-                        help='path to face segmentation model')
-    parser.add_argument('-im', '--inpainting_model', default='../weights/ijbc_v2_msrunet_256_2_0_inpainting.pth',
+    parser.add_argument('-sm', '--seg_model', default='../weights/lfw_figaro_unet_256_2_0_segmentation_v1.pth',
+                        metavar='PATH', help='path to face segmentation model')
+    parser.add_argument('-im', '--inpainting_model', default='../weights/ijbc_msrunet_256_2_0_inpainting_v1.pth',
                         metavar='PATH', help='path to face inpainting model')
-    parser.add_argument('-bm', '--blending_model', default='../weights/ijbc_v2_msrunet_256_2_0_blending.pth',
+    parser.add_argument('-bm', '--blending_model', default='../weights/ijbc_msrunet_256_2_0_blending_v1.pth',
                         metavar='PATH', help='path to face blending model')
-    parser.add_argument('-pm', '--pose_model', default='../weights/hopenet_robust_alpha1.pkl', metavar='PATH',
+    parser.add_argument('-pm', '--pose_model', default='../weights/hopenet_robust_alpha1.pth', metavar='PATH',
                         help='path to face pose model')
     parser.add_argument('-pt1', '--pil_transforms1', default=None, nargs='+', help='first PIL transforms')
     parser.add_argument('-pt2', '--pil_transforms2', default=None, nargs='+', help='second PIL transforms')
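As a usage reference, a hypothetical direct call to the updated main() above. The keyword names come from the new signature; the import path and the source/target video paths are made-up placeholders, and the overrides are optional since they match the new defaults:

    from expression_reenactment_video2video import main  # assumed import path

    # Source/target paths are placeholders; the keyword defaults already
    # point at the renamed v1 weight files.
    main('../data/source.mp4', '../data/target.mp4',
         seg_model_path='../weights/lfw_figaro_unet_256_2_0_segmentation_v1.pth',
         pose_model_path='../weights/hopenet_robust_alpha1.pth')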
22 changes: 11 additions & 11 deletions inference/expression_reenactment_video2video_batch.py
@@ -10,11 +10,11 @@
 
 
 def main(input, out_dir, arch='res_unet_split.MultiScaleResUNet(in_nc=71,out_nc=(3,3),flat_layers=(2,0,2,3),ngf=128)',
-         reenactment_model_path='../weights/ijbc_v2_msrunet_256_2_0_reenactment.pth',
-         seg_model_path='../weights/lfw_figaro_unet_256_segmentation.pth',
-         inpainting_model_path='../weights/ijbc_v2_msrunet_256_2_0_inpainting.pth',
-         blend_model_path='../weights/ijbc_v2_msrunet_256_2_0_blending.pth',
-         pose_model_path='../weights/hopenet_robust_alpha1.pkl',
+         reenactment_model_path='../weights/ijbc_msrunet_256_2_0_reenactment_v1.pth',
+         seg_model_path='../weights/lfw_figaro_unet_256_2_0_segmentation_v1.pth',
+         inpainting_model_path='../weights/ijbc_msrunet_256_2_0_inpainting_v1.pth',
+         blend_model_path='../weights/ijbc_msrunet_256_2_0_blending_v1.pth',
+         pose_model_path='../weights/hopenet_robust_alpha1.pth',
          pil_transforms1=None, pil_transforms2=None,
          tensor_transforms1=('landmark_transforms.ToTensor()',
                              'transforms.Normalize(mean=[0.5,0.5,0.5],std=[0.5,0.5,0.5])'),
@@ -79,15 +79,15 @@ def main(input, out_dir, arch='res_unet_split.MultiScaleResUNet(in_nc=71,out_nc=
     parser.add_argument('-a', '--arch',
                         default='res_unet_split.MultiScaleResUNet(in_nc=71,out_nc=(3,3),flat_layers=(2,0,2,3),ngf=128)',
                         help='model architecture object')
-    parser.add_argument('-rm', '--reenactment_model', default='../weights/ijbc_v2_msrunet_256_2_0_reenactment.pth',
+    parser.add_argument('-rm', '--reenactment_model', default='../weights/ijbc_msrunet_256_2_0_reenactment_v1.pth',
                         metavar='PATH', help='path to face reenactment model')
-    parser.add_argument('-sm', '--seg_model', default='../weights/lfw_figaro_unet_256_segmentation.pth', metavar='PATH',
-                        help='path to face segmentation model')
-    parser.add_argument('-im', '--inpainting_model', default='../weights/ijbc_v2_msrunet_256_2_0_inpainting.pth',
+    parser.add_argument('-sm', '--seg_model', default='../weights/lfw_figaro_unet_256_2_0_segmentation_v1.pth',
+                        metavar='PATH', help='path to face segmentation model')
+    parser.add_argument('-im', '--inpainting_model', default='../weights/ijbc_msrunet_256_2_0_inpainting_v1.pth',
                         metavar='PATH', help='path to face inpainting model')
-    parser.add_argument('-bm', '--blending_model', default='../weights/ijbc_v2_msrunet_256_2_0_blending.pth',
+    parser.add_argument('-bm', '--blending_model', default='../weights/ijbc_msrunet_256_2_0_blending_v1.pth',
                         metavar='PATH', help='path to face blending model')
-    parser.add_argument('-pm', '--pose_model', default='../weights/hopenet_robust_alpha1.pkl', metavar='PATH',
+    parser.add_argument('-pm', '--pose_model', default='../weights/hopenet_robust_alpha1.pth', metavar='PATH',
                         help='path to face pose model')
     parser.add_argument('-pt1', '--pil_transforms1', default=None, nargs='+', help='first PIL transforms')
     parser.add_argument('-pt2', '--pil_transforms2', default=None, nargs='+', help='second PIL transforms')
24 changes: 12 additions & 12 deletions inference/face_swap_image2video.py
@@ -95,11 +95,11 @@ def crop2img(img, crop, bbox):
 
 def main(source_path, target_path,
          arch='res_unet_split.MultiScaleResUNet(in_nc=71,out_nc=(3,3),flat_layers=(2,0,2,3),ngf=128)',
-         reenactment_model_path='../weights/ijbc_v2_msrunet_256_2_0_reenactment.pth',
-         seg_model_path='../weights/lfw_figaro_unet_256_segmentation.pth',
-         inpainting_model_path='../weights/ijbc_v2_msrunet_256_2_0_inpainting.pth',
-         blend_model_path='../weights/ijbc_v2_msrunet_256_2_0_blending.pth',
-         pose_model_path='../weights/hopenet_robust_alpha1.pkl',
+         reenactment_model_path='../weights/ijbc_msrunet_256_2_0_reenactment_v1.pth',
+         seg_model_path='../weights/lfw_figaro_unet_256_2_0_segmentation_v1.pth',
+         inpainting_model_path='../weights/ijbc_msrunet_256_2_0_inpainting_v1.pth',
+         blend_model_path='../weights/ijbc_msrunet_256_2_0_blending_v1.pth',
+         pose_model_path='../weights/hopenet_robust_alpha1.pth',
          pil_transforms1=None, pil_transforms2=None,
          tensor_transforms1=('landmark_transforms.ToTensor()',
                              'transforms.Normalize(mean=[0.5,0.5,0.5],std=[0.5,0.5,0.5])'),
@@ -157,7 +157,7 @@ def main(source_path, target_path,
     # Initialize pose
     Gp = Hopenet().to(device)
     checkpoint = torch.load(pose_model_path)
-    Gp.load_state_dict(checkpoint)
+    Gp.load_state_dict(checkpoint['state_dict'])
     Gp.train(False)
 
     # Initialize transformations
@@ -298,15 +298,15 @@ def main(source_path, target_path,
     parser.add_argument('-a', '--arch',
                         default='res_unet_split.MultiScaleResUNet(in_nc=71,out_nc=(3,3),flat_layers=(2,0,2,3),ngf=128)',
                         help='model architecture object')
-    parser.add_argument('-rm', '--reenactment_model', default='../weights/ijbc_v2_msrunet_256_2_0_reenactment.pth',
+    parser.add_argument('-rm', '--reenactment_model', default='../weights/ijbc_msrunet_256_2_0_reenactment_v1.pth',
                         metavar='PATH', help='path to face reenactment model')
-    parser.add_argument('-sm', '--seg_model', default='../weights/lfw_figaro_unet_256_segmentation.pth', metavar='PATH',
-                        help='path to face segmentation model')
-    parser.add_argument('-im', '--inpainting_model', default='../weights/ijbc_v2_msrunet_256_2_0_inpainting.pth',
+    parser.add_argument('-sm', '--seg_model', default='../weights/lfw_figaro_unet_256_2_0_segmentation_v1.pth',
+                        metavar='PATH', help='path to face segmentation model')
+    parser.add_argument('-im', '--inpainting_model', default='../weights/ijbc_msrunet_256_2_0_inpainting_v1.pth',
                         metavar='PATH', help='path to face inpainting model')
-    parser.add_argument('-bm', '--blending_model', default='../weights/ijbc_v2_msrunet_256_2_0_blending.pth',
+    parser.add_argument('-bm', '--blending_model', default='../weights/ijbc_msrunet_256_2_0_blending_v1.pth',
                         metavar='PATH', help='path to face blending model')
-    parser.add_argument('-pm', '--pose_model', default='../weights/hopenet_robust_alpha1.pkl', metavar='PATH',
+    parser.add_argument('-pm', '--pose_model', default='../weights/hopenet_robust_alpha1.pth', metavar='PATH',
                         help='path to face pose model')
     parser.add_argument('-pt1', '--pil_transforms1', nargs='+', help='first PIL transforms')
     parser.add_argument('-pt2', '--pil_transforms2', nargs='+', help='second PIL transforms')
24 changes: 12 additions & 12 deletions inference/face_swap_image2video_finetuned.py
@@ -95,11 +95,11 @@ def crop2img(img, crop, bbox):
 
 def main(source_path, target_path,
          arch='res_unet_split.MultiScaleResUNet(in_nc=71,out_nc=(3,3),flat_layers=(2,0,2,3),ngf=128)',
-         reenactment_model_path='../weights/ijbc_v2_msrunet_256_2_0_reenactment.pth',
-         seg_model_path='../weights/lfw_figaro_unet_256_segmentation.pth',
-         inpainting_model_path='../weights/ijbc_v2_msrunet_256_2_0_inpainting.pth',
-         blend_model_path='../weights/ijbc_v2_msrunet_256_2_0_blending.pth',
-         pose_model_path='../weights/hopenet_robust_alpha1.pkl',
+         reenactment_model_path='../weights/ijbc_msrunet_256_2_0_reenactment_v1.pth',
+         seg_model_path='../weights/lfw_figaro_unet_256_2_0_segmentation_v1.pth',
+         inpainting_model_path='../weights/ijbc_msrunet_256_2_0_inpainting_v1.pth',
+         blend_model_path='../weights/ijbc_msrunet_256_2_0_blending_v1.pth',
+         pose_model_path='../weights/hopenet_robust_alpha1.pth',
          pil_transforms1=None, pil_transforms2=None,
          tensor_transforms1=('landmark_transforms.ToTensor()',
                              'transforms.Normalize(mean=[0.5,0.5,0.5],std=[0.5,0.5,0.5])'),
@@ -157,7 +157,7 @@ def main(source_path, target_path,
     # Initialize pose
     Gp = Hopenet().to(device)
     checkpoint = torch.load(pose_model_path)
-    Gp.load_state_dict(checkpoint)
+    Gp.load_state_dict(checkpoint['state_dict'])
     Gp.train(False)
 
     # Initialize transformations
@@ -297,15 +297,15 @@ def main(source_path, target_path,
     parser.add_argument('-a', '--arch',
                         default='res_unet_split.MultiScaleResUNet(in_nc=71,out_nc=(3,3),flat_layers=(2,0,2,3),ngf=128)',
                         help='model architecture object')
-    parser.add_argument('-rm', '--reenactment_model', default='../weights/ijbc_v2_msrunet_256_2_0_reenactment.pth',
+    parser.add_argument('-rm', '--reenactment_model', default='../weights/ijbc_msrunet_256_2_0_reenactment_v1.pth',
                         metavar='PATH', help='path to face reenactment model')
-    parser.add_argument('-sm', '--seg_model', default='../weights/lfw_figaro_unet_256_segmentation.pth', metavar='PATH',
-                        help='path to face segmentation model')
-    parser.add_argument('-im', '--inpainting_model', default='../weights/ijbc_v2_msrunet_256_2_0_inpainting.pth',
+    parser.add_argument('-sm', '--seg_model', default='../weights/lfw_figaro_unet_256_2_0_segmentation_v1.pth',
+                        metavar='PATH', help='path to face segmentation model')
+    parser.add_argument('-im', '--inpainting_model', default='../weights/ijbc_msrunet_256_2_0_inpainting_v1.pth',
                         metavar='PATH', help='path to face inpainting model')
-    parser.add_argument('-bm', '--blending_model', default='../weights/ijbc_v2_msrunet_256_2_0_blending.pth',
+    parser.add_argument('-bm', '--blending_model', default='../weights/ijbc_msrunet_256_2_0_blending_v1.pth',
                         metavar='PATH', help='path to face blending model')
-    parser.add_argument('-pm', '--pose_model', default='../weights/hopenet_robust_alpha1.pkl', metavar='PATH',
+    parser.add_argument('-pm', '--pose_model', default='../weights/hopenet_robust_alpha1.pth', metavar='PATH',
                         help='path to face pose model')
     parser.add_argument('-pt1', '--pil_transforms1', default=None, nargs='+', help='first PIL transforms')
     parser.add_argument('-pt2', '--pil_transforms2', default=None, nargs='+', help='second PIL transforms')
24 changes: 12 additions & 12 deletions inference/face_swap_images2images.py
@@ -96,11 +96,11 @@ def crop2img(img, crop, bbox):
 
 def main(source_path, target_path,
          arch='res_unet_split.MultiScaleResUNet(in_nc=71,out_nc=(3,3),flat_layers=(2,0,2,3),ngf=128)',
-         reenactment_model_path='../weights/ijbc_v2_msrunet_256_2_0_reenactment.pth',
-         seg_model_path='../weights/lfw_figaro_unet_256_segmentation.pth',
-         inpainting_model_path='../weights/ijbc_v2_msrunet_256_2_0_inpainting.pth',
-         blend_model_path='../weights/ijbc_v2_msrunet_256_2_0_blending.pth',
-         pose_model_path='../weights/hopenet_robust_alpha1.pkl',
+         reenactment_model_path='../weights/ijbc_msrunet_256_2_0_reenactment_v1.pth',
+         seg_model_path='../weights/lfw_figaro_unet_256_2_0_segmentation_v1.pth',
+         inpainting_model_path='../weights/ijbc_msrunet_256_2_0_inpainting_v1.pth',
+         blend_model_path='../weights/ijbc_msrunet_256_2_0_blending_v1.pth',
+         pose_model_path='../weights/hopenet_robust_alpha1.pth',
          pil_transforms1=None, pil_transforms2=None,
          tensor_transforms1=('landmark_transforms.ToTensor()',
                              'transforms.Normalize(mean=[0.5,0.5,0.5],std=[0.5,0.5,0.5])'),
@@ -159,7 +159,7 @@ def main(source_path, target_path,
     # Initialize pose
     Gp = Hopenet().to(device)
    checkpoint = torch.load(pose_model_path)
-    Gp.load_state_dict(checkpoint)
+    Gp.load_state_dict(checkpoint['state_dict'])
     Gp.train(False)
 
     # Initialize transformations
@@ -316,15 +316,15 @@ def main(source_path, target_path,
     parser.add_argument('-a', '--arch',
                         default='res_unet_split.MultiScaleResUNet(in_nc=71,out_nc=(3,3),flat_layers=(2,0,2,3),ngf=128)',
                         help='model architecture object')
-    parser.add_argument('-rm', '--reenactment_model', default='../weights/ijbc_v2_msrunet_256_2_0_reenactment.pth',
+    parser.add_argument('-rm', '--reenactment_model', default='../weights/ijbc_msrunet_256_2_0_reenactment_v1.pth',
                         metavar='PATH', help='path to face reenactment model')
-    parser.add_argument('-sm', '--seg_model', default='../weights/lfw_figaro_unet_256_segmentation.pth', metavar='PATH',
-                        help='path to face segmentation model')
-    parser.add_argument('-im', '--inpainting_model', default='../weights/ijbc_v2_msrunet_256_2_0_inpainting.pth',
+    parser.add_argument('-sm', '--seg_model', default='../weights/lfw_figaro_unet_256_2_0_segmentation_v1.pth',
+                        metavar='PATH', help='path to face segmentation model')
+    parser.add_argument('-im', '--inpainting_model', default='../weights/ijbc_msrunet_256_2_0_inpainting_v1.pth',
                         metavar='PATH', help='path to face inpainting model')
-    parser.add_argument('-bm', '--blending_model', default='../weights/ijbc_v2_msrunet_256_2_0_blending.pth',
+    parser.add_argument('-bm', '--blending_model', default='../weights/ijbc_msrunet_256_2_0_blending_v1.pth',
                         metavar='PATH', help='path to face blending model')
-    parser.add_argument('-pm', '--pose_model', default='../weights/hopenet_robust_alpha1.pkl', metavar='PATH',
+    parser.add_argument('-pm', '--pose_model', default='../weights/hopenet_robust_alpha1.pth', metavar='PATH',
                         help='path to face pose model')
     parser.add_argument('-pt1', '--pil_transforms1', default=None, nargs='+', help='first PIL transforms')
     parser.add_argument('-pt2', '--pil_transforms2', default=None, nargs='+', help='second PIL transforms')
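Since weight files downloaded before this commit still use the old layout (a bare state dict, typically in the .pkl file), a tolerant loader can accept both. This is a sketch, not code from the repository:

    import torch

    def load_weights(model, path, device='cpu'):
        """Load a checkpoint that is either a bare state dict (old format)
        or a dict wrapping the weights under 'state_dict' (new format)."""
        checkpoint = torch.load(path, map_location=device)
        state_dict = checkpoint['state_dict'] if 'state_dict' in checkpoint else checkpoint
        model.load_state_dict(state_dict)
        return model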
… (the remaining 15 of 20 changed files are not shown)
