Created
August 9, 2019 23:53
-
-
Save ksivaman/7daab6027bf489d8867e94f9610af92b to your computer and use it in GitHub Desktop.
Loop for modifying target image for neural style transfer
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Optimization loop for neural style transfer: repeatedly nudge the pixels
# of `target` so that its deep features match the content image while its
# gram matrices match the style image.
for ii in range(1, steps + 1):
    # Re-extract feature maps from the (just-updated) target image.
    target_features = get_features(target, vgg)

    # Content loss: mean squared feature difference at the 'conv4_2' layer.
    content_loss = torch.mean(
        (target_features['conv4_2'] - content_features['conv4_2']) ** 2
    )

    # Style loss: accumulate a weighted gram-matrix MSE over every style layer.
    style_loss = 0
    for layer, weight in style_weights.items():
        layer_feature = target_features[layer]
        _, d, h, w = layer_feature.shape
        gram_diff = gram_matrix(layer_feature) - style_grams[layer]
        # Normalize by the layer's size so deep and shallow layers
        # contribute on a comparable scale.
        style_loss += weight * torch.mean(gram_diff ** 2) / (d * h * w)

    # Weighted sum of both objectives, then one gradient step on the pixels.
    total_loss = content_weight * content_loss + style_weight * style_loss
    optimizer.zero_grad()
    total_loss.backward()
    optimizer.step()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment