-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path main.py
More file actions
73 lines (53 loc) · 1.87 KB
/
main.py
File metadata and controls
73 lines (53 loc) · 1.87 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
import torch
from torchvision import datasets, transforms
from torch.autograd import Variable
from modules import my_vgg, ae_module, loss_module
import os
###
# Hyper-parameters and runtime configuration.
learning_rate = 0.001  # Adam step size
batch_size = 64
epochs = 5
# VGG activation keys used for the perceptual (content) loss.
content_layers = ['r11', 'r21', 'r31']
cuda = torch.cuda.is_available()
# Legacy (pre-0.4) tensor-type selection; picks GPU floats when available.
if cuda:
    dtype = torch.cuda.FloatTensor
else:
    dtype = torch.FloatTensor
print_every = 500
# Data: CelebA face images, resized and center-cropped to 64x64 tensors.
data_dir = '/media/peter/HDD 1/datasets_peter/CelebA/Img'
image_prep = transforms.Compose([
    transforms.Resize(64),
    transforms.CenterCrop(64),
    transforms.ToTensor(),
])
dataset = datasets.ImageFolder(root=data_dir, transform=image_prep)
train_loader = torch.utils.data.DataLoader(
    dataset, batch_size=batch_size, shuffle=True)
###
###
# Models: trainable auto-encoder, a frozen VGG used as a fixed feature
# extractor, and the perceptual-loss criterion.
auto_encoder = ae_module.Auto_Encoder(input_channels=3, bn_momentum=0.9)
vgg = my_vgg.VGG()
content_loss = loss_module.Content_Loss()
# VGG only supplies target activations -- freeze every parameter.
for vgg_param in vgg.parameters():
    vgg_param.requires_grad = False
if cuda:
    for module in (auto_encoder, vgg, content_loss):
        module.cuda()
###
# Only the auto-encoder's parameters are optimized.
adam = torch.optim.Adam(auto_encoder.parameters(), lr=learning_rate)
###
# Training: push each batch through the auto-encoder and match the VGG
# feature activations of the reconstruction to those of the input
# (perceptual loss); `content_loss` also consumes the VAE mean/logvar.
for e in range(epochs):
    print('\n\nEpoch {} of {}'.format(e, epochs))
    auto_encoder.train()
    loss_counter = 0.
    for i, (images, _) in enumerate(train_loader):
        if i % print_every == 0:
            print('Batch {} of {}'.format(i, len(train_loader)))
        # torch.autograd.Variable is a deprecated no-op since PyTorch 0.4;
        # tensors carry autograd state directly, so only cast the dtype.
        images = images.type(dtype)
        adam.zero_grad()
        # Target activations come from the frozen VGG (params have
        # requires_grad=False, so no gradient flows into it).
        target = vgg(images, out_keys=content_layers)
        reconstructed, mean, logvar = auto_encoder(images)
        output = vgg(reconstructed, out_keys=content_layers)
        loss = content_loss(output, target, mean, logvar)
        # .item() extracts a detached Python float; accumulating .data
        # kept a tensor alive (and forces a device sync on GPU each read).
        loss_counter += loss.item()
        loss.backward()
        adam.step()
    print('Average loss over epoch = {}'.format(loss_counter / (i + 1)))
    # Checkpoint the auto-encoder weights after every epoch.
    os.makedirs('./models', exist_ok=True)
    torch.save(auto_encoder.state_dict(), './models/ae_params_epoch{}.pt'.format(e))