# train_lanenet.py
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import time
import copy

from model.lanenet.loss import DiscriminativeLoss, FocalLoss


def compute_loss(net_output, binary_label, instance_label, loss_type='FocalLoss'):
    # Weights for combining the individual loss terms.
    k_binary = 10  # 1.7
    k_instance = 0.3
    k_dist = 1.0

    if loss_type == 'FocalLoss':
        loss_fn = FocalLoss(gamma=2, alpha=[0.25, 0.75])
    elif loss_type == 'CrossEntropyLoss':
        loss_fn = nn.CrossEntropyLoss()
    else:
        # Unknown loss type: fall back to the default CrossEntropyLoss.
        loss_fn = nn.CrossEntropyLoss()

    # Binary segmentation branch (lane vs. background).
    binary_seg_logits = net_output["binary_seg_logits"]
    binary_loss = loss_fn(binary_seg_logits, binary_label)

    # Instance segmentation branch: the discriminative loss pulls embeddings of
    # the same lane together (var_loss) and pushes different lanes apart
    # (dist_loss). reg_loss is returned by the loss but not used in the total.
    pix_embedding = net_output["instance_seg_logits"]
    ds_loss_fn = DiscriminativeLoss(0.5, 1.5, 1.0, 1.0, 0.001)
    var_loss, dist_loss, reg_loss = ds_loss_fn(pix_embedding, instance_label)

    binary_loss = binary_loss * k_binary
    var_loss = var_loss * k_instance
    dist_loss = dist_loss * k_dist
    instance_loss = var_loss + dist_loss
    total_loss = binary_loss + instance_loss
    out = net_output["binary_seg_pred"]
    return total_loss, binary_loss, instance_loss, out
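

# Shape sketch (illustrative only; these shapes are assumptions inferred from
# how compute_loss indexes net_output, not something this file guarantees):
#   net_output["binary_seg_logits"]   : (N, 2, H, W) raw two-class logits
#   net_output["instance_seg_logits"] : (N, embed_dim, H, W) pixel embeddings
#   binary_label                      : (N, H, W) long, values in {0, 1}
#   instance_label                    : (N, H, W) float, one id per lane
def _binary_branch_sketch():
    """Standalone check of the binary branch using plain PyTorch only (the
    instance branch depends on this repo's DiscriminativeLoss and is omitted
    here)."""
    logits = torch.randn(2, 2, 256, 512)            # (N, classes, H, W)
    labels = torch.randint(0, 2, (2, 256, 512))     # (N, H, W) class ids
    # Same k_binary weighting as compute_loss applies above.
    return nn.CrossEntropyLoss()(logits, labels) * 10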
def train_model(model, optimizer, scheduler, dataloaders, dataset_sizes, device, loss_type='FocalLoss', num_epochs=25):
    since = time.time()
    training_log = {'epoch': [], 'training_loss': [], 'val_loss': []}
    best_loss = float("inf")
    best_model_wts = copy.deepcopy(model.state_dict())

    for epoch in range(num_epochs):
        training_log['epoch'].append(epoch)
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training and a validation phase.
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode

            running_loss = 0.0
            running_loss_b = 0.0
            running_loss_i = 0.0

            # Iterate over data.
            for inputs, binarys, instances in dataloaders[phase]:
                inputs = inputs.type(torch.FloatTensor).to(device)
                binarys = binarys.type(torch.LongTensor).to(device)
                instances = instances.type(torch.FloatTensor).to(device)

                # Zero the parameter gradients.
                optimizer.zero_grad()

                # Forward pass; track gradient history only in train.
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    loss = compute_loss(outputs, binarys, instances, loss_type)

                    # Backward + optimize only in the training phase.
                    if phase == 'train':
                        loss[0].backward()
                        optimizer.step()

                # Statistics: losses are batch means, so weight by batch size.
                running_loss += loss[0].item() * inputs.size(0)
                running_loss_b += loss[1].item() * inputs.size(0)
                running_loss_i += loss[2].item() * inputs.size(0)

            if phase == 'train' and scheduler is not None:
                scheduler.step()

            epoch_loss = running_loss / dataset_sizes[phase]
            binary_loss = running_loss_b / dataset_sizes[phase]
            instance_loss = running_loss_i / dataset_sizes[phase]
            print('{} Total Loss: {:.4f} Binary Loss: {:.4f} Instance Loss: {:.4f}'.format(
                phase, epoch_loss, binary_loss, instance_loss))

            if phase == 'train':
                training_log['training_loss'].append(epoch_loss)
            if phase == 'val':
                training_log['val_loss'].append(epoch_loss)
                # Deep-copy the weights whenever the validation loss improves.
                if epoch_loss < best_loss:
                    best_loss = epoch_loss
                    best_model_wts = copy.deepcopy(model.state_dict())

        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val_loss: {:.4f}'.format(best_loss))

    training_log['training_loss'] = np.array(training_log['training_loss'])
    training_log['val_loss'] = np.array(training_log['val_loss'])

    # Load the best model weights before returning.
    model.load_state_dict(best_model_wts)
    return model, training_log
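

def _train_model_usage_sketch():
    """Usage sketch only, not this repo's real entry point (which builds the
    actual LaneNet model and dataloaders elsewhere). _ToyNet below is a
    hypothetical stand-in that returns the dict keys compute_loss expects;
    whether DiscriminativeLoss accepts these toy shapes is an assumption."""
    from torch.utils.data import DataLoader, TensorDataset

    class _ToyNet(nn.Module):
        def __init__(self):
            super().__init__()
            self.binary_head = nn.Conv2d(3, 2, 1)  # 2-class binary logits
            self.embed_head = nn.Conv2d(3, 4, 1)   # 4-dim pixel embeddings

        def forward(self, x):
            binary = self.binary_head(x)
            return {"binary_seg_logits": binary,
                    "instance_seg_logits": self.embed_head(x),
                    "binary_seg_pred": binary.argmax(dim=1, keepdim=True)}

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = _ToyNet().to(device)

    # Synthetic (image, binary mask, instance mask) triples, as the training
    # loop unpacks them from each dataloader batch.
    images = torch.randn(8, 3, 64, 128)
    binarys = torch.randint(0, 2, (8, 64, 128))
    instances = torch.randint(0, 5, (8, 64, 128)).float()
    loaders = {phase: DataLoader(TensorDataset(images, binarys, instances),
                                 batch_size=4)
               for phase in ('train', 'val')}
    sizes = {'train': 8, 'val': 8}

    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
    return train_model(model, optimizer, scheduler, loaders, sizes, device,
                       loss_type='CrossEntropyLoss', num_epochs=1)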
def trans_to_cuda(variable):
    """Move a tensor (or module) to the GPU when one is available; otherwise
    return it unchanged."""
    if torch.cuda.is_available():
        return variable.cuda()
    else:
        return variable
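

# Usage note: trans_to_cuda keeps call sites device-agnostic, e.g.
# `x = trans_to_cuda(torch.zeros(4))` lands on the GPU when one is present and
# silently stays on the CPU otherwise. The explicit device/.to(device) pattern
# used in train_model above is the more idiomatic modern alternative.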