Slide 130 — transfer learning: combining optimizer updates for new and pretrained layers
# Fine-tuning setup: freshly added layers train at the full learning rate,
# while pretrained layers use a 10x smaller rate so their weights shift slowly.
# Assumes `training_loss`, `new_params`, `pretrained_params`, and `lr` are
# defined earlier (outside this excerpt).
# Update new layers with standard learning rate
new_updates = lasagne.updates.adam(
training_loss, new_params, learning_rate=lr)
# Update pretrained layers with a reduced (0.1x) learning rate
pretrained_updates = lasagne.updates.adam(
training_loss, pretrained_params, learning_rate=lr *
0.1)
# Combine updates into one dict — presumably passed to theano.function(updates=...)
# later; not visible in this excerpt.
updates = new_updates.copy()
updates.update(pretrained_updates)