Writing a training loop from scratch

Setup

library(tensorflow)
library(keras)
library(tfdatasets)

Introduction
Keras provides default training and evaluation loops, fit() and evaluate(). Their usage is covered in the guide Training & evaluation with the built-in methods.

If you want to customize the learning algorithm of your model while still leveraging the convenience of fit() (for instance, to train a GAN using fit()), you can subclass the Model class and implement your own train_step() method, which is called repeatedly during fit(). This is covered in the guide Customizing what happens in fit().
Now, if you want very low-level control over training & evaluation, you should write your own training & evaluation loops from scratch. This is what this guide is about.
Using the GradientTape: a first end-to-end example

Calling a model inside a GradientTape scope enables you to retrieve the gradients of the trainable weights of the layer with respect to a loss value. Using an optimizer instance, you can use these gradients to update these variables (which you can retrieve using model$trainable_weights).
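Before building the full example, here’s a minimal sketch of that pattern on a single variable. This is an aside, not part of the original guide; the variable, optimizer, and values are made up for illustration.

# Aside: the tape / gradient / apply_gradients pattern on a single tf$Variable.
w <- tf$Variable(2)
sgd <- optimizer_sgd(learning_rate = 0.1)
with(tf$GradientTape() %as% tape, {
  loss <- tf$square(w)  # d(loss)/dw = 2 * w = 4
})
grad <- tape$gradient(loss, w)
sgd$apply_gradients(zip_lists(list(grad), list(w)))
w$numpy()  # 1.6, i.e. 2 - 0.1 * 4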
Let’s consider a simple MNIST model:
inputs <- layer_input(shape = shape(784), name = "digits")
outputs <- inputs %>%
  layer_dense(64, activation = "relu") %>%
  layer_dense(64, activation = "relu") %>%
  layer_dense(10, name = "predictions")
model <- keras_model(inputs = inputs, outputs = outputs)
Let’s train it using mini-batch gradient descent with a custom training loop. First, we’re going to need an optimizer, a loss function, and a dataset:
# Instantiate an optimizer.
optimizer <- optimizer_sgd(learning_rate = 1e-3)

# Instantiate a loss function.
loss_fn <- loss_sparse_categorical_crossentropy(from_logits = TRUE)

# Prepare the training dataset.
batch_size <- 64
c(c(x_train, y_train), c(x_test, y_test)) %<-% dataset_mnist()
x_train <- x_train %>% array_reshape(dim = c(60000, 784)) / 255
x_test <- x_test %>% array_reshape(dim = c(10000, 784)) / 255

# Reserve 10,000 samples for validation.
x_val <- x_train[-(1:50000), ]
y_val <- y_train[-(1:50000)]
x_train <- x_train[(1:50000), ]
y_train <- y_train[(1:50000)]

# Prepare the training dataset.
train_dataset <- list(x_train, y_train) %>%
  tensor_slices_dataset() %>%
  dataset_shuffle(buffer_size = 1024) %>%
  dataset_batch(batch_size)

# Prepare the validation dataset.
val_dataset <- list(x_val, y_val) %>%
  tensor_slices_dataset() %>%
  dataset_batch(batch_size)
Here’s our training loop:
- We open a for loop that iterates over epochs
- For each epoch, we open a for loop that iterates over the dataset, in batches
- For each batch, we open a GradientTape() scope
- Inside this scope, we call the model (forward pass) and compute the loss
- Outside the scope, we retrieve the gradients of the weights of the model with regard to the loss
- Finally, we use the optimizer to update the weights of the model based on the gradients

In the example, train_dataset is a TensorFlow Dataset, so it can’t be iterated over with a regular R for loop. That’s why we wrap the second loop in an autograph() call. autograph() compiles the expression into efficient TensorFlow code so the loop evaluates quickly.
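If you want to see what the loop receives on each iteration, you can pull a single element from the dataset by hand. This is an aside, not part of the original guide, and assumes reticulate’s as_iterator() and iter_next() helpers work as expected with TensorFlow datasets in eager mode.

# Aside: peek at one batch to see what the loop body receives.
library(reticulate)
batch <- iter_next(as_iterator(train_dataset))
batch[[1]]$shape  # (64, 784) -- a mini-batch of flattened images
batch[[2]]$shape  # (64)      -- the matching integer labels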
epochs <- 2
for (epoch in seq_len(epochs)) {
  cat("Start of epoch ", epoch, "\n")
  # Iterate over the batches of the dataset.
  tfautograph::autograph(for (batch in train_dataset) {
    # Open a GradientTape to record the operations run
    # during the forward pass, which enables auto-differentiation.
    with(tf$GradientTape() %as% tape, {
      # Run the forward pass of the layer.
      # The operations that the layer applies
      # to its inputs are going to be recorded
      # on the GradientTape.
      logits <- model(batch[[1]], training = TRUE)  # Logits for this minibatch

      # Compute the loss value for this minibatch.
      loss_value <- loss_fn(batch[[2]], logits)
    })

    # Use the gradient tape to automatically retrieve
    # the gradients of the trainable variables with respect to the loss.
    grads <- tape$gradient(loss_value, model$trainable_weights)

    # Run one step of gradient descent by updating
    # the value of the variables to minimize the loss.
    optimizer$apply_gradients(zip_lists(grads, model$trainable_weights))
  })
}
Start of epoch 1
Start of epoch 2
Low-level handling of metrics
Let’s add metrics monitoring to this basic loop.
You can readily reuse the built-in metrics (or custom ones you wrote) in such training loops written from scratch. Here’s the flow (a short standalone sketch follows the list):

- Instantiate the metric at the start of the loop
- Call metric$update_state() after each batch
- Call metric$result() when you need to display the current value of the metric
- Call metric$reset_states() when you need to clear the state of the metric (typically at the end of an epoch)
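Here’s a tiny standalone illustration of that lifecycle, as an aside (the toy labels and predictions are made up for the example and are not part of the original guide):

# Aside: the metric lifecycle on two toy samples.
m <- metric_sparse_categorical_accuracy()
m$update_state(c(0, 1), rbind(c(0.9, 0.1), c(0.2, 0.8)))  # both argmax predictions match the labels
as.numeric(m$result())  # 1
m$reset_states()
as.numeric(m$result())  # 0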
Let’s use this knowledge to compute sparse_categorical_accuracy on validation data at the end of each epoch:
# Get model
inputs <- layer_input(shape = shape(784), name = "digits")
outputs <- inputs %>%
  layer_dense(64, activation = "relu") %>%
  layer_dense(64, activation = "relu") %>%
  layer_dense(10, name = "predictions")
model <- keras_model(inputs = inputs, outputs = outputs)

# Instantiate an optimizer.
optimizer <- optimizer_sgd(learning_rate = 1e-3)

# Instantiate a loss function.
loss_fn <- loss_sparse_categorical_crossentropy(from_logits = TRUE)

# Prepare the metrics.
train_acc_metric <- metric_sparse_categorical_accuracy()
val_acc_metric <- metric_sparse_categorical_accuracy()
Here’s our training & evaluation loop:
epochs <- 2
for (epoch in seq_len(epochs)) {
  cat("Start of epoch ", epoch, "\n")
  tfautograph::autograph(for (batch in train_dataset) {
    with(tf$GradientTape() %as% tape, {
      logits <- model(batch[[1]], training = TRUE)
      loss_value <- loss_fn(batch[[2]], logits)
    })
    grads <- tape$gradient(loss_value, model$trainable_weights)
    optimizer$apply_gradients(zip_lists(grads, model$trainable_weights))

    # Update training metric.
    train_acc_metric$update_state(batch[[2]], logits)
  })

  train_acc <- as.numeric(train_acc_metric$result())
  cat("Training acc over epoch: ", train_acc, "\n")
  train_acc_metric$reset_states()

  # Run a validation loop at the end of each epoch.
  tfautograph::autograph(for (batch in val_dataset) {
    val_logits <- model(batch[[1]], training = FALSE)
    # Update val metrics
    val_acc_metric$update_state(batch[[2]], val_logits)
  })

  val_acc <- as.numeric(val_acc_metric$result())
  cat("Validation acc over epoch: ", val_acc, "\n")
  val_acc_metric$reset_states()
}
Start of epoch 1
Training acc over epoch: 0.25572
Validation acc over epoch: 0.4228
Start of epoch 2
Training acc over epoch: 0.5342
Validation acc over epoch: 0.6522
It’s common to extract the expression inside the second loop into a new function called train_step(). For example:
train_step <- function(batch) {
  with(tf$GradientTape() %as% tape, {
    logits <- model(batch[[1]], training = TRUE)
    loss_value <- loss_fn(batch[[2]], logits)
  })
  grads <- tape$gradient(loss_value, model$trainable_weights)
  optimizer$apply_gradients(zip_lists(grads, model$trainable_weights))

  # Update training metric.
  train_acc_metric$update_state(batch[[2]], logits)
}
Low-level handling of losses tracked by the model
Layers & models recursively track any losses created during the forward pass by layers that call self$add_loss(value). The resulting list of scalar loss values is available via the property model$losses at the end of the forward pass.

If you want to use these loss components, you should sum them and add them to the main loss in your training step.
Consider this layer, which creates an activity regularization loss:
layer_activity_regularization <- new_layer_class(
  "activity_regularization",
  call = function(inputs) {
    self$add_loss(1e-2 * tf$reduce_sum(inputs))
    inputs
  }
)
Let’s build a really simple model that uses it:
inputs <- layer_input(shape = shape(784), name = "digits")
outputs <- inputs %>%
  layer_dense(64, activation = "relu") %>%
  # Insert activity regularization as a layer
  layer_activity_regularization() %>%
  layer_dense(64, activation = "relu") %>%
  layer_dense(10, name = "predictions")
model <- keras_model(inputs = inputs, outputs = outputs)
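As a quick sanity check (an aside, not part of the original guide), you can run a dummy forward pass and inspect what the model tracked; the zero-filled input is made up for illustration:

# Aside: a forward pass populates model$losses with the regularization term
# added via self$add_loss() in the custom layer.
preds <- model(tf$zeros(shape(2, 784)))
model$losses  # a list of scalar tensors, one per add_loss() call in the forward pass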
Here’s what our training step should look like now:
train_step <- function(batch) {
  with(tf$GradientTape() %as% tape, {
    logits <- model(batch[[1]], training = TRUE)
    loss_value <- loss_fn(batch[[2]], logits)
    # Add any extra losses created during the forward pass.
    loss_value <- loss_value + do.call(sum, model$losses)
  })
  grads <- tape$gradient(loss_value, model$trainable_weights)
  optimizer$apply_gradients(zip_lists(grads, model$trainable_weights))
  train_acc_metric$update_state(batch[[2]], logits)
  loss_value
}
Summary
Now you know everything there is to know about using built-in training loops and writing your own from scratch.
To conclude, here’s a simple end-to-end example that ties together everything you’ve learned in this guide: a DCGAN trained on MNIST digits.
End-to-end example: a GAN training loop from scratch
You may be familiar with Generative Adversarial Networks (GANs). GANs can generate new images that look almost real, by learning the latent distribution of a training dataset of images (the “latent space” of the images).
A GAN is made of two parts: a “generator” model that maps points in the latent space to points in image space, and a “discriminator” model, a classifier that can tell the difference between real images (from the training dataset) and fake images (the output of the generator network).
A GAN training loop looks like this:
- Train the discriminator.
  - Sample a batch of random points in the latent space.
  - Turn the points into fake images via the “generator” model.
  - Get a batch of real images and combine them with the generated images.
  - Train the “discriminator” model to classify generated vs. real images.
- Train the generator.
  - Sample random points in the latent space.
  - Turn the points into fake images via the “generator” network.
  - Get a batch of real images and combine them with the generated images.
  - Train the “generator” model to “fool” the discriminator and classify the fake images as real.
For a much more detailed overview of how GANs work, see Deep Learning with Python.
Let’s implement this training loop. First, create the discriminator meant to classify fake vs real digits:
discriminator <- keras_model_sequential(
  name = "discriminator",
  input_shape = shape(28, 28, 1)
) %>%
  layer_conv_2d(64, c(3, 3), strides = c(2, 2), padding = "same") %>%
  layer_activation_leaky_relu(alpha = 0.2) %>%
  layer_conv_2d(128, c(3, 3), strides = c(2, 2), padding = "same") %>%
  layer_activation_leaky_relu(alpha = 0.2) %>%
  layer_global_max_pooling_2d() %>%
  layer_dense(1)
summary(discriminator)
Model: "discriminator"
____________________________________________________________________________
Layer (type) Output Shape Param #
============================================================================
conv2d_1 (Conv2D) (None, 14, 14, 64) 640
leaky_re_lu_1 (LeakyReLU) (None, 14, 14, 64) 0
conv2d (Conv2D) (None, 7, 7, 128) 73856
leaky_re_lu (LeakyReLU) (None, 7, 7, 128) 0
global_max_pooling2d (GlobalMaxP (None, 128) 0
ooling2D)
dense_6 (Dense) (None, 1) 129
============================================================================
Total params: 74,625
Trainable params: 74,625
Non-trainable params: 0
____________________________________________________________________________
Then let’s create a generator network that turns latent vectors into outputs of shape (28, 28, 1) (representing MNIST digits):
latent_dim <- 128
generator <- keras_model_sequential(
  input_shape = shape(latent_dim),
  name = "generator"
) %>%
  # We want to generate 128 coefficients to reshape into a 7x7x128 map
  layer_dense(7 * 7 * 128) %>%
  layer_activation_leaky_relu(alpha = 0.2) %>%
  layer_reshape(c(7, 7, 128)) %>%
  layer_conv_2d_transpose(128, c(4, 4), strides = c(2, 2), padding = "same") %>%
  layer_activation_leaky_relu(alpha = 0.2) %>%
  layer_conv_2d_transpose(128, c(4, 4), strides = c(2, 2), padding = "same") %>%
  layer_activation_leaky_relu(alpha = 0.2) %>%
  layer_conv_2d(1, c(7, 7), padding = "same", activation = "sigmoid")
Here’s the key bit: the training loop. As you can see, it is quite straightforward. The training step function takes only 17 lines.
# Instantiate one optimizer for the discriminator and another for the generator.
d_optimizer <- optimizer_adam(learning_rate = 0.0003)
g_optimizer <- optimizer_adam(learning_rate = 0.0004)

# Instantiate a loss function.
loss_fn <- loss_binary_crossentropy(from_logits = TRUE)

train_step <- function(real_images) {
  # Sample random points in the latent space
  random_latent_vectors <- tf$random$normal(shape = shape(batch_size, latent_dim))
  # Decode them to fake images
  generated_images <- generator(random_latent_vectors)
  # Combine them with real images
  combined_images <- tf$concat(list(generated_images, real_images), axis = 0L)

  # Assemble labels discriminating real from fake images
  labels <- tf$concat(list(
    tf$ones(shape(batch_size, 1)),
    tf$zeros(shape(real_images$shape[[1]], 1))),
    axis = 0L
  )
  # Add random noise to the labels - important trick!
  labels <- labels + 0.05 * tf$random$uniform(labels$shape)

  # Train the discriminator
  with(tf$GradientTape() %as% tape, {
    predictions <- discriminator(combined_images)
    d_loss <- loss_fn(labels, predictions)
  })
  grads <- tape$gradient(d_loss, discriminator$trainable_weights)
  d_optimizer$apply_gradients(zip_lists(grads, discriminator$trainable_weights))

  # Sample random points in the latent space
  random_latent_vectors <- tf$random$normal(shape = shape(batch_size, latent_dim))
  # Assemble labels that say "all real images"
  misleading_labels <- tf$zeros(shape(batch_size, 1))

  # Train the generator (note that we should *not* update the weights
  # of the discriminator)!
  with(tf$GradientTape() %as% tape, {
    predictions <- discriminator(generator(random_latent_vectors))
    g_loss <- loss_fn(misleading_labels, predictions)
  })
  grads <- tape$gradient(g_loss, generator$trainable_weights)
  g_optimizer$apply_gradients(zip_lists(grads, generator$trainable_weights))

  list(d_loss, g_loss, generated_images)
}
Let’s train our GAN by repeatedly calling train_step() on batches of images. Since our discriminator and generator are convnets, you’re going to want to run this code on a GPU.
# Prepare the dataset. We use both the training & test MNIST digits.
batch_size <- 64
c(c(x_train, y_train), c(x_test, y_test)) %<-% dataset_mnist()
x_train[] <- x_train / 255
x_test[] <- x_test / 255

dataset <- tensor_slices_dataset(x_train) %>%
  dataset_concatenate(tensor_slices_dataset(x_test)) %>%
  dataset_map(function(x) {
    tf$cast(tf$expand_dims(x, -1L), tf$float32)
  }) %>%
  dataset_shuffle(1024) %>%
  dataset_batch(batch_size)

epochs <- 1  # In practice you need at least 20 epochs to generate nice digits.
save_dir <- "./"

for (epoch in seq_len(epochs)) {
  cat("\nStart epoch ", epoch, "\n")
  tfautograph::autograph(for (real_images in dataset) {
    c(d_loss, g_loss, generated_images) %<-% train_step(real_images)
  })
}
Start epoch 1
generated_images[1, , , ] %>%
  image_array_save(path = "generated_img.png")
That’s it! You’ll get nice-looking fake MNIST digits after just ~30s of training on the Colab GPU.