TensorFlow 2 quickstart for experts

The Keras functional and subclassing APIs provide a define-by-run interface for customization and advanced research: build your model, write the forward and backward pass, and create custom layers, activations, and training loops.

Import TensorFlow into your program. If you haven't installed TensorFlow yet, see the installation guide.

library(tensorflow)
library(tfdatasets)
library(keras)
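If the packages or the underlying Python runtime are not set up yet, a minimal one-time setup might look like the following sketch (install_tensorflow() from the tensorflow package provisions a Python environment with TensorFlow; the exact setup varies by system):

# One-time setup sketch: install the R packages from CRAN,
# then provision a Python environment with the TensorFlow binaries.
install.packages(c("tensorflow", "tfdatasets", "keras"))
tensorflow::install_tensorflow()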
Load and prepare the MNIST dataset.
c(c(x_train, y_train), c(x_test, y_test)) %<-% keras::dataset_mnist()

x_train %<>% { . / 255 }
x_test %<>% { . / 255 }
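Here %<-% is the destructuring-assignment operator (from zeallot, re-exported by keras) and %<>% is magrittr's compound-assignment pipe. If you prefer plain assignments, an equivalent sketch is:

# Equivalent without the destructuring and compound-pipe operators
mnist <- keras::dataset_mnist()
x_train <- mnist$train$x / 255
y_train <- mnist$train$y
x_test <- mnist$test$x / 255
y_test <- mnist$test$y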
Use the tfdatasets package to batch and shuffle the dataset:
train_ds <- list(x_train, y_train) %>%
  tensor_slices_dataset() %>%
  dataset_shuffle(10000) %>%
  dataset_batch(32)

test_ds <- list(x_test, y_test) %>%
  tensor_slices_dataset() %>%
  dataset_batch(32)
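As an optional sanity check, you can pull a single batch and confirm its shapes; this sketch uses reticulate's iterator helpers:

# Peek at one batch from the pipeline
batch <- reticulate::iter_next(reticulate::as_iterator(train_ds))
batch[[1]]$shape  # images: (32, 28, 28)
batch[[2]]$shape  # labels: (32)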
Build a model using the Keras model subclassing API:
my_model <- new_model_class(
  classname = "MyModel",
  initialize = function(...) {
    super$initialize()
    self$conv1 <- layer_conv_2d(filters = 32, kernel_size = 3,
                                activation = 'relu')
    self$flatten <- layer_flatten()
    self$d1 <- layer_dense(units = 128, activation = 'relu')
    self$d2 <- layer_dense(units = 10)
  },
  call = function(inputs) {
    inputs %>%
      tf$expand_dims(3L) %>%
      self$conv1() %>%
      self$flatten() %>%
      self$d1() %>%
      self$d2()
  }
)
# Create an instance of the model
model <- my_model()
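A subclassed model builds its weights lazily on the first call. As an optional sketch (the variable names here are illustrative), run a couple of training images through the model to materialize the weights and check the output shape:

# Call the model once on a small batch to build its weights
dummy <- x_train[1:2, , ]   # two images of shape (28, 28)
logits <- model(dummy)      # output shape: (2, 10), one logit per class
logits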
Choose an optimizer and loss function for training:
loss_object <- loss_sparse_categorical_crossentropy(from_logits = TRUE)
optimizer <- optimizer_adam()
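The final dense layer returns raw logits (there is no softmax), which is why from_logits = TRUE is passed: the loss applies the softmax internally. A small worked example of the same computation in base R:

# Sparse categorical cross-entropy by hand for one example:
# loss = -log(softmax(z)[y + 1]) for logits z and 0-indexed label y
z <- c(0, 0, 5, 0, 0, 0, 0, 0, 0, 0)  # logits strongly favoring class 2
y <- 2L                                # true label
-log(exp(z[y + 1]) / sum(exp(z)))      # ~0.06: confident and correct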
Select metrics to measure the loss and the accuracy of the model. These metrics accumulate values over each epoch and then report the overall result.
train_loss <- metric_mean(name = "train_loss")
train_accuracy <- metric_sparse_categorical_accuracy(name = "train_accuracy")

test_loss <- metric_mean(name = "test_loss")
test_accuracy <- metric_sparse_categorical_accuracy(name = "test_accuracy")
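These metric objects are stateful: each call folds new values into a running aggregate, result() reads the aggregate, and reset_states() clears it. A minimal sketch with metric_mean():

m <- metric_mean()
m(2); m(4)        # accumulate two values
m$result()        # running mean: 3
m$reset_states()  # clear the state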
Use tf$GradientTape() to train the model:
train_step <- function(images, labels) {
  with(tf$GradientTape() %as% tape, {
    # training = TRUE is only needed if there are layers with different
    # behavior during training versus inference (e.g. Dropout).
    predictions <- model(images, training = TRUE)
    loss <- loss_object(labels, predictions)
  })
  gradients <- tape$gradient(loss, model$trainable_variables)
  optimizer$apply_gradients(zip_lists(gradients, model$trainable_variables))

  train_loss(loss)
  train_accuracy(labels, predictions)
}
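apply_gradients() expects a list of (gradient, variable) pairs; zip_lists() (re-exported by keras) pairs the two lists positionally, e.g.:

# zip_lists pairs elements by position:
zip_lists(list("g1", "g2"), list("v1", "v2"))
# ~> list(list("g1", "v1"), list("g2", "v2"))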
train <- tf_function(function(train_ds) {
  for (batch in train_ds) {
    c(images, labels) %<-% batch
    train_step(images, labels)
  }
})
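Wrapping the loop in tf_function() traces it once into a TensorFlow graph, so later calls bypass R-level dispatch. A standalone sketch of the same mechanism:

# A traced function runs as a compiled TF graph after the first call
square_fn <- tf_function(function(x) x * x)
square_fn(tf$constant(3))  # tf.Tensor: 9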
Test the model:
test_step <- function(images, labels) {
  # training = FALSE is only needed if there are layers with different
  # behavior during training versus inference (e.g. Dropout).
  predictions <- model(images, training = FALSE)
  t_loss <- loss_object(labels, predictions)

  test_loss(t_loss)
  test_accuracy(labels, predictions)
}
test <- tf_function(function(test_ds) {
  for (batch in test_ds) {
    c(images, labels) %<-% batch
    test_step(images, labels)
  }
})
reset_metrics <- function() {
  for (metric in list(train_loss, train_accuracy,
                      test_loss, test_accuracy))
    metric$reset_states()
}
EPOCHS <- 1

for (epoch in seq_len(EPOCHS)) {
  # Reset the metrics at the start of the next epoch
  reset_metrics()

  train(train_ds)
  test(test_ds)

  cat(sprintf('Epoch %d', epoch), "\n")
  cat(sprintf('Loss: %f', train_loss$result()), "\n")
  cat(sprintf('Accuracy: %f', train_accuracy$result() * 100), "\n")
  cat(sprintf('Test Loss: %f', test_loss$result()), "\n")
  cat(sprintf('Test Accuracy: %f', test_accuracy$result() * 100), "\n")
}
Epoch 1
Loss: 0.148424
Accuracy: 95.521667
Test Loss: 0.068080
Test Accuracy: 97.739998
The image classifier is now trained to ~98% accuracy on this dataset. To learn more, read the TensorFlow tutorials.
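To put the trained classifier to work, you can run a few test images through the model and take the arg-max over the logits; a minimal sketch (tf$argmax is 0-indexed, matching the MNIST labels):

# Predict classes for the first five test images
logits <- model(x_test[1:5, , ])
tf$argmax(logits, axis = 1L)  # predicted class per image
y_test[1:5]                   # true labels, for comparison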