#%% """ Credits: This code is adapted from the textbook "Deep Learning with Python", 2nd Edition, by François Chollet. """ #%% import tensorflow as tf from tensorflow import keras import matplotlib.pyplot as plt import numpy as np #%% mnist_dataset = tf.keras.datasets.mnist (training_set, test_set) = mnist_dataset.load_data() (training_images, training_labels) = training_set (test_images, test_labels) = test_set # Scale images to the [0, 1] range training_inputs = training_images / 255 test_inputs = test_images / 255 # Make sure images have shape (28, 28, 1) training_inputs = np.expand_dims(training_inputs, -1).astype('float32') test_inputs = np.expand_dims(test_inputs, -1) #%% index = np.random.randint(0, training_inputs.shape[0]) x = training_inputs[index] plt.axis("off") plt.imshow((x * 255).astype("int32"), cmap='gray') #%% from tensorflow.keras import layers input_shape = training_inputs[0].shape """ discriminator_v1 = keras.Sequential( [ keras.Input(shape=input_shape), layers.Conv2D(16, kernel_size=(4,4), strides=2, padding="same"), layers.LeakyReLU(alpha=0.2), layers.Conv2D(32, kernel_size=(4,4), strides=2, padding="same"), layers.LeakyReLU(alpha=0.2), # layers.Conv2D(32, kernel_size=4, strides=2, padding="same"), # layers.LeakyReLU(alpha=0.2), layers.Flatten(), layers.Dropout(0.2), layers.Dense(1, activation="sigmoid"), ], name="discriminator", ) discriminator_v1.summary() #%% latent_dim = 32 generator_v1 = keras.Sequential( [ keras.Input(shape=(latent_dim,)), layers.Dense(7 * 7 * 8), layers.Reshape((7, 7, 8)), layers.Conv2DTranspose(32, kernel_size=4, strides=2, padding="same"), layers.LeakyReLU(alpha=0.2), layers.Conv2DTranspose(64, kernel_size=4, strides=2, padding="same"), layers.LeakyReLU(alpha=0.2), # layers.Conv2DTranspose(128, kernel_size=4, strides=2, padding="same"), # layers.LeakyReLU(alpha=0.2), layers.Conv2D(1, kernel_size=5, padding="same", activation="sigmoid"), ], name="generator", ) generator_v1.summary() #%% """ discriminator_v2 = keras.Sequential( [ keras.Input(shape=input_shape), layers.Conv2D(32, kernel_size=(4,4), strides=2, padding="same"), layers.LeakyReLU(alpha=0.2), layers.Conv2D(64, kernel_size=(4,4), strides=2, padding="same"), layers.LeakyReLU(alpha=0.2), layers.Conv2D(64, kernel_size=4, strides=1, padding="same"), layers.LeakyReLU(alpha=0.2), layers.Flatten(), layers.Dropout(0.2), layers.Dense(1, activation="sigmoid"), ], name="discriminator", ) discriminator_v2.summary() #%% latent_dim = 64 generator_v2 = keras.Sequential( [ keras.Input(shape=(latent_dim,)), layers.Dense(7 * 7 * 8), layers.Reshape((7, 7, 8)), layers.Conv2DTranspose(64, kernel_size=4, strides=2, padding="same"), layers.LeakyReLU(alpha=0.2), layers.Conv2DTranspose(128, kernel_size=4, strides=2, padding="same"), layers.LeakyReLU(alpha=0.2), layers.Conv2DTranspose(128, kernel_size=4, strides=1, padding="same"), layers.LeakyReLU(alpha=0.2), layers.Conv2D(1, kernel_size=5, padding="same", activation="sigmoid"), ], name="generator", ) generator_v2.summary() #%% class GAN(keras.Model): def __init__(self, discriminator, generator, latent_dim): super().__init__() self.discriminator = discriminator self.generator = generator self.latent_dim = latent_dim self.d_loss_metric = keras.metrics.Mean(name="d_loss") self.g_loss_metric = keras.metrics.Mean(name="g_loss") def compile(self, d_optimizer, g_optimizer, loss_fn): super(GAN, self).compile() self.d_optimizer = d_optimizer self.g_optimizer = g_optimizer self.loss_fn = loss_fn @property def metrics(self): return [self.d_loss_metric, 
#%%
class GAN(keras.Model):
    def __init__(self, discriminator, generator, latent_dim):
        super().__init__()
        self.discriminator = discriminator
        self.generator = generator
        self.latent_dim = latent_dim
        self.d_loss_metric = keras.metrics.Mean(name="d_loss")
        self.g_loss_metric = keras.metrics.Mean(name="g_loss")

    def compile(self, d_optimizer, g_optimizer, loss_fn):
        super().compile()
        self.d_optimizer = d_optimizer
        self.g_optimizer = g_optimizer
        self.loss_fn = loss_fn

    @property
    def metrics(self):
        return [self.d_loss_metric, self.g_loss_metric]

    def train_step(self, real_images):
        batch_size = tf.shape(real_images)[0]

        # Train the discriminator: generated images are labeled 1,
        # real images are labeled 0.
        random_latent_vectors = tf.random.normal(
            shape=(batch_size, self.latent_dim))
        generated_images = self.generator(random_latent_vectors)
        combined_images = tf.concat([generated_images, real_images], axis=0)
        labels = tf.concat(
            [tf.ones((batch_size, 1)), tf.zeros((batch_size, 1))], axis=0
        )
        # Add random noise to the labels -- an important trick.
        labels += 0.05 * tf.random.uniform(tf.shape(labels))
        with tf.GradientTape() as tape:
            predictions = self.discriminator(combined_images)
            d_loss = self.loss_fn(labels, predictions)
        grads = tape.gradient(d_loss, self.discriminator.trainable_weights)
        self.d_optimizer.apply_gradients(
            zip(grads, self.discriminator.trainable_weights)
        )

        # Train the generator: try to get the discriminator to label
        # generated images as real (0).
        random_latent_vectors = tf.random.normal(
            shape=(batch_size, self.latent_dim))
        misleading_labels = tf.zeros((batch_size, 1))
        with tf.GradientTape() as tape:
            predictions = self.discriminator(
                self.generator(random_latent_vectors))
            g_loss = self.loss_fn(misleading_labels, predictions)
        grads = tape.gradient(g_loss, self.generator.trainable_weights)
        self.g_optimizer.apply_gradients(
            zip(grads, self.generator.trainable_weights))

        self.d_loss_metric.update_state(d_loss)
        self.g_loss_metric.update_state(g_loss)
        return {"d_loss": self.d_loss_metric.result(),
                "g_loss": self.g_loss_metric.result()}

#%%
class GANMonitor(keras.callbacks.Callback):
    """Saves a few generated images to mnist_results/ at the end of each epoch."""

    def __init__(self, num_img=3, latent_dim=128):
        super().__init__()
        self.num_img = num_img
        self.latent_dim = latent_dim

    def on_epoch_end(self, epoch, logs=None):
        random_latent_vectors = tf.random.normal(
            shape=(self.num_img, self.latent_dim))
        generated_images = self.model.generator(random_latent_vectors)
        generated_images *= 255
        generated_images = generated_images.numpy()
        os.makedirs("mnist_results", exist_ok=True)
        for i in range(self.num_img):
            img = keras.utils.array_to_img(generated_images[i])
            img.save(f"mnist_results/generated_img_{epoch:03d}_{i}.png")

#%%
training_size = training_inputs.shape[0]
# new_labels = np.zeros((training_size, 1, 1, 1))
epochs = 100

# train_dataset = tf.data.Dataset.from_tensor_slices((training_inputs, new_labels))
train_dataset = tf.data.Dataset.from_tensor_slices(training_inputs).batch(32)

#%%
gan = GAN(discriminator=discriminator_v2, generator=generator_v2,
          latent_dim=latent_dim)
gan.compile(
    d_optimizer=keras.optimizers.Adam(learning_rate=0.0001),
    g_optimizer=keras.optimizers.Adam(learning_rate=0.0001),
    loss_fn=keras.losses.BinaryCrossentropy(),
)
gan.fit(train_dataset,
        epochs=epochs,
        callbacks=[GANMonitor(num_img=10, latent_dim=latent_dim)])

#%%
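# A minimal post-training sketch (not part of the original script): sample
# fresh latent vectors and plot a grid of generated digits with matplotlib.
# The 4x4 grid size is an arbitrary choice; the generator's sigmoid output
# is already in [0, 1], which imshow displays directly with a gray colormap.
n_rows, n_cols = 4, 4
noise = tf.random.normal(shape=(n_rows * n_cols, latent_dim))
samples = gan.generator(noise).numpy()

fig, axes = plt.subplots(n_rows, n_cols, figsize=(6, 6))
for ax, sample in zip(axes.flat, samples):
    ax.imshow(sample.squeeze(), cmap="gray")
    ax.axis("off")
plt.tight_layout()
plt.show()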