Recurrent Neural Networks (RNNs) are suited to sequential data because they maintain a hidden state that carries information from one time step to the next, while Generative Adversarial Networks (GANs) learn to generate new samples that resemble the training data by pitting a generator against a discriminator.
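To make the recurrence concrete, here is a minimal NumPy sketch of the update a simple RNN cell performs at each time step. The weights Wx, Wh and bias b are hypothetical, randomly initialized purely for illustration; Keras's SimpleRNN uses tanh by default, though the model below swaps in ReLU.
import numpy as np

# Hypothetical dimensions, chosen to match the example that follows
input_dim, hidden_dim, seq_length = 1, 50, 10

rng = np.random.default_rng(0)
Wx = rng.standard_normal((input_dim, hidden_dim)) * 0.1   # input-to-hidden weights
Wh = rng.standard_normal((hidden_dim, hidden_dim)) * 0.1  # hidden-to-hidden weights
b = np.zeros(hidden_dim)                                  # bias

x = rng.standard_normal((seq_length, input_dim))  # one input sequence
h = np.zeros(hidden_dim)                          # initial hidden state

# Each step mixes the current input with the previous hidden state:
# h_t = tanh(x_t @ Wx + h_{t-1} @ Wh + b)
for t in range(seq_length):
    h = np.tanh(x[t] @ Wx + h @ Wh + b)

print(h.shape)  # (50,) -- the final hidden state summarizes the whole sequence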
Example: Recurrent Neural Network (RNN) for Time-Series Prediction
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, Dense
# Generate toy data: each target is the sum of its input sequence
def generate_data(seq_length, num_samples):
    X = np.random.rand(num_samples, seq_length, 1)
    y = np.sum(X, axis=1)
    return X, y
seq_length = 10
num_samples = 1000
X, y = generate_data(seq_length, num_samples)
# Split the data into training and test sets
train_size = int(0.8 * num_samples)
X_train, X_test = X[:train_size], X[train_size:]
y_train, y_test = y[:train_size], y[train_size:]
# Build the RNN model
model = Sequential([
    SimpleRNN(50, activation='relu', input_shape=(seq_length, 1)),
    Dense(1)
])
# Compile and train the model
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(X_train, y_train, epochs=10, batch_size=32, validation_data=(X_test, y_test))
# Evaluate the model
loss = model.evaluate(X_test, y_test)
print(f'Test Loss: {loss}')
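Once trained, the model can be used for inference on unseen sequences. A quick sanity check, reusing the generate_data helper defined above (a sketch, not part of the original pipeline):
# Predict the sum of a fresh random sequence and compare to the true sum
X_new, y_new = generate_data(seq_length, 1)
y_pred = model.predict(X_new, verbose=0)
print(f'Predicted: {y_pred[0, 0]:.3f}, actual: {y_new[0, 0]:.3f}')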
Example: Generative Adversarial Network (GAN) for Generating Simple Images
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LeakyReLU, Reshape, Flatten
from tensorflow.keras.optimizers import Adam
import numpy as np
import matplotlib.pyplot as plt
# Define the generator: maps a 100-dimensional noise vector to a 28x28 image
def build_generator():
    model = Sequential([
        Dense(128, input_dim=100),
        LeakyReLU(alpha=0.01),
        Dense(784, activation='tanh'),
        Reshape((28, 28))
    ])
    return model
# Define the discriminator: classifies 28x28 images as real or fake
def build_discriminator():
    model = Sequential([
        Flatten(input_shape=(28, 28)),
        Dense(128),
        LeakyReLU(alpha=0.01),
        Dense(1, activation='sigmoid')
    ])
    return model
# Stack the generator and discriminator into the combined GAN model
def build_gan(generator, discriminator):
    model = Sequential([generator, discriminator])
    return model
# Hyperparameters
epochs = 10000
batch_size = 64
sample_interval = 1000
# Load and preprocess the MNIST data; the generator outputs 28x28 arrays,
# so the real images are kept in that same shape
(X_train, _), (_, _) = tf.keras.datasets.mnist.load_data()
X_train = (X_train / 127.5) - 1.0  # Normalize to [-1, 1] to match the tanh output
# Build and compile the models
optimizer = Adam(0.0002, 0.5)
discriminator = build_discriminator()
discriminator.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
generator = build_generator()
discriminator.trainable = False  # Freeze the discriminator inside the combined model so only the generator is updated
gan = build_gan(generator, discriminator)
gan.compile(loss='binary_crossentropy', optimizer=optimizer)
# Train the GAN
real = np.ones((batch_size, 1))   # labels for real images
fake = np.zeros((batch_size, 1))  # labels for generated images
for epoch in range(epochs):
    # Train the discriminator on a half-real, half-fake batch
    idx = np.random.randint(0, X_train.shape[0], batch_size)
    imgs = X_train[idx]
    noise = np.random.normal(0, 1, (batch_size, 100))
    gen_imgs = generator.predict(noise, verbose=0)
    d_loss_real = discriminator.train_on_batch(imgs, real)
    d_loss_fake = discriminator.train_on_batch(gen_imgs, fake)
    d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
    # Train the generator: push the (frozen) discriminator to label fakes as real
    noise = np.random.normal(0, 1, (batch_size, 100))
    g_loss = gan.train_on_batch(noise, real)
    # Report progress
    if epoch % sample_interval == 0:
        print(f"{epoch} [D loss: {d_loss[0]}, acc.: {100 * d_loss[1]}] [G loss: {g_loss}]")
# Generate sample images with the trained generator
noise = np.random.normal(0, 1, (25, 100))
gen_imgs = generator.predict(noise, verbose=0)
gen_imgs = 0.5 * gen_imgs + 0.5  # Rescale from [-1, 1] back to [0, 1] for display
fig, axs = plt.subplots(5, 5)
cnt = 0
for i in range(5):
    for j in range(5):
        axs[i, j].imshow(gen_imgs[cnt, :, :], cmap='gray')
        axs[i, j].axis('off')
        cnt += 1
plt.show()
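If the results are worth keeping, the trained generator can be saved on its own and reloaded later to sample more images; the discriminator is only needed during training. A short sketch (the file name generator.keras is an arbitrary choice):
# Save only the generator
generator.save('generator.keras')

# Later: reload it and sample a single image from fresh noise
loaded = tf.keras.models.load_model('generator.keras')
img = loaded.predict(np.random.normal(0, 1, (1, 100)), verbose=0)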