import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
# Load the Reuters dataset
num_words = 10000 # Use the top 10,000 most common words
(train_data, train_labels), (test_data, test_labels) = keras.datasets.reuters.load_data(num_words=num_words)
# Decode a sample newswire back into text (optional: just for understanding)
word_index = keras.datasets.reuters.get_word_index()
reverse_word_index = {value: key for key, value in word_index.items()}
decoded_newswire = " ".join([reverse_word_index.get(i - 3, "?") for i in train_data[0]])
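# The offset of 3 is because indices 0, 1, and 2 are reserved for
# "padding", "start of sequence", and "unknown" in the Keras dataset encoding.
print(decoded_newswire[:100])  # peek at the start of the decoded text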
# Convert sequences of word indices into multi-hot encoded vectors
def vectorize_sequences(sequences, dimension=num_words):
    results = np.zeros((len(sequences), dimension))
    for i, sequence in enumerate(sequences):
        results[i, sequence] = 1.0  # set every word index present in the sequence to 1
    return results
x_train = vectorize_sequences(train_data)
x_test = vectorize_sequences(test_data)
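# Sanity check (shapes below assume the default Reuters train/test split):
print(x_train.shape)  # expected: (8982, 10000)
print(x_test.shape)   # expected: (2246, 10000)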
# One-hot encode labels (Categorical encoding)
y_train = keras.utils.to_categorical(train_labels)
y_test = keras.utils.to_categorical(test_labels)
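# Alternative sketch: keep the integer labels and use the sparse variant of the
# loss instead of one-hot encoding them; the two formulations are equivalent here:
#   model.compile(optimizer="adam",
#                 loss="sparse_categorical_crossentropy",
#                 metrics=["accuracy"])
#   model.fit(x_train, train_labels, ...)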
# Define the Neural Network Model
model = keras.Sequential([
    keras.Input(shape=(num_words,)),  # explicit Input layer (passing input_shape to Dense is deprecated in Keras 3)
    layers.Dense(64, activation="relu"),
    layers.Dense(64, activation="relu"),
    layers.Dense(46, activation="softmax")  # 46 output classes (Reuters topics)
])
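# Optional: print a layer-by-layer overview of the network
model.summary()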
# Compile the model
model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              metrics=["accuracy"])
# Train the model (using the test set as validation data is for demonstration only;
# in practice, hold out a validation split from the training data instead)
history = model.fit(x_train, y_train, epochs=10, batch_size=512, validation_data=(x_test, y_test))
# Evaluate the model
test_loss, test_acc = model.evaluate(x_test, y_test)
print(f"Test Accuracy: {test_acc:.4f}")
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/reuters.npz
2110848/2110848 ━━━━━━━━━━━━━━━━━━━━ 1s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/reuters_word_index.json
550378/550378 ━━━━━━━━━━━━━━━━━━━━ 0s 1us/step
Epoch 1/10
18/18 ━━━━━━━━━━━━━━━━━━━━ 3s 90ms/step - accuracy: 0.3072 - loss: 3.5538 - val_accuracy: 0.5289 - val_loss: 2.4046
Epoch 2/10
18/18 ━━━━━━━━━━━━━━━━━━━━ 2s 70ms/step - accuracy: 0.5608 - loss: 2.0756 - val_accuracy: 0.6723 - val_loss: 1.5617
Epoch 3/10
18/18 ━━━━━━━━━━━━━━━━━━━━ 2s 56ms/step - accuracy: 0.7239 - loss: 1.3044 - val_accuracy: 0.7155 - val_loss: 1.2853
Epoch 4/10
18/18 ━━━━━━━━━━━━━━━━━━━━ 1s 60ms/step - accuracy: 0.7872 - loss: 0.9904 - val_accuracy: 0.7516 - val_loss: 1.1347
Epoch 5/10
18/18 ━━━━━━━━━━━━━━━━━━━━ 1s 58ms/step - accuracy: 0.8316 - loss: 0.7871 - val_accuracy: 0.7738 - val_loss: 1.0291
Epoch 6/10
18/18 ━━━━━━━━━━━━━━━━━━━━ 1s 62ms/step - accuracy: 0.8737 - loss: 0.5986 - val_accuracy: 0.7876 - val_loss: 0.9594
Epoch 7/10
18/18 ━━━━━━━━━━━━━━━━━━━━ 2s 96ms/step - accuracy: 0.9051 - loss: 0.4452 - val_accuracy: 0.7916 - val_loss: 0.9251
Epoch 8/10
18/18 ━━━━━━━━━━━━━━━━━━━━ 2s 108ms/step - accuracy: 0.9251 - loss: 0.3510 - val_accuracy: 0.7916 - val_loss: 0.9153
Epoch 9/10
18/18 ━━━━━━━━━━━━━━━━━━━━ 2s 63ms/step - accuracy: 0.9401 - loss: 0.2726 - val_accuracy: 0.7979 - val_loss: 0.9212
Epoch 10/10
18/18 ━━━━━━━━━━━━━━━━━━━━ 1s 56ms/step - accuracy: 0.9504 - loss: 0.2224 - val_accuracy: 0.7961 - val_loss: 0.9372
71/71 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - accuracy: 0.8033 - loss: 0.9080
Test Accuracy: 0.7961