# ============================================================
# MNIST Digit Classification using a Simple Neural Network (PyTorch)
# ============================================================
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np

# ------------------------------------------------------------
# 1. Device Configuration (GPU if available)
# ------------------------------------------------------------
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Using device:", device)

# ------------------------------------------------------------
# 2. Load MNIST Dataset
# ------------------------------------------------------------
transform = transforms.Compose([
    transforms.ToTensor(),                # PIL image -> float tensor in [0, 1]
    transforms.Normalize((0.5,), (0.5,))  # single channel -> roughly [-1, 1]
])

train_dataset = torchvision.datasets.MNIST(
    root='./data', train=True, transform=transform, download=True)
test_dataset = torchvision.datasets.MNIST(
    root='./data', train=False, transform=transform, download=True)

train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)


# ------------------------------------------------------------
# 3. Define Simple Neural Network
# ------------------------------------------------------------
class SimpleNN(nn.Module):
    """Fully-connected 784 -> 128 -> 64 -> 10 classifier for MNIST digits."""

    def __init__(self):
        super(SimpleNN, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 10)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = x.view(-1, 784)          # flatten image to a 784-vector per sample
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        x = self.fc3(x)              # raw logits: no softmax (CrossEntropyLoss applies it)
        return x


model = SimpleNN().to(device)
# ------------------------------------------------------------
# 4. Loss and Optimizer
# ------------------------------------------------------------
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# ------------------------------------------------------------
# 5. Training Loop
# ------------------------------------------------------------
num_epochs = 5
train_loss_list = []
train_acc_list = []

for epoch in range(num_epochs):
    total_loss = 0
    correct = 0
    total = 0
    for images, labels in train_loader:
        images, labels = images.to(device), labels.to(device)

        # forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # backward pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_loss += loss.item()

        # accuracy: predicted class = argmax over the 10 logits
        _, predicted = outputs.max(1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    avg_loss = total_loss / len(train_loader)
    accuracy = 100 * correct / total
    train_loss_list.append(avg_loss)
    train_acc_list.append(accuracy)
    print(f"Epoch [{epoch+1}/{num_epochs}] Loss: {avg_loss:.4f} Accuracy: {accuracy:.2f}%")

# ------------------------------------------------------------
# 6. Testing / Evaluation
# ------------------------------------------------------------
model.eval()
correct = 0
total = 0
with torch.no_grad():  # no gradients needed for evaluation
    for images, labels in test_loader:
        images, labels = images.to(device), labels.to(device)
        outputs = model(images)
        _, predicted = outputs.max(1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

test_accuracy = 100 * correct / total
print("\nTest Accuracy:", test_accuracy)
# ------------------------------------------------------------
# 7. Plot Accuracy and Loss Graphs
# ------------------------------------------------------------
plt.figure(figsize=(8, 4))
plt.plot(train_acc_list, label="Training Accuracy")
plt.title("Training Accuracy")
plt.xlabel("Epoch")
plt.ylabel("Accuracy (%)")
plt.grid()
plt.legend()
plt.show()

plt.figure(figsize=(8, 4))
plt.plot(train_loss_list, label="Training Loss")
plt.title("Training Loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.grid()
plt.legend()
plt.show()

# ------------------------------------------------------------
# 8. Show Predictions for First 5 Test Images
# ------------------------------------------------------------
data_iter = iter(test_loader)
images, labels = next(data_iter)
images, labels = images.to(device), labels.to(device)

outputs = model(images[:5])
_, preds = outputs.max(1)

print("\nPredicted:", preds.cpu().numpy())
print("Actual:   ", labels[:5].cpu().numpy())

# Plot the first 5 images
plt.figure(figsize=(10, 2))
for i in range(5):
    plt.subplot(1, 5, i + 1)
    plt.imshow(images[i].cpu().squeeze(), cmap="gray")
    plt.title(f"P:{preds[i].item()} / T:{labels[i].item()}")
    plt.axis("off")
plt.show()


# ============================================================
# IMG DATASET  (NOTE: fixed invalid C-style `//` comment marker)
# Natural-images classification with a small CNN
# ============================================================
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, random_split
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import os

# -----------------------------------------------------------
# 1. Device Configuration
# -----------------------------------------------------------
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Using device:", device)

# -----------------------------------------------------------
# 2. Dataset Path
# -----------------------------------------------------------
# NOTE(review): hard-coded Windows path — adjust for your machine.
dataset_dir = r"C:\Users\ADMIN\Downloads\archive\natural_images"
# -----------------------------------------------------------
# 3. Transformations (preprocessing + augmentation)
# -----------------------------------------------------------
transform = transforms.Compose([
    transforms.Resize((128, 128)),
    transforms.RandomHorizontalFlip(),  # augmentation (train-time only would be ideal)
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],   # ImageNet statistics
                         std=[0.229, 0.224, 0.225])
])

# Load the full dataset (one sub-folder per class)
full_dataset = datasets.ImageFolder(root=dataset_dir, transform=transform)

# Split dataset into training and validation (80%-20%)
train_size = int(0.8 * len(full_dataset))
val_size = len(full_dataset) - train_size
train_dataset, val_dataset = random_split(full_dataset, [train_size, val_size])

train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=32, shuffle=False)

num_classes = len(full_dataset.classes)
print("Classes:", full_dataset.classes)


# -----------------------------------------------------------
# 4. Define CNN Model
# -----------------------------------------------------------
class SimpleCNN(nn.Module):
    """Three conv/pool stages (3->32->64->128 channels) + 2-layer classifier head.

    Input is a 3x128x128 image; each MaxPool halves the spatial size,
    so the flattened feature map is 128 * 16 * 16.
    """

    def __init__(self, num_classes):
        super(SimpleCNN, self).__init__()
        self.conv_layers = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),   # 128 -> 64
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),   # 64 -> 32
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2)    # 32 -> 16
        )
        self.fc_layers = nn.Sequential(
            nn.Flatten(),
            nn.Linear(128 * 16 * 16, 256),
            nn.ReLU(),
            nn.Linear(256, num_classes)  # raw logits for CrossEntropyLoss
        )

    def forward(self, x):
        x = self.conv_layers(x)
        x = self.fc_layers(x)
        return x


model = SimpleCNN(num_classes=num_classes).to(device)

# -----------------------------------------------------------
# 5. Loss and Optimizer
# -----------------------------------------------------------
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
# -----------------------------------------------------------
# 6. Training Loop
# -----------------------------------------------------------
num_epochs = 10
train_loss_list, train_acc_list = [], []

for epoch in range(num_epochs):
    model.train()
    total_loss, correct, total = 0, 0, 0

    for images, labels in train_loader:
        images, labels = images.to(device), labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_loss += loss.item()
        _, predicted = outputs.max(1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    avg_loss = total_loss / len(train_loader)
    accuracy = 100 * correct / total
    train_loss_list.append(avg_loss)
    train_acc_list.append(accuracy)
    # BUGFIX: original f-string was truncated ("{accuracy" with no closing brace)
    print(f"Epoch [{epoch+1}/{num_epochs}] Loss: {avg_loss:.4f} Accuracy: {accuracy:.2f}%")

# -----------------------------------------------------------
# 7. Validation / Evaluation
# -----------------------------------------------------------
model.eval()
correct, total = 0, 0
with torch.no_grad():  # inference only — no gradient tracking
    for images, labels in val_loader:
        images, labels = images.to(device), labels.to(device)
        outputs = model(images)
        _, predicted = outputs.max(1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

val_accuracy = 100 * correct / total
print("\nValidation Accuracy:", val_accuracy)

# -----------------------------------------------------------
# 8. Plot Training Accuracy and Loss
# -----------------------------------------------------------
plt.figure(figsize=(8, 4))
plt.plot(train_acc_list, label="Training Accuracy")
plt.title("Training Accuracy")
plt.xlabel("Epoch")
plt.ylabel("Accuracy (%)")
plt.grid()
plt.legend()
plt.show()

plt.figure(figsize=(8, 4))
plt.plot(train_loss_list, label="Training Loss")
plt.title("Training Loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.grid()
plt.legend()
plt.show()
# -----------------------------------------------------------
# 9. Show Predictions for First 5 Validation Images
# -----------------------------------------------------------
data_iter = iter(val_loader)
images, labels = next(data_iter)
images, labels = images.to(device), labels.to(device)

outputs = model(images[:5])
_, preds = outputs.max(1)

print("\nPredicted:", [full_dataset.classes[i] for i in preds.cpu().numpy()])
print("Actual:   ", [full_dataset.classes[i] for i in labels[:5].cpu().numpy()])

plt.figure(figsize=(10, 2))
for i in range(5):
    plt.subplot(1, 5, i + 1)
    img = images[i].cpu().permute(1, 2, 0).numpy()  # CHW -> HWC for imshow
    # BUGFIX: un-normalize per channel (original applied a single mean/std
    # pair, 0.229/0.485, to all three RGB channels).
    img = img * np.array([0.229, 0.224, 0.225]) + np.array([0.485, 0.456, 0.406])
    img = np.clip(img, 0.0, 1.0)  # keep imshow in valid float range
    plt.imshow(img)
    # BUGFIX: original f-string was truncated ("classes[labels" unterminated)
    plt.title(f"P:{full_dataset.classes[preds[i]]}\nT:{full_dataset.classes[labels[i]]}")
    plt.axis("off")
plt.show()


# ============================================================
# IRIS DATASET  (NOTE: fixed invalid C-style `//` comment marker)
# ============================================================
import torch, torch.nn as nn, torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset, random_split
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt

device = 'cuda' if torch.cuda.is_available() else 'cpu'
print("Dataset: IRIS — built-in, no external files needed")

# ----- DATA -----
X, y = load_iris(return_X_y=True)
X = torch.tensor(X, dtype=torch.float32)
y = torch.tensor(y, dtype=torch.long)
X = (X - X.mean(0)) / X.std(0)  # standardize each of the 4 features

train_ds, test_ds = random_split(TensorDataset(X, y), [120, 30])
train_loader = DataLoader(train_ds, 16, shuffle=True)
test_loader = DataLoader(test_ds, 16)

# ----- MODEL -----
model = nn.Sequential(
    nn.Linear(4, 32), nn.ReLU(),
    nn.Linear(32, 3)
).to(device)

loss_fn = nn.CrossEntropyLoss()
opt = optim.Adam(model.parameters(), lr=0.01)

# ----- TRAIN -----
loss_hist, acc_hist = [], []
for _ in range(50):
    correct = total = running_loss = 0
    for x, y in train_loader:
        x, y = x.to(device), y.to(device)
        out = model(x)
        loss = loss_fn(out, y)
        opt.zero_grad(); loss.backward(); opt.step()
        running_loss += loss.item()
        correct += (out.argmax(1) == y).sum().item()
        total += y.size(0)
    loss_hist.append(running_loss / len(train_loader))
    acc_hist.append(100 * correct / total)

print(f"Train Accuracy: {acc_hist[-1]:.2f}%")
# ----- TEST -----
correct = 0
with torch.no_grad():  # evaluation only — no gradients
    for x, y in test_loader:
        x, y = x.to(device), y.to(device)
        correct += (model(x).argmax(1) == y).sum().item()
print(f"Test Accuracy: {100*correct/len(test_ds):.2f}%")

# ----- PLOT -----
plt.plot(loss_hist); plt.title("Loss"); plt.show()
plt.plot(acc_hist); plt.title("Accuracy"); plt.show()