# ============================================================
# MNIST Digit Classification using a Simple Neural Network (PyTorch)
# ============================================================
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np

# ------------------------------------------------------------
# 1. Device Configuration (GPU if available)
# ------------------------------------------------------------
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Using device:", device)

# ------------------------------------------------------------
# 2. Load MNIST Dataset
# ------------------------------------------------------------
transform = transforms.Compose([
    transforms.ToTensor(),                 # convert to tensor
    transforms.Normalize((0.5,), (0.5,))   # normalize
])

train_dataset = torchvision.datasets.MNIST(
    root='./data', train=True, transform=transform, download=True)
test_dataset = torchvision.datasets.MNIST(
    root='./data', train=False, transform=transform, download=True)

train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)

# ------------------------------------------------------------
# 3. Define Simple Neural Network
# ------------------------------------------------------------
class SimpleNN(nn.Module):
    def __init__(self):
        super(SimpleNN, self).__init__()
        self.fc1 = nn.Linear(28*28, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 10)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = x.view(-1, 784)           # flatten image
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        x = self.fc3(x)               # no softmax (CrossEntropyLoss handles it)
        return x

model = SimpleNN().to(device)

# ------------------------------------------------------------
# 4. Loss and Optimizer
# ------------------------------------------------------------
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# ------------------------------------------------------------
# 5. Training Loop
# ------------------------------------------------------------
num_epochs = 5
train_loss_list = []
train_acc_list = []

for epoch in range(num_epochs):
    total_loss = 0
    correct = 0
    total = 0

    for images, labels in train_loader:
        images, labels = images.to(device), labels.to(device)

        # forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # backward pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_loss += loss.item()

        # accuracy
        _, predicted = outputs.max(1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    avg_loss = total_loss / len(train_loader)
    accuracy = 100 * correct / total
    train_loss_list.append(avg_loss)
    train_acc_list.append(accuracy)

    print(f"Epoch [{epoch+1}/{num_epochs}] Loss: {avg_loss:.4f} Accuracy: {accuracy:.2f}%")

# ------------------------------------------------------------
# 6. Testing / Evaluation
# ------------------------------------------------------------
model.eval()
correct = 0
total = 0

with torch.no_grad():
    for images, labels in test_loader:
        images, labels = images.to(device), labels.to(device)
        outputs = model(images)
        _, predicted = outputs.max(1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

test_accuracy = 100 * correct / total
print("\nTest Accuracy:", test_accuracy)
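# ------------------------------------------------------------
# Optional: per-digit test accuracy (a minimal sketch, not part of the
# original steps 1-8; it only reuses model, device and test_loader defined
# above, so it can be skipped without affecting the rest of the script)
# ------------------------------------------------------------
class_correct = [0] * 10
class_total = [0] * 10
with torch.no_grad():
    for images, labels in test_loader:
        images, labels = images.to(device), labels.to(device)
        preds = model(images).argmax(1)
        for label, pred in zip(labels, preds):
            class_total[label.item()] += 1
            class_correct[label.item()] += int(label.item() == pred.item())
for digit in range(10):
    print(f"Digit {digit}: {100 * class_correct[digit] / class_total[digit]:.2f}% "
          f"({class_correct[digit]}/{class_total[digit]})")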
# ------------------------------------------------------------
# 7. Plot Accuracy and Loss Graphs
# ------------------------------------------------------------
plt.figure(figsize=(8, 4))
plt.plot(train_acc_list, label="Training Accuracy")
plt.title("Training Accuracy")
plt.xlabel("Epoch")
plt.ylabel("Accuracy (%)")
plt.grid()
plt.legend()
plt.show()

plt.figure(figsize=(8, 4))
plt.plot(train_loss_list, label="Training Loss")
plt.title("Training Loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.grid()
plt.legend()
plt.show()

# ------------------------------------------------------------
# 8. Show Predictions for First 5 Test Images
# ------------------------------------------------------------
data_iter = iter(test_loader)
images, labels = next(data_iter)
images, labels = images.to(device), labels.to(device)

outputs = model(images[:5])
_, preds = outputs.max(1)

print("\nPredicted:", preds.cpu().numpy())
print("Actual:   ", labels[:5].cpu().numpy())

# Plot the first 5 images
plt.figure(figsize=(10, 2))
for i in range(5):
    plt.subplot(1, 5, i + 1)
    plt.imshow(images[i].cpu().squeeze(), cmap="gray")
    plt.title(f"P:{preds[i].item()} / T:{labels[i].item()}")
    plt.axis("off")
plt.show()


# ============================================================
# Image Dataset Classification using a Simple CNN (PyTorch)
# ============================================================
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, random_split
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import numpy as np   # Added for image visualization
import os

# -----------------------------------------------------------
# 1. Device Configuration
# -----------------------------------------------------------
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Using device:", device)

# -----------------------------------------------------------
# 2. Dataset Path
# -----------------------------------------------------------
# UPDATE THIS PATH to point to your actual dataset folder
dataset_dir = r"C:\Users\LOMESH\Downloads\archive\natural_images"

# Check if path exists to avoid confusing errors later
if not os.path.exists(dataset_dir):
    print(f"ERROR: The path '{dataset_dir}' does not exist.")
    print("Please update the 'dataset_dir' variable.")
    raise SystemExit(1)   # stop here instead of nesting the rest of the script in an else block

# -----------------------------------------------------------
# 3. Transformations (preprocessing + augmentation)
# -----------------------------------------------------------
transform = transforms.Compose([
    transforms.Resize((128, 128)),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])

# Load the full dataset
full_dataset = datasets.ImageFolder(root=dataset_dir, transform=transform)

# Split dataset into training and validation (80%-20%)
train_size = int(0.8 * len(full_dataset))
val_size = len(full_dataset) - train_size
train_dataset, val_dataset = random_split(full_dataset, [train_size, val_size])

train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=32, shuffle=False)

num_classes = len(full_dataset.classes)
print("Classes:", full_dataset.classes)
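# -----------------------------------------------------------
# Optional sanity check (a minimal sketch, not part of the original steps):
# confirm the tensor shape each batch will feed into the CNN defined below.
# With Resize((128, 128)) and batch_size=32 this should print
# torch.Size([32, 3, 128, 128]); the last batch may be smaller.
# -----------------------------------------------------------
sample_images, sample_labels = next(iter(train_loader))
print("Batch image shape:", sample_images.shape)
print("Batch label shape:", sample_labels.shape)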
# -----------------------------------------------------------
# 4. Define CNN Model
# -----------------------------------------------------------
class SimpleCNN(nn.Module):
    def __init__(self, num_classes):
        super(SimpleCNN, self).__init__()
        self.conv_layers = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),

            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),

            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2)
        )
        # Calculate flattened size: 128 -> 64 -> 32 -> 16 (after 3 pools)
        # 128 channels * 16 * 16 spatial size
        self.fc_layers = nn.Sequential(
            nn.Flatten(),
            nn.Linear(128 * 16 * 16, 256),
            nn.ReLU(),
            nn.Linear(256, num_classes)
        )

    def forward(self, x):
        x = self.conv_layers(x)
        x = self.fc_layers(x)
        return x

model = SimpleCNN(num_classes=num_classes).to(device)

# -----------------------------------------------------------
# 5. Loss and Optimizer
# -----------------------------------------------------------
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# -----------------------------------------------------------
# 6. Training Loop
# -----------------------------------------------------------
num_epochs = 10
train_loss_list, train_acc_list = [], []

print("Starting Training...")
for epoch in range(num_epochs):
    model.train()
    total_loss, correct, total = 0, 0, 0

    for images, labels in train_loader:
        images, labels = images.to(device), labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_loss += loss.item()
        _, predicted = outputs.max(1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    avg_loss = total_loss / len(train_loader)
    accuracy = 100 * correct / total
    train_loss_list.append(avg_loss)
    train_acc_list.append(accuracy)

    print(f"Epoch [{epoch+1}/{num_epochs}] Loss: {avg_loss:.4f} Accuracy: {accuracy:.2f}%")

# -----------------------------------------------------------
# 7. Validation / Evaluation
# -----------------------------------------------------------
model.eval()
correct, total = 0, 0

with torch.no_grad():
    for images, labels in val_loader:
        images, labels = images.to(device), labels.to(device)
        outputs = model(images)
        _, predicted = outputs.max(1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

val_accuracy = 100 * correct / total
print(f"\nValidation Accuracy: {val_accuracy:.2f}%")

# -----------------------------------------------------------
# 8. Plot Training Accuracy and Loss
# -----------------------------------------------------------
plt.figure(figsize=(10, 4))

# Plot Accuracy
plt.subplot(1, 2, 1)
plt.plot(train_acc_list, label="Training Accuracy", color='blue')
plt.title("Training Accuracy")
plt.xlabel("Epoch")
plt.ylabel("Accuracy (%)")
plt.grid()
plt.legend()

# Plot Loss
plt.subplot(1, 2, 2)
plt.plot(train_loss_list, label="Training Loss", color='red')
plt.title("Training Loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.grid()
plt.legend()

plt.tight_layout()
plt.show()
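# -----------------------------------------------------------
# Optional check (a minimal sketch, not part of the original steps): verify
# the "128 * 16 * 16" flattened size assumed in section 4 by pushing a dummy
# batch through the convolutional part of the model only.
# -----------------------------------------------------------
with torch.no_grad():
    dummy = torch.zeros(1, 3, 128, 128, device=device)
    feat = model.conv_layers(dummy)
    print("Conv output shape:", feat.shape)      # expected: [1, 128, 16, 16]
    print("Flattened features:", feat.numel())   # expected: 128 * 16 * 16 = 32768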
# -----------------------------------------------------------
# 9. Show Predictions for First 5 Validation Images
# -----------------------------------------------------------
data_iter = iter(val_loader)
images, labels = next(data_iter)
images, labels = images.to(device), labels.to(device)

# Get predictions
outputs = model(images[:5])
_, preds = outputs.max(1)

print("\nPredicted:", [full_dataset.classes[i] for i in preds.cpu().numpy()])
print("Actual:   ", [full_dataset.classes[i] for i in labels[:5].cpu().numpy()])

plt.figure(figsize=(12, 3))
for i in range(5):
    plt.subplot(1, 5, i + 1)
    # Un-normalize image for display
    img = images[i].cpu().permute(1, 2, 0).numpy()
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    img = std * img + mean
    img = np.clip(img, 0, 1)   # Clip values to be between 0 and 1
    plt.imshow(img)
    plt.title(f"P:{full_dataset.classes[preds[i]]}\nT:{full_dataset.classes[labels[i]]}", fontsize=10)
    plt.axis("off")
plt.show()


# ============================================================
# Iris Dataset Classification using a Simple Neural Network (PyTorch)
# ============================================================
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset, random_split
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt

# ---------------------------------------------------------
# NOTE: If this crashes with "ImportError: numpy.core...",
# run this in a separate cell first: !pip install "numpy<2"
# ---------------------------------------------------------
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f"Using device: {device}")

# ----- DATA -----
# Load data
X, y = load_iris(return_X_y=True)

# Convert to PyTorch tensors
X = torch.tensor(X, dtype=torch.float32)
y = torch.tensor(y, dtype=torch.long)

# Normalize (Standardization)
# Note: In real projects, calculate mean/std on train_ds only to avoid leakage
X = (X - X.mean(0)) / X.std(0)

# Split 120 train / 30 test
train_ds, test_ds = random_split(TensorDataset(X, y), [120, 30])
train_loader = DataLoader(train_ds, batch_size=16, shuffle=True)
test_loader = DataLoader(test_ds, batch_size=16)

# ----- MODEL -----
model = nn.Sequential(
    nn.Linear(4, 32),
    nn.ReLU(),
    nn.Linear(32, 3)
).to(device)

loss_fn = nn.CrossEntropyLoss()
opt = optim.Adam(model.parameters(), lr=0.01)

# ----- TRAIN -----
loss_hist, acc_hist = [], []

print("Starting training...")
for epoch in range(50):
    correct = 0
    total = 0
    running_loss = 0.0

    for x, y_batch in train_loader:
        x, y_batch = x.to(device), y_batch.to(device)

        # Forward
        out = model(x)
        loss = loss_fn(out, y_batch)

        # Backward
        opt.zero_grad()
        loss.backward()
        opt.step()

        # Metrics
        running_loss += loss.item()
        correct += (out.argmax(1) == y_batch).sum().item()
        total += y_batch.size(0)

    # Store epoch metrics
    epoch_loss = running_loss / len(train_loader)
    epoch_acc = 100 * correct / total
    loss_hist.append(epoch_loss)
    acc_hist.append(epoch_acc)

print(f"Final Train Accuracy: {acc_hist[-1]:.2f}%")

# ----- TEST -----
model.eval()   # Good practice to set eval mode
correct = 0
total = 0
with torch.no_grad():
    for x, y_batch in test_loader:
        x, y_batch = x.to(device), y_batch.to(device)
        correct += (model(x).argmax(1) == y_batch).sum().item()
        total += y_batch.size(0)

print(f"Test Accuracy: {100 * correct / total:.2f}%")

# ----- PLOT -----
plt.figure(figsize=(10, 4))

plt.subplot(1, 2, 1)
plt.plot(loss_hist)
plt.title("Loss")
plt.xlabel("Epoch")

plt.subplot(1, 2, 2)
plt.plot(acc_hist)
plt.title("Accuracy")
plt.xlabel("Epoch")

plt.show()
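# ---------------------------------------------------------
# Optional (a minimal sketch, not part of the original script): one way to
# apply the "calculate mean/std on train_ds only to avoid leakage" note above.
# Split indices first, then standardize with statistics from the training rows.
# Variable names (X_raw, train_idx, ...) are chosen for illustration only.
# ---------------------------------------------------------
X_raw, y_raw = load_iris(return_X_y=True)
X_raw = torch.tensor(X_raw, dtype=torch.float32)
y_raw = torch.tensor(y_raw, dtype=torch.long)

g = torch.Generator().manual_seed(0)             # fixed seed => reproducible split
perm = torch.randperm(len(X_raw), generator=g)
train_idx, test_idx = perm[:120], perm[120:]

mean = X_raw[train_idx].mean(0)                  # statistics from the 120 training rows only
std = X_raw[train_idx].std(0)
X_train_std = (X_raw[train_idx] - mean) / std
X_test_std = (X_raw[test_idx] - mean) / std      # reuse the same train statistics for test
print("Standardized shapes:", X_train_std.shape, X_test_std.shape)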