ASSIGNMENT 1:
ACTIVATION FUNCTIONS IN NEURAL NETWORKS:
import numpy as np
import matplotlib.pyplot as plt

# Input values
x = np.linspace(-10, 10, 100)

# Activation functions
sigmoid = 1 / (1 + np.exp(-x))
tanh = np.tanh(x)
relu = np.maximum(0, x)
linear = x

# Plot all in one graph
plt.figure(figsize=(8,6))

plt.plot(x, sigmoid, label="Sigmoid")
plt.plot(x, tanh, label="Tanh")
plt.plot(x, relu, label="ReLU")
plt.plot(x, linear, label="Linear")

# Labels and title
plt.title("Activation Functions in Neural Networks")
plt.xlabel("Input")
plt.ylabel("Output")
plt.legend()
plt.grid()

plt.show()
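
The saturating shapes above matter because their derivatives drive weight updates during training. A small follow-up sketch (not part of the original assignment, reusing x and the arrays above) plots the gradients: sigmoid and tanh flatten toward zero for large |x| (the vanishing-gradient problem), while ReLU stays at 1 for all positive inputs.

# Derivatives of the activation functions (reusing x and the arrays above)
sigmoid_grad = sigmoid * (1 - sigmoid)   # sigma'(x) = sigma(x)(1 - sigma(x))
tanh_grad = 1 - tanh**2                  # tanh'(x) = 1 - tanh(x)^2
relu_grad = (x > 0).astype(float)        # 1 for x > 0, else 0

plt.figure(figsize=(8,6))
plt.plot(x, sigmoid_grad, label="Sigmoid'")
plt.plot(x, tanh_grad, label="Tanh'")
plt.plot(x, relu_grad, label="ReLU'")
plt.title("Derivatives of Activation Functions")
plt.xlabel("Input")
plt.ylabel("Gradient")
plt.legend()
plt.grid()
plt.show()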



OR (alternative version):


import numpy as np
import matplotlib.pyplot as plt

def sigmoid(temp_arr):
    return 1 / (1 + np.exp(-temp_arr))

arr = np.linspace(-8, 8)
plt.plot(arr, sigmoid(arr))
plt.axis('tight')
plt.title('Activation Function : Sigmoid')
# only for grid
plt.grid()
plt.show()
print("\nName : Rushikesh Dilip Shirsath \nRoll No : 83")


def tanh(temp_arr):
    return np.tanh(temp_arr)

x = np.linspace(-10, 10)
plt.plot(x, tanh(x))
plt.axis('tight')
plt.title('Activation Function : Tanh')
plt.grid()
plt.show()
print("\nName : Rushikesh Dilip Shirsath \nRoll No : 83")










ASSIGNMENT 2:
MCCULLOCH-PITTS MODEL (AND-NOT FUNCTION):

# McCulloch-Pitts AND-NOT implementation

# Inputs (truth table)
inputs = [(0, 0), (0, 1), (1, 0), (1, 1)]

# Weights
w1 = 1
w2 = -1

# Threshold
theta = 1

# Neuron function
def mc_culloch_pitts(x1, x2):
    summation = x1*w1 + x2*w2
    if summation >= theta:
        return 1
    else:
        return 0

# Output
print("X1 X2 Output")
for x1, x2 in inputs:
    y = mc_culloch_pitts(x1, x2)
    print(x1, x2, " ", y)
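
A quick sanity check (added as a sketch, not part of the original assignment): the neuron should agree with the boolean expression x1 AND (NOT x2) on every row of the truth table.

# Sanity check against the boolean expression x1 AND (NOT x2)
for x1, x2 in inputs:
    expected = int(x1 == 1 and x2 == 0)
    assert mc_culloch_pitts(x1, x2) == expected
print("All rows match x1 AND (NOT x2)")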




OR (alternative version):

import numpy as np
def mp_neuron(inputs, weights, threshold):
    # Compute the dot product of the inputs and weights
    net = np.dot(inputs, weights)
    # Apply the threshold function
    if net >= threshold:
        output = 1
    else:
        output = 0
    return output
def andnot_nn(inputs):
    threshold = 0
    # Two first-layer neurons with fixed weights [-1, 1] and [1, -1]
    outputs = [mp_neuron(inputs, weights, threshold) for weights in [[-1, 1], [1, -1]]]
    # Final output fires only when the first neuron fires and the second does not,
    # so this version computes (NOT x1) AND x2
    if outputs[0] == 1 and outputs[1] == 0:
        output = 1
    else:
        output = 0
    return output
inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
outputs = []
for i in range(inputs.shape[0]):
    y = andnot_nn(inputs[i].reshape(1, -1))
    outputs.append(y)
print("Inputs:", inputs)
print("Outputs:", outputs)

print("\nName : Rushikesh dilip Shirsath \nRoll No : 83")










ASSIGNMENT 3:
PERCEPTRON FOR EVEN/ODD RECOGNITION OF ASCII DIGITS 0 TO 9
import numpy as np

# ASCII values of digits 0-9
X = np.array([[48], [49], [50], [51], [52], [53], [54], [55], [56], [57]])

# Target output: Even = 0, Odd = 1
y = np.array([0, 1, 0, 1, 0, 1, 0, 1, 0, 1])

# Initialize weights and bias
w = np.random.rand(1)
b = np.random.rand()

# Learning rate
lr = 0.01

# Training
for epoch in range(20):
    for i in range(len(X)):
        net = X[i] * w + b
        # Activation function
        y_pred = 1 if net >= 0 else 0
        # Error
        error = y[i] - y_pred
        # Update weights and bias
        w = w + lr * error * X[i]
        b = b + lr * error

# Testing
print("Digit ASCII Prediction (0=Even,1=Odd)")
for i in range(len(X)):
    net = X[i] * w + b
    y_pred = 1 if net >= 0 else 0
    print(chr(X[i][0]), " ", X[i][0], " ", y_pred)






OR (this is the version to be done):
import numpy as np

# Input data (ASCII values of numbers 0 to 9)
X = np.array([48, 49, 50, 51, 52, 53, 54, 55, 56, 57])

# Target output
# Even = 1, Odd = 0
Y = np.array([1, 0, 1, 0, 1, 0, 1, 0, 1, 0])

# Normalize input values
X = X / 100

# Initialize weight and bias
weight = np.random.rand()
bias = np.random.rand()

# Learning rate
lr = 0.1

# Activation function
def activation(x):
    if x >= 0:
        return 1
    else:
        return 0

# Training the perceptron
for epoch in range(100):

    for i in range(len(X)):

        # Calculate net input
        net_input = X[i] * weight + bias

        # Predicted output
        output = activation(net_input)

        # Error
        error = Y[i] - output

        # Update weight and bias
        weight = weight + lr * error * X[i]
        bias = bias + lr * error

# Testing
print("Testing Perceptron for Even/Odd Recognition\n")

for i in range(len(X)):
    net_input = X[i] * weight + bias
    output = activation(net_input)

    number = chr(int(round(X[i] * 100)))  # round() guards against floating-point error from the earlier /100

    if output == 1:
        result = "Even"
    else:
        result = "Odd"

    print("Number:", number, "->", result)









ASSIGNMENT 4:
DEMONSTRATE PERCEPTRON LEARNING LAW:

import numpy as np
import matplotlib.pyplot as plt

# Input dataset
X = np.array([
    [0, 0],
    [0, 1],
    [1, 0],
    [1, 1]
])

# Target output
Y = np.array([0, 0, 0, 1])

# Initialize weights and bias
weights = np.zeros(2)
bias = 0

# Learning rate
lr = 0.1

# Training perceptron
for epoch in range(10):

    for i in range(len(X)):

        # Net input
        net = np.dot(X[i], weights) + bias

        # Activation function
        if net >= 0:
            output = 1
        else:
            output = 0

        # Error
        error = Y[i] - output

        # Update weights and bias
        weights = weights + lr * error * X[i]
        bias = bias + lr * error

# Print final weights
print("Final Weights:", weights)
print("Final Bias:", bias)

# Plotting points
for i in range(len(X)):

    if Y[i] == 0:
        plt.scatter(X[i][0], X[i][1], color='red', marker='o', s=100)

    else:
        plt.scatter(X[i][0], X[i][1], color='blue', marker='x', s=100)

# Decision boundary
x_values = np.linspace(-0.5, 1.5, 100)

# w1*x1 + w2*x2 + b = 0
y_values = -(weights[0] * x_values + bias) / weights[1]

plt.plot(x_values, y_values)

# Labels
plt.xlabel("X1")
plt.ylabel("X2")
plt.title("Perceptron Decision Region")

plt.grid(True)
plt.show()
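
A quick check (a sketch, not in the original listing) that the learned weights reproduce the AND truth table:

# Verify the trained perceptron on all four inputs
for i in range(len(X)):
    pred = 1 if np.dot(X[i], weights) + bias >= 0 else 0
    print(X[i], "->", pred, "(target:", Y[i], ")")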



ASSIGNMENT 5:
BIDIRECTIONAL ASSOCIATIVE MEMORY WITH TWO PAIRS OF VECTORS

import numpy as np
# Bipolar step function
def bipolar_step(x):
    return np.where(x >= 0, 1, -1)
# Input vector pairs
X1 = np.array([1, -1, 1])
Y1 = np.array([1, -1])
X2 = np.array([-1, 1, -1])
Y2 = np.array([-1, 1])
# Weight matrix calculation (Hebbian learning)
W = np.outer(X1, Y1) + np.outer(X2, Y2)
print("Weight Matrix W:\n", W)
# Forward recall (X → Y)
def recall_Y(X, W):
    return bipolar_step(np.dot(X, W))
# Backward recall (Y → X)
def recall_X(Y, W):
    return bipolar_step(np.dot(Y, W.T))
# Testing
print("\n--- Forward Recall (X → Y) ---")
print("Input X1:", X1, "→ Output Y:", recall_Y(X1, W))
print("Input X2:", X2, "→ Output Y:", recall_Y(X2, W))
print("\n--- Backward Recall (Y → X) ---")
print("Input Y1:", Y1, "→ Output X:", recall_X(Y1, W))
print("Input Y2:", Y2, "→ Output X:", recall_X(Y2, W))






ASSIGNMENT 6:
RECOGNIZE NUMBERS 0, 1, 2, 3 FROM 5x3 PIXEL PATTERNS

import numpy as np

# Training patterns for numbers 0,1,2,3 using 5x3 matrix

patterns = {

    '0': np.array([
        [1,1,1],
        [1,0,1],
        [1,0,1],
        [1,0,1],
        [1,1,1]
    ]),

    '1': np.array([
        [0,1,0],
        [1,1,0],
        [0,1,0],
        [0,1,0],
        [1,1,1]
    ]),

    '2': np.array([
        [1,1,1],
        [0,0,1],
        [1,1,1],
        [1,0,0],
        [1,1,1]
    ]),

    '3': np.array([
        [1,1,1],
        [0,0,1],
        [0,1,1],
        [0,0,1],
        [1,1,1]
    ])
}

# Convert matrices into vectors
training_data = {}

for key, value in patterns.items():
    training_data[key] = value.flatten()

# Test pattern
test = np.array([
    [1,1,1],
    [0,0,1],
    [1,1,1],
    [1,0,0],
    [1,1,1]
])

test_vector = test.flatten()

# Recognition using minimum distance
min_distance = float('inf')
recognized_number = None

for number, vector in training_data.items():

    distance = np.sum((test_vector - vector) ** 2)

    if distance < min_distance:
        min_distance = distance
        recognized_number = number

# Output
print("Test Pattern:\n")
print(test)

print("\nRecognized Number is:", recognized_number)









ASSIGNMENT 7:
FORWARD AND BACK PROPAGATION

import numpy as np

# Input dataset
X = np.array([
    [0, 0],
    [0, 1],
    [1, 0],
    [1, 1]
])

# Target output
Y = np.array([
    [0],
    [1],
    [1],
    [0]
])

# Sigmoid activation function
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# Derivative of sigmoid (expects the sigmoid output, not the raw input)
def sigmoid_derivative(x):
    return x * (1 - x)

# Initialize weights randomly
np.random.seed(1)

input_neurons = 2
hidden_neurons = 2
output_neurons = 1

# Weights
wh = np.random.uniform(size=(input_neurons, hidden_neurons))
bh = np.random.uniform(size=(1, hidden_neurons))

wo = np.random.uniform(size=(hidden_neurons, output_neurons))
bo = np.random.uniform(size=(1, output_neurons))

# Learning rate
lr = 0.1

# Training
for epoch in range(10000):

    # Forward Propagation

    hidden_input = np.dot(X, wh) + bh
    hidden_output = sigmoid(hidden_input)

    final_input = np.dot(hidden_output, wo) + bo
    predicted_output = sigmoid(final_input)

    # Error calculation
    error = Y - predicted_output

    # Back Propagation

    d_predicted_output = error * sigmoid_derivative(predicted_output)

    error_hidden_layer = d_predicted_output.dot(wo.T)

    d_hidden_layer = error_hidden_layer * sigmoid_derivative(hidden_output)

    # Updating weights and bias
    wo += hidden_output.T.dot(d_predicted_output) * lr
    bo += np.sum(d_predicted_output, axis=0, keepdims=True) * lr

    wh += X.T.dot(d_hidden_layer) * lr
    bh += np.sum(d_hidden_layer, axis=0, keepdims=True) * lr

# Output
print("Predicted Output:\n")

print(predicted_output)
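
Since the targets are binary, the continuous sigmoid outputs can be thresholded at 0.5 (a small sketch; if training has converged, this prints the XOR column [0, 1, 1, 0]):

# Threshold at 0.5 to obtain binary predictions
print("\nBinary Predictions:")
print((predicted_output >= 0.5).astype(int))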









ASSIGNMENT 8:
Multi-Class Classification using Neural Network from Scratch in Python
Overview

A Neural Network is a machine learning model inspired by the human brain.
It consists of:

Input layer
Hidden layer(s)
Output layer

In this program:

Multi-class classification is performed

Hidden layer uses 100 neurons

Activation function used is ReLU

Output layer has multiple neurons

Softmax is used for classification

Gradient Descent optimization is used


We use the Iris Dataset for classification.


import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler

# Load dataset
iris = load_iris()

X = iris.data
Y = iris.target.reshape(-1, 1)

# One Hot Encoding (sparse_output requires scikit-learn >= 1.2; older versions use sparse=False)
encoder = OneHotEncoder(sparse_output=False)
Y = encoder.fit_transform(Y)

# Split dataset
X_train, X_test, Y_train, Y_test = train_test_split(
    X, Y, test_size=0.2, random_state=42
)

# Feature scaling
scaler = StandardScaler()

X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Network architecture
input_neurons = X_train.shape[1]
hidden_neurons = 100
output_neurons = Y_train.shape[1]

# Initialize weights and bias
np.random.seed(1)

W1 = np.random.randn(input_neurons, hidden_neurons)
b1 = np.zeros((1, hidden_neurons))

W2 = np.random.randn(hidden_neurons, output_neurons)
b2 = np.zeros((1, output_neurons))

# Learning rate
lr = 0.01

# ReLU activation
def relu(x):
    return np.maximum(0, x)

# ReLU derivative (boolean mask; behaves as 0/1 when multiplied)
def relu_derivative(x):
    return x > 0

# Softmax function
def softmax(x):
    exp_x = np.exp(x - np.max(x, axis=1, keepdims=True))
    return exp_x / np.sum(exp_x, axis=1, keepdims=True)

# Training
epochs = 1000

for epoch in range(epochs):

    # Forward Propagation

    Z1 = np.dot(X_train, W1) + b1
    A1 = relu(Z1)

    Z2 = np.dot(A1, W2) + b2
    A2 = softmax(Z2)

    # Error
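    # (For softmax with cross-entropy loss, the gradient of the loss w.r.t. Z2 simplifies to A2 - Y)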
    error = A2 - Y_train

    # Back Propagation

    dW2 = np.dot(A1.T, error)
    db2 = np.sum(error, axis=0, keepdims=True)

    d_hidden = np.dot(error, W2.T) * relu_derivative(Z1)

    dW1 = np.dot(X_train.T, d_hidden)
    db1 = np.sum(d_hidden, axis=0, keepdims=True)

    # Update weights
    W1 -= lr * dW1
    b1 -= lr * db1

    W2 -= lr * dW2
    b2 -= lr * db2

# Testing
Z1_test = np.dot(X_test, W1) + b1
A1_test = relu(Z1_test)

Z2_test = np.dot(A1_test, W2) + b2
A2_test = softmax(Z2_test)

# Predictions
predictions = np.argmax(A2_test, axis=1)
actual = np.argmax(Y_test, axis=1)

# Accuracy
accuracy = np.mean(predictions == actual)

print("Predicted Classes:")
print(predictions)

print("\nActual Classes:")
print(actual)

print("\nAccuracy:", accuracy * 100, "%")








ASSIGNMENT B1(9)
Back Propagation Network for XOR Function using binary input and output in Python

import numpy as np

# XOR input dataset
X = np.array([
    [0, 0],
    [0, 1],
    [1, 0],
    [1, 1]
])

# XOR output
Y = np.array([
    [0],
    [1],
    [1],
    [0]
])

# Sigmoid activation function
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# Derivative of sigmoid (expects the sigmoid output, not the raw input)
def sigmoid_derivative(x):
    return x * (1 - x)

# Random weights initialization
np.random.seed(1)

input_neurons = 2
hidden_neurons = 2
output_neurons = 1

# Hidden layer weights and bias
wh = np.random.uniform(size=(input_neurons, hidden_neurons))
bh = np.random.uniform(size=(1, hidden_neurons))

# Output layer weights and bias
wo = np.random.uniform(size=(hidden_neurons, output_neurons))
bo = np.random.uniform(size=(1, output_neurons))

# Learning rate
lr = 0.1

# Training process
for epoch in range(10000):

    # Forward Propagation

    hidden_input = np.dot(X, wh) + bh
    hidden_output = sigmoid(hidden_input)

    final_input = np.dot(hidden_output, wo) + bo
    predicted_output = sigmoid(final_input)

    # Error calculation
    error = Y - predicted_output

    # Back Propagation

    d_output = error * sigmoid_derivative(predicted_output)

    error_hidden = d_output.dot(wo.T)

    d_hidden = error_hidden * sigmoid_derivative(hidden_output)

    # Update output weights and bias
    wo += hidden_output.T.dot(d_output) * lr
    bo += np.sum(d_output, axis=0, keepdims=True) * lr

    # Update hidden weights and bias
    wh += X.T.dot(d_hidden) * lr
    bh += np.sum(d_hidden, axis=0, keepdims=True) * lr

# Output
print("Predicted Output:\n")

print(predicted_output)






ASSIGNMENT B3(NO)
PROGRAM FOR CREATING A BACK PROPAGATION FEED FORWARD NEURAL NETWORK

import numpy as np

# Input dataset
X = np.array([
    [0, 0],
    [0, 1],
    [1, 0],
    [1, 1]
])

# Target output
Y = np.array([
    [0],
    [1],
    [1],
    [0]
])

# Sigmoid activation function
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# Derivative of sigmoid (expects the sigmoid output, not the raw input)
def sigmoid_derivative(x):
    return x * (1 - x)

# Initialize random weights
np.random.seed(1)

input_neurons = 2
hidden_neurons = 4
output_neurons = 1

# Hidden layer weights and bias
W1 = np.random.uniform(size=(input_neurons, hidden_neurons))
B1 = np.random.uniform(size=(1, hidden_neurons))

# Output layer weights and bias
W2 = np.random.uniform(size=(hidden_neurons, output_neurons))
B2 = np.random.uniform(size=(1, output_neurons))

# Learning rate
lr = 0.1

# Training process
epochs = 10000

for epoch in range(epochs):

    # ---------------------------
    # Forward Propagation
    # ---------------------------

    hidden_input = np.dot(X, W1) + B1
    hidden_output = sigmoid(hidden_input)

    final_input = np.dot(hidden_output, W2) + B2
    predicted_output = sigmoid(final_input)

    # ---------------------------
    # Error Calculation
    # ---------------------------

    error = Y - predicted_output

    # ---------------------------
    # Back Propagation
    # ---------------------------

    d_output = error * sigmoid_derivative(predicted_output)

    error_hidden = d_output.dot(W2.T)

    d_hidden = error_hidden * sigmoid_derivative(hidden_output)

    # Update output layer weights
    W2 += hidden_output.T.dot(d_output) * lr
    B2 += np.sum(d_output, axis=0, keepdims=True) * lr

    # Update hidden layer weights
    W1 += X.T.dot(d_hidden) * lr
    B1 += np.sum(d_hidden, axis=0, keepdims=True) * lr

# Final Output
print("Predicted Output:\n")

print(predicted_output)







ASSIGNMENT B4
PYTHON PROGRAM TO CREATE A HOPFIELD NETWORK THAT STORES AND RECALLS 4 BIPOLAR VECTORS

import numpy as np

# Define 4 bipolar vectors
V1 = np.array([1, -1, 1, -1])
V2 = np.array([-1, 1, -1, 1])
V3 = np.array([1, 1, -1, -1])
V4 = np.array([-1, -1, 1, 1])

# Store vectors in list
patterns = [V1, V2, V3, V4]

# Number of neurons
n = len(V1)

# Initialize weight matrix
W = np.zeros((n, n))

# Training Hopfield Network
for p in patterns:

    W += np.outer(p, p)

# Remove self connections
np.fill_diagonal(W, 0)

print("Weight Matrix:\n")
print(W)

# Test pattern (identical to V1 here; see the fixed-point check below)
test = np.array([1, -1, 1, -1])

print("\nTest Pattern:")
print(test)

# Recall process
net = np.dot(W, test)

# Activation function
output = np.where(net >= 0, 1, -1)

print("\nRecovered Pattern:")
print(output)
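
A quick extra check (a sketch, not in the original listing): every stored vector should be a fixed point of the recall step.

# Verify that each stored pattern is a fixed point
for i, p in enumerate(patterns, start=1):
    recalled = np.where(np.dot(W, p) >= 0, 1, -1)
    print("V%d stable:" % i, np.array_equal(recalled, p))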






ASSIGNMENT B5(NO)
OBJECT DETECTION

FIRST
import sys
!{sys.executable} -m pip install ultralytics
import ultralytics

print("Ultralytics Installed Successfully")


THEN
# Install the ultralytics package (provides YOLOv5 models)
!pip install ultralytics -q

# Import libraries
from ultralytics import YOLO
import cv2
import matplotlib.pyplot as plt

# Load pre-trained YOLOv5 model
model = YOLO("yolov5s.pt")

# Image to run detection on (ultralytics accepts a URL directly)
image_path = "https://ultralytics.com/images/bus.jpg"

# Run object detection
results = model(image_path)

# Display result image
for r in results:

    img = r.plot()  # annotated image as a BGR array

    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # convert to RGB for matplotlib

    plt.imshow(img)
    plt.axis('off')

    plt.title("Object Detection using CNN (YOLOv5)")
    plt.show()

# Print detected objects
for r in results:

    print("\nDetected Objects:\n")

    for box in r.boxes:

        cls = int(box.cls[0])
        conf = float(box.conf[0])

        print(f"Class: {model.names[cls]}, Confidence: {conf:.2f}")










ASSIGNMENT C1(NO)
HOW TO TRAIN A NEURAL NETWORK WITH TENSORFLOW AND EVALUATE LOGISTIC REGRESSION USING TENSORFLOW


import tensorflow as tf
from tensorflow.keras import layers, models
# Load dataset
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()
# Normalize
X_train = X_train / 255.0
X_test = X_test / 255.0
# Build model
model = models.Sequential([
    layers.Flatten(input_shape=(28, 28)),
    layers.Dense(128, activation='relu'),
    layers.Dense(10, activation='softmax')
])
# Compile model
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy']
)
# Train
model.fit(X_train, y_train, epochs=5)
# Evaluate
loss, accuracy = model.evaluate(X_test, y_test)
print("\nNeural Network Accuracy:", accuracy)







ASSIGNMENT C2
IMPLEMENTATION OF CNN USING TENSORFLOW/PYTORCH
import tensorflow as tf
from tensorflow.keras import layers, models
 
# Load dataset
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()
 
# Normalize and reshape
X_train = X_train.reshape(-1, 28, 28, 1) / 255.0
X_test = X_test.reshape(-1, 28, 28, 1) / 255.0
 
# Build CNN model
model = models.Sequential([
    layers.Conv2D(32, (3,3), activation='relu', input_shape=(28,28,1)),
    layers.MaxPooling2D((2,2)),
     
    layers.Conv2D(64, (3,3), activation='relu'),
    layers.MaxPooling2D((2,2)),
     
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(10, activation='softmax')
])
 
# Compile
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy']
)
# Train
model.fit(X_train, y_train, epochs=5)
# Evaluate
loss, accuracy = model.evaluate(X_test, y_test)
print("\nCNN Accuracy:", accuracy)







ASSIGNMENT C4
IMPLEMENTATION OF MNIST HANDWRITTEN CHARACTER DETECTION USING PYTORCH, KERAS AND TENSORFLOW

FIRST
!pip install tensorflow
!pip install keras

SECOND
import sys
!{sys.executable} -m pip install tensorflow

THIRD
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten

# Load MNIST dataset
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

# Normalize data
x_train = x_train / 255.0
x_test = x_test / 255.0

# Build model
model = Sequential([
    Flatten(input_shape=(28, 28)),
    Dense(128, activation='relu'),
    Dense(10, activation='softmax')
])

# Compile model
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Train model
model.fit(x_train, y_train, epochs=5)

# Evaluate model
loss, accuracy = model.evaluate(x_test, y_test)

print("Accuracy:", accuracy)


FOURTH
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms

# Load dataset
transform = transforms.ToTensor()

train_data = datasets.MNIST(root='./data',
                            train=True,
                            download=True,
                            transform=transform)

train_loader = torch.utils.data.DataLoader(dataset=train_data,
                                           batch_size=64,
                                           shuffle=True)

# Define Neural Network
class NeuralNet(nn.Module):

    def __init__(self):
        super(NeuralNet, self).__init__()

        self.flatten = nn.Flatten()

        self.fc1 = nn.Linear(28 * 28, 128)
        self.relu = nn.ReLU()

        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = self.flatten(x)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)

        return x

model = NeuralNet()

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Training
for epoch in range(5):

    for images, labels in train_loader:

        outputs = model(images)

        loss = criterion(outputs, labels)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    print("Epoch:", epoch + 1, "Loss:", loss.item())

print("Training Completed")





THIS CAN ALSO BE DONE A SECOND WAY:
SECOND CELL

# Baseline MLP for MNIST dataset
import numpy
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.utils import to_categorical  # np_utils was removed from recent Keras

# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)

# load data
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# flatten 28*28 images to a 784 vector for each image
num_pixels = X_train.shape[1] * X_train.shape[2]
X_train = X_train.reshape(X_train.shape[0], num_pixels).astype('float32')
X_test = X_test.reshape(X_test.shape[0], num_pixels).astype('float32')

# normalize inputs from 0-255 to 0-1
X_train = X_train / 255
X_test = X_test / 255

# one hot encode outputs
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
num_classes = y_test.shape[1]




THIRD CELL:
# define baseline model
def baseline_model():
    # create model
    model = Sequential()
    model.add(Dense(num_pixels, input_dim=num_pixels, kernel_initializer='normal', activation='relu'))
    model.add(Dense(num_classes, kernel_initializer='normal', activation='softmax'))
    # Compile model
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model




FOURTH CELL

# build the model
model = baseline_model()

# Fit the model
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200, verbose=2)

# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)
print("Baseline Error: %.2f%%" % (100-scores[1]*100))
print("\nName : Shantanu Anant Gaikwad\nRoll No : 27")






























































