Machine Learning Practical Exam Codes
Keras and TensorFlow
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
# Load the MNIST dataset
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Normalize the input data and add the channel dimension expected by Conv2D
x_train = tf.keras.utils.normalize(x_train, axis=1).reshape(-1, 28, 28, 1)
x_test = tf.keras.utils.normalize(x_test, axis=1).reshape(-1, 28, 28, 1)
# One-hot encode the labels
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)
# Define the neural network model
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1))) # Convolutional layer with 32 filters, kernel size (3,3), ReLU activation, and input shape (28,28,1)
model.add(tf.keras.layers.MaxPooling2D((2, 2))) # Max pooling layer with pool size (2,2)
model.add(tf.keras.layers.Flatten()) # Flatten layer to convert 2D output to 1D
model.add(tf.keras.layers.Dense(128, activation='relu')) # Dense (fully connected) layer with 128 neurons and ReLU activation
model.add(tf.keras.layers.Dense(10, activation='softmax')) # Output layer with 10 neurons (one for each class) and softmax activation
# Display the model summary
model.summary()
# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Train the model
model.fit(x_train, y_train, epochs=2)
# Evaluate the model on the test data
loss, accuracy = model.evaluate(x_test, y_test)
# Print the loss and accuracy
print("The model has a loss of: ", loss)
print("The model has an accuracy of about: ", accuracy*100, "%")
Linear Regression
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data = pd.read_csv("salary.csv", index_col=0)  # first CSV column is the row index, not a feature
X = data.iloc[:, 0]
y = data.iloc[:, 1]
# Manual calculation of linear regression coefficients
n = len(data)
sum_x = sum(X)
sum_y = sum(y)
sum_xy = sum(X * y)
sum_x_squared = sum(X ** 2)
a = (sum_y * sum_x_squared - sum_x * sum_xy) / (n * sum_x_squared - sum_x ** 2)
b = (n * sum_xy - sum_x * sum_y) / (n * sum_x_squared - sum_x ** 2)
# Predicting salaries for several new experience values
x_test = np.array([1.7, 2.5, 6.5, 1, 2.2])
y_pred = a + b * x_test
# Plotting
plt.scatter(X, y, color="orange", label="Original data")
plt.plot(X, a + b * X, color="blue", label="Linear regression line")
plt.scatter(x_test, y_pred, color="red", label="Predictions")
plt.xlabel("Experience")
plt.ylabel("Salary")
plt.title("Linear Regression")
plt.legend()
plt.show()
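For reference, the intercept a and slope b computed above are the closed-form least-squares solutions:

a = \frac{\sum y \,\sum x^2 - \sum x \,\sum xy}{n\sum x^2 - \left(\sum x\right)^2}, \qquad b = \frac{n\sum xy - \sum x \,\sum y}{n\sum x^2 - \left(\sum x\right)^2}

so the fitted line is y = a + b x.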
Dataset (salary.csv)
,YearsExperience,Salary
0,1.2,39344
1,1.4,46206
2,1.6,37732
3,2.1,43526
4,2.3,39892
5,3,56643
6,3.1,60151
7,3.3,54446
8,3.3,64446
9,3.8,57190
10,4,63219
11,4.1,55795
12,4.1,56958
13,4.2,57082
14,4.6,61112
15,5,67939
16,5.2,66030
17,5.4,83089
18,6,81364
19,6.1,93941
20,6.9,91739
21,7.2,98274
22,8,101303
23,8.3,113813
24,8.8,109432
25,9.1,105583
26,9.6,116970
27,9.7,112636
28,10.4,122392
29,10.6,121873
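Given the dataset above, a quick cross-check of the manual coefficients, assuming scikit-learn is available (LinearRegression should reproduce the same intercept and slope):
from sklearn.linear_model import LinearRegression
reg = LinearRegression()
reg.fit(X.values.reshape(-1, 1), y)          # X, y are the columns loaded earlier
print("sklearn intercept:", reg.intercept_)  # should match a
print("sklearn slope:", reg.coef_[0])        # should match b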
Logistic Regression
import numpy as np
import matplotlib.pyplot as plt
n = int(input("Enter the number of values (n): "))
X1_values = input("Enter X1 values separated by commas: ").split(',')
X1 = np.array([float(x.strip()) for x in X1_values])
X2_values = input("Enter X2 values separated by commas: ").split(',')
X2 = np.array([float(x.strip()) for x in X2_values])
Y_values = input("Enter Y values separated by commas: ").split(',')
Y = np.array([float(y.strip()) for y in Y_values])
print("X1:", X1)
print("X2:", X2)
print("Y:", Y)
b0, b1, b2 = 0, 0, 0
s = float(input("Enter a threshold between 0 and 1: "))
pc = []
# One pass of stochastic gradient descent over the training samples
for i in range(n):
    P = 1 / (1 + np.exp(-(b0 + b1 * X1[i] + b2 * X2[i])))
    b0 = b0 + 0.01 * (Y[i] - P) * P * (1 - P) * 1  # bias input x0 = 1
    b1 = b1 + 0.01 * (Y[i] - P) * P * (1 - P) * X1[i]
    b2 = b2 + 0.01 * (Y[i] - P) * P * (1 - P) * X2[i]
print(f"Calculated coefficients: b0 = {b0}, b1 = {b1}, b2 = {b2}")
print("X1\tX2\tActual Class\tP\tPc")
print("X1\tX2\tActual class\tPediction p\tPredictiion class pc")
for i in range(n):
P = 1 / (1 + np.exp(-(b0 + b1 * X1[i] + b2 * X2[i] + b2 * X2[i])))
pc.append(1 if P > s else 0) # threshold wala loop
print(f"{X1[i]}\t{X2[i]}\t{Y[i]}\t\t{P}\t\t{pc[i]}")
SVM
from sklearn import datasets
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
# Load breast cancer dataset
data = datasets.load_breast_cancer()
X = data.data
y = data.target
# Print dataset information
print("Number of classes:", len(set(y)))
print("Number of samples per class:")
print(pd.Series(y).value_counts())
print("Total number of samples:", len(y))
print("Dimensionality:", X.shape[1])
print("Features:", data.feature_names)
# Split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=60)
# Initialize and train SVM classifier
object_SVM = SVC(kernel='linear')
object_SVM.fit(X_train, y_train)
# Make predictions on the testing set
y_pred = object_SVM.predict(X_test)
# Calculate evaluation metrics
accuracy = accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred, average='weighted')
recall = recall_score(y_test, y_pred, average='weighted')
f1 = f1_score(y_test, y_pred, average='weighted')
# Print evaluation metrics
print("Accuracy:", accuracy)
print("Precision:", precision)
print("Recall:", recall)
print("F1-score:", f1)
Hebbian Learning
def heb_learning(samples):
    print(f'{"Input" : ^8}{"Target" : ^16}{"Weight changes" : ^15}{"Weight" : ^28}')
    w1, w2, b = 0, 0, 0
    print(' ' * 48, f'({w1:2} , {w2:2} , {b:2})')
    for x1, x2, y in samples:
        # Hebb rule: each weight moves by input * target
        w1 = w1 + x1 * y
        w2 = w2 + x2 * y
        b = b + y
        print(f'({x1:2} , {x2:2})\t {y:2}\t ({x1*y:2},{x2*y:2},{y:2})\t\t ({w1:2} , {w2:2} , {b:2})')
OR_samples = {
'bipolar_input_bipolar_output' : [
[1,1,1],
[1,-1,1],
[-1,1,1],
[-1,-1,-1]
]
}
print('-'*20,'Hebbian Learning','-'*20)
print('OR with bipolar input and bipolar output')
heb_learning(OR_samples['bipolar_input_bipolar_output'])
def heb_learnings(samples):
    print(f'{"Input" : ^6}{"Target" : ^12}{"Changes" : ^12}{"Weights" : ^12}')
    w1 = 0
    w2 = 0
    b = 0
    print(' ' * 32, f'{w1:2}{w2:2}{b:2}')
    for x1, x2, y in samples:
        w1 = w1 + x1 * y
        w2 = w2 + x2 * y
        b = b + y
        print(f'{x1:2}{x2:2}\t {y:2}\t {x1*y:2}{x2*y:2}{y:2}\t\t {w1:2}{w2:2}{b:2}')
AND_samples = {
'bipolar_input_bipolar_output' : [
[1,1,1],
[1,-1,-1],
[-1,1,-1],
[-1,-1,-1]
]
}
print("AND gate hebbian Learning\n")
heb_learnings(AND_samples['bipolar_input_bipolar_output'])
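Both functions apply the same Hebb rule: for every training pair, each weight moves by the product of its input and the target,

\Delta w_i = x_i \, y, \qquad \Delta b = y

which with bipolar values strengthens a weight whenever input and target agree in sign.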
MP (McCulloch-Pitts) Neuron
import numpy as np
while True:
    x1 = np.array([0, 0, 1, 1])
    x2 = np.array([0, 1, 0, 1])
    t = np.array([0, 1, 1, 1])  # OR-gate target output
    w1 = float(input("Enter W1 Weight Value: "))
    w2 = float(input("Enter W2 Weight Value: "))
    T = float(input("Enter Threshold Value: "))
    yin = w1 * x1 + w2 * x2
    print("Yin:")
    print(yin)
    y = np.zeros_like(t)  # start all outputs at 0; set to 1 where yin reaches the threshold
    for i in range(len(yin)):
        if yin[i] >= T:
            y[i] = 1
    print("")
    print("Target O/P", t)
    print("Calculated O/P", y)
    print("")
    if np.array_equal(y, t):
        print("Correct Weight And Threshold Values")
        break
    else:
        print("Incorrect Weights, Re-running Code")
        print("\n")
Error Backpropagation (Multilayer Perceptron)
import numpy as np
# Input array and desired output
input_array = np.array([1, 1, 0, 1])
desired_output = 1
# Define parameters
hidden_input_weights = np.array([[0.3, 0.1], [-0.2, 0.4], [0.2, -0.3], [0.1, 0.4]])
hidden_bias_input = np.array([0.2, 0.1])
output_input_weights = np.array([-0.3, 0.2])
bias_output = -0.3
learning_rate = 0.8
# Initialize iteration counter
iteration = 0
max_iterations = int(input("Enter the maximum number of iterations for the algorithm: "))
while iteration < max_iterations:
    # Forward pass
    hidden_netinput = np.dot(input_array, hidden_input_weights) + hidden_bias_input
    hidden_netoutput = 1 / (1 + np.exp(-hidden_netinput))  # sigmoid hidden activations
    net_input_output = np.dot(hidden_netoutput, output_input_weights) + bias_output
    hidden_out = 1 / (1 + np.exp(-net_input_output))       # sigmoid output activation
    # Error calculation
    error = desired_output - hidden_out
    # Backpropagation of the error through the sigmoid derivatives
    error_output_layer = hidden_out * (1 - hidden_out) * error
    error_hidden_layer = hidden_netoutput * (1 - hidden_netoutput) * np.dot(error_output_layer, output_input_weights)
    # Update weights and biases
    hidden_input_weights += learning_rate * np.outer(input_array, error_hidden_layer)
    hidden_bias_input += learning_rate * error_hidden_layer
    output_input_weights += learning_rate * error_output_layer * hidden_netoutput
    bias_output += learning_rate * error_output_layer
    # Increment iteration counter
    iteration += 1
print("Weights for Hidden Layer:")
print(hidden_input_weights)
print("Bias for Hidden Layer:", hidden_bias_input)
print("Weights for Output Layer:", output_input_weights)
print("Bias for Output Layer:", bias_output)
print("Number of iterations executed:", iteration)
print("Final Error:", error)
Single-Layer Perceptron
import numpy as np
# Input array and desired output
input_array = np.array([1, 1, 0, 1])
desired_output = 1
# Define parameters
input_weights = np.array([0.3, -0.2, 0.2, 0.1])
bias = 0.2
learning_rate = 0.8
# Initialize iteration counter
iteration = 0
max_iterations = int(input("Enter the maximum number of iterations for the algorithm: "))
while iteration < max_iterations:
    # Forward pass: sigmoid of the weighted sum
    net_input = np.dot(input_array, input_weights) + bias
    output = 1 / (1 + np.exp(-net_input))
    # Error calculation
    error = desired_output - output
    # Update weights and bias (delta rule)
    input_weights += learning_rate * error * input_array
    bias += learning_rate * error
    # Increment iteration counter
    iteration += 1
print("Weights:", input_weights)
print("Bias:", bias)
print("Number of iterations executed:", iteration)
print("Final Error:", error)
def perceptron_learning(samples):
    print(f'{"Input":^8}{"Target":^16}{"Weight changes":^15}{"Weights":^28}')
    w1, w2, b = 0, 0, 0
    print(' ' * 48, f'({w1:2}, {w2:2}, {b:2})')
    for x1, x2, y in samples:
        # Calculate the predicted output with a step activation at threshold 0
        output = 1 if w1 * x1 + w2 * x2 + b >= 0 else 0
        # Update weights and bias using the perceptron learning rule
        w1_change = x1 * (y - output)
        w2_change = x2 * (y - output)
        b_change = y - output
        w1 += w1_change
        w2 += w2_change
        b += b_change
        print(f'({x1:2}, {x2:2})\t {y:2}\t ({w1_change:2}, {w2_change:2}, {b_change:2})\t\t ({w1:2}, {w2:2}, {b:2})')
AND_samples = {
'binary_input_binary_output': [
[1, 1, 1],
[1, 0, 0],
[0, 1, 0],
[0, 0, 0]
]
}
print('-' * 20, 'Perceptron Learning', '-' * 20)
print('AND with binary input and binary output')
perceptron_learning(AND_samples['binary_input_binary_output'])
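In symbols, with a step activation and learning rate 1, each sample triggers

\Delta w_i = (t - o)\,x_i, \qquad \Delta b = t - o

so the weights change only on misclassified samples.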
PCA
import numpy as np
import matplotlib.pyplot as plt
# Define dataset
x = np.array([4, 8, 13, 7])
y = np.array([11, 4, 5, 14])
dataset = np.array([x, y])
print("Define Dataset")
print(dataset)
print()
# Finding mean
xMean = np.mean(x)
yMean = np.mean(y)
# Adjusting the mean obtained
MeanAdjusted = np.array([x - xMean, y - yMean])
print("Mean adjusted:")
print(MeanAdjusted)
print("\n")
# Finding the Covariance
covariance_matrix = np.cov(dataset)
print("Covariance Matrix")
print(covariance_matrix)
print("\n")
# Compute the Eigen Values and Eigen Vectors
eigen_values, eigen_vectors = np.linalg.eig(covariance_matrix)
print("Eigen Values")
print(eigen_values)
print()
# Sort result in descending order
sorted_indices = np.argsort(eigen_values)[::-1]
sorted_eigen_values = eigen_values[sorted_indices]
sorted_eigen_vectors = eigen_vectors[:, sorted_indices]
print("Sorted Eigen Values")
print(sorted_eigen_values)
print()
print("Sorted Eigen Vectors")
print(sorted_eigen_vectors)
print()
# Perform PCA
PCA = np.dot(sorted_eigen_vectors.T, MeanAdjusted)
print("Principal Component Analysis:")
print(PCA)
# Scatter plots of the original data and of the data after PCA
plt.subplot(1, 2, 1)
plt.scatter(x, y, color='blue')
plt.title('Before PCA')
plt.xlabel('x')
plt.ylabel('y')
plt.subplot(1, 2, 2)
plt.scatter(PCA[0], PCA[1], color='red')
plt.title('After PCA')
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
plt.tight_layout()
plt.show()
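As a cross-check, assuming scikit-learn is available, sklearn's PCA should reproduce the projection above (each component possibly sign-flipped, since eigenvector signs are arbitrary):
from sklearn.decomposition import PCA as SklearnPCA
points = np.array([x, y]).T  # samples as rows: shape (4, 2)
projected = SklearnPCA(n_components=2).fit_transform(points)
print(projected)             # rows here correspond to columns of the manual PCA result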