DL LAB EXTERNAL

EXP 1 (Image Processing)
# EXP 1 (Image Processing): basic OpenCV operations on one image --
# histogram equalization, binary thresholding, Canny edges with
# morphological clean-up, and horizontal/vertical flips.
import matplotlib.pyplot as plt
import numpy as np
import cv2

img = cv2.imread("/content/1.JPG")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; matplotlib expects RGB
plt.imshow(img)
plt.show()

# Histogram equalization operates on a single-channel image.
gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
eql_img = cv2.equalizeHist(gray_img)
# calcHist's range upper bound is exclusive: [0, 256] covers intensities 0..255
# (the original [0, 255] silently dropped the brightest bin).
hist = cv2.calcHist([gray_img], [0], None, [256], [0, 256])
eql_hist = cv2.calcHist([eql_img], [0], None, [256], [0, 256])
plt.figure()
plt.plot(hist, label="original")
plt.plot(eql_hist, label="equalized")
plt.legend()
plt.show()

# Pixels with intensity > 100 become 200, everything else 0.
_, thres_img = cv2.threshold(gray_img, 100, 200, cv2.THRESH_BINARY)
plt.imshow(thres_img, cmap='gray')
plt.show()

# Edge detection: blur first to suppress noise, then Canny.
gb = cv2.GaussianBlur(img, (3, 3), 0)
edg = cv2.Canny(gb, 80, 200)
# erode/dilate require an actual structuring element (ndarray); the original
# passed a bare (3, 3) tuple, which is not a valid kernel.
kernel = np.ones((3, 3), np.uint8)
edg = cv2.erode(edg, kernel, iterations=2)
edg = cv2.dilate(edg, kernel, iterations=2)

# Mirror the image left-right and up-down.
flipped_lr = np.fliplr(img)
flipped_ud = np.flipud(img)
plt.imshow(flipped_lr)
plt.show()
plt.imshow(flipped_ud)
plt.show()  # the original dropped this final show()

EXP 2 a (KNN)
import numpy as np
import matplotlib.pyplot as plt
import time
from keras.datasets import cifar10
%matplotlib inline
plt.rcParams['figure.figsize'] = (10,8)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
class_names=['airplane','automobile','bird','cat','deer','dog','frog','horse','ship','truck']
plt.figure(figsize=(8,8))
for i in range(16):
plt.subplot(4,4,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(X_train[i])
plt.xlabel(class_names[y_train[i][0]])
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
k = 4
neigh = KNeighborsClassifier(n_neighbors = k).fit(X_train, y_train.ravel())
y_test_pred = neigh.predict(X_test)
num_correct = np.sum(y_test_pred == y_test.ravel())
acc = float(num_correct)/len(X_test)
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
cm = confusion_matrix(y_test_pred, y_test.ravel())
disp = ConfusionMatrixDisplay(confusion_matrix=cm)
disp.plot()
plt.show()

EXP 2 b (Layer NN)
# EXP 2 b (Layer NN): fully-connected classifier on CIFAR-10.
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense, Activation

(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
# Scale pixel intensities from [0, 255] into [0, 1].
train_images, test_images = train_images / 255.0, test_images / 255.0
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']

# Preview the first 25 training images with their class labels.
plt.figure(figsize=(10, 10))
for i in range(25):
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i])
    plt.xlabel(class_names[train_labels[i][0]])
plt.show()

# Hidden layers use ReLU -- the original's softmax on every hidden layer
# squashes activations to a probability simplex and cripples learning.
# Only the final 10-way output layer is softmax.
model = Sequential([
    Flatten(input_shape=(32, 32, 3)),
    Dense(250, activation='relu'),
    Dense(128, activation='relu'),
    Dense(18, activation='relu'),
    Dense(10, activation='softmax'),
])
model.summary()
# The output layer already applies softmax, so the loss must NOT treat the
# network output as raw logits (original had from_logits=True -- inconsistent).
model.compile(optimizer='adam',
              loss=tf.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['accuracy'])
history = model.fit(train_images, train_labels, epochs=10,
                    validation_data=(test_images, test_labels))

EXP 3 (Batch Normalization)
# EXP 3 (Batch Normalization): CNN with BatchNorm, Dropout and on-the-fly
# data augmentation, trained on CIFAR-10; plots training vs. validation loss.
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import BatchNormalization
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
from keras.datasets import cifar10
from keras.utils import normalize, to_categorical

(X_train, y_train), (X_test, y_test) = cifar10.load_data()
# One-hot encode labels to match categorical_crossentropy.
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

# Augmentation: random rotations, shifts, zooms and horizontal flips.
train_datagen = ImageDataGenerator(rotation_range=45,
                                   width_shift_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
train_datagen.fit(X_train)
train_generator = train_datagen.flow(X_train, y_train, batch_size=32)

activation = 'relu'
model = Sequential()
# Block 1: two 3x3 convs (32 filters), each followed by BatchNorm.
model.add(Conv2D(32, (3, 3), activation=activation, padding='same',
                 input_shape=(32, 32, 3)))
model.add(BatchNormalization())
model.add(Conv2D(32, (3, 3), activation=activation, padding='same',
                 kernel_initializer='he_uniform'))
model.add(BatchNormalization())
model.add(MaxPooling2D())
model.add(Dropout(0.2))
# Block 2: same pattern with 64 filters.
model.add(Conv2D(64, (3, 3), activation=activation, padding='same',
                 kernel_initializer='he_uniform'))
model.add(BatchNormalization())
model.add(Conv2D(64, (3, 3), activation=activation, padding='same',
                 kernel_initializer='he_uniform'))
model.add(BatchNormalization())
model.add(MaxPooling2D())
model.add(Dropout(0.2))
# Classifier head.
model.add(Flatten())
model.add(Dense(128, activation=activation, kernel_initializer='he_uniform'))
model.add(Dense(10, activation='softmax'))

model.compile(optimizer="rmsprop", loss='categorical_crossentropy',
              metrics=['accuracy'])
print(model.summary())

# fit_generator() is deprecated (removed in TF 2.x); model.fit() accepts
# generators directly with the same arguments.
history = model.fit(train_generator, steps_per_epoch=250, epochs=10,
                    validation_data=(X_test, y_test))

# Plot training vs. validation loss per epoch.
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, 'y', label='Training_loss')
plt.plot(epochs, val_loss, 'r', label='Validation_loss')
plt.title("Training and validation loss")
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()

EXP 6 (Object Detection)(YOLO)
# EXP 6 (Object Detection, YOLOv5): load a pretrained YOLOv5 model via
# torch.hub and draw the detected bounding boxes + class names on the image.
import numpy as np
import torch            # required by torch.hub.load (missing in the original -> NameError)
import cv2
from PIL import Image   # required by Image.open/fromarray (missing in the original -> NameError)
import ultralytics      # pulls in the YOLOv5 dependency stack

model = torch.hub.load('ultralytics/yolov5', 'yolov5l', pretrained=True)
img = np.array(Image.open("/content/1.JPG"))
results = model(img)
# One DataFrame row per detection: box corners, confidence, class id, class name.
df = results.pandas().xyxy[0]
for i in range(len(df)):
    xmin = int(df['xmin'][i])
    ymin = int(df['ymin'][i])
    xmax = int(df['xmax'][i])
    ymax = int(df['ymax'][i])
    name = df['name'][i]
    cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (255, 255, 0), 2)
    cv2.putText(img, name, org=(xmin, ymin), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=2, color=(255, 0, 0), thickness=2)
image = Image.fromarray(img)
image

EXP 7 & 8 (RNN / LSTM)
# EXP 7 & 8 (RNN / LSTM): SimpleRNN classifier on the Reuters topic dataset
# (46 topic classes).
import numpy as np
import keras
from keras.models import Sequential
from keras.datasets import reuters
from keras.utils import pad_sequences
from keras.layers import SimpleRNN
from keras.layers import Dense
from keras.layers import LSTM

# Keep the 30k most frequent words; sequences capped at 50 tokens.
(x_train, y_train), (x_test, y_test) = reuters.load_data(num_words=30000,
                                                         test_split=0.2,
                                                         maxlen=50)
x_train = pad_sequences(x_train, padding="post", maxlen=50)
x_test = pad_sequences(x_test, padding="post", maxlen=50)
# Add a trailing feature axis: RNN input is (samples, timesteps, features).
x_train = np.array(x_train).reshape([x_train.shape[0], x_train.shape[1], 1])
x_test = np.array(x_test).reshape([x_test.shape[0], x_test.shape[1], 1])


def vanilla_rnn():
    """Build a single-layer SimpleRNN with a 46-way softmax head."""
    model = Sequential()
    model.add(SimpleRNN(50, input_shape=(x_train.shape[1], 1),
                        return_sequences=False))
    model.add(Dense(46, activation="softmax"))
    # 'lr' is a deprecated keyword in modern Keras; use 'learning_rate'.
    adam = keras.optimizers.Adam(learning_rate=0.001)
    model.compile(loss='sparse_categorical_crossentropy', optimizer=adam,
                  metrics=['accuracy'])
    return model


model = vanilla_rnn()
model.fit(x_train, y_train, epochs=10)
y_proba = model.predict(x_test)
y_hat = y_proba.argmax(axis=1)
y_hat[0], y_test[0]

EXP 9 (Saliency Map)
# EXP 9 (Saliency Map): gradient of the top class score w.r.t. the input
# pixels of a pretrained VGG19, visualised as a heat map.
from torchvision.models import VGG19_Weights
from PIL import Image
import torchvision
import torch
import matplotlib.pyplot as plt

img = Image.open("/content/1.JPG")
# Use the weights enum for both the model and its matching preprocessing
# (the original passed weights=True, a deprecated/legacy form, and only
# used the enum for the transforms).
weights = VGG19_Weights.DEFAULT
model = torchvision.models.vgg19(weights=weights)
# Freeze the network: we only need gradients w.r.t. the input image.
for params in model.parameters():
    params.requires_grad = False
model.eval()

# Preprocess exactly as the pretrained weights expect.
transform = weights.transforms()
img = transform(img)
X = img.unsqueeze_(0)  # add the batch dimension
X.requires_grad_()     # track gradients on the input pixels

scores = model(X)
scores_max_arg = scores.argmax(dim=1)
scores_max = scores[0][scores_max_arg]
scores_max.backward()  # d(top score)/d(pixels)
# Saliency = per-pixel max over colour channels of the absolute gradient.
s, _ = torch.max(X.grad.data.abs(), dim=1)
plt.imshow(s[0], cmap=plt.cm.hot)

EXP 11 (Chatbot)
# EXP 11 (Chatbot): a toy question-answer chatbot. Questions are bag-of-words
# encoded; a small dense network maps each encoding to the index of its answer.
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras.layers import Dense, Conv2D, Flatten, SimpleRNN, LSTM, GRU, MaxPooling2D, Activation
from keras.models import Sequential
from keras.optimizers import Adam
from keras.preprocessing.text import Tokenizer

questions = ["hi", "what is your name?", "bye"]
answers = ["Hello", "My name is Chatbot.", "Good bye"]

# Fit the tokenizer on the questions and encode them as a term matrix;
# the targets are one-hot rows (identity matrix), one per answer.
t = Tokenizer()
t.fit_on_texts(questions)
X = t.texts_to_matrix(questions)
Y = np.identity(len(answers))

model = tf.keras.Sequential([
    tf.keras.layers.Dense(64, activation='relu', input_shape=(X.shape[1],)),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(Y.shape[1], activation='softmax')
])
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(X, Y, epochs=10)

# Encode a new utterance and reply with the best-matching canned answer.
# NOTE(review): "hello" is out-of-vocabulary for this tokenizer, so its
# bag-of-words vector is all zeros and the prediction is effectively
# arbitrary -- confirm this is the intended demo behaviour.
x = t.texts_to_matrix(["hello"], mode='binary')
print(answers[model.predict(x)[0].argmax()])