Anna University Regional Campus - Tirunelveli
Tirunelveli - 627007
BONAFIDE CERTIFICATE
NAME : ………….…………………………………………………
REGISTER NUMBER : …………………………………………………………….
PROGRAMME : ………….…………………………………………………
COURSE CODE & TITLE : ………….………………………………………………….
DEPARTMENT : ………….…………………………………………………
SEMESTER : ………….…………………………………………………
Certified that this is a Bonafide Record of Practical Work Done
by Mr./Ms. …………………………………… in the ……………………………………
Laboratory during the Period of
2024-2025.
DATE:
SIGNATURE OF FACULTY-IN-CHARGE SIGNATURE OF HOD
Submitted for the Practical Held On …………………………………………..
INTERNAL EXAMINER EXTERNAL EXAMINER
Index
S.No | Date | Experiment Title | Page No | Staff Signature
T-pyramid of an image.
AIM
ALGORITHM
1
OUTPUT
2
PROGRAM
import cv2

def build_t_pyramid(image_path):
    """Build and display a 4-level T-pyramid (Gaussian pyramid) of an image.

    image_path: path to the input image file.
    Opens one window per pyramid level; press any key to close them all.
    """
    img = cv2.imread(image_path)
    levels = [img]  # Level 0 (original)
    for _ in range(4):
        img = cv2.pyrDown(img)  # halve each dimension with Gaussian smoothing
        levels.append(img)
    for i, level in enumerate(levels):
        cv2.imshow(f'Level {i}', level)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

build_t_pyramid('image.jpg')  # NOTE(review): original filename was garbled — confirm
RESULT
3
4
QUAD TREE
AIM
ALGORITHM
5
OUTPUT
6
PROGRAM
import cv2
import numpy as np

def quad_tree(image_path):
    """Quad-tree decomposition of an image, drawn as green rectangles.

    A region keeps splitting into four quadrants while it is larger than
    16 px on each side and its grayscale standard deviation is >= 10
    (i.e. it is not yet homogeneous). Leaf regions are outlined in green.
    """
    img = cv2.imread(image_path)
    img_copy = img.copy()
    h, w, _ = img.shape

    def split(x, y, w, h):
        region = img[y:y+h, x:x+w]
        gray = cv2.cvtColor(region, cv2.COLOR_BGR2GRAY)
        # Stop criterion: region too small or nearly uniform intensity.
        if w <= 16 or h <= 16 or np.std(gray) < 10:
            cv2.rectangle(img_copy, (x, y), (x+w, y+h), (0, 255, 0), 1)
        else:
            split(x, y, w//2, h//2)
            split(x+w//2, y, w//2, h//2)
            split(x, y+h//2, w//2, h//2)
            split(x+w//2, y+h//2, w//2, h//2)

    split(0, 0, w, h)
    cv2.imshow('Quad Tree RGB', img_copy)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

quad_tree('computer_boy.jpg')
RESULT
7
OUTPUT
8
GEOMETRIC TRANSFORMATIONS OF AN IMAGE
AIM
ALGORITHM
9
10
PROGRAM
import cv2
import numpy as np

# Load the source image; every transform below is applied to it.
img = cv2.imread('image.jpg')  # NOTE(review): original filename was garbled — confirm
h, w = img.shape[:2]

# Rotation: 45 degrees about the image centre, unit scale.
rotated = cv2.warpAffine(img, cv2.getRotationMatrix2D((w//2, h//2), 45, 1), (w, h))
# Scaling: shrink to half size.
scaled = cv2.resize(img, (w//2, h//2))
# Shear (skew) of 0.3 along both axes.
skewed = cv2.warpAffine(img, np.float32([[1, 0.3, 0], [0.3, 1, 0]]), (w, h))
# Affine transform computed from three point correspondences.
M_affine = cv2.getAffineTransform(np.float32([[50,50],[200,50],[50,200]]),
                                  np.float32([[10,100],[200,50],[100,250]]))
affine = cv2.warpAffine(img, M_affine, (w, h))
# Perspective transform from four corner correspondences (bilinear warp).
M_persp = cv2.getPerspectiveTransform(np.float32([[0,0],[w-1,0],[0,h-1],[w-1,h-1]]),
                                      np.float32([[0,0],[w-50,50],[50,h-50],[w-1,h-1]]))
bilinear = cv2.warpPerspective(img, M_persp, (w, h))

cv2.imshow('Original', img)
cv2.imshow('Rotated', rotated)
cv2.imshow('Scaled', scaled)
cv2.imshow('Skewed', skewed)
cv2.imshow('Affine', affine)
cv2.imshow('Bilinear', bilinear)
cv2.waitKey(0)
cv2.destroyAllWindows()
RESULT
11
12
OBJECT DETECTION AND RECOGNITION
AIM
ALGORITHM
13
OUTPUT
14
PROGRAM
from ultralytics import YOLO
import cv2

model = YOLO('yolov8n.pt')  # 'n' = nano (small and fast model)
img = cv2.imread('image.jpg')  # NOTE(review): original filename was garbled — confirm
# Run inference; keep only detections with confidence >= 0.5.
results = model.predict(source=img, save=False, conf=0.5)
for r in results:
    for box in r.boxes:
        x1, y1, x2, y2 = map(int, box.xyxy[0])   # bounding-box corners
        label = model.names[int(box.cls[0])]     # class name for the detection
        conf = box.conf[0]                       # confidence score
        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
        cv2.putText(img, f"{label} {conf:.2f}", (x1, y1-10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
cv2.imshow('YOLOv8 Detection', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
RESULT
15
16
MOTION ANALYSIS USING MOVING EDGES
AIM
ALGORITHM
OUTPUT
PROGRAM
import cv2

# Motion analysis by frame differencing: moving edges appear as large
# intensity differences between consecutive video frames.
cap = cv2.VideoCapture('video.mp4')
ret, prev = cap.read()
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    diff = cv2.absdiff(prev, frame)
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(gray, 30, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for c in contours:
        if cv2.contourArea(c) > 500:  # ignore small noise regions
            x, y, w, h = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
            cv2.putText(frame, "Motion", (x, y-10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    cv2.imshow('Motion Detection', frame)
    if cv2.waitKey(30) == 27:  # Press ESC to exit
        break
    prev = frame.copy()  # current frame becomes the reference for the next diff
cap.release()
cv2.destroyAllWindows()
RESULT
FACIAL DETECTION AND RECOGNITION
AIM
ALGORITHM
OUTPUT
PROGRAM
import cv2

# Haar-cascade frontal-face detector bundled with the OpenCV installation.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades +
                                     'haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)  # default webcam
while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # scaleFactor=1.1, minNeighbors=4
    faces = face_cascade.detectMultiScale(gray, 1.1, 4)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        cv2.putText(frame, "Face", (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255,
                    0), 2)
    cv2.imshow('Face Detection', frame)
    if cv2.waitKey(1) == 27:  # Press ESC to exit
        break
cap.release()
cv2.destroyAllWindows()
RESULT
HAND GESTURE RECOGNITION
AIM
ALGORITHM
OUTPUT
PROGRAM
import cv2

cap = cv2.VideoCapture(0)  # default webcam
# Background subtraction isolates the moving foreground (the hand).
fgbg = cv2.createBackgroundSubtractorMOG2()
while True:
    ret, frame = cap.read()
    if not ret:
        break
    mask = fgbg.apply(frame)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    for c in contours:
        if cv2.contourArea(c) > 500:  # ignore small noise blobs
            x, y, w, h = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0,255,0), 2)
    cv2.imshow('Motion Detection', frame)
    if cv2.waitKey(1) == 27:  # Press ESC to exit
        break
cap.release()
cv2.destroyAllWindows()
RESULT