312 lines
10 KiB
Python
312 lines
10 KiB
Python
import cv2
|
|
import mediapipe as mp
|
|
import os
|
|
import numpy as np
|
|
import time
|
|
from flask import Flask, Response, render_template, request, jsonify
|
|
import json
|
|
|
|
app = Flask(__name__)

# Ensure the directory for saved pose images exists before any route runs.
# exist_ok=True replaces the original check-then-create pair (race-free,
# one call instead of two).
os.makedirs('assets', exist_ok=True)

# Module-level caches filled by load_saved_data(): one landmark array per
# saved pose image, plus the image filename it came from (parallel lists).
saved_landmarks = []
saved_filenames = []
|
|
def load_saved_data():
    """Populate the module-level saved_landmarks / saved_filenames caches.

    Reads dataset.json for the list of saved poses, loads each pose's image
    from assets/<nama>.jpg, runs MediaPipe Pose on it, and caches the
    per-landmark (x, y, z) array together with the source filename.
    A missing dataset.json is treated as an empty dataset; unreadable
    images and images with no detected pose are skipped.

    Fix: the caches are cleared first. The original only appended, and this
    function runs on every request to '/', so each page visit duplicated
    every saved entry (and slowed down the similarity search in gen()).
    """
    # Reset the parallel caches so repeated calls don't accumulate duplicates.
    saved_landmarks.clear()
    saved_filenames.clear()

    with mp.solutions.pose.Pose(min_detection_confidence=0.7,
                                min_tracking_confidence=0.7,
                                model_complexity=2) as pose:
        # Read the dataset.json file.
        dataset_file = 'dataset.json'
        if os.path.exists(dataset_file):
            with open(dataset_file, 'r') as json_file:
                data = json.load(json_file)
        else:
            data = []

        # Load saved pose images and store their landmarks and filenames.
        for entry in data:
            filename = f'assets/{entry["nama"]}.jpg'
            image = cv2.imread(filename)
            if image is None:
                # Image missing or unreadable: skip rather than crash.
                continue
            image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            results = pose.process(image_rgb)
            if results.pose_landmarks:
                landmarks = np.array(
                    [[lm.x, lm.y, lm.z]
                     for lm in results.pose_landmarks.landmark])
                saved_landmarks.append(landmarks)
                saved_filenames.append(filename)
|
|
|
|
|
|
|
|
def extract_data():
    """Read dataset.json and return its columns.

    Returns:
        tuple: (nama, ket, l) where ``nama`` is the list of pose names,
        ``ket`` the list of their descriptions, and ``l`` the entry count.
        A missing dataset.json yields ([], [], 0).
    """
    dataset_file = 'dataset.json'

    # Treat an absent file as an empty dataset.
    data = []
    if os.path.exists(dataset_file):
        with open(dataset_file, 'r') as json_file:
            data = json.load(json_file)

    # Split the records into parallel name/description lists.
    nama = [item['nama'] for item in data]
    ket = [item['ket'] for item in data]

    return nama, ket, len(data)
|
|
|
|
|
|
|
|
@app.route('/')
def index():
    """Render the main page.

    Re-runs load_saved_data() on every visit so the landmark caches are
    repopulated from the current dataset.json before the video feed starts.
    """
    load_saved_data()
    return render_template('index.html')
|
|
|
|
|
|
def gen():
    """Webcam MJPEG generator backing the '/video_feed' route.

    For every captured frame: run MediaPipe Pose, draw the skeleton,
    compare the detected landmarks against every cached saved pose using
    cosine similarity, and — when the best match exceeds 95% — write that
    pose's 'ket' description into the_data.json (only when it changed, to
    avoid rewriting the file every frame). Below the threshold the stored
    'ket' is reset to None. Yields multipart/x-mixed-replace JPEG frames.

    Fix: the capture is released in a ``finally`` block. The original only
    released it after the read loop broke, so a client disconnect (which
    raises GeneratorExit at the ``yield``) leaked the webcam handle.
    """
    with mp.solutions.pose.Pose(min_detection_confidence=0.7,
                                min_tracking_confidence=0.7,
                                model_complexity=2) as pose:
        # Shared state polled by the '/cari' endpoint.
        the_data_file = 'the_data.json'
        with open(the_data_file, 'r') as json_file:
            the_data = json.load(json_file)

        # Pose metadata used to map a matched name back to its description.
        dataset_file = 'dataset.json'
        with open(dataset_file, 'r') as json_file:
            data = json.load(json_file)

        cap = cv2.VideoCapture(0)
        try:
            while True:
                ret, frame = cap.read()
                if not ret:
                    break

                # Detection runs on RGB; drawing happens back in BGR.
                image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                results = pose.process(image)
                image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                mp.solutions.drawing_utils.draw_landmarks(
                    image, results.pose_landmarks,
                    mp.solutions.pose.POSE_CONNECTIONS)

                # Compare the pose with the cached saved-pose landmarks.
                highest_similarity = -1
                most_similar_filename = ""
                if results.pose_landmarks:
                    detected_landmarks = np.array(
                        [[lm.x, lm.y, lm.z]
                         for lm in results.pose_landmarks.landmark])

                    for i, saved_landmark in enumerate(saved_landmarks):
                        # Cosine similarity between flattened landmark arrays.
                        similarity = (
                            np.dot(detected_landmarks.flatten(),
                                   saved_landmark.flatten())
                            / (np.linalg.norm(detected_landmarks)
                               * np.linalg.norm(saved_landmark)))
                        if similarity > highest_similarity:
                            highest_similarity = similarity
                            most_similar_filename = saved_filenames[i]

                print(highest_similarity)
                print(most_similar_filename)

                similarity_percentage = round(highest_similarity * 100, 2)

                # Accept the match only above 95% similarity.
                if similarity_percentage > 95:
                    # assets/<nama>.jpg -> <nama>
                    most_similar_filename = os.path.splitext(
                        os.path.basename(most_similar_filename))[0]
                    # Look up the description ('ket') by matched name.
                    for item in data:
                        if item['nama'] == most_similar_filename:
                            ket = item['ket']
                            break
                    # Persist only when the value actually changed.
                    if the_data['ket'] != ket:
                        the_data['ket'] = ket
                        with open(the_data_file, 'w') as json_file:
                            json.dump(the_data, json_file, indent=4)

                    text = (f"Most Similar: {most_similar_filename}"
                            f" - Similarity: {similarity_percentage}%")
                    cv2.putText(image, text, (10, image.shape[0] - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                                (0, 255, 0), 2)
                else:
                    # No confident match: clear the stored description,
                    # again only writing the file when it changes.
                    if the_data['ket'] is not None:
                        the_data['ket'] = None
                        with open(the_data_file, 'w') as json_file:
                            json.dump(the_data, json_file, indent=4)

                # Encode the annotated image as a JPEG frame.
                ret, jpeg = cv2.imencode('.jpg', image)
                if not ret:
                    # Skip frames that fail to encode instead of yielding
                    # garbage (the original ignored this return value).
                    continue
                frame = jpeg.tobytes()
                yield (b'--frame\r\n'
                       b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
        finally:
            # Always release the camera, even on client disconnect.
            cap.release()
|
|
|
|
@app.route('/video_feed')
def video_feed():
    """Stream the recognition webcam feed as a multipart MJPEG response."""
    boundary_mimetype = 'multipart/x-mixed-replace; boundary=frame'
    return Response(gen(), mimetype=boundary_mimetype)
|
|
|
|
|
|
@app.route('/images_add')
def images_add():
    """Render the page that shows the capture feed for a new pose image."""
    return render_template('images_add.html')
|
|
|
|
def generate_frames_add():
    """Webcam MJPEG generator backing the '/video_feed_add' route.

    Streams the annotated webcam feed for about 10 seconds, then saves the
    last annotated frame as assets/<nama>.jpg — where <nama> is the most
    recently appended dataset.json entry — and stops.

    Fixes over the original:
    - an empty dataset no longer raises IndexError (the stream just ends);
    - the "unique filename" loop spun forever when the file already
      existed (its counter increment was commented out, so the name never
      changed) — the image is now simply overwritten, which matches the
      assets/<nama>.jpg naming the rest of the app looks images up by;
    - the capture is released in a ``finally`` block so a client
      disconnect does not leak the webcam handle.
    """
    # Read the most recently saved entry from dataset.json.
    dataset_file = 'dataset.json'
    if os.path.exists(dataset_file):
        with open(dataset_file, 'r') as json_file:
            data = json.load(json_file)
    else:
        data = []

    if not data:
        # Nothing has been added yet -> nothing to capture for.
        return

    # This page is opened right after a POST to /add, so the entry to
    # photograph is the last one.
    nama = data[-1]['nama']
    ket = data[-1]['ket']
    print(nama)
    print(ket)

    with mp.solutions.pose.Pose(min_detection_confidence=0.7,
                                min_tracking_confidence=0.7,
                                model_complexity=2) as pose:
        cap = cv2.VideoCapture(0)
        start_time = time.time()
        try:
            while True:
                ret, frame = cap.read()
                if not ret:
                    break

                # Run detection on RGB (marked read-only for MediaPipe),
                # then draw the skeleton back onto the BGR frame.
                image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                image.flags.writeable = False
                results = pose.process(image)
                image.flags.writeable = True
                image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                mp.solutions.drawing_utils.draw_landmarks(
                    image, results.pose_landmarks,
                    mp.solutions.pose.POSE_CONNECTIONS)

                ret, buffer = cv2.imencode('.jpg', image)
                if ret:
                    frame = buffer.tobytes()
                    yield (b'--frame\r\n'
                           b'Content-Type: image/jpeg\r\n\r\n'
                           + frame + b'\r\n')

                # After ~10 seconds, save the current annotated frame
                # (overwriting any previous capture for this name) and stop.
                if time.time() - start_time > 10:
                    filename = f'assets/{nama}.jpg'
                    cv2.imwrite(filename, image)
                    print(f'Saved pose dataset image: {filename}')
                    break
        finally:
            # Always release the camera, even on client disconnect.
            cap.release()
|
|
|
|
@app.route('/video_feed_add')
def video_feed_add():
    """Stream the pose-capture webcam feed as a multipart MJPEG response."""
    return Response(generate_frames_add(), mimetype='multipart/x-mixed-replace; boundary=frame')
|
|
|
|
|
|
@app.route('/add', methods=['GET', 'POST','DELETE'])
def add():
    """Manage the pose dataset (dataset.json + assets images).

    GET    -> render add.html with the current entries.
    POST   -> append {'nama', 'ket'} from the form; returns JSON status.
    DELETE -> remove the entry at ?index=N and its assets image.

    Fix: DELETE no longer crashes with FileNotFoundError when the entry's
    image was never captured — the file is only removed if it exists.
    """
    dataset_file = 'dataset.json'

    if request.method == 'POST':
        nama = request.form['nama']
        ket = request.form['ket']

        # Read the dataset.json file (missing file == empty dataset).
        if os.path.exists(dataset_file):
            with open(dataset_file, 'r') as json_file:
                data = json.load(json_file)
        else:
            data = []

        print(data)

        # Add the new entry and persist the whole dataset.
        data.append({'nama': nama, 'ket': ket})
        with open(dataset_file, 'w') as json_file:
            json.dump(data, json_file, indent=4)

        return jsonify({'status': 'OK', 'nama': nama, 'ket': ket})

    elif request.method == 'DELETE':
        # Index of the entry to delete, from the query string.
        index = request.args.get('index')

        if os.path.exists(dataset_file):
            with open(dataset_file, 'r') as json_file:
                data = json.load(json_file)
        else:
            data = []

        # Delete the entry's captured image, if one was ever saved.
        nama = data[int(index)]['nama']
        image = f'assets/{nama}.jpg'
        if os.path.exists(image):
            os.remove(image)

        # Remove the entry and persist the remaining dataset.
        del data[int(index)]
        with open(dataset_file, 'w') as json_file:
            json.dump(data, json_file, indent=4)

        return jsonify({'status': 'OK'})

    else:
        nama, ket, l = extract_data()
        return render_template('add.html', nama=nama, ket=ket, l=l)
|
|
|
|
@app.route('/cari', methods=['GET'])
def cari():
    """Return the currently matched pose description ('ket') as JSON.

    Polled by the frontend; the value is maintained by gen() in
    the_data.json.
    """
    with open('the_data.json', 'r') as json_file:
        the_data = json.load(json_file)
    return jsonify({'ket': the_data['ket']})
|
|
|
|
if __name__ == '__main__':
    # Development server only — debug=True enables the interactive
    # debugger and must not be used in production.
    app.run(debug=True)
|