first commit
This commit is contained in:
3
.gitignore
vendored
Normal file
3
.gitignore
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
__pycache__/
|
||||
env/
|
||||
|
||||
425
.ipynb_checkpoints/my_main-checkpoint.ipynb
Normal file
425
.ipynb_checkpoints/my_main-checkpoint.ipynb
Normal file
File diff suppressed because one or more lines are too long
BIN
both hand.jpg
Normal file
BIN
both hand.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 2.8 MiB |
50
main.py
Normal file
50
main.py
Normal file
@ -0,0 +1,50 @@
|
||||
import cv2
import numpy as np
import mediapipe as mp
import matplotlib.pyplot as plt
from google.protobuf.json_format import MessageToDict

# First step is to initialize the Hands class and store it in a variable.
mp_hands = mp.solutions.hands

# Second step is to create the detector that will hold the landmark points.
# static_image_mode=True treats every input as an independent still image.
hands = mp_hands.Hands(static_image_mode=True, max_num_hands=2,
                       min_detection_confidence=0.3)

# Last step is to set up the drawing helper for hand landmarks on the image.
mp_drawing = mp.solutions.drawing_utils

# Reading the sample image on which we will perform the detection.
# sample_img = cv2.imread('coba.jpg')
sample_img = cv2.imread('coba2.png')

# Figure size: 10 - height; 10 - width.
plt.figure(figsize=[10, 10])

# Display the sample image as output (disabled).
# plt.title("Sample Image");plt.axis('off');plt.imshow(sample_img[:,:,::-1]);plt.show()

# MediaPipe expects RGB input; OpenCV loads BGR, so convert before processing.
results = hands.process(cv2.cvtColor(sample_img, cv2.COLOR_BGR2RGB))

if results.multi_hand_landmarks:
    for hand_no, hand_landmarks in enumerate(results.multi_hand_landmarks):
        print(f'HAND NUMBER: {hand_no+1}')
        print('-----------------------')

        # Print the first two landmarks (indices 0 and 1) of this hand.
        for i in range(2):
            print(f'{mp_hands.HandLandmark(i).name}:')
            print(f'{hand_landmarks.landmark[mp_hands.HandLandmark(i).value]}')

    # Report whether each detected hand is a Right or Left hand.
    # BUG FIX: multi_handedness is a repeated (list-like) protobuf field, so
    # it must be iterated; the original passed the whole container to
    # MessageToDict, which expects a single protobuf message and fails.
    for handedness in results.multi_handedness:
        label = MessageToDict(handedness)['classification'][0]['label']
        if label == 'Left':
            print("ini telapak tangan kiri")
        if label == 'Right':
            print("ini telapak tangan kanan")
||||
33
main1.py
Normal file
33
main1.py
Normal file
@ -0,0 +1,33 @@
|
||||
import cv2
import numpy as np

# Read the input image.
image = cv2.imread( "croped.png")
# BUG FIX: cv2.imread returns None (no exception) when the file is missing
# or unreadable; fail fast with a clear message instead of crashing later
# inside cv2.resize with an opaque OpenCV assertion.
if image is None:
    raise FileNotFoundError("croped.png could not be read")

# Resize to a fixed width x height.
width = 350
height = 450
dim = (width, height)
image = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)

# Convert to gray.
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Perform binary thresholding: pixels above 200 become 255, the rest 0.
kernel_size = 3
ret, thresh = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)

# Find contours. OpenCV 3 returns 3 values and OpenCV 4 returns 2, so pick
# the contour list from whichever tuple shape findContours produced.
cnts = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]

# Draw the contours; "radius" is actually the line thickness argument of
# cv2.drawContours (kept under its original name).
radius = 2
color = (30, 255, 50)
cv2.drawContours(image, cnts, -1, color, radius)
cv2.imshow("", image)
cv2.waitKey(0)
597
my_main.ipynb
Normal file
597
my_main.ipynb
Normal file
File diff suppressed because one or more lines are too long
BIN
requirements.txt
Normal file
BIN
requirements.txt
Normal file
Binary file not shown.
83
server.py
Normal file
83
server.py
Normal file
@ -0,0 +1,83 @@
|
||||
# FastAPI palm-reading service: module-level setup.
# Initializes MediaPipe hand detection once at import time and configures
# the FastAPI application with permissive CORS.
import os
import shutil
import time
from fastapi import FastAPI, File, UploadFile,HTTPException ,Request
# from fastapi.responses import FileResponse
from fastapi.middleware.cors import CORSMiddleware

import cv2
import numpy as np
import mediapipe as mp
import matplotlib.pyplot as plt

# First step is to initialize the Hands class and store it in a variable.
mp_hands = mp.solutions.hands

# Second step: create the detector that will hold the landmark points.
# static_image_mode=True treats each request's photo as an independent image.
hands = mp_hands.Hands(static_image_mode=True, max_num_hands=2, min_detection_confidence=0.3)

# Last step: drawing helper for hand landmarks (currently unused by handlers).
mp_drawing = mp.solutions.drawing_utils

app = FastAPI()

# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# maximally permissive CORS — confirm this is intended for production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
||||
@app.get("/")
def read_root():
    """Health-check endpoint confirming the API is reachable."""
    greeting = {"Hello": "World"}
    return greeting
|
||||
|
||||
@app.post("/")
async def upload_image(image: UploadFile = File(...)):
    """Validate an uploaded palm photo.

    Accepts a JPEG/PNG upload (multipart field name "image"), runs MediaPipe
    hand detection on it, and checks that the photo contains exactly one
    LEFT palm. Raises HTTPException on every validation failure.

    NOTE(review): status_code=404 is used for all validation errors; 400/415/
    422 would be more conventional, but the codes are kept so existing
    clients keep working.

    BUG FIX (rename): the function was named `image`, shadowed by its own
    parameter and later overwritten at module level by the /ramalan handler
    of the same name. The route is registered by the decorator, so renaming
    is invisible to HTTP clients.
    """
    content_type = image.content_type
    if content_type != "image/jpeg" and content_type != "image/png":
        raise HTTPException(status_code=404, detail="Fail bukan foto")

    image_name = image.filename
    # Make sure the scratch directory exists before writing into it.
    os.makedirs("temp", exist_ok=True)
    temp_path = "temp/" + image_name
    try:
        with open(temp_path, "wb") as buffer:
            shutil.copyfileobj(image.file, buffer)
        # BUG FIX: the original called time.sleep(2) here, which blocks the
        # whole asyncio event loop for two seconds per request. The copy
        # above is synchronous and already complete, so no wait is needed.
        sample_img = cv2.imread(temp_path)
        if sample_img is None:
            # Saved bytes were not a decodable image.
            raise HTTPException(status_code=404, detail="Fail bukan foto")
        sample_img = cv2.flip(sample_img, 1)
        results = hands.process(cv2.cvtColor(sample_img, cv2.COLOR_BGR2RGB))
        if not results.multi_hand_landmarks:
            raise HTTPException(status_code=404, detail="Foto harus ada telapak tangan")

        if len(results.multi_handedness) > 1:
            raise HTTPException(status_code=404, detail="Hanya satu telapak tangan yang bisa diramal")

        tangan = results.multi_handedness[0].classification[0].label
        if tangan == 'Right':
            raise HTTPException(status_code=404, detail="Hanya Tangan Kiri Yang Bisa Diramal")

        # Compare thumb-tip vs pinky-tip x positions in the mirrored image;
        # presumably thumb > pinky means the left palm faces the camera —
        # TODO confirm this geometric assumption against test photos.
        thumb = None
        pinky = None
        for hand_no, hand_landmarks in enumerate(results.multi_hand_landmarks):
            thumb = hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_TIP].x
            pinky = hand_landmarks.landmark[mp_hands.HandLandmark.PINKY_TIP].x
        if not thumb > pinky:
            raise HTTPException(status_code=404, detail="Sila foto telapak tangan kiri anda")
    finally:
        # BUG FIX: the original removed the temp file only on the success
        # path, leaking a file for every rejected upload.
        if os.path.exists(temp_path):
            os.remove(temp_path)

    return {"message": "lakukan ramalan"}
|
||||
|
||||
|
||||
@app.post("/ramalan")
async def ramalan(request: Request):
    """Placeholder endpoint for the actual palm reading.

    Expects multipart form data containing an 'image' field; currently only
    prints the uploaded file object and returns a placeholder filename.

    BUG FIX (rename): this handler was also named `image`, silently
    overwriting the upload handler's module-level name. The route path comes
    from the decorator, so the rename does not affect clients.
    """
    body = await request.form()
    if body:
        print(body['image'].file)
        return {"filename": "image.filename"}
    else:
        raise HTTPException(status_code=404, detail="error")
|
||||
BIN
tangan 1.jpg
Normal file
BIN
tangan 1.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 2.2 MiB |
BIN
tangan kanan1.jpg
Normal file
BIN
tangan kanan1.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 2.7 MiB |
BIN
tangan kanan2.jpg
Normal file
BIN
tangan kanan2.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 2.7 MiB |
BIN
tanganku.jpg
Normal file
BIN
tanganku.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 2.5 MiB |
BIN
tanganriska.jpg
Normal file
BIN
tanganriska.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 2.2 MiB |
Reference in New Issue
Block a user