first commit

kicap committed 2024-02-22 01:28:19 +08:00
commit fd09fc6bc2
8 changed files with 709 additions and 0 deletions

5
.gitignore vendored Normal file

@@ -0,0 +1,5 @@
/.ipynb_checkpoints/
/.vscode/
/env/
/video*
/inference/


@@ -0,0 +1,474 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "7b423cf2-b549-4aa3-9b8a-11d016aace3b",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/var/folders/9r/fngx7sv11bl1k4rvtyflv1pw0000gn/T/ipykernel_52043/172184081.py:3: DeprecationWarning: \n",
"Pyarrow will become a required dependency of pandas in the next major release of pandas (pandas 3.0),\n",
"(to allow more performant data types, such as the Arrow string type, and better interoperability with other libraries)\n",
"but was not found to be installed on your system.\n",
"If this would cause problems for you,\n",
"please provide us feedback at https://github.com/pandas-dev/pandas/issues/54466\n",
" \n",
" import pandas as pd\n"
]
}
],
"source": [
"import numpy as np\n",
"import cv2\n",
"import pandas as pd\n",
"import matplotlib.pyplot as plt"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "387b1580-2cff-48b0-9854-29428ef033b8",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"372.0 25.0 2560 1440\n"
]
}
],
"source": [
"cap = cv2.VideoCapture('video3.mp4')\n",
"frames_count, fps, width, height = cap.get(cv2.CAP_PROP_FRAME_COUNT), cap.get(cv2.CAP_PROP_FPS), cap.get(\n",
" cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n",
"width = int(width)\n",
"height = int(height)\n",
"print(frames_count, fps, width, height)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "0589d5a1-1fec-4e1e-ac2f-e64396d0d539",
"metadata": {},
"outputs": [],
"source": [
"# creates a pandas data frame with the number of rows the same length as frame count\n",
"df = pd.DataFrame(index=range(int(frames_count)))\n",
"df.index.name = \"Frames\"\n"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "1943e0e6-7175-4e1a-a795-d3f2b64b559a",
"metadata": {},
"outputs": [],
"source": [
"framenumber = 0 # keeps track of current frame\n",
"carscrossedup = 0 # keeps track of cars that crossed up\n",
"carscrosseddown = 0 # keeps track of cars that crossed down\n",
"carids = [] # blank list to add car ids\n",
"caridscrossed = [] # blank list to add car ids that have crossed\n",
"totalcars = 0 # keeps track of total cars\n",
"fgbg = cv2.createBackgroundSubtractorMOG2() # create background subtractor"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "0fb78ee3-ae0f-47d4-9adb-ca7dc0871a3f",
"metadata": {},
"outputs": [],
"source": [
"# information to start saving a video file\n",
"ret, frame = cap.read() # import image\n",
"ratio = .5 # resize ratio\n",
"image = cv2.resize(frame, (0, 0), None, ratio, ratio) # resize image\n",
"width2, height2, channels = image.shape\n",
"video = cv2.VideoWriter('traffic_counter.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), fps, (height2, width2), 1)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "17b207f1-ca8f-4f15-9da1-ba84e71c7f3d",
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'fgmask_resized' is not defined",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[6], line 272\u001b[0m\n\u001b[1;32m 266\u001b[0m cv2\u001b[38;5;241m.\u001b[39mmoveWindow(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcountours\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;241m0\u001b[39m, \u001b[38;5;241m0\u001b[39m)\n\u001b[1;32m 268\u001b[0m \u001b[38;5;66;03m# cv2.imshow(\"fgmask\", fgmask)\u001b[39;00m\n\u001b[1;32m 269\u001b[0m \u001b[38;5;66;03m# cv2.moveWindow(\"fgmask\", int(width * ratio), 0)\u001b[39;00m\n\u001b[1;32m 270\u001b[0m \n\u001b[1;32m 271\u001b[0m \u001b[38;5;66;03m# Concatenate the original frame and the resized fgmask horizontally\u001b[39;00m\n\u001b[0;32m--> 272\u001b[0m concatenated_image \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39mconcatenate((frame, \u001b[43mfgmask_resized\u001b[49m), axis\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1\u001b[39m)\n\u001b[1;32m 274\u001b[0m \u001b[38;5;66;03m# Display the concatenated image\u001b[39;00m\n\u001b[1;32m 275\u001b[0m plt\u001b[38;5;241m.\u001b[39mimshow(cv2\u001b[38;5;241m.\u001b[39mcvtColor(concatenated_image, cv2\u001b[38;5;241m.\u001b[39mCOLOR_BGR2RGB))\n",
"\u001b[0;31mNameError\u001b[0m: name 'fgmask_resized' is not defined"
]
}
],
"source": [
"while True:\n",
"\n",
" ret, frame = cap.read() # import image\n",
"\n",
" if ret: # if there is a frame continue with code\n",
"\n",
" image = cv2.resize(frame, (0, 0), None, ratio, ratio) # resize image\n",
"\n",
" gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # converts image to gray\n",
"\n",
" fgmask = fgbg.apply(gray) # uses the background subtraction\n",
"\n",
" # applies different thresholds to fgmask to try and isolate cars\n",
" # just have to keep playing around with settings until cars are easily identifiable\n",
" kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)) # kernel to apply to the morphology\n",
" closing = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)\n",
" opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel)\n",
" dilation = cv2.dilate(opening, kernel)\n",
" retvalbin, bins = cv2.threshold(dilation, 220, 255, cv2.THRESH_BINARY) # removes the shadows\n",
"\n",
" # creates contours\n",
" contours, hierarchy = cv2.findContours(bins, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n",
"\n",
" # use convex hull to create polygon around contours\n",
" hull = [cv2.convexHull(c) for c in contours]\n",
"\n",
" # draw contours\n",
" cv2.drawContours(image, hull, -1, (0, 255, 0), 3)\n",
"\n",
" # line created to stop counting contours, needed as cars in distance become one big contour\n",
" lineypos = 250\n",
" cv2.line(image, (0, lineypos), (width, lineypos), (255, 0, 0), 5)\n",
"\n",
" # line y position created to count contours\n",
" lineypos2 = 150\n",
" cv2.line(image, (0, lineypos2), (width, lineypos2), (0, 255, 0), 5)\n",
"\n",
" # min area for contours in case a bunch of small noise contours are created\n",
" minarea = 300\n",
"\n",
" # max area for contours, can be quite large for buses\n",
" maxarea = 50000\n",
"\n",
" # vectors for the x and y locations of contour centroids in current frame\n",
" cxx = np.zeros(len(contours))\n",
" cyy = np.zeros(len(contours))\n",
"\n",
" for i in range(len(contours)): # cycles through all contours in current frame\n",
"\n",
" if hierarchy[0, i, 3] == -1: # using hierarchy to only count parent contours (contours not within others)\n",
"\n",
" area = cv2.contourArea(contours[i]) # area of contour\n",
"\n",
" if minarea < area < maxarea: # area threshold for contour\n",
"\n",
" # calculating centroids of contours\n",
" cnt = contours[i]\n",
" M = cv2.moments(cnt)\n",
" cx = int(M['m10'] / M['m00'])\n",
" cy = int(M['m01'] / M['m00'])\n",
"\n",
" if cy > lineypos: # filters out contours that are above line (y starts at top)\n",
"\n",
" # gets bounding points of contour to create rectangle\n",
" # x,y is top left corner and w,h is width and height\n",
" x, y, w, h = cv2.boundingRect(cnt)\n",
"\n",
" # creates a rectangle around contour\n",
" cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)\n",
"\n",
" # Prints centroid text in order to double check later on\n",
" cv2.putText(image, str(cx) + \",\" + str(cy), (cx + 10, cy + 10), cv2.FONT_HERSHEY_SIMPLEX,\n",
" .3, (0, 0, 255), 1)\n",
"\n",
" cv2.drawMarker(image, (cx, cy), (0, 0, 255), cv2.MARKER_STAR, markerSize=5, thickness=1,\n",
" line_type=cv2.LINE_AA)\n",
"\n",
" # adds centroids that passed previous criteria to centroid list\n",
" cxx[i] = cx\n",
" cyy[i] = cy\n",
"\n",
" # eliminates zero entries (centroids that were not added)\n",
" cxx = cxx[cxx != 0]\n",
" cyy = cyy[cyy != 0]\n",
"\n",
" # empty list to later check which centroid indices were added to dataframe\n",
" minx_index2 = []\n",
" miny_index2 = []\n",
"\n",
" # maximum allowable radius for current frame centroid to be considered the same centroid from previous frame\n",
" maxrad = 25\n",
"\n",
" # The section below keeps track of the centroids and assigns them to old carids or new carids\n",
"\n",
" if len(cxx): # if there are centroids in the specified area\n",
"\n",
" if not carids: # if carids is empty\n",
"\n",
" for i in range(len(cxx)): # loops through all centroids\n",
"\n",
" carids.append(i) # adds a car id to the empty list carids\n",
" df[str(carids[i])] = \"\" # adds a column to the dataframe corresponding to a carid\n",
"\n",
" # assigns the centroid values to the current frame (row) and carid (column)\n",
" df.at[int(framenumber), str(carids[i])] = [cxx[i], cyy[i]]\n",
"\n",
" totalcars = carids[i] + 1 # adds one count to total cars\n",
"\n",
" else: # if there are already car ids\n",
"\n",
" dx = np.zeros((len(cxx), len(carids))) # new arrays to calculate deltas\n",
" dy = np.zeros((len(cyy), len(carids))) # new arrays to calculate deltas\n",
"\n",
" for i in range(len(cxx)): # loops through all centroids\n",
"\n",
" for j in range(len(carids)): # loops through all recorded car ids\n",
"\n",
" # acquires centroid from previous frame for specific carid\n",
" oldcxcy = df.iloc[int(framenumber - 1)][str(carids[j])]\n",
"\n",
" # acquires current frame centroid that doesn't necessarily line up with previous frame centroid\n",
" curcxcy = np.array([cxx[i], cyy[i]])\n",
"\n",
" if not oldcxcy: # checks if old centroid is empty in case car leaves screen and new car shows\n",
"\n",
" continue # continue to next carid\n",
"\n",
" else: # calculate centroid deltas to compare to current frame position later\n",
"\n",
" dx[i, j] = oldcxcy[0] - curcxcy[0]\n",
" dy[i, j] = oldcxcy[1] - curcxcy[1]\n",
"\n",
" for j in range(len(carids)): # loops through all current car ids\n",
"\n",
" sumsum = np.abs(dx[:, j]) + np.abs(dy[:, j]) # sums the deltas wrt to car ids\n",
"\n",
" # finds which index carid had the min difference and this is true index\n",
" correctindextrue = np.argmin(np.abs(sumsum))\n",
" minx_index = correctindextrue\n",
" miny_index = correctindextrue\n",
"\n",
" # acquires delta values of the minimum deltas in order to check if it is within radius later on\n",
" mindx = dx[minx_index, j]\n",
" mindy = dy[miny_index, j]\n",
"\n",
" if mindx == 0 and mindy == 0 and np.all(dx[:, j] == 0) and np.all(dy[:, j] == 0):\n",
" # checks if minimum value is 0 and checks if all deltas are zero since this is empty set\n",
" # delta could be zero if centroid didn't move\n",
"\n",
" continue # continue to next carid\n",
"\n",
" else:\n",
"\n",
" # if delta values are less than maximum radius then add that centroid to that specific carid\n",
" if np.abs(mindx) < maxrad and np.abs(mindy) < maxrad:\n",
"\n",
" # adds centroid to corresponding previously existing carid\n",
" df.at[int(framenumber), str(carids[j])] = [cxx[minx_index], cyy[miny_index]]\n",
" minx_index2.append(minx_index) # appends all the indices that were added to previous carids\n",
" miny_index2.append(miny_index)\n",
"\n",
" for i in range(len(cxx)): # loops through all centroids\n",
"\n",
" # if centroid is not in the minindex list then another car needs to be added\n",
" if i not in minx_index2 and miny_index2:\n",
"\n",
" df[str(totalcars)] = \"\" # create another column with total cars\n",
" totalcars = totalcars + 1 # adds another total car the count\n",
" t = totalcars - 1 # t is a placeholder to total cars\n",
" carids.append(t) # append to list of car ids\n",
" df.at[int(framenumber), str(t)] = [cxx[i], cyy[i]] # add centroid to the new car id\n",
"\n",
" elif curcxcy[0] and not oldcxcy and not minx_index2 and not miny_index2:\n",
" # checks if current centroid exists but previous centroid does not\n",
" # new car to be added in case minx_index2 is empty\n",
"\n",
" df[str(totalcars)] = \"\" # create another column with total cars\n",
" totalcars = totalcars + 1 # adds another total car the count\n",
" t = totalcars - 1 # t is a placeholder to total cars\n",
" carids.append(t) # append to list of car ids\n",
" df.at[int(framenumber), str(t)] = [cxx[i], cyy[i]] # add centroid to the new car id\n",
"\n",
" # The section below labels the centroids on screen\n",
"\n",
" currentcars = 0 # current cars on screen\n",
" currentcarsindex = [] # current cars on screen carid index\n",
"\n",
" for i in range(len(carids)): # loops through all carids\n",
"\n",
" if df.at[int(framenumber), str(carids[i])] != '':\n",
" # checks the current frame to see which car ids are active\n",
" # by checking in centroid exists on current frame for certain car id\n",
"\n",
" currentcars = currentcars + 1 # adds another to current cars on screen\n",
" currentcarsindex.append(i) # adds car ids to current cars on screen\n",
"\n",
" for i in range(currentcars): # loops through all current car ids on screen\n",
"\n",
" # grabs centroid of certain carid for current frame\n",
" curcent = df.iloc[int(framenumber)][str(carids[currentcarsindex[i]])]\n",
"\n",
" # grabs centroid of certain carid for previous frame\n",
" oldcent = df.iloc[int(framenumber - 1)][str(carids[currentcarsindex[i]])]\n",
"\n",
" if curcent: # if there is a current centroid\n",
"\n",
" # On-screen text for current centroid\n",
" cv2.putText(image, \"Centroid\" + str(curcent[0]) + \",\" + str(curcent[1]),\n",
" (int(curcent[0]), int(curcent[1])), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 255, 255), 2)\n",
"\n",
" cv2.putText(image, \"ID:\" + str(carids[currentcarsindex[i]]), (int(curcent[0]), int(curcent[1] - 15)),\n",
" cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 255, 255), 2)\n",
"\n",
" cv2.drawMarker(image, (int(curcent[0]), int(curcent[1])), (0, 0, 255), cv2.MARKER_STAR, markerSize=5,\n",
" thickness=1, line_type=cv2.LINE_AA)\n",
"\n",
" if oldcent: # checks if old centroid exists\n",
" # adds radius box from previous centroid to current centroid for visualization\n",
" xstart = oldcent[0] - maxrad\n",
" ystart = oldcent[1] - maxrad\n",
" xwidth = oldcent[0] + maxrad\n",
" yheight = oldcent[1] + maxrad\n",
" cv2.rectangle(image, (int(xstart), int(ystart)), (int(xwidth), int(yheight)), (0, 125, 0), 1)\n",
"\n",
" # checks if old centroid is on or below line and curcent is on or above line\n",
" # to count cars and that car hasn't been counted yet\n",
" if oldcent[1] >= lineypos2 and curcent[1] <= lineypos2 and carids[\n",
" currentcarsindex[i]] not in caridscrossed:\n",
"\n",
" carscrossedup = carscrossedup + 1\n",
" cv2.line(image, (0, lineypos2), (width, lineypos2), (0, 0, 255), 5)\n",
" caridscrossed.append(\n",
" currentcarsindex[i]) # adds car id to list of count cars to prevent double counting\n",
"\n",
" # checks if old centroid is on or above line and curcent is on or below line\n",
" # to count cars and that car hasn't been counted yet\n",
" elif oldcent[1] <= lineypos2 and curcent[1] >= lineypos2 and carids[\n",
" currentcarsindex[i]] not in caridscrossed:\n",
"\n",
" carscrosseddown = carscrosseddown + 1\n",
" cv2.line(image, (0, lineypos2), (width, lineypos2), (0, 0, 125), 5)\n",
" caridscrossed.append(currentcarsindex[i])\n",
"\n",
" # Top left hand corner on-screen text\n",
" cv2.rectangle(image, (0, 0), (250, 100), (255, 0, 0), -1) # background rectangle for on-screen text\n",
"\n",
" cv2.putText(image, \"Cars in Area: \" + str(currentcars), (0, 15), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 170, 0), 1)\n",
"\n",
" cv2.putText(image, \"Cars Crossed Up: \" + str(carscrossedup), (0, 30), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 170, 0),\n",
" 1)\n",
"\n",
" cv2.putText(image, \"Cars Crossed Down: \" + str(carscrosseddown), (0, 45), cv2.FONT_HERSHEY_SIMPLEX, .5,\n",
" (0, 170, 0), 1)\n",
"\n",
" cv2.putText(image, \"Total Cars Detected: \" + str(len(carids)), (0, 60), cv2.FONT_HERSHEY_SIMPLEX, .5,\n",
" (0, 170, 0), 1)\n",
"\n",
" cv2.putText(image, \"Frame: \" + str(framenumber) + ' of ' + str(frames_count), (0, 75), cv2.FONT_HERSHEY_SIMPLEX,\n",
" .5, (0, 170, 0), 1)\n",
"\n",
" cv2.putText(image, 'Time: ' + str(round(framenumber / fps, 2)) + ' sec of ' + str(round(frames_count / fps, 2))\n",
" + ' sec', (0, 90), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 170, 0), 1)\n",
"\n",
" # displays images and transformations\n",
" cv2.imshow(\"countours\", image)\n",
" cv2.moveWindow(\"countours\", 0, 0)\n",
"\n",
" # cv2.imshow(\"fgmask\", fgmask)\n",
" # cv2.moveWindow(\"fgmask\", int(width * ratio), 0)\n",
"\n",
" # Resize fgmask to match the dimensions of the original frame\n",
" fgmask_resized = cv2.resize(fgmask, (width, height)) \n",
"\n",
" # Concatenate the original frame and the resized fgmask horizontally\n",
" concatenated_image = np.concatenate((frame, fgmask_resized), axis=1)\n",
" \n",
" # Display the concatenated image\n",
" plt.imshow(cv2.cvtColor(concatenated_image, cv2.COLOR_BGR2RGB))\n",
" plt.axis('off') # Turn off axis\n",
" plt.show()\n",
"\n",
" cv2.imshow(\"closing\", closing)\n",
" cv2.moveWindow(\"closing\", width * 2, 0)\n",
"\n",
" cv2.imshow(\"opening\", opening)\n",
" cv2.moveWindow(\"opening\", 0, int(height * ratio))\n",
"\n",
" cv2.imshow(\"dilation\", dilation)\n",
" cv2.moveWindow(\"dilation\", int(width * ratio), int(height * ratio))\n",
"\n",
" cv2.imshow(\"binary\", bins)\n",
" cv2.moveWindow(\"binary\", width * 2, int(height * ratio))\n",
"\n",
" # adds to dataframe frame number\n",
" df.at[int(framenumber), \"framenumber\"] = framenumber\n",
"\n",
" k = cv2.waitKey(1)\n",
" if k == ord('q'):\n",
" break\n",
" if framenumber == frames_count - 1: # if this is the last frame, exit\n",
" break\n",
" else:\n",
" framenumber += 1 # increase the frame number\n",
"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "43926339-1ac7-46f6-9b1b-7f01b523226e",
"metadata": {},
"outputs": [],
"source": [
"# Done processing the video, release resources\n",
"cap.release()\n",
"video.release()\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "db30c949-f81f-4cdc-ac3c-1a24d64fcfb9",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "a9be421d-8df7-4655-bd01-783b4e39e7a9",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
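The notebook's tracker boils down to a nearest-centroid match: each centroid detected in the current frame is assigned to the existing car id whose previous-frame centroid is closest in L1 distance, and only accepted if it falls within maxrad pixels on each axis. Below is a minimal standalone sketch of that assignment step; match_centroids and the tracks/detections structures are hypothetical stand-ins for the notebook's dataframe bookkeeping.

import numpy as np

def match_centroids(tracks, detections, maxrad=25):
    """Assign detected centroids to existing track ids by minimum L1 delta.

    tracks:     dict of track_id -> (x, y) centroid from the previous frame
    detections: list of (x, y) centroids found in the current frame
    Returns (matches, unmatched): matches maps track_id -> (x, y);
    unmatched lists detections that should start new tracks.
    """
    matches, used = {}, set()
    for tid, (ox, oy) in tracks.items():
        # L1 distance from this track's old centroid to every new detection
        deltas = [abs(ox - cx) + abs(oy - cy) for cx, cy in detections]
        if not deltas:
            break
        best = int(np.argmin(deltas))
        bx, by = detections[best]
        # per-axis radius check, mirroring the notebook's maxrad test
        if best not in used and abs(ox - bx) < maxrad and abs(oy - by) < maxrad:
            matches[tid] = detections[best]
            used.add(best)
    unmatched = [d for i, d in enumerate(detections) if i not in used]
    return matches, unmatched

# example: one track at (100, 100); a nearby and a far detection
print(match_centroids({0: (100, 100)}, [(105, 103), (400, 400)]))

Keeping the radius check per-axis mirrors the notebook's np.abs(mindx) < maxrad and np.abs(mindy) < maxrad test; a single combined distance threshold would behave slightly differently near the corners of the search box.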

101
main.py Normal file

@@ -0,0 +1,101 @@
# from email.policy import default
import cv2
import imutils  # used to resize the frame while keeping its aspect ratio
import numpy as np
from ultralytics import YOLO
from collections import defaultdict

color = (0, 255, 0)
color_red = (0, 0, 255)
thickness = 2
font = cv2.FONT_HERSHEY_SIMPLEX
font_scale = 0.5

# video_path = "inference/test.mp4"
video_path = "video.mp4"
model_path = "models/yolov8n.pt"

cap = cv2.VideoCapture(video_path)  # open the video for reading
model = YOLO(model_path)  # load the model

# writer settings for saving the output video
width = 1280
height = 720
fourcc = cv2.VideoWriter_fourcc(*'XVID')
writer = cv2.VideoWriter("video.avi", fourcc, 20.0, (width, height))

vehicle_ids = [2, 3, 5, 7]  # ids of the object classes we want to track, taken from coco-classes.txt
track_history = defaultdict(lambda: [])  # tails used to determine the direction each vehicle is heading
up = {}
down = {}
threshold = 450

while True:  # try to read the next frame
    ret, frame = cap.read()  # read the video
    if not ret:
        break
    frame = imutils.resize(frame, width=1280)  # frame used for processing
    # model.track runs YOLOv8's tracking module; persist keeps object identities across frames,
    # verbose=False suppresses per-frame terminal output
    results = model.track(frame, persist=True, verbose=False)[0]
    # track_ids = results.boxes.id.int().cpu().tolist()  # ids as ints, moved to cpu as a list
    bboxes = np.array(results.boxes.data.tolist(), dtype="int")  # xyxy
    cv2.line(frame, (0, threshold), (1280, threshold), color, thickness)  # vehicles are counted when they cross this reference line
    cv2.putText(frame, "Reference Line", (620, 445), font, 0.7, color_red, thickness)
    for box in bboxes:
        x1, y1, x2, y2, track_id, score, class_id = box  # x1, y1 = top-left corner of the box; x2, y2 = bottom-right corner
        cx = int((x1 + x2) / 2)  # compute the centre
        cy = int((y1 + y2) / 2)
        if class_id in vehicle_ids:
            class_name = results.names[int(class_id)].upper()  # class name; cast to int so the id is not a float
            # print("BBoxes: ", (x1, y1, x2, y2))
            # print("Class: ", class_name)
            # print("ID: ", track_id)
            track = track_history[track_id]
            track.append((cx, cy))  # store the coordinates
            if len(track) > 20:  # if the tail is longer than 20 points, trim it
                track.pop(0)
            points = np.hstack(track).astype("int32").reshape(-1, 1, 2)  # stack horizontally; reshape(-1, 1, 2) makes a 3-D array
            cv2.polylines(frame, [points], isClosed=False, color=color, thickness=thickness)
            cv2.rectangle(frame, (x1, y1), (x2, y2), color, thickness)
            text = "ID: {} {}".format(track_id, class_name)
            cv2.putText(frame, text, (x1, y1 - 5), font, font_scale, color, thickness)
            if cy > threshold - 5 and cy < threshold + 5 and cx < 670:
                down[track_id] = x1, y1, x2, y2
            if cy > threshold - 5 and cy < threshold + 5 and cx > 670:
                up[track_id] = x1, y1, x2, y2
    print("UP Dictionary Keys:", list(up.keys()))
    print("DOWN Dictionary Keys:", list(down.keys()))
    up_text = "Outgoing:{}".format(len(list(up.keys())))
    down_text = "Incoming:{}".format(len(list(down.keys())))
    cv2.putText(frame, up_text, (1150, threshold - 5), font, 0.8, color_red, thickness)
    cv2.putText(frame, down_text, (0, threshold - 5), font, 0.8, color_red, thickness)
    writer.write(frame)
    # display the frame
    cv2.imshow("Test", frame)  # first parameter is the window name
    if cv2.waitKey(10) & 0xFF == ord("q"):  # break when q is pressed
        break

cap.release()  # the video must be released after exiting
writer.release()
cv2.destroyAllWindows()
print("[INFO].. The video was successfully processed/saved!")

128
main2.py Normal file

@@ -0,0 +1,128 @@
import cv2  # OpenCV for image and video processing
import imutils  # imutils to simplify image manipulation
import numpy as np  # numpy for numerical operations
from ultralytics import YOLO  # YOLO class from ultralytics for object detection
from collections import defaultdict  # defaultdict from collections for a dictionary with default values

color = (0, 255, 0)  # green, for drawing objects and lines
color_red = (0, 0, 255)  # red, for text and lines
thickness = 2  # line thickness for drawing objects and lines
font = cv2.FONT_HERSHEY_SIMPLEX  # font for text
font_scale = 0.5  # font scale for text

# path of the video to process
video_path = "video.mp4"
model_path = "models/yolov8n.pt"

# open the video
cap = cv2.VideoCapture(video_path)

# initialize the YOLO model with pretrained weights
model = YOLO(model_path)

# video frame size
width = 1280
height = 720

# initialize the writer that stores the processed video
fourcc = cv2.VideoWriter_fourcc(*'XVID')
writer = cv2.VideoWriter("video.avi", fourcc, 20.0, (width, height))

# ids of the vehicle classes to track, based on the classes in coco-classes.txt
vehicle_ids = [2, 3, 5, 7]

# dictionary storing the movement history of every detected vehicle
track_history = defaultdict(lambda: [])
up = {}  # dictionary for vehicles that cross the line on the right side
down = {}  # dictionary for vehicles that cross the line on the left side
threshold = 400  # y position of the counting line

# helper that returns the centre point of an object's bounding box
def pega_centro(x, y, w, h):
    x1 = int(w / 2)
    y1 = int(h / 2)
    cx = x + x1
    cy = y + y1
    return cx, cy

# background subtraction using MOG2
subtracao = cv2.createBackgroundSubtractorMOG2()

# main loop that reads each frame of the video
while True:
    ret, frame = cap.read()  # read a frame from the video
    if not ret:  # exit the loop when no frame can be read
        break
    try:
        frame_color = frame.copy()  # copy of the frame for processing and drawing in color
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert the frame to grayscale
        frame_gray = cv2.cvtColor(frame_gray, cv2.COLOR_GRAY2BGR)  # convert back to BGR for the grayscale display
        frame_bw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # grayscale image for the black-and-white mode
        # detect objects with the YOLO model
        results = model.track(frame_color, persist=True, verbose=False)[0]
        bboxes = np.array(results.boxes.data.tolist(), dtype="int")  # bounding-box coordinates of the detected objects
        # draw the boundary line used to count vehicles that cross it
        cv2.line(frame_color, (0, threshold), (1280, threshold), color, thickness)
        cv2.putText(frame_color, "Road Divider", (620, 445), font, 0.7, color_red, thickness)
        # loop over every detected object
        for box in bboxes:
            x1, y1, x2, y2, track_id, score, class_id = box  # unpack the coordinates and other information
            cx = int((x1 + x2) / 2)  # x coordinate of the object centre
            cy = int((y1 + y2) / 2)  # y coordinate of the object centre
            if class_id in vehicle_ids:  # check whether the object is a vehicle we want to track
                class_name = results.names[int(class_id)].upper()  # class name of the object
                track = track_history[track_id]  # movement history of this object id
                track.append((cx, cy))  # add the centre point to the history
                if len(track) > 20:  # limit the history length so it does not grow too long
                    track.pop(0)  # drop the oldest element when the history exceeds the limit
                points = np.hstack(track).astype("int32").reshape(-1, 1, 2)  # convert the history to the format needed for drawing
                cv2.polylines(frame_color, [points], isClosed=False, color=color, thickness=thickness)  # draw the movement trail
                cv2.rectangle(frame_color, (x1, y1), (x2, y2), color, thickness)  # draw the object's bounding box
                text = "ID: {} {}".format(track_id, class_name)  # text with the object id and class name
                cv2.putText(frame_color, text, (x1, y1 - 5), font, font_scale, color, thickness)  # show the text above the object
                if cy > threshold - 5 and cy < threshold + 5 and cx < 670:  # object crosses the line on the left side
                    down[track_id] = x1, y1, x2, y2  # store the object that crossed on the left
                if cy > threshold - 5 and cy < threshold + 5 and cx > 670:  # object crosses the line on the right side
                    up[track_id] = x1, y1, x2, y2  # store the object that crossed on the right
        up_text = "Right:{}".format(len(list(up.keys())))  # text with the number of vehicles that crossed on the right
        down_text = "Left:{}".format(len(list(down.keys())))  # text with the number of vehicles that crossed on the left
        cv2.putText(frame_color, up_text, (1150, threshold - 5), font, 0.8, color_red, thickness)  # show the right-side count
        cv2.putText(frame_color, down_text, (0, threshold - 5), font, 0.8, color_red, thickness)  # show the left-side count
        # background subtraction and contour detection
        grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert the frame to grayscale
        blur = cv2.GaussianBlur(grey, (3, 3), 5)  # reduce noise with a Gaussian blur
        img_sub = subtracao.apply(blur)  # apply background subtraction
        dilat = cv2.dilate(img_sub, np.ones((5, 5)))  # dilate to thicken the objects
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))  # kernel for the morphology operations
        dilatada = cv2.morphologyEx(dilat, cv2.MORPH_CLOSE, kernel)  # closing to fill small holes in the objects
        dilatada = cv2.morphologyEx(dilatada, cv2.MORPH_CLOSE, kernel)  # additional closing
        contorno, h = cv2.findContours(dilatada, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # detect object contours
        writer.write(frame_color)  # save the processed frame
        # show the images
        cv2.imshow("Color", frame_color)  # color mode
        cv2.imshow("Grayscale", frame_gray)  # grayscale mode
        cv2.imshow("Detection", dilatada)  # dilated background-subtraction mode
        if cv2.waitKey(10) & 0xFF == ord("q"):  # exit when the q key is pressed
            break
    except Exception as e:
        print("An error occurred:", str(e))  # catch and print any error that occurs
        continue  # continue with the next iteration

cap.release()  # free the resources once video processing is done
writer.release()  # close the writer object
cv2.destroyAllWindows()  # close all windows opened by OpenCV
print("[INFO].. The video was successfully processed/saved!")  # message shown when processing is finished

BIN
models/yolov8n.pt Normal file

Binary file not shown.

1
readme.md Normal file

@@ -0,0 +1 @@
# A traffic counter using YOLO, OpenCV and Python

0
torch Normal file

0
traffic_counter.avi Normal file