added new video processing notebooks

kicap 2024-05-06 08:26:54 +08:00
parent 28eebbe33a
commit 9b30890b15
13 changed files with 2479 additions and 427 deletions

BIN
.DS_Store vendored

Binary file not shown.


View File

@@ -52,13 +52,6 @@
"text": [
"selesai deklarasi variable\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"OpenCV: Couldn't read video stream from file \"vivideo2.mp4\"\n"
]
}
],
"source": [
@@ -70,7 +63,7 @@
"font_scale = 0.5 # Skala font untuk teks\n",
"\n",
"# Path video yang akan diproses\n",
"video_path = \"video/video2.mp4\"\n",
"video_path = \"video/videonya.mp4\"\n",
"model_path = \"models/yolov8n.pt\"\n",
"\n",
"# Buka video\n",
@@ -83,11 +76,11 @@
"height = 720\n",
"\n",
"# Inisialisasi objek untuk menyimpan video hasil pemrosesan\n",
"fourcc = cv2.VideoWriter_fourcc(*'XVID')\n",
"writer = cv2.VideoWriter(\"video.avi\", fourcc, 20.0, (width, height))\n",
"# fourcc = cv2.VideoWriter_fourcc(*'XVID')\n",
"# writer = cv2.VideoWriter(\"video.avi\", fourcc, 20.0, (width, height))\n",
"\n",
"# Id objek kendaraan yang ingin dilacak berdasarkan kelas di file coco-classes.txt\n",
"vehicle_ids = [2, 3, 5, 7]\n",
"vehicle_ids = [1,2, 3, 5, 6,7]\n",
"# Dictionary untuk menyimpan sejarah pergerakan setiap kendaraan yang terdeteksi\n",
"track_history = defaultdict(lambda: [])\n",
"\n",
@@ -168,10 +161,40 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": null,
"id": "705c59f4-fba5-498d-9e51-d002a0dc3226",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n"
]
}
],
"source": [
"# Loop utama untuk membaca setiap frame dari video\n",
"while True:\n",
@@ -235,8 +258,9 @@
" dilatada = cv2.morphologyEx(dilatada, cv2.MORPH_CLOSE, kernel) # Operasi closing tambahan\n",
" contorno, h = cv2.findContours(dilatada, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # Deteksi kontur objek\n",
"\n",
" writer.write(frame_color) # Menyimpan frame hasil pemrosesan\n",
" #writer.write(frame_color) # Menyimpan frame hasil pemrosesan\n",
" # Menampilkan gambar\n",
" cv2.imshow(\"Input\",frame) # inputan video\n",
" cv2.imshow(\"Warna\", frame_color) # Tampilkan mode warna\n",
" cv2.imshow(\"Grayscale\", frame_gray) # Tampilkan mode grayscale\n",
" cv2.imshow(\"Detectar\", dilatada) # Tampilkan mode Detectar dilatada\n",
@@ -258,7 +282,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": null,
"id": "15c70b25-1b92-43d8-9167-ebb88b2a8df7",
"metadata": {},
"outputs": [],

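The repeated "Terjadi kesalahan: not enough values to unpack (expected 7, got 6)" outputs above ("Terjadi kesalahan" means "an error occurred") come from unpacking seven values per box: with ultralytics' `track()`, each row of `results.boxes.data` is (x1, y1, x2, y2, track_id, score, class_id) only when the tracker has assigned ids; on frames without ids the track_id column is absent and the row has six values. A minimal sketch of a guard for that case, using hypothetical toy rows:

```python
# hypothetical rows shaped like results.boxes.data.tolist()
tracked = [[100, 420, 180, 470, 1, 0.91, 2]]  # 7 values: includes a track id
untracked = [[600, 410, 700, 460, 0.55, 2]]   # 6 values: no track id assigned

def parse_boxes(rows):
    """Yield (x1, y1, x2, y2, track_id, score, class_id), skipping untracked rows."""
    for row in rows:
        if len(row) != 7:  # no track id on this row; skip instead of raising
            continue
        x1, y1, x2, y2, track_id, score, class_id = row
        yield int(x1), int(y1), int(x2), int(y2), int(track_id), float(score), int(class_id)

print(list(parse_boxes(tracked)))    # [(100, 420, 180, 470, 1, 0.91, 2)]
print(list(parse_boxes(untracked)))  # []
```
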
View File

@@ -0,0 +1,6 @@
{
"cells": [],
"metadata": {},
"nbformat": 4,
"nbformat_minor": 5
}

384
Pengujian baru.ipynb Normal file
View File

@@ -0,0 +1,384 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "93b77493-0a01-4421-b2a0-380991740ff6",
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import cv2\n",
"import pandas as pd\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "80b4ff7c-1f3b-4e1d-896c-d88c0966f33e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"6868.0 30.03550936578534 848 478\n"
]
}
],
"source": [
"cap = cv2.VideoCapture('video/video.mp4')\n",
"# mendapatkan jumlah frame, fps, lebar, dan tinggi dari video\n",
"frames_count, fps, width, height = cap.get(cv2.CAP_PROP_FRAME_COUNT), cap.get(cv2.CAP_PROP_FPS), cap.get(\n",
" cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n",
"width = int(width)\n",
"height = int(height)\n",
"print(frames_count, fps, width, height)\n",
"\n",
"# membuat sebuah frame pandas dengan jumlah baris yang sama dengan jumlah frame\n",
"df = pd.DataFrame(index=range(int(frames_count)))\n",
"df.index.name = \"Frame\" # menandai kolom frame\n",
"\n",
"framenumber = 0 # mencatat frame saat ini\n",
"carscrossedup = 0 # mencatat mobil yang melintasi jalan ke atas\n",
"carscrosseddown = 0 # mencatat mobil yang melintasi jalan ke bawah\n",
"carids = [] # daftar kosong untuk menyimpan ID mobil\n",
"caridscrossed = [] # daftar kosong untuk menyimpan ID mobil yang sudah melintasi\n",
"totalcars = 0 # mencatat jumlah total mobil\n",
"\n",
"fgbg = cv2.createBackgroundSubtractorMOG2() # membuat pengambil gambar latar belakang\n",
"\n",
"# informasi untuk mulai menyimpan video\n",
"ret, frame = cap.read() # mengimpor gambar\n",
"ratio = .5 # rasio ukuran pengubahan ukuran\n",
"image = cv2.resize(frame, (0, 0), None, ratio, ratio) # mengubah ukuran gambar\n",
"width2, height2, channels = image.shape\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "5c8d5645-9df8-457c-88d7-2d3bbc0fade9",
"metadata": {},
"outputs": [
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[3], line 265\u001b[0m\n\u001b[1;32m 260\u001b[0m \u001b[38;5;66;03m# video.write(image) # save the current image to video file from earlier\u001b[39;00m\n\u001b[1;32m 261\u001b[0m \n\u001b[1;32m 262\u001b[0m \u001b[38;5;66;03m# adds to framecount\u001b[39;00m\n\u001b[1;32m 263\u001b[0m framenumber \u001b[38;5;241m=\u001b[39m framenumber \u001b[38;5;241m+\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[0;32m--> 265\u001b[0m k \u001b[38;5;241m=\u001b[39m \u001b[43mcv2\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mwaitKey\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mint\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m1000\u001b[39;49m\u001b[38;5;241;43m/\u001b[39;49m\u001b[43mfps\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;241m&\u001b[39m \u001b[38;5;241m0xff\u001b[39m \u001b[38;5;66;03m# int(1000/fps) is normal speed since waitkey is in ms\u001b[39;00m\n\u001b[1;32m 266\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m k \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m27\u001b[39m:\n\u001b[1;32m 267\u001b[0m \u001b[38;5;28;01mbreak\u001b[39;00m\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
],
"source": [
"while True:\n",
"\n",
" ret, frame = cap.read() # mengimpor gambar\n",
"\n",
" if ret: # jika ada frame lanjutkan dengan kode\n",
"\n",
" image = cv2.resize(frame, (0, 0), None, ratio, ratio) # mengubah ukuran gambar\n",
"\n",
" gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # mengubah gambar ke hitam putih\n",
"\n",
" fgmask = fgbg.apply(gray) # menggunakan pengambil gambar latar belakang\n",
"\n",
" # menerapkan berbagai batasan pada fgmask untuk menyaring mobil\n",
" # perlu bermain dengan setelan tersebut hingga mobil dapat diidentifikasi dengan mudah\n",
" kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)) # kernel untuk dilakukan pada morphology\n",
" closing = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)\n",
" opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel)\n",
" dilation = cv2.dilate(opening, kernel)\n",
" retvalbin, bins = cv2.threshold(dilation, 220, 255, cv2.THRESH_BINARY) # menghapus shadow\n",
"\n",
" # membuat kontur\n",
" contours, hierarchy = cv2.findContours(bins, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]\n",
"\n",
" # menggunakan konveks hull untuk membuat poligon di sekitar kontur\n",
" hull = [cv2.convexHull(c) for c in contours]\n",
"\n",
" # menggambar kontur\n",
" cv2.drawContours(image, hull, -1, (0, 255, 0), 3)\n",
"\n",
" # garis dibuat untuk menghentikan menghitung kontur, perlu dilakukan karena mobil yang jauh akan menjadi satu kontur besar\n",
" lineypos = 225\n",
" cv2.line(image, (0, lineypos), (width, lineypos), (255, 0, 0), 5)\n",
"\n",
" # garis y pos dibuat untuk menghitung kontur\n",
" lineypos2 = 250\n",
" cv2.line(image, (0, lineypos2), (width, lineypos2), (0, 255, 0), 5)\n",
"\n",
" # minimum area untuk kontur\n",
" minarea = 300\n",
"\n",
" # maksimum area untuk kontur\n",
" maxarea = 50000\n",
"\n",
" # vektor untuk x dan y lokasi centroid di frame saat ini\n",
" cxx = np.zeros(len(contours))\n",
" cyy = np.zeros(len(contours))\n",
"\n",
" for i in range(len(contours)): # mengulangi seluruh kontur dalam frame saat ini\n",
"\n",
" if hierarchy[0, i, 3] == -1: # menggunakan hierarchy untuk hanya menghitung kontur induk (tidak termasuk dalam kontur lain)\n",
"\n",
" area = cv2.contourArea(contours[i]) # menghitung area kontur\n",
"\n",
" if minarea < area < maxarea: # area threshold untuk kontur\n",
"\n",
" # menghitung centroid dari kontur\n",
" cnt = contours[i]\n",
" M = cv2.moments(cnt)\n",
" cx = int(M['m10'] / M['m00'])\n",
" cy = int(M['m01'] / M['m00'])\n",
"\n",
" if cy > lineypos: # menghapus kontur yang di atas garis\n",
"\n",
" # mengambil titik teratas, kiri, dan lebar dari kontur untuk membuat kotak\n",
" # x,y adalah kiri atas dan w,h adalah lebar dan tinggi\n",
" x, y, w, h = cv2.boundingRect(cnt)\n",
"\n",
" # membuat kotak di sekitar kontur\n",
" cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)\n",
"\n",
" # Menuliskan teks centroid untuk memastikan kembali nanti\n",
" cv2.putText(image, str(cx) + \",\" + str(cy), (cx + 10, cy + 10), cv2.FONT_HERSHEY_SIMPLEX,\n",
" .3, (0, 0, 255), 1)\n",
"\n",
" cv2.drawMarker(image, (cx, cy), (0, 0, 255), cv2.MARKER_STAR, markerSize=5, thickness=1,\n",
" line_type=cv2.LINE_AA)\n",
"\n",
" # menambahkan centroid yang lulus pada kriteria ke dalam list centroid\n",
" cxx[i] = cx\n",
" cyy[i] = cy\n",
"\n",
" # menghapus entri 0 dari list centroid\n",
" cxx = cxx[cxx != 0]\n",
" cyy = cyy[cyy != 0]\n",
"\n",
" # list kosong untuk nanti menyimpan indices centroid yang di tambahkan ke dataframe\n",
" minx_index2 = []\n",
" miny_index2 = []\n",
"\n",
" # batas maksimum untuk radius dari centroid dari frame saat ini untuk dianggap sama dengan centroid dari frame sebelumnya\n",
" maxrad = 25\n",
"\n",
" # Bagian ini mengelola centroid dan menetapkan mereka untuk carid lama atau carid baru\n",
"\n",
" if len(cxx): # jika ada centroid dalam area yang ditentukan\n",
"\n",
" if not carids: # jika carids kosong\n",
"\n",
" for i in range(len(cxx)): # melalui semua centroid\n",
"\n",
" carids.append(i) # menambahkan car id ke list carids kosong\n",
" df[str(carids[i])] = \"\" # menambahkan kolom ke dataframe sesuai carid\n",
"\n",
" # menetapkan nilai centroid ke frame (baris) dan carid (kolom) yang sesuai\n",
" df.at[int(framenumber), str(carids[i])] = [cxx[i], cyy[i]]\n",
"\n",
" totalcars = carids[i] + 1 # menambahkan count car\n",
"\n",
" else: # jika carids sudah ada\n",
"\n",
" dx = np.zeros((len(cxx), len(carids))) # array baru untuk menghitung deltas\n",
" dy = np.zeros((len(cyy), len(carids))) # array baru untuk menghitung deltas\n",
"\n",
" for i in range(len(cxx)): # melalui semua centroid\n",
"\n",
" for j in range(len(carids)): # melalui semua car id yang sudah ada\n",
"\n",
" # mengambil centroid dari frame sebelumnya untuk carid tertentu\n",
" oldcxcy = df.iloc[int(framenumber - 1)][str(carids[j])]\n",
"\n",
" # mengambil centroid dari frame saat ini yang tidak selalu sesuai dengan centroid frame sebelumnya\n",
" curcxcy = np.array([cxx[i], cyy[i]])\n",
"\n",
" if not oldcxcy: # periksa apakah centroid sebelumnya kosong jika arah sudah tidak ada di layar\n",
"\n",
" continue # lanjutkan ke carid berikutnya\n",
"\n",
" else: # hitung delta centroid untuk membandingkan dengan centroid frame saat ini\n",
"\n",
" dx[i, j] = oldcxcy[0] - curcxcy[0]\n",
" dy[i, j] = oldcxcy[1] - curcxcy[1]\n",
"\n",
" for j in range(len(carids)): # melalui semua car id saat ini\n",
"\n",
" sumsum = np.abs(dx[:, j]) + np.abs(dy[:, j]) # menghitung delta wrt car id\n",
"\n",
" # mengambil indeks centroid yang memiliki nilai delta minimum dan ini indeks benar\n",
" correctindextrue = np.argmin(np.abs(sumsum))\n",
" minx_index = correctindextrue\n",
" miny_index = correctindextrue\n",
"\n",
" # mengambil delta nilai minimum untuk dibandingkan dengan radius\n",
" mindx = dx[minx_index, j]\n",
" mindy = dy[miny_index, j]\n",
"\n",
" if mindx == 0 and mindy == 0 and np.all(dx[:, j] == 0) and np.all(dy[:, j] == 0):\n",
" # periksa apakah minimum nilai adalah 0 dan semua delta adalah nol\n",
" # delta dapat berupa nol jika centroid tidak bergerak\n",
"\n",
" continue # lanjutkan ke carid berikutnya\n",
"\n",
" else:\n",
"\n",
" # jika delta nilai adalah kurang dari maksimal radius maka tambahkan centroid ke carid sebelumnya\n",
" if np.abs(mindx) < maxrad and np.abs(mindy) < maxrad:\n",
"\n",
" # tambahkan centroid ke carid yang sudah ada\n",
" df.at[int(framenumber), str(carids[j])] = [cxx[minx_index], cyy[miny_index]]\n",
" minx_index2.append(minx_index) # tambahkan semua indeks yang ditambahkan ke carid ke list\n",
" miny_index2.append(miny_index)\n",
"\n",
" currentcars = 0 # current cars on screen\n",
" currentcarsindex = [] # current cars on screen carid index\n",
"\n",
" for i in range(len(carids)): # loops through all carids\n",
"\n",
" if df.at[int(framenumber), str(carids[i])] != '':\n",
" # checks the current frame to see which car ids are active\n",
" # by checking in centroid exists on current frame for certain car id\n",
"\n",
" currentcars = currentcars + 1 # adds another to current cars on screen\n",
" currentcarsindex.append(i) # adds car ids to current cars on screen\n",
"\n",
" for i in range(currentcars): # loops through all current car ids on screen\n",
"\n",
" # grabs centroid of certain carid for current frame\n",
" curcent = df.iloc[int(framenumber)][str(carids[currentcarsindex[i]])]\n",
"\n",
" # grabs centroid of certain carid for previous frame\n",
" oldcent = df.iloc[int(framenumber - 1)][str(carids[currentcarsindex[i]])]\n",
"\n",
" if curcent: # if there is a current centroid\n",
"\n",
" # On-screen text for current centroid\n",
" cv2.putText(image, \"Centroid\" + str(curcent[0]) + \",\" + str(curcent[1]),\n",
" (int(curcent[0]), int(curcent[1])), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 255, 255), 2)\n",
"\n",
" cv2.putText(image, \"ID:\" + str(carids[currentcarsindex[i]]), (int(curcent[0]), int(curcent[1] - 15)),\n",
" cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 255, 255), 2)\n",
"\n",
" cv2.drawMarker(image, (int(curcent[0]), int(curcent[1])), (0, 0, 255), cv2.MARKER_STAR, markerSize=5,\n",
" thickness=1, line_type=cv2.LINE_AA)\n",
"\n",
" if oldcent: # checks if old centroid exists\n",
" # adds radius box from previous centroid to current centroid for visualization\n",
" xstart = oldcent[0] - maxrad\n",
" ystart = oldcent[1] - maxrad\n",
" xwidth = oldcent[0] + maxrad\n",
" yheight = oldcent[1] + maxrad\n",
" cv2.rectangle(image, (int(xstart), int(ystart)), (int(xwidth), int(yheight)), (0, 125, 0), 1)\n",
"\n",
" # checks if old centroid is on or below line and curcent is on or above line\n",
" # to count cars and that car hasn't been counted yet\n",
" if oldcent[1] >= lineypos2 and curcent[1] <= lineypos2 and carids[\n",
" currentcarsindex[i]] not in caridscrossed:\n",
"\n",
" carscrossedup = carscrossedup + 1\n",
" cv2.line(image, (0, lineypos2), (width, lineypos2), (0, 0, 255), 5)\n",
" caridscrossed.append(\n",
" currentcarsindex[i]) # adds car id to list of count cars to prevent double counting\n",
"\n",
" # checks if old centroid is on or above line and curcent is on or below line\n",
" # to count cars and that car hasn't been counted yet\n",
" elif oldcent[1] <= lineypos2 and curcent[1] >= lineypos2 and carids[\n",
" currentcarsindex[i]] not in caridscrossed:\n",
"\n",
" carscrosseddown = carscrosseddown + 1\n",
" cv2.line(image, (0, lineypos2), (width, lineypos2), (0, 0, 125), 5)\n",
" caridscrossed.append(currentcarsindex[i])\n",
"\n",
" # Top left hand corner on-screen text\n",
" cv2.rectangle(image, (0, 0), (250, 100), (255, 0, 0), -1) # background rectangle for on-screen text\n",
"\n",
" cv2.putText(image, \"Cars in Area: \" + str(currentcars), (0, 15), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 170, 0), 1)\n",
"\n",
" cv2.putText(image, \"Cars Crossed Up: \" + str(carscrossedup), (0, 30), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 170, 0),\n",
" 1)\n",
"\n",
" cv2.putText(image, \"Cars Crossed Down: \" + str(carscrosseddown), (0, 45), cv2.FONT_HERSHEY_SIMPLEX, .5,\n",
" (0, 170, 0), 1)\n",
"\n",
" cv2.putText(image, \"Total Cars Detected: \" + str(len(carids)), (0, 60), cv2.FONT_HERSHEY_SIMPLEX, .5,\n",
" (0, 170, 0), 1)\n",
"\n",
" cv2.putText(image, \"Frame: \" + str(framenumber) + ' of ' + str(frames_count), (0, 75), cv2.FONT_HERSHEY_SIMPLEX,\n",
" .5, (0, 170, 0), 1)\n",
"\n",
" cv2.putText(image, 'Time: ' + str(round(framenumber / fps, 2)) + ' sec of ' + str(round(frames_count / fps, 2))\n",
" + ' sec', (0, 90), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 170, 0), 1)\n",
"\n",
" # displays images and transformations\n",
" cv2.imshow(\"countours\", image)\n",
" cv2.moveWindow(\"countours\", 0, 0)\n",
"\n",
" cv2.imshow(\"fgmask\", fgmask)\n",
" cv2.moveWindow(\"fgmask\", int(width * ratio), 0)\n",
"\n",
" cv2.imshow(\"closing\", closing)\n",
" cv2.moveWindow(\"closing\", width, 0)\n",
"\n",
" cv2.imshow(\"opening\", opening)\n",
" cv2.moveWindow(\"opening\", 0, int(height * ratio))\n",
"\n",
" cv2.imshow(\"dilation\", dilation)\n",
" cv2.moveWindow(\"dilation\", int(width * ratio), int(height * ratio))\n",
"\n",
" cv2.imshow(\"binary\", bins)\n",
" cv2.moveWindow(\"binary\", width, int(height * ratio))\n",
"\n",
" # video.write(image) # save the current image to video file from earlier\n",
"\n",
" # adds to framecount\n",
" framenumber = framenumber + 1\n",
"\n",
" k = cv2.waitKey(int(1000/fps)) & 0xff # int(1000/fps) is normal speed since waitkey is in ms\n",
" if k == 27:\n",
" break\n",
"\n",
" else: # if video is finished then break loop\n",
"\n",
" break\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "af84e6b4-dd55-447e-ac8c-a02a5f6f34be",
"metadata": {},
"outputs": [],
"source": [
"cap.release()\n",
"cv2.destroyAllWindows()\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
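
The heart of the notebook above is the frame-to-frame association: each car id keeps the centroid it had in the previous frame, every current centroid is compared against it by x/y deltas, and the nearest candidate is accepted only when both deltas stay within `maxrad`. A minimal standalone sketch of that rule, using hypothetical toy centroids instead of the notebook's DataFrame bookkeeping:

```python
import numpy as np

def match_centroids(prev, curr, maxrad=25):
    """Nearest-centroid matching, as in the tracking loop above.

    prev: (N, 2) array of previous-frame centroids (one per car id).
    curr: (M, 2) array of current-frame centroids.
    Returns {prev index: curr index} for accepted matches.
    """
    matches = {}
    for j, old in enumerate(prev):
        # sum of |dx| + |dy| from every current centroid to this old centroid
        deltas = np.abs(curr - old).sum(axis=1)
        i = int(np.argmin(deltas))
        dx, dy = np.abs(curr[i] - old)
        if dx < maxrad and dy < maxrad:  # accept only within the search radius
            matches[j] = i
    return matches

# toy example: two cars move slightly, a third appears far away
prev = np.array([[100, 240], [300, 260]])
curr = np.array([[104, 236], [303, 264], [500, 250]])
print(match_centroids(prev, curr))  # {0: 0, 1: 1}; curr index 2 gets a new car id
```

Like the notebook, this is greedy per car id, so two ids can in principle claim the same current centroid; the original code shares that behavior.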

344
Pengujian lama.ipynb Normal file
View File

@@ -0,0 +1,344 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "37fe6724-f5fe-412a-ab9a-6a1df878c308",
"metadata": {},
"source": [
"## Import Library"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "11b66fe3-8d38-4bf9-b9c5-f8bd3213bd55",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Selesai Import Library\n"
]
}
],
"source": [
"import cv2 # Import library OpenCV untuk pengolahan citra dan video\n",
"import imutils # Import library imutils untuk mempermudah manipulasi citra\n",
"import numpy as np # Import library numpy untuk operasi numerik\n",
"from ultralytics import YOLO # Import class YOLO dari library ultralytics untuk deteksi objek\n",
"from collections import defaultdict # Import class defaultdict dari library collections untuk struktur data default dictionary\n",
"\n",
"print(\"Selesai Import Library\")"
]
},
{
"cell_type": "markdown",
"id": "243e5a8f-46c2-4fe1-b174-52a46f0a26ee",
"metadata": {},
"source": [
"## Deklarasi Variable"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "bbeb303b-5683-44cc-a924-0f2481d75528",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"selesai deklarasi variable\n"
]
}
],
"source": [
"color = (0, 255, 0) # Warna hijau untuk penggambaran objek dan garis\n",
"color_red = (0, 0, 255) # Warna merah untuk teks dan garis\n",
"thickness = 2 # Ketebalan garis untuk penggambaran objek dan garis\n",
"\n",
"font = cv2.FONT_HERSHEY_SIMPLEX # Jenis font untuk teks\n",
"font_scale = 0.5 # Skala font untuk teks\n",
"\n",
"# Path video yang akan diproses\n",
"video_path = \"video/videonya.mp4\"\n",
"model_path = \"models/yolov8n.pt\"\n",
"\n",
"# Buka video\n",
"cap = cv2.VideoCapture(video_path)\n",
"# Inisialisasi model YOLO dengan file weight yang telah dilatih sebelumnya\n",
"model = YOLO(model_path)\n",
"\n",
"# Ukuran frame video\n",
"width = 1280\n",
"height = 720\n",
"\n",
"# Inisialisasi objek untuk menyimpan video hasil pemrosesan\n",
"# fourcc = cv2.VideoWriter_fourcc(*'XVID')\n",
"# writer = cv2.VideoWriter(\"video.avi\", fourcc, 20.0, (width, height))\n",
"\n",
"# Id objek kendaraan yang ingin dilacak berdasarkan kelas di file coco-classes.txt\n",
"vehicle_ids = [1,2, 3, 5, 6,7]\n",
"# Dictionary untuk menyimpan sejarah pergerakan setiap kendaraan yang terdeteksi\n",
"track_history = defaultdict(lambda: [])\n",
"\n",
"up = {} # Dictionary untuk kendaraan yang melewati garis atas\n",
"down = {} # Dictionary untuk kendaraan yang melewati garis bawah\n",
"threshold = 400 # Ambang batas garis pemisah kendaraan\n",
"\n",
"print(\"selesai deklarasi variable\")"
]
},
{
"cell_type": "markdown",
"id": "00596875-56e1-445a-bd8b-b2b3a73a411a",
"metadata": {},
"source": [
"### Fungsi untuk mengambil titik tengah dari bounding box objek "
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "ffcffbd1-ad9b-4908-8930-bea2ba6b6ecb",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Selesai membuat fungsi\n"
]
}
],
"source": [
"def pega_centro(x, y, w, h):\n",
" x1 = int(w / 2)\n",
" y1 = int(h / 2)\n",
" cx = x + x1\n",
" cy = y + y1\n",
" return cx, cy\n",
"\n",
"print(\"Selesai membuat fungsi\")"
]
},
{
"cell_type": "markdown",
"id": "9f2e6c12-a70b-49f2-9083-a9c85b04e842",
"metadata": {},
"source": [
"### Background subtraction menggunakan MOG2"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "4b0f68b8-9216-49e6-892e-bbf2282d73b3",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"selesai\n"
]
}
],
"source": [
"subtracao = cv2.createBackgroundSubtractorMOG2()\n",
"print(\"selesai\")"
]
},
{
"cell_type": "markdown",
"id": "0e9ea925-a617-45d3-b50c-273f4ee0163b",
"metadata": {},
"source": [
"## Proses Video "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "705c59f4-fba5-498d-9e51-d002a0dc3226",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n",
"Terjadi kesalahan: not enough values to unpack (expected 7, got 6)\n"
]
}
],
"source": [
"# Loop utama untuk membaca setiap frame dari video\n",
"while True:\n",
" ret, frame = cap.read() # Membaca frame dari video\n",
" if ret == False: # Keluar dari loop jika tidak ada frame yang dapat dibaca\n",
" break\n",
" \n",
" try:\n",
" frame = imutils.resize(frame, width = 1280, height = 720) # ubah frame menjadi tinggi 720 x lebar 1280\n",
" frame_color = frame.copy() # Salin frame ke mode warna untuk pengolahan dan penggambaran\n",
" frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Konversi frame ke citra grayscale\n",
" frame_gray = cv2.cvtColor(frame_gray, cv2.COLOR_GRAY2BGR) # Konversi kembali ke citra BGR untuk tampilan grayscale\n",
" frame_bw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Konversi ke citra grayscale untuk mode black and white\n",
"\n",
" # Deteksi objek menggunakan model YOLO\n",
" results = model.track(frame_color, persist=True, verbose=False)[0]\n",
" bboxes = np.array(results.boxes.data.tolist(), dtype=\"int\") # Koordinat bounding box objek yang terdeteksi\n",
"\n",
" # Gambar garis pembatas untuk menghitung jumlah kendaraan yang melewati garis\n",
" cv2.line(frame_color, (0, threshold), (1280, threshold), color, thickness)\n",
" cv2.putText(frame_color, \"Pembatas Jalan\", (620, 445), font, 0.7, color_red, thickness)\n",
"\n",
" # Loop untuk setiap objek yang terdeteksi\n",
" for box in bboxes:\n",
" x1, y1, x2, y2, track_id, score, class_id = box # Ambil koordinat dan informasi lainnya\n",
" cx = int((x1 + x2) / 2) # Hitung koordinat x pusat objek\n",
" cy = int((y1 + y2) / 2) # Hitung koordinat y pusat objek\n",
" if class_id in vehicle_ids: # Periksa apakah objek merupakan kendaraan yang ingin dilacak\n",
" class_name = results.names[int(class_id)].upper() # Dapatkan nama kelas objek\n",
"\n",
" track = track_history[track_id] # Ambil sejarah pergerakan objek berdasarkan ID\n",
" track.append((cx, cy)) # Tambahkan koordinat pusat objek ke dalam sejarah pergerakan\n",
" if len(track) > 20: # Batasi panjang sejarah pergerakan agar tidak terlalu panjang\n",
" track.pop(0) # Hapus elemen pertama jika sejarah sudah melebihi batas\n",
"\n",
" points = np.hstack(track).astype(\"int32\").reshape(-1, 1, 2) # Konversi sejarah pergerakan ke format yang sesuai untuk penggambaran\n",
" cv2.polylines(frame_color, [points], isClosed=False, color=color, thickness=thickness) # Gambar garis yang merepresentasikan sejarah pergerakan\n",
" cv2.rectangle(frame_color, (x1, y1), (x2, y2), color, thickness) # Gambar bounding box objek\n",
" text = \"ID: {} {}\".format(track_id, class_name) # Buat teks ID objek dan nama kelasnya\n",
" cv2.putText(frame_color, text, (x1, y1 - 5), font, font_scale, color, thickness) # Tampilkan teks di atas objek\n",
"\n",
" if cy > threshold - 5 and cy < threshold + 5 and cx < 670: # Periksa apakah objek melewati garis atas\n",
" down[track_id] = x1, y1, x2, y2 # Simpan informasi objek yang melewati garis atas\n",
"\n",
" if cy > threshold - 5 and cy < threshold + 5 and cx > 670: # Periksa apakah objek melewati garis bawah\n",
" up[track_id] = x1, y1, x2, y2 # Simpan informasi objek yang melewati garis bawah\n",
"\n",
" up_text = \"Kanan:{}\".format(len(list(up.keys()))) # Buat teks jumlah kendaraan yang melewati garis atas\n",
" down_text = \"Kiri:{}\".format(len(list(down.keys()))) # Buat teks jumlah kendaraan yang melewati garis bawah\n",
"\n",
" cv2.putText(frame_color, up_text, (1150, threshold - 5), font, 0.8, color_red, thickness) # Tampilkan teks jumlah kendaraan yang melewati garis atas\n",
" cv2.putText(frame_color, down_text, (0, threshold - 5), font, 0.8, color_red, thickness) # Tampilkan teks jumlah kendaraan yang melewati garis bawah\n",
"\n",
" # Background subtraction dan deteksi kontur\n",
" grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Konversi frame ke citra grayscale\n",
" blur = cv2.GaussianBlur(grey, (3, 3), 5) # Reduksi noise menggunakan Gaussian Blur\n",
" img_sub = subtracao.apply(blur) # Background subtraction\n",
" dilat = cv2.dilate(img_sub, np.ones((5, 5))) # Dilasi untuk meningkatkan ketebalan objek\n",
" kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)) # Kernel untuk operasi morfologi\n",
" dilatada = cv2.morphologyEx(dilat, cv2.MORPH_CLOSE, kernel) # Operasi closing untuk mengisi lubang kecil pada objek\n",
" dilatada = cv2.morphologyEx(dilatada, cv2.MORPH_CLOSE, kernel) # Operasi closing tambahan\n",
" contorno, h = cv2.findContours(dilatada, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # Deteksi kontur objek\n",
"\n",
" #writer.write(frame_color) # Menyimpan frame hasil pemrosesan\n",
" # Menampilkan gambar\n",
" cv2.imshow(\"Input\",frame) # inputan video\n",
" cv2.imshow(\"Warna\", frame_color) # Tampilkan mode warna\n",
" cv2.imshow(\"Grayscale\", frame_gray) # Tampilkan mode grayscale\n",
" cv2.imshow(\"Detectar\", dilatada) # Tampilkan mode Detectar dilatada\n",
" if cv2.waitKey(10) & 0xFF == ord(\"q\"): # Keluar saat tombol q ditekan\n",
" break\n",
"\n",
" except Exception as e:\n",
" print(\"Terjadi kesalahan:\", str(e)) # Tangkap dan tampilkan kesalahan yang terjadi\n",
" continue # Lanjutkan ke iterasi berikutnya\n"
]
},
{
"cell_type": "markdown",
"id": "ae345f06-2af7-4b93-b833-a14cc20f7d64",
"metadata": {},
"source": [
"## Menutup Window OpenCV"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "15c70b25-1b92-43d8-9167-ebb88b2a8df7",
"metadata": {},
"outputs": [],
"source": [
"cap.release() # Bebaskan sumber daya setelah selesai pemrosesan video\n",
"writer.release() # Tutup objek writer\n",
"cv2.destroyAllWindows() # Tutup semua jendela yang dibuka oleh OpenCV"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@ -1,322 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "37fe6724-f5fe-412a-ab9a-6a1df878c308",
"metadata": {},
"source": [
"## Import Library"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "11b66fe3-8d38-4bf9-b9c5-f8bd3213bd55",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Selesai Import Library\n"
]
}
],
"source": [
"import cv2 # Import library OpenCV untuk pengolahan citra dan video\n",
"import imutils # Import library imutils untuk mempermudah manipulasi citra\n",
"import numpy as np # Import library numpy untuk operasi numerik\n",
"from ultralytics import YOLO # Import class YOLO dari library ultralytics untuk deteksi objek\n",
"from collections import defaultdict # Import class defaultdict dari library collections untuk struktur data default dictionary\n",
"\n",
"print(\"Selesai Import Library\")"
]
},
{
"cell_type": "markdown",
"id": "243e5a8f-46c2-4fe1-b174-52a46f0a26ee",
"metadata": {},
"source": [
"## Deklarasi Variable"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "bbeb303b-5683-44cc-a924-0f2481d75528",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"selesai deklarasi variable\n"
]
}
],
"source": [
"color = (0, 255, 0) # Warna hijau untuk penggambaran objek dan garis\n",
"color_red = (0, 0, 255) # Warna merah untuk teks dan garis\n",
"thickness = 2 # Ketebalan garis untuk penggambaran objek dan garis\n",
"\n",
"font = cv2.FONT_HERSHEY_SIMPLEX # Jenis font untuk teks\n",
"font_scale = 0.5 # Skala font untuk teks\n",
"\n",
"# Path video yang akan diproses\n",
"video_path = \"video/video2.mp4\"\n",
"model_path = \"models/yolov8n.pt\"\n",
"\n",
"# Buka video\n",
"cap = cv2.VideoCapture(video_path)\n",
"# Inisialisasi model YOLO dengan file weight yang telah dilatih sebelumnya\n",
"model = YOLO(model_path)\n",
"\n",
"# Ukuran frame video\n",
"width = 1280\n",
"height = 720\n",
"\n",
"# Inisialisasi objek untuk menyimpan video hasil pemrosesan\n",
"fourcc = cv2.VideoWriter_fourcc(*'XVID')\n",
"writer = cv2.VideoWriter(\"video.avi\", fourcc, 20.0, (width, height))\n",
"\n",
"# Id objek kendaraan yang ingin dilacak berdasarkan kelas di file coco-classes.txt\n",
"vehicle_ids = [2, 3, 5, 7]\n",
"# Dictionary untuk menyimpan sejarah pergerakan setiap kendaraan yang terdeteksi\n",
"track_history = defaultdict(lambda: [])\n",
"\n",
"up = {} # Dictionary untuk kendaraan yang melewati garis atas\n",
"down = {} # Dictionary untuk kendaraan yang melewati garis bawah\n",
"threshold = 400 # Ambang batas garis pemisah kendaraan\n",
"\n",
"print(\"selesai deklarasi variable\")"
]
},
{
"cell_type": "markdown",
"id": "00596875-56e1-445a-bd8b-b2b3a73a411a",
"metadata": {},
"source": [
"### Fungsi untuk mengambil titik tengah dari bounding box objek "
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "ffcffbd1-ad9b-4908-8930-bea2ba6b6ecb",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Selesai membuat fungsi\n"
]
}
],
"source": [
"def pega_centro(x, y, w, h):\n",
" x1 = int(w / 2)\n",
" y1 = int(h / 2)\n",
" cx = x + x1\n",
" cy = y + y1\n",
" return cx, cy\n",
"\n",
"print(\"Selesai membuat fungsi\")"
]
},
{
"cell_type": "markdown",
"id": "9f2e6c12-a70b-49f2-9083-a9c85b04e842",
"metadata": {},
"source": [
"### Background subtraction menggunakan MOG2"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "4b0f68b8-9216-49e6-892e-bbf2282d73b3",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"selesai\n"
]
}
],
"source": [
"subtracao = cv2.createBackgroundSubtractorMOG2()\n",
"print(\"selesai\")"
]
},
{
"cell_type": "markdown",
"id": "0e9ea925-a617-45d3-b50c-273f4ee0163b",
"metadata": {},
"source": [
"## Proses Video "
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "705c59f4-fba5-498d-9e51-d002a0dc3226",
"metadata": {},
"outputs": [
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[11], line 15\u001b[0m\n\u001b[1;32m 12\u001b[0m frame_bw \u001b[38;5;241m=\u001b[39m cv2\u001b[38;5;241m.\u001b[39mcvtColor(frame, cv2\u001b[38;5;241m.\u001b[39mCOLOR_BGR2GRAY) \u001b[38;5;66;03m# Konversi ke citra grayscale untuk mode black and white\u001b[39;00m\n\u001b[1;32m 14\u001b[0m \u001b[38;5;66;03m# Deteksi objek menggunakan model YOLO\u001b[39;00m\n\u001b[0;32m---> 15\u001b[0m results \u001b[38;5;241m=\u001b[39m \u001b[43mmodel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtrack\u001b[49m\u001b[43m(\u001b[49m\u001b[43mframe_color\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mpersist\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mverbose\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m)\u001b[49m[\u001b[38;5;241m0\u001b[39m]\n\u001b[1;32m 16\u001b[0m bboxes \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39marray(results\u001b[38;5;241m.\u001b[39mboxes\u001b[38;5;241m.\u001b[39mdata\u001b[38;5;241m.\u001b[39mtolist(), dtype\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mint\u001b[39m\u001b[38;5;124m\"\u001b[39m) \u001b[38;5;66;03m# Koordinat bounding box objek yang terdeteksi\u001b[39;00m\n\u001b[1;32m 18\u001b[0m \u001b[38;5;66;03m# Gambar garis pembatas untuk menghitung jumlah kendaraan yang melewati garis\u001b[39;00m\n",
"File \u001b[0;32m~/Desktop/Python/YOLOv8-Projects/Traffic Analysis Projects/Highway Car Counter/env/lib/python3.10/site-packages/ultralytics/engine/model.py:469\u001b[0m, in \u001b[0;36mModel.track\u001b[0;34m(self, source, stream, persist, **kwargs)\u001b[0m\n\u001b[1;32m 467\u001b[0m kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mconf\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m kwargs\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mconf\u001b[39m\u001b[38;5;124m\"\u001b[39m) \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;241m0.1\u001b[39m \u001b[38;5;66;03m# ByteTrack-based method needs low confidence predictions as input\u001b[39;00m\n\u001b[1;32m 468\u001b[0m kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmode\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtrack\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m--> 469\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpredict\u001b[49m\u001b[43m(\u001b[49m\u001b[43msource\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43msource\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstream\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/Desktop/Python/YOLOv8-Projects/Traffic Analysis Projects/Highway Car Counter/env/lib/python3.10/site-packages/ultralytics/engine/model.py:430\u001b[0m, in \u001b[0;36mModel.predict\u001b[0;34m(self, source, stream, predictor, **kwargs)\u001b[0m\n\u001b[1;32m 428\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m prompts \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mhasattr\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpredictor, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mset_prompts\u001b[39m\u001b[38;5;124m\"\u001b[39m): \u001b[38;5;66;03m# for SAM-type models\u001b[39;00m\n\u001b[1;32m 429\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpredictor\u001b[38;5;241m.\u001b[39mset_prompts(prompts)\n\u001b[0;32m--> 430\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpredictor\u001b[38;5;241m.\u001b[39mpredict_cli(source\u001b[38;5;241m=\u001b[39msource) \u001b[38;5;28;01mif\u001b[39;00m is_cli \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpredictor\u001b[49m\u001b[43m(\u001b[49m\u001b[43msource\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43msource\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstream\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/Desktop/Python/YOLOv8-Projects/Traffic Analysis Projects/Highway Car Counter/env/lib/python3.10/site-packages/ultralytics/engine/predictor.py:204\u001b[0m, in \u001b[0;36mBasePredictor.__call__\u001b[0;34m(self, source, model, stream, *args, **kwargs)\u001b[0m\n\u001b[1;32m 202\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstream_inference(source, model, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[1;32m 203\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 204\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mlist\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mstream_inference\u001b[49m\u001b[43m(\u001b[49m\u001b[43msource\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/Desktop/Python/YOLOv8-Projects/Traffic Analysis Projects/Highway Car Counter/env/lib/python3.10/site-packages/torch/utils/_contextlib.py:35\u001b[0m, in \u001b[0;36m_wrap_generator.<locals>.generator_context\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 32\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 33\u001b[0m \u001b[38;5;66;03m# Issuing `None` to a generator fires it up\u001b[39;00m\n\u001b[1;32m 34\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m ctx_factory():\n\u001b[0;32m---> 35\u001b[0m response \u001b[38;5;241m=\u001b[39m \u001b[43mgen\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msend\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m)\u001b[49m\n\u001b[1;32m 37\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28;01mTrue\u001b[39;00m:\n\u001b[1;32m 38\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 39\u001b[0m \u001b[38;5;66;03m# Forward the response to our caller and get its next request\u001b[39;00m\n",
"File \u001b[0;32m~/Desktop/Python/YOLOv8-Projects/Traffic Analysis Projects/Highway Car Counter/env/lib/python3.10/site-packages/ultralytics/engine/predictor.py:283\u001b[0m, in \u001b[0;36mBasePredictor.stream_inference\u001b[0;34m(self, source, model, *args, **kwargs)\u001b[0m\n\u001b[1;32m 281\u001b[0m \u001b[38;5;66;03m# Inference\u001b[39;00m\n\u001b[1;32m 282\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m profilers[\u001b[38;5;241m1\u001b[39m]:\n\u001b[0;32m--> 283\u001b[0m preds \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minference\u001b[49m\u001b[43m(\u001b[49m\u001b[43mim\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 284\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39margs\u001b[38;5;241m.\u001b[39membed:\n\u001b[1;32m 285\u001b[0m \u001b[38;5;28;01myield from\u001b[39;00m [preds] \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(preds, torch\u001b[38;5;241m.\u001b[39mTensor) \u001b[38;5;28;01melse\u001b[39;00m preds \u001b[38;5;66;03m# yield embedding tensors\u001b[39;00m\n",
"File \u001b[0;32m~/Desktop/Python/YOLOv8-Projects/Traffic Analysis Projects/Highway Car Counter/env/lib/python3.10/site-packages/ultralytics/engine/predictor.py:140\u001b[0m, in \u001b[0;36mBasePredictor.inference\u001b[0;34m(self, im, *args, **kwargs)\u001b[0m\n\u001b[1;32m 134\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Runs inference on a given image using the specified model and arguments.\"\"\"\u001b[39;00m\n\u001b[1;32m 135\u001b[0m visualize \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m 136\u001b[0m increment_path(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msave_dir \u001b[38;5;241m/\u001b[39m Path(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mbatch[\u001b[38;5;241m0\u001b[39m][\u001b[38;5;241m0\u001b[39m])\u001b[38;5;241m.\u001b[39mstem, mkdir\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[1;32m 137\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39margs\u001b[38;5;241m.\u001b[39mvisualize \u001b[38;5;129;01mand\u001b[39;00m (\u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msource_type\u001b[38;5;241m.\u001b[39mtensor)\n\u001b[1;32m 138\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28;01mFalse\u001b[39;00m\n\u001b[1;32m 139\u001b[0m )\n\u001b[0;32m--> 140\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmodel\u001b[49m\u001b[43m(\u001b[49m\u001b[43mim\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43maugment\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43margs\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43maugment\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvisualize\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mvisualize\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43membed\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43margs\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43membed\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/Desktop/Python/YOLOv8-Projects/Traffic Analysis Projects/Highway Car Counter/env/lib/python3.10/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/Desktop/Python/YOLOv8-Projects/Traffic Analysis Projects/Highway Car Counter/env/lib/python3.10/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
"File \u001b[0;32m~/Desktop/Python/YOLOv8-Projects/Traffic Analysis Projects/Highway Car Counter/env/lib/python3.10/site-packages/ultralytics/nn/autobackend.py:384\u001b[0m, in \u001b[0;36mAutoBackend.forward\u001b[0;34m(self, im, augment, visualize, embed)\u001b[0m\n\u001b[1;32m 381\u001b[0m im \u001b[38;5;241m=\u001b[39m im\u001b[38;5;241m.\u001b[39mpermute(\u001b[38;5;241m0\u001b[39m, \u001b[38;5;241m2\u001b[39m, \u001b[38;5;241m3\u001b[39m, \u001b[38;5;241m1\u001b[39m) \u001b[38;5;66;03m# torch BCHW to numpy BHWC shape(1,320,192,3)\u001b[39;00m\n\u001b[1;32m 383\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpt \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mnn_module: \u001b[38;5;66;03m# PyTorch\u001b[39;00m\n\u001b[0;32m--> 384\u001b[0m y \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmodel\u001b[49m\u001b[43m(\u001b[49m\u001b[43mim\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43maugment\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43maugment\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvisualize\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mvisualize\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43membed\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43membed\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 385\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mjit: \u001b[38;5;66;03m# TorchScript\u001b[39;00m\n\u001b[1;32m 386\u001b[0m y \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmodel(im)\n",
"File \u001b[0;32m~/Desktop/Python/YOLOv8-Projects/Traffic Analysis Projects/Highway Car Counter/env/lib/python3.10/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/Desktop/Python/YOLOv8-Projects/Traffic Analysis Projects/Highway Car Counter/env/lib/python3.10/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
"File \u001b[0;32m~/Desktop/Python/YOLOv8-Projects/Traffic Analysis Projects/Highway Car Counter/env/lib/python3.10/site-packages/ultralytics/nn/tasks.py:83\u001b[0m, in \u001b[0;36mBaseModel.forward\u001b[0;34m(self, x, *args, **kwargs)\u001b[0m\n\u001b[1;32m 81\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(x, \u001b[38;5;28mdict\u001b[39m): \u001b[38;5;66;03m# for cases of training and validating while training.\u001b[39;00m\n\u001b[1;32m 82\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mloss(x, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m---> 83\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpredict\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/Desktop/Python/YOLOv8-Projects/Traffic Analysis Projects/Highway Car Counter/env/lib/python3.10/site-packages/ultralytics/nn/tasks.py:101\u001b[0m, in \u001b[0;36mBaseModel.predict\u001b[0;34m(self, x, profile, visualize, augment, embed)\u001b[0m\n\u001b[1;32m 99\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m augment:\n\u001b[1;32m 100\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_predict_augment(x)\n\u001b[0;32m--> 101\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_predict_once\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mprofile\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvisualize\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43membed\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/Desktop/Python/YOLOv8-Projects/Traffic Analysis Projects/Highway Car Counter/env/lib/python3.10/site-packages/ultralytics/nn/tasks.py:122\u001b[0m, in \u001b[0;36mBaseModel._predict_once\u001b[0;34m(self, x, profile, visualize, embed)\u001b[0m\n\u001b[1;32m 120\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m profile:\n\u001b[1;32m 121\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_profile_one_layer(m, x, dt)\n\u001b[0;32m--> 122\u001b[0m x \u001b[38;5;241m=\u001b[39m \u001b[43mm\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;66;03m# run\u001b[39;00m\n\u001b[1;32m 123\u001b[0m y\u001b[38;5;241m.\u001b[39mappend(x \u001b[38;5;28;01mif\u001b[39;00m m\u001b[38;5;241m.\u001b[39mi \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msave \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m) \u001b[38;5;66;03m# save output\u001b[39;00m\n\u001b[1;32m 124\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m visualize:\n",
"File \u001b[0;32m~/Desktop/Python/YOLOv8-Projects/Traffic Analysis Projects/Highway Car Counter/env/lib/python3.10/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/Desktop/Python/YOLOv8-Projects/Traffic Analysis Projects/Highway Car Counter/env/lib/python3.10/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
"File \u001b[0;32m~/Desktop/Python/YOLOv8-Projects/Traffic Analysis Projects/Highway Car Counter/env/lib/python3.10/site-packages/ultralytics/nn/modules/block.py:171\u001b[0m, in \u001b[0;36mSPPF.forward\u001b[0;34m(self, x)\u001b[0m\n\u001b[1;32m 169\u001b[0m y1 \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mm(x)\n\u001b[1;32m 170\u001b[0m y2 \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mm(y1)\n\u001b[0;32m--> 171\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcv2\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtorch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcat\u001b[49m\u001b[43m(\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my1\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my2\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mm\u001b[49m\u001b[43m(\u001b[49m\u001b[43my2\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/Desktop/Python/YOLOv8-Projects/Traffic Analysis Projects/Highway Car Counter/env/lib/python3.10/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/Desktop/Python/YOLOv8-Projects/Traffic Analysis Projects/Highway Car Counter/env/lib/python3.10/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
"File \u001b[0;32m~/Desktop/Python/YOLOv8-Projects/Traffic Analysis Projects/Highway Car Counter/env/lib/python3.10/site-packages/ultralytics/nn/modules/conv.py:54\u001b[0m, in \u001b[0;36mConv.forward_fuse\u001b[0;34m(self, x)\u001b[0m\n\u001b[1;32m 52\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward_fuse\u001b[39m(\u001b[38;5;28mself\u001b[39m, x):\n\u001b[1;32m 53\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Perform transposed convolution of 2D data.\"\"\"\u001b[39;00m\n\u001b[0;32m---> 54\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mact(\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mconv\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m)\n",
"File \u001b[0;32m~/Desktop/Python/YOLOv8-Projects/Traffic Analysis Projects/Highway Car Counter/env/lib/python3.10/site-packages/torch/nn/modules/module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1509\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1510\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1511\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/Desktop/Python/YOLOv8-Projects/Traffic Analysis Projects/Highway Car Counter/env/lib/python3.10/site-packages/torch/nn/modules/module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1515\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1516\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1517\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1518\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1519\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1522\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1523\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
"File \u001b[0;32m~/Desktop/Python/YOLOv8-Projects/Traffic Analysis Projects/Highway Car Counter/env/lib/python3.10/site-packages/torch/nn/modules/conv.py:460\u001b[0m, in \u001b[0;36mConv2d.forward\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m 459\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;28minput\u001b[39m: Tensor) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Tensor:\n\u001b[0;32m--> 460\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_conv_forward\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mweight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbias\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/Desktop/Python/YOLOv8-Projects/Traffic Analysis Projects/Highway Car Counter/env/lib/python3.10/site-packages/torch/nn/modules/conv.py:456\u001b[0m, in \u001b[0;36mConv2d._conv_forward\u001b[0;34m(self, input, weight, bias)\u001b[0m\n\u001b[1;32m 452\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpadding_mode \u001b[38;5;241m!=\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mzeros\u001b[39m\u001b[38;5;124m'\u001b[39m:\n\u001b[1;32m 453\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m F\u001b[38;5;241m.\u001b[39mconv2d(F\u001b[38;5;241m.\u001b[39mpad(\u001b[38;5;28minput\u001b[39m, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_reversed_padding_repeated_twice, mode\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpadding_mode),\n\u001b[1;32m 454\u001b[0m weight, bias, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstride,\n\u001b[1;32m 455\u001b[0m _pair(\u001b[38;5;241m0\u001b[39m), \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdilation, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mgroups)\n\u001b[0;32m--> 456\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mF\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mconv2d\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mweight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbias\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mstride\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 457\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpadding\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdilation\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgroups\u001b[49m\u001b[43m)\u001b[49m\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
],
"source": [
"# Loop utama untuk membaca setiap frame dari video\n",
"while True:\n",
" ret, frame = cap.read() # Membaca frame dari video\n",
" if ret == False: # Keluar dari loop jika tidak ada frame yang dapat dibaca\n",
" break\n",
" \n",
" try:\n",
" frame = imutils.resize(frame, width = 1280, height = 720) # ubah frame menjadi tinggi 720 x lebar 1280\n",
" frame_color = frame.copy() # Salin frame ke mode warna untuk pengolahan dan penggambaran\n",
" frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Konversi frame ke citra grayscale\n",
" frame_gray = cv2.cvtColor(frame_gray, cv2.COLOR_GRAY2BGR) # Konversi kembali ke citra BGR untuk tampilan grayscale\n",
" frame_bw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Konversi ke citra grayscale untuk mode black and white\n",
"\n",
" # Deteksi objek menggunakan model YOLO\n",
" results = model.track(frame_color, persist=True, verbose=False)[0]\n",
" bboxes = np.array(results.boxes.data.tolist(), dtype=\"int\") # Koordinat bounding box objek yang terdeteksi\n",
"\n",
" # Gambar garis pembatas untuk menghitung jumlah kendaraan yang melewati garis\n",
" cv2.line(frame_color, (0, threshold), (1280, threshold), color, thickness)\n",
" cv2.putText(frame_color, \"Pembatas Jalan\", (620, 445), font, 0.7, color_red, thickness)\n",
"\n",
" # Loop untuk setiap objek yang terdeteksi\n",
" for box in bboxes:\n",
" x1, y1, x2, y2, track_id, score, class_id = box # Ambil koordinat dan informasi lainnya\n",
" cx = int((x1 + x2) / 2) # Hitung koordinat x pusat objek\n",
" cy = int((y1 + y2) / 2) # Hitung koordinat y pusat objek\n",
" if class_id in vehicle_ids: # Periksa apakah objek merupakan kendaraan yang ingin dilacak\n",
" class_name = results.names[int(class_id)].upper() # Dapatkan nama kelas objek\n",
"\n",
" track = track_history[track_id] # Ambil sejarah pergerakan objek berdasarkan ID\n",
" track.append((cx, cy)) # Tambahkan koordinat pusat objek ke dalam sejarah pergerakan\n",
" if len(track) > 20: # Batasi panjang sejarah pergerakan agar tidak terlalu panjang\n",
" track.pop(0) # Hapus elemen pertama jika sejarah sudah melebihi batas\n",
"\n",
" points = np.hstack(track).astype(\"int32\").reshape(-1, 1, 2) # Konversi sejarah pergerakan ke format yang sesuai untuk penggambaran\n",
" cv2.polylines(frame_color, [points], isClosed=False, color=color, thickness=thickness) # Gambar garis yang merepresentasikan sejarah pergerakan\n",
" cv2.rectangle(frame_color, (x1, y1), (x2, y2), color, thickness) # Gambar bounding box objek\n",
" text = \"ID: {} {}\".format(track_id, class_name) # Buat teks ID objek dan nama kelasnya\n",
" cv2.putText(frame_color, text, (x1, y1 - 5), font, font_scale, color, thickness) # Tampilkan teks di atas objek\n",
"\n",
" if cy > threshold - 5 and cy < threshold + 5 and cx < 670: # Periksa apakah objek melewati garis atas\n",
" down[track_id] = x1, y1, x2, y2 # Simpan informasi objek yang melewati garis atas\n",
"\n",
" if cy > threshold - 5 and cy < threshold + 5 and cx > 670: # Periksa apakah objek melewati garis bawah\n",
" up[track_id] = x1, y1, x2, y2 # Simpan informasi objek yang melewati garis bawah\n",
"\n",
" up_text = \"Kanan:{}\".format(len(list(up.keys()))) # Buat teks jumlah kendaraan yang melewati garis atas\n",
" down_text = \"Kiri:{}\".format(len(list(down.keys()))) # Buat teks jumlah kendaraan yang melewati garis bawah\n",
"\n",
" cv2.putText(frame_color, up_text, (1150, threshold - 5), font, 0.8, color_red, thickness) # Tampilkan teks jumlah kendaraan yang melewati garis atas\n",
" cv2.putText(frame_color, down_text, (0, threshold - 5), font, 0.8, color_red, thickness) # Tampilkan teks jumlah kendaraan yang melewati garis bawah\n",
"\n",
" # Background subtraction dan deteksi kontur\n",
" grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Konversi frame ke citra grayscale\n",
" blur = cv2.GaussianBlur(grey, (3, 3), 5) # Reduksi noise menggunakan Gaussian Blur\n",
" img_sub = subtracao.apply(blur) # Background subtraction\n",
" dilat = cv2.dilate(img_sub, np.ones((5, 5))) # Dilasi untuk meningkatkan ketebalan objek\n",
" kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)) # Kernel untuk operasi morfologi\n",
" dilatada = cv2.morphologyEx(dilat, cv2.MORPH_CLOSE, kernel) # Operasi closing untuk mengisi lubang kecil pada objek\n",
" dilatada = cv2.morphologyEx(dilatada, cv2.MORPH_CLOSE, kernel) # Operasi closing tambahan\n",
" contorno, h = cv2.findContours(dilatada, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # Deteksi kontur objek\n",
"\n",
" writer.write(frame_color) # Menyimpan frame hasil pemrosesan\n",
" # Menampilkan gambar\n",
" cv2.imshow(\"Warna\", frame_color) # Tampilkan mode warna\n",
" cv2.imshow(\"Grayscale\", frame_gray) # Tampilkan mode grayscale\n",
" cv2.imshow(\"Detectar\", dilatada) # Tampilkan mode Detectar dilatada\n",
" if cv2.waitKey(10) & 0xFF == ord(\"q\"): # Keluar saat tombol q ditekan\n",
" break\n",
"\n",
" except Exception as e:\n",
" print(\"Terjadi kesalahan:\", str(e)) # Tangkap dan tampilkan kesalahan yang terjadi\n",
" continue # Lanjutkan ke iterasi berikutnya\n"
]
},
{
"cell_type": "markdown",
"id": "ae345f06-2af7-4b93-b833-a14cc20f7d64",
"metadata": {},
"source": [
"## Menutup Window OpenCV"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "15c70b25-1b92-43d8-9167-ebb88b2a8df7",
"metadata": {},
"outputs": [],
"source": [
"cap.release() # Bebaskan sumber daya setelah selesai pemrosesan video\n",
"writer.release() # Tutup objek writer\n",
"cv2.destroyAllWindows() # Tutup semua jendela yang dibuka oleh OpenCV"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

app.py
View File

@ -5,20 +5,21 @@ import numpy as np
from ultralytics import YOLO
from collections import defaultdict
import os
import pandas as pd
app = Flask(__name__, static_folder='assets')
video_list = []
color = (0, 255, 0)
color_red = (0, 0, 255)
thickness = 2
# color = (0, 255, 0)
# color_red = (0, 0, 255)
# thickness = 2
font = cv2.FONT_HERSHEY_SIMPLEX
font_scale = 0.5
# font = cv2.FONT_HERSHEY_SIMPLEX
# font_scale = 0.5
# Background subtraction using MOG2
subtracao = cv2.createBackgroundSubtractorMOG2()
# # Background subtraction using MOG2
# subtracao = cv2.createBackgroundSubtractorMOG2()
jumlah_kenderaan = 0
kenderaan_kiri = 0
@ -27,17 +28,115 @@ kenderaan_kanan = 0
# Define the generate_frames function with parameters for video, threshold, and state
def generate_frames(video, threshold, stat):
model_path = "models/yolov8n.pt"
cap = cv2.VideoCapture(video)
model = YOLO(model_path)
# def generate_frames(video, threshold, stat):
# model_path = "models/yolov8n.pt"
# cap = cv2.VideoCapture(video)
# model = YOLO(model_path)
vehicle_ids = [2, 3, 5, 7]
track_history = defaultdict(lambda: [])
# vehicle_ids = [2, 3, 5, 7]
# track_history = defaultdict(lambda: [])
up = {}
down = {}
# up = {}
# down = {}
# global jumlah_kenderaan
# global kenderaan_kiri
# global kenderaan_kanan
# jumlah_kenderaan = 0
# kenderaan_kiri = 0
# kenderaan_kanan = 0
# while True:
# ret, frame = cap.read()
# if not ret:
# break
# try:
# frame = imutils.resize(frame, width=1280, height=720)
# # freame_original = frame.copy()
# frame_color = frame.copy()
# frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# frame_gray = cv2.cvtColor(frame_gray, cv2.COLOR_GRAY2BGR)
# frame_bw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# results = model.track(frame_color, persist=True, verbose=False)[0]
# bboxes = np.array(results.boxes.data.tolist(), dtype="int")
# Draw the counting line used to count the vehicles that cross it
# cv2.line(frame_color, (0, threshold), (1280, threshold), color, thickness)
# text_position = (620, threshold - 5) # Adjust the Y coordinate to place the text just above the line
# cv2.putText(frame_color, "Pembatas Jalan", text_position, font, 0.7, color_red, thickness)
# for box in bboxes:
# x1, y1, x2, y2, track_id, score, class_id = box
# cx = int((x1 + x2) / 2)
# cy = int((y1 + y2) / 2)
# if class_id in vehicle_ids:
# class_name = results.names[int(class_id)].upper()
# track = track_history[track_id]
# track.append((cx, cy))
# if len(track) > 20:
# track.pop(0)
# points = np.hstack(track).astype("int32").reshape(-1, 1, 2)
# cv2.polylines(frame_color, [points], isClosed=False, color=color, thickness=thickness)
# cv2.rectangle(frame_color, (x1, y1), (x2, y2), color, thickness)
# text = "ID: {} {}".format(track_id, class_name)
# cv2.putText(frame_color, text, (x1, y1 - 5), font, font_scale, color, thickness)
# if cy > threshold - 5 and cy < threshold + 5 and cx < 670:
# down[track_id] = x1, y1, x2, y2
# if cy > threshold - 5 and cy < threshold + 5 and cx > 670:
# up[track_id] = x1, y1, x2, y2
# up_text = "Kanan:{}".format(len(list(up.keys())))
# down_text = "Kiri:{}".format(len(list(down.keys())))
# kenderaan_kanan = len(list(up.keys()))
# kenderaan_kiri = len(list(down.keys()))
# cv2.putText(frame_color, up_text, (1150, threshold - 5), font, 0.8, color_red, thickness)
# cv2.putText(frame_color, down_text, (0, threshold - 5), font, 0.8, color_red, thickness)
# # Background subtraction and contour detection
# grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Convert the frame to grayscale
# blur = cv2.GaussianBlur(grey, (3, 3), 5) # Reduce noise with a Gaussian blur
# img_sub = subtracao.apply(blur) # Background subtraction
# dilat = cv2.dilate(img_sub, np.ones((5, 5))) # Dilate to thicken the foreground blobs
# kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)) # Kernel for the morphology operations
# dilatada = cv2.morphologyEx(dilat, cv2.MORPH_CLOSE, kernel) # Closing fills small holes in the blobs
# dilatada = cv2.morphologyEx(dilatada, cv2.MORPH_CLOSE, kernel) # A second closing pass
# contorno, h = cv2.findContours(dilatada, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # Find the object contours
# frame_bw = cv2.cvtColor(dilatada, cv2.COLOR_GRAY2BGR) # Convert the grayscale mask back to BGR
# if stat == 'color':
# frame_to_encode = frame_color
# elif stat == 'grayscale':
# frame_to_encode = frame_gray
# elif stat == 'original':
# frame_to_encode = frame
# else: # Assuming 'detectar' state
# frame_to_encode = frame_bw
# _, buffer = cv2.imencode('.jpg', frame_to_encode)
# frame_bytes = buffer.tobytes()
# yield (b'--frame\r\n'
# b'Content-Type: image/jpeg\r\n\r\n' + frame_bytes + b'\r\n')
# except Exception as e:
# print("Terjadi kesalahan:", str(e))
# continue
# jumlah_kenderaan = kenderaan_kiri + kenderaan_kanan
# cap.release()
def generate_frames2(video, threshold,stat):
global jumlah_kenderaan
global kenderaan_kiri
global kenderaan_kanan
@ -46,94 +145,355 @@ def generate_frames(video, threshold, stat):
kenderaan_kiri = 0
kenderaan_kanan = 0
cap = cv2.VideoCapture(video)
frames_count, fps, width, height = cap.get(cv2.CAP_PROP_FRAME_COUNT), cap.get(cv2.CAP_PROP_FPS), cap.get(
cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
width = int(width)
height = int(height)
print(frames_count, fps, width, height)
# creates a pandas data frame with the number of rows the same length as frame count
df = pd.DataFrame(index=range(int(frames_count)))
df.index.name = "Frames"
framenumber = 0 # keeps track of current frame
carscrossedup = 0 # keeps track of cars that crossed up
carscrosseddown = 0 # keeps track of cars that crossed down
carids = [] # blank list to add car ids
caridscrossed = [] # blank list to add car ids that have crossed
totalcars = 0 # keeps track of total cars
fgbg = cv2.createBackgroundSubtractorMOG2() # create background subtractor
# information to start saving a video file
ret, frame = cap.read() # import image
ratio = .5 # resize ratio
image = cv2.resize(frame, (0, 0), None, ratio, ratio) # resize image
width2, height2, channels = image.shape # note: shape is (rows, cols, channels), so width2 actually holds the height and height2 the width
video = cv2.VideoWriter('traffic_counter.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), fps, (height2, width2), 1) # (height2, width2) therefore passes (width, height), the order VideoWriter expects
while True:
ret, frame = cap.read()
if not ret:
break
try:
frame = imutils.resize(frame, width=1280, height=720)
# freame_original = frame.copy()
frame_color = frame.copy()
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame_gray = cv2.cvtColor(frame_gray, cv2.COLOR_GRAY2BGR)
frame_bw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
ret, frame = cap.read() # import image
results = model.track(frame_color, persist=True, verbose=False)[0]
bboxes = np.array(results.boxes.data.tolist(), dtype="int")
if ret: # if there is a frame continue with code
# Gambar garis pembatas untuk menghitung jumlah kendaraan yang melewati garis
cv2.line(frame_color, (0, threshold), (1280, threshold), color, thickness)
text_position = (620, threshold - 5) # Adjust the Y coordinate to place the text just above the line
cv2.putText(frame_color, "Pembatas Jalan", text_position, font, 0.7, color_red, thickness)
image = cv2.resize(frame, (0, 0), None, ratio, ratio) # resize image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # converts image to gray
fgmask = fgbg.apply(gray) # uses the background subtraction
# applies different thresholds to fgmask to try and isolate cars
# just have to keep playing around with settings until cars are easily identifiable
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)) # kernel to apply to the morphology
closing = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)
opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel)
dilation = cv2.dilate(opening, kernel)
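# MOG2 marks detected shadows as mid-gray (value 127 by default), so thresholding well above that keeps only the solid white (255) foreground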
retvalbin, bins = cv2.threshold(dilation, 220, 255, cv2.THRESH_BINARY) # removes the shadows
# creates contours
contours, hierarchy = cv2.findContours(bins, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
# use convex hull to create polygon around contours
hull = [cv2.convexHull(c) for c in contours]
# draw contours
cv2.drawContours(image, hull, -1, (0, 255, 0), 3)
# line created to stop counting contours, needed as cars in distance become one big contour
lineypos = 100
cv2.line(image, (0, lineypos), (width, lineypos), (255, 0, 0), 5)
# line y position created to count contours
lineypos2 = 125
cv2.line(image, (0, lineypos2), (width, lineypos2), (0, 255, 0), 5)
# min area for contours in case a bunch of small noise contours are created
minarea = 400
# max area for contours, can be quite large for buses
maxarea = 50000
# vectors for the x and y locations of contour centroids in current frame
cxx = np.zeros(len(contours))
cyy = np.zeros(len(contours))
for i in range(len(contours)): # cycles through all contours in current frame
if hierarchy[0, i, 3] == -1: # using hierarchy to only count parent contours (contours not within others)
area = cv2.contourArea(contours[i]) # area of contour
if minarea < area < maxarea: # area threshold for contour
# calculating centroids of contours
cnt = contours[i]
M = cv2.moments(cnt)
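# image moments give the centroid: cx = M10/M00, cy = M01/M00, where M00 is the contour area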
cx = int(M['m10'] / M['m00'])
cy = int(M['m01'] / M['m00'])
if cy > lineypos: # filters out contours that are above line (y starts at top)
# gets bounding points of contour to create rectangle
# x,y is top left corner and w,h is width and height
x, y, w, h = cv2.boundingRect(cnt)
# creates a rectangle around contour
cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
# Prints centroid text in order to double check later on
cv2.putText(image, str(cx) + "," + str(cy), (cx + 10, cy + 10), cv2.FONT_HERSHEY_SIMPLEX,
.3, (0, 0, 255), 1)
cv2.drawMarker(image, (cx, cy), (0, 0, 255), cv2.MARKER_STAR, markerSize=5, thickness=1,
line_type=cv2.LINE_AA)
# adds centroids that passed previous criteria to centroid list
cxx[i] = cx
cyy[i] = cy
# eliminates zero entries (centroids that were not added)
cxx = cxx[cxx != 0]
cyy = cyy[cyy != 0]
# empty list to later check which centroid indices were added to dataframe
minx_index2 = []
miny_index2 = []
# maximum allowable radius for current frame centroid to be considered the same centroid from previous frame
maxrad = 25
# The section below keeps track of the centroids and assigns them to old carids or new carids
if len(cxx): # if there are centroids in the specified area
if not carids: # if carids is empty
for i in range(len(cxx)): # loops through all centroids
carids.append(i) # adds a car id to the empty list carids
df[str(carids[i])] = "" # adds a column to the dataframe corresponding to a carid
# assigns the centroid values to the current frame (row) and carid (column)
df.at[int(framenumber), str(carids[i])] = [cxx[i], cyy[i]]
totalcars = carids[i] + 1 # adds one count to total cars
else: # if there are already car ids
dx = np.zeros((len(cxx), len(carids))) # new arrays to calculate deltas
dy = np.zeros((len(cyy), len(carids))) # new arrays to calculate deltas
for i in range(len(cxx)): # loops through all centroids
for j in range(len(carids)): # loops through all recorded car ids
# acquires centroid from previous frame for specific carid
oldcxcy = df.iloc[int(framenumber - 1)][str(carids[j])]
# acquires current frame centroid that doesn't necessarily line up with previous frame centroid
curcxcy = np.array([cxx[i], cyy[i]])
if not oldcxcy: # checks if old centroid is empty in case car leaves screen and new car shows
continue # continue to next carid
else: # calculate centroid deltas to compare to current frame position later
dx[i, j] = oldcxcy[0] - curcxcy[0]
dy[i, j] = oldcxcy[1] - curcxcy[1]
for j in range(len(carids)): # loops through all current car ids
sumsum = np.abs(dx[:, j]) + np.abs(dy[:, j]) # sums the absolute deltas with respect to each car id
# finds which index carid had the min difference and this is true index
correctindextrue = np.argmin(np.abs(sumsum))
minx_index = correctindextrue
miny_index = correctindextrue
# acquires delta values of the minimum deltas in order to check if it is within radius later on
mindx = dx[minx_index, j]
mindy = dy[miny_index, j]
if mindx == 0 and mindy == 0 and np.all(dx[:, j] == 0) and np.all(dy[:, j] == 0):
# checks if minimum value is 0 and checks if all deltas are zero since this is empty set
# delta could be zero if centroid didn't move
continue # continue to next carid
else:
# if delta values are less than maximum radius then add that centroid to that specific carid
if np.abs(mindx) < maxrad and np.abs(mindy) < maxrad:
# adds centroid to corresponding previously existing carid
df.at[int(framenumber), str(carids[j])] = [cxx[minx_index], cyy[miny_index]]
minx_index2.append(minx_index) # appends all the indices that were added to previous carids
miny_index2.append(miny_index)
for i in range(len(cxx)): # loops through all centroids
# if centroid is not in the minindex list then another car needs to be added
if i not in minx_index2 and i not in miny_index2:
df[str(totalcars)] = "" # create another column for the new car
totalcars = totalcars + 1 # adds another total car to the count
t = totalcars - 1 # t is a placeholder to total cars
carids.append(t) # append to list of car ids
df.at[int(framenumber), str(t)] = [cxx[i], cyy[i]] # add centroid to the new car id
elif curcxcy[0] and not oldcxcy and not minx_index2 and not miny_index2:
# checks if current centroid exists but previous centroid does not
# new car to be added in case minx_index2 is empty
df[str(totalcars)] = "" # create another column with total cars
totalcars = totalcars + 1 # adds another total car to the count
t = totalcars - 1 # t is a placeholder to total cars
carids.append(t) # append to list of car ids
df.at[int(framenumber), str(t)] = [cxx[i], cyy[i]] # add centroid to the new car id
# The section below labels the centroids on screen
currentcars = 0 # current cars on screen
currentcarsindex = [] # current cars on screen carid index
for i in range(len(carids)): # loops through all carids
if df.at[int(framenumber), str(carids[i])] != '':
# checks the current frame to see which car ids are active
# by checking whether a centroid exists on the current frame for that car id
currentcars = currentcars + 1 # adds another to current cars on screen
currentcarsindex.append(i) # adds car ids to current cars on screen
for i in range(currentcars): # loops through all current car ids on screen
# grabs centroid of certain carid for current frame
curcent = df.iloc[int(framenumber)][str(carids[currentcarsindex[i]])]
# grabs centroid of certain carid for previous frame
oldcent = df.iloc[int(framenumber - 1)][str(carids[currentcarsindex[i]])]
if curcent: # if there is a current centroid
# On-screen text for current centroid
cv2.putText(image, "Centroid" + str(curcent[0]) + "," + str(curcent[1]),
(int(curcent[0]), int(curcent[1])), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 255, 255), 2)
cv2.putText(image, "ID:" + str(carids[currentcarsindex[i]]), (int(curcent[0]), int(curcent[1] - 15)),
cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 255, 255), 2)
cv2.drawMarker(image, (int(curcent[0]), int(curcent[1])), (0, 0, 255), cv2.MARKER_STAR, markerSize=5,
thickness=1, line_type=cv2.LINE_AA)
if oldcent: # checks if old centroid exists
# adds radius box from previous centroid to current centroid for visualization
xstart = oldcent[0] - maxrad
ystart = oldcent[1] - maxrad
xwidth = oldcent[0] + maxrad
yheight = oldcent[1] + maxrad
cv2.rectangle(image, (int(xstart), int(ystart)), (int(xwidth), int(yheight)), (0, 125, 0), 1)
# checks if old centroid is on or below line and curcent is on or above line
# to count cars and that car hasn't been counted yet
if oldcent[1] >= lineypos2 and curcent[1] <= lineypos2 and carids[
currentcarsindex[i]] not in caridscrossed:
carscrossedup = carscrossedup + 1
cv2.line(image, (0, lineypos2), (width, lineypos2), (0, 0, 255), 5)
caridscrossed.append(
currentcarsindex[i]) # adds car id to list of count cars to prevent double counting
# checks if old centroid is on or above line and curcent is on or below line
# to count cars and that car hasn't been counted yet
elif oldcent[1] <= lineypos2 and curcent[1] >= lineypos2 and carids[
currentcarsindex[i]] not in caridscrossed:
carscrosseddown = carscrosseddown + 1
cv2.line(image, (0, lineypos2), (width, lineypos2), (0, 0, 125), 5)
caridscrossed.append(currentcarsindex[i])
# Top left hand corner on-screen text
#cv2.rectangle(image, (0, 0), (250, 100), (255, 0, 0), -1) # background rectangle for on-screen text
cv2.putText(image, "Kenderaan Sebelah Kiri: " + str(carscrossedup), (0, 15), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 170, 0),
1)
cv2.putText(image, "Kenderaan Sebelah Kanan: " + str(carscrosseddown), (0, 30), cv2.FONT_HERSHEY_SIMPLEX, .5,
(0, 170, 0), 1)
# cv2.putText(image, "Total Cars Detected: " + str(len(carids)), (0, 60), cv2.FONT_HERSHEY_SIMPLEX, .5,
# (0, 170, 0), 1)
cv2.putText(image, "Frame: " + str(framenumber) + ' dari ' + str(frames_count), (0, 45), cv2.FONT_HERSHEY_SIMPLEX,
.5, (0, 170, 0), 1)
cv2.putText(image, 'Waktu: ' + str(round(framenumber / fps, 2)) + ' detik dari ' + str(round(frames_count / fps, 2))
+ ' detik', (0, 60), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 170, 0), 1)
kenderaan_kanan = carscrosseddown
kenderaan_kiri = carscrossedup
jumlah_kenderaan = carscrossedup + carscrosseddown
for box in bboxes:
x1, y1, x2, y2, track_id, score, class_id = box
cx = int((x1 + x2) / 2)
cy = int((y1 + y2) / 2)
if class_id in vehicle_ids:
class_name = results.names[int(class_id)].upper()
track = track_history[track_id]
track.append((cx, cy))
if len(track) > 20:
track.pop(0)
points = np.hstack(track).astype("int32").reshape(-1, 1, 2)
cv2.polylines(frame_color, [points], isClosed=False, color=color, thickness=thickness)
cv2.rectangle(frame_color, (x1, y1), (x2, y2), color, thickness)
text = "ID: {} {}".format(track_id, class_name)
cv2.putText(frame_color, text, (x1, y1 - 5), font, font_scale, color, thickness)
if cy > threshold - 5 and cy < threshold + 5 and cx < 670:
down[track_id] = x1, y1, x2, y2
if cy > threshold - 5 and cy < threshold + 5 and cx > 670:
up[track_id] = x1, y1, x2, y2
up_text = "Kanan:{}".format(len(list(up.keys())))
down_text = "Kiri:{}".format(len(list(down.keys())))
kenderaan_kanan = len(list(up.keys()))
kenderaan_kiri = len(list(down.keys()))
cv2.putText(frame_color, up_text, (1150, threshold - 5), font, 0.8, color_red, thickness)
cv2.putText(frame_color, down_text, (0, threshold - 5), font, 0.8, color_red, thickness)
# Background subtraction and contour detection
grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # convert the frame to grayscale
blur = cv2.GaussianBlur(grey, (3, 3), 5) # reduce noise with a Gaussian blur
img_sub = subtracao.apply(blur) # Background subtraction
dilat = cv2.dilate(img_sub, np.ones((5, 5))) # dilate to thicken the foreground objects
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)) # kernel for the morphological operations
dilatada = cv2.morphologyEx(dilat, cv2.MORPH_CLOSE, kernel) # closing to fill small holes in the objects
dilatada = cv2.morphologyEx(dilatada, cv2.MORPH_CLOSE, kernel) # additional closing pass
contorno, h = cv2.findContours(dilatada, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # detect object contours
frame_bw = cv2.cvtColor(dilatada, cv2.COLOR_GRAY2BGR) # convert the single-channel mask back to BGR
# displays images and transformations, resized to 1280x720
# cv2.imshow("contours", image)
# cv2.moveWindow("contours", 0, 0)
# cv2.imshow("fgmask", fgmask)
# cv2.moveWindow("fgmask", int(width * ratio), 0)
# cv2.imshow("closing", closing)
# cv2.moveWindow("closing", width, 0)
if stat == 'color':
# frame_to_encode = frame_color
frame_to_encode = cv2.resize(image, (1280, 720)) # annotated counting view
elif stat == 'grayscale':
frame_to_encode = frame_gray
elif stat == 'original':
frame_to_encode = frame
elif stat == 'detectar':
# frame_to_encode = frame_bw
# frame_to_encode = cv2.resize(gray, (1280, 720))
frame_to_encode = cv2.resize(closing, (1280, 720)) # foreground mask after morphological closing
else:
# frame_to_encode = opening
frame_to_encode = cv2.resize(frame, (1280, 720)) # fallback: plain resized frame
_, buffer = cv2.imencode('.jpg', frame_to_encode)
frame_bytes = buffer.tobytes()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame_bytes + b'\r\n')
except Exception as e:
print("Terjadi kesalahan:", str(e))
continue
jumlah_kenderaan = kenderaan_kiri + kenderaan_kanan
# cv2.imshow("opening", opening)
# cv2.moveWindow("opening", 0, int(height * ratio))
# cv2.imshow("dilation", dilation)
# cv2.moveWindow("dilation", int(width * ratio), int(height * ratio))
# cv2.imshow("binary", bins)
# cv2.moveWindow("binary", width, int(height * ratio))
# video.write(image) # save the current image to video file from earlier
# adds to framecount
framenumber = framenumber + 1
k = cv2.waitKey(int(1000/fps)) & 0xff # int(1000/fps) is normal speed since waitkey is in ms
if k == 27:
break
else: # if video is finished then break loop
break
cap.release()
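The matching loops above pick, for each tracked car id, the detection with the smallest |dx| + |dy| to its previous centroid, and accept the match only when both deltas stay under maxrad pixels. A condensed sketch of that association rule, using hypothetical standalone names instead of the dataframe bookkeeping (and looping over centroids rather than car ids, which is the same rule seen from the other side):

def match_centroids(prev, cur, maxrad=25):
    # prev, cur: lists of (x, y) centroids; returns {cur_index: prev_index}
    matches = {}
    for i, (cx, cy) in enumerate(cur):
        if not prev:
            break
        # nearest previous centroid by L1 distance, like np.argmin(|dx| + |dy|) above
        j = min(range(len(prev)), key=lambda k: abs(prev[k][0] - cx) + abs(prev[k][1] - cy))
        px, py = prev[j]
        if abs(px - cx) < maxrad and abs(py - cy) < maxrad:  # within the allowed radius
            matches[i] = j
    return matches

# one track moved a few pixels, one brand-new detection appeared
print(match_centroids([(100, 200)], [(104, 203), (400, 50)]))  # -> {0: 0}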
def update_video_list():
global video_list
# add "video/" to the video_list and only take video extensions
@@ -155,7 +515,7 @@ def video_feed():
stat = request.args.get('stat', 'color') # Default to 'color' if state is not specified
# Return the response with the generator function
print("ini semua variable:", video, threshold, stat)
return Response(generate_frames(video, threshold, stat), mimetype='multipart/x-mixed-replace; boundary=frame')
return Response(generate_frames2(video, threshold, stat), mimetype='multipart/x-mixed-replace; boundary=frame')
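Because the response is multipart/x-mixed-replace, a plain <img src="/video_feed?video=...&threshold=...&stat=..."> tag in the template renders the stream directly. For testing outside a browser, a rough consumer sketch; the requests dependency, the localhost URL, and the output filenames are assumptions, and the JPEG-marker scan is a shortcut rather than a full multipart parser:

import requests

resp = requests.get('http://127.0.0.1:5000/video_feed',
                    params={'video': 'video/video.mp4', 'threshold': 450, 'stat': 'color'},
                    stream=True)
buf, saved = b'', 0
for chunk in resp.iter_content(chunk_size=4096):
    buf += chunk
    start = buf.find(b'\xff\xd8')           # JPEG start-of-image marker
    end = buf.find(b'\xff\xd9', start + 2)  # JPEG end-of-image marker
    if start != -1 and end != -1:
        with open('frame_{}.jpg'.format(saved), 'wb') as f:
            f.write(buf[start:end + 2])
        buf = buf[end + 2:]
        saved += 1
    if saved >= 3:  # grab a few frames, then stop
        break
resp.close()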
@app.route('/video_list')

ini app sebelumnya.py Normal file
View File

@@ -0,0 +1,194 @@
from flask import Flask, render_template, Response, request,jsonify,send_from_directory
import cv2
import imutils
import numpy as np
from ultralytics import YOLO
from collections import defaultdict
import os
app = Flask(__name__, static_folder='assets')
video_list = []
color = (0, 255, 0)
color_red = (0, 0, 255)
thickness = 2
font = cv2.FONT_HERSHEY_SIMPLEX
font_scale = 0.5
# Background subtraction using MOG2
subtracao = cv2.createBackgroundSubtractorMOG2()
jumlah_kenderaan = 0
kenderaan_kiri = 0
kenderaan_kanan = 0
# Define the generate_frames function with parameters for video, threshold, and state
def generate_frames(video, threshold, stat):
model_path = "models/yolov8n.pt"
cap = cv2.VideoCapture(video)
model = YOLO(model_path)
vehicle_ids = [2, 3, 5, 7]
track_history = defaultdict(lambda: [])
up = {}
down = {}
global jumlah_kenderaan
global kenderaan_kiri
global kenderaan_kanan
jumlah_kenderaan = 0
kenderaan_kiri = 0
kenderaan_kanan = 0
while True:
ret, frame = cap.read()
if not ret:
break
try:
frame = imutils.resize(frame, width=1280, height=720)
# frame_original = frame.copy()
frame_color = frame.copy()
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame_gray = cv2.cvtColor(frame_gray, cv2.COLOR_GRAY2BGR)
frame_bw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
results = model.track(frame_color, persist=True, verbose=False)[0]
bboxes = np.array(results.boxes.data.tolist(), dtype="int")
# Draw the boundary line used to count vehicles that cross it
cv2.line(frame_color, (0, threshold), (1280, threshold), color, thickness)
text_position = (620, threshold - 5) # Adjust the Y coordinate to place the text just above the line
cv2.putText(frame_color, "Pembatas Jalan", text_position, font, 0.7, color_red, thickness)
for box in bboxes:
x1, y1, x2, y2, track_id, score, class_id = box
cx = int((x1 + x2) / 2)
cy = int((y1 + y2) / 2)
if class_id in vehicle_ids:
class_name = results.names[int(class_id)].upper()
track = track_history[track_id]
track.append((cx, cy))
if len(track) > 20:
track.pop(0)
points = np.hstack(track).astype("int32").reshape(-1, 1, 2)
cv2.polylines(frame_color, [points], isClosed=False, color=color, thickness=thickness)
cv2.rectangle(frame_color, (x1, y1), (x2, y2), color, thickness)
text = "ID: {} {}".format(track_id, class_name)
cv2.putText(frame_color, text, (x1, y1 - 5), font, font_scale, color, thickness)
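# a detection whose center falls inside the 5-pixel band around the threshold line is recorded;
# cx < 670 vs cx > 670 splits the left and right lanes, and keying the up/down dicts by track_id
# keeps each vehicle counted once even if it sits in the band for several frames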
if cy > threshold - 5 and cy < threshold + 5 and cx < 670:
down[track_id] = x1, y1, x2, y2
if cy > threshold - 5 and cy < threshold + 5 and cx > 670:
up[track_id] = x1, y1, x2, y2
up_text = "Kanan:{}".format(len(list(up.keys())))
down_text = "Kiri:{}".format(len(list(down.keys())))
kenderaan_kanan = len(list(up.keys()))
kenderaan_kiri = len(list(down.keys()))
cv2.putText(frame_color, up_text, (1150, threshold - 5), font, 0.8, color_red, thickness)
cv2.putText(frame_color, down_text, (0, threshold - 5), font, 0.8, color_red, thickness)
# Background subtraction and contour detection
grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # convert the frame to grayscale
blur = cv2.GaussianBlur(grey, (3, 3), 5) # reduce noise with a Gaussian blur
img_sub = subtracao.apply(blur) # Background subtraction
dilat = cv2.dilate(img_sub, np.ones((5, 5))) # dilate to thicken the foreground objects
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)) # kernel for the morphological operations
dilatada = cv2.morphologyEx(dilat, cv2.MORPH_CLOSE, kernel) # closing to fill small holes in the objects
dilatada = cv2.morphologyEx(dilatada, cv2.MORPH_CLOSE, kernel) # additional closing pass
contorno, h = cv2.findContours(dilatada, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # detect object contours
frame_bw = cv2.cvtColor(dilatada, cv2.COLOR_GRAY2BGR) # convert the single-channel mask back to BGR
if stat == 'color':
frame_to_encode = frame_color
elif stat == 'grayscale':
frame_to_encode = frame_gray
elif stat == 'original':
frame_to_encode = frame
else: # Assuming 'detectar' state
frame_to_encode = frame_bw
_, buffer = cv2.imencode('.jpg', frame_to_encode)
frame_bytes = buffer.tobytes()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame_bytes + b'\r\n')
except Exception as e:
print("Terjadi kesalahan:", str(e))
continue
jumlah_kenderaan = kenderaan_kiri + kenderaan_kanan
cap.release()
def update_video_list():
global video_list
# add "video/" to the video_list and only take video extensions
video_list = [f"video/{f}" for f in os.listdir("video") if f.endswith(".mp4")]
@app.route('/')
def index():
update_video_list()
print("video_list:", video_list)
video = request.args.get('video', 'video/video.mp4')
threshold = int(request.args.get('threshold', 450))
# Pass the video file path and threshold value to the template
return render_template('index.html', video=video, threshold=threshold, video_list=video_list)
def video_feed():
# Get the video file path, threshold value, and state from the URL parameters
video = request.args.get('video')
threshold = int(request.args.get('threshold', 450))
stat = request.args.get('stat', 'color') # Default to 'color' if state is not specified
# Return the response with the generator function
print("ini semua variable:", video, threshold, stat)
return Response(generate_frames(video, threshold, stat), mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/video_list')
def video_list():
# note: this route function shadows the module-level video_list variable; update_video_list() rebinds the global name back to a list before it is used
update_video_list()
return render_template('video_list.html', video_list=video_list)
@app.route('/videos/<path:video>')
def video(video):
return send_from_directory('', video)
# Add route for the video feed
app.add_url_rule('/video_feed', 'video_feed', video_feed)
@app.route('/check_jumlah_kenderaan', methods=['GET'])
def check_jumlah_kenderaan():
global jumlah_kenderaan
global kenderaan_kiri
global kenderaan_kanan
return jsonify({'jumlah_kenderaan': jumlah_kenderaan, 'kenderaan_kiri': kenderaan_kiri, 'kenderaan_kanan': kenderaan_kanan})
UPLOAD_FOLDER = 'video'
@app.route('/upload', methods=['POST'])
def upload_file():
file = request.files['file']
if file.filename == '':
return jsonify({'status': False, 'message': 'No file selected'})
if file:
filename = file.filename
file.save(os.path.join(UPLOAD_FOLDER, filename))
return jsonify({'status': True, 'message': 'File uploaded successfully', 'filename': filename})
if __name__ == "__main__":
app.run(debug=True)
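With the routes above in place, the app can be exercised without the web UI. A hedged usage sketch, assuming the requests package, the Flask dev server's default port, and an existing local clip:

import requests

base = 'http://127.0.0.1:5000'  # Flask dev server default; an assumption

# push a clip into the video/ folder through the /upload route
with open('video/video.mp4', 'rb') as f:  # illustrative local file
    print(requests.post(base + '/upload', files={'file': f}).json())

# poll the running totals exposed by /check_jumlah_kenderaan
print(requests.get(base + '/check_jumlah_kenderaan').json())
# -> {'jumlah_kenderaan': ..., 'kenderaan_kiri': ..., 'kenderaan_kanan': ...}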

View File

@@ -14,7 +14,7 @@ font = cv2.FONT_HERSHEY_SIMPLEX
font_scale = 0.5
# video_path = "inference/test.mp4"
video_path = "video3.mp4"
video_path = "video/videonya.mp4"
model_path = "models/yolov8n.pt"
cap = cv2.VideoCapture(video_path) # to read the video

main3.py Normal file
View File

@@ -0,0 +1,333 @@
import numpy as np
import cv2
import pandas as pd
cap = cv2.VideoCapture('video/video.mp4')
frames_count, fps, width, height = cap.get(cv2.CAP_PROP_FRAME_COUNT), cap.get(cv2.CAP_PROP_FPS), cap.get(
cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
width = int(width)
height = int(height)
print(frames_count, fps, width, height)
# creates a pandas DataFrame with one row per video frame
df = pd.DataFrame(index=range(int(frames_count)))
df.index.name = "Frame" # label the index column
framenumber = 0 # keeps track of the current frame
carscrossedup = 0 # counts cars that crossed going up
carscrosseddown = 0 # counts cars that crossed going down
carids = [] # empty list for the car ids
caridscrossed = [] # empty list for the car ids that have already crossed
totalcars = 0 # keeps track of the total number of cars
fgbg = cv2.createBackgroundSubtractorMOG2() # create the MOG2 background subtractor
# information to start saving a video file
ret, frame = cap.read() # read a frame
ratio = .5 # resize ratio
image = cv2.resize(frame, (0, 0), None, ratio, ratio) # resize the frame
width2, height2, channels = image.shape
# video = cv2.VideoWriter('penghitung_kendaraan.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), fps, (height2, width2), 1)
while True:
ret, frame = cap.read() # read a frame
if ret: # if there is a frame, continue with the code
image = cv2.resize(frame, (0, 0), None, ratio, ratio) # resize the frame
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # convert the image to grayscale
fgmask = fgbg.apply(gray) # apply the MOG2 background subtraction
# apply thresholds to fgmask to try to isolate the cars
# keep tuning the settings until cars are easily identifiable
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)) # kernel for the morphological operations
closing = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)
opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel)
dilation = cv2.dilate(opening, kernel)
retvalbin, bins = cv2.threshold(dilation, 220, 255, cv2.THRESH_BINARY) # removes the shadows
# create the contours
contours, hierarchy = cv2.findContours(bins, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
# use convex hulls to create polygons around the contours
hull = [cv2.convexHull(c) for c in contours]
# draw the contours
cv2.drawContours(image, hull, -1, (0, 255, 0), 3)
# line created to stop counting contours, needed because distant cars merge into one big contour
lineypos = 100
cv2.line(image, (0, lineypos), (width, lineypos), (255, 0, 0), 5)
# y position of the line used to count contours
lineypos2 = 125
cv2.line(image, (0, lineypos2), (width, lineypos2), (0, 255, 0), 5)
# minimum contour area, so that small noise contours are not counted
minarea = 400
# maximum contour area, which can be quite large for buses
maxarea = 40000
# vectors for the x and y locations of the contour centroids in the current frame
cxx = np.zeros(len(contours))
cyy = np.zeros(len(contours))
for i in range(len(contours)): # iterate over all contours in the current frame
# use the hierarchy to only count parent contours (contours not contained in other contours)
if hierarchy[0, i, 3] == -1:
area = cv2.contourArea(contours[i]) # compute the contour area
if minarea < area < maxarea: # use the area as a threshold for the contour
# compute the centroid of the contour
cnt = contours[i]
M = cv2.moments(cnt)
cx = int(M['m10'] / M['m00'])
cy = int(M['m01'] / M['m00'])
if cy > lineypos: # filter out contours that are above the line (y starts at the top)
# get the bounding points of the contour to create a rectangle
x, y, w, h = cv2.boundingRect(cnt)
# draw a rectangle around the contour
cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
# print the centroid text in order to double-check it later
cv2.putText(image, str(cx) + "," + str(cy), (cx + 10, cy + 10), cv2.FONT_HERSHEY_SIMPLEX,
0.3, (0, 0, 255), 1)
cv2.drawMarker(image, (cx, cy), (0, 0, 255), cv2.MARKER_STAR, markerSize=5, thickness=1,
line_type=cv2.LINE_AA)
# add centroids that passed the criteria above to the centroid list
cxx[i] = cx
cyy[i] = cy
# remove zero entries from the centroid vectors (centroids that were not added)
cxx = cxx[cxx != 0]
cyy = cyy[cyy != 0]
# empty lists used later to record which centroid indices were added to the dataframe
minx_index2 = []
miny_index2 = []
# maximum allowable radius for a current-frame centroid to be matched with a centroid from the previous frame
maxrad = 25
# the section below keeps track of the centroids and assigns them to old car ids or new car ids
if len(cxx): # if there are centroids in the specified area
if not carids: # if the carids list is empty
for i in range(len(cxx)): # loop over all centroids
carids.append(i) # add a car id to the empty carids list
df[str(carids[i])] = "" # add a column to the dataframe for this car id
# assign the centroid values to the current frame (row) and car id (column)
df.at[int(framenumber), str(carids[i])] = [cxx[i], cyy[i]]
totalcars = carids[i] + 1 # add one to the total car count
else: # if there already are car ids
dx = np.zeros((len(cxx), len(carids))) # new array for the x deltas
dy = np.zeros((len(cyy), len(carids))) # new array for the y deltas
for i in range(len(cxx)): # loop over all centroids
for j in range(len(carids)): # loop over all recorded car ids
# get the centroid of this car id from the previous frame
oldcxcy = df.iloc[int(framenumber - 1)][str(carids[j])]
# get the current-frame centroid, which does not necessarily line up with the previous-frame centroid
curcxcy = np.array([cxx[i], cyy[i]])
if not oldcxcy: # if the previous-frame centroid is empty, e.g. the car left the screen
continue # continue to the next car id
else: # compute the centroid deltas to compare against the current position later
dx[i, j] = oldcxcy[0] - curcxcy[0]
dy[i, j] = oldcxcy[1] - curcxcy[1]
for j in range(len(carids)): # loop over all current car ids
jumlahjumlah = np.abs(dx[:, j]) + np.abs(dy[:, j]) # sum the absolute deltas for this car id
# find which centroid index had the minimum difference; this is the true index
indeksindextrue = np.argmin(np.abs(jumlahjumlah))
minx_index = indeksindextrue
miny_index = indeksindextrue
# get the delta values of the chosen minimum, to check against the radius later
deltadeltadx = dx[minx_index, j]
deltadeltady = dy[miny_index, j]
if deltadeltadx == 0 and deltadeltady == 0 and np.all(dx[:, j] == 0) and np.all(dy[:, j] == 0):
# check whether the minimum is 0 and whether all deltas are zero, since that means an empty set
# a delta can also be zero if the centroid simply did not move
continue # continue to the next car id
else:
# if the delta values are below the maximum radius, add the centroid to that car id
if np.abs(deltadeltadx) < maxrad and np.abs(deltadeltady) < maxrad:
# add the centroid to the previously existing car id
df.at[int(framenumber), str(carids[j])] = [cxx[minx_index], cyy[miny_index]]
minx_index2.append(minx_index) # record the indices that were assigned to existing car ids
miny_index2.append(miny_index)
for i in range(len(cxx)): # loop over all centroids
# if the centroid is not in the minindex lists, a new car needs to be added
if i not in minx_index2 and i not in miny_index2:
df[str(totalcars)] = "" # create another column for the newly recorded car
totalcars = totalcars + 1 # add one to the total car count
t = totalcars - 1 # t is a placeholder for the car id
carids.append(t) # append it to the list of car ids
df.at[int(framenumber), str(t)] = [cxx[i], cyy[i]] # add the centroid to the new car id
elif curcxcy[0] and not oldcxcy and not minx_index2 and not miny_index2:
# if the current centroid exists but the previous centroid does not
# a new car needs to be added in case minx_index2 is empty
df[str(totalcars)] = "" # create another column for the newly recorded car
totalcars = totalcars + 1 # add one to the total car count
t = totalcars - 1 # t is a placeholder for the car id
carids.append(t) # append it to the list of car ids
df.at[int(framenumber), str(t)] = [cxx[i], cyy[i]] # add the centroid to the new car id
# The section below labels the on-screen centroids
currentcars = 0 # cars currently on screen
currentcarsindex = [] # indices of the car ids currently on screen
for i in range(len(carids)): # loop over all car ids
# check the current frame to see which car ids are active
# by checking whether a centroid exists on the current frame for that car id
if df.at[int(framenumber), str(carids[i])] != '':
currentcars = currentcars + 1 # add one to the count of cars on screen
currentcarsindex.append(i) # record the car id that is on screen
for i in range(currentcars): # loop over all car ids currently on screen
# get the centroid of this car id for the current frame
curcent = df.iloc[int(framenumber)][str(carids[currentcarsindex[i]])]
# get the centroid of this car id for the previous frame
oldcent = df.iloc[int(framenumber - 1)][str(carids[currentcarsindex[i]])]
if curcent: # if there is a centroid in the current frame
# on-screen text for the current centroid
cv2.putText(image, "Centroid" + str(curcent[0]) + "," + str(curcent[1]),
(int(curcent[0]), int(curcent[1])), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 255, 255), 2)
cv2.putText(image, "ID:" + str(carids[currentcarsindex[i]]), (int(curcent[0]), int(curcent[1] - 15)),
cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 255, 255), 2)
cv2.drawMarker(image, (int(curcent[0]), int(curcent[1])), (0, 0, 255), cv2.MARKER_STAR, markerSize=5,
thickness=1, line_type=cv2.LINE_AA)
# check whether the old centroid exists
# add a radius box from the old centroid to the current centroid for visualization
if oldcent:
xmulai = oldcent[0] - maxrad
ymulai = oldcent[1] - maxrad
xakhir = oldcent[0] + maxrad
yakhir = oldcent[1] + maxrad
cv2.rectangle(image, (int(xmulai), int(ymulai)), (int(xakhir), int(yakhir)), (0, 125, 0), 1)
# check whether the old centroid is on or below the line and the new centroid is on or above it
# to count the car while making sure it is not counted twice
if oldcent[1] >= lineypos2 and curcent[1] <= lineypos2 and carids[
currentcarsindex[i]] not in caridscrossed:
carscrossedup = carscrossedup + 1
cv2.line(image, (0, lineypos2), (width, lineypos2), (0, 0, 255), 5)
caridscrossed.append(
currentcarsindex[i]) # add the car id to the list of counted cars to prevent double counting
# check whether the old centroid is on or above the line and the new centroid is on or below it
# to count the car while making sure it is not counted twice
elif oldcent[1] <= lineypos2 and curcent[1] >= lineypos2 and carids[
currentcarsindex[i]] not in caridscrossed:
carscrosseddown = carscrosseddown + 1
cv2.line(image, (0, lineypos2), (width, lineypos2), (0, 0, 125), 5)
caridscrossed.append(currentcarsindex[i])
# display the number of cars that crossed going up
cv2.putText(image, "Mobil yang Melintasi Atas: " + str(carscrossedup), (0, 15), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255),
1)
# display the number of cars that crossed going down
cv2.putText(image, "Mobil yang Melintasi Bawah: " + str(carscrosseddown), (0, 30), cv2.FONT_HERSHEY_SIMPLEX, .5,
(255, 255, 255), 1)
# # display the total number of detected cars
# cv2.putText(image, "Total Mobil yang Terdeteksi: " + str(len(carids)), (0, 60), cv2.FONT_HERSHEY_SIMPLEX, .5,
# (255, 255, 255), 1)
# display the current frame number and the total frame count
cv2.putText(image, "Frame: " + str(framenumber) + ' dari ' + str(frames_count), (0, 45), cv2.FONT_HERSHEY_SIMPLEX,
.5, (255, 255, 255), 1)
# display the elapsed time and the total video time
cv2.putText(image, 'Waktu: ' + str(round(framenumber / fps, 2)) + ' detik dari ' + str(round(frames_count / fps, 2))
+ ' detik', (0, 60), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1)
# display the images and transformations
cv2.imshow("Output", image)
cv2.moveWindow("Output", 0, 0)
cv2.imshow("gray", gray)
cv2.moveWindow("gray", int(width * ratio), 0)
cv2.imshow("closing", closing)
cv2.moveWindow("closing", width, 0)
# cv2.imshow("opening", opening)
# cv2.moveWindow("opening", 0, int(height * ratio))
# cv2.imshow("dilation", dilation)
# cv2.moveWindow("dilation", int(width * ratio), int(height * ratio))
# cv2.imshow("binary", bins)
# cv2.moveWindow("binary", width, int(height * ratio))
# adds to framecount
framenumber = framenumber + 1
# wait int(1000/fps) ms for a key press; at fps frames per second this paces playback at roughly real-time speed, and & 0xff masks the key code
k = cv2.waitKey(int(1000/fps)) & 0xff
if k == 27: # if the key is ESC (27), break the loop
break
else: # if the video is finished, break the loop
break
cap.release()
cv2.destroyAllWindows()

View File

@@ -108,11 +108,11 @@
{% endfor %}
</select>
</div>
<div class="form-group">
<!-- <div class="form-group">
<label for="video">Threshold</label>
<input type="text" class="form-control" id="threshold" name="threshold"
value="{{ threshold }}">
</div>
</div> -->
<div class="form-group text-center">
<button type="button" class="btn btn-primary" onclick="olah_video()">Proses
Video</button>
@@ -224,7 +224,8 @@
<script>
function olah_video() {
var video = document.getElementById('video').value;
var threshold = document.getElementById('threshold').value;
// var threshold = document.getElementById('threshold').value;
var threshold = 450;
window.location.href = '/?video=' + video + '&threshold=' + threshold;
}