DNN OpenCV Python using RTSP always crashes after a few minutes

Description:

I want to create a people counter using DNN. The model I'm using is MobileNetSSD. The camera I use is a Hikvision IP camera, and Python communicates with it over the RTSP protocol.

The program works well and has no bugs when running on a sample video; it does its job correctly. But when I switch the source to the IP camera, an unknown error appears.

Error:

Sometimes the error is:

[h264 @ 000001949f7adfc0] error while decoding MB 13 4, bytestream -6
[h264 @ 000001949f825ac0] left block unavailable for requested intra4x4 mode -1
[h264 @ 000001949f825ac0] error while decoding MB 0 17, bytestream 762

Sometimes the error does not appear and the program is killed.


Update Error

After revising the code, I caught the error. The error is:

[h264 @ 0000019289b3fa80] error while decoding MB 4 5, bytestream -25

Now I don't know what to do, because I cannot find anything about this error on Google.

Source Code:

Old Code

This is my very first version of the code, before getting suggestions from the comments section.

import time
import cv2
import numpy as np
import math
import threading

print("Load MobileNeteSSD model")

prototxt = "MobileNetSSD_deploy.prototxt"
model = "MobileNetSSD_deploy.caffemodel"

CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
           "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
           "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
           "sofa", "train", "tvmonitor"]

net = cv2.dnn.readNetFromCaffe(prototxt, model)

pos_line = 0
offset = 50
car = 0
detected = False
check = 0
prev_frame_time = 0


def detect():
    global check, car, detected
    check = 0
    if(detected == False):
        car += 1
        detected = True


def center_object(x, y, w, h):
    cx = x + int(w / 2)
    cy = y + int(h / 2)
    return cx, cy


def process_frame_MobileNetSSD(next_frame):
    global car, check, detected

    rgb = cv2.cvtColor(next_frame, cv2.COLOR_BGR2RGB)
    (H, W) = next_frame.shape[:2]

    blob = cv2.dnn.blobFromImage(next_frame, size=(300, 300), ddepth=cv2.CV_8U)
    net.setInput(blob, scalefactor=1.0/127.5, mean=[127.5, 127.5, 127.5])
    detections = net.forward()

    for i in np.arange(0, detections.shape[2]):
        confidence = detections[0, 0, i, 2]

        if confidence > 0.5:

            idx = int(detections[0, 0, i, 1])
            if CLASSES[idx] != "person":
                continue

            label = CLASSES[idx]

            box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
            (startX, startY, endX, endY) = box.astype("int")

            center_ob = center_object(startX, startY, endX-startX, endY-startY)
            cv2.circle(next_frame, center_ob, 4, (0, 0, 255), -1)

            if center_ob[0] < (pos_line+offset) and center_ob[0] > (pos_line-offset):
                # car+=1
                detect()

            else:
                check += 1
                if(check >= 5):
                    detected = False

            cv2.putText(next_frame, label+' '+str(round(confidence, 2)),
                        (startX, startY-10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            cv2.rectangle(next_frame, (startX, startY),
                          (endX, endY), (0, 255, 0), 3)

    return next_frame


def PersonDetection_UsingMobileNetSSD():
    cap = cv2.VideoCapture()
    cap.open("rtsp://admin:Admin12345@192.168.100.20:554/Streaming/channels/2/")

    global car,pos_line,prev_frame_time

    frame_count = 0

    while True:
        try:
            time.sleep(0.1)
            new_frame_time = time.time()
            fps = int(1/(new_frame_time-prev_frame_time))
            prev_frame_time = new_frame_time

            ret, next_frame = cap.read()
            w_video = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
            h_video = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
            pos_line = int(h_video/2)-50

            if ret == False: break

            frame_count += 1
            cv2.line(next_frame, (int(h_video/2), 0),
                     (int(h_video/2), int(h_video)), (255, 127, 0), 3)
            next_frame = process_frame_MobileNetSSD(next_frame)

            cv2.rectangle(next_frame, (248,22), (342,8), (0,0,0), -1)
            cv2.putText(next_frame, "Counter : "+str(car), (250, 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
            cv2.putText(next_frame, "FPS : "+str(fps), (0, int(h_video)-10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
            cv2.imshow("Video Original", next_frame)
            # print(car)

        except Exception as e:
            print(str(e))

        if cv2.waitKey(1) & 0xFF == ord('q'): 
            break


    print("/MobileNetSSD Person Detector")


    cap.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    # Pass the function itself as the thread target instead of calling it here
    t1 = threading.Thread(target=PersonDetection_UsingMobileNetSSD)
    t1.start()

New Code

I have revised my code, but the program still stops grabbing frames. I only revised the PersonDetection_UsingMobileNetSSD() function, and I have also removed the multithreading I was using. The code ran for about 30 minutes, but after a broken frame it never re-enters the program block under if ret == True again. (A rough, untested recovery sketch follows the revised code below.)

def PersonDetection_UsingMobileNetSSD():
    cap = cv2.VideoCapture()
    cap.open("rtsp://admin:Admin12345@192.168.100.20:554/Streaming/channels/2/")

    global car,pos_line,prev_frame_time

    frame_count = 0

    while True:
        try:
            if cap.isOpened():
                ret, next_frame = cap.read()
                if ret:
                    new_frame_time = time.time()
                    fps = int(1/(new_frame_time-prev_frame_time))
                    prev_frame_time = new_frame_time
                    w_video = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
                    h_video = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
                    pos_line = int(h_video/2)-50

                    # next_frame = cv2.resize(next_frame,(720,480),fx=0,fy=0, interpolation = cv2.INTER_CUBIC)

                    if ret == False: break

                    frame_count += 1
                    cv2.line(next_frame, (int(h_video/2), 0),
                            (int(h_video/2), int(h_video)), (255, 127, 0), 3)
                    next_frame = process_frame_MobileNetSSD(next_frame)

                    cv2.rectangle(next_frame, (248,22), (342,8), (0,0,0), -1)
                    cv2.putText(next_frame, "Counter : "+str(car), (250, 20),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
                    cv2.putText(next_frame, "FPS : "+str(fps), (0, int(h_video)-10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
                    cv2.imshow("Video Original", next_frame)
                    # print(car)
                else:
                    print("Crashed Frame")
            else:
                print("Cap is not open")

        except Exception as e:
            print(str(e))

        if cv2.waitKey(1) & 0xFF == ord('q'): 
            break


    print("/MobileNetSSD Person Detector")


    cap.release()
    cv2.destroyAllWindows()
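
Below is a rough, untested sketch of one recovery idea: releasing and reopening the capture whenever read() fails. It assumes the same RTSP URL, model, and globals as the code above, and it is only an illustration, not a verified fix.

def PersonDetection_WithReconnect():
    # Untested sketch: reopen the RTSP stream whenever a frame read fails
    url = "rtsp://admin:Admin12345@192.168.100.20:554/Streaming/channels/2/"
    cap = cv2.VideoCapture(url)

    global car, pos_line

    while True:
        ret, next_frame = cap.read()
        if not ret:
            # Broken frame or dropped stream: reconnect and keep going
            print("Crashed frame, reconnecting...")
            cap.release()
            time.sleep(2)
            cap = cv2.VideoCapture(url)
            continue

        h_video = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        pos_line = int(h_video / 2) - 50

        next_frame = process_frame_MobileNetSSD(next_frame)
        cv2.putText(next_frame, "Counter : " + str(car), (250, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
        cv2.imshow("Video Original", next_frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()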

Requirements:

Hardware: Intel Core i5-1035G1, 8 GB RAM, NVIDIA GeForce MX330

Software: Python 3.6.2, OpenCV 4.5.1, NumPy 1.16.0

Question:

  1. What should I do to fix this error?
  2. What causes this to happen?

Best Regards,


Thanks


Answer

The main problem here is that an RTSP stream almost always contains some corrupted frames. The solution is to run the video capture on one thread and the video processing on a second thread.

As an example:

import cv2
import threading
import queue

q = queue.Queue()

def this_receive(q):
    # Capture thread: keep reading frames and push them onto the queue
    cap = cv2.VideoCapture("rtsp://admin:Admin12345@192.168.10.20:554/Streaming/channels/2/")
    cap.set(cv2.CAP_PROP_FPS, 5)
    ret, next_frame = cap.read()
    q.put(next_frame)
    while ret:
        ret, next_frame = cap.read()
        w_video = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        h_video = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        q.put(next_frame)

def main_program(q):
    # Processing thread: pull frames from the queue and process them
    while True:
        try:
            if not q.empty():
                next_frame = q.get()
                # run detection / drawing / cv2.imshow on next_frame here
        except Exception as e:
            print(str(e))

        if cv2.waitKey(1) & 0xFF == ord('q'): 
            break

if __name__ == "__main__":
    print("Main Program")
    p2 = threading.Thread(target=this_receive, args=(q,))
    p2.start()
    p1 = threading.Thread(target=main_program, args=(q,))
    p1.start()

This example should work for the case you are experiencing. A damaged frame will not affect the quality of the data processing. The drawback is that this method can introduce a processing delay: the displayed video can lag behind real time by up to 10 minutes. Want to know what kind of delay? Just try it!
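
A common variation on this pattern (not part of the original answer) is to bound the queue to a single slot and drop stale frames, so the processing thread always works on the most recent frame and the delay stays small. A minimal sketch, assuming the same RTSP URL:

import cv2
import threading
import queue

# Hold at most one frame; the receiver replaces it with the newest one
q = queue.Queue(maxsize=1)

def this_receive(q):
    cap = cv2.VideoCapture("rtsp://admin:Admin12345@192.168.10.20:554/Streaming/channels/2/")
    while True:
        ret, next_frame = cap.read()
        if not ret:
            continue  # skip broken frames; reconnect logic could go here
        if q.full():
            try:
                q.get_nowait()  # drop the stale frame
            except queue.Empty:
                pass
        q.put(next_frame)

def main_program(q):
    while True:
        next_frame = q.get()  # blocks until the newest frame arrives
        # run detection / drawing on next_frame here
        cv2.imshow("Video Original", next_frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

if __name__ == "__main__":
    threading.Thread(target=this_receive, args=(q,), daemon=True).start()
    main_program(q)

As a design note, this sketch keeps cv2.imshow in the main thread, which is generally safer with OpenCV's GUI functions.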
