How to capture multiple camera streams with OpenCV?

I have to stitch the images captured from many (9) cameras. Initially, I tried capturing frames from 2 cameras at 15 FPS. Then I connected 4 cameras (I also used an externally powered USB hub to provide enough power), but I could only see one stream.

For testing, I used the following script:

import numpy as np
import cv2
import imutils

# Probe consecutive device indices until one fails to return a frame
index = 0
arr = []
while True:
    cap = cv2.VideoCapture(index)

    if not cap.read()[0]:
        break
    else:
        arr.append(index)
    cap.release()
    index += 1

video_captures = [cv2.VideoCapture(idx) for idx in arr]

while True:
    # Capture frame-by-frame
    frames = []
    frames_preview = []

    for i in arr:
        # skip webcam capture
        if i == 1: continue
        ret, frame = video_captures[i].read()
        if ret:
            frames.append(frame)
            small = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
            frames_preview.append(small)

    for i, frame in enumerate(frames_preview):
        cv2.imshow('Cam {}'.format(i), frame)


    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
for video_capture in video_captures:
    video_capture.release()
cv2.destroyAllWindows()

Is there any limit on the number of cameras? Does anyone know the right way to capture frames from multiple cameras?


Solution 1:

(Screenshot: the camera GUI displaying eight live streams in a grid)

To capture multiple streams with OpenCV, I recommend using threading, which can improve performance by offloading the heavy I/O operations to a separate thread. Since accessing the webcam/IP/RTSP stream with cv2.VideoCapture().read() is a blocking operation, the main program is stuck until a frame is read from the camera device. With multiple streams, this latency is definitely visible. To remedy this, we can spawn another thread that retrieves frames into a deque in parallel, instead of relying on a single thread to obtain the frames in sequential order. Threading allows frames to be read continuously without impacting the performance of the main program. The idea of capturing a single stream using threading and OpenCV comes from a previous answer in Python OpenCV multithreading streaming from camera.
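
Here is a minimal sketch of that threaded-reader idea for a single stream, so you can see the pattern before the full GUI below. The device index 0 and the one-frame deque are just assumptions for illustration; adjust them for your setup.

# Minimal sketch: read one camera on a background thread into a deque.
# Device index 0 and the 1-frame deque size are assumptions for illustration.
from collections import deque
from threading import Thread
import time
import cv2

class ThreadedCapture:
    def __init__(self, src=0):
        self.capture = cv2.VideoCapture(src)
        self.frames = deque(maxlen=1)  # keep only the latest frame
        thread = Thread(target=self._reader, daemon=True)
        thread.start()

    def _reader(self):
        # The blocking read() calls happen here, not in the main thread
        while self.capture.isOpened():
            status, frame = self.capture.read()
            if status:
                self.frames.append(frame)
            time.sleep(0.001)

    def read(self):
        # Returns the most recent frame, or None if nothing has arrived yet
        return self.frames[-1] if self.frames else None

    def release(self):
        self.capture.release()

if __name__ == '__main__':
    stream = ThreadedCapture(0)
    while True:
        frame = stream.read()
        if frame is not None:
            cv2.imshow('Threaded camera', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    stream.release()
    cv2.destroyAllWindows()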

But if you want to capture and display multiple streams, OpenCV alone is not enough. You can use OpenCV in combination with a GUI framework to stitch each image onto a single display. I will use PyQt4 as the framework, qdarkstyle for the GUI stylesheet, and imutils for OpenCV convenience functions.


Here is a very stripped-down version of the camera GUI I currently use, without the placeholder images, the admin credential login page, and the camera-switching ability. I've kept the automatic camera reconnect feature in case the internet dies or the camera connection is lost. I only have 8 cameras, as shown in the image above, but it is very simple to add another camera and it should not impact performance. This camera GUI currently runs at about ~60 FPS, so it is real-time. You can easily rearrange the layout using PyQt layouts, so feel free to modify the code! Remember to change the stream links!

from PyQt4 import QtCore, QtGui
import qdarkstyle
from threading import Thread
from collections import deque
from datetime import datetime
import time
import sys
import cv2
import imutils

class CameraWidget(QtGui.QWidget):
    """Independent camera feed
    Uses threading to grab IP camera frames in the background

    @param width - Width of the video frame
    @param height - Height of the video frame
    @param stream_link - IP/RTSP/Webcam link
    @param aspect_ratio - Whether to maintain the frame aspect ratio or force it into the frame
    """

    def __init__(self, width, height, stream_link=0, aspect_ratio=False, parent=None, deque_size=1):
        super(CameraWidget, self).__init__(parent)

        # Initialize deque used to store frames read from the stream
        self.deque = deque(maxlen=deque_size)

        # Slight offset is needed since PyQt layouts have a built in padding
        # So add offset to counter the padding 
        self.offset = 16
        self.screen_width = width - self.offset
        self.screen_height = height - self.offset
        self.maintain_aspect_ratio = aspect_ratio

        self.camera_stream_link = stream_link

        # Flag to check if camera is valid/working
        self.online = False
        self.capture = None
        self.video_frame = QtGui.QLabel()

        self.load_network_stream()

        # Start background frame grabbing
        self.get_frame_thread = Thread(target=self.get_frame, args=())
        self.get_frame_thread.daemon = True
        self.get_frame_thread.start()

        # Periodically set video frame to display
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.set_frame)
        self.timer.start(.5)

        print('Started camera: {}'.format(self.camera_stream_link))

    def load_network_stream(self):
        """Verifies stream link and open new stream if valid"""

        def load_network_stream_thread():
            if self.verify_network_stream(self.camera_stream_link):
                self.capture = cv2.VideoCapture(self.camera_stream_link)
                self.online = True
        self.load_stream_thread = Thread(target=load_network_stream_thread, args=())
        self.load_stream_thread.daemon = True
        self.load_stream_thread.start()

    def verify_network_stream(self, link):
        """Attempts to receive a frame from given link"""

        cap = cv2.VideoCapture(link)
        if not cap.isOpened():
            return False
        cap.release()
        return True

    def get_frame(self):
        """Reads frame, resizes, and converts image to pixmap"""

        while True:
            try:
                if self.capture.isOpened() and self.online:
                    # Read next frame from stream and insert into deque
                    status, frame = self.capture.read()
                    if status:
                        self.deque.append(frame)
                    else:
                        self.capture.release()
                        self.online = False
                else:
                    # Attempt to reconnect
                    print('attempting to reconnect', self.camera_stream_link)
                    self.load_network_stream()
                    self.spin(2)
                self.spin(.001)
            except AttributeError:
                pass

    def spin(self, seconds):
        """Pause for set amount of seconds, replaces time.sleep so program doesnt stall"""

        time_end = time.time() + seconds
        while time.time() < time_end:
            QtGui.QApplication.processEvents()

    def set_frame(self):
        """Sets pixmap image to video frame"""

        if not self.online:
            self.spin(1)
            return

        if self.deque and self.online:
            # Grab latest frame
            frame = self.deque[-1]

            # Keep frame aspect ratio
            if self.maintain_aspect_ratio:
                self.frame = imutils.resize(frame, width=self.screen_width)
            # Force resize
            else:
                self.frame = cv2.resize(frame, (self.screen_width, self.screen_height))

            # Add timestamp to cameras
            cv2.rectangle(self.frame, (self.screen_width-190,0), (self.screen_width,50), color=(0,0,0), thickness=-1)
            cv2.putText(self.frame, datetime.now().strftime('%H:%M:%S'), (self.screen_width-185,37), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255,255,255), lineType=cv2.LINE_AA)

            # Convert to pixmap and set to video frame
            self.img = QtGui.QImage(self.frame, self.frame.shape[1], self.frame.shape[0], QtGui.QImage.Format_RGB888).rgbSwapped()
            self.pix = QtGui.QPixmap.fromImage(self.img)
            self.video_frame.setPixmap(self.pix)

    def get_video_frame(self):
        return self.video_frame

def exit_application():
    """Exit program event handler"""

    sys.exit(1)

if __name__ == '__main__':

    # Create main application window
    app = QtGui.QApplication([])
    app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt())
    app.setStyle(QtGui.QStyleFactory.create("Cleanlooks"))
    mw = QtGui.QMainWindow()
    mw.setWindowTitle('Camera GUI')
    mw.setWindowFlags(QtCore.Qt.FramelessWindowHint)

    cw = QtGui.QWidget()
    ml = QtGui.QGridLayout()
    cw.setLayout(ml)
    mw.setCentralWidget(cw)
    mw.showMaximized()

    # Dynamically determine screen width/height
    screen_width = QtGui.QApplication.desktop().screenGeometry().width()
    screen_height = QtGui.QApplication.desktop().screenGeometry().height()

    # Create Camera Widgets 
    username = 'Your camera username!'
    password = 'Your camera password!'

    # Stream links
    camera0 = 'rtsp://{}:{}@192.168.1.43:554/cam/realmonitor?channel=1&subtype=0'.format(username, password)
    camera1 = 'rtsp://{}:{}@192.168.1.45/axis-media/media.amp'.format(username, password)
    camera2 = 'rtsp://{}:{}@192.168.1.47:554/cam/realmonitor?channel=1&subtype=0'.format(username, password)
    camera3 = 'rtsp://{}:{}@192.168.1.40:554/cam/realmonitor?channel=1&subtype=0'.format(username, password)
    camera4 = 'rtsp://{}:{}@192.168.1.44:554/cam/realmonitor?channel=1&subtype=0'.format(username, password)
    camera5 = 'rtsp://{}:{}@192.168.1.42:554/cam/realmonitor?channel=1&subtype=0'.format(username, password)
    camera6 = 'rtsp://{}:{}@192.168.1.46:554/cam/realmonitor?channel=1&subtype=0'.format(username, password)
    camera7 = 'rtsp://{}:{}@192.168.1.41:554/cam/realmonitor?channel=1&subtype=0'.format(username, password)

    # Create camera widgets
    print('Creating Camera Widgets...')
    zero = CameraWidget(screen_width//3, screen_height//3, camera0)
    one = CameraWidget(screen_width//3, screen_height//3, camera1)
    two = CameraWidget(screen_width//3, screen_height//3, camera2)
    three = CameraWidget(screen_width//3, screen_height//3, camera3)
    four = CameraWidget(screen_width//3, screen_height//3, camera4)
    five = CameraWidget(screen_width//3, screen_height//3, camera5)
    six = CameraWidget(screen_width//3, screen_height//3, camera6)
    seven = CameraWidget(screen_width//3, screen_height//3, camera7)

    # Add widgets to layout
    print('Adding widgets to layout...')
    ml.addWidget(zero.get_video_frame(),0,0,1,1)
    ml.addWidget(one.get_video_frame(),0,1,1,1)
    ml.addWidget(two.get_video_frame(),0,2,1,1)
    ml.addWidget(three.get_video_frame(),1,0,1,1)
    ml.addWidget(four.get_video_frame(),1,1,1,1)
    ml.addWidget(five.get_video_frame(),1,2,1,1)
    ml.addWidget(six.get_video_frame(),2,0,1,1)
    ml.addWidget(seven.get_video_frame(),2,1,1,1)

    print('Verifying camera credentials...')

    mw.show()

    QtGui.QShortcut(QtGui.QKeySequence('Ctrl+Q'), mw, exit_application)

    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
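
Since your 9 cameras are local USB devices rather than IP cameras, you can pass integer device indices as the stream links instead of RTSP URLs (cv2.VideoCapture accepts either). Here is a hedged sketch of how the widget-creation part above could change; the indices 0-8 are an assumption, and you may need to skip the index of a built-in webcam:

# Sketch: use local USB device indices instead of RTSP links.
# Indices 0-8 are assumptions; skip any index that belongs to a built-in webcam.
camera_links = list(range(9))
camera_widgets = [CameraWidget(screen_width//3, screen_height//3, link) for link in camera_links]
for position, widget in enumerate(camera_widgets):
    ml.addWidget(widget.get_video_frame(), position // 3, position % 3, 1, 1)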