RPI Display

Distribution: Ubuntu Desktop 22.04 (20.04?)

import os
import time
import math
from collections import deque

import cv2
import numpy as np
from numba import njit
from matplotlib import pyplot as plt
import matplotlib.style as mplstyle
mplstyle.use('fast')

X_SIZE = 1280
Y_SIZE = 800

file = '/dev/fb0'

# NumPy shapes are (height, width), so the 1280x800 panel maps to (Y_SIZE, X_SIZE)
fb = np.memmap(file, dtype=np.uint16, mode='w+', shape=(Y_SIZE, X_SIZE), order='C')

pixels_linear = np.zeros(X_SIZE * Y_SIZE, dtype=np.uint16)
pixels = np.zeros((Y_SIZE, X_SIZE), dtype=np.uint16)

@njit
def rgb_to_bits(rgb_linear, arr):
    # Pack 8-bit RGB into 16-bit RGB565: 5 bits red, 6 bits green, 5 bits blue
    for i, rgb in enumerate(rgb_linear):
        arr[i] = np.uint16((rgb[0] // 8) << 11 | (rgb[1] // 4) << 5 | (rgb[2] // 8))


def flush_image(rgb_img):
    # rgb_img comes from cv2.resize(..., (X_SIZE, Y_SIZE)), so its shape is (Y_SIZE, X_SIZE, 3)
    rgb_data_linear = rgb_img.reshape(X_SIZE * Y_SIZE, 3)
    rgb_to_bits(rgb_data_linear, pixels_linear)
    pixels[:] = pixels_linear.reshape(Y_SIZE, X_SIZE)
    fb[:] = pixels[:]
    fb.flush()


def fig_to_np(figure):
    # Render the Matplotlib canvas and drop the alpha channel to get an RGB array
    return np.array(figure.canvas.buffer_rgba(), dtype=np.uint8)[:, :, (0, 1, 2)]


# 16 in x 10 in at 80 dpi = 1280x800 px, matching the framebuffer
fig, ax = plt.subplots(figsize=(16, 10), dpi=80)


cap = cv2.VideoCapture('robo-eye.mp4')
if not cap.isOpened():
    print("Error opening video stream or file")
fps = cap.get(cv2.CAP_PROP_FPS)
delay = 1 / fps


def draw(ax_, figure, xs_, ys_):
    ax_.clear()
    ax_.plot(xs_, ys_, antialiased=True)
    figure.canvas.draw()


xs = deque(maxlen=30)
ys = deque(maxlen=30)

while True:
    # Alternative path: stream a live Matplotlib plot instead of the video
    #t = time.time()
    #xs.append(t)
    #ys.append(math.sin(t))
    #draw(ax, fig, xs, ys)
    #flush_image(fig_to_np(fig))
    ret, frame = cap.read()
    if ret:
        cv_img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        resize = cv2.resize(cv_img, (X_SIZE, Y_SIZE))
        flush_image(resize)
    else:
        # End of file: rewind and loop the video
        cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
    time.sleep(delay)
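As a quick sanity check of the RGB565 packing done in rgb_to_bits, here is a minimal sketch that unpacks a 16-bit value back into approximate 8-bit RGB; unpack_rgb565 is a hypothetical helper, not part of the original script:

import numpy as np

def unpack_rgb565(value):
    # Invert the packing done in rgb_to_bits: 5 bits red, 6 bits green, 5 bits blue
    r = ((value >> 11) & 0x1F) << 3
    g = ((value >> 5) & 0x3F) << 2
    b = (value & 0x1F) << 3
    return int(r), int(g), int(b)

packed = np.uint16((200 // 8) << 11 | (100 // 4) << 5 | (50 // 8))
print(unpack_rgb565(packed))  # (200, 100, 48) - the low bits are lost in the 5/6/5 packing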

Stop the display manager so it no longer draws over the framebuffer:

sudo service gdm stop

To boot into text mode permanently and keep gdm disabled:

sudo systemctl set-default multi-user.target
sudo update-rc.d gdm remove
sudo systemctl disable gdm

sudo apt install grub2-common

In /etc/default/grub set:

GRUB_CMDLINE_LINUX_DEFAULT=""
GRUB_TERMINAL=console

sudo update-grub

sudo systemctl edit getty@tty1.service

Add to the override file so the rpi-free user logs in automatically on the console:
[Service]
ExecStart=
ExecStart=/sbin/agetty -a rpi-free --noclear %I $TERM

To hide the blinking console cursor, run crontab -e and add:

@reboot echo 0 > /sys/class/graphics/fbcon/cursor_blink

If this fails with "Permission denied", execute:

sudo chmod a+w /sys/class/graphics/fbcon/cursor_blink
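Alternatively (an assumption, not part of the original setup), the blink can be switched off from the Python script itself, provided it can write to the same sysfs file:

# Hypothetical alternative: disable the console cursor blink from Python
try:
    with open('/sys/class/graphics/fbcon/cursor_blink', 'w') as f:
        f.write('0')
except PermissionError:
    print('Run as root, or apply the chmod above first')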


To get info about the framebuffer, use: fbset -fb /dev/fb0
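The geometry can also be read from sysfs; a minimal sketch, assuming the panel is fb0 and exposes the standard virtual_size and bits_per_pixel attributes:

# Read framebuffer geometry from sysfs instead of fbset
with open('/sys/class/graphics/fb0/virtual_size') as f:
    width, height = (int(v) for v in f.read().split(','))
with open('/sys/class/graphics/fb0/bits_per_pixel') as f:
    bpp = int(f.read())
print(f'{width}x{height} at {bpp} bits per pixel')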


Another example: outputting pictures, solid colors, and video (Ubuntu Server 20.04)


import numpy as np
import cv2
import time
# Map the screen as a NumPy array
# N.B. NumPy stores HEIGHT then WIDTH, not WIDTH then HEIGHT!
# c is the number of channels, 4 because the framebuffer is BGRA
h, w, c = 800, 1280, 4  # height, width, color channels (Blue, Green, Red, Alpha); check with: fbset -fb /dev/fb0
fb = np.memmap('/dev/fb0', dtype='uint8', mode='w+', shape=(h, w, c))

# Fill entire screen with blue - takes 29 ms on Raspi 4
fb[:] = [255,0,0, 255]
time.sleep(0.5)

# Fill top half with red - takes 15 ms on Raspi 4
fb[:h//2] = [0,0,255, 255]
time.sleep(0.5)

# Fill bottom right quarter with green - takes 7 ms on Raspi 4
fb[h//2:, w//2:] = [0,255,0, 255]
time.sleep(0.5)

# Show a 500x500 image in the top-left corner
fb[:500, :500] = cv2.cvtColor(cv2.imread('pic_500x500.jpg'), cv2.COLOR_BGR2BGRA)
time.sleep(0.5)

# Show a full-screen image (must match the framebuffer resolution)
fb[:] = cv2.cvtColor(cv2.imread('pic.jpg'), cv2.COLOR_BGR2BGRA)
time.sleep(0.5)

# Play video. ATTENTION: the video resolution must equal the framebuffer resolution, otherwise resize each frame with cv2.resize(frame, (w, h))
cap = cv2.VideoCapture('video_1280x800.mp4')
if not cap.isOpened():
    print("Error opening video stream or file")
fps = cap.get(cv2.CAP_PROP_FPS)
delay = 1 / fps

while True:
    ret, frame = cap.read()
    if ret:
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
        fb[:] = frame[:]
    else:
        # End of file: rewind and loop
        cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
    time.sleep(delay)
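The millisecond figures quoted in the comments come from timing the array assignments; a minimal sketch for reproducing such a measurement on your own hardware (reusing fb from the script above; the numbers will differ):

import time

t0 = time.monotonic()
fb[:] = [255, 0, 0, 255]  # full-screen solid blue in BGRA
print(f"full-screen fill took {(time.monotonic() - t0) * 1000:.1f} ms")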

Example of outputting video and pictures in an OpenCV window (Ubuntu Desktop 20.04)

import cv2
import numpy as np

file_name = "video_1280x800.mp4"
window_name = "windowyy"
interframe_wait_ms = 30
cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
cv2.moveWindow(window_name, 1920, 0)  # move past the primary screen (assumed 1920 px wide) onto the external display

cap = cv2.VideoCapture(file_name)
if not cap.isOpened():
    print("Error: Could not open video.")
    exit()

while True:
    ret, frame = cap.read()
    if not ret:
        print("Reached end of video, exiting.")
        break
    if cv2.waitKey(interframe_wait_ms) & 0x7F == ord('q'):
        print("Exit requested.")
        break
    cv2.imshow(window_name, frame)

cap.release()



img = cv2.imread('img.jpg')
# Show the image
cv2.imshow(window_name, img)
cv2.waitKey(2000)



cap = cv2.VideoCapture('video.mp4')
if not cap.isOpened():
    print("Error: Could not open video.")
    exit()

while True:
    ret, frame = cap.read()
    if not ret:
        print("Reached end of video, exiting.")
        break
    if cv2.waitKey(interframe_wait_ms) & 0x7F == ord('q'):
        print("Exit requested.")
        break
    cv2.imshow(window_name, frame)

cv2.waitKey(2000)
cap.release()


cv2.destroyAllWindows()
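The fixed interframe_wait_ms = 30 corresponds to roughly 33 fps. If playback looks too fast or too slow, the delay can instead be derived from the file's reported frame rate right after opening the capture, as in the framebuffer examples above; a small sketch (falling back to 30 ms when the container reports no frame rate):

cap = cv2.VideoCapture(file_name)
fps = cap.get(cv2.CAP_PROP_FPS)
interframe_wait_ms = int(1000 / fps) if fps > 0 else 30  # fall back to ~33 fps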
