
L515: Export images from a recorded bag file, but the number of output images differs each time. How to resolve the problem? #11066

Closed
Ceciliammm opened this issue Nov 4, 2022 · 6 comments

Comments

@Ceciliammm

My code is shown below:

## License: Apache 2.0. See LICENSE file in root directory.
## Copyright(c) 2015-2017 Intel Corporation. All Rights Reserved.

###############################################
##      Open CV and Numpy integration        ##
###############################################

import pyrealsense2 as rs
import numpy as np
import cv2
import os
# import matplotlib.pyplot as plt
# from PIL import Image
from skimage.color import rgb2gray


# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
rs.config.enable_device_from_file(config, "./111.bag", repeat_playback=False)

# Get device product line for setting a supporting resolution
pipeline_wrapper = rs.pipeline_wrapper(pipeline)
pipeline_profile = config.resolve(pipeline_wrapper)
device = pipeline_profile.get_device()
device_product_line = str(device.get_info(rs.camera_info.product_line))

found_rgb = False
for s in device.sensors:
    if s.get_info(rs.camera_info.name) == 'RGB Camera':
        found_rgb = True
        break
if not found_rgb:
    print("The demo requires Depth camera with Color sensor")
    exit(0)

config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.infrared, 640, 480, rs.format.y8, 30)

if device_product_line == 'L500':
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.rgb8, 30)
else:
    config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)

# Start streaming
pipeline.start(config)
i = 0
try:
    while True:

        # Wait for a coherent pair of frames: depth and color
        frames = pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()
        infrared_frame = frames.get_infrared_frame()
        if not depth_frame or not color_frame or not infrared_frame:
            continue

        # Convert images to numpy arrays
        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        # the color stream is rgb8, so swap channels to BGR for OpenCV display/saving
        color_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
        infrared_image = np.asanyarray(infrared_frame.get_data())

        # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
        # depth_colormap = cv2.cvtColor(depth_colormap, cv2.COLOR_RGB2GRAY)
        # depth_colormap = depth_image

        depth_colormap_dim = depth_colormap.shape
        color_colormap_dim = color_image.shape

        # If depth and color resolutions are different, resize color image to match depth image for display
        if depth_colormap_dim != color_colormap_dim:
            resized_color_image = cv2.resize(color_image, dsize=(depth_colormap_dim[1], depth_colormap_dim[0]), interpolation=cv2.INTER_AREA)
            images = np.hstack((resized_color_image, depth_colormap))
        else:
            images = np.hstack((color_image, depth_colormap))

        # Show images
        cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('RealSense', images)
        cv2.waitKey(1)

        # Save data
        base_path = "F:/NeRF_Project_dataset/test/"
        img_name = str(i) + '.jpg'
        os.makedirs(os.path.join(base_path, "Depth_orig"), exist_ok=True)
        os.makedirs(os.path.join(base_path, "RGB_orig"), exist_ok=True)
        os.makedirs(os.path.join(base_path, "Infrared_orig"), exist_ok=True)

        depth_path = os.path.join(base_path, "Depth_orig", img_name)
        RGB_path = os.path.join(base_path, "RGB_orig", img_name)
        infrared_path = os.path.join(base_path, "Infrared_orig", img_name)

        # np.savetxt('F:/NeRF_Project_dataset/test/depth_data.txt', depth_data)
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

        # rgb2gray returns floats in [0, 1]; scale to 8-bit before saving,
        # otherwise imwrite produces an almost-black image
        depth_colormap = (rgb2gray(depth_colormap) * 255).astype(np.uint8)
        cv2.imwrite(depth_path, depth_colormap)
        if depth_colormap_dim != color_colormap_dim:
            resized_color_image = cv2.resize(color_image, dsize=(depth_colormap_dim[1], depth_colormap_dim[0]), interpolation=cv2.INTER_AREA)
            cv2.imwrite(RGB_path, resized_color_image)
        else:
            cv2.imwrite(RGB_path, color_image)
        cv2.imwrite(infrared_path, infrared_image)

        i = i+1

finally:

    # Stop streaming
    pipeline.stop()
@MartyG-RealSense (Collaborator) commented Nov 4, 2022

Hi @Ceciliammm Please try inserting the two lines below immediately after your pipeline.start(config) line to set set_real_time to False. When accessing a bag file, this can make playback of frames more stable and reliable than the default setting of True. Note that pipeline.start(config) must be assigned to a profile variable so the playback device can be retrieved from it.

playback = profile.get_device().as_playback()
playback.set_real_time(False)

So the code should look like this:

profile = pipeline.start(config)
playback = profile.get_device().as_playback()
playback.set_real_time(False)
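
For reference, a minimal end-to-end sketch built on this advice. It assumes pyrealsense2's pipeline.try_wait_for_frames(), which returns a success flag instead of raising a RuntimeError when the bag ends, so the number of frames read can be counted deterministically:

import pyrealsense2 as rs

pipeline = rs.pipeline()
config = rs.config()
rs.config.enable_device_from_file(config, "./111.bag", repeat_playback=False)

profile = pipeline.start(config)
playback = profile.get_device().as_playback()
# With real-time playback disabled, frames are delivered as fast as the
# loop consumes them rather than being dropped to keep wall-clock pace.
playback.set_real_time(False)

count = 0
try:
    while True:
        # Returns success=False once the bag is exhausted, instead of
        # raising like wait_for_frames() does.
        success, frames = pipeline.try_wait_for_frames()
        if not success:
            break
        count += 1
finally:
    pipeline.stop()
print("frames read:", count)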

@Ceciliammm (Author)

Hi @MartyG-RealSense, whether I put these two lines before or after pipeline.start(config), the number of exported images still changes on each run.
Here is my modified code:

import pyrealsense2 as rs
import numpy as np
import cv2
import os
from skimage.color import rgb2gray
import time


# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
rs.config.enable_device_from_file(config, "./4.bag", repeat_playback=False)


# Get device product line for setting a supporting resolution
pipeline_wrapper = rs.pipeline_wrapper(pipeline)
pipeline_profile = config.resolve(pipeline_wrapper)
device = pipeline_profile.get_device().as_playback()
device.set_real_time(False)
device_product_line = str(device.get_info(rs.camera_info.product_line))

found_rgb = False
for s in device.sensors:
    if s.get_info(rs.camera_info.name) == 'RGB Camera':
        found_rgb = True
        break
if not found_rgb:
    print("The demo requires Depth camera with Color sensor")
    exit(0)

config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.infrared, 640, 480, rs.format.y8, 30)

if device_product_line == 'L500':
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.rgb8, 30)
else:
    config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)

# Start streaming
pipeline.start(config)
device = pipeline_profile.get_device().as_playback()
device.set_real_time(False)
i = 0
try:
    while True:

        # Wait for a coherent pair of frames: depth and color
        frames = pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()
        infrared_frame = frames.get_infrared_frame()
        # see librealsense/wrappers/python/examples/opencv_pointcloud_viewer.py, line 287
        # depth_frame = decimate.process(depth_frame)
        # color_frame = decimate.process(color_frame)
        # infrared_frame = decimate.process(infrared_frame)
        if not depth_frame or not color_frame or not infrared_frame:
            continue

        # Convert images to numpy arrays
        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        # the color stream is rgb8, so swap channels to BGR for OpenCV display/saving
        color_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
        infrared_image = np.asanyarray(infrared_frame.get_data())

        # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
        # depth_colormap = cv2.cvtColor(depth_colormap, cv2.COLOR_RGB2GRAY)
        # depth_colormap = depth_image

        depth_colormap_dim = depth_colormap.shape
        color_colormap_dim = color_image.shape

        # If depth and color resolutions are different, resize color image to match depth image for display
        if depth_colormap_dim != color_colormap_dim:
            resized_color_image = cv2.resize(color_image, dsize=(depth_colormap_dim[1], depth_colormap_dim[0]), interpolation=cv2.INTER_AREA)
            images = np.hstack((resized_color_image, depth_colormap))
        else:
            images = np.hstack((color_image, depth_colormap))

        # Show images
        cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('RealSense', images)
        cv2.waitKey(1)

        # Save data
        base_path = "F:/NeRF_Project_dataset/test/"
        img_name = str(i) + '.jpg'
        os.makedirs(os.path.join(base_path, "Depth_orig"), exist_ok=True)
        os.makedirs(os.path.join(base_path, "RGB_orig"), exist_ok=True)
        os.makedirs(os.path.join(base_path, "Infrared_orig"), exist_ok=True)

        depth_path = os.path.join(base_path, "Depth_orig", img_name)
        RGB_path = os.path.join(base_path, "RGB_orig", img_name)
        infrared_path = os.path.join(base_path, "Infrared_orig", img_name)

        # np.savetxt('F:/NeRF_Project_dataset/test/depth_data.txt', depth_data)
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

        # rgb2gray returns floats in [0, 1]; scale to 8-bit before saving,
        # otherwise imwrite produces an almost-black image
        depth_colormap = (rgb2gray(depth_colormap) * 255).astype(np.uint8)
        cv2.imwrite(depth_path, depth_colormap)
        if depth_colormap_dim != color_colormap_dim:
            resized_color_image = cv2.resize(color_image, dsize=(depth_colormap_dim[1], depth_colormap_dim[0]), interpolation=cv2.INTER_AREA)
            cv2.imwrite(RGB_path, resized_color_image)
        else:
            cv2.imwrite(RGB_path, color_image)
        cv2.imwrite(infrared_path, infrared_image)

        i = i+1

finally:

    # Stop streaming
    pipeline.stop()
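
A related editorial sketch, assuming the pipeline and playback setup above: keying the saved file names on the recorded frame number rather than the loop counter i makes dropped frames visible, because gaps in the numbering identify exactly which frames were missed on a given run.

while True:
    success, frames = pipeline.try_wait_for_frames()
    if not success:
        break
    depth_frame = frames.get_depth_frame()
    if not depth_frame:
        continue
    # get_frame_number() is the frame's index as recorded in the bag, so
    # two runs that export different totals can be compared by file name.
    img_name = str(depth_frame.get_frame_number()) + '.jpg'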

@MartyG-RealSense (Collaborator)

It is common for the number of frames exported from a bag file to change from session to session. This occurs even with the RealSense SDK's official bag-extraction tool, rs-convert.

As you are using three stream types simultaneously (depth, infrared and color), you may achieve more stability if you increase the frame queue size, which adds latency to the pipeline and so reduces the risk of frame drops. Python code for doing so can be found at #6448
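
A hedged sketch of that approach, assuming the option name rs.option.frames_queue_size referenced in #6448 and the profile returned by pipeline.start(config); the value 32 is only an illustrative choice:

# Enlarge each sensor's frame queue before the processing loop so frames
# buffer (adding latency) instead of being dropped while all three
# streams are handled.
for sensor in profile.get_device().query_sensors():
    if sensor.supports(rs.option.frames_queue_size):
        sensor.set_option(rs.option.frames_queue_size, 32)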

@MartyG-RealSense (Collaborator)

Hi @Ceciliammm Do you require further assistance with this case, please? Thanks!

@Ceciliammm (Author)

No, thank you very much!

@MartyG-RealSense (Collaborator)

You are very welcome. Thanks very much for the update! As you do not require further assistance, I will close this case. Thanks again!
