Skip to content

Commit

Permalink
Update export to limit the points inside the image. Closes #430
Browse files Browse the repository at this point in the history
  • Loading branch information
dari1495 committed Jan 4, 2021
1 parent 73485b5 commit 8978522
Show file tree
Hide file tree
Showing 2 changed files with 223 additions and 36 deletions.
135 changes: 99 additions & 36 deletions src/python/logic/datasetService.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
from werkzeug.utils import secure_filename
import zipfile
import numpy as np
import cv2

from python.infrastructure.datasetManager import DatasetManager
from python.infrastructure.videoManager import VideoManager
Expand All @@ -20,6 +21,8 @@
from python.objects.annotation import Annotation
from python.objects.video import Video

from random import randrange

# DatasetService logger
log = logging.getLogger('datasetService')

Expand Down Expand Up @@ -471,10 +474,43 @@ def create_missing_params_pt(self, obj):
obj["keypoints"] = []
return obj

def check_limits_kps(self, kps, width, height, person=False):
    """Clamp keypoint coordinates so every point lies inside the image.

    :param kps: flat list of coordinates. For person keypoints the layout is
        [x, y, v, x, y, v, ...] (stride 3, the v flag is left untouched);
        otherwise [x, y, x, y, ...] (stride 2, e.g. an XYWH bbox).
    :param width: image width in pixels; x values are clamped to [0, width - 1].
    :param height: image height in pixels; y values are clamped to [0, height - 1].
    :param person: True when kps uses the 3-values-per-point person layout.
    :return: a new list with clamped coordinates; the input list is not modified.
    """
    clamped = list(kps)
    # Person keypoints carry a visibility value after each (x, y) pair.
    stride = 3 if person else 2
    # x coordinates start at index 0, y coordinates at index 1.
    for i in range(0, len(kps), stride):
        clamped[i] = max(min(width - 1, kps[i]), 0)
    for i in range(1, len(kps), stride):
        clamped[i] = max(min(height - 1, kps[i]), 0)
    return clamped

def check_limits_ir(self, kps_y, kps_x, width, height):
    """Clamp ignore-region coordinates so they lie inside the image.

    Handles both a flat coordinate list and a list of polygons (list of
    lists); the structure of kps_x decides which case applies, mirroring
    the stored annotation format.

    :param kps_y: y coordinates (flat list, or list of lists per region).
    :param kps_x: x coordinates, same structure as kps_y.
    :param width: image width; x values are clamped to [0, width - 1].
    :param height: image height; y values are clamped to [0, height - 1].
    :return: tuple (clamped_y, clamped_x) as new lists; unlike a shallow
        copy, the caller's inner lists are never mutated.
    """
    if not kps_x:
        # Nothing to clamp; return copies so callers may mutate safely.
        return list(kps_y), list(kps_x)
    if isinstance(kps_x[0], list):
        # List of polygons: clamp each point of each region.
        kps2_x = [[max(min(width - 1, x), 0) for x in region] for region in kps_x]
        kps2_y = [[max(min(height - 1, y), 0) for y in region] for region in kps_y]
    else:
        # Single flat list of coordinates.
        kps2_x = [max(min(width - 1, x), 0) for x in kps_x]
        kps2_y = [max(min(height - 1, y), 0) for y in kps_y]
    return kps2_y, kps2_x

# Export annotation file for PT datasets to a file for a given dataset
def export_dataset_PT(self, dataset):
videos = videoManager.get_videos(dataset)
counter = 0
for j in range(0, len(videos)):
print("Exporting, please wait...") if counter % 20 == 0 else None
counter += 1
# Ignore buggy videos
if videos[j].name not in ptService.video_ignore_list:
final_annotation = dict()
Expand All @@ -483,36 +519,21 @@ def export_dataset_PT(self, dataset):
# _, annotations_db, _ = annotationService.get_annotations(dataset, dataset.pt, videos[j].name, "root")
_, annotations_db, _ = annotationService.get_annotations(annotation)

# Process frame data
_, frames, _ = frameService.get_frames(videos[j])
for i in range(0, len(frames)):
frames[i]["vid_id"] = frames[i]["video"]
frames[i]["file_name"] = '/'.join((frames[i]["path"]).split("/")[-4:])
frames[i]["has_no_densepose"] = True
frames[i]["nframes"] = len(frames)
frames[i]["is_labeled"] = True if annotations_db[i]["objects"] != [] else False
# True if the pose has been modified. Meaning it has to be in the frames to annotate array.
_, list_of_frames_to_annotate, _ = ptService.get_frames_to_annotate_per_video(frames[i]["video"])
frames[i]["has_labeled_pose"] = True \
if frames[i]["number"] in list_of_frames_to_annotate \
else False
# Add ignore regions
annotation = Annotation(dataset, videos[j].name, frame=i)
frames[i]["ignore_regions_y"], frames[i]["ignore_regions_x"] = self.export_ignore_regions(annotation)
del(frames[i]["number"])
del(frames[i]["dataset"])
del(frames[i]["video"])
del(frames[i]["path"])
del(frames[i]["has_ignore_regions"])
final_annotation["images"] = frames

# Process annotation data
''' Process annotation data '''

# Export data only in the original range of annotations
video = Video(videos[j].name, dataset)
result, frames_info = frameService.get_frame_info_of_video(video)
if result:
min_frame, max_frame = frames_info[0].number, frames_info[1].number
# Get width and height of the video for later
random_nr = randrange(min_frame, max_frame)
_, rnd_frame, _ = videoService.get_video_frames(video, random_nr, random_nr)
rnd_frame = rnd_frame[0]["image"]
mat_img = cv2.imread(rnd_frame)
height, width, _ = mat_img.shape
height = height * 2 if video.name in ptService.resized_videos else height
width = width * 2 if video.name in ptService.resized_videos else width

annotations_file = list()
for i in range(0, len(annotations_db)):
Expand All @@ -525,21 +546,23 @@ def export_dataset_PT(self, dataset):
if index == -1:
if obj["type"] == "bbox":
kps = ptService.transform_to_XYWH(obj["keypoints"])
# Set negative numbers to 0
kps = [0 if i < 0 else i for i in kps]
# Set kps outside the frame to an extreme within
kps = self.check_limits_kps(kps, width, height)
obj["bbox"] = kps
del(obj["keypoints"])
elif obj["type"] == "bbox_head":
kps = ptService.transform_to_XYWH(obj["keypoints"])
# Set negative numbers to 0
kps = [0 if i < 0 else i for i in kps]
# Set kps outside the frame to an extreme within
kps = self.check_limits_kps(kps, width, height)
obj["bbox_head"] = kps
del(obj["keypoints"])
elif obj["type"] == "person": # flatten keypoints array
kps = list(obj["keypoints"])
del(obj["keypoints"])
kps = [0 if i < 0 else i for i in kps]
obj["keypoints"] = self.process_keypoints_person(kps)
# Set kps outside the frame to an extreme within
kps = self.process_keypoints_person(kps)
kps = self.check_limits_kps(kps, width, height, person=True)
obj["keypoints"] = kps
# Always delete type field, as it is unnecessary
del(obj["type"])
obj["id"] = obj["uid"]
Expand All @@ -552,23 +575,63 @@ def export_dataset_PT(self, dataset):
if obj["type"] == "bbox":
annotations_file[index]["person_id"] = obj["person_id"]
kps = ptService.transform_to_XYWH(obj["keypoints"])
# Set negative numbers to 0
kps = [0 if i < 0 else i for i in kps]
# Set kps outside the frame to an extreme within
kps = self.check_limits_kps(kps, width, height)
annotations_file[index]["bbox"] = kps
elif obj["type"] == "bbox_head":
kps = ptService.transform_to_XYWH(obj["keypoints"])
# Set negative numbers to 0
kps = [0 if i < 0 else i for i in kps]
# Set kps outside the frame to an extreme within
kps = self.check_limits_kps(kps, width, height)
annotations_file[index]["bbox_head"] = kps
elif obj["type"] == "person":
# annotations_file[index]["keypoints"] = np.array(obj["keypoints"]).flatten().tolist()
kps = list(obj["keypoints"])
del(obj["keypoints"])
kps = [0 if i < 0 else i for i in kps]
annotations_file[index]["keypoints"] = self.process_keypoints_person(kps)
# Set kps outside the frame to an extreme within
kps = self.process_keypoints_person(kps)
kps = self.check_limits_kps(kps, width, height, person=True)
annotations_file[index]["keypoints"] = kps
# annotations_correct = self.check_annotations_file(annotations_file, videos[j].name)
final_annotation["annotations"] = annotations_file

''' Process frame data '''
width, height = 1920, 1080
if result:
min_frame, max_frame = frames_info[0].number, frames_info[1].number
# Get width and height of the video for later
random_nr = randrange(min_frame, max_frame)
_, rnd_frame, _ = videoService.get_video_frames(video, random_nr, random_nr)
rnd_frame = rnd_frame[0]["image"]
mat_img = cv2.imread(rnd_frame)
height, width, _ = mat_img.shape
height = height * 2 if video.name in ptService.resized_videos else height
width = width * 2 if video.name in ptService.resized_videos else width

_, frames, _ = frameService.get_frames(videos[j])
for i in range(0, len(frames)):
frames[i]["vid_id"] = frames[i]["video"]
frames[i]["file_name"] = '/'.join((frames[i]["path"]).split("/")[-4:])
frames[i]["has_no_densepose"] = True
frames[i]["nframes"] = len(frames)
frames[i]["is_labeled"] = True if annotations_db[i]["objects"] != [] else False
# True if the pose has been modified. Meaning it has to be in the frames to annotate array.
_, list_of_frames_to_annotate, _ = ptService.get_frames_to_annotate_per_video(frames[i]["video"])
frames[i]["has_labeled_pose"] = True \
if frames[i]["number"] in list_of_frames_to_annotate \
else False
# Add ignore regions
annotation = Annotation(dataset, videos[j].name, frame=i)
ir_y, ir_x = self.export_ignore_regions(annotation)
print(ir_y)
ir_y, ir_x = self.check_limits_ir(ir_y, ir_x, width, height)
frames[i]["ignore_regions_y"], frames[i]["ignore_regions_x"] = ir_y, ir_x
del(frames[i]["number"])
del(frames[i]["dataset"])
del(frames[i]["video"])
del(frames[i]["path"])
del(frames[i]["has_ignore_regions"])
final_annotation["images"] = frames

# Hardcoded categories because they don't change and are a very special case...
categories = [{
"supercategory": "person",
Expand Down
Loading

0 comments on commit 8978522

Please sign in to comment.