Develop #457

Merged: 8 commits, Apr 20, 2021

Changes from all commits
9 changes: 8 additions & 1 deletion src/api.py
@@ -478,6 +478,14 @@ def interpolate_annotation():
start_frames, req_data['endFrame'])
return json.dumps({'success': success, 'msg': msg}), status, {'ContentType': 'application/json'}

# Run a sanity check on the annotations between start and end frame
@app.route('/api/annotation/getSanityCheck', methods=['GET'])
@flask_login.login_required
def get_sanity_check():
dataset = Dataset(request.headers['dataset'], request.headers['datasetType'])
success, msg, status = annotationService.get_sanity_check(dataset, request.headers['scene'], request.headers['user'],
request.headers['startFrame'], request.headers['endFrame'])
return json.dumps({'success': success, 'msg': msg}), status, {'ContentType': 'application/json'}

# Autocomplete between 2 points and store the completed 3d points
# startFrame is an array with the frame of each label
@@ -492,7 +500,6 @@ def autocomplete_annotation():
start_frames, req_data['endFrame'])
return json.dumps({'success': success, 'msg': msg}), status, {'ContentType': 'application/json'}


# Replicate object between start and end frame
@app.route('/api/annotation/replicate/object', methods=['POST'])
@flask_login.login_required
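For reference, a client could exercise the new endpoint roughly as follows. This is a minimal sketch, assuming a server at http://localhost:5000 and an already-authenticated requests session (flask_login protects the route); the header names and URL come from the route above, but every value shown is a made-up example.

import requests

BASE_URL = 'http://localhost:5000'  # assumption: local development server
session = requests.Session()        # assumed to already carry a valid login cookie

response = session.get(
    BASE_URL + '/api/annotation/getSanityCheck',
    headers={
        'dataset': 'posetrack_data',   # example value
        'datasetType': 'poseTrack',    # example value
        'scene': '000001',             # example value
        'user': 'annotator1',          # example value
        'startFrame': '0',
        'endFrame': '100'
    }
)

# On success the body is {'success': True, 'msg': [...]}, where 'msg' is the list
# of detected errors returned by annotationService.get_sanity_check.
for error in response.json()['msg']:
    print(error['number'], error['track_id'], error['type'], error['reason'])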
108 changes: 108 additions & 0 deletions src/python/logic/annotationService.py
@@ -16,12 +16,14 @@
from python.logic.ptService import PTService

from python.objects.frame import Frame
from python.objects.video import Video
from python.objects.annotation import Annotation
from python.objects.object import Object
from python.objects.user_action import UserAction
from python.objects.object_type import Object_type
from python.objects.pose_property import PoseProperty

import numbers

# AnnotationService logger
log = logging.getLogger('annotationService')
@@ -30,6 +32,7 @@
objectTypeManager = ObjectTypeManager()
frameManager = FrameManager()
actionManager = ActionManager()
videoManager = VideoManager()
datasetManager = DatasetManager()
aikService = AIKService()
ptService = PTService()
@@ -595,6 +598,111 @@ def autocomplete_annotation(self, dataset, scene, user, uid_object, object_type,
log.error('Error filling in keypoints')
return False, final_result, 500

# Run a sanity check to verify the correctness of the data within the specified frames of the given video
def get_sanity_check(self, dataset, scene, user, start_frame, end_frame):
# TODO: we could register this action for the user, but it is not necessary at the moment
# Get all annotations for the video for the specified frames
start_annotation = Annotation(dataset, scene, frame=start_frame)
end_annotation = Annotation(dataset, scene, frame=end_frame)
annotations = annotationManager.get_annotations_by_frame_range(start_annotation, end_annotation)
# Get details from video
video = videoManager.get_video(Video(scene, dataset))
print("annotations received:")
print(len(annotations))
# Initialize errors list
errors_detected = []
# Run the sanity check: one annotation per frame
for annotation in annotations:
# Check if it is an annotable frame
try:
annotable_frame = annotation["frame"] in ptService.frames_to_annotate_persons[annotation["scene"]]
except KeyError:
log.debug('Frame not in the list of frames to annotate')
annotable_frame = False
# Separate into lists
bbox_list, bbox_head_list, person_list, ir_list = ptService.divide_objects_in_arrays(annotation["objects"])
# Normal checks
if not (len(bbox_list) == len(bbox_head_list) == len(person_list)):
errors_detected.append({
"number": "xx",
"track_id": "xx",
"type": "xx",
"reason": "Wrong number of objects. There may be duplicated track_ids in the sequence."
})
# Check that the properties of the objects are correct
for nr_bbox, bbox in enumerate(bbox_list):
errors_detected = ptService.check_object_correctness(bbox, errors_detected)
# If bbox is not annotated, then there's nothing to check
if len(bbox["keypoints"]) > 0:
poly_bbox = ptService.transform_to_poly(bbox["keypoints"])
if video.type == "val":
# --- Every bbox must have a bbox_head inside (or at least half of it, given that the bbox_head can
# lie partially outside of the canvas)
bbox_head = ptService.find(bbox_head_list, "track_id", bbox["track_id"])
poly_bbox_head = ptService.transform_to_poly(bbox_head["keypoints"])
head_inside, prcnt_points_inside = ptService.is_A_in_B(poly_bbox_head, poly_bbox)
# If the bbox_head is not completely inside bbox, check the percentage of points inside
if not head_inside:
# If it's not at least half, it's wrong. Add error
if prcnt_points_inside < 0.5:
errors_detected.append({
"number": bbox["uid"]//100 % 10000,
"track_id": bbox["track_id"],
"type": "bbox_head",
"reason": "bbox_head outside of corresponding bbox."
})
# If it's an annotable frame, do further checks. These checks are the same in train and val
if annotable_frame:
# --- Every bbox must have a pose inside, unless it is inside an ignore region
# Check whether the bbox is inside an ignore region
bbox_in_ir = False
for ir in ir_list:
poly_ir = ptService.transform_to_poly(ir["keypoints"])
# is_A_in_B returns a (fully_inside, percentage) tuple, so unpack it instead of testing
# the tuple itself, which is always truthy
bbox_inside_ir, _ = ptService.is_A_in_B(poly_bbox, poly_ir)
if bbox_inside_ir:
bbox_in_ir = True
break
# If it's not in an ignore region, check that the pose is inside the bbox
if not bbox_in_ir:
person = ptService.find(person_list, "track_id", bbox["track_id"])
person_inside = ptService.is_person_in_B(person["keypoints"], poly_bbox)
if not person_inside:
errors_detected.append({
"number": bbox["uid"]//100 % 10000,
"track_id": bbox["track_id"],
"type": "person",
"reason": "At least one person keypoint outside of corresponding bbox"
})
for nr_bbox_head, bbox_head in enumerate(bbox_head_list):
if video.type == "train" and len(bbox_head["keypoints"]) > 0:
# If there is a bbox_head, we must ensure that it is within the bounds of its corresponding bbox
bbox = ptService.find(bbox_list, "track_id", bbox_head["track_id"])
if len(bbox["keypoints"]) > 0:
poly_bbox = ptService.transform_to_poly(bbox["keypoints"])
poly_bbox_head = ptService.transform_to_poly(bbox_head["keypoints"])
bbox_head_inside, prcnt = ptService.is_A_in_B(poly_bbox_head, poly_bbox)
if not bbox_head_inside and prcnt < 0.5:
errors_detected.append({
"number": bbox["uid"]//100 % 10000,
"track_id": bbox["track_id"],
"type": "bbox_head",
"reason": "bbox_head outside of corresponding bbox"
})
else:
errors_detected.append({
"number": bbox["uid"]//100 % 10000,
"track_id": bbox["track_id"],
"type": bbox["type"],
"reason": "bbox_head annotated with no corresponding annotated bbox"
})
errors_detected = ptService.check_object_correctness(bbox_head, errors_detected)
for nr_person, person in enumerate(person_list):
errors_detected = ptService.check_object_correctness(person, errors_detected)
for nr_ir, ir in enumerate(ir_list):
errors_detected = ptService.check_object_correctness(ir, errors_detected)

return True, errors_detected, 200

# Replicate and store the annotation between start and end frame
# Always a single object in "objects", so always objects[0]!!
def replicate_annotation(self, dataset, scene, user, uid_object, object_type, start_frame, end_frame, track_id,
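Note on the helpers used above: the ptService implementations are not part of this diff, so the following is only an illustrative sketch of the expected semantics, not the actual ptService code. is_A_in_B is treated as a polygon-containment test that also reports which fraction of A's points fall inside B (the sanity check accepts a bbox_head when at least half of it lies inside its bbox), and the "number" field of an error entry appears to recover the frame number from the object's uid (uid // 100 matches the image_id derivation in the export code below; the % 10000 step is an assumption).

from shapely.geometry import Point, Polygon

def is_A_in_B(poly_a, poly_b):
    # Sketch only: return (fully_inside, fraction_of_A_points_inside_B).
    points_a = [Point(c) for c in list(poly_a.exterior.coords)[:-1]]  # drop closing duplicate
    fraction = sum(poly_b.contains(p) for p in points_a) / len(points_a)
    return poly_b.contains(poly_a), fraction

def make_error(obj, obj_type, reason):
    # Error entries follow the shape used by get_sanity_check; the uid decoding
    # (// 100 to get the image id, % 10000 to keep its frame part) is an assumption.
    return {
        "number": obj["uid"] // 100 % 10000,
        "track_id": obj["track_id"],
        "type": obj_type,
        "reason": reason
    }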
62 changes: 1 addition & 61 deletions src/python/logic/datasetService.py
@@ -561,7 +561,7 @@ def export_dataset_PT(self, dataset):
if min_frame <= frame <= max_frame:
objects = annotations_db[i]["objects"]
# Separate into specific arrays for every object, except ignore regions (already processed)
bbox_objs, bbox_head_objs, person_objs = ptService.divide_objects_in_arrays(objects)
bbox_objs, bbox_head_objs, person_objs, _ = ptService.divide_objects_in_arrays(objects)
# Process bbox first to merge person_id
for bbox in bbox_objs:
# Find whether person_id is already in the annotation file
@@ -669,66 +669,6 @@ def export_dataset_PT(self, dataset):
"id": person["uid"],
"reason": "Duplicated non-empty person for this person_id"
})
pass
# else if both are empty, we don't do anything
# for obj in objects:
# if obj["type"] != 'ignore_region': # Ignore 'ignore_regions' --> already exported
# index = self.is_track_id_on_list(annotations_file, obj["uid"], obj["track_id"])
# if index == -1:
# if obj["type"] == "bbox":
# # If there is a bbox with a nonempty array of kps, it's valid
# kps = ptService.transform_to_XYWH(obj["keypoints"])
# # Set kps outside the frame to an extreme within
# kps = self.check_limits_kps(kps, width, height)
# obj["bbox"] = kps
# del(obj["keypoints"])
# elif obj["type"] == "bbox_head":
# # kps = ptService.transform_to_XYWH(obj["keypoints"])
# # Do not transform or clamp values of head_bboxes as per issue #430
# kps = obj["keypoints"]
# # kps = self.check_limits_kps(kps, width, height)
# # Flatten list of kps
# obj["bbox_head"] = [item for sublist in kps for item in sublist]
# del(obj["keypoints"])
# elif obj["type"] == "person": # flatten keypoints array
# kps = list(obj["keypoints"])
# del(obj["keypoints"])
# # Set kps outside the frame to an extreme within
# kps = self.process_keypoints_person(kps)
# kps = self.check_limits_kps(kps, width, height, person=True)
# obj["keypoints"] = kps
# # Always delete type field, as it is unnecessary
# del(obj["type"])
# obj["id"] = obj["uid"]
# del(obj["uid"])
# obj["image_id"] = int(obj["id"]/100)
# obj["scores"] = []
# obj = self.create_missing_params_pt(obj)
# annotations_file.append(obj)
# else: # If already in annotation, just add what we want
# if obj["type"] == "bbox":
# # If there is a bbox with a nonempty array of kps, it's valid
# annotations_file[index]["person_id"] = obj["person_id"]
# kps = ptService.transform_to_XYWH(obj["keypoints"])
# # Set kps outside the frame to an extreme within
# kps = self.check_limits_kps(kps, width, height)
# annotations_file[index]["bbox"] = kps
# elif obj["type"] == "bbox_head":
# # kps = ptService.transform_to_XYWH(obj["keypoints"])
# # Do not transform or clamp values of head_bboxes as per issue #430
# kps = obj["keypoints"]
# # Set kps outside the frame to an extreme within
# # kps = self.check_limits_kps(kps, width, height)
# # Flatten list of kps
# annotations_file[index]["bbox_head"] = [item for sublist in kps for item in sublist]
# elif obj["type"] == "person":
# # annotations_file[index]["keypoints"] = np.array(obj["keypoints"]).flatten().tolist()
# kps = list(obj["keypoints"])
# del(obj["keypoints"])
# # Set kps outside the frame to an extreme within
# kps = self.process_keypoints_person(kps)
# kps = self.check_limits_kps(kps, width, height, person=True)
# annotations_file[index]["keypoints"] = kps
# If there were errors in this frame, add an entry to the dictionary of errors in this video
if frame_errors:
video_errors[str(frame)] = frame_errors
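The only live change in this file updates the call site: divide_objects_in_arrays now also returns the ignore regions as a fourth list (consumed by the sanity check, discarded here). Below is a sketch of the presumed grouping behavior, assuming the four object type strings used elsewhere in this PR; the real ptService implementation may differ.

def divide_objects_in_arrays(objects):
    # Split an annotation's objects into one list per object type.
    bbox_list = [o for o in objects if o["type"] == "bbox"]
    bbox_head_list = [o for o in objects if o["type"] == "bbox_head"]
    person_list = [o for o in objects if o["type"] == "person"]
    ir_list = [o for o in objects if o["type"] == "ignore_region"]
    return bbox_list, bbox_head_list, person_list, ir_list

Call sites that do not need the ignore regions simply discard the fourth element, as in the export code above: bbox_objs, bbox_head_objs, person_objs, _ = ptService.divide_objects_in_arrays(objects).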