Skip to content

Commit

Permalink
Merge pull request #1429 from alicevision/dev/pipeline_PG_CT
Browse files Browse the repository at this point in the history
[multiview] New pipeline "Photogrammetry and Camera Tracking"
  • Loading branch information
fabiencastan authored May 27, 2021
2 parents 204c175 + 1999b7c commit 81fe0d5
Show file tree
Hide file tree
Showing 4 changed files with 71 additions and 22 deletions.
55 changes: 38 additions & 17 deletions meshroom/multiview.py
Original file line number Diff line number Diff line change
Expand Up @@ -472,7 +472,7 @@ def sfmAugmentation(graph, sourceSfm, withMVS=False):
return sfmNodes, mvsNodes


def cameraTrackingPipeline(graph):
def cameraTrackingPipeline(graph, sourceSfm=None):
"""
Instantiate a camera tracking pipeline inside 'graph'.
Expand All @@ -484,30 +484,33 @@ def cameraTrackingPipeline(graph):
"""

with GraphModification(graph):

cameraInit, featureExtraction, imageMatching, featureMatching, structureFromMotion = sfmPipeline(graph)
if sourceSfm is None:
cameraInitT, featureExtractionT, imageMatchingT, featureMatchingT, structureFromMotionT = sfmPipeline(graph)
else:
sfmNodes, _ = sfmAugmentation(graph, sourceSfm)
cameraInitT, featureExtractionT, imageMatchingT, featureMatchingT, structureFromMotionT = sfmNodes

imageMatching.attribute("nbMatches").value = 5 # voctree nb matches
imageMatching.attribute("nbNeighbors").value = 10
imageMatchingT.attribute("nbMatches").value = 5 # voctree nb matches
imageMatchingT.attribute("nbNeighbors").value = 10

structureFromMotion.attribute("minNumberOfMatches").value = 0
structureFromMotion.attribute("minInputTrackLength").value = 5
structureFromMotion.attribute("minNumberOfObservationsForTriangulation").value = 3
structureFromMotion.attribute("minAngleForTriangulation").value = 1.0
structureFromMotion.attribute("minAngleForLandmark").value = 0.5
structureFromMotionT.attribute("minNumberOfMatches").value = 0
structureFromMotionT.attribute("minInputTrackLength").value = 5
structureFromMotionT.attribute("minNumberOfObservationsForTriangulation").value = 3
structureFromMotionT.attribute("minAngleForTriangulation").value = 1.0
structureFromMotionT.attribute("minAngleForLandmark").value = 0.5

exportAnimatedCamera = graph.addNewNode('ExportAnimatedCamera', input=structureFromMotion.output)
exportAnimatedCameraT = graph.addNewNode('ExportAnimatedCamera', input=structureFromMotionT.output)

# store current pipeline version in graph header
graph.header.update({'pipelineVersion': __version__})

return [
cameraInit,
featureExtraction,
imageMatching,
featureMatching,
structureFromMotion,
exportAnimatedCamera,
cameraInitT,
featureExtractionT,
imageMatchingT,
featureMatchingT,
structureFromMotionT,
exportAnimatedCameraT,
]


Expand All @@ -527,3 +530,21 @@ def cameraTracking(inputImages=list(), inputViewpoints=list(), inputIntrinsics=l

return graph


def photogrammetryAndCameraTracking(inputImages=list(), inputViewpoints=list(), inputIntrinsics=list(), output='', graph=None):
if not graph:
graph = Graph('Photogrammetry And Camera Tracking')
with GraphModification(graph):
cameraInit, featureExtraction, imageMatching, featureMatching, structureFromMotion = sfmPipeline(graph)

cameraInitT, featureExtractionT, imageMatchingMultiT, featureMatchingT, structureFromMotionT, exportAnimatedCameraT = cameraTrackingPipeline(graph, structureFromMotion)

cameraInit.viewpoints.extend([{'path': image} for image in inputImages])
cameraInit.viewpoints.extend(inputViewpoints)
cameraInit.intrinsics.extend(inputIntrinsics)

if output:
graph.addNewNode('Publish', output=output, inputFiles=[exportAnimatedCameraT.output])

return graph

23 changes: 18 additions & 5 deletions meshroom/nodes/aliceVision/ImageMatchingMultiSfM.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,9 +48,16 @@ class ImageMatchingMultiSfM(desc.CommandLineNode):
desc.ChoiceParam(
name='method',
label='Method',
description='Method used to select the image pairs to match.',
value='VocabularyTree',
values=['VocabularyTree', 'Sequential', 'SequentialAndVocabularyTree','Exhaustive','Frustum'],
description='Method used to select the image pairs to match:\n'
' * VocabularyTree: It uses image retrieval techniques to find images that share some content without the cost of resolving all \n'
'feature matches in details. Each image is represented in a compact image descriptor which allows to compute the distance between all \n'
'images descriptors very efficiently. If your scene contains less than "Voc Tree: Minimal Number of Images", all image pairs will be selected.\n'
' * SequentialAndVocabularyTree: Combines sequential approach with VocTree to enable connections between keyframes at different times.\n'
' * Exhaustive: Export all image pairs.\n'
' * Frustum: If images have known poses, computes the intersection between cameras frustums to create the list of image pairs.\n'
' * FrustumOrVocabularyTree: If images have known poses, use frustum intersection else use VocabularyTree.\n',
value='SequentialAndVocabularyTree',
values=['VocabularyTree', 'SequentialAndVocabularyTree', 'Exhaustive', 'Frustum'],
exclusive=True,
uid=[0],
),
Expand All @@ -60,6 +67,7 @@ class ImageMatchingMultiSfM(desc.CommandLineNode):
description='Input name for the vocabulary tree file.',
value=os.environ.get('ALICEVISION_VOCTREE', ''),
uid=[],
enabled=lambda node: 'VocabularyTree' in node.method.value,
),
desc.File(
name='weights',
Expand All @@ -68,6 +76,7 @@ class ImageMatchingMultiSfM(desc.CommandLineNode):
value='',
uid=[0],
advanced=True,
enabled=lambda node: 'VocabularyTree' in node.method.value,
),
desc.ChoiceParam(
name='matchingMode',
Expand All @@ -86,6 +95,7 @@ class ImageMatchingMultiSfM(desc.CommandLineNode):
range=(0, 500, 1),
uid=[0],
advanced=True,
enabled=lambda node: 'VocabularyTree' in node.method.value,
),
desc.IntParam(
name='maxDescriptors',
Expand All @@ -95,24 +105,27 @@ class ImageMatchingMultiSfM(desc.CommandLineNode):
range=(0, 100000, 1),
uid=[0],
advanced=True,
enabled=lambda node: 'VocabularyTree' in node.method.value,
),
desc.IntParam(
name='nbMatches',
label='Voc Tree: Nb Matches',
description='The number of matches to retrieve for each image (If 0 it will retrieve all the matches).',
value=50,
value=40,
range=(0, 1000, 1),
uid=[0],
advanced=True,
enabled=lambda node: 'VocabularyTree' in node.method.value,
),
desc.IntParam(
name='nbNeighbors',
label='Sequential: Nb Neighbors',
description='The number of neighbors to retrieve for each image (If 0 it will retrieve all the neighbors).',
value=50,
value=5,
range=(0, 1000, 1),
uid=[0],
advanced=True,
enabled=lambda node: 'Sequential' in node.method.value,
),
desc.ChoiceParam(
name='verboseLevel',
Expand Down
12 changes: 12 additions & 0 deletions meshroom/ui/qml/main.qml
Original file line number Diff line number Diff line change
Expand Up @@ -408,6 +408,13 @@ ApplicationWindow {
}
Menu {
title: "New Pipeline"
TextMetrics {
id: textMetrics
font: action_PG_CT.font
elide: Text.ElideNone
text: action_PG_CT.text
}
implicitWidth: textMetrics.width + 10 // largest text width + margin
Action {
text: "Photogrammetry"
onTriggered: ensureSaved(function() { _reconstruction.new("photogrammetry") })
Expand All @@ -420,6 +427,11 @@ ApplicationWindow {
text: "Panorama Fisheye HDR"
onTriggered: ensureSaved(function() { _reconstruction.new("panoramafisheyehdr") })
}
Action {
id: action_PG_CT
text: "Photogrammetry and Camera Tracking (experimental)"
onTriggered: ensureSaved(function() { _reconstruction.new("photogrammetryandcameratracking") })
}
Action {
text: "Camera Tracking (experimental)"
onTriggered: ensureSaved(function() { _reconstruction.new("cameratracking") })
Expand Down
3 changes: 3 additions & 0 deletions meshroom/ui/reconstruction.py
Original file line number Diff line number Diff line change
Expand Up @@ -490,6 +490,9 @@ def new(self, pipeline=None):
elif p.lower() == "panoramafisheyehdr":
# default panorama fisheye hdr pipeline
self.setGraph(multiview.panoramaFisheyeHdr())
elif p.lower() == "photogrammetryandcameratracking":
# default photogrammetry and camera tracking pipeline
self.setGraph(multiview.photogrammetryAndCameraTracking())
elif p.lower() == "cameratracking":
# default camera tracking pipeline
self.setGraph(multiview.cameraTracking())
Expand Down

0 comments on commit 81fe0d5

Please sign in to comment.