diff --git a/examples/Pose3Localization.cpp b/examples/Pose3Localization.cpp
index 1667b43d9d..c18a9e9cee 100644
--- a/examples/Pose3Localization.cpp
+++ b/examples/Pose3Localization.cpp
@@ -43,7 +43,7 @@ int main(const int argc, const char* argv[]) {
   auto priorModel = noiseModel::Diagonal::Variances(
       (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished());
   Key firstKey = 0;
-  for (const Values::ConstKeyValuePair& key_value : *initial) {
+  for (const auto key_value : *initial) {
     std::cout << "Adding prior to g2o file " << std::endl;
     firstKey = key_value.key;
     graph->addPrior(firstKey, Pose3(), priorModel);
@@ -74,7 +74,7 @@ int main(const int argc, const char* argv[]) {
 
   // Calculate and print marginal covariances for all variables
   Marginals marginals(*graph, result);
-  for (const auto& key_value : result) {
+  for (const auto key_value : result) {
     auto p = dynamic_cast<const GenericValue<Pose3>*>(&key_value.value);
     if (!p) continue;
     std::cout << marginals.marginalCovariance(key_value.key) << endl;
diff --git a/examples/Pose3SLAMExample_changeKeys.cpp b/examples/Pose3SLAMExample_changeKeys.cpp
index abadce975e..5d4ed6657d 100644
--- a/examples/Pose3SLAMExample_changeKeys.cpp
+++ b/examples/Pose3SLAMExample_changeKeys.cpp
@@ -55,7 +55,7 @@ int main(const int argc, const char *argv[]) {
     std::cout << "Rewriting input to file: " << inputFileRewritten << std::endl;
     // Additional: rewrite input with simplified keys 0,1,...
     Values simpleInitial;
-    for(const Values::ConstKeyValuePair& key_value: *initial) {
+    for(const auto key_value: *initial) {
       Key key;
       if(add)
         key = key_value.key + firstKey;
diff --git a/examples/Pose3SLAMExample_g2o.cpp b/examples/Pose3SLAMExample_g2o.cpp
index 1cca92f591..3679643072 100644
--- a/examples/Pose3SLAMExample_g2o.cpp
+++ b/examples/Pose3SLAMExample_g2o.cpp
@@ -42,7 +42,7 @@ int main(const int argc, const char* argv[]) {
   auto priorModel = noiseModel::Diagonal::Variances(
       (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished());
   Key firstKey = 0;
-  for (const Values::ConstKeyValuePair& key_value : *initial) {
+  for (const auto key_value : *initial) {
     std::cout << "Adding prior to g2o file " << std::endl;
     firstKey = key_value.key;
     graph->addPrior(firstKey, Pose3(), priorModel);
diff --git a/examples/Pose3SLAMExample_initializePose3Chordal.cpp b/examples/Pose3SLAMExample_initializePose3Chordal.cpp
index 00a546bb2f..992750fed8 100644
--- a/examples/Pose3SLAMExample_initializePose3Chordal.cpp
+++ b/examples/Pose3SLAMExample_initializePose3Chordal.cpp
@@ -42,7 +42,7 @@ int main(const int argc, const char* argv[]) {
   auto priorModel = noiseModel::Diagonal::Variances(
       (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished());
   Key firstKey = 0;
-  for (const Values::ConstKeyValuePair& key_value : *initial) {
+  for (const auto key_value : *initial) {
     std::cout << "Adding prior to g2o file " << std::endl;
     firstKey = key_value.key;
     graph->addPrior(firstKey, Pose3(), priorModel);
diff --git a/examples/Pose3SLAMExample_initializePose3Gradient.cpp b/examples/Pose3SLAMExample_initializePose3Gradient.cpp
index 2f92afb9ed..384f290a19 100644
--- a/examples/Pose3SLAMExample_initializePose3Gradient.cpp
+++ b/examples/Pose3SLAMExample_initializePose3Gradient.cpp
@@ -42,7 +42,7 @@ int main(const int argc, const char* argv[]) {
   auto priorModel = noiseModel::Diagonal::Variances(
       (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished());
   Key firstKey = 0;
-  for (const Values::ConstKeyValuePair& key_value : *initial) {
+  for (const auto key_value : *initial) {
     std::cout << "Adding prior to g2o file " << std::endl;
     firstKey = key_value.key;
     graph->addPrior(firstKey, Pose3(), priorModel);
diff --git a/examples/SolverComparer.cpp b/examples/SolverComparer.cpp
index 718ae62868..7bae41403a 100644
--- a/examples/SolverComparer.cpp
+++ b/examples/SolverComparer.cpp
@@ -559,7 +559,7 @@ void runPerturb()
 
   // Perturb values
   VectorValues noise;
-  for(const Values::KeyValuePair& key_val: initial)
+  for(const Values::KeyValuePair key_val: initial)
   {
     Vector noisev(key_val.value.dim());
     for(Vector::Index i = 0; i < noisev.size(); ++i)
diff --git a/gtsam/geometry/Cal3.h b/gtsam/geometry/Cal3.h
index 74c9868f34..08ce4c1e67 100644
--- a/gtsam/geometry/Cal3.h
+++ b/gtsam/geometry/Cal3.h
@@ -99,6 +99,9 @@ class GTSAM_EXPORT Cal3 {
    */
   Cal3(double fov, int w, int h);
 
+  /// Virtual destructor
+  virtual ~Cal3() {}
+
   /// @}
   /// @name Advanced Constructors
   /// @{
diff --git a/gtsam/geometry/tests/testRot3.cpp b/gtsam/geometry/tests/testRot3.cpp
index 889f685807..2909458378 100644
--- a/gtsam/geometry/tests/testRot3.cpp
+++ b/gtsam/geometry/tests/testRot3.cpp
@@ -825,7 +825,7 @@ TEST(Rot3, RQ_derivative) {
     const auto R = Rot3::RzRyRx(xyz).matrix();
     const auto num = numericalDerivative11<Vector3, Matrix3>(RQ_proxy, R);
     Matrix39 calc;
-    RQ(R, calc).second;
+    auto dummy = RQ(R, calc).second;
 
     const auto err = vec_err.second;
     CHECK(assert_equal(num, calc, err));
diff --git a/gtsam/linear/VectorValues.cpp b/gtsam/linear/VectorValues.cpp
index 9b5868744b..7462758476 100644
--- a/gtsam/linear/VectorValues.cpp
+++ b/gtsam/linear/VectorValues.cpp
@@ -161,7 +161,7 @@ namespace gtsam {
   bool VectorValues::equals(const VectorValues& x, double tol) const {
     if(this->size() != x.size())
       return false;
-    for(const auto& values: boost::combine(*this, x)) {
+    for(const auto values: boost::combine(*this, x)) {
       if(values.get<0>().first != values.get<1>().first ||
          !equal_with_abs_tol(values.get<0>().second, values.get<1>().second, tol))
         return false;
@@ -233,7 +233,7 @@ namespace gtsam {
     double result = 0.0;
     typedef boost::tuple<VectorValues::value_type, VectorValues::value_type> ValuePair;
     using boost::adaptors::map_values;
-    for(const ValuePair& values: boost::combine(*this, v)) {
+    for(const ValuePair values: boost::combine(*this, v)) {
       assert_throw(values.get<0>().first == values.get<1>().first,
         invalid_argument("VectorValues::dot called with a VectorValues of different structure"));
       assert_throw(values.get<0>().second.size() == values.get<1>().second.size(),
diff --git a/gtsam/nonlinear/NonlinearFactorGraph.cpp b/gtsam/nonlinear/NonlinearFactorGraph.cpp
index 3063aa329b..6a9e0cd0a4 100644
--- a/gtsam/nonlinear/NonlinearFactorGraph.cpp
+++ b/gtsam/nonlinear/NonlinearFactorGraph.cpp
@@ -376,7 +376,7 @@ static Scatter scatterFromValues(const Values& values) {
   scatter.reserve(values.size());
 
   // use "natural" ordering with keys taken from the initial values
-  for (const auto& key_value : values) {
+  for (const auto key_value : values) {
     scatter.add(key_value.key, key_value.value.dim());
   }
 
diff --git a/gtsam/nonlinear/Values-inl.h b/gtsam/nonlinear/Values-inl.h
index 6829e859b6..19813adbae 100644
--- a/gtsam/nonlinear/Values-inl.h
+++ b/gtsam/nonlinear/Values-inl.h
@@ -217,7 +217,7 @@ namespace gtsam {
   /** Constructor from a Filtered view copies out all values */
   template<class ValueType>
   Values::Values(const Values::Filtered<ValueType>& view) {
-    for(const typename Filtered<ValueType>::KeyValuePair& key_value: view) {
+    for(const auto key_value: view) {
       Key key = key_value.key;
       insert(key, static_cast<const ValueType&>(key_value.value));
     }
@@ -226,7 +226,7 @@
   /* ************************************************************************* */
   template<class ValueType>
  Values::Values(const Values::ConstFiltered<ValueType>& view) {
-    for(const typename ConstFiltered<ValueType>::KeyValuePair& key_value: view) {
+    for(const auto key_value: view) {
      Key key = key_value.key;
      insert(key, static_cast<const ValueType&>(key_value.value));
    }
diff --git a/gtsam/nonlinear/Values.cpp b/gtsam/nonlinear/Values.cpp
index b672031cad..89a4206eed 100644
--- a/gtsam/nonlinear/Values.cpp
+++ b/gtsam/nonlinear/Values.cpp
@@ -206,7 +206,7 @@ namespace gtsam {
   /* ************************************************************************* */
   size_t Values::dim() const {
     size_t result = 0;
-    for(const ConstKeyValuePair& key_value: *this) {
+    for(const auto key_value: *this) {
       result += key_value.value.dim();
     }
     return result;
@@ -215,7 +215,7 @@ namespace gtsam {
   /* ************************************************************************* */
   VectorValues Values::zeroVectors() const {
     VectorValues result;
-    for(const ConstKeyValuePair& key_value: *this)
+    for(const auto key_value: *this)
       result.insert(key_value.key, Vector::Zero(key_value.value.dim()));
     return result;
   }
diff --git a/gtsam/nonlinear/internal/LevenbergMarquardtState.h b/gtsam/nonlinear/internal/LevenbergMarquardtState.h
index 8ab2e74663..cee8395407 100644
--- a/gtsam/nonlinear/internal/LevenbergMarquardtState.h
+++ b/gtsam/nonlinear/internal/LevenbergMarquardtState.h
@@ -126,7 +126,7 @@ struct LevenbergMarquardtState : public NonlinearOptimizerState {
     noiseModelCache.resize(0);
     // for each of the variables, add a prior
     damped.reserve(damped.size() + values.size());
-    for (const auto& key_value : values) {
+    for (const auto key_value : values) {
      const Key key = key_value.key;
      const size_t dim = key_value.value.dim();
      const CachedModel* item = getCachedModel(dim);
diff --git a/gtsam/nonlinear/tests/testValues.cpp b/gtsam/nonlinear/tests/testValues.cpp
index 88cfb666f0..a095cc381f 100644
--- a/gtsam/nonlinear/tests/testValues.cpp
+++ b/gtsam/nonlinear/tests/testValues.cpp
@@ -335,7 +335,7 @@ TEST(Values, filter) {
   int i = 0;
   Values::Filtered<Value> filtered = values.filter(boost::bind(std::greater_equal<Key>(), _1, 2));
   EXPECT_LONGS_EQUAL(2, (long)filtered.size());
-  for(const Values::Filtered<>::KeyValuePair& key_value: filtered) {
+  for(const auto key_value: filtered) {
     if(i == 0) {
       LONGS_EQUAL(2, (long)key_value.key);
       try {key_value.value.cast<Pose2>();} catch (const std::bad_cast& e) { FAIL("can't cast Value to Pose2");}
@@ -370,7 +370,7 @@ TEST(Values, filter) {
   i = 0;
   Values::ConstFiltered<Pose3> pose_filtered = values.filter<Pose3>();
   EXPECT_LONGS_EQUAL(2, (long)pose_filtered.size());
-  for(const Values::ConstFiltered<Pose3>::KeyValuePair& key_value: pose_filtered) {
+  for(const auto key_value: pose_filtered) {
     if(i == 0) {
       EXPECT_LONGS_EQUAL(1, (long)key_value.key);
       EXPECT(assert_equal(pose1, key_value.value));
@@ -408,7 +408,7 @@ TEST(Values, Symbol_filter) {
   values.insert(Symbol('y', 3), pose3);
 
   int i = 0;
-  for(const Values::Filtered<Value>::KeyValuePair& key_value: values.filter(Symbol::ChrTest('y'))) {
+  for(const auto key_value: values.filter(Symbol::ChrTest('y'))) {
     if(i == 0) {
       LONGS_EQUAL(Symbol('y', 1), (long)key_value.key);
       EXPECT(assert_equal(pose1, key_value.value.cast<Pose3>()));
diff --git a/gtsam/slam/InitializePose.h b/gtsam/slam/InitializePose.h
index be249b0b54..f9b99e47e3 100644
--- a/gtsam/slam/InitializePose.h
+++ b/gtsam/slam/InitializePose.h
@@ -62,7 +62,7 @@ static Values computePoses(const Values& initialRot,
 
   // Upgrade rotations to full poses
   Values initialPose;
-  for (const auto& key_value : initialRot) {
+  for (const auto key_value : initialRot) {
     Key key = key_value.key;
     const auto& rot = initialRot.at(key);
     Pose initializedPose = Pose(rot, origin);
@@ -86,7 +86,7 @@ static Values computePoses(const Values& initialRot,
 
   // put into Values structure
   Values estimate;
-  for (const auto& key_value : GNresult) {
+  for (const auto key_value : GNresult) {
     Key key = key_value.key;
     if (key != kAnchorKey) {
       const Pose& pose = GNresult.at(key);
diff --git a/gtsam/slam/InitializePose3.cpp b/gtsam/slam/InitializePose3.cpp
index af1fc609ee..43404df537 100644
--- a/gtsam/slam/InitializePose3.cpp
+++ b/gtsam/slam/InitializePose3.cpp
@@ -124,7 +124,7 @@ Values InitializePose3::computeOrientationsGradient(
   // this works on the inverse rotations, according to Tron&Vidal,2011
   Values inverseRot;
   inverseRot.insert(initialize::kAnchorKey, Rot3());
-  for(const auto& key_value: givenGuess) {
+  for(const auto key_value: givenGuess) {
     Key key = key_value.key;
     const Pose3& pose = givenGuess.at(key);
     inverseRot.insert(key, pose.rotation().inverse());
@@ -139,7 +139,7 @@ Values InitializePose3::computeOrientationsGradient(
   // calculate max node degree & allocate gradient
   size_t maxNodeDeg = 0;
   VectorValues grad;
-  for(const auto& key_value: inverseRot) {
+  for(const auto key_value: inverseRot) {
     Key key = key_value.key;
     grad.insert(key,Z_3x1);
     size_t currNodeDeg = (adjEdgesMap.at(key)).size();
@@ -162,7 +162,7 @@ Values InitializePose3::computeOrientationsGradient(
     //////////////////////////////////////////////////////////////////////////
     // compute the gradient at each node
     maxGrad = 0;
-    for (const auto& key_value : inverseRot) {
+    for (const auto key_value : inverseRot) {
      Key key = key_value.key;
      Vector gradKey = Z_3x1;
      // collect the gradient for each edge incident on key
@@ -203,7 +203,7 @@ Values InitializePose3::computeOrientationsGradient(
   // Return correct rotations
   const Rot3& Rref = inverseRot.at(initialize::kAnchorKey); // This will be set to the identity as so far we included no prior
   Values estimateRot;
-  for(const auto& key_value: inverseRot) {
+  for(const auto key_value: inverseRot) {
     Key key = key_value.key;
     if (key != initialize::kAnchorKey) {
       const Rot3& R = inverseRot.at(key);
diff --git a/gtsam/slam/dataset.cpp b/gtsam/slam/dataset.cpp
index 74e074c9e6..c8a8b15c52 100644
--- a/gtsam/slam/dataset.cpp
+++ b/gtsam/slam/dataset.cpp
@@ -586,7 +586,7 @@ void save2D(const NonlinearFactorGraph &graph, const Values &config,
   fstream stream(filename.c_str(), fstream::out);
 
   // save poses
-  for (const Values::ConstKeyValuePair key_value : config) {
+  for (const auto key_value : config) {
     const Pose2 &pose = key_value.value.cast<Pose2>();
     stream << "VERTEX2 " << key_value.key << " " << pose.x() << " " << pose.y()
            << " " << pose.theta() << endl;
diff --git a/gtsam/slam/tests/testLago.cpp b/gtsam/slam/tests/testLago.cpp
index 55449d86ed..781317d7a9 100644
--- a/gtsam/slam/tests/testLago.cpp
+++ b/gtsam/slam/tests/testLago.cpp
@@ -284,7 +284,7 @@ TEST( Lago, largeGraphNoisy_orientations ) {
   Values::shared_ptr expected;
   boost::tie(gmatlab, expected) = readG2o(matlabFile);
 
-  for(const Values::KeyValuePair& key_val: *expected){
+  for(const auto key_val: *expected){
     Key k = key_val.key;
     EXPECT(assert_equal(expected->at(k), actual.at(k), 1e-5));
   }
@@ -310,7 +310,7 @@ TEST( Lago, largeGraphNoisy ) {
   Values::shared_ptr expected;
   boost::tie(gmatlab, expected) = readG2o(matlabFile);
 
-  for(const Values::KeyValuePair& key_val: *expected){
+  for(const auto key_val: *expected){
     Key k = key_val.key;
     EXPECT(assert_equal(expected->at(k), actual.at(k), 1e-2));
   }
diff --git a/gtsam_unstable/examples/ConcurrentFilteringAndSmoothingExample.cpp b/gtsam_unstable/examples/ConcurrentFilteringAndSmoothingExample.cpp
index 939d3a5c84..1494d784b3 100644
--- a/gtsam_unstable/examples/ConcurrentFilteringAndSmoothingExample.cpp
+++ b/gtsam_unstable/examples/ConcurrentFilteringAndSmoothingExample.cpp
@@ -308,11 +308,11 @@ int main(int argc, char** argv) {
   // And to demonstrate the fixed-lag aspect, print the keys contained in each smoother after 3.0 seconds
   cout << "After 15.0 seconds, each version contains to the following keys:" << endl;
   cout << "  Concurrent Filter Keys: " << endl;
-  for(const auto& key_value: concurrentFilter.getLinearizationPoint()) {
+  for(const auto key_value: concurrentFilter.getLinearizationPoint()) {
     cout << setprecision(5) << "    Key: " << key_value.key << endl;
   }
   cout << "  Concurrent Smoother Keys: " << endl;
-  for(const auto& key_value: concurrentSmoother.getLinearizationPoint()) {
+  for(const auto key_value: concurrentSmoother.getLinearizationPoint()) {
     cout << setprecision(5) << "    Key: " << key_value.key << endl;
   }
   cout << "  Fixed-Lag Smoother Keys: " << endl;
diff --git a/gtsam_unstable/examples/SmartRangeExample_plaza1.cpp b/gtsam_unstable/examples/SmartRangeExample_plaza1.cpp
index 5fdc7a7432..6a1239fba3 100644
--- a/gtsam_unstable/examples/SmartRangeExample_plaza1.cpp
+++ b/gtsam_unstable/examples/SmartRangeExample_plaza1.cpp
@@ -233,7 +233,7 @@ int main(int argc, char** argv) {
       }
     }
     countK = 0;
-    for(const Values::ConstFiltered<Point2>::KeyValuePair& it: result.filter<Point2>())
+    for(const auto it: result.filter<Point2>())
      os2 << it.key << "\t" << it.value.x() << "\t" << it.value.y() << "\t1" << endl;
 
    if (smart) {
@@ -256,7 +256,7 @@ int main(int argc, char** argv) {
   // Write result to file
   Values result = isam.calculateEstimate();
   ofstream os("rangeResult.txt");
-  for(const Values::ConstFiltered<Pose2>::KeyValuePair& it: result.filter<Pose2>())
+  for(const auto it: result.filter<Pose2>())
    os << it.key << "\t" << it.value.x() << "\t" << it.value.y() << "\t"
       << it.value.theta() << endl;
   exit(0);
diff --git a/gtsam_unstable/examples/SmartRangeExample_plaza2.cpp b/gtsam_unstable/examples/SmartRangeExample_plaza2.cpp
index 90b2a30ff3..95aff85a7f 100644
--- a/gtsam_unstable/examples/SmartRangeExample_plaza2.cpp
+++ b/gtsam_unstable/examples/SmartRangeExample_plaza2.cpp
@@ -202,11 +202,11 @@ int main(int argc, char** argv) {
   // Write result to file
   Values result = isam.calculateEstimate();
   ofstream os2("rangeResultLM.txt");
-  for(const Values::ConstFiltered<Point2>::KeyValuePair& it: result.filter<Point2>())
+  for(const auto it: result.filter<Point2>())
    os2 << it.key << "\t" << it.value.x() << "\t" << it.value.y() << "\t1" << endl;
 
   ofstream os("rangeResult.txt");
-  for(const Values::ConstFiltered<Pose2>::KeyValuePair& it: result.filter<Pose2>())
+  for(const auto it: result.filter<Pose2>())
    os << it.key << "\t" << it.value.x() << "\t" << it.value.y() << "\t"
       << it.value.theta() << endl;
   exit(0);
diff --git a/gtsam_unstable/nonlinear/BatchFixedLagSmoother.cpp b/gtsam_unstable/nonlinear/BatchFixedLagSmoother.cpp
index 777e4b2d3f..fd18e7c6dc 100644
--- a/gtsam_unstable/nonlinear/BatchFixedLagSmoother.cpp
+++ b/gtsam_unstable/nonlinear/BatchFixedLagSmoother.cpp
@@ -59,7 +59,7 @@ FixedLagSmoother::Result BatchFixedLagSmoother::update(
   // Add the new variables to theta
   theta_.insert(newTheta);
   // Add new variables to the end of the ordering
-  for (const auto& key_value : newTheta) {
+  for (const auto key_value : newTheta) {
     ordering_.push_back(key_value.key);
   }
   // Augment Delta
@@ -267,7 +267,7 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() {
   // Put the linearization points and deltas back for specific variables
   if (enforceConsistency_ && (linearKeys_.size() > 0)) {
     theta_.update(linearKeys_);
-    for(const auto& key_value: linearKeys_) {
+    for(const auto key_value: linearKeys_) {
       delta_.at(key_value.key) = newDelta.at(key_value.key);
     }
   }
diff --git a/gtsam_unstable/nonlinear/ConcurrentBatchFilter.cpp b/gtsam_unstable/nonlinear/ConcurrentBatchFilter.cpp
index 758bcfe488..83d0ab719f 100644
--- a/gtsam_unstable/nonlinear/ConcurrentBatchFilter.cpp
+++ b/gtsam_unstable/nonlinear/ConcurrentBatchFilter.cpp
@@ -139,7 +139,7 @@ ConcurrentBatchFilter::Result ConcurrentBatchFilter::update(const NonlinearFacto
   // Add the new variables to theta
   theta_.insert(newTheta);
   // Add new variables to the end of the ordering
-  for(const Values::ConstKeyValuePair& key_value: newTheta) {
+  for(const auto key_value: newTheta) {
     ordering_.push_back(key_value.key);
   }
   // Augment Delta
@@ -222,7 +222,7 @@ void ConcurrentBatchFilter::synchronize(const NonlinearFactorGraph& smootherSumm
 
   // Find the set of new separator keys
   KeySet newSeparatorKeys;
-  for(const Values::ConstKeyValuePair& key_value: separatorValues_) {
+  for(const auto key_value: separatorValues_) {
     newSeparatorKeys.insert(key_value.key);
   }
 
@@ -236,7 +236,7 @@ void ConcurrentBatchFilter::synchronize(const NonlinearFactorGraph& smootherSumm
   graph.push_back(smootherShortcut_);
   Values values;
   values.insert(smootherSummarizationValues);
-  for(const Values::ConstKeyValuePair& key_value: separatorValues_) {
+  for(const auto key_value: separatorValues_) {
     if(!values.exists(key_value.key)) {
       values.insert(key_value.key, key_value.value);
     }
   }
@@ -471,7 +471,7 @@ void ConcurrentBatchFilter::optimize(const NonlinearFactorGraph& factors, Values
   // Put the linearization points and deltas back for specific variables
   if(linearValues.size() > 0) {
     theta.update(linearValues);
-    for(const Values::ConstKeyValuePair& key_value: linearValues) {
+    for(const auto key_value: linearValues) {
       delta.at(key_value.key) = newDelta.at(key_value.key);
     }
   }
@@ -574,7 +574,7 @@ void ConcurrentBatchFilter::moveSeparator(const FastList<Key>& keysToMove) {
 
   // Calculate the set of new separator keys: AffectedKeys + PreviousSeparatorKeys - KeysToMove
   KeySet newSeparatorKeys = removedFactors.keys();
-  for(const Values::ConstKeyValuePair& key_value: separatorValues_) {
+  for(const auto key_value: separatorValues_) {
     newSeparatorKeys.insert(key_value.key);
   }
   for(Key key: keysToMove) {
diff --git a/gtsam_unstable/nonlinear/ConcurrentBatchSmoother.cpp b/gtsam_unstable/nonlinear/ConcurrentBatchSmoother.cpp
index 600baa9f0f..75d491bde7 100644
--- a/gtsam_unstable/nonlinear/ConcurrentBatchSmoother.cpp
+++ b/gtsam_unstable/nonlinear/ConcurrentBatchSmoother.cpp
@@ -61,7 +61,7 @@ ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::update(const NonlinearF
   theta_.insert(newTheta);
 
   // Add new variables to the end of the ordering
-  for(const Values::ConstKeyValuePair& key_value: newTheta) {
+  for(const auto key_value: newTheta) {
     ordering_.push_back(key_value.key);
   }
 
@@ -135,7 +135,7 @@ void ConcurrentBatchSmoother::synchronize(const NonlinearFactorGraph& smootherFa
   removeFactors(filterSummarizationSlots_);
 
   // Insert new linpoints into the values, augment the ordering, and store new dims to augment delta
-  for(const Values::ConstKeyValuePair& key_value: smootherValues) {
+  for(const auto key_value: smootherValues) {
     std::pair<Values::iterator, bool> iter_inserted = theta_.tryInsert(key_value.key, key_value.value);
     if(iter_inserted.second) {
       // If the insert succeeded
@@ -146,7 +146,7 @@ void ConcurrentBatchSmoother::synchronize(const NonlinearFactorGraph& smootherFa
       iter_inserted.first->value = key_value.value;
     }
   }
-  for(const Values::ConstKeyValuePair& key_value: separatorValues) {
+  for(const auto key_value: separatorValues) {
     std::pair<Values::iterator, bool> iter_inserted = theta_.tryInsert(key_value.key, key_value.value);
     if(iter_inserted.second) {
       // If the insert succeeded
@@ -322,7 +322,7 @@ ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::optimize() {
   // Put the linearization points and deltas back for specific variables
   if(separatorValues_.size() > 0) {
     theta_.update(separatorValues_);
-    for(const Values::ConstKeyValuePair& key_value: separatorValues_) {
+    for(const auto key_value: separatorValues_) {
       delta_.at(key_value.key) = newDelta.at(key_value.key);
     }
   }
@@ -372,7 +372,7 @@ void ConcurrentBatchSmoother::updateSmootherSummarization() {
 
   // Get the set of separator keys
   gtsam::KeySet separatorKeys;
-  for(const Values::ConstKeyValuePair& key_value: separatorValues_) {
+  for(const auto key_value: separatorValues_) {
     separatorKeys.insert(key_value.key);
   }
 
diff --git a/gtsam_unstable/nonlinear/ConcurrentIncrementalFilter.cpp b/gtsam_unstable/nonlinear/ConcurrentIncrementalFilter.cpp
index b70b9efc2c..b9cf66a976 100644
--- a/gtsam_unstable/nonlinear/ConcurrentIncrementalFilter.cpp
+++ b/gtsam_unstable/nonlinear/ConcurrentIncrementalFilter.cpp
@@ -69,13 +69,13 @@ ConcurrentIncrementalFilter::Result ConcurrentIncrementalFilter::update(const No
     int group = 1;
     // Set all existing variables to Group1
     if(isam2_.getLinearizationPoint().size() > 0) {
-      for(const Values::ConstKeyValuePair& key_value: isam2_.getLinearizationPoint()) {
+      for(const auto key_value: isam2_.getLinearizationPoint()) {
        orderingConstraints->operator[](key_value.key) = group;
      }
      ++group;
    }
    // Assign new variables to the root
-    for(const Values::ConstKeyValuePair& key_value: newTheta) {
+    for(const auto key_value: newTheta) {
      orderingConstraints->operator[](key_value.key) = group;
    }
    // Set marginalizable variables to Group0
@@ -201,7 +201,7 @@ void ConcurrentIncrementalFilter::synchronize(const NonlinearFactorGraph& smooth
 
   // Force iSAM2 not to relinearize anything during this iteration
   FastList<Key> noRelinKeys;
-  for(const Values::ConstKeyValuePair& key_value: isam2_.getLinearizationPoint()) {
+  for(const auto key_value: isam2_.getLinearizationPoint()) {
     noRelinKeys.push_back(key_value.key);
   }
 
diff --git a/gtsam_unstable/nonlinear/ConcurrentIncrementalSmoother.cpp b/gtsam_unstable/nonlinear/ConcurrentIncrementalSmoother.cpp
index 714d7c8d0a..3886d0e422 100644
--- a/gtsam_unstable/nonlinear/ConcurrentIncrementalSmoother.cpp
+++ b/gtsam_unstable/nonlinear/ConcurrentIncrementalSmoother.cpp
@@ -66,7 +66,7 @@ ConcurrentIncrementalSmoother::Result ConcurrentIncrementalSmoother::update(cons
   // Also, mark the separator keys as fixed linearization points
   FastMap<Key, int> constrainedKeys;
   FastList<Key> noRelinKeys;
-  for(const Values::ConstKeyValuePair& key_value: separatorValues_) {
+  for(const auto key_value: separatorValues_) {
     constrainedKeys[key_value.key] = 1;
     noRelinKeys.push_back(key_value.key);
   }
@@ -82,12 +82,12 @@ ConcurrentIncrementalSmoother::Result ConcurrentIncrementalSmoother::update(cons
   Values values(newTheta);
   // Unfortunately, we must be careful here, as some of the smoother values
   // and/or separator values may have been added previously
-  for(const Values::ConstKeyValuePair& key_value: smootherValues_) {
+  for(const auto key_value: smootherValues_) {
     if(!isam2_.getLinearizationPoint().exists(key_value.key)) {
       values.insert(key_value.key, smootherValues_.at(key_value.key));
     }
   }
-  for(const Values::ConstKeyValuePair& key_value: separatorValues_) {
+  for(const auto key_value: separatorValues_) {
     if(!isam2_.getLinearizationPoint().exists(key_value.key)) {
       values.insert(key_value.key, separatorValues_.at(key_value.key));
     }
@@ -246,7 +246,7 @@ void ConcurrentIncrementalSmoother::updateSmootherSummarization() {
 
   // Get the set of separator keys
   KeySet separatorKeys;
-  for(const Values::ConstKeyValuePair& key_value: separatorValues_) {
+  for(const auto key_value: separatorValues_) {
     separatorKeys.insert(key_value.key);
   }
 
diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentBatchFilter.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentBatchFilter.cpp
index 1f19c0e8a5..61db051673 100644
--- a/gtsam_unstable/nonlinear/tests/testConcurrentBatchFilter.cpp
+++ b/gtsam_unstable/nonlinear/tests/testConcurrentBatchFilter.cpp
@@ -64,7 +64,7 @@ NonlinearFactorGraph CalculateMarginals(const NonlinearFactorGraph& factorGraph,
 
 
   std::set<Key> KeysToKeep;
-  for(const Values::ConstKeyValuePair& key_value: linPoint) { // we cycle over all the keys of factorGraph
+  for(const auto key_value: linPoint) { // we cycle over all the keys of factorGraph
     KeysToKeep.insert(key_value.key);
   } // so far we are keeping all keys, but we want to delete the ones that we are going to marginalize
   for(Key key: keysToMarginalize) {
diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentBatchSmoother.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentBatchSmoother.cpp
index ae9a3f827e..b5fb614285 100644
--- a/gtsam_unstable/nonlinear/tests/testConcurrentBatchSmoother.cpp
+++ b/gtsam_unstable/nonlinear/tests/testConcurrentBatchSmoother.cpp
@@ -560,7 +560,7 @@ TEST( ConcurrentBatchSmoother, synchronize_3 )
 
   GaussianFactorGraph::shared_ptr linearFactors = allFactors.linearize(allValues);
   KeySet eliminateKeys = linearFactors->keys();
-  for(const Values::ConstKeyValuePair& key_value: filterSeparatorValues) {
+  for(const auto key_value: filterSeparatorValues) {
     eliminateKeys.erase(key_value.key);
   }
   KeyVector variables(eliminateKeys.begin(), eliminateKeys.end());
diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalFilter.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalFilter.cpp
index c5dba1a694..08d71a4202 100644
--- a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalFilter.cpp
+++ b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalFilter.cpp
@@ -80,7 +80,7 @@ NonlinearFactorGraph CalculateMarginals(const NonlinearFactorGraph& factorGraph,
 
 
   std::set<Key> KeysToKeep;
-  for(const Values::ConstKeyValuePair& key_value: linPoint) { // we cycle over all the keys of factorGraph
+  for(const auto key_value: linPoint) { // we cycle over all the keys of factorGraph
     KeysToKeep.insert(key_value.key);
   } // so far we are keeping all keys, but we want to delete the ones that we are going to marginalize
   for(Key key: keysToMarginalize) {
diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherDL.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherDL.cpp
index 5de1150132..ccb5a104e3 100644
--- a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherDL.cpp
+++ b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherDL.cpp
@@ -512,7 +512,7 @@ TEST( ConcurrentIncrementalSmootherDL, synchronize_2 )
   // Values expectedLinearizationPoint = BatchOptimize(allFactors, allValues, 1);
   Values expectedLinearizationPoint = filterSeparatorValues;
   Values actualLinearizationPoint;
-  for(const Values::ConstKeyValuePair& key_value: filterSeparatorValues) {
+  for(const auto key_value: filterSeparatorValues) {
     actualLinearizationPoint.insert(key_value.key, smoother.getLinearizationPoint().at(key_value.key));
   }
   CHECK(assert_equal(expectedLinearizationPoint, actualLinearizationPoint, 1e-6));
@@ -580,7 +580,7 @@ TEST( ConcurrentIncrementalSmootherDL, synchronize_3 )
 
   // GaussianBayesNet::shared_ptr GBNsptr = GSS.eliminate();
   KeySet allkeys = LinFactorGraph->keys();
-  for(const Values::ConstKeyValuePair& key_value: filterSeparatorValues) {
+  for(const auto key_value: filterSeparatorValues) {
     allkeys.erase(key_value.key);
   }
   KeyVector variables(allkeys.begin(), allkeys.end());
diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherGN.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherGN.cpp
index ec78ee6e2c..53c3d16103 100644
--- a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherGN.cpp
+++ b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherGN.cpp
@@ -513,7 +513,7 @@ TEST( ConcurrentIncrementalSmootherGN, synchronize_2 )
   // Values expectedLinearizationPoint = BatchOptimize(allFactors, allValues, 1);
   Values expectedLinearizationPoint = filterSeparatorValues;
   Values actualLinearizationPoint;
-  for(const Values::ConstKeyValuePair& key_value: filterSeparatorValues) {
+  for(const auto key_value: filterSeparatorValues) {
     actualLinearizationPoint.insert(key_value.key, smoother.getLinearizationPoint().at(key_value.key));
   }
   CHECK(assert_equal(expectedLinearizationPoint, actualLinearizationPoint, 1e-6));
@@ -582,7 +582,7 @@ TEST( ConcurrentIncrementalSmootherGN, synchronize_3 )
 
   // GaussianBayesNet::shared_ptr GBNsptr = GSS.eliminate();
   KeySet allkeys = LinFactorGraph->keys();
-  for(const Values::ConstKeyValuePair& key_value: filterSeparatorValues)
+  for(const auto key_value: filterSeparatorValues)
     allkeys.erase(key_value.key);
   KeyVector variables(allkeys.begin(), allkeys.end());
   std::pair<GaussianBayesNet::shared_ptr, GaussianFactorGraph::shared_ptr> result = LinFactorGraph->eliminatePartialSequential(variables, EliminateCholesky);
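
The change applied throughout this patch is the same in every hunk: range-based iteration over gtsam::Values yields a lightweight key/value pair object (a Key plus a reference to the stored Value), and the loops now take that element by value as `const auto key_value` instead of binding a const reference to it. Below is a minimal, self-contained sketch of that usage pattern; it is illustrative only, not part of the patch, and assumes a standard GTSAM 4.x build (the keys, values, and printing are made up for the example).

// Minimal sketch of the Values iteration pattern used in the hunks above.
#include <gtsam/geometry/Pose3.h>
#include <gtsam/nonlinear/Values.h>
#include <iostream>

int main() {
  gtsam::Values values;
  values.insert(0, gtsam::Pose3());
  values.insert(1, gtsam::Pose3(gtsam::Rot3(), gtsam::Point3(1.0, 0.0, 0.0)));

  // Dereferencing a Values iterator produces a small key/value pair object,
  // so the loop takes it by value ("const auto key_value"), matching the
  // style this patch standardizes on.
  for (const auto key_value : values) {
    std::cout << "key " << key_value.key
              << " has dimension " << key_value.value.dim() << std::endl;
  }

  // Typed access still goes through at<T>(), as in several files touched above.
  const gtsam::Pose3& pose = values.at<gtsam::Pose3>(1);
  pose.print("pose 1: ");
  return 0;
}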