[opengm] 113/386: Remove Loss from StructMaxMargin template parameters. Update parameter object, and actually pass it to the optimizer. Use infParameter in Oracle. Use Gurobi OR Cplex as INF in test.

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:35:15 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit 45d07c34a7f56efb7dfb622b7b95b868670cd24d
Author: Carsten Haubold <carstenhaubold at googlemail.com>
Date:   Thu Dec 18 11:50:37 2014 +0100

    Remove Loss from StructMaxMargin template parameters. Update parameter object, and actually pass it to the optimizer. Use infParameter in Oracle. Use Gurobi OR Cplex as INF in test.
---
 include/opengm/learning/struct-max-margin.hxx | 35 ++++++++++++---------------
 src/unittest/learning/test_learning.cxx       | 25 +++++++++++++------
 2 files changed, 33 insertions(+), 27 deletions(-)

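For reference, a minimal sketch of the revised call site (not part of the patch itself): it assumes the DSS and INF typedefs and the populated dataset object from the unit test further down, and it illustrates the new Parameter layout and the infParameter forwarding described in the commit message.

    // Sketch only, assuming the unit test's typedefs: DSS is a dataset type and
    // INF is LPGurobi<GM,Minimizer> or LPCplex<GM,Minimizer>, depending on WITH_GUROBI.
    opengm::learning::StructMaxMargin<DSS>::Parameter para;        // Loss is no longer a template argument
    para.optimizerParameter_.steps = 10;                           // optimizer settings now live inside Parameter
                                                                   // ('steps' is the BundleOptimizer field printed in learn())
    opengm::learning::StructMaxMargin<DSS> learner(dataset, para); // parameter is now actually passed to the optimizer

    INF::Parameter infPara;                                        // solver-specific settings
    learner.learn<INF>(infPara);                                   // the Oracle now reuses infPara for every inference run
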
diff --git a/include/opengm/learning/struct-max-margin.hxx b/include/opengm/learning/struct-max-margin.hxx
index 405489e..775732d 100644
--- a/include/opengm/learning/struct-max-margin.hxx
+++ b/include/opengm/learning/struct-max-margin.hxx
@@ -11,34 +11,27 @@ namespace learning {
 
 template <
 		typename DS,
-		typename LG,
 		typename O = BundleOptimizer<typename DS::ValueType> >
 class StructMaxMargin {
 
 public:
 
 	typedef DS DatasetType;
-	typedef LG LossGeneratorType;
 	typedef O  OptimizerType;
 
 	typedef typename DatasetType::ValueType       ValueType;
     typedef typename DatasetType::Weights         Weights;
 
 	struct Parameter {
-
-		Parameter() :
-			regularizerWeight(1.0) {}
-
-		typedef typename OptimizerType::Parameter OptimizerParameter;
-
-		ValueType regularizerWeight;
-
-		OptimizerParameter optimizerParameter;
+        typedef typename OptimizerType::Parameter OptimizerParameter;
+        OptimizerParameter optimizerParameter_;
 	};
 
 	StructMaxMargin(DatasetType& dataset, const Parameter& parameter = Parameter()) :
 		_dataset(dataset),
-		_parameter(parameter) {}
+        _parameter(parameter),
+        _optimizer(parameter.optimizerParameter_)
+    {}
 
 	Parameter& parameter() { return _parameter; }
 
@@ -54,8 +47,10 @@ private:
 
 		public:
 
-			Oracle(DatasetType& dataset) :
-				_dataset(dataset) {}
+            Oracle(DatasetType& dataset, typename InferenceType::Parameter& infParam) :
+                _dataset(dataset),
+                _infParam(infParam)
+            {}
 
 			/**
 			 * Evaluate the loss-augmented energy value of the dataset and its 
@@ -118,8 +113,7 @@ private:
 
 					// find the minimizer y* of F(y,w)
 					ConfigurationType mostViolated;
-					typename InferenceType::Parameter p;
-					InferenceType inference(gml, p);
+                    InferenceType inference(gml, _infParam);
 					inference.infer();
 					inference.arg(mostViolated);
 
@@ -143,6 +137,7 @@ private:
 		private:
 
 			DatasetType& _dataset;
+            typename InferenceType::Parameter& _infParam;
 	};
 
 	DatasetType& _dataset;
@@ -154,12 +149,12 @@ private:
     Weights _weights;
 };
 
-template <typename DS, typename LG, typename O>
+template <typename DS, typename O>
 template <typename InfereneType>
 void
-StructMaxMargin<DS, LG, O>::learn(typename InfereneType::Parameter& infParams) {
+StructMaxMargin<DS, O>::learn(typename InfereneType::Parameter& infParams) {
 
-	Oracle<InfereneType> oracle(_dataset);
+    Oracle<InfereneType> oracle(_dataset, infParams);
 
 	_weights = _dataset.getWeights();
 
@@ -173,7 +168,7 @@ StructMaxMargin<DS, LG, O>::learn(typename InfereneType::Parameter& infParams) {
 		std::cout << "optimization converged to requested precision" << std::endl;
 
 	if (result == ReachedSteps)
-		std::cout << "optimization stopped after " << parameter().optimizerParameter.steps << " iterations" << std::endl;
+        std::cout << "optimization stopped after " << parameter().optimizerParameter_.steps << " iterations" << std::endl;
 }
 
 } // namespace learning
diff --git a/src/unittest/learning/test_learning.cxx b/src/unittest/learning/test_learning.cxx
index 4a5153b..9a52ac2 100644
--- a/src/unittest/learning/test_learning.cxx
+++ b/src/unittest/learning/test_learning.cxx
@@ -6,9 +6,15 @@
 #include <opengm/operations/adder.hxx>
 #include <opengm/operations/minimizer.hxx>
 #include <opengm/inference/external/trws.hxx>
-#include <opengm/inference/lpgurobi.hxx>
 #include <opengm/utilities/metaprogramming.hxx>
 
+#if WITH_GUROBI
+#include <opengm/inference/lpgurobi.hxx>
+#else
+#include <opengm/inference/lpcplex.hxx>
+#endif
+
+
 #include <opengm/functions/learnable/lpotts.hxx>
 #include <opengm/functions/learnable/sum_of_experts.hxx>
 #include <opengm/learning/struct-max-margin.hxx>
@@ -25,7 +31,12 @@ typedef size_t LabelType;
 typedef opengm::meta::TypeListGenerator<opengm::ExplicitFunction<ValueType,IndexType,LabelType>, opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType>, opengm::functions::learnable::SumOfExperts<ValueType,IndexType,LabelType> >::type FunctionListType;
 typedef opengm::GraphicalModel<ValueType,opengm::Adder, FunctionListType, opengm::DiscreteSpace<IndexType,LabelType> > GM; 
 typedef opengm::learning::HammingLoss     LOSS;
+
+#if WITH_GUROBI
 typedef opengm::LPGurobi<GM,opengm::Minimizer> INF;
+#else
+typedef opengm::LPCplex<GM,opengm::Minimizer> INF;
+#endif
 typedef opengm::datasets::TestDataset1<GM,LOSS> DS1;
 typedef opengm::datasets::TestDataset2<GM,LOSS> DS2;
 typedef opengm::datasets::TestDatasetSimple<GM,LOSS> DSS;
@@ -41,8 +52,8 @@ int main() {
 	  std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
 	  
 	  
-	  opengm::learning::StructMaxMargin<DSS,LOSS>::Parameter para;
-	  opengm::learning::StructMaxMargin<DSS,LOSS> learner(dataset,para);
+      opengm::learning::StructMaxMargin<DSS>::Parameter para;
+      opengm::learning::StructMaxMargin<DSS> learner(dataset,para);
 	  
 	  
 	  INF::Parameter infPara;
@@ -60,8 +71,8 @@ int main() {
 	  std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
 	  
 	  
-	  opengm::learning::StructMaxMargin<DS1,LOSS>::Parameter para;
-	  opengm::learning::StructMaxMargin<DS1,LOSS> learner(dataset,para);
+      opengm::learning::StructMaxMargin<DS1>::Parameter para;
+      opengm::learning::StructMaxMargin<DS1> learner(dataset,para);
 	  
 	  
 	  INF::Parameter infPara;
@@ -80,8 +91,8 @@ int main() {
 	  std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
 	  
 	  
-	  opengm::learning::StructMaxMargin<DS2,LOSS>::Parameter para;
-	  opengm::learning::StructMaxMargin<DS2,LOSS> learner(dataset,para);
+      opengm::learning::StructMaxMargin<DS2>::Parameter para;
+      opengm::learning::StructMaxMargin<DS2> learner(dataset,para);
 	  
 	  
 	  INF::Parameter infPara;

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git


