[opengm] 117/386: bring StructMaxMargin learner to python. needed inlining of all bundle optimizer methods.

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:35:21 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit beeee2eeaf070f0a2393b24f8740524993c98120
Author: Carsten Haubold <carstenhaubold at googlemail.com>
Date:   Thu Dec 18 14:39:17 2014 +0100

    bring StructMaxMargin learner to python. needed inlining of all bundle optimizer methods.
---
 fubar/example1.py                                  |  7 ++
 include/opengm/learning/solver/CplexBackend.h      | 28 ++++----
 include/opengm/learning/solver/GurobiBackend.h     | 30 ++++-----
 include/opengm/learning/solver/LinearConstraint.h  | 13 ++--
 include/opengm/learning/solver/LinearConstraints.h |  7 +-
 .../opengm/learning/solver/QuadraticObjective.h    | 19 +++---
 include/opengm/learning/solver/Solution.h          |  4 +-
 include/opengm/learning/struct-max-margin.hxx      | 15 +++--
 .../python/opengm/learning/CMakeLists.txt          | 16 ++---
 src/interfaces/python/opengm/learning/__init__.py  | 24 ++++++-
 src/interfaces/python/opengm/learning/learning.cxx |  7 +-
 .../opengm/learning/pyStructMaxMarginLearner.cxx   | 75 ++++++++++++++++++++++
 12 files changed, 175 insertions(+), 70 deletions(-)
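
Most of the churn below is the addition of "inline" to method definitions in
the learning/solver headers. Once learning.cxx includes bundle-optimizer.hxx
and the new pyStructMaxMarginLearner.cxx is compiled next to it, those
header-only classes end up in more than one translation unit; out-of-class
member definitions without "inline" then define the same symbol in each unit
and the link fails under the one-definition rule. A minimal sketch of the
failure mode and the fix, with hypothetical foo.h/Foo names that are not from
this repository:

    // foo.h -- included by both a.cxx and b.cxx
    #ifndef FOO_H
    #define FOO_H

    class Foo {
    public:
        int bar() const;   // declared here, defined out of class below
    };

    // Without "inline", every .cxx that includes foo.h emits its own
    // Foo::bar and the linker reports a duplicate symbol. "inline" permits
    // identical definitions to appear in multiple translation units.
    inline int Foo::bar() const { return 42; }

    #endif // FOO_H

Member functions defined inside the class body are implicitly inline; these
headers define theirs outside the class body, so the keyword has to be
spelled out, which is exactly what the hunks below do.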

diff --git a/fubar/example1.py b/fubar/example1.py
index d547234..e5e49e7 100644
--- a/fubar/example1.py
+++ b/fubar/example1.py
@@ -28,3 +28,10 @@ learner = learning.gridSearchLearner(dataset=dataset,lowerBounds=lowerBounds, up
 
 learner.learn(infCls=opengm.inference.BeliefPropagation, 
               parameter=opengm.InfParam(damping=0.5))
+
+# for struct max margin learner
+smm_learnerParam = learning.StructMaxMargin_Bundle_HammingLossParameter(1.0, 0.01, 0)
+smm_learner = learning.StructMaxMargin_Bundle_HammingLoss(dataset, smm_learnerParam)
+smm_learner.learn(infCls=opengm.inference.Icm)
+smm_learner2 = learning.structMaxMarginLearner(dataset, 1.0, 0.001, 0)
+smm_learner2.learn(infCls=opengm.inference.BeliefPropagation, parameter=opengm.InfParam(damping=0.5))
\ No newline at end of file
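
(The three positional arguments to StructMaxMargin_Bundle_HammingLossParameter
above map onto the bundle optimizer's lambda, min_gap and steps fields; see
pyStructMaxMarginBundleParamConstructor in the new pyStructMaxMarginLearner.cxx
further down.)
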
diff --git a/include/opengm/learning/solver/CplexBackend.h b/include/opengm/learning/solver/CplexBackend.h
index 30c7572..d330917 100644
--- a/include/opengm/learning/solver/CplexBackend.h
+++ b/include/opengm/learning/solver/CplexBackend.h
@@ -139,7 +139,7 @@ private:
     ConstraintVector _constraints;
 };
 
-CplexBackend::CplexBackend(const Parameter& parameter) :
+inline CplexBackend::CplexBackend(const Parameter& parameter) :
     _parameter(parameter),
     model_(env_),
     x_(env_),
@@ -150,11 +150,11 @@ CplexBackend::CplexBackend(const Parameter& parameter) :
     std::cout << "constructing cplex solver" << std::endl;
 }
 
-CplexBackend::~CplexBackend() {
+inline CplexBackend::~CplexBackend() {
     std::cout << "destructing cplex solver..." << std::endl;
 }
 
-void
+inline void
 CplexBackend::initialize(
         unsigned int numVariables,
         VariableType variableType) {
@@ -162,7 +162,7 @@ CplexBackend::initialize(
     initialize(numVariables, variableType, std::map<unsigned int, VariableType>());
 }
 
-void
+inline void
 CplexBackend::initialize(
         unsigned int                                numVariables,
         VariableType                                defaultVariableType,
@@ -199,13 +199,13 @@ CplexBackend::initialize(
     std::cout << "creating " << _numVariables << " coefficients" << std::endl;
 }
 
-void
+inline void
 CplexBackend::setObjective(const LinearObjective& objective) {
 
     setObjective((QuadraticObjective)objective);
 }
 
-void
+inline void
 CplexBackend::setObjective(const QuadraticObjective& objective) {
 
     try {
@@ -247,7 +247,7 @@ CplexBackend::setObjective(const QuadraticObjective& objective) {
     }
 }
 
-void
+inline void
 CplexBackend::setConstraints(const LinearConstraints& constraints) {
 
     // remove previous constraints
@@ -277,7 +277,7 @@ CplexBackend::setConstraints(const LinearConstraints& constraints) {
     }
 }
 
-void
+inline void
 CplexBackend::addConstraint(const LinearConstraint& constraint) {
 
     try {
@@ -292,7 +292,7 @@ CplexBackend::addConstraint(const LinearConstraint& constraint) {
     }
 }
 
-IloRange
+inline IloRange
 CplexBackend::createConstraint(const LinearConstraint& constraint) {
     // create the lhs expression
     IloExpr linearExpr(env_);
@@ -315,7 +315,7 @@ CplexBackend::createConstraint(const LinearConstraint& constraint) {
     }
 }
 
-bool
+inline bool
 CplexBackend::solve(Solution& x, double& value, std::string& msg) {
 
     try {
@@ -362,12 +362,12 @@ CplexBackend::solve(Solution& x, double& value, std::string& msg) {
     return true;
 }
 
-void
+inline void
 CplexBackend::setMIPGap(double gap) {
      cplex_.setParam(IloCplex::EpGap, gap);
 }
 
-void
+inline void
 CplexBackend::setMIPFocus(unsigned int focus) {
     /*
      * GUROBI and CPLEX have the same meaning for the values of the MIPFocus and MIPEmphasis parameter:
@@ -399,12 +399,12 @@ CplexBackend::setMIPFocus(unsigned int focus) {
     cplex_.setParam(IloCplex::MIPEmphasis, focus);
 }
 
-void
+inline void
 CplexBackend::setNumThreads(unsigned int numThreads) {
     cplex_.setParam(IloCplex::Threads, numThreads);
 }
 
-void
+inline void
 CplexBackend::setVerbose(bool verbose) {
 
     // setup CPLEX environment
diff --git a/include/opengm/learning/solver/GurobiBackend.h b/include/opengm/learning/solver/GurobiBackend.h
index 5c5fa71..2638063 100644
--- a/include/opengm/learning/solver/GurobiBackend.h
+++ b/include/opengm/learning/solver/GurobiBackend.h
@@ -144,13 +144,13 @@ private:
 	double _scale;
 };
 
-GurobiBackend::GurobiBackend(const Parameter& parameter) :
+inline GurobiBackend::GurobiBackend(const Parameter& parameter) :
 	_parameter(parameter),
 	_variables(0),
 	_model(_env) {
 }
 
-GurobiBackend::~GurobiBackend() {
+inline GurobiBackend::~GurobiBackend() {
 
 	std::cout << "destructing gurobi solver..." << std::endl;
 
@@ -158,7 +158,7 @@ GurobiBackend::~GurobiBackend() {
 		delete[] _variables;
 }
 
-void
+inline void
 GurobiBackend::initialize(
 		unsigned int numVariables,
 		VariableType variableType) {
@@ -166,7 +166,7 @@ GurobiBackend::initialize(
 	initialize(numVariables, variableType, std::map<unsigned int, VariableType>());
 }
 
-void
+inline void
 GurobiBackend::initialize(
 		unsigned int                                numVariables,
 		VariableType                                defaultVariableType,
@@ -240,13 +240,13 @@ GurobiBackend::initialize(
 	std::cout << "creating " << _numVariables << " coefficients" << std::endl;
 }
 
-void
+inline void
 GurobiBackend::setObjective(const LinearObjective& objective) {
 
 	setObjective((QuadraticObjective)objective);
 }
 
-void
+inline void
 GurobiBackend::setObjective(const QuadraticObjective& objective) {
 
 	try {
@@ -287,7 +287,7 @@ GurobiBackend::setObjective(const QuadraticObjective& objective) {
 	}
 }
 
-void
+inline void
 GurobiBackend::setConstraints(const LinearConstraints& constraints) {
 
 	// remove previous constraints
@@ -316,7 +316,7 @@ GurobiBackend::setConstraints(const LinearConstraints& constraints) {
 	}
 }
 
-void
+inline void
 GurobiBackend::addConstraint(const LinearConstraint& constraint) {
 
     try {
@@ -331,7 +331,7 @@ GurobiBackend::addConstraint(const LinearConstraint& constraint) {
     }
 }
 
-GRBConstr
+inline GRBConstr
 GurobiBackend::createConstraint(const LinearConstraint& constraint)
 {
     // create the lhs expression
@@ -351,7 +351,7 @@ GurobiBackend::createConstraint(const LinearConstraint& constraint)
                 constraint.getValue());
 }
 
-bool
+inline bool
 GurobiBackend::solve(Solution& x, double& value, std::string& msg) {
 
 	try {
@@ -389,25 +389,25 @@ GurobiBackend::solve(Solution& x, double& value, std::string& msg) {
 	return true;
 }
 
-void
+inline void
 GurobiBackend::setMIPGap(double gap) {
 
 	_model.getEnv().set(GRB_DoubleParam_MIPGap, gap);
 }
 
-void
+inline void
 GurobiBackend::setMIPFocus(unsigned int focus) {
 
 	_model.getEnv().set(GRB_IntParam_MIPFocus, focus);
 }
 
-void
+inline void
 GurobiBackend::setNumThreads(unsigned int numThreads) {
 
 	_model.getEnv().set(GRB_IntParam_Threads, numThreads);
 }
 
-void
+inline void
 GurobiBackend::setVerbose(bool verbose) {
 
 	// setup GRB environment
@@ -417,7 +417,7 @@ GurobiBackend::setVerbose(bool verbose) {
 		_model.getEnv().set(GRB_IntParam_OutputFlag, 0);
 }
 
-void
+inline void
 GurobiBackend::dumpProblem(std::string filename) {
 
 	try {
diff --git a/include/opengm/learning/solver/LinearConstraint.h b/include/opengm/learning/solver/LinearConstraint.h
index 5ffa7f3..bec224c 100644
--- a/include/opengm/learning/solver/LinearConstraint.h
+++ b/include/opengm/learning/solver/LinearConstraint.h
@@ -39,10 +39,11 @@ private:
 	double _value;
 };
 
+inline
 LinearConstraint::LinearConstraint() :
 	_relation(LessEqual) {}
 
-void
+inline void
 LinearConstraint::setCoefficient(unsigned int varNum, double coef) {
 
 	if (coef == 0) {
@@ -57,31 +58,31 @@ LinearConstraint::setCoefficient(unsigned int varNum, double coef) {
 	}
 }
 
-void
+inline void
 LinearConstraint::setRelation(Relation relation) {
 
 	_relation = relation;
 }
 
-void
+inline void
 LinearConstraint::setValue(double value) {
 
 	_value = value;
 }
 
-const std::map<unsigned int, double>&
+inline const std::map<unsigned int, double>&
 LinearConstraint::getCoefficients() const {
 
 	return _coefs;
 }
 
-const Relation&
+inline const Relation&
 LinearConstraint::getRelation() const {
 
 	return _relation;
 }
 
-double
+inline double
 LinearConstraint::getValue() const {
 
 	return _value;
diff --git a/include/opengm/learning/solver/LinearConstraints.h b/include/opengm/learning/solver/LinearConstraints.h
index 3468643..ef2d4f3 100644
--- a/include/opengm/learning/solver/LinearConstraints.h
+++ b/include/opengm/learning/solver/LinearConstraints.h
@@ -73,24 +73,25 @@ private:
 	linear_constraints_type _linearConstraints;
 };
 
+inline
 LinearConstraints::LinearConstraints(size_t size) {
 
 	_linearConstraints.resize(size);
 }
 
-void
+inline void
 LinearConstraints::add(const LinearConstraint& linearConstraint) {
 
 	_linearConstraints.push_back(linearConstraint);
 }
 
-void
+inline void
 LinearConstraints::addAll(const LinearConstraints& linearConstraints) {
 
 	_linearConstraints.insert(_linearConstraints.end(), linearConstraints.begin(), linearConstraints.end());
 }
 
-std::vector<unsigned int>
+inline std::vector<unsigned int>
 LinearConstraints::getConstraints(const std::vector<unsigned int>& variableIds) {
 
 	std::vector<unsigned int> indices;
diff --git a/include/opengm/learning/solver/QuadraticObjective.h b/include/opengm/learning/solver/QuadraticObjective.h
index 5b127a4..f0ffcc7 100644
--- a/include/opengm/learning/solver/QuadraticObjective.h
+++ b/include/opengm/learning/solver/QuadraticObjective.h
@@ -106,6 +106,7 @@ private:
 	std::map<std::pair<unsigned int, unsigned int>, double> _quadraticCoefs;
 };
 
+inline
 QuadraticObjective::QuadraticObjective(unsigned int size) :
 	_sense(Minimize),
 	_constant(0) {
@@ -113,31 +114,31 @@ QuadraticObjective::QuadraticObjective(unsigned int size) :
 	resize(size);
 }
 
-void
+inline void
 QuadraticObjective::setConstant(double constant) {
 
 	_constant = constant;
 }
 
-double
+inline double
 QuadraticObjective::getConstant() const {
 
 	return _constant;
 }
 
-void
+inline void
 QuadraticObjective::setCoefficient(unsigned int varNum, double coef) {
 
 	_coefs[varNum] = coef;
 }
 
-const std::vector<double>&
+inline const std::vector<double>&
 QuadraticObjective::getCoefficients() const {
 
 	return _coefs;
 }
 
-void
+inline void
 QuadraticObjective::setQuadraticCoefficient(unsigned int varNum1, unsigned int varNum2, double coef) {
 
 	if (coef == 0) {
@@ -150,25 +151,25 @@ QuadraticObjective::setQuadraticCoefficient(unsigned int varNum1, unsigned int v
 	}
 }
 
-const std::map<std::pair<unsigned int, unsigned int>, double>&
+inline const std::map<std::pair<unsigned int, unsigned int>, double>&
 QuadraticObjective::getQuadraticCoefficients() const {
 
 	return _quadraticCoefs;
 }
 
-void
+inline void
 QuadraticObjective::setSense(Sense sense) {
 
 	_sense = sense;
 }
 
-Sense
+inline Sense
 QuadraticObjective::getSense() const {
 
 	return _sense;
 }
 
-void
+inline void
 QuadraticObjective::resize(unsigned int size) {
 
 	_coefs.resize(size, 0.0);
diff --git a/include/opengm/learning/solver/Solution.h b/include/opengm/learning/solver/Solution.h
index b2a2f72..8016bda 100644
--- a/include/opengm/learning/solver/Solution.h
+++ b/include/opengm/learning/solver/Solution.h
@@ -32,12 +32,12 @@ private:
 	double _value;
 };
 
-Solution::Solution(unsigned int size) {
+inline Solution::Solution(unsigned int size) {
 
 	resize(size);
 }
 
-void
+inline void
 Solution::resize(unsigned int size) {
 
 	_solution.resize(size);
diff --git a/include/opengm/learning/struct-max-margin.hxx b/include/opengm/learning/struct-max-margin.hxx
index 775732d..04634ed 100644
--- a/include/opengm/learning/struct-max-margin.hxx
+++ b/include/opengm/learning/struct-max-margin.hxx
@@ -19,6 +19,7 @@ public:
 	typedef DS DatasetType;
 	typedef O  OptimizerType;
 
+    typedef typename DatasetType::GMType GMType;
 	typedef typename DatasetType::ValueType       ValueType;
     typedef typename DatasetType::Weights         Weights;
 
@@ -35,8 +36,8 @@ public:
 
 	Parameter& parameter() { return _parameter; }
 
-	template <typename InferenceType>
-	void learn(typename InferenceType::Parameter& parameter);
+    template <typename InferenceType>
+    void learn(const typename InferenceType::Parameter& parameter);
 
     const Weights& getWeights() { return _weights; }
 
@@ -47,7 +48,7 @@ private:
 
 		public:
 
-            Oracle(DatasetType& dataset, typename InferenceType::Parameter& infParam) :
+            Oracle(DatasetType& dataset, const typename InferenceType::Parameter& infParam) :
                 _dataset(dataset),
                 _infParam(infParam)
             {}
@@ -137,7 +138,7 @@ private:
 		private:
 
 			DatasetType& _dataset;
-            typename InferenceType::Parameter& _infParam;
+            const typename InferenceType::Parameter& _infParam;
 	};
 
 	DatasetType& _dataset;
@@ -150,11 +151,11 @@ private:
 };
 
 template <typename DS, typename O>
-template <typename InfereneType>
+template <typename InferenceType>
 void
-StructMaxMargin<DS, O>::learn(typename InfereneType::Parameter& infParams) {
+StructMaxMargin<DS, O>::learn(const typename InferenceType::Parameter& infParams) {
 
-    Oracle<InfereneType> oracle(_dataset, infParams);
+    Oracle<InferenceType> oracle(_dataset, infParams);
 
 	_weights = _dataset.getWeights();
 
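Besides fixing the InfereneType -> InferenceType template-parameter typo, the
hunks above switch learn() and the Oracle to take the inference parameter by
const reference. One practical effect: a temporary Parameter object (e.g. the
converted argument coming in from the Python binding layer) can bind to a
const reference, whereas a non-const lvalue reference would reject it. A
reduced illustration with hypothetical names:

    // sketch: why taking the parameter by const reference matters
    struct Parameter { double damping; };

    void learnByRef(Parameter&) {}             // cannot accept a temporary
    void learnByConstRef(const Parameter&) {}  // accepts temporaries and const objects

    int main() {
        // learnByRef(Parameter());   // error: rvalue cannot bind to Parameter&
        learnByConstRef(Parameter()); // OK
        return 0;
    }
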
diff --git a/src/interfaces/python/opengm/learning/CMakeLists.txt b/src/interfaces/python/opengm/learning/CMakeLists.txt
index 507752a..c657fc0 100644
--- a/src/interfaces/python/opengm/learning/CMakeLists.txt
+++ b/src/interfaces/python/opengm/learning/CMakeLists.txt
@@ -17,22 +17,18 @@ include_directories(
 #--------------------------------------------------------------
 # Add opengmcore library
 #--------------------------------------------------------------
-if(APPLE)
-    add_library(_learning MODULE learning.cxx
+set(PY_OPENGM_CORE_SOURCES
+            learning.cxx
             pyWeights.cxx
             pyDataset.cxx
             pyLoss.cxx
             pyGridSearchLearner.cxx
+            pyStructMaxMarginLearner.cxx)
 
-    )
+if(APPLE)
+    add_library(_learning MODULE ${PY_OPENGM_CORE_SOURCES})
 else()
-    add_library(_learning SHARED  learning.cxx
-            pyWeights.cxx
-            pyDataset.cxx
-            pyLoss.cxx
-            pyGridSearchLearner.cxx
-
-    )
+    add_library(_learning SHARED ${PY_OPENGM_CORE_SOURCES})
 endif(APPLE)
 
 
diff --git a/src/interfaces/python/opengm/learning/__init__.py b/src/interfaces/python/opengm/learning/__init__.py
index 855453b..b9f90f7 100644
--- a/src/interfaces/python/opengm/learning/__init__.py
+++ b/src/interfaces/python/opengm/learning/__init__.py
@@ -11,12 +11,15 @@ DatasetWithGeneralizedHammingLoss.lossType = 'generalized-hamming'
 
 def _extendedLearn(self, infCls, parameter = None):
     if parameter is None:
+        import opengm
         parameter = opengm.InfParam()
     cppParam  =  infCls.get_cpp_parameter(operator='adder',accumulator='minimizer',parameter=parameter)
     self._learn(cppParam)
 
 GridSearch_HammingLoss.learn  =_extendedLearn
 GridSearch_GeneralizedHammingLoss.learn  =_extendedLearn
+StructMaxMargin_Bundle_HammingLoss.learn = _extendedLearn
+StructMaxMargin_Bundle_GeneralizedHammingLoss.learn = _extendedLearn
         
 def createDataset(loss='hamming', numInstances=0):
     
@@ -37,10 +40,10 @@ def gridSearchLearner(dataset, lowerBounds, upperBounds, nTestPoints):
 
     if dataset.__class__.lossType == 'hamming':
         learnerCls = GridSearch_HammingLoss
-        leanerParamCls = GridSearch_HammingLossParameter
+        learnerParamCls = GridSearch_HammingLossParameter
     elif dataset.__class__.lossType == 'generalized-hamming':
         learnerCls = GridSearch_GeneralizedHammingLoss
-        leanerParamCls = GridSearch_GeneralizedHammingLossParameter
+        learnerParamCls = GridSearch_GeneralizedHammingLossParameter
 
     nr = numpy.require 
     sizeT_type = 'uint64'
@@ -48,7 +51,7 @@ def gridSearchLearner(dataset, lowerBounds, upperBounds, nTestPoints):
     if struct.calcsize("P") * 8 == 32:
         sizeT_type = 'uint32'
 
-    param = leanerParamCls(nr(lowerBounds,dtype='float64'), nr(lowerBounds,dtype='float64'), 
+    param = learnerParamCls(nr(lowerBounds,dtype='float64'), nr(upperBounds,dtype='float64'),
                            nr(nTestPoints,dtype=sizeT_type))
 
     learner = learnerCls(dataset, param)
@@ -56,6 +59,21 @@ def gridSearchLearner(dataset, lowerBounds, upperBounds, nTestPoints):
 
 
 
+def structMaxMarginLearner(dataset, regularizerWeight=1.0, minGap=1e-5, nSteps=0, optimizer='bundle'):
+    if optimizer != 'bundle':
+        raise RuntimeError("Optimizer type must be 'bundle' for now!")
+
+    if dataset.__class__.lossType == 'hamming':
+        learnerCls = StructMaxMargin_Bundle_HammingLoss
+        learnerParamCls = StructMaxMargin_Bundle_HammingLossParameter
+    elif dataset.__class__.lossType == 'generalized-hamming':
+        learnerCls = StructMaxMargin_Bundle_GeneralizedHammingLoss
+        learnerParamCls = StructMaxMargin_Bundle_GeneralizedHammingLossParameter
+
+    param = learnerParamCls(regularizerWeight, minGap, nSteps)
+    learner = learnerCls(dataset, param)
+    
+    return learner
 
 
 def lPottsFunctions(nFunctions, numberOfLabels, features, weightIds):
diff --git a/src/interfaces/python/opengm/learning/learning.cxx b/src/interfaces/python/opengm/learning/learning.cxx
index 2f2e0a6..bbaa07a 100644
--- a/src/interfaces/python/opengm/learning/learning.cxx
+++ b/src/interfaces/python/opengm/learning/learning.cxx
@@ -9,6 +9,7 @@
 #include <opengm/learning/loss/hammingloss.hxx>
 #include <opengm/learning/loss/generalized-hammingloss.hxx>
 #include <opengm/learning/loss/noloss.hxx>
+#include <opengm/learning/bundle-optimizer.hxx>
 
 namespace bp = boost::python;
 namespace op = opengm::python;
@@ -26,6 +27,9 @@ namespace opengm{
 
     template<class DATASET>
     void export_grid_search_learner(const std::string & clsName);
+
+    template<class DATASET, class OPTIMIZER>
+    void export_struct_max_margin_bundle_learner(const std::string & clsName);
 }
 
 
@@ -53,6 +57,7 @@ BOOST_PYTHON_MODULE_INIT(_learning) {
     opengm::export_grid_search_learner<op::GmAdderHammingLossDataset>("GridSearch_HammingLoss");
     opengm::export_grid_search_learner<op::GmAdderGeneralizedHammingLossDataset>("GridSearch_GeneralizedHammingLoss");
     
-    
+    opengm::export_struct_max_margin_bundle_learner< op::GmAdderHammingLossDataset, ol::BundleOptimizer<op::GmValueType> >("StructMaxMargin_Bundle_HammingLoss");
+    opengm::export_struct_max_margin_bundle_learner< op::GmAdderGeneralizedHammingLossDataset, ol::BundleOptimizer<op::GmValueType> >("StructMaxMargin_Bundle_GeneralizedHammingLoss");
 
 }
diff --git a/src/interfaces/python/opengm/learning/pyStructMaxMarginLearner.cxx b/src/interfaces/python/opengm/learning/pyStructMaxMarginLearner.cxx
new file mode 100644
index 0000000..51dab25
--- /dev/null
+++ b/src/interfaces/python/opengm/learning/pyStructMaxMarginLearner.cxx
@@ -0,0 +1,75 @@
+#include <boost/python.hpp>
+#include <boost/python/module.hpp>
+#include <opengm/python/opengmpython.hxx>
+#include <opengm/python/converter.hxx>
+#include <opengm/python/numpyview.hxx>
+
+#include <opengm/inference/icm.hxx>
+#include <opengm/learning/struct-max-margin.hxx>
+
+#include <opengm/inference/icm.hxx>
+#include <opengm/learning/gridsearch-learning.hxx>
+#include <opengm/inference/messagepassing/messagepassing.hxx>
+
+namespace bp = boost::python;
+namespace op = opengm::python;
+namespace ol = opengm::learning;
+
+namespace opengm{
+
+
+    template<class PARAM>
+    PARAM * pyStructMaxMarginBundleParamConstructor(
+        double regularizerWeight,
+        op::GmValueType minGap,
+        unsigned int steps
+    ){
+        PARAM * p  = new PARAM();
+        p->optimizerParameter_.lambda  = regularizerWeight;
+        p->optimizerParameter_.min_gap = minGap;
+        p->optimizerParameter_.steps   = steps;
+        return p;
+    }
+
+    template<class LEARNER, class INF>
+    void pyLearnWithInf(LEARNER & learner, const typename INF::Parameter & param){
+        learner. template learn<INF>(param);
+    }
+
+    template<class DATASET, class OPTIMIZER>
+    void export_struct_max_margin_bundle_learner(const std::string & clsName){
+        typedef learning::StructMaxMargin<DATASET, OPTIMIZER> PyLearner;
+        typedef typename PyLearner::Parameter PyLearnerParam;
+        typedef typename PyLearner::GMType GMType;
+        typedef typename PyLearner::DatasetType DatasetType;
+
+        const std::string paramClsName = clsName + std::string("Parameter");
+
+
+        bp::class_<PyLearnerParam>(paramClsName.c_str(), bp::init<>())
+            .def("__init__", make_constructor(&pyStructMaxMarginBundleParamConstructor<PyLearnerParam> ,boost::python::default_call_policies()))
+        ;
+
+        // SOME INFERENCE METHODS
+        typedef typename  PyLearner::GMType GMType;
+        typedef opengm::Minimizer ACC;
+
+        typedef opengm::ICM<GMType, ACC> IcmInf;
+        typedef opengm::BeliefPropagationUpdateRules<GMType, ACC> UpdateRulesType;
+        typedef opengm::MessagePassing<GMType, ACC, UpdateRulesType, opengm::MaxDistance> BpInf;
+
+        bp::class_<PyLearner>( clsName.c_str(), bp::init<DatasetType &, const PyLearnerParam &>() )
+            .def("_learn",&pyLearnWithInf<PyLearner, IcmInf>)
+            .def("_learn",&pyLearnWithInf<PyLearner, BpInf>)
+        ;
+    }
+
+    template void
+    export_struct_max_margin_bundle_learner<op::GmAdderHammingLossDataset, ol::BundleOptimizer<op::GmValueType> > (const std::string& className);
+
+    template void
+    export_struct_max_margin_bundle_learner<op::GmAdderGeneralizedHammingLossDataset, ol::BundleOptimizer<op::GmValueType> > (const std::string& className);
+}
+
+
+
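
A note on the structure: learning.cxx only declares
export_struct_max_margin_bundle_learner, while this new file holds the
template's definition and the two explicit instantiations at the bottom, so
the exporter compiles in its own translation unit and the linker still finds
concrete symbols. The pattern, reduced to a sketch with hypothetical
exportThing/export.h names:

    #include <string>

    // export.h -- declaration only, visible to every client
    template<class T>
    void exportThing(const std::string& name);

    // export.cxx -- the definition, plus explicit instantiations that
    // force the compiler to emit object code for the chosen types
    template<class T>
    void exportThing(const std::string& name) {
        // ... register T under 'name' with the binding layer ...
    }
    template void exportThing<int>(const std::string&);

    // main.cxx -- includes export.h only; the call below links against
    // the instantiation emitted in export.cxx
    // int main() { exportThing<int>("int_thing"); return 0; }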

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git


