[opengm] 180/386: Update structure of maxLikelihood. Export to python, run from real example 2

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:37:36 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit fe358c119545672f69cf664566b01c4c130a2119
Author: Carsten Haubold <carstenhaubold at googlemail.com>
Date:   Fri Dec 19 16:20:23 2014 +0100

    Update structure of maxLikelihood. Export to python, run from real example 2
---
 fubar/real_example_2.py                            | 21 ++++----
 .../learning/maximum-likelihood-learning.hxx       | 46 +++++++-----------
 .../python/opengm/learning/CMakeLists.txt          |  1 +
 src/interfaces/python/opengm/learning/__init__.py  | 16 +++++++
 src/interfaces/python/opengm/learning/helper.hxx   |  2 +-
 src/interfaces/python/opengm/learning/learning.cxx |  6 +++
 .../opengm/learning/pyStructMaxMarginLearner.cxx   |  2 +-
 .../opengm/learning/pymaxlikelihoodlearner.cxx     | 56 ++++++++++++++++++++++
 .../learning/test_maximum_likelihood_learner.cxx   |  7 ++-
 9 files changed, 112 insertions(+), 45 deletions(-)

diff --git a/fubar/real_example_2.py b/fubar/real_example_2.py
index d0b4230..6b991f5 100644
--- a/fubar/real_example_2.py
+++ b/fubar/real_example_2.py
@@ -5,9 +5,9 @@ import vigra
 import pylab as plt
 import pylab
 
-nModels = 3
+nModels = 1
 nLables = 2 
-shape = [20, 20]
+shape = [10, 10]
 numVar = shape[0]*shape[1]
 
 sSmooth = [1.0, 1.5]
@@ -42,13 +42,13 @@ def makeFeatures(gt):
     #plt.colorbar()
     #vigra.show()
 
-    f = pylab.figure()
-    for n, a in enumerate([gt, randGt]):
-        f.add_subplot(2, 1, n)  # this line outputs images on top of each other
-        # f.add_subplot(1, 2, n)  # this line outputs images side-by-side
-        pylab.imshow(a,cmap='gray')
-    pylab.title('Double image')
-    pylab.show()
+    # f = pylab.figure()
+    # for n, a in enumerate([gt, randGt]):
+    #     f.add_subplot(2, 1, n)  # this line outputs images on top of each other
+    #     # f.add_subplot(1, 2, n)  # this line outputs images side-by-side
+    #     pylab.imshow(a,cmap='gray')
+    # pylab.title('Double image')
+    # pylab.show()
 
 
 
@@ -128,8 +128,9 @@ lowerBounds = numpy.ones(nWeights)*-2.0
 upperBounds = numpy.ones(nWeights)*2.0
 nTestPoints  =numpy.ones(nWeights).astype('uint64')*5
 
-learner = learning.gridSearchLearner(dataset=dataset,lowerBounds=lowerBounds, upperBounds=upperBounds,nTestPoints=nTestPoints)
+# learner = learning.gridSearchLearner(dataset=dataset,lowerBounds=lowerBounds, upperBounds=upperBounds,nTestPoints=nTestPoints)
 #learner = learning.structMaxMarginLearner(dataset, 1.0, 0.001, 0)
+learner = learning.maxLikelihoodLearner(dataset)
 
 learner.learn(infCls=opengm.inference.TrwsExternal, 
               parameter=opengm.InfParam())
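
In short, the learner swap in this example script comes down to the following call sequence: a minimal sketch, assuming the opengm Python learning bindings built from this commit and the dataset object assembled earlier in real_example_2.py.

    import opengm
    from opengm import learning

    # The grid-search learner took per-weight bounds and test points;
    # the new maximum-likelihood learner needs only the dataset.
    learner = learning.maxLikelihoodLearner(dataset)
    learner.learn(infCls=opengm.inference.TrwsExternal,
                  parameter=opengm.InfParam())
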
diff --git a/include/opengm/learning/maximum-likelihood-learning.hxx b/include/opengm/learning/maximum-likelihood-learning.hxx
index 537599c..be6dfc4 100644
--- a/include/opengm/learning/maximum-likelihood-learning.hxx
+++ b/include/opengm/learning/maximum-likelihood-learning.hxx
@@ -3,13 +3,12 @@
 #define OPENGM_MAXIMUM_LIKELIHOOD_LEARNER_HXX
 
 #include <vector>
-#include <opengm/functions/learnablefunction.hxx>
 #include <fstream>
 #include <opengm/inference/messagepassing/messagepassing.hxx>
 #include <opengm/functions/explicit_function.hxx>
 #include <opengm/functions/view_convert_function.hxx>
 #include <opengm/functions/learnable/lpotts.hxx>
-#include <opengm/functions/learnable/sum_of_experts.hxx>
+#include <opengm/functions/learnable/lsum_of_experts.hxx>
 #include <opengm/graphicalmodel/graphicalmodel.hxx>
 #include <opengm/inference/icm.hxx>
 
@@ -19,7 +18,7 @@ typedef size_t LabelType;
 typedef opengm::meta::TypeListGenerator<
     opengm::ExplicitFunction<ValueType,IndexType,LabelType>,
     opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType>,
-    opengm::functions::learnable::SumOfExperts<ValueType,IndexType,LabelType>
+    opengm::functions::learnable::LSumOfExperts<ValueType,IndexType,LabelType>
 >::type FunctionListType;
 
 typedef opengm::GraphicalModel<
@@ -57,56 +56,48 @@ struct WeightGradientFunctor{
 namespace opengm {
 namespace learning {
 
-template<class DATASET, class LOSS>
+template<class DATASET>
 class MaximumLikelihoodLearner
 {
 public:
+    typedef DATASET DatasetType;
     typedef typename DATASET::GMType   GMType;
     typedef typename GMType::ValueType ValueType;
     typedef typename GMType::IndexType IndexType;
     typedef typename GMType::LabelType LabelType;
     typedef typename GMType::FactorType FactorType;
 
-    class Weight{
+    class Parameter{
     public:
-        std::vector<double> weightUpperbound_;
-        std::vector<double> weightLowerbound_;
-        std::vector<IndexType> testingPoints_;
-        Weight(){;}
+        Parameter(){;}
     };
 
 
-    MaximumLikelihoodLearner(DATASET&, Weight& );
+    MaximumLikelihoodLearner(DATASET&, const Parameter & );
 
     template<class INF>
-    void learn(typename INF::Parameter& weight);
+    void learn(const typename INF::Parameter& weight);
 
     const opengm::learning::Weights<ValueType>& getModelWeights(){return modelWeights_;}
-    Weight& getLerningWeights(){return weight_;}
+    Parameter& getLerningWeights(){return param_;}
 
 private:
     DATASET& dataset_;
     opengm::learning::Weights<ValueType> modelWeights_;
-    Weight weight_;
+    Parameter param_;
 };
 
-template<class DATASET, class LOSS>
-MaximumLikelihoodLearner<DATASET, LOSS>::MaximumLikelihoodLearner(DATASET& ds, Weight& w )
-    : dataset_(ds), weight_(w)
+template<class DATASET>
+MaximumLikelihoodLearner<DATASET>::MaximumLikelihoodLearner(DATASET& ds, const Parameter& w )
+    : dataset_(ds), param_(w)
 {
     modelWeights_ = opengm::learning::Weights<ValueType>(ds.getNumberOfWeights());
-    if(weight_.weightUpperbound_.size() != ds.getNumberOfWeights())
-        weight_.weightUpperbound_.resize(ds.getNumberOfWeights(),10.0);
-    if(weight_.weightLowerbound_.size() != ds.getNumberOfWeights())
-        weight_.weightLowerbound_.resize(ds.getNumberOfWeights(),0.0);
-    if(weight_.testingPoints_.size() != ds.getNumberOfWeights())
-        weight_.testingPoints_.resize(ds.getNumberOfWeights(),10);
 }
 
 
-template<class DATASET, class LOSS>
+template<class DATASET>
 template<class INF>
-void MaximumLikelihoodLearner<DATASET, LOSS>::learn(typename INF::Parameter& weight){
+void MaximumLikelihoodLearner<DATASET>::learn(const typename INF::Parameter &weight){
 
     opengm::learning::Weights<ValueType> modelWeight( dataset_.getNumberOfWeights() );
     opengm::learning::Weights<ValueType> bestModelWeight( dataset_.getNumberOfWeights() );
@@ -117,16 +108,13 @@ void MaximumLikelihoodLearner<DATASET, LOSS>::learn(typename INF::Parameter& wei
     for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p)
         point[p] = ValueType((0));
 
-    //weight_.weightUpperbound_[p]-weight_.weightLowerbound_[p])/2);
-        //point[p] = ValueType(weight_.weightUpperbound_[p]);
-        //point[p] = ValueType(weight_.weightLowerbound_[p]);
 
     // test only
     //point[0]=0.5;
     //point[1]=0.7;
     //point[2]=0.9;
 
-    LOSS lossFunction;
+    typename DATASET::LossType lossFunction;
     bool search=true;
     int count=0;
 
@@ -171,7 +159,7 @@ void MaximumLikelihoodLearner<DATASET, LOSS>::learn(typename INF::Parameter& wei
            inf.infer();
            inf.arg(confs[m]);
            const std::vector<typename INF::LabelType>& gt =  dataset_.getGT(m);
-           loss += lossFunction.loss(confs[m].begin(), confs[m].end(), gt.begin(), gt.end());
+           loss += lossFunction.loss(dataset_.getModel(m), confs[m].begin(), confs[m].end(), gt.begin(), gt.end());
         }
 
         std::cout << " eta = " << eta << "   weights  ";//<< std::endl;
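
The restructuring above removes the LOSS template parameter (the loss type is now taken from DATASET::LossType) and replaces the bound-carrying Weight class with an empty Parameter. Seen from Python, the practical effect is that the maximum-likelihood learner no longer needs the per-weight arguments the grid-search learner required; a minimal sketch, assuming a dataset built as in real_example_2.py above:

    from opengm import learning

    # No lowerBounds/upperBounds/nTestPoints arguments: the restructured
    # learner's Parameter carries no options, and the loss follows the
    # dataset type.
    learner = learning.maxLikelihoodLearner(dataset)
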
diff --git a/src/interfaces/python/opengm/learning/CMakeLists.txt b/src/interfaces/python/opengm/learning/CMakeLists.txt
index 0ca1ec1..d50feb1 100644
--- a/src/interfaces/python/opengm/learning/CMakeLists.txt
+++ b/src/interfaces/python/opengm/learning/CMakeLists.txt
@@ -23,6 +23,7 @@ set(PY_OPENGM_CORE_SOURCES
             pyDataset.cxx
             pyLoss.cxx
             pyGridSearchLearner.cxx
+            pyMaxLikelihoodLearner.cxx
             pyStructMaxMarginLearner.cxx)
 
 if(APPLE)
diff --git a/src/interfaces/python/opengm/learning/__init__.py b/src/interfaces/python/opengm/learning/__init__.py
index c51d40e..7a7620e 100644
--- a/src/interfaces/python/opengm/learning/__init__.py
+++ b/src/interfaces/python/opengm/learning/__init__.py
@@ -20,6 +20,9 @@ def _extendedLearn(self, infCls, parameter = None):
 GridSearch_HammingLoss.learn  =_extendedLearn
 GridSearch_GeneralizedHammingLoss.learn  =_extendedLearn
 
+MaxLikelihood_HammingLoss.learn  =_extendedLearn
+MaxLikelihood_GeneralizedHammingLoss.learn  =_extendedLearn
+
 if opengmConfig.withCplex or opengmConfig.withGurobi :
     StructMaxMargin_Bundle_HammingLoss.learn = _extendedLearn
     StructMaxMargin_Bundle_GeneralizedHammingLoss.learn = _extendedLearn
@@ -93,6 +96,19 @@ def structMaxMarginLearner(dataset, regularizerWeight=1.0, minEps=1e-5, nSteps=0
     else:
         raise RuntimeError("this learner needs widthCplex or withGurobi")
 
+def maxLikelihoodLearner(dataset):
+    if dataset.__class__.lossType == 'hamming':
+        learnerCls = MaxLikelihood_HammingLoss
+        learnerParamCls = MaxLikelihood_HammingLossParameter
+    elif dataset.__class__.lossType == 'generalized-hamming':
+        learnerCls = MaxLikelihood_GeneralizedHammingLoss
+        learnerParamCls = MaxLikelihood_GeneralizedHammingLossParameter
+
+    param = learnerParamCls()
+    learner = learnerCls(dataset, param)
+        
+    return learner
+
 
 def lPottsFunctions(nFunctions, numberOfLabels, features, weightIds):
 
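
The factory above simply instantiates the exported class with a default-constructed parameter object; note that it handles only the 'hamming' and 'generalized-hamming' loss tags, so any other lossType falls through with learnerCls unbound. The equivalent explicit construction, sketched here for the Hamming-loss case with the class names exported in learning.cxx below:

    from opengm import learning

    # Equivalent to learning.maxLikelihoodLearner(dataset) for a dataset
    # whose class-level lossType is 'hamming'; the Parameter currently
    # carries no options.
    param = learning.MaxLikelihood_HammingLossParameter()
    learner = learning.MaxLikelihood_HammingLoss(dataset, param)
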
diff --git a/src/interfaces/python/opengm/learning/helper.hxx b/src/interfaces/python/opengm/learning/helper.hxx
index bb29616..eeabf68 100644
--- a/src/interfaces/python/opengm/learning/helper.hxx
+++ b/src/interfaces/python/opengm/learning/helper.hxx
@@ -61,7 +61,7 @@ public:
 #ifdef WITH_QPBO
        typedef opengm::external::QPBO<GMType>  QpboExternal;
 #endif
-#ifdef WITH_QPBO
+#ifdef WITH_TRWS
        typedef opengm::external::TRWS<GMType>  TrwsExternal;
 #endif
 
diff --git a/src/interfaces/python/opengm/learning/learning.cxx b/src/interfaces/python/opengm/learning/learning.cxx
index 8dbafed..fafc9ce 100644
--- a/src/interfaces/python/opengm/learning/learning.cxx
+++ b/src/interfaces/python/opengm/learning/learning.cxx
@@ -33,6 +33,9 @@ namespace opengm{
 
     template<class DATASET, class OPTIMIZER>
     void export_struct_max_margin_bundle_learner(const std::string & clsName);
+
+    template<class DATASET>
+    void export_max_likelihood_learner(const std::string & clsName);
 }
 
 
@@ -59,6 +62,9 @@ BOOST_PYTHON_MODULE_INIT(_learning) {
 
     opengm::export_grid_search_learner<op::GmAdderHammingLossDataset>("GridSearch_HammingLoss");
     opengm::export_grid_search_learner<op::GmAdderGeneralizedHammingLossDataset>("GridSearch_GeneralizedHammingLoss");
+
+    opengm::export_max_likelihood_learner<op::GmAdderHammingLossDataset>("MaxLikelihood_HammingLoss");
+    opengm::export_max_likelihood_learner<op::GmAdderGeneralizedHammingLossDataset>("MaxLikelihood_GeneralizedHammingLoss");
     
     #if defined(WITH_CPLEX) || defined(WITH_GUROBI)
         opengm::export_struct_max_margin_bundle_learner< op::GmAdderHammingLossDataset, ol::BundleOptimizer<op::GmValueType> >("StructMaxMargin_Bundle_HammingLoss");
diff --git a/src/interfaces/python/opengm/learning/pyStructMaxMarginLearner.cxx b/src/interfaces/python/opengm/learning/pyStructMaxMarginLearner.cxx
index 0c3ebde..271ec1e 100644
--- a/src/interfaces/python/opengm/learning/pyStructMaxMarginLearner.cxx
+++ b/src/interfaces/python/opengm/learning/pyStructMaxMarginLearner.cxx
@@ -9,7 +9,7 @@
 #include <opengm/inference/icm.hxx>
 #include <opengm/learning/struct-max-margin.hxx>
 
-#define DefaultErrorFn DefaultErrorFn_TrwsExternalA 
+#define DefaultErrorFn DefaultErrorFn_TrwsExternal_SMM
 #include "helper.hxx"
 
 namespace bp = boost::python;
diff --git a/src/interfaces/python/opengm/learning/pymaxlikelihoodlearner.cxx b/src/interfaces/python/opengm/learning/pymaxlikelihoodlearner.cxx
new file mode 100644
index 0000000..0749629
--- /dev/null
+++ b/src/interfaces/python/opengm/learning/pymaxlikelihoodlearner.cxx
@@ -0,0 +1,56 @@
+#if defined(WITH_CPLEX) || defined(WITH_GUROBI)
+
+#include <boost/python.hpp>
+#include <boost/python/module.hpp>
+#include <opengm/python/opengmpython.hxx>
+#include <opengm/python/converter.hxx>
+#include <opengm/python/numpyview.hxx>
+
+#include <opengm/inference/icm.hxx>
+#include <opengm/learning/maximum-likelihood-learning.hxx>
+
+#define DefaultErrorFn DefaultErrorFn_TrwsExternal_ML
+#include "helper.hxx"
+
+namespace bp = boost::python;
+namespace op = opengm::python;
+namespace ol = opengm::learning;
+
+namespace opengm{
+
+
+    template<class PARAM>
+    PARAM * pyMaxLikelihoodParamConstructor(
+    ){
+        PARAM * p  = new PARAM();
+        return p;
+    }
+
+    template<class DATASET>
+    void export_max_likelihood_learner(const std::string & clsName){
+        typedef learning::MaximumLikelihoodLearner<DATASET> PyLearner;
+        typedef typename PyLearner::Parameter PyLearnerParam;
+        typedef typename PyLearner::DatasetType DatasetType;
+
+        const std::string paramClsName = clsName + std::string("Parameter");
+
+        bp::class_<PyLearnerParam>(paramClsName.c_str(), bp::init<>())
+            .def("__init__", make_constructor(&pyMaxLikelihoodParamConstructor<PyLearnerParam> ,boost::python::default_call_policies()))
+        ;
+
+        boost::python::class_<PyLearner>( clsName.c_str(), boost::python::init<DatasetType &, const PyLearnerParam &>() )
+            .def(LearnerInferenceSuite<PyLearner>())
+        ;
+    }
+
+    template void
+    export_max_likelihood_learner<op::GmAdderHammingLossDataset> (const std::string& className);
+
+    template void
+    export_max_likelihood_learner<op::GmAdderGeneralizedHammingLossDataset> (const std::string& className);
+}
+
+
+
+#endif
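
Since the whole translation unit sits behind the WITH_CPLEX/WITH_GUROBI guard, the MaxLikelihood_* classes exist only in builds with one of those solvers, even though the learn-method patch in __init__.py above is applied unconditionally. A guarded usage sketch; the import path for opengmConfig is an assumption based on its use in __init__.py:

    import opengm
    from opengm import learning
    from opengm import opengmConfig   # assumed import path

    # Mirror the preprocessor guard: the maximum-likelihood classes are
    # only exported when CPLEX or Gurobi support was compiled in.
    if opengmConfig.withCplex or opengmConfig.withGurobi:
        learner = learning.maxLikelihoodLearner(dataset)
        learner.learn(infCls=opengm.inference.TrwsExternal,
                      parameter=opengm.InfParam())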
+
diff --git a/src/unittest/learning/test_maximum_likelihood_learner.cxx b/src/unittest/learning/test_maximum_likelihood_learner.cxx
index 4d1fda2..181497d 100644
--- a/src/unittest/learning/test_maximum_likelihood_learner.cxx
+++ b/src/unittest/learning/test_maximum_likelihood_learner.cxx
@@ -9,7 +9,6 @@
 #include <opengm/utilities/metaprogramming.hxx>
 
 #include <opengm/functions/learnable/lpotts.hxx>
-#include <opengm/functions/learnable/sum_of_experts.hxx>
 #include <opengm/learning/maximum-likelihood-learning.hxx>
 #include <opengm/learning/loss/hammingloss.hxx>
 #include <opengm/learning/dataset/testdatasets.hxx>
@@ -23,7 +22,7 @@ typedef size_t LabelType;
 typedef opengm::meta::TypeListGenerator<
     opengm::ExplicitFunction<ValueType,IndexType,LabelType>,
     opengm::functions::learnable::LPotts<ValueType,IndexType,LabelType>,
-    opengm::functions::learnable::SumOfExperts<ValueType,IndexType,LabelType>
+    opengm::functions::learnable::LSumOfExperts<ValueType,IndexType,LabelType>
 >::type FunctionListType;
 
 typedef opengm::GraphicalModel<
@@ -59,8 +58,8 @@ int main() {
    {
       DS1 dataset;
       std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      opengm::learning::MaximumLikelihoodLearner<DS1,LOSS>::Weight weight;
-      opengm::learning::MaximumLikelihoodLearner<DS1,LOSS> learner(dataset,weight);
+      opengm::learning::MaximumLikelihoodLearner<DS1>::Parameter weight;
+      opengm::learning::MaximumLikelihoodLearner<DS1> learner(dataset,weight);
       INF::Parameter infWeight;
       learner.learn<INF>(infWeight);
       
