[opengm] 229/386: implemented py wrapper for maxlh-learning

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:37:55 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit 618d379f6b6bc21f56bee23e3c3d6fbf7acec562
Author: DerThorsten <thorsten.beier at iwr.uni-heidelberg.de>
Date:   Tue Jan 13 13:54:33 2015 +0100

    implemented py wrapper for maxlh-learning
---
 fubar/toy_dataset.py                               | 21 ++++++----
 .../learning/maximum-likelihood-learning.hxx       |  4 +-
 .../learning/maximum_likelihood_learning.hxx       |  3 ++
 include/opengm/learning/subgradient_ssvm.hxx       | 46 ++++++++++++++++++----
 .../python/opengm/learning/CMakeLists.txt          |  2 +-
 src/interfaces/python/opengm/learning/__init__.py  | 32 +++++++--------
 src/interfaces/python/opengm/learning/learning.cxx |  8 ++--
 .../opengm/learning/pyMaxLikelihoodLearner.cxx     |  6 +--
 .../python/opengm/learning/pyWeights.cxx           |  2 +-
 9 files changed, 81 insertions(+), 43 deletions(-)

diff --git a/fubar/toy_dataset.py b/fubar/toy_dataset.py
index 48dda9d..6b8db90 100644
--- a/fubar/toy_dataset.py
+++ b/fubar/toy_dataset.py
@@ -11,8 +11,8 @@ from make_grid_potts_dset import secondOrderImageDataset, getPbar
 numpy.random.seed(42)
 
 nImages = 8 
-shape = [100, 100]
-noise = 3
+shape = [20, 20]
+noise = 1.0
 imgs = []
 gts = []
 
@@ -29,12 +29,12 @@ for i in range(nImages):
 
     gtImg = vigra.sampling.rotateImageDegree(gtImg.astype(numpy.float32),int(ra),splineOrder=0)
 
-    if i<3 :
+    if i<1 :
         vigra.imshow(gtImg)
         vigra.show()
 
     img = gtImg + numpy.random.random(shape)*float(noise)
-    if i<3 :
+    if i<1 :
         vigra.imshow(img)
         vigra.show()
 
@@ -98,9 +98,16 @@ lm = 0
 infCls = opengm.inference.TrwsExternal
 param = opengm.InfParam()
 
+if True:
+    print "construct learner"
+    learner = learning.maxLikelihoodLearner(dataset)
+    print "start to learn"
+    learner.learn()
+    print "exit"
 
-learner = learning.structMaxMarginLearner(dataset, 0.1, 0.001, 0)
-learner.learn(infCls=infCls,parameter=param,connectedComponents=True,infMode='n')
+else:
+    learner = learning.subgradientSSVM(dataset, learningRate=0.5, C=100, learningMode='batch',maxIterations=200,averaging=-1)
+    learner.learn(infCls=infCls,parameter=param,connectedComponents=True,infMode='n')
 
 #with opengm.Timer("n  2"):
 #    learner.learn(infCls=infCls,parameter=param,connectedComponents=True,infMode='n')
@@ -122,7 +129,7 @@ learner.learn(infCls=infCls,parameter=param,connectedComponents=True,infMode='n'
 # predict on test test
 for (rgbImg, gtImg, gm) in test_set :
     # infer for test image
-    inf = opengm.inference.Multicut(gm)
+    inf = opengm.inference.TrwsExternal(gm)
     inf.infer()
     arg = inf.arg()
     arg = arg.reshape( numpy.squeeze(gtImg.shape))
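
The net effect in the demo script: images shrink to 20x20 with less noise, only
the first image is displayed, the max-likelihood learner replaces the
struct-max-margin learner as the active branch, and test-time inference switches
from Multicut to TrwsExternal. A minimal sketch of the resulting
learn-then-predict flow (assuming dataset and test_set are built as in the
script above):

    import opengm
    from opengm import learning

    # learn weights by maximum likelihood on the training dataset
    learner = learning.maxLikelihoodLearner(dataset)
    learner.learn()

    # predict on the test set with the learned weights
    for (rgbImg, gtImg, gm) in test_set:
        inf = opengm.inference.TrwsExternal(gm)
        inf.infer()
        arg = inf.arg()  # MAP labeling under the learned weights
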
diff --git a/include/opengm/learning/maximum-likelihood-learning.hxx b/include/opengm/learning/maximum-likelihood-learning.hxx
index d282ba8..15cf819 100644
--- a/include/opengm/learning/maximum-likelihood-learning.hxx
+++ b/include/opengm/learning/maximum-likelihood-learning.hxx
@@ -89,7 +89,7 @@ public:
     };
    
 
-    MaximumLikelihoodLearner(DATASET&, const Parameter & );
+    MaximumLikelihoodLearner(DATASET&, const Parameter & w = Parameter());
 
    //  template<class INF>
    void learn();//const typename INF::Parameter&);
@@ -104,7 +104,7 @@ private:
 };
 
 template<class DATASET>
-MaximumLikelihoodLearner<DATASET>::MaximumLikelihoodLearner(DATASET& ds, const Parameter& w )
+MaximumLikelihoodLearner<DATASET>::MaximumLikelihoodLearner(DATASET& ds, const Parameter& w)
     : dataset_(ds), param_(w)
 {
     modelWeights_ = opengm::learning::Weights<ValueType>(ds.getNumberOfWeights());
diff --git a/include/opengm/learning/maximum_likelihood_learning.hxx b/include/opengm/learning/maximum_likelihood_learning.hxx
index 3b34d29..e13805d 100644
--- a/include/opengm/learning/maximum_likelihood_learning.hxx
+++ b/include/opengm/learning/maximum_likelihood_learning.hxx
@@ -50,6 +50,7 @@ namespace opengm {
                for(size_t i=0;i<function.size();++i, ++shapeWalker) {                   
                   for(size_t i=0; i<function.numberOfWeights();++i){
                      size_t wID = function.weightIndex(i);
+                     std::cout<<"m "<<(*marg_)(shapeWalker.coordinateTuple().begin())<<"\n";
                      gradient_[wID] += (*marg_)(shapeWalker.coordinateTuple().begin()) * function.weightGradient(wID, shapeWalker.coordinateTuple().begin() );
                   }
                }              
@@ -131,6 +132,8 @@ namespace opengm {
                bp.infer();
                for(IndexType f=0; f<dataset_.getModel(m).numberOfFactors();++f){
                   bp.factorMarginal(f, marg);
+                  
+                  
                   wgf.setMarg(&marg);
                   dataset_.getModel(m)[f].callFunctor(wgf);
                }
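
The functor above accumulates, for each weight, the factor marginal computed by
belief propagation times the weight gradient of the factor function, summed over
all states of the factor (the added std::cout is a debug trace of the marginal
values). A toy numpy sketch of that accumulation step, with hypothetical numbers
(marg stands in for (*marg_)(...) and wgrad for function.weightGradient):

    import numpy

    # marginal over the 4 joint states of one factor, and the per-state
    # derivative of the factor value with respect to one weight
    marg  = numpy.array([0.1, 0.2, 0.3, 0.4])
    wgrad = numpy.array([0.0, 1.0, 1.0, 2.0])

    gradient_w = numpy.dot(marg, wgrad)  # expected weight gradient
    print(gradient_w)                    # 1.3
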
diff --git a/include/opengm/learning/subgradient_ssvm.hxx b/include/opengm/learning/subgradient_ssvm.hxx
index 1f6fa97..9d466fd 100644
--- a/include/opengm/learning/subgradient_ssvm.hxx
+++ b/include/opengm/learning/subgradient_ssvm.hxx
@@ -10,7 +10,7 @@
 #include <opengm/learning/gradient-accumulator.hxx>
 #include <opengm/learning/weight_averaging.hxx>
 #include <omp.h>
-
+#include <boost/circular_buffer.hpp>
 
 
 
@@ -35,6 +35,10 @@ namespace opengm {
         typedef typename std::vector<LabelType>::const_iterator LabelIterator;
         typedef FeatureAccumulator<GMType, LabelIterator> FeatureAcc;
 
+        typedef std::vector<LabelType> ConfType;
+        typedef boost::circular_buffer<ConfType> ConfBuffer;
+        typedef std::vector<ConfBuffer> ConfBufferVec;
+
         class Parameter{
         public:
 
@@ -52,6 +56,7 @@ namespace opengm {
                 C_ = 1.0;
                 learningMode_ = Batch;
                 averaging_ = -1;
+                nConf_ = 0;
             }       
 
             double eps_;
@@ -61,6 +66,7 @@ namespace opengm {
             double C_;
             LearningMode learningMode_;
             int averaging_;
+            int nConf_;
         };
 
 
@@ -141,7 +147,11 @@ namespace opengm {
             dataset_.getWeights().setWeight(wi, 0.0);
         }
 
+        const bool useWorkingSets = para_.nConf_>0;
 
+        ConfBufferVec buffer(useWorkingSets? nModels : 0, ConfBuffer(para_.nConf_));
+
+        std::vector<bool> isViolated(para_.nConf_);
 
         if(para_.learningMode_ == Parameter::Online){
             RandomUniform<size_t> randModel(0, nModels);
@@ -213,15 +223,37 @@ namespace opengm {
 
                     totalLoss = totalLoss + getLoss(gm, gmWithLoss, arg);
 
-                    // 
-                    FeatureAcc featureAcc(nWegihts);
-                    featureAcc.accumulateModelFeatures(gm, dataset_.getGT(gmi).begin(), arg.begin());
+             
+                    if(useWorkingSets){
+                        // append current solution
+                        buffer[gmi].push_back(arg);
+
+                        size_t c=0;
+                        // check which violates
+                        for(size_t cc=0; cc<buffer[gmi].size(); ++cc){
+                            //const double mLoss = dataset_.getLoss(buffer[gmi][cc], gmi);
+                            //const double argVal = gm.evaluate(buffer[gmi][cc]);
+                            //const double gtVal = gm.evaluate(dataset_.getGT(gmi));
+                            //const double ll = argVal + mLoss - gtVal;
+                            //std::cout<<" argVal "<<argVal<<" gtVal "<<gtVal<<" mLoss "<<mLoss<<"   VV "<<ll<<"\n";
+
+                        }
+
+                    }
+                    else{
+                        FeatureAcc featureAcc(nWegihts);
+                        featureAcc.accumulateModelFeatures(gm, dataset_.getGT(gmi).begin(), arg.begin());
+                        omp_set_lock(&featureAccLock);
+                        featureAcc_.accumulateFromOther(featureAcc);
+                        omp_unset_lock(&featureAccLock);
+                    }
+
 
 
                     // acc features
-                    omp_set_lock(&featureAccLock);
-                    featureAcc_.accumulateFromOther(featureAcc);
-                    omp_unset_lock(&featureAccLock);
+                    //omp_set_lock(&featureAccLock);
+                    //featureAcc_.accumulateFromOther(featureAcc);
+                    //omp_unset_lock(&featureAccLock);
 
                     // unlock the model
                     omp_set_lock(&modelLockUnlock);
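
The working-set machinery keeps the last nConf_ inferred labelings per model in
a boost::circular_buffer, so the oldest configuration is evicted automatically
once the buffer is full; the violation check over that buffer is still stubbed
out. A Python sketch of the same ring-buffer semantics, using collections.deque
as a stand-in (sizes are hypothetical):

    from collections import deque

    nConf, nModels = 3, 2
    # one fixed-capacity buffer of recent configurations per model
    buffers = [deque(maxlen=nConf) for _ in range(nModels)]

    for it in range(5):
        arg = [it, it]          # stand-in for the labeling returned by inference
        buffers[0].append(arg)  # oldest entry is dropped once capacity is reached

    print(list(buffers[0]))     # only the 3 most recent labelings remain
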
diff --git a/src/interfaces/python/opengm/learning/CMakeLists.txt b/src/interfaces/python/opengm/learning/CMakeLists.txt
index 0b34301..45ab4ce 100644
--- a/src/interfaces/python/opengm/learning/CMakeLists.txt
+++ b/src/interfaces/python/opengm/learning/CMakeLists.txt
@@ -24,7 +24,7 @@ set(PY_OPENGM_CORE_SOURCES
             pyDataset.cxx
             pyLoss.cxx
             pyGridSearchLearner.cxx
-            #pyMaxLikelihoodLearner.cxx
+            pyMaxLikelihoodLearner.cxx
             pyStructMaxMarginLearner.cxx
             pySubgradientSSVM.cxx
             pyStructPerceptron.cxx
diff --git a/src/interfaces/python/opengm/learning/__init__.py b/src/interfaces/python/opengm/learning/__init__.py
index 4a88903..332b94d 100644
--- a/src/interfaces/python/opengm/learning/__init__.py
+++ b/src/interfaces/python/opengm/learning/__init__.py
@@ -139,8 +139,7 @@ DatasetWithFlexibleLoss.getTotalLoss = _extendedGetTotalLoss
 
 
 def createDataset(numWeights,  numInstances=0):
-    weightVals = numpy.ones(numWeights)
-    weights = Weights(weightVals)
+    w = Weights(numWeights)
 
     # if loss not in ['hamming','h','gh','generalized-hamming']:
     #     raise RuntimeError("loss must be 'hamming' /'h' or 'generalized-hamming'/'gh' ")    
@@ -151,7 +150,10 @@ def createDataset(numWeights,  numInstances=0):
     # else:
     #     raise RuntimeError("loss must be 'hamming' /'h' or 'generalized-hamming'/'gh' ")   
     dataset = DatasetWithFlexibleLoss(numInstances)
-    dataset.setWeights(weights)
+    dataset.setWeights(w)
+    weights = dataset.getWeights()
+    for wi in range(numWeights):
+        weights[wi] = 0.0
     return dataset
 
 
@@ -225,7 +227,7 @@ def subgradientSSVM(dataset, learningMode='batch',eps=1e-5, maxIterations=10000,
     param.C = float(C)
     param.learningMode = lm
     param.averaging = int(averaging)
-    param.nConf = int(nConf)
+    #param.nConf = int(nConf)
     learner = learnerCls(dataset, param)
     return learner
 
@@ -237,8 +239,8 @@ def structMaxMarginLearner(dataset, regularizerWeight=1.0, minEps=1e-5, nSteps=0
 
 
         assert dataset.__class__.lossType == 'flexible'
-        learnerCls = StructMaxMargin_FlexibleLoss
-        learnerParamCls = StructMaxMargin_FlexibleLossParameter
+        learnerCls = StructMaxMargin_Bundle_FlexibleLoss
+        learnerParamCls = StructMaxMargin_Bundle_FlexibleLossParameter
 
         epsFromGap = False
         if epsStrategy == 'gap':
@@ -254,19 +256,15 @@ def structMaxMarginLearner(dataset, regularizerWeight=1.0, minEps=1e-5, nSteps=0
         raise RuntimeError("this learner needs withCplex or withGurobi")
 
 
-# def maxLikelihoodLearner(dataset):
-#     raise RuntimeError("not yet implemented / wrapped fully")
-#     if dataset.__class__.lossType == 'hamming':
-#         learnerCls = MaxLikelihood_HammingLoss
-#         learnerParamCls = MaxLikelihood_HammingLossParameter
-#     elif dataset.__class__.lossType == 'generalized-hamming':
-#         learnerCls = MaxLikelihood_GeneralizedHammingLoss
-#         learnerParamCls = MaxLikelihood_GeneralizedHammingLossParameter
+def maxLikelihoodLearner(dataset):
+    #raise RuntimeError("not yet implemented / wrapped fully")
+    learnerCls = MaxLikelihood_FlexibleLoss
+    learnerParamCls = MaxLikelihood_FlexibleLossParameter
 
-#     param = learnerParamCls()
-#     learner = learnerCls(dataset, param)
+    param = learnerParamCls()
+    learner = learnerCls(dataset, param)
         
-#     return learner
+    return learner
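
With the wrapper enabled, the convenience functions above are the intended entry
point. A minimal sketch (training instances must already have been added to the
dataset; that part is omitted here):

    from opengm import learning

    # weights are created inside createDataset and zero-initialised, as above
    dataset = learning.createDataset(numWeights=4)
    weights = dataset.getWeights()

    learner = learning.maxLikelihoodLearner(dataset)
    learner.learn()    # bare learn(), see pyMaxLikelihoodLearner.cxx below
    print(weights[0])  # learned value of the first weight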
 
 
 
diff --git a/src/interfaces/python/opengm/learning/learning.cxx b/src/interfaces/python/opengm/learning/learning.cxx
index 4cbc994..2026108 100644
--- a/src/interfaces/python/opengm/learning/learning.cxx
+++ b/src/interfaces/python/opengm/learning/learning.cxx
@@ -37,8 +37,8 @@ namespace opengm{
     template<class DATASET, class OPTIMIZER>
     void export_struct_max_margin_bundle_learner(const std::string & clsName);
 
-    //template<class DATASET>
-    //void export_max_likelihood_learner(const std::string & clsName);
+    template<class DATASET>
+    void export_max_likelihood_learner(const std::string & clsName);
 
     template<class DATASET>
     void export_struct_perceptron_learner(const std::string & clsName);
@@ -79,9 +79,7 @@ BOOST_PYTHON_MODULE_INIT(_learning) {
     opengm::export_grid_search_learner<op::GmAdderFlexibleLossDataset>("GridSearch_FlexibleLoss");
     opengm::export_struct_perceptron_learner<op::GmAdderFlexibleLossDataset>("StructPerceptron_FlexibleLoss");
     opengm::export_subgradient_ssvm_learner<op::GmAdderFlexibleLossDataset>("SubgradientSSVM_FlexibleLoss");
-
-
-    //opengm::export_max_likelihood_learner<op::GmAdderFlexibleLossDataset>("MaxLikelihood_FlexibleLoss");
+    opengm::export_max_likelihood_learner<op::GmAdderFlexibleLossDataset>("MaxLikelihood_FlexibleLoss");
 
     
     #if defined(WITH_CPLEX) || defined(WITH_GUROBI)
diff --git a/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx b/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx
index 4fcd459..f1998b7 100644
--- a/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx
+++ b/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx
@@ -7,7 +7,7 @@
 #include <opengm/python/numpyview.hxx>
 
 #include <opengm/inference/icm.hxx>
-#include <opengm/learning/maximum-likelihood-learning.hxx>
+#include <opengm/learning/maximum_likelihood_learning.hxx>
 
 #define DefaultErrorFn DefaultErrorFn_TrwsExternal_ML
 #include "helper.hxx"
@@ -35,11 +35,11 @@ namespace opengm{
         const std::string paramClsName = clsName + std::string("Parameter");
 
         bp::class_<PyLearnerParam>(paramClsName.c_str(), bp::init<>())
-            .def("__init__", make_constructor(&pyMaxLikelihoodParamConstructor<PyLearnerParam> ,boost::python::default_call_policies()))
+            //.def("__init__", make_constructor(&pyMaxLikelihoodParamConstructor<PyLearnerParam> ,boost::python::default_call_policies()))
         ;
 
         boost::python::class_<PyLearner>( clsName.c_str(), boost::python::init<DatasetType &, const PyLearnerParam &>() )
-            .def(LearnerInferenceSuite<PyLearner>())
+            .def("learn",&PyLearner::learn)
         ;
     }
 
diff --git a/src/interfaces/python/opengm/learning/pyWeights.cxx b/src/interfaces/python/opengm/learning/pyWeights.cxx
index 44cf9e4..513a3e4 100644
--- a/src/interfaces/python/opengm/learning/pyWeights.cxx
+++ b/src/interfaces/python/opengm/learning/pyWeights.cxx
@@ -34,7 +34,7 @@ namespace opengm{
     void export_weight_constraints(){
         typedef  python::GmValueType V;
         typedef learning::WeightConstraints<V> Weights;
-        boost::python::class_<Weights>("Weights",boost::python::init<const size_t >())
+        boost::python::class_<Weights>("WeightConstraints",boost::python::init<const size_t >())
             //.def("__init__", make_constructor(&pyWeightsConstructor<V> ,boost::python::default_call_policies()))
             //.def("__getitem__", &Weights::getWeight)
             //.def("__setitem__", &Weights::setWeight)
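
The rename fixes a name clash: this module already exports the weight vector to
Python as "Weights" (used throughout __init__.py above), so registering the
constraints class under the same name would collide. After the change both
classes are distinct (a sketch; both constructors take a size):

    from opengm import learning

    w  = learning.Weights(4)             # the weight vector
    wc = learning.WeightConstraints(4)   # now registered under its own name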

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git


