[opengm] 262/386: isActive added to the belief propagation parameters (trivial test is fubar/max_likelihood_example.py)

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:38:06 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit b846e3c6754e5b187cdc29b4acd7126511d1f6e4
Author: Janez Ales <janez.ales at iwr.uni-heidelberg.de>
Date:   Tue Jan 20 14:42:29 2015 +0100

    isActive added to the belief propagation parameters
    (trivial test is fubar/max_likelihood_example.py)
---
 fubar/max_likelihood_example.py                    | 21 ++++++--
 fubar/real_example_2.py                            | 40 ++++++---------
 .../learning/maximum_likelihood_learning.hxx       | 59 +++++++++++++++-------
 src/interfaces/python/opengm/learning/__init__.py  |  9 +++-
 .../opengm/learning/pyMaxLikelihoodLearner.cxx     | 20 ++++----
 .../learning/test_maximum_likelihood_learner.cxx   | 34 +++++++------
 6 files changed, 110 insertions(+), 73 deletions(-)

diff --git a/fubar/max_likelihood_example.py b/fubar/max_likelihood_example.py
index 3b361ce..059cc4e 100644
--- a/fubar/max_likelihood_example.py
+++ b/fubar/max_likelihood_example.py
@@ -3,12 +3,12 @@ import opengm.learning as learning
 from opengm import numpy
 
 # create a simple model with exactly one variable with two labels
-numWeights = 2
+numWeights = 4
 nLabels = 2
 nVars = 1
 
-# set weight ids and features for both labels
-weightIds = numpy.array([[0, 1],       [0, 1]])
+# set weight ids and features for all labels
+weightIds = numpy.array([[0, 1],       [2,3]])
 features = numpy.array( [[0.5, -0.25], [-0.5, -1.25]])
 
 # create dataset with 2 weights and get the 2 weights
@@ -25,5 +25,16 @@ ground_truth = numpy.array([0]).astype(opengm.label_type)
 dataset.pushBackInstance(gm, ground_truth)
 
 # set up learner and run
-learner = learning.maxLikelihoodLearner(dataset)
-learner.learn(infCls=opengm.inference.TrwsExternal,  parameter=opengm.InfParam())
\ No newline at end of file
+#learner = learning.structMaxMarginLearner(dataset, 0.1, 0.001, 0)
+#learner =  learning.subgradientSSVM(dataset, learningRate=1.0, C=100, learningMode='batch')
+#learner.learn(infCls=opengm.inference.TrwsExternal,  parameter=opengm.InfParam())
+
+
+learner = learning.maxLikelihoodLearner(
+    dataset,
+    maximumNumberOfIterations=1500,
+    gradientStepSize=0.9,
+    weightStoppingCriteria=0.001,
+    gradientStoppingCriteria=0.00000000001,
+    infoFlag=True,
+    infoEveryStep=False,
+    weightRegularizer=1.0,
+    beliefPropagationMaximumNumberOfIterations=20,
+    beliefPropagationConvergenceBound=0.0000000000001,
+    beliefPropagationDamping=0.5,
+    beliefPropagationTemperature=1,
+    beliefPropagationIsAcyclic=opengm.Tribool(True))
+learner.learn()
+
+for w in range(numWeights):
+    print weights[w]
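
The hunk above boils down to one change at the call site. As a minimal
sketch, assuming `dataset` and `weights` are assembled exactly as in the
unchanged part of the example and that opengm was built with the learning
module, the new keyword is passed like this:

    # only the learner construction changes; the one-variable model is
    # trivially acyclic, hence Tribool(True)
    learner = learning.maxLikelihoodLearner(
        dataset,
        beliefPropagationIsAcyclic=opengm.Tribool(True))
    learner.learn()
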
diff --git a/fubar/real_example_2.py b/fubar/real_example_2.py
index 2ae46aa..e14c607 100644
--- a/fubar/real_example_2.py
+++ b/fubar/real_example_2.py
@@ -5,13 +5,15 @@ import vigra
 import pylab as plt
 import pylab
 
-nModels = 20
+#nModels = 20
+nModels = 2
 nLables = 2 
-shape = [50, 50]
+#shape = [50, 50]
+shape = [16, 16]
 numVar = shape[0]*shape[1]
 
-sSmooth = [1.0,1.1,1.2, 1.5, 2.0, 3.0, 4.0]
-sGrad = [1.0, 1.5, 2.0, 3.0, 4.0]
+sSmooth = [1.0,1.2, 1.5, 2.0, 3.0, 4.0]
+sGrad = [1.0, 1.5, 2.0, 4.0]
 
 nUWeights = len(sSmooth) + 1
 nBWeights = len(sGrad) + 1
@@ -21,7 +23,7 @@ def makeGt(shape):
     gt=numpy.ones(shape,dtype='uint8')
     gt[0:shape[0]/2,:] = 0
     return gt
-
+ 
 
 
 weightVals = numpy.ones(nWeights)
@@ -128,30 +130,20 @@ lowerBounds = numpy.ones(nWeights)*-2.0
 upperBounds = numpy.ones(nWeights)*2.0
 nTestPoints  =numpy.ones(nWeights).astype('uint64')*5
 
-# learner = learning.gridSearchLearner(dataset=dataset,lowerBounds=lowerBounds, upperBounds=upperBounds,nTestPoints=nTestPoints)
+#learner = learning.gridSearchLearner(dataset=dataset,lowerBounds=lowerBounds, upperBounds=upperBounds,nTestPoints=nTestPoints)
+#learner =  learning.structPerceptron(dataset, decayExponent=-0.5, learningMode='batch')
 #learner = learning.structMaxMarginLearner(dataset, 0.1, 0.001, 0)
+#learner =  learning.subgradientSSVM(dataset, learningRate=1.0, C=100, learningMode='batch')
 
-learner = learning.maxLikelihoodLearner(
-    dataset,
-    maximumNumberOfIterations =99,
-    gradientStepSize = 0.1111,
-    weightStoppingCriteria = 0.0000000111,
-    gradientStoppingCriteria = 0.000000000011,
-    infoFlag = True,
-    infoEveryStep = True,
-    weightRegularizer = 1.0,
-    beliefPropagationMaximumNumberOfIterations = 30,
-    beliefPropagationConvergenceBound = 0.00011,
-    beliefPropagationDamping = 0.55,
-    beliefPropagationTemperature = 0.3000000001
-)
 
-#learner =  learning.structPerceptron(dataset, decayExponent=-0.5, learningMode='batch')
-# learner =  learning.subgradientSSVM(dataset, learningRate=1.0, C=100, learningMode='batch')
+#learner.learn(infCls=opengm.inference.TrwsExternal,
+#              parameter=opengm.InfParam())
 
 
-#learner.learn(infCls=opengm.inference.TrwsExternal, 
-#              parameter=opengm.InfParam())
+learner = learning.maxLikelihoodLearner(
+    dataset,
+    maximumNumberOfIterations=1000,
+    gradientStepSize=0.9,
+    weightStoppingCriteria=0.001,
+    gradientStoppingCriteria=0.01,
+    infoFlag=True,
+    infoEveryStep=False,
+    weightRegularizer=1.0,
+    beliefPropagationMaximumNumberOfIterations=5,
+    beliefPropagationConvergenceBound=0.0001,
+    beliefPropagationDamping=0.5,
+    beliefPropagationTemperature=0.3,
+    beliefPropagationIsAcyclic=opengm.Tribool(False))
 learner.learn()
 
 for w in range(nWeights):
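
In contrast to the toy example, the grid model built in this file contains
cycles, so the call site above passes opengm.Tribool(False). A short sketch
of the flag choice; the mapping to model structure is inferred from the two
call sites in this commit, and the helper is hypothetical:

    # hypothetical helper: derive the new flag from the model structure
    def acyclicFlag(modelHasCycles):
        return opengm.Tribool(not modelHasCycles)

    # grid with pairwise cycles -> Tribool(False), as in this file
    # chain, tree, single var   -> Tribool(True), as in max_likelihood_example.py
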
diff --git a/include/opengm/learning/maximum_likelihood_learning.hxx b/include/opengm/learning/maximum_likelihood_learning.hxx
index 106ca81..3453ed1 100644
--- a/include/opengm/learning/maximum_likelihood_learning.hxx
+++ b/include/opengm/learning/maximum_likelihood_learning.hxx
@@ -7,6 +7,7 @@
 #include <opengm/graphicalmodel/graphicalmodel.hxx>
 #include <opengm/inference/messagepassing/messagepassing.hxx>
 #include <opengm/functions/view_convert_function.hxx>
+#include <iomanip>
 
 namespace opengm {
    namespace learning {
@@ -36,6 +37,7 @@ namespace opengm {
 	     double beliefPropagationConvergenceBound_;
 	     double beliefPropagationDamping_;
 	     double beliefPropagationTemperature_;
+	     opengm::Tribool beliefPropagationIsAcyclic_;
 	     Parameter():
 	         maximumNumberOfIterations_(100),
 	         gradientStepSize_(0.1),
@@ -47,7 +49,9 @@ namespace opengm {
 		 beliefPropagationMaximumNumberOfIterations_(40),
 		 beliefPropagationConvergenceBound_(0.0000001),
 		 beliefPropagationDamping_(0.5),
-		 beliefPropagationTemperature_(0.3)
+		 beliefPropagationTemperature_(0.3),
+		 beliefPropagationIsAcyclic_(opengm::Tribool::Maybe)
+
 	   {;}
          };
 
@@ -103,7 +107,7 @@ namespace opengm {
       {
           weights_ = opengm::learning::Weights<ValueType>(ds.getNumberOfWeights());
       }
-      
+
       template<class DATASET>
       void MaximumLikelihoodLearner<DATASET>::learn(){
 
@@ -118,8 +122,7 @@ namespace opengm {
          bool search = true; 
          double invTemperature = 1.0/param_.beliefPropagationTemperature_;
 
-         std::cout << std::endl;
-	 if(param_.infoFlag_){
+         if(param_.infoFlag_){
 	     std::cout << "INFO: Maximum Likelihood Learner: Maximum Number Of Iterations "<< param_.maximumNumberOfIterations_ << std::endl;
 	     std::cout << "INFO: Maximum Likelihood Learner: Gradient Step Size "<< param_.gradientStepSize_ << std::endl;
 	     std::cout << "INFO: Maximum Likelihood Learner: Gradient Stopping Criteria "<<param_. gradientStoppingCriteria_ << std::endl;
@@ -131,20 +134,32 @@ namespace opengm {
 	     std::cout << "INFO: Belief Propagation: Convergence Bound "<< param_.beliefPropagationConvergenceBound_ << std::endl;
 	     std::cout << "INFO: Belief Propagation: Damping "<< param_.beliefPropagationDamping_ << std::endl;
 	     std::cout << "INFO: Belief Propagation: Temperature "<< param_.beliefPropagationTemperature_ << std::endl;
+	     std::cout << "INFO: Belief Propagation: Acyclic Model "<< param_.beliefPropagationIsAcyclic_ << std::endl;
 	 }
 
-         typename BeliefPropagation::Parameter infParam(param_.beliefPropagationMaximumNumberOfIterations_, param_.beliefPropagationConvergenceBound_, param_.beliefPropagationDamping_);
+	 typename UpdateRules::SpecialParameterType specialParameter;//=UpdateRules::SpecialParameterType();
+         typename BeliefPropagation::Parameter infParam(
+	     param_.beliefPropagationMaximumNumberOfIterations_, 
+	     param_.beliefPropagationConvergenceBound_, 
+	     param_.beliefPropagationDamping_,
+	     specialParameter,
+	     param_.beliefPropagationIsAcyclic_
+	 );
 
          size_t iterationCount = 0;
          while(search){
             if(iterationCount>=param_.maximumNumberOfIterations_) break;
             ++iterationCount;
-            std::cout << "\r Progress :  " << iterationCount << "/"<<param_.maximumNumberOfIterations_ <<" iteration     0/"<< dataset_.getNumberOfModels() << " models "<< std::flush;
+	    if(param_.infoFlag_)
+	        std::cout << "\r Progress :  " << iterationCount << "/"<<param_.maximumNumberOfIterations_ <<" iteration     0/"<< dataset_.getNumberOfModels() << " models ";
+
             typename GMType::IndependentFactorType marg;
             WeightGradientFunctor wgf(dataset_); 
 
             for(IndexType m=0; m<dataset_.getNumberOfModels(); ++m){ 
-               std::cout << "\r Progress :  " << iterationCount << "/"<<param_.maximumNumberOfIterations_ << " iteration     "<<m<<"/"<< dataset_.getNumberOfModels()<<" models "<< std::flush;
+	       if(param_.infoFlag_)
+                  std::cout << "\r Progress :  " << iterationCount << "/"<<param_.maximumNumberOfIterations_ << " iteration     "<<m<<"/"<< dataset_.getNumberOfModels()<<" models ";
+
                dataset_.lockModel(m);
                wgf.setModel(m);
 
@@ -176,36 +191,46 @@ namespace opengm {
             //*****************************
             //** Gradient Step
             //************************
-            double norm = 0;
+            double gradientNorm = 0;
             for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p){
-               norm += (wgf.getGradient(p)-2*param_.weightRegularizer_*weights_.getWeight(p)) * (wgf.getGradient(p)-2*param_.weightRegularizer_*weights_.getWeight(p));
+               gradientNorm += (wgf.getGradient(p)-2*param_.weightRegularizer_*weights_.getWeight(p)) * (wgf.getGradient(p)-2*param_.weightRegularizer_*weights_.getWeight(p));
             }
-            norm = std::sqrt(norm);
-	    //if(norm < param_.gradientStoppingCriteria_)
-	    //    search = false;
+            gradientNorm = std::sqrt(gradientNorm);
+
+	    if(gradientNorm < param_.gradientStoppingCriteria_)
+	        search = false;
+
 	    if(param_.infoFlag_ and param_.infoEveryStep_)
 	        std::cout << "\r" << std::flush << " Iteration " << iterationCount <<" Gradient = ( ";
 
 	    double normGradientDelta = 0;
             for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p){
 	        if(param_.infoFlag_ and param_.infoEveryStep_)
-		    std::cout << (wgf.getGradient(p)-2*param_.weightRegularizer_*weights_.getWeight(p))/norm << " ";
+		    std::cout << std::left << std::setfill(' ') << std::setw(10) << (wgf.getGradient(p)-2*param_.weightRegularizer_*weights_.getWeight(p))/gradientNorm << " ";
+
+		double gradientDelta;
+		gradientDelta=param_.gradientStepSize_/iterationCount * (wgf.getGradient(p)-2*param_.weightRegularizer_*weights_.getWeight(p))/gradientNorm;
 
-		double gradientDelta=param_.gradientStepSize_/iterationCount * (wgf.getGradient(p)-2*param_.weightRegularizer_*weights_.getWeight(p))/norm;
 		normGradientDelta +=gradientDelta*gradientDelta;
                 dataset_.getWeights().setWeight(p, weights_.getWeight(p) + gradientDelta);
                 weights_.setWeight(p, weights_.getWeight(p) + gradientDelta); 
             }
 	    normGradientDelta=std::sqrt(normGradientDelta);
+	    if( normGradientDelta < param_.weightStoppingCriteria_)
+	        search = false;
+
 	    if(param_.infoFlag_ and param_.infoEveryStep_){
                 std::cout << ") ";
                 std::cout << " Weight = ( ";
                 for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p)
-                    std::cout <<  weights_.getWeight(p) << " ";
-                std::cout << ") normGradientDelta "<< normGradientDelta << std::endl;
+                    std::cout << std::left << std::setfill(' ') << std::setw(10) <<  weights_.getWeight(p) << " ";
+                std::cout << ") "<< "GradientNorm " << std::left << std::setfill(' ') << std::setw(10) << gradientNorm << " GradientDeltaNorm "<< std::setw(10) << normGradientDelta << "             " << std::endl;
 	    }
+	    else if (param_.infoFlag_)
+	      std::cout << "GradientNorm " << std::left << std::setfill(' ') << std::setw(10) << gradientNorm << " GradientDeltaNorm "<< std::setw(10) << normGradientDelta << "             " << std::flush;
          }
-         std::cout << "\r Stoped after "<< iterationCount  << "/" << param_.maximumNumberOfIterations_<< " iterations.                             " <<std::endl;
+	 std::cout << "\r                                                                                                                                                                                                                                                                                                                                                                                                            " << std::flush;
+         std::cout << "\r Stoped after "<< iterationCount  << "/" << param_.maximumNumberOfIterations_<< " iterations. " <<std::endl;
       }
    }
 }
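
The learn() loop patched above is plain gradient ascent with a 1/t decaying,
gradient-normalized step and an L2 weight regularizer; the two stopping
criteria that were previously commented out are now active. Restated as a
schematic Python sketch with stand-in names (this is a re-expression of the
C++ hunk, not the implementation itself):

    import math

    def gradientStep(weights, grad, t, stepSize, reg,
                     gradientStop, weightStop):
        # regularized gradient, as in the hunk: g_p = grad_p - 2*reg*w_p
        g = [grad[p] - 2.0 * reg * weights[p] for p in range(len(weights))]
        gradientNorm = math.sqrt(sum(x * x for x in g))
        keepGoing = gradientNorm >= gradientStop   # gradientStoppingCriteria_
        deltaNorm = 0.0
        for p in range(len(weights)):
            # decaying step along the normalized regularized gradient
            delta = stepSize / t * g[p] / gradientNorm
            deltaNorm += delta * delta
            weights[p] += delta
        if math.sqrt(deltaNorm) < weightStop:      # weightStoppingCriteria_
            keepGoing = False
        return keepGoing
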
diff --git a/src/interfaces/python/opengm/learning/__init__.py b/src/interfaces/python/opengm/learning/__init__.py
index d0c0b7b..23251fd 100644
--- a/src/interfaces/python/opengm/learning/__init__.py
+++ b/src/interfaces/python/opengm/learning/__init__.py
@@ -5,6 +5,7 @@ import struct
 from opengm import index_type,value_type, label_type, graphicalModel,gridVis
 from opengm import configuration as opengmConfig, LUnaryFunction
 from opengm import to_native_boost_python_enum_converter
+from opengm import Tribool
 from progressbar import *
 from functools import partial
 
@@ -268,7 +269,9 @@ def maxLikelihoodLearner(
         beliefPropagationConvergenceBound = 0.0001,
         beliefPropagationDamping = 0.5,
         beliefPropagationReg = 1.0,
-        beliefPropagationTemperature = 1.0):
+        beliefPropagationTemperature = 1.0,
+        beliefPropagationIsAcyclic = Tribool(0)
+):
 
     learnerCls = MaxLikelihood_FlexibleLoss
     learnerParamCls = MaxLikelihood_FlexibleLossParameter
@@ -284,7 +287,9 @@ def maxLikelihoodLearner(
         beliefPropagationMaximumNumberOfIterations,
         beliefPropagationConvergenceBound,
         beliefPropagationDamping,
-        beliefPropagationTemperature)
+        beliefPropagationTemperature,
+        beliefPropagationIsAcyclic
+    )
     #param.maxIterations = int(maxIterations)
     #param.reg = float(reg)
     #param.temperature = float(temp)
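
On the Python side the flag is now an ordinary keyword argument, forwarded
as the last positional argument into the C++ parameter constructor, so the
argument order in __init__.py has to match pyMaxLikelihoodParamConstructor.
Note that the Python default Tribool(0) need not coincide with the C++
default opengm::Tribool::Maybe. The two values actually exercised by this
commit, as a sketch:

    from opengm import Tribool

    acyclic = Tribool(True)    # used in fubar/max_likelihood_example.py
    cyclic  = Tribool(False)   # used in fubar/real_example_2.py
    # the C++ default is opengm::Tribool::Maybe; how to spell Maybe from
    # Python is not shown in this commit, so it is omitted here
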
diff --git a/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx b/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx
index ec9094f..82fc5d0 100644
--- a/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx
+++ b/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx
@@ -21,17 +21,18 @@ namespace opengm{
 
     template<class PARAM>
     PARAM * pyMaxLikelihoodParamConstructor(
-	size_t maximumNumberOfIterations=1234567,
-	double gradientStepSize=0.1234567,
-	double weightStoppingCriteria=0.00001234567,
-	double gradientStoppingCriteria=0.00000001234567,
+	size_t maximumNumberOfIterations=100,
+	double gradientStepSize=0.1,
+	double weightStoppingCriteria=0.00000001,
+	double gradientStoppingCriteria=0.00000001,
 	bool infoFlag=true,
 	bool infoEveryStep=false,
-	double weightRegularizer = 1.00000001,
-	size_t beliefPropagationMaximumNumberOfIterations = 30,
-	double beliefPropagationConvergenceBound = 0.00011,
-	double beliefPropagationDamping = 0.55,
-	double beliefPropagationTemperature = 0.3000000001
+	double weightRegularizer = 1.0,
+	size_t beliefPropagationMaximumNumberOfIterations = 20,
+	double beliefPropagationConvergenceBound = 0.0001,
+	double beliefPropagationDamping = 0.5,
+	double beliefPropagationTemperature = 0.3,
+	opengm::Tribool beliefPropagationIsAcyclic=opengm::Tribool(opengm::Tribool::Maybe)
     ){
         PARAM * p  = new PARAM();
 	p->maximumNumberOfIterations_ = maximumNumberOfIterations;
@@ -45,6 +46,7 @@ namespace opengm{
 	p->beliefPropagationConvergenceBound_ = beliefPropagationConvergenceBound;
 	p->beliefPropagationDamping_ = beliefPropagationDamping;
 	p->beliefPropagationTemperature_ = beliefPropagationTemperature;
+	p->beliefPropagationIsAcyclic_ = beliefPropagationIsAcyclic;
         return p;
     }
 
diff --git a/src/unittest/learning/test_maximum_likelihood_learner.cxx b/src/unittest/learning/test_maximum_likelihood_learner.cxx
index 4e6f978..2530849 100644
--- a/src/unittest/learning/test_maximum_likelihood_learner.cxx
+++ b/src/unittest/learning/test_maximum_likelihood_learner.cxx
@@ -63,17 +63,18 @@ int main() {
       DS1 dataset;
       std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
       opengm::learning::MaximumLikelihoodLearner<DS1>::Parameter parameter;
-      parameter.maximumNumberOfIterations_ = 199;
-      parameter.gradientStepSize_ = 0.1111;
-      parameter.weightStoppingCriteria_ = 0.0000000111;
-      parameter.gradientStoppingCriteria_ = 0.000000000011;
+      parameter.maximumNumberOfIterations_ = 150;
+      parameter.gradientStepSize_ = 0.1;
+      parameter.weightStoppingCriteria_ = 0.001;
+      parameter.gradientStoppingCriteria_ = 0.00000000001;
       parameter.infoFlag_ = true;
       parameter.infoEveryStep_ = true;
       parameter.weightRegularizer_ = 1.0;
-      parameter.beliefPropagationMaximumNumberOfIterations_ = 30;
-      parameter.beliefPropagationConvergenceBound_ = 0.00011;
-      parameter.beliefPropagationDamping_ = 0.55;
-      parameter.beliefPropagationTemperature_ = 0.3000000001;
+      parameter.beliefPropagationMaximumNumberOfIterations_ = 5;
+      parameter.beliefPropagationConvergenceBound_ = 0.0001;
+      parameter.beliefPropagationDamping_ = 0.5;
+      parameter.beliefPropagationTemperature_ = 0.3;
+      parameter.beliefPropagationIsAcyclic_ = opengm::Tribool(opengm::Tribool::Maybe);
       opengm::learning::MaximumLikelihoodLearner<DS1> learner(dataset,parameter);
 
       learner.learn();
@@ -84,17 +85,18 @@ int main() {
       DS2 dataset;
       std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
       opengm::learning::MaximumLikelihoodLearner<DS2>::Parameter parameter;
-      parameter.maximumNumberOfIterations_ = 199;
-      parameter.gradientStepSize_ = 0.1111;
-      parameter.weightStoppingCriteria_ = 0.0000000111;
-      parameter.gradientStoppingCriteria_ = 0.000000000011;
+      parameter.maximumNumberOfIterations_ = 150;
+      parameter.gradientStepSize_ = 0.1;
+      parameter.weightStoppingCriteria_ = 0.001;
+      parameter.gradientStoppingCriteria_ = 0.00000000001;
       parameter.infoFlag_ = true;
       parameter.infoEveryStep_ = true;
       parameter.weightRegularizer_ = 1.0;
-      parameter.beliefPropagationMaximumNumberOfIterations_ = 30;
-      parameter.beliefPropagationConvergenceBound_ = 0.00011;
-      parameter.beliefPropagationDamping_ = 0.55;
-      parameter.beliefPropagationTemperature_ = 0.3000000001;
+      parameter.beliefPropagationMaximumNumberOfIterations_ = 5;
+      parameter.beliefPropagationConvergenceBound_ = 0.0001;
+      parameter.beliefPropagationDamping_ = 0.5;
+      parameter.beliefPropagationTemperature_ = 0.3;
+      parameter.beliefPropagationIsAcyclic_ = opengm::Tribool(opengm::Tribool::Maybe);
       opengm::learning::MaximumLikelihoodLearner<DS2> learner(dataset,parameter);
 
       learner.learn();
