[opengm] 253/386: Belief Propagation parameters added to learner.

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:38:05 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit a116bffdfcff046ad0425c70f54bdc6b9d03df49
Author: Janez Ales <janez.ales at iwr.uni-heidelberg.de>
Date:   Fri Jan 16 11:28:13 2015 +0100

    Belief Propagation parameters added to learner.
---
 fubar/real_example_2.py                            | 22 +++++--
 .../learning/maximum_likelihood_learning.hxx       | 80 ++++++----------
 src/interfaces/python/opengm/learning/__init__.py  |  8 +++--
 .../opengm/learning/pyMaxLikelihoodLearner.cxx     | 42 ++++++++++-
 .../learning/test_maximum_likelihood_learner.cxx   | 66 +++++++-------
 5 files changed, 124 insertions(+), 94 deletions(-)
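
For readers skimming the patch: after this commit the belief propagation
settings travel inside the learner's Parameter object instead of being
handed to learn() per call. A minimal sketch of the resulting Python usage
(class and keyword names are taken from the diff below; the dataset
construction is elided and assumed to follow fubar/real_example_2.py):

    from opengm import learning

    # dataset = ...  (flexible-loss dataset, built as in fubar/real_example_2.py)
    learner = learning.maxLikelihoodLearner(
        dataset,
        maximumNumberOfIterations=9,
        beliefPropagationMaximumNumberOfIterations=30,
        beliefPropagationDamping=0.55)
    learner.learn()  # inference settings now come from the parameter object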

diff --git a/fubar/real_example_2.py b/fubar/real_example_2.py
index 59adeaa..1a08c5e 100644
--- a/fubar/real_example_2.py
+++ b/fubar/real_example_2.py
@@ -129,14 +129,28 @@ upperBounds = numpy.ones(nWeights)*2.0
 nTestPoints  =numpy.ones(nWeights).astype('uint64')*5
 
 # learner = learning.gridSearchLearner(dataset=dataset,lowerBounds=lowerBounds, upperBounds=upperBounds,nTestPoints=nTestPoints)
-learner = learning.structMaxMarginLearner(dataset, 0.1, 0.001, 0)
-#learner = learning.maxLikelihoodLearner(dataset)
+#learner = learning.structMaxMarginLearner(dataset, 0.1, 0.001, 0)
+
+learner = learning.maxLikelihoodLearner(
+    dataset,
+    maximumNumberOfIterations=9,
+    gradientStepSize=0.1111,
+    weightStoppingCriteria=0.0000000111,
+    gradientStoppingCriteria=0.000000000011,
+    infoFlag=True,
+    infoEveryStep=True,
+    beliefPropagationMaximumNumberOfIterations=30,
+    beliefPropagationConvergenceBound=0.00011,
+    beliefPropagationDamping=0.55,
+    beliefPropagationReg=1.00000001,
+    beliefPropagationTemperature=0.3000000001
+)
+
 #learner =  learning.structPerceptron(dataset, decayExponent=-0.5, learningMode='batch')
 # learner =  learning.subgradientSSVM(dataset, learningRate=1.0, C=100, learningMode='batch')
 
 
-learner.learn(infCls=opengm.inference.TrwsExternal, 
-              parameter=opengm.InfParam())
+learner.learn()
 
 for w in range(nWeights):
     print weights[w]
diff --git a/include/opengm/learning/maximum_likelihood_learning.hxx b/include/opengm/learning/maximum_likelihood_learning.hxx
index 9d05bec..052c3a7 100644
--- a/include/opengm/learning/maximum_likelihood_learning.hxx
+++ b/include/opengm/learning/maximum_likelihood_learning.hxx
@@ -26,23 +26,28 @@ namespace opengm {
          class Parameter{
          public:
 	     size_t maximumNumberOfIterations_;
-	     double gradientStep_;
-	     double weightAccuracy_;
+	     double gradientStepSize_;
+	     double weightStoppingCriteria_;
              double gradientStoppingCriteria_;
              bool infoFlag_;
              bool infoEveryStep_;
-
-	     double reg_;
-	     double temperature_;
+	     size_t beliefPropagationMaximumNumberOfIterations_;
+	     double beliefPropagationConvergenceBound_;
+	     double beliefPropagationDamping_;
+	     double beliefPropagationReg_;
+	     double beliefPropagationTemperature_;
 	     Parameter():
-	         maximumNumberOfIterations_(10),
-	         gradientStep_(0.1),
-		 weightAccuracy_(0.0001),
-		 gradientStoppingCriteria_(0.00000001),
+	         maximumNumberOfIterations_(100),
+	         gradientStepSize_(0.1),
+		 weightStoppingCriteria_(0.0000000000000001),
+		 gradientStoppingCriteria_(0.0000000000000001),
 		 infoFlag_(true),
 		 infoEveryStep_(false),
-		 reg_(1.0), 
-		 temperature_(0.3)
+		 beliefPropagationMaximumNumberOfIterations_(40),
+		 beliefPropagationConvergenceBound_(0.0000001),
+		 beliefPropagationDamping_(0.5),
+		 beliefPropagationReg_(1.0),
+		 beliefPropagationTemperature_(0.3)
 	   {;}
          };
 
@@ -81,8 +86,7 @@
          
          MaximumLikelihoodLearner(DATASET&, const Parameter&);
 
-	 template<class INF>
-	 void learn(const typename INF::Parameter & infParametersBP);
+	 void learn();
          
          const opengm::learning::Weights<ValueType>& getModelWeights(){return weights_;}
          WeightType& getLerningWeights(){return weights_;}
@@ -101,8 +105,7 @@
       }
       
       template<class DATASET>
-      template<class INF>
-      void MaximumLikelihoodLearner<DATASET>::learn(const typename INF::Parameter & infParametersBP){
+      void MaximumLikelihoodLearner<DATASET>::learn(){
 
          typedef typename opengm::ExplicitFunction<ValueType,IndexType,LabelType>                                                    FunctionType;
          typedef typename opengm::ViewConvertFunction<GMType,Minimizer,ValueType>                                                    ViewFunctionType;
@@ -113,29 +116,24 @@
          typedef MessagePassing<GmBpType, opengm::Integrator, UpdateRules, opengm::MaxDistance>                                      BeliefPropagation;
          
          bool search = true; 
-         double invTemperature = 1.0/param_.temperature_;
+         double invTemperature = 1.0/param_.beliefPropagationTemperature_;
 
          std::cout << std::endl;
 	 if(param_.infoFlag_){
 	     std::cout << "INFO: Maximum Likelihood Learner: Maximum Number Of Iterations "<< param_.maximumNumberOfIterations_ << std::endl;
-	     std::cout << "INFO: Maximum Likelihood Learner: Gradient Step "<< param_.gradientStep_ << std::endl;
-	     std::cout << "INFO: Maximum Likelihood Learner: Weight Accuracy "<< param_.weightAccuracy_ << std::endl;
+	     std::cout << "INFO: Maximum Likelihood Learner: Gradient Step "<< param_.gradientStepSize_ << std::endl;
 	     std::cout << "INFO: Maximum Likelihood Learner: Gradient Stopping Criteria "<<param_. gradientStoppingCriteria_ << std::endl;
+	     std::cout << "INFO: Maximum Likelihood Learner: Weight Stopping Criteria "<< param_.weightStoppingCriteria_ << std::endl;
 	     std::cout << "INFO: Maximum Likelihood Learner: Info Flag "<< param_.infoFlag_ << std::endl;
 	     std::cout << "INFO: Maximum Likelihood Learner: Info Every Step "<< param_.infoEveryStep_ << std::endl;
+	     std::cout << "INFO: Belief Propagation: Maximum Number Of Belief Propagation Iterations "<< param_.beliefPropagationMaximumNumberOfIterations_ << std::endl;
+	     std::cout << "INFO: Belief Propagation: Convergence Bound "<< param_.beliefPropagationConvergenceBound_ << std::endl;
+	     std::cout << "INFO: Belief Propagation: Damping "<< param_.beliefPropagationDamping_ << std::endl;
+	     std::cout << "INFO: Belief Propagation: Regularizer Multiplier "<< param_.beliefPropagationReg_ << std::endl;
+	     std::cout << "INFO: Belief Propagation: Temperature "<< param_.beliefPropagationTemperature_ << std::endl;
 	 }
 
-         //Parameters for inference
-	 const IndexType maxNumberOfBPIterations = infParametersBP.maximumNumberOfSteps_; //40
-	 const double convergenceBound = infParametersBP.bound_; //1e-7;
-	 const double damping = infParametersBP.damping_; //0.5;
-
-	 if(param_.infoFlag_){
-	     std::cout << "INFO: Belief Propagation: Maximum Number Of Belief Propagation Iterations "<< maxNumberOfBPIterations << std::endl;
-	     std::cout << "INFO: Belief Propagation: Convergence Bound "<< convergenceBound << std::endl;
-	     std::cout << "INFO: Belief Propagation: Damping "<< damping << std::endl;
-	 }
-         typename BeliefPropagation::Parameter infParam(maxNumberOfBPIterations, convergenceBound, damping);
+         typename BeliefPropagation::Parameter infParam(param_.beliefPropagationMaximumNumberOfIterations_, param_.beliefPropagationConvergenceBound_, param_.beliefPropagationDamping_);
 
          size_t iterationCount = 0;
          while(search){
@@ -178,33 +176,19 @@
             //*****************************
             //** Gradient Step
             //************************
-	    /*
-	    if(param_.infoFlag_)
-	        std::cout << " Best weights: ";
-            for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p){
-	        dataset_.getWeights().setWeight(p, weights_.getWeight(p) + param_.gradientStep_ * wgf.getGradient(p));
-                weights_.setWeight(p, weights_.getWeight(p) + param_.gradientStep_ * wgf.getGradient(p));
-		if(param_.infoFlag_)
-		  std::cout << weights_.getWeight(p) << " ";
-            }  
-	    if(param_.infoFlag_)
-	      std::cout << std::endl;
-	    */
-
-            //*****************************
             double norm = 0;
             for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p){
-               norm += (wgf.getGradient(p)-2*param_.reg_*weights_.getWeight(p)) * (wgf.getGradient(p)-2*param_.reg_*weights_.getWeight(p));
+               norm += (wgf.getGradient(p)-2*param_.beliefPropagationReg_*weights_.getWeight(p)) * (wgf.getGradient(p)-2*param_.beliefPropagationReg_*weights_.getWeight(p));
             }
-            norm = std::sqrt(norm);
+            norm = std::sqrt(norm);  // TODO: guard against a zero norm before dividing below
 
 	    if(param_.infoFlag_)
 	        std::cout << "gradient = ( ";  
             for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p){
 	        if(param_.infoFlag_)
-                    std::cout << (wgf.getGradient(p)-2*param_.reg_*weights_.getWeight(p))/norm << " ";
-                dataset_.getWeights().setWeight(p, weights_.getWeight(p) + param_.gradientStep_/iterationCount * (wgf.getGradient(p)-2*param_.reg_*weights_.getWeight(p))/norm);
-                weights_.setWeight(p, weights_.getWeight(p) + param_.gradientStep_/iterationCount * (wgf.getGradient(p)-2*param_.reg_*weights_.getWeight(p))/norm); 
+                    std::cout << (wgf.getGradient(p)-2*param_.beliefPropagationReg_*weights_.getWeight(p))/norm << " ";
+                dataset_.getWeights().setWeight(p, weights_.getWeight(p) + param_.gradientStepSize_/iterationCount * (wgf.getGradient(p)-2*param_.beliefPropagationReg_*weights_.getWeight(p))/norm);
+                weights_.setWeight(p, weights_.getWeight(p) + param_.gradientStepSize_/iterationCount * (wgf.getGradient(p)-2*param_.beliefPropagationReg_*weights_.getWeight(p))/norm); 
             } 
 	    if(param_.infoFlag_){
                 std::cout << ") ";
diff --git a/src/interfaces/python/opengm/learning/__init__.py b/src/interfaces/python/opengm/learning/__init__.py
index b91b86f..f844a32 100644
--- a/src/interfaces/python/opengm/learning/__init__.py
+++ b/src/interfaces/python/opengm/learning/__init__.py
@@ -253,15 +253,17 @@ def structMaxMarginLearner(dataset, regularizerWeight=1.0, minEps=1e-5, nSteps=0
         
         return learner
     else:
-        raise RuntimeError("this learner needs widthCplex or withGurobi")
+        raise RuntimeError("this learner needs withCplex or withGurobi")
 
 
-def maxLikelihoodLearner(dataset):
+def maxLikelihoodLearner(dataset, **kwargs):
 
     learnerCls = MaxLikelihood_FlexibleLoss
     learnerParamCls = MaxLikelihood_FlexibleLossParameter
 
-    param = learnerParamCls()
+    # keyword arguments (gradient and belief propagation settings) are
+    # forwarded to the parameter constructor exported from C++
+    param = learnerParamCls(**kwargs)
     learner = learnerCls(dataset, param)
         
     return learner
diff --git a/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx b/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx
index f1998b7..00b0667 100644
--- a/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx
+++ b/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx
@@ -21,8 +21,31 @@ namespace opengm{
 
     template<class PARAM>
     PARAM * pyMaxLikelihoodParamConstructor(
+	// defaults mirror the Parameter class in maximum_likelihood_learning.hxx
+	size_t maximumNumberOfIterations=100,
+	double gradientStepSize=0.1,
+	double weightStoppingCriteria=0.0000000000000001,
+	double gradientStoppingCriteria=0.0000000000000001,
+	bool infoFlag=true,
+	bool infoEveryStep=false,
+	size_t beliefPropagationMaximumNumberOfIterations = 40,
+	double beliefPropagationConvergenceBound = 0.0000001,
+	double beliefPropagationDamping = 0.5,
+	double beliefPropagationReg = 1.0,
+	double beliefPropagationTemperature = 0.3
     ){
         PARAM * p  = new PARAM();
+	p->maximumNumberOfIterations_ = maximumNumberOfIterations;
+	p->gradientStepSize_ = gradientStepSize;
+	p->weightStoppingCriteria_ = weightStoppingCriteria;
+	p->gradientStoppingCriteria_ = gradientStoppingCriteria;
+	p->infoFlag_ = infoFlag;
+	p->infoEveryStep_ = infoEveryStep;
+	p->beliefPropagationMaximumNumberOfIterations_ = beliefPropagationMaximumNumberOfIterations;
+	p->beliefPropagationConvergenceBound_ = beliefPropagationConvergenceBound;
+	p->beliefPropagationDamping_ = beliefPropagationDamping;
+	p->beliefPropagationReg_ = beliefPropagationReg;
+	p->beliefPropagationTemperature_ = beliefPropagationTemperature;
         return p;
     }
 
@@ -35,7 +58,22 @@ namespace opengm{
         const std::string paramClsName = clsName + std::string("Parameter");
 
         bp::class_<PyLearnerParam>(paramClsName.c_str(), bp::init<>())
-            //.def("__init__", make_constructor(&pyMaxLikelihoodParamConstructor<PyLearnerParam> ,boost::python::default_call_policies()))
+	  //.def("__init__", make_constructor(&pyMaxLikelihoodParamConstructor<PyLearnerParam> ,boost::python::default_call_policies()))
         ;
 
         boost::python::class_<PyLearner>( clsName.c_str(), boost::python::init<DatasetType &, const PyLearnerParam &>() )
@@ -43,8 +81,6 @@ namespace opengm{
         ;
     }
 
-    //template void
-    //export_max_likelihood_learner<op::GmAdderHammingLossDataset> (const std::string& className);
 
     template void
     export_max_likelihood_learner<op::GmAdderFlexibleLossDataset> (const std::string& className);
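
With the keyword constructor registered above, the parameter object can be
built from Python with any subset of the settings, the rest falling back to
the mirrored C++ defaults. A quick sketch, assuming the class is exposed
under the name used in src/interfaces/python/opengm/learning/__init__.py:

    from opengm import learning

    p = learning.MaxLikelihood_FlexibleLossParameter(
        beliefPropagationDamping=0.55,
        beliefPropagationTemperature=0.3000000001)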
diff --git a/src/unittest/learning/test_maximum_likelihood_learner.cxx b/src/unittest/learning/test_maximum_likelihood_learner.cxx
index 9387dfa..30b6212 100644
--- a/src/unittest/learning/test_maximum_likelihood_learner.cxx
+++ b/src/unittest/learning/test_maximum_likelihood_learner.cxx
@@ -62,48 +62,42 @@ int main() {
    {
       DS1 dataset;
       std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      opengm::learning::MaximumLikelihoodLearner<DS1>::Parameter gradientParameter;
-      gradientParameter.maximumNumberOfIterations_ = 3;
-      gradientParameter.gradientStep_ = 0.1111;
-      gradientParameter.weightAccuracy_ = 0.0000111;
-      gradientParameter.gradientStoppingCriteria_ = 0.000000011;
-      gradientParameter.infoFlag_ = true;
-      gradientParameter.infoEveryStep_ = true;
-      opengm::learning::MaximumLikelihoodLearner<DS1> learner(dataset,gradientParameter);
-
-      //INF::Parameter infParam;
-      //learner.learn<INF>(infParam);
-      //learner.learn();
-      const size_t maxNumberOfBPIterations = 40;
-      const ValueType convergenceBound = 1e-7;
-      const ValueType damping = 0.5;
-      BeliefPropagation::Parameter parametersBP(maxNumberOfBPIterations, convergenceBound, damping);
-
-      learner.learn<BeliefPropagation>(parametersBP);
+      opengm::learning::MaximumLikelihoodLearner<DS1>::Parameter parameter;
+      parameter.maximumNumberOfIterations_ = 9;
+      parameter.gradientStepSize_ = 0.1111;
+      parameter.weightStoppingCriteria_ = 0.0000000111;
+      parameter.gradientStoppingCriteria_ = 0.000000000011;
+      parameter.infoFlag_ = true;
+      parameter.infoEveryStep_ = true;
+      parameter.beliefPropagationMaximumNumberOfIterations_ = 30;
+      parameter.beliefPropagationConvergenceBound_ = 0.00011;
+      parameter.beliefPropagationDamping_ = 0.55;
+      parameter.beliefPropagationReg_ = 1.00000001;
+      parameter.beliefPropagationTemperature_ = 0.3000000001;
+      opengm::learning::MaximumLikelihoodLearner<DS1> learner(dataset,parameter);
+
+      learner.learn();
       
    }
 
    {
       DS2 dataset;
       std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
-      opengm::learning::MaximumLikelihoodLearner<DS2>::Parameter gradientParameter;
-      gradientParameter.maximumNumberOfIterations_ = 3;
-      gradientParameter.gradientStep_ = 0.1111;
-      gradientParameter.weightAccuracy_ = 0.0000111;
-      gradientParameter.gradientStoppingCriteria_ = 0.000000011;
-      gradientParameter.infoFlag_ = true;
-      gradientParameter.infoEveryStep_ = true;
-      opengm::learning::MaximumLikelihoodLearner<DS2> learner(dataset,gradientParameter);
-
-      //INF::Parameter infParam;
-      //learner.learn<INF>(infParam);
-      //learner.learn();
-      const size_t maxNumberOfBPIterations = 40;
-      const ValueType convergenceBound = 1e-7;
-      const ValueType damping = 0.5;
-      BeliefPropagation::Parameter parametersBP(maxNumberOfBPIterations, convergenceBound, damping);
-
-      learner.learn<BeliefPropagation>(parametersBP);
+      opengm::learning::MaximumLikelihoodLearner<DS2>::Parameter parameter;
+      parameter.maximumNumberOfIterations_ = 9;
+      parameter.gradientStepSize_ = 0.1111;
+      parameter.weightStoppingCriteria_ = 0.0000000111;
+      parameter.gradientStoppingCriteria_ = 0.000000000011;
+      parameter.infoFlag_ = true;
+      parameter.infoEveryStep_ = true;
+      parameter.beliefPropagationMaximumNumberOfIterations_ = 30;
+      parameter.beliefPropagationConvergenceBound_ = 0.00011;
+      parameter.beliefPropagationDamping_ = 0.55;
+      parameter.beliefPropagationReg_ = 1.00000001;
+      parameter.beliefPropagationTemperature_ = 0.3000000001;
+      opengm::learning::MaximumLikelihoodLearner<DS2> learner(dataset,parameter);
+
+      learner.learn();
       
    }
 /*

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git


