[opengm] 261/386: All parameters added. C++ and Python tests added.

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:38:06 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit 377f74eaf126c10fad684a4f620111b7ead68c94
Author: Janez Ales <janez.ales at iwr.uni-heidelberg.de>
Date:   Fri Jan 16 15:47:14 2015 +0100

    All parameters added.
    C++ and Python tests added.
---
 fubar/real_example_2.py                            | 14 ++++-----
 .../learning/maximum_likelihood_learning.hxx       | 35 ++++++++++++----------
 src/interfaces/python/opengm/learning/__init__.py  | 32 +++++++++++++++++---
 .../opengm/learning/pyMaxLikelihoodLearner.cxx     | 24 +++++++--------
 .../learning/test_maximum_likelihood_learner.cxx   |  6 ++--
 5 files changed, 70 insertions(+), 41 deletions(-)

diff --git a/fubar/real_example_2.py b/fubar/real_example_2.py
index 1a08c5e..2ae46aa 100644
--- a/fubar/real_example_2.py
+++ b/fubar/real_example_2.py
@@ -74,7 +74,7 @@ def makeFeatures(gt):
     return a,b
 
 for mi in range(nModels):
-    print mi
+    #print mi
 
     gm = opengm.gm(numpy.ones(numVar)*nLables)
     gt = makeGt(shape) 
@@ -131,20 +131,20 @@ nTestPoints  =numpy.ones(nWeights).astype('uint64')*5
 # learner = learning.gridSearchLearner(dataset=dataset,lowerBounds=lowerBounds, upperBounds=upperBounds,nTestPoints=nTestPoints)
 #learner = learning.structMaxMarginLearner(dataset, 0.1, 0.001, 0)
 
-param = learnerParamCls(
-    maximumNumberOfIterations = 9,
+learner = learning.maxLikelihoodLearner(
+    dataset,
+    maximumNumberOfIterations =99,
     gradientStepSize = 0.1111,
     weightStoppingCriteria = 0.0000000111,
     gradientStoppingCriteria = 0.000000000011,
-    infoFlag = true,
-    infoEveryStep = true,
+    infoFlag = True,
+    infoEveryStep = True,
+    weightRegularizer = 1.0,
     beliefPropagationMaximumNumberOfIterations = 30,
     beliefPropagationConvergenceBound = 0.00011,
     beliefPropagationDamping = 0.55,
-    beliefPropagationReg = 1.00000001,
     beliefPropagationTemperature = 0.3000000001
 )
-learner = learning.maxLikelihoodLearner(dataset,param)
 
 #learner =  learning.structPerceptron(dataset, decayExponent=-0.5, learningMode='batch')
 # learner =  learning.subgradientSSVM(dataset, learningRate=1.0, C=100, learningMode='batch')
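
For reference, the hunk above replaces the previous two-step construction
(build a learnerParamCls, then hand it to maxLikelihoodLearner) with a single
keyword-argument call. A minimal sketch of the new style, assuming `dataset`
is the opengm learning dataset built earlier in the script and that the
learner is run via a learn() method (the exact learn() signature is not shown
in this commit):

    from opengm import learning

    learner = learning.maxLikelihoodLearner(
        dataset,
        maximumNumberOfIterations=99,   # gradient-descent iterations
        gradientStepSize=0.1111,        # step length, decayed by 1/iteration
        infoFlag=True,                  # print summary information
        infoEveryStep=True,             # print per-iteration gradient/weights
        weightRegularizer=1.0,          # L2 penalty on the weights
    )
    learner.learn()
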
diff --git a/include/opengm/learning/maximum_likelihood_learning.hxx b/include/opengm/learning/maximum_likelihood_learning.hxx
index 60d7946..106ca81 100644
--- a/include/opengm/learning/maximum_likelihood_learning.hxx
+++ b/include/opengm/learning/maximum_likelihood_learning.hxx
@@ -43,10 +43,10 @@ namespace opengm {
 		 gradientStoppingCriteria_(0.0000000000000001),
 		 infoFlag_(true),
 		 infoEveryStep_(false),
+		 weightRegularizer_(1.0),
 		 beliefPropagationMaximumNumberOfIterations_(40),
 		 beliefPropagationConvergenceBound_(0.0000001),
 		 beliefPropagationDamping_(0.5),
-		 weightRegularizer_(1.0),
 		 beliefPropagationTemperature_(0.3)
 	   {;}
          };
@@ -86,9 +86,7 @@ namespace opengm {
          
          MaximumLikelihoodLearner(DATASET&, const Parameter&);
 
-	//template<class INF>
 	 void learn();
-	//void learn(const typename INF::Parameter & infParametersBP);
          
          const opengm::learning::Weights<ValueType>& getModelWeights(){return weights_;}
          WeightType& getLerningWeights(){return weights_;}
@@ -108,8 +106,6 @@ namespace opengm {
       
       template<class DATASET>
       void MaximumLikelihoodLearner<DATASET>::learn(){
-	//template<class INF>
-	//void MaximumLikelihoodLearner<DATASET>::learn(const typename INF::Parameter & infParametersBP){
 
          typedef typename opengm::ExplicitFunction<ValueType,IndexType,LabelType>                                                    FunctionType;
          typedef typename opengm::ViewConvertFunction<GMType,Minimizer,ValueType>                                                    ViewFunctionType;
@@ -184,22 +180,29 @@ namespace opengm {
             for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p){
                norm += (wgf.getGradient(p)-2*param_.weightRegularizer_*weights_.getWeight(p)) * (wgf.getGradient(p)-2*param_.weightRegularizer_*weights_.getWeight(p));
             }
-            norm = std::sqrt(norm);  // check for the zero norm &
+            norm = std::sqrt(norm);
+	    //if(norm < param_.gradientStoppingCriteria_)
+	    //    search = false;
+	    if(param_.infoFlag_ and param_.infoEveryStep_)
+	        std::cout << "\r" << std::flush << " Iteration " << iterationCount <<" Gradient = ( ";
 
-	    if(param_.infoFlag_)
-	        std::cout << "gradient = ( ";  
+	    double normGradientDelta = 0;
             for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p){
-	        if(param_.infoFlag_)
-                    std::cout << (wgf.getGradient(p)-2*param_.weightRegularizer_*weights_.getWeight(p))/norm << " ";
-                dataset_.getWeights().setWeight(p, weights_.getWeight(p) + param_.gradientStepSize_/iterationCount * (wgf.getGradient(p)-2*param_.weightRegularizer_*weights_.getWeight(p))/norm);
-                weights_.setWeight(p, weights_.getWeight(p) + param_.gradientStepSize_/iterationCount * (wgf.getGradient(p)-2*param_.weightRegularizer_*weights_.getWeight(p))/norm); 
-            } 
-	    if(param_.infoFlag_){
+	        if(param_.infoFlag_ and param_.infoEveryStep_)
+		    std::cout << (wgf.getGradient(p)-2*param_.weightRegularizer_*weights_.getWeight(p))/norm << " ";
+
+		double gradientDelta=param_.gradientStepSize_/iterationCount * (wgf.getGradient(p)-2*param_.weightRegularizer_*weights_.getWeight(p))/norm;
+		normGradientDelta +=gradientDelta*gradientDelta;
+                dataset_.getWeights().setWeight(p, weights_.getWeight(p) + gradientDelta);
+                weights_.setWeight(p, weights_.getWeight(p) + gradientDelta); 
+            }
+	    normGradientDelta=std::sqrt(normGradientDelta);
+	    if(param_.infoFlag_ and param_.infoEveryStep_){
                 std::cout << ") ";
-                std::cout << " weight = ( ";
+                std::cout << " Weight = ( ";
                 for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p)
                     std::cout <<  weights_.getWeight(p) << " ";
-                std::cout << ")"<<std::endl;
+                std::cout << ") normGradientDelta "<< normGradientDelta << std::endl;
 	    }
          }
          std::cout << "\r Stoped after "<< iterationCount  << "/" << param_.maximumNumberOfIterations_<< " iterations.                             " <<std::endl;
diff --git a/src/interfaces/python/opengm/learning/__init__.py b/src/interfaces/python/opengm/learning/__init__.py
index ded6d1d..d0c0b7b 100644
--- a/src/interfaces/python/opengm/learning/__init__.py
+++ b/src/interfaces/python/opengm/learning/__init__.py
@@ -255,13 +255,37 @@ def structMaxMarginLearner(dataset, regularizerWeight=1.0, minEps=1e-5, nSteps=0
         raise RuntimeError("this learner needs withCplex or withGurobi")
 
 
-def maxLikelihoodLearner(dataset, maxIterations=1000, reg=1.0, temp=1.0):
+def maxLikelihoodLearner(
+        dataset, 
+        maximumNumberOfIterations = 100,
+        gradientStepSize = 0.1,
+        weightStoppingCriteria = 0.00000001,
+        gradientStoppingCriteria = 0.00000000001,
+        infoFlag = True,
+        infoEveryStep = False,
+        weightRegularizer = 1.0,
+        beliefPropagationMaximumNumberOfIterations = 40,
+        beliefPropagationConvergenceBound = 0.0001,
+        beliefPropagationDamping = 0.5,
+        beliefPropagationReg = 1.0,
+        beliefPropagationTemperature = 1.0):
+
     learnerCls = MaxLikelihood_FlexibleLoss
     learnerParamCls = MaxLikelihood_FlexibleLossParameter
 
-
-    param = learnerParamCls()
-    param.maxIterations = int(maxIterations)
+    param = learnerParamCls(
+        maximumNumberOfIterations,
+        gradientStepSize,
+        weightStoppingCriteria,
+        gradientStoppingCriteria,
+        infoFlag,
+        infoEveryStep,
+        weightRegularizer,
+        beliefPropagationMaximumNumberOfIterations,
+        beliefPropagationConvergenceBound,
+        beliefPropagationDamping,
+        beliefPropagationTemperature)
+    #param.maxIterations = int(maxIterations)
     #param.reg = float(reg)
     #param.temperature = float(temp)
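
Two details of this wrapper are worth noting: the eleven values are passed
positionally to the parameter constructor, so their order must match the
Boost.Python constructor exposed in pyMaxLikelihoodLearner.cxx (see below),
and beliefPropagationReg is still accepted as a keyword but is not forwarded;
weightRegularizer is the value that reaches the C++ parameter. A hedged
sketch of a call that relies on the defaults:

    from opengm import learning

    # Only the overrides need naming; everything else keeps the
    # defaults from the wrapper signature above.
    learner = learning.maxLikelihoodLearner(
        dataset,
        maximumNumberOfIterations=200,
        beliefPropagationDamping=0.5,
    )
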
 
diff --git a/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx b/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx
index 724970e..ec9094f 100644
--- a/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx
+++ b/src/interfaces/python/opengm/learning/pyMaxLikelihoodLearner.cxx
@@ -27,11 +27,11 @@ namespace opengm{
 	double gradientStoppingCriteria=0.00000001234567,
 	bool infoFlag=true,
 	bool infoEveryStep=false,
-	size_t beleifPropagationMaximumNumberOfIterations = 30,
-	double beleifPropagationConvergenceBound = 0.00011,
-	double beleifPropagationDamping = 0.55,
-	double beleifPropagationReg = 1.00000001,
-	double beleifPropagationTemperature = 0.3000000001
+	double weightRegularizer = 1.00000001,
+	size_t beliefPropagationMaximumNumberOfIterations = 30,
+	double beliefPropagationConvergenceBound = 0.00011,
+	double beliefPropagationDamping = 0.55,
+	double beliefPropagationTemperature = 0.3000000001
     ){
         PARAM * p  = new PARAM();
 	p->maximumNumberOfIterations_ = maximumNumberOfIterations;
@@ -40,11 +40,11 @@ namespace opengm{
 	p->gradientStoppingCriteria_ = gradientStoppingCriteria;
 	p->infoFlag_ = infoFlag;
 	p->infoEveryStep_ = infoEveryStep;
-	p->beliefPropagationMaximumNumberOfIterations_ = beleifPropagationMaximumNumberOfIterations;
-	p->beliefPropagationConvergenceBound_ = beleifPropagationConvergenceBound;
-	p->beliefPropagationDamping_ = beleifPropagationDamping;
-	p->beliefPropagationReg_ = beleifPropagationReg;
-	p->beliefPropagationTemperature_ = beleifPropagationTemperature;
+	p->weightRegularizer_ = weightRegularizer;
+	p->beliefPropagationMaximumNumberOfIterations_ = beliefPropagationMaximumNumberOfIterations;
+	p->beliefPropagationConvergenceBound_ = beliefPropagationConvergenceBound;
+	p->beliefPropagationDamping_ = beliefPropagationDamping;
+	p->beliefPropagationTemperature_ = beliefPropagationTemperature;
         return p;
     }
 
@@ -57,8 +57,8 @@ namespace opengm{
         const std::string paramClsName = clsName + std::string("Parameter");
 
         bp::class_<PyLearnerParam>(paramClsName.c_str(), bp::init<>())
-	  //.def("__init__", make_constructor(&pyMaxLikelihoodParamConstructor<PyLearnerParam> ,boost::python::default_call_policies()))
-            .def_readwrite("maxIterations", &PyLearnerParam::maximumNumberOfIterations_)
+	  .def("__init__", make_constructor(&pyMaxLikelihoodParamConstructor<PyLearnerParam> ,boost::python::default_call_policies()))
+	  //.def_readwrite("maxIterations", &PyLearnerParam::maximumNumberOfIterations_)
         ;
 
         boost::python::class_<PyLearner>( clsName.c_str(), boost::python::init<DatasetType &, const PyLearnerParam &>() )
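
With the make_constructor line enabled, the parameter class can also be
instantiated in one call from Python instead of mutating a default-constructed
object field by field. A sketch, assuming the class is exported as
MaxLikelihood_FlexibleLossParameter (the name used in __init__.py above) and
that the positional order follows pyMaxLikelihoodParamConstructor:

    from opengm.learning import MaxLikelihood_FlexibleLossParameter

    # maximumNumberOfIterations, gradientStepSize, weightStoppingCriteria,
    # gradientStoppingCriteria, infoFlag, infoEveryStep, weightRegularizer,
    # BP max iterations, BP convergence bound, BP damping, BP temperature
    param = MaxLikelihood_FlexibleLossParameter(
        100, 0.1, 1e-8, 1e-11, True, False, 1.0, 30, 0.00011, 0.55, 0.3)
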
diff --git a/src/unittest/learning/test_maximum_likelihood_learner.cxx b/src/unittest/learning/test_maximum_likelihood_learner.cxx
index fc7f915..4e6f978 100644
--- a/src/unittest/learning/test_maximum_likelihood_learner.cxx
+++ b/src/unittest/learning/test_maximum_likelihood_learner.cxx
@@ -63,12 +63,13 @@ int main() {
       DS1 dataset;
       std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
       opengm::learning::MaximumLikelihoodLearner<DS1>::Parameter parameter;
-      parameter.maximumNumberOfIterations_ = 3;
+      parameter.maximumNumberOfIterations_ = 199;
       parameter.gradientStepSize_ = 0.1111;
       parameter.weightStoppingCriteria_ = 0.0000000111;
       parameter.gradientStoppingCriteria_ = 0.000000000011;
       parameter.infoFlag_ = true;
       parameter.infoEveryStep_ = true;
+      parameter.weightRegularizer_ = 1.0;
       parameter.beliefPropagationMaximumNumberOfIterations_ = 30;
       parameter.beliefPropagationConvergenceBound_ = 0.00011;
       parameter.beliefPropagationDamping_ = 0.55;
@@ -83,12 +84,13 @@ int main() {
       DS2 dataset;
       std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
       opengm::learning::MaximumLikelihoodLearner<DS2>::Parameter parameter;
-      parameter.maximumNumberOfIterations_ = 3;
+      parameter.maximumNumberOfIterations_ = 199;
       parameter.gradientStepSize_ = 0.1111;
       parameter.weightStoppingCriteria_ = 0.0000000111;
       parameter.gradientStoppingCriteria_ = 0.000000000011;
       parameter.infoFlag_ = true;
       parameter.infoEveryStep_ = true;
+      parameter.weightRegularizer_ = 1.0;
       parameter.beliefPropagationMaximumNumberOfIterations_ = 30;
       parameter.beliefPropagationConvergenceBound_ = 0.00011;
       parameter.beliefPropagationDamping_ = 0.55;
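
The same configuration the updated C++ tests use can be mirrored from Python
through the wrapper added in this commit; a smoke-test sketch (dataset
construction elided, learn() call assumed as in the examples above):

    learner = learning.maxLikelihoodLearner(
        dataset,
        maximumNumberOfIterations=199,
        gradientStepSize=0.1111,
        weightStoppingCriteria=0.0000000111,
        gradientStoppingCriteria=0.000000000011,
        infoFlag=True,
        infoEveryStep=True,
        weightRegularizer=1.0,
        beliefPropagationMaximumNumberOfIterations=30,
        beliefPropagationConvergenceBound=0.00011,
        beliefPropagationDamping=0.55,
        beliefPropagationTemperature=0.3000000001,
    )
    learner.learn()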

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git


