[opengm] 176/386: Latest test.

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:37:34 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit 1d2c7c3e78821f2251267d239543131da1ee6151
Author: Janez Ales <janez.ales at iwr.uni-heidelberg.de>
Date:   Fri Dec 19 14:40:07 2014 +0100

    Latest test.
---
 .../learning/maximum-likelihood-learning.hxx       | 25 +++++++++++++--------
 .../learning/test_maximum_likelihood_learner.cxx   |  6 +++--
 2 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/include/opengm/learning/maximum-likelihood-learning.hxx b/include/opengm/learning/maximum-likelihood-learning.hxx
index df19be8..81aa089 100644
--- a/include/opengm/learning/maximum-likelihood-learning.hxx
+++ b/include/opengm/learning/maximum-likelihood-learning.hxx
@@ -119,6 +119,11 @@ void MaximumLikelihoodLearner<DATASET, LOSS>::learn(typename INF::Parameter& wei
         //point[p] = ValueType(weight_.weightUpperbound_[p]);
         //point[p] = ValueType(weight_.weightLowerbound_[p]);
 
+    // test only
+    point[0]=0.5;
+    point[1]=0.7;
+    point[2]=0.9;
+
     LOSS lossFunction;
     bool search=true;
     int count=0;
@@ -140,7 +145,7 @@ void MaximumLikelihoodLearner<DATASET, LOSS>::learn(typename INF::Parameter& wei
     ValueType eta = 0.1;
     ValueType delta = 0.25; // 0 <= delta <= 0.5
     ValueType D_a = 1.0; // distance threshold
-    ValueType optFunTmp, optFun, bestOptFun=0.0;
+    ValueType optFun, bestOptFun=0.0;
 
     while(search){
         ++count;
@@ -173,7 +178,6 @@ void MaximumLikelihoodLearner<DATASET, LOSS>::learn(typename INF::Parameter& wei
         }
 
         optFun=0.0;
-        optFunTmp=0.0;
 
         /***********************************************************************************************************/
         // Loopy Belief Propagation setup
@@ -210,6 +214,7 @@ void MaximumLikelihoodLearner<DATASET, LOSS>::learn(typename INF::Parameter& wei
             const std::vector<typename INF::LabelType>& gt =  dataset_.getGT(m);
             bp.infer();
             typename GMType::IndependentFactorType marg;
+
             for(IndexType f = 0; f<dataset_.getModel(m).numberOfFactors();++f){
                 bp.factorMarginal(f, marg);
                 std::vector<IndexType> indexVector( marg.variableIndicesBegin(), marg.variableIndicesEnd() );
@@ -251,19 +256,19 @@ void MaximumLikelihoodLearner<DATASET, LOSS>::learn(typename INF::Parameter& wei
                     sum[p] += (b[m][f] - piW[m][f]) * f_x;
                     // ( ground truth - marginals ) * factor
                     optFun += (b[m][f] - piW[m][f]) * factor(labelVector.begin());
-                    optFunTmp += (b[m][f] - piW[m][f]) * factor(labelVector.begin());
                 }
             }
         }
-        std::cout << " loss = " << loss << " optFun = " << optFun << " optFunTmp = " << optFunTmp << std::endl;
+        //std::cout << " loss = " << loss << " optFun = " << optFun << " optFunTmp = " << optFunTmp << std::endl;
+        std::cout << " loss = " << loss << " optFun = " << optFun << std::endl;
 
-        if(loss<=bestLoss){
-            bestLoss=loss;
+        if(optFun>=bestOptFun){
             bestModelWeight=modelWeight;
             bestOptFun=optFun;
+            bestLoss=loss;
         }
 
-        if (count>=200 ){
+        if (count>=1000000 ){
             search = false;
         }else{
             // Calculate the next point
@@ -274,8 +280,10 @@ void MaximumLikelihoodLearner<DATASET, LOSS>::learn(typename INF::Parameter& wei
             }
             norm2 = std::sqrt(norm2);
             for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p){
-                point[p] += eta * gradient[p]/norm2;
+                gradient[p] /= norm2;
                 std::cout << " gradient [" << p << "] = " << gradient[p] << std::endl;
+                point[p] += eta * gradient[p];
+
             }
             eta *= (ValueType)count/(count+1);
         }
@@ -286,7 +294,7 @@ void MaximumLikelihoodLearner<DATASET, LOSS>::learn(typename INF::Parameter& wei
         std::cout << bestModelWeight[p] <<" ";
     }
     std::cout << " ==> ";
-    std::cout << " loss = " << bestLoss << " bestOptFun = " << bestOptFun << std::endl;
+    std::cout << " loss = " << bestLoss << " bestOptFun = " << bestOptFun << " gradient [" << 0 << "] = " << gradient[0] << std::endl;
 
     modelWeights_ = bestModelWeight;
 };
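
A note on the hunks above: for a log-linear graphical model, the gradient of
the log-likelihood in each weight is the difference between the ground-truth
factor statistics and their expectation under the model. The code estimates
that expectation with loopy-BP factor marginals, which is what the
(b[m][f] - piW[m][f]) * f_x terms compute. Below is a minimal self-contained
sketch of that accumulation, with toy numbers and hypothetical names (gt,
marg, fval, weightOf are illustrative only, not the OpenGM API):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main() {
        // Toy setup: 3 weights, 4 factors. For each factor f,
        //   gt[f]   = empirical (ground-truth) factor statistic,
        //   marg[f] = BP factor marginal for that configuration,
        //   fval[f] = derivative of the factor term w.r.t. its weight.
        const std::size_t nWeights = 3;
        const std::size_t nFactors = 4;
        const std::vector<double> gt   = {1.0, 1.0, 0.0, 1.0};
        const std::vector<double> marg = {0.7, 0.4, 0.1, 0.9};
        const std::vector<double> fval = {0.5, 0.7, 0.9, 0.2};
        // Which single weight each factor depends on (hypothetical mapping).
        const std::vector<std::size_t> weightOf = {0, 1, 2, 0};

        std::vector<double> gradient(nWeights, 0.0);
        double optFun = 0.0;
        for (std::size_t f = 0; f < nFactors; ++f) {
            // (ground truth - marginal) * factor term; note the parentheses,
            // which the comment in the hunk above calls for.
            const double diff = gt[f] - marg[f];
            gradient[weightOf[f]] += diff * fval[f];
            optFun += diff * fval[f];
        }
        for (std::size_t p = 0; p < nWeights; ++p)
            std::cout << "gradient[" << p << "] = " << gradient[p] << "\n";
        std::cout << "optFun = " << optFun << "\n";
        return 0;
    }

Each factor contributes (empirical - expected) times its factor term to the
gradient of the weight it depends on; optFun accumulates the same products,
so the stopping objective rises as the marginals move toward the ground truth.
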
diff --git a/src/unittest/learning/test_maximum_likelihood_learner.cxx b/src/unittest/learning/test_maximum_likelihood_learner.cxx
index 202776a..4f383e8 100644
--- a/src/unittest/learning/test_maximum_likelihood_learner.cxx
+++ b/src/unittest/learning/test_maximum_likelihood_learner.cxx
@@ -61,8 +61,8 @@ int main() {
       learner.learn<INF>(infWeight);
 
    }
-   */
-/*
+
+
    {
       DS1 dataset;
       std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
@@ -80,6 +80,7 @@ int main() {
       
    }
 */
+
    {
       DS2 dataset;
       std::cout << "Dataset includes " << dataset.getNumberOfModels() << " instances and has " << dataset.getNumberOfWeights() << " parameters."<<std::endl;
@@ -95,6 +96,7 @@ int main() {
       INF::Parameter infWeight;
       learner.learn<INF>(infWeight);
    }
+
 /*
    {
       DSSimple dataset;

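A note on the update rule in the last hunk of maximum-likelihood-learning.hxx:
it is normalized gradient ascent with a harmonically decaying step size. Since
eta is multiplied by count/(count+1) after every round, eta after k rounds
equals eta_0/(k+1), and each step moves the weights a distance of exactly eta
along the gradient direction. A standalone sketch under those assumptions
(ascentStep is a hypothetical helper; eta_0 = 0.1 as in the hunk):

    #include <cmath>
    #include <cstddef>
    #include <iostream>
    #include <vector>

    // One step of normalized gradient ascent, as in the hunk above:
    // point += eta * gradient / ||gradient||, then decay eta so that
    // after k steps eta_k = eta_0 * prod_{j=1..k} j/(j+1) = eta_0/(k+1).
    void ascentStep(std::vector<double>& point,
                    const std::vector<double>& gradient,
                    double& eta, int count) {
        double norm2 = 0.0;
        for (double g : gradient) norm2 += g * g;
        norm2 = std::sqrt(norm2);
        if (norm2 == 0.0) return;  // zero gradient: already stationary
        for (std::size_t p = 0; p < point.size(); ++p)
            point[p] += eta * gradient[p] / norm2;
        eta *= static_cast<double>(count) / (count + 1);
    }

    int main() {
        // Maximize f(x) = -(x - 2)^2 in one dimension; gradient is -2(x - 2).
        // Normalized steps always travel a distance of exactly eta, so start
        // near the optimum and let the decaying eta settle the iterate.
        std::vector<double> point = {1.5};
        double eta = 0.1;
        for (int count = 1; count <= 1000; ++count) {
            std::vector<double> gradient = {-2.0 * (point[0] - 2.0)};
            ascentStep(point, gradient, eta, count);
        }
        std::cout << "x = " << point[0] << "  (should be close to 2)\n";
        return 0;
    }

The sum of the eta_k diverges while the individual steps vanish, so the
iterates can still travel arbitrarily far yet settle down near a stationary
point; that is the usual justification for a 1/k schedule in gradient and
subgradient methods.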
-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git


