[opengm] 230/386: fubar

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:37:55 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit f66681ff2045d41d6b7e6b685f1b428206e29ad6
Author: DerThorsten <thorsten.beier at iwr.uni-heidelberg.de>
Date:   Tue Jan 13 16:30:23 2015 +0100

    fubar
---
 fubar/toy_dataset.py                               |  8 +++---
 include/opengm/graphicalmodel/weights.hxx          |  4 +++
 include/opengm/learning/struct-max-margin.hxx      |  1 +
 include/opengm/learning/subgradient_ssvm.hxx       | 29 ++++++++++++++++------
 src/interfaces/python/opengm/learning/__init__.py  |  4 +--
 .../python/opengm/learning/pySubgradientSSVM.cxx   |  1 +
 6 files changed, 34 insertions(+), 13 deletions(-)

diff --git a/fubar/toy_dataset.py b/fubar/toy_dataset.py
index 6b8db90..58e2e26 100644
--- a/fubar/toy_dataset.py
+++ b/fubar/toy_dataset.py
@@ -12,7 +12,7 @@ numpy.random.seed(42)
 
 nImages = 8 
 shape = [20, 20]
-noise = 1.0
+noise = 4.0
 imgs = []
 gts = []
 
@@ -98,7 +98,7 @@ lm = 0
 infCls = opengm.inference.TrwsExternal
 param = opengm.InfParam()
 
-if True:
+if False:
     print "construct learner"
     learner = learning.maxLikelihoodLearner(dataset)
     print "start to learn"
@@ -106,9 +106,11 @@ if True:
     print "exit"
 
 else:
-   learner =  learning.subgradientSSVM(dataset, learningRate=0.5, C=100, learningMode='batch',maxIterations=200,averaging=-1)
+   learner =  learning.subgradientSSVM(dataset, learningRate=0.5, C=100, learningMode='batch',maxIterations=1000,averaging=-1,nConf=15)
    learner.learn(infCls=infCls,parameter=param,connectedComponents=True,infMode='n')
 
+   learner =  learning.subgradientSSVM(dataset, learningRate=0.5, C=100, learningMode='batch',maxIterations=1000,averaging=-1,nConf=0)
+   learner.learn(infCls=infCls,parameter=param,connectedComponents=True,infMode='n')
 #with opengm.Timer("n  2"):
 #    learner.learn(infCls=infCls,parameter=param,connectedComponents=True,infMode='n')
 #with opengm.Timer("sf"):
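
[Editor's note on the hunk above: raising noise from 1.0 to 4.0 makes the toy problem
substantially harder, since noise scales the perturbation added to each ground-truth
image before learning. A minimal standalone sketch of that setup, assuming additive
Gaussian noise; the actual generator lives outside this hunk, and the ground-truth
construction below is a hypothetical stand-in, not the script's code:]

    import numpy

    numpy.random.seed(42)
    shape = [20, 20]
    noise = 4.0  # was 1.0 before this commit: a noisier, harder toy task

    # hypothetical stand-in for the script's ground-truth generator
    gt = (numpy.random.rand(*shape) > 0.5).astype(numpy.float64)
    # noisy observation handed to the learner
    img = gt + numpy.random.normal(0.0, noise, size=shape)
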
diff --git a/include/opengm/graphicalmodel/weights.hxx b/include/opengm/graphicalmodel/weights.hxx
index 08f74b8..3f5c209 100644
--- a/include/opengm/graphicalmodel/weights.hxx
+++ b/include/opengm/graphicalmodel/weights.hxx
@@ -266,4 +266,8 @@ namespace learning{
 } // namespace opengm
 
 
+
+
+
+
 #endif /* OPENGM_LEARNING_WEIGHTS */
diff --git a/include/opengm/learning/struct-max-margin.hxx b/include/opengm/learning/struct-max-margin.hxx
index 709f0b8..55b03a1 100644
--- a/include/opengm/learning/struct-max-margin.hxx
+++ b/include/opengm/learning/struct-max-margin.hxx
@@ -158,6 +158,7 @@ void
 StructMaxMargin<DS, O>::learn(const typename InferenceType::Parameter& infParams) {
 
     typedef typename InferenceType:: template RebindGm<GMWITHLOSS>::type InfType;
+
     typedef typename InfType::Parameter InfTypeParam;
     InfTypeParam infTypeParam(infParams);
     Oracle<InfType> oracle(_dataset, infTypeParam);
diff --git a/include/opengm/learning/subgradient_ssvm.hxx b/include/opengm/learning/subgradient_ssvm.hxx
index 9d466fd..008cbb4 100644
--- a/include/opengm/learning/subgradient_ssvm.hxx
+++ b/include/opengm/learning/subgradient_ssvm.hxx
@@ -146,7 +146,7 @@ namespace opengm {
         for(size_t wi=0; wi<nWegihts; ++wi){
             dataset_.getWeights().setWeight(wi, 0.0);
         }
-
+        std::cout<<"PARAM nConf_"<<para_.nConf_<<"\n";
         const bool useWorkingSets = para_.nConf_>0;
 
         ConfBufferVec buffer(useWorkingSets? nModels : 0, ConfBuffer(para_.nConf_));
@@ -228,17 +228,30 @@ namespace opengm {
                         // append current solution
                         buffer[gmi].push_back(arg);
 
-                        size_t c=0;
+                        size_t vCount=0;
                         // check which violates
-                        for(size_t cc=0; cc<buffer[gmi].size(); +cc){
-                            //const double mLoss = dataset_.getLoss(buffer[gmi][cc], gmi);
-                            //const double argVal = gm.evaluate(buffer[gmi][cc]);
-                            //const double gtVal =  gm.evaluate(dataset_.getGT());
-                            //const double ll = argVal + mLoss - gtVal;
+                        for(size_t cc=0; cc<buffer[gmi].size(); ++cc){
+                            const double mLoss = dataset_.getLoss(buffer[gmi][cc], gmi);
+                            const double argVal = gm.evaluate(buffer[gmi][cc]);
+                            const double gtVal =  gm.evaluate(dataset_.getGT(gmi));
+                            const double ll = (argVal - mLoss) - gtVal;
                             //std::cout<<" argVal "<<argVal<<" gtVal "<<gtVal<<" mLoss "<<mLoss<<"   VV "<<ll<<"\n";
-
+                            if(ll<0){
+                                isViolated[cc] = true;
+                                ++vCount;
+                            }
                         }
+                        FeatureAcc featureAcc(nWegihts);
+                        for(size_t cc=0; cc<buffer[gmi].size(); ++cc){
+                            if(isViolated[cc]){
+
+                                featureAcc.accumulateModelFeatures(gm, dataset_.getGT(gmi).begin(), buffer[gmi][cc].begin(),1.0/double(vCount));
 
+                            }
+                        }
+                        omp_set_lock(&featureAccLock);
+                        featureAcc_.accumulateFromOther(featureAcc);
+                        omp_unset_lock(&featureAccLock);
                     }
                     else{
                         FeatureAcc featureAcc(nWegihts);
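
[Editor's note on the hunk above: the new working-set path keeps, per training model,
a buffer of the last nConf inference results and re-checks all of them each round.
A buffered labeling y is "violated" when its loss-augmented energy undercuts the
ground truth, i.e. (E(y) - loss(y, y_gt)) - E(y_gt) < 0, exactly the ll < 0 test
above; the features of every violated labeling are then accumulated with weight
1/vCount. A minimal Python sketch of that test, where evaluate and loss are
stand-ins for gm.evaluate and dataset_.getLoss, not the opengm API:]

    def violated_configs(buffered_confs, gt_labels, evaluate, loss):
        """Return the buffered labelings that violate the margin.

        evaluate(labels) -> model energy; loss(labels) -> loss against
        the ground truth. Both are hypothetical stand-ins for the C++
        calls in the hunk above.
        """
        gt_val = evaluate(gt_labels)
        violated = []
        for conf in buffered_confs:
            # mirrors: ll = (argVal - mLoss) - gtVal; violated if ll < 0
            if (evaluate(conf) - loss(conf)) - gt_val < 0:
                violated.append(conf)
        return violated

    # each violated labeling then contributes features scaled by
    # 1.0 / len(violated), mirroring 1.0/double(vCount) in the C++ hunk
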
diff --git a/src/interfaces/python/opengm/learning/__init__.py b/src/interfaces/python/opengm/learning/__init__.py
index 332b94d..b91b86f 100644
--- a/src/interfaces/python/opengm/learning/__init__.py
+++ b/src/interfaces/python/opengm/learning/__init__.py
@@ -204,7 +204,7 @@ def structPerceptron(dataset, learningMode='online',eps=1e-5, maxIterations=1000
     return learner
 
 
-def subgradientSSVM(dataset, learningMode='batch',eps=1e-5, maxIterations=10000, stopLoss=0.0, learningRate=1.0, C=100.0, averaging=-1):
+def subgradientSSVM(dataset, learningMode='batch',eps=1e-5, maxIterations=10000, stopLoss=0.0, learningRate=1.0, C=100.0, averaging=-1, nConf=0):
 
     assert dataset.__class__.lossType == 'flexible'
     learnerCls = SubgradientSSVM_FlexibleLoss
@@ -227,7 +227,7 @@ def subgradientSSVM(dataset, learningMode='batch',eps=1e-5, maxIterations=10000,
     param.C = float(C)
     param.learningMode = lm
     param.averaging = int(averaging)
-    #param.nConf = int(nConf)
+    param.nConf = int(nConf)
     learner = learnerCls(dataset, param)
     return learner
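
[Editor's note: with nConf now forwarded to the C++ parameter here, and exposed by
the binding change below, it can be used exactly as fubar/toy_dataset.py does in
this same commit. A minimal usage sketch, assuming dataset, infCls, and param are
set up as in that script:]

    from opengm import learning

    # nConf > 0 enables the working-set variant; nConf = 0 keeps plain subgradient
    learner = learning.subgradientSSVM(dataset, learningRate=0.5, C=100,
                                       learningMode='batch', maxIterations=1000,
                                       averaging=-1, nConf=15)
    learner.learn(infCls=infCls, parameter=param,
                  connectedComponents=True, infMode='n')
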
 
diff --git a/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx b/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx
index f908a02..00d5a26 100644
--- a/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx
+++ b/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx
@@ -56,6 +56,7 @@ namespace opengm{
             .def_readwrite("C", &PyLearnerParam::C_)
             .def_readwrite("learningMode", &PyLearnerParam::learningMode_)
             .def_readwrite("averaging", &PyLearnerParam::averaging_)
+            .def_readwrite("nConf", &PyLearnerParam::nConf_)
         ;
 
 

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git


