[opengm] 213/386: minor stuff

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:37:51 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit 5cc4982f63332f29ca9de61cead0d9ef04fc6b80
Author: DerThorsten <thorsten.beier at iwr.uni-heidelberg.de>
Date:   Mon Jan 12 00:46:34 2015 +0100

    minor stuff
---
 fubar/toy_dataset.py                               |  69 ++++++++++---
 fubar/whorse.py                                    |  79 +++++++++++++++
 include/opengm/inference/self_fusion.hxx           |  22 ++--
 include/opengm/learning/gradient-accumulator.hxx   |   3 +
 include/opengm/learning/loss/flexibleloss.hxx      |  73 ++++++++++++--
 include/opengm/learning/subgradient_ssvm.hxx       |  53 +++++++---
 src/interfaces/python/opengm/learning/__init__.py  | 111 +++++++++++++++------
 src/interfaces/python/opengm/learning/helper.hxx   | 109 ++++++++++++++++++++
 .../python/opengm/learning/pySubgradientSSVM.cxx   |   1 +
 9 files changed, 444 insertions(+), 76 deletions(-)

diff --git a/fubar/toy_dataset.py b/fubar/toy_dataset.py
index 8f95fe0..cffe2d3 100644
--- a/fubar/toy_dataset.py
+++ b/fubar/toy_dataset.py
@@ -10,9 +10,9 @@ from make_grid_potts_dset import secondOrderImageDataset, getPbar
 
 
 
-nImages = 15 
-shape = [30, 30]
-noise = 1
+nImages = 8 
+shape = [100, 100]
+noise = 3
 imgs = []
 gts = []
 
@@ -24,12 +24,20 @@ for i in range(nImages):
 
     gtImg[shape[0]/4: 3*shape[0]/4, shape[0]/4: 3*shape[0]/4]  = 2
 
+    ra = numpy.random.randint(180)
+    #print ra 
 
-    img = gtImg + numpy.random.random(shape)*float(noise)
+    gtImg = vigra.sampling.rotateImageDegree(gtImg.astype(numpy.float32),int(ra),splineOrder=0)
+
+    if i==0 :
+        vigra.imshow(gtImg)
+        vigra.show()
 
-    if i == 1000 :
+    img = gtImg + numpy.random.random(shape)*float(noise)
+    if i==0:
         vigra.imshow(img)
         vigra.show()
+
     imgs.append(img.astype('float32'))
     gts.append(gtImg)
 
@@ -43,15 +51,33 @@ def getSelf(img):
     return img
 
 
+def getSpecial(img, sigma):
+    simg = vigra.filters.gaussianSmoothing(img, sigma=sigma)
+
+    img0  = simg**2
+    img1  = (simg - 1.0)**2
+    img2  = (simg - 2.0)**2
+
+    img0=img0[:,:,None]
+    img1=img1[:,:,None]
+    img2=img2[:,:,None]
+
+    img3 = numpy.exp(-1.0*img0)
+    img4 = numpy.exp(-1.0*img1)
+    img5 = numpy.exp(-1.0*img2)
+    return numpy.concatenate([img0,img1,img2,img3,img4,img5],axis=2)
+
+
 fUnary = [
-    getSelf,
-    partial(vigra.filters.gaussianSmoothing, sigma=1.0),
-    partial(vigra.filters.gaussianSmoothing, sigma=1.5),
-    partial(vigra.filters.gaussianSmoothing, sigma=2.0),
-    partial(vigra.filters.gaussianSmoothing, sigma=3.0)
+    partial(getSpecial, sigma=0.5),
+    partial(getSpecial, sigma=1.0),
+    partial(getSpecial, sigma=1.5),
+    partial(getSpecial, sigma=2.0),
+    partial(getSpecial, sigma=3.0),
 ]
 
 fBinary = [
+    partial(vigra.filters.gaussianGradientMagnitude, sigma=0.5),
     partial(vigra.filters.gaussianGradientMagnitude, sigma=1.0),
     partial(vigra.filters.gaussianGradientMagnitude, sigma=1.5),
     partial(vigra.filters.gaussianGradientMagnitude, sigma=2.0),
@@ -66,15 +92,26 @@ dataset,test_set = secondOrderImageDataset(imgs=imgs, gts=gts, numberOfLabels=3,
 
 
 
-learner =  learning.subgradientSSVM(dataset, learningRate=0.05, C=100, 
-                                    learningMode='batch',maxIterations=10000)
 
-#learner = learning.structMaxMarginLearner(dataset, 0.1, 0.001, 0)
+
+learner = learning.subgradientSSVM(dataset, learningRate=10.5, C=100,
+                                   learningMode='batch', maxIterations=500, averaging=2)
+
+learningModi = ['normal','reducedinference','selfFusion','reducedinferenceSelfFusion']
+lm = 0
+
+
+infCls = opengm.inference.TrwsExternal
+param = opengm.InfParam()
 
 
-learner.learn(infCls=opengm.inference.TrwsExternal, 
-              redInf=True,
-              parameter=opengm.InfParam())
+#with opengm.Timer("n  2"):
+#    learner.learn(infCls=infCls,parameter=param,connectedComponents=True,infMode='n')
+#with opengm.Timer("sf"):
+#    learner.learn(infCls=infCls,parameter=param,connectedComponents=True,infMode='sf')
+with opengm.Timer("ri"):
+    learner.learn(infCls=infCls,parameter=param,connectedComponents=True,infMode='ri')
+#with opengm.Timer("risf"):
+#    learner.learn(infCls=infCls,parameter=param,connectedComponents=True,infMode='risf')
 
 
 
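[Note on fubar/toy_dataset.py] The new getSpecial feature function replaces the
plain Gaussian-smoothing unaries: for each sigma it emits six channels per
pixel, namely the squared distances of the smoothed image to the three label
values 0, 1 and 2, plus their negated exponentials. A minimal sketch of the
same construction (pure numpy; the smoothing step is omitted, so the input is
assumed to be an already-smoothed 2-D array):

    import numpy

    def label_distance_features(simg):
        # simg: 2-D (already smoothed) image -> (H, W, 6) feature stack
        sq = [(simg - l)**2 for l in (0.0, 1.0, 2.0)]  # squared distance to each label
        ex = [numpy.exp(-1.0*d) for d in sq]           # soft closeness to each label
        return numpy.concatenate([c[:, :, None] for c in sq + ex], axis=2)

    feats = label_distance_features(numpy.random.rand(100, 100)*2.0)
    assert feats.shape == (100, 100, 6)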
diff --git a/fubar/whorse.py b/fubar/whorse.py
new file mode 100644
index 0000000..a8f7851
--- /dev/null
+++ b/fubar/whorse.py
@@ -0,0 +1,79 @@
+import glob
+import numpy
+import opengm
+from opengm import learning
+import vigra
+from progressbar import *
+
+from functools import partial
+from make_grid_potts_dset import secondOrderImageDataset
+
+# where is the dataset stored
+dsetRoot = '/home/tbeier/datasets/weizmann_horse_db/'
+imgPath = dsetRoot + 'rgb/'
+gtPath = dsetRoot + 'figure_ground/'
+    
+# how many images should be loaded
+# (all if None)
+loadN = 20
+takeNth = 3
+if loadN is None:
+    # load every horse image found in the dataset folder
+    loadN = len(glob.glob(imgPath + 'horse*.jpg'))
+
+imgs = []
+gt = []
+
+for i in range(1,loadN+1):
+
+    hName = "horse%03d.jpg" % (i,)
+    rgbImg  = vigra.impex.readImage(imgPath+hName)
+    gtImg  = vigra.impex.readImage(gtPath+hName).astype('uint32')[::takeNth,::takeNth]
+    gtImg[gtImg<125] = 0
+    gtImg[gtImg>=125] = 1
+    rgbImg = vigra.resize(rgbImg, [gtImg.shape[0],gtImg.shape[1]])
+    imgs.append(rgbImg)
+    gt.append(gtImg)
+
+
+fUnary = [
+    vigra.colors.transform_RGB2Lab,
+    vigra.colors.transform_RGB2Luv,
+    partial(vigra.filters.gaussianGradientMagnitude, sigma=1.0),
+    partial(vigra.filters.gaussianGradientMagnitude, sigma=2.0),
+]
+
+fBinary = [
+    vigra.colors.transform_RGB2Lab,
+    vigra.colors.transform_RGB2Luv,
+    partial(vigra.filters.gaussianGradientMagnitude, sigma=1.0),
+    partial(vigra.filters.gaussianGradientMagnitude, sigma=2.0),
+]
+
+
+dataset,test_set = secondOrderImageDataset(imgs=imgs, gts=gt, numberOfLabels=2, 
+                                          fUnary=fUnary, fBinary=fBinary, 
+                                          addConstFeature=False)
+
+
+
+
+learner =  learning.subgradientSSVM(dataset, learningRate=0.1, C=100, 
+                                    learningMode='batch',maxIterations=10)
+
+
+
+learner.learn(infCls=opengm.inference.QpboExternal, 
+              parameter=opengm.InfParam())
+
+
+
+# predict on the test set
+for (rgbImg, gtImg, gm) in test_set:
+    # infer for test image
+    inf = opengm.inference.QpboExternal(gm)
+    inf.infer()
+    arg = inf.arg()
+    arg = arg.reshape(gtImg.shape)
+
+    vigra.segShow(rgbImg, arg+2)
+    vigra.show()
+
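[Note on fubar/whorse.py] The Weizmann horse ground truth ships as grayscale
figure/ground images, so the script thresholds them at 125 into the two labels
0 (background) and 1 (horse) and subsamples both axes by takeNth before
resizing the RGB image to match. The binarization step on a synthetic array,
for reference:

    import numpy

    gtImg = (numpy.random.rand(6, 6)*255).astype('uint32')
    gtImg[gtImg < 125] = 0
    gtImg[gtImg >= 125] = 1
    assert gtImg.max() <= 1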
diff --git a/include/opengm/inference/self_fusion.hxx b/include/opengm/inference/self_fusion.hxx
index 37b342e..e2f282d 100644
--- a/include/opengm/inference/self_fusion.hxx
+++ b/include/opengm/inference/self_fusion.hxx
@@ -280,9 +280,9 @@ public:
     typedef INFERENCE ToFuseInferenceType;
 
     enum FusionSolver{
-        QpboFusion,
-        CplexFusion,
-        LazyFlipperFusion
+        QpboFusion=0,
+        CplexFusion=1,
+        LazyFlipperFusion=2
     };
 
 
@@ -294,7 +294,7 @@ public:
 
     template<class _GM,class _ACC>
     struct RebindGmAndAcc{
-        typedef typename INFERENCE:: template RebindGm<_GM, _ACC>::type RebindedInf;
+        typedef typename INFERENCE:: template RebindGmAndAcc<_GM, _ACC>::type RebindedInf;
         typedef SelfFusion<RebindedInf> type;
     };
 
@@ -330,7 +330,7 @@ public:
         const P & p
       )
       : fuseNth_(p.fuseNth_),
-        fusionSolver_(p.fusionSolver_),
+        fusionSolver_(),
         infParam_(p.infParam_),
         maxSubgraphSize_(p.maxSubgraphSize_),
         reducedInf_(p.reducedInf_),
@@ -338,8 +338,16 @@ public:
         tentacles_(p.tentacles_),
         fusionTimeLimit_(p.fusionTimeLimit_),
         numStopIt_(p.numStopIt_)
-      {
-
+      { 
+        if(p.fusionSolver_ == 0){
+            fusionSolver_ = QpboFusion;
+        }
+        else if(p.fusionSolver_ == 1){
+            fusionSolver_ = CplexFusion;
+        }
+        else if(p.fusionSolver_ == 2){
+            fusionSolver_ = LazyFlipperFusion;
+        }
       }
 
       UInt64Type fuseNth_;
diff --git a/include/opengm/learning/gradient-accumulator.hxx b/include/opengm/learning/gradient-accumulator.hxx
index e39b9f4..601a68f 100644
--- a/include/opengm/learning/gradient-accumulator.hxx
+++ b/include/opengm/learning/gradient-accumulator.hxx
@@ -104,6 +104,9 @@ struct FeatureAccumulator{
             accWeights_[i] = 0.0;
         }
     }
+    const Weights<double> &  getWeights(const size_t wi)const{
+        // note: wi is unused here; the whole accumulated weight vector is returned
+        return accWeights_;
+    }
     double getWeight(const size_t wi)const{
         return accWeights_[wi];
     }
diff --git a/include/opengm/learning/loss/flexibleloss.hxx b/include/opengm/learning/loss/flexibleloss.hxx
index 6c3fa9f..a7c3210 100644
--- a/include/opengm/learning/loss/flexibleloss.hxx
+++ b/include/opengm/learning/loss/flexibleloss.hxx
@@ -30,7 +30,6 @@ public:
 
         Parameter(){
             lossType_ = Hamming;
-            lambdaWeight = 1.0;
         }
 
 
@@ -57,8 +56,6 @@ public:
         static std::size_t getLossId() { return lossId_; }
 
         LossType lossType_;
-        double lambdaWeight;
-
         std::vector<double>     nodeLossMultiplier_;
         std::vector<double>     labelLossMultiplier_;
         std::vector<double>     factorMultipier_;
@@ -67,7 +64,7 @@ public:
 
 
     private:
-        static const std::size_t lossId_ = 16002;
+        static const std::size_t lossId_ = 16006;
 
     };
 
@@ -118,6 +115,13 @@ inline void FlexibleLoss::Parameter::save(hid_t& groupHandle) const {
     name.push_back(this->getLossId());
     marray::hdf5::save(groupHandle,"lossId",name);
 
+
+    std::vector<size_t> lossType(1, size_t(lossType_));
+    marray::hdf5::save(groupHandle,"lossType",lossType);
+
+    if (this->factorMultipier_.size() > 0) {
+        marray::hdf5::save(groupHandle,"factorLossMultiplier",this->factorMultipier_);
+    }
     if (this->nodeLossMultiplier_.size() > 0) {
         marray::hdf5::save(groupHandle,"nodeLossMultiplier",this->nodeLossMultiplier_);
     }
@@ -127,15 +131,47 @@ inline void FlexibleLoss::Parameter::save(hid_t& groupHandle) const {
 }
 
 inline void FlexibleLoss::Parameter::load(const hid_t& groupHandle) {
+    std::vector<size_t> lossType;
+    marray::hdf5::loadVec(groupHandle, "lossType", lossType);
+    if(lossType[0] == size_t(Hamming)){
+        lossType_ = Hamming;
+    }
+    else if(lossType[0] == size_t(L1)){
+        lossType_ = L1;
+    }
+    else if(lossType[0] == size_t(L2)){
+        lossType_ = L2;
+    }
+    else if(lossType[0] == size_t(Partition)){
+        lossType_ = Partition;
+    }
+    else if(lossType[0] == size_t(ConfMat)){
+        lossType_ = ConfMat;
+    }
+    else{
+        throw opengm::RuntimeError("unknown loss type in FlexibleLoss::Parameter::load");
+    }
+
     if (H5Dopen(groupHandle, "nodeLossMultiplier", H5P_DEFAULT) >= 0) {
         marray::hdf5::loadVec(groupHandle, "nodeLossMultiplier", this->nodeLossMultiplier_);
-    } else {
+    } 
+    else {
         std::cout << "nodeLossMultiplier of FlexibleLoss not found, setting default values" << std::endl;
     }
 
+    if (H5Dopen(groupHandle, "factorLossMultiplier", H5P_DEFAULT) >= 0) {
+        marray::hdf5::loadVec(groupHandle, "factorLossMultiplier", this->factorMultipier_);
+    } 
+    else {
+        std::cout << "factorLossMultiplier of FlexibleLoss not found, setting default values" << std::endl;
+    }
+
     if (H5Dopen(groupHandle, "labelLossMultiplier", H5P_DEFAULT) >= 0) {
         marray::hdf5::loadVec(groupHandle, "labelLossMultiplier", this->labelLossMultiplier_);
-    } else {
+    } 
+    else {
         std::cout << "labelLossMultiplier of FlexibleLoss not found, setting default values" << std::endl;
     }
 }
@@ -143,7 +179,9 @@ inline void FlexibleLoss::Parameter::load(const hid_t& groupHandle) {
 template<class GM, class IT1, class IT2>
 double FlexibleLoss::loss(const GM & gm, IT1 labelBegin, const IT1 labelEnd, IT2 GTBegin, const IT2 GTEnd) const
 {
-
+    typedef typename GM::LabelType LabelType;
+    typedef typename GM::IndexType IndexType;
+    typedef typename GM::ValueType ValueType;
 
     double loss = 0.0;
     size_t nodeIndex = 0;
@@ -158,7 +196,7 @@ double FlexibleLoss::loss(const GM & gm, IT1 labelBegin, const IT1 labelEnd, IT2
         const size_t norm = param_.lossType_ == Parameter::L1 ? 1 : 2;
         for(; labelBegin!= labelEnd; ++labelBegin, ++GTBegin, ++nodeIndex){
             if(*labelBegin != *GTBegin){            
-                loss += param_.getNodeLossMultiplier(nodeIndex) * std::pow(std::abs(*GTBegin - *labelBegin), norm) * param_.lambdaWeight;
+                loss += param_.getNodeLossMultiplier(nodeIndex) * std::pow(std::abs(*GTBegin - *labelBegin), norm);
             }
         }
     }
@@ -166,7 +204,22 @@ double FlexibleLoss::loss(const GM & gm, IT1 labelBegin, const IT1 labelEnd, IT2
         throw opengm::RuntimeError("ConfMat Loss is not yet implemented");
     }
     else if(param_.lossType_ == Parameter::Partition){
-        throw opengm::RuntimeError("Partition / Multicut Loss is not yet implemented");
+
+        const size_t nFac = gm.numberOfFactors();
+
+        for(size_t fi=0; fi<nFac; ++fi){
+            const size_t nVar = gm[fi].numberOfVariables();
+            OPENGM_CHECK_OP(nVar,==,2,"Partition / Multicut Loss is only allowed if the graphical model has only"
+                                      " second order factors (this might be changed in the future)");
+            const IndexType vis[2] = { gm[fi].variableIndex(0), gm[fi].variableIndex(1)};
+            const double facVal = param_.getFactorLossMultiplier(fi);
+            // in the gt they are in the same cluster
+            if( (GTBegin[vis[0]] == GTBegin[vis[1]]) !=
+                (labelBegin[vis[0]] == labelBegin[vis[1]])  ){
+                loss +=facVal;
+            }
+        }
     }
     else{
         throw opengm::RuntimeError("INTERNAL ERROR: unknown Loss Type");
@@ -202,7 +255,7 @@ void FlexibleLoss::addLoss(GM& gm, IT gt) const
             ExplicitFunction f(&numL, &numL+1, 0);
             const LabelType gtL = *gt;
             for(LabelType l = 0; l < numL; ++l){
-                f(l) = - param_.getNodeLossMultiplier(i) * std::pow(std::abs(gtL - l), norm) * param_.lambdaWeight;
+                f(l) = - param_.getNodeLossMultiplier(i) * std::pow(std::abs(gtL - l), norm);
             }
             f(*gt) = 0;
             ++gt;
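[Note on include/opengm/learning/loss/flexibleloss.hxx] The previously
unimplemented Partition loss now iterates over all (required to be
second-order) factors and pays the factor's loss multiplier whenever ground
truth and labeling disagree about whether the factor's edge is cut, i.e.
whether its two endpoints carry the same label. A plain-Python rendering of
that rule (edges as index pairs with a per-edge weight; names are
illustrative, not the C++ API):

    def partition_loss(edges, weights, gt, labels):
        # an edge contributes iff exactly one of gt/labels merges its endpoints
        loss = 0.0
        for (u, v), w in zip(edges, weights):
            if (gt[u] == gt[v]) != (labels[u] == labels[v]):
                loss += w
        return loss

    assert partition_loss([(0, 1), (1, 2)], [1.0, 2.0],
                          gt=[0, 0, 1], labels=[0, 1, 1]) == 3.0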
diff --git a/include/opengm/learning/subgradient_ssvm.hxx b/include/opengm/learning/subgradient_ssvm.hxx
index ca2d412..8efdc11 100644
--- a/include/opengm/learning/subgradient_ssvm.hxx
+++ b/include/opengm/learning/subgradient_ssvm.hxx
@@ -2,11 +2,13 @@
 #ifndef OPENGM_SUBGRADIENT_SSVM_LEARNER_HXX
 #define OPENGM_SUBGRADIENT_SSVM_LEARNER_HXX
 
+#include <iomanip>
 #include <vector>
 #include <opengm/inference/inference.hxx>
 #include <opengm/graphicalmodel/weights.hxx>
 #include <opengm/utilities/random.hxx>
 #include <opengm/learning/gradient-accumulator.hxx>
+#include <opengm/learning/weight_averaging.hxx>
 #include <omp.h>
 
 
@@ -48,6 +50,7 @@ namespace opengm {
                 learningRate_ = 1.0;
                 C_ = 1.0;
                 learningMode_ = Batch;
+                averaging_ = -1;
             }       
 
             double eps_;
@@ -56,6 +59,7 @@ namespace opengm {
             double learningRate_;
             double C_;
             LearningMode learningMode_;
+            int averaging_;
         };
 
 
@@ -102,15 +106,19 @@ namespace opengm {
         Parameter para_;
         size_t iteration_;
         FeatureAcc featureAcc_;
+        WeightAveraging<double> weightAveraging_;
     }; 
 
     template<class DATASET>
     SubgradientSSVM<DATASET>::SubgradientSSVM(DATASET& ds, const Parameter& p )
-    : dataset_(ds), para_(p),iteration_(0),featureAcc_(ds.getNumberOfWeights())
+    :   dataset_(ds), 
+        para_(p),
+        iteration_(0),
+        featureAcc_(ds.getNumberOfWeights()),
+        weightAveraging_(ds.getWeights(),p.averaging_)
     {
         featureAcc_.resetWeights();
         weights_ = opengm::learning::Weights<double>(ds.getNumberOfWeights());
-  
     }
 
 
@@ -128,18 +136,18 @@ namespace opengm {
         const size_t nWegihts = dataset_.getNumberOfWeights();
 
         
-
+        // (re)initialise all weights to zero before learning starts
+        for(size_t wi=0; wi<nWegihts; ++wi){
+            dataset_.getWeights().setWeight(wi, 0.0);
+        }
 
 
 
         if(para_.learningMode_ == Parameter::Online){
             RandomUniform<size_t> randModel(0, nModels);
-            std::cout<<"online mode\n";
+            //std::cout<<"online mode\n";
             for(iteration_=0 ; iteration_<para_.maxIterations_; ++iteration_){
 
-                if(iteration_%nModels*10==0){
-                    std::cout<<"loss :"<<dataset_. template getTotalLoss<INF>(para)<<"\n";
-                }
 
 
                 // get random model
@@ -158,10 +166,17 @@ namespace opengm {
                 // update weights
                 const double wChange =updateWeights();
 
+                if(iteration_ % (nModels*2) == 0){
+                    std::cout << '\r'
+                              << std::setw(6) << std::setfill(' ') << iteration_ << ':'
+                              << std::setw(8) << dataset_. template getTotalLossParallel<INF>(para) <<"  "<< std::flush;
+
+                }
+
             }
         }
         else if(para_.learningMode_ == Parameter::Batch){
-            std::cout<<"batch mode\n";
+            //std::cout<<"batch mode\n";
             for(iteration_=0 ; iteration_<para_.maxIterations_; ++iteration_){
                 // this 
                 
@@ -213,7 +228,9 @@ namespace opengm {
                     omp_unset_lock(&modelLockUnlock);
                 }
                 if(iteration_%1==0){
-                    std::cout<<iteration_<<" loss :"<< -1.0*totalLoss <<"\n";
+                    std::cout << '\r'
+                              << std::setw(6) << std::setfill(' ') << iteration_ << ':'
+                              << std::setw(8) << -1.0*totalLoss <<"  "<< std::flush;
                 }
                 // update the weights
                 const double wChange =updateWeights();
@@ -222,7 +239,7 @@ namespace opengm {
         }
         else if(para_.learningMode_ == Parameter::WorkingSets){
 
-            std::cout<<"working sets mode\n";
+            //std::cout<<"working sets mode\n";
             std::vector< std::vector< std::vector<LabelType> > > A(nModels);
 
             RandomUniform<size_t> randModel(0, nModels);
@@ -259,8 +276,11 @@ namespace opengm {
                         dataset_.getWeights().setWeight(wi, wNew);
                     }
                 }
-                if(iteration_%nModels == 0 ){
-                    std::cout<<iteration_<<" loss :"<<dataset_. template getTotalLossParallel<INF>(para)<<"\n";
+                if(iteration_ % (nModels*2) == 0){
+                    std::cout << '\r'
+                              << std::setw(6) << std::setfill(' ') << iteration_ << ':'
+                              << std::setw(8) << dataset_. template getTotalLossParallel<INF>(para)<<"  "<< std::flush;
+
                 }
                 dataset_.unlockModel(gmi);
             }
@@ -275,6 +295,7 @@ namespace opengm {
         const size_t nWegihts = dataset_.getNumberOfWeights();
 
         WeightsType p(nWegihts);
+        WeightsType newWeights(nWegihts);
 
         if(para_.learningMode_ == Parameter::Batch){
             for(size_t wi=0; wi<nWegihts; ++wi){
@@ -295,9 +316,13 @@ namespace opengm {
         for(size_t wi=0; wi<nWegihts; ++wi){
             const double wOld = dataset_.getWeights().getWeight(wi);
             const double wNew = wOld - (para_.learningRate_/double(iteration_+1))*p[wi];
-            wChange += std::pow(wOld-wNew,2);
-            dataset_.getWeights().setWeight(wi, wNew);
+            wChange += std::pow(wOld-wNew,2);
+            newWeights[wi] = wNew;
 }
+
+        // route the update through the weight-averaging functor
+        weightAveraging_(newWeights);
+
         weights_ = dataset_.getWeights();
         return wChange;
     }
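[Note on include/opengm/learning/subgradient_ssvm.hxx] updateWeights no longer
writes the subgradient step into the dataset directly; it collects the step
into newWeights and hands it to the new WeightAveraging functor, selected by
the averaging parameter (default -1, i.e. plain updates). The exact scheme
lives in opengm/learning/weight_averaging.hxx, which is not part of this
commit, so treat the following running-average sketch of such a functor as an
assumption, not the shipped implementation:

    class RunningWeightAverage(object):
        def __init__(self, n_weights):
            self.t = 0
            self.avg = [0.0]*n_weights

        def __call__(self, new_weights):
            # maintain avg_t = avg_{t-1} + (w_t - avg_{t-1}) / t
            self.t += 1
            self.avg = [a + (w - a)/self.t
                        for a, w in zip(self.avg, new_weights)]
            return self.avg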
diff --git a/src/interfaces/python/opengm/learning/__init__.py b/src/interfaces/python/opengm/learning/__init__.py
index 24a9217..87e1914 100644
--- a/src/interfaces/python/opengm/learning/__init__.py
+++ b/src/interfaces/python/opengm/learning/__init__.py
@@ -9,27 +9,6 @@ from opengm import to_native_boost_python_enum_converter
 
 
 
-def _extendedLearn(self, infCls, parameter = None, redInf=False, persistency=True, tentacles=False, connectedComponents=False):
-    if parameter is None:
-        import opengm
-        parameter = opengm.InfParam()
-    cppParam  =  infCls.get_cpp_parameter(operator='adder',accumulator='minimizer',parameter=parameter)
-    if not redInf:
-        
-        try:
-          self._learn(cppParam)
-        except Exception, e:
-            #print "an error ",e,"\n\n"
-            if (str(e).find("did not match C++ signature")):
-                raise RuntimeError("infCls : '%s' is not (yet) exported from c++ to python for learning"%str(infCls))
-    else:
-        try:
-          self._learnReducedInf(cppParam, bool(persistency), bool(tentacles),bool(connectedComponents))
-        except Exception, e:
-            #print "an error ",e,"\n\n"
-            if (str(e).find("did not match C++ signature")):
-                raise RuntimeError("infCls : '%s' is not (yet) exported from c++ to python for learning with reduced inference"%str(infCls))
-
 
 def _extendedGetLoss(self, model_idx, infCls, parameter = None):
     if parameter is None:
@@ -71,15 +50,89 @@ class LossParameter(FlexibleLossParameter):
 
 
 
-GridSearch_FlexibleLoss.learn  =_extendedLearn
-#MaxLikelihood_FlexibleLoss.learn  =_extendedLearn
-StructPerceptron_FlexibleLoss.learn  =_extendedLearn
-SubgradientSSVM_FlexibleLoss.learn  =_extendedLearn
+def extend_learn():
+    
+    def learner_learn_normal(self, infCls, parameter = None):
+        if parameter is None:
+            import opengm
+            parameter = opengm.InfParam()
+        cppParam  =  infCls.get_cpp_parameter(operator='adder',accumulator='minimizer',parameter=parameter)
+        try:
+            self._learn(cppParam)
+        except Exception, e:
+            # boost::python reports unexported overloads with this message
+            if "did not match C++ signature" in str(e):
+                raise RuntimeError("infCls : '%s' is not (yet) exported from c++ to python for learning"%str(infCls))
+            raise
+
+
+    def learner_learn_reduced_inf(self, infCls, parameter = None, persistency=True, tentacles=False, connectedComponents=False):
+        if parameter is None:
+            import opengm
+            parameter = opengm.InfParam()
+        cppParam  =  infCls.get_cpp_parameter(operator='adder',accumulator='minimizer',parameter=parameter)
+        try:
+            self._learnReducedInf(cppParam, bool(persistency), bool(tentacles), bool(connectedComponents))
+        except Exception, e:
+            if "did not match C++ signature" in str(e):
+                raise RuntimeError("infCls : '%s' is not (yet) exported from c++ to python for learning with reduced inference"%str(infCls))
+            raise
+
+    def learner_learn_reduced_inf_self_fusion(self, infCls, parameter = None, persistency=True, tentacles=False, connectedComponents=False):
+        if parameter is None:
+            import opengm
+            parameter = opengm.InfParam()
+        cppParam  =  infCls.get_cpp_parameter(operator='adder',accumulator='minimizer',parameter=parameter)
+        try:
+            self._learnReducedInfSelfFusion(cppParam, bool(persistency), bool(tentacles), bool(connectedComponents))
+        except Exception, e:
+            if "did not match C++ signature" in str(e):
+                raise RuntimeError("infCls : '%s' is not (yet) exported from c++ to python for learning with reduced inference self fusion"%str(infCls))
+            raise
+
+    def learner_learn_self_fusion(self, infCls, parameter = None, fuseNth=1, fusionSolver="qpbo",maxSubgraphSize=2,
+                                  redInf=True, connectedComponents=False, fusionTimeLimit=100.9, numStopIt=10):
+        if parameter is None:
+            import opengm
+            parameter = opengm.InfParam()
+        cppParam  =  infCls.get_cpp_parameter(operator='adder',accumulator='minimizer',parameter=parameter)
+        try:
+            self._learnSelfFusion(cppParam, int(fuseNth), str(fusionSolver), int(maxSubgraphSize), bool(redInf),
+                                  bool(connectedComponents), float(fusionTimeLimit), int(numStopIt))
+        except Exception, e:
+            if "did not match C++ signature" in str(e):
+                raise RuntimeError("infCls : '%s' is not (yet) exported from c++ to python for learning with self fusion inference"%str(infCls))
+            raise
+
+    def learner_learn(self, infCls, parameter=None, infMode='normal',**kwargs):
+        assert infMode in ['normal','n','selfFusion','sf','reducedInference','ri','reducedInferenceSelfFusion','risf']
+
+        if infMode in ['normal','n']:
+            self.learnNormal(infCls=infCls, parameter=parameter)
+        elif infMode in ['selfFusion','sf']:
+            self.learnSelfFusion(infCls=infCls, parameter=parameter,**kwargs)
+        elif infMode in ['reducedInference','ri']:
+            self.learnReducedInf(infCls=infCls, parameter=parameter,**kwargs)
+        elif infMode in ['reducedInferenceSelfFusion','risf']:
+            self.learnReducedInfSelfFusion(infCls=infCls, parameter=parameter,**kwargs)
+
+    # all learner classes
+    learnerClss = [GridSearch_FlexibleLoss, StructPerceptron_FlexibleLoss,  SubgradientSSVM_FlexibleLoss] 
+    if opengmConfig.withCplex or opengmConfig.withGurobi :
+        learnerClss.append(StructMaxMargin_Bundle_FlexibleLoss)
+
+    for learnerCls in learnerClss:
+        learnerCls.learn = learner_learn
+        learnerCls.learnNormal = learner_learn_normal
+        learnerCls.learnReducedInf = learner_learn_reduced_inf
+        learnerCls.learnSelfFusion = learner_learn_self_fusion
+        learnerCls.learnReducedInfSelfFusion = learner_learn_reduced_inf_self_fusion
+
+extend_learn()
+del extend_learn
+
 
 
 
-if opengmConfig.withCplex or opengmConfig.withGurobi :
-    StructMaxMargin_Bundle_FlexibleLoss = _extendedLearn
 
 DatasetWithFlexibleLoss.getLoss = _extendedGetLoss
 DatasetWithFlexibleLoss.getTotalLoss = _extendedGetTotalLoss
@@ -149,7 +202,7 @@ def structPerceptron(dataset, learningMode='online',eps=1e-5, maxIterations=1000
     return learner
 
 
-def subgradientSSVM(dataset, learningMode='batch',eps=1e-5, maxIterations=10000, stopLoss=0.0, learningRate=1.0, C=100.0):
+def subgradientSSVM(dataset, learningMode='batch',eps=1e-5, maxIterations=10000, stopLoss=0.0, learningRate=1.0, C=100.0, averaging=-1):
 
     assert dataset.__class__.lossType == 'flexible'
     learnerCls = SubgradientSSVM_FlexibleLoss
@@ -166,7 +219,6 @@ def subgradientSSVM(dataset, learningMode='batch',eps=1e-5, maxIterations=10000,
         lm = learningModeEnum.batch
     if learningMode == 'workingSets':
         lm = learningModeEnum.workingSets
-
     param = learnerParamCls()
     param.eps = float(eps)
     param.maxIterations = int(maxIterations)
@@ -174,6 +226,7 @@ def subgradientSSVM(dataset, learningMode='batch',eps=1e-5, maxIterations=10000,
     param.learningRate = float(learningRate)
     param.C = float(C)
     param.learningMode = lm
+    param.averaging = int(averaging)
     learner = learnerCls(dataset, param)
     return learner
 
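[Note on src/interfaces/python/opengm/learning/__init__.py] Every learner now
exposes a single learn() entry point that dispatches on infMode ('normal'/'n',
'selfFusion'/'sf', 'reducedInference'/'ri', 'reducedInferenceSelfFusion'/'risf')
to learnNormal, learnSelfFusion, learnReducedInf and learnReducedInfSelfFusion,
and subgradientSSVM() gained the averaging keyword. Typical usage, mirroring
the calls in fubar/toy_dataset.py (dataset construction omitted; see that
script):

    import opengm
    from opengm import learning

    learner = learning.subgradientSSVM(dataset, learningRate=10.5, C=100,
                                       learningMode='batch',
                                       maxIterations=500, averaging=2)
    learner.learn(infCls=opengm.inference.TrwsExternal,
                  parameter=opengm.InfParam(),
                  connectedComponents=True, infMode='ri')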
diff --git a/src/interfaces/python/opengm/learning/helper.hxx b/src/interfaces/python/opengm/learning/helper.hxx
index 707cdb6..4f0c8eb 100644
--- a/src/interfaces/python/opengm/learning/helper.hxx
+++ b/src/interfaces/python/opengm/learning/helper.hxx
@@ -9,6 +9,7 @@
 
 #include <opengm/inference/icm.hxx>
 #include <opengm/inference/lazyflipper.hxx>
+#include <opengm/inference/self_fusion.hxx>
 #include <opengm/learning/gridsearch-learning.hxx>
 #include <opengm/inference/messagepassing/messagepassing.hxx>
 
@@ -43,6 +44,7 @@ public:
         learner. template learn<INF>(param);
     }
 
+    #ifdef WITH_QPBO
     template<class INF>
     static void pyLearn_ReducedInf(
         LEARNER & learner, 
@@ -74,6 +76,103 @@ public:
 
         learner. template learn<RedInf>(redInfPara);
     }
+    #endif
+
+
+    #ifdef WITH_QPBO
+    template<class INF>
+    static void pyLearn_ReducedInfSelfFusion(
+        LEARNER & learner, 
+        const typename INF::Parameter & param,
+        const bool persistency,
+        const bool tentacles,
+        const bool connectedComponents
+    )
+    {
+
+        typedef typename INF::GraphicalModelType GmType;
+        typedef typename opengm::ReducedInferenceHelper<GmType>::InfGmType RedInfGm;
+
+        // rebind the inference to the RedInfGm
+        typedef typename INF:: template RebindGm<RedInfGm>::type RedInfRebindInf;
+
+
+        typedef typename RedInfRebindInf::Parameter RedInfRebindInfParam;
+        typedef opengm::ReducedInference<GmType, opengm::Minimizer, RedInfRebindInf> RedInf;
+        typedef typename RedInf::Parameter RedInfParam;
+
+        RedInfRebindInfParam redInfRebindInfParam(param);
+
+        RedInfParam redInfPara;
+        redInfPara.subParameter_ = redInfRebindInfParam;
+        redInfPara.Persistency_ = persistency;
+        redInfPara.Tentacle_ = tentacles;
+        redInfPara.ConnectedComponents_ = connectedComponents;
+
+
+        typedef opengm::SelfFusion<RedInf> SelfFusionInf;
+        typedef typename SelfFusionInf::Parameter SelfFusionInfParam;
+        SelfFusionInfParam sfParam;
+
+        sfParam.infParam_ = redInfPara;
+        sfParam.fuseNth_ = 10;
+        sfParam.maxSubgraphSize_ = 2;
+        sfParam.reducedInf_ = true;
+        sfParam.tentacles_ = false;
+        sfParam.connectedComponents_ = true;
+        sfParam.fusionTimeLimit_ = 100.0;
+        sfParam.numStopIt_ = 10;
+        sfParam.fusionSolver_ = SelfFusionInf::QpboFusion;
+
+        learner. template learn<SelfFusionInf>(sfParam);
+    }
+    #endif
+
+
+    template<class INF>
+    static void pyLearn_SelfFusion(
+        LEARNER & learner, 
+        const typename INF::Parameter & param,
+        const size_t fuseNth,
+        const std::string & fusionSolver,
+        const UInt64Type maxSubgraphSize,
+        const bool reducedInf,
+        const bool connectedComponents,
+        const double fusionTimeLimit,
+        const size_t numStopIt
+    )
+    {
+
+        typedef typename INF::GraphicalModelType GmType;
+        
+        typedef opengm::SelfFusion<INF> SelfFusionInf;
+        typedef typename SelfFusionInf::Parameter SelfFusionInfParam;
+
+
+        SelfFusionInfParam sfParam;
+
+        if(fusionSolver ==std::string("qpbo")){
+            sfParam.fusionSolver_ = SelfFusionInf::QpboFusion;
+        }
+        else if(fusionSolver ==std::string("cplex")){
+            sfParam.fusionSolver_ = SelfFusionInf::CplexFusion;
+        }
+        else if(fusionSolver == std::string("lf")){
+            sfParam.fusionSolver_ = SelfFusionInf::LazyFlipperFusion;
+        }
+        else{
+            throw opengm::RuntimeError("unknown fusionSolver '" + fusionSolver + "'");
+        }
+
+        sfParam.infParam_ = param;
+        sfParam.fuseNth_ = fuseNth;
+        sfParam.maxSubgraphSize_ = maxSubgraphSize;
+        sfParam.reducedInf_ = reducedInf;
+        sfParam.tentacles_ = false;
+        sfParam.connectedComponents_ = connectedComponents;
+        sfParam.fusionTimeLimit_ = fusionTimeLimit;
+        sfParam.numStopIt_ = numStopIt;
+
+        learner. template learn<SelfFusionInf>(sfParam);
+    }
+
 
 
 
@@ -129,6 +228,16 @@ public:
                 .def("_learnReducedInf",&pyLearn_ReducedInf<Cplex>)
                 #endif
             #endif
+
+            // SELF FUSION
+            #ifdef WITH_TRWS
+            .def("_learnSelfFusion",&pyLearn_SelfFusion<TrwsExternal>)
+            #endif
+
+            // REDUCED INFERENCE SELF FUSION
+            #ifdef WITH_TRWS
+            .def("_learnReducedInfSelfFusion",&pyLearn_ReducedInfSelfFusion<TrwsExternal>)
+            #endif
         ;
     }
 };
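[Note on src/interfaces/python/opengm/learning/helper.hxx] pyLearn_SelfFusion
maps the Python-side fusionSolver string to the SelfFusion enum ('qpbo' ->
QpboFusion, 'cplex' -> CplexFusion, 'lf' -> LazyFlipperFusion; anything else
now raises a RuntimeError). From Python it is reached through the
learnSelfFusion wrapper added in learning/__init__.py; continuing from a
learner constructed as in the previous note, with the keyword defaults
defined in that wrapper:

    learner.learn(infCls=opengm.inference.TrwsExternal,
                  parameter=opengm.InfParam(),
                  infMode='sf',
                  fuseNth=1, fusionSolver='qpbo',
                  maxSubgraphSize=2, redInf=True,
                  connectedComponents=False,
                  fusionTimeLimit=100.9, numStopIt=10)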
diff --git a/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx b/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx
index d7738df..11f7d1a 100644
--- a/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx
+++ b/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx
@@ -56,6 +56,7 @@ namespace opengm{
             .def_readwrite("learningRate", &PyLearnerParam::learningRate_)
             .def_readwrite("C", &PyLearnerParam::C_)
             .def_readwrite("learningMode", &PyLearnerParam::learningMode_)
+            .def_readwrite("averaging", &PyLearnerParam::averaging_)
         ;
 
 

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git