[opengm] 210/386: my local changes:

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:37:50 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit 714e2afec93431c3bb740e1f04e4a727f3e32d67
Author: DerThorsten <thorsten.beier at iwr.uni-heidelberg.de>
Date:   Sun Jan 11 14:15:58 2015 +0100

    my local changes:
---
 fubar/brown_horse.py                               |  10 +-
 fubar/make_grid_potts_dset.py                      |  29 ++--
 fubar/real_example_2.py                            |   6 +-
 include/opengm/graphicalmodel/weights.hxx          | 128 +++++++++++------
 include/opengm/inference/lazyflipper.hxx           | 138 +++++++++---------
 include/opengm/inference/mqpbo.hxx                 |   6 +-
 include/opengm/inference/reducedinference.hxx      |   6 +-
 include/opengm/learning/dataset/dataset.hxx        |  13 ++
 include/opengm/learning/subgradient_ssvm.hxx       |  47 ++++++-
 src/interfaces/python/opengm/learning/__init__.py  |  25 +++-
 src/interfaces/python/opengm/learning/helper.hxx   | 156 ++++++++++++++-------
 .../python/opengm/learning/pySubgradientSSVM.cxx   |   1 +
 12 files changed, 380 insertions(+), 185 deletions(-)

diff --git a/fubar/brown_horse.py b/fubar/brown_horse.py
index 60dee6d..eb189de 100644
--- a/fubar/brown_horse.py
+++ b/fubar/brown_horse.py
@@ -37,7 +37,7 @@ imgPath = dsetRoot + 'brown_horse/'
 gtBasePath = dsetRoot + 'figure_ground/'
 
 imgFiles = glob.glob(imgPath+'*.jpg')
-takeNth = 1
+takeNth = 3
 imgs = []
 gts = []
 pbar = getPbar(len(imgFiles), 'Load Image')
@@ -101,14 +101,14 @@ dataset,test_set = secondOrderImageDataset(imgs=imgs, gts=gts, numberOfLabels=2,
 
 
 
-learner =  learning.subgradientSSVM(dataset, learningRate=0.3, C=100, 
-                                    learningMode='batch',maxIterations=500)
+learner =  learning.subgradientSSVM(dataset, learningRate=0.05, C=100, 
+                                    learningMode='workingSets',maxIterations=1000)
 
 #learner = learning.structMaxMarginLearner(dataset, 0.1, 0.001, 0)
 
 
-learner.learn(infCls=opengm.inference.QpboExternal, 
-              parameter=opengm.InfParam())
+learner.learn(infCls=opengm.inference.LazyFlipper, 
+              parameter=opengm.InfParam(maxSubgraphSize=3))
 
 
 
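The hunk above subsamples the training images (every third image), lowers the learning rate from 0.3 to 0.05, switches the learner to the new working-sets mode, and swaps external QPBO for LazyFlipper, an OpenGM-native move-making solver, so loss-augmented inference no longer needs the external QPBO build. A minimal sketch of the new learner setup; the tiny dataset created via learning.createDataset is a hypothetical stand-in for the one built by secondOrderImageDataset:

    import opengm
    from opengm import learning

    # hypothetical stand-in dataset: 12 weights, Hamming loss ('h')
    dataset = learning.createDataset(numWeights=12, loss='h')

    learner = learning.subgradientSSVM(dataset, learningRate=0.05, C=100,
                                       learningMode='workingSets',
                                       maxIterations=1000)

    # LazyFlipper flips subgraphs of up to 3 variables per move
    learner.learn(infCls=opengm.inference.LazyFlipper,
                  parameter=opengm.InfParam(maxSubgraphSize=3))
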
diff --git a/fubar/make_grid_potts_dset.py b/fubar/make_grid_potts_dset.py
index cda360b..4c8f2c4 100644
--- a/fubar/make_grid_potts_dset.py
+++ b/fubar/make_grid_potts_dset.py
@@ -14,22 +14,30 @@ def getPbar(size, name):
     return pbar
 
 def secondOrderImageDataset(imgs, gts, numberOfLabels, fUnary, fBinary, addConstFeature, trainFraction=0.75):
-    assert numberOfLabels == 2
+
 
     # train test
     nImg = len(imgs)
     nTrain = int(float(nImg)*trainFraction+0.5)
     nTest = (nImg-nTrain)
     
-    
+    def getFeat(fComp, im):
+        res = []
+        for f in fComp:
+            r = f(im)
+            if r.ndim == 2:
+                r = r[:,:, None]
+            res.append(r)
+        return res
 
     # compute features for a single image
     tImg = imgs[0]
-    unaryFeat = [f(tImg) for f in fUnary]
+    unaryFeat = getFeat(fUnary, tImg)
     unaryFeat = numpy.nan_to_num(numpy.concatenate(unaryFeat,axis=2).view(numpy.ndarray))
     nUnaryFeat = unaryFeat.shape[-1] + int(addConstFeature)
+    nUnaryFeat *= numberOfLabels - int(numberOfLabels==2)
 
-    binaryFeat = [f(tImg) for f in fBinary]
+    binaryFeat = getFeat(fBinary, tImg)
     binaryFeat = numpy.nan_to_num(numpy.concatenate(binaryFeat,axis=2).view(numpy.ndarray))
     nBinaryFeat = binaryFeat.shape[-1] + int(addConstFeature)
     nWeights  = nUnaryFeat + nBinaryFeat
@@ -51,6 +59,8 @@ def secondOrderImageDataset(imgs, gts, numberOfLabels, fUnary, fBinary, addConst
     dataset = learning.createDataset(numWeights=nWeights, loss='h')
     weights = dataset.getWeights()
     uWeightIds = numpy.arange(nUnaryFeat ,dtype='uint64')
+    if numberOfLabels != 2:
+        uWeightIds = uWeightIds.reshape([numberOfLabels,-1])
     bWeightIds = numpy.arange(start=nUnaryFeat,stop=nWeights,dtype='uint64')
 
     def makeModel(img,gt):
@@ -58,13 +68,16 @@ def secondOrderImageDataset(imgs, gts, numberOfLabels, fUnary, fBinary, addConst
         numVar = shape[0] * shape[1]
 
         # make model
-        gm = opengm.gm(numpy.ones(numVar)*2)
+        gm = opengm.gm(numpy.ones(numVar)*numberOfLabels)
+
+
+
 
         # compute features
-        unaryFeat = [f(img) for f in fUnary]
+        unaryFeat = getFeat(fUnary, img)
         unaryFeat = numpy.nan_to_num(numpy.concatenate(unaryFeat,axis=2).view(numpy.ndarray))
         unaryFeat  = unaryFeat.reshape([numVar,-1])
-        binaryFeat = [f(img) for f in fBinary]
+        binaryFeat = getFeat(fBinary, img)
         binaryFeat = numpy.nan_to_num(numpy.concatenate(binaryFeat,axis=2).view(numpy.ndarray))
         binaryFeat  = binaryFeat.reshape([numVar,-1])
 
@@ -72,7 +85,7 @@ def secondOrderImageDataset(imgs, gts, numberOfLabels, fUnary, fBinary, addConst
 
         # add unaries
         lUnaries = learning.lUnaryFunctions(weights =weights,numberOfLabels = numberOfLabels, 
-                                            features=unaryFeat, weightIds = uWeightIds.reshape([1,-1]).copy(),
+                                            features=unaryFeat, weightIds = uWeightIds,
                                             featurePolicy= learning.FeaturePolicy.sharedBetweenLabels,
                                             makeFirstEntryConst=numberOfLabels==2, addConstFeature=addConstFeature)
         fids = gm.addFunctions(lUnaries)
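
The new getFeat helper makes the feature pipeline robust to functors that return 2-D (single-channel) arrays: numpy.concatenate(..., axis=2) needs a channel axis on every input, so 2-D results are promoted with r[:, :, None]. Together with the dropped assert and the reshaped uWeightIds, this generalizes the dataset builder beyond two labels: each label gets its own row of unary weight ids when numberOfLabels > 2, while the binary case keeps one shared row (its first entry made constant via makeFirstEntryConst). A self-contained sketch of the promotion logic:

    import numpy

    def getFeat(fComp, im):
        # apply each feature functor; promote 2-D results to 3-D so they
        # can be concatenated along the channel axis (axis=2)
        res = []
        for f in fComp:
            r = f(im)
            if r.ndim == 2:
                r = r[:, :, None]
            res.append(r)
        return res

    img = numpy.random.rand(4, 4)
    feats = getFeat([lambda x: x, lambda x: numpy.dstack([x, 2 * x])], img)
    print(numpy.concatenate(feats, axis=2).shape)  # -> (4, 4, 3)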
diff --git a/fubar/real_example_2.py b/fubar/real_example_2.py
index 3429fad..1acdfe4 100644
--- a/fubar/real_example_2.py
+++ b/fubar/real_example_2.py
@@ -7,7 +7,7 @@ import pylab
 
 nModels = 20
 nLables = 2 
-shape = [200, 200]
+shape = [50, 50]
 numVar = shape[0]*shape[1]
 
 sSmooth = [1.0,1.1,1.2, 1.5, 2.0, 3.0, 4.0]
@@ -132,10 +132,10 @@ nTestPoints  =numpy.ones(nWeights).astype('uint64')*5
 #learner = learning.structMaxMarginLearner(dataset, 0.1, 0.001, 0)
 #learner = learning.maxLikelihoodLearner(dataset)
 #learner =  learning.structPerceptron(dataset, decayExponent=-0.5, learningMode='batch')
-learner =  learning.subgradientSSVM(dataset, learningRate=1.0, C=100)
+learner =  learning.subgradientSSVM(dataset, learningRate=1.0, C=100, learningMode='workingSets')
 
 
-learner.learn(infCls=opengm.inference.QpboExternal, 
+learner.learn(infCls=opengm.inference.TrwsExternal, 
               parameter=opengm.InfParam())
 
 for w in range(nWeights):
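
This benchmark shrinks the grid from 200x200 to 50x50 variables and, as in brown_horse.py, moves to the working-sets learner; loss-augmented inference is handed to external TRW-S instead of external QPBO. A sketch of the changed calls (requires an OpenGM build with WITH_TRWS; the dataset is again a hypothetical stand-in):

    import opengm
    from opengm import learning

    dataset = learning.createDataset(numWeights=8, loss='h')

    learner = learning.subgradientSSVM(dataset, learningRate=1.0, C=100,
                                       learningMode='workingSets')
    learner.learn(infCls=opengm.inference.TrwsExternal,
                  parameter=opengm.InfParam())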
diff --git a/include/opengm/graphicalmodel/weights.hxx b/include/opengm/graphicalmodel/weights.hxx
index 948ab68..395b82d 100644
--- a/include/opengm/graphicalmodel/weights.hxx
+++ b/include/opengm/graphicalmodel/weights.hxx
@@ -6,46 +6,94 @@
 namespace opengm{
 namespace learning{
 
-   template<class T>
-   class Weights 
-   {
-   public:
-      typedef T ValueType;
-
-      Weights(const size_t numberOfWeights=0)
-      : weights_(numberOfWeights)
-      {
-
-      }
-
-      ValueType getWeight(const size_t pi)const{
-         OPENGM_ASSERT_OP(pi,<,weights_.size());
-         return weights_[pi];
-      }
-
-      void setWeight(const size_t pi,const ValueType value){
-         OPENGM_ASSERT_OP(pi,<,weights_.size());
-         weights_[pi] = value;
-      }
-
-       const ValueType& operator[](const size_t pi)const{
-          return weights_[pi];
-       }
-       ValueType& operator[](const size_t pi) {
-          return weights_[pi];
-       }
-
-      size_t numberOfWeights()const{
-         return weights_.size();
-      }
-      size_t size()const{
-        return weights_.size();
-      }
-
-   private:
-
-      std::vector<ValueType> weights_;
-   };
+    /*
+    template<class T>
+    class Weights {
+    public:
+
+        typedef T ValueType;
+
+        Weights(const size_t numberOfWeights=0)
+        :   weights_(numberOfWeights)
+        {
+
+        }
+
+        ValueType getWeight(const size_t pi)const{
+            OPENGM_ASSERT_OP(pi,<,weights_.size());
+            return weights_[pi];
+        }
+
+        void setWeight(const size_t pi,const ValueType value){
+            OPENGM_ASSERT_OP(pi,<,weights_.size());
+            weights_[pi] = value;
+        }
+
+        const ValueType& operator[](const size_t pi)const{
+            return weights_[pi];
+        }
+
+        ValueType& operator[](const size_t pi) {
+            return weights_[pi];
+        }
+
+        size_t numberOfWeights()const{
+            return weights_.size();
+        }
+
+        size_t size()const{
+            return weights_.size();
+        }
+
+    private:
+
+        std::vector<ValueType> weights_;
+    };
+    */
+    template<class T>
+    class Weights : public marray::Vector<T>
+    {
+    public:
+
+        typedef T ValueType;
+
+        Weights(const size_t numberOfWeights=0)
+        :   marray::Vector<T>(numberOfWeights)
+        {
+
+        }
+
+        ValueType getWeight(const size_t pi)const{
+            OPENGM_ASSERT_OP(pi,<,this->size());
+            return (*this)[pi];
+        }
+
+        void setWeight(const size_t pi,const ValueType value){
+            OPENGM_ASSERT_OP(pi,<,this->size());
+            (*this)[pi] = value;
+        }
+
+       //const ValueType& operator[](const size_t pi)const{
+       //    return weights_[pi];
+       //}
+
+       //ValueType& operator[](const size_t pi) {
+       //    return weights_[pi];
+       //}
+
+        size_t numberOfWeights()const{
+            return this->size();
+        }
+
+        //size_t size()const{
+        //    return weights_.size();
+        //}
+
+    private:
+
+        //std::vector<ValueType> weights_;
+    };
+
 } // namespace learning
 } // namespace opengm
 
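Weights is reimplemented as a subclass of marray::Vector<T> rather than a wrapper around std::vector, so a Weights object can now be passed wherever a marray vector view is expected (notably in the Python bindings), while getWeight, setWeight and numberOfWeights keep their old semantics; operator[] and size() are inherited from the base class. From Python the weights are still reached through the dataset; a sketch, where the vector-like element access is an assumption about the exported interface:

    from opengm import learning

    dataset = learning.createDataset(numWeights=3, loss='h')
    weights = dataset.getWeights()

    # assumed vector-like access mirroring getWeight/setWeight
    for wi in range(len(weights)):
        weights[wi] = 0.0
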
diff --git a/include/opengm/inference/lazyflipper.hxx b/include/opengm/inference/lazyflipper.hxx
index c3d5e08..eb77727 100644
--- a/include/opengm/inference/lazyflipper.hxx
+++ b/include/opengm/inference/lazyflipper.hxx
@@ -163,10 +163,10 @@ public:
       Tribool inferMultilabel_;
    };
 
-   LazyFlipper(const GraphicalModelType&, const size_t = 2, const Tribool useMultilabelInference = Tribool::Maybe);
-   LazyFlipper(const GraphicalModelType& gm, typename LazyFlipper::Parameter param);
-   template<class StateIterator>
-      LazyFlipper(const GraphicalModelType&, const size_t, StateIterator, const Tribool useMultilabelInference = Tribool::Maybe);
+   //LazyFlipper(const GraphicalModelType&, const size_t = 2, const Tribool useMultilabelInference = Tribool::Maybe);
+   LazyFlipper(const GraphicalModelType& gm, Parameter param = Parameter());
+   //template<class StateIterator>
+      //LazyFlipper(const GraphicalModelType&, const size_t, StateIterator, const Tribool useMultilabelInference = Tribool::Maybe);
    std::string name() const;
    const GraphicalModelType& graphicalModel() const;
    const size_t maxSubgraphSize() const;
@@ -593,37 +593,37 @@ Forest<T>::setLevelOrderSuccessor(
 
 // implementation of LazyFlipper
 
-template<class GM, class ACC>
-inline
-LazyFlipper<GM, ACC>::LazyFlipper(
-   const GraphicalModelType& gm,
-   const size_t maxSubgraphSize,
-   const Tribool useMultilabelInference
-)
-:  gm_(gm),
-   variableAdjacency_(Adjacency(gm.numberOfVariables())),
-   movemaker_(Movemaker<GM>(gm)),
-   subgraphForest_(SubgraphForest()),
-   maxSubgraphSize_(maxSubgraphSize),
-   useMultilabelInference_(useMultilabelInference)
-{
-   if(gm_.numberOfVariables() == 0) {
-      throw RuntimeError("The graphical model has no variables.");
-   }
-   setMaxSubgraphSize(maxSubgraphSize);
-   // initialize activation_
-   activation_[0].append(gm_.numberOfVariables());
-   activation_[1].append(gm_.numberOfVariables());
-   // initialize variableAdjacency_
-   for(size_t j=0; j<gm_.numberOfFactors(); ++j) {
-      const FactorType& factor = gm_[j];
-      for(size_t m=0; m<factor.numberOfVariables(); ++m) {
-         for(size_t n=m+1; n<factor.numberOfVariables(); ++n) {
-            variableAdjacency_.connect(factor.variableIndex(m), factor.variableIndex(n));
-         }
-      }
-   }
-}
+//template<class GM, class ACC>
+//inline
+//LazyFlipper<GM, ACC>::LazyFlipper(
+//   const GraphicalModelType& gm,
+//   const size_t maxSubgraphSize,
+//   const Tribool useMultilabelInference
+//)
+//:  gm_(gm),
+//   variableAdjacency_(Adjacency(gm.numberOfVariables())),
+//   movemaker_(Movemaker<GM>(gm)),
+//   subgraphForest_(SubgraphForest()),
+//   maxSubgraphSize_(maxSubgraphSize),
+//   useMultilabelInference_(useMultilabelInference)
+//{
+//   if(gm_.numberOfVariables() == 0) {
+//      throw RuntimeError("The graphical model has no variables.");
+//   }
+//   setMaxSubgraphSize(maxSubgraphSize);
+//   // initialize activation_
+//   activation_[0].append(gm_.numberOfVariables());
+//   activation_[1].append(gm_.numberOfVariables());
+//   // initialize variableAdjacency_
+//   for(size_t j=0; j<gm_.numberOfFactors(); ++j) {
+//      const FactorType& factor = gm_[j];
+//      for(size_t m=0; m<factor.numberOfVariables(); ++m) {
+//         for(size_t n=m+1; n<factor.numberOfVariables(); ++n) {
+//            variableAdjacency_.connect(factor.variableIndex(m), factor.variableIndex(n));
+//         }
+//      }
+//   }
+//}
 
 template<class GM, class ACC>
 inline
@@ -662,40 +662,40 @@ inline void
 LazyFlipper<GM, ACC>::reset()
 {}
 
-/// \todo next version: get rid of redundancy with other constructor
-template<class GM, class ACC>
-template<class StateIterator>
-inline
-LazyFlipper<GM, ACC>::LazyFlipper(
-   const GraphicalModelType& gm,
-   const size_t maxSubgraphSize,
-   StateIterator it,
-   const Tribool useMultilabelInference
-)
-:  gm_(gm),
-   variableAdjacency_(Adjacency(gm_.numberOfVariables())),
-   movemaker_(Movemaker<GM>(gm, it)),
-   subgraphForest_(SubgraphForest()),
-   maxSubgraphSize_(2),
-   useMultilabelInference_(useMultilabelInference)
-{
-   if(gm_.numberOfVariables() == 0) {
-      throw RuntimeError("The graphical model has no variables.");
-   }
-   setMaxSubgraphSize(maxSubgraphSize);
-   // initialize activation_
-   activation_[0].append(gm_.numberOfVariables());
-   activation_[1].append(gm_.numberOfVariables());
-   // initialize variableAdjacency_
-   for(size_t j=0; j<gm_.numberOfFactors(); ++j) {
-      const FactorType& factor = gm_[j];
-      for(size_t m=0; m<factor.numberOfVariables(); ++m) {
-         for(size_t n=m+1; n<factor.numberOfVariables(); ++n) {
-            variableAdjacency_.connect(factor.variableIndex(m), factor.variableIndex(n));
-         }
-      }
-   }
-}
+///// \todo next version: get rid of redundancy with other constructor
+//template<class GM, class ACC>
+//template<class StateIterator>
+//inline
+//LazyFlipper<GM, ACC>::LazyFlipper(
+//   const GraphicalModelType& gm,
+//   const size_t maxSubgraphSize,
+//   StateIterator it,
+//   const Tribool useMultilabelInference
+//)
+//:  gm_(gm),
+//   variableAdjacency_(Adjacency(gm_.numberOfVariables())),
+//   movemaker_(Movemaker<GM>(gm, it)),
+//   subgraphForest_(SubgraphForest()),
+//   maxSubgraphSize_(2),
+//   useMultilabelInference_(useMultilabelInference)
+//{
+//   if(gm_.numberOfVariables() == 0) {
+//      throw RuntimeError("The graphical model has no variables.");
+//   }
+//   setMaxSubgraphSize(maxSubgraphSize);
+//   // initialize activation_
+//   activation_[0].append(gm_.numberOfVariables());
+//   activation_[1].append(gm_.numberOfVariables());
+//   // initialize variableAdjacency_
+//   for(size_t j=0; j<gm_.numberOfFactors(); ++j) {
+//      const FactorType& factor = gm_[j];
+//      for(size_t m=0; m<factor.numberOfVariables(); ++m) {
+//         for(size_t n=m+1; n<factor.numberOfVariables(); ++n) {
+//            variableAdjacency_.connect(factor.variableIndex(m), factor.variableIndex(n));
+//         }
+//      }
+//   }
+//}
 
 template<class GM, class ACC>
 inline void
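
The two hand-written constructors (subgraph size plus Tribool, with an optional state iterator) are commented out in favour of a single constructor taking a Parameter object, matching the parameter convention used by the other OpenGM inference classes. The Python interface is unaffected, since it already configures LazyFlipper through InfParam. A small end-to-end sketch on a two-variable chain; PottsFunction/addFactor follow the standard OpenGM Python API:

    import numpy
    import opengm

    gm = opengm.gm(numpy.ones(2) * 2)           # two binary variables
    f = opengm.PottsFunction([2, 2], 0.0, 0.4)  # pairwise Potts term
    gm.addFactor(gm.addFunction(f), [0, 1])

    inf = opengm.inference.LazyFlipper(
        gm, parameter=opengm.InfParam(maxSubgraphSize=2))
    inf.infer()
    print(inf.arg())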
diff --git a/include/opengm/inference/mqpbo.hxx b/include/opengm/inference/mqpbo.hxx
index 9c14aa1..44ee5e0 100644
--- a/include/opengm/inference/mqpbo.hxx
+++ b/include/opengm/inference/mqpbo.hxx
@@ -189,12 +189,12 @@ namespace opengm {
       }
 
       if(param_.rounds_>0){
-         std::cout << "Large" <<std::endl;
+         //std::cout << "Large" <<std::endl;
          qpbo_ = new kolmogorov::qpbo::QPBO<GraphValueType > (numNodes_, numEdges_); // max number of nodes & edges
          qpbo_->AddNode(numNodes_);
       }
       else{
-         std::cout << "Small" <<std::endl;      
+         //std::cout << "Small" <<std::endl;      
          qpbo_ = new kolmogorov::qpbo::QPBO<GraphValueType > (gm_.numberOfVariables(), numSOF); // max number of nodes & edges
          qpbo_->AddNode(gm_.numberOfVariables());
       }
@@ -845,7 +845,7 @@ namespace opengm {
 
       if(param_.useKovtunsMethod_){
          if(isPotts){
-            std::cout << "Use Kovtuns method for potts"<<std::endl;
+            //std::cout << "Use Kovtuns method for potts"<<std::endl;
             for(LabelType l=0; l<maxNumberOfLabels; ++l) {
                testQuess(l);
                double xoptimality = optimality(); 
diff --git a/include/opengm/inference/reducedinference.hxx b/include/opengm/inference/reducedinference.hxx
index 6460e9c..4ed215b 100644
--- a/include/opengm/inference/reducedinference.hxx
+++ b/include/opengm/inference/reducedinference.hxx
@@ -86,13 +86,15 @@ namespace opengm {
 
     template<class _GM>
     struct RebindGm{
-        typedef typename INF:: template RebindGm<_GM>::type RebindedInf;
+        typedef typename ReducedInferenceHelper<_GM>::InfGmType RebindedInfGmType;
+        typedef typename INF:: template RebindGm<RebindedInfGmType>::type RebindedInf;
         typedef ReducedInference<_GM, ACC, RebindedInf> type;
     };
 
     template<class _GM,class _ACC>
     struct RebindGmAndAcc{
-        typedef typename INF:: template RebindGmAndAcc<_GM,_ACC>::type RebindedInf;
+        typedef typename ReducedInferenceHelper<_GM>::InfGmType RebindedInfGmType;
+        typedef typename INF:: template RebindGmAndAcc<RebindedInfGmType,_ACC>::type RebindedInf;
         typedef ReducedInference<_GM,_ACC, RebindedInf> type;
     };
 
diff --git a/include/opengm/learning/dataset/dataset.hxx b/include/opengm/learning/dataset/dataset.hxx
index 1241607..accf927 100644
--- a/include/opengm/learning/dataset/dataset.hxx
+++ b/include/opengm/learning/dataset/dataset.hxx
@@ -71,6 +71,9 @@ namespace opengm {
         ValueType                     getTotalLoss(const typename INF::Parameter& para) const;
 
         template<class INF>
+        ValueType                     getTotalLossParallel(const typename INF::Parameter& para) const;
+
+        template<class INF>
         ValueType                     getLoss(const typename INF::Parameter& para, const size_t i) const;
         ValueType                     getLoss(std::vector<LabelType> conf , const size_t i) const;
 
@@ -117,6 +120,16 @@ namespace opengm {
         }
         return sum;
     }
+    template<class GM, class LOSS, class LOSS_GM>
+    template<class INF>
+    typename GM::ValueType Dataset<GM, LOSS, LOSS_GM>::getTotalLossParallel(const typename INF::Parameter& para) const {
+        double totalLoss = 0;
+        #pragma omp parallel for reduction(+:totalLoss)  
+        for(size_t i=0; i<this->getNumberOfModels(); ++i) {
+            totalLoss = totalLoss + this->getLoss<INF>(para, i);
+        }
+        return totalLoss;
+    }
 
     template<class GM, class LOSS, class LOSS_GM>
     template<class INF>
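
getTotalLossParallel is a drop-in parallel variant of getTotalLoss: the per-model losses are independent, so the loop is parallelised with OpenMP and the partial sums are combined by the reduction(+:totalLoss) clause (each thread accumulates privately and OpenMP adds the results). The same pattern in Python, with a hypothetical get_loss callable standing in for getLoss:

    from concurrent.futures import ThreadPoolExecutor

    def total_loss_parallel(get_loss, n_models):
        # per-model losses are independent: map concurrently, then sum
        with ThreadPoolExecutor() as pool:
            return sum(pool.map(get_loss, range(n_models)))

    print(total_loss_parallel(lambda i: 0.5 * i, 4))  # 0+0.5+1.0+1.5 = 3.0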
diff --git a/include/opengm/learning/subgradient_ssvm.hxx b/include/opengm/learning/subgradient_ssvm.hxx
index fd1a3fb..ca2d412 100644
--- a/include/opengm/learning/subgradient_ssvm.hxx
+++ b/include/opengm/learning/subgradient_ssvm.hxx
@@ -36,7 +36,8 @@ namespace opengm {
 
             enum LearningMode{
                 Online = 0,
-                Batch = 2
+                Batch = 1,
+                WorkingSets = 2
             };
 
 
@@ -219,7 +220,51 @@ namespace opengm {
                 
             }
         }
+        else if(para_.learningMode_ == Parameter::WorkingSets){
 
+            std::cout<<"working sets mode\n";
+            std::vector< std::vector< std::vector<LabelType> > > A(nModels);
+
+            RandomUniform<size_t> randModel(0, nModels);
+
+
+            for(iteration_=0 ; iteration_<para_.maxIterations_; ++iteration_){
+                // this     
+                const size_t gmi = randModel();
+
+                // lock the model
+                dataset_.lockModel(gmi);
+                const GMWITHLOSS & gmWithLoss = dataset_.getModelWithLoss(gmi);
+
+                // do inference
+                std::vector<LabelType> arg;
+                opengm::infer<InfLossGm>(gmWithLoss, infLossGmParam, arg);
+                
+                A[gmi].push_back(arg);
+
+                // accumulate all features
+                featureAcc_.resetWeights();
+                size_t aWithLoss = 0;
+                for(size_t jj=0; jj<A[gmi].size(); ++jj){
+                    if(dataset_.getLoss(A[gmi][jj], gmi)>0.0){
+                        ++aWithLoss;
+                        featureAcc_.accumulateModelFeatures(dataset_.getModel(gmi), dataset_.getGT(gmi).begin(), A[gmi][jj].begin());
+                    }
+                }
+                if(aWithLoss>0){
+                    for(size_t wi=0; wi<nWegihts; ++wi){
+                        const double n = (para_.learningRate_/double(iteration_+1));
+                        const double wOld = dataset_.getWeights().getWeight(wi);
+                        const double wNew = wOld - n*featureAcc_.getWeight(wi)/double(aWithLoss);
+                        dataset_.getWeights().setWeight(wi, wNew);
+                    }
+                }
+                if(iteration_%nModels == 0 ){
+                    std::cout<<iteration_<<" loss :"<<dataset_. template getTotalLossParallel<INF>(para)<<"\n";
+                }
+                dataset_.unlockModel(gmi);
+            }
+        }
         weights_ = dataset_.getWeights();
     }
 
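The new WorkingSets branch keeps, for every model, the set A[gmi] of all labelings that loss-augmented inference has ever returned. Each iteration locks a randomly drawn model, appends the current labeling to its working set, and takes a subgradient step averaged over all stored labelings that still have positive loss, using the decaying step size learningRate/(iteration+1). A schematic Python version of the loop; loss_aug_infer, loss and feature_diff are hypothetical stand-ins for the inference call, dataset_.getLoss and the featureAcc_ accumulator:

    import random

    def working_set_ssvm(models, w, learning_rate, max_iter,
                         loss_aug_infer, loss, feature_diff):
        A = [[] for _ in models]                  # one working set per model
        for t in range(max_iter):
            i = random.randrange(len(models))     # draw a random model
            A[i].append(loss_aug_infer(models[i], w))
            viol = [y for y in A[i] if loss(models[i], y) > 0.0]
            if viol:
                eta = learning_rate / (t + 1.0)   # decaying step size
                # average feature difference over the violating labelings
                g = [sum(col) / len(viol) for col in
                     zip(*(feature_diff(models[i], y) for y in viol))]
                w = [wj - eta * gj for wj, gj in zip(w, g)]
        return w
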
diff --git a/src/interfaces/python/opengm/learning/__init__.py b/src/interfaces/python/opengm/learning/__init__.py
index 6114e65..2a5f59c 100644
--- a/src/interfaces/python/opengm/learning/__init__.py
+++ b/src/interfaces/python/opengm/learning/__init__.py
@@ -10,12 +10,27 @@ DatasetWithGeneralizedHammingLoss.lossType = 'generalized-hamming'
 
 
 
-def _extendedLearn(self, infCls, parameter = None):
+def _extendedLearn(self, infCls, parameter = None, redInf=False, persistency=True, tentacles=False, connectedComponents=False):
     if parameter is None:
         import opengm
         parameter = opengm.InfParam()
     cppParam  =  infCls.get_cpp_parameter(operator='adder',accumulator='minimizer',parameter=parameter)
-    self._learn(cppParam)
+    if not redInf:
+        
+        try:
+          self._learn(cppParam)
+        except Exception, e:
+            #print "an error ",e,"\n\n"
+            if (str(e).find("did not match C++ signature")):
+                raise RuntimeError("infCls : '%s' is not (yet) exported from c++ to python for learning"%str(infCls))
+    else:
+        try:
+          self._learnReducedInf(cppParam, bool(persistency), bool(tentacles),bool(connectedComponents))
+        except Exception, e:
+            #print "an error ",e,"\n\n"
+            if (str(e).find("did not match C++ signature")):
+                raise RuntimeError("infCls : '%s' is not (yet) exported from c++ to python for learning with reduced inference"%str(infCls))
+
 
 def _extendedGetLoss(self, model_idx, infCls, parameter = None):
     if parameter is None:
@@ -140,13 +155,15 @@ def subgradientSSVM(dataset, learningMode='batch',eps=1e-5, maxIterations=10000,
         learningModeEnum = SubgradientSSVM_GeneralizedHammingLossParameter_LearningMode
 
     lm = None
-    if learningMode not in ['online','batch']:
-        raise RuntimeError("wrong learning mode, must be 'online' or 'batch' ")
+    if learningMode not in ['online','batch','workingSets']:
+        raise RuntimeError("wrong learning mode, must be 'online', 'batch' or 'workingSets' ")
 
     if learningMode == 'online':
         lm = learningModeEnum.online
     if learningMode == 'batch':
         lm = learningModeEnum.batch
+    if learningMode == 'workingSets':
+        lm = learningModeEnum.workingSets
 
     param = learnerParamCls()
     param.eps = float(eps)
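
On the Python side, learn() gains a redInf switch: with redInf=True the call is routed to the new _learnReducedInf binding, forwarding ReducedInference's persistency, tentacles and connectedComponents flags, and both paths convert boost::python's "did not match C++ signature" error into a readable RuntimeError naming the unexported solver. (Note that str(e).find(...) returns -1, which is truthy, when the pattern is absent, so the guard as written fires for any exception; comparing against -1 would filter precisely.) A usage sketch, assuming a build with WITH_QPBO and WITH_TRWS and reusing the hypothetical stand-in dataset:

    import opengm
    from opengm import learning

    dataset = learning.createDataset(numWeights=12, loss='h')
    learner = learning.subgradientSSVM(dataset, learningRate=0.05, C=100,
                                       learningMode='workingSets')

    # QPBO-based model reduction wrapped around the TRW-S sub-solver
    learner.learn(infCls=opengm.inference.TrwsExternal,
                  parameter=opengm.InfParam(),
                  redInf=True, persistency=True,
                  tentacles=False, connectedComponents=False)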
diff --git a/src/interfaces/python/opengm/learning/helper.hxx b/src/interfaces/python/opengm/learning/helper.hxx
index b981ddb..707cdb6 100644
--- a/src/interfaces/python/opengm/learning/helper.hxx
+++ b/src/interfaces/python/opengm/learning/helper.hxx
@@ -18,6 +18,7 @@
 
 #ifdef WITH_QPBO
 #include <opengm/inference/external/qpbo.hxx>
+#include <opengm/inference/reducedinference.hxx>
 #endif
 
 #ifdef WITH_TRWS
@@ -30,56 +31,106 @@ namespace opengm{
 template<class LEARNER>
 class LearnerInferenceSuite: public boost::python::def_visitor<LearnerInferenceSuite<LEARNER> >{
 public:
-   friend class boost::python::def_visitor_access;
-
-   LearnerInferenceSuite(){
-
-   }
-
-   template<class INF>
-   static void pyLearnWithInf(LEARNER & learner, const typename INF::Parameter & param)
-   {
-       learner. template learn<INF>(param);
-   }
-
-   template <class classT>
-   void visit(classT& c) const{
-       // SOME INFERENCE METHODS
-       typedef typename LEARNER::GMType GMType;
-       typedef typename LEARNER::Parameter PyLearnerParam;
-       typedef typename LEARNER::DatasetType DatasetType;
-       typedef opengm::Minimizer ACC;
-
-       typedef opengm::ICM<GMType, ACC> IcmInf;
-       typedef opengm::LazyFlipper<GMType, ACC> LazyFlipperInf;
-       typedef opengm::BeliefPropagationUpdateRules<GMType, ACC> UpdateRulesType;
-       typedef opengm::MessagePassing<GMType, ACC, UpdateRulesType, opengm::MaxDistance> BpInf;
-
-#ifdef WITH_CPLEX
-       typedef opengm::LPCplex<GMType, ACC> Cplex;
-#endif
-#ifdef WITH_QPBO
-       typedef opengm::external::QPBO<GMType>  QpboExternal;
-#endif
-#ifdef WITH_TRWS
-       typedef opengm::external::TRWS<GMType>  TrwsExternal;
-#endif
-
-      c
-          .def("_learn",&pyLearnWithInf<IcmInf>)
-          .def("_learn",&pyLearnWithInf<LazyFlipperInf>)
-          .def("_learn",&pyLearnWithInf<BpInf>)
-#ifdef WITH_CPLEX
-          .def("_learn",&pyLearnWithInf<Cplex>)
-#endif
-#ifdef WITH_QPBO
-          .def("_learn",&pyLearnWithInf<QpboExternal>)
-#endif
-#ifdef WITH_TRWS
-          .def("_learn",&pyLearnWithInf<TrwsExternal>)
-#endif
-      ;
-   }
+    friend class boost::python::def_visitor_access;
+
+    LearnerInferenceSuite(){
+
+    }
+
+    template<class INF>
+    static void pyLearn_Inf(LEARNER & learner, const typename INF::Parameter & param)
+    {
+        learner. template learn<INF>(param);
+    }
+
+    template<class INF>
+    static void pyLearn_ReducedInf(
+        LEARNER & learner, 
+        const typename INF::Parameter & param,
+        const bool persistency,
+        const bool tentacles,
+        const bool connectedComponents
+    )
+    {
+
+        typedef typename INF::GraphicalModelType GmType;
+        typedef typename opengm::ReducedInferenceHelper<GmType>::InfGmType RedInfGm;
+
+        // rebind the inference to the RedInfGm
+        typedef typename INF:: template RebindGm<RedInfGm>::type RedInfRebindInf;
+
+
+        typedef typename RedInfRebindInf::Parameter RedInfRebindInfParam;
+        typedef opengm::ReducedInference<GmType, opengm::Minimizer, RedInfRebindInf> RedInf;
+        typedef typename RedInf::Parameter RedInfParam;
+
+        RedInfRebindInfParam redInfRebindInfParam(param);
+
+        RedInfParam redInfPara;
+        redInfPara.subParameter_ = redInfRebindInfParam;
+        redInfPara.Persistency_ = persistency;
+        redInfPara.Tentacle_ = tentacles;
+        redInfPara.ConnectedComponents_ = connectedComponents;
+
+        learner. template learn<RedInf>(redInfPara);
+    }
+
+
+
+
+
+
+    template <class classT>
+    void visit(classT& c) const{
+        // SOME INFERENCE METHODS
+        typedef typename LEARNER::GMType GMType;
+        typedef typename LEARNER::Parameter PyLearnerParam;
+        typedef typename LEARNER::DatasetType DatasetType;
+        typedef opengm::Minimizer ACC;
+
+        typedef opengm::ICM<GMType, ACC> IcmInf;
+        typedef opengm::LazyFlipper<GMType, ACC> LazyFlipperInf;
+        typedef opengm::BeliefPropagationUpdateRules<GMType, ACC> UpdateRulesType;
+        typedef opengm::MessagePassing<GMType, ACC, UpdateRulesType, opengm::MaxDistance> BpInf;
+
+        #ifdef WITH_CPLEX
+            typedef opengm::LPCplex<GMType, ACC> Cplex;
+        #endif
+
+        #ifdef WITH_QPBO
+            typedef opengm::external::QPBO<GMType>  QpboExternal;
+        #endif
+
+        #ifdef WITH_TRWS
+            typedef opengm::external::TRWS<GMType>  TrwsExternal;
+        #endif
+
+        c
+            .def("_learn",&pyLearn_Inf<IcmInf>)
+            .def("_learn",&pyLearn_Inf<LazyFlipperInf>)
+            .def("_learn",&pyLearn_Inf<BpInf>)
+            #ifdef WITH_CPLEX
+            .def("_learn",&pyLearn_Inf<Cplex>)
+            #endif
+            #ifdef WITH_QPBO
+            .def("_learn",&pyLearn_Inf<QpboExternal>)
+            #endif
+            #ifdef WITH_TRWS
+            .def("_learn",&pyLearn_Inf<TrwsExternal>)
+            #endif
+
+            // REDUCED INFERENCE
+            #ifdef WITH_QPBO
+                .def("_learnReducedInf",&pyLearn_ReducedInf<LazyFlipperInf>)
+                #ifdef WITH_TRWS
+                .def("_learnReducedInf",&pyLearn_ReducedInf<TrwsExternal>)
+                #endif
+                #ifdef WITH_CPLEX
+                .def("_learnReducedInf",&pyLearn_ReducedInf<Cplex>)
+                #endif
+            #endif
+        ;
+    }
 };
 
 
@@ -126,6 +177,11 @@ public:
        typedef opengm::external::TRWS<GMType>  TrwsExternal;
 #endif
 
+
+
+
+
+
       c
           .def("_getLoss",&pyGetLossWithInf<IcmInf>)
           .def("_getTotalLoss",&pyGetTotalLossWithInf<IcmInf>)
diff --git a/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx b/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx
index 2ca92c9..779d65f 100644
--- a/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx
+++ b/src/interfaces/python/opengm/learning/pySubgradientSSVM.cxx
@@ -44,6 +44,7 @@ namespace opengm{
         bp::enum_<typename PyLearnerParam::LearningMode>(paramEnumLearningModeName.c_str())
             .value("online", PyLearnerParam::Online)
             .value("batch", PyLearnerParam::Batch)
+            .value("workingSets", PyLearnerParam::WorkingSets)
         ;
 
         // learner param
