[opengm] 198/386: better struct. perceptron

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:37:40 UTC 2016


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to branch debian/master
in repository opengm.

commit e86cf8e7971ee935fe5afb52e4c6253484445576
Author: DerThorsten <thorsten.beier at iwr.uni-heidelberg.de>
Date:   Sun Jan 4 18:13:10 2015 +0100

    better struct. perceptron
---
 CMakeLists.txt                                     |   2 +
 fubar/real_example_2.py                            |  28 +-
 include/opengm/functions/unary_loss_function.hxx   | 151 ++++++++++
 .../graphicalmodel/graphicalmodel_factor.hxx       |  19 +-
 include/opengm/graphicalmodel/weights.hxx          |  27 +-
 include/opengm/inference/inference.hxx             |   9 +
 include/opengm/learning/dataset/dataset.hxx        | 285 +++++++++---------
 include/opengm/learning/gradient-accumulator.hxx   | 194 ++++++++----
 include/opengm/learning/structured_perceptron.hxx  | 328 ++++++++++-----------
 include/opengm/utilities/shape_accessor.hxx        |  44 ++-
 src/interfaces/python/opengm/learning/__init__.py  |  23 +-
 .../python/opengm/learning/pyStructPerceptron.cxx  |  14 +-
 12 files changed, 722 insertions(+), 402 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index c6316fb..9bb9dad 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -372,6 +372,8 @@ if(WITH_OPENMP)
   #SET(OPENMP_INCLUDE_DIR "" CACHE STRING "OpenMP include dir")
   #include_directories(${OPENMP_INCLUDE_DIR})
   add_definitions(-DWITH_OPENMP)
+  set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
+  set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
 else()
    message(STATUS "build without openMP -> multithreaded options disabled")
 endif(WITH_OPENMP)
diff --git a/fubar/real_example_2.py b/fubar/real_example_2.py
index a80ad07..ccad241 100644
--- a/fubar/real_example_2.py
+++ b/fubar/real_example_2.py
@@ -5,13 +5,13 @@ import vigra
 import pylab as plt
 import pylab
 
-nModels = 10
+nModels = 20
 nLables = 2 
-shape = [30, 30]
+shape = [40, 40]
 numVar = shape[0]*shape[1]
 
-sSmooth = [1.0, 1.5, 2.0, 3.0, 4.0 , 5.0]
-sGrad = [1.0, 1.5, 2.0, 3.0, 4.0, 5.0]
+sSmooth = [1.0, 1.1, 1.2, 1.5, 2.0, 3.0, 4.0]
+sGrad = [1.0, 1.5, 2.0, 3.0, 4.0]
 
 nUWeights = len(sSmooth) + 1
 nBWeights = len(sGrad) + 1
@@ -35,20 +35,20 @@ dataset = learning.createDataset(numWeights=nWeights, loss='h')
 weights = dataset.getWeights()
 
 def makeFeatures(gt):
-    random  = (numpy.random.rand(*gt.shape)-0.5)*3.0
+    random  = (numpy.random.rand(*gt.shape)-0.5)*5.0
     randGt = random + gt
 
     # vigra.imshow(randGt)
     # plt.colorbar()
     # vigra.show()
 
-    # f = pylab.figure()
-    # for n, a in enumerate([gt, randGt]):
-    #     f.add_subplot(2, 1, n)  # this line outputs images on top of each other
-    #     # f.add_subplot(1, 2, n)  # this line outputs images side-by-side
-    #     pylab.imshow(a,cmap='gray')
-    # pylab.title('Double image')
-    # pylab.show()
+    #f = pylab.figure()
+    #for n, a in enumerate([gt, randGt]):
+    #    f.add_subplot(2, 1, n)  # this line outputs images on top of each other
+    #    # f.add_subplot(1, 2, n)  # this line outputs images side-by-side
+    #    pylab.imshow(a,cmap='gray')
+    #pylab.title('Double image')
+    #pylab.show()
 
 
 
@@ -74,7 +74,7 @@ def makeFeatures(gt):
     return a,b
 
 for mi in range(nModels):
-
+    print mi
 
     gm = opengm.gm(numpy.ones(numVar)*nLables)
     gt = makeGt(shape) 
@@ -131,7 +131,7 @@ nTestPoints  =numpy.ones(nWeights).astype('uint64')*5
 # learner = learning.gridSearchLearner(dataset=dataset,lowerBounds=lowerBounds, upperBounds=upperBounds,nTestPoints=nTestPoints)
 #learner = learning.structMaxMarginLearner(dataset, 1.0, 0.001, 0)
 #learner = learning.maxLikelihoodLearner(dataset)
-learner =  learning.structPerceptron(dataset,kappa=0.1)
+learner =  learning.structPerceptron(dataset, decayExponent=-0.001, learningMode='batch')
 
 learner.learn(infCls=opengm.inference.QpboExternal, 
               parameter=opengm.InfParam())
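
The call above replaces the old kappa step size with a polynomial decay
schedule. A sketch of the schedule implemented by getLearningRate() in
structured_perceptron.hxx (the helper below is illustrative, not part of
the bindings):

    def learning_rate(t, decayExponent=-0.001, decayT0=0.0):
        # rate(t) = (decayT0 + t) ** decayExponent, treated as 1.0 when
        # decayExponent is numerically zero; a decayT0 > 0 avoids the
        # degenerate rate at t = 0
        if abs(decayExponent) < 1e-9:
            return 1.0
        return (decayT0 + t) ** decayExponent

    for t in [1, 10, 100, 1000]:
        print learning_rate(t)   # ~1.0, 0.9977, 0.9954, 0.9931
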
diff --git a/include/opengm/functions/unary_loss_function.hxx b/include/opengm/functions/unary_loss_function.hxx
new file mode 100644
index 0000000..21f5078
--- /dev/null
+++ b/include/opengm/functions/unary_loss_function.hxx
@@ -0,0 +1,151 @@
+#pragma once
+#ifndef OPENGM_UNARY_LOSS_FUNCTION
+#define OPENGM_UNARY_LOSS_FUNCTION
+
+#include "opengm/functions/function_properties_base.hxx"
+
+namespace opengm {
+
+/// UnaryLossFunction: unary factor encoding the per-variable loss w.r.t. a ground-truth label (used to build loss-augmented models)
+///
+/// \ingroup functions
+template<class T,class I, class L>
+class UnaryLossFunction
+: public FunctionBase<UnaryLossFunction<T,I,L>, T,I,L>
+{
+public:
+
+   typedef T ValueType;
+   typedef T value_type;
+   typedef I IndexType;
+   typedef L LabelType;
+
+
+   enum LossType{
+        HammingLoss = 0,
+        LabelVectorConf = 1,
+        LabelVectorGt = 2,
+        LabelMatrix = 3,
+        L1Loss = 4,
+        L2Loss = 5
+   };
+
+   struct SharedMultipliers{
+        marray::Marray<ValueType> labelMult_;
+   };
+
+    UnaryLossFunction(
+        const LabelType numberOfLabels,
+        const LabelType gtLabel,
+        const LossType lossType, 
+        const ValueType multiplier,
+        const SharedMultipliers & sharedMultipliers,
+        const bool owner
+    );
+    template<class Iterator> ValueType operator()(Iterator begin) const;
+    IndexType shape(const IndexType) const;
+    IndexType dimension() const;
+    IndexType size() const;
+
+private:
+   LabelType numberOfLabels_;
+   LabelType gtLabel_;
+   LossType lossType_;
+   ValueType multiplier_;
+   const SharedMultipliers * sharedMultipliers_;
+   bool owner_;
+};
+
+template<class T,class I, class L>
+inline
+UnaryLossFunction<T,I,L>::UnaryLossFunction(
+    const LabelType numberOfLabels,
+    const LabelType gtLabel,
+    const LossType lossType, 
+    const ValueType multiplier,
+    const SharedMultipliers & sharedMultipliers,
+    const bool owner
+)
+:   numberOfLabels_(numberOfLabels),
+    gtLabel_(gtLabel),
+    lossType_(lossType),
+    multiplier_(multiplier),
+    sharedMultipliers_(&sharedMultipliers),
+    owner_(owner)
+{
+
+}
+
+template<class T,class I, class L>
+template<class Iterator>
+inline typename UnaryLossFunction<T,I,L>::ValueType
+UnaryLossFunction<T,I,L>::operator()
+(
+   Iterator begin
+) const {
+
+    const LabelType l = *begin;
+    const ValueType isDifferent = (l != gtLabel_ ?  1.0 : 0.0);
+
+    switch(lossType_){
+        case HammingLoss:{
+            return static_cast<ValueType>(-1.0) * multiplier_ * isDifferent;
+        }
+        case LabelVectorConf:{
+            return multiplier_ * isDifferent * sharedMultipliers_->labelMult_(l);
+        }
+        case LabelVectorGt:{
+            return multiplier_ * isDifferent * sharedMultipliers_->labelMult_(gtLabel_);
+        }
+        case LabelMatrix:{
+            return multiplier_ * isDifferent * sharedMultipliers_->labelMult_(l, gtLabel_);
+        }
+        case L1Loss:{
+            return multiplier_ * static_cast<ValueType>(std::abs(int(l)-int(gtLabel_)));
+        }
+        case L2Loss:{
+            return multiplier_ * std::pow(int(l)-int(gtLabel_),2);
+        }
+        default :{
+            throw RuntimeError("wrong loss type");
+        }
+    }
+}
+
+template<class T,class I, class L>
+inline typename UnaryLossFunction<T,I,L>::IndexType
+UnaryLossFunction<T,I,L>::shape
+(
+   const typename UnaryLossFunction<T,I,L>::IndexType index
+) const{
+   return numberOfLabels_;
+}
+
+template<class T,class I, class L>
+inline typename UnaryLossFunction<T,I,L>::IndexType
+UnaryLossFunction<T,I,L>::dimension() const {
+   return 1;
+}
+
+template<class T,class I, class L>
+inline typename UnaryLossFunction<T,I,L>::IndexType
+UnaryLossFunction<T,I,L>::size() const {
+   return numberOfLabels_;
+}
+
+} // namespace opengm
+
+#endif // #ifndef OPENGM_UNARY_LOSS_FUNCTION
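
The switch in operator() reduces to a handful of closed-form terms, each
gated by whether the label differs from the ground truth. A Python sketch
of the scalar cases (the vector/matrix variants additionally scale by the
shared multiplier table, elided here):

    def unary_loss(l, gt, loss_type, multiplier=1.0):
        different = 1.0 if l != gt else 0.0
        if loss_type == 'hamming':
            # enters with negative sign, as in the switch above
            return -1.0 * multiplier * different
        if loss_type == 'l1':
            return multiplier * abs(l - gt)
        if loss_type == 'l2':
            return multiplier * (l - gt) ** 2
        raise RuntimeError("wrong loss type")
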
diff --git a/include/opengm/graphicalmodel/graphicalmodel_factor.hxx b/include/opengm/graphicalmodel/graphicalmodel_factor.hxx
index 0985830..813d982 100755
--- a/include/opengm/graphicalmodel/graphicalmodel_factor.hxx
+++ b/include/opengm/graphicalmodel/graphicalmodel_factor.hxx
@@ -195,26 +195,25 @@ public:
 
 
     template<class LABEL_ITER>
-    struct GmToLabelIter{
-        typedef GmLabelFactorLabelAccessor<Factor<GRAPHICAL_MODEL>, LABEL_ITER>  Accessor;
+    struct GmToFactorLabelIter{
+        typedef SubsetAccessor<VariablesIteratorType, LABEL_ITER> Accessor;
         typedef AccessorIterator<Accessor, true> Iter;
     };
 
     template<class LABEL_ITER>    
-    typename GmToLabelIter<LABEL_ITER>::Iter
+    typename GmToFactorLabelIter<LABEL_ITER>::Iter
     gmToFactorLabelsBegin(LABEL_ITER gmLabelsBegin)const{
-        typedef typename GmToLabelIter<LABEL_ITER>::Accessor Accessor;
-        typedef typename GmToLabelIter<LABEL_ITER>::Iter Iter;
-        Accessor accessor(*this, gmLabelsBegin);
+        typedef typename GmToFactorLabelIter<LABEL_ITER>::Accessor Accessor;
+        typedef typename GmToFactorLabelIter<LABEL_ITER>::Iter Iter;
+        Accessor accessor(variableIndicesBegin(),variableIndicesEnd(), gmLabelsBegin);
         return Iter(accessor, 0);
     }
 
     template<class LABEL_ITER>    
-    typename GmToLabelIter<LABEL_ITER>::Iter
+    typename GmToFactorLabelIter<LABEL_ITER>::Iter
     gmToFactorLabelsEnd(LABEL_ITER gmLabelsBegin)const{
-        typedef typename GmToLabelIter<LABEL_ITER>::Accessor Accessor;
-        typedef typename GmToLabelIter<LABEL_ITER>::Iter Iter;
-        Accessor accessor(*this, gmLabelsBegin);
+        typedef typename GmToFactorLabelIter<LABEL_ITER>::Accessor Accessor;
+        typedef typename GmToFactorLabelIter<LABEL_ITER>::Iter Iter;
+        Accessor accessor(variableIndicesBegin(),variableIndicesEnd(), gmLabelsBegin);
         return Iter(accessor, this->numberOfVariables());
     }
 
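
The rewritten iterators view a global labeling through the factor's
variable indices (via SubsetAccessor) instead of going through the factor
object itself. What gmToFactorLabelsBegin/gmToFactorLabelsEnd iterate
over, in Python terms:

    def factor_labels(factor_vis, gm_labels):
        # position j of the factor labeling is the global label of the
        # factor's j-th variable
        return [gm_labels[vi] for vi in factor_vis]

    print factor_labels([3, 7], [0, 1, 0, 2, 1, 1, 0, 1])   # -> [2, 1]
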
diff --git a/include/opengm/graphicalmodel/weights.hxx b/include/opengm/graphicalmodel/weights.hxx
index cb24be5..f8e6438 100644
--- a/include/opengm/graphicalmodel/weights.hxx
+++ b/include/opengm/graphicalmodel/weights.hxx
@@ -7,40 +7,41 @@ namespace opengm{
 namespace learning{
 
    template<class T>
-   class Weights{
+   class Weights : public marray::Vector<T> 
+   {
    public:
       typedef T ValueType;
 
       Weights(const size_t numberOfWeights=0)
-      : weights_(numberOfWeights){
+      : marray::Vector<T>(numberOfWeights){
 
       }
 
       ValueType getWeight(const size_t pi)const{
-         OPENGM_ASSERT_OP(pi,<,weights_.size());
-         return weights_[pi];
+         OPENGM_ASSERT_OP(pi,<,this->size());
+         return (*this)[pi];
       }
 
       void setWeight(const size_t pi,const ValueType value){
-         OPENGM_ASSERT_OP(pi,<,weights_.size());
-         weights_[pi]=value;
+         OPENGM_ASSERT_OP(pi,<,this->size());
+         (*this)[pi] = value;
       }
 
-      const ValueType& operator[](const size_t pi)const{
-         return weights_[pi];
-      }
-
-      ValueType& operator[](const size_t pi) {
-         return weights_[pi];
-      }
 
       size_t numberOfWeights()const{
-         return weights_.size();
+         return this->size();
       }
 
    private:
 
-      std::vector<ValueType> weights_;
    };
 } // namespace learning
 } // namespace opengm
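
Weights is now itself the vector rather than wrapping a std::vector; the
old accessors remain as thin aliases. A pure-Python analog of the new
layout:

    class Weights(list):
        # Weights is-a vector now; the legacy accessors just delegate
        def getWeight(self, i):
            return self[i]
        def setWeight(self, i, value):
            self[i] = value
        def numberOfWeights(self):
            return len(self)
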
diff --git a/include/opengm/inference/inference.hxx b/include/opengm/inference/inference.hxx
index 46a774e..2f52edc 100644
--- a/include/opengm/inference/inference.hxx
+++ b/include/opengm/inference/inference.hxx
@@ -29,6 +29,15 @@ enum InferenceTermination {
    INFERENCE_ERROR=4
 };
 
+
+template<class INF>
+inline void infer(const typename INF::GraphicalModelType & gm, const typename INF::Parameter & param, std::vector<typename INF::LabelType> & conf){
+    INF inf(gm, param);
+    inf.infer();
+    inf.arg(conf);
+}
+
+
 /// Inference algorithm interface
 template <class GM, class ACC>
 class Inference
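
The new free function opengm::infer<INF> collapses the usual
construct/infer/arg triple into a single call. The same idiom through the
existing Python bindings, for comparison (a sketch, assuming the standard
constructor signature):

    def infer(gm, infCls, param):
        inf = infCls(gm, parameter=param)
        inf.infer()
        return inf.arg()
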
diff --git a/include/opengm/learning/dataset/dataset.hxx b/include/opengm/learning/dataset/dataset.hxx
index a830cbd..1241607 100644
--- a/include/opengm/learning/dataset/dataset.hxx
+++ b/include/opengm/learning/dataset/dataset.hxx
@@ -12,8 +12,8 @@
 namespace opengm {
    namespace datasets{
      
-     template<class GM>
-     struct DefaultLossGm{
+    template<class GM>
+    struct DefaultLossGm{
 
         // make the graphical model with loss
         typedef typename GM::SpaceType         SpaceType;
@@ -35,151 +35,160 @@ namespace opengm {
         // loss graphical model type
 
         typedef GraphicalModel<ValueType, OperatorType, CombinedList, SpaceType> type;
-     };
-
-     template<class GM, class LOSS=opengm::learning::NoLoss, class LOSS_GM = DefaultLossGm<GM> >
-      class Dataset{
-      public:
-         typedef GM                       GMType;
-
-         // generate the gm with loss here atm (THIS IS WRONG)
-         typedef typename opengm::meta::EvalIf<
-            opengm::meta::Compare<LOSS_GM, DefaultLossGm<GM> >::value,
-            DefaultLossGm<GM>,
-            meta::Self<LOSS_GM>
-         >::type GMWITHLOSS;
-
-         //typedef GM                       GMWITHLOSS;
-         typedef LOSS                     LossType;
-         typedef typename LOSS::Parameter LossParameterType;
-         typedef typename GM::ValueType   ValueType;
-         typedef typename GM::IndexType   IndexType;
-         typedef typename GM::LabelType   LabelType;
-         typedef opengm::learning::Weights<ValueType> Weights;
-
-         bool                          lockModel(const size_t i)               { ++count_[i]; }
-         bool                          unlockModel(const size_t i)             { OPENGM_ASSERT(count_[i]>0); --count_[i]; }
-         const GM&                     getModel(const size_t i) const          { return gms_[i]; } 
-         const GMWITHLOSS&             getModelWithLoss(const size_t i)const   { return gmsWithLoss_[i]; }
-         const LossParameterType&      getLossParameters(const size_t i)const  { return lossParams_[i]; }
-         const std::vector<LabelType>& getGT(const size_t i) const             { return gts_[i]; }
-         Weights&                      getWeights()                            { return weights_; } 
-         size_t                        getNumberOfWeights() const              { return weights_.numberOfWeights(); }
-         size_t                        getNumberOfModels() const               { return gms_.size(); } 
-
-         template<class INF>
-         ValueType                     getTotalLoss(const typename INF::Parameter& para) const;
-
-         template<class INF>
-         ValueType                     getLoss(const typename INF::Parameter& para, const size_t i) const;
-         
-         Dataset(size_t numInstances=0);
+    };
+
+    template<class GM, class LOSS=opengm::learning::NoLoss, class LOSS_GM = DefaultLossGm<GM> >
+    class Dataset{
+    public:
+        typedef GM                       GMType;
+
+        // generate the gm with loss here atm (THIS IS WRONG)
+        typedef typename opengm::meta::EvalIf<
+        opengm::meta::Compare<LOSS_GM, DefaultLossGm<GM> >::value,
+        DefaultLossGm<GM>,
+        meta::Self<LOSS_GM>
+        >::type GMWITHLOSS;
+
+        //typedef GM                       GMWITHLOSS;
+        typedef LOSS                     LossType;
+        typedef typename LOSS::Parameter LossParameterType;
+        typedef typename GM::ValueType   ValueType;
+        typedef typename GM::IndexType   IndexType;
+        typedef typename GM::LabelType   LabelType;
+        typedef opengm::learning::Weights<ValueType> Weights;
+
+        void                          lockModel(const size_t i)               { ++count_[i]; }
+        void                          unlockModel(const size_t i)             { OPENGM_ASSERT(count_[i]>0); --count_[i]; }
+        const GM&                     getModel(const size_t i) const          { return gms_[i]; } 
+        const GMWITHLOSS&             getModelWithLoss(const size_t i)const   { return gmsWithLoss_[i]; }
+        const LossParameterType&      getLossParameters(const size_t i)const  { return lossParams_[i]; }
+        const std::vector<LabelType>& getGT(const size_t i) const             { return gts_[i]; }
+        Weights&                      getWeights()                            { return weights_; } 
+        size_t                        getNumberOfWeights() const              { return weights_.numberOfWeights(); }
+        size_t                        getNumberOfModels() const               { return gms_.size(); } 
+
+        template<class INF>
+        ValueType                     getTotalLoss(const typename INF::Parameter& para) const;
+
+        template<class INF>
+        ValueType                     getLoss(const typename INF::Parameter& para, const size_t i) const;
+        ValueType                     getLoss(const std::vector<LabelType>& conf, const size_t i) const;
+
+        Dataset(size_t numInstances=0);
         //void loadAll(std::string path,std::string prefix); 
- 
+
         friend class DatasetSerialization;
         // friend void loadAll<Dataset<GM,LOSS> > (const std::string datasetpath, const std::string prefix, Dataset<GM,LOSS>& ds);
 
         //~Dataset(){
         //    std::cout<<"KILL DATASET\n";
         //}
-     protected:	
-         std::vector<size_t> count_;
-         std::vector<bool> isCached_;
-         std::vector<GM> gms_; 
-         std::vector<GMWITHLOSS> gmsWithLoss_; 
-         std::vector<LossParameterType> lossParams_;
-         std::vector<std::vector<LabelType> > gts_;
-         Weights weights_;
-
-         void buildModelWithLoss(size_t i);
-      };
+    protected:	
+        std::vector<size_t> count_;
+        std::vector<bool> isCached_;
+        std::vector<GM> gms_; 
+        std::vector<GMWITHLOSS> gmsWithLoss_; 
+        std::vector<LossParameterType> lossParams_;
+        std::vector<std::vector<LabelType> > gts_;
+        Weights weights_;
+
+        void buildModelWithLoss(size_t i);
+    };
       
 
-      template<class GM, class LOSS, class LOSS_GM>
-      Dataset<GM, LOSS, LOSS_GM>::Dataset(size_t numInstances)
-          : count_(std::vector<size_t>(numInstances)),
-            isCached_(std::vector<bool>(numInstances)),
-            gms_(std::vector<GM>(numInstances)),
-            gmsWithLoss_(std::vector<GMWITHLOSS>(numInstances)),
-            gts_(std::vector<std::vector<LabelType> >(numInstances)),
-            weights_(Weights(0)),
-            lossParams_(std::vector<LossParameterType>(numInstances))
-      {
-      }
-
-      template<class GM, class LOSS, class LOSS_GM>
-      template<class INF>
-      typename GM::ValueType Dataset<GM, LOSS, LOSS_GM>::getTotalLoss(const typename INF::Parameter& para) const {
-          ValueType sum=0;
-          for(size_t i=0; i<this->getNumberOfModels(); ++i) {
-             sum += this->getLoss<INF>(para, i);
-          }
-          return sum;
-      }
-
-      template<class GM, class LOSS, class LOSS_GM>
-      template<class INF>
-      typename GM::ValueType Dataset<GM, LOSS, LOSS_GM>::getLoss(const typename INF::Parameter& para, const size_t i) const {
-          LOSS lossFunction(lossParams_[i]);
-          const GM& gm = this->getModel(i);
-          const std::vector<typename INF::LabelType>& gt =  this->getGT(i);
-
-          std::vector<typename INF::LabelType> conf;
-          INF inf(gm,para);
-          inf.infer();
-          inf.arg(conf);
-
-          return lossFunction.loss(gm, conf.begin(), conf.end(), gt.begin(), gt.end());
-
-      }
-
-     template<class GM, class LOSS, class LOSS_GM>
-     void Dataset<GM, LOSS, LOSS_GM>::buildModelWithLoss(size_t i){
-         OPENGM_ASSERT_OP(i, <, lossParams_.size());
-         OPENGM_ASSERT_OP(i, <, gmsWithLoss_.size());
-         OPENGM_ASSERT_OP(i, <, gms_.size());
-         OPENGM_ASSERT_OP(i, <, gts_.size());
-         //std::cout<<"copy gm\n";
-         gmsWithLoss_[i] = gms_[i];    
-         //std::cout<<"copy done\n";
-         LOSS loss(lossParams_[i]);         
-         OPENGM_CHECK_OP(gts_[i].size(),==, gmsWithLoss_[i].numberOfVariables(),"");
-         loss.addLoss(gmsWithLoss_[i], gts_[i].begin());
-      }
-
-/*
-     template<class GM, class LOSS, class LOSS_GM>
-     void Dataset<GM, LOSS, LOSS_GM>::loadAll(std::string datasetpath,std::string prefix){
-
-         //Load Header 
-         std::stringstream hss;
-         hss << datasetpath << "/"<<prefix<<"info.h5";
-         hid_t file =  marray::hdf5::openFile(hss.str());
-         std::vector<size_t> temp(1);
-         marray::hdf5::loadVec(file, "numberOfWeights", temp);
-         size_t numWeights = temp[0];
-         marray::hdf5::loadVec(file, "numberOfModels", temp);
-         size_t numModel = temp[0];
-         marray::hdf5::closeFile(file);
-         
-         gms_.resize(numModel); 
-	 gmsWithLoss_.resize(numModel);
-         gt_.resize(numModel);
-         weights_ = Weights(numWeights);
-         //Load Models and ground truth
-         for(size_t m=0; m<numModel; ++m){
-            std::stringstream ss;
-            ss  << datasetpath <<"/"<<prefix<<"gm_" << m <<".h5"; 
-            hid_t file =  marray::hdf5::openFile(ss.str()); 
-            marray::hdf5::loadVec(file, "gt", gt_[m]);
-            marray::hdf5::closeFile(file);
-            opengm::hdf5::load(gms_[m],ss.str(),"gm"); 
-	    buildModelWithLoss(m);
-         }
-
-      };
-*/
-   }
+    template<class GM, class LOSS, class LOSS_GM>
+    Dataset<GM, LOSS, LOSS_GM>::Dataset(size_t numInstances)
+    : count_(std::vector<size_t>(numInstances)),
+        isCached_(std::vector<bool>(numInstances)),
+        gms_(std::vector<GM>(numInstances)),
+        gmsWithLoss_(std::vector<GMWITHLOSS>(numInstances)),
+        lossParams_(std::vector<LossParameterType>(numInstances)),
+        gts_(std::vector<std::vector<LabelType> >(numInstances)),
+        weights_(Weights(0))
+    {
+    }
+
+    template<class GM, class LOSS, class LOSS_GM>
+    template<class INF>
+    typename GM::ValueType Dataset<GM, LOSS, LOSS_GM>::getTotalLoss(const typename INF::Parameter& para) const {
+        ValueType sum=0;
+        for(size_t i=0; i<this->getNumberOfModels(); ++i) {
+            sum += this->getLoss<INF>(para, i);
+        }
+        return sum;
+    }
+
+    template<class GM, class LOSS, class LOSS_GM>
+    template<class INF>
+    typename GM::ValueType Dataset<GM, LOSS, LOSS_GM>::getLoss(const typename INF::Parameter& para, const size_t i) const {
+        LOSS lossFunction(lossParams_[i]);
+        const GM& gm = this->getModel(i);
+        const std::vector<typename INF::LabelType>& gt =  this->getGT(i);
+
+        std::vector<typename INF::LabelType> conf;
+        INF inf(gm,para);
+        inf.infer();
+        inf.arg(conf);
+
+        return lossFunction.loss(gm, conf.begin(), conf.end(), gt.begin(), gt.end());
+
+    }
+
+    template<class GM, class LOSS, class LOSS_GM>
+    typename GM::ValueType Dataset<GM, LOSS, LOSS_GM>::getLoss(const std::vector<typename GM::LabelType>& conf, const size_t i) const {
+        LOSS lossFunction(lossParams_[i]);
+        const GM& gm = this->getModel(i);
+        const std::vector<LabelType>& gt =  this->getGT(i);
+        return lossFunction.loss(gm, conf.begin(), conf.end(), gt.begin(), gt.end());
+    }
+
+
+
+
+    template<class GM, class LOSS, class LOSS_GM>
+    void Dataset<GM, LOSS, LOSS_GM>::buildModelWithLoss(size_t i){
+        OPENGM_ASSERT_OP(i, <, lossParams_.size());
+        OPENGM_ASSERT_OP(i, <, gmsWithLoss_.size());
+        OPENGM_ASSERT_OP(i, <, gms_.size());
+        OPENGM_ASSERT_OP(i, <, gts_.size());
+        //std::cout<<"copy gm\n";
+        gmsWithLoss_[i] = gms_[i];    
+        //std::cout<<"copy done\n";
+        LOSS loss(lossParams_[i]);         
+        OPENGM_CHECK_OP(gts_[i].size(),==, gmsWithLoss_[i].numberOfVariables(),"");
+        loss.addLoss(gmsWithLoss_[i], gts_[i].begin());
+    }
+
+    // template<class GM, class LOSS, class LOSS_GM>
+    // void Dataset<GM, LOSS, LOSS_GM>::loadAll(std::string datasetpath,std::string prefix){
+    //     //Load Header 
+    //     std::stringstream hss;
+    //     hss << datasetpath << "/"<<prefix<<"info.h5";
+    //     hid_t file =  marray::hdf5::openFile(hss.str());
+    //     std::vector<size_t> temp(1);
+    //     marray::hdf5::loadVec(file, "numberOfWeights", temp);
+    //     size_t numWeights = temp[0];
+    //     marray::hdf5::loadVec(file, "numberOfModels", temp);
+    //     size_t numModel = temp[0];
+    //     marray::hdf5::closeFile(file);
+
+    //     gms_.resize(numModel); 
+    //     gmsWithLoss_.resize(numModel);
+    //     gt_.resize(numModel);
+    //     weights_ = Weights(numWeights);
+    //     //Load Models and ground truth
+    //     for(size_t m=0; m<numModel; ++m){
+    //         std::stringstream ss;
+    //         ss  << datasetpath <<"/"<<prefix<<"gm_" << m <<".h5"; 
+    //         hid_t file =  marray::hdf5::openFile(ss.str()); 
+    //         marray::hdf5::loadVec(file, "gt", gt_[m]);
+    //         marray::hdf5::closeFile(file);
+    //         opengm::hdf5::load(gms_[m],ss.str(),"gm"); 
+    //         buildModelWithLoss(m);
+    //     }
+    // };
+
+}
 } // namespace opengm
 
 #endif 
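
Besides the inference-based getLoss, the dataset can now score a
precomputed labeling directly against the stored ground truth. The
semantics for the Hamming case, sketched in Python (other LOSS types
generalize this):

    def get_loss(gt, conf):
        # Hamming loss: number of variables deviating from the ground truth
        return sum(1.0 for c, g in zip(conf, gt) if c != g)

    print get_loss([0, 1, 1, 0], [0, 1, 0, 0])   # -> 1.0
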
diff --git a/include/opengm/learning/gradient-accumulator.hxx b/include/opengm/learning/gradient-accumulator.hxx
index 92f4c0f..63e432b 100644
--- a/include/opengm/learning/gradient-accumulator.hxx
+++ b/include/opengm/learning/gradient-accumulator.hxx
@@ -10,65 +10,157 @@ namespace learning {
  */
 template <typename ModelWeights, typename ConfigurationType>
 class GradientAccumulator {
+    typedef typename ConfigurationType::const_iterator ConfIter;
 
 public:
 
-	/**
-	 * How to accumulate the gradient on the provided ModelWeights.
-	 */
-	enum Mode {
-
-		Add,
-
-		Subtract
-	};
-
-	/**
-	 * @param gradient
-	 *              ModelWeights reference to store the gradients. Gradient 
-	 *              values will only be added (or subtracted, if mode == 
-	 *              Subtract), so you have to make sure gradient is properly 
-	 *              initialized to zero.
-	 *
-	 * @param configuration
-	 *              Configuration of the variables in the model, to evaluate the 
-	 *              gradient for.
-	 *
-	 * @param mode
-	 *              Add or Subtract the weight gradients from gradient.
-	 */
-	GradientAccumulator(ModelWeights& gradient, const ConfigurationType& configuration, Mode mode = Add) :
-		_gradient(gradient),
-		_configuration(configuration),
-		_mode(mode) {}
-
-	template <typename Iterator, typename FunctionType>
-	void operator()(Iterator begin, Iterator end, const FunctionType& function) {
-
-		ConfigurationType localConfiguration;
-		for (Iterator j = begin; j != end; j++)
-			localConfiguration.push_back(_configuration[*j]);
-
-		for (int i = 0; i < function.numberOfWeights(); i++) {
-
-			int index = function.weightIndex(i);
-
-			double g = function.weightGradient(i, localConfiguration.begin());
-
-			if (_mode == Add)
-				_gradient[index] += g;
-			else
-				_gradient[index] -= g;
-		}
-	}
+    /**
+     * How to accumulate the gradient on the provided ModelWeights.
+     */
+    enum Mode {
+
+        Add,
+
+        Subtract
+    };
+
+    /**
+     * @param gradient
+     *              ModelWeights reference to store the gradients. Gradient 
+     *              values will only be added (or subtracted, if mode == 
+     *              Subtract), so you have to make sure gradient is properly 
+     *              initialized to zero.
+     *
+     * @param configuration
+     *              Configuration of the variables in the model, to evaluate the 
+     *              gradient for.
+     *
+     * @param mode
+     *              Add or Subtract the weight gradients from gradient.
+     */
+    GradientAccumulator(ModelWeights& gradient, const ConfigurationType& configuration, Mode mode = Add) :
+        _gradient(gradient),
+        _configuration(configuration),
+        _mode(mode) {}
+
+    template <typename Iterator, typename FunctionType>
+    void operator()(Iterator begin, Iterator end, const FunctionType& function) {
+
+        typedef opengm::SubsetAccessor<Iterator, ConfIter> Accessor;
+        typedef opengm::AccessorIterator<Accessor, true> Iter;
+        const Accessor accessor(begin, end, _configuration.begin());
+
+        for (int i = 0; i < function.numberOfWeights(); i++) {
+
+            int index = function.weightIndex(i);
+
+            double g = function.weightGradient(i, Iter(accessor, 0));
+
+            if (_mode == Add)
+                _gradient[index] += g;
+            else
+                _gradient[index] -= g;
+        }
+    }
 
 private:
 
-	ModelWeights& _gradient;
-	const ConfigurationType& _configuration;
-	Mode _mode;
+    ModelWeights& _gradient;
+    const ConfigurationType& _configuration;
+    Mode _mode;
 };
 
+
+template<class GM, class LABEL_ITER>
+struct FeatureAccumulator{
+
+    typedef typename GM::LabelType LabelType;
+    typedef typename GM::IndexType IndexType;
+    typedef typename GM::ValueType ValueType;
+    
+
+
+    FeatureAccumulator(const size_t nW)
+    :   accWeights_(nW),
+        gtLabel_(),
+        mapLabel_(){
+            
+        for(size_t i=0; i<accWeights_.size(); ++i){
+            accWeights_[i] = 0.0;
+        }
+    }
+
+    void setLabels(const LABEL_ITER gtLabel, const LABEL_ITER mapLabel){
+        gtLabel_ = gtLabel;
+        mapLabel_  = mapLabel;
+    }
+
+    void resetWeights(){
+        //accFeaturesGt_ = 0.0;
+        //accWeights_ = 0.0;
+        for(size_t i=0; i<accWeights_.size(); ++i){
+            accWeights_[i] = 0.0;
+        }
+    }
+    double getWeight(const size_t wi)const{
+        return accWeights_[wi];
+    }
+    template<class Iter, class F>
+    void operator()(Iter begin, Iter end, const F & f){
+
+        typedef opengm::SubsetAccessor<Iter, LABEL_ITER> Accessor;
+        typedef opengm::AccessorIterator<Accessor, true> AccIter;
+
+
+        // get the number of weights_
+        const size_t nWeights = f.numberOfWeights();
+        if(nWeights>0){
+            // loop over all weights
+            for(size_t wi=0; wi<nWeights; ++wi){
+                // accumulate features for both labeling
+                const size_t gwi = f.weightIndex(wi);
+
+
+                const Accessor accessorGt(begin, end, gtLabel_);
+                const Accessor accessorMap(begin, end, mapLabel_);
+                
+                // for test label
+                accWeights_[gwi] += f.weightGradient(wi, AccIter(accessorMap, 0));
+
+                // for gt label
+                accWeights_[gwi] -= f.weightGradient(wi, AccIter(accessorGt, 0));
+            }
+        }
+    }
+
+    void accumulateFromOther(const FeatureAccumulator & otherAcc){
+        for(size_t i=0; i<accWeights_.size(); ++i){
+            accWeights_[i] += otherAcc.accWeights_[i];
+        }
+    }
+
+    void accumulateModelFeatures(
+        const GM & gm, 
+        const LABEL_ITER & gtLabel,
+        const LABEL_ITER & mapLabel
+    ){
+        gtLabel_ = gtLabel;
+        mapLabel_  = mapLabel;
+
+        // iterate over all factors
+        // and accumulate features
+        for(size_t fi=0; fi<gm.numberOfFactors(); ++fi){
+            gm[fi].callViFunctor(*this);
+        }
+    }
+    opengm::learning::Weights<double>  accWeights_;
+    LABEL_ITER gtLabel_;
+    LABEL_ITER mapLabel_;
+};
+
+
+
+
 }} // namespace opengm::learning
 
 #endif // OPENGM_LEARNING_GRADIENT_ACCUMULATOR_H__
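
Per weight, FeatureAccumulator sums the weight gradient over all factors
evaluated at the MAP labeling and subtracts the same sum evaluated at the
ground truth, so getWeight(wi) holds the feature difference the perceptron
steps along. A numpy sketch of that reduction (per-factor gradient rows
assumed precomputed):

    import numpy

    def accumulate(grads_map, grads_gt):
        # grads_*: one row of per-weight gradients per factor
        return numpy.sum(grads_map, axis=0) - numpy.sum(grads_gt, axis=0)
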
diff --git a/include/opengm/learning/structured_perceptron.hxx b/include/opengm/learning/structured_perceptron.hxx
index 6337df8..0056727 100644
--- a/include/opengm/learning/structured_perceptron.hxx
+++ b/include/opengm/learning/structured_perceptron.hxx
@@ -3,80 +3,19 @@
 #define OPENGM_GRIDSEARCH_LEARNER_HXX
 
 #include <vector>
+#include <opengm/inference/inference.hxx>
+#include <opengm/graphicalmodel/weights.hxx>
+#include <opengm/utilities/random.hxx>
+#include <opengm/learning/gradient-accumulator.hxx>
+#include <omp.h>
+
 
 namespace opengm {
     namespace learning {
 
-    
-    // map a global labeling 
-    // to a factor labeling iterator
-
-
-
-    template<class GM, class LABEL_ITER>
-    struct FeatureAccumulator{
-
-        typedef typename GM::LabelType LabelType;
-        typedef typename GM::IndexType IndexType;
-        typedef typename GM::ValueType ValueType;
-
-
-        FeatureAccumulator(const size_t nW)
-        :   accFeaturesGt_(nW),
-            accFeaturesMap_(nW),
-            gtLabel_(),
-            mapLabel_(),
-            factor_(NULL){
-        }
-
-        void setLabels(const LABEL_ITER gtLabel, const LABEL_ITER mapLabel){
-            gtLabel_ = gtLabel;
-            mapLabel_  = mapLabel;
-        }
-
-        void resetWeights(){
-            for(size_t i=0; i<accFeaturesGt_.size(); ++i){
-                accFeaturesGt_[i] = 0.0;
-                accFeaturesMap_[i] = 0.0;
-            }
-        }
-        double fDiff(const size_t wi)const{
-            return accFeaturesMap_[wi] - accFeaturesGt_[wi];
-        }
-        void setFactor(const typename GM::FactorType & factor){
-            factor_ = &factor;
-        }
-        template<class F>
-        void operator()(const F & f){
-
-            // get the number of weights
-            const size_t nWeights = f.numberOfWeights();
-            if(nWeights>0){
-                // loop over all weights
-                for(size_t wi=0; wi<nWeights; ++wi){
-                    // accumulate features for both labeling
-                    const size_t gwi = f.weightIndex(wi);
-
-                    // for gt label
-                    accFeaturesGt_[gwi] += f.weightGradient(wi, factor_->gmToFactorLabelsBegin(gtLabel_));
-
-                    // for test label
-                    accFeaturesMap_[gwi] += f.weightGradient(wi, factor_->gmToFactorLabelsBegin(mapLabel_));
-                }
-            }
-        }
 
 
-        std::vector<double>  accFeaturesGt_;
-        std::vector<double>  accFeaturesMap_;
-        LABEL_ITER gtLabel_;
-        LABEL_ITER mapLabel_;
-        const typename  GM::FactorType * factor_;
-    };
-
-
-
-      
     template<class DATASET>
     class StructuredPerceptron
     {
@@ -88,19 +27,33 @@ namespace opengm {
         typedef typename GMType::IndexType IndexType;
         typedef typename GMType::LabelType LabelType; 
 
+        typedef typename std::vector<LabelType>::const_iterator LabelIterator;
+        typedef FeatureAccumulator<GMType, LabelIterator> FeatureAcc;
+
         class Parameter{
         public:
+
+            enum LearningMode{
+                Online = 0,
+                Batch = 2
+            };
+
+
             Parameter(){
                 eps_ = 0.00001;
-                maxIterations_ = 0;
+                maxIterations_ = 10000;
                 stopLoss_ = 0.0;
-                kappa_ = 0.1;
+                decayExponent_ = 0.0;
+                decayT0_ = 0.0;
+                learningMode_ = Online;
             }       
 
             double eps_;
             size_t maxIterations_;
             double stopLoss_;
-            double kappa_;
+            double decayExponent_;
+            double decayT0_;
+            LearningMode learningMode_;
         };
 
 
@@ -114,23 +67,35 @@ namespace opengm {
         const opengm::learning::Weights<double>& getWeights(){return weights_;}
         Parameter& getLerningParameters(){return para_;}
 
-        private:
 
-        template<class INF, class FEATURE_ACCUMULATOR>
-        double accumulateFeatures(const typename INF::Parameter& para, FEATURE_ACCUMULATOR & featureAcc); 
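+        // step-size schedule: rate(t) = (decayT0 + t)^decayExponent, treated as 1.0 when decayExponent is numerically zero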
+        double getLearningRate()const{
+            if(para_.decayExponent_<=0.000000001 && para_.decayExponent_>=-0.000000001 ){
+                return 1.0;
+            }
+            else{
+                return std::pow(para_.decayT0_ + static_cast<double>(iteration_),para_.decayExponent_);
+            }
+        }
+
+    private:
+
+        double updateWeights();
 
         DATASET& dataset_;
         opengm::learning::Weights<double> weights_;
         Parameter para_;
-        }; 
-
-        template<class DATASET>
-        StructuredPerceptron<DATASET>::StructuredPerceptron(DATASET& ds, const Parameter& p )
-        : dataset_(ds), para_(p)
-        {
-            weights_ = opengm::learning::Weights<double>(ds.getNumberOfWeights());
-      
-        }
+        size_t iteration_;
+        FeatureAcc featureAcc_;
+    }; 
+
+    template<class DATASET>
+    StructuredPerceptron<DATASET>::StructuredPerceptron(DATASET& ds, const Parameter& p )
+    : dataset_(ds), para_(p),iteration_(0),featureAcc_(ds.getNumberOfWeights())
+    {
+        featureAcc_.resetWeights();
+        weights_ = opengm::learning::Weights<double>(ds.getNumberOfWeights());
+  
+    }
 
 
     template<class DATASET>
@@ -138,112 +103,139 @@ namespace opengm {
     void StructuredPerceptron<DATASET>::learn(const typename INF::Parameter& para){
 
 
-        typedef typename std::vector<LabelType>::const_iterator LabelIterator;
-        typedef FeatureAccumulator<GMType, LabelIterator> FeatureAcc;
-
-
         const size_t nModels = dataset_.getNumberOfModels();
-        const size_t nWegihts = dataset_.getNumberOfWeights();
+        const size_t nWeights = dataset_.getNumberOfWeights();
 
-        FeatureAcc featureAcc(nWegihts);
-
-
-        size_t iteration = 0 ;
-        while(true){
-            if(para_.maxIterations_!=0 && iteration>para_.maxIterations_){
-                std::cout<<"reached max iteration"<<"\n";
-                break;
-            }
-
-            // accumulate features
-            double currentLoss = this-> template accumulateFeatures<INF, FeatureAcc>(para, featureAcc);
-            
 
-            if(currentLoss < para_.stopLoss_){
-                std::cout<<"reached stopLoss"<<"\n";
-                break;
-            }
 
-            //if(currentLoss==0){
-            //    doLearning = false;
-            //    break;
-            //}
-
-            double wChange = 0.0;
-            // update weights
-            for(size_t wi=0; wi<nWegihts; ++wi){
-                const double learningRate = 1.0 /((1.0/para_.kappa_)*std::sqrt(1.0 + iteration));
-                const double wOld = dataset_.getWeights().getWeight(wi);
-                const double wNew = wOld + learningRate*featureAcc.fDiff(wi);
-                wChange += std::pow(wOld-wNew,2);
-                dataset_.getWeights().setWeight(wi, wNew);
-            }
-            ++iteration;
-            if(iteration % 25 ==0)
-                std::cout<<iteration<<" loss "<<currentLoss<<" dw "<<wChange<<"\n";
 
-            if(wChange <= para_.eps_ ){
-                std::cout<<"converged"<<"\n";
-                break;
-            }
-        }
-        weights_ = dataset_.getWeights();
-    }
-
-    template<class DATASET>
-    template<class INF, class FEATURE_ACCUMULATOR>
-    double StructuredPerceptron<DATASET>::accumulateFeatures(
-        const typename INF::Parameter& para,
-        FEATURE_ACCUMULATOR & featureAcc
-    ){
 
+        if(para_.learningMode_ == Parameter::Online){
+            RandomUniform<size_t> randModel(0, nModels);
+            std::cout<<"online mode\n";
+            for(iteration_=0 ; iteration_<para_.maxIterations_; ++iteration_){
 
-        typedef typename std::vector<LabelType>::const_iterator LabelIterator;
-        typedef FeatureAccumulator<GMType, LabelIterator> FeatureAcc;
-        const size_t nModels = dataset_.getNumberOfModels();
-
-        double totalLoss=0.0;
-
-        // reset the accumulated features
-        featureAcc.resetWeights();
+                if(iteration_%nModels==0){
+                    std::cout<<"loss :"<<dataset_. template getTotalLoss<INF>(para)<<"\n";
+                }
 
-        // iterate over all models
-        for(size_t gmi=0; gmi<nModels; ++gmi){
 
-            // lock the model
-            dataset_.lockModel(gmi);
+                // get random model
+                const size_t gmi = randModel();
+                // lock the model
+                dataset_.lockModel(gmi);
+                const GMType & gm = dataset_.getModel(gmi);
 
-            // get model
-            const GMType & gm = dataset_.getModel(gmi);
+                // do inference
+                std::vector<LabelType> arg;
+                opengm::infer<INF>(gm, para, arg);
+                featureAcc_.resetWeights();
+                featureAcc_.accumulateModelFeatures(gm, dataset_.getGT(gmi).begin(), arg.begin());
+                dataset_.unlockModel(gmi);
 
-            // do inference
-            INF inf(gm, para);
-            std::vector<LabelType> arg;
-            inf.infer();
-            inf.arg(arg);
+                // update weights
+                const double wChange = updateWeights();
 
-            LossType lossFunction(dataset_.getLossParameters(gmi));
+            }
+        }
+        else if(para_.learningMode_ == Parameter::Batch){
+            std::cout<<"batch mode\n";
+            for(iteration_=0 ; iteration_<para_.maxIterations_; ++iteration_){
+                // print the loss every iteration (raise the modulus to print less often)
+                if(iteration_%1==0){
+                    std::cout<<"loss :"<<dataset_. template getTotalLoss<INF>(para)<<"\n";
+                }
 
-            totalLoss +=lossFunction.loss(gm, arg.begin(), arg.end(),
-                dataset_.getGT(gmi).begin(), dataset_.getGT(gmi).end());
+                // reset the weights
+                featureAcc_.resetWeights();
+
+
+
+                omp_lock_t modelLockUnlock;
+                omp_init_lock(&modelLockUnlock);
+
+                omp_lock_t featureAccLock;
+                omp_init_lock(&featureAccLock);
+
+
+                #pragma omp parallel for
+                for(size_t gmi=0; gmi<nModels; ++gmi)
+                {
+                    
+                    // lock the model
+                    omp_set_lock(&modelLockUnlock);
+                    dataset_.lockModel(gmi);     
+                    omp_unset_lock(&modelLockUnlock);
+                        
+                    
+
+                    const GMType & gm = dataset_.getModel(gmi);
+                    //run inference
+                    std::vector<LabelType> arg;
+                    opengm::infer<INF>(gm, para, arg);
+
+
+                    // 
+                    FeatureAcc featureAcc(nWeights);
+                    featureAcc.accumulateModelFeatures(gm, dataset_.getGT(gmi).begin(), arg.begin());
+
+
+                    // acc features
+                    omp_set_lock(&featureAccLock);
+                    featureAcc_.accumulateFromOther(featureAcc);
+                    omp_unset_lock(&featureAccLock);
+
+                    // unlock the model
+                    omp_set_lock(&modelLockUnlock);
+                    dataset_.unlockModel(gmi);     
+                    omp_unset_lock(&modelLockUnlock);
+                }
 
-            // pass arg and gt to featureAccumulator
-            featureAcc.setLabels(dataset_.getGT(gmi).begin(), arg.begin());
+                // update the weights
+                const double wChange = updateWeights();
 
-            
-            // iterate over all factors
-            // and accumulate features
-            for(size_t fi=0; fi<gm.numberOfFactors(); ++fi){
-                featureAcc.setFactor(gm[fi]);
-                gm[fi].callFunctor(featureAcc);
             }
-            // unlock model
-            dataset_.unlockModel(gmi);
         }
 
-        return totalLoss;
+        weights_ = dataset_.getWeights();
     }
 
+
+    template<class DATASET>
+    double StructuredPerceptron<DATASET>::updateWeights(){
+        double wChange = 0.0;
+        const size_t nWeights = dataset_.getNumberOfWeights();
+        for(size_t wi=0; wi<nWeights; ++wi){
+            const double wOld = dataset_.getWeights().getWeight(wi);
+            const double wNew = wOld + getLearningRate()*featureAcc_.getWeight(wi);
+            wChange += std::pow(wOld-wNew,2);
+            dataset_.getWeights().setWeight(wi, wNew);
+        }
+        weights_ = dataset_.getWeights();
+        return wChange;
+    }
 }
 }
 #endif
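
Condensed, the new batch mode runs MAP inference on every model (in
parallel, guarded by the two OpenMP locks), accumulates the per-weight
feature difference, and takes one step on the shared weights. A Python
sketch under the same sign convention as updateWeights(); map_inference
and features are hypothetical stand-ins for the inference call and the
per-model gradient accumulation:

    import numpy

    def batch_iteration(models, weights, rate, map_inference, features):
        # acc = dPhi(MAP) - dPhi(GT), summed over models;
        # then wNew = wOld + rate * acc, as in updateWeights()
        acc = numpy.zeros(len(weights))
        for gm, gt in models:
            arg = map_inference(gm, weights)
            acc += features(gm, arg) - features(gm, gt)
        return weights + rate * acc
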
diff --git a/include/opengm/utilities/shape_accessor.hxx b/include/opengm/utilities/shape_accessor.hxx
index ac5a355..6d9b45e 100644
--- a/include/opengm/utilities/shape_accessor.hxx
+++ b/include/opengm/utilities/shape_accessor.hxx
@@ -61,13 +61,53 @@ namespace opengm {
       const value_type operator[](const size_t j) const 
          { return factor_->numberOfLabels(j); }
       bool operator==(const FactorShapeAccessor<FACTOR> & other) const 
          { return factor_ == other.factor_; }
    
    private:
       factor_pointer factor_;
    };
    
 
+
+   template<class SUBSET_ITERATOR, class GM_LABEL_ITER>
+   class SubsetAccessor {
+   public:
+      typedef typename std::iterator_traits<GM_LABEL_ITER>::value_type value_type;
+
+      typedef const value_type reference;
+      typedef const value_type* pointer;
+
+      SubsetAccessor()
+         :  sBegin_(),
+            sEnd_(),
+            gmLabelIter_()
+         {}
+      SubsetAccessor(SUBSET_ITERATOR sBegin, SUBSET_ITERATOR sEnd , GM_LABEL_ITER iter)
+         :  sBegin_(sBegin),
+            sEnd_(sEnd),
+            gmLabelIter_(iter)
+         {}
+      size_t size() const 
+         { return std::distance(sBegin_, sEnd_); }
+      reference operator[](const size_t j) 
+         { return gmLabelIter_[sBegin_[j]]; }
+      const value_type operator[](const size_t j) const 
+         { return gmLabelIter_[sBegin_[j]];  }
+      bool operator==(const SubsetAccessor  & other) const 
+      { 
+        return sBegin_ == other.sBegin_ && 
+               sEnd_ == other.sEnd_ && 
+               gmLabelIter_==other.gmLabelIter_; 
+      }
+   
+   private:
+      SUBSET_ITERATOR sBegin_;
+      SUBSET_ITERATOR sEnd_;
+      GM_LABEL_ITER gmLabelIter_;
+   };
+
+
+
    template<class FACTOR, class GM_LABEL_ITER>
    class GmLabelFactorLabelAccessor {
    public:
@@ -93,7 +133,7 @@ namespace opengm {
       const value_type operator[](const size_t j) const 
          { return gmLabelIter_[factor_->variableIndex(j)]; }
-      bool operator==(const FactorShapeAccessor<FACTOR> & other) const 
-      { return factor_ == other.factor_; 
+      bool operator==(const GmLabelFactorLabelAccessor & other) const 
+      { return factor_ == other.factor_ && gmLabelIter_==other.gmLabelIter_; 
       }
    
    private:
diff --git a/src/interfaces/python/opengm/learning/__init__.py b/src/interfaces/python/opengm/learning/__init__.py
index 00ef203..3a5be86 100644
--- a/src/interfaces/python/opengm/learning/__init__.py
+++ b/src/interfaces/python/opengm/learning/__init__.py
@@ -92,21 +92,34 @@ def gridSearchLearner(dataset, lowerBounds, upperBounds, nTestPoints):
     return learner
 
 
-def structPerceptron(dataset, eps=1e-5, maxIterations=0, stopLoss=0.0, kappa=0.1):
+def structPerceptron(dataset, learningMode='online', eps=1e-5, maxIterations=10000, stopLoss=0.0, decayExponent=0.0, decayT0=0.0):
 
 
     if dataset.__class__.lossType == 'hamming':
         learnerCls = StructPerceptron_HammingLoss
         learnerParamCls = StructPerceptron_HammingLossParameter
+        learningModeEnum = StructPerceptron_HammingLossParameter_LearningMode
     elif dataset.__class__.lossType == 'generalized-hamming':
-        learnerCls = StructPerceptron_GeneralizedHammingLossParameter
-        learnerParamCls = StructPerceptron_GeneralizedHammingLoss
+        learnerCls = StructPerceptron_GeneralizedHammingLoss
+        learnerParamCls = StructPerceptron_GeneralizedHammingLossParameter
+        learningModeEnum = StructPerceptron_GeneralizedHammingLossParameter_LearningMode
+
+    if learningMode not in ('online', 'batch'):
+        raise RuntimeError("wrong learning mode, must be 'online' or 'batch'")
+
+    if learningMode == 'online':
+        lm = learningModeEnum.online
+    else:
+        lm = learningModeEnum.batch
 
     param = learnerParamCls()
-    param.eps = eps
-    param.maxIterations
-    param.stopLoss = stopLoss
-    param.kappa = kappa
+    param.eps = float(eps)
+    param.maxIterations = int(maxIterations)
+    param.stopLoss = float(stopLoss)
+    param.decayExponent = float(decayExponent)
+    param.decayT0 = float(decayT0)
+    param.learningMode = lm
     learner = learnerCls(dataset, param)
     return learner
 
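
Example use of the updated factory, mirroring fubar/real_example_2.py
(parameter values illustrative):

    learner = learning.structPerceptron(dataset,
                                        learningMode='batch',
                                        maxIterations=1000,
                                        decayExponent=-0.001)
    learner.learn(infCls=opengm.inference.QpboExternal,
                  parameter=opengm.InfParam())
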
diff --git a/src/interfaces/python/opengm/learning/pyStructPerceptron.cxx b/src/interfaces/python/opengm/learning/pyStructPerceptron.cxx
index f9a2f06..f5554a7 100644
--- a/src/interfaces/python/opengm/learning/pyStructPerceptron.cxx
+++ b/src/interfaces/python/opengm/learning/pyStructPerceptron.cxx
@@ -38,15 +38,27 @@ namespace opengm{
 
         const std::string paramClsName = clsName + std::string("Parameter");
 
+        const std::string paramEnumLearningModeName = clsName + std::string("Parameter_LearningMode");
 
+        // learner param enum
+        bp::enum_<typename PyLearnerParam::LearningMode>(paramEnumLearningModeName.c_str())
+            .value("online", PyLearnerParam::Online)
+            .value("batch", PyLearnerParam::Batch)
+        ;
+
+        // learner param
         bp::class_<PyLearnerParam>(paramClsName.c_str(), bp::init<>())
             .def("__init__", make_constructor(&pyStructuredPerceptronParamConstructor<PyLearnerParam> ,boost::python::default_call_policies()))
             .def_readwrite("eps",  &PyLearnerParam::eps_)
             .def_readwrite("maxIterations", &PyLearnerParam::maxIterations_)
             .def_readwrite("stopLoss", &PyLearnerParam::stopLoss_)
-            .def_readwrite("kappa", &PyLearnerParam::kappa_)
+            .def_readwrite("decayExponent", &PyLearnerParam::decayExponent_)
+            .def_readwrite("decayT0", &PyLearnerParam::decayT0_)
+            .def_readwrite("learningMode", &PyLearnerParam::learningMode_)
         ;
 
+
+        // learner
         bp::class_<PyLearner>( clsName.c_str(), bp::no_init )
         .def("__init__", make_constructor(&pyStructuredPerceptronConstructor<PyLearner> ,boost::python::default_call_policies()))
         .def(LearnerInferenceSuite<PyLearner>())

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git


